1diff --git a/build.rs b/build.rs
2deleted file mode 100644
3index cf1ff45..0000000
4--- a/build.rs
5+++ /dev/null
6@@ -1,408 +0,0 @@
7-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
8-// file at the top-level directory of this distribution and at
9-// http://rust-lang.org/COPYRIGHT.
10-//
11-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
12-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
13-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
14-// option. This file may not be copied, modified, or distributed
15-// except according to those terms.
16-
17-use std::{
18-    env,
19-    ffi::OsString,
20-    fs, io,
21-    path::{Path, PathBuf},
22-    process::Command,
23-};
24-
25-include!("src/env.rs");
26-
27-macro_rules! info {
28-    ($($args:tt)*) => { println!($($args)*) }
29-}
30-
31-macro_rules! warning {
32-    ($arg:tt, $($args:tt)*) => {
33-        println!(concat!(concat!("cargo:warning=\"", $arg), "\""), $($args)*)
34-    }
35-}
36-
37-fn read_and_watch_env(name: &str) -> Result<String, env::VarError> {
38-    println!("cargo:rerun-if-env-changed={name}");
39-    env::var(name)
40-}
41-
42-fn read_and_watch_env_os(name: &str) -> Option<OsString> {
43-    println!("cargo:rerun-if-env-changed={name}");
44-    env::var_os(name)
45-}
46-
47-fn copy_recursively(src: &Path, dst: &Path) -> io::Result<()> {
48-    if !dst.exists() {
49-        fs::create_dir_all(dst)?;
50-    }
51-    for entry in fs::read_dir(src)? {
52-        let entry = entry?;
53-        let ft = entry.file_type()?;
54-        if ft.is_dir() {
55-            // There should be very few layers in the project, so use recursion to keep things simple.
56-            copy_recursively(&entry.path(), &dst.join(entry.file_name()))?;
57-        } else {
58-            fs::copy(entry.path(), dst.join(entry.file_name()))?;
59-        }
60-    }
61-    Ok(())
62-}
63-
64-// TODO: split the main function and remove the following allow.
65-#[allow(clippy::cognitive_complexity)]
66-fn main() {
67-    let target = env::var("TARGET").expect("TARGET was not set");
68-    let host = env::var("HOST").expect("HOST was not set");
69-    let num_jobs = env::var("NUM_JOBS").expect("NUM_JOBS was not set");
70-    let out_dir = PathBuf::from(env::var_os("OUT_DIR").expect("OUT_DIR was not set"));
71-    let src_dir = env::current_dir().expect("failed to get current directory");
72-
73-    info!("TARGET={}", target);
74-    info!("HOST={}", host);
75-    info!("NUM_JOBS={}", num_jobs);
76-    info!("OUT_DIR={:?}", out_dir);
77-    let build_dir = out_dir.join("build");
78-    info!("BUILD_DIR={:?}", build_dir);
79-    info!("SRC_DIR={:?}", src_dir);
80-
81-    if UNSUPPORTED_TARGETS.iter().any(|i| target.contains(i)) {
82-        panic!("jemalloc does not support target: {}", target);
83-    }
84-
85-    if UNTESTED_TARGETS.iter().any(|i| target.contains(i)) {
86-        warning!("jemalloc support for `{}` is untested", target);
87-    }
88-
89-    let mut use_prefix =
90-        env::var("CARGO_FEATURE_UNPREFIXED_MALLOC_ON_SUPPORTED_PLATFORMS").is_err();
91-
92-    if !use_prefix
93-        && NO_UNPREFIXED_MALLOC_TARGETS
94-            .iter()
95-            .any(|i| target.contains(i))
96-    {
97-        warning!(
98-            "Unprefixed `malloc` requested on unsupported platform `{}` => using prefixed `malloc`",
99-            target
100-        );
101-        use_prefix = true;
102-    }
103-
104-    // this has to occur before the early return when JEMALLOC_OVERRIDE is set
105-    if use_prefix {
106-        println!("cargo:rustc-cfg=prefixed");
107-    }
108-
109-    if let Some(jemalloc) = read_and_watch_env_os("JEMALLOC_OVERRIDE") {
110-        info!("jemalloc override set");
111-        let jemalloc = PathBuf::from(jemalloc);
112-        assert!(
113-            jemalloc.exists(),
114-            "Path to `jemalloc` in `JEMALLOC_OVERRIDE={}` does not exist",
115-            jemalloc.display()
116-        );
117-        println!(
118-            "cargo:rustc-link-search=native={}",
119-            jemalloc.parent().unwrap().display()
120-        );
121-        let stem = jemalloc.file_stem().unwrap().to_str().unwrap();
122-        let name = jemalloc.file_name().unwrap().to_str().unwrap();
123-        let kind = if name.ends_with(".a") {
124-            "static"
125-        } else {
126-            "dylib"
127-        };
128-        println!("cargo:rustc-link-lib={}={}", kind, &stem[3..]);
129-        return;
130-    }
131-    // Disable -Wextra warnings - jemalloc doesn't compile free of warnings with
132-    // it enabled: https://github.com/jemalloc/jemalloc/issues/1196
133-    let compiler = cc::Build::new().extra_warnings(false).get_compiler();
134-    let cflags = compiler
135-        .args()
136-        .iter()
137-        .map(|s| s.to_str().unwrap())
138-        .collect::<Vec<_>>()
139-        .join(" ");
140-    info!("CC={:?}", compiler.path());
141-    info!("CFLAGS={:?}", cflags);
142-
143-    assert!(out_dir.exists(), "OUT_DIR does not exist");
144-    let jemalloc_repo_dir = PathBuf::from("jemalloc");
145-    info!("JEMALLOC_REPO_DIR={:?}", jemalloc_repo_dir);
146-
147-    if build_dir.exists() {
148-        fs::remove_dir_all(build_dir.clone()).unwrap();
149-    }
150-    // Copy jemalloc submodule to the OUT_DIR
151-    copy_recursively(&jemalloc_repo_dir, &build_dir)
152-        .expect("failed to copy jemalloc source code to OUT_DIR");
153-    assert!(build_dir.exists());
154-
155-    // Configuration files
156-    let config_files = ["configure", "VERSION"];
157-
158-    // Copy the configuration files to jemalloc's source directory
159-    for f in &config_files {
160-        fs::copy(Path::new("configure").join(f), build_dir.join(f))
161-            .expect("failed to copy config file to OUT_DIR");
162-    }
163-
164-    // Run configure:
165-    let configure = build_dir.join("configure");
166-    let mut cmd = Command::new("sh");
167-    cmd.arg(
168-        configure
169-            .to_str()
170-            .unwrap()
171-            .replace("C:\\", "/c/")
172-            .replace('\\', "/"),
173-    )
174-    .current_dir(&build_dir)
175-    .env("CC", compiler.path())
176-    .env("CFLAGS", cflags.clone())
177-    .env("LDFLAGS", cflags.clone())
178-    .env("CPPFLAGS", cflags)
179-    .arg("--disable-cxx")
180-    .arg("--enable-doc=no")
181-    .arg("--enable-shared=no");
182-
183-    if target.contains("ios") {
184-        // newer iOS devices have 16 KiB page sizes:
185-        // closed: https://github.com/gnzlbg/jemallocator/issues/68
186-        cmd.arg("--with-lg-page=14");
187-    }
188-
189-    // collect `malloc_conf` string:
190-    let mut malloc_conf = String::new();
191-
192-    if let Some(bg) = BackgroundThreadSupport::new(&target) {
193-        // `jemalloc` is compiled with background thread run-time support on
194-        // available platforms by default so there is nothing to do to enable
195-        // it.
196-
197-        if bg.always_enabled {
198-            // Background thread support does not enable background threads at
199-            // run-time, just support for enabling them via run-time configuration
200-            // options (they are disabled by default)
201-
202-            // The `enable_background_threads` cargo feature forces background
203-            // threads to be enabled at run-time by default:
204-            malloc_conf += "background_thread:true";
205-        }
206-    } else {
207-        // Background thread run-time support is disabled by
208-        // disabling background threads at compile-time:
209-        malloc_conf += "background_thread:false";
210-    }
211-
212-    if let Ok(malloc_conf_opts) = read_and_watch_env("JEMALLOC_SYS_WITH_MALLOC_CONF") {
213-        if !malloc_conf.is_empty() {
214-            malloc_conf.push(',');
215-        }
216-        malloc_conf.push_str(&malloc_conf_opts);
217-    }
218-
219-    if !malloc_conf.is_empty() {
220-        info!("--with-malloc-conf={}", malloc_conf);
221-        cmd.arg(format!("--with-malloc-conf={malloc_conf}"));
222-    }
223-
224-    if let Ok(lg_page) = read_and_watch_env("JEMALLOC_SYS_WITH_LG_PAGE") {
225-        info!("--with-lg-page={}", lg_page);
226-        cmd.arg(format!("--with-lg-page={lg_page}"));
227-    }
228-
229-    if let Ok(lg_hugepage) = read_and_watch_env("JEMALLOC_SYS_WITH_LG_HUGEPAGE") {
230-        info!("--with-lg-hugepage={}", lg_hugepage);
231-        cmd.arg(format!("--with-lg-hugepage={lg_hugepage}"));
232-    }
233-
234-    if let Ok(lg_quantum) = read_and_watch_env("JEMALLOC_SYS_WITH_LG_QUANTUM") {
235-        info!("--with-lg-quantum={}", lg_quantum);
236-        cmd.arg(format!("--with-lg-quantum={lg_quantum}"));
237-    }
238-
239-    if let Ok(lg_vaddr) = read_and_watch_env("JEMALLOC_SYS_WITH_LG_VADDR") {
240-        info!("--with-lg-vaddr={}", lg_vaddr);
241-        cmd.arg(format!("--with-lg-vaddr={lg_vaddr}"));
242-    }
243-
244-    if use_prefix {
245-        cmd.arg("--with-jemalloc-prefix=_rjem_");
246-        info!("--with-jemalloc-prefix=_rjem_");
247-    }
248-
249-    cmd.arg("--with-private-namespace=_rjem_");
250-
251-    if env::var("CARGO_FEATURE_DEBUG").is_ok() {
252-        info!("CARGO_FEATURE_DEBUG set");
253-        cmd.arg("--enable-debug");
254-    }
255-
256-    if env::var("CARGO_FEATURE_PROFILING").is_ok() {
257-        info!("CARGO_FEATURE_PROFILING set");
258-        cmd.arg("--enable-prof");
259-    }
260-
261-    if env::var("CARGO_FEATURE_STATS").is_ok() {
262-        info!("CARGO_FEATURE_STATS set");
263-        cmd.arg("--enable-stats");
264-    }
265-
266-    if env::var("CARGO_FEATURE_DISABLE_INITIAL_EXEC_TLS").is_ok() {
267-        info!("CARGO_FEATURE_DISABLE_INITIAL_EXEC_TLS set");
268-        cmd.arg("--disable-initial-exec-tls");
269-    }
270-
271-    if env::var("CARGO_FEATURE_DISABLE_CACHE_OBLIVIOUS").is_ok() {
272-        info!("CARGO_FEATURE_DISABLE_CACHE_OBLIVIOUS set");
273-        cmd.arg("--disable-cache-oblivious");
274-    }
275-
276-    cmd.arg(format!("--host={}", gnu_target(&target)));
277-    cmd.arg(format!("--build={}", gnu_target(&host)));
278-    cmd.arg(format!("--prefix={}", out_dir.display()));
279-
280-    run_and_log(&mut cmd, &build_dir.join("config.log"));
281-
282-    // Make:
283-    let make = make_cmd(&host);
284-    run(Command::new(make)
285-        .current_dir(&build_dir)
286-        .arg("-j")
287-        .arg(num_jobs.clone()));
288-
289-    // Skip watching this environment variable to avoid rebuilds in CI.
290-    if env::var("JEMALLOC_SYS_RUN_JEMALLOC_TESTS").is_ok() {
291-        info!("Building and running jemalloc tests...");
292-        // Make tests:
293-        run(Command::new(make)
294-            .current_dir(&build_dir)
295-            .arg("-j")
296-            .arg(num_jobs.clone())
297-            .arg("tests"));
298-
299-        // Run tests:
300-        run(Command::new(make).current_dir(&build_dir).arg("check"));
301-    }
302-
303-    // Make install:
304-    run(Command::new(make)
305-        .current_dir(&build_dir)
306-        .arg("install_lib_static")
307-        .arg("install_include")
308-        .arg("-j")
309-        .arg(num_jobs));
310-
311-    println!("cargo:root={}", out_dir.display());
312-
313-    // Linkage directives to pull in jemalloc and its dependencies.
314-    //
315-    // On some platforms we need to be sure to link in `pthread` which jemalloc
316-    // depends on, and specifically on android we need to also link to libgcc.
317-    // Currently jemalloc is compiled with gcc which will generate calls to
318-    // intrinsics that are libgcc specific (e.g. those intrinsics aren't present in
319-    // libcompiler-rt), so link that in to get that support.
320-    if target.contains("windows") {
321-        println!("cargo:rustc-link-lib=static=jemalloc");
322-    } else {
323-        println!("cargo:rustc-link-lib=static=jemalloc_pic");
324-    }
325-    println!("cargo:rustc-link-search=native={}/lib", build_dir.display());
326-    if target.contains("android") {
327-        println!("cargo:rustc-link-lib=gcc");
328-    } else if !target.contains("windows") {
329-        println!("cargo:rustc-link-arg=-pthread");
330-    }
331-    // GCC may generate a __atomic_exchange_1 library call which requires -latomic
332-    // during the final linking. https://github.com/riscv-collab/riscv-gcc/issues/12
333-    if target.contains("riscv") {
334-        println!("cargo:rustc-link-lib=atomic");
335-    }
336-    println!("cargo:rerun-if-changed=jemalloc");
337-}
338-
339-fn run_and_log(cmd: &mut Command, log_file: &Path) {
340-    execute(cmd, || {
341-        run(Command::new("tail").arg("-n").arg("100").arg(log_file));
342-    })
343-}
344-
345-fn run(cmd: &mut Command) {
346-    execute(cmd, || ());
347-}
348-
349-fn execute(cmd: &mut Command, on_fail: impl FnOnce()) {
350-    println!("running: {cmd:?}");
351-    let status = match cmd.status() {
352-        Ok(status) => status,
353-        Err(e) => panic!("failed to execute command: {}", e),
354-    };
355-    if !status.success() {
356-        on_fail();
357-        panic!(
358-            "command did not execute successfully: {:?}\n\
359-             expected success, got: {}",
360-            cmd, status
361-        );
362-    }
363-}
364-
365-fn gnu_target(target: &str) -> String {
366-    match target {
367-        "i686-pc-windows-msvc" => "i686-pc-win32".to_string(),
368-        "x86_64-pc-windows-msvc" => "x86_64-pc-win32".to_string(),
369-        "i686-pc-windows-gnu" => "i686-w64-mingw32".to_string(),
370-        "x86_64-pc-windows-gnu" => "x86_64-w64-mingw32".to_string(),
371-        "armv7-linux-androideabi" => "arm-linux-androideabi".to_string(),
372-        "riscv64gc-unknown-linux-gnu" => "riscv64-linux-gnu".to_string(),
373-        s => s.to_string(),
374-    }
375-}
376-
377-fn make_cmd(host: &str) -> &'static str {
378-    const GMAKE_HOSTS: &[&str] = &["bitrig", "dragonfly", "freebsd", "netbsd", "openbsd"];
379-    if GMAKE_HOSTS.iter().any(|i| host.contains(i)) {
380-        "gmake"
381-    } else if host.contains("windows") {
382-        "mingw32-make"
383-    } else {
384-        "make"
385-    }
386-}
387-
388-struct BackgroundThreadSupport {
389-    always_enabled: bool,
390-}
391-
392-impl BackgroundThreadSupport {
393-    fn new(target: &str) -> Option<Self> {
394-        let runtime_support = env::var("CARGO_FEATURE_BACKGROUND_THREADS_RUNTIME_SUPPORT").is_ok();
395-        let always_enabled = env::var("CARGO_FEATURE_BACKGROUND_THREADS").is_ok();
396-
397-        if !runtime_support {
398-            assert!(
399-                !always_enabled,
400-                "enabling `background_threads` requires `background_threads_runtime_support`"
401-            );
402-            return None;
403-        }
404-
405-        if NO_BG_THREAD_TARGETS.iter().any(|i| target.contains(i)) {
406-            warning!(
407-                "`background_threads_runtime_support` not supported for `{}`",
408-                target
409-            );
410-        }
411-
412-        Some(Self { always_enabled })
413-    }
414-}
415diff --git a/configure/VERSION b/configure/VERSION
416deleted file mode 100644
417index 1dcfea0..0000000
418--- a/configure/VERSION
419+++ /dev/null
420@@ -1 +0,0 @@
421-5.3.0-0-g54eaed1d8b56b1aa528be3bdd1877e59c56fa90c
422diff --git a/configure/configure b/configure/configure
423deleted file mode 100755
424index 7c4e6e2..0000000
425--- a/configure/configure
426+++ /dev/null
427@@ -1,15820 +0,0 @@
428-#! /bin/sh
429-# Guess values for system-dependent variables and create Makefiles.
430-# Generated by GNU Autoconf 2.69.
431-#
432-#
433-# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
434-#
435-#
436-# This configure script is free software; the Free Software Foundation
437-# gives unlimited permission to copy, distribute and modify it.
438-## -------------------- ##
439-## M4sh Initialization. ##
440-## -------------------- ##
441-
442-# Be more Bourne compatible
443-DUALCASE=1; export DUALCASE # for MKS sh
444-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
445-  emulate sh
446-  NULLCMD=:
447-  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
448-  # is contrary to our usage.  Disable this feature.
449-  alias -g '${1+"$@"}'='"$@"'
450-  setopt NO_GLOB_SUBST
451-else
452-  case `(set -o) 2>/dev/null` in #(
453-  *posix*) :
454-    set -o posix ;; #(
455-  *) :
456-     ;;
457-esac
458-fi
459-
460-
461-as_nl='
462-'
463-export as_nl
464-# Printing a long string crashes Solaris 7 /usr/bin/printf.
465-as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
466-as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
467-as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
468-# Prefer a ksh shell builtin over an external printf program on Solaris,
469-# but without wasting forks for bash or zsh.
470-if test -z "$BASH_VERSION$ZSH_VERSION" \
471-    && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
472-  as_echo='print -r --'
473-  as_echo_n='print -rn --'
474-elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
475-  as_echo='printf %s\n'
476-  as_echo_n='printf %s'
477-else
478-  if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
479-    as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
480-    as_echo_n='/usr/ucb/echo -n'
481-  else
482-    as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
483-    as_echo_n_body='eval
484-      arg=$1;
485-      case $arg in #(
486-      *"$as_nl"*)
487-	expr "X$arg" : "X\\(.*\\)$as_nl";
488-	arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
489-      esac;
490-      expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
491-    '
492-    export as_echo_n_body
493-    as_echo_n='sh -c $as_echo_n_body as_echo'
494-  fi
495-  export as_echo_body
496-  as_echo='sh -c $as_echo_body as_echo'
497-fi
498-
499-# The user is always right.
500-if test "${PATH_SEPARATOR+set}" != set; then
501-  PATH_SEPARATOR=:
502-  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
503-    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
504-      PATH_SEPARATOR=';'
505-  }
506-fi
507-
508-
509-# IFS
510-# We need space, tab and new line, in precisely that order.  Quoting is
511-# there to prevent editors from complaining about space-tab.
512-# (If _AS_PATH_WALK were called with IFS unset, it would disable word
513-# splitting by setting IFS to empty value.)
514-IFS=" ""	$as_nl"
515-
516-# Find who we are.  Look in the path if we contain no directory separator.
517-as_myself=
518-case $0 in #((
519-  *[\\/]* ) as_myself=$0 ;;
520-  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
521-for as_dir in $PATH
522-do
523-  IFS=$as_save_IFS
524-  test -z "$as_dir" && as_dir=.
525-    test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
526-  done
527-IFS=$as_save_IFS
528-
529-     ;;
530-esac
531-# We did not find ourselves, most probably we were run as `sh COMMAND'
532-# in which case we are not to be found in the path.
533-if test "x$as_myself" = x; then
534-  as_myself=$0
535-fi
536-if test ! -f "$as_myself"; then
537-  $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
538-  exit 1
539-fi
540-
541-# Unset variables that we do not need and which cause bugs (e.g. in
542-# pre-3.0 UWIN ksh).  But do not cause bugs in bash 2.01; the "|| exit 1"
543-# suppresses any "Segmentation fault" message there.  '((' could
544-# trigger a bug in pdksh 5.2.14.
545-for as_var in BASH_ENV ENV MAIL MAILPATH
546-do eval test x\${$as_var+set} = xset \
547-  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
548-done
549-PS1='$ '
550-PS2='> '
551-PS4='+ '
552-
553-# NLS nuisances.
554-LC_ALL=C
555-export LC_ALL
556-LANGUAGE=C
557-export LANGUAGE
558-
559-# CDPATH.
560-(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
561-
562-# Use a proper internal environment variable to ensure we don't fall
563-  # into an infinite loop, continuously re-executing ourselves.
564-  if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then
565-    _as_can_reexec=no; export _as_can_reexec;
566-    # We cannot yet assume a decent shell, so we have to provide a
567-# neutralization value for shells without unset; and this also
568-# works around shells that cannot unset nonexistent variables.
569-# Preserve -v and -x to the replacement shell.
570-BASH_ENV=/dev/null
571-ENV=/dev/null
572-(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
573-case $- in # ((((
574-  *v*x* | *x*v* ) as_opts=-vx ;;
575-  *v* ) as_opts=-v ;;
576-  *x* ) as_opts=-x ;;
577-  * ) as_opts= ;;
578-esac
579-exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
580-# Admittedly, this is quite paranoid, since all the known shells bail
581-# out after a failed `exec'.
582-$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
583-as_fn_exit 255
584-  fi
585-  # We don't want this to propagate to other subprocesses.
586-          { _as_can_reexec=; unset _as_can_reexec;}
587-if test "x$CONFIG_SHELL" = x; then
588-  as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then :
589-  emulate sh
590-  NULLCMD=:
591-  # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which
592-  # is contrary to our usage.  Disable this feature.
593-  alias -g '\${1+\"\$@\"}'='\"\$@\"'
594-  setopt NO_GLOB_SUBST
595-else
596-  case \`(set -o) 2>/dev/null\` in #(
597-  *posix*) :
598-    set -o posix ;; #(
599-  *) :
600-     ;;
601-esac
602-fi
603-"
604-  as_required="as_fn_return () { (exit \$1); }
605-as_fn_success () { as_fn_return 0; }
606-as_fn_failure () { as_fn_return 1; }
607-as_fn_ret_success () { return 0; }
608-as_fn_ret_failure () { return 1; }
609-
610-exitcode=0
611-as_fn_success || { exitcode=1; echo as_fn_success failed.; }
612-as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; }
613-as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; }
614-as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; }
615-if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then :
616-
617-else
618-  exitcode=1; echo positional parameters were not saved.
619-fi
620-test x\$exitcode = x0 || exit 1
621-test -x / || exit 1"
622-  as_suggested="  as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
623-  as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
624-  eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
625-  test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1
626-test \$(( 1 + 1 )) = 2 || exit 1"
627-  if (eval "$as_required") 2>/dev/null; then :
628-  as_have_required=yes
629-else
630-  as_have_required=no
631-fi
632-  if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then :
633-
634-else
635-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
636-as_found=false
637-for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
638-do
639-  IFS=$as_save_IFS
640-  test -z "$as_dir" && as_dir=.
641-  as_found=:
642-  case $as_dir in #(
643-	 /*)
644-	   for as_base in sh bash ksh sh5; do
645-	     # Try only shells that exist, to save several forks.
646-	     as_shell=$as_dir/$as_base
647-	     if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
648-		    { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then :
649-  CONFIG_SHELL=$as_shell as_have_required=yes
650-		   if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then :
651-  break 2
652-fi
653-fi
654-	   done;;
655-       esac
656-  as_found=false
657-done
658-$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } &&
659-	      { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then :
660-  CONFIG_SHELL=$SHELL as_have_required=yes
661-fi; }
662-IFS=$as_save_IFS
663-
664-
665-      if test "x$CONFIG_SHELL" != x; then :
666-  export CONFIG_SHELL
667-             # We cannot yet assume a decent shell, so we have to provide a
668-# neutralization value for shells without unset; and this also
669-# works around shells that cannot unset nonexistent variables.
670-# Preserve -v and -x to the replacement shell.
671-BASH_ENV=/dev/null
672-ENV=/dev/null
673-(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
674-case $- in # ((((
675-  *v*x* | *x*v* ) as_opts=-vx ;;
676-  *v* ) as_opts=-v ;;
677-  *x* ) as_opts=-x ;;
678-  * ) as_opts= ;;
679-esac
680-exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
681-# Admittedly, this is quite paranoid, since all the known shells bail
682-# out after a failed `exec'.
683-$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
684-exit 255
685-fi
686-
687-    if test x$as_have_required = xno; then :
688-  $as_echo "$0: This script requires a shell more modern than all"
689-  $as_echo "$0: the shells that I found on your system."
690-  if test x${ZSH_VERSION+set} = xset ; then
691-    $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should"
692-    $as_echo "$0: be upgraded to zsh 4.3.4 or later."
693-  else
694-    $as_echo "$0: Please tell bug-autoconf@gnu.org about your system,
695-$0: including any error possibly output before this
696-$0: message. Then install a modern shell, or manually run
697-$0: the script under such a shell if you do have one."
698-  fi
699-  exit 1
700-fi
701-fi
702-fi
703-SHELL=${CONFIG_SHELL-/bin/sh}
704-export SHELL
705-# Unset more variables known to interfere with behavior of common tools.
706-CLICOLOR_FORCE= GREP_OPTIONS=
707-unset CLICOLOR_FORCE GREP_OPTIONS
708-
709-## --------------------- ##
710-## M4sh Shell Functions. ##
711-## --------------------- ##
712-# as_fn_unset VAR
713-# ---------------
714-# Portably unset VAR.
715-as_fn_unset ()
716-{
717-  { eval $1=; unset $1;}
718-}
719-as_unset=as_fn_unset
720-
721-# as_fn_set_status STATUS
722-# -----------------------
723-# Set $? to STATUS, without forking.
724-as_fn_set_status ()
725-{
726-  return $1
727-} # as_fn_set_status
728-
729-# as_fn_exit STATUS
730-# -----------------
731-# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
732-as_fn_exit ()
733-{
734-  set +e
735-  as_fn_set_status $1
736-  exit $1
737-} # as_fn_exit
738-
739-# as_fn_mkdir_p
740-# -------------
741-# Create "$as_dir" as a directory, including parents if necessary.
742-as_fn_mkdir_p ()
743-{
744-
745-  case $as_dir in #(
746-  -*) as_dir=./$as_dir;;
747-  esac
748-  test -d "$as_dir" || eval $as_mkdir_p || {
749-    as_dirs=
750-    while :; do
751-      case $as_dir in #(
752-      *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
753-      *) as_qdir=$as_dir;;
754-      esac
755-      as_dirs="'$as_qdir' $as_dirs"
756-      as_dir=`$as_dirname -- "$as_dir" ||
757-$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
758-	 X"$as_dir" : 'X\(//\)[^/]' \| \
759-	 X"$as_dir" : 'X\(//\)$' \| \
760-	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
761-$as_echo X"$as_dir" |
762-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
763-	    s//\1/
764-	    q
765-	  }
766-	  /^X\(\/\/\)[^/].*/{
767-	    s//\1/
768-	    q
769-	  }
770-	  /^X\(\/\/\)$/{
771-	    s//\1/
772-	    q
773-	  }
774-	  /^X\(\/\).*/{
775-	    s//\1/
776-	    q
777-	  }
778-	  s/.*/./; q'`
779-      test -d "$as_dir" && break
780-    done
781-    test -z "$as_dirs" || eval "mkdir $as_dirs"
782-  } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
783-
784-
785-} # as_fn_mkdir_p
786-
787-# as_fn_executable_p FILE
788-# -----------------------
789-# Test if FILE is an executable regular file.
790-as_fn_executable_p ()
791-{
792-  test -f "$1" && test -x "$1"
793-} # as_fn_executable_p
794-# as_fn_append VAR VALUE
795-# ----------------------
796-# Append the text in VALUE to the end of the definition contained in VAR. Take
797-# advantage of any shell optimizations that allow amortized linear growth over
798-# repeated appends, instead of the typical quadratic growth present in naive
799-# implementations.
800-if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
801-  eval 'as_fn_append ()
802-  {
803-    eval $1+=\$2
804-  }'
805-else
806-  as_fn_append ()
807-  {
808-    eval $1=\$$1\$2
809-  }
810-fi # as_fn_append
811-
812-# as_fn_arith ARG...
813-# ------------------
814-# Perform arithmetic evaluation on the ARGs, and store the result in the
815-# global $as_val. Take advantage of shells that can avoid forks. The arguments
816-# must be portable across $(()) and expr.
817-if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
818-  eval 'as_fn_arith ()
819-  {
820-    as_val=$(( $* ))
821-  }'
822-else
823-  as_fn_arith ()
824-  {
825-    as_val=`expr "$@" || test $? -eq 1`
826-  }
827-fi # as_fn_arith
828-
829-
830-# as_fn_error STATUS ERROR [LINENO LOG_FD]
831-# ----------------------------------------
832-# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
833-# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
834-# script with STATUS, using 1 if that was 0.
835-as_fn_error ()
836-{
837-  as_status=$1; test $as_status -eq 0 && as_status=1
838-  if test "$4"; then
839-    as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
840-    $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
841-  fi
842-  $as_echo "$as_me: error: $2" >&2
843-  as_fn_exit $as_status
844-} # as_fn_error
845-
846-if expr a : '\(a\)' >/dev/null 2>&1 &&
847-   test "X`expr 00001 : '.*\(...\)'`" = X001; then
848-  as_expr=expr
849-else
850-  as_expr=false
851-fi
852-
853-if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
854-  as_basename=basename
855-else
856-  as_basename=false
857-fi
858-
859-if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
860-  as_dirname=dirname
861-else
862-  as_dirname=false
863-fi
864-
865-as_me=`$as_basename -- "$0" ||
866-$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
867-	 X"$0" : 'X\(//\)$' \| \
868-	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
869-$as_echo X/"$0" |
870-    sed '/^.*\/\([^/][^/]*\)\/*$/{
871-	    s//\1/
872-	    q
873-	  }
874-	  /^X\/\(\/\/\)$/{
875-	    s//\1/
876-	    q
877-	  }
878-	  /^X\/\(\/\).*/{
879-	    s//\1/
880-	    q
881-	  }
882-	  s/.*/./; q'`
883-
884-# Avoid depending upon Character Ranges.
885-as_cr_letters='abcdefghijklmnopqrstuvwxyz'
886-as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
887-as_cr_Letters=$as_cr_letters$as_cr_LETTERS
888-as_cr_digits='0123456789'
889-as_cr_alnum=$as_cr_Letters$as_cr_digits
890-
891-
892-  as_lineno_1=$LINENO as_lineno_1a=$LINENO
893-  as_lineno_2=$LINENO as_lineno_2a=$LINENO
894-  eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" &&
895-  test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || {
896-  # Blame Lee E. McMahon (1931-1989) for sed's syntax.  :-)
897-  sed -n '
898-    p
899-    /[$]LINENO/=
900-  ' <$as_myself |
901-    sed '
902-      s/[$]LINENO.*/&-/
903-      t lineno
904-      b
905-      :lineno
906-      N
907-      :loop
908-      s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
909-      t loop
910-      s/-\n.*//
911-    ' >$as_me.lineno &&
912-  chmod +x "$as_me.lineno" ||
913-    { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
914-
915-  # If we had to re-execute with $CONFIG_SHELL, we're ensured to have
916-  # already done that, so ensure we don't try to do so again and fall
917-  # in an infinite loop.  This has already happened in practice.
918-  _as_can_reexec=no; export _as_can_reexec
919-  # Don't try to exec as it changes $[0], causing all sort of problems
920-  # (the dirname of $[0] is not the place where we might find the
921-  # original and so on.  Autoconf is especially sensitive to this).
922-  . "./$as_me.lineno"
923-  # Exit status is that of the last command.
924-  exit
925-}
926-
927-ECHO_C= ECHO_N= ECHO_T=
928-case `echo -n x` in #(((((
929--n*)
930-  case `echo 'xy\c'` in
931-  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
932-  xy)  ECHO_C='\c';;
933-  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
934-       ECHO_T='	';;
935-  esac;;
936-*)
937-  ECHO_N='-n';;
938-esac
939-
940-rm -f conf$$ conf$$.exe conf$$.file
941-if test -d conf$$.dir; then
942-  rm -f conf$$.dir/conf$$.file
943-else
944-  rm -f conf$$.dir
945-  mkdir conf$$.dir 2>/dev/null
946-fi
947-if (echo >conf$$.file) 2>/dev/null; then
948-  if ln -s conf$$.file conf$$ 2>/dev/null; then
949-    as_ln_s='ln -s'
950-    # ... but there are two gotchas:
951-    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
952-    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
953-    # In both cases, we have to default to `cp -pR'.
954-    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
955-      as_ln_s='cp -pR'
956-  elif ln conf$$.file conf$$ 2>/dev/null; then
957-    as_ln_s=ln
958-  else
959-    as_ln_s='cp -pR'
960-  fi
961-else
962-  as_ln_s='cp -pR'
963-fi
964-rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
965-rmdir conf$$.dir 2>/dev/null
966-
967-if mkdir -p . 2>/dev/null; then
968-  as_mkdir_p='mkdir -p "$as_dir"'
969-else
970-  test -d ./-p && rmdir ./-p
971-  as_mkdir_p=false
972-fi
973-
974-as_test_x='test -x'
975-as_executable_p=as_fn_executable_p
976-
977-# Sed expression to map a string onto a valid CPP name.
978-as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
979-
980-# Sed expression to map a string onto a valid variable name.
981-as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
982-
983-
984-test -n "$DJDIR" || exec 7<&0 </dev/null
985-exec 6>&1
986-
987-# Name of the host.
988-# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status,
989-# so uname gets run too.
990-ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
991-
992-#
993-# Initializations.
994-#
995-ac_default_prefix=/usr/local
996-ac_clean_files=
997-ac_config_libobj_dir=.
998-LIBOBJS=
999-cross_compiling=no
1000-subdirs=
1001-MFLAGS=
1002-MAKEFLAGS=
1003-
1004-# Identity of this package.
1005-PACKAGE_NAME=
1006-PACKAGE_TARNAME=
1007-PACKAGE_VERSION=
1008-PACKAGE_STRING=
1009-PACKAGE_BUGREPORT=
1010-PACKAGE_URL=
1011-
1012-ac_unique_file="Makefile.in"
1013-# Factoring default headers for most tests.
1014-ac_includes_default="\
1015-#include <stdio.h>
1016-#ifdef HAVE_SYS_TYPES_H
1017-# include <sys/types.h>
1018-#endif
1019-#ifdef HAVE_SYS_STAT_H
1020-# include <sys/stat.h>
1021-#endif
1022-#ifdef STDC_HEADERS
1023-# include <stdlib.h>
1024-# include <stddef.h>
1025-#else
1026-# ifdef HAVE_STDLIB_H
1027-#  include <stdlib.h>
1028-# endif
1029-#endif
1030-#ifdef HAVE_STRING_H
1031-# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
1032-#  include <memory.h>
1033-# endif
1034-# include <string.h>
1035-#endif
1036-#ifdef HAVE_STRINGS_H
1037-# include <strings.h>
1038-#endif
1039-#ifdef HAVE_INTTYPES_H
1040-# include <inttypes.h>
1041-#endif
1042-#ifdef HAVE_STDINT_H
1043-# include <stdint.h>
1044-#endif
1045-#ifdef HAVE_UNISTD_H
1046-# include <unistd.h>
1047-#endif"
1048-
1049-ac_subst_vars='LTLIBOBJS
1050-LIBOBJS
1051-cfgoutputs_out
1052-cfgoutputs_in
1053-cfghdrs_out
1054-cfghdrs_in
1055-enable_initial_exec_tls
1056-enable_zone_allocator
1057-enable_tls
1058-enable_lazy_lock
1059-libdl
1060-enable_uaf_detection
1061-enable_opt_size_checks
1062-enable_opt_safety_checks
1063-enable_readlinkat
1064-enable_log
1065-enable_cache_oblivious
1066-enable_xmalloc
1067-enable_utrace
1068-enable_fill
1069-enable_prof
1070-enable_experimental_smallocx
1071-enable_stats
1072-enable_debug
1073-je_
1074-install_suffix
1075-private_namespace
1076-JEMALLOC_CPREFIX
1077-JEMALLOC_PREFIX
1078-enable_static
1079-enable_shared
1080-enable_doc
1081-AUTOCONF
1082-LD
1083-RANLIB
1084-INSTALL_DATA
1085-INSTALL_SCRIPT
1086-INSTALL_PROGRAM
1087-enable_autogen
1088-RPATH_EXTRA
1089-LM
1090-CC_MM
1091-DUMP_SYMS
1092-AROUT
1093-ARFLAGS
1094-MKLIB
1095-TEST_LD_MODE
1096-LDTARGET
1097-CTARGET
1098-PIC_CFLAGS
1099-SOREV
1100-EXTRA_LDFLAGS
1101-DSO_LDFLAGS
1102-link_whole_archive
1103-libprefix
1104-exe
1105-a
1106-o
1107-importlib
1108-so
1109-LD_PRELOAD_VAR
1110-RPATH
1111-abi
1112-jemalloc_version_gid
1113-jemalloc_version_nrev
1114-jemalloc_version_bugfix
1115-jemalloc_version_minor
1116-jemalloc_version_major
1117-jemalloc_version
1118-AWK
1119-NM
1120-AR
1121-host_os
1122-host_vendor
1123-host_cpu
1124-host
1125-build_os
1126-build_vendor
1127-build_cpu
1128-build
1129-EGREP
1130-GREP
1131-EXTRA_CXXFLAGS
1132-SPECIFIED_CXXFLAGS
1133-CONFIGURE_CXXFLAGS
1134-enable_cxx
1135-HAVE_CXX14
1136-HAVE_CXX17
1137-ac_ct_CXX
1138-CXXFLAGS
1139-CXX
1140-CPP
1141-EXTRA_CFLAGS
1142-SPECIFIED_CFLAGS
1143-CONFIGURE_CFLAGS
1144-OBJEXT
1145-EXEEXT
1146-ac_ct_CC
1147-CPPFLAGS
1148-LDFLAGS
1149-CFLAGS
1150-CC
1151-XSLROOT
1152-XSLTPROC
1153-MANDIR
1154-DATADIR
1155-LIBDIR
1156-INCLUDEDIR
1157-BINDIR
1158-PREFIX
1159-abs_objroot
1160-objroot
1161-abs_srcroot
1162-srcroot
1163-rev
1164-CONFIG
1165-target_alias
1166-host_alias
1167-build_alias
1168-LIBS
1169-ECHO_T
1170-ECHO_N
1171-ECHO_C
1172-DEFS
1173-mandir
1174-localedir
1175-libdir
1176-psdir
1177-pdfdir
1178-dvidir
1179-htmldir
1180-infodir
1181-docdir
1182-oldincludedir
1183-includedir
1184-localstatedir
1185-sharedstatedir
1186-sysconfdir
1187-datadir
1188-datarootdir
1189-libexecdir
1190-sbindir
1191-bindir
1192-program_transform_name
1193-prefix
1194-exec_prefix
1195-PACKAGE_URL
1196-PACKAGE_BUGREPORT
1197-PACKAGE_STRING
1198-PACKAGE_VERSION
1199-PACKAGE_TARNAME
1200-PACKAGE_NAME
1201-PATH_SEPARATOR
1202-SHELL'
1203-ac_subst_files=''
1204-ac_user_opts='
1205-enable_option_checking
1206-with_xslroot
1207-enable_cxx
1208-with_lg_vaddr
1209-with_version
1210-with_rpath
1211-enable_autogen
1212-enable_doc
1213-enable_shared
1214-enable_static
1215-with_mangling
1216-with_jemalloc_prefix
1217-with_export
1218-with_private_namespace
1219-with_install_suffix
1220-with_malloc_conf
1221-enable_debug
1222-enable_stats
1223-enable_experimental_smallocx
1224-enable_prof
1225-enable_prof_libunwind
1226-with_static_libunwind
1227-enable_prof_libgcc
1228-enable_prof_gcc
1229-enable_fill
1230-enable_utrace
1231-enable_xmalloc
1232-enable_cache_oblivious
1233-enable_log
1234-enable_readlinkat
1235-enable_opt_safety_checks
1236-enable_opt_size_checks
1237-enable_uaf_detection
1238-with_lg_quantum
1239-with_lg_slab_maxregs
1240-with_lg_page
1241-with_lg_hugepage
1242-enable_libdl
1243-enable_syscall
1244-enable_lazy_lock
1245-enable_zone_allocator
1246-enable_initial_exec_tls
1247-'
1248-      ac_precious_vars='build_alias
1249-host_alias
1250-target_alias
1251-CC
1252-CFLAGS
1253-LDFLAGS
1254-LIBS
1255-CPPFLAGS
1256-CPP
1257-CXX
1258-CXXFLAGS
1259-CCC'
1260-
1261-
1262-# Initialize some variables set by options.
1263-ac_init_help=
1264-ac_init_version=false
1265-ac_unrecognized_opts=
1266-ac_unrecognized_sep=
1267-# The variables have the same names as the options, with
1268-# dashes changed to underlines.
1269-cache_file=/dev/null
1270-exec_prefix=NONE
1271-no_create=
1272-no_recursion=
1273-prefix=NONE
1274-program_prefix=NONE
1275-program_suffix=NONE
1276-program_transform_name=s,x,x,
1277-silent=
1278-site=
1279-srcdir=
1280-verbose=
1281-x_includes=NONE
1282-x_libraries=NONE
1283-
1284-# Installation directory options.
1285-# These are left unexpanded so users can "make install exec_prefix=/foo"
1286-# and all the variables that are supposed to be based on exec_prefix
1287-# by default will actually change.
1288-# Use braces instead of parens because sh, perl, etc. also accept them.
1289-# (The list follows the same order as the GNU Coding Standards.)
1290-bindir='${exec_prefix}/bin'
1291-sbindir='${exec_prefix}/sbin'
1292-libexecdir='${exec_prefix}/libexec'
1293-datarootdir='${prefix}/share'
1294-datadir='${datarootdir}'
1295-sysconfdir='${prefix}/etc'
1296-sharedstatedir='${prefix}/com'
1297-localstatedir='${prefix}/var'
1298-includedir='${prefix}/include'
1299-oldincludedir='/usr/include'
1300-docdir='${datarootdir}/doc/${PACKAGE}'
1301-infodir='${datarootdir}/info'
1302-htmldir='${docdir}'
1303-dvidir='${docdir}'
1304-pdfdir='${docdir}'
1305-psdir='${docdir}'
1306-libdir='${exec_prefix}/lib'
1307-localedir='${datarootdir}/locale'
1308-mandir='${datarootdir}/man'
1309-
1310-ac_prev=
1311-ac_dashdash=
1312-for ac_option
1313-do
1314-  # If the previous option needs an argument, assign it.
1315-  if test -n "$ac_prev"; then
1316-    eval $ac_prev=\$ac_option
1317-    ac_prev=
1318-    continue
1319-  fi
1320-
1321-  case $ac_option in
1322-  *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
1323-  *=)   ac_optarg= ;;
1324-  *)    ac_optarg=yes ;;
1325-  esac
1326-
1327-  # Accept the important Cygnus configure options, so we can diagnose typos.
1328-
1329-  case $ac_dashdash$ac_option in
1330-  --)
1331-    ac_dashdash=yes ;;
1332-
1333-  -bindir | --bindir | --bindi | --bind | --bin | --bi)
1334-    ac_prev=bindir ;;
1335-  -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
1336-    bindir=$ac_optarg ;;
1337-
1338-  -build | --build | --buil | --bui | --bu)
1339-    ac_prev=build_alias ;;
1340-  -build=* | --build=* | --buil=* | --bui=* | --bu=*)
1341-    build_alias=$ac_optarg ;;
1342-
1343-  -cache-file | --cache-file | --cache-fil | --cache-fi \
1344-  | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
1345-    ac_prev=cache_file ;;
1346-  -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
1347-  | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
1348-    cache_file=$ac_optarg ;;
1349-
1350-  --config-cache | -C)
1351-    cache_file=config.cache ;;
1352-
1353-  -datadir | --datadir | --datadi | --datad)
1354-    ac_prev=datadir ;;
1355-  -datadir=* | --datadir=* | --datadi=* | --datad=*)
1356-    datadir=$ac_optarg ;;
1357-
1358-  -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
1359-  | --dataroo | --dataro | --datar)
1360-    ac_prev=datarootdir ;;
1361-  -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
1362-  | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
1363-    datarootdir=$ac_optarg ;;
1364-
1365-  -disable-* | --disable-*)
1366-    ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
1367-    # Reject names that are not valid shell variable names.
1368-    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
1369-      as_fn_error $? "invalid feature name: $ac_useropt"
1370-    ac_useropt_orig=$ac_useropt
1371-    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
1372-    case $ac_user_opts in
1373-      *"
1374-"enable_$ac_useropt"
1375-"*) ;;
1376-      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
1377-	 ac_unrecognized_sep=', ';;
1378-    esac
1379-    eval enable_$ac_useropt=no ;;
1380-
1381-  -docdir | --docdir | --docdi | --doc | --do)
1382-    ac_prev=docdir ;;
1383-  -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
1384-    docdir=$ac_optarg ;;
1385-
1386-  -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
1387-    ac_prev=dvidir ;;
1388-  -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
1389-    dvidir=$ac_optarg ;;
1390-
1391-  -enable-* | --enable-*)
1392-    ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
1393-    # Reject names that are not valid shell variable names.
1394-    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
1395-      as_fn_error $? "invalid feature name: $ac_useropt"
1396-    ac_useropt_orig=$ac_useropt
1397-    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
1398-    case $ac_user_opts in
1399-      *"
1400-"enable_$ac_useropt"
1401-"*) ;;
1402-      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
1403-	 ac_unrecognized_sep=', ';;
1404-    esac
1405-    eval enable_$ac_useropt=\$ac_optarg ;;
1406-
1407-  -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
1408-  | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
1409-  | --exec | --exe | --ex)
1410-    ac_prev=exec_prefix ;;
1411-  -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
1412-  | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
1413-  | --exec=* | --exe=* | --ex=*)
1414-    exec_prefix=$ac_optarg ;;
1415-
1416-  -gas | --gas | --ga | --g)
1417-    # Obsolete; use --with-gas.
1418-    with_gas=yes ;;
1419-
1420-  -help | --help | --hel | --he | -h)
1421-    ac_init_help=long ;;
1422-  -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
1423-    ac_init_help=recursive ;;
1424-  -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
1425-    ac_init_help=short ;;
1426-
1427-  -host | --host | --hos | --ho)
1428-    ac_prev=host_alias ;;
1429-  -host=* | --host=* | --hos=* | --ho=*)
1430-    host_alias=$ac_optarg ;;
1431-
1432-  -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
1433-    ac_prev=htmldir ;;
1434-  -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
1435-  | --ht=*)
1436-    htmldir=$ac_optarg ;;
1437-
1438-  -includedir | --includedir | --includedi | --included | --include \
1439-  | --includ | --inclu | --incl | --inc)
1440-    ac_prev=includedir ;;
1441-  -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
1442-  | --includ=* | --inclu=* | --incl=* | --inc=*)
1443-    includedir=$ac_optarg ;;
1444-
1445-  -infodir | --infodir | --infodi | --infod | --info | --inf)
1446-    ac_prev=infodir ;;
1447-  -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
1448-    infodir=$ac_optarg ;;
1449-
1450-  -libdir | --libdir | --libdi | --libd)
1451-    ac_prev=libdir ;;
1452-  -libdir=* | --libdir=* | --libdi=* | --libd=*)
1453-    libdir=$ac_optarg ;;
1454-
1455-  -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
1456-  | --libexe | --libex | --libe)
1457-    ac_prev=libexecdir ;;
1458-  -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
1459-  | --libexe=* | --libex=* | --libe=*)
1460-    libexecdir=$ac_optarg ;;
1461-
1462-  -localedir | --localedir | --localedi | --localed | --locale)
1463-    ac_prev=localedir ;;
1464-  -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
1465-    localedir=$ac_optarg ;;
1466-
1467-  -localstatedir | --localstatedir | --localstatedi | --localstated \
1468-  | --localstate | --localstat | --localsta | --localst | --locals)
1469-    ac_prev=localstatedir ;;
1470-  -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
1471-  | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
1472-    localstatedir=$ac_optarg ;;
1473-
1474-  -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
1475-    ac_prev=mandir ;;
1476-  -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
1477-    mandir=$ac_optarg ;;
1478-
1479-  -nfp | --nfp | --nf)
1480-    # Obsolete; use --without-fp.
1481-    with_fp=no ;;
1482-
1483-  -no-create | --no-create | --no-creat | --no-crea | --no-cre \
1484-  | --no-cr | --no-c | -n)
1485-    no_create=yes ;;
1486-
1487-  -no-recursion | --no-recursion | --no-recursio | --no-recursi \
1488-  | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
1489-    no_recursion=yes ;;
1490-
1491-  -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
1492-  | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
1493-  | --oldin | --oldi | --old | --ol | --o)
1494-    ac_prev=oldincludedir ;;
1495-  -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
1496-  | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
1497-  | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
1498-    oldincludedir=$ac_optarg ;;
1499-
1500-  -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
1501-    ac_prev=prefix ;;
1502-  -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
1503-    prefix=$ac_optarg ;;
1504-
1505-  -program-prefix | --program-prefix | --program-prefi | --program-pref \
1506-  | --program-pre | --program-pr | --program-p)
1507-    ac_prev=program_prefix ;;
1508-  -program-prefix=* | --program-prefix=* | --program-prefi=* \
1509-  | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
1510-    program_prefix=$ac_optarg ;;
1511-
1512-  -program-suffix | --program-suffix | --program-suffi | --program-suff \
1513-  | --program-suf | --program-su | --program-s)
1514-    ac_prev=program_suffix ;;
1515-  -program-suffix=* | --program-suffix=* | --program-suffi=* \
1516-  | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
1517-    program_suffix=$ac_optarg ;;
1518-
1519-  -program-transform-name | --program-transform-name \
1520-  | --program-transform-nam | --program-transform-na \
1521-  | --program-transform-n | --program-transform- \
1522-  | --program-transform | --program-transfor \
1523-  | --program-transfo | --program-transf \
1524-  | --program-trans | --program-tran \
1525-  | --progr-tra | --program-tr | --program-t)
1526-    ac_prev=program_transform_name ;;
1527-  -program-transform-name=* | --program-transform-name=* \
1528-  | --program-transform-nam=* | --program-transform-na=* \
1529-  | --program-transform-n=* | --program-transform-=* \
1530-  | --program-transform=* | --program-transfor=* \
1531-  | --program-transfo=* | --program-transf=* \
1532-  | --program-trans=* | --program-tran=* \
1533-  | --progr-tra=* | --program-tr=* | --program-t=*)
1534-    program_transform_name=$ac_optarg ;;
1535-
1536-  -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
1537-    ac_prev=pdfdir ;;
1538-  -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
1539-    pdfdir=$ac_optarg ;;
1540-
1541-  -psdir | --psdir | --psdi | --psd | --ps)
1542-    ac_prev=psdir ;;
1543-  -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
1544-    psdir=$ac_optarg ;;
1545-
1546-  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
1547-  | -silent | --silent | --silen | --sile | --sil)
1548-    silent=yes ;;
1549-
1550-  -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
1551-    ac_prev=sbindir ;;
1552-  -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
1553-  | --sbi=* | --sb=*)
1554-    sbindir=$ac_optarg ;;
1555-
1556-  -sharedstatedir | --sharedstatedir | --sharedstatedi \
1557-  | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
1558-  | --sharedst | --shareds | --shared | --share | --shar \
1559-  | --sha | --sh)
1560-    ac_prev=sharedstatedir ;;
1561-  -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
1562-  | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
1563-  | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
1564-  | --sha=* | --sh=*)
1565-    sharedstatedir=$ac_optarg ;;
1566-
1567-  -site | --site | --sit)
1568-    ac_prev=site ;;
1569-  -site=* | --site=* | --sit=*)
1570-    site=$ac_optarg ;;
1571-
1572-  -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
1573-    ac_prev=srcdir ;;
1574-  -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
1575-    srcdir=$ac_optarg ;;
1576-
1577-  -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
1578-  | --syscon | --sysco | --sysc | --sys | --sy)
1579-    ac_prev=sysconfdir ;;
1580-  -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
1581-  | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
1582-    sysconfdir=$ac_optarg ;;
1583-
1584-  -target | --target | --targe | --targ | --tar | --ta | --t)
1585-    ac_prev=target_alias ;;
1586-  -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
1587-    target_alias=$ac_optarg ;;
1588-
1589-  -v | -verbose | --verbose | --verbos | --verbo | --verb)
1590-    verbose=yes ;;
1591-
1592-  -version | --version | --versio | --versi | --vers | -V)
1593-    ac_init_version=: ;;
1594-
1595-  -with-* | --with-*)
1596-    ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
1597-    # Reject names that are not valid shell variable names.
1598-    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
1599-      as_fn_error $? "invalid package name: $ac_useropt"
1600-    ac_useropt_orig=$ac_useropt
1601-    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
1602-    case $ac_user_opts in
1603-      *"
1604-"with_$ac_useropt"
1605-"*) ;;
1606-      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
1607-	 ac_unrecognized_sep=', ';;
1608-    esac
1609-    eval with_$ac_useropt=\$ac_optarg ;;
1610-
1611-  -without-* | --without-*)
1612-    ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
1613-    # Reject names that are not valid shell variable names.
1614-    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
1615-      as_fn_error $? "invalid package name: $ac_useropt"
1616-    ac_useropt_orig=$ac_useropt
1617-    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
1618-    case $ac_user_opts in
1619-      *"
1620-"with_$ac_useropt"
1621-"*) ;;
1622-      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
1623-	 ac_unrecognized_sep=', ';;
1624-    esac
1625-    eval with_$ac_useropt=no ;;
1626-
1627-  --x)
1628-    # Obsolete; use --with-x.
1629-    with_x=yes ;;
1630-
1631-  -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
1632-  | --x-incl | --x-inc | --x-in | --x-i)
1633-    ac_prev=x_includes ;;
1634-  -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
1635-  | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
1636-    x_includes=$ac_optarg ;;
1637-
1638-  -x-libraries | --x-libraries | --x-librarie | --x-librari \
1639-  | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
1640-    ac_prev=x_libraries ;;
1641-  -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
1642-  | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
1643-    x_libraries=$ac_optarg ;;
1644-
1645-  -*) as_fn_error $? "unrecognized option: \`$ac_option'
1646-Try \`$0 --help' for more information"
1647-    ;;
1648-
1649-  *=*)
1650-    ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
1651-    # Reject names that are not valid shell variable names.
1652-    case $ac_envvar in #(
1653-      '' | [0-9]* | *[!_$as_cr_alnum]* )
1654-      as_fn_error $? "invalid variable name: \`$ac_envvar'" ;;
1655-    esac
1656-    eval $ac_envvar=\$ac_optarg
1657-    export $ac_envvar ;;
1658-
1659-  *)
1660-    # FIXME: should be removed in autoconf 3.0.
1661-    $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
1662-    expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
1663-      $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
1664-    : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}"
1665-    ;;
1666-
1667-  esac
1668-done
1669-
1670-if test -n "$ac_prev"; then
1671-  ac_option=--`echo $ac_prev | sed 's/_/-/g'`
1672-  as_fn_error $? "missing argument to $ac_option"
1673-fi
1674-
1675-if test -n "$ac_unrecognized_opts"; then
1676-  case $enable_option_checking in
1677-    no) ;;
1678-    fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;;
1679-    *)     $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
1680-  esac
1681-fi
1682-
1683-# Check all directory arguments for consistency.
1684-for ac_var in	exec_prefix prefix bindir sbindir libexecdir datarootdir \
1685-		datadir sysconfdir sharedstatedir localstatedir includedir \
1686-		oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
1687-		libdir localedir mandir
1688-do
1689-  eval ac_val=\$$ac_var
1690-  # Remove trailing slashes.
1691-  case $ac_val in
1692-    */ )
1693-      ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
1694-      eval $ac_var=\$ac_val;;
1695-  esac
1696-  # Be sure to have absolute directory names.
1697-  case $ac_val in
1698-    [\\/$]* | ?:[\\/]* )  continue;;
1699-    NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
1700-  esac
1701-  as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val"
1702-done
1703-
1704-# There might be people who depend on the old broken behavior: `$host'
1705-# used to hold the argument of --host etc.
1706-# FIXME: To remove some day.
1707-build=$build_alias
1708-host=$host_alias
1709-target=$target_alias
1710-
1711-# FIXME: To remove some day.
1712-if test "x$host_alias" != x; then
1713-  if test "x$build_alias" = x; then
1714-    cross_compiling=maybe
1715-  elif test "x$build_alias" != "x$host_alias"; then
1716-    cross_compiling=yes
1717-  fi
1718-fi
1719-
1720-ac_tool_prefix=
1721-test -n "$host_alias" && ac_tool_prefix=$host_alias-
1722-
1723-test "$silent" = yes && exec 6>/dev/null
1724-
1725-
1726-ac_pwd=`pwd` && test -n "$ac_pwd" &&
1727-ac_ls_di=`ls -di .` &&
1728-ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
1729-  as_fn_error $? "working directory cannot be determined"
1730-test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
1731-  as_fn_error $? "pwd does not report name of working directory"
1732-
1733-
1734-# Find the source files, if location was not specified.
1735-if test -z "$srcdir"; then
1736-  ac_srcdir_defaulted=yes
1737-  # Try the directory containing this script, then the parent directory.
1738-  ac_confdir=`$as_dirname -- "$as_myself" ||
1739-$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
1740-	 X"$as_myself" : 'X\(//\)[^/]' \| \
1741-	 X"$as_myself" : 'X\(//\)$' \| \
1742-	 X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
1743-$as_echo X"$as_myself" |
1744-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
1745-	    s//\1/
1746-	    q
1747-	  }
1748-	  /^X\(\/\/\)[^/].*/{
1749-	    s//\1/
1750-	    q
1751-	  }
1752-	  /^X\(\/\/\)$/{
1753-	    s//\1/
1754-	    q
1755-	  }
1756-	  /^X\(\/\).*/{
1757-	    s//\1/
1758-	    q
1759-	  }
1760-	  s/.*/./; q'`
1761-  srcdir=$ac_confdir
1762-  if test ! -r "$srcdir/$ac_unique_file"; then
1763-    srcdir=..
1764-  fi
1765-else
1766-  ac_srcdir_defaulted=no
1767-fi
1768-if test ! -r "$srcdir/$ac_unique_file"; then
1769-  test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
1770-  as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir"
1771-fi
1772-ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
1773-ac_abs_confdir=`(
1774-	cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg"
1775-	pwd)`
1776-# When building in place, set srcdir=.
1777-if test "$ac_abs_confdir" = "$ac_pwd"; then
1778-  srcdir=.
1779-fi
1780-# Remove unnecessary trailing slashes from srcdir.
1781-# Double slashes in file names in object file debugging info
1782-# mess up M-x gdb in Emacs.
1783-case $srcdir in
1784-*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
1785-esac
1786-for ac_var in $ac_precious_vars; do
1787-  eval ac_env_${ac_var}_set=\${${ac_var}+set}
1788-  eval ac_env_${ac_var}_value=\$${ac_var}
1789-  eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
1790-  eval ac_cv_env_${ac_var}_value=\$${ac_var}
1791-done
1792-
1793-#
1794-# Report the --help message.
1795-#
1796-if test "$ac_init_help" = "long"; then
1797-  # Omit some internal or obsolete options to make the list less imposing.
1798-  # This message is too long to be a string in the A/UX 3.1 sh.
1799-  cat <<_ACEOF
1800-\`configure' configures this package to adapt to many kinds of systems.
1801-
1802-Usage: $0 [OPTION]... [VAR=VALUE]...
1803-
1804-To assign environment variables (e.g., CC, CFLAGS...), specify them as
1805-VAR=VALUE.  See below for descriptions of some of the useful variables.
1806-
1807-Defaults for the options are specified in brackets.
1808-
1809-Configuration:
1810-  -h, --help              display this help and exit
1811-      --help=short        display options specific to this package
1812-      --help=recursive    display the short help of all the included packages
1813-  -V, --version           display version information and exit
1814-  -q, --quiet, --silent   do not print \`checking ...' messages
1815-      --cache-file=FILE   cache test results in FILE [disabled]
1816-  -C, --config-cache      alias for \`--cache-file=config.cache'
1817-  -n, --no-create         do not create output files
1818-      --srcdir=DIR        find the sources in DIR [configure dir or \`..']
1819-
1820-Installation directories:
1821-  --prefix=PREFIX         install architecture-independent files in PREFIX
1822-                          [$ac_default_prefix]
1823-  --exec-prefix=EPREFIX   install architecture-dependent files in EPREFIX
1824-                          [PREFIX]
1825-
1826-By default, \`make install' will install all the files in
1827-\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc.  You can specify
1828-an installation prefix other than \`$ac_default_prefix' using \`--prefix',
1829-for instance \`--prefix=\$HOME'.
1830-
1831-For better control, use the options below.
1832-
1833-Fine tuning of the installation directories:
1834-  --bindir=DIR            user executables [EPREFIX/bin]
1835-  --sbindir=DIR           system admin executables [EPREFIX/sbin]
1836-  --libexecdir=DIR        program executables [EPREFIX/libexec]
1837-  --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
1838-  --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
1839-  --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
1840-  --libdir=DIR            object code libraries [EPREFIX/lib]
1841-  --includedir=DIR        C header files [PREFIX/include]
1842-  --oldincludedir=DIR     C header files for non-gcc [/usr/include]
1843-  --datarootdir=DIR       read-only arch.-independent data root [PREFIX/share]
1844-  --datadir=DIR           read-only architecture-independent data [DATAROOTDIR]
1845-  --infodir=DIR           info documentation [DATAROOTDIR/info]
1846-  --localedir=DIR         locale-dependent data [DATAROOTDIR/locale]
1847-  --mandir=DIR            man documentation [DATAROOTDIR/man]
1848-  --docdir=DIR            documentation root [DATAROOTDIR/doc/PACKAGE]
1849-  --htmldir=DIR           html documentation [DOCDIR]
1850-  --dvidir=DIR            dvi documentation [DOCDIR]
1851-  --pdfdir=DIR            pdf documentation [DOCDIR]
1852-  --psdir=DIR             ps documentation [DOCDIR]
1853-_ACEOF
1854-
1855-  cat <<\_ACEOF
1856-
1857-System types:
1858-  --build=BUILD     configure for building on BUILD [guessed]
1859-  --host=HOST       cross-compile to build programs to run on HOST [BUILD]
1860-_ACEOF
1861-fi
1862-
1863-if test -n "$ac_init_help"; then
1864-
1865-  cat <<\_ACEOF
1866-
1867-Optional Features:
1868-  --disable-option-checking  ignore unrecognized --enable/--with options
1869-  --disable-FEATURE       do not include FEATURE (same as --enable-FEATURE=no)
1870-  --enable-FEATURE[=ARG]  include FEATURE [ARG=yes]
1871-  --disable-cxx           Disable C++ integration
1872-  --enable-autogen        Automatically regenerate configure output
1873-  --enable-doc            Build documentation
1874-  --enable-shared         Build shared libraries
1875-  --enable-static         Build static libraries
1876-  --enable-debug          Build debugging code
1877-  --disable-stats         Disable statistics calculation/reporting
1878-  --enable-experimental-smallocx
1879-                          Enable experimental smallocx API
1880-  --enable-prof           Enable allocation profiling
1881-  --enable-prof-libunwind Use libunwind for backtracing
1882-  --disable-prof-libgcc   Do not use libgcc for backtracing
1883-  --disable-prof-gcc      Do not use gcc intrinsics for backtracing
1884-  --disable-fill          Disable support for junk/zero filling
1885-  --enable-utrace         Enable utrace(2)-based tracing
1886-  --enable-xmalloc        Support xmalloc option
1887-  --disable-cache-oblivious
1888-                          Disable support for cache-oblivious allocation
1889-                          alignment
1890-  --enable-log            Support debug logging
1891-  --enable-readlinkat     Use readlinkat over readlink
1892-  --enable-opt-safety-checks
1893-                          Perform certain low-overhead checks, even in opt
1894-                          mode
1895-  --enable-opt-size-checks
1896-                          Perform sized-deallocation argument checks, even in
1897-                          opt mode
1898-  --enable-uaf-detection  Allow sampled junk-filling on deallocation to detect
1899-                          use-after-free
1900-  --disable-libdl         Do not use libdl
1901-  --disable-syscall       Disable use of syscall(2)
1902-  --enable-lazy-lock      Enable lazy locking (only lock when multi-threaded)
1903-  --disable-zone-allocator
1904-                          Disable zone allocator for Darwin
1905-  --disable-initial-exec-tls
1906-                          Disable the initial-exec tls model
1907-
1908-Optional Packages:
1909-  --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
1910-  --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
1911-  --with-xslroot=<path>   XSL stylesheet root path
1912-  --with-lg-vaddr=<lg-vaddr>
1913-                          Number of significant virtual address bits
1914-  --with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid>
1915-                          Version string
1916-  --with-rpath=<rpath>    Colon-separated rpath (ELF systems only)
1917-  --with-mangling=<map>   Mangle symbols in <map>
1918-  --with-jemalloc-prefix=<prefix>
1919-                          Prefix to prepend to all public APIs
1920-  --without-export        disable exporting jemalloc public APIs
1921-  --with-private-namespace=<prefix>
1922-                          Prefix to prepend to all library-private APIs
1923-  --with-install-suffix=<suffix>
1924-                          Suffix to append to all installed files
1925-  --with-malloc-conf=<malloc_conf>
1926-                          config.malloc_conf options string
1927-  --with-static-libunwind=<libunwind.a>
1928-                          Path to static libunwind library; use it rather
1929-                          than dynamically linking
1930-  --with-lg-quantum=<lg-quantum>
1931-                          Base 2 log of minimum allocation alignment
1932-  --with-lg-slab-maxregs=<lg-slab-maxregs>
1933-                          Base 2 log of maximum number of regions in a slab
1934-                          (used with malloc_conf slab_sizes)
1935-  --with-lg-page=<lg-page>
1936-                          Base 2 log of system page size
1937-  --with-lg-hugepage=<lg-hugepage>
1938-                          Base 2 log of system huge page size
1939-
1940-Some influential environment variables:
1941-  CC          C compiler command
1942-  CFLAGS      C compiler flags
1943-  LDFLAGS     linker flags, e.g. -L<lib dir> if you have libraries in a
1944-              nonstandard directory <lib dir>
1945-  LIBS        libraries to pass to the linker, e.g. -l<library>
1946-  CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
1947-              you have headers in a nonstandard directory <include dir>
1948-  CPP         C preprocessor
1949-  CXX         C++ compiler command
1950-  CXXFLAGS    C++ compiler flags
1951-
1952-Use these variables to override the choices made by `configure' or to help
1953-it to find libraries and programs with nonstandard names/locations.
1954-
1955-Report bugs to the package provider.
1956-_ACEOF
1957-ac_status=$?
1958-fi
1959-
1960-if test "$ac_init_help" = "recursive"; then
1961-  # If there are subdirs, report their specific --help.
1962-  for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
1963-    test -d "$ac_dir" ||
1964-      { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
1965-      continue
1966-    ac_builddir=.
1967-
1968-case "$ac_dir" in
1969-.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
1970-*)
1971-  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
1972-  # A ".." for each directory in $ac_dir_suffix.
1973-  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
1974-  case $ac_top_builddir_sub in
1975-  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
1976-  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
1977-  esac ;;
1978-esac
1979-ac_abs_top_builddir=$ac_pwd
1980-ac_abs_builddir=$ac_pwd$ac_dir_suffix
1981-# for backward compatibility:
1982-ac_top_builddir=$ac_top_build_prefix
1983-
1984-case $srcdir in
1985-  .)  # We are building in place.
1986-    ac_srcdir=.
1987-    ac_top_srcdir=$ac_top_builddir_sub
1988-    ac_abs_top_srcdir=$ac_pwd ;;
1989-  [\\/]* | ?:[\\/]* )  # Absolute name.
1990-    ac_srcdir=$srcdir$ac_dir_suffix;
1991-    ac_top_srcdir=$srcdir
1992-    ac_abs_top_srcdir=$srcdir ;;
1993-  *) # Relative name.
1994-    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
1995-    ac_top_srcdir=$ac_top_build_prefix$srcdir
1996-    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
1997-esac
1998-ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
1999-
2000-    cd "$ac_dir" || { ac_status=$?; continue; }
2001-    # Check for guested configure.
2002-    if test -f "$ac_srcdir/configure.gnu"; then
2003-      echo &&
2004-      $SHELL "$ac_srcdir/configure.gnu" --help=recursive
2005-    elif test -f "$ac_srcdir/configure"; then
2006-      echo &&
2007-      $SHELL "$ac_srcdir/configure" --help=recursive
2008-    else
2009-      $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
2010-    fi || ac_status=$?
2011-    cd "$ac_pwd" || { ac_status=$?; break; }
2012-  done
2013-fi
2014-
2015-test -n "$ac_init_help" && exit $ac_status
2016-if $ac_init_version; then
2017-  cat <<\_ACEOF
2018-configure
2019-generated by GNU Autoconf 2.69
2020-
2021-Copyright (C) 2012 Free Software Foundation, Inc.
2022-This configure script is free software; the Free Software Foundation
2023-gives unlimited permission to copy, distribute and modify it.
2024-_ACEOF
2025-  exit
2026-fi
2027-
2028-## ------------------------ ##
2029-## Autoconf initialization. ##
2030-## ------------------------ ##
2031-
2032-# ac_fn_c_try_compile LINENO
2033-# --------------------------
2034-# Try to compile conftest.$ac_ext, and return whether this succeeded.
2035-ac_fn_c_try_compile ()
2036-{
2037-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
2038-  rm -f conftest.$ac_objext
2039-  if { { ac_try="$ac_compile"
2040-case "(($ac_try" in
2041-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
2042-  *) ac_try_echo=$ac_try;;
2043-esac
2044-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
2045-$as_echo "$ac_try_echo"; } >&5
2046-  (eval "$ac_compile") 2>conftest.err
2047-  ac_status=$?
2048-  if test -s conftest.err; then
2049-    grep -v '^ *+' conftest.err >conftest.er1
2050-    cat conftest.er1 >&5
2051-    mv -f conftest.er1 conftest.err
2052-  fi
2053-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
2054-  test $ac_status = 0; } && {
2055-	 test -z "$ac_c_werror_flag" ||
2056-	 test ! -s conftest.err
2057-       } && test -s conftest.$ac_objext; then :
2058-  ac_retval=0
2059-else
2060-  $as_echo "$as_me: failed program was:" >&5
2061-sed 's/^/| /' conftest.$ac_ext >&5
2062-
2063-	ac_retval=1
2064-fi
2065-  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
2066-  as_fn_set_status $ac_retval
2067-
2068-} # ac_fn_c_try_compile
2069-
2070-# ac_fn_c_try_cpp LINENO
2071-# ----------------------
2072-# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
2073-ac_fn_c_try_cpp ()
2074-{
2075-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
2076-  if { { ac_try="$ac_cpp conftest.$ac_ext"
2077-case "(($ac_try" in
2078-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
2079-  *) ac_try_echo=$ac_try;;
2080-esac
2081-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
2082-$as_echo "$ac_try_echo"; } >&5
2083-  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
2084-  ac_status=$?
2085-  if test -s conftest.err; then
2086-    grep -v '^ *+' conftest.err >conftest.er1
2087-    cat conftest.er1 >&5
2088-    mv -f conftest.er1 conftest.err
2089-  fi
2090-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
2091-  test $ac_status = 0; } > conftest.i && {
2092-	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
2093-	 test ! -s conftest.err
2094-       }; then :
2095-  ac_retval=0
2096-else
2097-  $as_echo "$as_me: failed program was:" >&5
2098-sed 's/^/| /' conftest.$ac_ext >&5
2099-
2100-    ac_retval=1
2101-fi
2102-  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
2103-  as_fn_set_status $ac_retval
2104-
2105-} # ac_fn_c_try_cpp
2106-
2107-# ac_fn_cxx_try_compile LINENO
2108-# ----------------------------
2109-# Try to compile conftest.$ac_ext, and return whether this succeeded.
2110-ac_fn_cxx_try_compile ()
2111-{
2112-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
2113-  rm -f conftest.$ac_objext
2114-  if { { ac_try="$ac_compile"
2115-case "(($ac_try" in
2116-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
2117-  *) ac_try_echo=$ac_try;;
2118-esac
2119-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
2120-$as_echo "$ac_try_echo"; } >&5
2121-  (eval "$ac_compile") 2>conftest.err
2122-  ac_status=$?
2123-  if test -s conftest.err; then
2124-    grep -v '^ *+' conftest.err >conftest.er1
2125-    cat conftest.er1 >&5
2126-    mv -f conftest.er1 conftest.err
2127-  fi
2128-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
2129-  test $ac_status = 0; } && {
2130-	 test -z "$ac_cxx_werror_flag" ||
2131-	 test ! -s conftest.err
2132-       } && test -s conftest.$ac_objext; then :
2133-  ac_retval=0
2134-else
2135-  $as_echo "$as_me: failed program was:" >&5
2136-sed 's/^/| /' conftest.$ac_ext >&5
2137-
2138-	ac_retval=1
2139-fi
2140-  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
2141-  as_fn_set_status $ac_retval
2142-
2143-} # ac_fn_cxx_try_compile
2144-
2145-# ac_fn_c_try_link LINENO
2146-# -----------------------
2147-# Try to link conftest.$ac_ext, and return whether this succeeded.
2148-ac_fn_c_try_link ()
2149-{
2150-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
2151-  rm -f conftest.$ac_objext conftest$ac_exeext
2152-  if { { ac_try="$ac_link"
2153-case "(($ac_try" in
2154-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
2155-  *) ac_try_echo=$ac_try;;
2156-esac
2157-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
2158-$as_echo "$ac_try_echo"; } >&5
2159-  (eval "$ac_link") 2>conftest.err
2160-  ac_status=$?
2161-  if test -s conftest.err; then
2162-    grep -v '^ *+' conftest.err >conftest.er1
2163-    cat conftest.er1 >&5
2164-    mv -f conftest.er1 conftest.err
2165-  fi
2166-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
2167-  test $ac_status = 0; } && {
2168-	 test -z "$ac_c_werror_flag" ||
2169-	 test ! -s conftest.err
2170-       } && test -s conftest$ac_exeext && {
2171-	 test "$cross_compiling" = yes ||
2172-	 test -x conftest$ac_exeext
2173-       }; then :
2174-  ac_retval=0
2175-else
2176-  $as_echo "$as_me: failed program was:" >&5
2177-sed 's/^/| /' conftest.$ac_ext >&5
2178-
2179-	ac_retval=1
2180-fi
2181-  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
2182-  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
2183-  # interfere with the next link command; also delete a directory that is
2184-  # left behind by Apple's compiler.  We do this before executing the actions.
2185-  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
2186-  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
2187-  as_fn_set_status $ac_retval
2188-
2189-} # ac_fn_c_try_link
2190-
2191-# ac_fn_c_try_run LINENO
2192-# ----------------------
2193-# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes
2194-# that executables *can* be run.
2195-ac_fn_c_try_run ()
2196-{
2197-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
2198-  if { { ac_try="$ac_link"
2199-case "(($ac_try" in
2200-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
2201-  *) ac_try_echo=$ac_try;;
2202-esac
2203-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
2204-$as_echo "$ac_try_echo"; } >&5
2205-  (eval "$ac_link") 2>&5
2206-  ac_status=$?
2207-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
2208-  test $ac_status = 0; } && { ac_try='./conftest$ac_exeext'
2209-  { { case "(($ac_try" in
2210-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
2211-  *) ac_try_echo=$ac_try;;
2212-esac
2213-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
2214-$as_echo "$ac_try_echo"; } >&5
2215-  (eval "$ac_try") 2>&5
2216-  ac_status=$?
2217-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
2218-  test $ac_status = 0; }; }; then :
2219-  ac_retval=0
2220-else
2221-  $as_echo "$as_me: program exited with status $ac_status" >&5
2222-       $as_echo "$as_me: failed program was:" >&5
2223-sed 's/^/| /' conftest.$ac_ext >&5
2224-
2225-       ac_retval=$ac_status
2226-fi
2227-  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
2228-  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
2229-  as_fn_set_status $ac_retval
2230-
2231-} # ac_fn_c_try_run
2232-
2233-# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES
2234-# -------------------------------------------------------
2235-# Tests whether HEADER exists and can be compiled using the include files in
2236-# INCLUDES, setting the cache variable VAR accordingly.
2237-ac_fn_c_check_header_compile ()
2238-{
2239-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
2240-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
2241-$as_echo_n "checking for $2... " >&6; }
2242-if eval \${$3+:} false; then :
2243-  $as_echo_n "(cached) " >&6
2244-else
2245-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
2246-/* end confdefs.h.  */
2247-$4
2248-#include <$2>
2249-_ACEOF
2250-if ac_fn_c_try_compile "$LINENO"; then :
2251-  eval "$3=yes"
2252-else
2253-  eval "$3=no"
2254-fi
2255-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
2256-fi
2257-eval ac_res=\$$3
2258-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
2259-$as_echo "$ac_res" >&6; }
2260-  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
2261-
2262-} # ac_fn_c_check_header_compile
2263-
2264-# ac_fn_c_compute_int LINENO EXPR VAR INCLUDES
2265-# --------------------------------------------
2266-# Tries to find the compile-time value of EXPR in a program that includes
2267-# INCLUDES, setting VAR accordingly. Returns whether the value could be
2268-# computed.
2269-ac_fn_c_compute_int ()
2270-{
2271-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
2272-  if test "$cross_compiling" = yes; then
2273-    # Depending upon the size, compute the lo and hi bounds.
2274-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
2275-/* end confdefs.h.  */
2276-$4
2277-int
2278-main ()
2279-{
2280-static int test_array [1 - 2 * !(($2) >= 0)];
2281-test_array [0] = 0;
2282-return test_array [0];
2283-
2284-  ;
2285-  return 0;
2286-}
2287-_ACEOF
2288-if ac_fn_c_try_compile "$LINENO"; then :
2289-  ac_lo=0 ac_mid=0
2290-  while :; do
2291-    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
2292-/* end confdefs.h.  */
2293-$4
2294-int
2295-main ()
2296-{
2297-static int test_array [1 - 2 * !(($2) <= $ac_mid)];
2298-test_array [0] = 0;
2299-return test_array [0];
2300-
2301-  ;
2302-  return 0;
2303-}
2304-_ACEOF
2305-if ac_fn_c_try_compile "$LINENO"; then :
2306-  ac_hi=$ac_mid; break
2307-else
2308-  as_fn_arith $ac_mid + 1 && ac_lo=$as_val
2309-			if test $ac_lo -le $ac_mid; then
2310-			  ac_lo= ac_hi=
2311-			  break
2312-			fi
2313-			as_fn_arith 2 '*' $ac_mid + 1 && ac_mid=$as_val
2314-fi
2315-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
2316-  done
2317-else
2318-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
2319-/* end confdefs.h.  */
2320-$4
2321-int
2322-main ()
2323-{
2324-static int test_array [1 - 2 * !(($2) < 0)];
2325-test_array [0] = 0;
2326-return test_array [0];
2327-
2328-  ;
2329-  return 0;
2330-}
2331-_ACEOF
2332-if ac_fn_c_try_compile "$LINENO"; then :
2333-  ac_hi=-1 ac_mid=-1
2334-  while :; do
2335-    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
2336-/* end confdefs.h.  */
2337-$4
2338-int
2339-main ()
2340-{
2341-static int test_array [1 - 2 * !(($2) >= $ac_mid)];
2342-test_array [0] = 0;
2343-return test_array [0];
2344-
2345-  ;
2346-  return 0;
2347-}
2348-_ACEOF
2349-if ac_fn_c_try_compile "$LINENO"; then :
2350-  ac_lo=$ac_mid; break
2351-else
2352-  as_fn_arith '(' $ac_mid ')' - 1 && ac_hi=$as_val
2353-			if test $ac_mid -le $ac_hi; then
2354-			  ac_lo= ac_hi=
2355-			  break
2356-			fi
2357-			as_fn_arith 2 '*' $ac_mid && ac_mid=$as_val
2358-fi
2359-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
2360-  done
2361-else
2362-  ac_lo= ac_hi=
2363-fi
2364-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
2365-fi
2366-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
2367-# Binary search between lo and hi bounds.
2368-while test "x$ac_lo" != "x$ac_hi"; do
2369-  as_fn_arith '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo && ac_mid=$as_val
2370-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
2371-/* end confdefs.h.  */
2372-$4
2373-int
2374-main ()
2375-{
2376-static int test_array [1 - 2 * !(($2) <= $ac_mid)];
2377-test_array [0] = 0;
2378-return test_array [0];
2379-
2380-  ;
2381-  return 0;
2382-}
2383-_ACEOF
2384-if ac_fn_c_try_compile "$LINENO"; then :
2385-  ac_hi=$ac_mid
2386-else
2387-  as_fn_arith '(' $ac_mid ')' + 1 && ac_lo=$as_val
2388-fi
2389-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
2390-done
2391-case $ac_lo in #((
2392-?*) eval "$3=\$ac_lo"; ac_retval=0 ;;
2393-'') ac_retval=1 ;;
2394-esac
2395-  else
2396-    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
2397-/* end confdefs.h.  */
2398-$4
2399-static long int longval () { return $2; }
2400-static unsigned long int ulongval () { return $2; }
2401-#include <stdio.h>
2402-#include <stdlib.h>
2403-int
2404-main ()
2405-{
2406-
2407-  FILE *f = fopen ("conftest.val", "w");
2408-  if (! f)
2409-    return 1;
2410-  if (($2) < 0)
2411-    {
2412-      long int i = longval ();
2413-      if (i != ($2))
2414-	return 1;
2415-      fprintf (f, "%ld", i);
2416-    }
2417-  else
2418-    {
2419-      unsigned long int i = ulongval ();
2420-      if (i != ($2))
2421-	return 1;
2422-      fprintf (f, "%lu", i);
2423-    }
2424-  /* Do not output a trailing newline, as this causes \r\n confusion
2425-     on some platforms.  */
2426-  return ferror (f) || fclose (f) != 0;
2427-
2428-  ;
2429-  return 0;
2430-}
2431-_ACEOF
2432-if ac_fn_c_try_run "$LINENO"; then :
2433-  echo >>conftest.val; read $3 <conftest.val; ac_retval=0
2434-else
2435-  ac_retval=1
2436-fi
2437-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
2438-  conftest.$ac_objext conftest.beam conftest.$ac_ext
2439-rm -f conftest.val
2440-
2441-  fi
2442-  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
2443-  as_fn_set_status $ac_retval
2444-
2445-} # ac_fn_c_compute_int
2446-
2447-# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES
2448-# -------------------------------------------------------
2449-# Tests whether HEADER exists, giving a warning if it cannot be compiled using
2450-# the include files in INCLUDES and setting the cache variable VAR
2451-# accordingly.
2452-ac_fn_c_check_header_mongrel ()
2453-{
2454-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
2455-  if eval \${$3+:} false; then :
2456-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
2457-$as_echo_n "checking for $2... " >&6; }
2458-if eval \${$3+:} false; then :
2459-  $as_echo_n "(cached) " >&6
2460-fi
2461-eval ac_res=\$$3
2462-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
2463-$as_echo "$ac_res" >&6; }
2464-else
2465-  # Is the header compilable?
2466-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5
2467-$as_echo_n "checking $2 usability... " >&6; }
2468-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
2469-/* end confdefs.h.  */
2470-$4
2471-#include <$2>
2472-_ACEOF
2473-if ac_fn_c_try_compile "$LINENO"; then :
2474-  ac_header_compiler=yes
2475-else
2476-  ac_header_compiler=no
2477-fi
2478-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
2479-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5
2480-$as_echo "$ac_header_compiler" >&6; }
2481-
2482-# Is the header present?
2483-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5
2484-$as_echo_n "checking $2 presence... " >&6; }
2485-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
2486-/* end confdefs.h.  */
2487-#include <$2>
2488-_ACEOF
2489-if ac_fn_c_try_cpp "$LINENO"; then :
2490-  ac_header_preproc=yes
2491-else
2492-  ac_header_preproc=no
2493-fi
2494-rm -f conftest.err conftest.i conftest.$ac_ext
2495-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5
2496-$as_echo "$ac_header_preproc" >&6; }
2497-
2498-# So?  What about this header?
2499-case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #((
2500-  yes:no: )
2501-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5
2502-$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;}
2503-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
2504-$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
2505-    ;;
2506-  no:yes:* )
2507-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5
2508-$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;}
2509-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2:     check for missing prerequisite headers?" >&5
2510-$as_echo "$as_me: WARNING: $2:     check for missing prerequisite headers?" >&2;}
2511-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5
2512-$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;}
2513-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2:     section \"Present But Cannot Be Compiled\"" >&5
2514-$as_echo "$as_me: WARNING: $2:     section \"Present But Cannot Be Compiled\"" >&2;}
2515-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
2516-$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
2517-    ;;
2518-esac
2519-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
2520-$as_echo_n "checking for $2... " >&6; }
2521-if eval \${$3+:} false; then :
2522-  $as_echo_n "(cached) " >&6
2523-else
2524-  eval "$3=\$ac_header_compiler"
2525-fi
2526-eval ac_res=\$$3
2527-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
2528-$as_echo "$ac_res" >&6; }
2529-fi
2530-  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
2531-
2532-} # ac_fn_c_check_header_mongrel
2533-
2534-# ac_fn_c_check_func LINENO FUNC VAR
2535-# ----------------------------------
2536-# Tests whether FUNC exists, setting the cache variable VAR accordingly.
2537-ac_fn_c_check_func ()
2538-{
2539-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
2540-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
2541-$as_echo_n "checking for $2... " >&6; }
2542-if eval \${$3+:} false; then :
2543-  $as_echo_n "(cached) " >&6
2544-else
2545-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
2546-/* end confdefs.h.  */
2547-/* Define $2 to an innocuous variant, in case <limits.h> declares $2.
2548-   For example, HP-UX 11i <limits.h> declares gettimeofday.  */
2549-#define $2 innocuous_$2
2550-
2551-/* System header to define __stub macros and hopefully few prototypes,
2552-    which can conflict with char $2 (); below.
2553-    Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
2554-    <limits.h> exists even on freestanding compilers.  */
2555-
2556-#ifdef __STDC__
2557-# include <limits.h>
2558-#else
2559-# include <assert.h>
2560-#endif
2561-
2562-#undef $2
2563-
2564-/* Override any GCC internal prototype to avoid an error.
2565-   Use char because int might match the return type of a GCC
2566-   builtin and then its argument prototype would still apply.  */
2567-#ifdef __cplusplus
2568-extern "C"
2569-#endif
2570-char $2 ();
2571-/* The GNU C library defines this for functions which it implements
2572-    to always fail with ENOSYS.  Some functions are actually named
2573-    something starting with __ and the normal name is an alias.  */
2574-#if defined __stub_$2 || defined __stub___$2
2575-choke me
2576-#endif
2577-
2578-int
2579-main ()
2580-{
2581-return $2 ();
2582-  ;
2583-  return 0;
2584-}
2585-_ACEOF
2586-if ac_fn_c_try_link "$LINENO"; then :
2587-  eval "$3=yes"
2588-else
2589-  eval "$3=no"
2590-fi
2591-rm -f core conftest.err conftest.$ac_objext \
2592-    conftest$ac_exeext conftest.$ac_ext
2593-fi
2594-eval ac_res=\$$3
2595-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
2596-$as_echo "$ac_res" >&6; }
2597-  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
2598-
2599-} # ac_fn_c_check_func
2600-
2601-# ac_fn_c_check_type LINENO TYPE VAR INCLUDES
2602-# -------------------------------------------
2603-# Tests whether TYPE exists after having included INCLUDES, setting cache
2604-# variable VAR accordingly.
2605-ac_fn_c_check_type ()
2606-{
2607-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
2608-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
2609-$as_echo_n "checking for $2... " >&6; }
2610-if eval \${$3+:} false; then :
2611-  $as_echo_n "(cached) " >&6
2612-else
2613-  eval "$3=no"
2614-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
2615-/* end confdefs.h.  */
2616-$4
2617-int
2618-main ()
2619-{
2620-if (sizeof ($2))
2621-	 return 0;
2622-  ;
2623-  return 0;
2624-}
2625-_ACEOF
2626-if ac_fn_c_try_compile "$LINENO"; then :
2627-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
2628-/* end confdefs.h.  */
2629-$4
2630-int
2631-main ()
2632-{
2633-if (sizeof (($2)))
2634-	    return 0;
2635-  ;
2636-  return 0;
2637-}
2638-_ACEOF
2639-if ac_fn_c_try_compile "$LINENO"; then :
2640-
2641-else
2642-  eval "$3=yes"
2643-fi
2644-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
2645-fi
2646-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
2647-fi
2648-eval ac_res=\$$3
2649-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
2650-$as_echo "$ac_res" >&6; }
2651-  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
2652-
2653-} # ac_fn_c_check_type
2654-cat >config.log <<_ACEOF
2655-This file contains any messages produced by compilers while
2656-running configure, to aid debugging if configure makes a mistake.
2657-
2658-It was created by $as_me, which was
2659-generated by GNU Autoconf 2.69.  Invocation command line was
2660-
2661-  $ $0 $@
2662-
2663-_ACEOF
2664-exec 5>>config.log
2665-{
2666-cat <<_ASUNAME
2667-## --------- ##
2668-## Platform. ##
2669-## --------- ##
2670-
2671-hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
2672-uname -m = `(uname -m) 2>/dev/null || echo unknown`
2673-uname -r = `(uname -r) 2>/dev/null || echo unknown`
2674-uname -s = `(uname -s) 2>/dev/null || echo unknown`
2675-uname -v = `(uname -v) 2>/dev/null || echo unknown`
2676-
2677-/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
2678-/bin/uname -X     = `(/bin/uname -X) 2>/dev/null     || echo unknown`
2679-
2680-/bin/arch              = `(/bin/arch) 2>/dev/null              || echo unknown`
2681-/usr/bin/arch -k       = `(/usr/bin/arch -k) 2>/dev/null       || echo unknown`
2682-/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
2683-/usr/bin/hostinfo      = `(/usr/bin/hostinfo) 2>/dev/null      || echo unknown`
2684-/bin/machine           = `(/bin/machine) 2>/dev/null           || echo unknown`
2685-/usr/bin/oslevel       = `(/usr/bin/oslevel) 2>/dev/null       || echo unknown`
2686-/bin/universe          = `(/bin/universe) 2>/dev/null          || echo unknown`
2687-
2688-_ASUNAME
2689-
2690-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
2691-for as_dir in $PATH
2692-do
2693-  IFS=$as_save_IFS
2694-  test -z "$as_dir" && as_dir=.
2695-    $as_echo "PATH: $as_dir"
2696-  done
2697-IFS=$as_save_IFS
2698-
2699-} >&5
2700-
2701-cat >&5 <<_ACEOF
2702-
2703-
2704-## ----------- ##
2705-## Core tests. ##
2706-## ----------- ##
2707-
2708-_ACEOF
2709-
2710-
2711-# Keep a trace of the command line.
2712-# Strip out --no-create and --no-recursion so they do not pile up.
2713-# Strip out --silent because we don't want to record it for future runs.
2714-# Also quote any args containing shell meta-characters.
2715-# Make two passes to allow for proper duplicate-argument suppression.
2716-ac_configure_args=
2717-ac_configure_args0=
2718-ac_configure_args1=
2719-ac_must_keep_next=false
2720-for ac_pass in 1 2
2721-do
2722-  for ac_arg
2723-  do
2724-    case $ac_arg in
2725-    -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
2726-    -q | -quiet | --quiet | --quie | --qui | --qu | --q \
2727-    | -silent | --silent | --silen | --sile | --sil)
2728-      continue ;;
2729-    *\'*)
2730-      ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
2731-    esac
2732-    case $ac_pass in
2733-    1) as_fn_append ac_configure_args0 " '$ac_arg'" ;;
2734-    2)
2735-      as_fn_append ac_configure_args1 " '$ac_arg'"
2736-      if test $ac_must_keep_next = true; then
2737-	ac_must_keep_next=false # Got value, back to normal.
2738-      else
2739-	case $ac_arg in
2740-	  *=* | --config-cache | -C | -disable-* | --disable-* \
2741-	  | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
2742-	  | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
2743-	  | -with-* | --with-* | -without-* | --without-* | --x)
2744-	    case "$ac_configure_args0 " in
2745-	      "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
2746-	    esac
2747-	    ;;
2748-	  -* ) ac_must_keep_next=true ;;
2749-	esac
2750-      fi
2751-      as_fn_append ac_configure_args " '$ac_arg'"
2752-      ;;
2753-    esac
2754-  done
2755-done
2756-{ ac_configure_args0=; unset ac_configure_args0;}
2757-{ ac_configure_args1=; unset ac_configure_args1;}
2758-
2759-# When interrupted or exit'd, cleanup temporary files, and complete
2760-# config.log.  We remove comments because anyway the quotes in there
2761-# would cause problems or look ugly.
2762-# WARNING: Use '\'' to represent an apostrophe within the trap.
2763-# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
2764-trap 'exit_status=$?
2765-  # Save into config.log some information that might help in debugging.
2766-  {
2767-    echo
2768-
2769-    $as_echo "## ---------------- ##
2770-## Cache variables. ##
2771-## ---------------- ##"
2772-    echo
2773-    # The following way of writing the cache mishandles newlines in values,
2774-(
2775-  for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
2776-    eval ac_val=\$$ac_var
2777-    case $ac_val in #(
2778-    *${as_nl}*)
2779-      case $ac_var in #(
2780-      *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
2781-$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
2782-      esac
2783-      case $ac_var in #(
2784-      _ | IFS | as_nl) ;; #(
2785-      BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
2786-      *) { eval $ac_var=; unset $ac_var;} ;;
2787-      esac ;;
2788-    esac
2789-  done
2790-  (set) 2>&1 |
2791-    case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
2792-    *${as_nl}ac_space=\ *)
2793-      sed -n \
2794-	"s/'\''/'\''\\\\'\'''\''/g;
2795-	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
2796-      ;; #(
2797-    *)
2798-      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
2799-      ;;
2800-    esac |
2801-    sort
2802-)
2803-    echo
2804-
2805-    $as_echo "## ----------------- ##
2806-## Output variables. ##
2807-## ----------------- ##"
2808-    echo
2809-    for ac_var in $ac_subst_vars
2810-    do
2811-      eval ac_val=\$$ac_var
2812-      case $ac_val in
2813-      *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
2814-      esac
2815-      $as_echo "$ac_var='\''$ac_val'\''"
2816-    done | sort
2817-    echo
2818-
2819-    if test -n "$ac_subst_files"; then
2820-      $as_echo "## ------------------- ##
2821-## File substitutions. ##
2822-## ------------------- ##"
2823-      echo
2824-      for ac_var in $ac_subst_files
2825-      do
2826-	eval ac_val=\$$ac_var
2827-	case $ac_val in
2828-	*\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
2829-	esac
2830-	$as_echo "$ac_var='\''$ac_val'\''"
2831-      done | sort
2832-      echo
2833-    fi
2834-
2835-    if test -s confdefs.h; then
2836-      $as_echo "## ----------- ##
2837-## confdefs.h. ##
2838-## ----------- ##"
2839-      echo
2840-      cat confdefs.h
2841-      echo
2842-    fi
2843-    test "$ac_signal" != 0 &&
2844-      $as_echo "$as_me: caught signal $ac_signal"
2845-    $as_echo "$as_me: exit $exit_status"
2846-  } >&5
2847-  rm -f core *.core core.conftest.* &&
2848-    rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
2849-    exit $exit_status
2850-' 0
2851-for ac_signal in 1 2 13 15; do
2852-  trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal
2853-done
2854-ac_signal=0
2855-
2856-# confdefs.h avoids OS command line length limits that DEFS can exceed.
2857-rm -f -r conftest* confdefs.h
2858-
2859-$as_echo "/* confdefs.h */" > confdefs.h
2860-
2861-# Predefined preprocessor variables.
2862-
2863-cat >>confdefs.h <<_ACEOF
2864-#define PACKAGE_NAME "$PACKAGE_NAME"
2865-_ACEOF
2866-
2867-cat >>confdefs.h <<_ACEOF
2868-#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
2869-_ACEOF
2870-
2871-cat >>confdefs.h <<_ACEOF
2872-#define PACKAGE_VERSION "$PACKAGE_VERSION"
2873-_ACEOF
2874-
2875-cat >>confdefs.h <<_ACEOF
2876-#define PACKAGE_STRING "$PACKAGE_STRING"
2877-_ACEOF
2878-
2879-cat >>confdefs.h <<_ACEOF
2880-#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
2881-_ACEOF
2882-
2883-cat >>confdefs.h <<_ACEOF
2884-#define PACKAGE_URL "$PACKAGE_URL"
2885-_ACEOF
2886-
2887-
2888-# Let the site file select an alternate cache file if it wants to.
2889-# Prefer an explicitly selected file to automatically selected ones.
2890-ac_site_file1=NONE
2891-ac_site_file2=NONE
2892-if test -n "$CONFIG_SITE"; then
2893-  # We do not want a PATH search for config.site.
2894-  case $CONFIG_SITE in #((
2895-    -*)  ac_site_file1=./$CONFIG_SITE;;
2896-    */*) ac_site_file1=$CONFIG_SITE;;
2897-    *)   ac_site_file1=./$CONFIG_SITE;;
2898-  esac
2899-elif test "x$prefix" != xNONE; then
2900-  ac_site_file1=$prefix/share/config.site
2901-  ac_site_file2=$prefix/etc/config.site
2902-else
2903-  ac_site_file1=$ac_default_prefix/share/config.site
2904-  ac_site_file2=$ac_default_prefix/etc/config.site
2905-fi
2906-for ac_site_file in "$ac_site_file1" "$ac_site_file2"
2907-do
2908-  test "x$ac_site_file" = xNONE && continue
2909-  if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then
2910-    { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
2911-$as_echo "$as_me: loading site script $ac_site_file" >&6;}
2912-    sed 's/^/| /' "$ac_site_file" >&5
2913-    . "$ac_site_file" \
2914-      || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
2915-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
2916-as_fn_error $? "failed to load site script $ac_site_file
2917-See \`config.log' for more details" "$LINENO" 5; }
2918-  fi
2919-done
2920-
2921-if test -r "$cache_file"; then
2922-  # Some versions of bash will fail to source /dev/null (special files
2923-  # actually), so we avoid doing that.  DJGPP emulates it as a regular file.
2924-  if test /dev/null != "$cache_file" && test -f "$cache_file"; then
2925-    { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5
2926-$as_echo "$as_me: loading cache $cache_file" >&6;}
2927-    case $cache_file in
2928-      [\\/]* | ?:[\\/]* ) . "$cache_file";;
2929-      *)                      . "./$cache_file";;
2930-    esac
2931-  fi
2932-else
2933-  { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5
2934-$as_echo "$as_me: creating cache $cache_file" >&6;}
2935-  >$cache_file
2936-fi
2937-
2938-# Check that the precious variables saved in the cache have kept the same
2939-# value.
2940-ac_cache_corrupted=false
2941-for ac_var in $ac_precious_vars; do
2942-  eval ac_old_set=\$ac_cv_env_${ac_var}_set
2943-  eval ac_new_set=\$ac_env_${ac_var}_set
2944-  eval ac_old_val=\$ac_cv_env_${ac_var}_value
2945-  eval ac_new_val=\$ac_env_${ac_var}_value
2946-  case $ac_old_set,$ac_new_set in
2947-    set,)
2948-      { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
2949-$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
2950-      ac_cache_corrupted=: ;;
2951-    ,set)
2952-      { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5
2953-$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
2954-      ac_cache_corrupted=: ;;
2955-    ,);;
2956-    *)
2957-      if test "x$ac_old_val" != "x$ac_new_val"; then
2958-	# differences in whitespace do not lead to failure.
2959-	ac_old_val_w=`echo x $ac_old_val`
2960-	ac_new_val_w=`echo x $ac_new_val`
2961-	if test "$ac_old_val_w" != "$ac_new_val_w"; then
2962-	  { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5
2963-$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
2964-	  ac_cache_corrupted=:
2965-	else
2966-	  { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
2967-$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
2968-	  eval $ac_var=\$ac_old_val
2969-	fi
2970-	{ $as_echo "$as_me:${as_lineno-$LINENO}:   former value:  \`$ac_old_val'" >&5
2971-$as_echo "$as_me:   former value:  \`$ac_old_val'" >&2;}
2972-	{ $as_echo "$as_me:${as_lineno-$LINENO}:   current value: \`$ac_new_val'" >&5
2973-$as_echo "$as_me:   current value: \`$ac_new_val'" >&2;}
2974-      fi;;
2975-  esac
2976-  # Pass precious variables to config.status.
2977-  if test "$ac_new_set" = set; then
2978-    case $ac_new_val in
2979-    *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
2980-    *) ac_arg=$ac_var=$ac_new_val ;;
2981-    esac
2982-    case " $ac_configure_args " in
2983-      *" '$ac_arg' "*) ;; # Avoid dups.  Use of quotes ensures accuracy.
2984-      *) as_fn_append ac_configure_args " '$ac_arg'" ;;
2985-    esac
2986-  fi
2987-done
2988-if $ac_cache_corrupted; then
2989-  { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
2990-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
2991-  { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
2992-$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
2993-  as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5
2994-fi
2995-## -------------------- ##
2996-## Main body of script. ##
2997-## -------------------- ##
2998-
2999-ac_ext=c
3000-ac_cpp='$CPP $CPPFLAGS'
3001-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
3002-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
3003-ac_compiler_gnu=$ac_cv_c_compiler_gnu
3004-
3005-
3006-
3007-ac_aux_dir=
3008-for ac_dir in build-aux "$srcdir"/build-aux; do
3009-  if test -f "$ac_dir/install-sh"; then
3010-    ac_aux_dir=$ac_dir
3011-    ac_install_sh="$ac_aux_dir/install-sh -c"
3012-    break
3013-  elif test -f "$ac_dir/install.sh"; then
3014-    ac_aux_dir=$ac_dir
3015-    ac_install_sh="$ac_aux_dir/install.sh -c"
3016-    break
3017-  elif test -f "$ac_dir/shtool"; then
3018-    ac_aux_dir=$ac_dir
3019-    ac_install_sh="$ac_aux_dir/shtool install -c"
3020-    break
3021-  fi
3022-done
3023-if test -z "$ac_aux_dir"; then
3024-  as_fn_error $? "cannot find install-sh, install.sh, or shtool in build-aux \"$srcdir\"/build-aux" "$LINENO" 5
3025-fi
3026-
3027-# These three variables are undocumented and unsupported,
3028-# and are intended to be withdrawn in a future Autoconf release.
3029-# They can cause serious problems if a builder's source tree is in a directory
3030-# whose full name contains unusual characters.
3031-ac_config_guess="$SHELL $ac_aux_dir/config.guess"  # Please don't use this var.
3032-ac_config_sub="$SHELL $ac_aux_dir/config.sub"  # Please don't use this var.
3033-ac_configure="$SHELL $ac_aux_dir/configure"  # Please don't use this var.
3034-
3035-
3036-
3037-
3038-
3039-
3040-
3041-
3042-CONFIGURE_CFLAGS=
3043-SPECIFIED_CFLAGS="${CFLAGS}"
3044-
3045-
3046-
3047-
3048-
3049-CONFIGURE_CXXFLAGS=
3050-SPECIFIED_CXXFLAGS="${CXXFLAGS}"
3051-
3052-
3053-
3054-
3055-
3056-CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'`
3057-
3058-
3059-rev=2
3060-
3061-
3062-srcroot=$srcdir
3063-if test "x${srcroot}" = "x." ; then
3064-  srcroot=""
3065-else
3066-  srcroot="${srcroot}/"
3067-fi
3068-
3069-abs_srcroot="`cd \"${srcdir}\"; pwd`/"
3070-
3071-
3072-objroot=""
3073-
3074-abs_objroot="`pwd`/"
3075-
3076-
3077-case "$prefix" in
3078-   *\ * ) as_fn_error $? "Prefix should not contain spaces" "$LINENO" 5 ;;
3079-   "NONE" ) prefix="/usr/local" ;;
3080-esac
3081-case "$exec_prefix" in
3082-   *\ * ) as_fn_error $? "Exec prefix should not contain spaces" "$LINENO" 5 ;;
3083-   "NONE" ) exec_prefix=$prefix ;;
3084-esac
3085-PREFIX=$prefix
3086-
3087-BINDIR=`eval echo $bindir`
3088-BINDIR=`eval echo $BINDIR`
3089-
3090-INCLUDEDIR=`eval echo $includedir`
3091-INCLUDEDIR=`eval echo $INCLUDEDIR`
3092-
3093-LIBDIR=`eval echo $libdir`
3094-LIBDIR=`eval echo $LIBDIR`
3095-
3096-DATADIR=`eval echo $datadir`
3097-DATADIR=`eval echo $DATADIR`
3098-
3099-MANDIR=`eval echo $mandir`
3100-MANDIR=`eval echo $MANDIR`
3101-
3102-
3103-# Extract the first word of "xsltproc", so it can be a program name with args.
3104-set dummy xsltproc; ac_word=$2
3105-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
3106-$as_echo_n "checking for $ac_word... " >&6; }
3107-if ${ac_cv_path_XSLTPROC+:} false; then :
3108-  $as_echo_n "(cached) " >&6
3109-else
3110-  case $XSLTPROC in
3111-  [\\/]* | ?:[\\/]*)
3112-  ac_cv_path_XSLTPROC="$XSLTPROC" # Let the user override the test with a path.
3113-  ;;
3114-  *)
3115-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
3116-for as_dir in $PATH
3117-do
3118-  IFS=$as_save_IFS
3119-  test -z "$as_dir" && as_dir=.
3120-    for ac_exec_ext in '' $ac_executable_extensions; do
3121-  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
3122-    ac_cv_path_XSLTPROC="$as_dir/$ac_word$ac_exec_ext"
3123-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
3124-    break 2
3125-  fi
3126-done
3127-  done
3128-IFS=$as_save_IFS
3129-
3130-  test -z "$ac_cv_path_XSLTPROC" && ac_cv_path_XSLTPROC="false"
3131-  ;;
3132-esac
3133-fi
3134-XSLTPROC=$ac_cv_path_XSLTPROC
3135-if test -n "$XSLTPROC"; then
3136-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XSLTPROC" >&5
3137-$as_echo "$XSLTPROC" >&6; }
3138-else
3139-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
3140-$as_echo "no" >&6; }
3141-fi
3142-
3143-
3144-if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then
3145-  DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl"
3146-elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then
3147-  DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets"
3148-else
3149-    DEFAULT_XSLROOT=""
3150-fi
3151-
3152-# Check whether --with-xslroot was given.
3153-if test "${with_xslroot+set}" = set; then :
3154-  withval=$with_xslroot;
3155-if test "x$with_xslroot" = "xno" ; then
3156-  XSLROOT="${DEFAULT_XSLROOT}"
3157-else
3158-  XSLROOT="${with_xslroot}"
3159-fi
3160-
3161-else
3162-  XSLROOT="${DEFAULT_XSLROOT}"
3163-
3164-fi
3165-
3166-if test "x$XSLTPROC" = "xfalse" ; then
3167-  XSLROOT=""
3168-fi
3169-
3170-
3171-CFLAGS=$CFLAGS
3172-ac_ext=c
3173-ac_cpp='$CPP $CPPFLAGS'
3174-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
3175-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
3176-ac_compiler_gnu=$ac_cv_c_compiler_gnu
3177-if test -n "$ac_tool_prefix"; then
3178-  # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
3179-set dummy ${ac_tool_prefix}gcc; ac_word=$2
3180-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
3181-$as_echo_n "checking for $ac_word... " >&6; }
3182-if ${ac_cv_prog_CC+:} false; then :
3183-  $as_echo_n "(cached) " >&6
3184-else
3185-  if test -n "$CC"; then
3186-  ac_cv_prog_CC="$CC" # Let the user override the test.
3187-else
3188-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
3189-for as_dir in $PATH
3190-do
3191-  IFS=$as_save_IFS
3192-  test -z "$as_dir" && as_dir=.
3193-    for ac_exec_ext in '' $ac_executable_extensions; do
3194-  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
3195-    ac_cv_prog_CC="${ac_tool_prefix}gcc"
3196-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
3197-    break 2
3198-  fi
3199-done
3200-  done
3201-IFS=$as_save_IFS
3202-
3203-fi
3204-fi
3205-CC=$ac_cv_prog_CC
3206-if test -n "$CC"; then
3207-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
3208-$as_echo "$CC" >&6; }
3209-else
3210-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
3211-$as_echo "no" >&6; }
3212-fi
3213-
3214-
3215-fi
3216-if test -z "$ac_cv_prog_CC"; then
3217-  ac_ct_CC=$CC
3218-  # Extract the first word of "gcc", so it can be a program name with args.
3219-set dummy gcc; ac_word=$2
3220-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
3221-$as_echo_n "checking for $ac_word... " >&6; }
3222-if ${ac_cv_prog_ac_ct_CC+:} false; then :
3223-  $as_echo_n "(cached) " >&6
3224-else
3225-  if test -n "$ac_ct_CC"; then
3226-  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
3227-else
3228-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
3229-for as_dir in $PATH
3230-do
3231-  IFS=$as_save_IFS
3232-  test -z "$as_dir" && as_dir=.
3233-    for ac_exec_ext in '' $ac_executable_extensions; do
3234-  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
3235-    ac_cv_prog_ac_ct_CC="gcc"
3236-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
3237-    break 2
3238-  fi
3239-done
3240-  done
3241-IFS=$as_save_IFS
3242-
3243-fi
3244-fi
3245-ac_ct_CC=$ac_cv_prog_ac_ct_CC
3246-if test -n "$ac_ct_CC"; then
3247-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
3248-$as_echo "$ac_ct_CC" >&6; }
3249-else
3250-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
3251-$as_echo "no" >&6; }
3252-fi
3253-
3254-  if test "x$ac_ct_CC" = x; then
3255-    CC=""
3256-  else
3257-    case $cross_compiling:$ac_tool_warned in
3258-yes:)
3259-{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
3260-$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
3261-ac_tool_warned=yes ;;
3262-esac
3263-    CC=$ac_ct_CC
3264-  fi
3265-else
3266-  CC="$ac_cv_prog_CC"
3267-fi
3268-
3269-if test -z "$CC"; then
3270-          if test -n "$ac_tool_prefix"; then
3271-    # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
3272-set dummy ${ac_tool_prefix}cc; ac_word=$2
3273-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
3274-$as_echo_n "checking for $ac_word... " >&6; }
3275-if ${ac_cv_prog_CC+:} false; then :
3276-  $as_echo_n "(cached) " >&6
3277-else
3278-  if test -n "$CC"; then
3279-  ac_cv_prog_CC="$CC" # Let the user override the test.
3280-else
3281-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
3282-for as_dir in $PATH
3283-do
3284-  IFS=$as_save_IFS
3285-  test -z "$as_dir" && as_dir=.
3286-    for ac_exec_ext in '' $ac_executable_extensions; do
3287-  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
3288-    ac_cv_prog_CC="${ac_tool_prefix}cc"
3289-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
3290-    break 2
3291-  fi
3292-done
3293-  done
3294-IFS=$as_save_IFS
3295-
3296-fi
3297-fi
3298-CC=$ac_cv_prog_CC
3299-if test -n "$CC"; then
3300-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
3301-$as_echo "$CC" >&6; }
3302-else
3303-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
3304-$as_echo "no" >&6; }
3305-fi
3306-
3307-
3308-  fi
3309-fi
3310-if test -z "$CC"; then
3311-  # Extract the first word of "cc", so it can be a program name with args.
3312-set dummy cc; ac_word=$2
3313-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
3314-$as_echo_n "checking for $ac_word... " >&6; }
3315-if ${ac_cv_prog_CC+:} false; then :
3316-  $as_echo_n "(cached) " >&6
3317-else
3318-  if test -n "$CC"; then
3319-  ac_cv_prog_CC="$CC" # Let the user override the test.
3320-else
3321-  ac_prog_rejected=no
3322-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
3323-for as_dir in $PATH
3324-do
3325-  IFS=$as_save_IFS
3326-  test -z "$as_dir" && as_dir=.
3327-    for ac_exec_ext in '' $ac_executable_extensions; do
3328-  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
3329-    if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
3330-       ac_prog_rejected=yes
3331-       continue
3332-     fi
3333-    ac_cv_prog_CC="cc"
3334-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
3335-    break 2
3336-  fi
3337-done
3338-  done
3339-IFS=$as_save_IFS
3340-
3341-if test $ac_prog_rejected = yes; then
3342-  # We found a bogon in the path, so make sure we never use it.
3343-  set dummy $ac_cv_prog_CC
3344-  shift
3345-  if test $# != 0; then
3346-    # We chose a different compiler from the bogus one.
3347-    # However, it has the same basename, so the bogon will be chosen
3348-    # first if we set CC to just the basename; use the full file name.
3349-    shift
3350-    ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
3351-  fi
3352-fi
3353-fi
3354-fi
3355-CC=$ac_cv_prog_CC
3356-if test -n "$CC"; then
3357-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
3358-$as_echo "$CC" >&6; }
3359-else
3360-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
3361-$as_echo "no" >&6; }
3362-fi
3363-
3364-
3365-fi
3366-if test -z "$CC"; then
3367-  if test -n "$ac_tool_prefix"; then
3368-  for ac_prog in cl.exe
3369-  do
3370-    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
3371-set dummy $ac_tool_prefix$ac_prog; ac_word=$2
3372-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
3373-$as_echo_n "checking for $ac_word... " >&6; }
3374-if ${ac_cv_prog_CC+:} false; then :
3375-  $as_echo_n "(cached) " >&6
3376-else
3377-  if test -n "$CC"; then
3378-  ac_cv_prog_CC="$CC" # Let the user override the test.
3379-else
3380-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
3381-for as_dir in $PATH
3382-do
3383-  IFS=$as_save_IFS
3384-  test -z "$as_dir" && as_dir=.
3385-    for ac_exec_ext in '' $ac_executable_extensions; do
3386-  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
3387-    ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
3388-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
3389-    break 2
3390-  fi
3391-done
3392-  done
3393-IFS=$as_save_IFS
3394-
3395-fi
3396-fi
3397-CC=$ac_cv_prog_CC
3398-if test -n "$CC"; then
3399-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
3400-$as_echo "$CC" >&6; }
3401-else
3402-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
3403-$as_echo "no" >&6; }
3404-fi
3405-
3406-
3407-    test -n "$CC" && break
3408-  done
3409-fi
3410-if test -z "$CC"; then
3411-  ac_ct_CC=$CC
3412-  for ac_prog in cl.exe
3413-do
3414-  # Extract the first word of "$ac_prog", so it can be a program name with args.
3415-set dummy $ac_prog; ac_word=$2
3416-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
3417-$as_echo_n "checking for $ac_word... " >&6; }
3418-if ${ac_cv_prog_ac_ct_CC+:} false; then :
3419-  $as_echo_n "(cached) " >&6
3420-else
3421-  if test -n "$ac_ct_CC"; then
3422-  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
3423-else
3424-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
3425-for as_dir in $PATH
3426-do
3427-  IFS=$as_save_IFS
3428-  test -z "$as_dir" && as_dir=.
3429-    for ac_exec_ext in '' $ac_executable_extensions; do
3430-  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
3431-    ac_cv_prog_ac_ct_CC="$ac_prog"
3432-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
3433-    break 2
3434-  fi
3435-done
3436-  done
3437-IFS=$as_save_IFS
3438-
3439-fi
3440-fi
3441-ac_ct_CC=$ac_cv_prog_ac_ct_CC
3442-if test -n "$ac_ct_CC"; then
3443-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
3444-$as_echo "$ac_ct_CC" >&6; }
3445-else
3446-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
3447-$as_echo "no" >&6; }
3448-fi
3449-
3450-
3451-  test -n "$ac_ct_CC" && break
3452-done
3453-
3454-  if test "x$ac_ct_CC" = x; then
3455-    CC=""
3456-  else
3457-    case $cross_compiling:$ac_tool_warned in
3458-yes:)
3459-{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
3460-$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
3461-ac_tool_warned=yes ;;
3462-esac
3463-    CC=$ac_ct_CC
3464-  fi
3465-fi
3466-
3467-fi
3468-
3469-
3470-test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
3471-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
3472-as_fn_error $? "no acceptable C compiler found in \$PATH
3473-See \`config.log' for more details" "$LINENO" 5; }
3474-
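(Editorial note: the blocks above are the generated C-compiler probe. Each one walks $PATH for a candidate name such as gcc, ${ac_tool_prefix}cc, cc, or cl.exe, caches the first executable it finds, and the script aborts at this point if none turned up. A minimal standalone sketch of that search loop follows; the candidate list and find_cc helper are illustrative, not part of the generated script.)

#!/bin/sh
# Editor's sketch, not part of configure: find the first usable C compiler
# on $PATH, mirroring the probe order of the blocks above.  The candidate
# list is illustrative; the real script also honors $ac_tool_prefix and
# caches the result in ac_cv_prog_CC.
find_cc() {
  save_IFS=$IFS; IFS=${PATH_SEPARATOR-:}
  for name in gcc cc cl.exe; do
    for dir in $PATH; do
      test -z "$dir" && dir=.
      if test -f "$dir/$name" && test -x "$dir/$name"; then
        IFS=$save_IFS; echo "$dir/$name"; return 0
      fi
    done
  done
  IFS=$save_IFS
  return 1
}

CC=`find_cc` || { echo "no acceptable C compiler found in \$PATH" >&2; exit 1; }
echo "using CC=$CC"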
3475-# Provide some information about the compiler.
3476-$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
3477-set X $ac_compile
3478-ac_compiler=$2
3479-for ac_option in --version -v -V -qversion; do
3480-  { { ac_try="$ac_compiler $ac_option >&5"
3481-case "(($ac_try" in
3482-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
3483-  *) ac_try_echo=$ac_try;;
3484-esac
3485-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
3486-$as_echo "$ac_try_echo"; } >&5
3487-  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
3488-  ac_status=$?
3489-  if test -s conftest.err; then
3490-    sed '10a\
3491-... rest of stderr output deleted ...
3492-         10q' conftest.err >conftest.er1
3493-    cat conftest.er1 >&5
3494-  fi
3495-  rm -f conftest.er1 conftest.err
3496-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
3497-  test $ac_status = 0; }
3498-done
3499-
3500-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
3501-/* end confdefs.h.  */
3502-
3503-int
3504-main ()
3505-{
3506-
3507-  ;
3508-  return 0;
3509-}
3510-_ACEOF
3511-ac_clean_files_save=$ac_clean_files
3512-ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out"
3513-# Try to create an executable without -o first, disregard a.out.
3514-# It will help us diagnose broken compilers and give us a first
3515-# guess at exeext.
3516-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5
3517-$as_echo_n "checking whether the C compiler works... " >&6; }
3518-ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
3519-
3520-# The possible output files:
3521-ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*"
3522-
3523-ac_rmfiles=
3524-for ac_file in $ac_files
3525-do
3526-  case $ac_file in
3527-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
3528-    * ) ac_rmfiles="$ac_rmfiles $ac_file";;
3529-  esac
3530-done
3531-rm -f $ac_rmfiles
3532-
3533-if { { ac_try="$ac_link_default"
3534-case "(($ac_try" in
3535-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
3536-  *) ac_try_echo=$ac_try;;
3537-esac
3538-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
3539-$as_echo "$ac_try_echo"; } >&5
3540-  (eval "$ac_link_default") 2>&5
3541-  ac_status=$?
3542-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
3543-  test $ac_status = 0; }; then :
3544-  # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
3545-# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
3546-# in a Makefile.  We should not override ac_cv_exeext if it was cached,
3547-# so that the user can short-circuit this test for compilers unknown to
3548-# Autoconf.
3549-for ac_file in $ac_files ''
3550-do
3551-  test -f "$ac_file" || continue
3552-  case $ac_file in
3553-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj )
3554-	;;
3555-    [ab].out )
3556-	# We found the default executable, but exeext='' is most
3557-	# certainly right.
3558-	break;;
3559-    *.* )
3560-	if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
3561-	then :; else
3562-	   ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
3563-	fi
3564-	# We set ac_cv_exeext here because the later test for it is not
3565-	# safe: cross compilers may not add the suffix if given an `-o'
3566-	# argument, so we may need to know it at that point already.
3567-	# Even if this section looks crufty: it has the advantage of
3568-	# actually working.
3569-	break;;
3570-    * )
3571-	break;;
3572-  esac
3573-done
3574-test "$ac_cv_exeext" = no && ac_cv_exeext=
3575-
3576-else
3577-  ac_file=''
3578-fi
3579-if test -z "$ac_file"; then :
3580-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
3581-$as_echo "no" >&6; }
3582-$as_echo "$as_me: failed program was:" >&5
3583-sed 's/^/| /' conftest.$ac_ext >&5
3584-
3585-{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
3586-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
3587-as_fn_error 77 "C compiler cannot create executables
3588-See \`config.log' for more details" "$LINENO" 5; }
3589-else
3590-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
3591-$as_echo "yes" >&6; }
3592-fi
3593-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5
3594-$as_echo_n "checking for C compiler default output file name... " >&6; }
3595-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5
3596-$as_echo "$ac_file" >&6; }
3597-ac_exeext=$ac_cv_exeext
3598-
3599-rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
3600-ac_clean_files=$ac_clean_files_save
3601-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5
3602-$as_echo_n "checking for suffix of executables... " >&6; }
3603-if { { ac_try="$ac_link"
3604-case "(($ac_try" in
3605-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
3606-  *) ac_try_echo=$ac_try;;
3607-esac
3608-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
3609-$as_echo "$ac_try_echo"; } >&5
3610-  (eval "$ac_link") 2>&5
3611-  ac_status=$?
3612-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
3613-  test $ac_status = 0; }; then :
3614-  # If both `conftest.exe' and `conftest' are `present' (well, observable),
3615-# catch `conftest.exe'.  For instance with Cygwin, `ls conftest' will
3616-# work properly (i.e., refer to `conftest.exe'), while it won't with
3617-# `rm'.
3618-for ac_file in conftest.exe conftest conftest.*; do
3619-  test -f "$ac_file" || continue
3620-  case $ac_file in
3621-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
3622-    *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
3623-	  break;;
3624-    * ) break;;
3625-  esac
3626-done
3627-else
3628-  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
3629-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
3630-as_fn_error $? "cannot compute suffix of executables: cannot compile and link
3631-See \`config.log' for more details" "$LINENO" 5; }
3632-fi
3633-rm -f conftest conftest$ac_cv_exeext
3634-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
3635-$as_echo "$ac_cv_exeext" >&6; }
3636-
3637-rm -f conftest.$ac_ext
3638-EXEEXT=$ac_cv_exeext
3639-ac_exeext=$EXEEXT
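(Editorial note: the executable-suffix detection above boils down to compiling an empty program, looking at which files appeared, and taking any suffix as EXEEXT, empty on Unix, ".exe" under Cygwin/MinGW/MSVC. A reduced sketch, assuming $CC has already been chosen as above:)

# Editor's sketch, not part of configure: infer the executable suffix the
# same way the block above does, by compiling a trivial program and looking
# at what the compiler actually produced.  Assumes $CC is already chosen.
cat > conftest.c <<'EOF'
int main(void) { return 0; }
EOF
rm -f conftest conftest.exe a.out a.exe b.out
$CC -o conftest conftest.c >/dev/null 2>&1
exeext=
for f in conftest.exe conftest a.out a.exe b.out; do
  test -f "$f" || continue
  case $f in
    *.*) exeext=`expr "$f" : '[^.]*\(\..*\)'` ;;   # e.g. ".exe" on Windows
    *)   exeext= ;;                                # plain "conftest" on Unix
  esac
  break
done
echo "EXEEXT='$exeext'"
rm -f conftest.c conftest conftest.exe a.out a.exe b.out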
3640-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
3641-/* end confdefs.h.  */
3642-#include <stdio.h>
3643-int
3644-main ()
3645-{
3646-FILE *f = fopen ("conftest.out", "w");
3647- return ferror (f) || fclose (f) != 0;
3648-
3649-  ;
3650-  return 0;
3651-}
3652-_ACEOF
3653-ac_clean_files="$ac_clean_files conftest.out"
3654-# Check that the compiler produces executables we can run.  If not, either
3655-# the compiler is broken, or we are cross compiling.
3656-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
3657-$as_echo_n "checking whether we are cross compiling... " >&6; }
3658-if test "$cross_compiling" != yes; then
3659-  { { ac_try="$ac_link"
3660-case "(($ac_try" in
3661-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
3662-  *) ac_try_echo=$ac_try;;
3663-esac
3664-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
3665-$as_echo "$ac_try_echo"; } >&5
3666-  (eval "$ac_link") 2>&5
3667-  ac_status=$?
3668-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
3669-  test $ac_status = 0; }
3670-  if { ac_try='./conftest$ac_cv_exeext'
3671-  { { case "(($ac_try" in
3672-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
3673-  *) ac_try_echo=$ac_try;;
3674-esac
3675-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
3676-$as_echo "$ac_try_echo"; } >&5
3677-  (eval "$ac_try") 2>&5
3678-  ac_status=$?
3679-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
3680-  test $ac_status = 0; }; }; then
3681-    cross_compiling=no
3682-  else
3683-    if test "$cross_compiling" = maybe; then
3684-	cross_compiling=yes
3685-    else
3686-	{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
3687-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
3688-as_fn_error $? "cannot run C compiled programs.
3689-If you meant to cross compile, use \`--host'.
3690-See \`config.log' for more details" "$LINENO" 5; }
3691-    fi
3692-  fi
3693-fi
3694-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
3695-$as_echo "$cross_compiling" >&6; }
3696-
3697-rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
3698-ac_clean_files=$ac_clean_files_save
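(Editorial note: the check just above is the whole cross-compilation test: link the tiny fopen program and try to execute it on the build machine; if it runs, the build is native. Reduced to its essentials, assuming $CC and $exeext from the previous steps:)

# Editor's sketch, not part of configure: the essence of the cross-compile
# probe above.  Build the same tiny fopen program and try to run it on the
# build machine; assumes $CC and $exeext from the previous steps.
cat > conftest.c <<'EOF'
#include <stdio.h>
int main(void) {
  FILE *f = fopen("conftest.out", "w");
  return ferror(f) || fclose(f) != 0;
}
EOF
if $CC -o conftest$exeext conftest.c >/dev/null 2>&1 &&
   ./conftest$exeext >/dev/null 2>&1; then
  cross_compiling=no
else
  cross_compiling=yes    # or the toolchain is simply broken
fi
echo "cross_compiling=$cross_compiling"
rm -f conftest.c conftest$exeext conftest.out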
3699-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
3700-$as_echo_n "checking for suffix of object files... " >&6; }
3701-if ${ac_cv_objext+:} false; then :
3702-  $as_echo_n "(cached) " >&6
3703-else
3704-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
3705-/* end confdefs.h.  */
3706-
3707-int
3708-main ()
3709-{
3710-
3711-  ;
3712-  return 0;
3713-}
3714-_ACEOF
3715-rm -f conftest.o conftest.obj
3716-if { { ac_try="$ac_compile"
3717-case "(($ac_try" in
3718-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
3719-  *) ac_try_echo=$ac_try;;
3720-esac
3721-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
3722-$as_echo "$ac_try_echo"; } >&5
3723-  (eval "$ac_compile") 2>&5
3724-  ac_status=$?
3725-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
3726-  test $ac_status = 0; }; then :
3727-  for ac_file in conftest.o conftest.obj conftest.*; do
3728-  test -f "$ac_file" || continue;
3729-  case $ac_file in
3730-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;;
3731-    *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
3732-       break;;
3733-  esac
3734-done
3735-else
3736-  $as_echo "$as_me: failed program was:" >&5
3737-sed 's/^/| /' conftest.$ac_ext >&5
3738-
3739-{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
3740-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
3741-as_fn_error $? "cannot compute suffix of object files: cannot compile
3742-See \`config.log' for more details" "$LINENO" 5; }
3743-fi
3744-rm -f conftest.$ac_cv_objext conftest.$ac_ext
3745-fi
3746-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5
3747-$as_echo "$ac_cv_objext" >&6; }
3748-OBJEXT=$ac_cv_objext
3749-ac_objext=$OBJEXT
3750-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
3751-$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
3752-if ${ac_cv_c_compiler_gnu+:} false; then :
3753-  $as_echo_n "(cached) " >&6
3754-else
3755-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
3756-/* end confdefs.h.  */
3757-
3758-int
3759-main ()
3760-{
3761-#ifndef __GNUC__
3762-       choke me
3763-#endif
3764-
3765-  ;
3766-  return 0;
3767-}
3768-_ACEOF
3769-if ac_fn_c_try_compile "$LINENO"; then :
3770-  ac_compiler_gnu=yes
3771-else
3772-  ac_compiler_gnu=no
3773-fi
3774-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
3775-ac_cv_c_compiler_gnu=$ac_compiler_gnu
3776-
3777-fi
3778-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
3779-$as_echo "$ac_cv_c_compiler_gnu" >&6; }
3780-if test $ac_compiler_gnu = yes; then
3781-  GCC=yes
3782-else
3783-  GCC=
3784-fi
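(Editorial note: the __GNUC__ probe above, and the _MSC_VER and _CRAYC probes further down, all rely on the same trick: a test program that only compiles when a given predefined macro exists. A reusable sketch; check_macro is a hypothetical helper, not something configure defines.)

# Editor's sketch, not part of configure: the "is macro X predefined" trick
# used above for __GNUC__ and below for _MSC_VER and _CRAYC.  The test
# program compiles only when the macro exists.
check_macro() {   # usage: check_macro __GNUC__
  cat > conftest.c <<EOF
int main(void) {
#ifndef $1
  choke me
#endif
  return 0;
}
EOF
  if $CC -c conftest.c >/dev/null 2>&1; then answer=yes; else answer=no; fi
  rm -f conftest.c conftest.o
  echo "$answer"
}

echo "GNU C:  `check_macro __GNUC__`"
echo "MSVC:   `check_macro _MSC_VER`"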
3785-ac_test_CFLAGS=${CFLAGS+set}
3786-ac_save_CFLAGS=$CFLAGS
3787-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
3788-$as_echo_n "checking whether $CC accepts -g... " >&6; }
3789-if ${ac_cv_prog_cc_g+:} false; then :
3790-  $as_echo_n "(cached) " >&6
3791-else
3792-  ac_save_c_werror_flag=$ac_c_werror_flag
3793-   ac_c_werror_flag=yes
3794-   ac_cv_prog_cc_g=no
3795-   CFLAGS="-g"
3796-   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
3797-/* end confdefs.h.  */
3798-
3799-int
3800-main ()
3801-{
3802-
3803-  ;
3804-  return 0;
3805-}
3806-_ACEOF
3807-if ac_fn_c_try_compile "$LINENO"; then :
3808-  ac_cv_prog_cc_g=yes
3809-else
3810-  CFLAGS=""
3811-      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
3812-/* end confdefs.h.  */
3813-
3814-int
3815-main ()
3816-{
3817-
3818-  ;
3819-  return 0;
3820-}
3821-_ACEOF
3822-if ac_fn_c_try_compile "$LINENO"; then :
3823-
3824-else
3825-  ac_c_werror_flag=$ac_save_c_werror_flag
3826-	 CFLAGS="-g"
3827-	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
3828-/* end confdefs.h.  */
3829-
3830-int
3831-main ()
3832-{
3833-
3834-  ;
3835-  return 0;
3836-}
3837-_ACEOF
3838-if ac_fn_c_try_compile "$LINENO"; then :
3839-  ac_cv_prog_cc_g=yes
3840-fi
3841-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
3842-fi
3843-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
3844-fi
3845-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
3846-   ac_c_werror_flag=$ac_save_c_werror_flag
3847-fi
3848-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
3849-$as_echo "$ac_cv_prog_cc_g" >&6; }
3850-if test "$ac_test_CFLAGS" = set; then
3851-  CFLAGS=$ac_save_CFLAGS
3852-elif test $ac_cv_prog_cc_g = yes; then
3853-  if test "$GCC" = yes; then
3854-    CFLAGS="-g -O2"
3855-  else
3856-    CFLAGS="-g"
3857-  fi
3858-else
3859-  if test "$GCC" = yes; then
3860-    CFLAGS="-O2"
3861-  else
3862-    CFLAGS=
3863-  fi
3864-fi
3865-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
3866-$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
3867-if ${ac_cv_prog_cc_c89+:} false; then :
3868-  $as_echo_n "(cached) " >&6
3869-else
3870-  ac_cv_prog_cc_c89=no
3871-ac_save_CC=$CC
3872-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
3873-/* end confdefs.h.  */
3874-#include <stdarg.h>
3875-#include <stdio.h>
3876-struct stat;
3877-/* Most of the following tests are stolen from RCS 5.7's src/conf.sh.  */
3878-struct buf { int x; };
3879-FILE * (*rcsopen) (struct buf *, struct stat *, int);
3880-static char *e (p, i)
3881-     char **p;
3882-     int i;
3883-{
3884-  return p[i];
3885-}
3886-static char *f (char * (*g) (char **, int), char **p, ...)
3887-{
3888-  char *s;
3889-  va_list v;
3890-  va_start (v,p);
3891-  s = g (p, va_arg (v,int));
3892-  va_end (v);
3893-  return s;
3894-}
3895-
3896-/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default.  It has
3897-   function prototypes and stuff, but not '\xHH' hex character constants.
3898-   Unfortunately these don't provoke an error; instead they are silently
3899-   treated as 'x'.  The following induces an error, until -std is added to get
3900-   proper ANSI mode.  Curiously '\x00'!='x' always comes out true, for an
3901-   array size at least.  It's necessary to write '\x00'==0 to get something
3902-   that's true only with -std.  */
3903-int osf4_cc_array ['\x00' == 0 ? 1 : -1];
3904-
3905-/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
3906-   inside strings and character constants.  */
3907-#define FOO(x) 'x'
3908-int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
3909-
3910-int test (int i, double x);
3911-struct s1 {int (*f) (int a);};
3912-struct s2 {int (*f) (double a);};
3913-int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
3914-int argc;
3915-char **argv;
3916-int
3917-main ()
3918-{
3919-return f (e, argv, 0) != argv[0]  ||  f (e, argv, 1) != argv[1];
3920-  ;
3921-  return 0;
3922-}
3923-_ACEOF
3924-for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
3925-	-Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
3926-do
3927-  CC="$ac_save_CC $ac_arg"
3928-  if ac_fn_c_try_compile "$LINENO"; then :
3929-  ac_cv_prog_cc_c89=$ac_arg
3930-fi
3931-rm -f core conftest.err conftest.$ac_objext
3932-  test "x$ac_cv_prog_cc_c89" != "xno" && break
3933-done
3934-rm -f conftest.$ac_ext
3935-CC=$ac_save_CC
3936-
3937-fi
3938-# AC_CACHE_VAL
3939-case "x$ac_cv_prog_cc_c89" in
3940-  x)
3941-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
3942-$as_echo "none needed" >&6; } ;;
3943-  xno)
3944-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
3945-$as_echo "unsupported" >&6; } ;;
3946-  *)
3947-    CC="$CC $ac_cv_prog_cc_c89"
3948-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
3949-$as_echo "$ac_cv_prog_cc_c89" >&6; } ;;
3950-esac
3951-if test "x$ac_cv_prog_cc_c89" != xno; then :
3952-
3953-fi
3954-
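(Editorial note: the ISO C89 check above compiles one test program repeatedly, prepending each candidate option to $CC until one works, with '' meaning no option is needed. The same first-flag-that-works idea as a small helper; first_working_flag is hypothetical and the flag list is an abbreviated, illustrative subset.)

# Editor's sketch, not part of configure: keep the first option (possibly
# none) that lets the test program compile.
first_working_flag() {
  src=$1; shift
  for flag in "$@"; do
    if $CC $flag -c "$src" >/dev/null 2>&1; then
      echo "$flag"; return 0
    fi
  done
  return 1
}

cat > conftest.c <<'EOF'
int main(void) { return 0; }
EOF
if flag=`first_working_flag conftest.c '' -std=c89 -qlanglvl=extc89 -Ae`; then
  echo "C89 option: '$flag'"
else
  echo "C89: unsupported"
fi
rm -f conftest.c conftest.o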
3955-ac_ext=c
3956-ac_cpp='$CPP $CPPFLAGS'
3957-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
3958-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
3959-ac_compiler_gnu=$ac_cv_c_compiler_gnu
3960-
3961-
3962-if test "x$GCC" != "xyes" ; then
3963-
3964-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler is MSVC" >&5
3965-$as_echo_n "checking whether compiler is MSVC... " >&6; }
3966-if ${je_cv_msvc+:} false; then :
3967-  $as_echo_n "(cached) " >&6
3968-else
3969-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
3970-/* end confdefs.h.  */
3971-
3972-int
3973-main ()
3974-{
3975-
3976-#ifndef _MSC_VER
3977-  int fail-1;
3978-#endif
3979-
3980-  ;
3981-  return 0;
3982-}
3983-_ACEOF
3984-if ac_fn_c_try_compile "$LINENO"; then :
3985-  je_cv_msvc=yes
3986-else
3987-  je_cv_msvc=no
3988-fi
3989-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
3990-fi
3991-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_msvc" >&5
3992-$as_echo "$je_cv_msvc" >&6; }
3993-fi
3994-
3995-je_cv_cray_prgenv_wrapper=""
3996-if test "x${PE_ENV}" != "x" ; then
3997-  case "${CC}" in
3998-    CC|cc)
3999-	je_cv_cray_prgenv_wrapper="yes"
4000-	;;
4001-    *)
4002-       ;;
4003-  esac
4004-fi
4005-
4006-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler is cray" >&5
4007-$as_echo_n "checking whether compiler is cray... " >&6; }
4008-if ${je_cv_cray+:} false; then :
4009-  $as_echo_n "(cached) " >&6
4010-else
4011-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4012-/* end confdefs.h.  */
4013-
4014-int
4015-main ()
4016-{
4017-
4018-#ifndef _CRAYC
4019-  int fail-1;
4020-#endif
4021-
4022-  ;
4023-  return 0;
4024-}
4025-_ACEOF
4026-if ac_fn_c_try_compile "$LINENO"; then :
4027-  je_cv_cray=yes
4028-else
4029-  je_cv_cray=no
4030-fi
4031-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4032-fi
4033-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_cray" >&5
4034-$as_echo "$je_cv_cray" >&6; }
4035-
4036-if test "x${je_cv_cray}" = "xyes" ; then
4037-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether cray compiler version is 8.4" >&5
4038-$as_echo_n "checking whether cray compiler version is 8.4... " >&6; }
4039-if ${je_cv_cray_84+:} false; then :
4040-  $as_echo_n "(cached) " >&6
4041-else
4042-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4043-/* end confdefs.h.  */
4044-
4045-int
4046-main ()
4047-{
4048-
4049-#if !(_RELEASE_MAJOR == 8 && _RELEASE_MINOR == 4)
4050-  int fail-1;
4051-#endif
4052-
4053-  ;
4054-  return 0;
4055-}
4056-_ACEOF
4057-if ac_fn_c_try_compile "$LINENO"; then :
4058-  je_cv_cray_84=yes
4059-else
4060-  je_cv_cray_84=no
4061-fi
4062-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4063-fi
4064-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_cray_84" >&5
4065-$as_echo "$je_cv_cray_84" >&6; }
4066-fi
4067-
4068-if test "x$GCC" = "xyes" ; then
4069-
4070-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -std=gnu11" >&5
4071-$as_echo_n "checking whether compiler supports -std=gnu11... " >&6; }
4072-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
4073-T_APPEND_V=-std=gnu11
4074-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
4075-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
4076-else
4077-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
4078-fi
4079-
4080-
4081-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4082-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4083-else
4084-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4085-fi
4086-
4087-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4088-/* end confdefs.h.  */
4089-
4090-
4091-int
4092-main ()
4093-{
4094-
4095-    return 0;
4096-
4097-  ;
4098-  return 0;
4099-}
4100-_ACEOF
4101-if ac_fn_c_try_compile "$LINENO"; then :
4102-  je_cv_cflags_added=-std=gnu11
4103-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
4104-$as_echo "yes" >&6; }
4105-else
4106-  je_cv_cflags_added=
4107-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
4108-$as_echo "no" >&6; }
4109-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
4110-
4111-fi
4112-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4113-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4114-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4115-else
4116-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4117-fi
4118-
4119-
4120-  if test "x$je_cv_cflags_added" = "x-std=gnu11" ; then
4121-
4122-cat >>confdefs.h <<_ACEOF
4123-#define JEMALLOC_HAS_RESTRICT
4124-_ACEOF
4125-
4126-  else
4127-
4128-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -std=gnu99" >&5
4129-$as_echo_n "checking whether compiler supports -std=gnu99... " >&6; }
4130-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
4131-T_APPEND_V=-std=gnu99
4132-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
4133-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
4134-else
4135-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
4136-fi
4137-
4138-
4139-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4140-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4141-else
4142-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4143-fi
4144-
4145-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4146-/* end confdefs.h.  */
4147-
4148-
4149-int
4150-main ()
4151-{
4152-
4153-    return 0;
4154-
4155-  ;
4156-  return 0;
4157-}
4158-_ACEOF
4159-if ac_fn_c_try_compile "$LINENO"; then :
4160-  je_cv_cflags_added=-std=gnu99
4161-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
4162-$as_echo "yes" >&6; }
4163-else
4164-  je_cv_cflags_added=
4165-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
4166-$as_echo "no" >&6; }
4167-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
4168-
4169-fi
4170-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4171-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4172-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4173-else
4174-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4175-fi
4176-
4177-
4178-    if test "x$je_cv_cflags_added" = "x-std=gnu99" ; then
4179-
4180-cat >>confdefs.h <<_ACEOF
4181-#define JEMALLOC_HAS_RESTRICT
4182-_ACEOF
4183-
4184-    fi
4185-  fi
4186-
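(Editorial note: every "checking whether compiler supports <flag>" stanza from here on repeats the same append / compile / roll-back dance. Condensed into one helper below; try_append_cflag is hypothetical, and the CONFIGURE_CFLAGS vs. SPECIFIED_CFLAGS bookkeeping is simplified to plain CFLAGS.)

# Editor's sketch, not part of configure: tentatively add a flag, compile an
# empty program, and undo the change if the compile fails.
try_append_cflag() {   # usage: try_append_cflag -Wall
  saved_cflags=$CFLAGS
  CFLAGS="${CFLAGS:+$CFLAGS }$1"
  cat > conftest.c <<'EOF'
int main(void) { return 0; }
EOF
  if $CC $CFLAGS -c conftest.c >/dev/null 2>&1; then
    echo "yes: $1"
  else
    CFLAGS=$saved_cflags     # roll back, exactly as the stanzas above do
    echo "no:  $1"
  fi
  rm -f conftest.c conftest.o
}

try_append_cflag -Wall
try_append_cflag -Wextra

(The -Werror=unknown-warning-option probe that comes next exists precisely so that clang treats unrecognized -W options as hard errors; without it, clang would accept them with only a warning and the later checks could report yes for flags it does not really support.)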
4187-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror=unknown-warning-option" >&5
4188-$as_echo_n "checking whether compiler supports -Werror=unknown-warning-option... " >&6; }
4189-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
4190-T_APPEND_V=-Werror=unknown-warning-option
4191-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
4192-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
4193-else
4194-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
4195-fi
4196-
4197-
4198-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4199-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4200-else
4201-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4202-fi
4203-
4204-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4205-/* end confdefs.h.  */
4206-
4207-
4208-int
4209-main ()
4210-{
4211-
4212-    return 0;
4213-
4214-  ;
4215-  return 0;
4216-}
4217-_ACEOF
4218-if ac_fn_c_try_compile "$LINENO"; then :
4219-  je_cv_cflags_added=-Werror=unknown-warning-option
4220-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
4221-$as_echo "yes" >&6; }
4222-else
4223-  je_cv_cflags_added=
4224-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
4225-$as_echo "no" >&6; }
4226-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
4227-
4228-fi
4229-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4230-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4231-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4232-else
4233-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4234-fi
4235-
4236-
4237-
4238-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wall" >&5
4239-$as_echo_n "checking whether compiler supports -Wall... " >&6; }
4240-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
4241-T_APPEND_V=-Wall
4242-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
4243-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
4244-else
4245-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
4246-fi
4247-
4248-
4249-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4250-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4251-else
4252-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4253-fi
4254-
4255-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4256-/* end confdefs.h.  */
4257-
4258-
4259-int
4260-main ()
4261-{
4262-
4263-    return 0;
4264-
4265-  ;
4266-  return 0;
4267-}
4268-_ACEOF
4269-if ac_fn_c_try_compile "$LINENO"; then :
4270-  je_cv_cflags_added=-Wall
4271-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
4272-$as_echo "yes" >&6; }
4273-else
4274-  je_cv_cflags_added=
4275-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
4276-$as_echo "no" >&6; }
4277-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
4278-
4279-fi
4280-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4281-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4282-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4283-else
4284-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4285-fi
4286-
4287-
4288-
4289-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wextra" >&5
4290-$as_echo_n "checking whether compiler supports -Wextra... " >&6; }
4291-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
4292-T_APPEND_V=-Wextra
4293-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
4294-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
4295-else
4296-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
4297-fi
4298-
4299-
4300-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4301-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4302-else
4303-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4304-fi
4305-
4306-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4307-/* end confdefs.h.  */
4308-
4309-
4310-int
4311-main ()
4312-{
4313-
4314-    return 0;
4315-
4316-  ;
4317-  return 0;
4318-}
4319-_ACEOF
4320-if ac_fn_c_try_compile "$LINENO"; then :
4321-  je_cv_cflags_added=-Wextra
4322-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
4323-$as_echo "yes" >&6; }
4324-else
4325-  je_cv_cflags_added=
4326-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
4327-$as_echo "no" >&6; }
4328-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
4329-
4330-fi
4331-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4332-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4333-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4334-else
4335-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4336-fi
4337-
4338-
4339-
4340-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wshorten-64-to-32" >&5
4341-$as_echo_n "checking whether compiler supports -Wshorten-64-to-32... " >&6; }
4342-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
4343-T_APPEND_V=-Wshorten-64-to-32
4344-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
4345-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
4346-else
4347-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
4348-fi
4349-
4350-
4351-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4352-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4353-else
4354-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4355-fi
4356-
4357-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4358-/* end confdefs.h.  */
4359-
4360-
4361-int
4362-main ()
4363-{
4364-
4365-    return 0;
4366-
4367-  ;
4368-  return 0;
4369-}
4370-_ACEOF
4371-if ac_fn_c_try_compile "$LINENO"; then :
4372-  je_cv_cflags_added=-Wshorten-64-to-32
4373-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
4374-$as_echo "yes" >&6; }
4375-else
4376-  je_cv_cflags_added=
4377-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
4378-$as_echo "no" >&6; }
4379-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
4380-
4381-fi
4382-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4383-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4384-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4385-else
4386-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4387-fi
4388-
4389-
4390-
4391-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wsign-compare" >&5
4392-$as_echo_n "checking whether compiler supports -Wsign-compare... " >&6; }
4393-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
4394-T_APPEND_V=-Wsign-compare
4395-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
4396-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
4397-else
4398-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
4399-fi
4400-
4401-
4402-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4403-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4404-else
4405-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4406-fi
4407-
4408-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4409-/* end confdefs.h.  */
4410-
4411-
4412-int
4413-main ()
4414-{
4415-
4416-    return 0;
4417-
4418-  ;
4419-  return 0;
4420-}
4421-_ACEOF
4422-if ac_fn_c_try_compile "$LINENO"; then :
4423-  je_cv_cflags_added=-Wsign-compare
4424-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
4425-$as_echo "yes" >&6; }
4426-else
4427-  je_cv_cflags_added=
4428-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
4429-$as_echo "no" >&6; }
4430-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
4431-
4432-fi
4433-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4434-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4435-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4436-else
4437-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4438-fi
4439-
4440-
4441-
4442-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wundef" >&5
4443-$as_echo_n "checking whether compiler supports -Wundef... " >&6; }
4444-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
4445-T_APPEND_V=-Wundef
4446-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
4447-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
4448-else
4449-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
4450-fi
4451-
4452-
4453-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4454-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4455-else
4456-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4457-fi
4458-
4459-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4460-/* end confdefs.h.  */
4461-
4462-
4463-int
4464-main ()
4465-{
4466-
4467-    return 0;
4468-
4469-  ;
4470-  return 0;
4471-}
4472-_ACEOF
4473-if ac_fn_c_try_compile "$LINENO"; then :
4474-  je_cv_cflags_added=-Wundef
4475-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
4476-$as_echo "yes" >&6; }
4477-else
4478-  je_cv_cflags_added=
4479-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
4480-$as_echo "no" >&6; }
4481-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
4482-
4483-fi
4484-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4485-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4486-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4487-else
4488-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4489-fi
4490-
4491-
4492-
4493-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wno-format-zero-length" >&5
4494-$as_echo_n "checking whether compiler supports -Wno-format-zero-length... " >&6; }
4495-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
4496-T_APPEND_V=-Wno-format-zero-length
4497-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
4498-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
4499-else
4500-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
4501-fi
4502-
4503-
4504-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4505-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4506-else
4507-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4508-fi
4509-
4510-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4511-/* end confdefs.h.  */
4512-
4513-
4514-int
4515-main ()
4516-{
4517-
4518-    return 0;
4519-
4520-  ;
4521-  return 0;
4522-}
4523-_ACEOF
4524-if ac_fn_c_try_compile "$LINENO"; then :
4525-  je_cv_cflags_added=-Wno-format-zero-length
4526-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
4527-$as_echo "yes" >&6; }
4528-else
4529-  je_cv_cflags_added=
4530-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
4531-$as_echo "no" >&6; }
4532-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
4533-
4534-fi
4535-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4536-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4537-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4538-else
4539-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4540-fi
4541-
4542-
4543-
4544-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wpointer-arith" >&5
4545-$as_echo_n "checking whether compiler supports -Wpointer-arith... " >&6; }
4546-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
4547-T_APPEND_V=-Wpointer-arith
4548-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
4549-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
4550-else
4551-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
4552-fi
4553-
4554-
4555-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4556-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4557-else
4558-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4559-fi
4560-
4561-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4562-/* end confdefs.h.  */
4563-
4564-
4565-int
4566-main ()
4567-{
4568-
4569-    return 0;
4570-
4571-  ;
4572-  return 0;
4573-}
4574-_ACEOF
4575-if ac_fn_c_try_compile "$LINENO"; then :
4576-  je_cv_cflags_added=-Wpointer-arith
4577-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
4578-$as_echo "yes" >&6; }
4579-else
4580-  je_cv_cflags_added=
4581-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
4582-$as_echo "no" >&6; }
4583-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
4584-
4585-fi
4586-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4587-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4588-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4589-else
4590-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4591-fi
4592-
4593-
4594-
4595-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wno-missing-braces" >&5
4596-$as_echo_n "checking whether compiler supports -Wno-missing-braces... " >&6; }
4597-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
4598-T_APPEND_V=-Wno-missing-braces
4599-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
4600-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
4601-else
4602-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
4603-fi
4604-
4605-
4606-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4607-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4608-else
4609-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4610-fi
4611-
4612-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4613-/* end confdefs.h.  */
4614-
4615-
4616-int
4617-main ()
4618-{
4619-
4620-    return 0;
4621-
4622-  ;
4623-  return 0;
4624-}
4625-_ACEOF
4626-if ac_fn_c_try_compile "$LINENO"; then :
4627-  je_cv_cflags_added=-Wno-missing-braces
4628-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
4629-$as_echo "yes" >&6; }
4630-else
4631-  je_cv_cflags_added=
4632-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
4633-$as_echo "no" >&6; }
4634-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
4635-
4636-fi
4637-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4638-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4639-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4640-else
4641-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4642-fi
4643-
4644-
4645-
4646-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wno-missing-field-initializers" >&5
4647-$as_echo_n "checking whether compiler supports -Wno-missing-field-initializers... " >&6; }
4648-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
4649-T_APPEND_V=-Wno-missing-field-initializers
4650-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
4651-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
4652-else
4653-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
4654-fi
4655-
4656-
4657-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4658-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4659-else
4660-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4661-fi
4662-
4663-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4664-/* end confdefs.h.  */
4665-
4666-
4667-int
4668-main ()
4669-{
4670-
4671-    return 0;
4672-
4673-  ;
4674-  return 0;
4675-}
4676-_ACEOF
4677-if ac_fn_c_try_compile "$LINENO"; then :
4678-  je_cv_cflags_added=-Wno-missing-field-initializers
4679-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
4680-$as_echo "yes" >&6; }
4681-else
4682-  je_cv_cflags_added=
4683-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
4684-$as_echo "no" >&6; }
4685-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
4686-
4687-fi
4688-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4689-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4690-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4691-else
4692-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4693-fi
4694-
4695-
4696-
4697-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wno-missing-attributes" >&5
4698-$as_echo_n "checking whether compiler supports -Wno-missing-attributes... " >&6; }
4699-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
4700-T_APPEND_V=-Wno-missing-attributes
4701-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
4702-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
4703-else
4704-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
4705-fi
4706-
4707-
4708-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4709-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4710-else
4711-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4712-fi
4713-
4714-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4715-/* end confdefs.h.  */
4716-
4717-
4718-int
4719-main ()
4720-{
4721-
4722-    return 0;
4723-
4724-  ;
4725-  return 0;
4726-}
4727-_ACEOF
4728-if ac_fn_c_try_compile "$LINENO"; then :
4729-  je_cv_cflags_added=-Wno-missing-attributes
4730-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
4731-$as_echo "yes" >&6; }
4732-else
4733-  je_cv_cflags_added=
4734-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
4735-$as_echo "no" >&6; }
4736-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
4737-
4738-fi
4739-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4740-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4741-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4742-else
4743-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4744-fi
4745-
4746-
4747-
4748-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -pipe" >&5
4749-$as_echo_n "checking whether compiler supports -pipe... " >&6; }
4750-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
4751-T_APPEND_V=-pipe
4752-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
4753-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
4754-else
4755-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
4756-fi
4757-
4758-
4759-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4760-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4761-else
4762-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4763-fi
4764-
4765-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4766-/* end confdefs.h.  */
4767-
4768-
4769-int
4770-main ()
4771-{
4772-
4773-    return 0;
4774-
4775-  ;
4776-  return 0;
4777-}
4778-_ACEOF
4779-if ac_fn_c_try_compile "$LINENO"; then :
4780-  je_cv_cflags_added=-pipe
4781-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
4782-$as_echo "yes" >&6; }
4783-else
4784-  je_cv_cflags_added=
4785-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
4786-$as_echo "no" >&6; }
4787-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
4788-
4789-fi
4790-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4791-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4792-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4793-else
4794-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4795-fi
4796-
4797-
4798-
4799-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -g3" >&5
4800-$as_echo_n "checking whether compiler supports -g3... " >&6; }
4801-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
4802-T_APPEND_V=-g3
4803-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
4804-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
4805-else
4806-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
4807-fi
4808-
4809-
4810-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4811-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4812-else
4813-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4814-fi
4815-
4816-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4817-/* end confdefs.h.  */
4818-
4819-
4820-int
4821-main ()
4822-{
4823-
4824-    return 0;
4825-
4826-  ;
4827-  return 0;
4828-}
4829-_ACEOF
4830-if ac_fn_c_try_compile "$LINENO"; then :
4831-  je_cv_cflags_added=-g3
4832-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
4833-$as_echo "yes" >&6; }
4834-else
4835-  je_cv_cflags_added=
4836-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
4837-$as_echo "no" >&6; }
4838-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
4839-
4840-fi
4841-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4842-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4843-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4844-else
4845-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4846-fi
4847-
4848-
4849-elif test "x$je_cv_msvc" = "xyes" ; then
4850-  CC="$CC -nologo"
4851-
4852-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Zi" >&5
4853-$as_echo_n "checking whether compiler supports -Zi... " >&6; }
4854-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
4855-T_APPEND_V=-Zi
4856-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
4857-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
4858-else
4859-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
4860-fi
4861-
4862-
4863-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4864-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4865-else
4866-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4867-fi
4868-
4869-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4870-/* end confdefs.h.  */
4871-
4872-
4873-int
4874-main ()
4875-{
4876-
4877-    return 0;
4878-
4879-  ;
4880-  return 0;
4881-}
4882-_ACEOF
4883-if ac_fn_c_try_compile "$LINENO"; then :
4884-  je_cv_cflags_added=-Zi
4885-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
4886-$as_echo "yes" >&6; }
4887-else
4888-  je_cv_cflags_added=
4889-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
4890-$as_echo "no" >&6; }
4891-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
4892-
4893-fi
4894-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4895-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4896-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4897-else
4898-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4899-fi
4900-
4901-
4902-
4903-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -MT" >&5
4904-$as_echo_n "checking whether compiler supports -MT... " >&6; }
4905-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
4906-T_APPEND_V=-MT
4907-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
4908-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
4909-else
4910-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
4911-fi
4912-
4913-
4914-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4915-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4916-else
4917-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4918-fi
4919-
4920-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4921-/* end confdefs.h.  */
4922-
4923-
4924-int
4925-main ()
4926-{
4927-
4928-    return 0;
4929-
4930-  ;
4931-  return 0;
4932-}
4933-_ACEOF
4934-if ac_fn_c_try_compile "$LINENO"; then :
4935-  je_cv_cflags_added=-MT
4936-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
4937-$as_echo "yes" >&6; }
4938-else
4939-  je_cv_cflags_added=
4940-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
4941-$as_echo "no" >&6; }
4942-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
4943-
4944-fi
4945-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4946-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4947-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4948-else
4949-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4950-fi
4951-
4952-
4953-
4954-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -W3" >&5
4955-$as_echo_n "checking whether compiler supports -W3... " >&6; }
4956-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
4957-T_APPEND_V=-W3
4958-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
4959-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
4960-else
4961-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
4962-fi
4963-
4964-
4965-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4966-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4967-else
4968-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
4969-fi
4970-
4971-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
4972-/* end confdefs.h.  */
4973-
4974-
4975-int
4976-main ()
4977-{
4978-
4979-    return 0;
4980-
4981-  ;
4982-  return 0;
4983-}
4984-_ACEOF
4985-if ac_fn_c_try_compile "$LINENO"; then :
4986-  je_cv_cflags_added=-W3
4987-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
4988-$as_echo "yes" >&6; }
4989-else
4990-  je_cv_cflags_added=
4991-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
4992-$as_echo "no" >&6; }
4993-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
4994-
4995-fi
4996-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
4997-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
4998-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
4999-else
5000-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
5001-fi
5002-
5003-
5004-
5005-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -FS" >&5
5006-$as_echo_n "checking whether compiler supports -FS... " >&6; }
5007-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
5008-T_APPEND_V=-FS
5009-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
5010-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
5011-else
5012-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
5013-fi
5014-
5015-
5016-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
5017-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
5018-else
5019-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
5020-fi
5021-
5022-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
5023-/* end confdefs.h.  */
5024-
5025-
5026-int
5027-main ()
5028-{
5029-
5030-    return 0;
5031-
5032-  ;
5033-  return 0;
5034-}
5035-_ACEOF
5036-if ac_fn_c_try_compile "$LINENO"; then :
5037-  je_cv_cflags_added=-FS
5038-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
5039-$as_echo "yes" >&6; }
5040-else
5041-  je_cv_cflags_added=
5042-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
5043-$as_echo "no" >&6; }
5044-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
5045-
5046-fi
5047-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
5048-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
5049-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
5050-else
5051-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
5052-fi
5053-
5054-
5055-  T_APPEND_V=-I${srcdir}/include/msvc_compat
5056-  if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
5057-  CPPFLAGS="${CPPFLAGS}${T_APPEND_V}"
5058-else
5059-  CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}"
5060-fi
5061-
5062-
5063-fi
5064-if test "x$je_cv_cray" = "xyes" ; then
5065-    if test "x$je_cv_cray_84" = "xyes" ; then
5066-
5067-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hipa2" >&5
5068-$as_echo_n "checking whether compiler supports -hipa2... " >&6; }
5069-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
5070-T_APPEND_V=-hipa2
5071-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
5072-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
5073-else
5074-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
5075-fi
5076-
5077-
5078-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
5079-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
5080-else
5081-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
5082-fi
5083-
5084-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
5085-/* end confdefs.h.  */
5086-
5087-
5088-int
5089-main ()
5090-{
5091-
5092-    return 0;
5093-
5094-  ;
5095-  return 0;
5096-}
5097-_ACEOF
5098-if ac_fn_c_try_compile "$LINENO"; then :
5099-  je_cv_cflags_added=-hipa2
5100-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
5101-$as_echo "yes" >&6; }
5102-else
5103-  je_cv_cflags_added=
5104-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
5105-$as_echo "no" >&6; }
5106-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
5107-
5108-fi
5109-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
5110-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
5111-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
5112-else
5113-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
5114-fi
5115-
5116-
5117-
5118-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnognu" >&5
5119-$as_echo_n "checking whether compiler supports -hnognu... " >&6; }
5120-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
5121-T_APPEND_V=-hnognu
5122-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
5123-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
5124-else
5125-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
5126-fi
5127-
5128-
5129-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
5130-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
5131-else
5132-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
5133-fi
5134-
5135-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
5136-/* end confdefs.h.  */
5137-
5138-
5139-int
5140-main ()
5141-{
5142-
5143-    return 0;
5144-
5145-  ;
5146-  return 0;
5147-}
5148-_ACEOF
5149-if ac_fn_c_try_compile "$LINENO"; then :
5150-  je_cv_cflags_added=-hnognu
5151-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
5152-$as_echo "yes" >&6; }
5153-else
5154-  je_cv_cflags_added=
5155-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
5156-$as_echo "no" >&6; }
5157-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
5158-
5159-fi
5160-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
5161-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
5162-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
5163-else
5164-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
5165-fi
5166-
5167-
5168-  fi
5169-
5170-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnomessage=128" >&5
5171-$as_echo_n "checking whether compiler supports -hnomessage=128... " >&6; }
5172-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
5173-T_APPEND_V=-hnomessage=128
5174-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
5175-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
5176-else
5177-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
5178-fi
5179-
5180-
5181-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
5182-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
5183-else
5184-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
5185-fi
5186-
5187-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
5188-/* end confdefs.h.  */
5189-
5190-
5191-int
5192-main ()
5193-{
5194-
5195-    return 0;
5196-
5197-  ;
5198-  return 0;
5199-}
5200-_ACEOF
5201-if ac_fn_c_try_compile "$LINENO"; then :
5202-  je_cv_cflags_added=-hnomessage=128
5203-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
5204-$as_echo "yes" >&6; }
5205-else
5206-  je_cv_cflags_added=
5207-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
5208-$as_echo "no" >&6; }
5209-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
5210-
5211-fi
5212-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
5213-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
5214-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
5215-else
5216-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
5217-fi
5218-
5219-
5220-
5221-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -hnomessage=1357" >&5
5222-$as_echo_n "checking whether compiler supports -hnomessage=1357... " >&6; }
5223-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
5224-T_APPEND_V=-hnomessage=1357
5225-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
5226-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
5227-else
5228-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
5229-fi
5230-
5231-
5232-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
5233-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
5234-else
5235-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
5236-fi
5237-
5238-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
5239-/* end confdefs.h.  */
5240-
5241-
5242-int
5243-main ()
5244-{
5245-
5246-    return 0;
5247-
5248-  ;
5249-  return 0;
5250-}
5251-_ACEOF
5252-if ac_fn_c_try_compile "$LINENO"; then :
5253-  je_cv_cflags_added=-hnomessage=1357
5254-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
5255-$as_echo "yes" >&6; }
5256-else
5257-  je_cv_cflags_added=
5258-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
5259-$as_echo "no" >&6; }
5260-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
5261-
5262-fi
5263-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
5264-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
5265-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
5266-else
5267-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
5268-fi
5269-
5270-
5271-fi
5272-
5273-
5274-
5275-ac_ext=c
5276-ac_cpp='$CPP $CPPFLAGS'
5277-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
5278-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
5279-ac_compiler_gnu=$ac_cv_c_compiler_gnu
5280-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5
5281-$as_echo_n "checking how to run the C preprocessor... " >&6; }
5282-# On Suns, sometimes $CPP names a directory.
5283-if test -n "$CPP" && test -d "$CPP"; then
5284-  CPP=
5285-fi
5286-if test -z "$CPP"; then
5287-  if ${ac_cv_prog_CPP+:} false; then :
5288-  $as_echo_n "(cached) " >&6
5289-else
5290-      # Double quotes because CPP needs to be expanded
5291-    for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
5292-    do
5293-      ac_preproc_ok=false
5294-for ac_c_preproc_warn_flag in '' yes
5295-do
5296-  # Use a header file that comes with gcc, so configuring glibc
5297-  # with a fresh cross-compiler works.
5298-  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
5299-  # <limits.h> exists even on freestanding compilers.
5300-  # On the NeXT, cc -E runs the code through the compiler's parser,
5301-  # not just through cpp. "Syntax error" is here to catch this case.
5302-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
5303-/* end confdefs.h.  */
5304-#ifdef __STDC__
5305-# include <limits.h>
5306-#else
5307-# include <assert.h>
5308-#endif
5309-		     Syntax error
5310-_ACEOF
5311-if ac_fn_c_try_cpp "$LINENO"; then :
5312-
5313-else
5314-  # Broken: fails on valid input.
5315-continue
5316-fi
5317-rm -f conftest.err conftest.i conftest.$ac_ext
5318-
5319-  # OK, works on sane cases.  Now check whether nonexistent headers
5320-  # can be detected and how.
5321-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
5322-/* end confdefs.h.  */
5323-#include <ac_nonexistent.h>
5324-_ACEOF
5325-if ac_fn_c_try_cpp "$LINENO"; then :
5326-  # Broken: success on invalid input.
5327-continue
5328-else
5329-  # Passes both tests.
5330-ac_preproc_ok=:
5331-break
5332-fi
5333-rm -f conftest.err conftest.i conftest.$ac_ext
5334-
5335-done
5336-# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
5337-rm -f conftest.i conftest.err conftest.$ac_ext
5338-if $ac_preproc_ok; then :
5339-  break
5340-fi
5341-
5342-    done
5343-    ac_cv_prog_CPP=$CPP
5344-
5345-fi
5346-  CPP=$ac_cv_prog_CPP
5347-else
5348-  ac_cv_prog_CPP=$CPP
5349-fi
5350-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5
5351-$as_echo "$CPP" >&6; }
5352-ac_preproc_ok=false
5353-for ac_c_preproc_warn_flag in '' yes
5354-do
5355-  # Use a header file that comes with gcc, so configuring glibc
5356-  # with a fresh cross-compiler works.
5357-  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
5358-  # <limits.h> exists even on freestanding compilers.
5359-  # On the NeXT, cc -E runs the code through the compiler's parser,
5360-  # not just through cpp. "Syntax error" is here to catch this case.
5361-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
5362-/* end confdefs.h.  */
5363-#ifdef __STDC__
5364-# include <limits.h>
5365-#else
5366-# include <assert.h>
5367-#endif
5368-		     Syntax error
5369-_ACEOF
5370-if ac_fn_c_try_cpp "$LINENO"; then :
5371-
5372-else
5373-  # Broken: fails on valid input.
5374-continue
5375-fi
5376-rm -f conftest.err conftest.i conftest.$ac_ext
5377-
5378-  # OK, works on sane cases.  Now check whether nonexistent headers
5379-  # can be detected and how.
5380-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
5381-/* end confdefs.h.  */
5382-#include <ac_nonexistent.h>
5383-_ACEOF
5384-if ac_fn_c_try_cpp "$LINENO"; then :
5385-  # Broken: success on invalid input.
5386-continue
5387-else
5388-  # Passes both tests.
5389-ac_preproc_ok=:
5390-break
5391-fi
5392-rm -f conftest.err conftest.i conftest.$ac_ext
5393-
5394-done
5395-# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
5396-rm -f conftest.i conftest.err conftest.$ac_ext
5397-if $ac_preproc_ok; then :
5398-
5399-else
5400-  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
5401-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
5402-as_fn_error $? "C preprocessor \"$CPP\" fails sanity check
5403-See \`config.log' for more details" "$LINENO" 5; }
5404-fi
5405-
5406-ac_ext=c
5407-ac_cpp='$CPP $CPPFLAGS'
5408-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
5409-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
5410-ac_compiler_gnu=$ac_cv_c_compiler_gnu
5411-
5412-
5413-# Check whether --enable-cxx was given.
5414-if test "${enable_cxx+set}" = set; then :
5415-  enableval=$enable_cxx; if test "x$enable_cxx" = "xno" ; then
5416-  enable_cxx="0"
5417-else
5418-  enable_cxx="1"
5419-fi
5420-
5421-else
5422-  enable_cxx="1"
5423-
5424-fi
5425-
5426-if test "x$enable_cxx" = "x1" ; then
5427-      # ===========================================================================
5428-#  https://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html
5429-# ===========================================================================
5430-#
5431-# SYNOPSIS
5432-#
5433-#   AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional])
5434-#
5435-# DESCRIPTION
5436-#
5437-#   Check for baseline language coverage in the compiler for the specified
5438-#   version of the C++ standard.  If necessary, add switches to CXX and
5439-#   CXXCPP to enable support.  VERSION may be '11' (for the C++11 standard)
5440-#   or '14' (for the C++14 standard).
5441-#
5442-#   The second argument, if specified, indicates whether you insist on an
5443-#   extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g.
5444-#   -std=c++11).  If neither is specified, you get whatever works, with
5445-#   preference for an extended mode.
5446-#
5447-#   The third argument, if specified 'mandatory' or if left unspecified,
5448-#   indicates that baseline support for the specified C++ standard is
5449-#   required and that the macro should error out if no mode with that
5450-#   support is found.  If specified 'optional', then configuration proceeds
5451-#   regardless, after defining HAVE_CXX${VERSION} if and only if a
5452-#   supporting mode is found.
5453-#
5454-# LICENSE
5455-#
5456-#   Copyright (c) 2008 Benjamin Kosnik <[email protected]>
5457-#   Copyright (c) 2012 Zack Weinberg <[email protected]>
5458-#   Copyright (c) 2013 Roy Stogner <[email protected]>
5459-#   Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov <[email protected]>
5460-#   Copyright (c) 2015 Paul Norman <[email protected]>
5461-#   Copyright (c) 2015 Moritz Klammler <[email protected]>
5462-#   Copyright (c) 2016, 2018 Krzesimir Nowak <[email protected]>
5463-#   Copyright (c) 2019 Enji Cooper <[email protected]>
5464-#
5465-#   Copying and distribution of this file, with or without modification, are
5466-#   permitted in any medium without royalty provided the copyright notice
5467-#   and this notice are preserved.  This file is offered as-is, without any
5468-#   warranty.
5469-
5470-#serial 11
5471-
5472-
5473-
5474-
5475-
5476-
5477-
5478-
5479-
5480-
5481-
5482-
5483-
5484-
5485-
5486-
5487-
5488-
5489-
5490-
5491-
5492-
5493-
5494-
5495-
5496-  ac_ext=cpp
5497-ac_cpp='$CXXCPP $CPPFLAGS'
5498-ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
5499-ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
5500-ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
5501-if test -z "$CXX"; then
5502-  if test -n "$CCC"; then
5503-    CXX=$CCC
5504-  else
5505-    if test -n "$ac_tool_prefix"; then
5506-  for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
5507-  do
5508-    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
5509-set dummy $ac_tool_prefix$ac_prog; ac_word=$2
5510-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
5511-$as_echo_n "checking for $ac_word... " >&6; }
5512-if ${ac_cv_prog_CXX+:} false; then :
5513-  $as_echo_n "(cached) " >&6
5514-else
5515-  if test -n "$CXX"; then
5516-  ac_cv_prog_CXX="$CXX" # Let the user override the test.
5517-else
5518-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
5519-for as_dir in $PATH
5520-do
5521-  IFS=$as_save_IFS
5522-  test -z "$as_dir" && as_dir=.
5523-    for ac_exec_ext in '' $ac_executable_extensions; do
5524-  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
5525-    ac_cv_prog_CXX="$ac_tool_prefix$ac_prog"
5526-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
5527-    break 2
5528-  fi
5529-done
5530-  done
5531-IFS=$as_save_IFS
5532-
5533-fi
5534-fi
5535-CXX=$ac_cv_prog_CXX
5536-if test -n "$CXX"; then
5537-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5
5538-$as_echo "$CXX" >&6; }
5539-else
5540-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
5541-$as_echo "no" >&6; }
5542-fi
5543-
5544-
5545-    test -n "$CXX" && break
5546-  done
5547-fi
5548-if test -z "$CXX"; then
5549-  ac_ct_CXX=$CXX
5550-  for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
5551-do
5552-  # Extract the first word of "$ac_prog", so it can be a program name with args.
5553-set dummy $ac_prog; ac_word=$2
5554-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
5555-$as_echo_n "checking for $ac_word... " >&6; }
5556-if ${ac_cv_prog_ac_ct_CXX+:} false; then :
5557-  $as_echo_n "(cached) " >&6
5558-else
5559-  if test -n "$ac_ct_CXX"; then
5560-  ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test.
5561-else
5562-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
5563-for as_dir in $PATH
5564-do
5565-  IFS=$as_save_IFS
5566-  test -z "$as_dir" && as_dir=.
5567-    for ac_exec_ext in '' $ac_executable_extensions; do
5568-  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
5569-    ac_cv_prog_ac_ct_CXX="$ac_prog"
5570-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
5571-    break 2
5572-  fi
5573-done
5574-  done
5575-IFS=$as_save_IFS
5576-
5577-fi
5578-fi
5579-ac_ct_CXX=$ac_cv_prog_ac_ct_CXX
5580-if test -n "$ac_ct_CXX"; then
5581-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5
5582-$as_echo "$ac_ct_CXX" >&6; }
5583-else
5584-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
5585-$as_echo "no" >&6; }
5586-fi
5587-
5588-
5589-  test -n "$ac_ct_CXX" && break
5590-done
5591-
5592-  if test "x$ac_ct_CXX" = x; then
5593-    CXX="g++"
5594-  else
5595-    case $cross_compiling:$ac_tool_warned in
5596-yes:)
5597-{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
5598-$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
5599-ac_tool_warned=yes ;;
5600-esac
5601-    CXX=$ac_ct_CXX
5602-  fi
5603-fi
5604-
5605-  fi
5606-fi
5607-# Provide some information about the compiler.
5608-$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5
5609-set X $ac_compile
5610-ac_compiler=$2
5611-for ac_option in --version -v -V -qversion; do
5612-  { { ac_try="$ac_compiler $ac_option >&5"
5613-case "(($ac_try" in
5614-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
5615-  *) ac_try_echo=$ac_try;;
5616-esac
5617-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
5618-$as_echo "$ac_try_echo"; } >&5
5619-  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
5620-  ac_status=$?
5621-  if test -s conftest.err; then
5622-    sed '10a\
5623-... rest of stderr output deleted ...
5624-         10q' conftest.err >conftest.er1
5625-    cat conftest.er1 >&5
5626-  fi
5627-  rm -f conftest.er1 conftest.err
5628-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
5629-  test $ac_status = 0; }
5630-done
5631-
5632-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5
5633-$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; }
5634-if ${ac_cv_cxx_compiler_gnu+:} false; then :
5635-  $as_echo_n "(cached) " >&6
5636-else
5637-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
5638-/* end confdefs.h.  */
5639-
5640-int
5641-main ()
5642-{
5643-#ifndef __GNUC__
5644-       choke me
5645-#endif
5646-
5647-  ;
5648-  return 0;
5649-}
5650-_ACEOF
5651-if ac_fn_cxx_try_compile "$LINENO"; then :
5652-  ac_compiler_gnu=yes
5653-else
5654-  ac_compiler_gnu=no
5655-fi
5656-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
5657-ac_cv_cxx_compiler_gnu=$ac_compiler_gnu
5658-
5659-fi
5660-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5
5661-$as_echo "$ac_cv_cxx_compiler_gnu" >&6; }
5662-if test $ac_compiler_gnu = yes; then
5663-  GXX=yes
5664-else
5665-  GXX=
5666-fi
5667-ac_test_CXXFLAGS=${CXXFLAGS+set}
5668-ac_save_CXXFLAGS=$CXXFLAGS
5669-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5
5670-$as_echo_n "checking whether $CXX accepts -g... " >&6; }
5671-if ${ac_cv_prog_cxx_g+:} false; then :
5672-  $as_echo_n "(cached) " >&6
5673-else
5674-  ac_save_cxx_werror_flag=$ac_cxx_werror_flag
5675-   ac_cxx_werror_flag=yes
5676-   ac_cv_prog_cxx_g=no
5677-   CXXFLAGS="-g"
5678-   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
5679-/* end confdefs.h.  */
5680-
5681-int
5682-main ()
5683-{
5684-
5685-  ;
5686-  return 0;
5687-}
5688-_ACEOF
5689-if ac_fn_cxx_try_compile "$LINENO"; then :
5690-  ac_cv_prog_cxx_g=yes
5691-else
5692-  CXXFLAGS=""
5693-      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
5694-/* end confdefs.h.  */
5695-
5696-int
5697-main ()
5698-{
5699-
5700-  ;
5701-  return 0;
5702-}
5703-_ACEOF
5704-if ac_fn_cxx_try_compile "$LINENO"; then :
5705-
5706-else
5707-  ac_cxx_werror_flag=$ac_save_cxx_werror_flag
5708-	 CXXFLAGS="-g"
5709-	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
5710-/* end confdefs.h.  */
5711-
5712-int
5713-main ()
5714-{
5715-
5716-  ;
5717-  return 0;
5718-}
5719-_ACEOF
5720-if ac_fn_cxx_try_compile "$LINENO"; then :
5721-  ac_cv_prog_cxx_g=yes
5722-fi
5723-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
5724-fi
5725-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
5726-fi
5727-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
5728-   ac_cxx_werror_flag=$ac_save_cxx_werror_flag
5729-fi
5730-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5
5731-$as_echo "$ac_cv_prog_cxx_g" >&6; }
5732-if test "$ac_test_CXXFLAGS" = set; then
5733-  CXXFLAGS=$ac_save_CXXFLAGS
5734-elif test $ac_cv_prog_cxx_g = yes; then
5735-  if test "$GXX" = yes; then
5736-    CXXFLAGS="-g -O2"
5737-  else
5738-    CXXFLAGS="-g"
5739-  fi
5740-else
5741-  if test "$GXX" = yes; then
5742-    CXXFLAGS="-O2"
5743-  else
5744-    CXXFLAGS=
5745-  fi
5746-fi
5747-ac_ext=cpp
5748-ac_cpp='$CXXCPP $CPPFLAGS'
5749-ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
5750-ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
5751-ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
5752-
5753-
5754-  ax_cxx_compile_alternatives="17 1z"    ax_cxx_compile_cxx17_required=false
5755-  ac_ext=cpp
5756-ac_cpp='$CXXCPP $CPPFLAGS'
5757-ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
5758-ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
5759-ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
5760-  ac_success=no
5761-
5762-
5763-
5764-    if test x$ac_success = xno; then
5765-                for alternative in ${ax_cxx_compile_alternatives}; do
5766-      for switch in -std=c++${alternative} +std=c++${alternative} "-h std=c++${alternative}"; do
5767-        cachevar=`$as_echo "ax_cv_cxx_compile_cxx17_$switch" | $as_tr_sh`
5768-        { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX supports C++17 features with $switch" >&5
5769-$as_echo_n "checking whether $CXX supports C++17 features with $switch... " >&6; }
5770-if eval \${$cachevar+:} false; then :
5771-  $as_echo_n "(cached) " >&6
5772-else
5773-  ac_save_CXX="$CXX"
5774-           CXX="$CXX $switch"
5775-           cat confdefs.h - <<_ACEOF >conftest.$ac_ext
5776-/* end confdefs.h.  */
5777-
5778-
5779-// If the compiler admits that it is not ready for C++11, why torture it?
5780-// Hopefully, this will speed up the test.
5781-
5782-#ifndef __cplusplus
5783-
5784-#error "This is not a C++ compiler"
5785-
5786-#elif __cplusplus < 201103L
5787-
5788-#error "This is not a C++11 compiler"
5789-
5790-#else
5791-
5792-namespace cxx11
5793-{
5794-
5795-  namespace test_static_assert
5796-  {
5797-
5798-    template <typename T>
5799-    struct check
5800-    {
5801-      static_assert(sizeof(int) <= sizeof(T), "not big enough");
5802-    };
5803-
5804-  }
5805-
5806-  namespace test_final_override
5807-  {
5808-
5809-    struct Base
5810-    {
5811-      virtual ~Base() {}
5812-      virtual void f() {}
5813-    };
5814-
5815-    struct Derived : public Base
5816-    {
5817-      virtual ~Derived() override {}
5818-      virtual void f() override {}
5819-    };
5820-
5821-  }
5822-
5823-  namespace test_double_right_angle_brackets
5824-  {
5825-
5826-    template < typename T >
5827-    struct check {};
5828-
5829-    typedef check<void> single_type;
5830-    typedef check<check<void>> double_type;
5831-    typedef check<check<check<void>>> triple_type;
5832-    typedef check<check<check<check<void>>>> quadruple_type;
5833-
5834-  }
5835-
5836-  namespace test_decltype
5837-  {
5838-
5839-    int
5840-    f()
5841-    {
5842-      int a = 1;
5843-      decltype(a) b = 2;
5844-      return a + b;
5845-    }
5846-
5847-  }
5848-
5849-  namespace test_type_deduction
5850-  {
5851-
5852-    template < typename T1, typename T2 >
5853-    struct is_same
5854-    {
5855-      static const bool value = false;
5856-    };
5857-
5858-    template < typename T >
5859-    struct is_same<T, T>
5860-    {
5861-      static const bool value = true;
5862-    };
5863-
5864-    template < typename T1, typename T2 >
5865-    auto
5866-    add(T1 a1, T2 a2) -> decltype(a1 + a2)
5867-    {
5868-      return a1 + a2;
5869-    }
5870-
5871-    int
5872-    test(const int c, volatile int v)
5873-    {
5874-      static_assert(is_same<int, decltype(0)>::value == true, "");
5875-      static_assert(is_same<int, decltype(c)>::value == false, "");
5876-      static_assert(is_same<int, decltype(v)>::value == false, "");
5877-      auto ac = c;
5878-      auto av = v;
5879-      auto sumi = ac + av + 'x';
5880-      auto sumf = ac + av + 1.0;
5881-      static_assert(is_same<int, decltype(ac)>::value == true, "");
5882-      static_assert(is_same<int, decltype(av)>::value == true, "");
5883-      static_assert(is_same<int, decltype(sumi)>::value == true, "");
5884-      static_assert(is_same<int, decltype(sumf)>::value == false, "");
5885-      static_assert(is_same<int, decltype(add(c, v))>::value == true, "");
5886-      return (sumf > 0.0) ? sumi : add(c, v);
5887-    }
5888-
5889-  }
5890-
5891-  namespace test_noexcept
5892-  {
5893-
5894-    int f() { return 0; }
5895-    int g() noexcept { return 0; }
5896-
5897-    static_assert(noexcept(f()) == false, "");
5898-    static_assert(noexcept(g()) == true, "");
5899-
5900-  }
5901-
5902-  namespace test_constexpr
5903-  {
5904-
5905-    template < typename CharT >
5906-    unsigned long constexpr
5907-    strlen_c_r(const CharT *const s, const unsigned long acc) noexcept
5908-    {
5909-      return *s ? strlen_c_r(s + 1, acc + 1) : acc;
5910-    }
5911-
5912-    template < typename CharT >
5913-    unsigned long constexpr
5914-    strlen_c(const CharT *const s) noexcept
5915-    {
5916-      return strlen_c_r(s, 0UL);
5917-    }
5918-
5919-    static_assert(strlen_c("") == 0UL, "");
5920-    static_assert(strlen_c("1") == 1UL, "");
5921-    static_assert(strlen_c("example") == 7UL, "");
5922-    static_assert(strlen_c("another\0example") == 7UL, "");
5923-
5924-  }
5925-
5926-  namespace test_rvalue_references
5927-  {
5928-
5929-    template < int N >
5930-    struct answer
5931-    {
5932-      static constexpr int value = N;
5933-    };
5934-
5935-    answer<1> f(int&)       { return answer<1>(); }
5936-    answer<2> f(const int&) { return answer<2>(); }
5937-    answer<3> f(int&&)      { return answer<3>(); }
5938-
5939-    void
5940-    test()
5941-    {
5942-      int i = 0;
5943-      const int c = 0;
5944-      static_assert(decltype(f(i))::value == 1, "");
5945-      static_assert(decltype(f(c))::value == 2, "");
5946-      static_assert(decltype(f(0))::value == 3, "");
5947-    }
5948-
5949-  }
5950-
5951-  namespace test_uniform_initialization
5952-  {
5953-
5954-    struct test
5955-    {
5956-      static const int zero {};
5957-      static const int one {1};
5958-    };
5959-
5960-    static_assert(test::zero == 0, "");
5961-    static_assert(test::one == 1, "");
5962-
5963-  }
5964-
5965-  namespace test_lambdas
5966-  {
5967-
5968-    void
5969-    test1()
5970-    {
5971-      auto lambda1 = [](){};
5972-      auto lambda2 = lambda1;
5973-      lambda1();
5974-      lambda2();
5975-    }
5976-
5977-    int
5978-    test2()
5979-    {
5980-      auto a = [](int i, int j){ return i + j; }(1, 2);
5981-      auto b = []() -> int { return '0'; }();
5982-      auto c = [=](){ return a + b; }();
5983-      auto d = [&](){ return c; }();
5984-      auto e = [a, &b](int x) mutable {
5985-        const auto identity = [](int y){ return y; };
5986-        for (auto i = 0; i < a; ++i)
5987-          a += b--;
5988-        return x + identity(a + b);
5989-      }(0);
5990-      return a + b + c + d + e;
5991-    }
5992-
5993-    int
5994-    test3()
5995-    {
5996-      const auto nullary = [](){ return 0; };
5997-      const auto unary = [](int x){ return x; };
5998-      using nullary_t = decltype(nullary);
5999-      using unary_t = decltype(unary);
6000-      const auto higher1st = [](nullary_t f){ return f(); };
6001-      const auto higher2nd = [unary](nullary_t f1){
6002-        return [unary, f1](unary_t f2){ return f2(unary(f1())); };
6003-      };
6004-      return higher1st(nullary) + higher2nd(nullary)(unary);
6005-    }
6006-
6007-  }
6008-
6009-  namespace test_variadic_templates
6010-  {
6011-
6012-    template <int...>
6013-    struct sum;
6014-
6015-    template <int N0, int... N1toN>
6016-    struct sum<N0, N1toN...>
6017-    {
6018-      static constexpr auto value = N0 + sum<N1toN...>::value;
6019-    };
6020-
6021-    template <>
6022-    struct sum<>
6023-    {
6024-      static constexpr auto value = 0;
6025-    };
6026-
6027-    static_assert(sum<>::value == 0, "");
6028-    static_assert(sum<1>::value == 1, "");
6029-    static_assert(sum<23>::value == 23, "");
6030-    static_assert(sum<1, 2>::value == 3, "");
6031-    static_assert(sum<5, 5, 11>::value == 21, "");
6032-    static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, "");
6033-
6034-  }
6035-
6036-  // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae
6037-  // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function
6038-  // because of this.
6039-  namespace test_template_alias_sfinae
6040-  {
6041-
6042-    struct foo {};
6043-
6044-    template<typename T>
6045-    using member = typename T::member_type;
6046-
6047-    template<typename T>
6048-    void func(...) {}
6049-
6050-    template<typename T>
6051-    void func(member<T>*) {}
6052-
6053-    void test();
6054-
6055-    void test() { func<foo>(0); }
6056-
6057-  }
6058-
6059-}  // namespace cxx11
6060-
6061-#endif  // __cplusplus >= 201103L
6062-
6063-
6064-
6065-
6066-// If the compiler admits that it is not ready for C++14, why torture it?
6067-// Hopefully, this will speed up the test.
6068-
6069-#ifndef __cplusplus
6070-
6071-#error "This is not a C++ compiler"
6072-
6073-#elif __cplusplus < 201402L
6074-
6075-#error "This is not a C++14 compiler"
6076-
6077-#else
6078-
6079-namespace cxx14
6080-{
6081-
6082-  namespace test_polymorphic_lambdas
6083-  {
6084-
6085-    int
6086-    test()
6087-    {
6088-      const auto lambda = [](auto&&... args){
6089-        const auto istiny = [](auto x){
6090-          return (sizeof(x) == 1UL) ? 1 : 0;
6091-        };
6092-        const int aretiny[] = { istiny(args)... };
6093-        return aretiny[0];
6094-      };
6095-      return lambda(1, 1L, 1.0f, '1');
6096-    }
6097-
6098-  }
6099-
6100-  namespace test_binary_literals
6101-  {
6102-
6103-    constexpr auto ivii = 0b0000000000101010;
6104-    static_assert(ivii == 42, "wrong value");
6105-
6106-  }
6107-
6108-  namespace test_generalized_constexpr
6109-  {
6110-
6111-    template < typename CharT >
6112-    constexpr unsigned long
6113-    strlen_c(const CharT *const s) noexcept
6114-    {
6115-      auto length = 0UL;
6116-      for (auto p = s; *p; ++p)
6117-        ++length;
6118-      return length;
6119-    }
6120-
6121-    static_assert(strlen_c("") == 0UL, "");
6122-    static_assert(strlen_c("x") == 1UL, "");
6123-    static_assert(strlen_c("test") == 4UL, "");
6124-    static_assert(strlen_c("another\0test") == 7UL, "");
6125-
6126-  }
6127-
6128-  namespace test_lambda_init_capture
6129-  {
6130-
6131-    int
6132-    test()
6133-    {
6134-      auto x = 0;
6135-      const auto lambda1 = [a = x](int b){ return a + b; };
6136-      const auto lambda2 = [a = lambda1(x)](){ return a; };
6137-      return lambda2();
6138-    }
6139-
6140-  }
6141-
6142-  namespace test_digit_separators
6143-  {
6144-
6145-    constexpr auto ten_million = 100'000'000;
6146-    static_assert(ten_million == 100000000, "");
6147-
6148-  }
6149-
6150-  namespace test_return_type_deduction
6151-  {
6152-
6153-    auto f(int& x) { return x; }
6154-    decltype(auto) g(int& x) { return x; }
6155-
6156-    template < typename T1, typename T2 >
6157-    struct is_same
6158-    {
6159-      static constexpr auto value = false;
6160-    };
6161-
6162-    template < typename T >
6163-    struct is_same<T, T>
6164-    {
6165-      static constexpr auto value = true;
6166-    };
6167-
6168-    int
6169-    test()
6170-    {
6171-      auto x = 0;
6172-      static_assert(is_same<int, decltype(f(x))>::value, "");
6173-      static_assert(is_same<int&, decltype(g(x))>::value, "");
6174-      return x;
6175-    }
6176-
6177-  }
6178-
6179-}  // namespace cxx14
6180-
6181-#endif  // __cplusplus >= 201402L
6182-
6183-
6184-
6185-
6186-// If the compiler admits that it is not ready for C++17, why torture it?
6187-// Hopefully, this will speed up the test.
6188-
6189-#ifndef __cplusplus
6190-
6191-#error "This is not a C++ compiler"
6192-
6193-#elif __cplusplus < 201703L
6194-
6195-#error "This is not a C++17 compiler"
6196-
6197-#else
6198-
6199-#include <initializer_list>
6200-#include <utility>
6201-#include <type_traits>
6202-
6203-namespace cxx17
6204-{
6205-
6206-  namespace test_constexpr_lambdas
6207-  {
6208-
6209-    constexpr int foo = [](){return 42;}();
6210-
6211-  }
6212-
6213-  namespace test::nested_namespace::definitions
6214-  {
6215-
6216-  }
6217-
6218-  namespace test_fold_expression
6219-  {
6220-
6221-    template<typename... Args>
6222-    int multiply(Args... args)
6223-    {
6224-      return (args * ... * 1);
6225-    }
6226-
6227-    template<typename... Args>
6228-    bool all(Args... args)
6229-    {
6230-      return (args && ...);
6231-    }
6232-
6233-  }
6234-
6235-  namespace test_extended_static_assert
6236-  {
6237-
6238-    static_assert (true);
6239-
6240-  }
6241-
6242-  namespace test_auto_brace_init_list
6243-  {
6244-
6245-    auto foo = {5};
6246-    auto bar {5};
6247-
6248-    static_assert(std::is_same<std::initializer_list<int>, decltype(foo)>::value);
6249-    static_assert(std::is_same<int, decltype(bar)>::value);
6250-  }
6251-
6252-  namespace test_typename_in_template_template_parameter
6253-  {
6254-
6255-    template<template<typename> typename X> struct D;
6256-
6257-  }
6258-
6259-  namespace test_fallthrough_nodiscard_maybe_unused_attributes
6260-  {
6261-
6262-    int f1()
6263-    {
6264-      return 42;
6265-    }
6266-
6267-    [[nodiscard]] int f2()
6268-    {
6269-      [[maybe_unused]] auto unused = f1();
6270-
6271-      switch (f1())
6272-      {
6273-      case 17:
6274-        f1();
6275-        [[fallthrough]];
6276-      case 42:
6277-        f1();
6278-      }
6279-      return f1();
6280-    }
6281-
6282-  }
6283-
6284-  namespace test_extended_aggregate_initialization
6285-  {
6286-
6287-    struct base1
6288-    {
6289-      int b1, b2 = 42;
6290-    };
6291-
6292-    struct base2
6293-    {
6294-      base2() {
6295-        b3 = 42;
6296-      }
6297-      int b3;
6298-    };
6299-
6300-    struct derived : base1, base2
6301-    {
6302-        int d;
6303-    };
6304-
6305-    derived d1 {{1, 2}, {}, 4};  // full initialization
6306-    derived d2 {{}, {}, 4};      // value-initialized bases
6307-
6308-  }
6309-
6310-  namespace test_general_range_based_for_loop
6311-  {
6312-
6313-    struct iter
6314-    {
6315-      int i;
6316-
6317-      int& operator* ()
6318-      {
6319-        return i;
6320-      }
6321-
6322-      const int& operator* () const
6323-      {
6324-        return i;
6325-      }
6326-
6327-      iter& operator++()
6328-      {
6329-        ++i;
6330-        return *this;
6331-      }
6332-    };
6333-
6334-    struct sentinel
6335-    {
6336-      int i;
6337-    };
6338-
6339-    bool operator== (const iter& i, const sentinel& s)
6340-    {
6341-      return i.i == s.i;
6342-    }
6343-
6344-    bool operator!= (const iter& i, const sentinel& s)
6345-    {
6346-      return !(i == s);
6347-    }
6348-
6349-    struct range
6350-    {
6351-      iter begin() const
6352-      {
6353-        return {0};
6354-      }
6355-
6356-      sentinel end() const
6357-      {
6358-        return {5};
6359-      }
6360-    };
6361-
6362-    void f()
6363-    {
6364-      range r {};
6365-
6366-      for (auto i : r)
6367-      {
6368-        [[maybe_unused]] auto v = i;
6369-      }
6370-    }
6371-
6372-  }
6373-
6374-  namespace test_lambda_capture_asterisk_this_by_value
6375-  {
6376-
6377-    struct t
6378-    {
6379-      int i;
6380-      int foo()
6381-      {
6382-        return [*this]()
6383-        {
6384-          return i;
6385-        }();
6386-      }
6387-    };
6388-
6389-  }
6390-
6391-  namespace test_enum_class_construction
6392-  {
6393-
6394-    enum class byte : unsigned char
6395-    {};
6396-
6397-    byte foo {42};
6398-
6399-  }
6400-
6401-  namespace test_constexpr_if
6402-  {
6403-
6404-    template <bool cond>
6405-    int f ()
6406-    {
6407-      if constexpr(cond)
6408-      {
6409-        return 13;
6410-      }
6411-      else
6412-      {
6413-        return 42;
6414-      }
6415-    }
6416-
6417-  }
6418-
6419-  namespace test_selection_statement_with_initializer
6420-  {
6421-
6422-    int f()
6423-    {
6424-      return 13;
6425-    }
6426-
6427-    int f2()
6428-    {
6429-      if (auto i = f(); i > 0)
6430-      {
6431-        return 3;
6432-      }
6433-
6434-      switch (auto i = f(); i + 4)
6435-      {
6436-      case 17:
6437-        return 2;
6438-
6439-      default:
6440-        return 1;
6441-      }
6442-    }
6443-
6444-  }
6445-
6446-  namespace test_template_argument_deduction_for_class_templates
6447-  {
6448-
6449-    template <typename T1, typename T2>
6450-    struct pair
6451-    {
6452-      pair (T1 p1, T2 p2)
6453-        : m1 {p1},
6454-          m2 {p2}
6455-      {}
6456-
6457-      T1 m1;
6458-      T2 m2;
6459-    };
6460-
6461-    void f()
6462-    {
6463-      [[maybe_unused]] auto p = pair{13, 42u};
6464-    }
6465-
6466-  }
6467-
6468-  namespace test_non_type_auto_template_parameters
6469-  {
6470-
6471-    template <auto n>
6472-    struct B
6473-    {};
6474-
6475-    B<5> b1;
6476-    B<'a'> b2;
6477-
6478-  }
6479-
6480-  namespace test_structured_bindings
6481-  {
6482-
6483-    int arr[2] = { 1, 2 };
6484-    std::pair<int, int> pr = { 1, 2 };
6485-
6486-    auto f1() -> int(&)[2]
6487-    {
6488-      return arr;
6489-    }
6490-
6491-    auto f2() -> std::pair<int, int>&
6492-    {
6493-      return pr;
6494-    }
6495-
6496-    struct S
6497-    {
6498-      int x1 : 2;
6499-      volatile double y1;
6500-    };
6501-
6502-    S f3()
6503-    {
6504-      return {};
6505-    }
6506-
6507-    auto [ x1, y1 ] = f1();
6508-    auto& [ xr1, yr1 ] = f1();
6509-    auto [ x2, y2 ] = f2();
6510-    auto& [ xr2, yr2 ] = f2();
6511-    const auto [ x3, y3 ] = f3();
6512-
6513-  }
6514-
6515-  namespace test_exception_spec_type_system
6516-  {
6517-
6518-    struct Good {};
6519-    struct Bad {};
6520-
6521-    void g1() noexcept;
6522-    void g2();
6523-
6524-    template<typename T>
6525-    Bad
6526-    f(T*, T*);
6527-
6528-    template<typename T1, typename T2>
6529-    Good
6530-    f(T1*, T2*);
6531-
6532-    static_assert (std::is_same_v<Good, decltype(f(g1, g2))>);
6533-
6534-  }
6535-
6536-  namespace test_inline_variables
6537-  {
6538-
6539-    template<class T> void f(T)
6540-    {}
6541-
6542-    template<class T> inline T g(T)
6543-    {
6544-      return T{};
6545-    }
6546-
6547-    template<> inline void f<>(int)
6548-    {}
6549-
6550-    template<> int g<>(int)
6551-    {
6552-      return 5;
6553-    }
6554-
6555-  }
6556-
6557-}  // namespace cxx17
6558-
6559-#endif  // __cplusplus < 201703L
6560-
6561-
6562-
6563-_ACEOF
6564-if ac_fn_cxx_try_compile "$LINENO"; then :
6565-  eval $cachevar=yes
6566-else
6567-  eval $cachevar=no
6568-fi
6569-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
6570-           CXX="$ac_save_CXX"
6571-fi
6572-eval ac_res=\$$cachevar
6573-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
6574-$as_echo "$ac_res" >&6; }
6575-        if eval test x\$$cachevar = xyes; then
6576-          CXX="$CXX $switch"
6577-          if test -n "$CXXCPP" ; then
6578-            CXXCPP="$CXXCPP $switch"
6579-          fi
6580-          ac_success=yes
6581-          break
6582-        fi
6583-      done
6584-      if test x$ac_success = xyes; then
6585-        break
6586-      fi
6587-    done
6588-  fi
6589-  ac_ext=c
6590-ac_cpp='$CPP $CPPFLAGS'
6591-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
6592-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
6593-ac_compiler_gnu=$ac_cv_c_compiler_gnu
6594-
6595-  if test x$ax_cxx_compile_cxx17_required = xtrue; then
6596-    if test x$ac_success = xno; then
6597-      as_fn_error $? "*** A compiler with support for C++17 language features is required." "$LINENO" 5
6598-    fi
6599-  fi
6600-  if test x$ac_success = xno; then
6601-    HAVE_CXX17=0
6602-    { $as_echo "$as_me:${as_lineno-$LINENO}: No compiler with C++17 support was found" >&5
6603-$as_echo "$as_me: No compiler with C++17 support was found" >&6;}
6604-  else
6605-    HAVE_CXX17=1
6606-
6607-$as_echo "#define HAVE_CXX17 1" >>confdefs.h
6608-
6609-  fi
6610-
6611-
6612-  if test "x${HAVE_CXX17}" != "x1"; then
6613-      ax_cxx_compile_alternatives="14 1y"    ax_cxx_compile_cxx14_required=false
6614-  ac_ext=cpp
6615-ac_cpp='$CXXCPP $CPPFLAGS'
6616-ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
6617-ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
6618-ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
6619-  ac_success=no
6620-
6621-
6622-
6623-    if test x$ac_success = xno; then
6624-                for alternative in ${ax_cxx_compile_alternatives}; do
6625-      for switch in -std=c++${alternative} +std=c++${alternative} "-h std=c++${alternative}"; do
6626-        cachevar=`$as_echo "ax_cv_cxx_compile_cxx14_$switch" | $as_tr_sh`
6627-        { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX supports C++14 features with $switch" >&5
6628-$as_echo_n "checking whether $CXX supports C++14 features with $switch... " >&6; }
6629-if eval \${$cachevar+:} false; then :
6630-  $as_echo_n "(cached) " >&6
6631-else
6632-  ac_save_CXX="$CXX"
6633-           CXX="$CXX $switch"
6634-           cat confdefs.h - <<_ACEOF >conftest.$ac_ext
6635-/* end confdefs.h.  */
6636-
6637-
6638-// If the compiler admits that it is not ready for C++11, why torture it?
6639-// Hopefully, this will speed up the test.
6640-
6641-#ifndef __cplusplus
6642-
6643-#error "This is not a C++ compiler"
6644-
6645-#elif __cplusplus < 201103L
6646-
6647-#error "This is not a C++11 compiler"
6648-
6649-#else
6650-
6651-namespace cxx11
6652-{
6653-
6654-  namespace test_static_assert
6655-  {
6656-
6657-    template <typename T>
6658-    struct check
6659-    {
6660-      static_assert(sizeof(int) <= sizeof(T), "not big enough");
6661-    };
6662-
6663-  }
6664-
6665-  namespace test_final_override
6666-  {
6667-
6668-    struct Base
6669-    {
6670-      virtual ~Base() {}
6671-      virtual void f() {}
6672-    };
6673-
6674-    struct Derived : public Base
6675-    {
6676-      virtual ~Derived() override {}
6677-      virtual void f() override {}
6678-    };
6679-
6680-  }
6681-
6682-  namespace test_double_right_angle_brackets
6683-  {
6684-
6685-    template < typename T >
6686-    struct check {};
6687-
6688-    typedef check<void> single_type;
6689-    typedef check<check<void>> double_type;
6690-    typedef check<check<check<void>>> triple_type;
6691-    typedef check<check<check<check<void>>>> quadruple_type;
6692-
6693-  }
6694-
6695-  namespace test_decltype
6696-  {
6697-
6698-    int
6699-    f()
6700-    {
6701-      int a = 1;
6702-      decltype(a) b = 2;
6703-      return a + b;
6704-    }
6705-
6706-  }
6707-
6708-  namespace test_type_deduction
6709-  {
6710-
6711-    template < typename T1, typename T2 >
6712-    struct is_same
6713-    {
6714-      static const bool value = false;
6715-    };
6716-
6717-    template < typename T >
6718-    struct is_same<T, T>
6719-    {
6720-      static const bool value = true;
6721-    };
6722-
6723-    template < typename T1, typename T2 >
6724-    auto
6725-    add(T1 a1, T2 a2) -> decltype(a1 + a2)
6726-    {
6727-      return a1 + a2;
6728-    }
6729-
6730-    int
6731-    test(const int c, volatile int v)
6732-    {
6733-      static_assert(is_same<int, decltype(0)>::value == true, "");
6734-      static_assert(is_same<int, decltype(c)>::value == false, "");
6735-      static_assert(is_same<int, decltype(v)>::value == false, "");
6736-      auto ac = c;
6737-      auto av = v;
6738-      auto sumi = ac + av + 'x';
6739-      auto sumf = ac + av + 1.0;
6740-      static_assert(is_same<int, decltype(ac)>::value == true, "");
6741-      static_assert(is_same<int, decltype(av)>::value == true, "");
6742-      static_assert(is_same<int, decltype(sumi)>::value == true, "");
6743-      static_assert(is_same<int, decltype(sumf)>::value == false, "");
6744-      static_assert(is_same<int, decltype(add(c, v))>::value == true, "");
6745-      return (sumf > 0.0) ? sumi : add(c, v);
6746-    }
6747-
6748-  }
6749-
6750-  namespace test_noexcept
6751-  {
6752-
6753-    int f() { return 0; }
6754-    int g() noexcept { return 0; }
6755-
6756-    static_assert(noexcept(f()) == false, "");
6757-    static_assert(noexcept(g()) == true, "");
6758-
6759-  }
6760-
6761-  namespace test_constexpr
6762-  {
6763-
6764-    template < typename CharT >
6765-    unsigned long constexpr
6766-    strlen_c_r(const CharT *const s, const unsigned long acc) noexcept
6767-    {
6768-      return *s ? strlen_c_r(s + 1, acc + 1) : acc;
6769-    }
6770-
6771-    template < typename CharT >
6772-    unsigned long constexpr
6773-    strlen_c(const CharT *const s) noexcept
6774-    {
6775-      return strlen_c_r(s, 0UL);
6776-    }
6777-
6778-    static_assert(strlen_c("") == 0UL, "");
6779-    static_assert(strlen_c("1") == 1UL, "");
6780-    static_assert(strlen_c("example") == 7UL, "");
6781-    static_assert(strlen_c("another\0example") == 7UL, "");
6782-
6783-  }
6784-
6785-  namespace test_rvalue_references
6786-  {
6787-
6788-    template < int N >
6789-    struct answer
6790-    {
6791-      static constexpr int value = N;
6792-    };
6793-
6794-    answer<1> f(int&)       { return answer<1>(); }
6795-    answer<2> f(const int&) { return answer<2>(); }
6796-    answer<3> f(int&&)      { return answer<3>(); }
6797-
6798-    void
6799-    test()
6800-    {
6801-      int i = 0;
6802-      const int c = 0;
6803-      static_assert(decltype(f(i))::value == 1, "");
6804-      static_assert(decltype(f(c))::value == 2, "");
6805-      static_assert(decltype(f(0))::value == 3, "");
6806-    }
6807-
6808-  }
6809-
6810-  namespace test_uniform_initialization
6811-  {
6812-
6813-    struct test
6814-    {
6815-      static const int zero {};
6816-      static const int one {1};
6817-    };
6818-
6819-    static_assert(test::zero == 0, "");
6820-    static_assert(test::one == 1, "");
6821-
6822-  }
6823-
6824-  namespace test_lambdas
6825-  {
6826-
6827-    void
6828-    test1()
6829-    {
6830-      auto lambda1 = [](){};
6831-      auto lambda2 = lambda1;
6832-      lambda1();
6833-      lambda2();
6834-    }
6835-
6836-    int
6837-    test2()
6838-    {
6839-      auto a = [](int i, int j){ return i + j; }(1, 2);
6840-      auto b = []() -> int { return '0'; }();
6841-      auto c = [=](){ return a + b; }();
6842-      auto d = [&](){ return c; }();
6843-      auto e = [a, &b](int x) mutable {
6844-        const auto identity = [](int y){ return y; };
6845-        for (auto i = 0; i < a; ++i)
6846-          a += b--;
6847-        return x + identity(a + b);
6848-      }(0);
6849-      return a + b + c + d + e;
6850-    }
6851-
6852-    int
6853-    test3()
6854-    {
6855-      const auto nullary = [](){ return 0; };
6856-      const auto unary = [](int x){ return x; };
6857-      using nullary_t = decltype(nullary);
6858-      using unary_t = decltype(unary);
6859-      const auto higher1st = [](nullary_t f){ return f(); };
6860-      const auto higher2nd = [unary](nullary_t f1){
6861-        return [unary, f1](unary_t f2){ return f2(unary(f1())); };
6862-      };
6863-      return higher1st(nullary) + higher2nd(nullary)(unary);
6864-    }
6865-
6866-  }
6867-
6868-  namespace test_variadic_templates
6869-  {
6870-
6871-    template <int...>
6872-    struct sum;
6873-
6874-    template <int N0, int... N1toN>
6875-    struct sum<N0, N1toN...>
6876-    {
6877-      static constexpr auto value = N0 + sum<N1toN...>::value;
6878-    };
6879-
6880-    template <>
6881-    struct sum<>
6882-    {
6883-      static constexpr auto value = 0;
6884-    };
6885-
6886-    static_assert(sum<>::value == 0, "");
6887-    static_assert(sum<1>::value == 1, "");
6888-    static_assert(sum<23>::value == 23, "");
6889-    static_assert(sum<1, 2>::value == 3, "");
6890-    static_assert(sum<5, 5, 11>::value == 21, "");
6891-    static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, "");
6892-
6893-  }
6894-
6895-  // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae
6896-  // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function
6897-  // because of this.
6898-  namespace test_template_alias_sfinae
6899-  {
6900-
6901-    struct foo {};
6902-
6903-    template<typename T>
6904-    using member = typename T::member_type;
6905-
6906-    template<typename T>
6907-    void func(...) {}
6908-
6909-    template<typename T>
6910-    void func(member<T>*) {}
6911-
6912-    void test();
6913-
6914-    void test() { func<foo>(0); }
6915-
6916-  }
6917-
6918-}  // namespace cxx11
6919-
6920-#endif  // __cplusplus >= 201103L
6921-
6922-
6923-
6924-
6925-// If the compiler admits that it is not ready for C++14, why torture it?
6926-// Hopefully, this will speed up the test.
6927-
6928-#ifndef __cplusplus
6929-
6930-#error "This is not a C++ compiler"
6931-
6932-#elif __cplusplus < 201402L
6933-
6934-#error "This is not a C++14 compiler"
6935-
6936-#else
6937-
6938-namespace cxx14
6939-{
6940-
6941-  namespace test_polymorphic_lambdas
6942-  {
6943-
6944-    int
6945-    test()
6946-    {
6947-      const auto lambda = [](auto&&... args){
6948-        const auto istiny = [](auto x){
6949-          return (sizeof(x) == 1UL) ? 1 : 0;
6950-        };
6951-        const int aretiny[] = { istiny(args)... };
6952-        return aretiny[0];
6953-      };
6954-      return lambda(1, 1L, 1.0f, '1');
6955-    }
6956-
6957-  }
6958-
6959-  namespace test_binary_literals
6960-  {
6961-
6962-    constexpr auto ivii = 0b0000000000101010;
6963-    static_assert(ivii == 42, "wrong value");
6964-
6965-  }
6966-
6967-  namespace test_generalized_constexpr
6968-  {
6969-
6970-    template < typename CharT >
6971-    constexpr unsigned long
6972-    strlen_c(const CharT *const s) noexcept
6973-    {
6974-      auto length = 0UL;
6975-      for (auto p = s; *p; ++p)
6976-        ++length;
6977-      return length;
6978-    }
6979-
6980-    static_assert(strlen_c("") == 0UL, "");
6981-    static_assert(strlen_c("x") == 1UL, "");
6982-    static_assert(strlen_c("test") == 4UL, "");
6983-    static_assert(strlen_c("another\0test") == 7UL, "");
6984-
6985-  }
6986-
6987-  namespace test_lambda_init_capture
6988-  {
6989-
6990-    int
6991-    test()
6992-    {
6993-      auto x = 0;
6994-      const auto lambda1 = [a = x](int b){ return a + b; };
6995-      const auto lambda2 = [a = lambda1(x)](){ return a; };
6996-      return lambda2();
6997-    }
6998-
6999-  }
7000-
7001-  namespace test_digit_separators
7002-  {
7003-
7004-    constexpr auto ten_million = 100'000'000;
7005-    static_assert(ten_million == 100000000, "");
7006-
7007-  }
7008-
7009-  namespace test_return_type_deduction
7010-  {
7011-
7012-    auto f(int& x) { return x; }
7013-    decltype(auto) g(int& x) { return x; }
7014-
7015-    template < typename T1, typename T2 >
7016-    struct is_same
7017-    {
7018-      static constexpr auto value = false;
7019-    };
7020-
7021-    template < typename T >
7022-    struct is_same<T, T>
7023-    {
7024-      static constexpr auto value = true;
7025-    };
7026-
7027-    int
7028-    test()
7029-    {
7030-      auto x = 0;
7031-      static_assert(is_same<int, decltype(f(x))>::value, "");
7032-      static_assert(is_same<int&, decltype(g(x))>::value, "");
7033-      return x;
7034-    }
7035-
7036-  }
7037-
7038-}  // namespace cxx14
7039-
7040-#endif  // __cplusplus >= 201402L
7041-
7042-
7043-
7044-_ACEOF
7045-if ac_fn_cxx_try_compile "$LINENO"; then :
7046-  eval $cachevar=yes
7047-else
7048-  eval $cachevar=no
7049-fi
7050-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
7051-           CXX="$ac_save_CXX"
7052-fi
7053-eval ac_res=\$$cachevar
7054-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
7055-$as_echo "$ac_res" >&6; }
7056-        if eval test x\$$cachevar = xyes; then
7057-          CXX="$CXX $switch"
7058-          if test -n "$CXXCPP" ; then
7059-            CXXCPP="$CXXCPP $switch"
7060-          fi
7061-          ac_success=yes
7062-          break
7063-        fi
7064-      done
7065-      if test x$ac_success = xyes; then
7066-        break
7067-      fi
7068-    done
7069-  fi
7070-  ac_ext=c
7071-ac_cpp='$CPP $CPPFLAGS'
7072-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
7073-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
7074-ac_compiler_gnu=$ac_cv_c_compiler_gnu
7075-
7076-  if test x$ax_cxx_compile_cxx14_required = xtrue; then
7077-    if test x$ac_success = xno; then
7078-      as_fn_error $? "*** A compiler with support for C++14 language features is required." "$LINENO" 5
7079-    fi
7080-  fi
7081-  if test x$ac_success = xno; then
7082-    HAVE_CXX14=0
7083-    { $as_echo "$as_me:${as_lineno-$LINENO}: No compiler with C++14 support was found" >&5
7084-$as_echo "$as_me: No compiler with C++14 support was found" >&6;}
7085-  else
7086-    HAVE_CXX14=1
7087-
7088-$as_echo "#define HAVE_CXX14 1" >>confdefs.h
7089-
7090-  fi
7091-
7092-
7093-  fi
7094-  if test "x${HAVE_CXX14}" = "x1" -o "x${HAVE_CXX17}" = "x1"; then
7095-
7096-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wall" >&5
7097-$as_echo_n "checking whether compiler supports -Wall... " >&6; }
7098-T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
7099-T_APPEND_V=-Wall
7100-  if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
7101-  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}"
7102-else
7103-  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}"
7104-fi
7105-
7106-
7107-if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
7108-  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
7109-else
7110-  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
7111-fi
7112-
7113-ac_ext=cpp
7114-ac_cpp='$CXXCPP $CPPFLAGS'
7115-ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
7116-ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
7117-ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
7118-
7119-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
7120-/* end confdefs.h.  */
7121-
7122-
7123-int
7124-main ()
7125-{
7126-
7127-    return 0;
7128-
7129-  ;
7130-  return 0;
7131-}
7132-_ACEOF
7133-if ac_fn_cxx_try_compile "$LINENO"; then :
7134-  je_cv_cxxflags_added=-Wall
7135-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
7136-$as_echo "yes" >&6; }
7137-else
7138-  je_cv_cxxflags_added=
7139-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
7140-$as_echo "no" >&6; }
7141-              CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"
7142-
7143-fi
7144-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
7145-ac_ext=c
7146-ac_cpp='$CPP $CPPFLAGS'
7147-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
7148-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
7149-ac_compiler_gnu=$ac_cv_c_compiler_gnu
7150-
7151-if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
7152-  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
7153-else
7154-  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
7155-fi
7156-
7157-
7158-
7159-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wextra" >&5
7160-$as_echo_n "checking whether compiler supports -Wextra... " >&6; }
7161-T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
7162-T_APPEND_V=-Wextra
7163-  if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
7164-  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}"
7165-else
7166-  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}"
7167-fi
7168-
7169-
7170-if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
7171-  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
7172-else
7173-  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
7174-fi
7175-
7176-ac_ext=cpp
7177-ac_cpp='$CXXCPP $CPPFLAGS'
7178-ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
7179-ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
7180-ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
7181-
7182-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
7183-/* end confdefs.h.  */
7184-
7185-
7186-int
7187-main ()
7188-{
7189-
7190-    return 0;
7191-
7192-  ;
7193-  return 0;
7194-}
7195-_ACEOF
7196-if ac_fn_cxx_try_compile "$LINENO"; then :
7197-  je_cv_cxxflags_added=-Wextra
7198-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
7199-$as_echo "yes" >&6; }
7200-else
7201-  je_cv_cxxflags_added=
7202-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
7203-$as_echo "no" >&6; }
7204-              CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"
7205-
7206-fi
7207-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
7208-ac_ext=c
7209-ac_cpp='$CPP $CPPFLAGS'
7210-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
7211-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
7212-ac_compiler_gnu=$ac_cv_c_compiler_gnu
7213-
7214-if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
7215-  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
7216-else
7217-  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
7218-fi
7219-
7220-
7221-
7222-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -g3" >&5
7223-$as_echo_n "checking whether compiler supports -g3... " >&6; }
7224-T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
7225-T_APPEND_V=-g3
7226-  if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
7227-  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}"
7228-else
7229-  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}"
7230-fi
7231-
7232-
7233-if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
7234-  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
7235-else
7236-  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
7237-fi
7238-
7239-ac_ext=cpp
7240-ac_cpp='$CXXCPP $CPPFLAGS'
7241-ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
7242-ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
7243-ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
7244-
7245-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
7246-/* end confdefs.h.  */
7247-
7248-
7249-int
7250-main ()
7251-{
7252-
7253-    return 0;
7254-
7255-  ;
7256-  return 0;
7257-}
7258-_ACEOF
7259-if ac_fn_cxx_try_compile "$LINENO"; then :
7260-  je_cv_cxxflags_added=-g3
7261-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
7262-$as_echo "yes" >&6; }
7263-else
7264-  je_cv_cxxflags_added=
7265-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
7266-$as_echo "no" >&6; }
7267-              CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"
7268-
7269-fi
7270-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
7271-ac_ext=c
7272-ac_cpp='$CPP $CPPFLAGS'
7273-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
7274-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
7275-ac_compiler_gnu=$ac_cv_c_compiler_gnu
7276-
7277-if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
7278-  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
7279-else
7280-  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
7281-fi
7282-
7283-
7284-
7285-    SAVED_LIBS="${LIBS}"
7286-    T_APPEND_V=-lstdc++
7287-  if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
7288-  LIBS="${LIBS}${T_APPEND_V}"
7289-else
7290-  LIBS="${LIBS} ${T_APPEND_V}"
7291-fi
7292-
7293-
7294-
7295-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether libstdc++ linkage is compilable" >&5
7296-$as_echo_n "checking whether libstdc++ linkage is compilable... " >&6; }
7297-if ${je_cv_libstdcxx+:} false; then :
7298-  $as_echo_n "(cached) " >&6
7299-else
7300-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
7301-/* end confdefs.h.  */
7302-
7303-#include <stdlib.h>
7304-
7305-int
7306-main ()
7307-{
7308-
7309-	int *arr = (int *)malloc(sizeof(int) * 42);
7310-	if (arr == NULL)
7311-		return 1;
7312-
7313-  ;
7314-  return 0;
7315-}
7316-_ACEOF
7317-if ac_fn_c_try_link "$LINENO"; then :
7318-  je_cv_libstdcxx=yes
7319-else
7320-  je_cv_libstdcxx=no
7321-fi
7322-rm -f core conftest.err conftest.$ac_objext \
7323-    conftest$ac_exeext conftest.$ac_ext
7324-fi
7325-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_libstdcxx" >&5
7326-$as_echo "$je_cv_libstdcxx" >&6; }
7327-
7328-    if test "x${je_cv_libstdcxx}" = "xno" ; then
7329-      LIBS="${SAVED_LIBS}"
7330-    fi
7331-  else
7332-    enable_cxx="0"
7333-  fi
7334-fi
7335-if test "x$enable_cxx" = "x1"; then
7336-
7337-$as_echo "#define JEMALLOC_ENABLE_CXX  " >>confdefs.h
7338-
7339-fi
7340-
7341-
7342-
7343-
7344-
7345-
7346-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
7347-$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
7348-if ${ac_cv_path_GREP+:} false; then :
7349-  $as_echo_n "(cached) " >&6
7350-else
7351-  if test -z "$GREP"; then
7352-  ac_path_GREP_found=false
7353-  # Loop through the user's path and test for each of PROGNAME-LIST
7354-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
7355-for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
7356-do
7357-  IFS=$as_save_IFS
7358-  test -z "$as_dir" && as_dir=.
7359-    for ac_prog in grep ggrep; do
7360-    for ac_exec_ext in '' $ac_executable_extensions; do
7361-      ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
7362-      as_fn_executable_p "$ac_path_GREP" || continue
7363-# Check for GNU ac_path_GREP and select it if it is found.
7364-  # Check for GNU $ac_path_GREP
7365-case `"$ac_path_GREP" --version 2>&1` in
7366-*GNU*)
7367-  ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
7368-*)
7369-  ac_count=0
7370-  $as_echo_n 0123456789 >"conftest.in"
7371-  while :
7372-  do
7373-    cat "conftest.in" "conftest.in" >"conftest.tmp"
7374-    mv "conftest.tmp" "conftest.in"
7375-    cp "conftest.in" "conftest.nl"
7376-    $as_echo 'GREP' >> "conftest.nl"
7377-    "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
7378-    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
7379-    as_fn_arith $ac_count + 1 && ac_count=$as_val
7380-    if test $ac_count -gt ${ac_path_GREP_max-0}; then
7381-      # Best one so far, save it but keep looking for a better one
7382-      ac_cv_path_GREP="$ac_path_GREP"
7383-      ac_path_GREP_max=$ac_count
7384-    fi
7385-    # 10*(2^10) chars as input seems more than enough
7386-    test $ac_count -gt 10 && break
7387-  done
7388-  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
7389-esac
7390-
7391-      $ac_path_GREP_found && break 3
7392-    done
7393-  done
7394-  done
7395-IFS=$as_save_IFS
7396-  if test -z "$ac_cv_path_GREP"; then
7397-    as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
7398-  fi
7399-else
7400-  ac_cv_path_GREP=$GREP
7401-fi
7402-
7403-fi
7404-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5
7405-$as_echo "$ac_cv_path_GREP" >&6; }
7406- GREP="$ac_cv_path_GREP"
7407-
7408-
7409-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
7410-$as_echo_n "checking for egrep... " >&6; }
7411-if ${ac_cv_path_EGREP+:} false; then :
7412-  $as_echo_n "(cached) " >&6
7413-else
7414-  if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
7415-   then ac_cv_path_EGREP="$GREP -E"
7416-   else
7417-     if test -z "$EGREP"; then
7418-  ac_path_EGREP_found=false
7419-  # Loop through the user's path and test for each of PROGNAME-LIST
7420-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
7421-for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
7422-do
7423-  IFS=$as_save_IFS
7424-  test -z "$as_dir" && as_dir=.
7425-    for ac_prog in egrep; do
7426-    for ac_exec_ext in '' $ac_executable_extensions; do
7427-      ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
7428-      as_fn_executable_p "$ac_path_EGREP" || continue
7429-# Check for GNU ac_path_EGREP and select it if it is found.
7430-  # Check for GNU $ac_path_EGREP
7431-case `"$ac_path_EGREP" --version 2>&1` in
7432-*GNU*)
7433-  ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
7434-*)
7435-  ac_count=0
7436-  $as_echo_n 0123456789 >"conftest.in"
7437-  while :
7438-  do
7439-    cat "conftest.in" "conftest.in" >"conftest.tmp"
7440-    mv "conftest.tmp" "conftest.in"
7441-    cp "conftest.in" "conftest.nl"
7442-    $as_echo 'EGREP' >> "conftest.nl"
7443-    "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
7444-    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
7445-    as_fn_arith $ac_count + 1 && ac_count=$as_val
7446-    if test $ac_count -gt ${ac_path_EGREP_max-0}; then
7447-      # Best one so far, save it but keep looking for a better one
7448-      ac_cv_path_EGREP="$ac_path_EGREP"
7449-      ac_path_EGREP_max=$ac_count
7450-    fi
7451-    # 10*(2^10) chars as input seems more than enough
7452-    test $ac_count -gt 10 && break
7453-  done
7454-  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
7455-esac
7456-
7457-      $ac_path_EGREP_found && break 3
7458-    done
7459-  done
7460-  done
7461-IFS=$as_save_IFS
7462-  if test -z "$ac_cv_path_EGREP"; then
7463-    as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
7464-  fi
7465-else
7466-  ac_cv_path_EGREP=$EGREP
7467-fi
7468-
7469-   fi
7470-fi
7471-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5
7472-$as_echo "$ac_cv_path_EGREP" >&6; }
7473- EGREP="$ac_cv_path_EGREP"
7474-
7475-
7476-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
7477-$as_echo_n "checking for ANSI C header files... " >&6; }
7478-if ${ac_cv_header_stdc+:} false; then :
7479-  $as_echo_n "(cached) " >&6
7480-else
7481-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
7482-/* end confdefs.h.  */
7483-#include <stdlib.h>
7484-#include <stdarg.h>
7485-#include <string.h>
7486-#include <float.h>
7487-
7488-int
7489-main ()
7490-{
7491-
7492-  ;
7493-  return 0;
7494-}
7495-_ACEOF
7496-if ac_fn_c_try_compile "$LINENO"; then :
7497-  ac_cv_header_stdc=yes
7498-else
7499-  ac_cv_header_stdc=no
7500-fi
7501-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
7502-
7503-if test $ac_cv_header_stdc = yes; then
7504-  # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
7505-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
7506-/* end confdefs.h.  */
7507-#include <string.h>
7508-
7509-_ACEOF
7510-if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
7511-  $EGREP "memchr" >/dev/null 2>&1; then :
7512-
7513-else
7514-  ac_cv_header_stdc=no
7515-fi
7516-rm -f conftest*
7517-
7518-fi
7519-
7520-if test $ac_cv_header_stdc = yes; then
7521-  # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
7522-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
7523-/* end confdefs.h.  */
7524-#include <stdlib.h>
7525-
7526-_ACEOF
7527-if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
7528-  $EGREP "free" >/dev/null 2>&1; then :
7529-
7530-else
7531-  ac_cv_header_stdc=no
7532-fi
7533-rm -f conftest*
7534-
7535-fi
7536-
7537-if test $ac_cv_header_stdc = yes; then
7538-  # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
7539-  if test "$cross_compiling" = yes; then :
7540-  :
7541-else
7542-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
7543-/* end confdefs.h.  */
7544-#include <ctype.h>
7545-#include <stdlib.h>
7546-#if ((' ' & 0x0FF) == 0x020)
7547-# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
7548-# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
7549-#else
7550-# define ISLOWER(c) \
7551-		   (('a' <= (c) && (c) <= 'i') \
7552-		     || ('j' <= (c) && (c) <= 'r') \
7553-		     || ('s' <= (c) && (c) <= 'z'))
7554-# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
7555-#endif
7556-
7557-#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
7558-int
7559-main ()
7560-{
7561-  int i;
7562-  for (i = 0; i < 256; i++)
7563-    if (XOR (islower (i), ISLOWER (i))
7564-	|| toupper (i) != TOUPPER (i))
7565-      return 2;
7566-  return 0;
7567-}
7568-_ACEOF
7569-if ac_fn_c_try_run "$LINENO"; then :
7570-
7571-else
7572-  ac_cv_header_stdc=no
7573-fi
7574-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
7575-  conftest.$ac_objext conftest.beam conftest.$ac_ext
7576-fi
7577-
7578-fi
7579-fi
7580-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5
7581-$as_echo "$ac_cv_header_stdc" >&6; }
7582-if test $ac_cv_header_stdc = yes; then
7583-
7584-$as_echo "#define STDC_HEADERS 1" >>confdefs.h
7585-
7586-fi
7587-
7588-# On IRIX 5.3, sys/types and inttypes.h are conflicting.
7589-for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
7590-		  inttypes.h stdint.h unistd.h
7591-do :
7592-  as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
7593-ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default
7594-"
7595-if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
7596-  cat >>confdefs.h <<_ACEOF
7597-#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
7598-_ACEOF
7599-
7600-fi
7601-
7602-done
7603-
7604-
7605- { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5
7606-$as_echo_n "checking whether byte ordering is bigendian... " >&6; }
7607-if ${ac_cv_c_bigendian+:} false; then :
7608-  $as_echo_n "(cached) " >&6
7609-else
7610-  ac_cv_c_bigendian=unknown
7611-    # See if we're dealing with a universal compiler.
7612-    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
7613-/* end confdefs.h.  */
7614-#ifndef __APPLE_CC__
7615-	       not a universal capable compiler
7616-	     #endif
7617-	     typedef int dummy;
7618-
7619-_ACEOF
7620-if ac_fn_c_try_compile "$LINENO"; then :
7621-
7622-	# Check for potential -arch flags.  It is not universal unless
7623-	# there are at least two -arch flags with different values.
7624-	ac_arch=
7625-	ac_prev=
7626-	for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do
7627-	 if test -n "$ac_prev"; then
7628-	   case $ac_word in
7629-	     i?86 | x86_64 | ppc | ppc64)
7630-	       if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then
7631-		 ac_arch=$ac_word
7632-	       else
7633-		 ac_cv_c_bigendian=universal
7634-		 break
7635-	       fi
7636-	       ;;
7637-	   esac
7638-	   ac_prev=
7639-	 elif test "x$ac_word" = "x-arch"; then
7640-	   ac_prev=arch
7641-	 fi
7642-       done
7643-fi
7644-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
7645-    if test $ac_cv_c_bigendian = unknown; then
7646-      # See if sys/param.h defines the BYTE_ORDER macro.
7647-      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
7648-/* end confdefs.h.  */
7649-#include <sys/types.h>
7650-	     #include <sys/param.h>
7651-
7652-int
7653-main ()
7654-{
7655-#if ! (defined BYTE_ORDER && defined BIG_ENDIAN \
7656-		     && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \
7657-		     && LITTLE_ENDIAN)
7658-	      bogus endian macros
7659-	     #endif
7660-
7661-  ;
7662-  return 0;
7663-}
7664-_ACEOF
7665-if ac_fn_c_try_compile "$LINENO"; then :
7666-  # It does; now see whether it defined to BIG_ENDIAN or not.
7667-	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
7668-/* end confdefs.h.  */
7669-#include <sys/types.h>
7670-		#include <sys/param.h>
7671-
7672-int
7673-main ()
7674-{
7675-#if BYTE_ORDER != BIG_ENDIAN
7676-		 not big endian
7677-		#endif
7678-
7679-  ;
7680-  return 0;
7681-}
7682-_ACEOF
7683-if ac_fn_c_try_compile "$LINENO"; then :
7684-  ac_cv_c_bigendian=yes
7685-else
7686-  ac_cv_c_bigendian=no
7687-fi
7688-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
7689-fi
7690-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
7691-    fi
7692-    if test $ac_cv_c_bigendian = unknown; then
7693-      # See if <limits.h> defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris).
7694-      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
7695-/* end confdefs.h.  */
7696-#include <limits.h>
7697-
7698-int
7699-main ()
7700-{
7701-#if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN)
7702-	      bogus endian macros
7703-	     #endif
7704-
7705-  ;
7706-  return 0;
7707-}
7708-_ACEOF
7709-if ac_fn_c_try_compile "$LINENO"; then :
7710-  # It does; now see whether it defined to _BIG_ENDIAN or not.
7711-	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
7712-/* end confdefs.h.  */
7713-#include <limits.h>
7714-
7715-int
7716-main ()
7717-{
7718-#ifndef _BIG_ENDIAN
7719-		 not big endian
7720-		#endif
7721-
7722-  ;
7723-  return 0;
7724-}
7725-_ACEOF
7726-if ac_fn_c_try_compile "$LINENO"; then :
7727-  ac_cv_c_bigendian=yes
7728-else
7729-  ac_cv_c_bigendian=no
7730-fi
7731-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
7732-fi
7733-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
7734-    fi
7735-    if test $ac_cv_c_bigendian = unknown; then
7736-      # Compile a test program.
7737-      if test "$cross_compiling" = yes; then :
7738-  # Try to guess by grepping values from an object file.
7739-	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
7740-/* end confdefs.h.  */
7741-short int ascii_mm[] =
7742-		  { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 };
7743-		short int ascii_ii[] =
7744-		  { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 };
7745-		int use_ascii (int i) {
7746-		  return ascii_mm[i] + ascii_ii[i];
7747-		}
7748-		short int ebcdic_ii[] =
7749-		  { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 };
7750-		short int ebcdic_mm[] =
7751-		  { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 };
7752-		int use_ebcdic (int i) {
7753-		  return ebcdic_mm[i] + ebcdic_ii[i];
7754-		}
7755-		extern int foo;
7756-
7757-int
7758-main ()
7759-{
7760-return use_ascii (foo) == use_ebcdic (foo);
7761-  ;
7762-  return 0;
7763-}
7764-_ACEOF
7765-if ac_fn_c_try_compile "$LINENO"; then :
7766-  if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then
7767-	      ac_cv_c_bigendian=yes
7768-	    fi
7769-	    if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then
7770-	      if test "$ac_cv_c_bigendian" = unknown; then
7771-		ac_cv_c_bigendian=no
7772-	      else
7773-		# finding both strings is unlikely to happen, but who knows?
7774-		ac_cv_c_bigendian=unknown
7775-	      fi
7776-	    fi
7777-fi
7778-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
7779-else
7780-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
7781-/* end confdefs.h.  */
7782-$ac_includes_default
7783-int
7784-main ()
7785-{
7786-
7787-	     /* Are we little or big endian?  From Harbison&Steele.  */
7788-	     union
7789-	     {
7790-	       long int l;
7791-	       char c[sizeof (long int)];
7792-	     } u;
7793-	     u.l = 1;
7794-	     return u.c[sizeof (long int) - 1] == 1;
7795-
7796-  ;
7797-  return 0;
7798-}
7799-_ACEOF
7800-if ac_fn_c_try_run "$LINENO"; then :
7801-  ac_cv_c_bigendian=no
7802-else
7803-  ac_cv_c_bigendian=yes
7804-fi
7805-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
7806-  conftest.$ac_objext conftest.beam conftest.$ac_ext
7807-fi
7808-
7809-    fi
7810-fi
7811-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5
7812-$as_echo "$ac_cv_c_bigendian" >&6; }
7813- case $ac_cv_c_bigendian in #(
7814-   yes)
7815-     ac_cv_big_endian=1;; #(
7816-   no)
7817-     ac_cv_big_endian=0 ;; #(
7818-   universal)
7819-
7820-$as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h
7821-
7822-     ;; #(
7823-   *)
7824-     as_fn_error $? "unknown endianness
7825- presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;;
7826- esac
7827-
7828-if test "x${ac_cv_big_endian}" = "x1" ; then
7829-
7830-cat >>confdefs.h <<_ACEOF
7831-#define JEMALLOC_BIG_ENDIAN
7832-_ACEOF
7833-
7834-fi
7835-
7836-if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then
7837-  T_APPEND_V=-I${srcdir}/include/msvc_compat/C99
7838-  if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
7839-  CPPFLAGS="${CPPFLAGS}${T_APPEND_V}"
7840-else
7841-  CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}"
7842-fi
7843-
7844-
7845-fi
7846-
7847-if test "x${je_cv_msvc}" = "xyes" ; then
7848-  LG_SIZEOF_PTR=LG_SIZEOF_PTR_WIN
7849-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit" >&5
7850-$as_echo "Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit" >&6; }
7851-else
7852-  # The cast to long int works around a bug in the HP C Compiler
7853-# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
7854-# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
7855-# This bug is HP SR number 8606223364.
7856-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of void *" >&5
7857-$as_echo_n "checking size of void *... " >&6; }
7858-if ${ac_cv_sizeof_void_p+:} false; then :
7859-  $as_echo_n "(cached) " >&6
7860-else
7861-  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (void *))" "ac_cv_sizeof_void_p"        "$ac_includes_default"; then :
7862-
7863-else
7864-  if test "$ac_cv_type_void_p" = yes; then
7865-     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
7866-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
7867-as_fn_error 77 "cannot compute sizeof (void *)
7868-See \`config.log' for more details" "$LINENO" 5; }
7869-   else
7870-     ac_cv_sizeof_void_p=0
7871-   fi
7872-fi
7873-
7874-fi
7875-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_void_p" >&5
7876-$as_echo "$ac_cv_sizeof_void_p" >&6; }
7877-
7878-
7879-
7880-cat >>confdefs.h <<_ACEOF
7881-#define SIZEOF_VOID_P $ac_cv_sizeof_void_p
7882-_ACEOF
7883-
7884-
7885-  if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
7886-    LG_SIZEOF_PTR=3
7887-  elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then
7888-    LG_SIZEOF_PTR=2
7889-  else
7890-    as_fn_error $? "Unsupported pointer size: ${ac_cv_sizeof_void_p}" "$LINENO" 5
7891-  fi
7892-fi
7893-
7894-cat >>confdefs.h <<_ACEOF
7895-#define LG_SIZEOF_PTR $LG_SIZEOF_PTR
7896-_ACEOF
7897-
7898-
7899-# The cast to long int works around a bug in the HP C Compiler
7900-# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
7901-# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
7902-# This bug is HP SR number 8606223364.
7903-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of int" >&5
7904-$as_echo_n "checking size of int... " >&6; }
7905-if ${ac_cv_sizeof_int+:} false; then :
7906-  $as_echo_n "(cached) " >&6
7907-else
7908-  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (int))" "ac_cv_sizeof_int"        "$ac_includes_default"; then :
7909-
7910-else
7911-  if test "$ac_cv_type_int" = yes; then
7912-     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
7913-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
7914-as_fn_error 77 "cannot compute sizeof (int)
7915-See \`config.log' for more details" "$LINENO" 5; }
7916-   else
7917-     ac_cv_sizeof_int=0
7918-   fi
7919-fi
7920-
7921-fi
7922-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_int" >&5
7923-$as_echo "$ac_cv_sizeof_int" >&6; }
7924-
7925-
7926-
7927-cat >>confdefs.h <<_ACEOF
7928-#define SIZEOF_INT $ac_cv_sizeof_int
7929-_ACEOF
7930-
7931-
7932-if test "x${ac_cv_sizeof_int}" = "x8" ; then
7933-  LG_SIZEOF_INT=3
7934-elif test "x${ac_cv_sizeof_int}" = "x4" ; then
7935-  LG_SIZEOF_INT=2
7936-else
7937-  as_fn_error $? "Unsupported int size: ${ac_cv_sizeof_int}" "$LINENO" 5
7938-fi
7939-
7940-cat >>confdefs.h <<_ACEOF
7941-#define LG_SIZEOF_INT $LG_SIZEOF_INT
7942-_ACEOF
7943-
7944-
7945-# The cast to long int works around a bug in the HP C Compiler
7946-# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
7947-# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
7948-# This bug is HP SR number 8606223364.
7949-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long" >&5
7950-$as_echo_n "checking size of long... " >&6; }
7951-if ${ac_cv_sizeof_long+:} false; then :
7952-  $as_echo_n "(cached) " >&6
7953-else
7954-  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long))" "ac_cv_sizeof_long"        "$ac_includes_default"; then :
7955-
7956-else
7957-  if test "$ac_cv_type_long" = yes; then
7958-     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
7959-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
7960-as_fn_error 77 "cannot compute sizeof (long)
7961-See \`config.log' for more details" "$LINENO" 5; }
7962-   else
7963-     ac_cv_sizeof_long=0
7964-   fi
7965-fi
7966-
7967-fi
7968-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long" >&5
7969-$as_echo "$ac_cv_sizeof_long" >&6; }
7970-
7971-
7972-
7973-cat >>confdefs.h <<_ACEOF
7974-#define SIZEOF_LONG $ac_cv_sizeof_long
7975-_ACEOF
7976-
7977-
7978-if test "x${ac_cv_sizeof_long}" = "x8" ; then
7979-  LG_SIZEOF_LONG=3
7980-elif test "x${ac_cv_sizeof_long}" = "x4" ; then
7981-  LG_SIZEOF_LONG=2
7982-else
7983-  as_fn_error $? "Unsupported long size: ${ac_cv_sizeof_long}" "$LINENO" 5
7984-fi
7985-
7986-cat >>confdefs.h <<_ACEOF
7987-#define LG_SIZEOF_LONG $LG_SIZEOF_LONG
7988-_ACEOF
7989-
7990-
7991-# The cast to long int works around a bug in the HP C Compiler
7992-# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
7993-# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
7994-# This bug is HP SR number 8606223364.
7995-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long long" >&5
7996-$as_echo_n "checking size of long long... " >&6; }
7997-if ${ac_cv_sizeof_long_long+:} false; then :
7998-  $as_echo_n "(cached) " >&6
7999-else
8000-  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long long))" "ac_cv_sizeof_long_long"        "$ac_includes_default"; then :
8001-
8002-else
8003-  if test "$ac_cv_type_long_long" = yes; then
8004-     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
8005-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
8006-as_fn_error 77 "cannot compute sizeof (long long)
8007-See \`config.log' for more details" "$LINENO" 5; }
8008-   else
8009-     ac_cv_sizeof_long_long=0
8010-   fi
8011-fi
8012-
8013-fi
8014-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long_long" >&5
8015-$as_echo "$ac_cv_sizeof_long_long" >&6; }
8016-
8017-
8018-
8019-cat >>confdefs.h <<_ACEOF
8020-#define SIZEOF_LONG_LONG $ac_cv_sizeof_long_long
8021-_ACEOF
8022-
8023-
8024-if test "x${ac_cv_sizeof_long_long}" = "x8" ; then
8025-  LG_SIZEOF_LONG_LONG=3
8026-elif test "x${ac_cv_sizeof_long_long}" = "x4" ; then
8027-  LG_SIZEOF_LONG_LONG=2
8028-else
8029-  as_fn_error $? "Unsupported long long size: ${ac_cv_sizeof_long_long}" "$LINENO" 5
8030-fi
8031-
8032-cat >>confdefs.h <<_ACEOF
8033-#define LG_SIZEOF_LONG_LONG $LG_SIZEOF_LONG_LONG
8034-_ACEOF
8035-
8036-
8037-# The cast to long int works around a bug in the HP C Compiler
8038-# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
8039-# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
8040-# This bug is HP SR number 8606223364.
8041-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of intmax_t" >&5
8042-$as_echo_n "checking size of intmax_t... " >&6; }
8043-if ${ac_cv_sizeof_intmax_t+:} false; then :
8044-  $as_echo_n "(cached) " >&6
8045-else
8046-  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (intmax_t))" "ac_cv_sizeof_intmax_t"        "$ac_includes_default"; then :
8047-
8048-else
8049-  if test "$ac_cv_type_intmax_t" = yes; then
8050-     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
8051-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
8052-as_fn_error 77 "cannot compute sizeof (intmax_t)
8053-See \`config.log' for more details" "$LINENO" 5; }
8054-   else
8055-     ac_cv_sizeof_intmax_t=0
8056-   fi
8057-fi
8058-
8059-fi
8060-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_intmax_t" >&5
8061-$as_echo "$ac_cv_sizeof_intmax_t" >&6; }
8062-
8063-
8064-
8065-cat >>confdefs.h <<_ACEOF
8066-#define SIZEOF_INTMAX_T $ac_cv_sizeof_intmax_t
8067-_ACEOF
8068-
8069-
8070-if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then
8071-  LG_SIZEOF_INTMAX_T=4
8072-elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then
8073-  LG_SIZEOF_INTMAX_T=3
8074-elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then
8075-  LG_SIZEOF_INTMAX_T=2
8076-else
8077-  as_fn_error $? "Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}" "$LINENO" 5
8078-fi
8079-
8080-cat >>confdefs.h <<_ACEOF
8081-#define LG_SIZEOF_INTMAX_T $LG_SIZEOF_INTMAX_T
8082-_ACEOF
8083-
8084-
8085-# Make sure we can run config.sub.
8086-$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 ||
8087-  as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5
8088-
8089-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5
8090-$as_echo_n "checking build system type... " >&6; }
8091-if ${ac_cv_build+:} false; then :
8092-  $as_echo_n "(cached) " >&6
8093-else
8094-  ac_build_alias=$build_alias
8095-test "x$ac_build_alias" = x &&
8096-  ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"`
8097-test "x$ac_build_alias" = x &&
8098-  as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5
8099-ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` ||
8100-  as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5
8101-
8102-fi
8103-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5
8104-$as_echo "$ac_cv_build" >&6; }
8105-case $ac_cv_build in
8106-*-*-*) ;;
8107-*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;;
8108-esac
8109-build=$ac_cv_build
8110-ac_save_IFS=$IFS; IFS='-'
8111-set x $ac_cv_build
8112-shift
8113-build_cpu=$1
8114-build_vendor=$2
8115-shift; shift
8116-# Remember, the first character of IFS is used to create $*,
8117-# except with old shells:
8118-build_os=$*
8119-IFS=$ac_save_IFS
8120-case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac
8121-
8122-
8123-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5
8124-$as_echo_n "checking host system type... " >&6; }
8125-if ${ac_cv_host+:} false; then :
8126-  $as_echo_n "(cached) " >&6
8127-else
8128-  if test "x$host_alias" = x; then
8129-  ac_cv_host=$ac_cv_build
8130-else
8131-  ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` ||
8132-    as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5
8133-fi
8134-
8135-fi
8136-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5
8137-$as_echo "$ac_cv_host" >&6; }
8138-case $ac_cv_host in
8139-*-*-*) ;;
8140-*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;;
8141-esac
8142-host=$ac_cv_host
8143-ac_save_IFS=$IFS; IFS='-'
8144-set x $ac_cv_host
8145-shift
8146-host_cpu=$1
8147-host_vendor=$2
8148-shift; shift
8149-# Remember, the first character of IFS is used to create $*,
8150-# except with old shells:
8151-host_os=$*
8152-IFS=$ac_save_IFS
8153-case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
8154-
8155-
8156-CPU_SPINWAIT=""
8157-case "${host_cpu}" in
8158-  i686|x86_64)
8159-	HAVE_CPU_SPINWAIT=1
8160-	if test "x${je_cv_msvc}" = "xyes" ; then
8161-	    if ${je_cv_pause_msvc+:} false; then :
8162-  $as_echo_n "(cached) " >&6
8163-else
8164-
8165-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pause instruction MSVC is compilable" >&5
8166-$as_echo_n "checking whether pause instruction MSVC is compilable... " >&6; }
8167-if ${je_cv_pause_msvc+:} false; then :
8168-  $as_echo_n "(cached) " >&6
8169-else
8170-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
8171-/* end confdefs.h.  */
8172-
8173-int
8174-main ()
8175-{
8176-_mm_pause(); return 0;
8177-  ;
8178-  return 0;
8179-}
8180-_ACEOF
8181-if ac_fn_c_try_link "$LINENO"; then :
8182-  je_cv_pause_msvc=yes
8183-else
8184-  je_cv_pause_msvc=no
8185-fi
8186-rm -f core conftest.err conftest.$ac_objext \
8187-    conftest$ac_exeext conftest.$ac_ext
8188-fi
8189-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pause_msvc" >&5
8190-$as_echo "$je_cv_pause_msvc" >&6; }
8191-
8192-fi
8193-
8194-	    if test "x${je_cv_pause_msvc}" = "xyes" ; then
8195-		CPU_SPINWAIT='_mm_pause()'
8196-	    fi
8197-	else
8198-	    if ${je_cv_pause+:} false; then :
8199-  $as_echo_n "(cached) " >&6
8200-else
8201-
8202-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pause instruction is compilable" >&5
8203-$as_echo_n "checking whether pause instruction is compilable... " >&6; }
8204-if ${je_cv_pause+:} false; then :
8205-  $as_echo_n "(cached) " >&6
8206-else
8207-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
8208-/* end confdefs.h.  */
8209-
8210-int
8211-main ()
8212-{
8213-__asm__ volatile("pause"); return 0;
8214-  ;
8215-  return 0;
8216-}
8217-_ACEOF
8218-if ac_fn_c_try_link "$LINENO"; then :
8219-  je_cv_pause=yes
8220-else
8221-  je_cv_pause=no
8222-fi
8223-rm -f core conftest.err conftest.$ac_objext \
8224-    conftest$ac_exeext conftest.$ac_ext
8225-fi
8226-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pause" >&5
8227-$as_echo "$je_cv_pause" >&6; }
8228-
8229-fi
8230-
8231-	    if test "x${je_cv_pause}" = "xyes" ; then
8232-		CPU_SPINWAIT='__asm__ volatile("pause")'
8233-	    fi
8234-	fi
8235-	;;
8236-  aarch64|arm*)
8237-	HAVE_CPU_SPINWAIT=1
8238-		if ${je_cv_isb+:} false; then :
8239-  $as_echo_n "(cached) " >&6
8240-else
8241-
8242-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether isb instruction is compilable" >&5
8243-$as_echo_n "checking whether isb instruction is compilable... " >&6; }
8244-if ${je_cv_isb+:} false; then :
8245-  $as_echo_n "(cached) " >&6
8246-else
8247-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
8248-/* end confdefs.h.  */
8249-
8250-int
8251-main ()
8252-{
8253-__asm__ volatile("isb"); return 0;
8254-  ;
8255-  return 0;
8256-}
8257-_ACEOF
8258-if ac_fn_c_try_link "$LINENO"; then :
8259-  je_cv_isb=yes
8260-else
8261-  je_cv_isb=no
8262-fi
8263-rm -f core conftest.err conftest.$ac_objext \
8264-    conftest$ac_exeext conftest.$ac_ext
8265-fi
8266-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_isb" >&5
8267-$as_echo "$je_cv_isb" >&6; }
8268-
8269-fi
8270-
8271-	if test "x${je_cv_isb}" = "xyes" ; then
8272-	    CPU_SPINWAIT='__asm__ volatile("isb")'
8273-	fi
8274-	;;
8275-  *)
8276-	HAVE_CPU_SPINWAIT=0
8277-	;;
8278-esac
8279-
8280-cat >>confdefs.h <<_ACEOF
8281-#define HAVE_CPU_SPINWAIT $HAVE_CPU_SPINWAIT
8282-_ACEOF
8283-
8284-
8285-cat >>confdefs.h <<_ACEOF
8286-#define CPU_SPINWAIT $CPU_SPINWAIT
8287-_ACEOF
8288-
8289-
8290-
8291-# Check whether --with-lg_vaddr was given.
8292-if test "${with_lg_vaddr+set}" = set; then :
8293-  withval=$with_lg_vaddr; LG_VADDR="$with_lg_vaddr"
8294-else
8295-  LG_VADDR="detect"
8296-fi
8297-
8298-
8299-case "${host_cpu}" in
8300-  aarch64)
8301-    if test "x$LG_VADDR" = "xdetect"; then
8302-      { $as_echo "$as_me:${as_lineno-$LINENO}: checking number of significant virtual address bits" >&5
8303-$as_echo_n "checking number of significant virtual address bits... " >&6; }
8304-      if test "x${LG_SIZEOF_PTR}" = "x2" ; then
8305-        #aarch64 ILP32
8306-        LG_VADDR=32
8307-      else
8308-        #aarch64 LP64
8309-        LG_VADDR=48
8310-      fi
8311-      { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LG_VADDR" >&5
8312-$as_echo "$LG_VADDR" >&6; }
8313-    fi
8314-    ;;
8315-  x86_64)
8316-    if test "x$LG_VADDR" = "xdetect"; then
8317-      { $as_echo "$as_me:${as_lineno-$LINENO}: checking number of significant virtual address bits" >&5
8318-$as_echo_n "checking number of significant virtual address bits... " >&6; }
8319-if ${je_cv_lg_vaddr+:} false; then :
8320-  $as_echo_n "(cached) " >&6
8321-else
8322-  if test "$cross_compiling" = yes; then :
8323-  je_cv_lg_vaddr=57
8324-else
8325-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
8326-/* end confdefs.h.  */
8327-
8328-#include <stdio.h>
8329-#ifdef _WIN32
8330-#include <limits.h>
8331-#include <intrin.h>
8332-typedef unsigned __int32 uint32_t;
8333-#else
8334-#include <stdint.h>
8335-#endif
8336-
8337-int
8338-main ()
8339-{
8340-
8341-	uint32_t r[4];
8342-	uint32_t eax_in = 0x80000008U;
8343-#ifdef _WIN32
8344-	__cpuid((int *)r, (int)eax_in);
8345-#else
8346-	asm volatile ("cpuid"
8347-	    : "=a" (r[0]), "=b" (r[1]), "=c" (r[2]), "=d" (r[3])
8348-	    : "a" (eax_in), "c" (0)
8349-	);
8350-#endif
8351-	uint32_t eax_out = r[0];
8352-	uint32_t vaddr = ((eax_out & 0x0000ff00U) >> 8);
8353-	FILE *f = fopen("conftest.out", "w");
8354-	if (f == NULL) {
8355-		return 1;
8356-	}
8357-	if (vaddr > (sizeof(void *) << 3)) {
8358-		vaddr = sizeof(void *) << 3;
8359-	}
8360-	fprintf(f, "%u", vaddr);
8361-	fclose(f);
8362-	return 0;
8363-
8364-  ;
8365-  return 0;
8366-}
8367-_ACEOF
8368-if ac_fn_c_try_run "$LINENO"; then :
8369-  je_cv_lg_vaddr=`cat conftest.out`
8370-else
8371-  je_cv_lg_vaddr=error
8372-fi
8373-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
8374-  conftest.$ac_objext conftest.beam conftest.$ac_ext
8375-fi
8376-
8377-fi
8378-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_lg_vaddr" >&5
8379-$as_echo "$je_cv_lg_vaddr" >&6; }
8380-      if test "x${je_cv_lg_vaddr}" != "x" ; then
8381-        LG_VADDR="${je_cv_lg_vaddr}"
8382-      fi
8383-      if test "x${LG_VADDR}" != "xerror" ; then
8384-
8385-cat >>confdefs.h <<_ACEOF
8386-#define LG_VADDR $LG_VADDR
8387-_ACEOF
8388-
8389-      else
8390-        as_fn_error $? "cannot determine number of significant virtual address bits" "$LINENO" 5
8391-      fi
8392-    fi
8393-    ;;
8394-  *)
8395-    if test "x$LG_VADDR" = "xdetect"; then
8396-      { $as_echo "$as_me:${as_lineno-$LINENO}: checking number of significant virtual address bits" >&5
8397-$as_echo_n "checking number of significant virtual address bits... " >&6; }
8398-      if test "x${LG_SIZEOF_PTR}" = "x3" ; then
8399-        LG_VADDR=64
8400-      elif test "x${LG_SIZEOF_PTR}" = "x2" ; then
8401-        LG_VADDR=32
8402-      elif test "x${LG_SIZEOF_PTR}" = "xLG_SIZEOF_PTR_WIN" ; then
8403-        LG_VADDR="(1U << (LG_SIZEOF_PTR_WIN+3))"
8404-      else
8405-        as_fn_error $? "Unsupported lg(pointer size): ${LG_SIZEOF_PTR}" "$LINENO" 5
8406-      fi
8407-      { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LG_VADDR" >&5
8408-$as_echo "$LG_VADDR" >&6; }
8409-    fi
8410-    ;;
8411-esac
8412-
8413-cat >>confdefs.h <<_ACEOF
8414-#define LG_VADDR $LG_VADDR
8415-_ACEOF
8416-
8417-
8418-LD_PRELOAD_VAR="LD_PRELOAD"
8419-so="so"
8420-importlib="${so}"
8421-o="$ac_objext"
8422-a="a"
8423-exe="$ac_exeext"
8424-libprefix="lib"
8425-link_whole_archive="0"
8426-DSO_LDFLAGS='-shared -Wl,-soname,$(@F)'
8427-RPATH='-Wl,-rpath,$(1)'
8428-SOREV="${so}.${rev}"
8429-PIC_CFLAGS='-fPIC -DPIC'
8430-CTARGET='-o $@'
8431-LDTARGET='-o $@'
8432-TEST_LD_MODE=
8433-EXTRA_LDFLAGS=
8434-ARFLAGS='crus'
8435-AROUT=' $@'
8436-CC_MM=1
8437-
8438-if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
8439-  TEST_LD_MODE='-dynamic'
8440-fi
8441-
8442-if test "x${je_cv_cray}" = "xyes" ; then
8443-  CC_MM=
8444-fi
8445-
8446-
8447-
8448-
8449-if test -n "$ac_tool_prefix"; then
8450-  # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args.
8451-set dummy ${ac_tool_prefix}ar; ac_word=$2
8452-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
8453-$as_echo_n "checking for $ac_word... " >&6; }
8454-if ${ac_cv_prog_AR+:} false; then :
8455-  $as_echo_n "(cached) " >&6
8456-else
8457-  if test -n "$AR"; then
8458-  ac_cv_prog_AR="$AR" # Let the user override the test.
8459-else
8460-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
8461-for as_dir in $PATH
8462-do
8463-  IFS=$as_save_IFS
8464-  test -z "$as_dir" && as_dir=.
8465-    for ac_exec_ext in '' $ac_executable_extensions; do
8466-  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
8467-    ac_cv_prog_AR="${ac_tool_prefix}ar"
8468-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
8469-    break 2
8470-  fi
8471-done
8472-  done
8473-IFS=$as_save_IFS
8474-
8475-fi
8476-fi
8477-AR=$ac_cv_prog_AR
8478-if test -n "$AR"; then
8479-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5
8480-$as_echo "$AR" >&6; }
8481-else
8482-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
8483-$as_echo "no" >&6; }
8484-fi
8485-
8486-
8487-fi
8488-if test -z "$ac_cv_prog_AR"; then
8489-  ac_ct_AR=$AR
8490-  # Extract the first word of "ar", so it can be a program name with args.
8491-set dummy ar; ac_word=$2
8492-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
8493-$as_echo_n "checking for $ac_word... " >&6; }
8494-if ${ac_cv_prog_ac_ct_AR+:} false; then :
8495-  $as_echo_n "(cached) " >&6
8496-else
8497-  if test -n "$ac_ct_AR"; then
8498-  ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test.
8499-else
8500-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
8501-for as_dir in $PATH
8502-do
8503-  IFS=$as_save_IFS
8504-  test -z "$as_dir" && as_dir=.
8505-    for ac_exec_ext in '' $ac_executable_extensions; do
8506-  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
8507-    ac_cv_prog_ac_ct_AR="ar"
8508-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
8509-    break 2
8510-  fi
8511-done
8512-  done
8513-IFS=$as_save_IFS
8514-
8515-fi
8516-fi
8517-ac_ct_AR=$ac_cv_prog_ac_ct_AR
8518-if test -n "$ac_ct_AR"; then
8519-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5
8520-$as_echo "$ac_ct_AR" >&6; }
8521-else
8522-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
8523-$as_echo "no" >&6; }
8524-fi
8525-
8526-  if test "x$ac_ct_AR" = x; then
8527-    AR=":"
8528-  else
8529-    case $cross_compiling:$ac_tool_warned in
8530-yes:)
8531-{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
8532-$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
8533-ac_tool_warned=yes ;;
8534-esac
8535-    AR=$ac_ct_AR
8536-  fi
8537-else
8538-  AR="$ac_cv_prog_AR"
8539-fi
8540-
8541-
8542-
8543-
8544-
8545-if test -n "$ac_tool_prefix"; then
8546-  # Extract the first word of "${ac_tool_prefix}nm", so it can be a program name with args.
8547-set dummy ${ac_tool_prefix}nm; ac_word=$2
8548-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
8549-$as_echo_n "checking for $ac_word... " >&6; }
8550-if ${ac_cv_prog_NM+:} false; then :
8551-  $as_echo_n "(cached) " >&6
8552-else
8553-  if test -n "$NM"; then
8554-  ac_cv_prog_NM="$NM" # Let the user override the test.
8555-else
8556-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
8557-for as_dir in $PATH
8558-do
8559-  IFS=$as_save_IFS
8560-  test -z "$as_dir" && as_dir=.
8561-    for ac_exec_ext in '' $ac_executable_extensions; do
8562-  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
8563-    ac_cv_prog_NM="${ac_tool_prefix}nm"
8564-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
8565-    break 2
8566-  fi
8567-done
8568-  done
8569-IFS=$as_save_IFS
8570-
8571-fi
8572-fi
8573-NM=$ac_cv_prog_NM
8574-if test -n "$NM"; then
8575-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NM" >&5
8576-$as_echo "$NM" >&6; }
8577-else
8578-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
8579-$as_echo "no" >&6; }
8580-fi
8581-
8582-
8583-fi
8584-if test -z "$ac_cv_prog_NM"; then
8585-  ac_ct_NM=$NM
8586-  # Extract the first word of "nm", so it can be a program name with args.
8587-set dummy nm; ac_word=$2
8588-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
8589-$as_echo_n "checking for $ac_word... " >&6; }
8590-if ${ac_cv_prog_ac_ct_NM+:} false; then :
8591-  $as_echo_n "(cached) " >&6
8592-else
8593-  if test -n "$ac_ct_NM"; then
8594-  ac_cv_prog_ac_ct_NM="$ac_ct_NM" # Let the user override the test.
8595-else
8596-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
8597-for as_dir in $PATH
8598-do
8599-  IFS=$as_save_IFS
8600-  test -z "$as_dir" && as_dir=.
8601-    for ac_exec_ext in '' $ac_executable_extensions; do
8602-  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
8603-    ac_cv_prog_ac_ct_NM="nm"
8604-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
8605-    break 2
8606-  fi
8607-done
8608-  done
8609-IFS=$as_save_IFS
8610-
8611-fi
8612-fi
8613-ac_ct_NM=$ac_cv_prog_ac_ct_NM
8614-if test -n "$ac_ct_NM"; then
8615-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NM" >&5
8616-$as_echo "$ac_ct_NM" >&6; }
8617-else
8618-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
8619-$as_echo "no" >&6; }
8620-fi
8621-
8622-  if test "x$ac_ct_NM" = x; then
8623-    NM=":"
8624-  else
8625-    case $cross_compiling:$ac_tool_warned in
8626-yes:)
8627-{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
8628-$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
8629-ac_tool_warned=yes ;;
8630-esac
8631-    NM=$ac_ct_NM
8632-  fi
8633-else
8634-  NM="$ac_cv_prog_NM"
8635-fi
8636-
8637-
8638-for ac_prog in gawk mawk nawk awk
8639-do
8640-  # Extract the first word of "$ac_prog", so it can be a program name with args.
8641-set dummy $ac_prog; ac_word=$2
8642-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
8643-$as_echo_n "checking for $ac_word... " >&6; }
8644-if ${ac_cv_prog_AWK+:} false; then :
8645-  $as_echo_n "(cached) " >&6
8646-else
8647-  if test -n "$AWK"; then
8648-  ac_cv_prog_AWK="$AWK" # Let the user override the test.
8649-else
8650-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
8651-for as_dir in $PATH
8652-do
8653-  IFS=$as_save_IFS
8654-  test -z "$as_dir" && as_dir=.
8655-    for ac_exec_ext in '' $ac_executable_extensions; do
8656-  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
8657-    ac_cv_prog_AWK="$ac_prog"
8658-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
8659-    break 2
8660-  fi
8661-done
8662-  done
8663-IFS=$as_save_IFS
8664-
8665-fi
8666-fi
8667-AWK=$ac_cv_prog_AWK
8668-if test -n "$AWK"; then
8669-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5
8670-$as_echo "$AWK" >&6; }
8671-else
8672-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
8673-$as_echo "no" >&6; }
8674-fi
8675-
8676-
8677-  test -n "$AWK" && break
8678-done
8679-
8680-
8681-
8682-
8683-# Check whether --with-version was given.
8684-if test "${with_version+set}" = set; then :
8685-  withval=$with_version;
8686-    echo "${with_version}" | grep '^[0-9]\+\.[0-9]\+\.[0-9]\+-[0-9]\+-g[0-9a-f]\+$' 2>&1 1>/dev/null
8687-    if test $? -eq 0 ; then
8688-      echo "$with_version" > "${objroot}VERSION"
8689-    else
8690-      echo "${with_version}" | grep '^VERSION$' 2>&1 1>/dev/null
8691-      if test $? -ne 0 ; then
8692-        as_fn_error $? "${with_version} does not match <major>.<minor>.<bugfix>-<nrev>-g<gid> or VERSION" "$LINENO" 5
8693-      fi
8694-    fi
8695-
8696-else
8697-
8698-        if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
8699-                        for pattern in '[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \
8700-                     '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \
8701-                     '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \
8702-                     '[0-9][0-9].[0-9][0-9].[0-9]' \
8703-                     '[0-9][0-9].[0-9][0-9].[0-9][0-9]'; do
8704-        (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null
8705-        if test $? -eq 0 ; then
8706-          mv "${objroot}VERSION.tmp" "${objroot}VERSION"
8707-          break
8708-        fi
8709-      done
8710-    fi
8711-    rm -f "${objroot}VERSION.tmp"
8712-
8713-fi
8714-
8715-
8716-if test ! -e "${objroot}VERSION" ; then
8717-  if test ! -e "${srcroot}VERSION" ; then
8718-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: Missing VERSION file, and unable to generate it; creating bogus VERSION" >&5
8719-$as_echo "Missing VERSION file, and unable to generate it; creating bogus VERSION" >&6; }
8720-    echo "0.0.0-0-g000000missing_version_try_git_fetch_tags" > "${objroot}VERSION"
8721-  else
8722-    cp ${srcroot}VERSION ${objroot}VERSION
8723-  fi
8724-fi
8725-jemalloc_version=`cat "${objroot}VERSION"`
8726-jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $1}'`
8727-jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $2}'`
8728-jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $3}'`
8729-jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $4}'`
8730-jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $5}'`
8731-
8732-
8733-
8734-
8735-
8736-
8737-
8738-default_retain="0"
8739-zero_realloc_default_free="0"
8740-maps_coalesce="1"
8741-DUMP_SYMS="${NM} -a"
8742-SYM_PREFIX=""
8743-case "${host}" in
8744-  *-*-darwin* | *-*-ios*)
8745-	abi="macho"
8746-	RPATH=""
8747-	LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES"
8748-	so="dylib"
8749-	importlib="${so}"
8750-	force_tls="0"
8751-	DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)'
8752-	SOREV="${rev}.${so}"
8753-	sbrk_deprecated="1"
8754-	SYM_PREFIX="_"
8755-	;;
8756-  *-*-freebsd*)
8757-	T_APPEND_V=-D_BSD_SOURCE
8758-  if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
8759-  CPPFLAGS="${CPPFLAGS}${T_APPEND_V}"
8760-else
8761-  CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}"
8762-fi
8763-
8764-
8765-	abi="elf"
8766-
8767-$as_echo "#define JEMALLOC_SYSCTL_VM_OVERCOMMIT  " >>confdefs.h
8768-
8769-	force_lazy_lock="1"
8770-	;;
8771-  *-*-dragonfly*)
8772-	abi="elf"
8773-	;;
8774-  *-*-openbsd*)
8775-	abi="elf"
8776-	force_tls="0"
8777-	;;
8778-  *-*-bitrig*)
8779-	abi="elf"
8780-	;;
8781-  *-*-linux-android*)
8782-		T_APPEND_V=-D_GNU_SOURCE
8783-  if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
8784-  CPPFLAGS="${CPPFLAGS}${T_APPEND_V}"
8785-else
8786-  CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}"
8787-fi
8788-
8789-
8790-	abi="elf"
8791-	glibc="0"
8792-
8793-$as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS  " >>confdefs.h
8794-
8795-
8796-$as_echo "#define JEMALLOC_HAS_ALLOCA_H  " >>confdefs.h
8797-
8798-
8799-$as_echo "#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY  " >>confdefs.h
8800-
8801-
8802-$as_echo "#define JEMALLOC_THREADED_INIT  " >>confdefs.h
8803-
8804-
8805-$as_echo "#define JEMALLOC_C11_ATOMICS  " >>confdefs.h
8806-
8807-	force_tls="0"
8808-	if test "${LG_SIZEOF_PTR}" = "3"; then
8809-	  default_retain="1"
8810-	fi
8811-	zero_realloc_default_free="1"
8812-	;;
8813-  *-*-linux*)
8814-		T_APPEND_V=-D_GNU_SOURCE
8815-  if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
8816-  CPPFLAGS="${CPPFLAGS}${T_APPEND_V}"
8817-else
8818-  CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}"
8819-fi
8820-
8821-
8822-	abi="elf"
8823-	glibc="1"
8824-
8825-$as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS  " >>confdefs.h
8826-
8827-
8828-$as_echo "#define JEMALLOC_HAS_ALLOCA_H  " >>confdefs.h
8829-
8830-
8831-$as_echo "#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY  " >>confdefs.h
8832-
8833-
8834-$as_echo "#define JEMALLOC_THREADED_INIT  " >>confdefs.h
8835-
8836-
8837-$as_echo "#define JEMALLOC_USE_CXX_THROW  " >>confdefs.h
8838-
8839-	if test "${LG_SIZEOF_PTR}" = "3"; then
8840-	  default_retain="1"
8841-	fi
8842-	zero_realloc_default_free="1"
8843-	;;
8844-  *-*-kfreebsd*)
8845-		T_APPEND_V=-D_GNU_SOURCE
8846-  if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
8847-  CPPFLAGS="${CPPFLAGS}${T_APPEND_V}"
8848-else
8849-  CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}"
8850-fi
8851-
8852-
8853-	abi="elf"
8854-
8855-$as_echo "#define JEMALLOC_HAS_ALLOCA_H  " >>confdefs.h
8856-
8857-
8858-$as_echo "#define JEMALLOC_SYSCTL_VM_OVERCOMMIT  " >>confdefs.h
8859-
8860-
8861-$as_echo "#define JEMALLOC_THREADED_INIT  " >>confdefs.h
8862-
8863-
8864-$as_echo "#define JEMALLOC_USE_CXX_THROW  " >>confdefs.h
8865-
8866-	;;
8867-  *-*-netbsd*)
8868-	{ $as_echo "$as_me:${as_lineno-$LINENO}: checking ABI" >&5
8869-$as_echo_n "checking ABI... " >&6; }
8870-        cat confdefs.h - <<_ACEOF >conftest.$ac_ext
8871-/* end confdefs.h.  */
8872-#ifdef __ELF__
8873-/* ELF */
8874-#else
8875-#error aout
8876-#endif
8877-
8878-int
8879-main ()
8880-{
8881-
8882-  ;
8883-  return 0;
8884-}
8885-_ACEOF
8886-if ac_fn_c_try_compile "$LINENO"; then :
8887-  abi="elf"
8888-else
8889-  abi="aout"
8890-fi
8891-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
8892-	{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $abi" >&5
8893-$as_echo "$abi" >&6; }
8894-	;;
8895-  *-*-solaris2*)
8896-	abi="elf"
8897-	RPATH='-Wl,-R,$(1)'
8898-		T_APPEND_V=-D_POSIX_PTHREAD_SEMANTICS
8899-  if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
8900-  CPPFLAGS="${CPPFLAGS}${T_APPEND_V}"
8901-else
8902-  CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}"
8903-fi
8904-
8905-
8906-	T_APPEND_V=-lposix4 -lsocket -lnsl
8907-  if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
8908-  LIBS="${LIBS}${T_APPEND_V}"
8909-else
8910-  LIBS="${LIBS} ${T_APPEND_V}"
8911-fi
8912-
8913-
8914-	;;
8915-  *-ibm-aix*)
8916-	if test "${LG_SIZEOF_PTR}" = "3"; then
8917-	  	  LD_PRELOAD_VAR="LDR_PRELOAD64"
8918-	else
8919-	  	  LD_PRELOAD_VAR="LDR_PRELOAD"
8920-	fi
8921-	abi="xcoff"
8922-	;;
8923-  *-*-mingw* | *-*-cygwin*)
8924-	abi="pecoff"
8925-	force_tls="0"
8926-	maps_coalesce="0"
8927-	RPATH=""
8928-	so="dll"
8929-	if test "x$je_cv_msvc" = "xyes" ; then
8930-	  importlib="lib"
8931-	  DSO_LDFLAGS="-LD"
8932-	  EXTRA_LDFLAGS="-link -DEBUG"
8933-	  CTARGET='-Fo$@'
8934-	  LDTARGET='-Fe$@'
8935-	  AR='lib'
8936-	  ARFLAGS='-nologo -out:'
8937-	  AROUT='$@'
8938-	  CC_MM=
8939-        else
8940-	  importlib="${so}"
8941-	  DSO_LDFLAGS="-shared"
8942-	  link_whole_archive="1"
8943-	fi
8944-	case "${host}" in
8945-	  *-*-cygwin*)
8946-	    DUMP_SYMS="dumpbin /SYMBOLS"
8947-	    ;;
8948-	  *)
8949-	    ;;
8950-	esac
8951-	a="lib"
8952-	libprefix=""
8953-	SOREV="${so}"
8954-	PIC_CFLAGS=""
8955-	if test "${LG_SIZEOF_PTR}" = "3"; then
8956-	  default_retain="1"
8957-	fi
8958-	zero_realloc_default_free="1"
8959-	;;
8960-  *-*-nto-qnx)
8961-	abi="elf"
8962-  force_tls="0"
8963-
8964-$as_echo "#define JEMALLOC_HAS_ALLOCA_H  " >>confdefs.h
8965-
8966-	;;
8967-  *)
8968-	{ $as_echo "$as_me:${as_lineno-$LINENO}: result: Unsupported operating system: ${host}" >&5
8969-$as_echo "Unsupported operating system: ${host}" >&6; }
8970-	abi="elf"
8971-	;;
8972-esac
8973-
8974-JEMALLOC_USABLE_SIZE_CONST=const
8975-for ac_header in malloc.h
8976-do :
8977-  ac_fn_c_check_header_mongrel "$LINENO" "malloc.h" "ac_cv_header_malloc_h" "$ac_includes_default"
8978-if test "x$ac_cv_header_malloc_h" = xyes; then :
8979-  cat >>confdefs.h <<_ACEOF
8980-#define HAVE_MALLOC_H 1
8981-_ACEOF
8982-
8983-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether malloc_usable_size definition can use const argument" >&5
8984-$as_echo_n "checking whether malloc_usable_size definition can use const argument... " >&6; }
8985-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
8986-/* end confdefs.h.  */
8987-#include <malloc.h>
8988-     #include <stddef.h>
8989-    size_t malloc_usable_size(const void *ptr);
8990-
8991-int
8992-main ()
8993-{
8994-
8995-  ;
8996-  return 0;
8997-}
8998-_ACEOF
8999-if ac_fn_c_try_compile "$LINENO"; then :
9000-
9001-                { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
9002-$as_echo "yes" >&6; }
9003-
9004-else
9005-
9006-                JEMALLOC_USABLE_SIZE_CONST=
9007-                { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
9008-$as_echo "no" >&6; }
9009-
9010-fi
9011-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
9012-
9013-fi
9014-
9015-done
9016-
9017-
9018-cat >>confdefs.h <<_ACEOF
9019-#define JEMALLOC_USABLE_SIZE_CONST $JEMALLOC_USABLE_SIZE_CONST
9020-_ACEOF
9021-
9022-
9023-
9024-
9025-
9026-
9027-
9028-
9029-
9030-
9031-
9032-
9033-
9034-
9035-
9036-
9037-
9038-
9039-
9040-
9041-
9042-
9043-
9044-
9045-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing log" >&5
9046-$as_echo_n "checking for library containing log... " >&6; }
9047-if ${ac_cv_search_log+:} false; then :
9048-  $as_echo_n "(cached) " >&6
9049-else
9050-  ac_func_search_save_LIBS=$LIBS
9051-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9052-/* end confdefs.h.  */
9053-
9054-/* Override any GCC internal prototype to avoid an error.
9055-   Use char because int might match the return type of a GCC
9056-   builtin and then its argument prototype would still apply.  */
9057-#ifdef __cplusplus
9058-extern "C"
9059-#endif
9060-char log ();
9061-int
9062-main ()
9063-{
9064-return log ();
9065-  ;
9066-  return 0;
9067-}
9068-_ACEOF
9069-for ac_lib in '' m; do
9070-  if test -z "$ac_lib"; then
9071-    ac_res="none required"
9072-  else
9073-    ac_res=-l$ac_lib
9074-    LIBS="-l$ac_lib  $ac_func_search_save_LIBS"
9075-  fi
9076-  if ac_fn_c_try_link "$LINENO"; then :
9077-  ac_cv_search_log=$ac_res
9078-fi
9079-rm -f core conftest.err conftest.$ac_objext \
9080-    conftest$ac_exeext
9081-  if ${ac_cv_search_log+:} false; then :
9082-  break
9083-fi
9084-done
9085-if ${ac_cv_search_log+:} false; then :
9086-
9087-else
9088-  ac_cv_search_log=no
9089-fi
9090-rm conftest.$ac_ext
9091-LIBS=$ac_func_search_save_LIBS
9092-fi
9093-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_log" >&5
9094-$as_echo "$ac_cv_search_log" >&6; }
9095-ac_res=$ac_cv_search_log
9096-if test "$ac_res" != no; then :
9097-  test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
9098-
9099-else
9100-  as_fn_error $? "Missing math functions" "$LINENO" 5
9101-fi
9102-
9103-if test "x$ac_cv_search_log" != "xnone required" ; then
9104-  LM="$ac_cv_search_log"
9105-else
9106-  LM=
9107-fi
9108-
9109-
9110-
9111-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether __attribute__ syntax is compilable" >&5
9112-$as_echo_n "checking whether __attribute__ syntax is compilable... " >&6; }
9113-if ${je_cv_attribute+:} false; then :
9114-  $as_echo_n "(cached) " >&6
9115-else
9116-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9117-/* end confdefs.h.  */
9118-static __attribute__((unused)) void foo(void){}
9119-int
9120-main ()
9121-{
9122-
9123-  ;
9124-  return 0;
9125-}
9126-_ACEOF
9127-if ac_fn_c_try_link "$LINENO"; then :
9128-  je_cv_attribute=yes
9129-else
9130-  je_cv_attribute=no
9131-fi
9132-rm -f core conftest.err conftest.$ac_objext \
9133-    conftest$ac_exeext conftest.$ac_ext
9134-fi
9135-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_attribute" >&5
9136-$as_echo "$je_cv_attribute" >&6; }
9137-
9138-if test "x${je_cv_attribute}" = "xyes" ; then
9139-
9140-$as_echo "#define JEMALLOC_HAVE_ATTR  " >>confdefs.h
9141-
9142-  if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then
9143-
9144-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fvisibility=hidden" >&5
9145-$as_echo_n "checking whether compiler supports -fvisibility=hidden... " >&6; }
9146-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
9147-T_APPEND_V=-fvisibility=hidden
9148-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
9149-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
9150-else
9151-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
9152-fi
9153-
9154-
9155-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9156-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9157-else
9158-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9159-fi
9160-
9161-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9162-/* end confdefs.h.  */
9163-
9164-
9165-int
9166-main ()
9167-{
9168-
9169-    return 0;
9170-
9171-  ;
9172-  return 0;
9173-}
9174-_ACEOF
9175-if ac_fn_c_try_compile "$LINENO"; then :
9176-  je_cv_cflags_added=-fvisibility=hidden
9177-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
9178-$as_echo "yes" >&6; }
9179-else
9180-  je_cv_cflags_added=
9181-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
9182-$as_echo "no" >&6; }
9183-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
9184-
9185-fi
9186-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
9187-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9188-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9189-else
9190-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9191-fi
9192-
9193-
9194-
9195-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fvisibility=hidden" >&5
9196-$as_echo_n "checking whether compiler supports -fvisibility=hidden... " >&6; }
9197-T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
9198-T_APPEND_V=-fvisibility=hidden
9199-  if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
9200-  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}"
9201-else
9202-  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}"
9203-fi
9204-
9205-
9206-if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
9207-  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
9208-else
9209-  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
9210-fi
9211-
9212-ac_ext=cpp
9213-ac_cpp='$CXXCPP $CPPFLAGS'
9214-ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
9215-ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
9216-ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
9217-
9218-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9219-/* end confdefs.h.  */
9220-
9221-
9222-int
9223-main ()
9224-{
9225-
9226-    return 0;
9227-
9228-  ;
9229-  return 0;
9230-}
9231-_ACEOF
9232-if ac_fn_cxx_try_compile "$LINENO"; then :
9233-  je_cv_cxxflags_added=-fvisibility=hidden
9234-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
9235-$as_echo "yes" >&6; }
9236-else
9237-  je_cv_cxxflags_added=
9238-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
9239-$as_echo "no" >&6; }
9240-              CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"
9241-
9242-fi
9243-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
9244-ac_ext=c
9245-ac_cpp='$CPP $CPPFLAGS'
9246-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
9247-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
9248-ac_compiler_gnu=$ac_cv_c_compiler_gnu
9249-
9250-if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
9251-  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
9252-else
9253-  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
9254-fi
9255-
9256-
9257-  fi
9258-fi
9259-SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
9260-
9261-
9262-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
9263-$as_echo_n "checking whether compiler supports -Werror... " >&6; }
9264-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
9265-T_APPEND_V=-Werror
9266-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
9267-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
9268-else
9269-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
9270-fi
9271-
9272-
9273-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9274-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9275-else
9276-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9277-fi
9278-
9279-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9280-/* end confdefs.h.  */
9281-
9282-
9283-int
9284-main ()
9285-{
9286-
9287-    return 0;
9288-
9289-  ;
9290-  return 0;
9291-}
9292-_ACEOF
9293-if ac_fn_c_try_compile "$LINENO"; then :
9294-  je_cv_cflags_added=-Werror
9295-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
9296-$as_echo "yes" >&6; }
9297-else
9298-  je_cv_cflags_added=
9299-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
9300-$as_echo "no" >&6; }
9301-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
9302-
9303-fi
9304-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
9305-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9306-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9307-else
9308-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9309-fi
9310-
9311-
9312-
9313-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5
9314-$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; }
9315-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
9316-T_APPEND_V=-herror_on_warning
9317-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
9318-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
9319-else
9320-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
9321-fi
9322-
9323-
9324-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9325-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9326-else
9327-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9328-fi
9329-
9330-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9331-/* end confdefs.h.  */
9332-
9333-
9334-int
9335-main ()
9336-{
9337-
9338-    return 0;
9339-
9340-  ;
9341-  return 0;
9342-}
9343-_ACEOF
9344-if ac_fn_c_try_compile "$LINENO"; then :
9345-  je_cv_cflags_added=-herror_on_warning
9346-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
9347-$as_echo "yes" >&6; }
9348-else
9349-  je_cv_cflags_added=
9350-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
9351-$as_echo "no" >&6; }
9352-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
9353-
9354-fi
9355-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
9356-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9357-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9358-else
9359-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9360-fi
9361-
9362-
9363-
9364-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether tls_model attribute is compilable" >&5
9365-$as_echo_n "checking whether tls_model attribute is compilable... " >&6; }
9366-if ${je_cv_tls_model+:} false; then :
9367-  $as_echo_n "(cached) " >&6
9368-else
9369-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9370-/* end confdefs.h.  */
9371-
9372-int
9373-main ()
9374-{
9375-static __thread int
9376-               __attribute__((tls_model("initial-exec"), unused)) foo;
9377-               foo = 0;
9378-  ;
9379-  return 0;
9380-}
9381-_ACEOF
9382-if ac_fn_c_try_link "$LINENO"; then :
9383-  je_cv_tls_model=yes
9384-else
9385-  je_cv_tls_model=no
9386-fi
9387-rm -f core conftest.err conftest.$ac_objext \
9388-    conftest$ac_exeext conftest.$ac_ext
9389-fi
9390-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_tls_model" >&5
9391-$as_echo "$je_cv_tls_model" >&6; }
9392-
9393-CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
9394-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9395-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9396-else
9397-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9398-fi
9399-
9400-
9401-
9402-SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
9403-
9404-
9405-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
9406-$as_echo_n "checking whether compiler supports -Werror... " >&6; }
9407-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
9408-T_APPEND_V=-Werror
9409-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
9410-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
9411-else
9412-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
9413-fi
9414-
9415-
9416-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9417-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9418-else
9419-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9420-fi
9421-
9422-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9423-/* end confdefs.h.  */
9424-
9425-
9426-int
9427-main ()
9428-{
9429-
9430-    return 0;
9431-
9432-  ;
9433-  return 0;
9434-}
9435-_ACEOF
9436-if ac_fn_c_try_compile "$LINENO"; then :
9437-  je_cv_cflags_added=-Werror
9438-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
9439-$as_echo "yes" >&6; }
9440-else
9441-  je_cv_cflags_added=
9442-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
9443-$as_echo "no" >&6; }
9444-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
9445-
9446-fi
9447-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
9448-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9449-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9450-else
9451-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9452-fi
9453-
9454-
9455-
9456-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5
9457-$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; }
9458-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
9459-T_APPEND_V=-herror_on_warning
9460-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
9461-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
9462-else
9463-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
9464-fi
9465-
9466-
9467-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9468-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9469-else
9470-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9471-fi
9472-
9473-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9474-/* end confdefs.h.  */
9475-
9476-
9477-int
9478-main ()
9479-{
9480-
9481-    return 0;
9482-
9483-  ;
9484-  return 0;
9485-}
9486-_ACEOF
9487-if ac_fn_c_try_compile "$LINENO"; then :
9488-  je_cv_cflags_added=-herror_on_warning
9489-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
9490-$as_echo "yes" >&6; }
9491-else
9492-  je_cv_cflags_added=
9493-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
9494-$as_echo "no" >&6; }
9495-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
9496-
9497-fi
9498-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
9499-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9500-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9501-else
9502-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9503-fi
9504-
9505-
9506-
9507-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether alloc_size attribute is compilable" >&5
9508-$as_echo_n "checking whether alloc_size attribute is compilable... " >&6; }
9509-if ${je_cv_alloc_size+:} false; then :
9510-  $as_echo_n "(cached) " >&6
9511-else
9512-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9513-/* end confdefs.h.  */
9514-#include <stdlib.h>
9515-int
9516-main ()
9517-{
9518-void *foo(size_t size) __attribute__((alloc_size(1)));
9519-  ;
9520-  return 0;
9521-}
9522-_ACEOF
9523-if ac_fn_c_try_link "$LINENO"; then :
9524-  je_cv_alloc_size=yes
9525-else
9526-  je_cv_alloc_size=no
9527-fi
9528-rm -f core conftest.err conftest.$ac_objext \
9529-    conftest$ac_exeext conftest.$ac_ext
9530-fi
9531-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_alloc_size" >&5
9532-$as_echo "$je_cv_alloc_size" >&6; }
9533-
9534-CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
9535-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9536-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9537-else
9538-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9539-fi
9540-
9541-
9542-if test "x${je_cv_alloc_size}" = "xyes" ; then
9543-
9544-$as_echo "#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE  " >>confdefs.h
9545-
9546-fi
9547-SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
9548-
9549-
9550-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
9551-$as_echo_n "checking whether compiler supports -Werror... " >&6; }
9552-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
9553-T_APPEND_V=-Werror
9554-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
9555-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
9556-else
9557-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
9558-fi
9559-
9560-
9561-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9562-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9563-else
9564-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9565-fi
9566-
9567-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9568-/* end confdefs.h.  */
9569-
9570-
9571-int
9572-main ()
9573-{
9574-
9575-    return 0;
9576-
9577-  ;
9578-  return 0;
9579-}
9580-_ACEOF
9581-if ac_fn_c_try_compile "$LINENO"; then :
9582-  je_cv_cflags_added=-Werror
9583-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
9584-$as_echo "yes" >&6; }
9585-else
9586-  je_cv_cflags_added=
9587-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
9588-$as_echo "no" >&6; }
9589-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
9590-
9591-fi
9592-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
9593-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9594-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9595-else
9596-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9597-fi
9598-
9599-
9600-
9601-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5
9602-$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; }
9603-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
9604-T_APPEND_V=-herror_on_warning
9605-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
9606-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
9607-else
9608-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
9609-fi
9610-
9611-
9612-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9613-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9614-else
9615-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9616-fi
9617-
9618-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9619-/* end confdefs.h.  */
9620-
9621-
9622-int
9623-main ()
9624-{
9625-
9626-    return 0;
9627-
9628-  ;
9629-  return 0;
9630-}
9631-_ACEOF
9632-if ac_fn_c_try_compile "$LINENO"; then :
9633-  je_cv_cflags_added=-herror_on_warning
9634-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
9635-$as_echo "yes" >&6; }
9636-else
9637-  je_cv_cflags_added=
9638-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
9639-$as_echo "no" >&6; }
9640-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
9641-
9642-fi
9643-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
9644-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9645-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9646-else
9647-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9648-fi
9649-
9650-
9651-
9652-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(gnu_printf, ...) attribute is compilable" >&5
9653-$as_echo_n "checking whether format(gnu_printf, ...) attribute is compilable... " >&6; }
9654-if ${je_cv_format_gnu_printf+:} false; then :
9655-  $as_echo_n "(cached) " >&6
9656-else
9657-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9658-/* end confdefs.h.  */
9659-#include <stdlib.h>
9660-int
9661-main ()
9662-{
9663-void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));
9664-  ;
9665-  return 0;
9666-}
9667-_ACEOF
9668-if ac_fn_c_try_link "$LINENO"; then :
9669-  je_cv_format_gnu_printf=yes
9670-else
9671-  je_cv_format_gnu_printf=no
9672-fi
9673-rm -f core conftest.err conftest.$ac_objext \
9674-    conftest$ac_exeext conftest.$ac_ext
9675-fi
9676-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_format_gnu_printf" >&5
9677-$as_echo "$je_cv_format_gnu_printf" >&6; }
9678-
9679-CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
9680-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9681-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9682-else
9683-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9684-fi
9685-
9686-
9687-if test "x${je_cv_format_gnu_printf}" = "xyes" ; then
9688-
9689-$as_echo "#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF  " >>confdefs.h
9690-
9691-fi
9692-SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
9693-
9694-
9695-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
9696-$as_echo_n "checking whether compiler supports -Werror... " >&6; }
9697-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
9698-T_APPEND_V=-Werror
9699-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
9700-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
9701-else
9702-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
9703-fi
9704-
9705-
9706-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9707-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9708-else
9709-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9710-fi
9711-
9712-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9713-/* end confdefs.h.  */
9714-
9715-
9716-int
9717-main ()
9718-{
9719-
9720-    return 0;
9721-
9722-  ;
9723-  return 0;
9724-}
9725-_ACEOF
9726-if ac_fn_c_try_compile "$LINENO"; then :
9727-  je_cv_cflags_added=-Werror
9728-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
9729-$as_echo "yes" >&6; }
9730-else
9731-  je_cv_cflags_added=
9732-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
9733-$as_echo "no" >&6; }
9734-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
9735-
9736-fi
9737-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
9738-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9739-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9740-else
9741-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9742-fi
9743-
9744-
9745-
9746-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5
9747-$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; }
9748-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
9749-T_APPEND_V=-herror_on_warning
9750-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
9751-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
9752-else
9753-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
9754-fi
9755-
9756-
9757-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9758-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9759-else
9760-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9761-fi
9762-
9763-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9764-/* end confdefs.h.  */
9765-
9766-
9767-int
9768-main ()
9769-{
9770-
9771-    return 0;
9772-
9773-  ;
9774-  return 0;
9775-}
9776-_ACEOF
9777-if ac_fn_c_try_compile "$LINENO"; then :
9778-  je_cv_cflags_added=-herror_on_warning
9779-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
9780-$as_echo "yes" >&6; }
9781-else
9782-  je_cv_cflags_added=
9783-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
9784-$as_echo "no" >&6; }
9785-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
9786-
9787-fi
9788-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
9789-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9790-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9791-else
9792-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9793-fi
9794-
9795-
9796-
9797-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(printf, ...) attribute is compilable" >&5
9798-$as_echo_n "checking whether format(printf, ...) attribute is compilable... " >&6; }
9799-if ${je_cv_format_printf+:} false; then :
9800-  $as_echo_n "(cached) " >&6
9801-else
9802-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9803-/* end confdefs.h.  */
9804-#include <stdlib.h>
9805-int
9806-main ()
9807-{
9808-void *foo(const char *format, ...) __attribute__((format(printf, 1, 2)));
9809-  ;
9810-  return 0;
9811-}
9812-_ACEOF
9813-if ac_fn_c_try_link "$LINENO"; then :
9814-  je_cv_format_printf=yes
9815-else
9816-  je_cv_format_printf=no
9817-fi
9818-rm -f core conftest.err conftest.$ac_objext \
9819-    conftest$ac_exeext conftest.$ac_ext
9820-fi
9821-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_format_printf" >&5
9822-$as_echo "$je_cv_format_printf" >&6; }
9823-
9824-CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
9825-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9826-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9827-else
9828-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9829-fi
9830-
9831-
9832-if test "x${je_cv_format_printf}" = "xyes" ; then
9833-
9834-$as_echo "#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF  " >>confdefs.h
9835-
9836-fi
9837-
9838-SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
9839-
9840-
9841-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
9842-$as_echo_n "checking whether compiler supports -Werror... " >&6; }
9843-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
9844-T_APPEND_V=-Werror
9845-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
9846-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
9847-else
9848-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
9849-fi
9850-
9851-
9852-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9853-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9854-else
9855-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9856-fi
9857-
9858-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9859-/* end confdefs.h.  */
9860-
9861-
9862-int
9863-main ()
9864-{
9865-
9866-    return 0;
9867-
9868-  ;
9869-  return 0;
9870-}
9871-_ACEOF
9872-if ac_fn_c_try_compile "$LINENO"; then :
9873-  je_cv_cflags_added=-Werror
9874-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
9875-$as_echo "yes" >&6; }
9876-else
9877-  je_cv_cflags_added=
9878-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
9879-$as_echo "no" >&6; }
9880-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
9881-
9882-fi
9883-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
9884-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9885-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9886-else
9887-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9888-fi
9889-
9890-
9891-
9892-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5
9893-$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; }
9894-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
9895-T_APPEND_V=-herror_on_warning
9896-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
9897-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
9898-else
9899-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
9900-fi
9901-
9902-
9903-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9904-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9905-else
9906-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9907-fi
9908-
9909-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9910-/* end confdefs.h.  */
9911-
9912-
9913-int
9914-main ()
9915-{
9916-
9917-    return 0;
9918-
9919-  ;
9920-  return 0;
9921-}
9922-_ACEOF
9923-if ac_fn_c_try_compile "$LINENO"; then :
9924-  je_cv_cflags_added=-herror_on_warning
9925-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
9926-$as_echo "yes" >&6; }
9927-else
9928-  je_cv_cflags_added=
9929-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
9930-$as_echo "no" >&6; }
9931-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
9932-
9933-fi
9934-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
9935-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9936-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9937-else
9938-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9939-fi
9940-
9941-
9942-
9943-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether format(printf, ...) attribute is compilable" >&5
9944-$as_echo_n "checking whether format(printf, ...) attribute is compilable... " >&6; }
9945-if ${je_cv_format_arg+:} false; then :
9946-  $as_echo_n "(cached) " >&6
9947-else
9948-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
9949-/* end confdefs.h.  */
9950-#include <stdlib.h>
9951-int
9952-main ()
9953-{
9954-const char * __attribute__((__format_arg__(1))) foo(const char *format);
9955-  ;
9956-  return 0;
9957-}
9958-_ACEOF
9959-if ac_fn_c_try_link "$LINENO"; then :
9960-  je_cv_format_arg=yes
9961-else
9962-  je_cv_format_arg=no
9963-fi
9964-rm -f core conftest.err conftest.$ac_objext \
9965-    conftest$ac_exeext conftest.$ac_ext
9966-fi
9967-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_format_arg" >&5
9968-$as_echo "$je_cv_format_arg" >&6; }
9969-
9970-CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
9971-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9972-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
9973-else
9974-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
9975-fi
9976-
9977-
9978-if test "x${je_cv_format_arg}" = "xyes" ; then
9979-
9980-$as_echo "#define JEMALLOC_HAVE_ATTR_FORMAT_ARG  " >>confdefs.h
9981-
9982-fi
9983-
9984-SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
9985-
9986-
9987-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wimplicit-fallthrough" >&5
9988-$as_echo_n "checking whether compiler supports -Wimplicit-fallthrough... " >&6; }
9989-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
9990-T_APPEND_V=-Wimplicit-fallthrough
9991-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
9992-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
9993-else
9994-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
9995-fi
9996-
9997-
9998-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
9999-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
10000-else
10001-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
10002-fi
10003-
10004-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
10005-/* end confdefs.h.  */
10006-
10007-
10008-int
10009-main ()
10010-{
10011-
10012-    return 0;
10013-
10014-  ;
10015-  return 0;
10016-}
10017-_ACEOF
10018-if ac_fn_c_try_compile "$LINENO"; then :
10019-  je_cv_cflags_added=-Wimplicit-fallthrough
10020-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
10021-$as_echo "yes" >&6; }
10022-else
10023-  je_cv_cflags_added=
10024-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
10025-$as_echo "no" >&6; }
10026-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
10027-
10028-fi
10029-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
10030-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
10031-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
10032-else
10033-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
10034-fi
10035-
10036-
10037-
10038-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether fallthrough attribute is compilable" >&5
10039-$as_echo_n "checking whether fallthrough attribute is compilable... " >&6; }
10040-if ${je_cv_fallthrough+:} false; then :
10041-  $as_echo_n "(cached) " >&6
10042-else
10043-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
10044-/* end confdefs.h.  */
10045-#if !__has_attribute(fallthrough)
10046-               #error "foo"
10047-               #endif
10048-int
10049-main ()
10050-{
10051-int x = 0;
10052-               switch (x) {
10053-               case 0: __attribute__((__fallthrough__));
10054-               case 1: return 1;
10055-               }
10056-  ;
10057-  return 0;
10058-}
10059-_ACEOF
10060-if ac_fn_c_try_link "$LINENO"; then :
10061-  je_cv_fallthrough=yes
10062-else
10063-  je_cv_fallthrough=no
10064-fi
10065-rm -f core conftest.err conftest.$ac_objext \
10066-    conftest$ac_exeext conftest.$ac_ext
10067-fi
10068-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_fallthrough" >&5
10069-$as_echo "$je_cv_fallthrough" >&6; }
10070-
10071-CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
10072-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
10073-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
10074-else
10075-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
10076-fi
10077-
10078-
10079-if test "x${je_cv_fallthrough}" = "xyes" ; then
10080-
10081-$as_echo "#define JEMALLOC_HAVE_ATTR_FALLTHROUGH  " >>confdefs.h
10082-
10083-
10084-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wimplicit-fallthrough" >&5
10085-$as_echo_n "checking whether compiler supports -Wimplicit-fallthrough... " >&6; }
10086-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
10087-T_APPEND_V=-Wimplicit-fallthrough
10088-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
10089-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
10090-else
10091-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
10092-fi
10093-
10094-
10095-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
10096-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
10097-else
10098-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
10099-fi
10100-
10101-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
10102-/* end confdefs.h.  */
10103-
10104-
10105-int
10106-main ()
10107-{
10108-
10109-    return 0;
10110-
10111-  ;
10112-  return 0;
10113-}
10114-_ACEOF
10115-if ac_fn_c_try_compile "$LINENO"; then :
10116-  je_cv_cflags_added=-Wimplicit-fallthrough
10117-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
10118-$as_echo "yes" >&6; }
10119-else
10120-  je_cv_cflags_added=
10121-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
10122-$as_echo "no" >&6; }
10123-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
10124-
10125-fi
10126-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
10127-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
10128-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
10129-else
10130-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
10131-fi
10132-
10133-
10134-
10135-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Wimplicit-fallthrough" >&5
10136-$as_echo_n "checking whether compiler supports -Wimplicit-fallthrough... " >&6; }
10137-T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
10138-T_APPEND_V=-Wimplicit-fallthrough
10139-  if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
10140-  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}"
10141-else
10142-  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}"
10143-fi
10144-
10145-
10146-if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
10147-  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
10148-else
10149-  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
10150-fi
10151-
10152-ac_ext=cpp
10153-ac_cpp='$CXXCPP $CPPFLAGS'
10154-ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
10155-ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
10156-ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
10157-
10158-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
10159-/* end confdefs.h.  */
10160-
10161-
10162-int
10163-main ()
10164-{
10165-
10166-    return 0;
10167-
10168-  ;
10169-  return 0;
10170-}
10171-_ACEOF
10172-if ac_fn_cxx_try_compile "$LINENO"; then :
10173-  je_cv_cxxflags_added=-Wimplicit-fallthrough
10174-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
10175-$as_echo "yes" >&6; }
10176-else
10177-  je_cv_cxxflags_added=
10178-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
10179-$as_echo "no" >&6; }
10180-              CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"
10181-
10182-fi
10183-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
10184-ac_ext=c
10185-ac_cpp='$CPP $CPPFLAGS'
10186-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
10187-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
10188-ac_compiler_gnu=$ac_cv_c_compiler_gnu
10189-
10190-if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
10191-  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
10192-else
10193-  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
10194-fi
10195-
10196-
10197-fi
10198-
10199-SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
10200-
10201-
10202-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
10203-$as_echo_n "checking whether compiler supports -Werror... " >&6; }
10204-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
10205-T_APPEND_V=-Werror
10206-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
10207-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
10208-else
10209-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
10210-fi
10211-
10212-
10213-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
10214-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
10215-else
10216-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
10217-fi
10218-
10219-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
10220-/* end confdefs.h.  */
10221-
10222-
10223-int
10224-main ()
10225-{
10226-
10227-    return 0;
10228-
10229-  ;
10230-  return 0;
10231-}
10232-_ACEOF
10233-if ac_fn_c_try_compile "$LINENO"; then :
10234-  je_cv_cflags_added=-Werror
10235-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
10236-$as_echo "yes" >&6; }
10237-else
10238-  je_cv_cflags_added=
10239-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
10240-$as_echo "no" >&6; }
10241-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
10242-
10243-fi
10244-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
10245-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
10246-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
10247-else
10248-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
10249-fi
10250-
10251-
10252-
10253-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5
10254-$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; }
10255-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
10256-T_APPEND_V=-herror_on_warning
10257-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
10258-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
10259-else
10260-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
10261-fi
10262-
10263-
10264-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
10265-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
10266-else
10267-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
10268-fi
10269-
10270-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
10271-/* end confdefs.h.  */
10272-
10273-
10274-int
10275-main ()
10276-{
10277-
10278-    return 0;
10279-
10280-  ;
10281-  return 0;
10282-}
10283-_ACEOF
10284-if ac_fn_c_try_compile "$LINENO"; then :
10285-  je_cv_cflags_added=-herror_on_warning
10286-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
10287-$as_echo "yes" >&6; }
10288-else
10289-  je_cv_cflags_added=
10290-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
10291-$as_echo "no" >&6; }
10292-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
10293-
10294-fi
10295-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
10296-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
10297-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
10298-else
10299-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
10300-fi
10301-
10302-
10303-
10304-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether cold attribute is compilable" >&5
10305-$as_echo_n "checking whether cold attribute is compilable... " >&6; }
10306-if ${je_cv_cold+:} false; then :
10307-  $as_echo_n "(cached) " >&6
10308-else
10309-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
10310-/* end confdefs.h.  */
10311-
10312-int
10313-main ()
10314-{
10315-__attribute__((__cold__)) void foo();
10316-  ;
10317-  return 0;
10318-}
10319-_ACEOF
10320-if ac_fn_c_try_link "$LINENO"; then :
10321-  je_cv_cold=yes
10322-else
10323-  je_cv_cold=no
10324-fi
10325-rm -f core conftest.err conftest.$ac_objext \
10326-    conftest$ac_exeext conftest.$ac_ext
10327-fi
10328-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_cold" >&5
10329-$as_echo "$je_cv_cold" >&6; }
10330-
10331-CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
10332-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
10333-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
10334-else
10335-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
10336-fi
10337-
10338-
10339-if test "x${je_cv_cold}" = "xyes" ; then
10340-
10341-$as_echo "#define JEMALLOC_HAVE_ATTR_COLD  " >>confdefs.h
10342-
10343-fi
10344-
10345-
10346-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether vm_make_tag is compilable" >&5
10347-$as_echo_n "checking whether vm_make_tag is compilable... " >&6; }
10348-if ${je_cv_vm_make_tag+:} false; then :
10349-  $as_echo_n "(cached) " >&6
10350-else
10351-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
10352-/* end confdefs.h.  */
10353-#include <sys/mman.h>
10354-	       #include <mach/vm_statistics.h>
10355-int
10356-main ()
10357-{
10358-void *p;
10359-	       p = mmap(0, 16, PROT_READ, MAP_ANON|MAP_PRIVATE, VM_MAKE_TAG(1), 0);
10360-	       munmap(p, 16);
10361-  ;
10362-  return 0;
10363-}
10364-_ACEOF
10365-if ac_fn_c_try_link "$LINENO"; then :
10366-  je_cv_vm_make_tag=yes
10367-else
10368-  je_cv_vm_make_tag=no
10369-fi
10370-rm -f core conftest.err conftest.$ac_objext \
10371-    conftest$ac_exeext conftest.$ac_ext
10372-fi
10373-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_vm_make_tag" >&5
10374-$as_echo "$je_cv_vm_make_tag" >&6; }
10375-
10376-if test "x${je_cv_vm_make_tag}" = "xyes" ; then
10377-
10378-$as_echo "#define JEMALLOC_HAVE_VM_MAKE_TAG  " >>confdefs.h
10379-
10380-fi
10381-
10382-
10383-# Check whether --with-rpath was given.
10384-if test "${with_rpath+set}" = set; then :
10385-  withval=$with_rpath; if test "x$with_rpath" = "xno" ; then
10386-  RPATH_EXTRA=
10387-else
10388-  RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`"
10389-fi
10390-else
10391-  RPATH_EXTRA=
10392-
10393-fi
10394-
10395-
10396-
10397-# Check whether --enable-autogen was given.
10398-if test "${enable_autogen+set}" = set; then :
10399-  enableval=$enable_autogen; if test "x$enable_autogen" = "xno" ; then
10400-  enable_autogen="0"
10401-else
10402-  enable_autogen="1"
10403-fi
10404-
10405-else
10406-  enable_autogen="0"
10407-
10408-fi
10409-
10410-
10411-
10412-# Find a good install program.  We prefer a C program (faster),
10413-# so one script is as good as another.  But avoid the broken or
10414-# incompatible versions:
10415-# SysV /etc/install, /usr/sbin/install
10416-# SunOS /usr/etc/install
10417-# IRIX /sbin/install
10418-# AIX /bin/install
10419-# AmigaOS /C/install, which installs bootblocks on floppy discs
10420-# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
10421-# AFS /usr/afsws/bin/install, which mishandles nonexistent args
10422-# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
10423-# OS/2's system install, which has a completely different semantic
10424-# ./install, which can be erroneously created by make from ./install.sh.
10425-# Reject install programs that cannot install multiple files.
10426-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5
10427-$as_echo_n "checking for a BSD-compatible install... " >&6; }
10428-if test -z "$INSTALL"; then
10429-if ${ac_cv_path_install+:} false; then :
10430-  $as_echo_n "(cached) " >&6
10431-else
10432-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
10433-for as_dir in $PATH
10434-do
10435-  IFS=$as_save_IFS
10436-  test -z "$as_dir" && as_dir=.
10437-    # Account for people who put trailing slashes in PATH elements.
10438-case $as_dir/ in #((
10439-  ./ | .// | /[cC]/* | \
10440-  /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
10441-  ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \
10442-  /usr/ucb/* ) ;;
10443-  *)
10444-    # OSF1 and SCO ODT 3.0 have their own names for install.
10445-    # Don't use installbsd from OSF since it installs stuff as root
10446-    # by default.
10447-    for ac_prog in ginstall scoinst install; do
10448-      for ac_exec_ext in '' $ac_executable_extensions; do
10449-	if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then
10450-	  if test $ac_prog = install &&
10451-	    grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
10452-	    # AIX install.  It has an incompatible calling convention.
10453-	    :
10454-	  elif test $ac_prog = install &&
10455-	    grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
10456-	    # program-specific install script used by HP pwplus--don't use.
10457-	    :
10458-	  else
10459-	    rm -rf conftest.one conftest.two conftest.dir
10460-	    echo one > conftest.one
10461-	    echo two > conftest.two
10462-	    mkdir conftest.dir
10463-	    if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" &&
10464-	      test -s conftest.one && test -s conftest.two &&
10465-	      test -s conftest.dir/conftest.one &&
10466-	      test -s conftest.dir/conftest.two
10467-	    then
10468-	      ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c"
10469-	      break 3
10470-	    fi
10471-	  fi
10472-	fi
10473-      done
10474-    done
10475-    ;;
10476-esac
10477-
10478-  done
10479-IFS=$as_save_IFS
10480-
10481-rm -rf conftest.one conftest.two conftest.dir
10482-
10483-fi
10484-  if test "${ac_cv_path_install+set}" = set; then
10485-    INSTALL=$ac_cv_path_install
10486-  else
10487-    # As a last resort, use the slow shell script.  Don't cache a
10488-    # value for INSTALL within a source directory, because that will
10489-    # break other packages using the cache if that directory is
10490-    # removed, or if the value is a relative name.
10491-    INSTALL=$ac_install_sh
10492-  fi
10493-fi
10494-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5
10495-$as_echo "$INSTALL" >&6; }
10496-
10497-# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
10498-# It thinks the first close brace ends the variable substitution.
10499-test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
10500-
10501-test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
10502-
10503-test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
10504-
10505-if test -n "$ac_tool_prefix"; then
10506-  # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args.
10507-set dummy ${ac_tool_prefix}ranlib; ac_word=$2
10508-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
10509-$as_echo_n "checking for $ac_word... " >&6; }
10510-if ${ac_cv_prog_RANLIB+:} false; then :
10511-  $as_echo_n "(cached) " >&6
10512-else
10513-  if test -n "$RANLIB"; then
10514-  ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test.
10515-else
10516-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
10517-for as_dir in $PATH
10518-do
10519-  IFS=$as_save_IFS
10520-  test -z "$as_dir" && as_dir=.
10521-    for ac_exec_ext in '' $ac_executable_extensions; do
10522-  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
10523-    ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib"
10524-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
10525-    break 2
10526-  fi
10527-done
10528-  done
10529-IFS=$as_save_IFS
10530-
10531-fi
10532-fi
10533-RANLIB=$ac_cv_prog_RANLIB
10534-if test -n "$RANLIB"; then
10535-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5
10536-$as_echo "$RANLIB" >&6; }
10537-else
10538-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
10539-$as_echo "no" >&6; }
10540-fi
10541-
10542-
10543-fi
10544-if test -z "$ac_cv_prog_RANLIB"; then
10545-  ac_ct_RANLIB=$RANLIB
10546-  # Extract the first word of "ranlib", so it can be a program name with args.
10547-set dummy ranlib; ac_word=$2
10548-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
10549-$as_echo_n "checking for $ac_word... " >&6; }
10550-if ${ac_cv_prog_ac_ct_RANLIB+:} false; then :
10551-  $as_echo_n "(cached) " >&6
10552-else
10553-  if test -n "$ac_ct_RANLIB"; then
10554-  ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test.
10555-else
10556-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
10557-for as_dir in $PATH
10558-do
10559-  IFS=$as_save_IFS
10560-  test -z "$as_dir" && as_dir=.
10561-    for ac_exec_ext in '' $ac_executable_extensions; do
10562-  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
10563-    ac_cv_prog_ac_ct_RANLIB="ranlib"
10564-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
10565-    break 2
10566-  fi
10567-done
10568-  done
10569-IFS=$as_save_IFS
10570-
10571-fi
10572-fi
10573-ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB
10574-if test -n "$ac_ct_RANLIB"; then
10575-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5
10576-$as_echo "$ac_ct_RANLIB" >&6; }
10577-else
10578-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
10579-$as_echo "no" >&6; }
10580-fi
10581-
10582-  if test "x$ac_ct_RANLIB" = x; then
10583-    RANLIB=":"
10584-  else
10585-    case $cross_compiling:$ac_tool_warned in
10586-yes:)
10587-{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
10588-$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
10589-ac_tool_warned=yes ;;
10590-esac
10591-    RANLIB=$ac_ct_RANLIB
10592-  fi
10593-else
10594-  RANLIB="$ac_cv_prog_RANLIB"
10595-fi
10596-
10597-# Extract the first word of "ld", so it can be a program name with args.
10598-set dummy ld; ac_word=$2
10599-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
10600-$as_echo_n "checking for $ac_word... " >&6; }
10601-if ${ac_cv_path_LD+:} false; then :
10602-  $as_echo_n "(cached) " >&6
10603-else
10604-  case $LD in
10605-  [\\/]* | ?:[\\/]*)
10606-  ac_cv_path_LD="$LD" # Let the user override the test with a path.
10607-  ;;
10608-  *)
10609-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
10610-for as_dir in $PATH
10611-do
10612-  IFS=$as_save_IFS
10613-  test -z "$as_dir" && as_dir=.
10614-    for ac_exec_ext in '' $ac_executable_extensions; do
10615-  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
10616-    ac_cv_path_LD="$as_dir/$ac_word$ac_exec_ext"
10617-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
10618-    break 2
10619-  fi
10620-done
10621-  done
10622-IFS=$as_save_IFS
10623-
10624-  test -z "$ac_cv_path_LD" && ac_cv_path_LD="false"
10625-  ;;
10626-esac
10627-fi
10628-LD=$ac_cv_path_LD
10629-if test -n "$LD"; then
10630-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
10631-$as_echo "$LD" >&6; }
10632-else
10633-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
10634-$as_echo "no" >&6; }
10635-fi
10636-
10637-
10638-# Extract the first word of "autoconf", so it can be a program name with args.
10639-set dummy autoconf; ac_word=$2
10640-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
10641-$as_echo_n "checking for $ac_word... " >&6; }
10642-if ${ac_cv_path_AUTOCONF+:} false; then :
10643-  $as_echo_n "(cached) " >&6
10644-else
10645-  case $AUTOCONF in
10646-  [\\/]* | ?:[\\/]*)
10647-  ac_cv_path_AUTOCONF="$AUTOCONF" # Let the user override the test with a path.
10648-  ;;
10649-  *)
10650-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
10651-for as_dir in $PATH
10652-do
10653-  IFS=$as_save_IFS
10654-  test -z "$as_dir" && as_dir=.
10655-    for ac_exec_ext in '' $ac_executable_extensions; do
10656-  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
10657-    ac_cv_path_AUTOCONF="$as_dir/$ac_word$ac_exec_ext"
10658-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
10659-    break 2
10660-  fi
10661-done
10662-  done
10663-IFS=$as_save_IFS
10664-
10665-  test -z "$ac_cv_path_AUTOCONF" && ac_cv_path_AUTOCONF="false"
10666-  ;;
10667-esac
10668-fi
10669-AUTOCONF=$ac_cv_path_AUTOCONF
10670-if test -n "$AUTOCONF"; then
10671-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AUTOCONF" >&5
10672-$as_echo "$AUTOCONF" >&6; }
10673-else
10674-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
10675-$as_echo "no" >&6; }
10676-fi
10677-
10678-
10679-
10680-# Check whether --enable-doc was given.
10681-if test "${enable_doc+set}" = set; then :
10682-  enableval=$enable_doc; if test "x$enable_doc" = "xno" ; then
10683-  enable_doc="0"
10684-else
10685-  enable_doc="1"
10686-fi
10687-
10688-else
10689-  enable_doc="1"
10690-
10691-fi
10692-
10693-
10694-
10695-# Check whether --enable-shared was given.
10696-if test "${enable_shared+set}" = set; then :
10697-  enableval=$enable_shared; if test "x$enable_shared" = "xno" ; then
10698-  enable_shared="0"
10699-else
10700-  enable_shared="1"
10701-fi
10702-
10703-else
10704-  enable_shared="1"
10705-
10706-fi
10707-
10708-
10709-
10710-# Check whether --enable-static was given.
10711-if test "${enable_static+set}" = set; then :
10712-  enableval=$enable_static; if test "x$enable_static" = "xno" ; then
10713-  enable_static="0"
10714-else
10715-  enable_static="1"
10716-fi
10717-
10718-else
10719-  enable_static="1"
10720-
10721-fi
10722-
10723-
10724-
10725-if test "$enable_shared$enable_static" = "00" ; then
10726-  as_fn_error $? "Please enable one of shared or static builds" "$LINENO" 5
10727-fi
10728-
10729-
10730-# Check whether --with-mangling was given.
10731-if test "${with_mangling+set}" = set; then :
10732-  withval=$with_mangling; mangling_map="$with_mangling"
10733-else
10734-  mangling_map=""
10735-fi
10736-
10737-
10738-
10739-# Check whether --with-jemalloc_prefix was given.
10740-if test "${with_jemalloc_prefix+set}" = set; then :
10741-  withval=$with_jemalloc_prefix; JEMALLOC_PREFIX="$with_jemalloc_prefix"
10742-else
10743-  if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then
10744-  JEMALLOC_PREFIX=""
10745-else
10746-  JEMALLOC_PREFIX="je_"
10747-fi
10748-
10749-fi
10750-
10751-if test "x$JEMALLOC_PREFIX" = "x" ; then
10752-
10753-$as_echo "#define JEMALLOC_IS_MALLOC  " >>confdefs.h
10754-
10755-else
10756-  JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"`
10757-
10758-cat >>confdefs.h <<_ACEOF
10759-#define JEMALLOC_PREFIX "$JEMALLOC_PREFIX"
10760-_ACEOF
10761-
10762-
10763-cat >>confdefs.h <<_ACEOF
10764-#define JEMALLOC_CPREFIX "$JEMALLOC_CPREFIX"
10765-_ACEOF
10766-
10767-fi
10768-
10769-
10770-
10771-
10772-# Check whether --with-export was given.
10773-if test "${with_export+set}" = set; then :
10774-  withval=$with_export; if test "x$with_export" = "xno"; then
10775-
10776-$as_echo "#define JEMALLOC_EXPORT /**/" >>confdefs.h
10777-
10778-fi
10779-
10780-fi
10781-
10782-
10783-public_syms="aligned_alloc calloc dallocx free mallctl mallctlbymib mallctlnametomib malloc malloc_conf malloc_conf_2_conf_harder malloc_message malloc_stats_print malloc_usable_size mallocx smallocx_${jemalloc_version_gid} nallocx posix_memalign rallocx realloc sallocx sdallocx xallocx"
10784-ac_fn_c_check_func "$LINENO" "memalign" "ac_cv_func_memalign"
10785-if test "x$ac_cv_func_memalign" = xyes; then :
10786-
10787-$as_echo "#define JEMALLOC_OVERRIDE_MEMALIGN  " >>confdefs.h
10788-
10789-	       public_syms="${public_syms} memalign"
10790-fi
10791-
10792-ac_fn_c_check_func "$LINENO" "valloc" "ac_cv_func_valloc"
10793-if test "x$ac_cv_func_valloc" = xyes; then :
10794-
10795-$as_echo "#define JEMALLOC_OVERRIDE_VALLOC  " >>confdefs.h
10796-
10797-	       public_syms="${public_syms} valloc"
10798-fi
10799-
10800-ac_fn_c_check_func "$LINENO" "malloc_size" "ac_cv_func_malloc_size"
10801-if test "x$ac_cv_func_malloc_size" = xyes; then :
10802-
10803-$as_echo "#define JEMALLOC_HAVE_MALLOC_SIZE  " >>confdefs.h
10804-
10805-	       public_syms="${public_syms} malloc_size"
10806-fi
10807-
10808-
10809-wrap_syms=
10810-if test "x${JEMALLOC_PREFIX}" = "x" ; then
10811-  ac_fn_c_check_func "$LINENO" "__libc_calloc" "ac_cv_func___libc_calloc"
10812-if test "x$ac_cv_func___libc_calloc" = xyes; then :
10813-
10814-$as_echo "#define JEMALLOC_OVERRIDE___LIBC_CALLOC  " >>confdefs.h
10815-
10816-		 wrap_syms="${wrap_syms} __libc_calloc"
10817-fi
10818-
10819-  ac_fn_c_check_func "$LINENO" "__libc_free" "ac_cv_func___libc_free"
10820-if test "x$ac_cv_func___libc_free" = xyes; then :
10821-
10822-$as_echo "#define JEMALLOC_OVERRIDE___LIBC_FREE  " >>confdefs.h
10823-
10824-		 wrap_syms="${wrap_syms} __libc_free"
10825-fi
10826-
10827-  ac_fn_c_check_func "$LINENO" "__libc_malloc" "ac_cv_func___libc_malloc"
10828-if test "x$ac_cv_func___libc_malloc" = xyes; then :
10829-
10830-$as_echo "#define JEMALLOC_OVERRIDE___LIBC_MALLOC  " >>confdefs.h
10831-
10832-		 wrap_syms="${wrap_syms} __libc_malloc"
10833-fi
10834-
10835-  ac_fn_c_check_func "$LINENO" "__libc_memalign" "ac_cv_func___libc_memalign"
10836-if test "x$ac_cv_func___libc_memalign" = xyes; then :
10837-
10838-$as_echo "#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN  " >>confdefs.h
10839-
10840-		 wrap_syms="${wrap_syms} __libc_memalign"
10841-fi
10842-
10843-  ac_fn_c_check_func "$LINENO" "__libc_realloc" "ac_cv_func___libc_realloc"
10844-if test "x$ac_cv_func___libc_realloc" = xyes; then :
10845-
10846-$as_echo "#define JEMALLOC_OVERRIDE___LIBC_REALLOC  " >>confdefs.h
10847-
10848-		 wrap_syms="${wrap_syms} __libc_realloc"
10849-fi
10850-
10851-  ac_fn_c_check_func "$LINENO" "__libc_valloc" "ac_cv_func___libc_valloc"
10852-if test "x$ac_cv_func___libc_valloc" = xyes; then :
10853-
10854-$as_echo "#define JEMALLOC_OVERRIDE___LIBC_VALLOC  " >>confdefs.h
10855-
10856-		 wrap_syms="${wrap_syms} __libc_valloc"
10857-fi
10858-
10859-  ac_fn_c_check_func "$LINENO" "__posix_memalign" "ac_cv_func___posix_memalign"
10860-if test "x$ac_cv_func___posix_memalign" = xyes; then :
10861-
10862-$as_echo "#define JEMALLOC_OVERRIDE___POSIX_MEMALIGN  " >>confdefs.h
10863-
10864-		 wrap_syms="${wrap_syms} __posix_memalign"
10865-fi
10866-
10867-fi
10868-
10869-case "${host}" in
10870-  *-*-mingw* | *-*-cygwin*)
10871-    wrap_syms="${wrap_syms} tls_callback"
10872-    ;;
10873-  *)
10874-    ;;
10875-esac
10876-
10877-
10878-# Check whether --with-private_namespace was given.
10879-if test "${with_private_namespace+set}" = set; then :
10880-  withval=$with_private_namespace; JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_"
10881-else
10882-  JEMALLOC_PRIVATE_NAMESPACE="je_"
10883-
10884-fi
10885-
10886-
10887-cat >>confdefs.h <<_ACEOF
10888-#define JEMALLOC_PRIVATE_NAMESPACE $JEMALLOC_PRIVATE_NAMESPACE
10889-_ACEOF
10890-
10891-private_namespace="$JEMALLOC_PRIVATE_NAMESPACE"
10892-
10893-
10894-
10895-# Check whether --with-install_suffix was given.
10896-if test "${with_install_suffix+set}" = set; then :
10897-  withval=$with_install_suffix; case "$with_install_suffix" in
10898-   *\ * ) as_fn_error $? "Install suffix should not contain spaces" "$LINENO" 5 ;;
10899-   * ) INSTALL_SUFFIX="$with_install_suffix" ;;
10900-esac
10901-else
10902-  INSTALL_SUFFIX=
10903-
10904-fi
10905-
10906-install_suffix="$INSTALL_SUFFIX"
10907-
10908-
10909-
10910-# Check whether --with-malloc_conf was given.
10911-if test "${with_malloc_conf+set}" = set; then :
10912-  withval=$with_malloc_conf; JEMALLOC_CONFIG_MALLOC_CONF="$with_malloc_conf"
10913-else
10914-  JEMALLOC_CONFIG_MALLOC_CONF=""
10915-
10916-fi
10917-
10918-config_malloc_conf="$JEMALLOC_CONFIG_MALLOC_CONF"
10919-
10920-cat >>confdefs.h <<_ACEOF
10921-#define JEMALLOC_CONFIG_MALLOC_CONF "$config_malloc_conf"
10922-_ACEOF
10923-
10924-
10925-je_="je_"
10926-
10927-
10928-cfgoutputs_in="Makefile.in"
10929-cfgoutputs_in="${cfgoutputs_in} jemalloc.pc.in"
10930-cfgoutputs_in="${cfgoutputs_in} doc/html.xsl.in"
10931-cfgoutputs_in="${cfgoutputs_in} doc/manpages.xsl.in"
10932-cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in"
10933-cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in"
10934-cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in"
10935-cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in"
10936-cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_preamble.h.in"
10937-cfgoutputs_in="${cfgoutputs_in} test/test.sh.in"
10938-cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in"
10939-
10940-cfgoutputs_out="Makefile"
10941-cfgoutputs_out="${cfgoutputs_out} jemalloc.pc"
10942-cfgoutputs_out="${cfgoutputs_out} doc/html.xsl"
10943-cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl"
10944-cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml"
10945-cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h"
10946-cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h"
10947-cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h"
10948-cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_preamble.h"
10949-cfgoutputs_out="${cfgoutputs_out} test/test.sh"
10950-cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h"
10951-
10952-cfgoutputs_tup="Makefile"
10953-cfgoutputs_tup="${cfgoutputs_tup} jemalloc.pc:jemalloc.pc.in"
10954-cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in"
10955-cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in"
10956-cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in"
10957-cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in"
10958-cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in"
10959-cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in"
10960-cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_preamble.h"
10961-cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in"
10962-cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in"
10963-
10964-cfghdrs_in="include/jemalloc/jemalloc_defs.h.in"
10965-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in"
10966-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.sh"
10967-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh"
10968-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh"
10969-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh"
10970-cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_rename.sh"
10971-cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_mangle.sh"
10972-cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc.sh"
10973-cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in"
10974-
10975-cfghdrs_out="include/jemalloc/jemalloc_defs.h"
10976-cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h"
10977-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols.awk"
10978-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols_jet.awk"
10979-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt"
10980-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h"
10981-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h"
10982-cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h"
10983-cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h"
10984-cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h"
10985-cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h"
10986-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h"
10987-cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h"
10988-
10989-cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in"
10990-cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in"
10991-cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in"
10992-
10993-
10994-# Check whether --enable-debug was given.
10995-if test "${enable_debug+set}" = set; then :
10996-  enableval=$enable_debug; if test "x$enable_debug" = "xno" ; then
10997-  enable_debug="0"
10998-else
10999-  enable_debug="1"
11000-fi
11001-
11002-else
11003-  enable_debug="0"
11004-
11005-fi
11006-
11007-if test "x$enable_debug" = "x1" ; then
11008-
11009-$as_echo "#define JEMALLOC_DEBUG  " >>confdefs.h
11010-
11011-fi
11012-
11013-
11014-if test "x$enable_debug" = "x0" ; then
11015-  if test "x$GCC" = "xyes" ; then
11016-
11017-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O3" >&5
11018-$as_echo_n "checking whether compiler supports -O3... " >&6; }
11019-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
11020-T_APPEND_V=-O3
11021-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
11022-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
11023-else
11024-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
11025-fi
11026-
11027-
11028-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
11029-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
11030-else
11031-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
11032-fi
11033-
11034-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
11035-/* end confdefs.h.  */
11036-
11037-
11038-int
11039-main ()
11040-{
11041-
11042-    return 0;
11043-
11044-  ;
11045-  return 0;
11046-}
11047-_ACEOF
11048-if ac_fn_c_try_compile "$LINENO"; then :
11049-  je_cv_cflags_added=-O3
11050-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
11051-$as_echo "yes" >&6; }
11052-else
11053-  je_cv_cflags_added=
11054-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
11055-$as_echo "no" >&6; }
11056-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
11057-
11058-fi
11059-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
11060-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
11061-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
11062-else
11063-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
11064-fi
11065-
11066-
11067-
11068-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O3" >&5
11069-$as_echo_n "checking whether compiler supports -O3... " >&6; }
11070-T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
11071-T_APPEND_V=-O3
11072-  if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
11073-  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}"
11074-else
11075-  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}"
11076-fi
11077-
11078-
11079-if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
11080-  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
11081-else
11082-  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
11083-fi
11084-
11085-ac_ext=cpp
11086-ac_cpp='$CXXCPP $CPPFLAGS'
11087-ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
11088-ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
11089-ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
11090-
11091-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
11092-/* end confdefs.h.  */
11093-
11094-
11095-int
11096-main ()
11097-{
11098-
11099-    return 0;
11100-
11101-  ;
11102-  return 0;
11103-}
11104-_ACEOF
11105-if ac_fn_cxx_try_compile "$LINENO"; then :
11106-  je_cv_cxxflags_added=-O3
11107-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
11108-$as_echo "yes" >&6; }
11109-else
11110-  je_cv_cxxflags_added=
11111-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
11112-$as_echo "no" >&6; }
11113-              CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"
11114-
11115-fi
11116-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
11117-ac_ext=c
11118-ac_cpp='$CPP $CPPFLAGS'
11119-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
11120-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
11121-ac_compiler_gnu=$ac_cv_c_compiler_gnu
11122-
11123-if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
11124-  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
11125-else
11126-  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
11127-fi
11128-
11129-
11130-
11131-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -funroll-loops" >&5
11132-$as_echo_n "checking whether compiler supports -funroll-loops... " >&6; }
11133-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
11134-T_APPEND_V=-funroll-loops
11135-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
11136-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
11137-else
11138-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
11139-fi
11140-
11141-
11142-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
11143-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
11144-else
11145-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
11146-fi
11147-
11148-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
11149-/* end confdefs.h.  */
11150-
11151-
11152-int
11153-main ()
11154-{
11155-
11156-    return 0;
11157-
11158-  ;
11159-  return 0;
11160-}
11161-_ACEOF
11162-if ac_fn_c_try_compile "$LINENO"; then :
11163-  je_cv_cflags_added=-funroll-loops
11164-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
11165-$as_echo "yes" >&6; }
11166-else
11167-  je_cv_cflags_added=
11168-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
11169-$as_echo "no" >&6; }
11170-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
11171-
11172-fi
11173-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
11174-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
11175-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
11176-else
11177-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
11178-fi
11179-
11180-
11181-  elif test "x$je_cv_msvc" = "xyes" ; then
11182-
11183-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O2" >&5
11184-$as_echo_n "checking whether compiler supports -O2... " >&6; }
11185-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
11186-T_APPEND_V=-O2
11187-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
11188-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
11189-else
11190-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
11191-fi
11192-
11193-
11194-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
11195-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
11196-else
11197-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
11198-fi
11199-
11200-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
11201-/* end confdefs.h.  */
11202-
11203-
11204-int
11205-main ()
11206-{
11207-
11208-    return 0;
11209-
11210-  ;
11211-  return 0;
11212-}
11213-_ACEOF
11214-if ac_fn_c_try_compile "$LINENO"; then :
11215-  je_cv_cflags_added=-O2
11216-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
11217-$as_echo "yes" >&6; }
11218-else
11219-  je_cv_cflags_added=
11220-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
11221-$as_echo "no" >&6; }
11222-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
11223-
11224-fi
11225-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
11226-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
11227-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
11228-else
11229-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
11230-fi
11231-
11232-
11233-
11234-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O2" >&5
11235-$as_echo_n "checking whether compiler supports -O2... " >&6; }
11236-T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
11237-T_APPEND_V=-O2
11238-  if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
11239-  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}"
11240-else
11241-  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}"
11242-fi
11243-
11244-
11245-if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
11246-  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
11247-else
11248-  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
11249-fi
11250-
11251-ac_ext=cpp
11252-ac_cpp='$CXXCPP $CPPFLAGS'
11253-ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
11254-ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
11255-ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
11256-
11257-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
11258-/* end confdefs.h.  */
11259-
11260-
11261-int
11262-main ()
11263-{
11264-
11265-    return 0;
11266-
11267-  ;
11268-  return 0;
11269-}
11270-_ACEOF
11271-if ac_fn_cxx_try_compile "$LINENO"; then :
11272-  je_cv_cxxflags_added=-O2
11273-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
11274-$as_echo "yes" >&6; }
11275-else
11276-  je_cv_cxxflags_added=
11277-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
11278-$as_echo "no" >&6; }
11279-              CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"
11280-
11281-fi
11282-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
11283-ac_ext=c
11284-ac_cpp='$CPP $CPPFLAGS'
11285-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
11286-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
11287-ac_compiler_gnu=$ac_cv_c_compiler_gnu
11288-
11289-if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
11290-  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
11291-else
11292-  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
11293-fi
11294-
11295-
11296-  else
11297-
11298-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O" >&5
11299-$as_echo_n "checking whether compiler supports -O... " >&6; }
11300-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
11301-T_APPEND_V=-O
11302-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
11303-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
11304-else
11305-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
11306-fi
11307-
11308-
11309-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
11310-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
11311-else
11312-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
11313-fi
11314-
11315-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
11316-/* end confdefs.h.  */
11317-
11318-
11319-int
11320-main ()
11321-{
11322-
11323-    return 0;
11324-
11325-  ;
11326-  return 0;
11327-}
11328-_ACEOF
11329-if ac_fn_c_try_compile "$LINENO"; then :
11330-  je_cv_cflags_added=-O
11331-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
11332-$as_echo "yes" >&6; }
11333-else
11334-  je_cv_cflags_added=
11335-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
11336-$as_echo "no" >&6; }
11337-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
11338-
11339-fi
11340-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
11341-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
11342-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
11343-else
11344-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
11345-fi
11346-
11347-
11348-
11349-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -O" >&5
11350-$as_echo_n "checking whether compiler supports -O... " >&6; }
11351-T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
11352-T_APPEND_V=-O
11353-  if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
11354-  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}${T_APPEND_V}"
11355-else
11356-  CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS} ${T_APPEND_V}"
11357-fi
11358-
11359-
11360-if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
11361-  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
11362-else
11363-  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
11364-fi
11365-
11366-ac_ext=cpp
11367-ac_cpp='$CXXCPP $CPPFLAGS'
11368-ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
11369-ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
11370-ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
11371-
11372-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
11373-/* end confdefs.h.  */
11374-
11375-
11376-int
11377-main ()
11378-{
11379-
11380-    return 0;
11381-
11382-  ;
11383-  return 0;
11384-}
11385-_ACEOF
11386-if ac_fn_cxx_try_compile "$LINENO"; then :
11387-  je_cv_cxxflags_added=-O
11388-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
11389-$as_echo "yes" >&6; }
11390-else
11391-  je_cv_cxxflags_added=
11392-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
11393-$as_echo "no" >&6; }
11394-              CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"
11395-
11396-fi
11397-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
11398-ac_ext=c
11399-ac_cpp='$CPP $CPPFLAGS'
11400-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
11401-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
11402-ac_compiler_gnu=$ac_cv_c_compiler_gnu
11403-
11404-if test "x${CONFIGURE_CXXFLAGS}" = "x" -o "x${SPECIFIED_CXXFLAGS}" = "x" ; then
11405-  CXXFLAGS="${CONFIGURE_CXXFLAGS}${SPECIFIED_CXXFLAGS}"
11406-else
11407-  CXXFLAGS="${CONFIGURE_CXXFLAGS} ${SPECIFIED_CXXFLAGS}"
11408-fi
11409-
11410-
11411-  fi
11412-fi
11413-
11414-# Check whether --enable-stats was given.
11415-if test "${enable_stats+set}" = set; then :
11416-  enableval=$enable_stats; if test "x$enable_stats" = "xno" ; then
11417-  enable_stats="0"
11418-else
11419-  enable_stats="1"
11420-fi
11421-
11422-else
11423-  enable_stats="1"
11424-
11425-fi
11426-
11427-if test "x$enable_stats" = "x1" ; then
11428-
11429-$as_echo "#define JEMALLOC_STATS  " >>confdefs.h
11430-
11431-fi
11432-
11433-
11434-# Check whether --enable-experimental_smallocx was given.
11435-if test "${enable_experimental_smallocx+set}" = set; then :
11436-  enableval=$enable_experimental_smallocx; if test "x$enable_experimental_smallocx" = "xno" ; then
11437-enable_experimental_smallocx="0"
11438-else
11439-enable_experimental_smallocx="1"
11440-fi
11441-
11442-else
11443-  enable_experimental_smallocx="0"
11444-
11445-fi
11446-
11447-if test "x$enable_experimental_smallocx" = "x1" ; then
11448-
11449-$as_echo "#define JEMALLOC_EXPERIMENTAL_SMALLOCX_API  " >>confdefs.h
11450-
11451-fi
11452-
11453-
11454-# Check whether --enable-prof was given.
11455-if test "${enable_prof+set}" = set; then :
11456-  enableval=$enable_prof; if test "x$enable_prof" = "xno" ; then
11457-  enable_prof="0"
11458-else
11459-  enable_prof="1"
11460-fi
11461-
11462-else
11463-  enable_prof="0"
11464-
11465-fi
11466-
11467-if test "x$enable_prof" = "x1" ; then
11468-  backtrace_method=""
11469-else
11470-  backtrace_method="N/A"
11471-fi
11472-
11473-# Check whether --enable-prof-libunwind was given.
11474-if test "${enable_prof_libunwind+set}" = set; then :
11475-  enableval=$enable_prof_libunwind; if test "x$enable_prof_libunwind" = "xno" ; then
11476-  enable_prof_libunwind="0"
11477-else
11478-  enable_prof_libunwind="1"
11479-  if test "x$enable_prof" = "x0" ; then
11480-    as_fn_error $? "--enable-prof-libunwind should only be used with --enable-prof" "$LINENO" 5
11481-  fi
11482-fi
11483-
11484-else
11485-  enable_prof_libunwind="0"
11486-
11487-fi
11488-
11489-
11490-# Check whether --with-static_libunwind was given.
11491-if test "${with_static_libunwind+set}" = set; then :
11492-  withval=$with_static_libunwind; if test "x$with_static_libunwind" = "xno" ; then
11493-  LUNWIND="-lunwind"
11494-else
11495-  if test ! -f "$with_static_libunwind" ; then
11496-    as_fn_error $? "Static libunwind not found: $with_static_libunwind" "$LINENO" 5
11497-  fi
11498-  LUNWIND="$with_static_libunwind"
11499-fi
11500-else
11501-  LUNWIND="-lunwind"
11502-
11503-fi
11504-
11505-if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then
11506-  for ac_header in libunwind.h
11507-do :
11508-  ac_fn_c_check_header_mongrel "$LINENO" "libunwind.h" "ac_cv_header_libunwind_h" "$ac_includes_default"
11509-if test "x$ac_cv_header_libunwind_h" = xyes; then :
11510-  cat >>confdefs.h <<_ACEOF
11511-#define HAVE_LIBUNWIND_H 1
11512-_ACEOF
11513-
11514-else
11515-  enable_prof_libunwind="0"
11516-fi
11517-
11518-done
11519-
11520-  if test "x$LUNWIND" = "x-lunwind" ; then
11521-    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for unw_backtrace in -lunwind" >&5
11522-$as_echo_n "checking for unw_backtrace in -lunwind... " >&6; }
11523-if ${ac_cv_lib_unwind_unw_backtrace+:} false; then :
11524-  $as_echo_n "(cached) " >&6
11525-else
11526-  ac_check_lib_save_LIBS=$LIBS
11527-LIBS="-lunwind  $LIBS"
11528-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
11529-/* end confdefs.h.  */
11530-
11531-/* Override any GCC internal prototype to avoid an error.
11532-   Use char because int might match the return type of a GCC
11533-   builtin and then its argument prototype would still apply.  */
11534-#ifdef __cplusplus
11535-extern "C"
11536-#endif
11537-char unw_backtrace ();
11538-int
11539-main ()
11540-{
11541-return unw_backtrace ();
11542-  ;
11543-  return 0;
11544-}
11545-_ACEOF
11546-if ac_fn_c_try_link "$LINENO"; then :
11547-  ac_cv_lib_unwind_unw_backtrace=yes
11548-else
11549-  ac_cv_lib_unwind_unw_backtrace=no
11550-fi
11551-rm -f core conftest.err conftest.$ac_objext \
11552-    conftest$ac_exeext conftest.$ac_ext
11553-LIBS=$ac_check_lib_save_LIBS
11554-fi
11555-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_unwind_unw_backtrace" >&5
11556-$as_echo "$ac_cv_lib_unwind_unw_backtrace" >&6; }
11557-if test "x$ac_cv_lib_unwind_unw_backtrace" = xyes; then :
11558-  T_APPEND_V=$LUNWIND
11559-  if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
11560-  LIBS="${LIBS}${T_APPEND_V}"
11561-else
11562-  LIBS="${LIBS} ${T_APPEND_V}"
11563-fi
11564-
11565-
11566-else
11567-  enable_prof_libunwind="0"
11568-fi
11569-
11570-  else
11571-    T_APPEND_V=$LUNWIND
11572-  if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
11573-  LIBS="${LIBS}${T_APPEND_V}"
11574-else
11575-  LIBS="${LIBS} ${T_APPEND_V}"
11576-fi
11577-
11578-
11579-  fi
11580-  if test "x${enable_prof_libunwind}" = "x1" ; then
11581-    backtrace_method="libunwind"
11582-
11583-$as_echo "#define JEMALLOC_PROF_LIBUNWIND  " >>confdefs.h
11584-
11585-  fi
11586-fi
11587-
11588-# Check whether --enable-prof-libgcc was given.
11589-if test "${enable_prof_libgcc+set}" = set; then :
11590-  enableval=$enable_prof_libgcc; if test "x$enable_prof_libgcc" = "xno" ; then
11591-  enable_prof_libgcc="0"
11592-else
11593-  enable_prof_libgcc="1"
11594-fi
11595-
11596-else
11597-  enable_prof_libgcc="1"
11598-
11599-fi
11600-
11601-if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \
11602-     -a "x$GCC" = "xyes" ; then
11603-  for ac_header in unwind.h
11604-do :
11605-  ac_fn_c_check_header_mongrel "$LINENO" "unwind.h" "ac_cv_header_unwind_h" "$ac_includes_default"
11606-if test "x$ac_cv_header_unwind_h" = xyes; then :
11607-  cat >>confdefs.h <<_ACEOF
11608-#define HAVE_UNWIND_H 1
11609-_ACEOF
11610-
11611-else
11612-  enable_prof_libgcc="0"
11613-fi
11614-
11615-done
11616-
11617-  if test "x${enable_prof_libgcc}" = "x1" ; then
11618-    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _Unwind_Backtrace in -lgcc" >&5
11619-$as_echo_n "checking for _Unwind_Backtrace in -lgcc... " >&6; }
11620-if ${ac_cv_lib_gcc__Unwind_Backtrace+:} false; then :
11621-  $as_echo_n "(cached) " >&6
11622-else
11623-  ac_check_lib_save_LIBS=$LIBS
11624-LIBS="-lgcc  $LIBS"
11625-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
11626-/* end confdefs.h.  */
11627-
11628-/* Override any GCC internal prototype to avoid an error.
11629-   Use char because int might match the return type of a GCC
11630-   builtin and then its argument prototype would still apply.  */
11631-#ifdef __cplusplus
11632-extern "C"
11633-#endif
11634-char _Unwind_Backtrace ();
11635-int
11636-main ()
11637-{
11638-return _Unwind_Backtrace ();
11639-  ;
11640-  return 0;
11641-}
11642-_ACEOF
11643-if ac_fn_c_try_link "$LINENO"; then :
11644-  ac_cv_lib_gcc__Unwind_Backtrace=yes
11645-else
11646-  ac_cv_lib_gcc__Unwind_Backtrace=no
11647-fi
11648-rm -f core conftest.err conftest.$ac_objext \
11649-    conftest$ac_exeext conftest.$ac_ext
11650-LIBS=$ac_check_lib_save_LIBS
11651-fi
11652-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_gcc__Unwind_Backtrace" >&5
11653-$as_echo "$ac_cv_lib_gcc__Unwind_Backtrace" >&6; }
11654-if test "x$ac_cv_lib_gcc__Unwind_Backtrace" = xyes; then :
11655-  T_APPEND_V=-lgcc
11656-  if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
11657-  LIBS="${LIBS}${T_APPEND_V}"
11658-else
11659-  LIBS="${LIBS} ${T_APPEND_V}"
11660-fi
11661-
11662-
11663-else
11664-  enable_prof_libgcc="0"
11665-fi
11666-
11667-  fi
11668-  if test "x${enable_prof_libgcc}" = "x1" ; then
11669-    backtrace_method="libgcc"
11670-
11671-$as_echo "#define JEMALLOC_PROF_LIBGCC  " >>confdefs.h
11672-
11673-  fi
11674-else
11675-  enable_prof_libgcc="0"
11676-fi
11677-
11678-# Check whether --enable-prof-gcc was given.
11679-if test "${enable_prof_gcc+set}" = set; then :
11680-  enableval=$enable_prof_gcc; if test "x$enable_prof_gcc" = "xno" ; then
11681-  enable_prof_gcc="0"
11682-else
11683-  enable_prof_gcc="1"
11684-fi
11685-
11686-else
11687-  enable_prof_gcc="1"
11688-
11689-fi
11690-
11691-if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \
11692-     -a "x$GCC" = "xyes" ; then
11693-
11694-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -fno-omit-frame-pointer" >&5
11695-$as_echo_n "checking whether compiler supports -fno-omit-frame-pointer... " >&6; }
11696-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
11697-T_APPEND_V=-fno-omit-frame-pointer
11698-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
11699-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
11700-else
11701-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
11702-fi
11703-
11704-
11705-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
11706-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
11707-else
11708-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
11709-fi
11710-
11711-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
11712-/* end confdefs.h.  */
11713-
11714-
11715-int
11716-main ()
11717-{
11718-
11719-    return 0;
11720-
11721-  ;
11722-  return 0;
11723-}
11724-_ACEOF
11725-if ac_fn_c_try_compile "$LINENO"; then :
11726-  je_cv_cflags_added=-fno-omit-frame-pointer
11727-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
11728-$as_echo "yes" >&6; }
11729-else
11730-  je_cv_cflags_added=
11731-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
11732-$as_echo "no" >&6; }
11733-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
11734-
11735-fi
11736-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
11737-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
11738-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
11739-else
11740-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
11741-fi
11742-
11743-
11744-  backtrace_method="gcc intrinsics"
11745-
11746-$as_echo "#define JEMALLOC_PROF_GCC  " >>confdefs.h
11747-
11748-else
11749-  enable_prof_gcc="0"
11750-fi
11751-
11752-if test "x$backtrace_method" = "x" ; then
11753-  backtrace_method="none (disabling profiling)"
11754-  enable_prof="0"
11755-fi
11756-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking configured backtracing method" >&5
11757-$as_echo_n "checking configured backtracing method... " >&6; }
11758-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $backtrace_method" >&5
11759-$as_echo "$backtrace_method" >&6; }
11760-if test "x$enable_prof" = "x1" ; then
11761-    T_APPEND_V=$LM
11762-  if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
11763-  LIBS="${LIBS}${T_APPEND_V}"
11764-else
11765-  LIBS="${LIBS} ${T_APPEND_V}"
11766-fi
11767-
11768-
11769-
11770-
11771-$as_echo "#define JEMALLOC_PROF  " >>confdefs.h
11772-
11773-fi
11774-
11775-
11776-if test "x${maps_coalesce}" = "x1" ; then
11777-
11778-$as_echo "#define JEMALLOC_MAPS_COALESCE  " >>confdefs.h
11779-
11780-fi
11781-
11782-if test "x$default_retain" = "x1" ; then
11783-
11784-$as_echo "#define JEMALLOC_RETAIN  " >>confdefs.h
11785-
11786-fi
11787-
11788-if test "x$zero_realloc_default_free" = "x1" ; then
11789-
11790-$as_echo "#define JEMALLOC_ZERO_REALLOC_DEFAULT_FREE  " >>confdefs.h
11791-
11792-fi
11793-
11794-have_dss="1"
11795-ac_fn_c_check_func "$LINENO" "sbrk" "ac_cv_func_sbrk"
11796-if test "x$ac_cv_func_sbrk" = xyes; then :
11797-  have_sbrk="1"
11798-else
11799-  have_sbrk="0"
11800-fi
11801-
11802-if test "x$have_sbrk" = "x1" ; then
11803-  if test "x$sbrk_deprecated" = "x1" ; then
11804-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: Disabling dss allocation because sbrk is deprecated" >&5
11805-$as_echo "Disabling dss allocation because sbrk is deprecated" >&6; }
11806-    have_dss="0"
11807-  fi
11808-else
11809-  have_dss="0"
11810-fi
11811-
11812-if test "x$have_dss" = "x1" ; then
11813-
11814-$as_echo "#define JEMALLOC_DSS  " >>confdefs.h
11815-
11816-fi
11817-
11818-# Check whether --enable-fill was given.
11819-if test "${enable_fill+set}" = set; then :
11820-  enableval=$enable_fill; if test "x$enable_fill" = "xno" ; then
11821-  enable_fill="0"
11822-else
11823-  enable_fill="1"
11824-fi
11825-
11826-else
11827-  enable_fill="1"
11828-
11829-fi
11830-
11831-if test "x$enable_fill" = "x1" ; then
11832-
11833-$as_echo "#define JEMALLOC_FILL  " >>confdefs.h
11834-
11835-fi
11836-
11837-
11838-# Check whether --enable-utrace was given.
11839-if test "${enable_utrace+set}" = set; then :
11840-  enableval=$enable_utrace; if test "x$enable_utrace" = "xno" ; then
11841-  enable_utrace="0"
11842-else
11843-  enable_utrace="1"
11844-fi
11845-
11846-else
11847-  enable_utrace="0"
11848-
11849-fi
11850-
11851-
11852-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether utrace(2) is compilable" >&5
11853-$as_echo_n "checking whether utrace(2) is compilable... " >&6; }
11854-if ${je_cv_utrace+:} false; then :
11855-  $as_echo_n "(cached) " >&6
11856-else
11857-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
11858-/* end confdefs.h.  */
11859-
11860-#include <sys/types.h>
11861-#include <sys/param.h>
11862-#include <sys/time.h>
11863-#include <sys/uio.h>
11864-#include <sys/ktrace.h>
11865-
11866-int
11867-main ()
11868-{
11869-
11870-	utrace((void *)0, 0);
11871-
11872-  ;
11873-  return 0;
11874-}
11875-_ACEOF
11876-if ac_fn_c_try_link "$LINENO"; then :
11877-  je_cv_utrace=yes
11878-else
11879-  je_cv_utrace=no
11880-fi
11881-rm -f core conftest.err conftest.$ac_objext \
11882-    conftest$ac_exeext conftest.$ac_ext
11883-fi
11884-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_utrace" >&5
11885-$as_echo "$je_cv_utrace" >&6; }
11886-
11887-if test "x${je_cv_utrace}" = "xno" ; then
11888-
11889-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether utrace(2) with label is compilable" >&5
11890-$as_echo_n "checking whether utrace(2) with label is compilable... " >&6; }
11891-if ${je_cv_utrace_label+:} false; then :
11892-  $as_echo_n "(cached) " >&6
11893-else
11894-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
11895-/* end confdefs.h.  */
11896-
11897-  #include <sys/types.h>
11898-  #include <sys/param.h>
11899-  #include <sys/time.h>
11900-  #include <sys/uio.h>
11901-  #include <sys/ktrace.h>
11902-
11903-int
11904-main ()
11905-{
11906-
11907-	  utrace((void *)0, (void *)0, 0);
11908-
11909-  ;
11910-  return 0;
11911-}
11912-_ACEOF
11913-if ac_fn_c_try_link "$LINENO"; then :
11914-  je_cv_utrace_label=yes
11915-else
11916-  je_cv_utrace_label=no
11917-fi
11918-rm -f core conftest.err conftest.$ac_objext \
11919-    conftest$ac_exeext conftest.$ac_ext
11920-fi
11921-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_utrace_label" >&5
11922-$as_echo "$je_cv_utrace_label" >&6; }
11923-
11924-  if test "x${je_cv_utrace_label}" = "xno"; then
11925-    enable_utrace="0"
11926-  fi
11927-  if test "x$enable_utrace" = "x1" ; then
11928-
11929-$as_echo "#define JEMALLOC_UTRACE_LABEL  " >>confdefs.h
11930-
11931-  fi
11932-else
11933-  if test "x$enable_utrace" = "x1" ; then
11934-
11935-$as_echo "#define JEMALLOC_UTRACE  " >>confdefs.h
11936-
11937-  fi
11938-fi
11939-
11940-
11941-# Check whether --enable-xmalloc was given.
11942-if test "${enable_xmalloc+set}" = set; then :
11943-  enableval=$enable_xmalloc; if test "x$enable_xmalloc" = "xno" ; then
11944-  enable_xmalloc="0"
11945-else
11946-  enable_xmalloc="1"
11947-fi
11948-
11949-else
11950-  enable_xmalloc="0"
11951-
11952-fi
11953-
11954-if test "x$enable_xmalloc" = "x1" ; then
11955-
11956-$as_echo "#define JEMALLOC_XMALLOC  " >>confdefs.h
11957-
11958-fi
11959-
11960-
11961-# Check whether --enable-cache-oblivious was given.
11962-if test "${enable_cache_oblivious+set}" = set; then :
11963-  enableval=$enable_cache_oblivious; if test "x$enable_cache_oblivious" = "xno" ; then
11964-  enable_cache_oblivious="0"
11965-else
11966-  enable_cache_oblivious="1"
11967-fi
11968-
11969-else
11970-  enable_cache_oblivious="1"
11971-
11972-fi
11973-
11974-if test "x$enable_cache_oblivious" = "x1" ; then
11975-
11976-$as_echo "#define JEMALLOC_CACHE_OBLIVIOUS  " >>confdefs.h
11977-
11978-fi
11979-
11980-
11981-# Check whether --enable-log was given.
11982-if test "${enable_log+set}" = set; then :
11983-  enableval=$enable_log; if test "x$enable_log" = "xno" ; then
11984-  enable_log="0"
11985-else
11986-  enable_log="1"
11987-fi
11988-
11989-else
11990-  enable_log="0"
11991-
11992-fi
11993-
11994-if test "x$enable_log" = "x1" ; then
11995-
11996-$as_echo "#define JEMALLOC_LOG  " >>confdefs.h
11997-
11998-fi
11999-
12000-
12001-# Check whether --enable-readlinkat was given.
12002-if test "${enable_readlinkat+set}" = set; then :
12003-  enableval=$enable_readlinkat; if test "x$enable_readlinkat" = "xno" ; then
12004-  enable_readlinkat="0"
12005-else
12006-  enable_readlinkat="1"
12007-fi
12008-
12009-else
12010-  enable_readlinkat="0"
12011-
12012-fi
12013-
12014-if test "x$enable_readlinkat" = "x1" ; then
12015-
12016-$as_echo "#define JEMALLOC_READLINKAT  " >>confdefs.h
12017-
12018-fi
12019-
12020-
12021-# Check whether --enable-opt-safety-checks was given.
12022-if test "${enable_opt_safety_checks+set}" = set; then :
12023-  enableval=$enable_opt_safety_checks; if test "x$enable_opt_safety_checks" = "xno" ; then
12024-  enable_opt_safety_checks="0"
12025-else
12026-  enable_opt_safety_checks="1"
12027-fi
12028-
12029-else
12030-  enable_opt_safety_checks="0"
12031-
12032-fi
12033-
12034-if test "x$enable_opt_safety_checks" = "x1" ; then
12035-
12036-$as_echo "#define JEMALLOC_OPT_SAFETY_CHECKS  " >>confdefs.h
12037-
12038-fi
12039-
12040-
12041-# Check whether --enable-opt-size-checks was given.
12042-if test "${enable_opt_size_checks+set}" = set; then :
12043-  enableval=$enable_opt_size_checks; if test "x$enable_opt_size_checks" = "xno" ; then
12044-  enable_opt_size_checks="0"
12045-else
12046-  enable_opt_size_checks="1"
12047-fi
12048-
12049-else
12050-  enable_opt_size_checks="0"
12051-
12052-fi
12053-
12054-if test "x$enable_opt_size_checks" = "x1" ; then
12055-
12056-$as_echo "#define JEMALLOC_OPT_SIZE_CHECKS  " >>confdefs.h
12057-
12058-fi
12059-
12060-
12061-# Check whether --enable-uaf-detection was given.
12062-if test "${enable_uaf_detection+set}" = set; then :
12063-  enableval=$enable_uaf_detection; if test "x$enable_uaf_detection" = "xno" ; then
12064-  enable_uaf_detection="0"
12065-else
12066-  enable_uaf_detection="1"
12067-fi
12068-
12069-else
12070-  enable_uaf_detection="0"
12071-
12072-fi
12073-
12074-if test "x$enable_uaf_detection" = "x1" ; then
12075-  $as_echo "#define JEMALLOC_UAF_DETECTION  " >>confdefs.h
12076-
12077-fi
12078-
12079-
12080-
12081-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_unreachable is compilable" >&5
12082-$as_echo_n "checking whether a program using __builtin_unreachable is compilable... " >&6; }
12083-if ${je_cv_gcc_builtin_unreachable+:} false; then :
12084-  $as_echo_n "(cached) " >&6
12085-else
12086-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
12087-/* end confdefs.h.  */
12088-
12089-void foo (void) {
12090-  __builtin_unreachable();
12091-}
12092-
12093-int
12094-main ()
12095-{
12096-
12097-	{
12098-		foo();
12099-	}
12100-
12101-  ;
12102-  return 0;
12103-}
12104-_ACEOF
12105-if ac_fn_c_try_link "$LINENO"; then :
12106-  je_cv_gcc_builtin_unreachable=yes
12107-else
12108-  je_cv_gcc_builtin_unreachable=no
12109-fi
12110-rm -f core conftest.err conftest.$ac_objext \
12111-    conftest$ac_exeext conftest.$ac_ext
12112-fi
12113-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_builtin_unreachable" >&5
12114-$as_echo "$je_cv_gcc_builtin_unreachable" >&6; }
12115-
12116-if test "x${je_cv_gcc_builtin_unreachable}" = "xyes" ; then
12117-
12118-$as_echo "#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable" >>confdefs.h
12119-
12120-else
12121-
12122-$as_echo "#define JEMALLOC_INTERNAL_UNREACHABLE abort" >>confdefs.h
12123-
12124-fi
12125-
12126-
12127-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_ffsl is compilable" >&5
12128-$as_echo_n "checking whether a program using __builtin_ffsl is compilable... " >&6; }
12129-if ${je_cv_gcc_builtin_ffsl+:} false; then :
12130-  $as_echo_n "(cached) " >&6
12131-else
12132-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
12133-/* end confdefs.h.  */
12134-
12135-#include <stdio.h>
12136-#include <strings.h>
12137-#include <string.h>
12138-
12139-int
12140-main ()
12141-{
12142-
12143-	{
12144-		int rv = __builtin_ffsl(0x08);
12145-		printf("%d\n", rv);
12146-	}
12147-
12148-  ;
12149-  return 0;
12150-}
12151-_ACEOF
12152-if ac_fn_c_try_link "$LINENO"; then :
12153-  je_cv_gcc_builtin_ffsl=yes
12154-else
12155-  je_cv_gcc_builtin_ffsl=no
12156-fi
12157-rm -f core conftest.err conftest.$ac_objext \
12158-    conftest$ac_exeext conftest.$ac_ext
12159-fi
12160-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_builtin_ffsl" >&5
12161-$as_echo "$je_cv_gcc_builtin_ffsl" >&6; }
12162-
12163-if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then
12164-
12165-$as_echo "#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll" >>confdefs.h
12166-
12167-
12168-$as_echo "#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl" >>confdefs.h
12169-
12170-
12171-$as_echo "#define JEMALLOC_INTERNAL_FFS __builtin_ffs" >>confdefs.h
12172-
12173-else
12174-
12175-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using ffsl is compilable" >&5
12176-$as_echo_n "checking whether a program using ffsl is compilable... " >&6; }
12177-if ${je_cv_function_ffsl+:} false; then :
12178-  $as_echo_n "(cached) " >&6
12179-else
12180-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
12181-/* end confdefs.h.  */
12182-
12183-  #include <stdio.h>
12184-  #include <strings.h>
12185-  #include <string.h>
12186-
12187-int
12188-main ()
12189-{
12190-
12191-	{
12192-		int rv = ffsl(0x08);
12193-		printf("%d\n", rv);
12194-	}
12195-
12196-  ;
12197-  return 0;
12198-}
12199-_ACEOF
12200-if ac_fn_c_try_link "$LINENO"; then :
12201-  je_cv_function_ffsl=yes
12202-else
12203-  je_cv_function_ffsl=no
12204-fi
12205-rm -f core conftest.err conftest.$ac_objext \
12206-    conftest$ac_exeext conftest.$ac_ext
12207-fi
12208-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_function_ffsl" >&5
12209-$as_echo "$je_cv_function_ffsl" >&6; }
12210-
12211-  if test "x${je_cv_function_ffsl}" = "xyes" ; then
12212-
12213-$as_echo "#define JEMALLOC_INTERNAL_FFSLL ffsll" >>confdefs.h
12214-
12215-
12216-$as_echo "#define JEMALLOC_INTERNAL_FFSL ffsl" >>confdefs.h
12217-
12218-
12219-$as_echo "#define JEMALLOC_INTERNAL_FFS ffs" >>confdefs.h
12220-
12221-  else
12222-    as_fn_error $? "Cannot build without ffsl(3) or __builtin_ffsl()" "$LINENO" 5
12223-  fi
12224-fi
12225-
12226-
12227-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program using __builtin_popcountl is compilable" >&5
12228-$as_echo_n "checking whether a program using __builtin_popcountl is compilable... " >&6; }
12229-if ${je_cv_gcc_builtin_popcountl+:} false; then :
12230-  $as_echo_n "(cached) " >&6
12231-else
12232-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
12233-/* end confdefs.h.  */
12234-
12235-#include <stdio.h>
12236-#include <strings.h>
12237-#include <string.h>
12238-
12239-int
12240-main ()
12241-{
12242-
12243-	{
12244-		int rv = __builtin_popcountl(0x08);
12245-		printf("%d\n", rv);
12246-	}
12247-
12248-  ;
12249-  return 0;
12250-}
12251-_ACEOF
12252-if ac_fn_c_try_link "$LINENO"; then :
12253-  je_cv_gcc_builtin_popcountl=yes
12254-else
12255-  je_cv_gcc_builtin_popcountl=no
12256-fi
12257-rm -f core conftest.err conftest.$ac_objext \
12258-    conftest$ac_exeext conftest.$ac_ext
12259-fi
12260-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_builtin_popcountl" >&5
12261-$as_echo "$je_cv_gcc_builtin_popcountl" >&6; }
12262-
12263-if test "x${je_cv_gcc_builtin_popcountl}" = "xyes" ; then
12264-
12265-$as_echo "#define JEMALLOC_INTERNAL_POPCOUNT __builtin_popcount" >>confdefs.h
12266-
12267-
12268-$as_echo "#define JEMALLOC_INTERNAL_POPCOUNTL __builtin_popcountl" >>confdefs.h
12269-
12270-
12271-$as_echo "#define JEMALLOC_INTERNAL_POPCOUNTLL __builtin_popcountll" >>confdefs.h
12272-
12273-fi
12274-
12275-
12276-# Check whether --with-lg_quantum was given.
12277-if test "${with_lg_quantum+set}" = set; then :
12278-  withval=$with_lg_quantum;
12279-fi
12280-
12281-if test "x$with_lg_quantum" != "x" ; then
12282-
12283-cat >>confdefs.h <<_ACEOF
12284-#define LG_QUANTUM $with_lg_quantum
12285-_ACEOF
12286-
12287-fi
12288-
12289-
12290-# Check whether --with-lg_slab_maxregs was given.
12291-if test "${with_lg_slab_maxregs+set}" = set; then :
12292-  withval=$with_lg_slab_maxregs; CONFIG_LG_SLAB_MAXREGS="with_lg_slab_maxregs"
12293-else
12294-  CONFIG_LG_SLAB_MAXREGS=""
12295-fi
12296-
12297-if test "x$with_lg_slab_maxregs" != "x" ; then
12298-
12299-cat >>confdefs.h <<_ACEOF
12300-#define CONFIG_LG_SLAB_MAXREGS $with_lg_slab_maxregs
12301-_ACEOF
12302-
12303-fi
12304-
12305-
12306-# Check whether --with-lg_page was given.
12307-if test "${with_lg_page+set}" = set; then :
12308-  withval=$with_lg_page; LG_PAGE="$with_lg_page"
12309-else
12310-  LG_PAGE="detect"
12311-fi
12312-
12313-case "${host}" in
12314-  aarch64-apple-darwin*)
12315-                  if test "x${host}" != "x${build}" -a "x$LG_PAGE" = "xdetect"; then
12316-        LG_PAGE=14
12317-      fi
12318-      ;;
12319-esac
12320-if test "x$LG_PAGE" = "xdetect"; then
12321-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking LG_PAGE" >&5
12322-$as_echo_n "checking LG_PAGE... " >&6; }
12323-if ${je_cv_lg_page+:} false; then :
12324-  $as_echo_n "(cached) " >&6
12325-else
12326-  if test "$cross_compiling" = yes; then :
12327-  je_cv_lg_page=12
12328-else
12329-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
12330-/* end confdefs.h.  */
12331-
12332-#include <strings.h>
12333-#ifdef _WIN32
12334-#include <windows.h>
12335-#else
12336-#include <unistd.h>
12337-#endif
12338-#include <stdio.h>
12339-
12340-int
12341-main ()
12342-{
12343-
12344-    int result;
12345-    FILE *f;
12346-
12347-#ifdef _WIN32
12348-    SYSTEM_INFO si;
12349-    GetSystemInfo(&si);
12350-    result = si.dwPageSize;
12351-#else
12352-    result = sysconf(_SC_PAGESIZE);
12353-#endif
12354-    if (result == -1) {
12355-	return 1;
12356-    }
12357-    result = JEMALLOC_INTERNAL_FFSL(result) - 1;
12358-
12359-    f = fopen("conftest.out", "w");
12360-    if (f == NULL) {
12361-	return 1;
12362-    }
12363-    fprintf(f, "%d", result);
12364-    fclose(f);
12365-
12366-    return 0;
12367-
12368-  ;
12369-  return 0;
12370-}
12371-_ACEOF
12372-if ac_fn_c_try_run "$LINENO"; then :
12373-  je_cv_lg_page=`cat conftest.out`
12374-else
12375-  je_cv_lg_page=undefined
12376-fi
12377-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
12378-  conftest.$ac_objext conftest.beam conftest.$ac_ext
12379-fi
12380-
12381-fi
12382-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_lg_page" >&5
12383-$as_echo "$je_cv_lg_page" >&6; }
12384-fi
12385-if test "x${je_cv_lg_page}" != "x" ; then
12386-  LG_PAGE="${je_cv_lg_page}"
12387-fi
12388-if test "x${LG_PAGE}" != "xundefined" ; then
12389-
12390-cat >>confdefs.h <<_ACEOF
12391-#define LG_PAGE $LG_PAGE
12392-_ACEOF
12393-
12394-else
12395-   as_fn_error $? "cannot determine value for LG_PAGE" "$LINENO" 5
12396-fi
12397-
12398-
12399-# Check whether --with-lg_hugepage was given.
12400-if test "${with_lg_hugepage+set}" = set; then :
12401-  withval=$with_lg_hugepage; je_cv_lg_hugepage="${with_lg_hugepage}"
12402-else
12403-  je_cv_lg_hugepage=""
12404-fi
12405-
12406-if test "x${je_cv_lg_hugepage}" = "x" ; then
12407-          if test -e "/proc/meminfo" ; then
12408-    hpsk=`cat /proc/meminfo 2>/dev/null | \
12409-          grep -e '^Hugepagesize:[[:space:]]\+[0-9]\+[[:space:]]kB$' | \
12410-          awk '{print $2}'`
12411-    if test "x${hpsk}" != "x" ; then
12412-      je_cv_lg_hugepage=10
12413-      while test "${hpsk}" -gt 1 ; do
12414-        hpsk="$((hpsk / 2))"
12415-        je_cv_lg_hugepage="$((je_cv_lg_hugepage + 1))"
12416-      done
12417-    fi
12418-  fi
12419-
12420-    if test "x${je_cv_lg_hugepage}" = "x" ; then
12421-    je_cv_lg_hugepage=21
12422-  fi
12423-fi
12424-if test "x${LG_PAGE}" != "xundefined" -a \
12425-        "${je_cv_lg_hugepage}" -lt "${LG_PAGE}" ; then
12426-  as_fn_error $? "Huge page size (2^${je_cv_lg_hugepage}) must be at least page size (2^${LG_PAGE})" "$LINENO" 5
12427-fi
12428-
12429-cat >>confdefs.h <<_ACEOF
12430-#define LG_HUGEPAGE ${je_cv_lg_hugepage}
12431-_ACEOF
12432-
12433-
12434-# Check whether --enable-libdl was given.
12435-if test "${enable_libdl+set}" = set; then :
12436-  enableval=$enable_libdl; if test "x$enable_libdl" = "xno" ; then
12437-  enable_libdl="0"
12438-else
12439-  enable_libdl="1"
12440-fi
12441-
12442-else
12443-  enable_libdl="1"
12444-
12445-fi
12446-
12447-
12448-
12449-
12450-if test "x$abi" != "xpecoff" ; then
12451-
12452-$as_echo "#define JEMALLOC_HAVE_PTHREAD  " >>confdefs.h
12453-
12454-  for ac_header in pthread.h
12455-do :
12456-  ac_fn_c_check_header_mongrel "$LINENO" "pthread.h" "ac_cv_header_pthread_h" "$ac_includes_default"
12457-if test "x$ac_cv_header_pthread_h" = xyes; then :
12458-  cat >>confdefs.h <<_ACEOF
12459-#define HAVE_PTHREAD_H 1
12460-_ACEOF
12461-
12462-else
12463-  as_fn_error $? "pthread.h is missing" "$LINENO" 5
12464-fi
12465-
12466-done
12467-
12468-      { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lpthread" >&5
12469-$as_echo_n "checking for pthread_create in -lpthread... " >&6; }
12470-if ${ac_cv_lib_pthread_pthread_create+:} false; then :
12471-  $as_echo_n "(cached) " >&6
12472-else
12473-  ac_check_lib_save_LIBS=$LIBS
12474-LIBS="-lpthread  $LIBS"
12475-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
12476-/* end confdefs.h.  */
12477-
12478-/* Override any GCC internal prototype to avoid an error.
12479-   Use char because int might match the return type of a GCC
12480-   builtin and then its argument prototype would still apply.  */
12481-#ifdef __cplusplus
12482-extern "C"
12483-#endif
12484-char pthread_create ();
12485-int
12486-main ()
12487-{
12488-return pthread_create ();
12489-  ;
12490-  return 0;
12491-}
12492-_ACEOF
12493-if ac_fn_c_try_link "$LINENO"; then :
12494-  ac_cv_lib_pthread_pthread_create=yes
12495-else
12496-  ac_cv_lib_pthread_pthread_create=no
12497-fi
12498-rm -f core conftest.err conftest.$ac_objext \
12499-    conftest$ac_exeext conftest.$ac_ext
12500-LIBS=$ac_check_lib_save_LIBS
12501-fi
12502-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pthread_pthread_create" >&5
12503-$as_echo "$ac_cv_lib_pthread_pthread_create" >&6; }
12504-if test "x$ac_cv_lib_pthread_pthread_create" = xyes; then :
12505-  T_APPEND_V=-pthread
12506-  if test "x${LIBS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
12507-  LIBS="${LIBS}${T_APPEND_V}"
12508-else
12509-  LIBS="${LIBS} ${T_APPEND_V}"
12510-fi
12511-
12512-
12513-else
12514-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing pthread_create" >&5
12515-$as_echo_n "checking for library containing pthread_create... " >&6; }
12516-if ${ac_cv_search_pthread_create+:} false; then :
12517-  $as_echo_n "(cached) " >&6
12518-else
12519-  ac_func_search_save_LIBS=$LIBS
12520-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
12521-/* end confdefs.h.  */
12522-
12523-/* Override any GCC internal prototype to avoid an error.
12524-   Use char because int might match the return type of a GCC
12525-   builtin and then its argument prototype would still apply.  */
12526-#ifdef __cplusplus
12527-extern "C"
12528-#endif
12529-char pthread_create ();
12530-int
12531-main ()
12532-{
12533-return pthread_create ();
12534-  ;
12535-  return 0;
12536-}
12537-_ACEOF
12538-for ac_lib in '' ; do
12539-  if test -z "$ac_lib"; then
12540-    ac_res="none required"
12541-  else
12542-    ac_res=-l$ac_lib
12543-    LIBS="-l$ac_lib  $ac_func_search_save_LIBS"
12544-  fi
12545-  if ac_fn_c_try_link "$LINENO"; then :
12546-  ac_cv_search_pthread_create=$ac_res
12547-fi
12548-rm -f core conftest.err conftest.$ac_objext \
12549-    conftest$ac_exeext
12550-  if ${ac_cv_search_pthread_create+:} false; then :
12551-  break
12552-fi
12553-done
12554-if ${ac_cv_search_pthread_create+:} false; then :
12555-
12556-else
12557-  ac_cv_search_pthread_create=no
12558-fi
12559-rm conftest.$ac_ext
12560-LIBS=$ac_func_search_save_LIBS
12561-fi
12562-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_pthread_create" >&5
12563-$as_echo "$ac_cv_search_pthread_create" >&6; }
12564-ac_res=$ac_cv_search_pthread_create
12565-if test "$ac_res" != no; then :
12566-  test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
12567-
12568-else
12569-  as_fn_error $? "libpthread is missing" "$LINENO" 5
12570-fi
12571-
12572-fi
12573-
12574-  wrap_syms="${wrap_syms} pthread_create"
12575-  have_pthread="1"
12576-
12577-  if test "x$enable_libdl" = "x1" ; then
12578-    have_dlsym="1"
12579-    for ac_header in dlfcn.h
12580-do :
12581-  ac_fn_c_check_header_mongrel "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default"
12582-if test "x$ac_cv_header_dlfcn_h" = xyes; then :
12583-  cat >>confdefs.h <<_ACEOF
12584-#define HAVE_DLFCN_H 1
12585-_ACEOF
12586- ac_fn_c_check_func "$LINENO" "dlsym" "ac_cv_func_dlsym"
12587-if test "x$ac_cv_func_dlsym" = xyes; then :
12588-
12589-else
12590-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlsym in -ldl" >&5
12591-$as_echo_n "checking for dlsym in -ldl... " >&6; }
12592-if ${ac_cv_lib_dl_dlsym+:} false; then :
12593-  $as_echo_n "(cached) " >&6
12594-else
12595-  ac_check_lib_save_LIBS=$LIBS
12596-LIBS="-ldl  $LIBS"
12597-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
12598-/* end confdefs.h.  */
12599-
12600-/* Override any GCC internal prototype to avoid an error.
12601-   Use char because int might match the return type of a GCC
12602-   builtin and then its argument prototype would still apply.  */
12603-#ifdef __cplusplus
12604-extern "C"
12605-#endif
12606-char dlsym ();
12607-int
12608-main ()
12609-{
12610-return dlsym ();
12611-  ;
12612-  return 0;
12613-}
12614-_ACEOF
12615-if ac_fn_c_try_link "$LINENO"; then :
12616-  ac_cv_lib_dl_dlsym=yes
12617-else
12618-  ac_cv_lib_dl_dlsym=no
12619-fi
12620-rm -f core conftest.err conftest.$ac_objext \
12621-    conftest$ac_exeext conftest.$ac_ext
12622-LIBS=$ac_check_lib_save_LIBS
12623-fi
12624-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlsym" >&5
12625-$as_echo "$ac_cv_lib_dl_dlsym" >&6; }
12626-if test "x$ac_cv_lib_dl_dlsym" = xyes; then :
12627-  LIBS="$LIBS -ldl"
12628-else
12629-  have_dlsym="0"
12630-fi
12631-
12632-fi
12633-
12634-else
12635-  have_dlsym="0"
12636-fi
12637-
12638-done
12639-
12640-    if test "x$have_dlsym" = "x1" ; then
12641-
12642-$as_echo "#define JEMALLOC_HAVE_DLSYM  " >>confdefs.h
12643-
12644-    fi
12645-  else
12646-    have_dlsym="0"
12647-  fi
12648-
12649-
12650-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthread_atfork(3) is compilable" >&5
12651-$as_echo_n "checking whether pthread_atfork(3) is compilable... " >&6; }
12652-if ${je_cv_pthread_atfork+:} false; then :
12653-  $as_echo_n "(cached) " >&6
12654-else
12655-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
12656-/* end confdefs.h.  */
12657-
12658-#include <pthread.h>
12659-
12660-int
12661-main ()
12662-{
12663-
12664-  pthread_atfork((void *)0, (void *)0, (void *)0);
12665-
12666-  ;
12667-  return 0;
12668-}
12669-_ACEOF
12670-if ac_fn_c_try_link "$LINENO"; then :
12671-  je_cv_pthread_atfork=yes
12672-else
12673-  je_cv_pthread_atfork=no
12674-fi
12675-rm -f core conftest.err conftest.$ac_objext \
12676-    conftest$ac_exeext conftest.$ac_ext
12677-fi
12678-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_atfork" >&5
12679-$as_echo "$je_cv_pthread_atfork" >&6; }
12680-
12681-  if test "x${je_cv_pthread_atfork}" = "xyes" ; then
12682-
12683-$as_echo "#define JEMALLOC_HAVE_PTHREAD_ATFORK  " >>confdefs.h
12684-
12685-  fi
12686-
12687-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthread_setname_np(3) is compilable" >&5
12688-$as_echo_n "checking whether pthread_setname_np(3) is compilable... " >&6; }
12689-if ${je_cv_pthread_setname_np+:} false; then :
12690-  $as_echo_n "(cached) " >&6
12691-else
12692-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
12693-/* end confdefs.h.  */
12694-
12695-#include <pthread.h>
12696-
12697-int
12698-main ()
12699-{
12700-
12701-  pthread_setname_np(pthread_self(), "setname_test");
12702-
12703-  ;
12704-  return 0;
12705-}
12706-_ACEOF
12707-if ac_fn_c_try_link "$LINENO"; then :
12708-  je_cv_pthread_setname_np=yes
12709-else
12710-  je_cv_pthread_setname_np=no
12711-fi
12712-rm -f core conftest.err conftest.$ac_objext \
12713-    conftest$ac_exeext conftest.$ac_ext
12714-fi
12715-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_setname_np" >&5
12716-$as_echo "$je_cv_pthread_setname_np" >&6; }
12717-
12718-  if test "x${je_cv_pthread_setname_np}" = "xyes" ; then
12719-
12720-$as_echo "#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP  " >>confdefs.h
12721-
12722-  fi
12723-
12724-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthread_getname_np(3) is compilable" >&5
12725-$as_echo_n "checking whether pthread_getname_np(3) is compilable... " >&6; }
12726-if ${je_cv_pthread_getname_np+:} false; then :
12727-  $as_echo_n "(cached) " >&6
12728-else
12729-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
12730-/* end confdefs.h.  */
12731-
12732-#include <pthread.h>
12733-#include <stdlib.h>
12734-
12735-int
12736-main ()
12737-{
12738-
12739-  {
12740-  	char *name = malloc(16);
12741-  	pthread_getname_np(pthread_self(), name, 16);
12742-	free(name);
12743-  }
12744-
12745-  ;
12746-  return 0;
12747-}
12748-_ACEOF
12749-if ac_fn_c_try_link "$LINENO"; then :
12750-  je_cv_pthread_getname_np=yes
12751-else
12752-  je_cv_pthread_getname_np=no
12753-fi
12754-rm -f core conftest.err conftest.$ac_objext \
12755-    conftest$ac_exeext conftest.$ac_ext
12756-fi
12757-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_getname_np" >&5
12758-$as_echo "$je_cv_pthread_getname_np" >&6; }
12759-
12760-  if test "x${je_cv_pthread_getname_np}" = "xyes" ; then
12761-
12762-$as_echo "#define JEMALLOC_HAVE_PTHREAD_GETNAME_NP  " >>confdefs.h
12763-
12764-  fi
12765-
12766-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthread_get_name_np(3) is compilable" >&5
12767-$as_echo_n "checking whether pthread_get_name_np(3) is compilable... " >&6; }
12768-if ${je_cv_pthread_get_name_np+:} false; then :
12769-  $as_echo_n "(cached) " >&6
12770-else
12771-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
12772-/* end confdefs.h.  */
12773-
12774-#include <pthread.h>
12775-#include <pthread_np.h>
12776-#include <stdlib.h>
12777-
12778-int
12779-main ()
12780-{
12781-
12782-  {
12783-  	char *name = malloc(16);
12784-  	pthread_get_name_np(pthread_self(), name, 16);
12785-	free(name);
12786-  }
12787-
12788-  ;
12789-  return 0;
12790-}
12791-_ACEOF
12792-if ac_fn_c_try_link "$LINENO"; then :
12793-  je_cv_pthread_get_name_np=yes
12794-else
12795-  je_cv_pthread_get_name_np=no
12796-fi
12797-rm -f core conftest.err conftest.$ac_objext \
12798-    conftest$ac_exeext conftest.$ac_ext
12799-fi
12800-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_get_name_np" >&5
12801-$as_echo "$je_cv_pthread_get_name_np" >&6; }
12802-
12803-  if test "x${je_cv_pthread_get_name_np}" = "xyes" ; then
12804-
12805-$as_echo "#define JEMALLOC_HAVE_PTHREAD_GET_NAME_NP  " >>confdefs.h
12806-
12807-  fi
12808-fi
12809-
12810-T_APPEND_V=-D_REENTRANT
12811-  if test "x${CPPFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
12812-  CPPFLAGS="${CPPFLAGS}${T_APPEND_V}"
12813-else
12814-  CPPFLAGS="${CPPFLAGS} ${T_APPEND_V}"
12815-fi
12816-
12817-
12818-
12819-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5
12820-$as_echo_n "checking for library containing clock_gettime... " >&6; }
12821-if ${ac_cv_search_clock_gettime+:} false; then :
12822-  $as_echo_n "(cached) " >&6
12823-else
12824-  ac_func_search_save_LIBS=$LIBS
12825-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
12826-/* end confdefs.h.  */
12827-
12828-/* Override any GCC internal prototype to avoid an error.
12829-   Use char because int might match the return type of a GCC
12830-   builtin and then its argument prototype would still apply.  */
12831-#ifdef __cplusplus
12832-extern "C"
12833-#endif
12834-char clock_gettime ();
12835-int
12836-main ()
12837-{
12838-return clock_gettime ();
12839-  ;
12840-  return 0;
12841-}
12842-_ACEOF
12843-for ac_lib in '' rt; do
12844-  if test -z "$ac_lib"; then
12845-    ac_res="none required"
12846-  else
12847-    ac_res=-l$ac_lib
12848-    LIBS="-l$ac_lib  $ac_func_search_save_LIBS"
12849-  fi
12850-  if ac_fn_c_try_link "$LINENO"; then :
12851-  ac_cv_search_clock_gettime=$ac_res
12852-fi
12853-rm -f core conftest.err conftest.$ac_objext \
12854-    conftest$ac_exeext
12855-  if ${ac_cv_search_clock_gettime+:} false; then :
12856-  break
12857-fi
12858-done
12859-if ${ac_cv_search_clock_gettime+:} false; then :
12860-
12861-else
12862-  ac_cv_search_clock_gettime=no
12863-fi
12864-rm conftest.$ac_ext
12865-LIBS=$ac_func_search_save_LIBS
12866-fi
12867-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5
12868-$as_echo "$ac_cv_search_clock_gettime" >&6; }
12869-ac_res=$ac_cv_search_clock_gettime
12870-if test "$ac_res" != no; then :
12871-  test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
12872-
12873-fi
12874-
12875-
12876-if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
12877-  if test "$ac_cv_search_clock_gettime" != "-lrt"; then
12878-    SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
12879-
12880-
12881-    unset ac_cv_search_clock_gettime
12882-
12883-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -dynamic" >&5
12884-$as_echo_n "checking whether compiler supports -dynamic... " >&6; }
12885-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
12886-T_APPEND_V=-dynamic
12887-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
12888-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
12889-else
12890-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
12891-fi
12892-
12893-
12894-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
12895-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
12896-else
12897-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
12898-fi
12899-
12900-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
12901-/* end confdefs.h.  */
12902-
12903-
12904-int
12905-main ()
12906-{
12907-
12908-    return 0;
12909-
12910-  ;
12911-  return 0;
12912-}
12913-_ACEOF
12914-if ac_fn_c_try_compile "$LINENO"; then :
12915-  je_cv_cflags_added=-dynamic
12916-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
12917-$as_echo "yes" >&6; }
12918-else
12919-  je_cv_cflags_added=
12920-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
12921-$as_echo "no" >&6; }
12922-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
12923-
12924-fi
12925-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
12926-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
12927-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
12928-else
12929-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
12930-fi
12931-
12932-
12933-    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5
12934-$as_echo_n "checking for library containing clock_gettime... " >&6; }
12935-if ${ac_cv_search_clock_gettime+:} false; then :
12936-  $as_echo_n "(cached) " >&6
12937-else
12938-  ac_func_search_save_LIBS=$LIBS
12939-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
12940-/* end confdefs.h.  */
12941-
12942-/* Override any GCC internal prototype to avoid an error.
12943-   Use char because int might match the return type of a GCC
12944-   builtin and then its argument prototype would still apply.  */
12945-#ifdef __cplusplus
12946-extern "C"
12947-#endif
12948-char clock_gettime ();
12949-int
12950-main ()
12951-{
12952-return clock_gettime ();
12953-  ;
12954-  return 0;
12955-}
12956-_ACEOF
12957-for ac_lib in '' rt; do
12958-  if test -z "$ac_lib"; then
12959-    ac_res="none required"
12960-  else
12961-    ac_res=-l$ac_lib
12962-    LIBS="-l$ac_lib  $ac_func_search_save_LIBS"
12963-  fi
12964-  if ac_fn_c_try_link "$LINENO"; then :
12965-  ac_cv_search_clock_gettime=$ac_res
12966-fi
12967-rm -f core conftest.err conftest.$ac_objext \
12968-    conftest$ac_exeext
12969-  if ${ac_cv_search_clock_gettime+:} false; then :
12970-  break
12971-fi
12972-done
12973-if ${ac_cv_search_clock_gettime+:} false; then :
12974-
12975-else
12976-  ac_cv_search_clock_gettime=no
12977-fi
12978-rm conftest.$ac_ext
12979-LIBS=$ac_func_search_save_LIBS
12980-fi
12981-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5
12982-$as_echo "$ac_cv_search_clock_gettime" >&6; }
12983-ac_res=$ac_cv_search_clock_gettime
12984-if test "$ac_res" != no; then :
12985-  test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
12986-
12987-fi
12988-
12989-
12990-    CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
12991-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
12992-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
12993-else
12994-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
12995-fi
12996-
12997-
12998-  fi
12999-fi
13000-
13001-
13002-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is compilable" >&5
13003-$as_echo_n "checking whether clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is compilable... " >&6; }
13004-if ${je_cv_clock_monotonic_coarse+:} false; then :
13005-  $as_echo_n "(cached) " >&6
13006-else
13007-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13008-/* end confdefs.h.  */
13009-
13010-#include <time.h>
13011-
13012-int
13013-main ()
13014-{
13015-
13016-	struct timespec ts;
13017-
13018-	clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
13019-
13020-  ;
13021-  return 0;
13022-}
13023-_ACEOF
13024-if ac_fn_c_try_link "$LINENO"; then :
13025-  je_cv_clock_monotonic_coarse=yes
13026-else
13027-  je_cv_clock_monotonic_coarse=no
13028-fi
13029-rm -f core conftest.err conftest.$ac_objext \
13030-    conftest$ac_exeext conftest.$ac_ext
13031-fi
13032-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_clock_monotonic_coarse" >&5
13033-$as_echo "$je_cv_clock_monotonic_coarse" >&6; }
13034-
13035-if test "x${je_cv_clock_monotonic_coarse}" = "xyes" ; then
13036-
13037-$as_echo "#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE  " >>confdefs.h
13038-
13039-fi
13040-
13041-
13042-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether clock_gettime(CLOCK_MONOTONIC, ...) is compilable" >&5
13043-$as_echo_n "checking whether clock_gettime(CLOCK_MONOTONIC, ...) is compilable... " >&6; }
13044-if ${je_cv_clock_monotonic+:} false; then :
13045-  $as_echo_n "(cached) " >&6
13046-else
13047-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13048-/* end confdefs.h.  */
13049-
13050-#include <unistd.h>
13051-#include <time.h>
13052-
13053-int
13054-main ()
13055-{
13056-
13057-	struct timespec ts;
13058-
13059-	clock_gettime(CLOCK_MONOTONIC, &ts);
13060-#if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0
13061-#  error _POSIX_MONOTONIC_CLOCK missing/invalid
13062-#endif
13063-
13064-  ;
13065-  return 0;
13066-}
13067-_ACEOF
13068-if ac_fn_c_try_link "$LINENO"; then :
13069-  je_cv_clock_monotonic=yes
13070-else
13071-  je_cv_clock_monotonic=no
13072-fi
13073-rm -f core conftest.err conftest.$ac_objext \
13074-    conftest$ac_exeext conftest.$ac_ext
13075-fi
13076-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_clock_monotonic" >&5
13077-$as_echo "$je_cv_clock_monotonic" >&6; }
13078-
13079-if test "x${je_cv_clock_monotonic}" = "xyes" ; then
13080-
13081-$as_echo "#define JEMALLOC_HAVE_CLOCK_MONOTONIC  " >>confdefs.h
13082-
13083-fi
13084-
13085-
13086-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether mach_absolute_time() is compilable" >&5
13087-$as_echo_n "checking whether mach_absolute_time() is compilable... " >&6; }
13088-if ${je_cv_mach_absolute_time+:} false; then :
13089-  $as_echo_n "(cached) " >&6
13090-else
13091-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13092-/* end confdefs.h.  */
13093-
13094-#include <mach/mach_time.h>
13095-
13096-int
13097-main ()
13098-{
13099-
13100-	mach_absolute_time();
13101-
13102-  ;
13103-  return 0;
13104-}
13105-_ACEOF
13106-if ac_fn_c_try_link "$LINENO"; then :
13107-  je_cv_mach_absolute_time=yes
13108-else
13109-  je_cv_mach_absolute_time=no
13110-fi
13111-rm -f core conftest.err conftest.$ac_objext \
13112-    conftest$ac_exeext conftest.$ac_ext
13113-fi
13114-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_mach_absolute_time" >&5
13115-$as_echo "$je_cv_mach_absolute_time" >&6; }
13116-
13117-if test "x${je_cv_mach_absolute_time}" = "xyes" ; then
13118-
13119-$as_echo "#define JEMALLOC_HAVE_MACH_ABSOLUTE_TIME  " >>confdefs.h
13120-
13121-fi
13122-
13123-
13124-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether clock_gettime(CLOCK_REALTIME, ...) is compilable" >&5
13125-$as_echo_n "checking whether clock_gettime(CLOCK_REALTIME, ...) is compilable... " >&6; }
13126-if ${je_cv_clock_realtime+:} false; then :
13127-  $as_echo_n "(cached) " >&6
13128-else
13129-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13130-/* end confdefs.h.  */
13131-
13132-#include <time.h>
13133-
13134-int
13135-main ()
13136-{
13137-
13138-	struct timespec ts;
13139-
13140-	clock_gettime(CLOCK_REALTIME, &ts);
13141-
13142-  ;
13143-  return 0;
13144-}
13145-_ACEOF
13146-if ac_fn_c_try_link "$LINENO"; then :
13147-  je_cv_clock_realtime=yes
13148-else
13149-  je_cv_clock_realtime=no
13150-fi
13151-rm -f core conftest.err conftest.$ac_objext \
13152-    conftest$ac_exeext conftest.$ac_ext
13153-fi
13154-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_clock_realtime" >&5
13155-$as_echo "$je_cv_clock_realtime" >&6; }
13156-
13157-if test "x${je_cv_clock_realtime}" = "xyes" ; then
13158-
13159-$as_echo "#define JEMALLOC_HAVE_CLOCK_REALTIME  " >>confdefs.h
13160-
13161-fi
13162-
13163-# Check whether --enable-syscall was given.
13164-if test "${enable_syscall+set}" = set; then :
13165-  enableval=$enable_syscall; if test "x$enable_syscall" = "xno" ; then
13166-  enable_syscall="0"
13167-else
13168-  enable_syscall="1"
13169-fi
13170-
13171-else
13172-  enable_syscall="1"
13173-
13174-fi
13175-
13176-if test "x$enable_syscall" = "x1" ; then
13177-      SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
13178-
13179-
13180-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
13181-$as_echo_n "checking whether compiler supports -Werror... " >&6; }
13182-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
13183-T_APPEND_V=-Werror
13184-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
13185-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
13186-else
13187-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
13188-fi
13189-
13190-
13191-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
13192-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
13193-else
13194-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
13195-fi
13196-
13197-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13198-/* end confdefs.h.  */
13199-
13200-
13201-int
13202-main ()
13203-{
13204-
13205-    return 0;
13206-
13207-  ;
13208-  return 0;
13209-}
13210-_ACEOF
13211-if ac_fn_c_try_compile "$LINENO"; then :
13212-  je_cv_cflags_added=-Werror
13213-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
13214-$as_echo "yes" >&6; }
13215-else
13216-  je_cv_cflags_added=
13217-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
13218-$as_echo "no" >&6; }
13219-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
13220-
13221-fi
13222-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
13223-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
13224-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
13225-else
13226-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
13227-fi
13228-
13229-
13230-
13231-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether syscall(2) is compilable" >&5
13232-$as_echo_n "checking whether syscall(2) is compilable... " >&6; }
13233-if ${je_cv_syscall+:} false; then :
13234-  $as_echo_n "(cached) " >&6
13235-else
13236-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13237-/* end confdefs.h.  */
13238-
13239-#include <sys/syscall.h>
13240-#include <unistd.h>
13241-
13242-int
13243-main ()
13244-{
13245-
13246-	syscall(SYS_write, 2, "hello", 5);
13247-
13248-  ;
13249-  return 0;
13250-}
13251-_ACEOF
13252-if ac_fn_c_try_link "$LINENO"; then :
13253-  je_cv_syscall=yes
13254-else
13255-  je_cv_syscall=no
13256-fi
13257-rm -f core conftest.err conftest.$ac_objext \
13258-    conftest$ac_exeext conftest.$ac_ext
13259-fi
13260-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_syscall" >&5
13261-$as_echo "$je_cv_syscall" >&6; }
13262-
13263-  CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
13264-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
13265-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
13266-else
13267-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
13268-fi
13269-
13270-
13271-  if test "x$je_cv_syscall" = "xyes" ; then
13272-
13273-$as_echo "#define JEMALLOC_USE_SYSCALL  " >>confdefs.h
13274-
13275-  fi
13276-fi
13277-
13278-ac_fn_c_check_func "$LINENO" "secure_getenv" "ac_cv_func_secure_getenv"
13279-if test "x$ac_cv_func_secure_getenv" = xyes; then :
13280-  have_secure_getenv="1"
13281-else
13282-  have_secure_getenv="0"
13283-
13284-fi
13285-
13286-if test "x$have_secure_getenv" = "x1" ; then
13287-
13288-$as_echo "#define JEMALLOC_HAVE_SECURE_GETENV  " >>confdefs.h
13289-
13290-fi
13291-
13292-ac_fn_c_check_func "$LINENO" "sched_getcpu" "ac_cv_func_sched_getcpu"
13293-if test "x$ac_cv_func_sched_getcpu" = xyes; then :
13294-  have_sched_getcpu="1"
13295-else
13296-  have_sched_getcpu="0"
13297-
13298-fi
13299-
13300-if test "x$have_sched_getcpu" = "x1" ; then
13301-
13302-$as_echo "#define JEMALLOC_HAVE_SCHED_GETCPU  " >>confdefs.h
13303-
13304-fi
13305-
13306-ac_fn_c_check_func "$LINENO" "sched_setaffinity" "ac_cv_func_sched_setaffinity"
13307-if test "x$ac_cv_func_sched_setaffinity" = xyes; then :
13308-  have_sched_setaffinity="1"
13309-else
13310-  have_sched_setaffinity="0"
13311-
13312-fi
13313-
13314-if test "x$have_sched_setaffinity" = "x1" ; then
13315-
13316-$as_echo "#define JEMALLOC_HAVE_SCHED_SETAFFINITY  " >>confdefs.h
13317-
13318-fi
13319-
13320-ac_fn_c_check_func "$LINENO" "issetugid" "ac_cv_func_issetugid"
13321-if test "x$ac_cv_func_issetugid" = xyes; then :
13322-  have_issetugid="1"
13323-else
13324-  have_issetugid="0"
13325-
13326-fi
13327-
13328-if test "x$have_issetugid" = "x1" ; then
13329-
13330-$as_echo "#define JEMALLOC_HAVE_ISSETUGID  " >>confdefs.h
13331-
13332-fi
13333-
13334-ac_fn_c_check_func "$LINENO" "_malloc_thread_cleanup" "ac_cv_func__malloc_thread_cleanup"
13335-if test "x$ac_cv_func__malloc_thread_cleanup" = xyes; then :
13336-  have__malloc_thread_cleanup="1"
13337-else
13338-  have__malloc_thread_cleanup="0"
13339-
13340-fi
13341-
13342-if test "x$have__malloc_thread_cleanup" = "x1" ; then
13343-
13344-$as_echo "#define JEMALLOC_MALLOC_THREAD_CLEANUP  " >>confdefs.h
13345-
13346-  wrap_syms="${wrap_syms} _malloc_thread_cleanup _malloc_tsd_cleanup_register"
13347-  force_tls="1"
13348-fi
13349-
13350-ac_fn_c_check_func "$LINENO" "_pthread_mutex_init_calloc_cb" "ac_cv_func__pthread_mutex_init_calloc_cb"
13351-if test "x$ac_cv_func__pthread_mutex_init_calloc_cb" = xyes; then :
13352-  have__pthread_mutex_init_calloc_cb="1"
13353-else
13354-  have__pthread_mutex_init_calloc_cb="0"
13355-
13356-fi
13357-
13358-if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then
13359-
13360-$as_echo "#define JEMALLOC_MUTEX_INIT_CB  " >>confdefs.h
13361-
13362-  wrap_syms="${wrap_syms} _malloc_prefork _malloc_postfork"
13363-fi
13364-
13365-ac_fn_c_check_func "$LINENO" "memcntl" "ac_cv_func_memcntl"
13366-if test "x$ac_cv_func_memcntl" = xyes; then :
13367-  have_memcntl="1"
13368-else
13369-  have_memcntl="0"
13370-fi
13371-
13372-if test "x$have_memcntl" = "x1" ; then
13373-
13374-$as_echo "#define JEMALLOC_HAVE_MEMCNTL  " >>confdefs.h
13375-
13376-fi
13377-
13378-# Check whether --enable-lazy_lock was given.
13379-if test "${enable_lazy_lock+set}" = set; then :
13380-  enableval=$enable_lazy_lock; if test "x$enable_lazy_lock" = "xno" ; then
13381-  enable_lazy_lock="0"
13382-else
13383-  enable_lazy_lock="1"
13384-fi
13385-
13386-else
13387-  enable_lazy_lock=""
13388-
13389-fi
13390-
13391-if test "x${enable_lazy_lock}" = "x" ; then
13392-  if test "x${force_lazy_lock}" = "x1" ; then
13393-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&5
13394-$as_echo "Forcing lazy-lock to avoid allocator/threading bootstrap issues" >&6; }
13395-    enable_lazy_lock="1"
13396-  else
13397-    enable_lazy_lock="0"
13398-  fi
13399-fi
13400-if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then
13401-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no lazy-lock because thread creation monitoring is unimplemented" >&5
13402-$as_echo "Forcing no lazy-lock because thread creation monitoring is unimplemented" >&6; }
13403-  enable_lazy_lock="0"
13404-fi
13405-if test "x$enable_lazy_lock" = "x1" ; then
13406-  if test "x$have_dlsym" = "x1" ; then
13407-
13408-$as_echo "#define JEMALLOC_LAZY_LOCK  " >>confdefs.h
13409-
13410-  else
13411-    as_fn_error $? "Missing dlsym support: lazy-lock cannot be enabled." "$LINENO" 5
13412-  fi
13413-fi
13414-
13415-
13416-if test "x${force_tls}" = "x1" ; then
13417-  enable_tls="1"
13418-elif test "x${force_tls}" = "x0" ; then
13419-  enable_tls="0"
13420-else
13421-  enable_tls="1"
13422-fi
13423-if test "x${enable_tls}" = "x1" ; then
13424-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for TLS" >&5
13425-$as_echo_n "checking for TLS... " >&6; }
13426-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13427-/* end confdefs.h.  */
13428-
13429-    __thread int x;
13430-
13431-int
13432-main ()
13433-{
13434-
13435-    x = 42;
13436-
13437-    return 0;
13438-
13439-  ;
13440-  return 0;
13441-}
13442-_ACEOF
13443-if ac_fn_c_try_compile "$LINENO"; then :
13444-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
13445-$as_echo "yes" >&6; }
13446-else
13447-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
13448-$as_echo "no" >&6; }
13449-              enable_tls="0"
13450-fi
13451-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
13452-else
13453-  enable_tls="0"
13454-fi
13455-
13456-if test "x${enable_tls}" = "x1" ; then
13457-
13458-cat >>confdefs.h <<_ACEOF
13459-#define JEMALLOC_TLS
13460-_ACEOF
13461-
13462-fi
13463-
13464-
13465-
13466-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C11 atomics is compilable" >&5
13467-$as_echo_n "checking whether C11 atomics is compilable... " >&6; }
13468-if ${je_cv_c11_atomics+:} false; then :
13469-  $as_echo_n "(cached) " >&6
13470-else
13471-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13472-/* end confdefs.h.  */
13473-
13474-#include <stdint.h>
13475-#if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
13476-#include <stdatomic.h>
13477-#else
13478-#error Atomics not available
13479-#endif
13480-
13481-int
13482-main ()
13483-{
13484-
13485-    uint64_t *p = (uint64_t *)0;
13486-    uint64_t x = 1;
13487-    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
13488-    uint64_t r = atomic_fetch_add(a, x) + x;
13489-    return r == 0;
13490-
13491-  ;
13492-  return 0;
13493-}
13494-_ACEOF
13495-if ac_fn_c_try_link "$LINENO"; then :
13496-  je_cv_c11_atomics=yes
13497-else
13498-  je_cv_c11_atomics=no
13499-fi
13500-rm -f core conftest.err conftest.$ac_objext \
13501-    conftest$ac_exeext conftest.$ac_ext
13502-fi
13503-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_c11_atomics" >&5
13504-$as_echo "$je_cv_c11_atomics" >&6; }
13505-
13506-if test "x${je_cv_c11_atomics}" = "xyes" ; then
13507-
13508-$as_echo "#define JEMALLOC_C11_ATOMICS  " >>confdefs.h
13509-
13510-fi
13511-
13512-
13513-
13514-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether GCC __atomic atomics is compilable" >&5
13515-$as_echo_n "checking whether GCC __atomic atomics is compilable... " >&6; }
13516-if ${je_cv_gcc_atomic_atomics+:} false; then :
13517-  $as_echo_n "(cached) " >&6
13518-else
13519-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13520-/* end confdefs.h.  */
13521-
13522-
13523-int
13524-main ()
13525-{
13526-
13527-    int x = 0;
13528-    int val = 1;
13529-    int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED);
13530-    int after_add = x;
13531-    return after_add == 1;
13532-
13533-  ;
13534-  return 0;
13535-}
13536-_ACEOF
13537-if ac_fn_c_try_link "$LINENO"; then :
13538-  je_cv_gcc_atomic_atomics=yes
13539-else
13540-  je_cv_gcc_atomic_atomics=no
13541-fi
13542-rm -f core conftest.err conftest.$ac_objext \
13543-    conftest$ac_exeext conftest.$ac_ext
13544-fi
13545-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_atomic_atomics" >&5
13546-$as_echo "$je_cv_gcc_atomic_atomics" >&6; }
13547-
13548-if test "x${je_cv_gcc_atomic_atomics}" = "xyes" ; then
13549-
13550-$as_echo "#define JEMALLOC_GCC_ATOMIC_ATOMICS  " >>confdefs.h
13551-
13552-
13553-
13554-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether GCC 8-bit __atomic atomics is compilable" >&5
13555-$as_echo_n "checking whether GCC 8-bit __atomic atomics is compilable... " >&6; }
13556-if ${je_cv_gcc_u8_atomic_atomics+:} false; then :
13557-  $as_echo_n "(cached) " >&6
13558-else
13559-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13560-/* end confdefs.h.  */
13561-
13562-
13563-int
13564-main ()
13565-{
13566-
13567-      unsigned char x = 0;
13568-      int val = 1;
13569-      int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED);
13570-      int after_add = (int)x;
13571-      return after_add == 1;
13572-
13573-  ;
13574-  return 0;
13575-}
13576-_ACEOF
13577-if ac_fn_c_try_link "$LINENO"; then :
13578-  je_cv_gcc_u8_atomic_atomics=yes
13579-else
13580-  je_cv_gcc_u8_atomic_atomics=no
13581-fi
13582-rm -f core conftest.err conftest.$ac_objext \
13583-    conftest$ac_exeext conftest.$ac_ext
13584-fi
13585-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_u8_atomic_atomics" >&5
13586-$as_echo "$je_cv_gcc_u8_atomic_atomics" >&6; }
13587-
13588-  if test "x${je_cv_gcc_u8_atomic_atomics}" = "xyes" ; then
13589-
13590-$as_echo "#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS  " >>confdefs.h
13591-
13592-  fi
13593-fi
13594-
13595-
13596-
13597-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether GCC __sync atomics is compilable" >&5
13598-$as_echo_n "checking whether GCC __sync atomics is compilable... " >&6; }
13599-if ${je_cv_gcc_sync_atomics+:} false; then :
13600-  $as_echo_n "(cached) " >&6
13601-else
13602-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13603-/* end confdefs.h.  */
13604-
13605-
13606-int
13607-main ()
13608-{
13609-
13610-    int x = 0;
13611-    int before_add = __sync_fetch_and_add(&x, 1);
13612-    int after_add = x;
13613-    return (before_add == 0) && (after_add == 1);
13614-
13615-  ;
13616-  return 0;
13617-}
13618-_ACEOF
13619-if ac_fn_c_try_link "$LINENO"; then :
13620-  je_cv_gcc_sync_atomics=yes
13621-else
13622-  je_cv_gcc_sync_atomics=no
13623-fi
13624-rm -f core conftest.err conftest.$ac_objext \
13625-    conftest$ac_exeext conftest.$ac_ext
13626-fi
13627-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_sync_atomics" >&5
13628-$as_echo "$je_cv_gcc_sync_atomics" >&6; }
13629-
13630-if test "x${je_cv_gcc_sync_atomics}" = "xyes" ; then
13631-
13632-$as_echo "#define JEMALLOC_GCC_SYNC_ATOMICS  " >>confdefs.h
13633-
13634-
13635-
13636-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether GCC 8-bit __sync atomics is compilable" >&5
13637-$as_echo_n "checking whether GCC 8-bit __sync atomics is compilable... " >&6; }
13638-if ${je_cv_gcc_u8_sync_atomics+:} false; then :
13639-  $as_echo_n "(cached) " >&6
13640-else
13641-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13642-/* end confdefs.h.  */
13643-
13644-
13645-int
13646-main ()
13647-{
13648-
13649-      unsigned char x = 0;
13650-      int before_add = __sync_fetch_and_add(&x, 1);
13651-      int after_add = (int)x;
13652-      return (before_add == 0) && (after_add == 1);
13653-
13654-  ;
13655-  return 0;
13656-}
13657-_ACEOF
13658-if ac_fn_c_try_link "$LINENO"; then :
13659-  je_cv_gcc_u8_sync_atomics=yes
13660-else
13661-  je_cv_gcc_u8_sync_atomics=no
13662-fi
13663-rm -f core conftest.err conftest.$ac_objext \
13664-    conftest$ac_exeext conftest.$ac_ext
13665-fi
13666-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_gcc_u8_sync_atomics" >&5
13667-$as_echo "$je_cv_gcc_u8_sync_atomics" >&6; }
13668-
13669-  if test "x${je_cv_gcc_u8_sync_atomics}" = "xyes" ; then
13670-
13671-$as_echo "#define JEMALLOC_GCC_U8_SYNC_ATOMICS  " >>confdefs.h
13672-
13673-  fi
13674-fi
13675-
13676-
13677-
13678-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin OSAtomic*() is compilable" >&5
13679-$as_echo_n "checking whether Darwin OSAtomic*() is compilable... " >&6; }
13680-if ${je_cv_osatomic+:} false; then :
13681-  $as_echo_n "(cached) " >&6
13682-else
13683-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13684-/* end confdefs.h.  */
13685-
13686-#include <libkern/OSAtomic.h>
13687-#include <inttypes.h>
13688-
13689-int
13690-main ()
13691-{
13692-
13693-	{
13694-		int32_t x32 = 0;
13695-		volatile int32_t *x32p = &x32;
13696-		OSAtomicAdd32(1, x32p);
13697-	}
13698-	{
13699-		int64_t x64 = 0;
13700-		volatile int64_t *x64p = &x64;
13701-		OSAtomicAdd64(1, x64p);
13702-	}
13703-
13704-  ;
13705-  return 0;
13706-}
13707-_ACEOF
13708-if ac_fn_c_try_link "$LINENO"; then :
13709-  je_cv_osatomic=yes
13710-else
13711-  je_cv_osatomic=no
13712-fi
13713-rm -f core conftest.err conftest.$ac_objext \
13714-    conftest$ac_exeext conftest.$ac_ext
13715-fi
13716-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_osatomic" >&5
13717-$as_echo "$je_cv_osatomic" >&6; }
13718-
13719-if test "x${je_cv_osatomic}" = "xyes" ; then
13720-
13721-$as_echo "#define JEMALLOC_OSATOMIC  " >>confdefs.h
13722-
13723-fi
13724-
13725-
13726-
13727-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(2) is compilable" >&5
13728-$as_echo_n "checking whether madvise(2) is compilable... " >&6; }
13729-if ${je_cv_madvise+:} false; then :
13730-  $as_echo_n "(cached) " >&6
13731-else
13732-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13733-/* end confdefs.h.  */
13734-
13735-#include <sys/mman.h>
13736-
13737-int
13738-main ()
13739-{
13740-
13741-	madvise((void *)0, 0, 0);
13742-
13743-  ;
13744-  return 0;
13745-}
13746-_ACEOF
13747-if ac_fn_c_try_link "$LINENO"; then :
13748-  je_cv_madvise=yes
13749-else
13750-  je_cv_madvise=no
13751-fi
13752-rm -f core conftest.err conftest.$ac_objext \
13753-    conftest$ac_exeext conftest.$ac_ext
13754-fi
13755-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madvise" >&5
13756-$as_echo "$je_cv_madvise" >&6; }
13757-
13758-if test "x${je_cv_madvise}" = "xyes" ; then
13759-
13760-$as_echo "#define JEMALLOC_HAVE_MADVISE  " >>confdefs.h
13761-
13762-
13763-
13764-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_FREE) is compilable" >&5
13765-$as_echo_n "checking whether madvise(..., MADV_FREE) is compilable... " >&6; }
13766-if ${je_cv_madv_free+:} false; then :
13767-  $as_echo_n "(cached) " >&6
13768-else
13769-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13770-/* end confdefs.h.  */
13771-
13772-#include <sys/mman.h>
13773-
13774-int
13775-main ()
13776-{
13777-
13778-	madvise((void *)0, 0, MADV_FREE);
13779-
13780-  ;
13781-  return 0;
13782-}
13783-_ACEOF
13784-if ac_fn_c_try_link "$LINENO"; then :
13785-  je_cv_madv_free=yes
13786-else
13787-  je_cv_madv_free=no
13788-fi
13789-rm -f core conftest.err conftest.$ac_objext \
13790-    conftest$ac_exeext conftest.$ac_ext
13791-fi
13792-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madv_free" >&5
13793-$as_echo "$je_cv_madv_free" >&6; }
13794-
13795-  if test "x${je_cv_madv_free}" = "xyes" ; then
13796-
13797-$as_echo "#define JEMALLOC_PURGE_MADVISE_FREE  " >>confdefs.h
13798-
13799-  elif test "x${je_cv_madvise}" = "xyes" ; then
13800-    case "${host_cpu}" in i686|x86_64)
13801-        case "${host}" in *-*-linux*)
13802-
13803-$as_echo "#define JEMALLOC_PURGE_MADVISE_FREE  " >>confdefs.h
13804-
13805-
13806-$as_echo "#define JEMALLOC_DEFINE_MADVISE_FREE  " >>confdefs.h
13807-
13808-	    ;;
13809-        esac
13810-        ;;
13811-    esac
13812-  fi
13813-
13814-
13815-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_DONTNEED) is compilable" >&5
13816-$as_echo_n "checking whether madvise(..., MADV_DONTNEED) is compilable... " >&6; }
13817-if ${je_cv_madv_dontneed+:} false; then :
13818-  $as_echo_n "(cached) " >&6
13819-else
13820-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13821-/* end confdefs.h.  */
13822-
13823-#include <sys/mman.h>
13824-
13825-int
13826-main ()
13827-{
13828-
13829-	madvise((void *)0, 0, MADV_DONTNEED);
13830-
13831-  ;
13832-  return 0;
13833-}
13834-_ACEOF
13835-if ac_fn_c_try_link "$LINENO"; then :
13836-  je_cv_madv_dontneed=yes
13837-else
13838-  je_cv_madv_dontneed=no
13839-fi
13840-rm -f core conftest.err conftest.$ac_objext \
13841-    conftest$ac_exeext conftest.$ac_ext
13842-fi
13843-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madv_dontneed" >&5
13844-$as_echo "$je_cv_madv_dontneed" >&6; }
13845-
13846-  if test "x${je_cv_madv_dontneed}" = "xyes" ; then
13847-
13848-$as_echo "#define JEMALLOC_PURGE_MADVISE_DONTNEED  " >>confdefs.h
13849-
13850-  fi
13851-
13852-
13853-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_DO[NT]DUMP) is compilable" >&5
13854-$as_echo_n "checking whether madvise(..., MADV_DO[NT]DUMP) is compilable... " >&6; }
13855-if ${je_cv_madv_dontdump+:} false; then :
13856-  $as_echo_n "(cached) " >&6
13857-else
13858-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13859-/* end confdefs.h.  */
13860-
13861-#include <sys/mman.h>
13862-
13863-int
13864-main ()
13865-{
13866-
13867-	madvise((void *)0, 0, MADV_DONTDUMP);
13868-	madvise((void *)0, 0, MADV_DODUMP);
13869-
13870-  ;
13871-  return 0;
13872-}
13873-_ACEOF
13874-if ac_fn_c_try_link "$LINENO"; then :
13875-  je_cv_madv_dontdump=yes
13876-else
13877-  je_cv_madv_dontdump=no
13878-fi
13879-rm -f core conftest.err conftest.$ac_objext \
13880-    conftest$ac_exeext conftest.$ac_ext
13881-fi
13882-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madv_dontdump" >&5
13883-$as_echo "$je_cv_madv_dontdump" >&6; }
13884-
13885-  if test "x${je_cv_madv_dontdump}" = "xyes" ; then
13886-
13887-$as_echo "#define JEMALLOC_MADVISE_DONTDUMP  " >>confdefs.h
13888-
13889-  fi
13890-
13891-
13892-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_[NO]HUGEPAGE) is compilable" >&5
13893-$as_echo_n "checking whether madvise(..., MADV_[NO]HUGEPAGE) is compilable... " >&6; }
13894-if ${je_cv_thp+:} false; then :
13895-  $as_echo_n "(cached) " >&6
13896-else
13897-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13898-/* end confdefs.h.  */
13899-
13900-#include <sys/mman.h>
13901-
13902-int
13903-main ()
13904-{
13905-
13906-	madvise((void *)0, 0, MADV_HUGEPAGE);
13907-	madvise((void *)0, 0, MADV_NOHUGEPAGE);
13908-
13909-  ;
13910-  return 0;
13911-}
13912-_ACEOF
13913-if ac_fn_c_try_link "$LINENO"; then :
13914-  je_cv_thp=yes
13915-else
13916-  je_cv_thp=no
13917-fi
13918-rm -f core conftest.err conftest.$ac_objext \
13919-    conftest$ac_exeext conftest.$ac_ext
13920-fi
13921-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_thp" >&5
13922-$as_echo "$je_cv_thp" >&6; }
13923-
13924-
13925-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether madvise(..., MADV_[NO]CORE) is compilable" >&5
13926-$as_echo_n "checking whether madvise(..., MADV_[NO]CORE) is compilable... " >&6; }
13927-if ${je_cv_madv_nocore+:} false; then :
13928-  $as_echo_n "(cached) " >&6
13929-else
13930-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13931-/* end confdefs.h.  */
13932-
13933-#include <sys/mman.h>
13934-
13935-int
13936-main ()
13937-{
13938-
13939-	madvise((void *)0, 0, MADV_NOCORE);
13940-	madvise((void *)0, 0, MADV_CORE);
13941-
13942-  ;
13943-  return 0;
13944-}
13945-_ACEOF
13946-if ac_fn_c_try_link "$LINENO"; then :
13947-  je_cv_madv_nocore=yes
13948-else
13949-  je_cv_madv_nocore=no
13950-fi
13951-rm -f core conftest.err conftest.$ac_objext \
13952-    conftest$ac_exeext conftest.$ac_ext
13953-fi
13954-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_madv_nocore" >&5
13955-$as_echo "$je_cv_madv_nocore" >&6; }
13956-
13957-  if test "x${je_cv_madv_nocore}" = "xyes" ; then
13958-
13959-$as_echo "#define JEMALLOC_MADVISE_NOCORE  " >>confdefs.h
13960-
13961-  fi
13962-case "${host_cpu}" in
13963-  arm*)
13964-    ;;
13965-  *)
13966-  if test "x${je_cv_thp}" = "xyes" ; then
13967-
13968-$as_echo "#define JEMALLOC_HAVE_MADVISE_HUGE  " >>confdefs.h
13969-
13970-  fi
13971-  ;;
13972-esac
13973-else
13974-
13975-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether posix_madvise is compilable" >&5
13976-$as_echo_n "checking whether posix_madvise is compilable... " >&6; }
13977-if ${je_cv_posix_madvise+:} false; then :
13978-  $as_echo_n "(cached) " >&6
13979-else
13980-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
13981-/* end confdefs.h.  */
13982-
13983-  #include <sys/mman.h>
13984-
13985-int
13986-main ()
13987-{
13988-
13989-    posix_madvise((void *)0, 0, 0);
13990-
13991-  ;
13992-  return 0;
13993-}
13994-_ACEOF
13995-if ac_fn_c_try_link "$LINENO"; then :
13996-  je_cv_posix_madvise=yes
13997-else
13998-  je_cv_posix_madvise=no
13999-fi
14000-rm -f core conftest.err conftest.$ac_objext \
14001-    conftest$ac_exeext conftest.$ac_ext
14002-fi
14003-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_posix_madvise" >&5
14004-$as_echo "$je_cv_posix_madvise" >&6; }
14005-
14006-  if test "x${je_cv_posix_madvise}" = "xyes" ; then
14007-
14008-$as_echo "#define JEMALLOC_HAVE_POSIX_MADVISE  " >>confdefs.h
14009-
14010-
14011-
14012-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether posix_madvise(..., POSIX_MADV_DONTNEED) is compilable" >&5
14013-$as_echo_n "checking whether posix_madvise(..., POSIX_MADV_DONTNEED) is compilable... " >&6; }
14014-if ${je_cv_posix_madv_dontneed+:} false; then :
14015-  $as_echo_n "(cached) " >&6
14016-else
14017-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
14018-/* end confdefs.h.  */
14019-
14020-  #include <sys/mman.h>
14021-
14022-int
14023-main ()
14024-{
14025-
14026-    posix_madvise((void *)0, 0, POSIX_MADV_DONTNEED);
14027-
14028-  ;
14029-  return 0;
14030-}
14031-_ACEOF
14032-if ac_fn_c_try_link "$LINENO"; then :
14033-  je_cv_posix_madv_dontneed=yes
14034-else
14035-  je_cv_posix_madv_dontneed=no
14036-fi
14037-rm -f core conftest.err conftest.$ac_objext \
14038-    conftest$ac_exeext conftest.$ac_ext
14039-fi
14040-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_posix_madv_dontneed" >&5
14041-$as_echo "$je_cv_posix_madv_dontneed" >&6; }
14042-
14043-    if test "x${je_cv_posix_madv_dontneed}" = "xyes" ; then
14044-
14045-$as_echo "#define JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED  " >>confdefs.h
14046-
14047-    fi
14048-  fi
14049-fi
14050-
14051-
14052-
14053-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether mprotect(2) is compilable" >&5
14054-$as_echo_n "checking whether mprotect(2) is compilable... " >&6; }
14055-if ${je_cv_mprotect+:} false; then :
14056-  $as_echo_n "(cached) " >&6
14057-else
14058-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
14059-/* end confdefs.h.  */
14060-
14061-#include <sys/mman.h>
14062-
14063-int
14064-main ()
14065-{
14066-
14067-	mprotect((void *)0, 0, PROT_NONE);
14068-
14069-  ;
14070-  return 0;
14071-}
14072-_ACEOF
14073-if ac_fn_c_try_link "$LINENO"; then :
14074-  je_cv_mprotect=yes
14075-else
14076-  je_cv_mprotect=no
14077-fi
14078-rm -f core conftest.err conftest.$ac_objext \
14079-    conftest$ac_exeext conftest.$ac_ext
14080-fi
14081-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_mprotect" >&5
14082-$as_echo "$je_cv_mprotect" >&6; }
14083-
14084-if test "x${je_cv_mprotect}" = "xyes" ; then
14085-
14086-$as_echo "#define JEMALLOC_HAVE_MPROTECT  " >>confdefs.h
14087-
14088-fi
14089-
14090-
14091-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_clz" >&5
14092-$as_echo_n "checking for __builtin_clz... " >&6; }
14093-if ${je_cv_builtin_clz+:} false; then :
14094-  $as_echo_n "(cached) " >&6
14095-else
14096-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
14097-/* end confdefs.h.  */
14098-
14099-int
14100-main ()
14101-{
14102-
14103-                                                {
14104-                                                        unsigned x = 0;
14105-                                                        int y = __builtin_clz(x);
14106-                                                }
14107-                                                {
14108-                                                        unsigned long x = 0;
14109-                                                        int y = __builtin_clzl(x);
14110-                                                }
14111-                                                {
14112-                                                        unsigned long long x = 0;
14113-                                                        int y = __builtin_clzll(x);
14114-                                                }
14115-
14116-  ;
14117-  return 0;
14118-}
14119-_ACEOF
14120-if ac_fn_c_try_link "$LINENO"; then :
14121-  je_cv_builtin_clz=yes
14122-else
14123-  je_cv_builtin_clz=no
14124-fi
14125-rm -f core conftest.err conftest.$ac_objext \
14126-    conftest$ac_exeext conftest.$ac_ext
14127-fi
14128-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_builtin_clz" >&5
14129-$as_echo "$je_cv_builtin_clz" >&6; }
14130-
14131-if test "x${je_cv_builtin_clz}" = "xyes" ; then
14132-
14133-$as_echo "#define JEMALLOC_HAVE_BUILTIN_CLZ  " >>confdefs.h
14134-
14135-fi
14136-
14137-
14138-
14139-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Darwin os_unfair_lock_*() is compilable" >&5
14140-$as_echo_n "checking whether Darwin os_unfair_lock_*() is compilable... " >&6; }
14141-if ${je_cv_os_unfair_lock+:} false; then :
14142-  $as_echo_n "(cached) " >&6
14143-else
14144-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
14145-/* end confdefs.h.  */
14146-
14147-#include <os/lock.h>
14148-#include <AvailabilityMacros.h>
14149-
14150-int
14151-main ()
14152-{
14153-
14154-	#if MAC_OS_X_VERSION_MIN_REQUIRED < 101200
14155-	#error "os_unfair_lock is not supported"
14156-	#else
14157-	os_unfair_lock lock = OS_UNFAIR_LOCK_INIT;
14158-	os_unfair_lock_lock(&lock);
14159-	os_unfair_lock_unlock(&lock);
14160-	#endif
14161-
14162-  ;
14163-  return 0;
14164-}
14165-_ACEOF
14166-if ac_fn_c_try_link "$LINENO"; then :
14167-  je_cv_os_unfair_lock=yes
14168-else
14169-  je_cv_os_unfair_lock=no
14170-fi
14171-rm -f core conftest.err conftest.$ac_objext \
14172-    conftest$ac_exeext conftest.$ac_ext
14173-fi
14174-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_os_unfair_lock" >&5
14175-$as_echo "$je_cv_os_unfair_lock" >&6; }
14176-
14177-if test "x${je_cv_os_unfair_lock}" = "xyes" ; then
14178-
14179-$as_echo "#define JEMALLOC_OS_UNFAIR_LOCK  " >>confdefs.h
14180-
14181-fi
14182-
14183-
14184-# Check whether --enable-zone-allocator was given.
14185-if test "${enable_zone_allocator+set}" = set; then :
14186-  enableval=$enable_zone_allocator; if test "x$enable_zone_allocator" = "xno" ; then
14187-  enable_zone_allocator="0"
14188-else
14189-  enable_zone_allocator="1"
14190-fi
14191-
14192-else
14193-  if test "x${abi}" = "xmacho"; then
14194-  enable_zone_allocator="1"
14195-fi
14196-
14197-
14198-fi
14199-
14200-
14201-
14202-if test "x${enable_zone_allocator}" = "x1" ; then
14203-  if test "x${abi}" != "xmacho"; then
14204-    as_fn_error $? "--enable-zone-allocator is only supported on Darwin" "$LINENO" 5
14205-  fi
14206-
14207-$as_echo "#define JEMALLOC_ZONE  " >>confdefs.h
14208-
14209-fi
14210-
14211-# Check whether --enable-initial-exec-tls was given.
14212-if test "${enable_initial_exec_tls+set}" = set; then :
14213-  enableval=$enable_initial_exec_tls; if test "x$enable_initial_exec_tls" = "xno" ; then
14214-  enable_initial_exec_tls="0"
14215-else
14216-  enable_initial_exec_tls="1"
14217-fi
14218-
14219-else
14220-  enable_initial_exec_tls="1"
14221-
14222-fi
14223-
14224-
14225-
14226-if test "x${je_cv_tls_model}" = "xyes" -a \
14227-       "x${enable_initial_exec_tls}" = "x1" ; then
14228-
14229-$as_echo "#define JEMALLOC_TLS_MODEL __attribute__((tls_model(\"initial-exec\")))" >>confdefs.h
14230-
14231-else
14232-
14233-$as_echo "#define JEMALLOC_TLS_MODEL  " >>confdefs.h
14234-
14235-fi
14236-
14237-
14238-if test "x${have_pthread}" = "x1" -a "x${je_cv_os_unfair_lock}" != "xyes" -a \
14239-       "x${abi}" != "xmacho" ; then
14240-
14241-$as_echo "#define JEMALLOC_BACKGROUND_THREAD  " >>confdefs.h
14242-
14243-fi
14244-
14245-
14246-if test "x$glibc" = "x1" ; then
14247-
14248-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether glibc malloc hook is compilable" >&5
14249-$as_echo_n "checking whether glibc malloc hook is compilable... " >&6; }
14250-if ${je_cv_glibc_malloc_hook+:} false; then :
14251-  $as_echo_n "(cached) " >&6
14252-else
14253-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
14254-/* end confdefs.h.  */
14255-
14256-  #include <stddef.h>
14257-
14258-  extern void (* __free_hook)(void *ptr);
14259-  extern void *(* __malloc_hook)(size_t size);
14260-  extern void *(* __realloc_hook)(void *ptr, size_t size);
14261-
14262-int
14263-main ()
14264-{
14265-
14266-    void *ptr = 0L;
14267-    if (__malloc_hook) ptr = __malloc_hook(1);
14268-    if (__realloc_hook) ptr = __realloc_hook(ptr, 2);
14269-    if (__free_hook && ptr) __free_hook(ptr);
14270-
14271-  ;
14272-  return 0;
14273-}
14274-_ACEOF
14275-if ac_fn_c_try_link "$LINENO"; then :
14276-  je_cv_glibc_malloc_hook=yes
14277-else
14278-  je_cv_glibc_malloc_hook=no
14279-fi
14280-rm -f core conftest.err conftest.$ac_objext \
14281-    conftest$ac_exeext conftest.$ac_ext
14282-fi
14283-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_glibc_malloc_hook" >&5
14284-$as_echo "$je_cv_glibc_malloc_hook" >&6; }
14285-
14286-  if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then
14287-    if test "x${JEMALLOC_PREFIX}" = "x" ; then
14288-
14289-$as_echo "#define JEMALLOC_GLIBC_MALLOC_HOOK  " >>confdefs.h
14290-
14291-      wrap_syms="${wrap_syms} __free_hook __malloc_hook __realloc_hook"
14292-    fi
14293-  fi
14294-
14295-
14296-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether glibc memalign hook is compilable" >&5
14297-$as_echo_n "checking whether glibc memalign hook is compilable... " >&6; }
14298-if ${je_cv_glibc_memalign_hook+:} false; then :
14299-  $as_echo_n "(cached) " >&6
14300-else
14301-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
14302-/* end confdefs.h.  */
14303-
14304-  #include <stddef.h>
14305-
14306-  extern void *(* __memalign_hook)(size_t alignment, size_t size);
14307-
14308-int
14309-main ()
14310-{
14311-
14312-    void *ptr = 0L;
14313-    if (__memalign_hook) ptr = __memalign_hook(16, 7);
14314-
14315-  ;
14316-  return 0;
14317-}
14318-_ACEOF
14319-if ac_fn_c_try_link "$LINENO"; then :
14320-  je_cv_glibc_memalign_hook=yes
14321-else
14322-  je_cv_glibc_memalign_hook=no
14323-fi
14324-rm -f core conftest.err conftest.$ac_objext \
14325-    conftest$ac_exeext conftest.$ac_ext
14326-fi
14327-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_glibc_memalign_hook" >&5
14328-$as_echo "$je_cv_glibc_memalign_hook" >&6; }
14329-
14330-  if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then
14331-    if test "x${JEMALLOC_PREFIX}" = "x" ; then
14332-
14333-$as_echo "#define JEMALLOC_GLIBC_MEMALIGN_HOOK  " >>confdefs.h
14334-
14335-      wrap_syms="${wrap_syms} __memalign_hook"
14336-    fi
14337-  fi
14338-fi
14339-
14340-
14341-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads adaptive mutexes is compilable" >&5
14342-$as_echo_n "checking whether pthreads adaptive mutexes is compilable... " >&6; }
14343-if ${je_cv_pthread_mutex_adaptive_np+:} false; then :
14344-  $as_echo_n "(cached) " >&6
14345-else
14346-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
14347-/* end confdefs.h.  */
14348-
14349-#include <pthread.h>
14350-
14351-int
14352-main ()
14353-{
14354-
14355-  pthread_mutexattr_t attr;
14356-  pthread_mutexattr_init(&attr);
14357-  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
14358-  pthread_mutexattr_destroy(&attr);
14359-
14360-  ;
14361-  return 0;
14362-}
14363-_ACEOF
14364-if ac_fn_c_try_link "$LINENO"; then :
14365-  je_cv_pthread_mutex_adaptive_np=yes
14366-else
14367-  je_cv_pthread_mutex_adaptive_np=no
14368-fi
14369-rm -f core conftest.err conftest.$ac_objext \
14370-    conftest$ac_exeext conftest.$ac_ext
14371-fi
14372-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_pthread_mutex_adaptive_np" >&5
14373-$as_echo "$je_cv_pthread_mutex_adaptive_np" >&6; }
14374-
14375-if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then
14376-
14377-$as_echo "#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP  " >>confdefs.h
14378-
14379-fi
14380-
14381-SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
14382-
14383-
14384-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -D_GNU_SOURCE" >&5
14385-$as_echo_n "checking whether compiler supports -D_GNU_SOURCE... " >&6; }
14386-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
14387-T_APPEND_V=-D_GNU_SOURCE
14388-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
14389-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
14390-else
14391-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
14392-fi
14393-
14394-
14395-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
14396-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
14397-else
14398-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
14399-fi
14400-
14401-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
14402-/* end confdefs.h.  */
14403-
14404-
14405-int
14406-main ()
14407-{
14408-
14409-    return 0;
14410-
14411-  ;
14412-  return 0;
14413-}
14414-_ACEOF
14415-if ac_fn_c_try_compile "$LINENO"; then :
14416-  je_cv_cflags_added=-D_GNU_SOURCE
14417-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
14418-$as_echo "yes" >&6; }
14419-else
14420-  je_cv_cflags_added=
14421-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
14422-$as_echo "no" >&6; }
14423-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
14424-
14425-fi
14426-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
14427-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
14428-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
14429-else
14430-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
14431-fi
14432-
14433-
14434-
14435-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -Werror" >&5
14436-$as_echo_n "checking whether compiler supports -Werror... " >&6; }
14437-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
14438-T_APPEND_V=-Werror
14439-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
14440-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
14441-else
14442-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
14443-fi
14444-
14445-
14446-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
14447-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
14448-else
14449-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
14450-fi
14451-
14452-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
14453-/* end confdefs.h.  */
14454-
14455-
14456-int
14457-main ()
14458-{
14459-
14460-    return 0;
14461-
14462-  ;
14463-  return 0;
14464-}
14465-_ACEOF
14466-if ac_fn_c_try_compile "$LINENO"; then :
14467-  je_cv_cflags_added=-Werror
14468-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
14469-$as_echo "yes" >&6; }
14470-else
14471-  je_cv_cflags_added=
14472-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
14473-$as_echo "no" >&6; }
14474-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
14475-
14476-fi
14477-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
14478-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
14479-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
14480-else
14481-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
14482-fi
14483-
14484-
14485-
14486-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports -herror_on_warning" >&5
14487-$as_echo_n "checking whether compiler supports -herror_on_warning... " >&6; }
14488-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
14489-T_APPEND_V=-herror_on_warning
14490-  if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${T_APPEND_V}" = "x" ; then
14491-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}${T_APPEND_V}"
14492-else
14493-  CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS} ${T_APPEND_V}"
14494-fi
14495-
14496-
14497-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
14498-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
14499-else
14500-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
14501-fi
14502-
14503-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
14504-/* end confdefs.h.  */
14505-
14506-
14507-int
14508-main ()
14509-{
14510-
14511-    return 0;
14512-
14513-  ;
14514-  return 0;
14515-}
14516-_ACEOF
14517-if ac_fn_c_try_compile "$LINENO"; then :
14518-  je_cv_cflags_added=-herror_on_warning
14519-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
14520-$as_echo "yes" >&6; }
14521-else
14522-  je_cv_cflags_added=
14523-              { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
14524-$as_echo "no" >&6; }
14525-              CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"
14526-
14527-fi
14528-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
14529-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
14530-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
14531-else
14532-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
14533-fi
14534-
14535-
14536-
14537-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether strerror_r returns char with gnu source is compilable" >&5
14538-$as_echo_n "checking whether strerror_r returns char with gnu source is compilable... " >&6; }
14539-if ${je_cv_strerror_r_returns_char_with_gnu_source+:} false; then :
14540-  $as_echo_n "(cached) " >&6
14541-else
14542-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
14543-/* end confdefs.h.  */
14544-
14545-#include <errno.h>
14546-#include <stdio.h>
14547-#include <stdlib.h>
14548-#include <string.h>
14549-
14550-int
14551-main ()
14552-{
14553-
14554-  char *buffer = (char *) malloc(100);
14555-  char *error = strerror_r(EINVAL, buffer, 100);
14556-  printf("%s\n", error);
14557-
14558-  ;
14559-  return 0;
14560-}
14561-_ACEOF
14562-if ac_fn_c_try_link "$LINENO"; then :
14563-  je_cv_strerror_r_returns_char_with_gnu_source=yes
14564-else
14565-  je_cv_strerror_r_returns_char_with_gnu_source=no
14566-fi
14567-rm -f core conftest.err conftest.$ac_objext \
14568-    conftest$ac_exeext conftest.$ac_ext
14569-fi
14570-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $je_cv_strerror_r_returns_char_with_gnu_source" >&5
14571-$as_echo "$je_cv_strerror_r_returns_char_with_gnu_source" >&6; }
14572-
14573-CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
14574-if test "x${CONFIGURE_CFLAGS}" = "x" -o "x${SPECIFIED_CFLAGS}" = "x" ; then
14575-  CFLAGS="${CONFIGURE_CFLAGS}${SPECIFIED_CFLAGS}"
14576-else
14577-  CFLAGS="${CONFIGURE_CFLAGS} ${SPECIFIED_CFLAGS}"
14578-fi
14579-
14580-
14581-if test "x${je_cv_strerror_r_returns_char_with_gnu_source}" = "xyes" ; then
14582-
14583-$as_echo "#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE  " >>confdefs.h
14584-
14585-fi
14586-
14587-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5
14588-$as_echo_n "checking for stdbool.h that conforms to C99... " >&6; }
14589-if ${ac_cv_header_stdbool_h+:} false; then :
14590-  $as_echo_n "(cached) " >&6
14591-else
14592-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
14593-/* end confdefs.h.  */
14594-
14595-             #include <stdbool.h>
14596-             #ifndef bool
14597-              "error: bool is not defined"
14598-             #endif
14599-             #ifndef false
14600-              "error: false is not defined"
14601-             #endif
14602-             #if false
14603-              "error: false is not 0"
14604-             #endif
14605-             #ifndef true
14606-              "error: true is not defined"
14607-             #endif
14608-             #if true != 1
14609-              "error: true is not 1"
14610-             #endif
14611-             #ifndef __bool_true_false_are_defined
14612-              "error: __bool_true_false_are_defined is not defined"
14613-             #endif
14614-
14615-             struct s { _Bool s: 1; _Bool t; } s;
14616-
14617-             char a[true == 1 ? 1 : -1];
14618-             char b[false == 0 ? 1 : -1];
14619-             char c[__bool_true_false_are_defined == 1 ? 1 : -1];
14620-             char d[(bool) 0.5 == true ? 1 : -1];
14621-             /* See body of main program for 'e'.  */
14622-             char f[(_Bool) 0.0 == false ? 1 : -1];
14623-             char g[true];
14624-             char h[sizeof (_Bool)];
14625-             char i[sizeof s.t];
14626-             enum { j = false, k = true, l = false * true, m = true * 256 };
14627-             /* The following fails for
14628-                HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */
14629-             _Bool n[m];
14630-             char o[sizeof n == m * sizeof n[0] ? 1 : -1];
14631-             char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1];
14632-             /* Catch a bug in an HP-UX C compiler.  See
14633-                http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html
14634-                http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html
14635-              */
14636-             _Bool q = true;
14637-             _Bool *pq = &q;
14638-
14639-int
14640-main ()
14641-{
14642-
14643-             bool e = &s;
14644-             *pq |= q;
14645-             *pq |= ! q;
14646-             /* Refer to every declared value, to avoid compiler optimizations.  */
14647-             return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l
14648-                     + !m + !n + !o + !p + !q + !pq);
14649-
14650-  ;
14651-  return 0;
14652-}
14653-_ACEOF
14654-if ac_fn_c_try_compile "$LINENO"; then :
14655-  ac_cv_header_stdbool_h=yes
14656-else
14657-  ac_cv_header_stdbool_h=no
14658-fi
14659-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
14660-fi
14661-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5
14662-$as_echo "$ac_cv_header_stdbool_h" >&6; }
14663-   ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default"
14664-if test "x$ac_cv_type__Bool" = xyes; then :
14665-
14666-cat >>confdefs.h <<_ACEOF
14667-#define HAVE__BOOL 1
14668-_ACEOF
14669-
14670-
14671-fi
14672-
14673-
14674-if test $ac_cv_header_stdbool_h = yes; then
14675-
14676-$as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h
14677-
14678-fi
14679-
14680-
14681-
14682-ac_config_commands="$ac_config_commands include/jemalloc/internal/public_symbols.txt"
14683-
14684-ac_config_commands="$ac_config_commands include/jemalloc/internal/private_symbols.awk"
14685-
14686-ac_config_commands="$ac_config_commands include/jemalloc/internal/private_symbols_jet.awk"
14687-
14688-ac_config_commands="$ac_config_commands include/jemalloc/internal/public_namespace.h"
14689-
14690-ac_config_commands="$ac_config_commands include/jemalloc/internal/public_unnamespace.h"
14691-
14692-ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_protos_jet.h"
14693-
14694-ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_rename.h"
14695-
14696-ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_mangle.h"
14697-
14698-ac_config_commands="$ac_config_commands include/jemalloc/jemalloc_mangle_jet.h"
14699-
14700-ac_config_commands="$ac_config_commands include/jemalloc/jemalloc.h"
14701-
14702-
14703-
14704-
14705-ac_config_headers="$ac_config_headers $cfghdrs_tup"
14706-
14707-
14708-
14709-ac_config_files="$ac_config_files $cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof"
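
Each ac_config_commands, ac_config_headers and ac_config_files registration above becomes a tag that the generated config.status script can instantiate individually, so a single output can be regenerated without rerunning every configure check. For example, with the tags registered above:

# Regenerate just one registered command output:
./config.status include/jemalloc/jemalloc_mangle.h

# Regenerate all registered files, headers and commands:
./config.status

# Re-run configure with the originally recorded arguments, then regenerate:
./config.status --recheck && ./config.status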
14710-
14711-
14712-
14713-cat >confcache <<\_ACEOF
14714-# This file is a shell script that caches the results of configure
14715-# tests run on this system so they can be shared between configure
14716-# scripts and configure runs, see configure's option --config-cache.
14717-# It is not useful on other systems.  If it contains results you don't
14718-# want to keep, you may remove or edit it.
14719-#
14720-# config.status only pays attention to the cache file if you give it
14721-# the --recheck option to rerun configure.
14722-#
14723-# `ac_cv_env_foo' variables (set or unset) will be overridden when
14724-# loading this file, other *unset* `ac_cv_foo' will be assigned the
14725-# following values.
14726-
14727-_ACEOF
14728-
14729-# The following way of writing the cache mishandles newlines in values,
14730-# but we know of no workaround that is simple, portable, and efficient.
14731-# So, we kill variables containing newlines.
14732-# Ultrix sh set writes to stderr and can't be redirected directly,
14733-# and sets the high bit in the cache file unless we assign to the vars.
14734-(
14735-  for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
14736-    eval ac_val=\$$ac_var
14737-    case $ac_val in #(
14738-    *${as_nl}*)
14739-      case $ac_var in #(
14740-      *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
14741-$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
14742-      esac
14743-      case $ac_var in #(
14744-      _ | IFS | as_nl) ;; #(
14745-      BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
14746-      *) { eval $ac_var=; unset $ac_var;} ;;
14747-      esac ;;
14748-    esac
14749-  done
14750-
14751-  (set) 2>&1 |
14752-    case $as_nl`(ac_space=' '; set) 2>&1` in #(
14753-    *${as_nl}ac_space=\ *)
14754-      # `set' does not quote correctly, so add quotes: double-quote
14755-      # substitution turns \\\\ into \\, and sed turns \\ into \.
14756-      sed -n \
14757-	"s/'/'\\\\''/g;
14758-	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
14759-      ;; #(
14760-    *)
14761-      # `set' quotes correctly as required by POSIX, so do not add quotes.
14762-      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
14763-      ;;
14764-    esac |
14765-    sort
14766-) |
14767-  sed '
14768-     /^ac_cv_env_/b end
14769-     t clear
14770-     :clear
14771-     s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
14772-     t end
14773-     s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
14774-     :end' >>confcache
14775-if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
14776-  if test -w "$cache_file"; then
14777-    if test "x$cache_file" != "x/dev/null"; then
14778-      { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
14779-$as_echo "$as_me: updating cache $cache_file" >&6;}
14780-      if test ! -f "$cache_file" || test -h "$cache_file"; then
14781-	cat confcache >"$cache_file"
14782-      else
14783-        case $cache_file in #(
14784-        */* | ?:*)
14785-	  mv -f confcache "$cache_file"$$ &&
14786-	  mv -f "$cache_file"$$ "$cache_file" ;; #(
14787-        *)
14788-	  mv -f confcache "$cache_file" ;;
14789-	esac
14790-      fi
14791-    fi
14792-  else
14793-    { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
14794-$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;}
14795-  fi
14796-fi
14797-rm -f confcache
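
The block above serializes every *_cv_* variable into cache form, drops values that contain newlines, and rewrites $cache_file only when its contents changed. The cache is opt-in; a hedged sketch of typical use, including pre-seeding an answer that a cross build cannot probe at run time (the variable name is only an example, any *_cv_* result works the same way):

# Keep results between configure runs in the same tree:
./configure --config-cache          # shorthand for --cache-file=config.cache

# Pre-seed a cached answer when cross compiling:
./configure --cache-file=config.cache ac_cv_header_stdbool_h=yes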
14798-
14799-test "x$prefix" = xNONE && prefix=$ac_default_prefix
14800-# Let make expand exec_prefix.
14801-test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
14802-
14803-DEFS=-DHAVE_CONFIG_H
14804-
14805-ac_libobjs=
14806-ac_ltlibobjs=
14807-U=
14808-for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
14809-  # 1. Remove the extension, and $U if already installed.
14810-  ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
14811-  ac_i=`$as_echo "$ac_i" | sed "$ac_script"`
14812-  # 2. Prepend LIBOBJDIR.  When used with automake>=1.10 LIBOBJDIR
14813-  #    will be set to the directory where LIBOBJS objects are built.
14814-  as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext"
14815-  as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo'
14816-done
14817-LIBOBJS=$ac_libobjs
14818-
14819-LTLIBOBJS=$ac_ltlibobjs
14820-
14821-
14822-
14823-
14824-: "${CONFIG_STATUS=./config.status}"
14825-ac_write_fail=0
14826-ac_clean_files_save=$ac_clean_files
14827-ac_clean_files="$ac_clean_files $CONFIG_STATUS"
14828-{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5
14829-$as_echo "$as_me: creating $CONFIG_STATUS" >&6;}
14830-as_write_fail=0
14831-cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1
14832-#! $SHELL
14833-# Generated by $as_me.
14834-# Run this file to recreate the current configuration.
14835-# Compiler output produced by configure, useful for debugging
14836-# configure, is in config.log if it exists.
14837-
14838-debug=false
14839-ac_cs_recheck=false
14840-ac_cs_silent=false
14841-
14842-SHELL=\${CONFIG_SHELL-$SHELL}
14843-export SHELL
14844-_ASEOF
14845-cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1
14846-## -------------------- ##
14847-## M4sh Initialization. ##
14848-## -------------------- ##
14849-
14850-# Be more Bourne compatible
14851-DUALCASE=1; export DUALCASE # for MKS sh
14852-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
14853-  emulate sh
14854-  NULLCMD=:
14855-  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
14856-  # is contrary to our usage.  Disable this feature.
14857-  alias -g '${1+"$@"}'='"$@"'
14858-  setopt NO_GLOB_SUBST
14859-else
14860-  case `(set -o) 2>/dev/null` in #(
14861-  *posix*) :
14862-    set -o posix ;; #(
14863-  *) :
14864-     ;;
14865-esac
14866-fi
14867-
14868-
14869-as_nl='
14870-'
14871-export as_nl
14872-# Printing a long string crashes Solaris 7 /usr/bin/printf.
14873-as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
14874-as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
14875-as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
14876-# Prefer a ksh shell builtin over an external printf program on Solaris,
14877-# but without wasting forks for bash or zsh.
14878-if test -z "$BASH_VERSION$ZSH_VERSION" \
14879-    && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
14880-  as_echo='print -r --'
14881-  as_echo_n='print -rn --'
14882-elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
14883-  as_echo='printf %s\n'
14884-  as_echo_n='printf %s'
14885-else
14886-  if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
14887-    as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
14888-    as_echo_n='/usr/ucb/echo -n'
14889-  else
14890-    as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
14891-    as_echo_n_body='eval
14892-      arg=$1;
14893-      case $arg in #(
14894-      *"$as_nl"*)
14895-	expr "X$arg" : "X\\(.*\\)$as_nl";
14896-	arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
14897-      esac;
14898-      expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
14899-    '
14900-    export as_echo_n_body
14901-    as_echo_n='sh -c $as_echo_n_body as_echo'
14902-  fi
14903-  export as_echo_body
14904-  as_echo='sh -c $as_echo_body as_echo'
14905-fi
14906-
14907-# The user is always right.
14908-if test "${PATH_SEPARATOR+set}" != set; then
14909-  PATH_SEPARATOR=:
14910-  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
14911-    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
14912-      PATH_SEPARATOR=';'
14913-  }
14914-fi
14915-
14916-
14917-# IFS
14918-# We need space, tab and new line, in precisely that order.  Quoting is
14919-# there to prevent editors from complaining about space-tab.
14920-# (If _AS_PATH_WALK were called with IFS unset, it would disable word
14921-# splitting by setting IFS to empty value.)
14922-IFS=" ""	$as_nl"
14923-
14924-# Find who we are.  Look in the path if we contain no directory separator.
14925-as_myself=
14926-case $0 in #((
14927-  *[\\/]* ) as_myself=$0 ;;
14928-  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
14929-for as_dir in $PATH
14930-do
14931-  IFS=$as_save_IFS
14932-  test -z "$as_dir" && as_dir=.
14933-    test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
14934-  done
14935-IFS=$as_save_IFS
14936-
14937-     ;;
14938-esac
14939-# We did not find ourselves, most probably we were run as `sh COMMAND'
14940-# in which case we are not to be found in the path.
14941-if test "x$as_myself" = x; then
14942-  as_myself=$0
14943-fi
14944-if test ! -f "$as_myself"; then
14945-  $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
14946-  exit 1
14947-fi
14948-
14949-# Unset variables that we do not need and which cause bugs (e.g. in
14950-# pre-3.0 UWIN ksh).  But do not cause bugs in bash 2.01; the "|| exit 1"
14951-# suppresses any "Segmentation fault" message there.  '((' could
14952-# trigger a bug in pdksh 5.2.14.
14953-for as_var in BASH_ENV ENV MAIL MAILPATH
14954-do eval test x\${$as_var+set} = xset \
14955-  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
14956-done
14957-PS1='$ '
14958-PS2='> '
14959-PS4='+ '
14960-
14961-# NLS nuisances.
14962-LC_ALL=C
14963-export LC_ALL
14964-LANGUAGE=C
14965-export LANGUAGE
14966-
14967-# CDPATH.
14968-(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
14969-
14970-
14971-# as_fn_error STATUS ERROR [LINENO LOG_FD]
14972-# ----------------------------------------
14973-# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
14974-# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
14975-# script with STATUS, using 1 if that was 0.
14976-as_fn_error ()
14977-{
14978-  as_status=$1; test $as_status -eq 0 && as_status=1
14979-  if test "$4"; then
14980-    as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
14981-    $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
14982-  fi
14983-  $as_echo "$as_me: error: $2" >&2
14984-  as_fn_exit $as_status
14985-} # as_fn_error
14986-
14987-
14988-# as_fn_set_status STATUS
14989-# -----------------------
14990-# Set $? to STATUS, without forking.
14991-as_fn_set_status ()
14992-{
14993-  return $1
14994-} # as_fn_set_status
14995-
14996-# as_fn_exit STATUS
14997-# -----------------
14998-# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
14999-as_fn_exit ()
15000-{
15001-  set +e
15002-  as_fn_set_status $1
15003-  exit $1
15004-} # as_fn_exit
15005-
15006-# as_fn_unset VAR
15007-# ---------------
15008-# Portably unset VAR.
15009-as_fn_unset ()
15010-{
15011-  { eval $1=; unset $1;}
15012-}
15013-as_unset=as_fn_unset
15014-# as_fn_append VAR VALUE
15015-# ----------------------
15016-# Append the text in VALUE to the end of the definition contained in VAR. Take
15017-# advantage of any shell optimizations that allow amortized linear growth over
15018-# repeated appends, instead of the typical quadratic growth present in naive
15019-# implementations.
15020-if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
15021-  eval 'as_fn_append ()
15022-  {
15023-    eval $1+=\$2
15024-  }'
15025-else
15026-  as_fn_append ()
15027-  {
15028-    eval $1=\$$1\$2
15029-  }
15030-fi # as_fn_append
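
The capability test above selects the += implementation only when the running shell actually supports it, and falls back to plain re-concatenation otherwise. Usage is positional: variable name first, text to append second.

# Append without forking; on a +=-capable shell this grows in amortized linear time.
extra_flags=
as_fn_append extra_flags " -O2"
as_fn_append extra_flags " -g"
echo "$extra_flags"      # prints " -O2 -g" (note the leading space)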
15031-
15032-# as_fn_arith ARG...
15033-# ------------------
15034-# Perform arithmetic evaluation on the ARGs, and store the result in the
15035-# global $as_val. Take advantage of shells that can avoid forks. The arguments
15036-# must be portable across $(()) and expr.
15037-if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
15038-  eval 'as_fn_arith ()
15039-  {
15040-    as_val=$(( $* ))
15041-  }'
15042-else
15043-  as_fn_arith ()
15044-  {
15045-    as_val=`expr "$@" || test $? -eq 1`
15046-  }
15047-fi # as_fn_arith
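
Similarly, as_fn_arith prefers the shell's built-in $(( )) arithmetic and only forks expr when that is unavailable; the result always lands in $as_val.

as_fn_arith 7 \* 6       # operands must be valid for both $(( )) and expr
echo "$as_val"           # 42
as_fn_arith $as_val + 1
echo "$as_val"           # 43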
15048-
15049-
15050-if expr a : '\(a\)' >/dev/null 2>&1 &&
15051-   test "X`expr 00001 : '.*\(...\)'`" = X001; then
15052-  as_expr=expr
15053-else
15054-  as_expr=false
15055-fi
15056-
15057-if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
15058-  as_basename=basename
15059-else
15060-  as_basename=false
15061-fi
15062-
15063-if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
15064-  as_dirname=dirname
15065-else
15066-  as_dirname=false
15067-fi
15068-
15069-as_me=`$as_basename -- "$0" ||
15070-$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
15071-	 X"$0" : 'X\(//\)$' \| \
15072-	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
15073-$as_echo X/"$0" |
15074-    sed '/^.*\/\([^/][^/]*\)\/*$/{
15075-	    s//\1/
15076-	    q
15077-	  }
15078-	  /^X\/\(\/\/\)$/{
15079-	    s//\1/
15080-	    q
15081-	  }
15082-	  /^X\/\(\/\).*/{
15083-	    s//\1/
15084-	    q
15085-	  }
15086-	  s/.*/./; q'`
15087-
15088-# Avoid depending upon Character Ranges.
15089-as_cr_letters='abcdefghijklmnopqrstuvwxyz'
15090-as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
15091-as_cr_Letters=$as_cr_letters$as_cr_LETTERS
15092-as_cr_digits='0123456789'
15093-as_cr_alnum=$as_cr_Letters$as_cr_digits
15094-
15095-ECHO_C= ECHO_N= ECHO_T=
15096-case `echo -n x` in #(((((
15097--n*)
15098-  case `echo 'xy\c'` in
15099-  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
15100-  xy)  ECHO_C='\c';;
15101-  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
15102-       ECHO_T='	';;
15103-  esac;;
15104-*)
15105-  ECHO_N='-n';;
15106-esac
15107-
15108-rm -f conf$$ conf$$.exe conf$$.file
15109-if test -d conf$$.dir; then
15110-  rm -f conf$$.dir/conf$$.file
15111-else
15112-  rm -f conf$$.dir
15113-  mkdir conf$$.dir 2>/dev/null
15114-fi
15115-if (echo >conf$$.file) 2>/dev/null; then
15116-  if ln -s conf$$.file conf$$ 2>/dev/null; then
15117-    as_ln_s='ln -s'
15118-    # ... but there are two gotchas:
15119-    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
15120-    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
15121-    # In both cases, we have to default to `cp -pR'.
15122-    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
15123-      as_ln_s='cp -pR'
15124-  elif ln conf$$.file conf$$ 2>/dev/null; then
15125-    as_ln_s=ln
15126-  else
15127-    as_ln_s='cp -pR'
15128-  fi
15129-else
15130-  as_ln_s='cp -pR'
15131-fi
15132-rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
15133-rmdir conf$$.dir 2>/dev/null
15134-
15135-
15136-# as_fn_mkdir_p
15137-# -------------
15138-# Create "$as_dir" as a directory, including parents if necessary.
15139-as_fn_mkdir_p ()
15140-{
15141-
15142-  case $as_dir in #(
15143-  -*) as_dir=./$as_dir;;
15144-  esac
15145-  test -d "$as_dir" || eval $as_mkdir_p || {
15146-    as_dirs=
15147-    while :; do
15148-      case $as_dir in #(
15149-      *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
15150-      *) as_qdir=$as_dir;;
15151-      esac
15152-      as_dirs="'$as_qdir' $as_dirs"
15153-      as_dir=`$as_dirname -- "$as_dir" ||
15154-$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
15155-	 X"$as_dir" : 'X\(//\)[^/]' \| \
15156-	 X"$as_dir" : 'X\(//\)$' \| \
15157-	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
15158-$as_echo X"$as_dir" |
15159-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
15160-	    s//\1/
15161-	    q
15162-	  }
15163-	  /^X\(\/\/\)[^/].*/{
15164-	    s//\1/
15165-	    q
15166-	  }
15167-	  /^X\(\/\/\)$/{
15168-	    s//\1/
15169-	    q
15170-	  }
15171-	  /^X\(\/\).*/{
15172-	    s//\1/
15173-	    q
15174-	  }
15175-	  s/.*/./; q'`
15176-      test -d "$as_dir" && break
15177-    done
15178-    test -z "$as_dirs" || eval "mkdir $as_dirs"
15179-  } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
15180-
15181-
15182-} # as_fn_mkdir_p
15183-if mkdir -p . 2>/dev/null; then
15184-  as_mkdir_p='mkdir -p "$as_dir"'
15185-else
15186-  test -d ./-p && rmdir ./-p
15187-  as_mkdir_p=false
15188-fi
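
Note that as_fn_mkdir_p takes no arguments; it reads the global $as_dir, which is the same calling convention config.status uses further down (as_dir="$ac_dir"; as_fn_mkdir_p). A minimal usage sketch:

# Portably create a nested directory, even on systems without a working `mkdir -p`:
as_dir="include/jemalloc/internal"
as_fn_mkdir_p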
15189-
15190-
15191-# as_fn_executable_p FILE
15192-# -----------------------
15193-# Test if FILE is an executable regular file.
15194-as_fn_executable_p ()
15195-{
15196-  test -f "$1" && test -x "$1"
15197-} # as_fn_executable_p
15198-as_test_x='test -x'
15199-as_executable_p=as_fn_executable_p
15200-
15201-# Sed expression to map a string onto a valid CPP name.
15202-as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
15203-
15204-# Sed expression to map a string onto a valid variable name.
15205-as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
15206-
15207-
15208-exec 6>&1
15209-## ----------------------------------- ##
15210-## Main body of $CONFIG_STATUS script. ##
15211-## ----------------------------------- ##
15212-_ASEOF
15213-test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1
15214-
15215-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
15216-# Save the log message, to keep $0 and so on meaningful, and to
15217-# report actual input values of CONFIG_FILES etc. instead of their
15218-# values after options handling.
15219-ac_log="
15220-This file was extended by $as_me, which was
15221-generated by GNU Autoconf 2.69.  Invocation command line was
15222-
15223-  CONFIG_FILES    = $CONFIG_FILES
15224-  CONFIG_HEADERS  = $CONFIG_HEADERS
15225-  CONFIG_LINKS    = $CONFIG_LINKS
15226-  CONFIG_COMMANDS = $CONFIG_COMMANDS
15227-  $ $0 $@
15228-
15229-on `(hostname || uname -n) 2>/dev/null | sed 1q`
15230-"
15231-
15232-_ACEOF
15233-
15234-case $ac_config_files in *"
15235-"*) set x $ac_config_files; shift; ac_config_files=$*;;
15236-esac
15237-
15238-case $ac_config_headers in *"
15239-"*) set x $ac_config_headers; shift; ac_config_headers=$*;;
15240-esac
15241-
15242-
15243-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
15244-# Files that config.status was made for.
15245-config_files="$ac_config_files"
15246-config_headers="$ac_config_headers"
15247-config_commands="$ac_config_commands"
15248-
15249-_ACEOF
15250-
15251-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
15252-ac_cs_usage="\
15253-\`$as_me' instantiates files and other configuration actions
15254-from templates according to the current configuration.  Unless the files
15255-and actions are specified as TAGs, all are instantiated by default.
15256-
15257-Usage: $0 [OPTION]... [TAG]...
15258-
15259-  -h, --help       print this help, then exit
15260-  -V, --version    print version number and configuration settings, then exit
15261-      --config     print configuration, then exit
15262-  -q, --quiet, --silent
15263-                   do not print progress messages
15264-  -d, --debug      don't remove temporary files
15265-      --recheck    update $as_me by reconfiguring in the same conditions
15266-      --file=FILE[:TEMPLATE]
15267-                   instantiate the configuration file FILE
15268-      --header=FILE[:TEMPLATE]
15269-                   instantiate the configuration header FILE
15270-
15271-Configuration files:
15272-$config_files
15273-
15274-Configuration headers:
15275-$config_headers
15276-
15277-Configuration commands:
15278-$config_commands
15279-
15280-Report bugs to the package provider."
15281-
15282-_ACEOF
15283-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
15284-ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
15285-ac_cs_version="\\
15286-config.status
15287-configured by $0, generated by GNU Autoconf 2.69,
15288-  with options \\"\$ac_cs_config\\"
15289-
15290-Copyright (C) 2012 Free Software Foundation, Inc.
15291-This config.status script is free software; the Free Software Foundation
15292-gives unlimited permission to copy, distribute and modify it."
15293-
15294-ac_pwd='$ac_pwd'
15295-srcdir='$srcdir'
15296-INSTALL='$INSTALL'
15297-AWK='$AWK'
15298-test -n "\$AWK" || AWK=awk
15299-_ACEOF
15300-
15301-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
15302-# The default lists apply if the user does not specify any file.
15303-ac_need_defaults=:
15304-while test $# != 0
15305-do
15306-  case $1 in
15307-  --*=?*)
15308-    ac_option=`expr "X$1" : 'X\([^=]*\)='`
15309-    ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
15310-    ac_shift=:
15311-    ;;
15312-  --*=)
15313-    ac_option=`expr "X$1" : 'X\([^=]*\)='`
15314-    ac_optarg=
15315-    ac_shift=:
15316-    ;;
15317-  *)
15318-    ac_option=$1
15319-    ac_optarg=$2
15320-    ac_shift=shift
15321-    ;;
15322-  esac
15323-
15324-  case $ac_option in
15325-  # Handling of the options.
15326-  -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
15327-    ac_cs_recheck=: ;;
15328-  --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
15329-    $as_echo "$ac_cs_version"; exit ;;
15330-  --config | --confi | --conf | --con | --co | --c )
15331-    $as_echo "$ac_cs_config"; exit ;;
15332-  --debug | --debu | --deb | --de | --d | -d )
15333-    debug=: ;;
15334-  --file | --fil | --fi | --f )
15335-    $ac_shift
15336-    case $ac_optarg in
15337-    *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
15338-    '') as_fn_error $? "missing file argument" ;;
15339-    esac
15340-    as_fn_append CONFIG_FILES " '$ac_optarg'"
15341-    ac_need_defaults=false;;
15342-  --header | --heade | --head | --hea )
15343-    $ac_shift
15344-    case $ac_optarg in
15345-    *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
15346-    esac
15347-    as_fn_append CONFIG_HEADERS " '$ac_optarg'"
15348-    ac_need_defaults=false;;
15349-  --he | --h)
15350-    # Conflict between --help and --header
15351-    as_fn_error $? "ambiguous option: \`$1'
15352-Try \`$0 --help' for more information.";;
15353-  --help | --hel | -h )
15354-    $as_echo "$ac_cs_usage"; exit ;;
15355-  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
15356-  | -silent | --silent | --silen | --sile | --sil | --si | --s)
15357-    ac_cs_silent=: ;;
15358-
15359-  # This is an error.
15360-  -*) as_fn_error $? "unrecognized option: \`$1'
15361-Try \`$0 --help' for more information." ;;
15362-
15363-  *) as_fn_append ac_config_targets " $1"
15364-     ac_need_defaults=false ;;
15365-
15366-  esac
15367-  shift
15368-done
15369-
15370-ac_configure_extra_args=
15371-
15372-if $ac_cs_silent; then
15373-  exec 6>/dev/null
15374-  ac_configure_extra_args="$ac_configure_extra_args --silent"
15375-fi
15376-
15377-_ACEOF
15378-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
15379-if \$ac_cs_recheck; then
15380-  set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
15381-  shift
15382-  \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
15383-  CONFIG_SHELL='$SHELL'
15384-  export CONFIG_SHELL
15385-  exec "\$@"
15386-fi
15387-
15388-_ACEOF
15389-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
15390-exec 5>>config.log
15391-{
15392-  echo
15393-  sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
15394-## Running $as_me. ##
15395-_ASBOX
15396-  $as_echo "$ac_log"
15397-} >&5
15398-
15399-_ACEOF
15400-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
15401-#
15402-# INIT-COMMANDS
15403-#
15404-
15405-  srcdir="${srcdir}"
15406-  objroot="${objroot}"
15407-  mangling_map="${mangling_map}"
15408-  public_syms="${public_syms}"
15409-  JEMALLOC_PREFIX="${JEMALLOC_PREFIX}"
15410-
15411-
15412-  srcdir="${srcdir}"
15413-  objroot="${objroot}"
15414-  public_syms="${public_syms}"
15415-  wrap_syms="${wrap_syms}"
15416-  SYM_PREFIX="${SYM_PREFIX}"
15417-  JEMALLOC_PREFIX="${JEMALLOC_PREFIX}"
15418-
15419-
15420-  srcdir="${srcdir}"
15421-  objroot="${objroot}"
15422-  public_syms="${public_syms}"
15423-  wrap_syms="${wrap_syms}"
15424-  SYM_PREFIX="${SYM_PREFIX}"
15425-
15426-
15427-  srcdir="${srcdir}"
15428-  objroot="${objroot}"
15429-
15430-
15431-  srcdir="${srcdir}"
15432-  objroot="${objroot}"
15433-
15434-
15435-  srcdir="${srcdir}"
15436-  objroot="${objroot}"
15437-
15438-
15439-  srcdir="${srcdir}"
15440-  objroot="${objroot}"
15441-
15442-
15443-  srcdir="${srcdir}"
15444-  objroot="${objroot}"
15445-
15446-
15447-  srcdir="${srcdir}"
15448-  objroot="${objroot}"
15449-
15450-
15451-  srcdir="${srcdir}"
15452-  objroot="${objroot}"
15453-  install_suffix="${install_suffix}"
15454-
15455-
15456-_ACEOF
15457-
15458-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
15459-
15460-# Handling of arguments.
15461-for ac_config_target in $ac_config_targets
15462-do
15463-  case $ac_config_target in
15464-    "include/jemalloc/internal/public_symbols.txt") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_symbols.txt" ;;
15465-    "include/jemalloc/internal/private_symbols.awk") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/private_symbols.awk" ;;
15466-    "include/jemalloc/internal/private_symbols_jet.awk") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/private_symbols_jet.awk" ;;
15467-    "include/jemalloc/internal/public_namespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_namespace.h" ;;
15468-    "include/jemalloc/internal/public_unnamespace.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/internal/public_unnamespace.h" ;;
15469-    "include/jemalloc/jemalloc_protos_jet.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_protos_jet.h" ;;
15470-    "include/jemalloc/jemalloc_rename.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_rename.h" ;;
15471-    "include/jemalloc/jemalloc_mangle.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_mangle.h" ;;
15472-    "include/jemalloc/jemalloc_mangle_jet.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc_mangle_jet.h" ;;
15473-    "include/jemalloc/jemalloc.h") CONFIG_COMMANDS="$CONFIG_COMMANDS include/jemalloc/jemalloc.h" ;;
15474-    "$cfghdrs_tup") CONFIG_HEADERS="$CONFIG_HEADERS $cfghdrs_tup" ;;
15475-    "$cfgoutputs_tup") CONFIG_FILES="$CONFIG_FILES $cfgoutputs_tup" ;;
15476-    "config.stamp") CONFIG_FILES="$CONFIG_FILES config.stamp" ;;
15477-    "bin/jemalloc-config") CONFIG_FILES="$CONFIG_FILES bin/jemalloc-config" ;;
15478-    "bin/jemalloc.sh") CONFIG_FILES="$CONFIG_FILES bin/jemalloc.sh" ;;
15479-    "bin/jeprof") CONFIG_FILES="$CONFIG_FILES bin/jeprof" ;;
15480-
15481-  *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
15482-  esac
15483-done
15484-
15485-
15486-# If the user did not use the arguments to specify the items to instantiate,
15487-# then the envvar interface is used.  Set only those that are not.
15488-# We use the long form for the default assignment because of an extremely
15489-# bizarre bug on SunOS 4.1.3.
15490-if $ac_need_defaults; then
15491-  test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
15492-  test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
15493-  test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands
15494-fi
15495-
15496-# Have a temporary directory for convenience.  Make it in the build tree
15497-# simply because there is no reason against having it here, and in addition,
15498-# creating and moving files from /tmp can sometimes cause problems.
15499-# Hook for its removal unless debugging.
15500-# Note that there is a small window in which the directory will not be cleaned:
15501-# after its creation but before its name has been assigned to `$tmp'.
15502-$debug ||
15503-{
15504-  tmp= ac_tmp=
15505-  trap 'exit_status=$?
15506-  : "${ac_tmp:=$tmp}"
15507-  { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status
15508-' 0
15509-  trap 'as_fn_exit 1' 1 2 13 15
15510-}
15511-# Create a (secure) tmp directory for tmp files.
15512-
15513-{
15514-  tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
15515-  test -d "$tmp"
15516-}  ||
15517-{
15518-  tmp=./conf$$-$RANDOM
15519-  (umask 077 && mkdir "$tmp")
15520-} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5
15521-ac_tmp=$tmp
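
The pattern above, try mktemp -d under a restrictive umask, fall back to a $$-based mkdir, and let a trap clean up unless debugging, is easy to reuse on its own. A condensed sketch with illustrative names:

tmpdir=`(umask 077 && mktemp -d "./exXXXXXX") 2>/dev/null` ||
  { tmpdir="./ex$$"; (umask 077 && mkdir "$tmpdir"); } ||
  { echo "cannot create a temporary directory" >&2; exit 1; }
trap 'rm -rf "$tmpdir"' 0       # removed on normal exit
trap 'exit 1' 1 2 13 15         # signals funnel into the exit trap above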
15522-
15523-# Set up the scripts for CONFIG_FILES section.
15524-# No need to generate them if there are no CONFIG_FILES.
15525-# This happens for instance with `./config.status config.h'.
15526-if test -n "$CONFIG_FILES"; then
15527-
15528-
15529-ac_cr=`echo X | tr X '\015'`
15530-# On cygwin, bash can eat \r inside `` if the user requested igncr.
15531-# But we know of no other shell where ac_cr would be empty at this
15532-# point, so we can use a bashism as a fallback.
15533-if test "x$ac_cr" = x; then
15534-  eval ac_cr=\$\'\\r\'
15535-fi
15536-ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
15537-if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
15538-  ac_cs_awk_cr='\\r'
15539-else
15540-  ac_cs_awk_cr=$ac_cr
15541-fi
15542-
15543-echo 'BEGIN {' >"$ac_tmp/subs1.awk" &&
15544-_ACEOF
15545-
15546-
15547-{
15548-  echo "cat >conf$$subs.awk <<_ACEOF" &&
15549-  echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
15550-  echo "_ACEOF"
15551-} >conf$$subs.sh ||
15552-  as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
15553-ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'`
15554-ac_delim='%!_!# '
15555-for ac_last_try in false false false false false :; do
15556-  . ./conf$$subs.sh ||
15557-    as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
15558-
15559-  ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
15560-  if test $ac_delim_n = $ac_delim_num; then
15561-    break
15562-  elif $ac_last_try; then
15563-    as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
15564-  else
15565-    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
15566-  fi
15567-done
15568-rm -f conf$$subs.sh
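
The retry loop above is solving a quoting problem: every substitution is emitted on a single line terminated by $ac_delim, so the delimiter must never occur inside a value. When it does, the delimiter is grown and the whole table is rebuilt, with a bounded number of attempts. The same trick in isolation, with illustrative variable names:

data="text that will later be split on the delimiter"
delim='%!_!# '
for last_try in false false false :; do
  case $data in
  *"$delim"*)
    if $last_try; then
      echo "could not find a safe delimiter" >&2; exit 1
    fi
    delim="$delim!$delim _$delim!! "     # grow it and try again
    ;;
  *) break ;;
  esac
done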
15569-
15570-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
15571-cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK &&
15572-_ACEOF
15573-sed -n '
15574-h
15575-s/^/S["/; s/!.*/"]=/
15576-p
15577-g
15578-s/^[^!]*!//
15579-:repl
15580-t repl
15581-s/'"$ac_delim"'$//
15582-t delim
15583-:nl
15584-h
15585-s/\(.\{148\}\)..*/\1/
15586-t more1
15587-s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
15588-p
15589-n
15590-b repl
15591-:more1
15592-s/["\\]/\\&/g; s/^/"/; s/$/"\\/
15593-p
15594-g
15595-s/.\{148\}//
15596-t nl
15597-:delim
15598-h
15599-s/\(.\{148\}\)..*/\1/
15600-t more2
15601-s/["\\]/\\&/g; s/^/"/; s/$/"/
15602-p
15603-b
15604-:more2
15605-s/["\\]/\\&/g; s/^/"/; s/$/"\\/
15606-p
15607-g
15608-s/.\{148\}//
15609-t delim
15610-' <conf$$subs.awk | sed '
15611-/^[^""]/{
15612-  N
15613-  s/\n//
15614-}
15615-' >>$CONFIG_STATUS || ac_write_fail=1
15616-rm -f conf$$subs.awk
15617-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
15618-_ACAWK
15619-cat >>"\$ac_tmp/subs1.awk" <<_ACAWK &&
15620-  for (key in S) S_is_set[key] = 1
15621-  FS = ""
15622-
15623-}
15624-{
15625-  line = $ 0
15626-  nfields = split(line, field, "@")
15627-  substed = 0
15628-  len = length(field[1])
15629-  for (i = 2; i < nfields; i++) {
15630-    key = field[i]
15631-    keylen = length(key)
15632-    if (S_is_set[key]) {
15633-      value = S[key]
15634-      line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
15635-      len += length(value) + length(field[++i])
15636-      substed = 1
15637-    } else
15638-      len += 1 + keylen
15639-  }
15640-
15641-  print line
15642-}
15643-
15644-_ACAWK
15645-_ACEOF
15646-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
15647-if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
15648-  sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
15649-else
15650-  cat
15651-fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \
15652-  || as_fn_error $? "could not setup config files machinery" "$LINENO" 5
15653-_ACEOF
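
All of the machinery above reduces to: build an awk table S[name]=value from $ac_subst_vars, then stream each template through awk, replacing every @name@ with S[name]. Stripped of the line-length splitting, carriage-return handling and &/\ escaping, the core idea looks like this sketch (assuming a Makefile.in that contains @srcdir@ and @prefix@ placeholders; the values are illustrative):

awk -v srcdir=. -v prefix=/usr/local '
BEGIN { S["srcdir"] = srcdir; S["prefix"] = prefix }
{
  # Replace each @name@ placeholder with its substitution value.
  for (key in S)
    gsub("@" key "@", S[key])
  print
}' Makefile.in > Makefile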
15654-
15655-# VPATH may cause trouble with some makes, so we remove sole $(srcdir),
15656-# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and
15657-# trailing colons and then remove the whole line if VPATH becomes empty
15658-# (actually we leave an empty line to preserve line numbers).
15659-if test "x$srcdir" = x.; then
15660-  ac_vpsub='/^[	 ]*VPATH[	 ]*=[	 ]*/{
15661-h
15662-s///
15663-s/^/:/
15664-s/[	 ]*$/:/
15665-s/:\$(srcdir):/:/g
15666-s/:\${srcdir}:/:/g
15667-s/:@srcdir@:/:/g
15668-s/^:*//
15669-s/:*$//
15670-x
15671-s/\(=[	 ]*\).*/\1/
15672-G
15673-s/\n//
15674-s/^[^=]*=[	 ]*$//
15675-}'
15676-fi
15677-
15678-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
15679-fi # test -n "$CONFIG_FILES"
15680-
15681-# Set up the scripts for CONFIG_HEADERS section.
15682-# No need to generate them if there are no CONFIG_HEADERS.
15683-# This happens for instance with `./config.status Makefile'.
15684-if test -n "$CONFIG_HEADERS"; then
15685-cat >"$ac_tmp/defines.awk" <<\_ACAWK ||
15686-BEGIN {
15687-_ACEOF
15688-
15689-# Transform confdefs.h into an awk script `defines.awk', embedded as
15690-# here-document in config.status, that substitutes the proper values into
15691-# config.h.in to produce config.h.
15692-
15693-# Create a delimiter string that does not exist in confdefs.h, to ease
15694-# handling of long lines.
15695-ac_delim='%!_!# '
15696-for ac_last_try in false false :; do
15697-  ac_tt=`sed -n "/$ac_delim/p" confdefs.h`
15698-  if test -z "$ac_tt"; then
15699-    break
15700-  elif $ac_last_try; then
15701-    as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5
15702-  else
15703-    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
15704-  fi
15705-done
15706-
15707-# For the awk script, D is an array of macro values keyed by name,
15708-# likewise P contains macro parameters if any.  Preserve backslash
15709-# newline sequences.
15710-
15711-ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]*
15712-sed -n '
15713-s/.\{148\}/&'"$ac_delim"'/g
15714-t rset
15715-:rset
15716-s/^[	 ]*#[	 ]*define[	 ][	 ]*/ /
15717-t def
15718-d
15719-:def
15720-s/\\$//
15721-t bsnl
15722-s/["\\]/\\&/g
15723-s/^ \('"$ac_word_re"'\)\(([^()]*)\)[	 ]*\(.*\)/P["\1"]="\2"\
15724-D["\1"]=" \3"/p
15725-s/^ \('"$ac_word_re"'\)[	 ]*\(.*\)/D["\1"]=" \2"/p
15726-d
15727-:bsnl
15728-s/["\\]/\\&/g
15729-s/^ \('"$ac_word_re"'\)\(([^()]*)\)[	 ]*\(.*\)/P["\1"]="\2"\
15730-D["\1"]=" \3\\\\\\n"\\/p
15731-t cont
15732-s/^ \('"$ac_word_re"'\)[	 ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p
15733-t cont
15734-d
15735-:cont
15736-n
15737-s/.\{148\}/&'"$ac_delim"'/g
15738-t clear
15739-:clear
15740-s/\\$//
15741-t bsnlc
15742-s/["\\]/\\&/g; s/^/"/; s/$/"/p
15743-d
15744-:bsnlc
15745-s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p
15746-b cont
15747-' <confdefs.h | sed '
15748-s/'"$ac_delim"'/"\\\
15749-"/g' >>$CONFIG_STATUS || ac_write_fail=1
15750-
15751-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
15752-  for (key in D) D_is_set[key] = 1
15753-  FS = ""
15754-}
15755-/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ {
15756-  line = \$ 0
15757-  split(line, arg, " ")
15758-  if (arg[1] == "#") {
15759-    defundef = arg[2]
15760-    mac1 = arg[3]
15761-  } else {
15762-    defundef = substr(arg[1], 2)
15763-    mac1 = arg[2]
15764-  }
15765-  split(mac1, mac2, "(") #)
15766-  macro = mac2[1]
15767-  prefix = substr(line, 1, index(line, defundef) - 1)
15768-  if (D_is_set[macro]) {
15769-    # Preserve the white space surrounding the "#".
15770-    print prefix "define", macro P[macro] D[macro]
15771-    next
15772-  } else {
15773-    # Replace #undef with comments.  This is necessary, for example,
15774-    # in the case of _POSIX_SOURCE, which is predefined and required
15775-    # on some systems where configure will not decide to define it.
15776-    if (defundef == "undef") {
15777-      print "/*", prefix defundef, macro, "*/"
15778-      next
15779-    }
15780-  }
15781-}
15782-{ print }
15783-_ACAWK
15784-_ACEOF
15785-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
15786-  as_fn_error $? "could not setup config headers machinery" "$LINENO" 5
15787-fi # test -n "$CONFIG_HEADERS"
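
defines.awk walks every #define/#undef line of the header template: when confdefs.h defined the macro it prints the real #define, and any #undef left over is wrapped in a comment so the installed header never undefines anything. Conceptually, and ignoring macro parameters and multi-line values, it behaves like this simplified sed rendition (the macro values shown are hypothetical):

sed -e 's|^#undef HAVE_STDBOOL_H$|#define HAVE_STDBOOL_H 1|' \
    -e 's|^#undef HAVE__BOOL$|#define HAVE__BOOL 1|' \
    -e 's|^#undef \(.*\)$|/* #undef \1 */|' \
    config.h.in > config.h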
15788-
15789-
15790-eval set X "  :F $CONFIG_FILES  :H $CONFIG_HEADERS    :C $CONFIG_COMMANDS"
15791-shift
15792-for ac_tag
15793-do
15794-  case $ac_tag in
15795-  :[FHLC]) ac_mode=$ac_tag; continue;;
15796-  esac
15797-  case $ac_mode$ac_tag in
15798-  :[FHL]*:*);;
15799-  :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;;
15800-  :[FH]-) ac_tag=-:-;;
15801-  :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
15802-  esac
15803-  ac_save_IFS=$IFS
15804-  IFS=:
15805-  set x $ac_tag
15806-  IFS=$ac_save_IFS
15807-  shift
15808-  ac_file=$1
15809-  shift
15810-
15811-  case $ac_mode in
15812-  :L) ac_source=$1;;
15813-  :[FH])
15814-    ac_file_inputs=
15815-    for ac_f
15816-    do
15817-      case $ac_f in
15818-      -) ac_f="$ac_tmp/stdin";;
15819-      *) # Look for the file first in the build tree, then in the source tree
15820-	 # (if the path is not absolute).  The absolute path cannot be DOS-style,
15821-	 # because $ac_f cannot contain `:'.
15822-	 test -f "$ac_f" ||
15823-	   case $ac_f in
15824-	   [\\/$]*) false;;
15825-	   *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
15826-	   esac ||
15827-	   as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;;
15828-      esac
15829-      case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
15830-      as_fn_append ac_file_inputs " '$ac_f'"
15831-    done
15832-
15833-    # Let's still pretend it is `configure' which instantiates (i.e., don't
15834-    # use $as_me), people would be surprised to read:
15835-    #    /* config.h.  Generated by config.status.  */
15836-    configure_input='Generated from '`
15837-	  $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
15838-	`' by configure.'
15839-    if test x"$ac_file" != x-; then
15840-      configure_input="$ac_file.  $configure_input"
15841-      { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5
15842-$as_echo "$as_me: creating $ac_file" >&6;}
15843-    fi
15844-    # Neutralize special characters interpreted by sed in replacement strings.
15845-    case $configure_input in #(
15846-    *\&* | *\|* | *\\* )
15847-       ac_sed_conf_input=`$as_echo "$configure_input" |
15848-       sed 's/[\\\\&|]/\\\\&/g'`;; #(
15849-    *) ac_sed_conf_input=$configure_input;;
15850-    esac
15851-
15852-    case $ac_tag in
15853-    *:-:* | *:-) cat >"$ac_tmp/stdin" \
15854-      || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;;
15855-    esac
15856-    ;;
15857-  esac
15858-
15859-  ac_dir=`$as_dirname -- "$ac_file" ||
15860-$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
15861-	 X"$ac_file" : 'X\(//\)[^/]' \| \
15862-	 X"$ac_file" : 'X\(//\)$' \| \
15863-	 X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
15864-$as_echo X"$ac_file" |
15865-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
15866-	    s//\1/
15867-	    q
15868-	  }
15869-	  /^X\(\/\/\)[^/].*/{
15870-	    s//\1/
15871-	    q
15872-	  }
15873-	  /^X\(\/\/\)$/{
15874-	    s//\1/
15875-	    q
15876-	  }
15877-	  /^X\(\/\).*/{
15878-	    s//\1/
15879-	    q
15880-	  }
15881-	  s/.*/./; q'`
15882-  as_dir="$ac_dir"; as_fn_mkdir_p
15883-  ac_builddir=.
15884-
15885-case "$ac_dir" in
15886-.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
15887-*)
15888-  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
15889-  # A ".." for each directory in $ac_dir_suffix.
15890-  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
15891-  case $ac_top_builddir_sub in
15892-  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
15893-  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
15894-  esac ;;
15895-esac
15896-ac_abs_top_builddir=$ac_pwd
15897-ac_abs_builddir=$ac_pwd$ac_dir_suffix
15898-# for backward compatibility:
15899-ac_top_builddir=$ac_top_build_prefix
15900-
15901-case $srcdir in
15902-  .)  # We are building in place.
15903-    ac_srcdir=.
15904-    ac_top_srcdir=$ac_top_builddir_sub
15905-    ac_abs_top_srcdir=$ac_pwd ;;
15906-  [\\/]* | ?:[\\/]* )  # Absolute name.
15907-    ac_srcdir=$srcdir$ac_dir_suffix;
15908-    ac_top_srcdir=$srcdir
15909-    ac_abs_top_srcdir=$srcdir ;;
15910-  *) # Relative name.
15911-    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
15912-    ac_top_srcdir=$ac_top_build_prefix$srcdir
15913-    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
15914-esac
15915-ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
15916-
15917-
15918-  case $ac_mode in
15919-  :F)
15920-  #
15921-  # CONFIG_FILE
15922-  #
15923-
15924-  case $INSTALL in
15925-  [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
15926-  *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;;
15927-  esac
15928-_ACEOF
15929-
15930-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
15931-# If the template does not know about datarootdir, expand it.
15932-# FIXME: This hack should be removed a few years after 2.60.
15933-ac_datarootdir_hack=; ac_datarootdir_seen=
15934-ac_sed_dataroot='
15935-/datarootdir/ {
15936-  p
15937-  q
15938-}
15939-/@datadir@/p
15940-/@docdir@/p
15941-/@infodir@/p
15942-/@localedir@/p
15943-/@mandir@/p'
15944-case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
15945-*datarootdir*) ac_datarootdir_seen=yes;;
15946-*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
15947-  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
15948-$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
15949-_ACEOF
15950-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
15951-  ac_datarootdir_hack='
15952-  s&@datadir@&$datadir&g
15953-  s&@docdir@&$docdir&g
15954-  s&@infodir@&$infodir&g
15955-  s&@localedir@&$localedir&g
15956-  s&@mandir@&$mandir&g
15957-  s&\\\${datarootdir}&$datarootdir&g' ;;
15958-esac
15959-_ACEOF
15960-
15961-# Neutralize VPATH when `$srcdir' = `.'.
15962-# Shell code in configure.ac might set extrasub.
15963-# FIXME: do we really want to maintain this feature?
15964-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
15965-ac_sed_extra="$ac_vpsub
15966-$extrasub
15967-_ACEOF
15968-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
15969-:t
15970-/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
15971-s|@configure_input@|$ac_sed_conf_input|;t t
15972-s&@top_builddir@&$ac_top_builddir_sub&;t t
15973-s&@top_build_prefix@&$ac_top_build_prefix&;t t
15974-s&@srcdir@&$ac_srcdir&;t t
15975-s&@abs_srcdir@&$ac_abs_srcdir&;t t
15976-s&@top_srcdir@&$ac_top_srcdir&;t t
15977-s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
15978-s&@builddir@&$ac_builddir&;t t
15979-s&@abs_builddir@&$ac_abs_builddir&;t t
15980-s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
15981-s&@INSTALL@&$ac_INSTALL&;t t
15982-$ac_datarootdir_hack
15983-"
15984-eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \
15985-  >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5
15986-
15987-test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
15988-  { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } &&
15989-  { ac_out=`sed -n '/^[	 ]*datarootdir[	 ]*:*=/p' \
15990-      "$ac_tmp/out"`; test -z "$ac_out"; } &&
15991-  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
15992-which seems to be undefined.  Please make sure it is defined" >&5
15993-$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
15994-which seems to be undefined.  Please make sure it is defined" >&2;}
15995-
15996-  rm -f "$ac_tmp/stdin"
15997-  case $ac_file in
15998-  -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";;
15999-  *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";;
16000-  esac \
16001-  || as_fn_error $? "could not create $ac_file" "$LINENO" 5
16002- ;;
16003-  :H)
16004-  #
16005-  # CONFIG_HEADER
16006-  #
16007-  if test x"$ac_file" != x-; then
16008-    {
16009-      $as_echo "/* $configure_input  */" \
16010-      && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs"
16011-    } >"$ac_tmp/config.h" \
16012-      || as_fn_error $? "could not create $ac_file" "$LINENO" 5
16013-    if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then
16014-      { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
16015-$as_echo "$as_me: $ac_file is unchanged" >&6;}
16016-    else
16017-      rm -f "$ac_file"
16018-      mv "$ac_tmp/config.h" "$ac_file" \
16019-	|| as_fn_error $? "could not create $ac_file" "$LINENO" 5
16020-    fi
16021-  else
16022-    $as_echo "/* $configure_input  */" \
16023-      && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \
16024-      || as_fn_error $? "could not create -" "$LINENO" 5
16025-  fi
16026- ;;
16027-
16028-  :C)  { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5
16029-$as_echo "$as_me: executing $ac_file commands" >&6;}
16030- ;;
16031-  esac
16032-
16033-
16034-  case $ac_file$ac_mode in
16035-    "include/jemalloc/internal/public_symbols.txt":C)
16036-  f="${objroot}include/jemalloc/internal/public_symbols.txt"
16037-  mkdir -p "${objroot}include/jemalloc/internal"
16038-  cp /dev/null "${f}"
16039-  for nm in `echo ${mangling_map} |tr ',' ' '` ; do
16040-    n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
16041-    m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'`
16042-    echo "${n}:${m}" >> "${f}"
16043-        public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '`
16044-  done
16045-  for sym in ${public_syms} ; do
16046-    n="${sym}"
16047-    m="${JEMALLOC_PREFIX}${sym}"
16048-    echo "${n}:${m}" >> "${f}"
16049-  done
16050- ;;
16051-    "include/jemalloc/internal/private_symbols.awk":C)
16052-  f="${objroot}include/jemalloc/internal/private_symbols.awk"
16053-  mkdir -p "${objroot}include/jemalloc/internal"
16054-  export_syms=`for sym in ${public_syms}; do echo "${JEMALLOC_PREFIX}${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;`
16055-  "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols.awk"
16056- ;;
16057-    "include/jemalloc/internal/private_symbols_jet.awk":C)
16058-  f="${objroot}include/jemalloc/internal/private_symbols_jet.awk"
16059-  mkdir -p "${objroot}include/jemalloc/internal"
16060-  export_syms=`for sym in ${public_syms}; do echo "jet_${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;`
16061-  "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols_jet.awk"
16062- ;;
16063-    "include/jemalloc/internal/public_namespace.h":C)
16064-  mkdir -p "${objroot}include/jemalloc/internal"
16065-  "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h"
16066- ;;
16067-    "include/jemalloc/internal/public_unnamespace.h":C)
16068-  mkdir -p "${objroot}include/jemalloc/internal"
16069-  "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h"
16070- ;;
16071-    "include/jemalloc/jemalloc_protos_jet.h":C)
16072-  mkdir -p "${objroot}include/jemalloc"
16073-  cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h"
16074- ;;
16075-    "include/jemalloc/jemalloc_rename.h":C)
16076-  mkdir -p "${objroot}include/jemalloc"
16077-  "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h"
16078- ;;
16079-    "include/jemalloc/jemalloc_mangle.h":C)
16080-  mkdir -p "${objroot}include/jemalloc"
16081-  "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h"
16082- ;;
16083-    "include/jemalloc/jemalloc_mangle_jet.h":C)
16084-  mkdir -p "${objroot}include/jemalloc"
16085-  "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h"
16086- ;;
16087-    "include/jemalloc/jemalloc.h":C)
16088-  mkdir -p "${objroot}include/jemalloc"
16089-  "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h"
16090- ;;
16091-
16092-  esac
16093-done # for ac_tag
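
The :C commands above center on public_symbols.txt, a plain unprefixed:prefixed map with one n:m pair per line, which the *.sh helpers then expand into the namespace and mangle headers. As a rough, hypothetical illustration of a consumer of that map (not what the jemalloc scripts literally do), assuming objroot is set as in the commands above:

# Emit one #define per mapping, which is roughly what a mangle-style header holds.
while IFS=: read -r n m; do
  echo "#define ${n} ${m}"
done < "${objroot}include/jemalloc/internal/public_symbols.txt"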
16094-
16095-
16096-as_fn_exit 0
16097-_ACEOF
16098-ac_clean_files=$ac_clean_files_save
16099-
16100-test $ac_write_fail = 0 ||
16101-  as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5
16102-
16103-
16104-# configure is writing to config.log, and then calls config.status.
16105-# config.status does its own redirection, appending to config.log.
16106-# Unfortunately, on DOS this fails, as config.log is still kept open
16107-# by configure, so config.status won't be able to write to it; its
16108-# output is simply discarded.  So we exec the FD to /dev/null,
16109-# effectively closing config.log, so it can be properly (re)opened and
16110-# appended to by config.status.  When coming back to configure, we
16111-# need to make the FD available again.
16112-if test "$no_create" != yes; then
16113-  ac_cs_success=:
16114-  ac_config_status_args=
16115-  test "$silent" = yes &&
16116-    ac_config_status_args="$ac_config_status_args --quiet"
16117-  exec 5>/dev/null
16118-  $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
16119-  exec 5>>config.log
16120-  # Use ||, not &&, to avoid exiting from the if with $? = 1, which
16121-  # would make configure fail if this is the last instruction.
16122-  $ac_cs_success || as_fn_exit 1
16123-fi
16124-if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
16125-  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
16126-$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
16127-fi
16128-
16129-
16130-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ===============================================================================" >&5
16131-$as_echo "===============================================================================" >&6; }
16132-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: jemalloc version   : ${jemalloc_version}" >&5
16133-$as_echo "jemalloc version   : ${jemalloc_version}" >&6; }
16134-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: library revision   : ${rev}" >&5
16135-$as_echo "library revision   : ${rev}" >&6; }
16136-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
16137-$as_echo "" >&6; }
16138-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CONFIG             : ${CONFIG}" >&5
16139-$as_echo "CONFIG             : ${CONFIG}" >&6; }
16140-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CC                 : ${CC}" >&5
16141-$as_echo "CC                 : ${CC}" >&6; }
16142-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CONFIGURE_CFLAGS   : ${CONFIGURE_CFLAGS}" >&5
16143-$as_echo "CONFIGURE_CFLAGS   : ${CONFIGURE_CFLAGS}" >&6; }
16144-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: SPECIFIED_CFLAGS   : ${SPECIFIED_CFLAGS}" >&5
16145-$as_echo "SPECIFIED_CFLAGS   : ${SPECIFIED_CFLAGS}" >&6; }
16146-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_CFLAGS       : ${EXTRA_CFLAGS}" >&5
16147-$as_echo "EXTRA_CFLAGS       : ${EXTRA_CFLAGS}" >&6; }
16148-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CPPFLAGS           : ${CPPFLAGS}" >&5
16149-$as_echo "CPPFLAGS           : ${CPPFLAGS}" >&6; }
16150-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CXX                : ${CXX}" >&5
16151-$as_echo "CXX                : ${CXX}" >&6; }
16152-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: CONFIGURE_CXXFLAGS : ${CONFIGURE_CXXFLAGS}" >&5
16153-$as_echo "CONFIGURE_CXXFLAGS : ${CONFIGURE_CXXFLAGS}" >&6; }
16154-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: SPECIFIED_CXXFLAGS : ${SPECIFIED_CXXFLAGS}" >&5
16155-$as_echo "SPECIFIED_CXXFLAGS : ${SPECIFIED_CXXFLAGS}" >&6; }
16156-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_CXXFLAGS     : ${EXTRA_CXXFLAGS}" >&5
16157-$as_echo "EXTRA_CXXFLAGS     : ${EXTRA_CXXFLAGS}" >&6; }
16158-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LDFLAGS            : ${LDFLAGS}" >&5
16159-$as_echo "LDFLAGS            : ${LDFLAGS}" >&6; }
16160-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: EXTRA_LDFLAGS      : ${EXTRA_LDFLAGS}" >&5
16161-$as_echo "EXTRA_LDFLAGS      : ${EXTRA_LDFLAGS}" >&6; }
16162-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: DSO_LDFLAGS        : ${DSO_LDFLAGS}" >&5
16163-$as_echo "DSO_LDFLAGS        : ${DSO_LDFLAGS}" >&6; }
16164-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBS               : ${LIBS}" >&5
16165-$as_echo "LIBS               : ${LIBS}" >&6; }
16166-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: RPATH_EXTRA        : ${RPATH_EXTRA}" >&5
16167-$as_echo "RPATH_EXTRA        : ${RPATH_EXTRA}" >&6; }
16168-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
16169-$as_echo "" >&6; }
16170-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: XSLTPROC           : ${XSLTPROC}" >&5
16171-$as_echo "XSLTPROC           : ${XSLTPROC}" >&6; }
16172-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: XSLROOT            : ${XSLROOT}" >&5
16173-$as_echo "XSLROOT            : ${XSLROOT}" >&6; }
16174-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
16175-$as_echo "" >&6; }
16176-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: PREFIX             : ${PREFIX}" >&5
16177-$as_echo "PREFIX             : ${PREFIX}" >&6; }
16178-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: BINDIR             : ${BINDIR}" >&5
16179-$as_echo "BINDIR             : ${BINDIR}" >&6; }
16180-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: DATADIR            : ${DATADIR}" >&5
16181-$as_echo "DATADIR            : ${DATADIR}" >&6; }
16182-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: INCLUDEDIR         : ${INCLUDEDIR}" >&5
16183-$as_echo "INCLUDEDIR         : ${INCLUDEDIR}" >&6; }
16184-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: LIBDIR             : ${LIBDIR}" >&5
16185-$as_echo "LIBDIR             : ${LIBDIR}" >&6; }
16186-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: MANDIR             : ${MANDIR}" >&5
16187-$as_echo "MANDIR             : ${MANDIR}" >&6; }
16188-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
16189-$as_echo "" >&6; }
16190-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: srcroot            : ${srcroot}" >&5
16191-$as_echo "srcroot            : ${srcroot}" >&6; }
16192-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: abs_srcroot        : ${abs_srcroot}" >&5
16193-$as_echo "abs_srcroot        : ${abs_srcroot}" >&6; }
16194-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: objroot            : ${objroot}" >&5
16195-$as_echo "objroot            : ${objroot}" >&6; }
16196-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: abs_objroot        : ${abs_objroot}" >&5
16197-$as_echo "abs_objroot        : ${abs_objroot}" >&6; }
16198-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
16199-$as_echo "" >&6; }
16200-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: JEMALLOC_PREFIX    : ${JEMALLOC_PREFIX}" >&5
16201-$as_echo "JEMALLOC_PREFIX    : ${JEMALLOC_PREFIX}" >&6; }
16202-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: JEMALLOC_PRIVATE_NAMESPACE" >&5
16203-$as_echo "JEMALLOC_PRIVATE_NAMESPACE" >&6; }
16204-{ $as_echo "$as_me:${as_lineno-$LINENO}: result:                    : ${JEMALLOC_PRIVATE_NAMESPACE}" >&5
16205-$as_echo "                   : ${JEMALLOC_PRIVATE_NAMESPACE}" >&6; }
16206-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: install_suffix     : ${install_suffix}" >&5
16207-$as_echo "install_suffix     : ${install_suffix}" >&6; }
16208-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: malloc_conf        : ${config_malloc_conf}" >&5
16209-$as_echo "malloc_conf        : ${config_malloc_conf}" >&6; }
16210-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: documentation      : ${enable_doc}" >&5
16211-$as_echo "documentation      : ${enable_doc}" >&6; }
16212-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: shared libs        : ${enable_shared}" >&5
16213-$as_echo "shared libs        : ${enable_shared}" >&6; }
16214-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: static libs        : ${enable_static}" >&5
16215-$as_echo "static libs        : ${enable_static}" >&6; }
16216-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: autogen            : ${enable_autogen}" >&5
16217-$as_echo "autogen            : ${enable_autogen}" >&6; }
16218-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: debug              : ${enable_debug}" >&5
16219-$as_echo "debug              : ${enable_debug}" >&6; }
16220-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: stats              : ${enable_stats}" >&5
16221-$as_echo "stats              : ${enable_stats}" >&6; }
16222-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: experimental_smallocx : ${enable_experimental_smallocx}" >&5
16223-$as_echo "experimental_smallocx : ${enable_experimental_smallocx}" >&6; }
16224-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: prof               : ${enable_prof}" >&5
16225-$as_echo "prof               : ${enable_prof}" >&6; }
16226-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-libunwind     : ${enable_prof_libunwind}" >&5
16227-$as_echo "prof-libunwind     : ${enable_prof_libunwind}" >&6; }
16228-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-libgcc        : ${enable_prof_libgcc}" >&5
16229-$as_echo "prof-libgcc        : ${enable_prof_libgcc}" >&6; }
16230-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: prof-gcc           : ${enable_prof_gcc}" >&5
16231-$as_echo "prof-gcc           : ${enable_prof_gcc}" >&6; }
16232-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: fill               : ${enable_fill}" >&5
16233-$as_echo "fill               : ${enable_fill}" >&6; }
16234-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: utrace             : ${enable_utrace}" >&5
16235-$as_echo "utrace             : ${enable_utrace}" >&6; }
16236-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: xmalloc            : ${enable_xmalloc}" >&5
16237-$as_echo "xmalloc            : ${enable_xmalloc}" >&6; }
16238-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: log                : ${enable_log}" >&5
16239-$as_echo "log                : ${enable_log}" >&6; }
16240-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: lazy_lock          : ${enable_lazy_lock}" >&5
16241-$as_echo "lazy_lock          : ${enable_lazy_lock}" >&6; }
16242-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: cache-oblivious    : ${enable_cache_oblivious}" >&5
16243-$as_echo "cache-oblivious    : ${enable_cache_oblivious}" >&6; }
16244-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: cxx                : ${enable_cxx}" >&5
16245-$as_echo "cxx                : ${enable_cxx}" >&6; }
16246-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ===============================================================================" >&5
16247-$as_echo "===============================================================================" >&6; }
16248diff --git a/jemalloc/COPYING b/jemalloc/COPYING
16249deleted file mode 100644
16250index 3b7fd35..0000000
16251--- a/jemalloc/COPYING
16252+++ /dev/null
16253@@ -1,27 +0,0 @@
16254-Unless otherwise specified, files in the jemalloc source distribution are
16255-subject to the following license:
16256---------------------------------------------------------------------------------
16257-Copyright (C) 2002-present Jason Evans <[email protected]>.
16258-All rights reserved.
16259-Copyright (C) 2007-2012 Mozilla Foundation.  All rights reserved.
16260-Copyright (C) 2009-present Facebook, Inc.  All rights reserved.
16261-
16262-Redistribution and use in source and binary forms, with or without
16263-modification, are permitted provided that the following conditions are met:
16264-1. Redistributions of source code must retain the above copyright notice(s),
16265-   this list of conditions and the following disclaimer.
16266-2. Redistributions in binary form must reproduce the above copyright notice(s),
16267-   this list of conditions and the following disclaimer in the documentation
16268-   and/or other materials provided with the distribution.
16269-
16270-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS
16271-OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
16272-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
16273-EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
16274-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
16275-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
16276-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
16277-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
16278-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
16279-ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
16280---------------------------------------------------------------------------------
16281diff --git a/jemalloc/ChangeLog b/jemalloc/ChangeLog
16282deleted file mode 100644
16283index 32fde56..0000000
16284--- a/jemalloc/ChangeLog
16285+++ /dev/null
16286@@ -1,1618 +0,0 @@
16287-Following are change highlights associated with official releases.  Important
16288-bug fixes are all mentioned, but some internal enhancements are omitted here for
16289-brevity.  Much more detail can be found in the git revision history:
16290-
16291-    https://github.com/jemalloc/jemalloc
16292-
16293-* 5.3.0 (May 6, 2022)
16294-
16295-  This release contains many speed and space optimizations, from micro
16296-  optimizations on common paths to rework of internal data structures and
16297-  locking schemes, and many more too detailed to list here.  Improvements of
16298-  multiple percent in system-level metrics were measured in tested production
16299-  workloads.  The release has gone through large-scale production testing.
16300-
16301-  New features:
16302-  - Add the thread.idle mallctl which hints that the calling thread will be
16303-    idle for a nontrivial period of time.  (@davidtgoldblatt)
16304-  - Allow small size classes to be the maximum size class to cache in the
16305-    thread-specific cache, through the opt.[lg_]tcache_max option.  (@interwq,
16306-    @jordalgo)
16307-  - Make the behavior of realloc(ptr, 0) configurable with opt.zero_realloc.
16308-    (@davidtgoldblatt)
16309-  - Add 'make uninstall' support.  (@sangshuduo, @Lapenkov)
16310-  - Support C++17 over-aligned allocation.  (@marksantaniello)
16311-  - Add the thread.peak mallctl for approximate per-thread peak memory tracking.
16312-    (@davidtgoldblatt)
16313-  - Add interval-based stats output opt.stats_interval.  (@interwq)
16314-  - Add prof.prefix to override filename prefixes for dumps.  (@zhxchen17)
16315-  - Add high resolution timestamp support for profiling.  (@tyroguru)
16316-  - Add the --collapsed flag to jeprof for flamegraph generation.
16317-    (@igorwwwwwwwwwwwwwwwwwwww)
16318-  - Add the --debug-syms-by-id option to jeprof for debug symbols discovery.
16319-    (@DeannaGelbart)
16320-  - Add the opt.prof_leak_error option to exit with error code when leak is
16321-    detected using opt.prof_final.  (@yunxuo)
16322-  - Add opt.cache_oblivious as a runtime alternative to config.cache_oblivious.
16323-    (@interwq)
16324-  - Add mallctl interfaces:
16325-    + opt.zero_realloc  (@davidtgoldblatt)
16326-    + opt.cache_oblivious  (@interwq)
16327-    + opt.prof_leak_error  (@yunxuo)
16328-    + opt.stats_interval  (@interwq)
16329-    + opt.stats_interval_opts  (@interwq)
16330-    + opt.tcache_max  (@interwq)
16331-    + opt.trust_madvise  (@azat)
16332-    + prof.prefix  (@zhxchen17)
16333-    + stats.zero_reallocs  (@davidtgoldblatt)
16334-    + thread.idle  (@davidtgoldblatt)
16335-    + thread.peak.{read,reset}  (@davidtgoldblatt)
16336-
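  For orientation, all of the mallctl interfaces listed above are reached
  through jemalloc's single mallctl() entry point.  The following is a minimal
  illustrative sketch, assuming an unprefixed build with <jemalloc/jemalloc.h>
  on the include path and opt.tcache_max exposed as a size_t; consult the man
  page of the version in use before relying on the exact types.

      /* Sketch: query opt.tcache_max and hint thread idleness (both 5.3.0
       * additions listed above).  Assumes an unprefixed jemalloc build. */
      #include <stdio.h>
      #include <jemalloc/jemalloc.h>

      int main(void) {
          size_t tcache_max;                  /* assumed type: size_t */
          size_t len = sizeof(tcache_max);
          if (mallctl("opt.tcache_max", &tcache_max, &len, NULL, 0) == 0) {
              printf("opt.tcache_max = %zu\n", tcache_max);
          }
          /* thread.idle takes no input or output; it is purely a hint. */
          mallctl("thread.idle", NULL, NULL, NULL, 0);
          return 0;
      }
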
16337-  Bug fixes:
16338-  - Fix the synchronization around explicit tcache creation which could cause
16339-    invalid tcache identifiers.  This regression was first released in 5.0.0.
16340-    (@yoshinorim, @davidtgoldblatt)
16341-  - Fix a profiling biasing issue which could cause incorrect heap usage and
16342-    object counts.  This issue existed in all previous releases with the heap
16343-    profiling feature.  (@davidtgoldblatt)
16344-  - Fix the order of stats counter updating on large realloc which could cause
16345-    failed assertions.  This regression was first released in 5.0.0.  (@azat)
16346-  - Fix the locking on the arena destroy mallctl, which could cause concurrent
16347-    arena creations to fail.  This functionality was first introduced in 5.0.0.
16348-    (@interwq)
16349-
16350-  Portability improvements:
16351-  - Remove nothrow from system function declarations on macOS and FreeBSD.
16352-    (@davidtgoldblatt, @fredemmott, @leres)
16353-  - Improve overcommit and page alignment settings on NetBSD.  (@zoulasc)
16354-  - Improve CPU affinity support on BSD platforms.  (@devnexen)
16355-  - Improve utrace detection and support.  (@devnexen)
16356-  - Improve QEMU support with MADV_DONTNEED zeroed pages detection.  (@azat)
16357-  - Add memcntl support on Solaris / illumos.  (@devnexen)
16358-  - Improve CPU_SPINWAIT on ARM.  (@AWSjswinney)
16359-  - Improve TSD cleanup on FreeBSD.  (@Lapenkov)
16360-  - Disable percpu_arena if the CPU count cannot be reliably detected.  (@azat)
16361-  - Add malloc_size(3) override support.  (@devnexen)
16362-  - Add mmap VM_MAKE_TAG support.  (@devnexen)
16363-  - Add support for MADV_[NO]CORE.  (@devnexen)
16364-  - Add support for DragonFlyBSD.  (@devnexen)
16365-  - Fix the QUANTUM setting on MIPS64.  (@brooksdavis)
16366-  - Add the QUANTUM setting for ARC.  (@vineetgarc)
16367-  - Add the QUANTUM setting for LoongArch.  (@wangjl-uos)
16368-  - Add QNX support.  (@jqian-aurora)
16369-  - Avoid atexit(3) calls unless the relevant profiling features are enabled.
16370-    (@BusyJay, @laiwei-rice, @interwq)
16371-  - Fix unknown option detection when using Clang.  (@Lapenkov)
16372-  - Fix symbol conflict with musl libc.  (@georgthegreat)
16373-  - Add -Wimplicit-fallthrough checks.  (@nickdesaulniers)
16374-  - Add __forceinline support on MSVC.  (@santagada)
16375-  - Improve FreeBSD and Windows CI support.  (@Lapenkov)
16376-  - Add CI support for PPC64LE architecture.  (@ezeeyahoo)
16377-
16378-  Incompatible changes:
16379-  - Maximum size class allowed in tcache (opt.[lg_]tcache_max) now has an upper
16380-    bound of 8MiB.  (@interwq)
16381-
16382-  Optimizations and refactors (@davidtgoldblatt, @Lapenkov, @interwq):
16383-  - Optimize the common cases of the thread cache operations.
16384-  - Optimize internal data structures, including RB tree and pairing heap.
16385-  - Optimize the internal locking on extent management.
16386-  - Extract and refactor the internal page allocator and interface modules.
16387-
16388-  Documentation:
16389-  - Fix doc build with --with-install-suffix.  (@lawmurray, @interwq)
16390-  - Add PROFILING_INTERNALS.md.  (@davidtgoldblatt)
16391-  - Ensure the proper order of doc building and installation.  (@Mingli-Yu)
16392-
16393-* 5.2.1 (August 5, 2019)
16394-
16395-  This release is primarily about Windows.  A critical virtual memory leak is
16396-  resolved on all Windows platforms.  The regression was present in all releases
16397-  since 5.0.0.
16398-
16399-  Bug fixes:
16400-  - Fix a severe virtual memory leak on Windows.  This regression was first
16401-    released in 5.0.0.  (@Ignition, @j0t, @frederik-h, @davidtgoldblatt,
16402-    @interwq)
16403-  - Fix size 0 handling in posix_memalign().  This regression was first released
16404-    in 5.2.0.  (@interwq)
16405-  - Fix the prof_log unit test which may observe unexpected backtraces from
16406-    compiler optimizations.  The test was first added in 5.2.0.  (@marxin,
16407-    @gnzlbg, @interwq)
16408-  - Fix the declaration of the extent_avail tree.  This regression was first
16409-    released in 5.1.0.  (@zoulasc)
16410-  - Fix an incorrect reference in jeprof.  This functionality was first released
16411-    in 3.0.0.  (@prehistoric-penguin)
16412-  - Fix an assertion on the deallocation fast-path.  This regression was first
16413-    released in 5.2.0.  (@yinan1048576)
16414-  - Fix the TLS_MODEL attribute in headers.  This regression was first released
16415-    in 5.0.0.  (@zoulasc, @interwq)
16416-
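  The posix_memalign() size-0 entry above concerns a standard C edge case.  As
  a reminder of the call shape only (not taken from the jemalloc sources), a
  conforming implementation may return either NULL or a unique, freeable
  pointer for a zero-size request:

      #include <stdlib.h>

      /* Sketch: the zero-size aligned-allocation case referenced above. */
      void posix_memalign_zero_demo(void) {
          void *p = NULL;
          if (posix_memalign(&p, 64, 0) == 0) {  /* 64-byte alignment, size 0 */
              free(p);                           /* NULL or a unique pointer */
          }
      }
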
16417-  Optimizations and refactors:
16418-  - Implement opt.retain on Windows and enable by default on 64-bit.  (@interwq,
16419-    @davidtgoldblatt)
16420-  - Optimize away a branch on the operator delete[] path.  (@mgrice)
16421-  - Add format annotation to the format generator function.  (@zoulasc)
16422-  - Refactor and improve the size class header generation.  (@yinan1048576)
16423-  - Remove best fit.  (@djwatson)
16424-  - Avoid blocking on background thread locks for stats.  (@oranagra, @interwq)
16425-
16426-* 5.2.0 (April 2, 2019)
16427-
16428-  This release includes a few notable improvements, which are summarized below:
16429-  1) improved fast-path performance from the optimizations by @djwatson; 2)
16430-  reduced virtual memory fragmentation and metadata usage; and 3) bug fixes on
16431-  setting the number of background threads.  In addition, peak / spike memory
16432-  usage is improved with certain allocation patterns.  As usual, the release and
16433-  prior dev versions have gone through large-scale production testing.
16434-
16435-  New features:
16436-  - Implement oversize_threshold, which uses a dedicated arena for allocations
16437-    crossing the specified threshold to reduce fragmentation.  (@interwq)
16438-  - Add extents usage information to stats.  (@tyleretzel)
16439-  - Log time information for sampled allocations.  (@tyleretzel)
16440-  - Support 0 size in sdallocx.  (@djwatson)
16441-  - Output rate for certain counters in malloc_stats.  (@zinoale)
16442-  - Add configure option --enable-readlinkat, which allows the use of readlinkat
16443-    over readlink.  (@davidtgoldblatt)
16444-  - Add configure options --{enable,disable}-{static,shared} to allow not
16445-    building unwanted libraries.  (@Ericson2314)
16446-  - Add configure option --disable-libdl to enable fully static builds.
16447-    (@interwq)
16448-  - Add mallctl interfaces:
16449-    + opt.oversize_threshold (@interwq)
16450-    + stats.arenas.<i>.extent_avail (@tyleretzel)
16451-    + stats.arenas.<i>.extents.<j>.n{dirty,muzzy,retained} (@tyleretzel)
16452-    + stats.arenas.<i>.extents.<j>.{dirty,muzzy,retained}_bytes
16453-      (@tyleretzel)
16454-
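  As context for the sdallocx entry above: sdallocx() is the sized
  deallocation variant, where the caller passes back the size it requested.
  A rough sketch, assuming the unprefixed API:

      #include <jemalloc/jemalloc.h>

      /* Sketch: sized allocation and deallocation; a zero size is the case
       * the entry above refers to. */
      void sized_dalloc_demo(void) {
          size_t sz = 128;
          void *p = mallocx(sz, 0);      /* flags = 0: default behavior */
          if (p != NULL) {
              sdallocx(p, sz, 0);        /* caller supplies the request size */
          }
      }
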
16455-  Portability improvements:
16456-  - Update MSVC builds.  (@maksqwe, @rustyx)
16457-  - Workaround a compiler optimizer bug on s390x.  (@rkmisra)
16458-  - Make use of pthread_set_name_np(3) on FreeBSD.  (@trasz)
16459-  - Implement malloc_getcpu() to enable percpu_arena for windows.  (@santagada)
16460-  - Link against -pthread instead of -lpthread.  (@paravoid)
16461-  - Make background_thread not dependent on libdl.  (@interwq)
16462-  - Add stringify to fix a linker directive issue on MSVC.  (@daverigby)
16463-  - Detect and fall back when 8-bit atomics are unavailable.  (@interwq)
16464-  - Fall back to the default pthread_create if dlsym(3) fails.  (@interwq)
16465-
16466-  Optimizations and refactors:
16467-  - Refactor the TSD module.  (@davidtgoldblatt)
16468-  - Avoid taking extents_muzzy mutex when muzzy is disabled.  (@interwq)
16469-  - Avoid taking large_mtx for auto arenas on the tcache flush path.  (@interwq)
16470-  - Optimize ixalloc by avoiding a size lookup.  (@interwq)
16471-  - Implement opt.oversize_threshold, which uses a dedicated arena for requests
16472-    crossing the threshold and eagerly purges the oversize extents.  The
16473-    threshold defaults to 8 MiB.  (@interwq)
16474-  - Clean compilation with -Wextra.  (@gnzlbg, @jasone)
16475-  - Refactor the size class module.  (@davidtgoldblatt)
16476-  - Refactor the stats emitter.  (@tyleretzel)
16477-  - Optimize pow2_ceil.  (@rkmisra)
16478-  - Avoid runtime detection of lazy purging on FreeBSD.  (@trasz)
16479-  - Optimize mmap(2) alignment handling on FreeBSD.  (@trasz)
16480-  - Improve error handling for THP state initialization.  (@jsteemann)
16481-  - Rework the malloc() fast path.  (@djwatson)
16482-  - Rework the free() fast path.  (@djwatson)
16483-  - Refactor and optimize the tcache fill / flush paths.  (@djwatson)
16484-  - Optimize sync / lwsync on PowerPC.  (@chmeeedalf)
16485-  - Bypass extent_dalloc() when retain is enabled.  (@interwq)
16486-  - Optimize the locking on large deallocation.  (@interwq)
16487-  - Reduce the number of pages committed from sanity checking in debug build.
16488-    (@trasz, @interwq)
16489-  - Deprecate OSSpinLock.  (@interwq)
16490-  - Lower the default number of background threads to 4 (when the feature
16491-    is enabled).  (@interwq)
16492-  - Optimize the trylock spin wait.  (@djwatson)
16493-  - Use arena index for arena-matching checks.  (@interwq)
16494-  - Avoid forced decay on thread termination when using background threads.
16495-    (@interwq)
16496-  - Disable muzzy decay by default.  (@djwatson, @interwq)
16497-  - Only initialize libgcc unwinder when profiling is enabled.  (@paravoid,
16498-    @interwq)
16499-
16500-  Bug fixes (all only relevant to jemalloc 5.x):
16501-  - Fix background thread index issues with max_background_threads.  (@djwatson,
16502-    @interwq)
16503-  - Fix stats output for opt.lg_extent_max_active_fit.  (@interwq)
16504-  - Fix opt.prof_prefix initialization.  (@davidtgoldblatt)
16505-  - Properly trigger decay on tcache destroy.  (@interwq, @amosbird)
16506-  - Fix tcache.flush.  (@interwq)
16507-  - Detect whether explicit extent zero out is necessary with huge pages or
16508-    custom extent hooks, which may change the purge semantics.  (@interwq)
16509-  - Fix a side effect caused by extent_max_active_fit combined with decay-based
16510-    purging, where freed extents can accumulate and not be reused for an
16511-    extended period of time.  (@interwq, @mpghf)
16512-  - Fix a missing unlock on extent register error handling.  (@zoulasc)
16513-
16514-  Testing:
16515-  - Simplify the Travis script output.  (@gnzlbg)
16516-  - Update the test scripts for FreeBSD.  (@devnexen)
16517-  - Add unit tests for the producer-consumer pattern.  (@interwq)
16518-  - Add Cirrus-CI config for FreeBSD builds.  (@jasone)
16519-  - Add size-matching sanity checks on tcache flush.  (@davidtgoldblatt,
16520-    @interwq)
16521-
16522-  Incompatible changes:
16523-  - Remove --with-lg-page-sizes.  (@davidtgoldblatt)
16524-
16525-  Documentation:
16526-  - Attempt to build docs by default, but skip doc building when xsltproc
16527-    is missing.  (@interwq, @cmuellner)
16528-
16529-* 5.1.0 (May 4, 2018)
16530-
16531-  This release is primarily about fine-tuning, ranging from several new features
16532-  to numerous notable performance and portability enhancements.  The release and
16533-  prior dev versions have been running in multiple large scale applications for
16534-  months, and the cumulative improvements are substantial in many cases.
16535-
16536-  Given the long and successful production runs, this release is likely a good
16537-  candidate for applications to upgrade to, from both jemalloc 5.0 and earlier.  For
16538-  performance-critical applications, the newly added TUNING.md provides
16539-  guidelines on jemalloc tuning.
16540-
16541-  New features:
16542-  - Implement transparent huge page support for internal metadata.  (@interwq)
16543-  - Add opt.thp to allow enabling / disabling transparent huge pages for all
16544-    mappings.  (@interwq)
16545-  - Add maximum background thread count option.  (@djwatson)
16546-  - Allow prof_active to control opt.lg_prof_interval and prof.gdump.
16547-    (@interwq)
16548-  - Allow arena index lookup based on allocation addresses via mallctl.
16549-    (@lionkov)
16550-  - Allow disabling initial-exec TLS model.  (@davidtgoldblatt, @KenMacD)
16551-  - Add opt.lg_extent_max_active_fit to set the max ratio between the size of
16552-    the active extent selected (to split off from) and the size of the requested
16553-    allocation.  (@interwq, @davidtgoldblatt)
16554-  - Add retain_grow_limit to set the max size when growing virtual address
16555-    space.  (@interwq)
16556-  - Add mallctl interfaces:
16557-    + arena.<i>.retain_grow_limit  (@interwq)
16558-    + arenas.lookup  (@lionkov)
16559-    + max_background_threads  (@djwatson)
16560-    + opt.lg_extent_max_active_fit  (@interwq)
16561-    + opt.max_background_threads  (@djwatson)
16562-    + opt.metadata_thp  (@interwq)
16563-    + opt.thp  (@interwq)
16564-    + stats.metadata_thp  (@interwq)
16565-
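  For illustration, the max_background_threads interface listed above is
  writable at runtime through mallctl().  A sketch, assuming the value is a
  size_t (worth confirming against the documentation for the exact version):

      #include <jemalloc/jemalloc.h>

      /* Sketch: cap the number of background threads at runtime. */
      int set_max_background_threads(size_t n) {
          /* newp/newlen carry the value to write; nothing is read back. */
          return mallctl("max_background_threads", NULL, NULL, &n, sizeof(n));
      }
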
16566-  Portability improvements:
16567-  - Support GNU/kFreeBSD configuration.  (@paravoid)
16568-  - Support m68k, nios2 and SH3 architectures.  (@paravoid)
16569-  - Fall back to FD_CLOEXEC when O_CLOEXEC is unavailable.  (@zonyitoo)
16570-  - Fix symbol listing for cross-compiling.  (@tamird)
16571-  - Fix high bits computation on ARM.  (@davidtgoldblatt, @paravoid)
16572-  - Disable the CPU_SPINWAIT macro for Power.  (@davidtgoldblatt, @marxin)
16573-  - Fix MSVC 2015 & 2017 builds.  (@rustyx)
16574-  - Improve RISC-V support.  (@EdSchouten)
16575-  - Set name mangling script in strict mode.  (@nicolov)
16576-  - Avoid MADV_HUGEPAGE on ARM.  (@marxin)
16577-  - Modify configure to determine return value of strerror_r.
16578-    (@davidtgoldblatt, @cferris1000)
16579-  - Make sure CXXFLAGS is tested with CPP compiler.  (@nehaljwani)
16580-  - Fix 32-bit build on MSVC.  (@rustyx)
16581-  - Fix external symbol on MSVC.  (@maksqwe)
16582-  - Avoid a printf format specifier warning.  (@jasone)
16583-  - Add configure option --disable-initial-exec-tls which can allow jemalloc to
16584-    be dynamically loaded after program startup.  (@davidtgoldblatt, @KenMacD)
16585-  - AArch64: Add ILP32 support.  (@cmuellner)
16586-  - Add --with-lg-vaddr configure option to support cross compiling.
16587-    (@cmuellner, @davidtgoldblatt)
16588-
16589-  Optimizations and refactors:
16590-  - Improve active extent fit with extent_max_active_fit.  This considerably
16591-    reduces fragmentation over time and improves virtual memory and metadata
16592-    usage.  (@davidtgoldblatt, @interwq)
16593-  - Eagerly coalesce large extents to reduce fragmentation.  (@interwq)
16594-  - sdallocx: only read size info when page aligned (i.e. possibly sampled),
16595-    which speeds up the sized deallocation path significantly.  (@interwq)
16596-  - Avoid attempting new mappings for in place expansion with retain, since
16597-    it rarely succeeds in practice and causes high overhead.  (@interwq)
16598-  - Refactor OOM handling in newImpl.  (@wqfish)
16599-  - Add internal fine-grained logging functionality for debugging use.
16600-    (@davidtgoldblatt)
16601-  - Refactor arena / tcache interactions.  (@davidtgoldblatt)
16602-  - Refactor extent management with dumpable flag.  (@davidtgoldblatt)
16603-  - Add runtime detection of lazy purging.  (@interwq)
16604-  - Use pairing heap instead of red-black tree for extents_avail.  (@djwatson)
16605-  - Use sysctl on startup in FreeBSD.  (@trasz)
16606-  - Use thread local prng state instead of atomic.  (@djwatson)
16607-  - Make decay always purge one more extent than before, because in
16608-    practice large extents are usually the ones that cross the decay threshold.
16609-    Purging the additional extent helps save memory as well as reduce VM
16610-    fragmentation.  (@interwq)
16611-  - Fast division by dynamic values.  (@davidtgoldblatt)
16612-  - Improve the fit for aligned allocation.  (@interwq, @edwinsmith)
16613-  - Refactor extent_t bitpacking.  (@rkmisra)
16614-  - Optimize the generated assembly for ticker operations.  (@davidtgoldblatt)
16615-  - Convert stats printing to use a structured text emitter.  (@davidtgoldblatt)
16616-  - Remove preserve_lru feature for extents management.  (@djwatson)
16617-  - Consolidate two memory loads into one on the fast deallocation path.
16618-    (@davidtgoldblatt, @interwq)
16619-
16620-  Bug fixes (most of the issues are only relevant to jemalloc 5.0):
16621-  - Fix deadlock with multithreaded fork in OS X.  (@davidtgoldblatt)
16622-  - Validate returned file descriptor before use.  (@zonyitoo)
16623-  - Fix a few background thread initialization and shutdown issues.  (@interwq)
16624-  - Fix an extent coalesce + decay race by taking both coalescing extents off
16625-    the LRU list.  (@interwq)
16626-  - Fix a potentially unbounded increase during decay, caused by one thread
16627-    repeatedly stashing memory to purge while other threads generate new pages.
16628-    The number of pages to purge is now checked to prevent this.  (@interwq)
16629-  - Fix a FreeBSD bootstrap assertion.  (@strejda, @interwq)
16630-  - Handle 32 bit mutex counters.  (@rkmisra)
16631-  - Fix an indexing bug when creating background threads.  (@davidtgoldblatt,
16632-    @binliu19)
16633-  - Fix arguments passed to extent_init.  (@yuleniwo, @interwq)
16634-  - Fix addresses used for ordering mutexes.  (@rkmisra)
16635-  - Fix abort_conf processing during bootstrap.  (@interwq)
16636-  - Fix include path order for out-of-tree builds.  (@cmuellner)
16637-
16638-  Incompatible changes:
16639-  - Remove --disable-thp.  (@interwq)
16640-  - Remove mallctl interfaces:
16641-    + config.thp  (@interwq)
16642-
16643-  Documentation:
16644-  - Add TUNING.md.  (@interwq, @davidtgoldblatt, @djwatson)
16645-
16646-* 5.0.1 (July 1, 2017)
16647-
16648-  This bugfix release fixes several issues, most of which are obscure enough
16649-  that typical applications are not impacted.
16650-
16651-  Bug fixes:
16652-  - Update decay->nunpurged before purging, in order to avoid potential update
16653-    races and subsequent incorrect purging volume.  (@interwq)
16654-  - Only abort on dlsym(3) error if the failure impacts an enabled feature (lazy
16655-    locking and/or background threads).  This mitigates an initialization
16656-    failure bug for which we still do not have a clear reproduction test case.
16657-    (@interwq)
16658-  - Modify tsd management so that it neither crashes nor leaks if a thread's
16659-    only allocation activity is to call free() after TLS destructors have been
16660-    executed.  This behavior was observed when operating with GNU libc, and is
16661-    unlikely to be an issue with other libc implementations.  (@interwq)
16662-  - Mask signals during background thread creation.  This prevents signals from
16663-    being inadvertently delivered to background threads.  (@jasone,
16664-    @davidtgoldblatt, @interwq)
16665-  - Avoid inactivity checks within background threads, in order to prevent
16666-    recursive mutex acquisition.  (@interwq)
16667-  - Fix extent_grow_retained() to use the specified hooks when the
16668-    arena.<i>.extent_hooks mallctl is used to override the default hooks.
16669-    (@interwq)
16670-  - Add missing reentrancy support for custom extent hooks which allocate.
16671-    (@interwq)
16672-  - Post-fork(2), re-initialize the list of tcaches associated with each arena
16673-    to contain no tcaches except the forking thread's.  (@interwq)
16674-  - Add missing post-fork(2) mutex reinitialization for extent_grow_mtx.  This
16675-    fixes potential deadlocks after fork(2).  (@interwq)
16676-  - Enforce minimum autoconf version (currently 2.68), since 2.63 is known to
16677-    generate corrupt configure scripts.  (@jasone)
16678-  - Ensure that the configured page size (--with-lg-page) is no larger than the
16679-    configured huge page size (--with-lg-hugepage).  (@jasone)
16680-
16681-* 5.0.0 (June 13, 2017)
16682-
16683-  Unlike all previous jemalloc releases, this release does not use naturally
16684-  aligned "chunks" for virtual memory management, and instead uses page-aligned
16685-  "extents".  This change has few externally visible effects, but the internal
16686-  impacts are... extensive.  Many other internal changes combine to make this
16687-  the most cohesively designed version of jemalloc so far, with ample
16688-  opportunity for further enhancements.
16689-
16690-  Continuous integration is now an integral aspect of development thanks to the
16691-  efforts of @davidtgoldblatt, and the dev branch tends to remain reasonably
16692-  stable on the tested platforms (Linux, FreeBSD, macOS, and Windows).  As a
16693-  side effect the official release frequency may decrease over time.
16694-
16695-  New features:
16696-  - Implement optional per-CPU arena support; threads choose which arena to use
16697-    based on current CPU rather than on fixed thread-->arena associations.
16698-    (@interwq)
16699-  - Implement two-phase decay of unused dirty pages.  Pages transition from
16700-    dirty-->muzzy-->clean, where the first phase transition relies on
16701-    madvise(... MADV_FREE) semantics, and the second phase transition discards
16702-    pages such that they are replaced with demand-zeroed pages on next access.
16703-    (@jasone)
16704-  - Increase decay time resolution from seconds to milliseconds.  (@jasone)
16705-  - Implement opt-in per CPU background threads, and use them for asynchronous
16706-    decay-driven unused dirty page purging.  (@interwq)
16707-  - Add mutex profiling, which collects a variety of statistics useful for
16708-    diagnosing overhead/contention issues.  (@interwq)
16709-  - Add C++ new/delete operator bindings.  (@djwatson)
16710-  - Support manually created arena destruction, such that all data and metadata
16711-    are discarded.  Add MALLCTL_ARENAS_DESTROYED for accessing merged stats
16712-    associated with destroyed arenas.  (@jasone)
16713-  - Add MALLCTL_ARENAS_ALL as a fixed index for use in accessing
16714-    merged/destroyed arena statistics via mallctl.  (@jasone)
16715-  - Add opt.abort_conf to optionally abort if invalid configuration options are
16716-    detected during initialization.  (@interwq)
16717-  - Add opt.stats_print_opts, so that e.g. JSON output can be selected for the
16718-    stats dumped during exit if opt.stats_print is true.  (@jasone)
16719-  - Add --with-version=VERSION for use when embedding jemalloc into another
16720-    project's git repository.  (@jasone)
16721-  - Add --disable-thp to support cross compiling.  (@jasone)
16722-  - Add --with-lg-hugepage to support cross compiling.  (@jasone)
16723-  - Add mallctl interfaces (various authors):
16724-    + background_thread
16725-    + opt.abort_conf
16726-    + opt.retain
16727-    + opt.percpu_arena
16728-    + opt.background_thread
16729-    + opt.{dirty,muzzy}_decay_ms
16730-    + opt.stats_print_opts
16731-    + arena.<i>.initialized
16732-    + arena.<i>.destroy
16733-    + arena.<i>.{dirty,muzzy}_decay_ms
16734-    + arena.<i>.extent_hooks
16735-    + arenas.{dirty,muzzy}_decay_ms
16736-    + arenas.bin.<i>.slab_size
16737-    + arenas.nlextents
16738-    + arenas.lextent.<i>.size
16739-    + arenas.create
16740-    + stats.background_thread.{num_threads,num_runs,run_interval}
16741-    + stats.mutexes.{ctl,background_thread,prof,reset}.
16742-      {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
16743-      num_owner_switch}
16744-    + stats.arenas.<i>.{dirty,muzzy}_decay_ms
16745-    + stats.arenas.<i>.uptime
16746-    + stats.arenas.<i>.{pmuzzy,base,internal,resident}
16747-    + stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged}
16748-    + stats.arenas.<i>.bins.<j>.{nslabs,reslabs,curslabs}
16749-    + stats.arenas.<i>.bins.<j>.mutex.
16750-      {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
16751-      num_owner_switch}
16752-    + stats.arenas.<i>.lextents.<j>.{nmalloc,ndalloc,nrequests,curlextents}
16753-    + stats.arenas.i.mutexes.{large,extent_avail,extents_dirty,extents_muzzy,
16754-      extents_retained,decay_dirty,decay_muzzy,base,tcache_list}.
16755-      {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds,
16756-      num_owner_switch}
16757-
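  Many of the interfaces above are per-arena, with the arena index substituted
  for the <i> placeholder in the mallctl name.  A sketch of the usual pattern,
  assuming arena.<i>.dirty_decay_ms is exposed as an ssize_t:

      #include <stdio.h>
      #include <sys/types.h>           /* ssize_t */
      #include <jemalloc/jemalloc.h>

      /* Sketch: read a per-arena control by formatting the index into the
       * mallctl name. */
      void print_dirty_decay(unsigned arena_ind) {
          char name[64];
          ssize_t decay_ms;                /* assumed type: ssize_t */
          size_t len = sizeof(decay_ms);
          snprintf(name, sizeof(name), "arena.%u.dirty_decay_ms", arena_ind);
          if (mallctl(name, &decay_ms, &len, NULL, 0) == 0) {
              printf("arena %u dirty_decay_ms = %zd\n", arena_ind, decay_ms);
          }
      }
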
16758-  Portability improvements:
16759-  - Improve reentrant allocation support, such that deadlock is less likely if
16760-    e.g. a system library call in turn allocates memory.  (@davidtgoldblatt,
16761-    @interwq)
16762-  - Support static linking of jemalloc with glibc.  (@djwatson)
16763-
16764-  Optimizations and refactors:
16765-  - Organize virtual memory as "extents" of virtual memory pages, rather than as
16766-    naturally aligned "chunks", and store all metadata in arbitrarily distant
16767-    locations.  This reduces virtual memory external fragmentation, and will
16768-    interact better with huge pages (not yet explicitly supported).  (@jasone)
16769-  - Fold large and huge size classes together; only small and large size classes
16770-    remain.  (@jasone)
16771-  - Unify the allocation paths, and merge most fast-path branching decisions.
16772-    (@davidtgoldblatt, @interwq)
16773-  - Embed per thread automatic tcache into thread-specific data, which reduces
16774-    conditional branches and dereferences.  Also reorganize tcache to increase
16775-    fast-path data locality.  (@interwq)
16776-  - Rewrite atomics to closely model the C11 API, convert various
16777-    synchronization from mutex-based to atomic, and use the explicit memory
16778-    ordering control to resolve various hypothetical races without increasing
16779-    synchronization overhead.  (@davidtgoldblatt)
16780-  - Extensively optimize rtree via various methods:
16781-    + Add multiple layers of rtree lookup caching, since rtree lookups are now
16782-      part of fast-path deallocation.  (@interwq)
16783-    + Determine rtree layout at compile time.  (@jasone)
16784-    + Make the tree shallower for common configurations.  (@jasone)
16785-    + Embed the root node in the top-level rtree data structure, thus avoiding
16786-      one level of indirection.  (@jasone)
16787-    + Further specialize leaf elements as compared to internal node elements,
16788-      and directly embed extent metadata needed for fast-path deallocation.
16789-      (@jasone)
16790-    + Ignore leading always-zero address bits (architecture-specific).
16791-      (@jasone)
16792-  - Reorganize headers (ongoing work) to make them hermetic, and disentangle
16793-    various module dependencies.  (@davidtgoldblatt)
16794-  - Convert various internal data structures such as size class metadata from
16795-    boot-time-initialized to compile-time-initialized.  Propagate resulting data
16796-    structure simplifications, such as making arena metadata fixed-size.
16797-    (@jasone)
16798-  - Simplify size class lookups when constrained to size classes that are
16799-    multiples of the page size.  This speeds lookups, but the primary benefit is
16800-    complexity reduction in code that was the source of numerous regressions.
16801-    (@jasone)
16802-  - Lock individual extents when possible for localized extent operations,
16803-    rather than relying on a top-level arena lock.  (@davidtgoldblatt, @jasone)
16804-  - Use first fit layout policy instead of best fit, in order to improve
16805-    packing.  (@jasone)
16806-  - If munmap(2) is not in use, use an exponential series to grow each arena's
16807-    virtual memory, so that the number of disjoint virtual memory mappings
16808-    remains low.  (@jasone)
16809-  - Implement per arena base allocators, so that arenas never share any virtual
16810-    memory pages.  (@jasone)
16811-  - Automatically generate private symbol name mangling macros.  (@jasone)
16812-
16813-  Incompatible changes:
16814-  - Replace chunk hooks with an expanded/normalized set of extent hooks.
16815-    (@jasone)
16816-  - Remove ratio-based purging.  (@jasone)
16817-  - Remove --disable-tcache.  (@jasone)
16818-  - Remove --disable-tls.  (@jasone)
16819-  - Remove --enable-ivsalloc.  (@jasone)
16820-  - Remove --with-lg-size-class-group.  (@jasone)
16821-  - Remove --with-lg-tiny-min.  (@jasone)
16822-  - Remove --disable-cc-silence.  (@jasone)
16823-  - Remove --enable-code-coverage.  (@jasone)
16824-  - Remove --disable-munmap (replaced by opt.retain).  (@jasone)
16825-  - Remove Valgrind support.  (@jasone)
16826-  - Remove quarantine support.  (@jasone)
16827-  - Remove redzone support.  (@jasone)
16828-  - Remove mallctl interfaces (various authors):
16829-    + config.munmap
16830-    + config.tcache
16831-    + config.tls
16832-    + config.valgrind
16833-    + opt.lg_chunk
16834-    + opt.purge
16835-    + opt.lg_dirty_mult
16836-    + opt.decay_time
16837-    + opt.quarantine
16838-    + opt.redzone
16839-    + opt.thp
16840-    + arena.<i>.lg_dirty_mult
16841-    + arena.<i>.decay_time
16842-    + arena.<i>.chunk_hooks
16843-    + arenas.initialized
16844-    + arenas.lg_dirty_mult
16845-    + arenas.decay_time
16846-    + arenas.bin.<i>.run_size
16847-    + arenas.nlruns
16848-    + arenas.lrun.<i>.size
16849-    + arenas.nhchunks
16850-    + arenas.hchunk.<i>.size
16851-    + arenas.extend
16852-    + stats.cactive
16853-    + stats.arenas.<i>.lg_dirty_mult
16854-    + stats.arenas.<i>.decay_time
16855-    + stats.arenas.<i>.metadata.{mapped,allocated}
16856-    + stats.arenas.<i>.{npurge,nmadvise,purged}
16857-    + stats.arenas.<i>.huge.{allocated,nmalloc,ndalloc,nrequests}
16858-    + stats.arenas.<i>.bins.<j>.{nruns,reruns,curruns}
16859-    + stats.arenas.<i>.lruns.<j>.{nmalloc,ndalloc,nrequests,curruns}
16860-    + stats.arenas.<i>.hchunks.<j>.{nmalloc,ndalloc,nrequests,curhchunks}
16861-
16862-  Bug fixes:
16863-  - Improve interval-based profile dump triggering to dump only one profile when
16864-    a single allocation's size exceeds the interval.  (@jasone)
16865-  - Use prefixed function names (as controlled by --with-jemalloc-prefix) when
16866-    pruning backtrace frames in jeprof.  (@jasone)
16867-
16868-* 4.5.0 (February 28, 2017)
16869-
16870-  This is the first release to benefit from much broader continuous integration
16871-  testing, thanks to @davidtgoldblatt.  Had we had this testing infrastructure
16872-  in place for prior releases, it would have caught all of the most serious
16873-  regressions fixed by this release.
16874-
16875-  New features:
16876-  - Add --disable-thp and the opt.thp mallctl to provide opt-out mechanisms for
16877-    transparent huge page integration.  (@jasone)
16878-  - Update zone allocator integration to work with macOS 10.12.  (@glandium)
16879-  - Restructure *CFLAGS configuration, so that CFLAGS behaves typically, and
16880-    EXTRA_CFLAGS provides a way to specify e.g. -Werror during building, but not
16881-    during configuration.  (@jasone, @ronawho)
16882-
16883-  Bug fixes:
16884-  - Fix DSS (sbrk(2)-based) allocation.  This regression was first released in
16885-    4.3.0.  (@jasone)
16886-  - Handle race in per size class utilization computation.  This functionality
16887-    was first released in 4.0.0.  (@interwq)
16888-  - Fix lock order reversal during gdump.  (@jasone)
16889-  - Fix/refactor tcache synchronization.  This regression was first released in
16890-    4.0.0.  (@jasone)
16891-  - Fix various JSON-formatted malloc_stats_print() bugs.  This functionality
16892-    was first released in 4.3.0.  (@jasone)
16893-  - Fix huge-aligned allocation.  This regression was first released in 4.4.0.
16894-    (@jasone)
16895-  - When transparent huge page integration is enabled, detect what state pages
16896-    start in according to the kernel's current operating mode, and only convert
16897-    arena chunks to non-huge during purging if that is not their initial state.
16898-    This functionality was first released in 4.4.0.  (@jasone)
16899-  - Fix lg_chunk clamping for the --enable-cache-oblivious --disable-fill case.
16900-    This regression was first released in 4.0.0.  (@jasone, @428desmo)
16901-  - Properly detect sparc64 when building for Linux.  (@glaubitz)
16902-
16903-* 4.4.0 (December 3, 2016)
16904-
16905-  New features:
16906-  - Add configure support for *-*-linux-android.  (@cferris1000, @jasone)
16907-  - Add the --disable-syscall configure option, for use on systems that place
16908-    security-motivated limitations on syscall(2).  (@jasone)
16909-  - Add support for Debian GNU/kFreeBSD.  (@thesam)
16910-
16911-  Optimizations:
16912-  - Add extent serial numbers and use them where appropriate as a sort key that
16913-    is higher priority than address, so that the allocation policy prefers older
16914-    extents.  This tends to improve locality (decrease fragmentation) when
16915-    memory grows downward.  (@jasone)
16916-  - Refactor madvise(2) configuration so that MADV_FREE is detected and utilized
16917-    on Linux 4.5 and newer.  (@jasone)
16918-  - Mark partially purged arena chunks as non-huge-page.  This improves
16919-    interaction with Linux's transparent huge page functionality.  (@jasone)
16920-
16921-  Bug fixes:
16922-  - Fix size class computations for edge conditions involving extremely large
16923-    allocations.  This regression was first released in 4.0.0.  (@jasone,
16924-    @ingvarha)
16925-  - Remove overly restrictive assertions related to the cactive statistic.  This
16926-    regression was first released in 4.1.0.  (@jasone)
16927-  - Implement a more reliable detection scheme for os_unfair_lock on macOS.
16928-    (@jszakmeister)
16929-
16930-* 4.3.1 (November 7, 2016)
16931-
16932-  Bug fixes:
16933-  - Fix a severe virtual memory leak.  This regression was first released in
16934-    4.3.0.  (@interwq, @jasone)
16935-  - Refactor atomic and prng APIs to restore support for 32-bit platforms that
16936-    use pre-C11 toolchains, e.g. FreeBSD's mips.  (@jasone)
16937-
16938-* 4.3.0 (November 4, 2016)
16939-
16940-  This is the first release that passes the test suite for multiple Windows
16941-  configurations, thanks in large part to @glandium setting up continuous
16942-  integration via AppVeyor (and Travis CI for Linux and OS X).
16943-
16944-  New features:
16945-  - Add "J" (JSON) support to malloc_stats_print().  (@jasone)
16946-  - Add Cray compiler support.  (@ronawho)
16947-
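  The "J" option above selects JSON output from malloc_stats_print().  A
  minimal sketch of the callback-based usage:

      #include <stdio.h>
      #include <jemalloc/jemalloc.h>

      /* Sketch: stream JSON-formatted allocator stats to stdout. */
      static void write_cb(void *opaque, const char *s) {
          fputs(s, (FILE *)opaque);
      }

      void dump_stats_json(void) {
          malloc_stats_print(write_cb, stdout, "J");
      }
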
16948-  Optimizations:
16949-  - Add/use adaptive spinning for bootstrapping and radix tree node
16950-    initialization.  (@jasone)
16951-
16952-  Bug fixes:
16953-  - Fix large allocation to search starting in the optimal size class heap,
16954-    which can substantially reduce virtual memory churn and fragmentation.  This
16955-    regression was first released in 4.0.0.  (@mjp41, @jasone)
16956-  - Fix stats.arenas.<i>.nthreads accounting.  (@interwq)
16957-  - Fix and simplify decay-based purging.  (@jasone)
16958-  - Make DSS (sbrk(2)-related) operations lockless, which resolves potential
16959-    deadlocks during thread exit.  (@jasone)
16960-  - Fix over-sized allocation of radix tree leaf nodes.  (@mjp41, @ogaun,
16961-    @jasone)
16962-  - Fix over-sized allocation of arena_t (plus associated stats) data
16963-    structures.  (@jasone, @interwq)
16964-  - Fix EXTRA_CFLAGS to not affect configuration.  (@jasone)
16965-  - Fix a Valgrind integration bug.  (@ronawho)
16966-  - Disallow 0x5a junk filling when running in Valgrind.  (@jasone)
16967-  - Fix a file descriptor leak on Linux.  This regression was first released in
16968-    4.2.0.  (@vsarunas, @jasone)
16969-  - Fix static linking of jemalloc with glibc.  (@djwatson)
16970-  - Use syscall(2) rather than {open,read,close}(2) during boot on Linux.  This
16971-    works around other libraries' system call wrappers performing reentrant
16972-    allocation.  (@kspinka, @Whissi, @jasone)
16973-  - Fix OS X default zone replacement to work with OS X 10.12.  (@glandium,
16974-    @jasone)
16975-  - Fix cached memory management to avoid needless commit/decommit operations
16976-    during purging, which resolves permanent virtual memory map fragmentation
16977-    issues on Windows.  (@mjp41, @jasone)
16978-  - Fix TSD fetches to avoid (recursive) allocation.  This is relevant to
16979-    non-TLS and Windows configurations.  (@jasone)
16980-  - Fix malloc_conf overriding to work on Windows.  (@jasone)
16981-  - Forcibly disable lazy-lock on Windows (was forcibly *enabled*).  (@jasone)
16982-
16983-* 4.2.1 (June 8, 2016)
16984-
16985-  Bug fixes:
16986-  - Fix bootstrapping issues for configurations that require allocation during
16987-    tsd initialization (e.g. --disable-tls).  (@cferris1000, @jasone)
16988-  - Fix gettimeofday() version of nstime_update().  (@ronawho)
16989-  - Fix Valgrind regressions in calloc() and chunk_alloc_wrapper().  (@ronawho)
16990-  - Fix potential VM map fragmentation regression.  (@jasone)
16991-  - Fix opt_zero-triggered in-place huge reallocation zeroing.  (@jasone)
16992-  - Fix heap profiling context leaks in reallocation edge cases.  (@jasone)
16993-
16994-* 4.2.0 (May 12, 2016)
16995-
16996-  New features:
16997-  - Add the arena.<i>.reset mallctl, which makes it possible to discard all of
16998-    an arena's allocations in a single operation.  (@jasone)
16999-  - Add the stats.retained and stats.arenas.<i>.retained statistics.  (@jasone)
17000-  - Add the --with-version configure option.  (@jasone)
17001-  - Support --with-lg-page values larger than actual page size.  (@jasone)
17002-
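  The arena.<i>.reset interface above is invoked as a side-effecting mallctl
  with no input or output value.  A sketch, with the arena index formatted into
  the name; whether a given arena may be reset is subject to the documented
  restrictions:

      #include <stdio.h>
      #include <jemalloc/jemalloc.h>

      /* Sketch: discard all of one arena's allocations.  Resetting an arena
       * whose allocations are still referenced is a usage error. */
      int reset_arena(unsigned arena_ind) {
          char name[32];
          snprintf(name, sizeof(name), "arena.%u.reset", arena_ind);
          return mallctl(name, NULL, NULL, NULL, 0);
      }
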
17003-  Optimizations:
17004-  - Use pairing heaps rather than red-black trees for various hot data
17005-    structures.  (@djwatson, @jasone)
17006-  - Streamline fast paths of rtree operations.  (@jasone)
17007-  - Optimize the fast paths of calloc() and [m,d,sd]allocx().  (@jasone)
17008-  - Decommit unused virtual memory if the OS does not overcommit.  (@jasone)
17009-  - Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order
17010-    to avoid unfortunate interactions during fork(2).  (@jasone)
17011-
17012-  Bug fixes:
17013-  - Fix chunk accounting related to triggering gdump profiles.  (@jasone)
17014-  - Link against librt for clock_gettime(2) if glibc < 2.17.  (@jasone)
17015-  - Scale leak report summary according to sampling probability.  (@jasone)
17016-
17017-* 4.1.1 (May 3, 2016)
17018-
17019-  This bugfix release resolves a variety of mostly minor issues, though the
17020-  bitmap fix is critical for 64-bit Windows.
17021-
17022-  Bug fixes:
17023-  - Fix the linear scan version of bitmap_sfu() to shift by the proper amount
17024-    even when sizeof(long) is not the same as sizeof(void *), as on 64-bit
17025-    Windows.  (@jasone)
17026-  - Fix hashing functions to avoid unaligned memory accesses (and resulting
17027-    crashes).  This is relevant at least to some ARM-based platforms.
17028-    (@rkmisra)
17029-  - Fix fork()-related lock rank ordering reversals.  These reversals were
17030-    unlikely to cause deadlocks in practice except when heap profiling was
17031-    enabled and active.  (@jasone)
17032-  - Fix various chunk leaks in OOM code paths.  (@jasone)
17033-  - Fix malloc_stats_print() to print opt.narenas correctly.  (@jasone)
17034-  - Fix MSVC-specific build/test issues.  (@rustyx, @yuslepukhin)
17035-  - Fix a variety of test failures that were due to test fragility rather than
17036-    core bugs.  (@jasone)
17037-
17038-* 4.1.0 (February 28, 2016)
17039-
17040-  This release is primarily about optimizations, but it also incorporates a lot
17041-  of portability-motivated refactoring and enhancements.  Many people worked on
17042-  this release.  Even with the omission here of minor changes (see the git
17043-  revision history), and of the people who reported and diagnosed issues, so
17044-  much of the work was contributed that, starting with this release, changes
17045-  are annotated with author credits to help reflect the collaborative effort
17046-  involved.
17047-
17048-  New features:
17049-  - Implement decay-based unused dirty page purging, a major optimization with
17050-    mallctl API impact.  This is an alternative to the existing ratio-based
17051-    unused dirty page purging, and is intended to eventually become the sole
17052-    purging mechanism.  New mallctls:
17053-    + opt.purge
17054-    + opt.decay_time
17055-    + arena.<i>.decay
17056-    + arena.<i>.decay_time
17057-    + arenas.decay_time
17058-    + stats.arenas.<i>.decay_time
17059-    (@jasone, @cevans87)
17060-  - Add --with-malloc-conf, which makes it possible to embed a default
17061-    options string during configuration.  This was motivated by the desire to
17062-    specify --with-malloc-conf=purge:decay , since the default must remain
17063-    purge:ratio until the 5.0.0 release.  (@jasone)
17064-  - Add MS Visual Studio 2015 support.  (@rustyx, @yuslepukhin)
17065-  - Make *allocx() size class overflow behavior defined.  The maximum
17066-    size class is now less than PTRDIFF_MAX to protect applications against
17067-    numerical overflow, and all allocation functions are guaranteed to indicate
17068-    errors rather than potentially crashing if the request size exceeds the
17069-    maximum size class.  (@jasone)
17070-  - jeprof:
17071-    + Add raw heap profile support.  (@jasone)
17072-    + Add --retain and --exclude for backtrace symbol filtering.  (@jasone)
17073-
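  The --with-malloc-conf option above embeds a default options string at build
  time; the same options can also be supplied by the application through the
  malloc_conf symbol (je_malloc_conf in prefixed builds) or the MALLOC_CONF
  environment variable.  A sketch using option names from this release:

      #include <jemalloc/jemalloc.h>

      /* Sketch: application-provided defaults, read once at initialization.
       * "purge:decay" and "decay_time" are the 4.1.0 options named above. */
      const char *malloc_conf = "purge:decay,decay_time:10";
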
17074-  Optimizations:
17075-  - Optimize the fast path to combine various bootstrapping and configuration
17076-    checks and execute more streamlined code in the common case.  (@interwq)
17077-  - Use linear scan for small bitmaps (used for small object tracking).  In
17078-    addition to speeding up bitmap operations on 64-bit systems, this reduces
17079-    allocator metadata overhead by approximately 0.2%.  (@djwatson)
17080-  - Separate arena_avail trees, which substantially speeds up run tree
17081-    operations.  (@djwatson)
17082-  - Use memoization (boot-time-computed table) for run quantization.  Separate
17083-    arena_avail trees reduced the importance of this optimization.  (@jasone)
17084-  - Attempt mmap-based in-place huge reallocation.  This can dramatically speed
17085-    up incremental huge reallocation.  (@jasone)
17086-
17087-  Incompatible changes:
17088-  - Make opt.narenas unsigned rather than size_t.  (@jasone)
17089-
17090-  Bug fixes:
17091-  - Fix stats.cactive accounting regression.  (@rustyx, @jasone)
17092-  - Handle unaligned keys in hash().  This caused problems for some ARM systems.
17093-    (@jasone, @cferris1000)
17094-  - Refactor arenas array.  In addition to fixing a fork-related deadlock, this
17095-    makes arena lookups faster and simpler.  (@jasone)
17096-  - Move retained memory allocation out of the default chunk allocation
17097-    function, to a location that gets executed even if the application installs
17098-    a custom chunk allocation function.  This resolves a virtual memory leak.
17099-    (@buchgr)
17100-  - Fix a potential tsd cleanup leak.  (@cferris1000, @jasone)
17101-  - Fix run quantization.  In practice this bug had no impact unless
17102-    applications requested memory with alignment exceeding one page.
17103-    (@jasone, @djwatson)
17104-  - Fix LinuxThreads-specific bootstrapping deadlock.  (Cosmin Paraschiv)
17105-  - jeprof:
17106-    + Don't discard curl options if timeout is not defined.  (@djwatson)
17107-    + Detect failed profile fetches.  (@djwatson)
17108-  - Fix stats.arenas.<i>.{dss,lg_dirty_mult,decay_time,pactive,pdirty} for
17109-    --disable-stats case.  (@jasone)
17110-
17111-* 4.0.4 (October 24, 2015)
17112-
17113-  This bugfix release fixes another xallocx() regression.  No other regressions
17114-  have come to light in over a month, so this is likely a good starting point
17115-  for people who prefer to wait for "dot one" releases with all the major issues
17116-  shaken out.
17117-
17118-  Bug fixes:
17119-  - Fix xallocx(..., MALLOCX_ZERO) to zero the last full trailing page of large
17120-    allocations that have been randomly assigned an offset of 0 when the
17121-    --enable-cache-oblivious configure option is enabled.
17122-
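  For reference, the call shape involved in the fix above looks roughly like
  the following; xallocx() resizes in place (never moving the allocation) and,
  with MALLOCX_ZERO, is expected to zero any newly usable trailing space:

      #include <jemalloc/jemalloc.h>

      /* Sketch: try to grow an existing allocation in place, zero-filling
       * whatever extra space the allocator can provide. */
      size_t grow_zeroed(void *ptr, size_t size, size_t extra) {
          /* Returns the allocation's real size after the attempt. */
          return xallocx(ptr, size, extra, MALLOCX_ZERO);
      }
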
17123-* 4.0.3 (September 24, 2015)
17124-
17125-  This bugfix release continues the trend of xallocx() and heap profiling fixes.
17126-
17127-  Bug fixes:
17128-  - Fix xallocx(..., MALLOCX_ZERO) to zero all trailing bytes of large
17129-    allocations when the --enable-cache-oblivious configure option is enabled.
17130-  - Fix xallocx(..., MALLOCX_ZERO) to zero trailing bytes of huge allocations
17131-    when resizing from/to a size class that is not a multiple of the chunk size.
17132-  - Fix prof_tctx_dump_iter() to filter out nodes that were created after heap
17133-    profile dumping started.
17134-  - Work around a potentially bad thread-specific data initialization
17135-    interaction with NPTL (glibc's pthreads implementation).
17136-
17137-* 4.0.2 (September 21, 2015)
17138-
17139-  This bugfix release addresses a few bugs specific to heap profiling.
17140-
17141-  Bug fixes:
17142-  - Fix ixallocx_prof_sample() to never modify nor create sampled small
17143-    allocations.  xallocx() is in general incapable of moving small allocations,
17144-    so this fix removes buggy code without loss of generality.
17145-  - Fix irallocx_prof_sample() to always allocate large regions, even when
17146-    alignment is non-zero.
17147-  - Fix prof_alloc_rollback() to read tdata from thread-specific data rather
17148-    than dereferencing a potentially invalid tctx.
17149-
17150-* 4.0.1 (September 15, 2015)
17151-
17152-  This is a bugfix release that is somewhat high risk due to the amount of
17153-  refactoring required to address deep xallocx() problems.  As a side effect of
17154-  these fixes, xallocx() now tries harder to partially fulfill requests for
17155-  optional extra space.  Note that a couple of minor heap profiling
17156-  optimizations are included, but these are better thought of as performance
17157-  fixes that were integral to discovering most of the other bugs.
17158-
17159-  Optimizations:
17160-  - Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the
17161-    fast path when heap profiling is enabled.  Additionally, split a special
17162-    case out into arena_prof_tctx_reset(), which also avoids chunk metadata
17163-    reads.
17164-  - Optimize irallocx_prof() to optimistically update the sampler state.  The
17165-    prior implementation appears to have been a holdover from when
17166-    rallocx()/xallocx() functionality was combined as rallocm().
17167-
17168-  Bug fixes:
17169-  - Fix TLS configuration such that it is enabled by default for platforms on
17170-    which it works correctly.
17171-  - Fix arenas_cache_cleanup() and arena_get_hard() to handle
17172-    allocation/deallocation within the application's thread-specific data
17173-    cleanup functions even after arenas_cache is torn down.
17174-  - Fix xallocx() bugs related to size+extra exceeding HUGE_MAXCLASS.
17175-  - Fix chunk purge hook calls for in-place huge shrinking reallocation to
17176-    specify the old chunk size rather than the new chunk size.  This bug caused
17177-    no correctness issues for the default chunk purge function, but was
17178-    visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl.
17179-  - Fix heap profiling bugs:
17180-    + Fix heap profiling to distinguish among otherwise identical sample sites
17181-      with interposed resets (triggered via the "prof.reset" mallctl).  This bug
17182-      could cause data structure corruption that would most likely result in a
17183-      segfault.
17184-    + Fix irealloc_prof() to call prof_alloc_rollback() on OOM.
17185-    + Make one call to prof_active_get_unlocked() per allocation event, and use
17186-      the result throughout the relevant functions that handle an allocation
17187-      event.  Also add a missing check in prof_realloc().  These fixes protect
17188-      allocation events against concurrent prof_active changes.
17189-    + Fix ixallocx_prof() to pass usize_max and zero to ixallocx_prof_sample()
17190-      in the correct order.
17191-    + Fix prof_realloc() to call prof_free_sampled_object() after calling
17192-      prof_malloc_sample_object().  Prior to this fix, if tctx and old_tctx were
17193-      the same, the tctx could have been prematurely destroyed.
17194-  - Fix portability bugs:
17195-    + Don't bitshift by negative amounts when encoding/decoding run sizes in
17196-      chunk header maps.  This affected systems with page sizes greater than 8
17197-      KiB.
17198-    + Rename index_t to szind_t to avoid an existing type on Solaris.
17199-    + Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to
17200-      match glibc and avoid compilation errors when including both
17201-      jemalloc/jemalloc.h and malloc.h in C++ code.
17202-    + Don't assume that /bin/sh is appropriate when running size_classes.sh
17203-      during configuration.
17204-    + Consider __sparcv9 a synonym for __sparc64__ when defining LG_QUANTUM.
17205-    + Link tests to librt if it contains clock_gettime(2).
17206-
17207-* 4.0.0 (August 17, 2015)
17208-
17209-  This version contains many speed and space optimizations, both minor and
17210-  major.  The major themes are generalization, unification, and simplification.
17211-  Although many of these optimizations cause no visible behavior change, their
17212-  cumulative effect is substantial.
17213-
17214-  New features:
17215-  - Normalize size class spacing to be consistent across the complete size
17216-    range.  By default there are four size classes per size doubling, but this
17217-    is now configurable via the --with-lg-size-class-group option.  Also add the
17218-    --with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and
17219-    --with-lg-tiny-min options, which can be used to tweak page and size class
17220-    settings.  Impacts:
17221-    + Worst case performance for incrementally growing/shrinking reallocation
17222-      is improved because there are far fewer size classes, and therefore
17223-      copying happens less often.
17224-    + Internal fragmentation is limited to 20% for all but the smallest size
17225-      classes (those less than four times the quantum).  Requests of
17226-      (4 KiB + 1 B) and (4 MiB + 1 B) previously suffered nearly 50% internal
-      fragmentation.
17227-    + Chunk fragmentation tends to be lower because there are fewer distinct run
17228-      sizes to pack.
17229-  - Add support for explicit tcaches.  The "tcache.create", "tcache.flush", and
17230-    "tcache.destroy" mallctls control tcache lifetime and flushing, and the
17231-    MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API
17232-    control which tcache is used for each operation (see the sketch below).
17233-  - Implement per thread heap profiling, as well as the ability to
17234-    enable/disable heap profiling on a per thread basis.  Add the "prof.reset",
17235-    "prof.lg_sample", "thread.prof.name", "thread.prof.active",
17236-    "opt.prof_thread_active_init", and "prof.thread_active_init" mallctls.
17238-  - Add support for per arena application-specified chunk allocators, configured
17239-    via the "arena.<i>.chunk_hooks" mallctl.
17240-  - Refactor huge allocation to be managed by arenas, so that arenas now
17241-    function as general purpose independent allocators.  This is important in
17242-    the context of user-specified chunk allocators, aside from the scalability
17243-    benefits.  Related new statistics:
17244-    + The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc",
17245-      "stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests"
17246-      mallctls provide high level per arena huge allocation statistics.
17247-    + The "arenas.nhchunks", "arenas.hchunk.<i>.size",
17248-      "stats.arenas.<i>.hchunks.<j>.nmalloc",
17249-      "stats.arenas.<i>.hchunks.<j>.ndalloc",
17250-      "stats.arenas.<i>.hchunks.<j>.nrequests", and
17251-      "stats.arenas.<i>.hchunks.<j>.curhchunks" mallctls provide per size class
17252-      statistics.
17253-  - Add the 'util' column to malloc_stats_print() output, which reports the
17254-    proportion of available regions that are currently in use for each small
17255-    size class.
17256-  - Add "alloc" and "free" modes for junk filling (see the "opt.junk"
17257-    mallctl), so that it is possible to separately enable junk filling for
17258-    allocation versus deallocation.
17259-  - Add the jemalloc-config script, which provides information about how
17260-    jemalloc was configured, and how to integrate it into application builds.
17261-  - Add metadata statistics, which are accessible via the "stats.metadata",
17262-    "stats.arenas.<i>.metadata.mapped", and
17263-    "stats.arenas.<i>.metadata.allocated" mallctls.
17264-  - Add the "stats.resident" mallctl, which reports the upper limit of
17265-    physically resident memory mapped by the allocator.
17266-  - Add per arena control over unused dirty page purging, via the
17267-    "arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and
17268-    "stats.arenas.<i>.lg_dirty_mult" mallctls.
17269-  - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump
17270-    feature on/off during program execution.
17271-  - Add sdallocx(), which implements sized deallocation.  The primary
17272-    optimization over dallocx() is the removal of a metadata read, which often
17273-    suffers an L1 cache miss.
17274-  - Add missing header includes in jemalloc/jemalloc.h, so that applications
17275-    only have to #include <jemalloc/jemalloc.h>.
17276-  - Add support for additional platforms:
17277-    + Bitrig
17278-    + Cygwin
17279-    + DragonFlyBSD
17280-    + iOS
17281-    + OpenBSD
17282-    + OpenRISC/or1k
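-
-  A minimal usage sketch for the explicit tcache and sized-deallocation
-  interfaces listed above (illustrative only; it assumes default unprefixed
-  symbols and relies on <jemalloc/jemalloc.h> pulling in its own dependencies,
-  as also noted above):
-
-      #include <jemalloc/jemalloc.h>
-
-      int
-      main(void) {
-          /* Create an explicit tcache; a read returns its index. */
-          unsigned tc;
-          size_t sz = sizeof(tc);
-          if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
-              return 1;
-
-          /* Allocate through the explicit tcache, then release the object
-           * with sized deallocation via the same tcache. */
-          void *p = mallocx(128, MALLOCX_TCACHE(tc));
-          if (p != NULL)
-              sdallocx(p, 128, MALLOCX_TCACHE(tc));
-
-          /* Destroy the tcache once it is no longer needed. */
-          mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
-          return 0;
-      }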
17283-
17284-  Optimizations:
17285-  - Maintain dirty runs in per arena LRUs rather than in per arena trees of
17286-    dirty-run-containing chunks.  In practice this change significantly reduces
17287-    dirty page purging volume.
17288-  - Integrate whole chunks into the unused dirty page purging machinery.  This
17289-    reduces the cost of repeated huge allocation/deallocation, because it
17290-    effectively introduces a cache of chunks.
17291-  - Split the arena chunk map into two separate arrays, in order to increase
17292-    cache locality for the frequently accessed bits.
17293-  - Move small run metadata out of runs, into arena chunk headers.  This reduces
17294-    run fragmentation; smaller runs reduce external fragmentation for small size
17295-    classes; and packed (less uniformly aligned) metadata layout improves CPU
17296-    cache set distribution.
17297-  - Randomly distribute large allocation base pointer alignment relative to page
17298-    boundaries in order to more uniformly utilize CPU cache sets.  This can be
17299-    disabled via the --disable-cache-oblivious configure option, and queried via
17300-    the "config.cache_oblivious" mallctl.
17301-  - Micro-optimize the fast paths for the public API functions.
17302-  - Refactor thread-specific data to reside in a single structure.  This assures
17303-    that only a single TLS read is necessary per call into the public API.
17304-  - Implement in-place huge allocation growing and shrinking.
17305-  - Refactor rtree (radix tree for chunk lookups) to be lock-free, and make
17306-    additional optimizations that reduce maximum lookup depth to one or two
17307-    levels.  This resolves what was a concurrency bottleneck for per arena huge
17308-    allocation, because a global data structure is critical for determining
17309-    which arenas own which huge allocations.
17310-
17311-  Incompatible changes:
17312-  - Replace --enable-cc-silence with --disable-cc-silence to suppress spurious
17313-    warnings by default.
17314-  - Assure that the constness of malloc_usable_size()'s return type matches that
17315-    of the system implementation.
17316-  - Change the heap profile dump format to support per thread heap profiling,
17317-    rename pprof to jeprof, and enhance it with the --thread=<n> option.  As a
17318-    result, the bundled jeprof must now be used rather than the upstream
17319-    (gperftools) pprof.
17320-  - Disable "opt.prof_final" by default, in order to avoid atexit(3), which can
17321-    internally deadlock on some platforms.
17322-  - Change the "arenas.nlruns" mallctl type from size_t to unsigned.
17323-  - Replace the "stats.arenas.<i>.bins.<j>.allocated" mallctl with
17324-    "stats.arenas.<i>.bins.<j>.curregs".
17325-  - Ignore MALLOC_CONF in set{uid,gid,cap} binaries.
17326-  - Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the
17327-    MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage.
17328-
17329-  Removed features:
17330-  - Remove the *allocm() API, which is superseded by the *allocx() API.
17331-  - Remove the --enable-dss option, and make dss non-optional on all platforms
17332-    which support sbrk(2).
17333-  - Remove the "arenas.purge" mallctl, which was obsoleted by the
17334-    "arena.<i>.purge" mallctl in 3.1.0.
17335-  - Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically
17336-    detects whether it is running inside Valgrind.
17337-  - Remove the "stats.huge.allocated", "stats.huge.nmalloc", and
17338-    "stats.huge.ndalloc" mallctls.
17339-  - Remove the --enable-mremap option.
17340-  - Remove the "stats.chunks.current", "stats.chunks.total", and
17341-    "stats.chunks.high" mallctls.
17342-
17343-  Bug fixes:
17344-  - Fix the cactive statistic to decrease (rather than increase) when active
17345-    memory decreases.  This regression was first released in 3.5.0.
17346-  - Fix OOM handling in memalign() and valloc().  A variant of this bug existed
17347-    in all releases since 2.0.0, which introduced these functions.
17348-  - Fix an OOM-related regression in arena_tcache_fill_small(), which could
17349-    cause cache corruption on OOM.  This regression was present in all releases
17350-    from 2.2.0 through 3.6.0.
17351-  - Fix size class overflow handling for malloc(), posix_memalign(), memalign(),
17352-    calloc(), and realloc() when profiling is enabled.
17353-  - Fix the "arena.<i>.dss" mallctl to return an error if "primary" or
17354-    "secondary" precedence is specified, but sbrk(2) is not supported.
17355-  - Fix fallback lg_floor() implementations to handle extremely large inputs.
17356-  - Ensure the default purgeable zone is after the default zone on OS X.
17357-  - Fix latent bugs in atomic_*().
17358-  - Fix the "arena.<i>.dss" mallctl to handle read-only calls.
17359-  - Fix tls_model configuration to enable the initial-exec model when possible.
17360-  - Mark malloc_conf as a weak symbol so that the application can override it.
17361-  - Correctly detect glibc's adaptive pthread mutexes.
17362-  - Fix the --without-export configure option.
17363-
17364-* 3.6.0 (March 31, 2014)
17365-
17366-  This version contains a critical bug fix for a regression present in 3.5.0 and
17367-  3.5.1.
17368-
17369-  Bug fixes:
17370-  - Fix a regression in arena_chunk_alloc() that caused crashes during
17371-    small/large allocation if chunk allocation failed.  In the absence of this
17372-    bug, chunk allocation failure would result in allocation failure, e.g.  NULL
17373-    return from malloc().  This regression was introduced in 3.5.0.
17374-  - Fix gcc intrinsics-based backtracing by specifying
17375-    -fno-omit-frame-pointer to gcc.  Note that the application (and all the
17376-    libraries it links to) must also be compiled with this option for
17377-    backtracing to be reliable.
17378-  - Use dss allocation precedence for huge allocations as well as small/large
17379-    allocations.
17380-  - Fix test assertion failure message formatting.  This bug did not manifest on
17381-    x86_64 systems because of implementation subtleties in va_list.
17382-  - Fix inconsequential test failures for hash and SFMT code.
17383-
17384-  New features:
17385-  - Support heap profiling on FreeBSD.  This feature depends on the proc
17386-    filesystem being mounted during heap profile dumping.
17387-
17388-* 3.5.1 (February 25, 2014)
17389-
17390-  This version primarily addresses minor bugs in test code.
17391-
17392-  Bug fixes:
17393-  - Configure Solaris/Illumos to use MADV_FREE.
17394-  - Fix junk filling for mremap(2)-based huge reallocation.  This is only
17395-    relevant if configuring with the --enable-mremap option specified.
17396-  - Avoid compilation failure if 'restrict' C99 keyword is not supported by the
17397-    compiler.
17398-  - Add a configure test for SSE2 rather than assuming it is usable on i686
17399-    systems.  This fixes test compilation errors, especially on 32-bit Linux
17400-    systems.
17401-  - Fix mallctl argument size mismatches (size_t vs. uint64_t) in the stats unit
17402-    test.
17403-  - Fix/remove flawed alignment-related overflow tests.
17404-  - Prevent compiler optimizations that could change backtraces in the
17405-    prof_accum unit test.
17406-
17407-* 3.5.0 (January 22, 2014)
17408-
17409-  This version focuses on refactoring and automated testing, though it also
17410-  includes some non-trivial heap profiling optimizations not mentioned below.
17411-
17412-  New features:
17413-  - Add the *allocx() API, which is a successor to the experimental *allocm()
17414-    API.  The *allocx() functions are slightly simpler to use because they have
17415-    fewer parameters, they directly return the results of primary interest, and
17416-    mallocx()/rallocx() avoid the strict aliasing pitfall that
17417-    allocm()/rallocm() share with posix_memalign().  Note that *allocm() is
17418-    slated for removal in the next non-bugfix release.
17419-  - Add support for LinuxThreads.
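-
-  A short sketch of the *allocx() functions described above (illustrative
-  only; assumes default unprefixed symbols and <jemalloc/jemalloc.h>):
-
-      #include <stddef.h>
-      #include <jemalloc/jemalloc.h>
-
-      int
-      main(void) {
-          /* 64-byte-aligned, zeroed allocation. */
-          void *p = mallocx(100, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
-          if (p == NULL)
-              return 1;
-
-          /* Try to grow in place to at least 200 bytes, accepting up to 100
-           * bytes of optional extra space; the new usable size is returned. */
-          size_t usize = xallocx(p, 200, 100, 0);
-          if (usize < 200) {
-              /* In-place growth was not possible; move the object. */
-              void *q = rallocx(p, 200, 0);
-              if (q != NULL)
-                  p = q;
-          }
-
-          /* sallocx() reports the usable size backing p. */
-          usize = sallocx(p, 0);
-          (void)usize;
-
-          dallocx(p, 0);
-          return 0;
-      }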
17420-
17421-  Bug fixes:
17422-  - Unless heap profiling is enabled, disable floating point code and don't link
17423-    with libm.  This, in combination with e.g. EXTRA_CFLAGS=-mno-sse on x64
17424-    systems, makes it possible to completely disable floating point register
17425-    use.  Some versions of glibc neglect to save/restore caller-saved floating
17426-    point registers during dynamic lazy symbol loading, and the symbol loading
17427-    code uses whatever malloc the application happens to have linked/loaded
17428-    with, the result being potential floating point register corruption.
17429-  - Report ENOMEM rather than EINVAL if an OOM occurs during heap profiling
17430-    backtrace creation in imemalign().  This bug impacted posix_memalign() and
17431-    aligned_alloc().
17432-  - Fix a file descriptor leak in a prof_dump_maps() error path.
17433-  - Fix prof_dump() to close the dump file descriptor for all relevant error
17434-    paths.
17435-  - Fix rallocm() to use the arena specified by the ALLOCM_ARENA(s) flag for
17436-    allocation, not just deallocation.
17437-  - Fix a data race for large allocation stats counters.
17438-  - Fix a potential infinite loop during thread exit.  This bug occurred on
17439-    Solaris, and could affect other platforms with similar pthreads TSD
17440-    implementations.
17441-  - Don't junk-fill reallocations unless usable size changes.  This fixes a
17442-    violation of the *allocx()/*allocm() semantics.
17443-  - Fix growing large reallocation to junk fill new space.
17444-  - Fix huge deallocation to junk fill when munmap is disabled.
17445-  - Change the default private namespace prefix from empty to je_, and change
17446-    --with-private-namespace-prefix so that it prepends an additional prefix
17447-    rather than replacing je_.  This reduces the likelihood of applications
17448-    which statically link jemalloc experiencing symbol name collisions.
17449-  - Add missing private namespace mangling (relevant when
17450-    --with-private-namespace is specified).
17451-  - Add and use JEMALLOC_INLINE_C so that static inline functions are marked as
17452-    static even for debug builds.
17453-  - Add a missing mutex unlock in a malloc_init_hard() error path.  In practice
17454-    this error path is never executed.
17455-  - Fix numerous bugs in malloc_strtoumax() error handling/reporting.  These
17456-    bugs had no impact except for malformed inputs.
17457-  - Fix numerous bugs in malloc_snprintf().  These bugs were not exercised by
17458-    existing calls, so they had no impact.
17459-
17460-* 3.4.1 (October 20, 2013)
17461-
17462-  Bug fixes:
17463-  - Fix a race in the "arenas.extend" mallctl that could cause memory corruption
17464-    of internal data structures and subsequent crashes.
17465-  - Fix Valgrind integration flaws that caused Valgrind warnings about reads of
17466-    uninitialized memory in:
17467-    + arena chunk headers
17468-    + internal zero-initialized data structures (relevant to tcache and prof
17469-      code)
17470-  - Preserve errno during the first allocation.  A readlink(2) call during
17471-    initialization fails unless /etc/malloc.conf exists, so errno was typically
17472-    set during the first allocation prior to this fix.
17473-  - Fix compilation warnings reported by gcc 4.8.1.
17474-
17475-* 3.4.0 (June 2, 2013)
17476-
17477-  This version is essentially a small bugfix release, but the addition of
17478-  aarch64 support requires that the minor version be incremented.
17479-
17480-  Bug fixes:
17481-  - Fix race-triggered deadlocks in chunk_record().  These deadlocks were
17482-    typically triggered by multiple threads concurrently deallocating huge
17483-    objects.
17484-
17485-  New features:
17486-  - Add support for the aarch64 architecture.
17487-
17488-* 3.3.1 (March 6, 2013)
17489-
17490-  This version fixes bugs that are typically encountered only when utilizing
17491-  custom run-time options.
17492-
17493-  Bug fixes:
17494-  - Fix a locking order bug that could cause deadlock during fork if heap
17495-    profiling were enabled.
17496-  - Fix a chunk recycling bug that could cause the allocator to lose track of
17497-    whether a chunk was zeroed.  On FreeBSD, NetBSD, and OS X, it could cause
17498-    corruption if allocating via sbrk(2) (unlikely unless running with the
17499-    "dss:primary" option specified).  This was completely harmless on Linux
17500-    unless using mlockall(2) (and unlikely even then, unless the
17501-    --disable-munmap configure option or the "dss:primary" option was
17502-    specified).  This regression was introduced in 3.1.0 by the
17503-    mlockall(2)/madvise(2) interaction fix.
17504-  - Fix TLS-related memory corruption that could occur during thread exit if the
17505-    thread never allocated memory.  Only the quarantine and prof facilities were
17506-    susceptible.
17507-  - Fix two quarantine bugs:
17508-    + Internal reallocation of the quarantined object array leaked the old
17509-      array.
17510-    + Reallocation failure for internal reallocation of the quarantined object
17511-      array (very unlikely) resulted in memory corruption.
17512-  - Fix Valgrind integration to annotate all internally allocated memory in a
17513-    way that keeps Valgrind happy about internal data structure access.
17514-  - Fix building for s390 systems.
17515-
17516-* 3.3.0 (January 23, 2013)
17517-
17518-  This version includes a few minor performance improvements in addition to the
17519-  listed new features and bug fixes.
17520-
17521-  New features:
17522-  - Add clipping support to lg_chunk option processing.
17523-  - Add the --enable-ivsalloc option.
17524-  - Add the --without-export option.
17525-  - Add the --disable-zone-allocator option.
17526-
17527-  Bug fixes:
17528-  - Fix "arenas.extend" mallctl to output the number of arenas.
17529-  - Fix chunk_recycle() to unconditionally inform Valgrind that returned memory
17530-    is undefined.
17531-  - Fix build break on FreeBSD related to alloca.h.
17532-
17533-* 3.2.0 (November 9, 2012)
17534-
17535-  In addition to a couple of bug fixes, this version modifies page run
17536-  allocation and dirty page purging algorithms in order to better control
17537-  page-level virtual memory fragmentation.
17538-
17539-  Incompatible changes:
17540-  - Change the "opt.lg_dirty_mult" default from 5 to 3 (32:1 to 8:1).
17541-
17542-  Bug fixes:
17543-  - Fix dss/mmap allocation precedence code to use recyclable mmap memory only
17544-    after primary dss allocation fails.
17545-  - Fix deadlock in the "arenas.purge" mallctl.  This regression was introduced
17546-    in 3.1.0 by the addition of the "arena.<i>.purge" mallctl.
17547-
17548-* 3.1.0 (October 16, 2012)
17549-
17550-  New features:
17551-  - Auto-detect whether running inside Valgrind, thus removing the need to
17552-    manually specify MALLOC_CONF=valgrind:true.
17553-  - Add the "arenas.extend" mallctl, which allows applications to create
17554-    manually managed arenas.
17555-  - Add the ALLOCM_ARENA() flag for {,r,d}allocm().
17556-  - Add the "opt.dss", "arena.<i>.dss", and "stats.arenas.<i>.dss" mallctls,
17557-    which provide control over dss/mmap precedence.
17558-  - Add the "arena.<i>.purge" mallctl, which obsoletes "arenas.purge".
17559-  - Define LG_QUANTUM for hppa.
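-
-  A sketch combining the "arenas.extend" mallctl and the ALLOCM_ARENA() flag
-  listed above (illustrative only; assumes default symbol names and a build
-  with the experimental *allocm() API):
-
-      #include <stddef.h>
-      #include <jemalloc/jemalloc.h>
-
-      int
-      main(void) {
-          /* Create a manually managed arena; a read returns its index. */
-          unsigned arena_ind;
-          size_t sz = sizeof(arena_ind);
-          if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0)
-              return 1;
-
-          /* Allocate from that arena, then free the object. */
-          void *p;
-          int r = allocm(&p, NULL, 4096, ALLOCM_ARENA(arena_ind));
-          if (r != ALLOCM_SUCCESS)
-              return 1;
-          dallocm(p, 0);
-          return 0;
-      }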
17560-
17561-  Incompatible changes:
17562-  - Disable tcache by default if running inside Valgrind, in order to avoid
17563-    making unallocated objects appear reachable to Valgrind.
17564-  - Drop const from malloc_usable_size() argument on Linux.
17565-
17566-  Bug fixes:
17567-  - Fix heap profiling crash if sampled object is freed via realloc(p, 0).
17568-  - Remove const from __*_hook variable declarations, so that glibc can modify
17569-    them during process forking.
17570-  - Fix mlockall(2)/madvise(2) interaction.
17571-  - Fix fork(2)-related deadlocks.
17572-  - Fix error return value for "thread.tcache.enabled" mallctl.
17573-
17574-* 3.0.0 (May 11, 2012)
17575-
17576-  Although this version adds some major new features, the primary focus is on
17577-  internal code cleanup that facilitates maintainability and portability, most
17578-  of which is not reflected in the ChangeLog.  This is the first release to
17579-  incorporate substantial contributions from numerous other developers, and the
17580-  result is a more broadly useful allocator (see the git revision history for
17581-  contribution details).  Note that the license has been unified, thanks to
17582-  Facebook granting a license under the same terms as the other copyright
17583-  holders (see COPYING).
17584-
17585-  New features:
17586-  - Implement Valgrind support, redzones, and quarantine.
17587-  - Add support for additional platforms:
17588-    + FreeBSD
17589-    + Mac OS X Lion
17590-    + MinGW
17591-    + Windows (no support yet for replacing the system malloc)
17592-  - Add support for additional architectures:
17593-    + MIPS
17594-    + SH4
17595-    + Tilera
17596-  - Add support for cross compiling.
17597-  - Add nallocm(), which rounds a request size up to the nearest size class
17598-    without actually allocating.
17599-  - Implement aligned_alloc() (blame C11).
17600-  - Add the "thread.tcache.enabled" mallctl.
17601-  - Add the "opt.prof_final" mallctl.
17602-  - Update pprof (from gperftools 2.0).
17603-  - Add the --with-mangling option.
17604-  - Add the --disable-experimental option.
17605-  - Add the --disable-munmap option, and make it the default on Linux.
17606-  - Add the --enable-mremap option; use of mremap(2) is disabled by default.
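-
-  A sketch of two of the additions above, nallocm() and the
-  "thread.tcache.enabled" mallctl (illustrative only; assumes default symbol
-  names and a build with the experimental API enabled, which was the default):
-
-      #include <stdbool.h>
-      #include <stddef.h>
-      #include <jemalloc/jemalloc.h>
-
-      int
-      main(void) {
-          /* Ask what size a 100-byte request would be rounded up to,
-           * without actually allocating. */
-          size_t rsize;
-          if (nallocm(&rsize, 100, 0) != ALLOCM_SUCCESS)
-              return 1;
-
-          /* Temporarily disable this thread's cache, then re-enable it. */
-          bool off = false, on = true;
-          mallctl("thread.tcache.enabled", NULL, NULL, &off, sizeof(off));
-          mallctl("thread.tcache.enabled", NULL, NULL, &on, sizeof(on));
-          return 0;
-      }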
17607-
17608-  Incompatible changes:
17609-  - Enable stats by default.
17610-  - Enable fill by default.
17611-  - Disable lazy locking by default.
17612-  - Rename the "tcache.flush" mallctl to "thread.tcache.flush".
17613-  - Rename the "arenas.pagesize" mallctl to "arenas.page".
17614-  - Change the "opt.lg_prof_sample" default from 0 to 19 (1 B to 512 KiB).
17615-  - Change the "opt.prof_accum" default from true to false.
17616-
17617-  Removed features:
17618-  - Remove the swap feature, including the "config.swap", "swap.avail",
17619-    "swap.prezeroed", "swap.nfds", and "swap.fds" mallctls.
17620-  - Remove highruns statistics, including the
17621-    "stats.arenas.<i>.bins.<j>.highruns" and
17622-    "stats.arenas.<i>.lruns.<j>.highruns" mallctls.
17623-  - As part of small size class refactoring, remove the "opt.lg_[qc]space_max",
17624-    "arenas.cacheline", "arenas.subpage", "arenas.[tqcs]space_{min,max}", and
17625-    "arenas.[tqcs]bins" mallctls.
17626-  - Remove the "arenas.chunksize" mallctl.
17627-  - Remove the "opt.lg_prof_tcmax" option.
17628-  - Remove the "opt.lg_prof_bt_max" option.
17629-  - Remove the "opt.lg_tcache_gc_sweep" option.
17630-  - Remove the --disable-tiny option, including the "config.tiny" mallctl.
17631-  - Remove the --enable-dynamic-page-shift configure option.
17632-  - Remove the --enable-sysv configure option.
17633-
17634-  Bug fixes:
17635-  - Fix a statistics-related bug in the "thread.arena" mallctl that could cause
17636-    invalid statistics and crashes.
17637-  - Work around TLS deallocation via free() on Linux.  This bug could cause
17638-    write-after-free memory corruption.
17639-  - Fix a potential deadlock that could occur during interval- and
17640-    growth-triggered heap profile dumps.
17641-  - Fix large calloc() zeroing bugs due to dropping chunk map unzeroed flags.
17642-  - Fix chunk_alloc_dss() to stop claiming memory is zeroed.  This bug could
17643-    cause memory corruption and crashes with --enable-dss specified.
17644-  - Fix fork-related bugs that could cause deadlock in children between fork
17645-    and exec.
17646-  - Fix malloc_stats_print() to honor 'b' and 'l' in the opts parameter.
17647-  - Fix realloc(p, 0) to act like free(p).
17648-  - Do not enforce minimum alignment in memalign().
17649-  - Check for NULL pointer in malloc_usable_size().
17650-  - Fix an off-by-one heap profile statistics bug that could be observed in
17651-    interval- and growth-triggered heap profiles.
17652-  - Fix the "epoch" mallctl to update cached stats even if the passed in epoch
17653-    is 0.
17654-  - Fix bin->runcur management to fix a layout policy bug.  This bug did not
17655-    affect correctness.
17656-  - Fix a bug in choose_arena_hard() that potentially caused more arenas to be
17657-    initialized than necessary.
17658-  - Add missing "opt.lg_tcache_max" mallctl implementation.
17659-  - Use glibc allocator hooks to make mixed allocator usage less likely.
17660-  - Fix build issues for --disable-tcache.
17661-  - Don't mangle pthread_create() when --with-private-namespace is specified.
17662-
17663-* 2.2.5 (November 14, 2011)
17664-
17665-  Bug fixes:
17666-  - Fix huge_ralloc() race when using mremap(2).  This is a serious bug that
17667-    could cause memory corruption and/or crashes.
17668-  - Fix huge_ralloc() to maintain chunk statistics.
17669-  - Fix malloc_stats_print(..., "a") output.
17670-
17671-* 2.2.4 (November 5, 2011)
17672-
17673-  Bug fixes:
17674-  - Initialize arenas_tsd before using it.  This bug existed for 2.2.[0-3], as
17675-    well as for --disable-tls builds in earlier releases.
17676-  - Do not assume a 4 KiB page size in test/rallocm.c.
17677-
17678-* 2.2.3 (August 31, 2011)
17679-
17680-  This version fixes numerous bugs related to heap profiling.
17681-
17682-  Bug fixes:
17683-  - Fix a prof-related race condition.  This bug could cause memory corruption,
17684-    but only occurred in non-default configurations (prof_accum:false).
17685-  - Fix off-by-one backtracing issues (make sure that prof_alloc_prep() is
17686-    excluded from backtraces).
17687-  - Fix a prof-related bug in realloc() (only triggered by OOM errors).
17688-  - Fix prof-related bugs in allocm() and rallocm().
17689-  - Fix prof_tdata_cleanup() for --disable-tls builds.
17690-  - Fix a relative include path, to fix objdir builds.
17691-
17692-* 2.2.2 (July 30, 2011)
17693-
17694-  Bug fixes:
17695-  - Fix a build error for --disable-tcache.
17696-  - Fix assertions in arena_purge() (for real this time).
17697-  - Add the --with-private-namespace option.  This is a workaround for symbol
17698-    conflicts that can inadvertently arise when using static libraries.
17699-
17700-* 2.2.1 (March 30, 2011)
17701-
17702-  Bug fixes:
17703-  - Implement atomic operations for x86/x64.  This fixes compilation failures
17704-    for versions of gcc that are still in wide use.
17705-  - Fix an assertion in arena_purge().
17706-
17707-* 2.2.0 (March 22, 2011)
17708-
17709-  This version incorporates several improvements to algorithms and data
17710-  structures that tend to reduce fragmentation and increase speed.
17711-
17712-  New features:
17713-  - Add the "stats.cactive" mallctl.
17714-  - Update pprof (from google-perftools 1.7).
17715-  - Improve backtracing-related configuration logic, and add the
17716-    --disable-prof-libgcc option.
17717-
17718-  Bug fixes:
17719-  - Change default symbol visibility from "internal" to "hidden", which
17720-    decreases the overhead of library-internal function calls.
17721-  - Fix symbol visibility so that it is also set on OS X.
17722-  - Fix a build dependency regression caused by the introduction of the .pic.o
17723-    suffix for PIC object files.
17724-  - Add missing checks for mutex initialization failures.
17725-  - Don't use libgcc-based backtracing except on x64, where it is known to work.
17726-  - Fix deadlocks on OS X that were due to memory allocation in
17727-    pthread_mutex_lock().
17728-  - Heap profiling-specific fixes:
17729-    + Fix memory corruption due to integer overflow in small region index
17730-      computation, when using a small enough sample interval that profiling
17731-      context pointers are stored in small run headers.
17732-    + Fix a bootstrap ordering bug that only occurred with TLS disabled.
17733-    + Fix a rallocm() rsize bug.
17734-    + Fix error detection bugs for aligned memory allocation.
17735-
17736-* 2.1.3 (March 14, 2011)
17737-
17738-  Bug fixes:
17739-  - Fix a cpp logic regression (due to the "thread.{de,}allocatedp" mallctl fix
17740-    for OS X in 2.1.2).
17741-  - Fix a "thread.arena" mallctl bug.
17742-  - Fix a thread cache stats merging bug.
17743-
17744-* 2.1.2 (March 2, 2011)
17745-
17746-  Bug fixes:
17747-  - Fix "thread.{de,}allocatedp" mallctl for OS X.
17748-  - Add missing jemalloc.a to build system.
17749-
17750-* 2.1.1 (January 31, 2011)
17751-
17752-  Bug fixes:
17753-  - Fix aligned huge reallocation (affected allocm()).
17754-  - Fix the ALLOCM_LG_ALIGN macro definition.
17755-  - Fix a heap dumping deadlock.
17756-  - Fix a "thread.arena" mallctl bug.
17757-
17758-* 2.1.0 (December 3, 2010)
17759-
17760-  This version incorporates some optimizations that can't quite be considered
17761-  bug fixes.
17762-
17763-  New features:
17764-  - Use Linux's mremap(2) for huge object reallocation when possible.
17765-  - Avoid locking in mallctl*() when possible.
17766-  - Add the "thread.[de]allocatedp" mallctls.
17767-  - Convert the manual page source from roff to DocBook, and generate both roff
17768-    and HTML manuals.
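-
-  A sketch of the counter pointers exposed by the new mallctls above
-  (illustrative only; assumes a build configured with --enable-stats):
-
-      #include <stddef.h>
-      #include <stdint.h>
-      #include <stdlib.h>
-      #include <jemalloc/jemalloc.h>
-
-      int
-      main(void) {
-          /* Fetch pointers to this thread's cumulative counters once; they
-           * can then be read without further mallctl() calls. */
-          uint64_t *allocp, *deallocp;
-          size_t sz = sizeof(uint64_t *);
-          if (mallctl("thread.allocatedp", &allocp, &sz, NULL, 0) != 0 ||
-              mallctl("thread.deallocatedp", &deallocp, &sz, NULL, 0) != 0)
-              return 1;
-
-          void *p = malloc(4096);
-          free(p);
-          /* *allocp and *deallocp now reflect the malloc()/free() above. */
-          return 0;
-      }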
17769-
17770-  Bug fixes:
17771-  - Fix a crash due to incorrect bootstrap ordering.  This only impacted
17772-    --enable-debug --enable-dss configurations.
17773-  - Fix a minor statistics bug for mallctl("swap.avail", ...).
17774-
17775-* 2.0.1 (October 29, 2010)
17776-
17777-  Bug fixes:
17778-  - Fix a race condition in heap profiling that could cause undefined behavior
17779-    if "opt.prof_accum" were disabled.
17780-  - Add missing mutex unlocks for some OOM error paths in the heap profiling
17781-    code.
17782-  - Fix a compilation error for non-C99 builds.
17783-
17784-* 2.0.0 (October 24, 2010)
17785-
17786-  This version focuses on the experimental *allocm() API, and on improved
17787-  run-time configuration/introspection.  Nonetheless, numerous performance
17788-  improvements are also included.
17789-
17790-  New features:
17791-  - Implement the experimental {,r,s,d}allocm() API, which provides a superset
17792-    of the functionality available via malloc(), calloc(), posix_memalign(),
17793-    realloc(), malloc_usable_size(), and free().  These functions can be used to
17794-    allocate/reallocate aligned zeroed memory, ask for optional extra memory
17795-    during reallocation, prevent object movement during reallocation, etc.
-    (A sketch follows this list.)
17796-  - Replace JEMALLOC_OPTIONS/JEMALLOC_PROF_PREFIX with MALLOC_CONF, which is
17797-    more human-readable, and more flexible.  For example:
17798-      JEMALLOC_OPTIONS=AJP
17799-    is now:
17800-      MALLOC_CONF=abort:true,fill:true,stats_print:true
17801-  - Port to Apple OS X.  Sponsored by Mozilla.
17802-  - Make it possible for the application to control thread-->arena mappings via
17803-    the "thread.arena" mallctl.
17804-  - Add compile-time support for all TLS-related functionality via pthreads TSD.
17805-    This is mainly of interest for OS X, which does not support TLS, but has a
17806-    TSD implementation with similar performance.
17807-  - Override memalign() and valloc() if they are provided by the system.
17808-  - Add the "arenas.purge" mallctl, which can be used to synchronously purge all
17809-    dirty unused pages.
17810-  - Make cumulative heap profiling data optional, so that it is possible to
17811-    limit the amount of memory consumed by heap profiling data structures.
17812-  - Add per thread allocation counters that can be accessed via the
17813-    "thread.allocated" and "thread.deallocated" mallctls.
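-
-  A sketch of the experimental API described above (illustrative only; it
-  assumes default symbol names and that the installed <jemalloc/jemalloc.h>
-  declares the experimental functions):
-
-      #include <stddef.h>
-      #include <jemalloc/jemalloc.h>
-
-      int
-      main(void) {
-          /* Zeroed, 4096-byte-aligned allocation; rsize receives the usable
-           * size actually provided. */
-          void *p;
-          size_t rsize;
-          int r = allocm(&p, &rsize, 1000, ALLOCM_ALIGN(4096) | ALLOCM_ZERO);
-          if (r != ALLOCM_SUCCESS)
-              return 1;
-
-          /* Ask for in-place growth to 2000 bytes; refuse to let the object
-           * move if that is not possible. */
-          r = rallocm(&p, &rsize, 2000, 0, ALLOCM_NO_MOVE);
-          if (r == ALLOCM_ERR_NOT_MOVED) {
-              /* The object was left untouched at its original size. */
-          }
-
-          dallocm(p, 0);
-          return 0;
-      }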
17814-
17815-  Incompatible changes:
17816-  - Remove JEMALLOC_OPTIONS and malloc_options (see MALLOC_CONF above).
17817-  - Increase default backtrace depth from 4 to 128 for heap profiling.
17818-  - Disable interval-based profile dumps by default.
17819-
17820-  Bug fixes:
17821-  - Remove bad assertions in fork handler functions.  These assertions could
17822-    cause aborts for some combinations of configure settings.
17823-  - Fix strerror_r() usage to deal with non-standard semantics in GNU libc.
17824-  - Fix leak context reporting.  This bug tended to cause the number of contexts
17825-    to be underreported (though the reported numbers of objects and bytes were
17826-    correct).
17827-  - Fix a realloc() bug for large in-place growing reallocation.  This bug could
17828-    cause memory corruption, but it was hard to trigger.
17829-  - Fix an allocation bug for small allocations that could be triggered if
17830-    multiple threads raced to create a new run of backing pages.
17831-  - Enhance the heap profiler to trigger samples based on usable size, rather
17832-    than request size.
17833-  - Fix a heap profiling bug due to sometimes losing track of requested object
17834-    size for sampled objects.
17835-
17836-* 1.0.3 (August 12, 2010)
17837-
17838-  Bug fixes:
17839-  - Fix the libunwind-based implementation of stack backtracing (used for heap
17840-    profiling).  This bug could cause zero-length backtraces to be reported.
17841-  - Add a missing mutex unlock in library initialization code.  If multiple
17842-    threads raced to initialize malloc, some of them could end up permanently
17843-    blocked.
17844-
17845-* 1.0.2 (May 11, 2010)
17846-
17847-  Bug fixes:
17848-  - Fix junk filling of large objects, which could cause memory corruption.
17849-  - Add MAP_NORESERVE support for chunk mapping, because otherwise virtual
17850-    memory limits could cause swap file configuration to fail.  Contributed by
17851-    Jordan DeLong.
17852-
17853-* 1.0.1 (April 14, 2010)
17854-
17855-  Bug fixes:
17856-  - Fix compilation when --enable-fill is specified.
17857-  - Fix threads-related profiling bugs that affected accuracy and caused memory
17858-    to be leaked during thread exit.
17859-  - Fix dirty page purging race conditions that could cause crashes.
17860-  - Fix crash in tcache flushing code during thread destruction.
17861-
17862-* 1.0.0 (April 11, 2010)
17863-
17864-  This release focuses on speed and run-time introspection.  Numerous
17865-  algorithmic improvements make this release substantially faster than its
17866-  predecessors.
17867-
17868-  New features:
17869-  - Implement autoconf-based configuration system.
17870-  - Add mallctl*(), for the purposes of introspection and run-time
17871-    configuration (see the sketch after this list).
17872-  - Make it possible for the application to manually flush a thread's cache, via
17873-    the "tcache.flush" mallctl.
17874-  - Base maximum dirty page count on proportion of active memory.
17875-  - Compute various additional run-time statistics, including per size class
17876-    statistics for large objects.
17877-  - Expose malloc_stats_print(), which can be called repeatedly by the
17878-    application.
17879-  - Simplify the malloc_message() signature to only take one string argument,
17880-    and incorporate an opaque data pointer argument for use by the application
17881-    in combination with malloc_stats_print().
17882-  - Add support for allocation backed by one or more swap files, and allow the
17883-    application to disable over-commit if swap files are in use.
17884-  - Implement allocation profiling and leak checking.
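-
-  A sketch of the introspection interfaces listed above: the mallctl()
-  namespace and malloc_stats_print() with a caller-supplied writer
-  (illustrative only; the "tcache.flush" name shown here was later renamed,
-  as noted in the 3.0.0 entry):
-
-      #include <stddef.h>
-      #include <stdio.h>
-      #include <jemalloc/jemalloc.h>
-
-      /* Route statistics output to a stream chosen by the caller. */
-      static void
-      write_cb(void *cbopaque, const char *s) {
-          fputs(s, (FILE *)cbopaque);
-      }
-
-      int
-      main(void) {
-          /* Flush this thread's cache through the mallctl namespace. */
-          mallctl("tcache.flush", NULL, NULL, NULL, 0);
-
-          /* Print allocator statistics through the callback above. */
-          malloc_stats_print(write_cb, stdout, NULL);
-          return 0;
-      }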
17885-
17886-  Removed features:
17887-  - Remove the dynamic arena rebalancing code, since thread-specific caching
17888-    reduces its utility.
17889-
17890-  Bug fixes:
17891-  - Modify chunk allocation to work when address space layout randomization
17892-    (ASLR) is in use.
17893-  - Fix thread cleanup bugs related to TLS destruction.
17894-  - Handle 0-size allocation requests in posix_memalign().
17895-  - Fix a chunk leak.  The leaked chunks were never touched, so this impacted
17896-    virtual memory usage, but not physical memory usage.
17897-
17898-* linux_2008082[78]a (August 27/28, 2008)
17899-
17900-  These snapshot releases are the simple result of incorporating Linux-specific
17901-  support into the FreeBSD malloc sources.
17902-
17903---------------------------------------------------------------------------------
17904-vim:filetype=text:textwidth=80
17905diff --git a/jemalloc/INSTALL.md b/jemalloc/INSTALL.md
17906deleted file mode 100644
17907index 90da718..0000000
17908--- a/jemalloc/INSTALL.md
17909+++ /dev/null
17910@@ -1,424 +0,0 @@
17911-Building and installing a packaged release of jemalloc can be as simple as
17912-typing the following while in the root directory of the source tree:
17913-
17914-    ./configure
17915-    make
17916-    make install
17917-
17918-If building from unpackaged developer sources, the simplest command sequence
17919-that might work is:
17920-
17921-    ./autogen.sh
17922-    make
17923-    make install
17924-
17925-You can uninstall the installed build artifacts like this:
17926-
17927-    make uninstall
17928-
17929-Notes:
17930- - "autoconf" needs to be installed
17931- - Documentation is built by the default target only when xsltproc is
17932-   available.  Build will warn but not stop if the dependency is missing.
17933-
17934-
17935-## Advanced configuration
17936-
17937-The 'configure' script supports numerous options that allow control of which
17938-functionality is enabled, where jemalloc is installed, etc.  Optionally, pass
17939-any of the following arguments (not a definitive list) to 'configure':
17940-
17941-* `--help`
17942-
17943-    Print a definitive list of options.
17944-
17945-* `--prefix=<install-root-dir>`
17946-
17947-    Set the base directory in which to install.  For example:
17948-
17949-        ./configure --prefix=/usr/local
17950-
17951-    will cause files to be installed into /usr/local/include, /usr/local/lib,
17952-    and /usr/local/man.
17953-
17954-* `--with-version=(<major>.<minor>.<bugfix>-<nrev>-g<gid>|VERSION)`
17955-
17956-    The VERSION file is mandatory for successful configuration, and the
17957-    following steps are taken to assure its presence:
17958-    1) If --with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid> is specified,
17959-       generate VERSION using the specified value.
17960-    2) If --with-version is not specified in either form and the source
17961-       directory is inside a git repository, try to generate VERSION via 'git
17962-       describe' invocations that pattern-match release tags.
17963-    3) If VERSION is missing, generate it with a bogus version:
17964-       0.0.0-0-g0000000000000000000000000000000000000000
17965-
17966-    Note that --with-version=VERSION bypasses (1) and (2), which simplifies
17967-    VERSION configuration when embedding a jemalloc release into another
17968-    project's git repository.
17969-
17970-* `--with-rpath=<colon-separated-rpath>`
17971-
17972-    Embed one or more library paths, so that libjemalloc can find the libraries
17973-    it is linked to.  This works only on ELF-based systems.
17974-
17975-* `--with-mangling=<map>`
17976-
17977-    Mangle public symbols specified in <map>, which is a comma-separated list
17978-    of name:mangled pairs.
17979-
17980-    For example, to use ld's --wrap option as an alternative method for
17981-    overriding libc's malloc implementation, specify something like:
17982-
17983-      --with-mangling=malloc:__wrap_malloc,free:__wrap_free[...]
17984-
17985-    Note that mangling happens prior to application of the prefix specified by
17986-    --with-jemalloc-prefix, and mangled symbols are then ignored when applying
17987-    the prefix.
17988-
17989-* `--with-jemalloc-prefix=<prefix>`
17990-
17991-    Prefix all public APIs with <prefix>.  For example, if <prefix> is
17992-    "prefix_", API changes like the following occur:
17993-
17994-      malloc()         --> prefix_malloc()
17995-      malloc_conf      --> prefix_malloc_conf
17996-      /etc/malloc.conf --> /etc/prefix_malloc.conf
17997-      MALLOC_CONF      --> PREFIX_MALLOC_CONF
17998-
17999-    This makes it possible to use jemalloc at the same time as the system
18000-    allocator, or even to use multiple copies of jemalloc simultaneously.
18001-
18002-    By default, the prefix is "", except on OS X, where it is "je_".  On OS X,
18003-    jemalloc overlays the default malloc zone, but makes no attempt to actually
18004-    replace the "malloc", "calloc", etc. symbols.
18005-
18006-* `--without-export`
18007-
18008-    Don't export public APIs.  This can be useful when building jemalloc as a
18009-    static library, or to avoid exporting public APIs when using the zone
18010-    allocator on OSX.
18011-
18012-* `--with-private-namespace=<prefix>`
18013-
18014-    Prefix all library-private APIs with <prefix>je_.  For shared libraries,
18015-    symbol visibility mechanisms prevent these symbols from being exported, but
18016-    for static libraries, naming collisions are a real possibility.  By
18017-    default, <prefix> is empty, which results in a symbol prefix of je_.
18018-
18019-* `--with-install-suffix=<suffix>`
18020-
18021-    Append <suffix> to the base name of all installed files, such that multiple
18022-    versions of jemalloc can coexist in the same installation directory.  For
18023-    example, libjemalloc.so.0 becomes libjemalloc<suffix>.so.0.
18024-
18025-* `--with-malloc-conf=<malloc_conf>`
18026-
18027-    Embed `<malloc_conf>` as a run-time options string that is processed prior to
18028-    the malloc_conf global variable, the /etc/malloc.conf symlink, and the
18029-    MALLOC_CONF environment variable.  For example, to change the default decay
18030-    time to 30 seconds:
18031-
18032-      --with-malloc-conf=decay_ms:30000
18033-
18034-* `--enable-debug`
18035-
18036-    Enable assertions and validation code.  This incurs a substantial
18037-    performance hit, but is very useful during application development.
18038-
18039-* `--disable-stats`
18040-
18041-    Disable statistics gathering functionality.  See the "opt.stats_print"
18042-    option documentation for usage details.
18043-
18044-* `--enable-prof`
18045-
18046-    Enable heap profiling and leak detection functionality.  See the "opt.prof"
18047-    option documentation for usage details.  When enabled, there are several
18048-    approaches to backtracing, and the configure script chooses the first one
18049-    in the following list that appears to function correctly:
18050-
18051-    + libunwind      (requires --enable-prof-libunwind)
18052-    + libgcc         (unless --disable-prof-libgcc)
18053-    + gcc intrinsics (unless --disable-prof-gcc)
18054-
18055-* `--enable-prof-libunwind`
18056-
18057-    Use the libunwind library (http://www.nongnu.org/libunwind/) for stack
18058-    backtracing.
18059-
18060-* `--disable-prof-libgcc`
18061-
18062-    Disable the use of libgcc's backtracing functionality.
18063-
18064-* `--disable-prof-gcc`
18065-
18066-    Disable the use of gcc intrinsics for backtracing.
18067-
18068-* `--with-static-libunwind=<libunwind.a>`
18069-
18070-    Statically link against the specified libunwind.a rather than dynamically
18071-    linking with -lunwind.
18072-
18073-* `--disable-fill`
18074-
18075-    Disable support for junk/zero filling of memory.  See the "opt.junk" and
18076-    "opt.zero" option documentation for usage details.
18077-
18078-* `--disable-zone-allocator`
18079-
18080-    Disable zone allocator for Darwin.  This means jemalloc won't be hooked as
18081-    the default allocator on OSX/iOS.
18082-
18083-* `--enable-utrace`
18084-
18085-    Enable utrace(2)-based allocation tracing.  This feature is not broadly
18086-    portable (FreeBSD has it, but Linux and OS X do not).
18087-
18088-* `--enable-xmalloc`
18089-
18090-    Enable support for optional immediate termination due to out-of-memory
18091-    errors, as is commonly implemented by the "xmalloc" wrapper function for
18092-    malloc.  See the "opt.xmalloc" option documentation for usage details.
18093-
18094-* `--enable-lazy-lock`
18095-
18096-    Enable code that wraps pthread_create() to detect when an application
18097-    switches from single-threaded to multi-threaded mode, so that it can avoid
18098-    mutex locking/unlocking operations while in single-threaded mode.  In
18099-    practice, this feature usually has little impact on performance unless
18100-    thread-specific caching is disabled.
18101-
18102-* `--disable-cache-oblivious`
18103-
18104-    Disable cache-oblivious large allocation alignment by default, for large
18105-    allocation requests with no alignment constraints.  If this feature is
18106-    disabled, all large allocations are page-aligned as an implementation
18107-    artifact, which can severely harm CPU cache utilization.  However, the
18108-    cache-oblivious layout comes at the cost of one extra page per large
18109-    allocation, which in the most extreme case increases physical memory usage
18110-    for the 16 KiB size class to 20 KiB.
18111-
18112-* `--disable-syscall`
18113-
18114-    Disable use of syscall(2) in favor of the {open,read,write,close}(2)
18115-    library wrappers.  This is intended as a workaround for systems that place
18116-    security limitations on syscall(2).
18117-
18118-* `--disable-cxx`
18119-
18120-    Disable C++ integration.  This will cause new and delete operator
18121-    implementations to be omitted.
18122-
18123-* `--with-xslroot=<path>`
18124-
18125-    Specify where to find DocBook XSL stylesheets when building the
18126-    documentation.
18127-
18128-* `--with-lg-page=<lg-page>`
18129-
18130-    Specify the base 2 log of the allocator page size, which must in turn be at
18131-    least as large as the system page size.  By default the configure script
18132-    determines the host's page size and sets the allocator page size equal to
18133-    the system page size, so this option need not be specified unless the
18134-    system page size may change between configuration and execution, e.g. when
18135-    cross compiling.
18136-
18137-* `--with-lg-hugepage=<lg-hugepage>`
18138-
18139-    Specify the base 2 log of the system huge page size.  This option is useful
18140-    when cross compiling, or when overriding the default for systems that do
18141-    not explicitly support huge pages.
18142-
18143-* `--with-lg-quantum=<lg-quantum>`
18144-
18145-    Specify the base 2 log of the minimum allocation alignment.  jemalloc needs
18146-    to know the minimum alignment that meets the following C standard
18147-    requirement (quoted from the April 12, 2011 draft of the C11 standard):
18148-
18149-    > The pointer returned if the allocation succeeds is suitably aligned so
-    > that it may be assigned to a pointer to any type of object with a
-    > fundamental alignment requirement and then used to access such an object
-    > or an array of such objects in the space allocated [...]
18153-
18154-    This setting is architecture-specific, and although jemalloc includes known
18155-    safe values for the most commonly used modern architectures, there is a
18156-    wrinkle related to GNU libc (glibc) that may impact your choice of
18157-    <lg-quantum>.  On most modern architectures, this mandates 16-byte
18158-    alignment (<lg-quantum>=4), but the glibc developers chose not to meet this
18159-    requirement for performance reasons.  An old discussion can be found at
18160-    <https://sourceware.org/bugzilla/show_bug.cgi?id=206> .  Unlike glibc,
18161-    jemalloc does follow the C standard by default (caveat: jemalloc
18162-    technically cheats for size classes smaller than the quantum), but the fact
18163-    that Linux systems already work around this allocator noncompliance means
18164-    that it is generally safe in practice to let jemalloc's minimum alignment
18165-    follow glibc's lead.  If you specify `--with-lg-quantum=3` during
18166-    configuration, jemalloc will provide additional size classes that are not
18167-    16-byte-aligned (24, 40, and 56).
18168-
18169-* `--with-lg-vaddr=<lg-vaddr>`
18170-
18171-    Specify the number of significant virtual address bits.  By default, the
18172-    configure script attempts to detect virtual address size on those platforms
18173-    where it knows how, and picks a default otherwise.  This option may be
18174-    useful when cross-compiling.
18175-
18176-* `--disable-initial-exec-tls`
18177-
18178-    Disable the initial-exec TLS model for jemalloc's internal thread-local
18179-    storage (on those platforms that support explicit settings).  This can allow
18180-    jemalloc to be dynamically loaded after program startup (e.g. using dlopen).
18181-    Note that in this case, there will be two malloc implementations operating
18182-    in the same process, which will almost certainly result in confusing runtime
18183-    crashes if pointers leak from one implementation to the other.
18184-
18185-* `--disable-libdl`
18186-
18187-    Disable the usage of libdl, namely dlsym(3), which is required by the lazy
18188-    lock option.  This can allow building static binaries.
18189-
18190-The following environment variables (not a definitive list) impact configure's
18191-behavior:
18192-
18193-* `CFLAGS="?"`
18194-* `CXXFLAGS="?"`
18195-
18196-    Pass these flags to the C/C++ compiler.  Any flags set by the configure
18197-    script are prepended, which means explicitly set flags generally take
18198-    precedence.  Take care when specifying flags such as -Werror, because
18199-    configure tests may be affected in undesirable ways.
18200-
18201-* `EXTRA_CFLAGS="?"`
18202-* `EXTRA_CXXFLAGS="?"`
18203-
18204-    Append these flags to CFLAGS/CXXFLAGS, without passing them to the
18205-    compiler(s) during configuration.  This makes it possible to add flags such
18206-    as -Werror, while allowing the configure script to determine what other
18207-    flags are appropriate for the specified configuration.
18208-
18209-* `CPPFLAGS="?"`
18210-
18211-    Pass these flags to the C preprocessor.  Note that CFLAGS is not passed to
18212-    'cpp' when 'configure' is looking for include files, so you must use
18213-    CPPFLAGS instead if you need to help 'configure' find header files.
18214-
18215-* `LD_LIBRARY_PATH="?"`
18216-
18217-    'ld' uses this colon-separated list to find libraries.
18218-
18219-* `LDFLAGS="?"`
18220-
18221-    Pass these flags when linking.
18222-
18223-* `PATH="?"`
18224-
18225-    'configure' uses this to find programs.
18226-
18227-In some cases it may be necessary to work around configuration results that do
18228-not match reality.  For example, Linux 4.5 added support for the MADV_FREE flag
18229-to madvise(2), which can cause problems if building on a host with MADV_FREE
18230-support and deploying to a target without.  To work around this, use a cache
18231-file to override the relevant configuration variable defined in configure.ac,
18232-e.g.:
18233-
18234-    echo "je_cv_madv_free=no" > config.cache && ./configure -C
18235-
18236-
18237-## Advanced compilation
18238-
18239-To build only parts of jemalloc, use the following targets:
18240-
18241-    build_lib_shared
18242-    build_lib_static
18243-    build_lib
18244-    build_doc_html
18245-    build_doc_man
18246-    build_doc
18247-
18248-To install only parts of jemalloc, use the following targets:
18249-
18250-    install_bin
18251-    install_include
18252-    install_lib_shared
18253-    install_lib_static
18254-    install_lib_pc
18255-    install_lib
18256-    install_doc_html
18257-    install_doc_man
18258-    install_doc
18259-
18260-To clean up build results to varying degrees, use the following make targets:
18261-
18262-    clean
18263-    distclean
18264-    relclean
18265-
18266-
18267-## Advanced installation
18268-
18269-Optionally, define make variables when invoking make, including (not
18270-exclusively):
18271-
18272-* `INCLUDEDIR="?"`
18273-
18274-    Use this as the installation prefix for header files.
18275-
18276-* `LIBDIR="?"`
18277-
18278-    Use this as the installation prefix for libraries.
18279-
18280-* `MANDIR="?"`
18281-
18282-    Use this as the installation prefix for man pages.
18283-
18284-* `DESTDIR="?"`
18285-
18286-    Prepend DESTDIR to INCLUDEDIR, LIBDIR, DATADIR, and MANDIR.  This is useful
18287-    when installing to a different path than was specified via --prefix.
18288-
18289-* `CC="?"`
18290-
18291-    Use this to invoke the C compiler.
18292-
18293-* `CFLAGS="?"`
18294-
18295-    Pass these flags to the compiler.
18296-
18297-* `CPPFLAGS="?"`
18298-
18299-    Pass these flags to the C preprocessor.
18300-
18301-* `LDFLAGS="?"`
18302-
18303-    Pass these flags when linking.
18304-
18305-* `PATH="?"`
18306-
18307-    Use this to search for programs used during configuration and building.
18308-
18309-
18310-## Development
18311-
18312-If you intend to make non-trivial changes to jemalloc, use the 'autogen.sh'
18313-script rather than 'configure'.  This re-generates 'configure', enables
18314-configuration dependency rules, and enables re-generation of automatically
18315-generated source files.
18316-
18317-The build system supports using an object directory separate from the source
18318-tree.  For example, you can create an 'obj' directory, and from within that
18319-directory, issue configuration and build commands:
18320-
18321-    autoconf
18322-    mkdir obj
18323-    cd obj
18324-    ../configure --enable-autogen
18325-    make
18326-
18327-
18328-## Documentation
18329-
18330-The manual page is generated in both html and roff formats.  Any web browser
18331-can be used to view the html manual.  The roff manual page can be formatted
18332-prior to installation via the following command:
18333-
18334-    nroff -man -t doc/jemalloc.3
18335diff --git a/jemalloc/Makefile.in b/jemalloc/Makefile.in
18336deleted file mode 100644
18337index 6809fb2..0000000
18338--- a/jemalloc/Makefile.in
18339+++ /dev/null
18340@@ -1,768 +0,0 @@
18341-# Clear out all vpaths, then set just one (default vpath) for the main build
18342-# directory.
18343-vpath
18344-vpath % .
18345-
18346-# Clear the default suffixes, so that built-in rules are not used.
18347-.SUFFIXES :
18348-
18349-SHELL := /bin/sh
18350-
18351-CC := @CC@
18352-CXX := @CXX@
18353-
18354-# Configuration parameters.
18355-DESTDIR =
18356-BINDIR := $(DESTDIR)@BINDIR@
18357-INCLUDEDIR := $(DESTDIR)@INCLUDEDIR@
18358-LIBDIR := $(DESTDIR)@LIBDIR@
18359-DATADIR := $(DESTDIR)@DATADIR@
18360-MANDIR := $(DESTDIR)@MANDIR@
18361-srcroot := @srcroot@
18362-objroot := @objroot@
18363-abs_srcroot := @abs_srcroot@
18364-abs_objroot := @abs_objroot@
18365-
18366-# Build parameters.
18367-CPPFLAGS := @CPPFLAGS@ -I$(objroot)include -I$(srcroot)include
18368-CONFIGURE_CFLAGS := @CONFIGURE_CFLAGS@
18369-SPECIFIED_CFLAGS := @SPECIFIED_CFLAGS@
18370-EXTRA_CFLAGS := @EXTRA_CFLAGS@
18371-CFLAGS := $(strip $(CONFIGURE_CFLAGS) $(SPECIFIED_CFLAGS) $(EXTRA_CFLAGS))
18372-CONFIGURE_CXXFLAGS := @CONFIGURE_CXXFLAGS@
18373-SPECIFIED_CXXFLAGS := @SPECIFIED_CXXFLAGS@
18374-EXTRA_CXXFLAGS := @EXTRA_CXXFLAGS@
18375-CXXFLAGS := $(strip $(CONFIGURE_CXXFLAGS) $(SPECIFIED_CXXFLAGS) $(EXTRA_CXXFLAGS))
18376-LDFLAGS := @LDFLAGS@
18377-EXTRA_LDFLAGS := @EXTRA_LDFLAGS@
18378-LIBS := @LIBS@
18379-RPATH_EXTRA := @RPATH_EXTRA@
18380-SO := @so@
18381-IMPORTLIB := @importlib@
18382-O := @o@
18383-A := @a@
18384-EXE := @exe@
18385-LIBPREFIX := @libprefix@
18386-REV := @rev@
18387-install_suffix := @install_suffix@
18388-ABI := @abi@
18389-XSLTPROC := @XSLTPROC@
18390-XSLROOT := @XSLROOT@
18391-AUTOCONF := @AUTOCONF@
18392-_RPATH = @RPATH@
18393-RPATH = $(if $(1),$(call _RPATH,$(1)))
18394-cfghdrs_in := $(addprefix $(srcroot),@cfghdrs_in@)
18395-cfghdrs_out := @cfghdrs_out@
18396-cfgoutputs_in := $(addprefix $(srcroot),@cfgoutputs_in@)
18397-cfgoutputs_out := @cfgoutputs_out@
18398-enable_autogen := @enable_autogen@
18399-enable_doc := @enable_doc@
18400-enable_shared := @enable_shared@
18401-enable_static := @enable_static@
18402-enable_prof := @enable_prof@
18403-enable_zone_allocator := @enable_zone_allocator@
18404-enable_experimental_smallocx := @enable_experimental_smallocx@
18405-MALLOC_CONF := @JEMALLOC_CPREFIX@MALLOC_CONF
18406-link_whole_archive := @link_whole_archive@
18407-DSO_LDFLAGS = @DSO_LDFLAGS@
18408-SOREV = @SOREV@
18409-PIC_CFLAGS = @PIC_CFLAGS@
18410-CTARGET = @CTARGET@
18411-LDTARGET = @LDTARGET@
18412-TEST_LD_MODE = @TEST_LD_MODE@
18413-MKLIB = @MKLIB@
18414-AR = @AR@
18415-ARFLAGS = @ARFLAGS@
18416-DUMP_SYMS = @DUMP_SYMS@
18417-AWK := @AWK@
18418-CC_MM = @CC_MM@
18419-LM := @LM@
18420-INSTALL = @INSTALL@
18421-
18422-ifeq (macho, $(ABI))
18423-TEST_LIBRARY_PATH := DYLD_FALLBACK_LIBRARY_PATH="$(objroot)lib"
18424-else
18425-ifeq (pecoff, $(ABI))
18426-TEST_LIBRARY_PATH := PATH="$(PATH):$(objroot)lib"
18427-else
18428-TEST_LIBRARY_PATH :=
18429-endif
18430-endif
18431-
18432-LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix)
18433-
18434-# Lists of files.
18435-BINS := $(objroot)bin/jemalloc-config $(objroot)bin/jemalloc.sh $(objroot)bin/jeprof
18436-C_HDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h
18437-C_SRCS := $(srcroot)src/jemalloc.c \
18438-	$(srcroot)src/arena.c \
18439-	$(srcroot)src/background_thread.c \
18440-	$(srcroot)src/base.c \
18441-	$(srcroot)src/bin.c \
18442-	$(srcroot)src/bin_info.c \
18443-	$(srcroot)src/bitmap.c \
18444-	$(srcroot)src/buf_writer.c \
18445-	$(srcroot)src/cache_bin.c \
18446-	$(srcroot)src/ckh.c \
18447-	$(srcroot)src/counter.c \
18448-	$(srcroot)src/ctl.c \
18449-	$(srcroot)src/decay.c \
18450-	$(srcroot)src/div.c \
18451-	$(srcroot)src/ecache.c \
18452-	$(srcroot)src/edata.c \
18453-	$(srcroot)src/edata_cache.c \
18454-	$(srcroot)src/ehooks.c \
18455-	$(srcroot)src/emap.c \
18456-	$(srcroot)src/eset.c \
18457-	$(srcroot)src/exp_grow.c \
18458-	$(srcroot)src/extent.c \
18459-	$(srcroot)src/extent_dss.c \
18460-	$(srcroot)src/extent_mmap.c \
18461-	$(srcroot)src/fxp.c \
18462-	$(srcroot)src/san.c \
18463-	$(srcroot)src/san_bump.c \
18464-	$(srcroot)src/hook.c \
18465-	$(srcroot)src/hpa.c \
18466-	$(srcroot)src/hpa_hooks.c \
18467-	$(srcroot)src/hpdata.c \
18468-	$(srcroot)src/inspect.c \
18469-	$(srcroot)src/large.c \
18470-	$(srcroot)src/log.c \
18471-	$(srcroot)src/malloc_io.c \
18472-	$(srcroot)src/mutex.c \
18473-	$(srcroot)src/nstime.c \
18474-	$(srcroot)src/pa.c \
18475-	$(srcroot)src/pa_extra.c \
18476-	$(srcroot)src/pai.c \
18477-	$(srcroot)src/pac.c \
18478-	$(srcroot)src/pages.c \
18479-	$(srcroot)src/peak_event.c \
18480-	$(srcroot)src/prof.c \
18481-	$(srcroot)src/prof_data.c \
18482-	$(srcroot)src/prof_log.c \
18483-	$(srcroot)src/prof_recent.c \
18484-	$(srcroot)src/prof_stats.c \
18485-	$(srcroot)src/prof_sys.c \
18486-	$(srcroot)src/psset.c \
18487-	$(srcroot)src/rtree.c \
18488-	$(srcroot)src/safety_check.c \
18489-	$(srcroot)src/sc.c \
18490-	$(srcroot)src/sec.c \
18491-	$(srcroot)src/stats.c \
18492-	$(srcroot)src/sz.c \
18493-	$(srcroot)src/tcache.c \
18494-	$(srcroot)src/test_hooks.c \
18495-	$(srcroot)src/thread_event.c \
18496-	$(srcroot)src/ticker.c \
18497-	$(srcroot)src/tsd.c \
18498-	$(srcroot)src/witness.c
18499-ifeq ($(enable_zone_allocator), 1)
18500-C_SRCS += $(srcroot)src/zone.c
18501-endif
18502-ifeq ($(IMPORTLIB),$(SO))
18503-STATIC_LIBS := $(objroot)lib/$(LIBJEMALLOC).$(A)
18504-endif
18505-ifdef PIC_CFLAGS
18506-STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_pic.$(A)
18507-else
18508-STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_s.$(A)
18509-endif
18510-DSOS := $(objroot)lib/$(LIBJEMALLOC).$(SOREV)
18511-ifneq ($(SOREV),$(SO))
18512-DSOS += $(objroot)lib/$(LIBJEMALLOC).$(SO)
18513-endif
18514-ifeq (1, $(link_whole_archive))
18515-LJEMALLOC := -Wl,--whole-archive -L$(objroot)lib -l$(LIBJEMALLOC) -Wl,--no-whole-archive
18516-else
18517-LJEMALLOC := $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
18518-endif
18519-PC := $(objroot)jemalloc.pc
18520-DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml
18521-DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.html)
18522-DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.3)
18523-DOCS := $(DOCS_HTML) $(DOCS_MAN3)
18524-C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c $(srcroot)test/src/btalloc_0.c \
18525-	$(srcroot)test/src/btalloc_1.c $(srcroot)test/src/math.c \
18526-	$(srcroot)test/src/mtx.c $(srcroot)test/src/sleep.c \
18527-	$(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \
18528-	$(srcroot)test/src/thd.c $(srcroot)test/src/timer.c
18529-ifeq (1, $(link_whole_archive))
18530-C_UTIL_INTEGRATION_SRCS :=
18531-C_UTIL_CPP_SRCS :=
18532-else
18533-C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c \
18534-	$(srcroot)src/ticker.c
18535-C_UTIL_CPP_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c
18536-endif
18537-TESTS_UNIT := \
18538-	$(srcroot)test/unit/a0.c \
18539-	$(srcroot)test/unit/arena_decay.c \
18540-	$(srcroot)test/unit/arena_reset.c \
18541-	$(srcroot)test/unit/atomic.c \
18542-	$(srcroot)test/unit/background_thread.c \
18543-	$(srcroot)test/unit/background_thread_enable.c \
18544-	$(srcroot)test/unit/base.c \
18545-	$(srcroot)test/unit/batch_alloc.c \
18546-	$(srcroot)test/unit/binshard.c \
18547-	$(srcroot)test/unit/bitmap.c \
18548-	$(srcroot)test/unit/bit_util.c \
18549-	$(srcroot)test/unit/buf_writer.c \
18550-	$(srcroot)test/unit/cache_bin.c \
18551-	$(srcroot)test/unit/ckh.c \
18552-	$(srcroot)test/unit/counter.c \
18553-	$(srcroot)test/unit/decay.c \
18554-	$(srcroot)test/unit/div.c \
18555-	$(srcroot)test/unit/double_free.c \
18556-	$(srcroot)test/unit/edata_cache.c \
18557-	$(srcroot)test/unit/emitter.c \
18558-	$(srcroot)test/unit/extent_quantize.c \
18559-	${srcroot}test/unit/fb.c \
18560-	$(srcroot)test/unit/fork.c \
18561-	${srcroot}test/unit/fxp.c \
18562-	${srcroot}test/unit/san.c \
18563-	${srcroot}test/unit/san_bump.c \
18564-	$(srcroot)test/unit/hash.c \
18565-	$(srcroot)test/unit/hook.c \
18566-	$(srcroot)test/unit/hpa.c \
18567-	$(srcroot)test/unit/hpa_background_thread.c \
18568-	$(srcroot)test/unit/hpdata.c \
18569-	$(srcroot)test/unit/huge.c \
18570-	$(srcroot)test/unit/inspect.c \
18571-	$(srcroot)test/unit/junk.c \
18572-	$(srcroot)test/unit/junk_alloc.c \
18573-	$(srcroot)test/unit/junk_free.c \
18574-	$(srcroot)test/unit/log.c \
18575-	$(srcroot)test/unit/mallctl.c \
18576-	$(srcroot)test/unit/malloc_conf_2.c \
18577-	$(srcroot)test/unit/malloc_io.c \
18578-	$(srcroot)test/unit/math.c \
18579-	$(srcroot)test/unit/mpsc_queue.c \
18580-	$(srcroot)test/unit/mq.c \
18581-	$(srcroot)test/unit/mtx.c \
18582-	$(srcroot)test/unit/nstime.c \
18583-	$(srcroot)test/unit/oversize_threshold.c \
18584-	$(srcroot)test/unit/pa.c \
18585-	$(srcroot)test/unit/pack.c \
18586-	$(srcroot)test/unit/pages.c \
18587-	$(srcroot)test/unit/peak.c \
18588-	$(srcroot)test/unit/ph.c \
18589-	$(srcroot)test/unit/prng.c \
18590-	$(srcroot)test/unit/prof_accum.c \
18591-	$(srcroot)test/unit/prof_active.c \
18592-	$(srcroot)test/unit/prof_gdump.c \
18593-	$(srcroot)test/unit/prof_hook.c \
18594-	$(srcroot)test/unit/prof_idump.c \
18595-	$(srcroot)test/unit/prof_log.c \
18596-	$(srcroot)test/unit/prof_mdump.c \
18597-	$(srcroot)test/unit/prof_recent.c \
18598-	$(srcroot)test/unit/prof_reset.c \
18599-	$(srcroot)test/unit/prof_stats.c \
18600-	$(srcroot)test/unit/prof_tctx.c \
18601-	$(srcroot)test/unit/prof_thread_name.c \
18602-	$(srcroot)test/unit/prof_sys_thread_name.c \
18603-	$(srcroot)test/unit/psset.c \
18604-	$(srcroot)test/unit/ql.c \
18605-	$(srcroot)test/unit/qr.c \
18606-	$(srcroot)test/unit/rb.c \
18607-	$(srcroot)test/unit/retained.c \
18608-	$(srcroot)test/unit/rtree.c \
18609-	$(srcroot)test/unit/safety_check.c \
18610-	$(srcroot)test/unit/sc.c \
18611-	$(srcroot)test/unit/sec.c \
18612-	$(srcroot)test/unit/seq.c \
18613-	$(srcroot)test/unit/SFMT.c \
18614-	$(srcroot)test/unit/size_check.c \
18615-	$(srcroot)test/unit/size_classes.c \
18616-	$(srcroot)test/unit/slab.c \
18617-	$(srcroot)test/unit/smoothstep.c \
18618-	$(srcroot)test/unit/spin.c \
18619-	$(srcroot)test/unit/stats.c \
18620-	$(srcroot)test/unit/stats_print.c \
18621-	$(srcroot)test/unit/sz.c \
18622-	$(srcroot)test/unit/tcache_max.c \
18623-	$(srcroot)test/unit/test_hooks.c \
18624-	$(srcroot)test/unit/thread_event.c \
18625-	$(srcroot)test/unit/ticker.c \
18626-	$(srcroot)test/unit/tsd.c \
18627-	$(srcroot)test/unit/uaf.c \
18628-	$(srcroot)test/unit/witness.c \
18629-	$(srcroot)test/unit/zero.c \
18630-	$(srcroot)test/unit/zero_realloc_abort.c \
18631-	$(srcroot)test/unit/zero_realloc_free.c \
18632-	$(srcroot)test/unit/zero_realloc_alloc.c \
18633-	$(srcroot)test/unit/zero_reallocs.c
18634-ifeq (@enable_prof@, 1)
18635-TESTS_UNIT += \
18636-	$(srcroot)test/unit/arena_reset_prof.c \
18637-	$(srcroot)test/unit/batch_alloc_prof.c
18638-endif
18639-TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
18640-	$(srcroot)test/integration/allocated.c \
18641-	$(srcroot)test/integration/extent.c \
18642-	$(srcroot)test/integration/malloc.c \
18643-	$(srcroot)test/integration/mallocx.c \
18644-	$(srcroot)test/integration/MALLOCX_ARENA.c \
18645-	$(srcroot)test/integration/overflow.c \
18646-	$(srcroot)test/integration/posix_memalign.c \
18647-	$(srcroot)test/integration/rallocx.c \
18648-	$(srcroot)test/integration/sdallocx.c \
18649-	$(srcroot)test/integration/slab_sizes.c \
18650-	$(srcroot)test/integration/thread_arena.c \
18651-	$(srcroot)test/integration/thread_tcache_enabled.c \
18652-	$(srcroot)test/integration/xallocx.c
18653-ifeq (@enable_experimental_smallocx@, 1)
18654-TESTS_INTEGRATION += \
18655-  $(srcroot)test/integration/smallocx.c
18656-endif
18657-ifeq (@enable_cxx@, 1)
18658-CPP_SRCS := $(srcroot)src/jemalloc_cpp.cpp
18659-TESTS_INTEGRATION_CPP := $(srcroot)test/integration/cpp/basic.cpp \
18660-	$(srcroot)test/integration/cpp/infallible_new_true.cpp \
18661-	$(srcroot)test/integration/cpp/infallible_new_false.cpp
18662-else
18663-CPP_SRCS :=
18664-TESTS_INTEGRATION_CPP :=
18665-endif
18666-TESTS_ANALYZE := $(srcroot)test/analyze/prof_bias.c \
18667-	$(srcroot)test/analyze/rand.c \
18668-	$(srcroot)test/analyze/sizes.c
18669-TESTS_STRESS := $(srcroot)test/stress/batch_alloc.c \
18670-	$(srcroot)test/stress/fill_flush.c \
18671-	$(srcroot)test/stress/hookbench.c \
18672-	$(srcroot)test/stress/large_microbench.c \
18673-	$(srcroot)test/stress/mallctl.c \
18674-	$(srcroot)test/stress/microbench.c
18675-
18676-
18677-TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_INTEGRATION_CPP) \
18678-	$(TESTS_ANALYZE) $(TESTS_STRESS)
18679-
18680-PRIVATE_NAMESPACE_HDRS := $(objroot)include/jemalloc/internal/private_namespace.h $(objroot)include/jemalloc/internal/private_namespace_jet.h
18681-PRIVATE_NAMESPACE_GEN_HDRS := $(PRIVATE_NAMESPACE_HDRS:%.h=%.gen.h)
18682-C_SYM_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.sym.$(O))
18683-C_SYMS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.sym)
18684-C_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.$(O))
18685-CPP_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.$(O))
18686-C_PIC_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.pic.$(O))
18687-CPP_PIC_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.pic.$(O))
18688-C_JET_SYM_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.sym.$(O))
18689-C_JET_SYMS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.sym)
18690-C_JET_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.$(O))
18691-C_TESTLIB_UNIT_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.unit.$(O))
18692-C_TESTLIB_INTEGRATION_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O))
18693-C_UTIL_INTEGRATION_OBJS := $(C_UTIL_INTEGRATION_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O))
18694-C_TESTLIB_ANALYZE_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.analyze.$(O))
18695-C_TESTLIB_STRESS_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.stress.$(O))
18696-C_TESTLIB_OBJS := $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) \
18697-	$(C_UTIL_INTEGRATION_OBJS) $(C_TESTLIB_ANALYZE_OBJS) \
18698-	$(C_TESTLIB_STRESS_OBJS)
18699-
18700-TESTS_UNIT_OBJS := $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%.$(O))
18701-TESTS_INTEGRATION_OBJS := $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%.$(O))
18702-TESTS_INTEGRATION_CPP_OBJS := $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%.$(O))
18703-TESTS_ANALYZE_OBJS := $(TESTS_ANALYZE:$(srcroot)%.c=$(objroot)%.$(O))
18704-TESTS_STRESS_OBJS := $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%.$(O))
18705-TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_ANALYZE_OBJS) \
18706-	$(TESTS_STRESS_OBJS)
18707-TESTS_CPP_OBJS := $(TESTS_INTEGRATION_CPP_OBJS)
18708-
18709-.PHONY: all dist build_doc_html build_doc_man build_doc
18710-.PHONY: install_bin install_include install_lib
18711-.PHONY: install_doc_html install_doc_man install_doc install
18712-.PHONY: tests check clean distclean relclean
18713-
18714-.SECONDARY : $(PRIVATE_NAMESPACE_GEN_HDRS) $(TESTS_OBJS) $(TESTS_CPP_OBJS)
18715-
18716-# Default target.
18717-all: build_lib
18718-
18719-dist: build_doc
18720-
18721-$(objroot)doc/%$(install_suffix).html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl
18722-ifneq ($(XSLROOT),)
18723-	$(XSLTPROC) -o $@ $(objroot)doc/html.xsl $<
18724-else
18725-ifeq ($(wildcard $(DOCS_HTML)),)
18726-	@echo "<p>Missing xsltproc.  Doc not built.</p>" > $@
18727-endif
18728-	@echo "Missing xsltproc.  "$@" not (re)built."
18729-endif
18730-
18731-$(objroot)doc/%$(install_suffix).3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl
18732-ifneq ($(XSLROOT),)
18733-	$(XSLTPROC) -o $@ $(objroot)doc/manpages.xsl $<
18734-# The -o option (output filename) of xsltproc may not work (it uses the
18735-# <refname> in the .xml file).  Manually add the suffix if so.
18736-  ifneq ($(install_suffix),)
18737-	@if [ -f $(objroot)doc/jemalloc.3 ]; then \
18738-		mv $(objroot)doc/jemalloc.3 $(objroot)doc/jemalloc$(install_suffix).3 ; \
18739-	fi
18740-  endif
18741-else
18742-ifeq ($(wildcard $(DOCS_MAN3)),)
18743-	@echo "Missing xsltproc.  Doc not built." > $@
18744-endif
18745-	@echo "Missing xsltproc.  "$@" not (re)built."
18746-endif
18747-
18748-build_doc_html: $(DOCS_HTML)
18749-build_doc_man: $(DOCS_MAN3)
18750-build_doc: $(DOCS)
18751-
18752-#
18753-# Include generated dependency files.
18754-#
18755-ifdef CC_MM
18756--include $(C_SYM_OBJS:%.$(O)=%.d)
18757--include $(C_OBJS:%.$(O)=%.d)
18758--include $(CPP_OBJS:%.$(O)=%.d)
18759--include $(C_PIC_OBJS:%.$(O)=%.d)
18760--include $(CPP_PIC_OBJS:%.$(O)=%.d)
18761--include $(C_JET_SYM_OBJS:%.$(O)=%.d)
18762--include $(C_JET_OBJS:%.$(O)=%.d)
18763--include $(C_TESTLIB_OBJS:%.$(O)=%.d)
18764--include $(TESTS_OBJS:%.$(O)=%.d)
18765--include $(TESTS_CPP_OBJS:%.$(O)=%.d)
18766-endif
18767-
18768-$(C_SYM_OBJS): $(objroot)src/%.sym.$(O): $(srcroot)src/%.c
18769-$(C_SYM_OBJS): CPPFLAGS += -DJEMALLOC_NO_PRIVATE_NAMESPACE
18770-$(C_SYMS): $(objroot)src/%.sym: $(objroot)src/%.sym.$(O)
18771-$(C_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c
18772-$(CPP_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.cpp
18773-$(C_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c
18774-$(C_PIC_OBJS): CFLAGS += $(PIC_CFLAGS)
18775-$(CPP_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.cpp
18776-$(CPP_PIC_OBJS): CXXFLAGS += $(PIC_CFLAGS)
18777-$(C_JET_SYM_OBJS): $(objroot)src/%.jet.sym.$(O): $(srcroot)src/%.c
18778-$(C_JET_SYM_OBJS): CPPFLAGS += -DJEMALLOC_JET -DJEMALLOC_NO_PRIVATE_NAMESPACE
18779-$(C_JET_SYMS): $(objroot)src/%.jet.sym: $(objroot)src/%.jet.sym.$(O)
18780-$(C_JET_OBJS): $(objroot)src/%.jet.$(O): $(srcroot)src/%.c
18781-$(C_JET_OBJS): CPPFLAGS += -DJEMALLOC_JET
18782-$(C_TESTLIB_UNIT_OBJS): $(objroot)test/src/%.unit.$(O): $(srcroot)test/src/%.c
18783-$(C_TESTLIB_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
18784-$(C_TESTLIB_INTEGRATION_OBJS): $(objroot)test/src/%.integration.$(O): $(srcroot)test/src/%.c
18785-$(C_TESTLIB_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST
18786-$(C_UTIL_INTEGRATION_OBJS): $(objroot)src/%.integration.$(O): $(srcroot)src/%.c
18787-$(C_TESTLIB_ANALYZE_OBJS): $(objroot)test/src/%.analyze.$(O): $(srcroot)test/src/%.c
18788-$(C_TESTLIB_ANALYZE_OBJS): CPPFLAGS += -DJEMALLOC_ANALYZE_TEST
18789-$(C_TESTLIB_STRESS_OBJS): $(objroot)test/src/%.stress.$(O): $(srcroot)test/src/%.c
18790-$(C_TESTLIB_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST -DJEMALLOC_STRESS_TESTLIB
18791-$(C_TESTLIB_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
18792-$(TESTS_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
18793-$(TESTS_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST
18794-$(TESTS_INTEGRATION_CPP_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_CPP_TEST
18795-$(TESTS_ANALYZE_OBJS): CPPFLAGS += -DJEMALLOC_ANALYZE_TEST
18796-$(TESTS_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST
18797-$(TESTS_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c
18798-$(TESTS_CPP_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.cpp
18799-$(TESTS_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
18800-$(TESTS_CPP_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
18801-ifneq ($(IMPORTLIB),$(SO))
18802-$(CPP_OBJS) $(C_SYM_OBJS) $(C_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS): CPPFLAGS += -DDLLEXPORT
18803-endif
18804-
18805-# Dependencies.
18806-ifndef CC_MM
18807-HEADER_DIRS = $(srcroot)include/jemalloc/internal \
18808-	$(objroot)include/jemalloc $(objroot)include/jemalloc/internal
18809-HEADERS = $(filter-out $(PRIVATE_NAMESPACE_HDRS),$(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h)))
18810-$(C_SYM_OBJS) $(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS) $(TESTS_CPP_OBJS): $(HEADERS)
18811-$(TESTS_OBJS) $(TESTS_CPP_OBJS): $(objroot)test/include/test/jemalloc_test.h
18812-endif
18813-
18814-$(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_INTEGRATION_CPP_OBJS): $(objroot)include/jemalloc/internal/private_namespace.h
18815-$(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_ANALYZE_OBJS) $(C_TESTLIB_STRESS_OBJS) $(TESTS_UNIT_OBJS) $(TESTS_ANALYZE_OBJS) $(TESTS_STRESS_OBJS): $(objroot)include/jemalloc/internal/private_namespace_jet.h
18816-
18817-$(C_SYM_OBJS) $(C_OBJS) $(C_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O):
18818-	@mkdir -p $(@D)
18819-	$(CC) $(CFLAGS) -c $(CPPFLAGS) $(CTARGET) $<
18820-ifdef CC_MM
18821-	@$(CC) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $<
18822-endif
18823-
18824-$(C_SYMS): %.sym:
18825-	@mkdir -p $(@D)
18826-	$(DUMP_SYMS) $< | $(AWK) -f $(objroot)include/jemalloc/internal/private_symbols.awk > $@
18827-
18828-$(C_JET_SYMS): %.sym:
18829-	@mkdir -p $(@D)
18830-	$(DUMP_SYMS) $< | $(AWK) -f $(objroot)include/jemalloc/internal/private_symbols_jet.awk > $@
18831-
18832-$(objroot)include/jemalloc/internal/private_namespace.gen.h: $(C_SYMS)
18833-	$(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@
18834-
18835-$(objroot)include/jemalloc/internal/private_namespace_jet.gen.h: $(C_JET_SYMS)
18836-	$(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@
18837-
18838-%.h: %.gen.h
18839-	@if ! `cmp -s $< $@` ; then echo "cp $< $@"; cp $< $@ ; fi
18840-
18841-$(CPP_OBJS) $(CPP_PIC_OBJS) $(TESTS_CPP_OBJS): %.$(O):
18842-	@mkdir -p $(@D)
18843-	$(CXX) $(CXXFLAGS) -c $(CPPFLAGS) $(CTARGET) $<
18844-ifdef CC_MM
18845-	@$(CXX) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $<
18846-endif
18847-
18848-ifneq ($(SOREV),$(SO))
18849-%.$(SO) : %.$(SOREV)
18850-	@mkdir -p $(@D)
18851-	ln -sf $(<F) $@
18852-endif
18853-
18854-$(objroot)lib/$(LIBJEMALLOC).$(SOREV) : $(if $(PIC_CFLAGS),$(C_PIC_OBJS),$(C_OBJS)) $(if $(PIC_CFLAGS),$(CPP_PIC_OBJS),$(CPP_OBJS))
18855-	@mkdir -p $(@D)
18856-	$(CC) $(DSO_LDFLAGS) $(call RPATH,$(RPATH_EXTRA)) $(LDTARGET) $+ $(LDFLAGS) $(LIBS) $(EXTRA_LDFLAGS)
18857-
18858-$(objroot)lib/$(LIBJEMALLOC)_pic.$(A) : $(C_PIC_OBJS) $(CPP_PIC_OBJS)
18859-$(objroot)lib/$(LIBJEMALLOC).$(A) : $(C_OBJS) $(CPP_OBJS)
18860-$(objroot)lib/$(LIBJEMALLOC)_s.$(A) : $(C_OBJS) $(CPP_OBJS)
18861-
18862-$(STATIC_LIBS):
18863-	@mkdir -p $(@D)
18864-	$(AR) $(ARFLAGS)@AROUT@ $+
18865-
18866-$(objroot)test/unit/%$(EXE): $(objroot)test/unit/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS)
18867-	@mkdir -p $(@D)
18868-	$(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS)
18869-
18870-$(objroot)test/integration/%$(EXE): $(objroot)test/integration/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
18871-	@mkdir -p $(@D)
18872-	$(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LJEMALLOC) $(LDFLAGS) $(filter-out -lm,$(filter -lrt -pthread -lstdc++,$(LIBS))) $(LM) $(EXTRA_LDFLAGS)
18873-
18874-$(objroot)test/integration/cpp/%$(EXE): $(objroot)test/integration/cpp/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
18875-	@mkdir -p $(@D)
18876-	$(CXX) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(EXTRA_LDFLAGS)
18877-
18878-$(objroot)test/analyze/%$(EXE): $(objroot)test/analyze/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_ANALYZE_OBJS)
18879-	@mkdir -p $(@D)
18880-	$(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS)
18881-
18882-$(objroot)test/stress/%$(EXE): $(objroot)test/stress/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_STRESS_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
18883-	@mkdir -p $(@D)
18884-	$(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS)
18885-
18886-build_lib_shared: $(DSOS)
18887-build_lib_static: $(STATIC_LIBS)
18888-ifeq ($(enable_shared), 1)
18889-build_lib: build_lib_shared
18890-endif
18891-ifeq ($(enable_static), 1)
18892-build_lib: build_lib_static
18893-endif
18894-
18895-install_bin:
18896-	$(INSTALL) -d $(BINDIR)
18897-	@for b in $(BINS); do \
18898-	echo "$(INSTALL) -m 755 $$b $(BINDIR)"; \
18899-	$(INSTALL) -m 755 $$b $(BINDIR); \
18900-done
18901-
18902-install_include:
18903-	$(INSTALL) -d $(INCLUDEDIR)/jemalloc
18904-	@for h in $(C_HDRS); do \
18905-	echo "$(INSTALL) -m 644 $$h $(INCLUDEDIR)/jemalloc"; \
18906-	$(INSTALL) -m 644 $$h $(INCLUDEDIR)/jemalloc; \
18907-done
18908-
18909-install_lib_shared: $(DSOS)
18910-	$(INSTALL) -d $(LIBDIR)
18911-	$(INSTALL) -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(LIBDIR)
18912-ifneq ($(SOREV),$(SO))
18913-	ln -sf $(LIBJEMALLOC).$(SOREV) $(LIBDIR)/$(LIBJEMALLOC).$(SO)
18914-endif
18915-
18916-install_lib_static: $(STATIC_LIBS)
18917-	$(INSTALL) -d $(LIBDIR)
18918-	@for l in $(STATIC_LIBS); do \
18919-	echo "$(INSTALL) -m 755 $$l $(LIBDIR)"; \
18920-	$(INSTALL) -m 755 $$l $(LIBDIR); \
18921-done
18922-
18923-install_lib_pc: $(PC)
18924-	$(INSTALL) -d $(LIBDIR)/pkgconfig
18925-	@for l in $(PC); do \
18926-	echo "$(INSTALL) -m 644 $$l $(LIBDIR)/pkgconfig"; \
18927-	$(INSTALL) -m 644 $$l $(LIBDIR)/pkgconfig; \
18928-done
18929-
18930-ifeq ($(enable_shared), 1)
18931-install_lib: install_lib_shared
18932-endif
18933-ifeq ($(enable_static), 1)
18934-install_lib: install_lib_static
18935-endif
18936-install_lib: install_lib_pc
18937-
18938-install_doc_html: build_doc_html
18939-	$(INSTALL) -d $(DATADIR)/doc/jemalloc$(install_suffix)
18940-	@for d in $(DOCS_HTML); do \
18941-	echo "$(INSTALL) -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix)"; \
18942-	$(INSTALL) -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix); \
18943-done
18944-
18945-install_doc_man: build_doc_man
18946-	$(INSTALL) -d $(MANDIR)/man3
18947-	@for d in $(DOCS_MAN3); do \
18948-	echo "$(INSTALL) -m 644 $$d $(MANDIR)/man3"; \
18949-	$(INSTALL) -m 644 $$d $(MANDIR)/man3; \
18950-done
18951-
18952-install_doc: install_doc_html install_doc_man
18953-
18954-install: install_bin install_include install_lib
18955-
18956-ifeq ($(enable_doc), 1)
18957-install: install_doc
18958-endif
18959-
18960-uninstall_bin:
18961-	$(RM) -v $(foreach b,$(notdir $(BINS)),$(BINDIR)/$(b))
18962-
18963-uninstall_include:
18964-	$(RM) -v $(foreach h,$(notdir $(C_HDRS)),$(INCLUDEDIR)/jemalloc/$(h))
18965-	rmdir -v $(INCLUDEDIR)/jemalloc
18966-
18967-uninstall_lib_shared:
18968-	$(RM) -v $(LIBDIR)/$(LIBJEMALLOC).$(SOREV)
18969-ifneq ($(SOREV),$(SO))
18970-	$(RM) -v $(LIBDIR)/$(LIBJEMALLOC).$(SO)
18971-endif
18972-
18973-uninstall_lib_static:
18974-	$(RM) -v $(foreach l,$(notdir $(STATIC_LIBS)),$(LIBDIR)/$(l))
18975-
18976-uninstall_lib_pc:
18977-	$(RM) -v $(foreach p,$(notdir $(PC)),$(LIBDIR)/pkgconfig/$(p))
18978-
18979-ifeq ($(enable_shared), 1)
18980-uninstall_lib: uninstall_lib_shared
18981-endif
18982-ifeq ($(enable_static), 1)
18983-uninstall_lib: uninstall_lib_static
18984-endif
18985-uninstall_lib: uninstall_lib_pc
18986-
18987-uninstall_doc_html:
18988-	$(RM) -v $(foreach d,$(notdir $(DOCS_HTML)),$(DATADIR)/doc/jemalloc$(install_suffix)/$(d))
18989-	rmdir -v $(DATADIR)/doc/jemalloc$(install_suffix)
18990-
18991-uninstall_doc_man:
18992-	$(RM) -v $(foreach d,$(notdir $(DOCS_MAN3)),$(MANDIR)/man3/$(d))
18993-
18994-uninstall_doc: uninstall_doc_html uninstall_doc_man
18995-
18996-uninstall: uninstall_bin uninstall_include uninstall_lib
18997-
18998-ifeq ($(enable_doc), 1)
18999-uninstall: uninstall_doc
19000-endif
19001-
19002-tests_unit: $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%$(EXE))
19003-tests_integration: $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%$(EXE)) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%$(EXE))
19004-tests_analyze: $(TESTS_ANALYZE:$(srcroot)%.c=$(objroot)%$(EXE))
19005-tests_stress: $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%$(EXE))
19006-tests: tests_unit tests_integration tests_analyze tests_stress
19007-
19008-check_unit_dir:
19009-	@mkdir -p $(objroot)test/unit
19010-check_integration_dir:
19011-	@mkdir -p $(objroot)test/integration
19012-analyze_dir:
19013-	@mkdir -p $(objroot)test/analyze
19014-stress_dir:
19015-	@mkdir -p $(objroot)test/stress
19016-check_dir: check_unit_dir check_integration_dir
19017-
19018-check_unit: tests_unit check_unit_dir
19019-	$(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
19020-check_integration_prof: tests_integration check_integration_dir
19021-ifeq ($(enable_prof), 1)
19022-	$(MALLOC_CONF)="prof:true" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
19023-	$(MALLOC_CONF)="prof:true,prof_active:false" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
19024-endif
19025-check_integration_decay: tests_integration check_integration_dir
19026-	$(MALLOC_CONF)="dirty_decay_ms:-1,muzzy_decay_ms:-1" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
19027-	$(MALLOC_CONF)="dirty_decay_ms:0,muzzy_decay_ms:0" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
19028-check_integration: tests_integration check_integration_dir
19029-	$(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
19030-analyze: tests_analyze analyze_dir
19031-ifeq ($(enable_prof), 1)
19032-	$(MALLOC_CONF)="prof:true" $(SHELL) $(objroot)test/test.sh $(TESTS_ANALYZE:$(srcroot)%.c=$(objroot)%)
19033-else
19034-	$(SHELL) $(objroot)test/test.sh $(TESTS_ANALYZE:$(srcroot)%.c=$(objroot)%)
19035-endif
19036-stress: tests_stress stress_dir
19037-	$(SHELL) $(objroot)test/test.sh $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%)
19038-check: check_unit check_integration check_integration_decay check_integration_prof
19039-
19040-clean:
19041-	rm -f $(PRIVATE_NAMESPACE_HDRS)
19042-	rm -f $(PRIVATE_NAMESPACE_GEN_HDRS)
19043-	rm -f $(C_SYM_OBJS)
19044-	rm -f $(C_SYMS)
19045-	rm -f $(C_OBJS)
19046-	rm -f $(CPP_OBJS)
19047-	rm -f $(C_PIC_OBJS)
19048-	rm -f $(CPP_PIC_OBJS)
19049-	rm -f $(C_JET_SYM_OBJS)
19050-	rm -f $(C_JET_SYMS)
19051-	rm -f $(C_JET_OBJS)
19052-	rm -f $(C_TESTLIB_OBJS)
19053-	rm -f $(C_SYM_OBJS:%.$(O)=%.d)
19054-	rm -f $(C_OBJS:%.$(O)=%.d)
19055-	rm -f $(CPP_OBJS:%.$(O)=%.d)
19056-	rm -f $(C_PIC_OBJS:%.$(O)=%.d)
19057-	rm -f $(CPP_PIC_OBJS:%.$(O)=%.d)
19058-	rm -f $(C_JET_SYM_OBJS:%.$(O)=%.d)
19059-	rm -f $(C_JET_OBJS:%.$(O)=%.d)
19060-	rm -f $(C_TESTLIB_OBJS:%.$(O)=%.d)
19061-	rm -f $(TESTS_OBJS:%.$(O)=%$(EXE))
19062-	rm -f $(TESTS_OBJS)
19063-	rm -f $(TESTS_OBJS:%.$(O)=%.d)
19064-	rm -f $(TESTS_OBJS:%.$(O)=%.out)
19065-	rm -f $(TESTS_CPP_OBJS:%.$(O)=%$(EXE))
19066-	rm -f $(TESTS_CPP_OBJS)
19067-	rm -f $(TESTS_CPP_OBJS:%.$(O)=%.d)
19068-	rm -f $(TESTS_CPP_OBJS:%.$(O)=%.out)
19069-	rm -f $(DSOS) $(STATIC_LIBS)
19070-
19071-distclean: clean
19072-	rm -f $(objroot)bin/jemalloc-config
19073-	rm -f $(objroot)bin/jemalloc.sh
19074-	rm -f $(objroot)bin/jeprof
19075-	rm -f $(objroot)config.log
19076-	rm -f $(objroot)config.status
19077-	rm -f $(objroot)config.stamp
19078-	rm -f $(cfghdrs_out)
19079-	rm -f $(cfgoutputs_out)
19080-
19081-relclean: distclean
19082-	rm -f $(objroot)configure
19083-	rm -f $(objroot)VERSION
19084-	rm -f $(DOCS_HTML)
19085-	rm -f $(DOCS_MAN3)
19086-
19087-#===============================================================================
19088-# Re-configuration rules.
19089-
19090-ifeq ($(enable_autogen), 1)
19091-$(srcroot)configure : $(srcroot)configure.ac
19092-	cd ./$(srcroot) && $(AUTOCONF)
19093-
19094-$(objroot)config.status : $(srcroot)configure
19095-	./$(objroot)config.status --recheck
19096-
19097-$(srcroot)config.stamp.in : $(srcroot)configure.ac
19098-	echo stamp > $(srcroot)config.stamp.in
19099-
19100-$(objroot)config.stamp : $(cfgoutputs_in) $(cfghdrs_in) $(srcroot)configure
19101-	./$(objroot)config.status
19102-	@touch $@
19103-
19104-# There must be some action in order for make to re-read Makefile when it is
19105-# out of date.
19106-$(cfgoutputs_out) $(cfghdrs_out) : $(objroot)config.stamp
19107-	@true
19108-endif
19109diff --git a/jemalloc/README b/jemalloc/README
19110deleted file mode 100644
19111index 3a6e0d2..0000000
19112--- a/jemalloc/README
19113+++ /dev/null
19114@@ -1,20 +0,0 @@
19115-jemalloc is a general purpose malloc(3) implementation that emphasizes
19116-fragmentation avoidance and scalable concurrency support.  jemalloc first came
19117-into use as the FreeBSD libc allocator in 2005, and since then it has found its
19118-way into numerous applications that rely on its predictable behavior.  In 2010
19119-jemalloc development efforts broadened to include developer support features
19120-such as heap profiling and extensive monitoring/tuning hooks.  Modern jemalloc
19121-releases continue to be integrated back into FreeBSD, and therefore versatility
19122-remains critical.  Ongoing development efforts trend toward making jemalloc
19123-among the best allocators for a broad range of demanding applications, and
19124-eliminating/mitigating weaknesses that have practical repercussions for real
19125-world applications.
19126-
19127-The COPYING file contains copyright and licensing information.
19128-
19129-The INSTALL file contains information on how to configure, build, and install
19130-jemalloc.
19131-
19132-The ChangeLog file contains a brief summary of changes for each release.
19133-
19134-URL: http://jemalloc.net/
19135diff --git a/jemalloc/TUNING.md b/jemalloc/TUNING.md
19136deleted file mode 100644
19137index e96399d..0000000
19138--- a/jemalloc/TUNING.md
19139+++ /dev/null
19140@@ -1,129 +0,0 @@
19141-This document summarizes the common approaches for performance fine-tuning with
19142-jemalloc (as of 5.3.0).  The default configuration of jemalloc tends to work
19143-reasonably well in practice, and most applications should not have to tune any
19144-options.  However, in order to cover a wide range of applications and avoid
19145-pathological cases, the default settings are sometimes kept conservative and
19146-suboptimal, even for many common workloads.  When jemalloc is properly tuned for
19147-a specific application / workload, it is common to improve system-level metrics
19148-by a few percent, or to make favorable trade-offs.
19149-
19150-
19151-## Notable runtime options for performance tuning
19152-
19153-Runtime options can be set via
19154-[malloc_conf](http://jemalloc.net/jemalloc.3.html#tuning).
19155-
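For example, options can be supplied through the `MALLOC_CONF` environment
variable at program startup (if jemalloc was built with a symbol prefix via
`--with-jemalloc-prefix`, the variable name carries the corresponding uppercase
prefix, as reflected by the `MALLOC_CONF` definition in Makefile.in; `your_app`
below is only a placeholder):

    MALLOC_CONF="background_thread:true,metadata_thp:auto,abort_conf:true" ./your_app
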
19156-* [background_thread](http://jemalloc.net/jemalloc.3.html#background_thread)
19157-
19158-    Enabling jemalloc background threads generally improves the tail latency for
19159-    application threads, since unused memory purging is shifted to the dedicated
19160-    background threads.  In addition, unintended purging delay caused by
19161-    application inactivity is avoided with background threads.
19162-
19163-    Suggested: `background_thread:true` when jemalloc managed threads can be
19164-    allowed.
19165-
19166-* [metadata_thp](http://jemalloc.net/jemalloc.3.html#opt.metadata_thp)
19167-
19168-    Allowing jemalloc to utilize transparent huge pages for its internal
19169-    metadata usually reduces TLB misses significantly, especially for programs
19170-    with a large memory footprint and frequent allocation / deallocation
19171-    activity.  Metadata memory usage may increase due to the use of huge
19172-    pages.
19173-
19174-    Suggested for allocation intensive programs: `metadata_thp:auto` or
19175-    `metadata_thp:always`, which is expected to improve CPU utilization at a
19176-    small memory cost.
19177-
19178-* [dirty_decay_ms](http://jemalloc.net/jemalloc.3.html#opt.dirty_decay_ms) and
19179-  [muzzy_decay_ms](http://jemalloc.net/jemalloc.3.html#opt.muzzy_decay_ms)
19180-
19181-    Decay time determines how fast jemalloc returns unused pages back to the
19182-    operating system, and therefore provides a fairly straightforward trade-off
19183-    between CPU and memory usage.  A shorter decay time purges unused pages faster
19184-    to reduce memory usage (usually at the cost of more CPU cycles spent on
19185-    purging), and vice versa.
19186-
19187-    Suggested: tune the values based on the desired trade-offs.
19188-
19189-* [narenas](http://jemalloc.net/jemalloc.3.html#opt.narenas)
19190-
19191-    By default jemalloc uses multiple arenas to reduce internal lock contention.
19192-    However, a high arena count may also increase overall memory fragmentation,
19193-    since arenas manage memory independently.  When a high degree of parallelism
19194-    is not expected at the allocator level, a lower number of arenas often
19195-    improves memory usage.
19196-
19197-    Suggested: if low parallelism is expected, try a lower arena count while
19198-    monitoring CPU and memory usage.
19199-
19200-* [percpu_arena](http://jemalloc.net/jemalloc.3.html#opt.percpu_arena)
19201-
19202-    Enables dynamic thread-to-arena association based on the running CPU.  This
19203-    has the potential to improve locality, e.g. when thread-to-CPU affinity is
19204-    present.
19205-
19206-    Suggested: try `percpu_arena:percpu` or `percpu_arena:phycpu` if
19207-    thread migration between processors is expected to be infrequent.
19208-
19209-Examples:
19210-
19211-* High resource consumption application, prioritizing CPU utilization:
19212-
19213-    `background_thread:true,metadata_thp:auto` combined with relaxed decay time
19214-    (increased `dirty_decay_ms` and / or `muzzy_decay_ms`,
19215-    e.g. `dirty_decay_ms:30000,muzzy_decay_ms:30000`).
19216-
19217-* High resource consumption application, prioritizing memory usage:
19218-
19219-    `background_thread:true,tcache_max:4096` combined with shorter decay time
19220-    (decreased `dirty_decay_ms` and / or `muzzy_decay_ms`,
19221-    e.g. `dirty_decay_ms:5000,muzzy_decay_ms:5000`), and lower arena count
19222-    (e.g. number of CPUs).
19223-
19224-* Low resource consumption application:
19225-
19226-    `narenas:1,tcache_max:1024` combined with shorter decay time (decreased
19227-    `dirty_decay_ms` and / or `muzzy_decay_ms`, e.g.
19228-    `dirty_decay_ms:1000,muzzy_decay_ms:0`).
19229-
19230-* Extremely conservative -- minimize memory usage at all costs, only suitable when
19231-allocation activity is very rare:
19232-
19233-    `narenas:1,tcache:false,dirty_decay_ms:0,muzzy_decay_ms:0`
19234-
19235-Note that it is recommended to combine these options with `abort_conf:true`, which
19236-aborts immediately on invalid options.
19237-
19238-## Beyond runtime options
19239-
19240-In addition to the runtime options, there are a number of programmatic ways to
19241-improve application performance with jemalloc.
19242-
19243-* [Explicit arenas](http://jemalloc.net/jemalloc.3.html#arenas.create)
19244-
19245-    Manually created arenas can help performance in various ways, e.g. by
19246-    managing locality and contention for specific usages.  For example,
19247-    applications can explicitly allocate frequently accessed objects from a
19248-    dedicated arena with
19249-    [mallocx()](http://jemalloc.net/jemalloc.3.html#MALLOCX_ARENA) to improve
19250-    locality.  In addition, explicit arenas often benefit from individually
19251-    tuned options, e.g. relaxed [decay
19252-    time](http://jemalloc.net/jemalloc.3.html#arena.i.dirty_decay_ms) if
19253-    frequent reuse is expected.
19254-
19255-* [Extent hooks](http://jemalloc.net/jemalloc.3.html#arena.i.extent_hooks)
19256-
19257-    Extent hooks allow customization of how the underlying memory is managed.  One
19258-    performance-oriented use case is to utilize huge pages -- for example,
19259-    [HHVM](https://github.com/facebook/hhvm/blob/master/hphp/util/alloc.cpp)
19260-    uses explicit arenas with customized extent hooks to manage 1GB huge pages
19261-    for frequently accessed data, which reduces TLB misses significantly.
19262-
19263-* [Explicit thread-to-arena
19264-  binding](http://jemalloc.net/jemalloc.3.html#thread.arena)
19265-
19266-    It is common for some threads in an application to have different memory
19267-    access / allocation patterns.  Threads with heavy workloads often benefit
19268-    from explicit binding, e.g. binding very active threads to dedicated arenas
19269-    may reduce contention at the allocator level.
19270diff --git a/jemalloc/autogen.sh b/jemalloc/autogen.sh
19271deleted file mode 100755
19272index 75f32da..0000000
19273--- a/jemalloc/autogen.sh
19274+++ /dev/null
19275@@ -1,17 +0,0 @@
19276-#!/bin/sh
19277-
19278-for i in autoconf; do
19279-    echo "$i"
19280-    $i
19281-    if [ $? -ne 0 ]; then
19282-	echo "Error $? in $i"
19283-	exit 1
19284-    fi
19285-done
19286-
19287-echo "./configure --enable-autogen $@"
19288-./configure --enable-autogen $@
19289-if [ $? -ne 0 ]; then
19290-    echo "Error $? in ./configure"
19291-    exit 1
19292-fi
19293diff --git a/jemalloc/bin/jemalloc-config.in b/jemalloc/bin/jemalloc-config.in
19294deleted file mode 100644
19295index 80eca2e..0000000
19296--- a/jemalloc/bin/jemalloc-config.in
19297+++ /dev/null
19298@@ -1,83 +0,0 @@
19299-#!/bin/sh
19300-
19301-usage() {
19302-	cat <<EOF
19303-Usage:
19304-  @BINDIR@/jemalloc-config <option>
19305-Options:
19306-  --help | -h  : Print usage.
19307-  --version    : Print jemalloc version.
19308-  --revision   : Print shared library revision number.
19309-  --config     : Print configure options used to build jemalloc.
19310-  --prefix     : Print installation directory prefix.
19311-  --bindir     : Print binary installation directory.
19312-  --datadir    : Print data installation directory.
19313-  --includedir : Print include installation directory.
19314-  --libdir     : Print library installation directory.
19315-  --mandir     : Print manual page installation directory.
19316-  --cc         : Print compiler used to build jemalloc.
19317-  --cflags     : Print compiler flags used to build jemalloc.
19318-  --cppflags   : Print preprocessor flags used to build jemalloc.
19319-  --cxxflags   : Print C++ compiler flags used to build jemalloc.
19320-  --ldflags    : Print library flags used to build jemalloc.
19321-  --libs       : Print libraries jemalloc was linked against.
19322-EOF
19323-}
19324-
19325-prefix="@prefix@"
19326-exec_prefix="@exec_prefix@"
19327-
19328-case "$1" in
19329---help | -h)
19330-	usage
19331-	exit 0
19332-	;;
19333---version)
19334-	echo "@jemalloc_version@"
19335-	;;
19336---revision)
19337-	echo "@rev@"
19338-	;;
19339---config)
19340-	echo "@CONFIG@"
19341-	;;
19342---prefix)
19343-	echo "@PREFIX@"
19344-	;;
19345---bindir)
19346-	echo "@BINDIR@"
19347-	;;
19348---datadir)
19349-	echo "@DATADIR@"
19350-	;;
19351---includedir)
19352-	echo "@INCLUDEDIR@"
19353-	;;
19354---libdir)
19355-	echo "@LIBDIR@"
19356-	;;
19357---mandir)
19358-	echo "@MANDIR@"
19359-	;;
19360---cc)
19361-	echo "@CC@"
19362-	;;
19363---cflags)
19364-	echo "@CFLAGS@"
19365-	;;
19366---cppflags)
19367-	echo "@CPPFLAGS@"
19368-	;;
19369---cxxflags)
19370-	echo "@CXXFLAGS@"
19371-	;;
19372---ldflags)
19373-	echo "@LDFLAGS@ @EXTRA_LDFLAGS@"
19374-	;;
19375---libs)
19376-	echo "@LIBS@"
19377-	;;
19378-*)
19379-	usage
19380-	exit 1
19381-esac
19382diff --git a/jemalloc/bin/jemalloc.sh.in b/jemalloc/bin/jemalloc.sh.in
19383deleted file mode 100644
19384index cdf3673..0000000
19385--- a/jemalloc/bin/jemalloc.sh.in
19386+++ /dev/null
19387@@ -1,9 +0,0 @@
19388-#!/bin/sh
19389-
19390-prefix=@prefix@
19391-exec_prefix=@exec_prefix@
19392-libdir=@libdir@
19393-
19394-@LD_PRELOAD_VAR@=${libdir}/libjemalloc.@SOREV@
19395-export @LD_PRELOAD_VAR@
19396-exec "$@"
19397diff --git a/jemalloc/bin/jeprof.in b/jemalloc/bin/jeprof.in
19398deleted file mode 100644
19399index dbf6252..0000000
19400--- a/jemalloc/bin/jeprof.in
19401+++ /dev/null
19402@@ -1,5723 +0,0 @@
19403-#! /usr/bin/env perl
19404-
19405-# Copyright (c) 1998-2007, Google Inc.
19406-# All rights reserved.
19407-#
19408-# Redistribution and use in source and binary forms, with or without
19409-# modification, are permitted provided that the following conditions are
19410-# met:
19411-#
19412-#     * Redistributions of source code must retain the above copyright
19413-# notice, this list of conditions and the following disclaimer.
19414-#     * Redistributions in binary form must reproduce the above
19415-# copyright notice, this list of conditions and the following disclaimer
19416-# in the documentation and/or other materials provided with the
19417-# distribution.
19418-#     * Neither the name of Google Inc. nor the names of its
19419-# contributors may be used to endorse or promote products derived from
19420-# this software without specific prior written permission.
19421-#
19422-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19423-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19424-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19425-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
19426-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
19427-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
19428-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
19429-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
19430-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
19431-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
19432-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19433-
19434-# ---
19435-# Program for printing the profile generated by common/profiler.cc,
19436-# or by the heap profiler (common/debugallocation.cc)
19437-#
19438-# The profile contains a sequence of entries of the form:
19439-#       <count> <stack trace>
19440-# This program parses the profile, and generates user-readable
19441-# output.
19442-#
19443-# Examples:
19444-#
19445-# % tools/jeprof "program" "profile"
19446-#   Enters "interactive" mode
19447-#
19448-# % tools/jeprof --text "program" "profile"
19449-#   Generates one line per procedure
19450-#
19451-# % tools/jeprof --gv "program" "profile"
19452-#   Generates annotated call-graph and displays via "gv"
19453-#
19454-# % tools/jeprof --gv --focus=Mutex "program" "profile"
19455-#   Restrict to code paths that involve an entry that matches "Mutex"
19456-#
19457-# % tools/jeprof --gv --focus=Mutex --ignore=string "program" "profile"
19458-#   Restrict to code paths that involve an entry that matches "Mutex"
19459-#   and does not match "string"
19460-#
19461-# % tools/jeprof --list=IBF_CheckDocid "program" "profile"
19462-#   Generates disassembly listing of all routines with at least one
19463-#   sample that match the --list=<regexp> pattern.  The listing is
19464-#   annotated with the flat and cumulative sample counts at each line.
19465-#
19466-# % tools/jeprof --disasm=IBF_CheckDocid "program" "profile"
19467-#   Generates disassembly listing of all routines with at least one
19468-#   sample that match the --disasm=<regexp> pattern.  The listing is
19469-#   annotated with the flat and cumulative sample counts at each PC value.
19470-#
19471-# TODO: Use color to indicate files?
19472-
19473-use strict;
19474-use warnings;
19475-use Getopt::Long;
19476-use Cwd;
19477-
19478-my $JEPROF_VERSION = "@jemalloc_version@";
19479-my $PPROF_VERSION = "2.0";
19480-
19481-# These are the object tools we use which can come from a
19482-# user-specified location using --tools, from the JEPROF_TOOLS
19483-# environment variable, or from the environment.
19484-my %obj_tool_map = (
19485-  "objdump" => "objdump",
19486-  "nm" => "nm",
19487-  "addr2line" => "addr2line",
19488-  "c++filt" => "c++filt",
19489-  ## ConfigureObjTools may add architecture-specific entries:
19490-  #"nm_pdb" => "nm-pdb",       # for reading windows (PDB-format) executables
19491-  #"addr2line_pdb" => "addr2line-pdb",                                # ditto
19492-  #"otool" => "otool",         # equivalent of objdump on OS X
19493-);
19494-# NOTE: these are lists, so you can put in commandline flags if you want.
19495-my @DOT = ("dot");          # leave non-absolute, since it may be in /usr/local
19496-my @GV = ("gv");
19497-my @EVINCE = ("evince");    # could also be xpdf or perhaps acroread
19498-my @KCACHEGRIND = ("kcachegrind");
19499-my @PS2PDF = ("ps2pdf");
19500-# These are used for dynamic profiles
19501-my @URL_FETCHER = ("curl", "-s", "--fail");
19502-
19503-# These are the web pages that servers need to support for dynamic profiles
19504-my $HEAP_PAGE = "/pprof/heap";
19505-my $PROFILE_PAGE = "/pprof/profile";   # must support cgi-param "?seconds=#"
19506-my $PMUPROFILE_PAGE = "/pprof/pmuprofile(?:\\?.*)?"; # must support cgi-param
19507-                                                # ?seconds=#&event=x&period=n
19508-my $GROWTH_PAGE = "/pprof/growth";
19509-my $CONTENTION_PAGE = "/pprof/contention";
19510-my $WALL_PAGE = "/pprof/wall(?:\\?.*)?";  # accepts options like namefilter
19511-my $FILTEREDPROFILE_PAGE = "/pprof/filteredprofile(?:\\?.*)?";
19512-my $CENSUSPROFILE_PAGE = "/pprof/censusprofile(?:\\?.*)?"; # must support cgi-param
19513-                                                       # "?seconds=#",
19514-                                                       # "?tags_regexp=#" and
19515-                                                       # "?type=#".
19516-my $SYMBOL_PAGE = "/pprof/symbol";     # must support symbol lookup via POST
19517-my $PROGRAM_NAME_PAGE = "/pprof/cmdline";
19518-
19519-# These are the web pages that can be named on the command line.
19520-# All the alternatives must begin with /.
19521-my $PROFILES = "($HEAP_PAGE|$PROFILE_PAGE|$PMUPROFILE_PAGE|" .
19522-               "$GROWTH_PAGE|$CONTENTION_PAGE|$WALL_PAGE|" .
19523-               "$FILTEREDPROFILE_PAGE|$CENSUSPROFILE_PAGE)";
19524-
19525-# default binary name
19526-my $UNKNOWN_BINARY = "(unknown)";
19527-
19528-# There is a pervasive dependency on the length (in hex characters,
19529-# i.e., nibbles) of an address, distinguishing between 32-bit and
19530-# 64-bit profiles.  To err on the safe size, default to 64-bit here:
19531-my $address_length = 16;
19532-
19533-my $dev_null = "/dev/null";
19534-if (! -e $dev_null && $^O =~ /MSWin/) {    # $^O is the OS perl was built for
19535-  $dev_null = "nul";
19536-}
19537-
19538-# A list of paths to search for shared object files
19539-my @prefix_list = ();
19540-
19541-# Special routine name that should not have any symbols.
19542-# Used as separator to parse "addr2line -i" output.
19543-my $sep_symbol = '_fini';
19544-my $sep_address = undef;
19545-
19546-##### Argument parsing #####
19547-
19548-sub usage_string {
19549-  return <<EOF;
19550-Usage:
19551-jeprof [options] <program> <profiles>
19552-   <profiles> is a space separated list of profile names.
19553-jeprof [options] <symbolized-profiles>
19554-   <symbolized-profiles> is a list of profile files where each file contains
19555-   the necessary symbol mappings  as well as profile data (likely generated
19556-   with --raw).
19557-jeprof [options] <profile>
19558-   <profile> is a remote form.  Symbols are obtained from host:port$SYMBOL_PAGE
19559-
19560-   Each name can be:
19561-   /path/to/profile        - a path to a profile file
19562-   host:port[/<service>]   - a location of a service to get profile from
19563-
19564-   The /<service> can be $HEAP_PAGE, $PROFILE_PAGE, /pprof/pmuprofile,
19565-                         $GROWTH_PAGE, $CONTENTION_PAGE, /pprof/wall,
19566-                         $CENSUSPROFILE_PAGE, or /pprof/filteredprofile.
19567-   For instance:
19568-     jeprof http://myserver.com:80$HEAP_PAGE
19569-   If /<service> is omitted, the service defaults to $PROFILE_PAGE (cpu profiling).
19570-jeprof --symbols <program>
19571-   Maps addresses to symbol names.  In this mode, stdin should be a
19572-   list of library mappings, in the same format as is found in the heap-
19573-   and cpu-profile files (this loosely matches that of /proc/self/maps
19574-   on linux), followed by a list of hex addresses to map, one per line.
19575-
19576-   For more help with querying remote servers, including how to add the
19577-   necessary server-side support code, see this filename (or one like it):
19578-
19579-   /usr/doc/gperftools-$PPROF_VERSION/pprof_remote_servers.html
19580-
19581-Options:
19582-   --cum               Sort by cumulative data
19583-   --base=<base>       Subtract <base> from <profile> before display
19584-   --interactive       Run in interactive mode (interactive "help" gives help) [default]
19585-   --seconds=<n>       Length of time for dynamic profiles [default=30 secs]
19586-   --add_lib=<file>    Read additional symbols and line info from the given library
19587-   --lib_prefix=<dir>  Comma separated list of library path prefixes
19588-
19589-Reporting Granularity:
19590-   --addresses         Report at address level
19591-   --lines             Report at source line level
19592-   --functions         Report at function level [default]
19593-   --files             Report at source file level
19594-
19595-Output type:
19596-   --text              Generate text report
19597-   --callgrind         Generate callgrind format to stdout
19598-   --gv                Generate Postscript and display
19599-   --evince            Generate PDF and display
19600-   --web               Generate SVG and display
19601-   --list=<regexp>     Generate source listing of matching routines
19602-   --disasm=<regexp>   Generate disassembly of matching routines
19603-   --symbols           Print demangled symbol names found at given addresses
19604-   --dot               Generate DOT file to stdout
19605-   --ps                Generate Postcript to stdout
19606-   --pdf               Generate PDF to stdout
19607-   --svg               Generate SVG to stdout
19608-   --gif               Generate GIF to stdout
19609-   --raw               Generate symbolized jeprof data (useful with remote fetch)
19610-   --collapsed         Generate collapsed stacks for building flame graphs
19611-                       (see http://www.brendangregg.com/flamegraphs.html)
19612-
19613-Heap-Profile Options:
19614-   --inuse_space       Display in-use (mega)bytes [default]
19615-   --inuse_objects     Display in-use objects
19616-   --alloc_space       Display allocated (mega)bytes
19617-   --alloc_objects     Display allocated objects
19618-   --show_bytes        Display space in bytes
19619-   --drop_negative     Ignore negative differences
19620-
19621-Contention-profile options:
19622-   --total_delay       Display total delay at each region [default]
19623-   --contentions       Display number of delays at each region
19624-   --mean_delay        Display mean delay at each region
19625-
19626-Call-graph Options:
19627-   --nodecount=<n>     Show at most so many nodes [default=80]
19628-   --nodefraction=<f>  Hide nodes below <f>*total [default=.005]
19629-   --edgefraction=<f>  Hide edges below <f>*total [default=.001]
19630-   --maxdegree=<n>     Max incoming/outgoing edges per node [default=8]
19631-   --focus=<regexp>    Focus on backtraces with nodes matching <regexp>
19632-   --thread=<n>        Show profile for thread <n>
19633-   --ignore=<regexp>   Ignore backtraces with nodes matching <regexp>
19634-   --scale=<n>         Set GV scaling [default=0]
19635-   --heapcheck         Make nodes with non-0 object counts
19636-                       (i.e. direct leak generators) more visible
19637-   --retain=<regexp>   Retain only nodes that match <regexp>
19638-   --exclude=<regexp>  Exclude all nodes that match <regexp>
19639-
19640-Miscellaneous:
19641-   --tools=<prefix or binary:fullpath>[,...]   \$PATH for object tool pathnames
19642-   --test              Run unit tests
19643-   --help              This message
19644-   --version           Version information
19645-   --debug-syms-by-id  (Linux only) Find debug symbol files by build ID as well as by name
19646-
19647-Environment Variables:
19648-   JEPROF_TMPDIR        Profiles directory. Defaults to \$HOME/jeprof
19649-   JEPROF_TOOLS         Prefix for object tools pathnames
19650-
19651-Examples:
19652-
19653-jeprof /bin/ls ls.prof
19654-                       Enters "interactive" mode
19655-jeprof --text /bin/ls ls.prof
19656-                       Outputs one line per procedure
19657-jeprof --web /bin/ls ls.prof
19658-                       Displays annotated call-graph in web browser
19659-jeprof --gv /bin/ls ls.prof
19660-                       Displays annotated call-graph via 'gv'
19661-jeprof --gv --focus=Mutex /bin/ls ls.prof
19662-                       Restricts to code paths including a .*Mutex.* entry
19663-jeprof --gv --focus=Mutex --ignore=string /bin/ls ls.prof
19664-                       Code paths including Mutex but not string
19665-jeprof --list=getdir /bin/ls ls.prof
19666-                       (Per-line) annotated source listing for getdir()
19667-jeprof --disasm=getdir /bin/ls ls.prof
19668-                       (Per-PC) annotated disassembly for getdir()
19669-
19670-jeprof http://localhost:1234/
19671-                       Enters "interactive" mode
19672-jeprof --text localhost:1234
19673-                       Outputs one line per procedure for localhost:1234
19674-jeprof --raw localhost:1234 > ./local.raw
19675-jeprof --text ./local.raw
19676-                       Fetches a remote profile for later analysis and then
19677-                       analyzes it in text mode.
19678-EOF
19679-}
19680-
19681-sub version_string {
19682-  return <<EOF
19683-jeprof (part of jemalloc $JEPROF_VERSION)
19684-based on pprof (part of gperftools $PPROF_VERSION)
19685-
19686-Copyright 1998-2007 Google Inc.
19687-
19688-This is BSD licensed software; see the source for copying conditions
19689-and license information.
19690-There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A
19691-PARTICULAR PURPOSE.
19692-EOF
19693-}
19694-
19695-sub usage {
19696-  my $msg = shift;
19697-  print STDERR "$msg\n\n";
19698-  print STDERR usage_string();
19699-  print STDERR "\nFATAL ERROR: $msg\n";    # just as a reminder
19700-  exit(1);
19701-}
19702-
19703-sub Init() {
19704-  # Set up the tmp-file name and a handler to clean it up.
19705-  # We do this at the very beginning so that we can use the
19706-  # error() and cleanup() functions anytime hereafter.
19707-  $main::tmpfile_sym = "/tmp/jeprof$$.sym";
19708-  $main::tmpfile_ps = "/tmp/jeprof$$";
19709-  $main::next_tmpfile = 0;
19710-  $SIG{'INT'} = \&sighandler;
19711-
19712-  # Cache from filename/linenumber to source code
19713-  %main::source_cache = ();
19714-
19715-  $main::opt_help = 0;
19716-  $main::opt_version = 0;
19717-
19718-  $main::opt_cum = 0;
19719-  $main::opt_base = '';
19720-  $main::opt_addresses = 0;
19721-  $main::opt_lines = 0;
19722-  $main::opt_functions = 0;
19723-  $main::opt_files = 0;
19724-  $main::opt_lib_prefix = "";
19725-
19726-  $main::opt_text = 0;
19727-  $main::opt_callgrind = 0;
19728-  $main::opt_list = "";
19729-  $main::opt_disasm = "";
19730-  $main::opt_symbols = 0;
19731-  $main::opt_gv = 0;
19732-  $main::opt_evince = 0;
19733-  $main::opt_web = 0;
19734-  $main::opt_dot = 0;
19735-  $main::opt_ps = 0;
19736-  $main::opt_pdf = 0;
19737-  $main::opt_gif = 0;
19738-  $main::opt_svg = 0;
19739-  $main::opt_raw = 0;
19740-  $main::opt_collapsed = 0;
19741-
19742-  $main::opt_nodecount = 80;
19743-  $main::opt_nodefraction = 0.005;
19744-  $main::opt_edgefraction = 0.001;
19745-  $main::opt_maxdegree = 8;
19746-  $main::opt_focus = '';
19747-  $main::opt_thread = undef;
19748-  $main::opt_ignore = '';
19749-  $main::opt_scale = 0;
19750-  $main::opt_heapcheck = 0;
19751-  $main::opt_retain = '';
19752-  $main::opt_exclude = '';
19753-  $main::opt_seconds = 30;
19754-  $main::opt_lib = "";
19755-
19756-  $main::opt_inuse_space   = 0;
19757-  $main::opt_inuse_objects = 0;
19758-  $main::opt_alloc_space   = 0;
19759-  $main::opt_alloc_objects = 0;
19760-  $main::opt_show_bytes    = 0;
19761-  $main::opt_drop_negative = 0;
19762-  $main::opt_interactive   = 0;
19763-
19764-  $main::opt_total_delay = 0;
19765-  $main::opt_contentions = 0;
19766-  $main::opt_mean_delay = 0;
19767-
19768-  $main::opt_tools   = "";
19769-  $main::opt_debug   = 0;
19770-  $main::opt_test    = 0;
19771-  $main::opt_debug_syms_by_id = 0;
19772-
19773-  # These are undocumented flags used only by unittests.
19774-  $main::opt_test_stride = 0;
19775-
19776-  # Are we using $SYMBOL_PAGE?
19777-  $main::use_symbol_page = 0;
19778-
19779-  # Files returned by TempName.
19780-  %main::tempnames = ();
19781-
19782-  # Type of profile we are dealing with
19783-  # Supported types:
19784-  #     cpu
19785-  #     heap
19786-  #     growth
19787-  #     contention
19788-  $main::profile_type = '';     # Empty type means "unknown"
19789-
19790-  GetOptions("help!"          => \$main::opt_help,
19791-             "version!"       => \$main::opt_version,
19792-             "cum!"           => \$main::opt_cum,
19793-             "base=s"         => \$main::opt_base,
19794-             "seconds=i"      => \$main::opt_seconds,
19795-             "add_lib=s"      => \$main::opt_lib,
19796-             "lib_prefix=s"   => \$main::opt_lib_prefix,
19797-             "functions!"     => \$main::opt_functions,
19798-             "lines!"         => \$main::opt_lines,
19799-             "addresses!"     => \$main::opt_addresses,
19800-             "files!"         => \$main::opt_files,
19801-             "text!"          => \$main::opt_text,
19802-             "callgrind!"     => \$main::opt_callgrind,
19803-             "list=s"         => \$main::opt_list,
19804-             "disasm=s"       => \$main::opt_disasm,
19805-             "symbols!"       => \$main::opt_symbols,
19806-             "gv!"            => \$main::opt_gv,
19807-             "evince!"        => \$main::opt_evince,
19808-             "web!"           => \$main::opt_web,
19809-             "dot!"           => \$main::opt_dot,
19810-             "ps!"            => \$main::opt_ps,
19811-             "pdf!"           => \$main::opt_pdf,
19812-             "svg!"           => \$main::opt_svg,
19813-             "gif!"           => \$main::opt_gif,
19814-             "raw!"           => \$main::opt_raw,
19815-             "collapsed!"     => \$main::opt_collapsed,
19816-             "interactive!"   => \$main::opt_interactive,
19817-             "nodecount=i"    => \$main::opt_nodecount,
19818-             "nodefraction=f" => \$main::opt_nodefraction,
19819-             "edgefraction=f" => \$main::opt_edgefraction,
19820-             "maxdegree=i"    => \$main::opt_maxdegree,
19821-             "focus=s"        => \$main::opt_focus,
19822-             "thread=s"       => \$main::opt_thread,
19823-             "ignore=s"       => \$main::opt_ignore,
19824-             "scale=i"        => \$main::opt_scale,
19825-             "heapcheck"      => \$main::opt_heapcheck,
19826-             "retain=s"       => \$main::opt_retain,
19827-             "exclude=s"      => \$main::opt_exclude,
19828-             "inuse_space!"   => \$main::opt_inuse_space,
19829-             "inuse_objects!" => \$main::opt_inuse_objects,
19830-             "alloc_space!"   => \$main::opt_alloc_space,
19831-             "alloc_objects!" => \$main::opt_alloc_objects,
19832-             "show_bytes!"    => \$main::opt_show_bytes,
19833-             "drop_negative!" => \$main::opt_drop_negative,
19834-             "total_delay!"   => \$main::opt_total_delay,
19835-             "contentions!"   => \$main::opt_contentions,
19836-             "mean_delay!"    => \$main::opt_mean_delay,
19837-             "tools=s"        => \$main::opt_tools,
19838-             "test!"          => \$main::opt_test,
19839-             "debug!"         => \$main::opt_debug,
19840-             "debug-syms-by-id!" => \$main::opt_debug_syms_by_id,
19841-             # Undocumented flags used only by unittests:
19842-             "test_stride=i"  => \$main::opt_test_stride,
19843-      ) || usage("Invalid option(s)");
19844-
19845-  # Deal with the standard --help and --version
19846-  if ($main::opt_help) {
19847-    print usage_string();
19848-    exit(0);
19849-  }
19850-
19851-  if ($main::opt_version) {
19852-    print version_string();
19853-    exit(0);
19854-  }
19855-
19856-  # Disassembly/listing/symbols mode requires address-level info
19857-  if ($main::opt_disasm || $main::opt_list || $main::opt_symbols) {
19858-    $main::opt_functions = 0;
19859-    $main::opt_lines = 0;
19860-    $main::opt_addresses = 1;
19861-    $main::opt_files = 0;
19862-  }
19863-
19864-  # Check heap-profiling flags
19865-  if ($main::opt_inuse_space +
19866-      $main::opt_inuse_objects +
19867-      $main::opt_alloc_space +
19868-      $main::opt_alloc_objects > 1) {
19869-    usage("Specify at most on of --inuse/--alloc options");
19870-  }
19871-
19872-  # Check output granularities
19873-  my $grains =
19874-      $main::opt_functions +
19875-      $main::opt_lines +
19876-      $main::opt_addresses +
19877-      $main::opt_files +
19878-      0;
19879-  if ($grains > 1) {
19880-    usage("Only specify one output granularity option");
19881-  }
19882-  if ($grains == 0) {
19883-    $main::opt_functions = 1;
19884-  }
19885-
19886-  # Check output modes
19887-  my $modes =
19888-      $main::opt_text +
19889-      $main::opt_callgrind +
19890-      ($main::opt_list eq '' ? 0 : 1) +
19891-      ($main::opt_disasm eq '' ? 0 : 1) +
19892-      ($main::opt_symbols == 0 ? 0 : 1) +
19893-      $main::opt_gv +
19894-      $main::opt_evince +
19895-      $main::opt_web +
19896-      $main::opt_dot +
19897-      $main::opt_ps +
19898-      $main::opt_pdf +
19899-      $main::opt_svg +
19900-      $main::opt_gif +
19901-      $main::opt_raw +
19902-      $main::opt_collapsed +
19903-      $main::opt_interactive +
19904-      0;
19905-  if ($modes > 1) {
19906-    usage("Only specify one output mode");
19907-  }
19908-  if ($modes == 0) {
19909-    if (-t STDOUT) {  # If STDOUT is a tty, activate interactive mode
19910-      $main::opt_interactive = 1;
19911-    } else {
19912-      $main::opt_text = 1;
19913-    }
19914-  }
19915-
19916-  if ($main::opt_test) {
19917-    RunUnitTests();
19918-    # Should not return
19919-    exit(1);
19920-  }
19921-
19922-  # Binary name and profile arguments list
19923-  $main::prog = "";
19924-  @main::pfile_args = ();
19925-
19926-  # Remote profiling without a binary (using $SYMBOL_PAGE instead)
19927-  if (@ARGV > 0) {
19928-    if (IsProfileURL($ARGV[0])) {
19929-      $main::use_symbol_page = 1;
19930-    } elsif (IsSymbolizedProfileFile($ARGV[0])) {
19931-      $main::use_symbolized_profile = 1;
19932-      $main::prog = $UNKNOWN_BINARY;  # will be set later from the profile file
19933-    }
19934-  }
19935-
19936-  if ($main::use_symbol_page || $main::use_symbolized_profile) {
19937-    # We don't need a binary!
19938-    my %disabled = ('--lines' => $main::opt_lines,
19939-                    '--disasm' => $main::opt_disasm);
19940-    for my $option (keys %disabled) {
19941-      usage("$option cannot be used without a binary") if $disabled{$option};
19942-    }
19943-    # Set $main::prog later...
19944-    scalar(@ARGV) || usage("Did not specify profile file");
19945-  } elsif ($main::opt_symbols) {
19946-    # --symbols needs a binary-name (to run nm on, etc) but not profiles
19947-    $main::prog = shift(@ARGV) || usage("Did not specify program");
19948-  } else {
19949-    $main::prog = shift(@ARGV) || usage("Did not specify program");
19950-    scalar(@ARGV) || usage("Did not specify profile file");
19951-  }
19952-
19953-  # Parse profile file/location arguments
19954-  foreach my $farg (@ARGV) {
19955-    if ($farg =~ m/(.*)\@([0-9]+)(|\/.*)$/ ) {
19956-      my $machine = $1;
19957-      my $num_machines = $2;
19958-      my $path = $3;
19959-      for (my $i = 0; $i < $num_machines; $i++) {
19960-        unshift(@main::pfile_args, "$i.$machine$path");
19961-      }
19962-    } else {
19963-      unshift(@main::pfile_args, $farg);
19964-    }
19965-  }
19966-
19967-  if ($main::use_symbol_page) {
19968-    unless (IsProfileURL($main::pfile_args[0])) {
19969-      error("The first profile should be a remote form to use $SYMBOL_PAGE\n");
19970-    }
19971-    CheckSymbolPage();
19972-    $main::prog = FetchProgramName();
19973-  } elsif (!$main::use_symbolized_profile) {  # may not need objtools!
19974-    ConfigureObjTools($main::prog)
19975-  }
19976-
19977-  # Break the opt_lib_prefix into the prefix_list array
19978-  @prefix_list = split (',', $main::opt_lib_prefix);
19979-
19980-  # Remove trailing / from the prefixes in the list, to prevent
19981-  # searching for things like /my/path//lib/mylib.so
19982-  foreach (@prefix_list) {
19983-    s|/+$||;
19984-  }
19985-
19986-  # Flag to prevent us from trying over and over to use
19987-  #  elfutils if it's not installed (used only with
19988-  #  --debug-syms-by-id option).
19989-  $main::gave_up_on_elfutils = 0;
19990-}
19991-
19992-sub FilterAndPrint {
19993-  my ($profile, $symbols, $libs, $thread) = @_;
19994-
19995-  # Get total data in profile
19996-  my $total = TotalProfile($profile);
19997-
19998-  # Remove uninteresting stack items
19999-  $profile = RemoveUninterestingFrames($symbols, $profile);
20000-
20001-  # Focus?
20002-  if ($main::opt_focus ne '') {
20003-    $profile = FocusProfile($symbols, $profile, $main::opt_focus);
20004-  }
20005-
20006-  # Ignore?
20007-  if ($main::opt_ignore ne '') {
20008-    $profile = IgnoreProfile($symbols, $profile, $main::opt_ignore);
20009-  }
20010-
20011-  my $calls = ExtractCalls($symbols, $profile);
20012-
20013-  # Reduce profiles to required output granularity, and also clean
20014-  # each stack trace so a given entry exists at most once.
20015-  my $reduced = ReduceProfile($symbols, $profile);
20016-
20017-  # Get derived profiles
20018-  my $flat = FlatProfile($reduced);
20019-  my $cumulative = CumulativeProfile($reduced);
20020-
20021-  # Print
20022-  if (!$main::opt_interactive) {
20023-    if ($main::opt_disasm) {
20024-      PrintDisassembly($libs, $flat, $cumulative, $main::opt_disasm);
20025-    } elsif ($main::opt_list) {
20026-      PrintListing($total, $libs, $flat, $cumulative, $main::opt_list, 0);
20027-    } elsif ($main::opt_text) {
20028-      # Make sure the output is empty when we have nothing to report
20029-      # (only matters when --heapcheck is given but we must be
20030-      # compatible with old branches that did not pass --heapcheck always):
20031-      if ($total != 0) {
20032-        printf("Total%s: %s %s\n",
20033-               (defined($thread) ? " (t$thread)" : ""),
20034-               Unparse($total), Units());
20035-      }
20036-      PrintText($symbols, $flat, $cumulative, -1);
20037-    } elsif ($main::opt_raw) {
20038-      PrintSymbolizedProfile($symbols, $profile, $main::prog);
20039-    } elsif ($main::opt_collapsed) {
20040-      PrintCollapsedStacks($symbols, $profile);
20041-    } elsif ($main::opt_callgrind) {
20042-      PrintCallgrind($calls);
20043-    } else {
20044-      if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) {
20045-        if ($main::opt_gv) {
20046-          RunGV(TempName($main::next_tmpfile, "ps"), "");
20047-        } elsif ($main::opt_evince) {
20048-          RunEvince(TempName($main::next_tmpfile, "pdf"), "");
20049-        } elsif ($main::opt_web) {
20050-          my $tmp = TempName($main::next_tmpfile, "svg");
20051-          RunWeb($tmp);
20052-          # The command we run might hand the file name off
20053-          # to an already running browser instance and then exit.
20054-          # Normally, we'd remove $tmp on exit (right now), but instead
20055-          # we fork a child to remove $tmp a little later, so that the
20056-          # browser has time to load it first.
20057-          delete $main::tempnames{$tmp};
20058-          if (fork() == 0) {
20059-            sleep 5;
20060-            unlink($tmp);
20061-            exit(0);
20062-          }
20063-        }
20064-      } else {
20065-        cleanup();
20066-        exit(1);
20067-      }
20068-    }
20069-  } else {
20070-    InteractiveMode($profile, $symbols, $libs, $total);
20071-  }
20072-}
20073-
20074-sub Main() {
20075-  Init();
20076-  $main::collected_profile = undef;
20077-  @main::profile_files = ();
20078-  $main::op_time = time();
20079-
20080-  # Printing symbols is special and requires a lot less info than most.
20081-  if ($main::opt_symbols) {
20082-    PrintSymbols(*STDIN);   # Get /proc/maps and symbols output from stdin
20083-    return;
20084-  }
20085-
20086-  # Fetch all profile data
20087-  FetchDynamicProfiles();
20088-
20089-  # this will hold symbols that we read from the profile files
20090-  my $symbol_map = {};
20091-
20092-  # Read one profile, pick the last item on the list
20093-  my $data = ReadProfile($main::prog, pop(@main::profile_files));
20094-  my $profile = $data->{profile};
20095-  my $pcs = $data->{pcs};
20096-  my $libs = $data->{libs};   # Info about main program and shared libraries
20097-  $symbol_map = MergeSymbols($symbol_map, $data->{symbols});
20098-
20099-  # Add additional profiles, if available.
20100-  if (scalar(@main::profile_files) > 0) {
20101-    foreach my $pname (@main::profile_files) {
20102-      my $data2 = ReadProfile($main::prog, $pname);
20103-      $profile = AddProfile($profile, $data2->{profile});
20104-      $pcs = AddPcs($pcs, $data2->{pcs});
20105-      $symbol_map = MergeSymbols($symbol_map, $data2->{symbols});
20106-    }
20107-  }
20108-
20109-  # Subtract base from profile, if specified
20110-  if ($main::opt_base ne '') {
20111-    my $base = ReadProfile($main::prog, $main::opt_base);
20112-    $profile = SubtractProfile($profile, $base->{profile});
20113-    $pcs = AddPcs($pcs, $base->{pcs});
20114-    $symbol_map = MergeSymbols($symbol_map, $base->{symbols});
20115-  }
20116-
20117-  # Collect symbols
20118-  my $symbols;
20119-  if ($main::use_symbolized_profile) {
20120-    $symbols = FetchSymbols($pcs, $symbol_map);
20121-  } elsif ($main::use_symbol_page) {
20122-    $symbols = FetchSymbols($pcs);
20123-  } else {
20124-    # TODO(csilvers): $libs uses the /proc/self/maps data from profile1,
20125-    # which may differ from the data from subsequent profiles, especially
20126-    # if they were run on different machines.  Use appropriate libs for
20127-    # each pc somehow.
20128-    $symbols = ExtractSymbols($libs, $pcs);
20129-  }
20130-
20131-  if (!defined($main::opt_thread)) {
20132-    FilterAndPrint($profile, $symbols, $libs);
20133-  }
20134-  if (defined($data->{threads})) {
20135-    foreach my $thread (sort { $a <=> $b } keys(%{$data->{threads}})) {
20136-      if (defined($main::opt_thread) &&
20137-          ($main::opt_thread eq '*' || $main::opt_thread == $thread)) {
20138-        my $thread_profile = $data->{threads}{$thread};
20139-        FilterAndPrint($thread_profile, $symbols, $libs, $thread);
20140-      }
20141-    }
20142-  }
20143-
20144-  cleanup();
20145-  exit(0);
20146-}
20147-
20148-##### Entry Point #####
20149-
20150-Main();
20151-
20152-# Temporary code to detect if we're running on a Goobuntu system.
20153-# These systems don't have the libraries needed for the special
20154-# Readline support to work, so as a temporary workaround, we default
20155-# to using the normal stdio code, rather than the fancier readline-based
20156-# code.
20157-sub ReadlineMightFail {
20158-  if (-e '/lib/libtermcap.so.2') {
20159-    return 0;  # libtermcap exists, so readline should be okay
20160-  } else {
20161-    return 1;
20162-  }
20163-}
20164-
20165-sub RunGV {
20166-  my $fname = shift;
20167-  my $bg = shift;       # "" or " &" if we should run in background
20168-  if (!system(ShellEscape(@GV, "--version") . " >$dev_null 2>&1")) {
20169-    # Options using double dash are supported by this gv version.
20170-    # Also, turn on noantialias to better handle a bug in gv for
20171-    # postscript files with large dimensions.
20172-    # TODO: Maybe we should not pass the --noantialias flag
20173-    # if the gv version is known to work properly without the flag.
20174-    system(ShellEscape(@GV, "--scale=$main::opt_scale", "--noantialias", $fname)
20175-           . $bg);
20176-  } else {
20177-    # Old gv version - only supports options that use single dash.
20178-    print STDERR ShellEscape(@GV, "-scale", $main::opt_scale) . "\n";
20179-    system(ShellEscape(@GV, "-scale", "$main::opt_scale", $fname) . $bg);
20180-  }
20181-}
20182-
20183-sub RunEvince {
20184-  my $fname = shift;
20185-  my $bg = shift;       # "" or " &" if we should run in background
20186-  system(ShellEscape(@EVINCE, $fname) . $bg);
20187-}
20188-
20189-sub RunWeb {
20190-  my $fname = shift;
20191-  print STDERR "Loading web page file:///$fname\n";
20192-
20193-  if (`uname` =~ /Darwin/) {
20194-    # OS X: open will use standard preference for SVG files.
20195-    system("/usr/bin/open", $fname);
20196-    return;
20197-  }
20198-
20199-  # Some kind of Unix; try generic symlinks, then specific browsers.
20200-  # (Stop once we find one.)
20201-  # Works best if the browser is already running.
20202-  my @alt = (
20203-    "/etc/alternatives/gnome-www-browser",
20204-    "/etc/alternatives/x-www-browser",
20205-    "google-chrome",
20206-    "firefox",
20207-  );
20208-  foreach my $b (@alt) {
20209-    if (system($b, $fname) == 0) {
20210-      return;
20211-    }
20212-  }
20213-
20214-  print STDERR "Could not load web browser.\n";
20215-}
20216-
20217-sub RunKcachegrind {
20218-  my $fname = shift;
20219-  my $bg = shift;       # "" or " &" if we should run in background
20220-  print STDERR "Starting '@KCACHEGRIND " . $fname . $bg . "'\n";
20221-  system(ShellEscape(@KCACHEGRIND, $fname) . $bg);
20222-}
20223-
20224-
20225-##### Interactive helper routines #####
20226-
20227-sub InteractiveMode {
20228-  $| = 1;  # Make output unbuffered for interactive mode
20229-  my ($orig_profile, $symbols, $libs, $total) = @_;
20230-
20231-  print STDERR "Welcome to jeprof!  For help, type 'help'.\n";
20232-
20233-  # Use ReadLine if it's installed and input comes from a console.
20234-  if ( -t STDIN &&
20235-       !ReadlineMightFail() &&
20236-       defined(eval {require Term::ReadLine}) ) {
20237-    my $term = new Term::ReadLine 'jeprof';
20238-    while ( defined ($_ = $term->readline('(jeprof) '))) {
20239-      $term->addhistory($_) if /\S/;
20240-      if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) {
20241-        last;    # exit when we get an interactive command to quit
20242-      }
20243-    }
20244-  } else {       # don't have readline
20245-    while (1) {
20246-      print STDERR "(jeprof) ";
20247-      $_ = <STDIN>;
20248-      last if ! defined $_ ;
20249-      s/\r//g;         # turn windows-looking lines into unix-looking lines
20250-
20251-      # Save some flags that might be reset by InteractiveCommand()
20252-      my $save_opt_lines = $main::opt_lines;
20253-
20254-      if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) {
20255-        last;    # exit when we get an interactive command to quit
20256-      }
20257-
20258-      # Restore flags
20259-      $main::opt_lines = $save_opt_lines;
20260-    }
20261-  }
20262-}
20263-
20264-# Takes the original profile, symbols, libs, total count, and the command to run.
20265-# Returns 1 if we should keep going, or 0 if we were asked to quit
20266-sub InteractiveCommand {
20267-  my($orig_profile, $symbols, $libs, $total, $command) = @_;
20268-  $_ = $command;                # just to make future m//'s easier
20269-  if (!defined($_)) {
20270-    print STDERR "\n";
20271-    return 0;
20272-  }
20273-  if (m/^\s*quit/) {
20274-    return 0;
20275-  }
20276-  if (m/^\s*help/) {
20277-    InteractiveHelpMessage();
20278-    return 1;
20279-  }
20280-  # Clear all the mode options -- mode is controlled by "$command"
20281-  $main::opt_text = 0;
20282-  $main::opt_callgrind = 0;
20283-  $main::opt_disasm = 0;
20284-  $main::opt_list = 0;
20285-  $main::opt_gv = 0;
20286-  $main::opt_evince = 0;
20287-  $main::opt_cum = 0;
20288-
20289-  if (m/^\s*(text|top)(\d*)\s*(.*)/) {
20290-    $main::opt_text = 1;
20291-
20292-    my $line_limit = ($2 ne "") ? int($2) : 10;
20293-
20294-    my $routine;
20295-    my $ignore;
20296-    ($routine, $ignore) = ParseInteractiveArgs($3);
20297-
20298-    my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
20299-    my $reduced = ReduceProfile($symbols, $profile);
20300-
20301-    # Get derived profiles
20302-    my $flat = FlatProfile($reduced);
20303-    my $cumulative = CumulativeProfile($reduced);
20304-
20305-    PrintText($symbols, $flat, $cumulative, $line_limit);
20306-    return 1;
20307-  }
20308-  if (m/^\s*callgrind\s*([^ \n]*)/) {
20309-    $main::opt_callgrind = 1;
20310-
20311-    # Get derived profiles
20312-    my $calls = ExtractCalls($symbols, $orig_profile);
20313-    my $filename = $1;
20314-    if ( $1 eq '' ) {
20315-      $filename = TempName($main::next_tmpfile, "callgrind");
20316-    }
20317-    PrintCallgrind($calls, $filename);
20318-    if ( $1 eq '' ) {
20319-      RunKcachegrind($filename, " & ");
20320-      $main::next_tmpfile++;
20321-    }
20322-
20323-    return 1;
20324-  }
20325-  if (m/^\s*(web)?list\s*(.+)/) {
20326-    my $html = (defined($1) && ($1 eq "web"));
20327-    $main::opt_list = 1;
20328-
20329-    my $routine;
20330-    my $ignore;
20331-    ($routine, $ignore) = ParseInteractiveArgs($2);
20332-
20333-    my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
20334-    my $reduced = ReduceProfile($symbols, $profile);
20335-
20336-    # Get derived profiles
20337-    my $flat = FlatProfile($reduced);
20338-    my $cumulative = CumulativeProfile($reduced);
20339-
20340-    PrintListing($total, $libs, $flat, $cumulative, $routine, $html);
20341-    return 1;
20342-  }
20343-  if (m/^\s*disasm\s*(.+)/) {
20344-    $main::opt_disasm = 1;
20345-
20346-    my $routine;
20347-    my $ignore;
20348-    ($routine, $ignore) = ParseInteractiveArgs($1);
20349-
20350-    # Process current profile to account for various settings
20351-    my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
20352-    my $reduced = ReduceProfile($symbols, $profile);
20353-
20354-    # Get derived profiles
20355-    my $flat = FlatProfile($reduced);
20356-    my $cumulative = CumulativeProfile($reduced);
20357-
20358-    PrintDisassembly($libs, $flat, $cumulative, $routine);
20359-    return 1;
20360-  }
20361-  if (m/^\s*(gv|web|evince)\s*(.*)/) {
20362-    $main::opt_gv = 0;
20363-    $main::opt_evince = 0;
20364-    $main::opt_web = 0;
20365-    if ($1 eq "gv") {
20366-      $main::opt_gv = 1;
20367-    } elsif ($1 eq "evince") {
20368-      $main::opt_evince = 1;
20369-    } elsif ($1 eq "web") {
20370-      $main::opt_web = 1;
20371-    }
20372-
20373-    my $focus;
20374-    my $ignore;
20375-    ($focus, $ignore) = ParseInteractiveArgs($2);
20376-
20377-    # Process current profile to account for various settings
20378-    my $profile = ProcessProfile($total, $orig_profile, $symbols,
20379-                                 $focus, $ignore);
20380-    my $reduced = ReduceProfile($symbols, $profile);
20381-
20382-    # Get derived profiles
20383-    my $flat = FlatProfile($reduced);
20384-    my $cumulative = CumulativeProfile($reduced);
20385-
20386-    if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) {
20387-      if ($main::opt_gv) {
20388-        RunGV(TempName($main::next_tmpfile, "ps"), " &");
20389-      } elsif ($main::opt_evince) {
20390-        RunEvince(TempName($main::next_tmpfile, "pdf"), " &");
20391-      } elsif ($main::opt_web) {
20392-        RunWeb(TempName($main::next_tmpfile, "svg"));
20393-      }
20394-      $main::next_tmpfile++;
20395-    }
20396-    return 1;
20397-  }
20398-  if (m/^\s*$/) {
20399-    return 1;
20400-  }
20401-  print STDERR "Unknown command: try 'help'.\n";
20402-  return 1;
20403-}
20404-
20405-
20406-sub ProcessProfile {
20407-  my $total_count = shift;
20408-  my $orig_profile = shift;
20409-  my $symbols = shift;
20410-  my $focus = shift;
20411-  my $ignore = shift;
20412-
20413-  # Process current profile to account for various settings
20414-  my $profile = $orig_profile;
20415-  printf("Total: %s %s\n", Unparse($total_count), Units());
20416-  if ($focus ne '') {
20417-    $profile = FocusProfile($symbols, $profile, $focus);
20418-    my $focus_count = TotalProfile($profile);
20419-    printf("After focusing on '%s': %s %s of %s (%0.1f%%)\n",
20420-           $focus,
20421-           Unparse($focus_count), Units(),
20422-           Unparse($total_count), ($focus_count*100.0) / $total_count);
20423-  }
20424-  if ($ignore ne '') {
20425-    $profile = IgnoreProfile($symbols, $profile, $ignore);
20426-    my $ignore_count = TotalProfile($profile);
20427-    printf("After ignoring '%s': %s %s of %s (%0.1f%%)\n",
20428-           $ignore,
20429-           Unparse($ignore_count), Units(),
20430-           Unparse($total_count),
20431-           ($ignore_count*100.0) / $total_count);
20432-  }
20433-
20434-  return $profile;
20435-}
20436-
20437-sub InteractiveHelpMessage {
20438-  print STDERR <<ENDOFHELP;
20439-Interactive jeprof mode
20440-
20441-Commands:
20442-  gv
20443-  gv [focus] [-ignore1] [-ignore2]
20444-      Show graphical hierarchical display of current profile.  Without
20445-      any arguments, shows all samples in the profile.  With the optional
20446-      "focus" argument, restricts the samples shown to just those where
20447-      the "focus" regular expression matches a routine name on the stack
20448-      trace.
20449-
20450-  web
20451-  web [focus] [-ignore1] [-ignore2]
20452-      Like GV, but displays profile in your web browser instead of using
20453-      Ghostview. Works best if your web browser is already running.
20454-      To change the browser that gets used:
20455-      On Linux, set the /etc/alternatives/gnome-www-browser symlink.
20456-      On OS X, change the Finder association for SVG files.
20457-
20458-  list [routine_regexp] [-ignore1] [-ignore2]
20459-      Show source listing of routines whose names match "routine_regexp"
20460-
20461-  weblist [routine_regexp] [-ignore1] [-ignore2]
20462-     Displays a source listing of routines whose names match "routine_regexp"
20463-     in a web browser.  You can click on source lines to view the
20464-     corresponding disassembly.
20465-
20466-  top [--cum] [-ignore1] [-ignore2]
20467-  top20 [--cum] [-ignore1] [-ignore2]
20468-  top37 [--cum] [-ignore1] [-ignore2]
20469-      Show top lines ordered by flat profile count, or cumulative count
20470-      if --cum is specified.  If a number is present after 'top', the
20471-      top K routines will be shown (defaults to showing the top 10)
20472-
20473-  disasm [routine_regexp] [-ignore1] [-ignore2]
20474-      Show disassembly of routines whose names match "routine_regexp",
20475-      annotated with sample counts.
20476-
20477-  callgrind
20478-  callgrind [filename]
20479-      Generates callgrind file. If no filename is given, kcachegrind is called.
20480-
20481-  help - This listing
20482-  quit or ^D - End jeprof
20483-
20484-For commands that accept optional -ignore tags, samples where any routine in
20485-the stack trace matches the regular expression in any of the -ignore
20486-parameters will be ignored.
20487-
20488-Further pprof details are available at this location (or one similar):
20489-
20490- /usr/doc/gperftools-$PPROF_VERSION/cpu_profiler.html
20491- /usr/doc/gperftools-$PPROF_VERSION/heap_profiler.html
20492-
20493-ENDOFHELP
20494-}
20495-sub ParseInteractiveArgs {
20496-  my $args = shift;
20497-  my $focus = "";
20498-  my $ignore = "";
20499-  my @x = split(/ +/, $args);
20500-  foreach $a (@x) {
20501-    if ($a =~ m/^(--|-)lines$/) {
20502-      $main::opt_lines = 1;
20503-    } elsif ($a =~ m/^(--|-)cum$/) {
20504-      $main::opt_cum = 1;
20505-    } elsif ($a =~ m/^-(.*)/) {
20506-      $ignore .= (($ignore ne "") ? "|" : "" ) . $1;
20507-    } else {
20508-      $focus .= (($focus ne "") ? "|" : "" ) . $a;
20509-    }
20510-  }
20511-  if ($ignore ne "") {
20512-    print STDERR "Ignoring samples in call stacks that match '$ignore'\n";
20513-  }
20514-  return ($focus, $ignore);
20515-}
20516-
20517-##### Output code #####
20518-
20519-sub TempName {
20520-  my $fnum = shift;
20521-  my $ext = shift;
20522-  my $file = "$main::tmpfile_ps.$fnum.$ext";
20523-  $main::tempnames{$file} = 1;
20524-  return $file;
20525-}
20526-
20527-# Print profile data in packed binary format (64-bit) to standard out
20528-sub PrintProfileData {
20529-  my $profile = shift;
20530-
20531-  # print header (64-bit style)
20532-  # (zero) (header-size) (version) (sample-period) (zero)
20533-  print pack('L*', 0, 0, 3, 0, 0, 0, 1, 0, 0, 0);
20534-
20535-  foreach my $k (keys(%{$profile})) {
20536-    my $count = $profile->{$k};
20537-    my @addrs = split(/\n/, $k);
20538-    if ($#addrs >= 0) {
20539-      my $depth = $#addrs + 1;
20540-      # int(foo / 2**32) is the only reliable way to get rid of bottom
20541-      # 32 bits on both 32- and 64-bit systems.
20542-      print pack('L*', $count & 0xFFFFFFFF, int($count / 2**32));
20543-      print pack('L*', $depth & 0xFFFFFFFF, int($depth / 2**32));
20544-
20545-      foreach my $full_addr (@addrs) {
20546-        my $addr = $full_addr;
20547-        $addr =~ s/0x0*//;  # strip off leading 0x, zeroes
20548-        if (length($addr) > 16) {
20549-          print STDERR "Invalid address in profile: $full_addr\n";
20550-          next;
20551-        }
20552-        my $low_addr = substr($addr, -8);       # get last 8 hex chars
20553-        my $high_addr = substr($addr, -16, 8);  # get up to 8 more hex chars
20554-        print pack('L*', hex('0x' . $low_addr), hex('0x' . $high_addr));
20555-      }
20556-    }
20557-  }
20558-}
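
A minimal sketch of the 64-bit split used in PrintProfileData above, with a
hypothetical count of 2**32 + 5 (the value and variable names are illustrative
only); the low word is emitted first, matching the pack() calls in the loop:

    my $count = 2**32 + 5;             # hypothetical 64-bit value
    my $low   = $count & 0xFFFFFFFF;   # 5 -- bottom 32 bits
    my $high  = int($count / 2**32);   # 1 -- top 32 bits without a 64-bit shift
    print pack('L*', $low, $high);     # low word first, as above
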
20559-
20560-# Print symbols and profile data
20561-sub PrintSymbolizedProfile {
20562-  my $symbols = shift;
20563-  my $profile = shift;
20564-  my $prog = shift;
20565-
20566-  $SYMBOL_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
20567-  my $symbol_marker = $&;
20568-
20569-  print '--- ', $symbol_marker, "\n";
20570-  if (defined($prog)) {
20571-    print 'binary=', $prog, "\n";
20572-  }
20573-  while (my ($pc, $name) = each(%{$symbols})) {
20574-    my $sep = ' ';
20575-    print '0x', $pc;
20576-    # We have a list of function names, which include the inlined
20577-    # calls.  They are separated (and terminated) by --, which is
20578-    # illegal in function names.
20579-    for (my $j = 2; $j <= $#{$name}; $j += 3) {
20580-      print $sep, $name->[$j];
20581-      $sep = '--';
20582-    }
20583-    print "\n";
20584-  }
20585-  print '---', "\n";
20586-
20587-  my $profile_marker;
20588-  if ($main::profile_type eq 'heap') {
20589-    $HEAP_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
20590-    $profile_marker = $&;
20591-  } elsif ($main::profile_type eq 'growth') {
20592-    $GROWTH_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
20593-    $profile_marker = $&;
20594-  } elsif ($main::profile_type eq 'contention') {
20595-    $CONTENTION_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
20596-    $profile_marker = $&;
20597-  } else { # elsif ($main::profile_type eq 'cpu')
20598-    $PROFILE_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
20599-    $profile_marker = $&;
20600-  }
20601-
20602-  print '--- ', $profile_marker, "\n";
20603-  if (defined($main::collected_profile)) {
20604-    # if used with remote fetch, simply dump the collected profile to output.
20605-    open(SRC, "<$main::collected_profile");
20606-    while (<SRC>) {
20607-      print $_;
20608-    }
20609-    close(SRC);
20610-  } else {
20611-    # --raw/http: For everything to work correctly for non-remote profiles, we
20612-    # would need to extend PrintProfileData() to handle all possible profile
20613-    # types, re-enable the code that is currently disabled in ReadCPUProfile()
20614-    # and FixCallerAddresses(), and remove the remote profile dumping code in
20615-    # the block above.
20616-    die "--raw/http: jeprof can only dump remote profiles for --raw\n";
20617-    # dump a cpu-format profile to standard out
20618-    PrintProfileData($profile);
20619-  }
20620-}
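
A rough sketch of the shape of the output produced by PrintSymbolizedProfile,
assuming the usual page names so the markers come out as "symbol" and, for a
heap profile, "heap"; the binary path, address, and function names are
hypothetical:

    --- symbol
    binary=/bin/ls
    0x00000000004004f0 outer_function--inlined_helper
    ---
    --- heap
    <the remotely collected profile is dumped here verbatim>
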
20621-
20622-# Print text output
20623-sub PrintText {
20624-  my $symbols = shift;
20625-  my $flat = shift;
20626-  my $cumulative = shift;
20627-  my $line_limit = shift;
20628-
20629-  my $total = TotalProfile($flat);
20630-
20631-  # Which profile to sort by?
20632-  my $s = $main::opt_cum ? $cumulative : $flat;
20633-
20634-  my $running_sum = 0;
20635-  my $lines = 0;
20636-  foreach my $k (sort { GetEntry($s, $b) <=> GetEntry($s, $a) || $a cmp $b }
20637-                 keys(%{$cumulative})) {
20638-    my $f = GetEntry($flat, $k);
20639-    my $c = GetEntry($cumulative, $k);
20640-    $running_sum += $f;
20641-
20642-    my $sym = $k;
20643-    if (exists($symbols->{$k})) {
20644-      $sym = $symbols->{$k}->[0] . " " . $symbols->{$k}->[1];
20645-      if ($main::opt_addresses) {
20646-        $sym = $k . " " . $sym;
20647-      }
20648-    }
20649-
20650-    if ($f != 0 || $c != 0) {
20651-      printf("%8s %6s %6s %8s %6s %s\n",
20652-             Unparse($f),
20653-             Percent($f, $total),
20654-             Percent($running_sum, $total),
20655-             Unparse($c),
20656-             Percent($c, $total),
20657-             $sym);
20658-    }
20659-    $lines++;
20660-    last if ($line_limit >= 0 && $lines >= $line_limit);
20661-  }
20662-}
20663-
20664-# Callgrind format has a compression for repeated function and file
20665-# names.  You show the name the first time, and just use its number
20666-# subsequently.  This can cut down the file to about a third or a
20667-# quarter of its uncompressed size.  $key and $val are the key/value
20668-# pair that would normally be printed by callgrind; $map is a map from
20669-# value to number.
20670-sub CompressedCGName {
20671-  my($key, $val, $map) = @_;
20672-  my $idx = $map->{$val};
20673-  # For very short values, providing an index hurts rather than helps.
20674-  if (length($val) <= 3) {
20675-    return "$key=$val\n";
20676-  } elsif (defined($idx)) {
20677-    return "$key=($idx)\n";
20678-  } else {
20679-    # scalar(keys(%{$map})) gives the number of items in the map.
20680-    $idx = scalar(keys(%{$map})) + 1;
20681-    $map->{$val} = $idx;
20682-    return "$key=($idx) $val\n";
20683-  }
20684-}
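
A hypothetical usage sketch for CompressedCGName, showing how the index map
compresses a repeated file name while leaving very short values alone:

    my %files;
    print CompressedCGName("fl", "/foo/bar.cc", \%files);  # "fl=(1) /foo/bar.cc" -- first use defines index 1
    print CompressedCGName("fl", "/foo/bar.cc", \%files);  # "fl=(1)"             -- later uses emit only the index
    print CompressedCGName("fl", "ab", \%files);           # "fl=ab"              -- 3 chars or fewer: not indexed
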
20685-
20686-# Print the call graph in a way that's suitable for callgrind.
20687-sub PrintCallgrind {
20688-  my $calls = shift;
20689-  my $filename;
20690-  my %filename_to_index_map;
20691-  my %fnname_to_index_map;
20692-
20693-  if ($main::opt_interactive) {
20694-    $filename = shift;
20695-    print STDERR "Writing callgrind file to '$filename'.\n"
20696-  } else {
20697-    $filename = "&STDOUT";
20698-  }
20699-  open(CG, ">$filename");
20700-  printf CG ("events: Hits\n\n");
20701-  foreach my $call ( map { $_->[0] }
20702-                     sort { $a->[1] cmp $b ->[1] ||
20703-                            $a->[2] <=> $b->[2] }
20704-                     map { /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/;
20705-                           [$_, $1, $2] }
20706-                     keys %$calls ) {
20707-    my $count = int($calls->{$call});
20708-    $call =~ /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/;
20709-    my ( $caller_file, $caller_line, $caller_function,
20710-         $callee_file, $callee_line, $callee_function ) =
20711-       ( $1, $2, $3, $5, $6, $7 );
20712-
20713-    # TODO(csilvers): for better compression, collect all the
20714-    # caller/callee_files and functions first, before printing
20715-    # anything, and only compress those referenced more than once.
20716-    printf CG CompressedCGName("fl", $caller_file, \%filename_to_index_map);
20717-    printf CG CompressedCGName("fn", $caller_function, \%fnname_to_index_map);
20718-    if (defined $6) {
20719-      printf CG CompressedCGName("cfl", $callee_file, \%filename_to_index_map);
20720-      printf CG CompressedCGName("cfn", $callee_function, \%fnname_to_index_map);
20721-      printf CG ("calls=$count $callee_line\n");
20722-    }
20723-    printf CG ("$caller_line $count\n\n");
20724-  }
20725-}
20726-
20727-# Print disassembly for all routines that match $main::opt_disasm
20728-sub PrintDisassembly {
20729-  my $libs = shift;
20730-  my $flat = shift;
20731-  my $cumulative = shift;
20732-  my $disasm_opts = shift;
20733-
20734-  my $total = TotalProfile($flat);
20735-
20736-  foreach my $lib (@{$libs}) {
20737-    my $symbol_table = GetProcedureBoundaries($lib->[0], $disasm_opts);
20738-    my $offset = AddressSub($lib->[1], $lib->[3]);
20739-    foreach my $routine (sort ByName keys(%{$symbol_table})) {
20740-      my $start_addr = $symbol_table->{$routine}->[0];
20741-      my $end_addr = $symbol_table->{$routine}->[1];
20742-      # See if there are any samples in this routine
20743-      my $length = hex(AddressSub($end_addr, $start_addr));
20744-      my $addr = AddressAdd($start_addr, $offset);
20745-      for (my $i = 0; $i < $length; $i++) {
20746-        if (defined($cumulative->{$addr})) {
20747-          PrintDisassembledFunction($lib->[0], $offset,
20748-                                    $routine, $flat, $cumulative,
20749-                                    $start_addr, $end_addr, $total);
20750-          last;
20751-        }
20752-        $addr = AddressInc($addr);
20753-      }
20754-    }
20755-  }
20756-}
20757-
20758-# Return reference to array of tuples of the form:
20759-#       [start_address, filename, linenumber, instruction, limit_address]
20760-# E.g.,
20761-#       ["0x806c43d", "/foo/bar.cc", 131, "ret", "0x806c440"]
20762-sub Disassemble {
20763-  my $prog = shift;
20764-  my $offset = shift;
20765-  my $start_addr = shift;
20766-  my $end_addr = shift;
20767-
20768-  my $objdump = $obj_tool_map{"objdump"};
20769-  my $cmd = ShellEscape($objdump, "-C", "-d", "-l", "--no-show-raw-insn",
20770-                        "--start-address=0x$start_addr",
20771-                        "--stop-address=0x$end_addr", $prog);
20772-  open(OBJDUMP, "$cmd |") || error("$cmd: $!\n");
20773-  my @result = ();
20774-  my $filename = "";
20775-  my $linenumber = -1;
20776-  my $last = ["", "", "", ""];
20777-  while (<OBJDUMP>) {
20778-    s/\r//g;         # turn windows-looking lines into unix-looking lines
20779-    chop;
20780-    if (m|\s*([^:\s]+):(\d+)\s*$|) {
20781-      # Location line of the form:
20782-      #   <filename>:<linenumber>
20783-      $filename = $1;
20784-      $linenumber = $2;
20785-    } elsif (m/^ +([0-9a-f]+):\s*(.*)/) {
20786-      # Disassembly line -- zero-extend address to full length
20787-      my $addr = HexExtend($1);
20788-      my $k = AddressAdd($addr, $offset);
20789-      $last->[4] = $k;   # Store ending address for previous instruction
20790-      $last = [$k, $filename, $linenumber, $2, $end_addr];
20791-      push(@result, $last);
20792-    }
20793-  }
20794-  close(OBJDUMP);
20795-  return @result;
20796-}
20797-
20798-# The input file should contain /proc/maps-like lines (the same format
20799-# as expected from the profiles) or lines that look like hex addresses
20800-# (like "0xDEADBEEF").  We will parse all of the /proc/maps output, and
20801-# for each hex address we will output a "short" symbol name, one per
20802-# line, in the same order as the input.
20803-sub PrintSymbols {
20804-  my $maps_and_symbols_file = shift;
20805-
20806-  # ParseLibraries expects pcs to be in a set.  Fine by us...
20807-  my @pclist = ();   # pcs in sorted order
20808-  my $pcs = {};
20809-  my $map = "";
20810-  foreach my $line (<$maps_and_symbols_file>) {
20811-    $line =~ s/\r//g;    # turn windows-looking lines into unix-looking lines
20812-    if ($line =~ /\b(0x[0-9a-f]+)\b/i) {
20813-      push(@pclist, HexExtend($1));
20814-      $pcs->{$pclist[-1]} = 1;
20815-    } else {
20816-      $map .= $line;
20817-    }
20818-  }
20819-
20820-  my $libs = ParseLibraries($main::prog, $map, $pcs);
20821-  my $symbols = ExtractSymbols($libs, $pcs);
20822-
20823-  foreach my $pc (@pclist) {
20824-    # ->[0] is the shortname, ->[2] is the full name
20825-    print(($symbols->{$pc}->[0] || "??") . "\n");
20826-  }
20827-}
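
A hypothetical input/output sketch for PrintSymbols: /proc/maps-style lines
feed ParseLibraries, each hex address produces one output line in input order,
and unresolved addresses print as "??" (the maps line, addresses, and symbol
name below are made up):

    # stdin
    00400000-0040b000 r-xp 00000000 08:01 12345    /bin/ls
    0x0000000000400500
    0x00000000deadbeef

    # stdout
    main
    ??
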
20828-
20829-
20830-# For sorting functions by name
20831-sub ByName {
20832-  return ShortFunctionName($a) cmp ShortFunctionName($b);
20833-}
20834-
20835-# Print source-listing for all routines that match $list_opts
20836-sub PrintListing {
20837-  my $total = shift;
20838-  my $libs = shift;
20839-  my $flat = shift;
20840-  my $cumulative = shift;
20841-  my $list_opts = shift;
20842-  my $html = shift;
20843-
20844-  my $output = \*STDOUT;
20845-  my $fname = "";
20846-
20847-  if ($html) {
20848-    # Arrange to write the output to a temporary file
20849-    $fname = TempName($main::next_tmpfile, "html");
20850-    $main::next_tmpfile++;
20851-    if (!open(TEMP, ">$fname")) {
20852-      print STDERR "$fname: $!\n";
20853-      return;
20854-    }
20855-    $output = \*TEMP;
20856-    print $output HtmlListingHeader();
20857-    printf $output ("<div class=\"legend\">%s<br>Total: %s %s</div>\n",
20858-                    $main::prog, Unparse($total), Units());
20859-  }
20860-
20861-  my $listed = 0;
20862-  foreach my $lib (@{$libs}) {
20863-    my $symbol_table = GetProcedureBoundaries($lib->[0], $list_opts);
20864-    my $offset = AddressSub($lib->[1], $lib->[3]);
20865-    foreach my $routine (sort ByName keys(%{$symbol_table})) {
20866-      # Print if there are any samples in this routine
20867-      my $start_addr = $symbol_table->{$routine}->[0];
20868-      my $end_addr = $symbol_table->{$routine}->[1];
20869-      my $length = hex(AddressSub($end_addr, $start_addr));
20870-      my $addr = AddressAdd($start_addr, $offset);
20871-      for (my $i = 0; $i < $length; $i++) {
20872-        if (defined($cumulative->{$addr})) {
20873-          $listed += PrintSource(
20874-            $lib->[0], $offset,
20875-            $routine, $flat, $cumulative,
20876-            $start_addr, $end_addr,
20877-            $html,
20878-            $output);
20879-          last;
20880-        }
20881-        $addr = AddressInc($addr);
20882-      }
20883-    }
20884-  }
20885-
20886-  if ($html) {
20887-    if ($listed > 0) {
20888-      print $output HtmlListingFooter();
20889-      close($output);
20890-      RunWeb($fname);
20891-    } else {
20892-      close($output);
20893-      unlink($fname);
20894-    }
20895-  }
20896-}
20897-
20898-sub HtmlListingHeader {
20899-  return <<'EOF';
20900-<!DOCTYPE html>
20901-<html>
20902-<head>
20903-<title>Pprof listing</title>
20904-<style type="text/css">
20905-body {
20906-  font-family: sans-serif;
20907-}
20908-h1 {
20909-  font-size: 1.5em;
20910-  margin-bottom: 4px;
20911-}
20912-.legend {
20913-  font-size: 1.25em;
20914-}
20915-.line {
20916-  color: #aaaaaa;
20917-}
20918-.nop {
20919-  color: #aaaaaa;
20920-}
20921-.unimportant {
20922-  color: #cccccc;
20923-}
20924-.disasmloc {
20925-  color: #000000;
20926-}
20927-.deadsrc {
20928-  cursor: pointer;
20929-}
20930-.deadsrc:hover {
20931-  background-color: #eeeeee;
20932-}
20933-.livesrc {
20934-  color: #0000ff;
20935-  cursor: pointer;
20936-}
20937-.livesrc:hover {
20938-  background-color: #eeeeee;
20939-}
20940-.asm {
20941-  color: #008800;
20942-  display: none;
20943-}
20944-</style>
20945-<script type="text/javascript">
20946-function jeprof_toggle_asm(e) {
20947-  var target;
20948-  if (!e) e = window.event;
20949-  if (e.target) target = e.target;
20950-  else if (e.srcElement) target = e.srcElement;
20951-
20952-  if (target) {
20953-    var asm = target.nextSibling;
20954-    if (asm && asm.className == "asm") {
20955-      asm.style.display = (asm.style.display == "block" ? "" : "block");
20956-      e.preventDefault();
20957-      return false;
20958-    }
20959-  }
20960-}
20961-</script>
20962-</head>
20963-<body>
20964-EOF
20965-}
20966-
20967-sub HtmlListingFooter {
20968-  return <<'EOF';
20969-</body>
20970-</html>
20971-EOF
20972-}
20973-
20974-sub HtmlEscape {
20975-  my $text = shift;
20976-  $text =~ s/&/&amp;/g;
20977-  $text =~ s/</&lt;/g;
20978-  $text =~ s/>/&gt;/g;
20979-  return $text;
20980-}
20981-
20982-# Returns the indentation of the line, if it has any non-whitespace
20983-# characters.  Otherwise, returns -1.
20984-sub Indentation {
20985-  my $line = shift;
20986-  if ($line =~ m/^(\s*)\S/) {
20987-    return length($1);
20988-  } else {
20989-    return -1;
20990-  }
20991-}
20992-
20993-# If the symbol table contains inlining info, Disassemble() may tag an
20994-# instruction with a location inside an inlined function.  But for
20995-# source listings, we prefer to use the location in the function we
20996-# are listing.  So use MapToSymbols() to fetch full location
20997-# information for each instruction and then pick out the first
20998-# location from a location list (location list contains callers before
20999-# callees in case of inlining).
21000-#
21001-# After this routine has run, each entry in $instructions contains:
21002-#   [0] start address
21003-#   [1] filename for function we are listing
21004-#   [2] line number for function we are listing
21005-#   [3] disassembly
21006-#   [4] limit address
21007-#   [5] most specific filename (may be different from [1] due to inlining)
21008-#   [6] most specific line number (may be different from [2] due to inlining)
21009-sub GetTopLevelLineNumbers {
21010-  my ($lib, $offset, $instructions) = @_;
21011-  my $pcs = [];
21012-  for (my $i = 0; $i <= $#{$instructions}; $i++) {
21013-    push(@{$pcs}, $instructions->[$i]->[0]);
21014-  }
21015-  my $symbols = {};
21016-  MapToSymbols($lib, $offset, $pcs, $symbols);
21017-  for (my $i = 0; $i <= $#{$instructions}; $i++) {
21018-    my $e = $instructions->[$i];
21019-    push(@{$e}, $e->[1]);
21020-    push(@{$e}, $e->[2]);
21021-    my $addr = $e->[0];
21022-    my $sym = $symbols->{$addr};
21023-    if (defined($sym)) {
21024-      if ($#{$sym} >= 2 && $sym->[1] =~ m/^(.*):(\d+)$/) {
21025-        $e->[1] = $1;  # File name
21026-        $e->[2] = $2;  # Line number
21027-      }
21028-    }
21029-  }
21030-}
21031-
21032-# Print source-listing for one routine
21033-sub PrintSource {
21034-  my $prog = shift;
21035-  my $offset = shift;
21036-  my $routine = shift;
21037-  my $flat = shift;
21038-  my $cumulative = shift;
21039-  my $start_addr = shift;
21040-  my $end_addr = shift;
21041-  my $html = shift;
21042-  my $output = shift;
21043-
21044-  # Disassemble all instructions (just to get line numbers)
21045-  my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr);
21046-  GetTopLevelLineNumbers($prog, $offset, \@instructions);
21047-
21048-  # Hack 1: assume that the first source file encountered in the
21049-  # disassembly contains the routine
21050-  my $filename = undef;
21051-  for (my $i = 0; $i <= $#instructions; $i++) {
21052-    if ($instructions[$i]->[2] >= 0) {
21053-      $filename = $instructions[$i]->[1];
21054-      last;
21055-    }
21056-  }
21057-  if (!defined($filename)) {
21058-    print STDERR "no filename found in $routine\n";
21059-    return 0;
21060-  }
21061-
21062-  # Hack 2: assume that the largest line number from $filename is the
21063-  # end of the procedure.  This is typically safe since if P1 contains
21064-  # an inlined call to P2, then P2 usually occurs earlier in the
21065-  # source file.  If this does not work, we might have to compute a
21066-  # density profile or just print all regions we find.
21067-  my $lastline = 0;
21068-  for (my $i = 0; $i <= $#instructions; $i++) {
21069-    my $f = $instructions[$i]->[1];
21070-    my $l = $instructions[$i]->[2];
21071-    if (($f eq $filename) && ($l > $lastline)) {
21072-      $lastline = $l;
21073-    }
21074-  }
21075-
21076-  # Hack 3: assume the first source location from "filename" is the start of
21077-  # the source code.
21078-  my $firstline = 1;
21079-  for (my $i = 0; $i <= $#instructions; $i++) {
21080-    if ($instructions[$i]->[1] eq $filename) {
21081-      $firstline = $instructions[$i]->[2];
21082-      last;
21083-    }
21084-  }
21085-
21086-  # Hack 4: Extend last line forward until its indentation is less than
21087-  # the indentation we saw on $firstline
21088-  my $oldlastline = $lastline;
21089-  {
21090-    if (!open(FILE, "<$filename")) {
21091-      print STDERR "$filename: $!\n";
21092-      return 0;
21093-    }
21094-    my $l = 0;
21095-    my $first_indentation = -1;
21096-    while (<FILE>) {
21097-      s/\r//g;         # turn windows-looking lines into unix-looking lines
21098-      $l++;
21099-      my $indent = Indentation($_);
21100-      if ($l >= $firstline) {
21101-        if ($first_indentation < 0 && $indent >= 0) {
21102-          $first_indentation = $indent;
21103-          last if ($first_indentation == 0);
21104-        }
21105-      }
21106-      if ($l >= $lastline && $indent >= 0) {
21107-        if ($indent >= $first_indentation) {
21108-          $lastline = $l+1;
21109-        } else {
21110-          last;
21111-        }
21112-      }
21113-    }
21114-    close(FILE);
21115-  }
21116-
21117-  # Assign all samples to the range $firstline,$lastline.
21118-  # Hack 5: If an instruction does not occur in the range, its samples
21119-  # are moved to the next instruction that occurs in the range.
21120-  my $samples1 = {};        # Map from line number to flat count
21121-  my $samples2 = {};        # Map from line number to cumulative count
21122-  my $running1 = 0;         # Unassigned flat counts
21123-  my $running2 = 0;         # Unassigned cumulative counts
21124-  my $total1 = 0;           # Total flat counts
21125-  my $total2 = 0;           # Total cumulative counts
21126-  my %disasm = ();          # Map from line number to disassembly
21127-  my $running_disasm = "";  # Unassigned disassembly
21128-  my $skip_marker = "---\n";
21129-  if ($html) {
21130-    $skip_marker = "";
21131-    for (my $l = $firstline; $l <= $lastline; $l++) {
21132-      $disasm{$l} = "";
21133-    }
21134-  }
21135-  my $last_dis_filename = '';
21136-  my $last_dis_linenum = -1;
21137-  my $last_touched_line = -1;  # To detect gaps in disassembly for a line
21138-  foreach my $e (@instructions) {
21139-    # Add up counts for all addresses that fall inside this instruction
21140-    my $c1 = 0;
21141-    my $c2 = 0;
21142-    for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) {
21143-      $c1 += GetEntry($flat, $a);
21144-      $c2 += GetEntry($cumulative, $a);
21145-    }
21146-
21147-    if ($html) {
21148-      my $dis = sprintf("      %6s %6s \t\t%8s: %s ",
21149-                        HtmlPrintNumber($c1),
21150-                        HtmlPrintNumber($c2),
21151-                        UnparseAddress($offset, $e->[0]),
21152-                        CleanDisassembly($e->[3]));
21153-
21154-      # Append the most specific source line associated with this instruction
21155-      if (length($dis) < 80) { $dis .= (' ' x (80 - length($dis))) };
21156-      $dis = HtmlEscape($dis);
21157-      my $f = $e->[5];
21158-      my $l = $e->[6];
21159-      if ($f ne $last_dis_filename) {
21160-        $dis .= sprintf("<span class=disasmloc>%s:%d</span>",
21161-                        HtmlEscape(CleanFileName($f)), $l);
21162-      } elsif ($l ne $last_dis_linenum) {
21163-        # De-emphasize the unchanged file name portion
21164-        $dis .= sprintf("<span class=unimportant>%s</span>" .
21165-                        "<span class=disasmloc>:%d</span>",
21166-                        HtmlEscape(CleanFileName($f)), $l);
21167-      } else {
21168-        # De-emphasize the entire location
21169-        $dis .= sprintf("<span class=unimportant>%s:%d</span>",
21170-                        HtmlEscape(CleanFileName($f)), $l);
21171-      }
21172-      $last_dis_filename = $f;
21173-      $last_dis_linenum = $l;
21174-      $running_disasm .= $dis;
21175-      $running_disasm .= "\n";
21176-    }
21177-
21178-    $running1 += $c1;
21179-    $running2 += $c2;
21180-    $total1 += $c1;
21181-    $total2 += $c2;
21182-    my $file = $e->[1];
21183-    my $line = $e->[2];
21184-    if (($file eq $filename) &&
21185-        ($line >= $firstline) &&
21186-        ($line <= $lastline)) {
21187-      # Assign all accumulated samples to this line
21188-      AddEntry($samples1, $line, $running1);
21189-      AddEntry($samples2, $line, $running2);
21190-      $running1 = 0;
21191-      $running2 = 0;
21192-      if ($html) {
21193-        if ($line != $last_touched_line && $disasm{$line} ne '') {
21194-          $disasm{$line} .= "\n";
21195-        }
21196-        $disasm{$line} .= $running_disasm;
21197-        $running_disasm = '';
21198-        $last_touched_line = $line;
21199-      }
21200-    }
21201-  }
21202-
21203-  # Assign any leftover samples to $lastline
21204-  AddEntry($samples1, $lastline, $running1);
21205-  AddEntry($samples2, $lastline, $running2);
21206-  if ($html) {
21207-    if ($lastline != $last_touched_line && $disasm{$lastline} ne '') {
21208-      $disasm{$lastline} .= "\n";
21209-    }
21210-    $disasm{$lastline} .= $running_disasm;
21211-  }
21212-
21213-  if ($html) {
21214-    printf $output (
21215-      "<h1>%s</h1>%s\n<pre onClick=\"jeprof_toggle_asm()\">\n" .
21216-      "Total:%6s %6s (flat / cumulative %s)\n",
21217-      HtmlEscape(ShortFunctionName($routine)),
21218-      HtmlEscape(CleanFileName($filename)),
21219-      Unparse($total1),
21220-      Unparse($total2),
21221-      Units());
21222-  } else {
21223-    printf $output (
21224-      "ROUTINE ====================== %s in %s\n" .
21225-      "%6s %6s Total %s (flat / cumulative)\n",
21226-      ShortFunctionName($routine),
21227-      CleanFileName($filename),
21228-      Unparse($total1),
21229-      Unparse($total2),
21230-      Units());
21231-  }
21232-  if (!open(FILE, "<$filename")) {
21233-    print STDERR "$filename: $!\n";
21234-    return 0;
21235-  }
21236-  my $l = 0;
21237-  while (<FILE>) {
21238-    s/\r//g;         # turn windows-looking lines into unix-looking lines
21239-    $l++;
21240-    if ($l >= $firstline - 5 &&
21241-        (($l <= $oldlastline + 5) || ($l <= $lastline))) {
21242-      chop;
21243-      my $text = $_;
21244-      if ($l == $firstline) { print $output $skip_marker; }
21245-      my $n1 = GetEntry($samples1, $l);
21246-      my $n2 = GetEntry($samples2, $l);
21247-      if ($html) {
21248-        # Emit a span that has one of the following classes:
21249-        #    livesrc -- has samples
21250-        #    deadsrc -- has disassembly, but with no samples
21251-        #    nop     -- has no matching disassembly
21252-        # Also emit an optional span containing disassembly.
21253-        my $dis = $disasm{$l};
21254-        my $asm = "";
21255-        if (defined($dis) && $dis ne '') {
21256-          $asm = "<span class=\"asm\">" . $dis . "</span>";
21257-        }
21258-        my $source_class = (($n1 + $n2 > 0)
21259-                            ? "livesrc"
21260-                            : (($asm ne "") ? "deadsrc" : "nop"));
21261-        printf $output (
21262-          "<span class=\"line\">%5d</span> " .
21263-          "<span class=\"%s\">%6s %6s %s</span>%s\n",
21264-          $l, $source_class,
21265-          HtmlPrintNumber($n1),
21266-          HtmlPrintNumber($n2),
21267-          HtmlEscape($text),
21268-          $asm);
21269-      } else {
21270-        printf $output(
21271-          "%6s %6s %4d: %s\n",
21272-          UnparseAlt($n1),
21273-          UnparseAlt($n2),
21274-          $l,
21275-          $text);
21276-      }
21277-      if ($l == $lastline)  { print $output $skip_marker; }
21278-    };
21279-  }
21280-  close(FILE);
21281-  if ($html) {
21282-    print $output "</pre>\n";
21283-  }
21284-  return 1;
21285-}
21286-
21287-# Return the source line for the specified file/linenumber.
21288-# Returns undef if not found.
21289-sub SourceLine {
21290-  my $file = shift;
21291-  my $line = shift;
21292-
21293-  # Look in cache
21294-  if (!defined($main::source_cache{$file})) {
21295-    if (100 < scalar keys(%main::source_cache)) {
21296-      # Clear the cache when it gets too big
21297-      %main::source_cache = ();
21298-    }
21299-
21300-    # Read all lines from the file
21301-    if (!open(FILE, "<$file")) {
21302-      print STDERR "$file: $!\n";
21303-      $main::source_cache{$file} = [];  # Cache the negative result
21304-      return undef;
21305-    }
21306-    my $lines = [];
21307-    push(@{$lines}, "");        # So we can use 1-based line numbers as indices
21308-    while (<FILE>) {
21309-      push(@{$lines}, $_);
21310-    }
21311-    close(FILE);
21312-
21313-    # Save the lines in the cache
21314-    $main::source_cache{$file} = $lines;
21315-  }
21316-
21317-  my $lines = $main::source_cache{$file};
21318-  if (($line < 0) || ($line > $#{$lines})) {
21319-    return undef;
21320-  } else {
21321-    return $lines->[$line];
21322-  }
21323-}
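
# [Editor's illustration -- not part of jeprof.]  A small self-contained
# sketch of SourceLine above; it writes a throwaway file so the 1-based
# indexing and the undef-on-out-of-range behaviour can be seen.
sub ExampleSourceLine {
  require File::Temp;
  my ($fh, $path) = File::Temp::tempfile();
  print $fh "first\nsecond\nthird\n";
  close($fh);
  my $line2 = SourceLine($path, 2);   # "second\n" (line numbers are 1-based)
  my $line9 = SourceLine($path, 9);   # undef (past the end of the file)
  return ($line2, $line9);
}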
21324-
21325-# Print disassembly for one routine with interspersed source if available
21326-sub PrintDisassembledFunction {
21327-  my $prog = shift;
21328-  my $offset = shift;
21329-  my $routine = shift;
21330-  my $flat = shift;
21331-  my $cumulative = shift;
21332-  my $start_addr = shift;
21333-  my $end_addr = shift;
21334-  my $total = shift;
21335-
21336-  # Disassemble all instructions
21337-  my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr);
21338-
21339-  # Make array of counts per instruction
21340-  my @flat_count = ();
21341-  my @cum_count = ();
21342-  my $flat_total = 0;
21343-  my $cum_total = 0;
21344-  foreach my $e (@instructions) {
21345-    # Add up counts for all addresses that fall inside this instruction
21346-    my $c1 = 0;
21347-    my $c2 = 0;
21348-    for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) {
21349-      $c1 += GetEntry($flat, $a);
21350-      $c2 += GetEntry($cumulative, $a);
21351-    }
21352-    push(@flat_count, $c1);
21353-    push(@cum_count, $c2);
21354-    $flat_total += $c1;
21355-    $cum_total += $c2;
21356-  }
21357-
21358-  # Print header with total counts
21359-  printf("ROUTINE ====================== %s\n" .
21360-         "%6s %6s %s (flat, cumulative) %.1f%% of total\n",
21361-         ShortFunctionName($routine),
21362-         Unparse($flat_total),
21363-         Unparse($cum_total),
21364-         Units(),
21365-         ($cum_total * 100.0) / $total);
21366-
21367-  # Process instructions in order
21368-  my $current_file = "";
21369-  for (my $i = 0; $i <= $#instructions; ) {
21370-    my $e = $instructions[$i];
21371-
21372-    # Print the new file name whenever we switch files
21373-    if ($e->[1] ne $current_file) {
21374-      $current_file = $e->[1];
21375-      my $fname = $current_file;
21376-      $fname =~ s|^\./||;   # Trim leading "./"
21377-
21378-      # Shorten long file names
21379-      if (length($fname) >= 58) {
21380-        $fname = "..." . substr($fname, -55);
21381-      }
21382-      printf("-------------------- %s\n", $fname);
21383-    }
21384-
21385-    # TODO: Compute range of lines to print together to deal with
21386-    # small reorderings.
21387-    my $first_line = $e->[2];
21388-    my $last_line = $first_line;
21389-    my %flat_sum = ();
21390-    my %cum_sum = ();
21391-    for (my $l = $first_line; $l <= $last_line; $l++) {
21392-      $flat_sum{$l} = 0;
21393-      $cum_sum{$l} = 0;
21394-    }
21395-
21396-    # Find run of instructions for this range of source lines
21397-    my $first_inst = $i;
21398-    while (($i <= $#instructions) &&
21399-           ($instructions[$i]->[2] >= $first_line) &&
21400-           ($instructions[$i]->[2] <= $last_line)) {
21401-      $e = $instructions[$i];
21402-      $flat_sum{$e->[2]} += $flat_count[$i];
21403-      $cum_sum{$e->[2]} += $cum_count[$i];
21404-      $i++;
21405-    }
21406-    my $last_inst = $i - 1;
21407-
21408-    # Print source lines
21409-    for (my $l = $first_line; $l <= $last_line; $l++) {
21410-      my $line = SourceLine($current_file, $l);
21411-      if (!defined($line)) {
21412-        $line = "?\n";
21413-        next;
21414-      } else {
21415-        $line =~ s/^\s+//;
21416-      }
21417-      printf("%6s %6s %5d: %s",
21418-             UnparseAlt($flat_sum{$l}),
21419-             UnparseAlt($cum_sum{$l}),
21420-             $l,
21421-             $line);
21422-    }
21423-
21424-    # Print disassembly
21425-    for (my $x = $first_inst; $x <= $last_inst; $x++) {
21426-      my $e = $instructions[$x];
21427-      printf("%6s %6s    %8s: %6s\n",
21428-             UnparseAlt($flat_count[$x]),
21429-             UnparseAlt($cum_count[$x]),
21430-             UnparseAddress($offset, $e->[0]),
21431-             CleanDisassembly($e->[3]));
21432-    }
21433-  }
21434-}
21435-
21436-# Print DOT graph
21437-sub PrintDot {
21438-  my $prog = shift;
21439-  my $symbols = shift;
21440-  my $raw = shift;
21441-  my $flat = shift;
21442-  my $cumulative = shift;
21443-  my $overall_total = shift;
21444-
21445-  # Get total
21446-  my $local_total = TotalProfile($flat);
21447-  my $nodelimit = int($main::opt_nodefraction * $local_total);
21448-  my $edgelimit = int($main::opt_edgefraction * $local_total);
21449-  my $nodecount = $main::opt_nodecount;
21450-
21451-  # Find nodes to include
21452-  my @list = (sort { abs(GetEntry($cumulative, $b)) <=>
21453-                     abs(GetEntry($cumulative, $a))
21454-                     || $a cmp $b }
21455-              keys(%{$cumulative}));
21456-  my $last = $nodecount - 1;
21457-  if ($last > $#list) {
21458-    $last = $#list;
21459-  }
21460-  while (($last >= 0) &&
21461-         (abs(GetEntry($cumulative, $list[$last])) <= $nodelimit)) {
21462-    $last--;
21463-  }
21464-  if ($last < 0) {
21465-    print STDERR "No nodes to print\n";
21466-    return 0;
21467-  }
21468-
21469-  if ($nodelimit > 0 || $edgelimit > 0) {
21470-    printf STDERR ("Dropping nodes with <= %s %s; edges with <= %s abs(%s)\n",
21471-                   Unparse($nodelimit), Units(),
21472-                   Unparse($edgelimit), Units());
21473-  }
21474-
21475-  # Open DOT output file
21476-  my $output;
21477-  my $escaped_dot = ShellEscape(@DOT);
21478-  my $escaped_ps2pdf = ShellEscape(@PS2PDF);
21479-  if ($main::opt_gv) {
21480-    my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "ps"));
21481-    $output = "| $escaped_dot -Tps2 >$escaped_outfile";
21482-  } elsif ($main::opt_evince) {
21483-    my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "pdf"));
21484-    $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - $escaped_outfile";
21485-  } elsif ($main::opt_ps) {
21486-    $output = "| $escaped_dot -Tps2";
21487-  } elsif ($main::opt_pdf) {
21488-    $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - -";
21489-  } elsif ($main::opt_web || $main::opt_svg) {
21490-    # We need to post-process the SVG, so write to a temporary file always.
21491-    my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "svg"));
21492-    $output = "| $escaped_dot -Tsvg >$escaped_outfile";
21493-  } elsif ($main::opt_gif) {
21494-    $output = "| $escaped_dot -Tgif";
21495-  } else {
21496-    $output = ">&STDOUT";
21497-  }
21498-  open(DOT, $output) || error("$output: $!\n");
21499-
21500-  # Title
21501-  printf DOT ("digraph \"%s; %s %s\" {\n",
21502-              $prog,
21503-              Unparse($overall_total),
21504-              Units());
21505-  if ($main::opt_pdf) {
21506-    # The output is more printable if we set the page size for dot.
21507-    printf DOT ("size=\"8,11\"\n");
21508-  }
21509-  printf DOT ("node [width=0.375,height=0.25];\n");
21510-
21511-  # Print legend
21512-  printf DOT ("Legend [shape=box,fontsize=24,shape=plaintext," .
21513-              "label=\"%s\\l%s\\l%s\\l%s\\l%s\\l\"];\n",
21514-              $prog,
21515-              sprintf("Total %s: %s", Units(), Unparse($overall_total)),
21516-              sprintf("Focusing on: %s", Unparse($local_total)),
21517-              sprintf("Dropped nodes with <= %s abs(%s)",
21518-                      Unparse($nodelimit), Units()),
21519-              sprintf("Dropped edges with <= %s %s",
21520-                      Unparse($edgelimit), Units())
21521-              );
21522-
21523-  # Print nodes
21524-  my %node = ();
21525-  my $nextnode = 1;
21526-  foreach my $a (@list[0..$last]) {
21527-    # Pick font size
21528-    my $f = GetEntry($flat, $a);
21529-    my $c = GetEntry($cumulative, $a);
21530-
21531-    my $fs = 8;
21532-    if ($local_total > 0) {
21533-      $fs = 8 + (50.0 * sqrt(abs($f * 1.0 / $local_total)));
21534-    }
21535-
21536-    $node{$a} = $nextnode++;
21537-    my $sym = $a;
21538-    $sym =~ s/\s+/\\n/g;
21539-    $sym =~ s/::/\\n/g;
21540-
21541-    # Extra cumulative info to print for non-leaves
21542-    my $extra = "";
21543-    if ($f != $c) {
21544-      $extra = sprintf("\\rof %s (%s)",
21545-                       Unparse($c),
21546-                       Percent($c, $local_total));
21547-    }
21548-    my $style = "";
21549-    if ($main::opt_heapcheck) {
21550-      if ($f > 0) {
21551-        # make leak-causing nodes more visible (add a background)
21552-        $style = ",style=filled,fillcolor=gray"
21553-      } elsif ($f < 0) {
21554-        # make anti-leak-causing nodes (which almost never occur)
21555-        # stand out as well (triple border)
21556-        $style = ",peripheries=3"
21557-      }
21558-    }
21559-
21560-    printf DOT ("N%d [label=\"%s\\n%s (%s)%s\\r" .
21561-                "\",shape=box,fontsize=%.1f%s];\n",
21562-                $node{$a},
21563-                $sym,
21564-                Unparse($f),
21565-                Percent($f, $local_total),
21566-                $extra,
21567-                $fs,
21568-                $style,
21569-               );
21570-  }
21571-
21572-  # Get edges and counts per edge
21573-  my %edge = ();
21574-  my $n;
21575-  my $fullname_to_shortname_map = {};
21576-  FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map);
21577-  foreach my $k (keys(%{$raw})) {
21578-    # TODO: omit low %age edges
21579-    $n = $raw->{$k};
21580-    my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k);
21581-    for (my $i = 1; $i <= $#translated; $i++) {
21582-      my $src = $translated[$i];
21583-      my $dst = $translated[$i-1];
21584-      #next if ($src eq $dst);  # Avoid self-edges?
21585-      if (exists($node{$src}) && exists($node{$dst})) {
21586-        my $edge_label = "$src\001$dst";
21587-        if (!exists($edge{$edge_label})) {
21588-          $edge{$edge_label} = 0;
21589-        }
21590-        $edge{$edge_label} += $n;
21591-      }
21592-    }
21593-  }
21594-
21595-  # Print edges (process in order of decreasing counts)
21596-  my %indegree = ();   # Number of incoming edges added per node so far
21597-  my %outdegree = ();  # Number of outgoing edges added per node so far
21598-  foreach my $e (sort { $edge{$b} <=> $edge{$a} } keys(%edge)) {
21599-    my @x = split(/\001/, $e);
21600-    $n = $edge{$e};
21601-
21602-    # Initialize degree of kept incoming and outgoing edges if necessary
21603-    my $src = $x[0];
21604-    my $dst = $x[1];
21605-    if (!exists($outdegree{$src})) { $outdegree{$src} = 0; }
21606-    if (!exists($indegree{$dst})) { $indegree{$dst} = 0; }
21607-
21608-    my $keep;
21609-    if ($indegree{$dst} == 0) {
21610-      # Keep edge if needed for reachability
21611-      $keep = 1;
21612-    } elsif (abs($n) <= $edgelimit) {
21613-      # Drop if we are below --edgefraction
21614-      $keep = 0;
21615-    } elsif ($outdegree{$src} >= $main::opt_maxdegree ||
21616-             $indegree{$dst} >= $main::opt_maxdegree) {
21617-      # Keep limited number of in/out edges per node
21618-      $keep = 0;
21619-    } else {
21620-      $keep = 1;
21621-    }
21622-
21623-    if ($keep) {
21624-      $outdegree{$src}++;
21625-      $indegree{$dst}++;
21626-
21627-      # Compute line width based on edge count
21628-      my $fraction = abs($local_total ? (3 * ($n / $local_total)) : 0);
21629-      if ($fraction > 1) { $fraction = 1; }
21630-      my $w = $fraction * 2;
21631-      if ($w < 1 && ($main::opt_web || $main::opt_svg)) {
21632-        # SVG output treats line widths < 1 poorly.
21633-        $w = 1;
21634-      }
21635-
21636-      # Dot sometimes segfaults if given edge weights that are too large, so
21637-      # we cap the weights at a large value
21638-      my $edgeweight = abs($n) ** 0.7;
21639-      if ($edgeweight > 100000) { $edgeweight = 100000; }
21640-      $edgeweight = int($edgeweight);
21641-
21642-      my $style = sprintf("setlinewidth(%f)", $w);
21643-      if ($x[1] =~ m/\(inline\)/) {
21644-        $style .= ",dashed";
21645-      }
21646-
21647-      # Use a slightly squashed function of the edge count as the weight
21648-      printf DOT ("N%s -> N%s [label=%s, weight=%d, style=\"%s\"];\n",
21649-                  $node{$x[0]},
21650-                  $node{$x[1]},
21651-                  Unparse($n),
21652-                  $edgeweight,
21653-                  $style);
21654-    }
21655-  }
21656-
21657-  print DOT ("}\n");
21658-  close(DOT);
21659-
21660-  if ($main::opt_web || $main::opt_svg) {
21661-    # Rewrite SVG to be more usable inside web browser.
21662-    RewriteSvg(TempName($main::next_tmpfile, "svg"));
21663-  }
21664-
21665-  return 1;
21666-}
21667-
21668-sub RewriteSvg {
21669-  my $svgfile = shift;
21670-
21671-  open(SVG, $svgfile) || die "open temp svg: $!";
21672-  my @svg = <SVG>;
21673-  close(SVG);
21674-  unlink $svgfile;
21675-  my $svg = join('', @svg);
21676-
21677-  # Dot's SVG output is
21678-  #
21679-  #    <svg width="___" height="___"
21680-  #     viewBox="___" xmlns=...>
21681-  #    <g id="graph0" transform="...">
21682-  #    ...
21683-  #    </g>
21684-  #    </svg>
21685-  #
21686-  # Change it to
21687-  #
21688-  #    <svg width="100%" height="100%"
21689-  #     xmlns=...>
21690-  #    $svg_javascript
21691-  #    <g id="viewport" transform="translate(0,0)">
21692-  #    <g id="graph0" transform="...">
21693-  #    ...
21694-  #    </g>
21695-  #    </g>
21696-  #    </svg>
21697-
21698-  # Fix width, height; drop viewBox.
21699-  $svg =~ s/(?s)<svg width="[^"]+" height="[^"]+"(.*?)viewBox="[^"]+"/<svg width="100%" height="100%"$1/;
21700-
21701-  # Insert script, viewport <g> above first <g>
21702-  my $svg_javascript = SvgJavascript();
21703-  my $viewport = "<g id=\"viewport\" transform=\"translate(0,0)\">\n";
21704-  $svg =~ s/<g id="graph\d"/$svg_javascript$viewport$&/;
21705-
21706-  # Insert final </g> above </svg>.
21707-  $svg =~ s/(.*)(<\/svg>)/$1<\/g>$2/;
21708-  $svg =~ s/<g id="graph\d"(.*?)/<g id="viewport"$1/;
21709-
21710-  if ($main::opt_svg) {
21711-    # --svg: write to standard output.
21712-    print $svg;
21713-  } else {
21714-    # Write back to temporary file.
21715-    open(SVG, ">$svgfile") || die "open $svgfile: $!";
21716-    print SVG $svg;
21717-    close(SVG);
21718-  }
21719-}
21720-
21721-sub SvgJavascript {
21722-  return <<'EOF';
21723-<script type="text/ecmascript"><![CDATA[
21724-// SVGPan
21725-// http://www.cyberz.org/blog/2009/12/08/svgpan-a-javascript-svg-panzoomdrag-library/
21726-// Local modification: if(true || ...) below to force panning, never moving.
21727-
21728-/**
21729- *  SVGPan library 1.2
21730- * ====================
21731- *
21732- * Given a unique existing element with id "viewport", including
21733- * the library into any SVG adds the following capabilities:
21734- *
21735- *  - Mouse panning
21736- *  - Mouse zooming (using the wheel)
21737- *  - Object dragging
21738- *
21739- * Known issues:
21740- *
21741- *  - Zooming (while panning) on Safari still has some issues
21742- *
21743- * Releases:
21744- *
21745- * 1.2, Sat Mar 20 08:42:50 GMT 2010, Zeng Xiaohui
21746- *	Fixed a bug with browser mouse handler interaction
21747- *
21748- * 1.1, Wed Feb  3 17:39:33 GMT 2010, Zeng Xiaohui
21749- *	Updated the zoom code to support the mouse wheel on Safari/Chrome
21750- *
21751- * 1.0, Andrea Leofreddi
21752- *	First release
21753- *
21754- * This code is licensed under the following BSD license:
21755- *
21756- * Copyright 2009-2010 Andrea Leofreddi <[email protected]>. All rights reserved.
21757- *
21758- * Redistribution and use in source and binary forms, with or without modification, are
21759- * permitted provided that the following conditions are met:
21760- *
21761- *    1. Redistributions of source code must retain the above copyright notice, this list of
21762- *       conditions and the following disclaimer.
21763- *
21764- *    2. Redistributions in binary form must reproduce the above copyright notice, this list
21765- *       of conditions and the following disclaimer in the documentation and/or other materials
21766- *       provided with the distribution.
21767- *
21768- * THIS SOFTWARE IS PROVIDED BY Andrea Leofreddi ``AS IS'' AND ANY EXPRESS OR IMPLIED
21769- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
21770- * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Andrea Leofreddi OR
21771- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21772- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21773- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21774- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
21775- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
21776- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
21777- *
21778- * The views and conclusions contained in the software and documentation are those of the
21779- * authors and should not be interpreted as representing official policies, either expressed
21780- * or implied, of Andrea Leofreddi.
21781- */
21782-
21783-var root = document.documentElement;
21784-
21785-var state = 'none', stateTarget, stateOrigin, stateTf;
21786-
21787-setupHandlers(root);
21788-
21789-/**
21790- * Register handlers
21791- */
21792-function setupHandlers(root){
21793-	setAttributes(root, {
21794-		"onmouseup" : "add(evt)",
21795-		"onmousedown" : "handleMouseDown(evt)",
21796-		"onmousemove" : "handleMouseMove(evt)",
21797-		"onmouseup" : "handleMouseUp(evt)",
21798-		//"onmouseout" : "handleMouseUp(evt)", // Uncomment this to stop the pan functionality when dragging out of the SVG element
21799-	});
21800-
21801-	if(navigator.userAgent.toLowerCase().indexOf('webkit') >= 0)
21802-		window.addEventListener('mousewheel', handleMouseWheel, false); // Chrome/Safari
21803-	else
21804-		window.addEventListener('DOMMouseScroll', handleMouseWheel, false); // Others
21805-
21806-	var g = svgDoc.getElementById("svg");
21807-	g.width = "100%";
21808-	g.height = "100%";
21809-}
21810-
21811-/**
21812- * Instance an SVGPoint object with given event coordinates.
21813- */
21814-function getEventPoint(evt) {
21815-	var p = root.createSVGPoint();
21816-
21817-	p.x = evt.clientX;
21818-	p.y = evt.clientY;
21819-
21820-	return p;
21821-}
21822-
21823-/**
21824- * Sets the current transform matrix of an element.
21825- */
21826-function setCTM(element, matrix) {
21827-	var s = "matrix(" + matrix.a + "," + matrix.b + "," + matrix.c + "," + matrix.d + "," + matrix.e + "," + matrix.f + ")";
21828-
21829-	element.setAttribute("transform", s);
21830-}
21831-
21832-/**
21833- * Dumps a matrix to a string (useful for debug).
21834- */
21835-function dumpMatrix(matrix) {
21836-	var s = "[ " + matrix.a + ", " + matrix.c + ", " + matrix.e + "\n  " + matrix.b + ", " + matrix.d + ", " + matrix.f + "\n  0, 0, 1 ]";
21837-
21838-	return s;
21839-}
21840-
21841-/**
21842- * Sets attributes of an element.
21843- */
21844-function setAttributes(element, attributes){
21845-	for (i in attributes)
21846-		element.setAttributeNS(null, i, attributes[i]);
21847-}
21848-
21849-/**
21850- * Handle mouse wheel event.
21851- */
21852-function handleMouseWheel(evt) {
21853-	if(evt.preventDefault)
21854-		evt.preventDefault();
21855-
21856-	evt.returnValue = false;
21857-
21858-	var svgDoc = evt.target.ownerDocument;
21859-
21860-	var delta;
21861-
21862-	if(evt.wheelDelta)
21863-		delta = evt.wheelDelta / 3600; // Chrome/Safari
21864-	else
21865-		delta = evt.detail / -90; // Mozilla
21866-
21867-	var z = 1 + delta; // Zoom factor: 0.9/1.1
21868-
21869-	var g = svgDoc.getElementById("viewport");
21870-
21871-	var p = getEventPoint(evt);
21872-
21873-	p = p.matrixTransform(g.getCTM().inverse());
21874-
21875-	// Compute new scale matrix in current mouse position
21876-	var k = root.createSVGMatrix().translate(p.x, p.y).scale(z).translate(-p.x, -p.y);
21877-
21878-	setCTM(g, g.getCTM().multiply(k));
21879-
21880-	stateTf = stateTf.multiply(k.inverse());
21881-}
21882-
21883-/**
21884- * Handle mouse move event.
21885- */
21886-function handleMouseMove(evt) {
21887-	if(evt.preventDefault)
21888-		evt.preventDefault();
21889-
21890-	evt.returnValue = false;
21891-
21892-	var svgDoc = evt.target.ownerDocument;
21893-
21894-	var g = svgDoc.getElementById("viewport");
21895-
21896-	if(state == 'pan') {
21897-		// Pan mode
21898-		var p = getEventPoint(evt).matrixTransform(stateTf);
21899-
21900-		setCTM(g, stateTf.inverse().translate(p.x - stateOrigin.x, p.y - stateOrigin.y));
21901-	} else if(state == 'move') {
21902-		// Move mode
21903-		var p = getEventPoint(evt).matrixTransform(g.getCTM().inverse());
21904-
21905-		setCTM(stateTarget, root.createSVGMatrix().translate(p.x - stateOrigin.x, p.y - stateOrigin.y).multiply(g.getCTM().inverse()).multiply(stateTarget.getCTM()));
21906-
21907-		stateOrigin = p;
21908-	}
21909-}
21910-
21911-/**
21912- * Handle click event.
21913- */
21914-function handleMouseDown(evt) {
21915-	if(evt.preventDefault)
21916-		evt.preventDefault();
21917-
21918-	evt.returnValue = false;
21919-
21920-	var svgDoc = evt.target.ownerDocument;
21921-
21922-	var g = svgDoc.getElementById("viewport");
21923-
21924-	if(true || evt.target.tagName == "svg") {
21925-		// Pan mode
21926-		state = 'pan';
21927-
21928-		stateTf = g.getCTM().inverse();
21929-
21930-		stateOrigin = getEventPoint(evt).matrixTransform(stateTf);
21931-	} else {
21932-		// Move mode
21933-		state = 'move';
21934-
21935-		stateTarget = evt.target;
21936-
21937-		stateTf = g.getCTM().inverse();
21938-
21939-		stateOrigin = getEventPoint(evt).matrixTransform(stateTf);
21940-	}
21941-}
21942-
21943-/**
21944- * Handle mouse button release event.
21945- */
21946-function handleMouseUp(evt) {
21947-	if(evt.preventDefault)
21948-		evt.preventDefault();
21949-
21950-	evt.returnValue = false;
21951-
21952-	var svgDoc = evt.target.ownerDocument;
21953-
21954-	if(state == 'pan' || state == 'move') {
21955-		// Quit pan mode
21956-		state = '';
21957-	}
21958-}
21959-
21960-]]></script>
21961-EOF
21962-}
21963-
21964-# Provides a map from fullname to shortname for cases where the
21965-# shortname is ambiguous.  The symlist has both the fullname and
21966-# shortname for all symbols, which is usually fine, but sometimes --
21967-# such as overloaded functions -- two different fullnames can map to
21968-# the same shortname.  In that case, we use the address of the
21969-# function to disambiguate the two.  This function fills in a map that
21970-# maps fullnames to modified shortnames in such cases.  If a fullname
21971-# is not present in the map, the 'normal' shortname provided by the
21972-# symlist is the appropriate one to use.
21973-sub FillFullnameToShortnameMap {
21974-  my $symbols = shift;
21975-  my $fullname_to_shortname_map = shift;
21976-  my $shortnames_seen_once = {};
21977-  my $shortnames_seen_more_than_once = {};
21978-
21979-  foreach my $symlist (values(%{$symbols})) {
21980-    # TODO(csilvers): deal with inlined symbols too.
21981-    my $shortname = $symlist->[0];
21982-    my $fullname = $symlist->[2];
21983-    if ($fullname !~ /<[0-9a-fA-F]+>$/) {  # fullname doesn't end in an address
21984-      next;       # the only collisions we care about are when addresses differ
21985-    }
21986-    if (defined($shortnames_seen_once->{$shortname}) &&
21987-        $shortnames_seen_once->{$shortname} ne $fullname) {
21988-      $shortnames_seen_more_than_once->{$shortname} = 1;
21989-    } else {
21990-      $shortnames_seen_once->{$shortname} = $fullname;
21991-    }
21992-  }
21993-
21994-  foreach my $symlist (values(%{$symbols})) {
21995-    my $shortname = $symlist->[0];
21996-    my $fullname = $symlist->[2];
21997-    # TODO(csilvers): take in a list of addresses we care about, and only
21998-    # store in the map if $symlist->[1] is in that list.  Saves space.
21999-    next if defined($fullname_to_shortname_map->{$fullname});
22000-    if (defined($shortnames_seen_more_than_once->{$shortname})) {
22001-      if ($fullname =~ /<0*([^>]*)>$/) {   # fullname has address at end of it
22002-        $fullname_to_shortname_map->{$fullname} = "$shortname\@$1";
22003-      }
22004-    }
22005-  }
22006-}
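
# [Editor's illustration -- not part of jeprof.]  A minimal sketch of how
# FillFullnameToShortnameMap disambiguates two overloads that share a short
# name; the symbol table below is hypothetical.
sub ExampleFillFullnameToShortnameMap {
  my $symbols = {
    "0x1000" => ["Foo::bar", "foo.cc:10", "Foo::bar(int) <0000001000>"],
    "0x2000" => ["Foo::bar", "foo.cc:20", "Foo::bar(double) <0000002000>"],
  };
  my $map = {};
  FillFullnameToShortnameMap($symbols, $map);
  # Both full names end in an address and their shared short name collides,
  # so the map now holds:
  #   "Foo::bar(int) <0000001000>"    => "Foo::bar@1000"
  #   "Foo::bar(double) <0000002000>" => "Foo::bar@2000"
  return $map;
}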
22007-
22008-# Return a small number that identifies the argument.
22009-# Multiple calls with the same argument will return the same number.
22010-# Calls with different arguments will return different numbers.
22011-sub ShortIdFor {
22012-  my $key = shift;
22013-  my $id = $main::uniqueid{$key};
22014-  if (!defined($id)) {
22015-    $id = keys(%main::uniqueid) + 1;
22016-    $main::uniqueid{$key} = $id;
22017-  }
22018-  return $id;
22019-}
22020-
22021-# Translate a stack of addresses into a stack of symbols
22022-sub TranslateStack {
22023-  my $symbols = shift;
22024-  my $fullname_to_shortname_map = shift;
22025-  my $k = shift;
22026-
22027-  my @addrs = split(/\n/, $k);
22028-  my @result = ();
22029-  for (my $i = 0; $i <= $#addrs; $i++) {
22030-    my $a = $addrs[$i];
22031-
22032-    # Skip large addresses since they sometimes show up as fake entries on RH9
22033-    if (length($a) > 8 && $a gt "7fffffffffffffff") {
22034-      next;
22035-    }
22036-
22037-    if ($main::opt_disasm || $main::opt_list) {
22038-      # We want just the address for the key
22039-      push(@result, $a);
22040-      next;
22041-    }
22042-
22043-    my $symlist = $symbols->{$a};
22044-    if (!defined($symlist)) {
22045-      $symlist = [$a, "", $a];
22046-    }
22047-
22048-    # We can have a sequence of symbols for a particular entry
22049-    # (more than one symbol in the case of inlining).  Callers
22050-    # come before callees in symlist, so walk backwards since
22051-    # the translated stack should contain callees before callers.
22052-    for (my $j = $#{$symlist}; $j >= 2; $j -= 3) {
22053-      my $func = $symlist->[$j-2];
22054-      my $fileline = $symlist->[$j-1];
22055-      my $fullfunc = $symlist->[$j];
22056-      if (defined($fullname_to_shortname_map->{$fullfunc})) {
22057-        $func = $fullname_to_shortname_map->{$fullfunc};
22058-      }
22059-      if ($j > 2) {
22060-        $func = "$func (inline)";
22061-      }
22062-
22063-      # Do not merge nodes corresponding to Callback::Run since that
22064-      # causes confusing cycles in dot display.  Instead, we synthesize
22065-      # a unique name for this frame per caller.
22066-      if ($func =~ m/Callback.*::Run$/) {
22067-        my $caller = ($i > 0) ? $addrs[$i-1] : 0;
22068-        $func = "Run#" . ShortIdFor($caller);
22069-      }
22070-
22071-      if ($main::opt_addresses) {
22072-        push(@result, "$a $func $fileline");
22073-      } elsif ($main::opt_lines) {
22074-        if ($func eq '??' && $fileline eq '??:0') {
22075-          push(@result, "$a");
22076-        } else {
22077-          push(@result, "$func $fileline");
22078-        }
22079-      } elsif ($main::opt_functions) {
22080-        if ($func eq '??') {
22081-          push(@result, "$a");
22082-        } else {
22083-          push(@result, $func);
22084-        }
22085-      } elsif ($main::opt_files) {
22086-        if ($fileline eq '??:0' || $fileline eq '') {
22087-          push(@result, "$a");
22088-        } else {
22089-          my $f = $fileline;
22090-          $f =~ s/:\d+$//;
22091-          push(@result, $f);
22092-        }
22093-      } else {
22094-        push(@result, $a);
22095-        last;  # Do not print inlined info
22096-      }
22097-    }
22098-  }
22099-
22100-  # print join(",", @addrs), " => ", join(",", @result), "\n";
22101-  return @result;
22102-}
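
# [Editor's illustration -- not part of jeprof.]  A hedged sketch of
# TranslateStack on a hypothetical two-frame stack whose second frame carries
# one inlined function; it assumes the --functions granularity.
sub ExampleTranslateStack {
  local $main::opt_functions = 1;
  local ($main::opt_addresses, $main::opt_lines, $main::opt_files,
         $main::opt_disasm, $main::opt_list) = (0, 0, 0, 0, 0);
  my $symbols = {
    # Leaf frame: a single (shortname, fileline, fullname) triple.
    "00004004" => ["MyLeaf", "leaf.cc:12", "MyLeaf()"],
    # Second frame: caller triple first, then the function inlined into it.
    "00005008" => ["Caller", "caller.cc:40", "Caller()",
                   "Inlined", "inl.h:3",     "Inlined()"],
  };
  my @stack = TranslateStack($symbols, {}, "00004004\n00005008");
  # @stack is ("MyLeaf", "Inlined (inline)", "Caller"):
  # callees come first, and inlined frames are tagged "(inline)".
  return @stack;
}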
22103-
22104-# Generate percent string for a number and a total
22105-sub Percent {
22106-  my $num = shift;
22107-  my $tot = shift;
22108-  if ($tot != 0) {
22109-    return sprintf("%.1f%%", $num * 100.0 / $tot);
22110-  } else {
22111-    return ($num == 0) ? "nan" : (($num > 0) ? "+inf" : "-inf");
22112-  }
22113-}
22114-
22115-# Generate pretty-printed form of number
22116-sub Unparse {
22117-  my $num = shift;
22118-  if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
22119-    if ($main::opt_inuse_objects || $main::opt_alloc_objects) {
22120-      return sprintf("%d", $num);
22121-    } else {
22122-      if ($main::opt_show_bytes) {
22123-        return sprintf("%d", $num);
22124-      } else {
22125-        return sprintf("%.1f", $num / 1048576.0);
22126-      }
22127-    }
22128-  } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) {
22129-    return sprintf("%.3f", $num / 1e9); # Convert nanoseconds to seconds
22130-  } else {
22131-    return sprintf("%d", $num);
22132-  }
22133-}
22134-
22135-# Alternate pretty-printed form: 0 maps to "."
22136-sub UnparseAlt {
22137-  my $num = shift;
22138-  if ($num == 0) {
22139-    return ".";
22140-  } else {
22141-    return Unparse($num);
22142-  }
22143-}
22144-
22145-# Alternate pretty-printed form: 0 maps to ""
22146-sub HtmlPrintNumber {
22147-  my $num = shift;
22148-  if ($num == 0) {
22149-    return "";
22150-  } else {
22151-    return Unparse($num);
22152-  }
22153-}
22154-
22155-# Return output units
22156-sub Units {
22157-  if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
22158-    if ($main::opt_inuse_objects || $main::opt_alloc_objects) {
22159-      return "objects";
22160-    } else {
22161-      if ($main::opt_show_bytes) {
22162-        return "B";
22163-      } else {
22164-        return "MB";
22165-      }
22166-    }
22167-  } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) {
22168-    return "seconds";
22169-  } else {
22170-    return "samples";
22171-  }
22172-}
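
# [Editor's illustration -- not part of jeprof.]  A small sketch of how
# Unparse() and Units() pair up; the values assume a heap profile displayed
# with the default byte-to-MB conversion (neither --inuse_objects,
# --alloc_objects, nor --show_bytes in effect).
sub ExampleUnparseHeap {
  local $main::profile_type = 'heap';
  local ($main::opt_inuse_objects, $main::opt_alloc_objects,
         $main::opt_show_bytes) = (0, 0, 0);
  my $bytes = 5 * 1048576;
  return sprintf("%s %s", Unparse($bytes), Units());  # "5.0 MB"
}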
22173-
22174-##### Profile manipulation code #####
22175-
22176-# Generate flattened profile:
22177-# If count is charged to stack [a,b,c,d], in generated profile,
22178-# it will be charged to [a]
22179-sub FlatProfile {
22180-  my $profile = shift;
22181-  my $result = {};
22182-  foreach my $k (keys(%{$profile})) {
22183-    my $count = $profile->{$k};
22184-    my @addrs = split(/\n/, $k);
22185-    if ($#addrs >= 0) {
22186-      AddEntry($result, $addrs[0], $count);
22187-    }
22188-  }
22189-  return $result;
22190-}
22191-
22192-# Generate cumulative profile:
22193-# If count is charged to stack [a,b,c,d], in generated profile,
22194-# it will be charged to [a], [b], [c], [d]
22195-sub CumulativeProfile {
22196-  my $profile = shift;
22197-  my $result = {};
22198-  foreach my $k (keys(%{$profile})) {
22199-    my $count = $profile->{$k};
22200-    my @addrs = split(/\n/, $k);
22201-    foreach my $a (@addrs) {
22202-      AddEntry($result, $a, $count);
22203-    }
22204-  }
22205-  return $result;
22206-}
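
# [Editor's illustration -- not part of jeprof.]  A worked example of the two
# flattening rules described above, using a toy profile keyed by
# newline-joined frames (leaf first).
sub ExampleFlatVsCumulative {
  my $profile = {
    "a\nb\nc" => 10,   # 10 samples with leaf a, called from b, called from c
    "a\nd"    => 5,    #  5 samples with leaf a, called from d
  };
  my $flat = FlatProfile($profile);        # { a => 15 }
  my $cum  = CumulativeProfile($profile);  # { a => 15, b => 10, c => 10, d => 5 }
  return ($flat, $cum);
}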
22207-
22208-# If the second-youngest PC on the stack is always the same, returns
22209-# that pc.  Otherwise, returns undef.
22210-sub IsSecondPcAlwaysTheSame {
22211-  my $profile = shift;
22212-
22213-  my $second_pc = undef;
22214-  foreach my $k (keys(%{$profile})) {
22215-    my @addrs = split(/\n/, $k);
22216-    if ($#addrs < 1) {
22217-      return undef;
22218-    }
22219-    if (not defined $second_pc) {
22220-      $second_pc = $addrs[1];
22221-    } else {
22222-      if ($second_pc ne $addrs[1]) {
22223-        return undef;
22224-      }
22225-    }
22226-  }
22227-  return $second_pc;
22228-}
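
# [Editor's illustration -- not part of jeprof.]  A quick sketch of the
# second-PC check used below by RemoveUninterestingFrames; the addresses are
# hypothetical.
sub ExampleIsSecondPcAlwaysTheSame {
  my $handler_artifact = {
    "aaaa\nffff\n1111" => 3,   # every stack has "ffff" as its second frame
    "bbbb\nffff\n2222" => 2,
  };
  my $clean = {
    "aaaa\n1111" => 3,
    "bbbb\n2222" => 2,
  };
  return (IsSecondPcAlwaysTheSame($handler_artifact),  # "ffff"
          IsSecondPcAlwaysTheSame($clean));            # undef
}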
22229-
22230-sub ExtractSymbolNameInlineStack {
22231-  my $symbols = shift;
22232-  my $address = shift;
22233-
22234-  my @stack = ();
22235-
22236-  if (exists $symbols->{$address}) {
22237-    my @localinlinestack = @{$symbols->{$address}};
22238-    for (my $i = $#localinlinestack; $i > 0; $i-=3) {
22239-      my $file = $localinlinestack[$i-1];
22240-      my $fn = $localinlinestack[$i-0];
22241-
22242-      if ($file eq "?" || $file eq ":0") {
22243-        $file = "??:0";
22244-      }
22245-      if ($fn eq '??') {
22246-        # If we can't get the symbol name, at least use the file information.
22247-        $fn = $file;
22248-      }
22249-      my $suffix = "[inline]";
22250-      if ($i == 2) {
22251-        $suffix = "";
22252-      }
22253-      push (@stack, $fn.$suffix);
22254-    }
22255-  }
22256-  else {
22257-    # If we can't get a symbol name, at least fill in the address.
22258-    push (@stack, $address);
22259-  }
22260-
22261-  return @stack;
22262-}
22263-
22264-sub ExtractSymbolLocation {
22265-  my $symbols = shift;
22266-  my $address = shift;
22267-  # 'addr2line' outputs "??:0" for unknown locations; we do the
22268-  # same to be consistent.
22269-  my $location = "??:0:unknown";
22270-  if (exists $symbols->{$address}) {
22271-    my $file = $symbols->{$address}->[1];
22272-    if ($file eq "?") {
22273-      $file = "??:0"
22274-    }
22275-    $location = $file . ":" . $symbols->{$address}->[0];
22276-  }
22277-  return $location;
22278-}
22279-
22280-# Extracts a graph of calls.
22281-sub ExtractCalls {
22282-  my $symbols = shift;
22283-  my $profile = shift;
22284-
22285-  my $calls = {};
22286-  while( my ($stack_trace, $count) = each %$profile ) {
22287-    my @address = split(/\n/, $stack_trace);
22288-    my $destination = ExtractSymbolLocation($symbols, $address[0]);
22289-    AddEntry($calls, $destination, $count);
22290-    for (my $i = 1; $i <= $#address; $i++) {
22291-      my $source = ExtractSymbolLocation($symbols, $address[$i]);
22292-      my $call = "$source -> $destination";
22293-      AddEntry($calls, $call, $count);
22294-      $destination = $source;
22295-    }
22296-  }
22297-
22298-  return $calls;
22299-}
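
# [Editor's illustration -- not part of jeprof.]  A small sketch of
# ExtractCalls on a toy profile with one stack in which main calls work.
# The addresses and symbol entries are hypothetical.
sub ExampleExtractCalls {
  my $symbols = {
    "1000" => ["main", "main.cc:5"],
    "2000" => ["work", "work.cc:20"],
  };
  my $profile = { "2000\n1000" => 7 };   # leaf first, then its caller
  my $calls = ExtractCalls($symbols, $profile);
  # $calls now holds the leaf location plus one caller -> callee edge:
  #   "work.cc:20:work"                   => 7
  #   "main.cc:5:main -> work.cc:20:work" => 7
  return $calls;
}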
22300-
22301-sub FilterFrames {
22302-  my $symbols = shift;
22303-  my $profile = shift;
22304-
22305-  if ($main::opt_retain eq '' && $main::opt_exclude eq '') {
22306-    return $profile;
22307-  }
22308-
22309-  my $result = {};
22310-  foreach my $k (keys(%{$profile})) {
22311-    my $count = $profile->{$k};
22312-    my @addrs = split(/\n/, $k);
22313-    my @path = ();
22314-    foreach my $a (@addrs) {
22315-      my $sym;
22316-      if (exists($symbols->{$a})) {
22317-        $sym = $symbols->{$a}->[0];
22318-      } else {
22319-        $sym = $a;
22320-      }
22321-      if ($main::opt_retain ne '' && $sym !~ m/$main::opt_retain/) {
22322-        next;
22323-      }
22324-      if ($main::opt_exclude ne '' && $sym =~ m/$main::opt_exclude/) {
22325-        next;
22326-      }
22327-      push(@path, $a);
22328-    }
22329-    if (scalar(@path) > 0) {
22330-      my $reduced_path = join("\n", @path);
22331-      AddEntry($result, $reduced_path, $count);
22332-    }
22333-  }
22334-
22335-  return $result;
22336-}
22337-
22338-sub PrintCollapsedStacks {
22339-  my $symbols = shift;
22340-  my $profile = shift;
22341-
22342-  while (my ($stack_trace, $count) = each %$profile) {
22343-    my @address = split(/\n/, $stack_trace);
22344-    my @names = reverse ( map { ExtractSymbolNameInlineStack($symbols, $_) } @address );
22345-    printf("%s %d\n", join(";", @names), $count);
22346-  }
22347-}
22348-
22349-sub RemoveUninterestingFrames {
22350-  my $symbols = shift;
22351-  my $profile = shift;
22352-
22353-  # List of function names to skip
22354-  my %skip = ();
22355-  my $skip_regexp = 'NOMATCH';
22356-  if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
22357-    foreach my $name ('@JEMALLOC_PREFIX@calloc',
22358-                      'cfree',
22359-                      '@JEMALLOC_PREFIX@malloc',
22360-                      'newImpl',
22361-                      'void* newImpl',
22362-                      '@JEMALLOC_PREFIX@free',
22363-                      '@JEMALLOC_PREFIX@memalign',
22364-                      '@JEMALLOC_PREFIX@posix_memalign',
22365-                      '@JEMALLOC_PREFIX@aligned_alloc',
22366-                      'pvalloc',
22367-                      '@JEMALLOC_PREFIX@valloc',
22368-                      '@JEMALLOC_PREFIX@realloc',
22369-                      '@JEMALLOC_PREFIX@mallocx',
22370-                      '@JEMALLOC_PREFIX@rallocx',
22371-                      '@JEMALLOC_PREFIX@xallocx',
22372-                      '@JEMALLOC_PREFIX@dallocx',
22373-                      '@JEMALLOC_PREFIX@sdallocx',
22374-                      '@JEMALLOC_PREFIX@sdallocx_noflags',
22375-                      'tc_calloc',
22376-                      'tc_cfree',
22377-                      'tc_malloc',
22378-                      'tc_free',
22379-                      'tc_memalign',
22380-                      'tc_posix_memalign',
22381-                      'tc_pvalloc',
22382-                      'tc_valloc',
22383-                      'tc_realloc',
22384-                      'tc_new',
22385-                      'tc_delete',
22386-                      'tc_newarray',
22387-                      'tc_deletearray',
22388-                      'tc_new_nothrow',
22389-                      'tc_newarray_nothrow',
22390-                      'do_malloc',
22391-                      '::do_malloc',   # new name -- got moved to an unnamed ns
22392-                      '::do_malloc_or_cpp_alloc',
22393-                      'DoSampledAllocation',
22394-                      'simple_alloc::allocate',
22395-                      '__malloc_alloc_template::allocate',
22396-                      '__builtin_delete',
22397-                      '__builtin_new',
22398-                      '__builtin_vec_delete',
22399-                      '__builtin_vec_new',
22400-                      'operator new',
22401-                      'operator new[]',
22402-                      # The entry to our memory-allocation routines on OS X
22403-                      'malloc_zone_malloc',
22404-                      'malloc_zone_calloc',
22405-                      'malloc_zone_valloc',
22406-                      'malloc_zone_realloc',
22407-                      'malloc_zone_memalign',
22408-                      'malloc_zone_free',
22409-                      # These mark the beginning/end of our custom sections
22410-                      '__start_google_malloc',
22411-                      '__stop_google_malloc',
22412-                      '__start_malloc_hook',
22413-                      '__stop_malloc_hook') {
22414-      $skip{$name} = 1;
22415-      $skip{"_" . $name} = 1;   # Mach (OS X) adds a _ prefix to everything
22416-    }
22417-    # TODO: Remove TCMalloc once everything has been
22418-    # moved into the tcmalloc:: namespace and we have flushed
22419-    # old code out of the system.
22420-    $skip_regexp = "TCMalloc|^tcmalloc::";
22421-  } elsif ($main::profile_type eq 'contention') {
22422-    foreach my $vname ('base::RecordLockProfileData',
22423-                       'base::SubmitMutexProfileData',
22424-                       'base::SubmitSpinLockProfileData',
22425-                       'Mutex::Unlock',
22426-                       'Mutex::UnlockSlow',
22427-                       'Mutex::ReaderUnlock',
22428-                       'MutexLock::~MutexLock',
22429-                       'SpinLock::Unlock',
22430-                       'SpinLock::SlowUnlock',
22431-                       'SpinLockHolder::~SpinLockHolder') {
22432-      $skip{$vname} = 1;
22433-    }
22434-  } elsif ($main::profile_type eq 'cpu') {
22435-    # Drop signal handlers used for CPU profile collection
22436-    # TODO(dpeng): this should not be necessary; it's taken
22437-    # care of by the general 2nd-pc mechanism below.
22438-    foreach my $name ('ProfileData::Add',           # historical
22439-                      'ProfileData::prof_handler',  # historical
22440-                      'CpuProfiler::prof_handler',
22441-                      '__FRAME_END__',
22442-                      '__pthread_sighandler',
22443-                      '__restore') {
22444-      $skip{$name} = 1;
22445-    }
22446-  } else {
22447-    # Nothing skipped for unknown types
22448-  }
22449-
22450-  if ($main::profile_type eq 'cpu') {
22451-    # If all the second-youngest program counters are the same,
22452-    # this STRONGLY suggests that it is an artifact of measurement,
22453-    # i.e., stack frames pushed by the CPU profiler signal handler.
22454-    # Hence, we delete them.
22455-    # (The topmost PC is read from the signal structure, not from
22456-    # the stack, so it does not get involved.)
22457-    while (my $second_pc = IsSecondPcAlwaysTheSame($profile)) {
22458-      my $result = {};
22459-      my $func = '';
22460-      if (exists($symbols->{$second_pc})) {
22461-        $second_pc = $symbols->{$second_pc}->[0];
22462-      }
22463-      print STDERR "Removing $second_pc from all stack traces.\n";
22464-      foreach my $k (keys(%{$profile})) {
22465-        my $count = $profile->{$k};
22466-        my @addrs = split(/\n/, $k);
22467-        splice @addrs, 1, 1;
22468-        my $reduced_path = join("\n", @addrs);
22469-        AddEntry($result, $reduced_path, $count);
22470-      }
22471-      $profile = $result;
22472-    }
22473-  }
22474-
22475-  my $result = {};
22476-  foreach my $k (keys(%{$profile})) {
22477-    my $count = $profile->{$k};
22478-    my @addrs = split(/\n/, $k);
22479-    my @path = ();
22480-    foreach my $a (@addrs) {
22481-      if (exists($symbols->{$a})) {
22482-        my $func = $symbols->{$a}->[0];
22483-        if ($skip{$func} || ($func =~ m/$skip_regexp/)) {
22484-          # Throw away the portion of the backtrace seen so far, under the
22485-          # assumption that previous frames were for functions internal to the
22486-          # allocator.
22487-          @path = ();
22488-          next;
22489-        }
22490-      }
22491-      push(@path, $a);
22492-    }
22493-    my $reduced_path = join("\n", @path);
22494-    AddEntry($result, $reduced_path, $count);
22495-  }
22496-
22497-  $result = FilterFrames($symbols, $result);
22498-
22499-  return $result;
22500-}
22501-
22502-# Reduce profile to granularity given by user
22503-sub ReduceProfile {
22504-  my $symbols = shift;
22505-  my $profile = shift;
22506-  my $result = {};
22507-  my $fullname_to_shortname_map = {};
22508-  FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map);
22509-  foreach my $k (keys(%{$profile})) {
22510-    my $count = $profile->{$k};
22511-    my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k);
22512-    my @path = ();
22513-    my %seen = ();
22514-    $seen{''} = 1;      # So that empty keys are skipped
22515-    foreach my $e (@translated) {
22516-      # To avoid double-counting due to recursion, skip a stack-trace
22517-      # entry if it has already been seen
22518-      if (!$seen{$e}) {
22519-        $seen{$e} = 1;
22520-        push(@path, $e);
22521-      }
22522-    }
22523-    my $reduced_path = join("\n", @path);
22524-    AddEntry($result, $reduced_path, $count);
22525-  }
22526-  return $result;
22527-}
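
# [Editor's illustration -- not part of jeprof.]  A hedged sketch of how
# ReduceProfile collapses recursion; it assumes --functions granularity and
# uses hypothetical addresses and symbols.
sub ExampleReduceProfile {
  local $main::opt_functions = 1;
  local ($main::opt_addresses, $main::opt_lines, $main::opt_files,
         $main::opt_disasm, $main::opt_list) = (0, 0, 0, 0, 0);
  my $symbols = {
    "1000" => ["recurse", "?", "recurse()"],
    "2000" => ["helper",  "?", "helper()"],
    "3000" => ["main",    "?", "main()"],
  };
  # "recurse" appears twice in the raw stack (indirect recursion via helper).
  my $profile = { "1000\n2000\n1000\n3000" => 4 };
  my $reduced = ReduceProfile($symbols, $profile);
  # The repeated "recurse" frame is skipped, so the count is charged once:
  #   { "recurse\nhelper\nmain" => 4 }
  return $reduced;
}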
22528-
22529-# Does the specified symbol array match the regexp?
22530-sub SymbolMatches {
22531-  my $sym = shift;
22532-  my $re = shift;
22533-  if (defined($sym)) {
22534-    for (my $i = 0; $i < $#{$sym}; $i += 3) {
22535-      if ($sym->[$i] =~ m/$re/ || $sym->[$i+1] =~ m/$re/) {
22536-        return 1;
22537-      }
22538-    }
22539-  }
22540-  return 0;
22541-}
22542-
22543-# Focus only on paths involving specified regexps
22544-sub FocusProfile {
22545-  my $symbols = shift;
22546-  my $profile = shift;
22547-  my $focus = shift;
22548-  my $result = {};
22549-  foreach my $k (keys(%{$profile})) {
22550-    my $count = $profile->{$k};
22551-    my @addrs = split(/\n/, $k);
22552-    foreach my $a (@addrs) {
22553-      # Keep this stack if any frame matches the address/shortname/fileline
22554-      if (($a =~ m/$focus/) || SymbolMatches($symbols->{$a}, $focus)) {
22555-        AddEntry($result, $k, $count);
22556-        last;
22557-      }
22558-    }
22559-  }
22560-  return $result;
22561-}
22562-
22563-# Focus only on paths not involving specified regexps
22564-sub IgnoreProfile {
22565-  my $symbols = shift;
22566-  my $profile = shift;
22567-  my $ignore = shift;
22568-  my $result = {};
22569-  foreach my $k (keys(%{$profile})) {
22570-    my $count = $profile->{$k};
22571-    my @addrs = split(/\n/, $k);
22572-    my $matched = 0;
22573-    foreach my $a (@addrs) {
22574-      # Mark this stack as matched if any frame matches the address/shortname/fileline
22575-      if (($a =~ m/$ignore/) || SymbolMatches($symbols->{$a}, $ignore)) {
22576-        $matched = 1;
22577-        last;
22578-      }
22579-    }
22580-    if (!$matched) {
22581-      AddEntry($result, $k, $count);
22582-    }
22583-  }
22584-  return $result;
22585-}
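
# [Editor's illustration -- not part of jeprof.]  A toy sketch of FocusProfile
# and IgnoreProfile with hypothetical addresses and two-element symbol entries.
sub ExampleFocusAndIgnore {
  my $symbols = {
    "1000" => ["work", "work.cc:1"],
    "2000" => ["main", "main.cc:1"],
    "3000" => ["idle", "idle.cc:1"],
  };
  my $profile = {
    "1000\n2000" => 5,   # work called from main
    "3000\n2000" => 2,   # idle called from main
  };
  my $focused = FocusProfile($symbols, $profile, "work");
  # { "1000\n2000" => 5 }  -- only stacks with a frame matching /work/
  my $ignored = IgnoreProfile($symbols, $profile, "work");
  # { "3000\n2000" => 2 }  -- only stacks with no frame matching /work/
  return ($focused, $ignored);
}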
22586-
22587-# Get total count in profile
22588-sub TotalProfile {
22589-  my $profile = shift;
22590-  my $result = 0;
22591-  foreach my $k (keys(%{$profile})) {
22592-    $result += $profile->{$k};
22593-  }
22594-  return $result;
22595-}
22596-
22597-# Add profiles A and B, returning a new merged profile
22598-sub AddProfile {
22599-  my $A = shift;
22600-  my $B = shift;
22601-
22602-  my $R = {};
22603-  # add all keys in A
22604-  foreach my $k (keys(%{$A})) {
22605-    my $v = $A->{$k};
22606-    AddEntry($R, $k, $v);
22607-  }
22608-  # add all keys in B
22609-  foreach my $k (keys(%{$B})) {
22610-    my $v = $B->{$k};
22611-    AddEntry($R, $k, $v);
22612-  }
22613-  return $R;
22614-}
22615-
22616-# Merges symbol maps
22617-sub MergeSymbols {
22618-  my $A = shift;
22619-  my $B = shift;
22620-
22621-  my $R = {};
22622-  foreach my $k (keys(%{$A})) {
22623-    $R->{$k} = $A->{$k};
22624-  }
22625-  if (defined($B)) {
22626-    foreach my $k (keys(%{$B})) {
22627-      $R->{$k} = $B->{$k};
22628-    }
22629-  }
22630-  return $R;
22631-}
22632-
22633-
22634-# Add PC sets A and B, returning a new merged set
22635-sub AddPcs {
22636-  my $A = shift;
22637-  my $B = shift;
22638-
22639-  my $R = {};
22640-  # add all keys in A
22641-  foreach my $k (keys(%{$A})) {
22642-    $R->{$k} = 1
22643-  }
22644-  # add all keys in B
22645-  foreach my $k (keys(%{$B})) {
22646-    $R->{$k} = 1
22647-  }
22648-  return $R;
22649-}
22650-
22651-# Subtract B from A
22652-sub SubtractProfile {
22653-  my $A = shift;
22654-  my $B = shift;
22655-
22656-  my $R = {};
22657-  foreach my $k (keys(%{$A})) {
22658-    my $v = $A->{$k} - GetEntry($B, $k);
22659-    if ($v < 0 && $main::opt_drop_negative) {
22660-      $v = 0;
22661-    }
22662-    AddEntry($R, $k, $v);
22663-  }
22664-  if (!$main::opt_drop_negative) {
22665-    # Handle keys that appear only in the subtracted profile B
22666-    foreach my $k (keys(%{$B})) {
22667-      if (!exists($A->{$k})) {
22668-        AddEntry($R, $k, 0 - $B->{$k});
22669-      }
22670-    }
22671-  }
22672-  return $R;
22673-}
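
# [Editor's illustration -- not part of jeprof.]  A small sketch of profile
# subtraction on toy single-frame keys, with --drop_negative unset.
sub ExampleSubtractProfile {
  my $A = { "a" => 10, "b" => 3 };
  my $B = { "a" => 4,  "c" => 2 };
  local $main::opt_drop_negative = 0;
  my $diff = SubtractProfile($A, $B);
  # { a => 6, b => 3, c => -2 }
  # (keys present only in B are carried over with a negated count;
  # with --drop_negative they would instead be clamped or omitted).
  return $diff;
}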
22674-
22675-# Get entry from profile; zero if not present
22676-sub GetEntry {
22677-  my $profile = shift;
22678-  my $k = shift;
22679-  if (exists($profile->{$k})) {
22680-    return $profile->{$k};
22681-  } else {
22682-    return 0;
22683-  }
22684-}
22685-
22686-# Add entry to specified profile
22687-sub AddEntry {
22688-  my $profile = shift;
22689-  my $k = shift;
22690-  my $n = shift;
22691-  if (!exists($profile->{$k})) {
22692-    $profile->{$k} = 0;
22693-  }
22694-  $profile->{$k} += $n;
22695-}
22696-
22697-# Add a stack of entries to specified profile, and add them to the $pcs
22698-# list.
22699-sub AddEntries {
22700-  my $profile = shift;
22701-  my $pcs = shift;
22702-  my $stack = shift;
22703-  my $count = shift;
22704-  my @k = ();
22705-
22706-  foreach my $e (split(/\s+/, $stack)) {
22707-    my $pc = HexExtend($e);
22708-    $pcs->{$pc} = 1;
22709-    push @k, $pc;
22710-  }
22711-  AddEntry($profile, (join "\n", @k), $count);
22712-}
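
# [Editor's illustration -- not part of jeprof.]  A sketch of AddEntries on a
# whitespace-separated stack string, as produced when parsing text profiles;
# HexExtend (defined elsewhere in this script) pads each PC to full width.
sub ExampleAddEntries {
  my $profile = {};
  my $pcs = {};
  AddEntries($profile, $pcs, "4004 5008", 9);
  # $profile now has one key: the two padded PCs joined by "\n", with count 9;
  # $pcs records each padded PC so its symbol can be fetched later.
  return ($profile, $pcs);
}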
22713-
22714-##### Code to profile a server dynamically #####
22715-
22716-sub CheckSymbolPage {
22717-  my $url = SymbolPageURL();
22718-  my $command = ShellEscape(@URL_FETCHER, $url);
22719-  open(SYMBOL, "$command |") or error($command);
22720-  my $line = <SYMBOL>;
22721-  $line =~ s/\r//g;         # turn windows-looking lines into unix-looking lines
22722-  close(SYMBOL);
22723-  unless (defined($line)) {
22724-    error("$url doesn't exist\n");
22725-  }
22726-
22727-  if ($line =~ /^num_symbols:\s+(\d+)$/) {
22728-    if ($1 == 0) {
22729-      error("Stripped binary. No symbols available.\n");
22730-    }
22731-  } else {
22732-    error("Failed to get the number of symbols from $url\n");
22733-  }
22734-}
22735-
22736-sub IsProfileURL {
22737-  my $profile_name = shift;
22738-  if (-f $profile_name) {
22739-    printf STDERR "Using local file $profile_name.\n";
22740-    return 0;
22741-  }
22742-  return 1;
22743-}
22744-
22745-sub ParseProfileURL {
22746-  my $profile_name = shift;
22747-
22748-  if (!defined($profile_name) || $profile_name eq "") {
22749-    return ();
22750-  }
22751-
22752-  # Split profile URL - matches all non-empty strings, so no test.
22753-  $profile_name =~ m,^(https?://)?([^/]+)(.*?)(/|$PROFILES)?$,;
22754-
22755-  my $proto = $1 || "http://";
22756-  my $hostport = $2;
22757-  my $prefix = $3;
22758-  my $profile = $4 || "/";
22759-
22760-  my $host = $hostport;
22761-  $host =~ s/:.*//;
22762-
22763-  my $baseurl = "$proto$hostport$prefix";
22764-  return ($host, $baseurl, $profile);
22765-}
22766-
22767-# We fetch symbols from the first profile argument.
22768-sub SymbolPageURL {
22769-  my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
22770-  return "$baseURL$SYMBOL_PAGE";
22771-}
22772-
22773-sub FetchProgramName() {
22774-  my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
22775-  my $url = "$baseURL$PROGRAM_NAME_PAGE";
22776-  my $command_line = ShellEscape(@URL_FETCHER, $url);
22777-  open(CMDLINE, "$command_line |") or error($command_line);
22778-  my $cmdline = <CMDLINE>;
22779-  $cmdline =~ s/\r//g;   # turn windows-looking lines into unix-looking lines
22780-  close(CMDLINE);
22781-  error("Failed to get program name from $url\n") unless defined($cmdline);
22782-  $cmdline =~ s/\x00.+//;  # Remove argv[1] and later arguments.
22783-  $cmdline =~ s!\n!!g;  # Remove LFs.
22784-  return $cmdline;
22785-}
22786-
22787-# curl's -L (--location) option isn't reliable, at least as of
22788-# version 7.12.3: curl forgets to post data when it follows a
22789-# redirection.  This function resolves the redirection manually as a
22790-# workaround for curl.  Redirection happens on borg hosts.
22791-sub ResolveRedirectionForCurl {
22792-  my $url = shift;
22793-  my $command_line = ShellEscape(@URL_FETCHER, "--head", $url);
22794-  open(CMDLINE, "$command_line |") or error($command_line);
22795-  while (<CMDLINE>) {
22796-    s/\r//g;         # turn windows-looking lines into unix-looking lines
22797-    if (/^Location: (.*)/) {
22798-      $url = $1;
22799-    }
22800-  }
22801-  close(CMDLINE);
22802-  return $url;
22803-}
22804-
22805-# Add a timeout flag to URL_FETCHER.  Returns a new list.
22806-sub AddFetchTimeout {
22807-  my $timeout = shift;
22808-  my @fetcher = @_;
22809-  if (defined($timeout)) {
22810-    if (join(" ", @fetcher) =~ m/\bcurl -s/) {
22811-      push(@fetcher, "--max-time", sprintf("%d", $timeout));
22812-    } elsif (join(" ", @fetcher) =~ m/\brpcget\b/) {
22813-      push(@fetcher, sprintf("--deadline=%d", $timeout));
22814-    }
22815-  }
22816-  return @fetcher;
22817-}
22818-
22819-# Reads a symbol map from the file handle given as the first argument, returning
22820-# the resulting symbol map.  Also processes variables relating to symbols.
22821-# Currently, the only variable processed is 'binary=<value>' which updates
22822-# $main::prog to have the correct program name.
22823-sub ReadSymbols {
22824-  my $in = shift;
22825-  my $map = {};
22826-  while (<$in>) {
22827-    s/\r//g;         # turn windows-looking lines into unix-looking lines
22828-    # Removes all the leading zeroes from the symbols, see comment below.
22829-    if (m/^0x0*([0-9a-f]+)\s+(.+)/) {
22830-      $map->{$1} = $2;
22831-    } elsif (m/^---/) {
22832-      last;
22833-    } elsif (m/^([a-z][^=]*)=(.*)$/ ) {
22834-      my ($variable, $value) = ($1, $2);
22835-      for ($variable, $value) {
22836-        s/^\s+//;
22837-        s/\s+$//;
22838-      }
22839-      if ($variable eq "binary") {
22840-        if ($main::prog ne $UNKNOWN_BINARY && $main::prog ne $value) {
22841-          printf STDERR ("Warning: Mismatched binary name '%s', using '%s'.\n",
22842-                         $main::prog, $value);
22843-        }
22844-        $main::prog = $value;
22845-      } else {
22846-        printf STDERR ("Ignoring unknown variable in symbols list: " .
22847-            "'%s' = '%s'\n", $variable, $value);
22848-      }
22849-    }
22850-  }
22851-  return $map;
22852-}
22853-
22854-sub URLEncode {
22855-  my $str = shift;
22856-  $str =~ s/([^A-Za-z0-9\-_.!~*'()])/ sprintf "%%%02x", ord $1 /eg;
22857-  return $str;
22858-}
22859-
22860-sub AppendSymbolFilterParams {
22861-  my $url = shift;
22862-  my @params = ();
22863-  if ($main::opt_retain ne '') {
22864-    push(@params, sprintf("retain=%s", URLEncode($main::opt_retain)));
22865-  }
22866-  if ($main::opt_exclude ne '') {
22867-    push(@params, sprintf("exclude=%s", URLEncode($main::opt_exclude)));
22868-  }
22869-  if (scalar @params > 0) {
22870-    $url = sprintf("%s?%s", $url, join("&", @params));
22871-  }
22872-  return $url;
22873-}
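# Illustrative sketch, not part of the original script (the option values
# here are made up): URLEncode() percent-escapes '^' and ':' because they
# fall outside [A-Za-z0-9-_.!~*'()], and AppendSymbolFilterParams() then
# appends the escaped filters as query parameters.
{
  local $main::opt_retain  = '^foo::';     # hypothetical --retain value
  local $main::opt_exclude = 'std::';      # hypothetical --exclude value
  my $example = AppendSymbolFilterParams("http://host:8080/pprof/symbol");
  # => "http://host:8080/pprof/symbol?retain=%5efoo%3a%3a&exclude=std%3a%3a"
}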
22874-
22875-# Fetches and processes symbols to prepare them for use in the profile output
22876-# code.  If the optional 'symbol_map' arg is not given, fetches symbols from
22877-# $SYMBOL_PAGE for all PC values found in profile.  Otherwise, the raw symbols
22878-# are assumed to have already been fetched into 'symbol_map' and are simply
22879-# extracted and processed.
22880-sub FetchSymbols {
22881-  my $pcset = shift;
22882-  my $symbol_map = shift;
22883-
22884-  my %seen = ();
22885-  my @pcs = grep { !$seen{$_}++ } keys(%$pcset);  # uniq
22886-
22887-  if (!defined($symbol_map)) {
22888-    my $post_data = join("+", sort((map {"0x" . "$_"} @pcs)));
22889-
22890-    open(POSTFILE, ">$main::tmpfile_sym");
22891-    print POSTFILE $post_data;
22892-    close(POSTFILE);
22893-
22894-    my $url = SymbolPageURL();
22895-
22896-    my $command_line;
22897-    if (join(" ", @URL_FETCHER) =~ m/\bcurl -s/) {
22898-      $url = ResolveRedirectionForCurl($url);
22899-      $url = AppendSymbolFilterParams($url);
22900-      $command_line = ShellEscape(@URL_FETCHER, "-d", "\@$main::tmpfile_sym",
22901-                                  $url);
22902-    } else {
22903-      $url = AppendSymbolFilterParams($url);
22904-      $command_line = (ShellEscape(@URL_FETCHER, "--post", $url)
22905-                       . " < " . ShellEscape($main::tmpfile_sym));
22906-    }
22907-    # We use c++filt in case $SYMBOL_PAGE gives us mangled symbols.
22908-    my $escaped_cppfilt = ShellEscape($obj_tool_map{"c++filt"});
22909-    open(SYMBOL, "$command_line | $escaped_cppfilt |") or error($command_line);
22910-    $symbol_map = ReadSymbols(*SYMBOL{IO});
22911-    close(SYMBOL);
22912-  }
22913-
22914-  my $symbols = {};
22915-  foreach my $pc (@pcs) {
22916-    my $fullname;
22917-    # For 64 bits binaries, symbols are extracted with 8 leading zeroes.
22918-    # Then /symbol reads the long symbols in as uint64, and outputs
22919-    # the result with a "0x%08llx" format which gets rid of the zeroes.
22920-    # By removing all the leading zeroes in both $pc and the symbols from
22921-    # /symbol, the symbols match and are retrievable from the map.
22922-    my $shortpc = $pc;
22923-    $shortpc =~ s/^0*//;
22924-    # Each line may have a list of names, which includes the function
22925-    # and also other functions it has inlined.  They are separated (in
22926-    # PrintSymbolizedProfile), by --, which is illegal in function names.
22927-    my $fullnames;
22928-    if (defined($symbol_map->{$shortpc})) {
22929-      $fullnames = $symbol_map->{$shortpc};
22930-    } else {
22931-      $fullnames = "0x" . $pc;  # Just use addresses
22932-    }
22933-    my $sym = [];
22934-    $symbols->{$pc} = $sym;
22935-    foreach my $fullname (split("--", $fullnames)) {
22936-      my $name = ShortFunctionName($fullname);
22937-      push(@{$sym}, $name, "?", $fullname);
22938-    }
22939-  }
22940-  return $symbols;
22941-}
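# Illustrative sketch, not part of the original script (addresses and names
# are hypothetical): how a 64-bit pc lines up with a /symbol reply once the
# leading zeroes are stripped on both sides, and how an inlined chain
# separated by '--' expands into the frames pushed above.
{
  my $pc        = "00000000004005d0";          # key as stored in the pcset
  (my $shortpc  = $pc) =~ s/^0*//;             # "4005d0", matches the map key
  my $fullnames = "inline_helper--caller_fn";  # one value from the symbol map
  my @frames    = split("--", $fullnames);     # ("inline_helper", "caller_fn")
}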
22942-
22943-sub BaseName {
22944-  my $file_name = shift;
22945-  $file_name =~ s!^.*/!!;  # Remove directory name
22946-  return $file_name;
22947-}
22948-
22949-sub MakeProfileBaseName {
22950-  my ($binary_name, $profile_name) = @_;
22951-  my ($host, $baseURL, $path) = ParseProfileURL($profile_name);
22952-  my $binary_shortname = BaseName($binary_name);
22953-  return sprintf("%s.%s.%s",
22954-                 $binary_shortname, $main::op_time, $host);
22955-}
22956-
22957-sub FetchDynamicProfile {
22958-  my $binary_name = shift;
22959-  my $profile_name = shift;
22960-  my $fetch_name_only = shift;
22961-  my $encourage_patience = shift;
22962-
22963-  if (!IsProfileURL($profile_name)) {
22964-    return $profile_name;
22965-  } else {
22966-    my ($host, $baseURL, $path) = ParseProfileURL($profile_name);
22967-    if ($path eq "" || $path eq "/") {
22968-      # Missing type specifier defaults to cpu-profile
22969-      $path = $PROFILE_PAGE;
22970-    }
22971-
22972-    my $profile_file = MakeProfileBaseName($binary_name, $profile_name);
22973-
22974-    my $url = "$baseURL$path";
22975-    my $fetch_timeout = undef;
22976-    if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE/) {
22977-      if ($path =~ m/[?]/) {
22978-        $url .= "&";
22979-      } else {
22980-        $url .= "?";
22981-      }
22982-      $url .= sprintf("seconds=%d", $main::opt_seconds);
22983-      $fetch_timeout = $main::opt_seconds * 1.01 + 60;
22984-      # Set $profile_type for consumption by PrintSymbolizedProfile.
22985-      $main::profile_type = 'cpu';
22986-    } else {
22987-      # For non-CPU profiles, we add a type-extension to
22988-      # the target profile file name.
22989-      my $suffix = $path;
22990-      $suffix =~ s,/,.,g;
22991-      $profile_file .= $suffix;
22992-      # Set $profile_type for consumption by PrintSymbolizedProfile.
22993-      if ($path =~ m/$HEAP_PAGE/) {
22994-        $main::profile_type = 'heap';
22995-      } elsif ($path =~ m/$GROWTH_PAGE/) {
22996-        $main::profile_type = 'growth';
22997-      } elsif ($path =~ m/$CONTENTION_PAGE/) {
22998-        $main::profile_type = 'contention';
22999-      }
23000-    }
23001-
23002-    my $profile_dir = $ENV{"JEPROF_TMPDIR"} || ($ENV{HOME} . "/jeprof");
23003-    if (! -d $profile_dir) {
23004-      mkdir($profile_dir)
23005-          || die("Unable to create profile directory $profile_dir: $!\n");
23006-    }
23007-    my $tmp_profile = "$profile_dir/.tmp.$profile_file";
23008-    my $real_profile = "$profile_dir/$profile_file";
23009-
23010-    if ($fetch_name_only > 0) {
23011-      return $real_profile;
23012-    }
23013-
23014-    my @fetcher = AddFetchTimeout($fetch_timeout, @URL_FETCHER);
23015-    my $cmd = ShellEscape(@fetcher, $url) . " > " . ShellEscape($tmp_profile);
23016-    if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE|$CENSUSPROFILE_PAGE/){
23017-      print STDERR "Gathering CPU profile from $url for $main::opt_seconds seconds to\n  ${real_profile}\n";
23018-      if ($encourage_patience) {
23019-        print STDERR "Be patient...\n";
23020-      }
23021-    } else {
23022-      print STDERR "Fetching $path profile from $url to\n  ${real_profile}\n";
23023-    }
23024-
23025-    (system($cmd) == 0) || error("Failed to get profile: $cmd: $!\n");
23026-    (system("mv", $tmp_profile, $real_profile) == 0) || error("Unable to rename profile\n");
23027-    print STDERR "Wrote profile to $real_profile\n";
23028-    $main::collected_profile = $real_profile;
23029-    return $main::collected_profile;
23030-  }
23031-}
23032-
23033-# Collect profiles in parallel
23034-sub FetchDynamicProfiles {
23035-  my $items = scalar(@main::pfile_args);
23036-  my $levels = log($items) / log(2);
23037-
23038-  if ($items == 1) {
23039-    $main::profile_files[0] = FetchDynamicProfile($main::prog, $main::pfile_args[0], 0, 1);
23040-  } else {
23041-    # math rounding issues
23042-    if ((2 ** $levels) < $items) {
23043-     $levels++;
23044-    }
23045-    my $count = scalar(@main::pfile_args);
23046-    for (my $i = 0; $i < $count; $i++) {
23047-      $main::profile_files[$i] = FetchDynamicProfile($main::prog, $main::pfile_args[$i], 1, 0);
23048-    }
23049-    print STDERR "Fetching $count profiles. Be patient...\n";
23050-    FetchDynamicProfilesRecurse($levels, 0, 0);
23051-    $main::collected_profile = join(" \\\n    ", @main::profile_files);
23052-  }
23053-}
23054-
23055-# Recursively fork a process to get enough processes
23056-# collecting profiles
23057-sub FetchDynamicProfilesRecurse {
23058-  my $maxlevel = shift;
23059-  my $level = shift;
23060-  my $position = shift;
23061-
23062-  if (my $pid = fork()) {
23063-    $position = 0 | ($position << 1);
23064-    TryCollectProfile($maxlevel, $level, $position);
23065-    wait;
23066-  } else {
23067-    $position = 1 | ($position << 1);
23068-    TryCollectProfile($maxlevel, $level, $position);
23069-    cleanup();
23070-    exit(0);
23071-  }
23072-}
23073-
23074-# Collect a single profile
23075-sub TryCollectProfile {
23076-  my $maxlevel = shift;
23077-  my $level = shift;
23078-  my $position = shift;
23079-
23080-  if ($level >= ($maxlevel - 1)) {
23081-    if ($position < scalar(@main::pfile_args)) {
23082-      FetchDynamicProfile($main::prog, $main::pfile_args[$position], 0, 0);
23083-    }
23084-  } else {
23085-    FetchDynamicProfilesRecurse($maxlevel, $level+1, $position);
23086-  }
23087-}
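# Illustrative sketch, not part of the original script: the fork tree above
# builds each leaf's $position one bit per round (the parent appends a 0
# bit, the child a 1 bit), so R rounds of forking yield every position
# 0 .. 2**R - 1 and each element of @main::pfile_args is fetched by exactly
# one process.
sub ExampleForkPositions {
  my $rounds = shift;                       # number of fork rounds
  my @positions = (0);
  for (1 .. $rounds) {
    @positions = map { (0 | ($_ << 1), 1 | ($_ << 1)) } @positions;
  }
  return @positions;                        # $rounds == 2  =>  (0, 1, 2, 3)
}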
23088-
23089-##### Parsing code #####
23090-
23091-# Provide a small streaming-read module to handle very large
23092-# cpu-profile files.  Stream in chunks along a sliding window.
23093-# Provides an interface to get one 'slot', correctly handling
23094-# endian-ness differences.  A slot is one 32-bit or 64-bit word
23095-# (depending on the input profile).  We tell endianness and bit-size
23096-# for the profile by looking at the first 8 bytes: in cpu profiles,
23097-# the second slot is always 3 (we'll accept anything that's not 0).
23098-BEGIN {
23099-  package CpuProfileStream;
23100-
23101-  sub new {
23102-    my ($class, $file, $fname) = @_;
23103-    my $self = { file        => $file,
23104-                 base        => 0,
23105-                 stride      => 512 * 1024,   # must be a multiple of bitsize/8
23106-                 slots       => [],
23107-                 unpack_code => "",           # N for big-endian, V for little
23108-                 perl_is_64bit => 1,          # matters if profile is 64-bit
23109-    };
23110-    bless $self, $class;
23111-    # Let unittests adjust the stride
23112-    if ($main::opt_test_stride > 0) {
23113-      $self->{stride} = $main::opt_test_stride;
23114-    }
23115-    # Read the first two slots to figure out bitsize and endianness.
23116-    my $slots = $self->{slots};
23117-    my $str;
23118-    read($self->{file}, $str, 8);
23119-    # Set the global $address_length based on what we see here.
23120-    # 8 is 32-bit (8 hexadecimal chars); 16 is 64-bit (16 hexadecimal chars).
23121-    $address_length = ($str eq (chr(0)x8)) ? 16 : 8;
23122-    if ($address_length == 8) {
23123-      if (substr($str, 6, 2) eq chr(0)x2) {
23124-        $self->{unpack_code} = 'V';  # Little-endian.
23125-      } elsif (substr($str, 4, 2) eq chr(0)x2) {
23126-        $self->{unpack_code} = 'N';  # Big-endian
23127-      } else {
23128-        ::error("$fname: header size >= 2**16\n");
23129-      }
23130-      @$slots = unpack($self->{unpack_code} . "*", $str);
23131-    } else {
23132-      # If we're a 64-bit profile, check if we're a 64-bit-capable
23133-      # perl.  Otherwise, each slot will be represented as a float
23134-      # instead of an int64, losing precision and making all the
23135-      # 64-bit addresses wrong.  We won't complain yet, but will
23136-      # later if we ever see a value that doesn't fit in 32 bits.
23137-      my $has_q = 0;
23138-      eval { $has_q = pack("Q", "1") ? 1 : 1; };
23139-      if (!$has_q) {
23140-        $self->{perl_is_64bit} = 0;
23141-      }
23142-      read($self->{file}, $str, 8);
23143-      if (substr($str, 4, 4) eq chr(0)x4) {
23144-        # We'd love to use 'Q', but it's a) not universal, b) not endian-proof.
23145-        $self->{unpack_code} = 'V';  # Little-endian.
23146-      } elsif (substr($str, 0, 4) eq chr(0)x4) {
23147-        $self->{unpack_code} = 'N';  # Big-endian
23148-      } else {
23149-        ::error("$fname: header size >= 2**32\n");
23150-      }
23151-      my @pair = unpack($self->{unpack_code} . "*", $str);
23152-      # Since we know one of the pair is 0, it's fine to just add them.
23153-      @$slots = (0, $pair[0] + $pair[1]);
23154-    }
23155-    return $self;
23156-  }
23157-
23158-  # Load more data when we access slots->get(X) which is not yet in memory.
23159-  sub overflow {
23160-    my ($self) = @_;
23161-    my $slots = $self->{slots};
23162-    $self->{base} += $#$slots + 1;   # skip over data we're replacing
23163-    my $str;
23164-    read($self->{file}, $str, $self->{stride});
23165-    if ($address_length == 8) {      # the 32-bit case
23166-      # This is the easy case: unpack provides 32-bit unpacking primitives.
23167-      @$slots = unpack($self->{unpack_code} . "*", $str);
23168-    } else {
23169-      # We need to unpack 32 bits at a time and combine.
23170-      my @b32_values = unpack($self->{unpack_code} . "*", $str);
23171-      my @b64_values = ();
23172-      for (my $i = 0; $i < $#b32_values; $i += 2) {
23173-        # TODO(csilvers): if this is a 32-bit perl, the math below
23174-        #    could end up in a too-large int, which perl will promote
23175-        #    to a double, losing necessary precision.  Deal with that.
23176-        #    Right now, we just die.
23177-        my ($lo, $hi) = ($b32_values[$i], $b32_values[$i+1]);
23178-        if ($self->{unpack_code} eq 'N') {    # big-endian
23179-          ($lo, $hi) = ($hi, $lo);
23180-        }
23181-        my $value = $lo + $hi * (2**32);
23182-        if (!$self->{perl_is_64bit} &&   # check value is exactly represented
23183-            (($value % (2**32)) != $lo || int($value / (2**32)) != $hi)) {
23184-          ::error("Need a 64-bit perl to process this 64-bit profile.\n");
23185-        }
23186-        push(@b64_values, $value);
23187-      }
23188-      @$slots = @b64_values;
23189-    }
23190-  }
23191-
23192-  # Access the i-th long in the file (logically), or -1 at EOF.
23193-  sub get {
23194-    my ($self, $idx) = @_;
23195-    my $slots = $self->{slots};
23196-    while ($#$slots >= 0) {
23197-      if ($idx < $self->{base}) {
23198-        # The only time we expect a reference to $slots[$i - something]
23199-        # after referencing $slots[$i] is reading the very first header.
23200-        # Since $stride > |header|, that shouldn't cause any lookback
23201-        # errors.  And everything after the header is sequential.
23202-        print STDERR "Unexpected look-back reading CPU profile\n";
23203-        return -1;   # shrug, don't know what better to return
23204-      } elsif ($idx > $self->{base} + $#$slots) {
23205-        $self->overflow();
23206-      } else {
23207-        return $slots->[$idx - $self->{base}];
23208-      }
23209-    }
23210-    # If we get here, $slots is [], which means we've reached EOF
23211-    return -1;  # unique since slots is supposed to hold unsigned numbers
23212-  }
23213-}
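# Illustrative sketch, not part of the original script (the bytes are
# synthetic): the probe performed by CpuProfileStream::new above.  A 32-bit
# little-endian CPU profile starts with the slots (0, 3), so its first 8
# bytes are not all zero and bytes 6-7 are zero, which selects 8-hex-char
# addresses and the 'V' unpack code.  A 64-bit profile's first slot is a
# 64-bit zero, so all 8 bytes are zero and the second 8-byte read decides
# the endianness instead.
{
  my $hdr   = pack("V2", 0, 3);                               # "\0\0\0\0\3\0\0\0"
  my $bits  = ($hdr eq (chr(0) x 8)) ? 64 : 32;               # 32
  my $code  = (substr($hdr, 6, 2) eq chr(0) x 2) ? 'V' : 'N'; # 'V'
  my @slots = unpack($code . "*", $hdr);                      # (0, 3)
}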
23214-
23215-# Reads the top, 'header' section of a profile, and returns the last
23216-# line of the header, commonly called a 'header line'.  The header
23217-# section of a profile consists of zero or more 'command' lines that
23218-# are instructions to jeprof, which jeprof executes when reading the
23219-# header.  All 'command' lines start with a %.  After the command
23220-# lines is the 'header line', which is a profile-specific line that
23221-# indicates what type of profile it is, and perhaps other global
23222-# information about the profile.  For instance, here's a header line
23223-# for a heap profile:
23224-#   heap profile:     53:    38236 [  5525:  1284029] @ heapprofile
23225-# For historical reasons, the CPU profile does not contain a text-
23226-# readable header line.  If the profile looks like a CPU profile,
23227-# this function returns "".  If no header line could be found, this
23228-# function returns undef.
23229-#
23230-# The following commands are recognized:
23231-#   %warn -- emit the rest of this line to stderr, prefixed by 'WARNING:'
23232-#
23233-# The input file should be in binmode.
23234-sub ReadProfileHeader {
23235-  local *PROFILE = shift;
23236-  my $firstchar = "";
23237-  my $line = "";
23238-  read(PROFILE, $firstchar, 1);
23239-  seek(PROFILE, -1, 1);                    # unread the firstchar
23240-  if ($firstchar !~ /[[:print:]]/) {       # is not a text character
23241-    return "";
23242-  }
23243-  while (defined($line = <PROFILE>)) {
23244-    $line =~ s/\r//g;   # turn windows-looking lines into unix-looking lines
23245-    if ($line =~ /^%warn\s+(.*)/) {        # 'warn' command
23246-      # Note this matches both '%warn blah\n' and '%warn\n'.
23247-      print STDERR "WARNING: $1\n";        # print the rest of the line
23248-    } elsif ($line =~ /^%/) {
23249-      print STDERR "Ignoring unknown command from profile header: $line";
23250-    } else {
23251-      # End of commands, must be the header line.
23252-      return $line;
23253-    }
23254-  }
23255-  return undef;     # got to EOF without seeing a header line
23256-}
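# Illustrative sketch, not part of the original script (the warning text is
# made up; the header line is the heap example quoted above): reading a
# header section that carries one %warn command.  ReadProfileHeader() prints
# the warning to stderr and returns the first non-command line.
{
  my $data = "%warn profile taken during shutdown\n"
           . "heap profile:     53:    38236 [  5525:  1284029] \@ heapprofile\n";
  open(my $fh, "<", \$data);
  binmode $fh;
  my $hdr = ReadProfileHeader($fh);   # returns the "heap profile: ..." line
  close($fh);
}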
23257-
23258-sub IsSymbolizedProfileFile {
23259-  my $file_name = shift;
23260-  if (!(-e $file_name) || !(-r $file_name)) {
23261-    return 0;
23262-  }
23263-  # Check if the file contains a symbol-section marker.
23264-  open(TFILE, "<$file_name");
23265-  binmode TFILE;
23266-  my $firstline = ReadProfileHeader(*TFILE);
23267-  close(TFILE);
23268-  if (!$firstline) {
23269-    return 0;
23270-  }
23271-  $SYMBOL_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
23272-  my $symbol_marker = $&;
23273-  return $firstline =~ /^--- *$symbol_marker/;
23274-}
23275-
23276-# Parse profile generated by common/profiler.cc and return a reference
23277-# to a map:
23278-#      $result->{version}     Version number of profile file
23279-#      $result->{period}      Sampling period (in microseconds)
23280-#      $result->{profile}     Profile object
23281-#      $result->{threads}     Map of thread IDs to profile objects
23282-#      $result->{map}         Memory map info from profile
23283-#      $result->{pcs}         Hash of all PC values seen, key is hex address
23284-sub ReadProfile {
23285-  my $prog = shift;
23286-  my $fname = shift;
23287-  my $result;            # return value
23288-
23289-  $CONTENTION_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
23290-  my $contention_marker = $&;
23291-  $GROWTH_PAGE  =~ m,[^/]+$,;    # matches everything after the last slash
23292-  my $growth_marker = $&;
23293-  $SYMBOL_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
23294-  my $symbol_marker = $&;
23295-  $PROFILE_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
23296-  my $profile_marker = $&;
23297-  $HEAP_PAGE =~ m,[^/]+$,;    # matches everything after the last slash
23298-  my $heap_marker = $&;
23299-
23300-  # Look at first line to see if it is a heap or a CPU profile.
23301-  # CPU profile may start with no header at all, and just binary data
23302-  # (starting with \0\0\0\0) -- in that case, don't try to read the
23303-  # whole firstline, since it may be gigabytes(!) of data.
23304-  open(PROFILE, "<$fname") || error("$fname: $!\n");
23305-  binmode PROFILE;      # New perls do UTF-8 processing
23306-  my $header = ReadProfileHeader(*PROFILE);
23307-  if (!defined($header)) {   # means "at EOF"
23308-    error("Profile is empty.\n");
23309-  }
23310-
23311-  my $symbols;
23312-  if ($header =~ m/^--- *$symbol_marker/o) {
23313-    # Verify that the user asked for a symbolized profile
23314-    if (!$main::use_symbolized_profile) {
23315-      # we have both a binary and a symbolized profile; abort
23316-      error("FATAL ERROR: Symbolized profile\n   $fname\ncannot be used with " .
23317-            "a binary arg. Try again without passing\n   $prog\n");
23318-    }
23319-    # Read the symbol section of the symbolized profile file.
23320-    $symbols = ReadSymbols(*PROFILE{IO});
23321-    # Read the next line to get the header for the remaining profile.
23322-    $header = ReadProfileHeader(*PROFILE) || "";
23323-  }
23324-
23325-  if ($header =~ m/^--- *($heap_marker|$growth_marker)/o) {
23326-    # Skip "--- ..." line for profile types that have their own headers.
23327-    $header = ReadProfileHeader(*PROFILE) || "";
23328-  }
23329-
23330-  $main::profile_type = '';
23331-
23332-  if ($header =~ m/^heap profile:.*$growth_marker/o) {
23333-    $main::profile_type = 'growth';
23334-    $result =  ReadHeapProfile($prog, *PROFILE, $header);
23335-  } elsif ($header =~ m/^heap profile:/) {
23336-    $main::profile_type = 'heap';
23337-    $result =  ReadHeapProfile($prog, *PROFILE, $header);
23338-  } elsif ($header =~ m/^heap/) {
23339-    $main::profile_type = 'heap';
23340-    $result = ReadThreadedHeapProfile($prog, $fname, $header);
23341-  } elsif ($header =~ m/^--- *$contention_marker/o) {
23342-    $main::profile_type = 'contention';
23343-    $result = ReadSynchProfile($prog, *PROFILE);
23344-  } elsif ($header =~ m/^--- *Stacks:/) {
23345-    print STDERR
23346-      "Old format contention profile: mistakenly reports " .
23347-      "condition variable signals as lock contentions.\n";
23348-    $main::profile_type = 'contention';
23349-    $result = ReadSynchProfile($prog, *PROFILE);
23350-  } elsif ($header =~ m/^--- *$profile_marker/) {
23351-    # the binary cpu profile data starts immediately after this line
23352-    $main::profile_type = 'cpu';
23353-    $result = ReadCPUProfile($prog, $fname, *PROFILE);
23354-  } else {
23355-    if (defined($symbols)) {
23356-      # a symbolized profile contains a format we don't recognize, bail out
23357-      error("$fname: Cannot recognize profile section after symbols.\n");
23358-    }
23359-    # no ascii header present -- must be a CPU profile
23360-    $main::profile_type = 'cpu';
23361-    $result = ReadCPUProfile($prog, $fname, *PROFILE);
23362-  }
23363-
23364-  close(PROFILE);
23365-
23366-  # if we got symbols along with the profile, return those as well
23367-  if (defined($symbols)) {
23368-    $result->{symbols} = $symbols;
23369-  }
23370-
23371-  return $result;
23372-}
23373-
23374-# Subtract one from caller pc so we map back to call instr.
23375-# However, don't do this if we're reading a symbolized profile
23376-# file, in which case the subtract-one was done when the file
23377-# was written.
23378-#
23379-# We apply the same logic to all readers, though ReadCPUProfile uses an
23380-# independent implementation.
23381-sub FixCallerAddresses {
23382-  my $stack = shift;
23383-  # --raw/http: Always subtract one from pc's, because PrintSymbolizedProfile()
23384-  # dumps unadjusted profiles.
23385-  {
23386-    $stack =~ /(\s)/;
23387-    my $delimiter = $1;
23388-    my @addrs = split(' ', $stack);
23389-    my @fixedaddrs;
23390-    $#fixedaddrs = $#addrs;
23391-    if ($#addrs >= 0) {
23392-      $fixedaddrs[0] = $addrs[0];
23393-    }
23394-    for (my $i = 1; $i <= $#addrs; $i++) {
23395-      $fixedaddrs[$i] = AddressSub($addrs[$i], "0x1");
23396-    }
23397-    return join $delimiter, @fixedaddrs;
23398-  }
23399-}
23400-
23401-# CPU profile reader
23402-sub ReadCPUProfile {
23403-  my $prog = shift;
23404-  my $fname = shift;       # just used for logging
23405-  local *PROFILE = shift;
23406-  my $version;
23407-  my $period;
23408-  my $i;
23409-  my $profile = {};
23410-  my $pcs = {};
23411-
23412-  # Parse string into array of slots.
23413-  my $slots = CpuProfileStream->new(*PROFILE, $fname);
23414-
23415-  # Read header.  The current header version is a 5-element structure
23416-  # containing:
23417-  #   0: header count (always 0)
23418-  #   1: header "words" (after this one: 3)
23419-  #   2: format version (0)
23420-  #   3: sampling period (usec)
23421-  #   4: unused padding (always 0)
23422-  if ($slots->get(0) != 0 ) {
23423-    error("$fname: not a profile file, or old format profile file\n");
23424-  }
23425-  $i = 2 + $slots->get(1);
23426-  $version = $slots->get(2);
23427-  $period = $slots->get(3);
23428-  # Do some sanity checking on these header values.
23429-  if ($version > (2**32) || $period > (2**32) || $i > (2**32) || $i < 5) {
23430-    error("$fname: not a profile file, or corrupted profile file\n");
23431-  }
23432-
23433-  # Parse profile
23434-  while ($slots->get($i) != -1) {
23435-    my $n = $slots->get($i++);
23436-    my $d = $slots->get($i++);
23437-    if ($d > (2**16)) {  # TODO(csilvers): what's a reasonable max-stack-depth?
23438-      my $addr = sprintf("0%o", $i * ($address_length == 8 ? 4 : 8));
23439-      print STDERR "At index $i (address $addr):\n";
23440-      error("$fname: stack trace depth >= 2**16\n");
23441-    }
23442-    if ($slots->get($i) == 0) {
23443-      # End of profile data marker
23444-      $i += $d;
23445-      last;
23446-    }
23447-
23448-    # Make key out of the stack entries
23449-    my @k = ();
23450-    for (my $j = 0; $j < $d; $j++) {
23451-      my $pc = $slots->get($i+$j);
23452-      # Subtract one from caller pc so we map back to call instr.
23453-      $pc--;
23454-      $pc = sprintf("%0*x", $address_length, $pc);
23455-      $pcs->{$pc} = 1;
23456-      push @k, $pc;
23457-    }
23458-
23459-    AddEntry($profile, (join "\n", @k), $n);
23460-    $i += $d;
23461-  }
23462-
23463-  # Parse map
23464-  my $map = '';
23465-  seek(PROFILE, $i * 4, 0);
23466-  read(PROFILE, $map, (stat PROFILE)[7]);
23467-
23468-  my $r = {};
23469-  $r->{version} = $version;
23470-  $r->{period} = $period;
23471-  $r->{profile} = $profile;
23472-  $r->{libs} = ParseLibraries($prog, $map, $pcs);
23473-  $r->{pcs} = $pcs;
23474-
23475-  return $r;
23476-}
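# Illustrative sketch, not part of the original script (synthetic,
# little-endian 32-bit data; addresses are invented): the header layout
# ReadCPUProfile() expects can be written with pack -- header count 0,
# three more header words, format version 0, a 10000us sampling period and
# a zero pad word.  The body is a sequence of (count, depth, pc1..pcN) word
# groups, ended by a (0, 1, 0) trailer record and the memory-map text.
{
  my $cpu_header = pack("V5", 0, 3, 0, 10000, 0);
  my $one_sample = pack("V4", 7, 2, 0x400123, 0x400456);  # count 7, depth 2
  my $end_marker = pack("V3", 0, 1, 0);
}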
23477-
23478-sub HeapProfileIndex {
23479-  my $index = 1;
23480-  if ($main::opt_inuse_space) {
23481-    $index = 1;
23482-  } elsif ($main::opt_inuse_objects) {
23483-    $index = 0;
23484-  } elsif ($main::opt_alloc_space) {
23485-    $index = 3;
23486-  } elsif ($main::opt_alloc_objects) {
23487-    $index = 2;
23488-  }
23489-  return $index;
23490-}
23491-
23492-sub ReadMappedLibraries {
23493-  my $fh = shift;
23494-  my $map = "";
23495-  # Read the /proc/self/maps data
23496-  while (<$fh>) {
23497-    s/\r//g;         # turn windows-looking lines into unix-looking lines
23498-    $map .= $_;
23499-  }
23500-  return $map;
23501-}
23502-
23503-sub ReadMemoryMap {
23504-  my $fh = shift;
23505-  my $map = "";
23506-  # Read /proc/self/maps data as formatted by DumpAddressMap()
23507-  my $buildvar = "";
23508-  while (<PROFILE>) {
23509-    s/\r//g;         # turn windows-looking lines into unix-looking lines
23510-    # Parse "build=<dir>" specification if supplied
23511-    if (m/^\s*build=(.*)\n/) {
23512-      $buildvar = $1;
23513-    }
23514-
23515-    # Expand "$build" variable if available
23516-    $_ =~ s/\$build\b/$buildvar/g;
23517-
23518-    $map .= $_;
23519-  }
23520-  return $map;
23521-}
23522-
23523-sub AdjustSamples {
23524-  my ($sample_adjustment, $sampling_algorithm, $n1, $s1, $n2, $s2) = @_;
23525-  if ($sample_adjustment) {
23526-    if ($sampling_algorithm == 2) {
23527-      # Remote-heap version 2
23528-      # The sampling frequency is the rate of a Poisson process.
23529-      # This means that the probability of sampling an allocation of
23530-      # size X with sampling rate Y is 1 - exp(-X/Y)
23531-      if ($n1 != 0) {
23532-        my $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
23533-        my $scale_factor = 1/(1 - exp(-$ratio));
23534-        $n1 *= $scale_factor;
23535-        $s1 *= $scale_factor;
23536-      }
23537-      if ($n2 != 0) {
23538-        my $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
23539-        my $scale_factor = 1/(1 - exp(-$ratio));
23540-        $n2 *= $scale_factor;
23541-        $s2 *= $scale_factor;
23542-      }
23543-    } else {
23544-      # Remote-heap version 1
23545-      my $ratio;
23546-      $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
23547-      if ($ratio < 1) {
23548-        $n1 /= $ratio;
23549-        $s1 /= $ratio;
23550-      }
23551-      $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
23552-      if ($ratio < 1) {
23553-        $n2 /= $ratio;
23554-        $s2 /= $ratio;
23555-      }
23556-    }
23557-  }
23558-  return ($n1, $s1, $n2, $s2);
23559-}
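# Illustrative worked example, not part of the original script (the numbers
# are made up): the version-2 adjustment above.  For a bucket of $n1 = 4
# sampled allocations totalling $s1 = 409600 bytes under a 524288-byte
# sampling rate, ratio = (409600/4)/524288 ~= 0.195 and the scale factor is
# 1/(1 - exp(-0.195)) ~= 5.6, i.e. each sampled allocation of that size
# stands in for roughly 5.6 real ones.
{
  my ($n1, $s1, $rate) = (4, 409600, 524288);
  my $ratio        = (($s1 * 1.0) / $n1) / $rate;
  my $scale_factor = 1 / (1 - exp(-$ratio));
  # $n1 * $scale_factor ~= 22.5 objects, $s1 * $scale_factor ~= 2.31e6 bytes
}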
23560-
23561-sub ReadHeapProfile {
23562-  my $prog = shift;
23563-  local *PROFILE = shift;
23564-  my $header = shift;
23565-
23566-  my $index = HeapProfileIndex();
23567-
23568-  # Find the type of this profile.  The header line looks like:
23569-  #    heap profile:   1246:  8800744 [  1246:  8800744] @ <heap-url>/266053
23570-  # There are two pairs <count: size>, the first inuse objects/space, and the
23571-  # second allocated objects/space.  This is followed optionally by a profile
23572-  # type, and if that is present, optionally by a sampling frequency.
23573-  # For remote heap profiles (v1):
23574-  # The interpretation of the sampling frequency is that the profiler, for
23575-  # each sample, calculates a uniformly distributed random integer less than
23576-  # the given value, and records the next sample after that many bytes have
23577-  # been allocated.  Therefore, the expected sample interval is half of the
23578-  # given frequency.  By default, if not specified, the expected sample
23579-  # interval is 128KB.  Only remote-heap-page profiles are adjusted for
23580-  # sample size.
23581-  # For remote heap profiles (v2):
23582-  # The sampling frequency is the rate of a Poisson process. This means that
23583-  # the probability of sampling an allocation of size X with sampling rate Y
23584-  # is 1 - exp(-X/Y)
23585-  # For version 2, a typical header line might look like this:
23586-  # heap profile:   1922: 127792360 [  1922: 127792360] @ <heap-url>_v2/524288
23587-  # the trailing number (524288) is the sampling rate. (Version 1 showed
23588-  # double the 'rate' here)
23589-  my $sampling_algorithm = 0;
23590-  my $sample_adjustment = 0;
23591-  chomp($header);
23592-  my $type = "unknown";
23593-  if ($header =~ m"^heap profile:\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\](\s*@\s*([^/]*)(/(\d+))?)?") {
23594-    if (defined($6) && ($6 ne '')) {
23595-      $type = $6;
23596-      my $sample_period = $8;
23597-      # $type is "heapprofile" for profiles generated by the
23598-      # heap-profiler, and either "heap" or "heap_v2" for profiles
23599-      # generated by sampling directly within tcmalloc.  It can also
23600-      # be "growth" for heap-growth profiles.  The first is typically
23601-      # found for profiles generated locally, and the others for
23602-      # remote profiles.
23603-      if (($type eq "heapprofile") || ($type !~ /heap/) ) {
23604-        # No need to adjust for the sampling rate with heap-profiler-derived data
23605-        $sampling_algorithm = 0;
23606-      } elsif ($type =~ /_v2/) {
23607-        $sampling_algorithm = 2;     # version 2 sampling
23608-        if (defined($sample_period) && ($sample_period ne '')) {
23609-          $sample_adjustment = int($sample_period);
23610-        }
23611-      } else {
23612-        $sampling_algorithm = 1;     # version 1 sampling
23613-        if (defined($sample_period) && ($sample_period ne '')) {
23614-          $sample_adjustment = int($sample_period)/2;
23615-        }
23616-      }
23617-    } else {
23618-      # We detect whether or not this is a remote-heap profile by checking
23619-      # that the total-allocated stats ($n2,$s2) are exactly the
23620-      # same as the in-use stats ($n1,$s1).  It is remotely conceivable
23621-      # that a non-remote-heap profile may pass this check, but it is hard
23622-      # to imagine how that could happen.
23623-      # In this case it's so old it's guaranteed to be remote-heap version 1.
23624-      my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
23625-      if (($n1 == $n2) && ($s1 == $s2)) {
23626-        # This is likely to be a remote-heap based sample profile
23627-        $sampling_algorithm = 1;
23628-      }
23629-    }
23630-  }
23631-
23632-  if ($sampling_algorithm > 0) {
23633-    # For remote-heap generated profiles, adjust the counts and sizes to
23634-    # account for the sample rate (we sample once every 128KB by default).
23635-    if ($sample_adjustment == 0) {
23636-      # Turn on profile adjustment.
23637-      $sample_adjustment = 128*1024;
23638-      print STDERR "Adjusting heap profiles for 1-in-128KB sampling rate\n";
23639-    } else {
23640-      printf STDERR ("Adjusting heap profiles for 1-in-%d sampling rate\n",
23641-                     $sample_adjustment);
23642-    }
23643-    if ($sampling_algorithm > 1) {
23644-      # We don't bother printing anything for the original version (version 1)
23645-      printf STDERR "Heap version $sampling_algorithm\n";
23646-    }
23647-  }
23648-
23649-  my $profile = {};
23650-  my $pcs = {};
23651-  my $map = "";
23652-
23653-  while (<PROFILE>) {
23654-    s/\r//g;         # turn windows-looking lines into unix-looking lines
23655-    if (/^MAPPED_LIBRARIES:/) {
23656-      $map .= ReadMappedLibraries(*PROFILE);
23657-      last;
23658-    }
23659-
23660-    if (/^--- Memory map:/) {
23661-      $map .= ReadMemoryMap(*PROFILE);
23662-      last;
23663-    }
23664-
23665-    # Read entry of the form:
23666-    #  <count1>: <bytes1> [<count2>: <bytes2>] @ a1 a2 a3 ... an
23667-    s/^\s*//;
23668-    s/\s*$//;
23669-    if (m/^\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]\s+@\s+(.*)$/) {
23670-      my $stack = $5;
23671-      my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
23672-      my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
23673-                                 $n1, $s1, $n2, $s2);
23674-      AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
23675-    }
23676-  }
23677-
23678-  my $r = {};
23679-  $r->{version} = "heap";
23680-  $r->{period} = 1;
23681-  $r->{profile} = $profile;
23682-  $r->{libs} = ParseLibraries($prog, $map, $pcs);
23683-  $r->{pcs} = $pcs;
23684-  return $r;
23685-}
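# Illustrative sketch, not part of the original script: the header regex in
# ReadHeapProfile() applied to the version-2 example quoted in the comments
# above (with "heap" standing in for the <heap-url> placeholder).  It
# captures the type "heap_v2" and the trailing sampling rate 524288, which
# selects version-2 sampling with $sample_adjustment = 524288.
{
  my $hdr = "heap profile:   1922: 127792360 [  1922: 127792360] \@ heap_v2/524288";
  if ($hdr =~ m"^heap profile:\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\](\s*@\s*([^/]*)(/(\d+))?)?") {
    my ($type, $rate) = ($6, $8);     # ("heap_v2", 524288)
  }
}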
23686-
23687-sub ReadThreadedHeapProfile {
23688-  my ($prog, $fname, $header) = @_;
23689-
23690-  my $index = HeapProfileIndex();
23691-  my $sampling_algorithm = 0;
23692-  my $sample_adjustment = 0;
23693-  chomp($header);
23694-  my $type = "unknown";
23695-  # Assuming a very specific type of header for now.
23696-  if ($header =~ m"^heap_v2/(\d+)") {
23697-    $type = "_v2";
23698-    $sampling_algorithm = 2;
23699-    $sample_adjustment = int($1);
23700-  }
23701-  if ($type ne "_v2" || !defined($sample_adjustment)) {
23702-    die "Threaded heap profiles require v2 sampling with a sample rate\n";
23703-  }
23704-
23705-  my $profile = {};
23706-  my $thread_profiles = {};
23707-  my $pcs = {};
23708-  my $map = "";
23709-  my $stack = "";
23710-
23711-  while (<PROFILE>) {
23712-    s/\r//g;
23713-    if (/^MAPPED_LIBRARIES:/) {
23714-      $map .= ReadMappedLibraries(*PROFILE);
23715-      last;
23716-    }
23717-
23718-    if (/^--- Memory map:/) {
23719-      $map .= ReadMemoryMap(*PROFILE);
23720-      last;
23721-    }
23722-
23723-    # Read entry of the form:
23724-    # @ a1 a2 ... an
23725-    #   t*: <count1>: <bytes1> [<count2>: <bytes2>]
23726-    #   t1: <count1>: <bytes1> [<count2>: <bytes2>]
23727-    #     ...
23728-    #   tn: <count1>: <bytes1> [<count2>: <bytes2>]
23729-    s/^\s*//;
23730-    s/\s*$//;
23731-    if (m/^@\s+(.*)$/) {
23732-      $stack = $1;
23733-    } elsif (m/^\s*(t(\*|\d+)):\s+(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]$/) {
23734-      if ($stack eq "") {
23735-        # Still in the header, so this is just a per-thread summary.
23736-        next;
23737-      }
23738-      my $thread = $2;
23739-      my ($n1, $s1, $n2, $s2) = ($3, $4, $5, $6);
23740-      my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
23741-                                 $n1, $s1, $n2, $s2);
23742-      if ($thread eq "*") {
23743-        AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
23744-      } else {
23745-        if (!exists($thread_profiles->{$thread})) {
23746-          $thread_profiles->{$thread} = {};
23747-        }
23748-        AddEntries($thread_profiles->{$thread}, $pcs,
23749-                   FixCallerAddresses($stack), $counts[$index]);
23750-      }
23751-    }
23752-  }
23753-
23754-  my $r = {};
23755-  $r->{version} = "heap";
23756-  $r->{period} = 1;
23757-  $r->{profile} = $profile;
23758-  $r->{threads} = $thread_profiles;
23759-  $r->{libs} = ParseLibraries($prog, $map, $pcs);
23760-  $r->{pcs} = $pcs;
23761-  return $r;
23762-}
23763-
23764-sub ReadSynchProfile {
23765-  my $prog = shift;
23766-  local *PROFILE = shift;
23767-  my $header = shift;
23768-
23769-  my $map = '';
23770-  my $profile = {};
23771-  my $pcs = {};
23772-  my $sampling_period = 1;
23773-  my $cyclespernanosec = 2.8;   # Default assumption for old binaries
23774-  my $seen_clockrate = 0;
23775-  my $line;
23776-
23777-  my $index = 0;
23778-  if ($main::opt_total_delay) {
23779-    $index = 0;
23780-  } elsif ($main::opt_contentions) {
23781-    $index = 1;
23782-  } elsif ($main::opt_mean_delay) {
23783-    $index = 2;
23784-  }
23785-
23786-  while ( $line = <PROFILE> ) {
23787-    $line =~ s/\r//g;      # turn windows-looking lines into unix-looking lines
23788-    if ( $line =~ /^\s*(\d+)\s+(\d+) \@\s*(.*?)\s*$/ ) {
23789-      my ($cycles, $count, $stack) = ($1, $2, $3);
23790-
23791-      # Convert cycles to nanoseconds
23792-      $cycles /= $cyclespernanosec;
23793-
23794-      # Adjust for sampling done by application
23795-      $cycles *= $sampling_period;
23796-      $count *= $sampling_period;
23797-
23798-      my @values = ($cycles, $count, $cycles / $count);
23799-      AddEntries($profile, $pcs, FixCallerAddresses($stack), $values[$index]);
23800-
23801-    } elsif ( $line =~ /^(slow release).*thread \d+  \@\s*(.*?)\s*$/ ||
23802-              $line =~ /^\s*(\d+) \@\s*(.*?)\s*$/ ) {
23803-      my ($cycles, $stack) = ($1, $2);
23804-      if ($cycles !~ /^\d+$/) {
23805-        next;
23806-      }
23807-
23808-      # Convert cycles to nanoseconds
23809-      $cycles /= $cyclespernanosec;
23810-
23811-      # Adjust for sampling done by application
23812-      $cycles *= $sampling_period;
23813-
23814-      AddEntries($profile, $pcs, FixCallerAddresses($stack), $cycles);
23815-
23816-    } elsif ( $line =~ m/^([a-z][^=]*)=(.*)$/ ) {
23817-      my ($variable, $value) = ($1,$2);
23818-      for ($variable, $value) {
23819-        s/^\s+//;
23820-        s/\s+$//;
23821-      }
23822-      if ($variable eq "cycles/second") {
23823-        $cyclespernanosec = $value / 1e9;
23824-        $seen_clockrate = 1;
23825-      } elsif ($variable eq "sampling period") {
23826-        $sampling_period = $value;
23827-      } elsif ($variable eq "ms since reset") {
23828-        # Currently nothing is done with this value in jeprof
23829-        # So we just silently ignore it for now
23830-      } elsif ($variable eq "discarded samples") {
23831-        # Currently nothing is done with this value in jeprof
23832-        # So we just silently ignore it for now
23833-      } else {
23834-        printf STDERR ("Ignoring unknown variable in /contention output: " .
23835-                       "'%s' = '%s'\n",$variable,$value);
23836-      }
23837-    } else {
23838-      # Memory map entry
23839-      $map .= $line;
23840-    }
23841-  }
23842-
23843-  if (!$seen_clockrate) {
23844-    printf STDERR ("No cycles/second entry in profile; Guessing %.1f GHz\n",
23845-                   $cyclespernanosec);
23846-  }
23847-
23848-  my $r = {};
23849-  $r->{version} = 0;
23850-  $r->{period} = $sampling_period;
23851-  $r->{profile} = $profile;
23852-  $r->{libs} = ParseLibraries($prog, $map, $pcs);
23853-  $r->{pcs} = $pcs;
23854-  return $r;
23855-}
23856-
23857-# Given a hex value in the form "0x1abcd" or "1abcd", return either
23858-# "0001abcd" or "000000000001abcd", depending on the current (global)
23859-# address length.
23860-sub HexExtend {
23861-  my $addr = shift;
23862-
23863-  $addr =~ s/^(0x)?0*//;
23864-  my $zeros_needed = $address_length - length($addr);
23865-  if ($zeros_needed < 0) {
23866-    printf STDERR "Warning: address $addr is longer than address length $address_length\n";
23867-    return $addr;
23868-  }
23869-  return ("0" x $zeros_needed) . $addr;
23870-}
23871-
23872-##### Symbol extraction #####
23873-
23874-# Aggressively search the lib_prefix values for the given library
23875-# If all else fails, just return the name of the library unmodified.
23876-# If the lib_prefix is "/my/path,/other/path" and $file is "/lib/dir/mylib.so"
23877-# it will search the following locations in this order, until it finds a file:
23878-#   /my/path/lib/dir/mylib.so
23879-#   /other/path/lib/dir/mylib.so
23880-#   /my/path/dir/mylib.so
23881-#   /other/path/dir/mylib.so
23882-#   /my/path/mylib.so
23883-#   /other/path/mylib.so
23884-#   /lib/dir/mylib.so              (returned as last resort)
23885-sub FindLibrary {
23886-  my $file = shift;
23887-  my $suffix = $file;
23888-
23889-  # Search for the library as described above
23890-  do {
23891-    foreach my $prefix (@prefix_list) {
23892-      my $fullpath = $prefix . $suffix;
23893-      if (-e $fullpath) {
23894-        return $fullpath;
23895-      }
23896-    }
23897-  } while ($suffix =~ s|^/[^/]+/|/|);
23898-  return $file;
23899-}
23900-
23901-# Return path to library with debugging symbols.
23902-# For libc libraries, the copy in /usr/lib/debug contains debugging symbols
23903-sub DebuggingLibrary {
23904-  my $file = shift;
23905-
23906-  if ($file !~ m|^/|) {
23907-    return undef;
23908-  }
23909-
23910-  # Find debug symbol file if it's named after the library's name.
23911-
23912-  if (-f "/usr/lib/debug$file") {
23913-    if($main::opt_debug) { print STDERR "found debug info for $file in /usr/lib/debug$file\n"; }
23914-    return "/usr/lib/debug$file";
23915-  } elsif (-f "/usr/lib/debug$file.debug") {
23916-    if($main::opt_debug) { print STDERR "found debug info for $file in /usr/lib/debug$file.debug\n"; }
23917-    return "/usr/lib/debug$file.debug";
23918-  }
23919-
23920-  if(!$main::opt_debug_syms_by_id) {
23921-    if($main::opt_debug) { print STDERR "no debug symbols found for $file\n" };
23922-    return undef;
23923-  }
23924-
23925-  # Find debug file if it's named after the library's build ID.
23926-
23927-  my $readelf = '';
23928-  if (!$main::gave_up_on_elfutils) {
23929-    $readelf = qx/eu-readelf -n ${file}/;
23930-    if ($?) {
23931-      print STDERR "Cannot run eu-readelf. To use --debug-syms-by-id you must be on Linux, with elfutils installed.\n";
23932-      $main::gave_up_on_elfutils = 1;
23933-      return undef;
23934-    }
23935-    my $buildID = $1 if $readelf =~ /Build ID: ([A-Fa-f0-9]+)/s;
23936-    if (defined $buildID && length $buildID > 0) {
23937-      my $symbolFile = '/usr/lib/debug/.build-id/' . substr($buildID, 0, 2) . '/' . substr($buildID, 2) . '.debug';
23938-      if (-e $symbolFile) {
23939-        if($main::opt_debug) { print STDERR "found debug symbol file $symbolFile for $file\n" };
23940-        return $symbolFile;
23941-      } else {
23942-        if($main::opt_debug) { print STDERR "no debug symbol file found for $file, build ID: $buildID\n" };
23943-        return undef;
23944-      }
23945-    }
23946-  }
23947-
23948-  if($main::opt_debug) { print STDERR "no debug symbols found for $file, build ID unknown\n" };
23949-  return undef;
23950-}
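# Illustrative sketch, not part of the original script (the build ID is
# hypothetical): how DebuggingLibrary() above maps a GNU build ID reported
# by `eu-readelf -n` to a separate debug file -- the first two hex digits
# become a directory, the rest the file name.
{
  my $buildID    = "0123456789abcdef0123456789abcdef01234567";
  my $symbolFile = '/usr/lib/debug/.build-id/' . substr($buildID, 0, 2) . '/'
                 . substr($buildID, 2) . '.debug';
  # => /usr/lib/debug/.build-id/01/23456789abcdef0123456789abcdef01234567.debug
}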
23951-
23952-
23953-# Parse text section header of a library using objdump
23954-sub ParseTextSectionHeaderFromObjdump {
23955-  my $lib = shift;
23956-
23957-  my $size = undef;
23958-  my $vma;
23959-  my $file_offset;
23960-  # Get objdump output from the library file to figure out how to
23961-  # map between mapped addresses and addresses in the library.
23962-  my $cmd = ShellEscape($obj_tool_map{"objdump"}, "-h", $lib);
23963-  open(OBJDUMP, "$cmd |") || error("$cmd: $!\n");
23964-  while (<OBJDUMP>) {
23965-    s/\r//g;         # turn windows-looking lines into unix-looking lines
23966-    # Idx Name          Size      VMA       LMA       File off  Algn
23967-    #  10 .text         00104b2c  420156f0  420156f0  000156f0  2**4
23968-    # For 64-bit objects, VMA and LMA will be 16 hex digits, size and file
23969-    # offset may still be 8.  But AddressSub below will still handle that.
23970-    my @x = split;
23971-    if (($#x >= 6) && ($x[1] eq '.text')) {
23972-      $size = $x[2];
23973-      $vma = $x[3];
23974-      $file_offset = $x[5];
23975-      last;
23976-    }
23977-  }
23978-  close(OBJDUMP);
23979-
23980-  if (!defined($size)) {
23981-    return undef;
23982-  }
23983-
23984-  my $r = {};
23985-  $r->{size} = $size;
23986-  $r->{vma} = $vma;
23987-  $r->{file_offset} = $file_offset;
23988-
23989-  return $r;
23990-}
23991-
23992-# Parse text section header of a library using otool (on OS X)
23993-sub ParseTextSectionHeaderFromOtool {
23994-  my $lib = shift;
23995-
23996-  my $size = undef;
23997-  my $vma = undef;
23998-  my $file_offset = undef;
23999-  # Get otool output from the library file to figure out how to
24000-  # map between mapped addresses and addresses in the library.
24001-  my $command = ShellEscape($obj_tool_map{"otool"}, "-l", $lib);
24002-  open(OTOOL, "$command |") || error("$command: $!\n");
24003-  my $cmd = "";
24004-  my $sectname = "";
24005-  my $segname = "";
24006-  foreach my $line (<OTOOL>) {
24007-    $line =~ s/\r//g;      # turn windows-looking lines into unix-looking lines
24008-    # Load command <#>
24009-    #       cmd LC_SEGMENT
24010-    # [...]
24011-    # Section
24012-    #   sectname __text
24013-    #    segname __TEXT
24014-    #       addr 0x000009f8
24015-    #       size 0x00018b9e
24016-    #     offset 2552
24017-    #      align 2^2 (4)
24018-    # We will need to strip off the leading 0x from the hex addresses,
24019-    # and convert the offset into hex.
24020-    if ($line =~ /Load command/) {
24021-      $cmd = "";
24022-      $sectname = "";
24023-      $segname = "";
24024-    } elsif ($line =~ /Section/) {
24025-      $sectname = "";
24026-      $segname = "";
24027-    } elsif ($line =~ /cmd (\w+)/) {
24028-      $cmd = $1;
24029-    } elsif ($line =~ /sectname (\w+)/) {
24030-      $sectname = $1;
24031-    } elsif ($line =~ /segname (\w+)/) {
24032-      $segname = $1;
24033-    } elsif (!(($cmd eq "LC_SEGMENT" || $cmd eq "LC_SEGMENT_64") &&
24034-               $sectname eq "__text" &&
24035-               $segname eq "__TEXT")) {
24036-      next;
24037-    } elsif ($line =~ /\baddr 0x([0-9a-fA-F]+)/) {
24038-      $vma = $1;
24039-    } elsif ($line =~ /\bsize 0x([0-9a-fA-F]+)/) {
24040-      $size = $1;
24041-    } elsif ($line =~ /\boffset ([0-9]+)/) {
24042-      $file_offset = sprintf("%016x", $1);
24043-    }
24044-    if (defined($vma) && defined($size) && defined($file_offset)) {
24045-      last;
24046-    }
24047-  }
24048-  close(OTOOL);
24049-
24050-  if (!defined($vma) || !defined($size) || !defined($file_offset)) {
24051-     return undef;
24052-  }
24053-
24054-  my $r = {};
24055-  $r->{size} = $size;
24056-  $r->{vma} = $vma;
24057-  $r->{file_offset} = $file_offset;
24058-
24059-  return $r;
24060-}
24061-
24062-sub ParseTextSectionHeader {
24063-  # obj_tool_map("otool") is only defined if we're in a Mach-O environment
24064-  if (defined($obj_tool_map{"otool"})) {
24065-    my $r = ParseTextSectionHeaderFromOtool(@_);
24066-    if (defined($r)){
24067-      return $r;
24068-    }
24069-  }
24070-  # If otool doesn't work, or we don't have it, fall back to objdump
24071-  return ParseTextSectionHeaderFromObjdump(@_);
24072-}
24073-
24074-# Split /proc/pid/maps dump into a list of libraries
24075-sub ParseLibraries {
24076-  return if $main::use_symbol_page;  # We don't need libraries info.
24077-  my $prog = Cwd::abs_path(shift);
24078-  my $map = shift;
24079-  my $pcs = shift;
24080-
24081-  my $result = [];
24082-  my $h = "[a-f0-9]+";
24083-  my $zero_offset = HexExtend("0");
24084-
24085-  my $buildvar = "";
24086-  foreach my $l (split("\n", $map)) {
24087-    if ($l =~ m/^\s*build=(.*)$/) {
24088-      $buildvar = $1;
24089-    }
24090-
24091-    my $start;
24092-    my $finish;
24093-    my $offset;
24094-    my $lib;
24095-    if ($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+\.(so|dll|dylib|bundle)((\.\d+)+\w*(\.\d+){0,3})?)$/i) {
24096-      # Full line from /proc/self/maps.  Example:
24097-      #   40000000-40015000 r-xp 00000000 03:01 12845071   /lib/ld-2.3.2.so
24098-      $start = HexExtend($1);
24099-      $finish = HexExtend($2);
24100-      $offset = HexExtend($3);
24101-      $lib = $4;
24102-      $lib =~ s|\\|/|g;     # turn windows-style paths into unix-style paths
24103-    } elsif ($l =~ /^\s*($h)-($h):\s*(\S+\.so(\.\d+)*)/) {
24104-      # Cooked line from DumpAddressMap.  Example:
24105-      #   40000000-40015000: /lib/ld-2.3.2.so
24106-      $start = HexExtend($1);
24107-      $finish = HexExtend($2);
24108-      $offset = $zero_offset;
24109-      $lib = $3;
24110-    } elsif (($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+)$/i) && ($4 eq $prog)) {
24111-      # PIEs and address space randomization do not play well with our
24112-      # default assumption that the main executable is at the lowest
24113-      # addresses.  So we also detect the main executable in
24114-      # /proc/self/maps.
24115-      $start = HexExtend($1);
24116-      $finish = HexExtend($2);
24117-      $offset = HexExtend($3);
24118-      $lib = $4;
24119-      $lib =~ s|\\|/|g;     # turn windows-style paths into unix-style paths
24120-    }
24121-    # FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in
24122-    # function procfs_doprocmap (sys/fs/procfs/procfs_map.c)
24123-    #
24124-    # Example:
24125-    # 0x800600000 0x80061a000 26 0 0xfffff800035a0000 r-x 75 33 0x1004 COW NC vnode /libexec/ld-elf.s
24126-    # o.1 NCH -1
24127-    elsif ($l =~ /^(0x$h)\s(0x$h)\s\d+\s\d+\s0x$h\sr-x\s\d+\s\d+\s0x\d+\s(COW|NCO)\s(NC|NNC)\svnode\s(\S+\.so(\.\d+)*)/) {
24128-      $start = HexExtend($1);
24129-      $finish = HexExtend($2);
24130-      $offset = $zero_offset;
24131-      $lib = FindLibrary($5);
24132-
24133-    } else {
24134-      next;
24135-    }
24136-
24137-    # Expand "$build" variable if available
24138-    $lib =~ s/\$build\b/$buildvar/g;
24139-
24140-    $lib = FindLibrary($lib);
24141-
24142-    # Check for pre-relocated libraries, which use pre-relocated symbol tables
24143-    # and thus require adjusting the offset that we'll use to translate
24144-    # VM addresses into symbol table addresses.
24145-    # Only do this if we're not going to fetch the symbol table from a
24146-    # debugging copy of the library.
24147-    if (!DebuggingLibrary($lib)) {
24148-      my $text = ParseTextSectionHeader($lib);
24149-      if (defined($text)) {
24150-         my $vma_offset = AddressSub($text->{vma}, $text->{file_offset});
24151-         $offset = AddressAdd($offset, $vma_offset);
24152-      }
24153-    }
24154-
24155-    if($main::opt_debug) { printf STDERR "$start:$finish ($offset) $lib\n"; }
24156-    push(@{$result}, [$lib, $start, $finish, $offset]);
24157-  }
24158-
24159-  # Append special entry for additional library (not relocated)
24160-  if ($main::opt_lib ne "") {
24161-    my $text = ParseTextSectionHeader($main::opt_lib);
24162-    if (defined($text)) {
24163-       my $start = $text->{vma};
24164-       my $finish = AddressAdd($start, $text->{size});
24165-
24166-       push(@{$result}, [$main::opt_lib, $start, $finish, $start]);
24167-    }
24168-  }
24169-
24170-  # Append special entry for the main program.  This covers
24171-  # 0..max_pc_value_seen, so that we assume pc values not found in one
24172-  # of the library ranges will be treated as coming from the main
24173-  # program binary.
24174-  my $min_pc = HexExtend("0");
24175-  my $max_pc = $min_pc;          # find the maximal PC value in any sample
24176-  foreach my $pc (keys(%{$pcs})) {
24177-    if (HexExtend($pc) gt $max_pc) { $max_pc = HexExtend($pc); }
24178-  }
24179-  push(@{$result}, [$prog, $min_pc, $max_pc, $zero_offset]);
24180-
24181-  return $result;
24182-}
24183-
24184-# Add two hex addresses of length $address_length.
24185-# Run jeprof --test for unit test if this is changed.
24186-sub AddressAdd {
24187-  my $addr1 = shift;
24188-  my $addr2 = shift;
24189-  my $sum;
24190-
24191-  if ($address_length == 8) {
24192-    # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
24193-    $sum = (hex($addr1)+hex($addr2)) % (0x10000000 * 16);
24194-    return sprintf("%08x", $sum);
24195-
24196-  } else {
24197-    # Do the addition in 7-nibble chunks to trivialize carry handling.
24198-
24199-    if ($main::opt_debug and $main::opt_test) {
24200-      print STDERR "AddressAdd $addr1 + $addr2 = ";
24201-    }
24202-
24203-    my $a1 = substr($addr1,-7);
24204-    $addr1 = substr($addr1,0,-7);
24205-    my $a2 = substr($addr2,-7);
24206-    $addr2 = substr($addr2,0,-7);
24207-    $sum = hex($a1) + hex($a2);
24208-    my $c = 0;
24209-    if ($sum > 0xfffffff) {
24210-      $c = 1;
24211-      $sum -= 0x10000000;
24212-    }
24213-    my $r = sprintf("%07x", $sum);
24214-
24215-    $a1 = substr($addr1,-7);
24216-    $addr1 = substr($addr1,0,-7);
24217-    $a2 = substr($addr2,-7);
24218-    $addr2 = substr($addr2,0,-7);
24219-    $sum = hex($a1) + hex($a2) + $c;
24220-    $c = 0;
24221-    if ($sum > 0xfffffff) {
24222-      $c = 1;
24223-      $sum -= 0x10000000;
24224-    }
24225-    $r = sprintf("%07x", $sum) . $r;
24226-
24227-    $sum = hex($addr1) + hex($addr2) + $c;
24228-    if ($sum > 0xff) { $sum -= 0x100; }
24229-    $r = sprintf("%02x", $sum) . $r;
24230-
24231-    if ($main::opt_debug and $main::opt_test) { print STDERR "$r\n"; }
24232-
24233-    return $r;
24234-  }
24235-}
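# Illustrative worked example, not part of the original script (the operands
# are arbitrary): why 7-nibble chunks are safe -- each chunk is at most
# 0xfffffff (28 bits), so the sum of two chunks plus a carry always fits in
# a 32-bit integer even on a 32-bit perl.  For
# AddressAdd("00000000fffffff8", "0000000000000010") with a 16-digit
# $address_length:
#   low 7 nibbles:  0xffffff8 + 0x0000010     = 0x10000008 -> "0000008", carry 1
#   next 7 nibbles: 0x000000f + 0x0000000 + 1 = 0x10       -> "0000010", carry 0
#   top 2 nibbles:  0x00      + 0x00      + 0 = 0x00       -> "00"
# giving the expected "0000000100000008".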
24236-
24237-
24238-# Subtract two hex addresses of length $address_length.
24239-# Run jeprof --test for unit test if this is changed.
24240-sub AddressSub {
24241-  my $addr1 = shift;
24242-  my $addr2 = shift;
24243-  my $diff;
24244-
24245-  if ($address_length == 8) {
24246-    # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
24247-    $diff = (hex($addr1)-hex($addr2)) % (0x10000000 * 16);
24248-    return sprintf("%08x", $diff);
24249-
24250-  } else {
24251-    # Do the addition in 7-nibble chunks to trivialize borrow handling.
24252-    # if ($main::opt_debug) { print STDERR "AddressSub $addr1 - $addr2 = "; }
24253-
24254-    my $a1 = hex(substr($addr1,-7));
24255-    $addr1 = substr($addr1,0,-7);
24256-    my $a2 = hex(substr($addr2,-7));
24257-    $addr2 = substr($addr2,0,-7);
24258-    my $b = 0;
24259-    if ($a2 > $a1) {
24260-      $b = 1;
24261-      $a1 += 0x10000000;
24262-    }
24263-    $diff = $a1 - $a2;
24264-    my $r = sprintf("%07x", $diff);
24265-
24266-    $a1 = hex(substr($addr1,-7));
24267-    $addr1 = substr($addr1,0,-7);
24268-    $a2 = hex(substr($addr2,-7)) + $b;
24269-    $addr2 = substr($addr2,0,-7);
24270-    $b = 0;
24271-    if ($a2 > $a1) {
24272-      $b = 1;
24273-      $a1 += 0x10000000;
24274-    }
24275-    $diff = $a1 - $a2;
24276-    $r = sprintf("%07x", $diff) . $r;
24277-
24278-    $a1 = hex($addr1);
24279-    $a2 = hex($addr2) + $b;
24280-    if ($a2 > $a1) { $a1 += 0x100; }
24281-    $diff = $a1 - $a2;
24282-    $r = sprintf("%02x", $diff) . $r;
24283-
24284-    # if ($main::opt_debug) { print STDERR "$r\n"; }
24285-
24286-    return $r;
24287-  }
24288-}
24289-
24290-# Increment a hex address of length $address_length.
24291-# Run jeprof --test for unit test if this is changed.
24292-sub AddressInc {
24293-  my $addr = shift;
24294-  my $sum;
24295-
24296-  if ($address_length == 8) {
24297-    # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
24298-    $sum = (hex($addr)+1) % (0x10000000 * 16);
24299-    return sprintf("%08x", $sum);
24300-
24301-  } else {
24302-    # Do the addition in 7-nibble chunks to trivialize carry handling.
24303-    # We are always doing this to step through the addresses in a function,
24304-    # and will almost never overflow the first chunk, so we check for this
24305-    # case and exit early.
24306-
24307-    # if ($main::opt_debug) { print STDERR "AddressInc $addr1 = "; }
24308-
24309-    my $a1 = substr($addr,-7);
24310-    $addr = substr($addr,0,-7);
24311-    $sum = hex($a1) + 1;
24312-    my $r = sprintf("%07x", $sum);
24313-    if ($sum <= 0xfffffff) {
24314-      $r = $addr . $r;
24315-      # if ($main::opt_debug) { print STDERR "$r\n"; }
24316-      return HexExtend($r);
24317-    } else {
24318-      $r = "0000000";
24319-    }
24320-
24321-    $a1 = substr($addr,-7);
24322-    $addr = substr($addr,0,-7);
24323-    $sum = hex($a1) + 1;
24324-    $r = sprintf("%07x", $sum) . $r;
24325-    if ($sum <= 0xfffffff) {
24326-      $r = $addr . $r;
24327-      # if ($main::opt_debug) { print STDERR "$r\n"; }
24328-      return HexExtend($r);
24329-    } else {
24330-      $r = "00000000000000";
24331-    }
24332-
24333-    $sum = hex($addr) + 1;
24334-    if ($sum > 0xff) { $sum -= 0x100; }
24335-    $r = sprintf("%02x", $sum) . $r;
24336-
24337-    # if ($main::opt_debug) { print STDERR "$r\n"; }
24338-    return $r;
24339-  }
24340-}
24341-
24342-# Extract symbols for all PC values found in profile
24343-sub ExtractSymbols {
24344-  my $libs = shift;
24345-  my $pcset = shift;
24346-
24347-  my $symbols = {};
24348-
24349-  # Map each PC value to the containing library.  To make this faster,
24350-  # we sort libraries by their starting pc value (highest first), and
24351-  # advance through the libraries as we advance the pc.  Sometimes the
24352-  # addresses of libraries may overlap with the addresses of the main
24353-  # binary, so to make sure the libraries 'win', we iterate over the
24354-  # libraries in reverse order (which assumes the binary doesn't start
24355-  # in the middle of a library, which seems a fair assumption).
24356-  my @pcs = (sort { $a cmp $b } keys(%{$pcset}));  # pcset is 0-extended strings
24357-  foreach my $lib (sort {$b->[1] cmp $a->[1]} @{$libs}) {
24358-    my $libname = $lib->[0];
24359-    my $start = $lib->[1];
24360-    my $finish = $lib->[2];
24361-    my $offset = $lib->[3];
24362-
24363-    # Use debug library if it exists
24364-    my $debug_libname = DebuggingLibrary($libname);
24365-    if ($debug_libname) {
24366-        $libname = $debug_libname;
24367-    }
24368-
24369-    # Get list of pcs that belong in this library.
24370-    my $contained = [];
24371-    my ($start_pc_index, $finish_pc_index);
24372-    # Find smallest finish_pc_index such that $finish < $pcs[$finish_pc_index].
24373-    for ($finish_pc_index = $#pcs + 1; $finish_pc_index > 0;
24374-         $finish_pc_index--) {
24375-      last if $pcs[$finish_pc_index - 1] le $finish;
24376-    }
24377-    # Find smallest start_pc_index such that $start <= $pcs[$start_pc_index].
24378-    for ($start_pc_index = $finish_pc_index; $start_pc_index > 0;
24379-         $start_pc_index--) {
24380-      last if $pcs[$start_pc_index - 1] lt $start;
24381-    }
24382-    # This keeps PC values higher than $pcs[$finish_pc_index] in @pcs,
24383-    # in case there are overlaps in libraries and the main binary.
24384-    @{$contained} = splice(@pcs, $start_pc_index,
24385-                           $finish_pc_index - $start_pc_index);
24386-    # Map to symbols
24387-    MapToSymbols($libname, AddressSub($start, $offset), $contained, $symbols);
24388-  }
24389-
24390-  return $symbols;
24391-}
24392-
24393-# Map list of PC values to symbols for a given image
24394-sub MapToSymbols {
24395-  my $image = shift;
24396-  my $offset = shift;
24397-  my $pclist = shift;
24398-  my $symbols = shift;
24399-
24400-  my $debug = 0;
24401-
24402-  # Ignore empty binaries
24403-  if ($#{$pclist} < 0) { return; }
24404-
24405-  # Figure out the addr2line command to use
24406-  my $addr2line = $obj_tool_map{"addr2line"};
24407-  my $cmd = ShellEscape($addr2line, "-f", "-C", "-e", $image);
24408-  if (exists $obj_tool_map{"addr2line_pdb"}) {
24409-    $addr2line = $obj_tool_map{"addr2line_pdb"};
24410-    $cmd = ShellEscape($addr2line, "--demangle", "-f", "-C", "-e", $image);
24411-  }
24412-
24413-  # If "addr2line" isn't installed on the system at all, just use
24414-  # nm to get what info we can (function names, but not line numbers).
24415-  if (system(ShellEscape($addr2line, "--help") . " >$dev_null 2>&1") != 0) {
24416-    MapSymbolsWithNM($image, $offset, $pclist, $symbols);
24417-    return;
24418-  }
24419-
24420-  # "addr2line -i" can produce a variable number of lines per input
24421-  # address, with no separator that allows us to tell when data for
24422-  # the next address starts.  So we find the address for a special
24423-  # symbol (_fini) and interleave this address between all real
24424-  # addresses passed to addr2line.  The name of this special symbol
24425-  # can then be used as a separator.
24426-  $sep_address = undef;  # May be filled in by MapSymbolsWithNM()
24427-  my $nm_symbols = {};
24428-  MapSymbolsWithNM($image, $offset, $pclist, $nm_symbols);
24429-  if (defined($sep_address)) {
24430-    # Only add " -i" to addr2line if the binary supports it.
24431-    # addr2line --help returns 0, but not if it sees an unknown flag first.
24432-    if (system("$cmd -i --help >$dev_null 2>&1") == 0) {
24433-      $cmd .= " -i";
24434-    } else {
24435-      $sep_address = undef;   # no need for sep_address if we don't support -i
24436-    }
24437-  }
24438-
24439-  # Make file with all PC values with intervening 'sep_address' so
24440-  # that we can reliably detect the end of the inlined function list
24441-  open(ADDRESSES, ">$main::tmpfile_sym") || error("$main::tmpfile_sym: $!\n");
24442-  if ($debug) { print("---- $image ---\n"); }
24443-  for (my $i = 0; $i <= $#{$pclist}; $i++) {
24444-    # addr2line always reads hex addresses, and does not need '0x' prefix.
24445-    if ($debug) { printf STDERR ("%s\n", $pclist->[$i]); }
24446-    printf ADDRESSES ("%s\n", AddressSub($pclist->[$i], $offset));
24447-    if (defined($sep_address)) {
24448-      printf ADDRESSES ("%s\n", $sep_address);
24449-    }
24450-  }
24451-  close(ADDRESSES);
24452-  if ($debug) {
24453-    print("----\n");
24454-    system("cat", $main::tmpfile_sym);
24455-    print("----\n");
24456-    system("$cmd < " . ShellEscape($main::tmpfile_sym));
24457-    print("----\n");
24458-  }
24459-
24460-  open(SYMBOLS, "$cmd <" . ShellEscape($main::tmpfile_sym) . " |")
24461-      || error("$cmd: $!\n");
24462-  my $count = 0;   # Index in pclist
24463-  while (<SYMBOLS>) {
24464-    # Read fullfunction and filelineinfo from next pair of lines
24465-    s/\r?\n$//g;
24466-    my $fullfunction = $_;
24467-    $_ = <SYMBOLS>;
24468-    s/\r?\n$//g;
24469-    my $filelinenum = $_;
24470-
24471-    if (defined($sep_address) && $fullfunction eq $sep_symbol) {
24472-      # Terminating marker for data for this address
24473-      $count++;
24474-      next;
24475-    }
24476-
24477-    $filelinenum =~ s|\\|/|g; # turn windows-style paths into unix-style paths
24478-
24479-    my $pcstr = $pclist->[$count];
24480-    my $function = ShortFunctionName($fullfunction);
24481-    my $nms = $nm_symbols->{$pcstr};
24482-    if (defined($nms)) {
24483-      if ($fullfunction eq '??') {
24484-        # nm found a symbol for us.
24485-        $function = $nms->[0];
24486-        $fullfunction = $nms->[2];
24487-      } else {
24488-	# MapSymbolsWithNM tags each routine with its starting address,
24489-	# useful in case the image has multiple occurrences of this
24490-	# routine.  (It uses a syntax that resembles template parameters,
24491-	# that are automatically stripped out by ShortFunctionName().)
24492-	# addr2line does not provide the same information.  So we check
24493-	# if nm disambiguated our symbol, and if so take the annotated
24494-	# (nm) version of the routine-name.  TODO(csilvers): this won't
24495-	# catch overloaded, inlined symbols, which nm doesn't see.
24496-	# Better would be to do a check similar to nm's, in this fn.
24497-	if ($nms->[2] =~ m/^\Q$function\E/) {  # sanity check it's the right fn
24498-	  $function = $nms->[0];
24499-	  $fullfunction = $nms->[2];
24500-	}
24501-      }
24502-    }
24503-
24504-    # Prepend to accumulated symbols for pcstr
24505-    # (so that caller comes before callee)
24506-    my $sym = $symbols->{$pcstr};
24507-    if (!defined($sym)) {
24508-      $sym = [];
24509-      $symbols->{$pcstr} = $sym;
24510-    }
24511-    unshift(@{$sym}, $function, $filelinenum, $fullfunction);
24512-    if ($debug) { printf STDERR ("%s => [%s]\n", $pcstr, join(" ", @{$sym})); }
24513-    if (!defined($sep_address)) {
24514-      # Inlining is off, so this entry ends immediately
24515-      $count++;
24516-    }
24517-  }
24518-  close(SYMBOLS);
24519-}
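# Editorial sketch (not part of the deleted jeprof script): the separator
# trick used above, reduced to its essence.  With "-i", addr2line may emit
# several function/file pairs per input address, so each real address is
# followed by the address of a known symbol (_fini); when that symbol's name
# shows up in the output, the frames for the previous address are complete.
sub _Addr2lineInputSketch {
  my ($pclist, $offset, $sep_address) = @_;    # hex strings, as elsewhere in jeprof
  my @lines;
  foreach my $pc (@{$pclist}) {
    push(@lines, AddressSub($pc, $offset));              # real (offset-adjusted) address
    push(@lines, $sep_address) if defined($sep_address); # separator marker
  }
  return join("\n", @lines) . "\n";                      # what gets piped to addr2line
}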
24520-
24521-# Use nm to map the list of referenced PCs to symbols.  Return true iff we
24522-# are able to read procedure information via nm.
24523-sub MapSymbolsWithNM {
24524-  my $image = shift;
24525-  my $offset = shift;
24526-  my $pclist = shift;
24527-  my $symbols = shift;
24528-
24529-  # Get nm output sorted by increasing address
24530-  my $symbol_table = GetProcedureBoundaries($image, ".");
24531-  if (!%{$symbol_table}) {
24532-    return 0;
24533-  }
24534-  # Start addresses are already the right length (8 or 16 hex digits).
24535-  my @names = sort { $symbol_table->{$a}->[0] cmp $symbol_table->{$b}->[0] }
24536-    keys(%{$symbol_table});
24537-
24538-  if ($#names < 0) {
24539-    # No symbols: just use addresses
24540-    foreach my $pc (@{$pclist}) {
24541-      my $pcstr = "0x" . $pc;
24542-      $symbols->{$pc} = [$pcstr, "?", $pcstr];
24543-    }
24544-    return 0;
24545-  }
24546-
24547-  # Sort addresses so we can do a join against nm output
24548-  my $index = 0;
24549-  my $fullname = $names[0];
24550-  my $name = ShortFunctionName($fullname);
24551-  foreach my $pc (sort { $a cmp $b } @{$pclist}) {
24552-    # Adjust for mapped offset
24553-    my $mpc = AddressSub($pc, $offset);
24554-    while (($index < $#names) && ($mpc ge $symbol_table->{$fullname}->[1])){
24555-      $index++;
24556-      $fullname = $names[$index];
24557-      $name = ShortFunctionName($fullname);
24558-    }
24559-    if ($mpc lt $symbol_table->{$fullname}->[1]) {
24560-      $symbols->{$pc} = [$name, "?", $fullname];
24561-    } else {
24562-      my $pcstr = "0x" . $pc;
24563-      $symbols->{$pc} = [$pcstr, "?", $pcstr];
24564-    }
24565-  }
24566-  return 1;
24567-}
24568-
24569-sub ShortFunctionName {
24570-  my $function = shift;
24571-  while ($function =~ s/\([^()]*\)(\s*const)?//g) { }   # Argument types
24572-  while ($function =~ s/<[^<>]*>//g)  { }    # Remove template arguments
24573-  $function =~ s/^.*\s+(\w+::)/$1/;          # Remove leading type
24574-  return $function;
24575-}
24576-
24577-# Trim overly long symbols found in disassembler output
24578-sub CleanDisassembly {
24579-  my $d = shift;
24580-  while ($d =~ s/\([^()%]*\)(\s*const)?//g) { } # Argument types, not (%rax)
24581-  while ($d =~ s/(\w+)<[^<>]*>/$1/g)  { }       # Remove template arguments
24582-  return $d;
24583-}
24584-
24585-# Clean file name for display
24586-sub CleanFileName {
24587-  my ($f) = @_;
24588-  $f =~ s|^/proc/self/cwd/||;
24589-  $f =~ s|^\./||;
24590-  return $f;
24591-}
24592-
24593-# Make address relative to section and clean up for display
24594-sub UnparseAddress {
24595-  my ($offset, $address) = @_;
24596-  $address = AddressSub($address, $offset);
24597-  $address =~ s/^0x//;
24598-  $address =~ s/^0*//;
24599-  return $address;
24600-}
24601-
24602-##### Miscellaneous #####
24603-
24604-# Find the right versions of the above object tools to use.  The
24605-# argument is the program file being analyzed, and should be an ELF
24606-# 32-bit or ELF 64-bit executable file.  The location of the tools
24607-# is determined by considering the following options in this order:
24608-#   1) --tools option, if set
24609-#   2) JEPROF_TOOLS environment variable, if set
24610-#   3) the directory containing jeprof, then the system PATH
24611-sub ConfigureObjTools {
24612-  my $prog_file = shift;
24613-
24614-  # Check for the existence of $prog_file because /usr/bin/file does not
24615-  # predictably return error status in prod.
24616-  (-e $prog_file)  || error("$prog_file does not exist.\n");
24617-
24618-  my $file_type = undef;
24619-  if (-e "/usr/bin/file") {
24620-    # Follow symlinks (at least for systems where "file" supports that).
24621-    my $escaped_prog_file = ShellEscape($prog_file);
24622-    $file_type = `/usr/bin/file -L $escaped_prog_file 2>$dev_null ||
24623-                  /usr/bin/file $escaped_prog_file`;
24624-  } elsif ($^O eq "MSWin32") {
24625-    $file_type = "MS Windows";
24626-  } else {
24627-    print STDERR "WARNING: Can't determine the file type of $prog_file\n";
24628-  }
24629-
24630-  if ($file_type =~ /64-bit/) {
24631-    # Change $address_length to 16 if the program file is ELF 64-bit.
24632-    # We can't detect this from many (most?) heap or lock contention
24633-    # profiles, since the actual addresses referenced are generally in low
24634-    # memory even for 64-bit programs.
24635-    $address_length = 16;
24636-  }
24637-
24638-  if ($file_type =~ /MS Windows/) {
24639-    # For windows, we provide a version of nm and addr2line as part of
24640-    # the opensource release, which is capable of parsing
24641-    # Windows-style PDB executables.  It should live in the path, or
24642-    # in the same directory as jeprof.
24643-    $obj_tool_map{"nm_pdb"} = "nm-pdb";
24644-    $obj_tool_map{"addr2line_pdb"} = "addr2line-pdb";
24645-  }
24646-
24647-  if ($file_type =~ /Mach-O/) {
24648-    # OS X uses otool to examine Mach-O files, rather than objdump.
24649-    $obj_tool_map{"otool"} = "otool";
24650-    $obj_tool_map{"addr2line"} = "false";  # no addr2line
24651-    $obj_tool_map{"objdump"} = "false";  # no objdump
24652-  }
24653-
24654-  # Go fill in %obj_tool_map with the pathnames to use:
24655-  foreach my $tool (keys %obj_tool_map) {
24656-    $obj_tool_map{$tool} = ConfigureTool($obj_tool_map{$tool});
24657-  }
24658-}
24659-
24660-# Returns the path of a caller-specified object tool.  If --tools or
24661-# JEPROF_TOOLS are specified, then returns the full path to the tool
24662-# with that prefix.  Otherwise, returns the path unmodified (which
24663-# means we will look for it on PATH).
24664-sub ConfigureTool {
24665-  my $tool = shift;
24666-  my $path;
24667-
24668-  # --tools (or $JEPROF_TOOLS) is a comma separated list, where each
24669-  # item is either a) a pathname prefix, or b) a map of the form
24670-  # <tool>:<path>.  First we look for an entry of type (b) for our
24671-  # tool.  If one is found, we use it.  Otherwise, we consider all the
24672-  # pathname prefixes in turn, until one yields an existing file.  If
24673-  # none does, we use a default path.
24674-  my $tools = $main::opt_tools || $ENV{"JEPROF_TOOLS"} || "";
24675-  if ($tools =~ m/(,|^)\Q$tool\E:([^,]*)/) {
24676-    $path = $2;
24677-    # TODO(csilvers): sanity-check that $path exists?  Hard if it's relative.
24678-  } elsif ($tools ne '') {
24679-    foreach my $prefix (split(',', $tools)) {
24680-      next if ($prefix =~ /:/);    # ignore "tool:fullpath" entries in the list
24681-      if (-x $prefix . $tool) {
24682-        $path = $prefix . $tool;
24683-        last;
24684-      }
24685-    }
24686-    if (!$path) {
24687-      error("No '$tool' found with prefix specified by " .
24688-            "--tools (or \$JEPROF_TOOLS) '$tools'\n");
24689-    }
24690-  } else {
24691-    # ... otherwise use the version that exists in the same directory as
24692-    # jeprof.  If there's nothing there, use $PATH.
24693-    $0 =~ m,[^/]*$,;     # this is everything after the last slash
24694-    my $dirname = $`;    # this is everything up to and including the last slash
24695-    if (-x "$dirname$tool") {
24696-      $path = "$dirname$tool";
24697-    } else {
24698-      $path = $tool;
24699-    }
24700-  }
24701-  if ($main::opt_debug) { print STDERR "Using '$path' for '$tool'.\n"; }
24702-  return $path;
24703-}
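# Editorial sketch (not part of the deleted jeprof script): the two entry
# forms ConfigureTool understands.  The tool paths below are made up purely
# for illustration, and $main::opt_tools is assumed to be unset.
sub _ConfigureToolSketch {
  # A "<tool>:<path>" entry wins for that tool; other entries act as prefixes.
  local $ENV{"JEPROF_TOOLS"} = "nm:/usr/local/bin/gnm,/opt/cross/bin/";
  my $nm  = ConfigureTool("nm");         # "/usr/local/bin/gnm" (taken verbatim)
  my $a2l = ConfigureTool("addr2line");  # "/opt/cross/bin/addr2line" if executable,
                                         # otherwise error() is raised
  return ($nm, $a2l);
}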
24704-
24705-sub ShellEscape {
24706-  my @escaped_words = ();
24707-  foreach my $word (@_) {
24708-    my $escaped_word = $word;
24709-    if ($word =~ m![^a-zA-Z0-9/.,_=-]!) {  # check for anything not in whitelist
24710-      $escaped_word =~ s/'/'\\''/g;
24711-      $escaped_word = "'$escaped_word'";
24712-    }
24713-    push(@escaped_words, $escaped_word);
24714-  }
24715-  return join(" ", @escaped_words);
24716-}
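# Editorial sketch (not part of the deleted jeprof script): what ShellEscape
# produces for a couple of representative arguments.
sub _ShellEscapeSketch {
  my $plain  = ShellEscape("nm", "-n", "/usr/lib/libc.so");  # "nm -n /usr/lib/libc.so"
  my $spaced = ShellEscape("my file.prof");                  # "'my file.prof'" (space quoted)
  return ($plain, $spaced);
}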
24717-
24718-sub cleanup {
24719-  unlink($main::tmpfile_sym);
24720-  unlink(keys %main::tempnames);
24721-
24722-  # We leave any collected profiles in $HOME/jeprof in case the user wants
24723-  # to look at them later.  We print a message informing them of this.
24724-  if ((scalar(@main::profile_files) > 0) &&
24725-      defined($main::collected_profile)) {
24726-    if (scalar(@main::profile_files) == 1) {
24727-      print STDERR "Dynamically gathered profile is in $main::collected_profile\n";
24728-    }
24729-    print STDERR "If you want to investigate this profile further, you can do:\n";
24730-    print STDERR "\n";
24731-    print STDERR "  jeprof \\\n";
24732-    print STDERR "    $main::prog \\\n";
24733-    print STDERR "    $main::collected_profile\n";
24734-    print STDERR "\n";
24735-  }
24736-}
24737-
24738-sub sighandler {
24739-  cleanup();
24740-  exit(1);
24741-}
24742-
24743-sub error {
24744-  my $msg = shift;
24745-  print STDERR $msg;
24746-  cleanup();
24747-  exit(1);
24748-}
24749-
24750-
24751-# Run $nm_command and get all the resulting procedure boundaries whose
24752-# names match "$regexp" and return them in a hashtable mapping from
24753-# procedure name to a two-element vector of [start address, end address]
24754-sub GetProcedureBoundariesViaNm {
24755-  my $escaped_nm_command = shift;    # shell-escaped
24756-  my $regexp = shift;
24757-
24758-  my $symbol_table = {};
24759-  open(NM, "$escaped_nm_command |") || error("$escaped_nm_command: $!\n");
24760-  my $last_start = "0";
24761-  my $routine = "";
24762-  while (<NM>) {
24763-    s/\r//g;         # turn windows-looking lines into unix-looking lines
24764-    if (m/^\s*([0-9a-f]+) (.) (..*)/) {
24765-      my $start_val = $1;
24766-      my $type = $2;
24767-      my $this_routine = $3;
24768-
24769-      # It's possible for two symbols to share the same address, if
24770-      # one is a zero-length variable (like __start_google_malloc) or
24771-      # one symbol is a weak alias to another (like __libc_malloc).
24772-      # In such cases, we want to ignore all values except for the
24773-      # actual symbol, which in nm-speak has type "T".  The logic
24774-      # below does this, though it's a bit tricky: when we see a series
24775-      # of lines with the same address, the first one gets queued up
24776-      # to be processed.  However, it won't
24777-      # *actually* be processed until later, when we read a line with
24778-      # a different address.  That means that as long as we're reading
24779-      # lines with the same address, we have a chance to replace that
24780-      # item in the queue, which we do whenever we see a 'T' entry --
24781-      # that is, a line with type 'T'.  If we never see a 'T' entry,
24782-      # we'll just go ahead and process the first entry (which never
24783-      # got touched in the queue), and ignore the others.
24784-      if ($start_val eq $last_start && $type =~ /t/i) {
24785-        # We are the 'T' symbol at this address, replace previous symbol.
24786-        $routine = $this_routine;
24787-        next;
24788-      } elsif ($start_val eq $last_start) {
24789-        # We're not the 'T' symbol at this address, so ignore us.
24790-        next;
24791-      }
24792-
24793-      if ($this_routine eq $sep_symbol) {
24794-        $sep_address = HexExtend($start_val);
24795-      }
24796-
24797-      # Tag this routine with the starting address in case the image
24798-      # has multiple occurrences of this routine.  We use a syntax
24799-      # that resembles template parameters that are automatically
24800-      # stripped out by ShortFunctionName()
24801-      $this_routine .= "<$start_val>";
24802-
24803-      if (defined($routine) && $routine =~ m/$regexp/) {
24804-        $symbol_table->{$routine} = [HexExtend($last_start),
24805-                                     HexExtend($start_val)];
24806-      }
24807-      $last_start = $start_val;
24808-      $routine = $this_routine;
24809-    } elsif (m/^Loaded image name: (.+)/) {
24810-      # The win32 nm workalike emits information about the binary it is using.
24811-      if ($main::opt_debug) { print STDERR "Using Image $1\n"; }
24812-    } elsif (m/^PDB file name: (.+)/) {
24813-      # The win32 nm workalike emits information about the pdb it is using.
24814-      if ($main::opt_debug) { print STDERR "Using PDB $1\n"; }
24815-    }
24816-  }
24817-  close(NM);
24818-  # Handle the last line in the nm output.  Unfortunately, we don't know
24819-  # how big this last symbol is, because we don't know how big the file
24820-  # is.  For now, we just give it a size of 0.
24821-  # TODO(csilvers): do better here.
24822-  if (defined($routine) && $routine =~ m/$regexp/) {
24823-    $symbol_table->{$routine} = [HexExtend($last_start),
24824-                                 HexExtend($last_start)];
24825-  }
24826-  return $symbol_table;
24827-}
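# Editorial illustration (not part of the deleted jeprof script): the
# same-address handling above, on two made-up nm lines sharing one address:
#
#     0000000000041a10 W __libc_malloc
#     0000000000041a10 T malloc
#
# The 'W' (weak alias) line is queued first, then replaced when the 'T' line
# for the same address arrives, so "malloc" is the routine recorded for this
# range and the weak alias is dropped.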
24828-
24829-# Gets the procedure boundaries for all routines in "$image" whose names
24830-# match "$regexp" and returns them in a hashtable mapping from procedure
24831-# name to a two-element vector of [start address, end address].
24832-# Will return an empty map if nm is not installed or not working properly.
24833-sub GetProcedureBoundaries {
24834-  my $image = shift;
24835-  my $regexp = shift;
24836-
24837-  # If $image doesn't start with /, then put ./ in front of it.  This works
24838-  # around an obnoxious bug in our probing of nm -f behavior.
24839-  # "nm -f $image" is supposed to fail on GNU nm, but if:
24840-  #
24841-  # a. $image starts with [BbSsPp] (for example, bin/foo/bar), AND
24842-  # b. you have a.out in your current directory (a not uncommon occurrence)
24843-  #
24844-  # then "nm -f $image" succeeds because -f only looks at the first letter of
24845-  # the argument, which looks valid because it's [BbSsPp], and then since
24846-  # there's no image provided, it looks for a.out and finds it.
24847-  #
24848-  # This regex makes sure that $image starts with . or /, forcing the -f
24849-  # parsing to fail since . and / are not valid formats.
24850-  $image =~ s#^[^/]#./$&#;
24851-
24852-  # For libc libraries, the copy in /usr/lib/debug contains debugging symbols
24853-  my $debugging = DebuggingLibrary($image);
24854-  if ($debugging) {
24855-    $image = $debugging;
24856-  }
24857-
24858-  my $nm = $obj_tool_map{"nm"};
24859-  my $cppfilt = $obj_tool_map{"c++filt"};
24860-
24861-  # nm can fail for two reasons: 1) $image isn't a debug library; 2) nm
24862-  # binary doesn't support --demangle.  In addition, for OS X we need
24863-  # to use the -f flag to get 'flat' nm output (otherwise we don't sort
24864-  # properly and get incorrect results).  Unfortunately, GNU nm uses -f
24865-  # in an incompatible way.  So first we test whether our nm supports
24866-  # --demangle and -f.
24867-  my $demangle_flag = "";
24868-  my $cppfilt_flag = "";
24869-  my $to_devnull = ">$dev_null 2>&1";
24870-  if (system(ShellEscape($nm, "--demangle", $image) . $to_devnull) == 0) {
24871-    # In this mode, we do "nm --demangle <foo>"
24872-    $demangle_flag = "--demangle";
24873-    $cppfilt_flag = "";
24874-  } elsif (system(ShellEscape($cppfilt, $image) . $to_devnull) == 0) {
24875-    # In this mode, we do "nm <foo> | c++filt"
24876-    $cppfilt_flag = " | " . ShellEscape($cppfilt);
24877-  }
24878-  my $flatten_flag = "";
24879-  if (system(ShellEscape($nm, "-f", $image) . $to_devnull) == 0) {
24880-    $flatten_flag = "-f";
24881-  }
24882-
24883-  # Finally, in the case $image isn't a debug library, we try again with
24884-  # -D to at least get *exported* symbols.  If we can't use --demangle,
24885-  # we use c++filt instead, if it exists on this system.
24886-  my @nm_commands = (ShellEscape($nm, "-n", $flatten_flag, $demangle_flag,
24887-                                 $image) . " 2>$dev_null $cppfilt_flag",
24888-                     ShellEscape($nm, "-D", "-n", $flatten_flag, $demangle_flag,
24889-                                 $image) . " 2>$dev_null $cppfilt_flag",
24890-                     # 6nm is for Go binaries
24891-                     ShellEscape("6nm", "$image") . " 2>$dev_null | sort",
24892-                     );
24893-
24894-  # If the executable is an MS Windows PDB-format executable, we'll
24895-  # have set up obj_tool_map("nm_pdb").  In this case, we actually
24896-  # want to use both unix nm and windows-specific nm_pdb, since
24897-  # PDB-format executables can apparently include dwarf .o files.
24898-  if (exists $obj_tool_map{"nm_pdb"}) {
24899-    push(@nm_commands,
24900-         ShellEscape($obj_tool_map{"nm_pdb"}, "--demangle", $image)
24901-         . " 2>$dev_null");
24902-  }
24903-
24904-  foreach my $nm_command (@nm_commands) {
24905-    my $symbol_table = GetProcedureBoundariesViaNm($nm_command, $regexp);
24906-    return $symbol_table if (%{$symbol_table});
24907-  }
24908-  my $symbol_table = {};
24909-  return $symbol_table;
24910-}
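# Editorial sketch (not part of the deleted jeprof script): the image-path
# rewrite at the top of GetProcedureBoundaries.  Relative paths gain a "./"
# prefix so the "nm -f" probe cannot mistake their first letter for a valid
# format; absolute paths pass through untouched.
sub _ImagePrefixSketch {
  my @images = ("bin/foo", "/usr/lib/libc.so.6");
  return map { my $i = $_; $i =~ s#^[^/]#./$&#; $i } @images;
  # yields ("./bin/foo", "/usr/lib/libc.so.6")
}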
24911-
24912-
24913-# The test vectors for AddressAdd/Sub/Inc are 8- or 16-nibble hex strings.
24914-# To make them more readable, we add underscores at interesting places.
24915-# This routine removes the underscores, producing the canonical representation
24916-# used by jeprof to represent addresses, particularly in the tested routines.
24917-sub CanonicalHex {
24918-  my $arg = shift;
24919-  return join '', (split '_',$arg);
24920-}
24921-
24922-
24923-# Unit test for AddressAdd:
24924-sub AddressAddUnitTest {
24925-  my $test_data_8 = shift;
24926-  my $test_data_16 = shift;
24927-  my $error_count = 0;
24928-  my $fail_count = 0;
24929-  my $pass_count = 0;
24930-  # print STDERR "AddressAddUnitTest: ", 1+$#{$test_data_8}, " tests\n";
24931-
24932-  # First a few 8-nibble addresses.  Note that this implementation uses
24933-  # plain old arithmetic, so a quick sanity check along with verifying what
24934-  # happens on overflow (we want it to wrap):
24935-  $address_length = 8;
24936-  foreach my $row (@{$test_data_8}) {
24937-    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
24938-    my $sum = AddressAdd ($row->[0], $row->[1]);
24939-    if ($sum ne $row->[2]) {
24940-      printf STDERR "ERROR: %s != %s + %s = %s\n", $sum,
24941-             $row->[0], $row->[1], $row->[2];
24942-      ++$fail_count;
24943-    } else {
24944-      ++$pass_count;
24945-    }
24946-  }
24947-  printf STDERR "AddressAdd 32-bit tests: %d passes, %d failures\n",
24948-         $pass_count, $fail_count;
24949-  $error_count = $fail_count;
24950-  $fail_count = 0;
24951-  $pass_count = 0;
24952-
24953-  # Now 16-nibble addresses.
24954-  $address_length = 16;
24955-  foreach my $row (@{$test_data_16}) {
24956-    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
24957-    my $sum = AddressAdd (CanonicalHex($row->[0]), CanonicalHex($row->[1]));
24958-    my $expected = CanonicalHex($row->[2]);
24959-    if ($sum ne $expected) {
24960-      printf STDERR "ERROR: %s != %s + %s = %s\n", $sum,
24961-             $row->[0], $row->[1], $row->[2];
24962-      ++$fail_count;
24963-    } else {
24964-      ++$pass_count;
24965-    }
24966-  }
24967-  printf STDERR "AddressAdd 64-bit tests: %d passes, %d failures\n",
24968-         $pass_count, $fail_count;
24969-  $error_count += $fail_count;
24970-
24971-  return $error_count;
24972-}
24973-
24974-
24975-# Unit test for AddressSub:
24976-sub AddressSubUnitTest {
24977-  my $test_data_8 = shift;
24978-  my $test_data_16 = shift;
24979-  my $error_count = 0;
24980-  my $fail_count = 0;
24981-  my $pass_count = 0;
24982-  # print STDERR "AddressSubUnitTest: ", 1+$#{$test_data_8}, " tests\n";
24983-
24984-  # First a few 8-nibble addresses.  Note that this implementation uses
24985-  # plain old arithmetic, so a quick sanity check along with verifying what
24986-  # happens on overflow (we want it to wrap):
24987-  $address_length = 8;
24988-  foreach my $row (@{$test_data_8}) {
24989-    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
24990-    my $sum = AddressSub ($row->[0], $row->[1]);
24991-    if ($sum ne $row->[3]) {
24992-      printf STDERR "ERROR: %s != %s - %s = %s\n", $sum,
24993-             $row->[0], $row->[1], $row->[3];
24994-      ++$fail_count;
24995-    } else {
24996-      ++$pass_count;
24997-    }
24998-  }
24999-  printf STDERR "AddressSub 32-bit tests: %d passes, %d failures\n",
25000-         $pass_count, $fail_count;
25001-  $error_count = $fail_count;
25002-  $fail_count = 0;
25003-  $pass_count = 0;
25004-
25005-  # Now 16-nibble addresses.
25006-  $address_length = 16;
25007-  foreach my $row (@{$test_data_16}) {
25008-    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
25009-    my $sum = AddressSub (CanonicalHex($row->[0]), CanonicalHex($row->[1]));
25010-    if ($sum ne CanonicalHex($row->[3])) {
25011-      printf STDERR "ERROR: %s != %s - %s = %s\n", $sum,
25012-             $row->[0], $row->[1], $row->[3];
25013-      ++$fail_count;
25014-    } else {
25015-      ++$pass_count;
25016-    }
25017-  }
25018-  printf STDERR "AddressSub 64-bit tests: %d passes, %d failures\n",
25019-         $pass_count, $fail_count;
25020-  $error_count += $fail_count;
25021-
25022-  return $error_count;
25023-}
25024-
25025-
25026-# Unit test for AddressInc:
25027-sub AddressIncUnitTest {
25028-  my $test_data_8 = shift;
25029-  my $test_data_16 = shift;
25030-  my $error_count = 0;
25031-  my $fail_count = 0;
25032-  my $pass_count = 0;
25033-  # print STDERR "AddressIncUnitTest: ", 1+$#{$test_data_8}, " tests\n";
25034-
25035-  # First a few 8-nibble addresses.  Note that this implementation uses
25036-  # plain old arithmetic, so a quick sanity check along with verifying what
25037-  # happens on overflow (we want it to wrap):
25038-  $address_length = 8;
25039-  foreach my $row (@{$test_data_8}) {
25040-    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
25041-    my $sum = AddressInc ($row->[0]);
25042-    if ($sum ne $row->[4]) {
25043-      printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum,
25044-             $row->[0], $row->[4];
25045-      ++$fail_count;
25046-    } else {
25047-      ++$pass_count;
25048-    }
25049-  }
25050-  printf STDERR "AddressInc 32-bit tests: %d passes, %d failures\n",
25051-         $pass_count, $fail_count;
25052-  $error_count = $fail_count;
25053-  $fail_count = 0;
25054-  $pass_count = 0;
25055-
25056-  # Now 16-nibble addresses.
25057-  $address_length = 16;
25058-  foreach my $row (@{$test_data_16}) {
25059-    if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
25060-    my $sum = AddressInc (CanonicalHex($row->[0]));
25061-    if ($sum ne CanonicalHex($row->[4])) {
25062-      printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum,
25063-             $row->[0], $row->[4];
25064-      ++$fail_count;
25065-    } else {
25066-      ++$pass_count;
25067-    }
25068-  }
25069-  printf STDERR "AddressInc 64-bit tests: %d passes, %d failures\n",
25070-         $pass_count, $fail_count;
25071-  $error_count += $fail_count;
25072-
25073-  return $error_count;
25074-}
25075-
25076-
25077-# Driver for unit tests.
25078-# Currently just the address add/subtract/increment routines for 64-bit.
25079-sub RunUnitTests {
25080-  my $error_count = 0;
25081-
25082-  # This is a list of tuples [a, b, a+b, a-b, a+1]
25083-  my $unit_test_data_8 = [
25084-    [qw(aaaaaaaa 50505050 fafafafa 5a5a5a5a aaaaaaab)],
25085-    [qw(50505050 aaaaaaaa fafafafa a5a5a5a6 50505051)],
25086-    [qw(ffffffff aaaaaaaa aaaaaaa9 55555555 00000000)],
25087-    [qw(00000001 ffffffff 00000000 00000002 00000002)],
25088-    [qw(00000001 fffffff0 fffffff1 00000011 00000002)],
25089-  ];
25090-  my $unit_test_data_16 = [
25091-    # The implementation handles data in 7-nibble chunks, so those are the
25092-    # interesting boundaries.
25093-    [qw(aaaaaaaa 50505050
25094-        00_000000f_afafafa 00_0000005_a5a5a5a 00_000000a_aaaaaab)],
25095-    [qw(50505050 aaaaaaaa
25096-        00_000000f_afafafa ff_ffffffa_5a5a5a6 00_0000005_0505051)],
25097-    [qw(ffffffff aaaaaaaa
25098-        00_000001a_aaaaaa9 00_0000005_5555555 00_0000010_0000000)],
25099-    [qw(00000001 ffffffff
25100-        00_0000010_0000000 ff_ffffff0_0000002 00_0000000_0000002)],
25101-    [qw(00000001 fffffff0
25102-        00_000000f_ffffff1 ff_ffffff0_0000011 00_0000000_0000002)],
25103-
25104-    [qw(00_a00000a_aaaaaaa 50505050
25105-        00_a00000f_afafafa 00_a000005_a5a5a5a 00_a00000a_aaaaaab)],
25106-    [qw(0f_fff0005_0505050 aaaaaaaa
25107-        0f_fff000f_afafafa 0f_ffefffa_5a5a5a6 0f_fff0005_0505051)],
25108-    [qw(00_000000f_fffffff 01_800000a_aaaaaaa
25109-        01_800001a_aaaaaa9 fe_8000005_5555555 00_0000010_0000000)],
25110-    [qw(00_0000000_0000001 ff_fffffff_fffffff
25111-        00_0000000_0000000 00_0000000_0000002 00_0000000_0000002)],
25112-    [qw(00_0000000_0000001 ff_fffffff_ffffff0
25113-        ff_fffffff_ffffff1 00_0000000_0000011 00_0000000_0000002)],
25114-  ];
25115-
25116-  $error_count += AddressAddUnitTest($unit_test_data_8, $unit_test_data_16);
25117-  $error_count += AddressSubUnitTest($unit_test_data_8, $unit_test_data_16);
25118-  $error_count += AddressIncUnitTest($unit_test_data_8, $unit_test_data_16);
25119-  if ($error_count > 0) {
25120-    print STDERR $error_count, " errors: FAILED\n";
25121-  } else {
25122-    print STDERR "PASS\n";
25123-  }
25124-  exit ($error_count);
25125-}
25126diff --git a/jemalloc/build-aux/config.guess b/jemalloc/build-aux/config.guess
25127deleted file mode 100755
25128index f772702..0000000
25129--- a/jemalloc/build-aux/config.guess
25130+++ /dev/null
25131@@ -1,1701 +0,0 @@
25132-#! /bin/sh
25133-# Attempt to guess a canonical system name.
25134-#   Copyright 1992-2021 Free Software Foundation, Inc.
25135-
25136-timestamp='2021-01-01'
25137-
25138-# This file is free software; you can redistribute it and/or modify it
25139-# under the terms of the GNU General Public License as published by
25140-# the Free Software Foundation; either version 3 of the License, or
25141-# (at your option) any later version.
25142-#
25143-# This program is distributed in the hope that it will be useful, but
25144-# WITHOUT ANY WARRANTY; without even the implied warranty of
25145-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
25146-# General Public License for more details.
25147-#
25148-# You should have received a copy of the GNU General Public License
25149-# along with this program; if not, see <https://www.gnu.org/licenses/>.
25150-#
25151-# As a special exception to the GNU General Public License, if you
25152-# distribute this file as part of a program that contains a
25153-# configuration script generated by Autoconf, you may include it under
25154-# the same distribution terms that you use for the rest of that
25155-# program.  This Exception is an additional permission under section 7
25156-# of the GNU General Public License, version 3 ("GPLv3").
25157-#
25158-# Originally written by Per Bothner; maintained since 2000 by Ben Elliston.
25159-#
25160-# You can get the latest version of this script from:
25161-# https://git.savannah.gnu.org/cgit/config.git/plain/config.guess
25162-#
25163-# Please send patches to <[email protected]>.
25164-
25165-
25166-me=$(echo "$0" | sed -e 's,.*/,,')
25167-
25168-usage="\
25169-Usage: $0 [OPTION]
25170-
25171-Output the configuration name of the system \`$me' is run on.
25172-
25173-Options:
25174-  -h, --help         print this help, then exit
25175-  -t, --time-stamp   print date of last modification, then exit
25176-  -v, --version      print version number, then exit
25177-
25178-Report bugs and patches to <[email protected]>."
25179-
25180-version="\
25181-GNU config.guess ($timestamp)
25182-
25183-Originally written by Per Bothner.
25184-Copyright 1992-2021 Free Software Foundation, Inc.
25185-
25186-This is free software; see the source for copying conditions.  There is NO
25187-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
25188-
25189-help="
25190-Try \`$me --help' for more information."
25191-
25192-# Parse command line
25193-while test $# -gt 0 ; do
25194-  case $1 in
25195-    --time-stamp | --time* | -t )
25196-       echo "$timestamp" ; exit ;;
25197-    --version | -v )
25198-       echo "$version" ; exit ;;
25199-    --help | --h* | -h )
25200-       echo "$usage"; exit ;;
25201-    -- )     # Stop option processing
25202-       shift; break ;;
25203-    - )	# Use stdin as input.
25204-       break ;;
25205-    -* )
25206-       echo "$me: invalid option $1$help" >&2
25207-       exit 1 ;;
25208-    * )
25209-       break ;;
25210-  esac
25211-done
25212-
25213-if test $# != 0; then
25214-  echo "$me: too many arguments$help" >&2
25215-  exit 1
25216-fi
25217-
25218-# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
25219-# compiler to aid in system detection is discouraged as it requires
25220-# temporary files to be created and, as you can see below, it is a
25221-# headache to deal with in a portable fashion.
25222-
25223-# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
25224-# use `HOST_CC' if defined, but it is deprecated.
25225-
25226-# Portable tmp directory creation inspired by the Autoconf team.
25227-
25228-tmp=
25229-# shellcheck disable=SC2172
25230-trap 'test -z "$tmp" || rm -fr "$tmp"' 0 1 2 13 15
25231-
25232-set_cc_for_build() {
25233-    # prevent multiple calls if $tmp is already set
25234-    test "$tmp" && return 0
25235-    : "${TMPDIR=/tmp}"
25236-    # shellcheck disable=SC2039
25237-    { tmp=$( (umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null) && test -n "$tmp" && test -d "$tmp" ; } ||
25238-	{ test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir "$tmp" 2>/dev/null) ; } ||
25239-	{ tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir "$tmp" 2>/dev/null) && echo "Warning: creating insecure temp directory" >&2 ; } ||
25240-	{ echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; }
25241-    dummy=$tmp/dummy
25242-    case ${CC_FOR_BUILD-},${HOST_CC-},${CC-} in
25243-	,,)    echo "int x;" > "$dummy.c"
25244-	       for driver in cc gcc c89 c99 ; do
25245-		   if ($driver -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then
25246-		       CC_FOR_BUILD="$driver"
25247-		       break
25248-		   fi
25249-	       done
25250-	       if test x"$CC_FOR_BUILD" = x ; then
25251-		   CC_FOR_BUILD=no_compiler_found
25252-	       fi
25253-	       ;;
25254-	,,*)   CC_FOR_BUILD=$CC ;;
25255-	,*,*)  CC_FOR_BUILD=$HOST_CC ;;
25256-    esac
25257-}
25258-
25259-# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
25260-# ([email protected] 1994-08-24)
25261-if test -f /.attbin/uname ; then
25262-	PATH=$PATH:/.attbin ; export PATH
25263-fi
25264-
25265-UNAME_MACHINE=$( (uname -m) 2>/dev/null) || UNAME_MACHINE=unknown
25266-UNAME_RELEASE=$( (uname -r) 2>/dev/null) || UNAME_RELEASE=unknown
25267-UNAME_SYSTEM=$( (uname -s) 2>/dev/null) || UNAME_SYSTEM=unknown
25268-UNAME_VERSION=$( (uname -v) 2>/dev/null) || UNAME_VERSION=unknown
25269-
25270-case "$UNAME_SYSTEM" in
25271-Linux|GNU|GNU/*)
25272-	LIBC=unknown
25273-
25274-	set_cc_for_build
25275-	cat <<-EOF > "$dummy.c"
25276-	#include <features.h>
25277-	#if defined(__UCLIBC__)
25278-	LIBC=uclibc
25279-	#elif defined(__dietlibc__)
25280-	LIBC=dietlibc
25281-	#elif defined(__GLIBC__)
25282-	LIBC=gnu
25283-	#else
25284-	#include <stdarg.h>
25285-	/* First heuristic to detect musl libc.  */
25286-	#ifdef __DEFINED_va_list
25287-	LIBC=musl
25288-	#endif
25289-	#endif
25290-	EOF
25291-	eval "$($CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g')"
25292-
25293-	# Second heuristic to detect musl libc.
25294-	if [ "$LIBC" = unknown ] &&
25295-	   command -v ldd >/dev/null &&
25296-	   ldd --version 2>&1 | grep -q ^musl; then
25297-		LIBC=musl
25298-	fi
25299-
25300-	# If the system lacks a compiler, then just pick glibc.
25301-	# We could probably try harder.
25302-	if [ "$LIBC" = unknown ]; then
25303-		LIBC=gnu
25304-	fi
25305-	;;
25306-esac
25307-
25308-# Note: order is significant - the case branches are not exclusive.
25309-
25310-case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
25311-    *:NetBSD:*:*)
25312-	# NetBSD (nbsd) targets should (where applicable) match one or
25313-	# more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
25314-	# *-*-netbsdecoff* and *-*-netbsd*.  For targets that recently
25315-	# switched to ELF, *-*-netbsd* would select the old
25316-	# object file format.  This provides both forward
25317-	# compatibility and a consistent mechanism for selecting the
25318-	# object file format.
25319-	#
25320-	# Note: NetBSD doesn't particularly care about the vendor
25321-	# portion of the name.  We always set it to "unknown".
25322-	sysctl="sysctl -n hw.machine_arch"
25323-	UNAME_MACHINE_ARCH=$( (uname -p 2>/dev/null || \
25324-	    "/sbin/$sysctl" 2>/dev/null || \
25325-	    "/usr/sbin/$sysctl" 2>/dev/null || \
25326-	    echo unknown))
25327-	case "$UNAME_MACHINE_ARCH" in
25328-	    aarch64eb) machine=aarch64_be-unknown ;;
25329-	    armeb) machine=armeb-unknown ;;
25330-	    arm*) machine=arm-unknown ;;
25331-	    sh3el) machine=shl-unknown ;;
25332-	    sh3eb) machine=sh-unknown ;;
25333-	    sh5el) machine=sh5le-unknown ;;
25334-	    earmv*)
25335-		arch=$(echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,')
25336-		endian=$(echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p')
25337-		machine="${arch}${endian}"-unknown
25338-		;;
25339-	    *) machine="$UNAME_MACHINE_ARCH"-unknown ;;
25340-	esac
25341-	# The Operating System including object format, if it has switched
25342-	# to ELF recently (or will in the future) and ABI.
25343-	case "$UNAME_MACHINE_ARCH" in
25344-	    earm*)
25345-		os=netbsdelf
25346-		;;
25347-	    arm*|i386|m68k|ns32k|sh3*|sparc|vax)
25348-		set_cc_for_build
25349-		if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
25350-			| grep -q __ELF__
25351-		then
25352-		    # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
25353-		    # Return netbsd for either.  FIX?
25354-		    os=netbsd
25355-		else
25356-		    os=netbsdelf
25357-		fi
25358-		;;
25359-	    *)
25360-		os=netbsd
25361-		;;
25362-	esac
25363-	# Determine ABI tags.
25364-	case "$UNAME_MACHINE_ARCH" in
25365-	    earm*)
25366-		expr='s/^earmv[0-9]/-eabi/;s/eb$//'
25367-		abi=$(echo "$UNAME_MACHINE_ARCH" | sed -e "$expr")
25368-		;;
25369-	esac
25370-	# The OS release
25371-	# Debian GNU/NetBSD machines have a different userland, and
25372-	# thus, need a distinct triplet. However, they do not need
25373-	# kernel version information, so it can be replaced with a
25374-	# suitable tag, in the style of linux-gnu.
25375-	case "$UNAME_VERSION" in
25376-	    Debian*)
25377-		release='-gnu'
25378-		;;
25379-	    *)
25380-		release=$(echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. -f1,2)
25381-		;;
25382-	esac
25383-	# Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
25384-	# contains redundant information, the shorter form:
25385-	# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
25386-	echo "$machine-${os}${release}${abi-}"
25387-	exit ;;
25388-    *:Bitrig:*:*)
25389-	UNAME_MACHINE_ARCH=$(arch | sed 's/Bitrig.//')
25390-	echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE"
25391-	exit ;;
25392-    *:OpenBSD:*:*)
25393-	UNAME_MACHINE_ARCH=$(arch | sed 's/OpenBSD.//')
25394-	echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE"
25395-	exit ;;
25396-    *:LibertyBSD:*:*)
25397-	UNAME_MACHINE_ARCH=$(arch | sed 's/^.*BSD\.//')
25398-	echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE"
25399-	exit ;;
25400-    *:MidnightBSD:*:*)
25401-	echo "$UNAME_MACHINE"-unknown-midnightbsd"$UNAME_RELEASE"
25402-	exit ;;
25403-    *:ekkoBSD:*:*)
25404-	echo "$UNAME_MACHINE"-unknown-ekkobsd"$UNAME_RELEASE"
25405-	exit ;;
25406-    *:SolidBSD:*:*)
25407-	echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE"
25408-	exit ;;
25409-    *:OS108:*:*)
25410-	echo "$UNAME_MACHINE"-unknown-os108_"$UNAME_RELEASE"
25411-	exit ;;
25412-    macppc:MirBSD:*:*)
25413-	echo powerpc-unknown-mirbsd"$UNAME_RELEASE"
25414-	exit ;;
25415-    *:MirBSD:*:*)
25416-	echo "$UNAME_MACHINE"-unknown-mirbsd"$UNAME_RELEASE"
25417-	exit ;;
25418-    *:Sortix:*:*)
25419-	echo "$UNAME_MACHINE"-unknown-sortix
25420-	exit ;;
25421-    *:Twizzler:*:*)
25422-	echo "$UNAME_MACHINE"-unknown-twizzler
25423-	exit ;;
25424-    *:Redox:*:*)
25425-	echo "$UNAME_MACHINE"-unknown-redox
25426-	exit ;;
25427-    mips:OSF1:*.*)
25428-	echo mips-dec-osf1
25429-	exit ;;
25430-    alpha:OSF1:*:*)
25431-	case $UNAME_RELEASE in
25432-	*4.0)
25433-		UNAME_RELEASE=$(/usr/sbin/sizer -v | awk '{print $3}')
25434-		;;
25435-	*5.*)
25436-		UNAME_RELEASE=$(/usr/sbin/sizer -v | awk '{print $4}')
25437-		;;
25438-	esac
25439-	# According to Compaq, /usr/sbin/psrinfo has been available on
25440-	# OSF/1 and Tru64 systems produced since 1995.  I hope that
25441-	# covers most systems running today.  This code pipes the CPU
25442-	# types through head -n 1, so we only detect the type of CPU 0.
25443-	ALPHA_CPU_TYPE=$(/usr/sbin/psrinfo -v | sed -n -e 's/^  The alpha \(.*\) processor.*$/\1/p' | head -n 1)
25444-	case "$ALPHA_CPU_TYPE" in
25445-	    "EV4 (21064)")
25446-		UNAME_MACHINE=alpha ;;
25447-	    "EV4.5 (21064)")
25448-		UNAME_MACHINE=alpha ;;
25449-	    "LCA4 (21066/21068)")
25450-		UNAME_MACHINE=alpha ;;
25451-	    "EV5 (21164)")
25452-		UNAME_MACHINE=alphaev5 ;;
25453-	    "EV5.6 (21164A)")
25454-		UNAME_MACHINE=alphaev56 ;;
25455-	    "EV5.6 (21164PC)")
25456-		UNAME_MACHINE=alphapca56 ;;
25457-	    "EV5.7 (21164PC)")
25458-		UNAME_MACHINE=alphapca57 ;;
25459-	    "EV6 (21264)")
25460-		UNAME_MACHINE=alphaev6 ;;
25461-	    "EV6.7 (21264A)")
25462-		UNAME_MACHINE=alphaev67 ;;
25463-	    "EV6.8CB (21264C)")
25464-		UNAME_MACHINE=alphaev68 ;;
25465-	    "EV6.8AL (21264B)")
25466-		UNAME_MACHINE=alphaev68 ;;
25467-	    "EV6.8CX (21264D)")
25468-		UNAME_MACHINE=alphaev68 ;;
25469-	    "EV6.9A (21264/EV69A)")
25470-		UNAME_MACHINE=alphaev69 ;;
25471-	    "EV7 (21364)")
25472-		UNAME_MACHINE=alphaev7 ;;
25473-	    "EV7.9 (21364A)")
25474-		UNAME_MACHINE=alphaev79 ;;
25475-	esac
25476-	# A Pn.n version is a patched version.
25477-	# A Vn.n version is a released version.
25478-	# A Tn.n version is a released field test version.
25479-	# A Xn.n version is an unreleased experimental baselevel.
25480-	# 1.2 uses "1.2" for uname -r.
25481-	echo "$UNAME_MACHINE"-dec-osf"$(echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz)"
25482-	# Reset EXIT trap before exiting to avoid spurious non-zero exit code.
25483-	exitcode=$?
25484-	trap '' 0
25485-	exit $exitcode ;;
25486-    Amiga*:UNIX_System_V:4.0:*)
25487-	echo m68k-unknown-sysv4
25488-	exit ;;
25489-    *:[Aa]miga[Oo][Ss]:*:*)
25490-	echo "$UNAME_MACHINE"-unknown-amigaos
25491-	exit ;;
25492-    *:[Mm]orph[Oo][Ss]:*:*)
25493-	echo "$UNAME_MACHINE"-unknown-morphos
25494-	exit ;;
25495-    *:OS/390:*:*)
25496-	echo i370-ibm-openedition
25497-	exit ;;
25498-    *:z/VM:*:*)
25499-	echo s390-ibm-zvmoe
25500-	exit ;;
25501-    *:OS400:*:*)
25502-	echo powerpc-ibm-os400
25503-	exit ;;
25504-    arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
25505-	echo arm-acorn-riscix"$UNAME_RELEASE"
25506-	exit ;;
25507-    arm*:riscos:*:*|arm*:RISCOS:*:*)
25508-	echo arm-unknown-riscos
25509-	exit ;;
25510-    SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
25511-	echo hppa1.1-hitachi-hiuxmpp
25512-	exit ;;
25513-    Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
25514-	# [email protected] (Earle F. Ake) contributed MIS and NILE.
25515-	if test "$( (/bin/universe) 2>/dev/null)" = att ; then
25516-		echo pyramid-pyramid-sysv3
25517-	else
25518-		echo pyramid-pyramid-bsd
25519-	fi
25520-	exit ;;
25521-    NILE*:*:*:dcosx)
25522-	echo pyramid-pyramid-svr4
25523-	exit ;;
25524-    DRS?6000:unix:4.0:6*)
25525-	echo sparc-icl-nx6
25526-	exit ;;
25527-    DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
25528-	case $(/usr/bin/uname -p) in
25529-	    sparc) echo sparc-icl-nx7; exit ;;
25530-	esac ;;
25531-    s390x:SunOS:*:*)
25532-	echo "$UNAME_MACHINE"-ibm-solaris2"$(echo "$UNAME_RELEASE" | sed -e 's/[^.]*//')"
25533-	exit ;;
25534-    sun4H:SunOS:5.*:*)
25535-	echo sparc-hal-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')"
25536-	exit ;;
25537-    sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
25538-	echo sparc-sun-solaris2"$(echo "$UNAME_RELEASE" | sed -e 's/[^.]*//')"
25539-	exit ;;
25540-    i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
25541-	echo i386-pc-auroraux"$UNAME_RELEASE"
25542-	exit ;;
25543-    i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
25544-	set_cc_for_build
25545-	SUN_ARCH=i386
25546-	# If there is a compiler, see if it is configured for 64-bit objects.
25547-	# Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
25548-	# This test works for both compilers.
25549-	if test "$CC_FOR_BUILD" != no_compiler_found; then
25550-	    if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
25551-		(CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
25552-		grep IS_64BIT_ARCH >/dev/null
25553-	    then
25554-		SUN_ARCH=x86_64
25555-	    fi
25556-	fi
25557-	echo "$SUN_ARCH"-pc-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')"
25558-	exit ;;
25559-    sun4*:SunOS:6*:*)
25560-	# According to config.sub, this is the proper way to canonicalize
25561-	# SunOS6.  Hard to guess exactly what SunOS6 will be like, but
25562-	# it's likely to be more like Solaris than SunOS4.
25563-	echo sparc-sun-solaris3"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')"
25564-	exit ;;
25565-    sun4*:SunOS:*:*)
25566-	case "$(/usr/bin/arch -k)" in
25567-	    Series*|S4*)
25568-		UNAME_RELEASE=$(uname -v)
25569-		;;
25570-	esac
25571-	# Japanese Language versions have a version number like `4.1.3-JL'.
25572-	echo sparc-sun-sunos"$(echo "$UNAME_RELEASE"|sed -e 's/-/_/')"
25573-	exit ;;
25574-    sun3*:SunOS:*:*)
25575-	echo m68k-sun-sunos"$UNAME_RELEASE"
25576-	exit ;;
25577-    sun*:*:4.2BSD:*)
25578-	UNAME_RELEASE=$( (sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null)
25579-	test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3
25580-	case "$(/bin/arch)" in
25581-	    sun3)
25582-		echo m68k-sun-sunos"$UNAME_RELEASE"
25583-		;;
25584-	    sun4)
25585-		echo sparc-sun-sunos"$UNAME_RELEASE"
25586-		;;
25587-	esac
25588-	exit ;;
25589-    aushp:SunOS:*:*)
25590-	echo sparc-auspex-sunos"$UNAME_RELEASE"
25591-	exit ;;
25592-    # The situation for MiNT is a little confusing.  The machine name
25593-    # can be virtually everything (everything which is not
25594-    # "atarist" or "atariste" at least should have a processor
25595-    # > m68000).  The system name ranges from "MiNT" through "FreeMiNT"
25596-    # to the lowercase version "mint" (or "freemint").  Finally
25597-    # the system name "TOS" denotes a system which is actually not
25598-    # MiNT.  But MiNT is downward compatible with TOS, so this should
25599-    # be no problem.
25600-    atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
25601-	echo m68k-atari-mint"$UNAME_RELEASE"
25602-	exit ;;
25603-    atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
25604-	echo m68k-atari-mint"$UNAME_RELEASE"
25605-	exit ;;
25606-    *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
25607-	echo m68k-atari-mint"$UNAME_RELEASE"
25608-	exit ;;
25609-    milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
25610-	echo m68k-milan-mint"$UNAME_RELEASE"
25611-	exit ;;
25612-    hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
25613-	echo m68k-hades-mint"$UNAME_RELEASE"
25614-	exit ;;
25615-    *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
25616-	echo m68k-unknown-mint"$UNAME_RELEASE"
25617-	exit ;;
25618-    m68k:machten:*:*)
25619-	echo m68k-apple-machten"$UNAME_RELEASE"
25620-	exit ;;
25621-    powerpc:machten:*:*)
25622-	echo powerpc-apple-machten"$UNAME_RELEASE"
25623-	exit ;;
25624-    RISC*:Mach:*:*)
25625-	echo mips-dec-mach_bsd4.3
25626-	exit ;;
25627-    RISC*:ULTRIX:*:*)
25628-	echo mips-dec-ultrix"$UNAME_RELEASE"
25629-	exit ;;
25630-    VAX*:ULTRIX*:*:*)
25631-	echo vax-dec-ultrix"$UNAME_RELEASE"
25632-	exit ;;
25633-    2020:CLIX:*:* | 2430:CLIX:*:*)
25634-	echo clipper-intergraph-clix"$UNAME_RELEASE"
25635-	exit ;;
25636-    mips:*:*:UMIPS | mips:*:*:RISCos)
25637-	set_cc_for_build
25638-	sed 's/^	//' << EOF > "$dummy.c"
25639-#ifdef __cplusplus
25640-#include <stdio.h>  /* for printf() prototype */
25641-	int main (int argc, char *argv[]) {
25642-#else
25643-	int main (argc, argv) int argc; char *argv[]; {
25644-#endif
25645-	#if defined (host_mips) && defined (MIPSEB)
25646-	#if defined (SYSTYPE_SYSV)
25647-	  printf ("mips-mips-riscos%ssysv\\n", argv[1]); exit (0);
25648-	#endif
25649-	#if defined (SYSTYPE_SVR4)
25650-	  printf ("mips-mips-riscos%ssvr4\\n", argv[1]); exit (0);
25651-	#endif
25652-	#if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
25653-	  printf ("mips-mips-riscos%sbsd\\n", argv[1]); exit (0);
25654-	#endif
25655-	#endif
25656-	  exit (-1);
25657-	}
25658-EOF
25659-	$CC_FOR_BUILD -o "$dummy" "$dummy.c" &&
25660-	  dummyarg=$(echo "$UNAME_RELEASE" | sed -n 's/\([0-9]*\).*/\1/p') &&
25661-	  SYSTEM_NAME=$("$dummy" "$dummyarg") &&
25662-	    { echo "$SYSTEM_NAME"; exit; }
25663-	echo mips-mips-riscos"$UNAME_RELEASE"
25664-	exit ;;
25665-    Motorola:PowerMAX_OS:*:*)
25666-	echo powerpc-motorola-powermax
25667-	exit ;;
25668-    Motorola:*:4.3:PL8-*)
25669-	echo powerpc-harris-powermax
25670-	exit ;;
25671-    Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
25672-	echo powerpc-harris-powermax
25673-	exit ;;
25674-    Night_Hawk:Power_UNIX:*:*)
25675-	echo powerpc-harris-powerunix
25676-	exit ;;
25677-    m88k:CX/UX:7*:*)
25678-	echo m88k-harris-cxux7
25679-	exit ;;
25680-    m88k:*:4*:R4*)
25681-	echo m88k-motorola-sysv4
25682-	exit ;;
25683-    m88k:*:3*:R3*)
25684-	echo m88k-motorola-sysv3
25685-	exit ;;
25686-    AViiON:dgux:*:*)
25687-	# DG/UX returns AViiON for all architectures
25688-	UNAME_PROCESSOR=$(/usr/bin/uname -p)
25689-	if test "$UNAME_PROCESSOR" = mc88100 || test "$UNAME_PROCESSOR" = mc88110
25690-	then
25691-	    if test "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx || \
25692-	       test "$TARGET_BINARY_INTERFACE"x = x
25693-	    then
25694-		echo m88k-dg-dgux"$UNAME_RELEASE"
25695-	    else
25696-		echo m88k-dg-dguxbcs"$UNAME_RELEASE"
25697-	    fi
25698-	else
25699-	    echo i586-dg-dgux"$UNAME_RELEASE"
25700-	fi
25701-	exit ;;
25702-    M88*:DolphinOS:*:*)	# DolphinOS (SVR3)
25703-	echo m88k-dolphin-sysv3
25704-	exit ;;
25705-    M88*:*:R3*:*)
25706-	# Delta 88k system running SVR3
25707-	echo m88k-motorola-sysv3
25708-	exit ;;
25709-    XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
25710-	echo m88k-tektronix-sysv3
25711-	exit ;;
25712-    Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
25713-	echo m68k-tektronix-bsd
25714-	exit ;;
25715-    *:IRIX*:*:*)
25716-	echo mips-sgi-irix"$(echo "$UNAME_RELEASE"|sed -e 's/-/_/g')"
25717-	exit ;;
25718-    ????????:AIX?:[12].1:2)   # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
25719-	echo romp-ibm-aix     # uname -m gives an 8 hex-code CPU id
25720-	exit ;;               # Note that: echo "'$(uname -s)'" gives 'AIX '
25721-    i*86:AIX:*:*)
25722-	echo i386-ibm-aix
25723-	exit ;;
25724-    ia64:AIX:*:*)
25725-	if test -x /usr/bin/oslevel ; then
25726-		IBM_REV=$(/usr/bin/oslevel)
25727-	else
25728-		IBM_REV="$UNAME_VERSION.$UNAME_RELEASE"
25729-	fi
25730-	echo "$UNAME_MACHINE"-ibm-aix"$IBM_REV"
25731-	exit ;;
25732-    *:AIX:2:3)
25733-	if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
25734-		set_cc_for_build
25735-		sed 's/^		//' << EOF > "$dummy.c"
25736-		#include <sys/systemcfg.h>
25737-
25738-		main()
25739-			{
25740-			if (!__power_pc())
25741-				exit(1);
25742-			puts("powerpc-ibm-aix3.2.5");
25743-			exit(0);
25744-			}
25745-EOF
25746-		if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=$("$dummy")
25747-		then
25748-			echo "$SYSTEM_NAME"
25749-		else
25750-			echo rs6000-ibm-aix3.2.5
25751-		fi
25752-	elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
25753-		echo rs6000-ibm-aix3.2.4
25754-	else
25755-		echo rs6000-ibm-aix3.2
25756-	fi
25757-	exit ;;
25758-    *:AIX:*:[4567])
25759-	IBM_CPU_ID=$(/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }')
25760-	if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then
25761-		IBM_ARCH=rs6000
25762-	else
25763-		IBM_ARCH=powerpc
25764-	fi
25765-	if test -x /usr/bin/lslpp ; then
25766-		IBM_REV=$(/usr/bin/lslpp -Lqc bos.rte.libc |
25767-			   awk -F: '{ print $3 }' | sed s/[0-9]*$/0/)
25768-	else
25769-		IBM_REV="$UNAME_VERSION.$UNAME_RELEASE"
25770-	fi
25771-	echo "$IBM_ARCH"-ibm-aix"$IBM_REV"
25772-	exit ;;
25773-    *:AIX:*:*)
25774-	echo rs6000-ibm-aix
25775-	exit ;;
25776-    ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*)
25777-	echo romp-ibm-bsd4.4
25778-	exit ;;
25779-    ibmrt:*BSD:*|romp-ibm:BSD:*)            # covers RT/PC BSD and
25780-	echo romp-ibm-bsd"$UNAME_RELEASE"   # 4.3 with uname added to
25781-	exit ;;                             # report: romp-ibm BSD 4.3
25782-    *:BOSX:*:*)
25783-	echo rs6000-bull-bosx
25784-	exit ;;
25785-    DPX/2?00:B.O.S.:*:*)
25786-	echo m68k-bull-sysv3
25787-	exit ;;
25788-    9000/[34]??:4.3bsd:1.*:*)
25789-	echo m68k-hp-bsd
25790-	exit ;;
25791-    hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
25792-	echo m68k-hp-bsd4.4
25793-	exit ;;
25794-    9000/[34678]??:HP-UX:*:*)
25795-	HPUX_REV=$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//')
25796-	case "$UNAME_MACHINE" in
25797-	    9000/31?)            HP_ARCH=m68000 ;;
25798-	    9000/[34]??)         HP_ARCH=m68k ;;
25799-	    9000/[678][0-9][0-9])
25800-		if test -x /usr/bin/getconf; then
25801-		    sc_cpu_version=$(/usr/bin/getconf SC_CPU_VERSION 2>/dev/null)
25802-		    sc_kernel_bits=$(/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null)
25803-		    case "$sc_cpu_version" in
25804-		      523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0
25805-		      528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1
25806-		      532)                      # CPU_PA_RISC2_0
25807-			case "$sc_kernel_bits" in
25808-			  32) HP_ARCH=hppa2.0n ;;
25809-			  64) HP_ARCH=hppa2.0w ;;
25810-			  '') HP_ARCH=hppa2.0 ;;   # HP-UX 10.20
25811-			esac ;;
25812-		    esac
25813-		fi
25814-		if test "$HP_ARCH" = ""; then
25815-		    set_cc_for_build
25816-		    sed 's/^		//' << EOF > "$dummy.c"
25817-
25818-		#define _HPUX_SOURCE
25819-		#include <stdlib.h>
25820-		#include <unistd.h>
25821-
25822-		int main ()
25823-		{
25824-		#if defined(_SC_KERNEL_BITS)
25825-		    long bits = sysconf(_SC_KERNEL_BITS);
25826-		#endif
25827-		    long cpu  = sysconf (_SC_CPU_VERSION);
25828-
25829-		    switch (cpu)
25830-			{
25831-			case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
25832-			case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
25833-			case CPU_PA_RISC2_0:
25834-		#if defined(_SC_KERNEL_BITS)
25835-			    switch (bits)
25836-				{
25837-				case 64: puts ("hppa2.0w"); break;
25838-				case 32: puts ("hppa2.0n"); break;
25839-				default: puts ("hppa2.0"); break;
25840-				} break;
25841-		#else  /* !defined(_SC_KERNEL_BITS) */
25842-			    puts ("hppa2.0"); break;
25843-		#endif
25844-			default: puts ("hppa1.0"); break;
25845-			}
25846-		    exit (0);
25847-		}
25848-EOF
25849-		    (CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=$("$dummy")
25850-		    test -z "$HP_ARCH" && HP_ARCH=hppa
25851-		fi ;;
25852-	esac
25853-	if test "$HP_ARCH" = hppa2.0w
25854-	then
25855-	    set_cc_for_build
25856-
25857-	    # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
25858-	    # 32-bit code.  hppa64-hp-hpux* has the same kernel and a compiler
25859-	    # generating 64-bit code.  GNU and HP use different nomenclature:
25860-	    #
25861-	    # $ CC_FOR_BUILD=cc ./config.guess
25862-	    # => hppa2.0w-hp-hpux11.23
25863-	    # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
25864-	    # => hppa64-hp-hpux11.23
25865-
25866-	    if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) |
25867-		grep -q __LP64__
25868-	    then
25869-		HP_ARCH=hppa2.0w
25870-	    else
25871-		HP_ARCH=hppa64
25872-	    fi
25873-	fi
25874-	echo "$HP_ARCH"-hp-hpux"$HPUX_REV"
25875-	exit ;;
25876-    ia64:HP-UX:*:*)
25877-	HPUX_REV=$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//')
25878-	echo ia64-hp-hpux"$HPUX_REV"
25879-	exit ;;
25880-    3050*:HI-UX:*:*)
25881-	set_cc_for_build
25882-	sed 's/^	//' << EOF > "$dummy.c"
25883-	#include <unistd.h>
25884-	int
25885-	main ()
25886-	{
25887-	  long cpu = sysconf (_SC_CPU_VERSION);
25888-	  /* The order matters, because CPU_IS_HP_MC68K erroneously returns
25889-	     true for CPU_PA_RISC1_0.  CPU_IS_PA_RISC returns correct
25890-	     results, however.  */
25891-	  if (CPU_IS_PA_RISC (cpu))
25892-	    {
25893-	      switch (cpu)
25894-		{
25895-		  case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
25896-		  case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
25897-		  case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
25898-		  default: puts ("hppa-hitachi-hiuxwe2"); break;
25899-		}
25900-	    }
25901-	  else if (CPU_IS_HP_MC68K (cpu))
25902-	    puts ("m68k-hitachi-hiuxwe2");
25903-	  else puts ("unknown-hitachi-hiuxwe2");
25904-	  exit (0);
25905-	}
25906-EOF
25907-	$CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=$("$dummy") &&
25908-		{ echo "$SYSTEM_NAME"; exit; }
25909-	echo unknown-hitachi-hiuxwe2
25910-	exit ;;
25911-    9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*)
25912-	echo hppa1.1-hp-bsd
25913-	exit ;;
25914-    9000/8??:4.3bsd:*:*)
25915-	echo hppa1.0-hp-bsd
25916-	exit ;;
25917-    *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
25918-	echo hppa1.0-hp-mpeix
25919-	exit ;;
25920-    hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*)
25921-	echo hppa1.1-hp-osf
25922-	exit ;;
25923-    hp8??:OSF1:*:*)
25924-	echo hppa1.0-hp-osf
25925-	exit ;;
25926-    i*86:OSF1:*:*)
25927-	if test -x /usr/sbin/sysversion ; then
25928-	    echo "$UNAME_MACHINE"-unknown-osf1mk
25929-	else
25930-	    echo "$UNAME_MACHINE"-unknown-osf1
25931-	fi
25932-	exit ;;
25933-    parisc*:Lites*:*:*)
25934-	echo hppa1.1-hp-lites
25935-	exit ;;
25936-    C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
25937-	echo c1-convex-bsd
25938-	exit ;;
25939-    C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
25940-	if getsysinfo -f scalar_acc
25941-	then echo c32-convex-bsd
25942-	else echo c2-convex-bsd
25943-	fi
25944-	exit ;;
25945-    C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
25946-	echo c34-convex-bsd
25947-	exit ;;
25948-    C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
25949-	echo c38-convex-bsd
25950-	exit ;;
25951-    C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
25952-	echo c4-convex-bsd
25953-	exit ;;
25954-    CRAY*Y-MP:*:*:*)
25955-	echo ymp-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
25956-	exit ;;
25957-    CRAY*[A-Z]90:*:*:*)
25958-	echo "$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \
25959-	| sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
25960-	      -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
25961-	      -e 's/\.[^.]*$/.X/'
25962-	exit ;;
25963-    CRAY*TS:*:*:*)
25964-	echo t90-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
25965-	exit ;;
25966-    CRAY*T3E:*:*:*)
25967-	echo alphaev5-cray-unicosmk"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
25968-	exit ;;
25969-    CRAY*SV1:*:*:*)
25970-	echo sv1-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
25971-	exit ;;
25972-    *:UNICOS/mp:*:*)
25973-	echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
25974-	exit ;;
25975-    F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
25976-	FUJITSU_PROC=$(uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz)
25977-	FUJITSU_SYS=$(uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///')
25978-	FUJITSU_REL=$(echo "$UNAME_RELEASE" | sed -e 's/ /_/')
25979-	echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
25980-	exit ;;
25981-    5000:UNIX_System_V:4.*:*)
25982-	FUJITSU_SYS=$(uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///')
25983-	FUJITSU_REL=$(echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/')
25984-	echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
25985-	exit ;;
25986-    i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
25987-	echo "$UNAME_MACHINE"-pc-bsdi"$UNAME_RELEASE"
25988-	exit ;;
25989-    sparc*:BSD/OS:*:*)
25990-	echo sparc-unknown-bsdi"$UNAME_RELEASE"
25991-	exit ;;
25992-    *:BSD/OS:*:*)
25993-	echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE"
25994-	exit ;;
25995-    arm:FreeBSD:*:*)
25996-	UNAME_PROCESSOR=$(uname -p)
25997-	set_cc_for_build
25998-	if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
25999-	    | grep -q __ARM_PCS_VFP
26000-	then
26001-	    echo "${UNAME_PROCESSOR}"-unknown-freebsd"$(echo ${UNAME_RELEASE}|sed -e 's/[-(].*//')"-gnueabi
26002-	else
26003-	    echo "${UNAME_PROCESSOR}"-unknown-freebsd"$(echo ${UNAME_RELEASE}|sed -e 's/[-(].*//')"-gnueabihf
26004-	fi
26005-	exit ;;
26006-    *:FreeBSD:*:*)
26007-	UNAME_PROCESSOR=$(/usr/bin/uname -p)
26008-	case "$UNAME_PROCESSOR" in
26009-	    amd64)
26010-		UNAME_PROCESSOR=x86_64 ;;
26011-	    i386)
26012-		UNAME_PROCESSOR=i586 ;;
26013-	esac
26014-	echo "$UNAME_PROCESSOR"-unknown-freebsd"$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')"
26015-	exit ;;
26016-    i*:CYGWIN*:*)
26017-	echo "$UNAME_MACHINE"-pc-cygwin
26018-	exit ;;
26019-    *:MINGW64*:*)
26020-	echo "$UNAME_MACHINE"-pc-mingw64
26021-	exit ;;
26022-    *:MINGW*:*)
26023-	echo "$UNAME_MACHINE"-pc-mingw32
26024-	exit ;;
26025-    *:MSYS*:*)
26026-	echo "$UNAME_MACHINE"-pc-msys
26027-	exit ;;
26028-    i*:PW*:*)
26029-	echo "$UNAME_MACHINE"-pc-pw32
26030-	exit ;;
26031-    *:Interix*:*)
26032-	case "$UNAME_MACHINE" in
26033-	    x86)
26034-		echo i586-pc-interix"$UNAME_RELEASE"
26035-		exit ;;
26036-	    authenticamd | genuineintel | EM64T)
26037-		echo x86_64-unknown-interix"$UNAME_RELEASE"
26038-		exit ;;
26039-	    IA64)
26040-		echo ia64-unknown-interix"$UNAME_RELEASE"
26041-		exit ;;
26042-	esac ;;
26043-    i*:UWIN*:*)
26044-	echo "$UNAME_MACHINE"-pc-uwin
26045-	exit ;;
26046-    amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
26047-	echo x86_64-pc-cygwin
26048-	exit ;;
26049-    prep*:SunOS:5.*:*)
26050-	echo powerpcle-unknown-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')"
26051-	exit ;;
26052-    *:GNU:*:*)
26053-	# the GNU system
26054-	echo "$(echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,')-unknown-$LIBC$(echo "$UNAME_RELEASE"|sed -e 's,/.*$,,')"
26055-	exit ;;
26056-    *:GNU/*:*:*)
26057-	# other systems with GNU libc and userland
26058-	echo "$UNAME_MACHINE-unknown-$(echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]")$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')-$LIBC"
26059-	exit ;;
26060-    *:Minix:*:*)
26061-	echo "$UNAME_MACHINE"-unknown-minix
26062-	exit ;;
26063-    aarch64:Linux:*:*)
26064-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26065-	exit ;;
26066-    aarch64_be:Linux:*:*)
26067-	UNAME_MACHINE=aarch64_be
26068-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26069-	exit ;;
26070-    alpha:Linux:*:*)
26071-	case $(sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' /proc/cpuinfo 2>/dev/null) in
26072-	  EV5)   UNAME_MACHINE=alphaev5 ;;
26073-	  EV56)  UNAME_MACHINE=alphaev56 ;;
26074-	  PCA56) UNAME_MACHINE=alphapca56 ;;
26075-	  PCA57) UNAME_MACHINE=alphapca56 ;;
26076-	  EV6)   UNAME_MACHINE=alphaev6 ;;
26077-	  EV67)  UNAME_MACHINE=alphaev67 ;;
26078-	  EV68*) UNAME_MACHINE=alphaev68 ;;
26079-	esac
26080-	objdump --private-headers /bin/sh | grep -q ld.so.1
26081-	if test "$?" = 0 ; then LIBC=gnulibc1 ; fi
26082-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26083-	exit ;;
26084-    arc:Linux:*:* | arceb:Linux:*:*)
26085-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26086-	exit ;;
26087-    arm*:Linux:*:*)
26088-	set_cc_for_build
26089-	if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
26090-	    | grep -q __ARM_EABI__
26091-	then
26092-	    echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26093-	else
26094-	    if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
26095-		| grep -q __ARM_PCS_VFP
26096-	    then
26097-		echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabi
26098-	    else
26099-		echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabihf
26100-	    fi
26101-	fi
26102-	exit ;;
26103-    avr32*:Linux:*:*)
26104-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26105-	exit ;;
26106-    cris:Linux:*:*)
26107-	echo "$UNAME_MACHINE"-axis-linux-"$LIBC"
26108-	exit ;;
26109-    crisv32:Linux:*:*)
26110-	echo "$UNAME_MACHINE"-axis-linux-"$LIBC"
26111-	exit ;;
26112-    e2k:Linux:*:*)
26113-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26114-	exit ;;
26115-    frv:Linux:*:*)
26116-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26117-	exit ;;
26118-    hexagon:Linux:*:*)
26119-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26120-	exit ;;
26121-    i*86:Linux:*:*)
26122-	echo "$UNAME_MACHINE"-pc-linux-"$LIBC"
26123-	exit ;;
26124-    ia64:Linux:*:*)
26125-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26126-	exit ;;
26127-    k1om:Linux:*:*)
26128-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26129-	exit ;;
26130-    loongarch32:Linux:*:* | loongarch64:Linux:*:* | loongarchx32:Linux:*:*)
26131-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26132-	exit ;;
26133-    m32r*:Linux:*:*)
26134-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26135-	exit ;;
26136-    m68*:Linux:*:*)
26137-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26138-	exit ;;
26139-    mips:Linux:*:* | mips64:Linux:*:*)
26140-	set_cc_for_build
26141-	IS_GLIBC=0
26142-	test x"${LIBC}" = xgnu && IS_GLIBC=1
26143-	sed 's/^	//' << EOF > "$dummy.c"
26144-	#undef CPU
26145-	#undef mips
26146-	#undef mipsel
26147-	#undef mips64
26148-	#undef mips64el
26149-	#if ${IS_GLIBC} && defined(_ABI64)
26150-	LIBCABI=gnuabi64
26151-	#else
26152-	#if ${IS_GLIBC} && defined(_ABIN32)
26153-	LIBCABI=gnuabin32
26154-	#else
26155-	LIBCABI=${LIBC}
26156-	#endif
26157-	#endif
26158-
26159-	#if ${IS_GLIBC} && defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6
26160-	CPU=mipsisa64r6
26161-	#else
26162-	#if ${IS_GLIBC} && !defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6
26163-	CPU=mipsisa32r6
26164-	#else
26165-	#if defined(__mips64)
26166-	CPU=mips64
26167-	#else
26168-	CPU=mips
26169-	#endif
26170-	#endif
26171-	#endif
26172-
26173-	#if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
26174-	MIPS_ENDIAN=el
26175-	#else
26176-	#if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
26177-	MIPS_ENDIAN=
26178-	#else
26179-	MIPS_ENDIAN=
26180-	#endif
26181-	#endif
26182-EOF
26183-	eval "$($CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU\|^MIPS_ENDIAN\|^LIBCABI')"
26184-	test "x$CPU" != x && { echo "$CPU${MIPS_ENDIAN}-unknown-linux-$LIBCABI"; exit; }
26185-	;;
26186-    mips64el:Linux:*:*)
26187-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26188-	exit ;;
26189-    openrisc*:Linux:*:*)
26190-	echo or1k-unknown-linux-"$LIBC"
26191-	exit ;;
26192-    or32:Linux:*:* | or1k*:Linux:*:*)
26193-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26194-	exit ;;
26195-    padre:Linux:*:*)
26196-	echo sparc-unknown-linux-"$LIBC"
26197-	exit ;;
26198-    parisc64:Linux:*:* | hppa64:Linux:*:*)
26199-	echo hppa64-unknown-linux-"$LIBC"
26200-	exit ;;
26201-    parisc:Linux:*:* | hppa:Linux:*:*)
26202-	# Look for CPU level
26203-	case $(grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2) in
26204-	  PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;;
26205-	  PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;;
26206-	  *)    echo hppa-unknown-linux-"$LIBC" ;;
26207-	esac
26208-	exit ;;
26209-    ppc64:Linux:*:*)
26210-	echo powerpc64-unknown-linux-"$LIBC"
26211-	exit ;;
26212-    ppc:Linux:*:*)
26213-	echo powerpc-unknown-linux-"$LIBC"
26214-	exit ;;
26215-    ppc64le:Linux:*:*)
26216-	echo powerpc64le-unknown-linux-"$LIBC"
26217-	exit ;;
26218-    ppcle:Linux:*:*)
26219-	echo powerpcle-unknown-linux-"$LIBC"
26220-	exit ;;
26221-    riscv32:Linux:*:* | riscv32be:Linux:*:* | riscv64:Linux:*:* | riscv64be:Linux:*:*)
26222-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26223-	exit ;;
26224-    s390:Linux:*:* | s390x:Linux:*:*)
26225-	echo "$UNAME_MACHINE"-ibm-linux-"$LIBC"
26226-	exit ;;
26227-    sh64*:Linux:*:*)
26228-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26229-	exit ;;
26230-    sh*:Linux:*:*)
26231-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26232-	exit ;;
26233-    sparc:Linux:*:* | sparc64:Linux:*:*)
26234-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26235-	exit ;;
26236-    tile*:Linux:*:*)
26237-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26238-	exit ;;
26239-    vax:Linux:*:*)
26240-	echo "$UNAME_MACHINE"-dec-linux-"$LIBC"
26241-	exit ;;
26242-    x86_64:Linux:*:*)
26243-	set_cc_for_build
26244-	LIBCABI=$LIBC
26245-	if test "$CC_FOR_BUILD" != no_compiler_found; then
26246-	    if (echo '#ifdef __ILP32__'; echo IS_X32; echo '#endif') | \
26247-		(CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
26248-		grep IS_X32 >/dev/null
26249-	    then
26250-		LIBCABI="$LIBC"x32
26251-	    fi
26252-	fi
26253-	echo "$UNAME_MACHINE"-pc-linux-"$LIBCABI"
26254-	exit ;;
26255-    xtensa*:Linux:*:*)
26256-	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
26257-	exit ;;
26258-    i*86:DYNIX/ptx:4*:*)
26259-	# ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
26260-	# earlier versions are messed up and put the nodename in both
26261-	# sysname and nodename.
26262-	echo i386-sequent-sysv4
26263-	exit ;;
26264-    i*86:UNIX_SV:4.2MP:2.*)
26265-	# Unixware is an offshoot of SVR4, but it has its own version
26266-	# number series starting with 2...
26267-	# I am not positive that other SVR4 systems won't match this,
26268-	# I just have to hope.  -- rms.
26269-	# Use sysv4.2uw... so that sysv4* matches it.
26270-	echo "$UNAME_MACHINE"-pc-sysv4.2uw"$UNAME_VERSION"
26271-	exit ;;
26272-    i*86:OS/2:*:*)
26273-	# If we were able to find `uname', then EMX Unix compatibility
26274-	# is probably installed.
26275-	echo "$UNAME_MACHINE"-pc-os2-emx
26276-	exit ;;
26277-    i*86:XTS-300:*:STOP)
26278-	echo "$UNAME_MACHINE"-unknown-stop
26279-	exit ;;
26280-    i*86:atheos:*:*)
26281-	echo "$UNAME_MACHINE"-unknown-atheos
26282-	exit ;;
26283-    i*86:syllable:*:*)
26284-	echo "$UNAME_MACHINE"-pc-syllable
26285-	exit ;;
26286-    i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
26287-	echo i386-unknown-lynxos"$UNAME_RELEASE"
26288-	exit ;;
26289-    i*86:*DOS:*:*)
26290-	echo "$UNAME_MACHINE"-pc-msdosdjgpp
26291-	exit ;;
26292-    i*86:*:4.*:*)
26293-	UNAME_REL=$(echo "$UNAME_RELEASE" | sed 's/\/MP$//')
26294-	if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
26295-		echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL"
26296-	else
26297-		echo "$UNAME_MACHINE"-pc-sysv"$UNAME_REL"
26298-	fi
26299-	exit ;;
26300-    i*86:*:5:[678]*)
26301-	# UnixWare 7.x, OpenUNIX and OpenServer 6.
26302-	case $(/bin/uname -X | grep "^Machine") in
26303-	    *486*)	     UNAME_MACHINE=i486 ;;
26304-	    *Pentium)	     UNAME_MACHINE=i586 ;;
26305-	    *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
26306-	esac
26307-	echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}"
26308-	exit ;;
26309-    i*86:*:3.2:*)
26310-	if test -f /usr/options/cb.name; then
26311-		UNAME_REL=$(sed -n 's/.*Version //p' </usr/options/cb.name)
26312-		echo "$UNAME_MACHINE"-pc-isc"$UNAME_REL"
26313-	elif /bin/uname -X 2>/dev/null >/dev/null ; then
26314-		UNAME_REL=$( (/bin/uname -X|grep Release|sed -e 's/.*= //'))
26315-		(/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
26316-		(/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
26317-			&& UNAME_MACHINE=i586
26318-		(/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
26319-			&& UNAME_MACHINE=i686
26320-		(/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
26321-			&& UNAME_MACHINE=i686
26322-		echo "$UNAME_MACHINE"-pc-sco"$UNAME_REL"
26323-	else
26324-		echo "$UNAME_MACHINE"-pc-sysv32
26325-	fi
26326-	exit ;;
26327-    pc:*:*:*)
26328-	# Left here for compatibility:
26329-	# uname -m prints for DJGPP always 'pc', but it prints nothing about
26330-	# the processor, so we play safe by assuming i586.
26331-	# Note: whatever this is, it MUST be the same as what config.sub
26332-	# prints for the "djgpp" host, or else GDB configure will decide that
26333-	# this is a cross-build.
26334-	echo i586-pc-msdosdjgpp
26335-	exit ;;
26336-    Intel:Mach:3*:*)
26337-	echo i386-pc-mach3
26338-	exit ;;
26339-    paragon:*:*:*)
26340-	echo i860-intel-osf1
26341-	exit ;;
26342-    i860:*:4.*:*) # i860-SVR4
26343-	if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
26344-	  echo i860-stardent-sysv"$UNAME_RELEASE" # Stardent Vistra i860-SVR4
26345-	else # Add other i860-SVR4 vendors below as they are discovered.
26346-	  echo i860-unknown-sysv"$UNAME_RELEASE"  # Unknown i860-SVR4
26347-	fi
26348-	exit ;;
26349-    mini*:CTIX:SYS*5:*)
26350-	# "miniframe"
26351-	echo m68010-convergent-sysv
26352-	exit ;;
26353-    mc68k:UNIX:SYSTEM5:3.51m)
26354-	echo m68k-convergent-sysv
26355-	exit ;;
26356-    M680?0:D-NIX:5.3:*)
26357-	echo m68k-diab-dnix
26358-	exit ;;
26359-    M68*:*:R3V[5678]*:*)
26360-	test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
26361-    3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
26362-	OS_REL=''
26363-	test -r /etc/.relid \
26364-	&& OS_REL=.$(sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid)
26365-	/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
26366-	  && { echo i486-ncr-sysv4.3"$OS_REL"; exit; }
26367-	/bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
26368-	  && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;;
26369-    3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
26370-	/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
26371-	  && { echo i486-ncr-sysv4; exit; } ;;
26372-    NCR*:*:4.2:* | MPRAS*:*:4.2:*)
26373-	OS_REL='.3'
26374-	test -r /etc/.relid \
26375-	    && OS_REL=.$(sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid)
26376-	/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
26377-	    && { echo i486-ncr-sysv4.3"$OS_REL"; exit; }
26378-	/bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
26379-	    && { echo i586-ncr-sysv4.3"$OS_REL"; exit; }
26380-	/bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
26381-	    && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;;
26382-    m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
26383-	echo m68k-unknown-lynxos"$UNAME_RELEASE"
26384-	exit ;;
26385-    mc68030:UNIX_System_V:4.*:*)
26386-	echo m68k-atari-sysv4
26387-	exit ;;
26388-    TSUNAMI:LynxOS:2.*:*)
26389-	echo sparc-unknown-lynxos"$UNAME_RELEASE"
26390-	exit ;;
26391-    rs6000:LynxOS:2.*:*)
26392-	echo rs6000-unknown-lynxos"$UNAME_RELEASE"
26393-	exit ;;
26394-    PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
26395-	echo powerpc-unknown-lynxos"$UNAME_RELEASE"
26396-	exit ;;
26397-    SM[BE]S:UNIX_SV:*:*)
26398-	echo mips-dde-sysv"$UNAME_RELEASE"
26399-	exit ;;
26400-    RM*:ReliantUNIX-*:*:*)
26401-	echo mips-sni-sysv4
26402-	exit ;;
26403-    RM*:SINIX-*:*:*)
26404-	echo mips-sni-sysv4
26405-	exit ;;
26406-    *:SINIX-*:*:*)
26407-	if uname -p 2>/dev/null >/dev/null ; then
26408-		UNAME_MACHINE=$( (uname -p) 2>/dev/null)
26409-		echo "$UNAME_MACHINE"-sni-sysv4
26410-	else
26411-		echo ns32k-sni-sysv
26412-	fi
26413-	exit ;;
26414-    PENTIUM:*:4.0*:*)	# Unisys `ClearPath HMP IX 4000' SVR4/MP effort
26415-			# says <Richard.M.Bartel@ccMail.Census.GOV>
26416-	echo i586-unisys-sysv4
26417-	exit ;;
26418-    *:UNIX_System_V:4*:FTX*)
26419-	# From Gerald Hewes <hewes@openmarket.com>.
26420-	# How about differentiating between stratus architectures? -djm
26421-	echo hppa1.1-stratus-sysv4
26422-	exit ;;
26423-    *:*:*:FTX*)
26424-	# From seanf@swdc.stratus.com.
26425-	echo i860-stratus-sysv4
26426-	exit ;;
26427-    i*86:VOS:*:*)
26428-	# From Paul.Green@stratus.com.
26429-	echo "$UNAME_MACHINE"-stratus-vos
26430-	exit ;;
26431-    *:VOS:*:*)
26432-	# From Paul.Green@stratus.com.
26433-	echo hppa1.1-stratus-vos
26434-	exit ;;
26435-    mc68*:A/UX:*:*)
26436-	echo m68k-apple-aux"$UNAME_RELEASE"
26437-	exit ;;
26438-    news*:NEWS-OS:6*:*)
26439-	echo mips-sony-newsos6
26440-	exit ;;
26441-    R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
26442-	if test -d /usr/nec; then
26443-		echo mips-nec-sysv"$UNAME_RELEASE"
26444-	else
26445-		echo mips-unknown-sysv"$UNAME_RELEASE"
26446-	fi
26447-	exit ;;
26448-    BeBox:BeOS:*:*)	# BeOS running on hardware made by Be, PPC only.
26449-	echo powerpc-be-beos
26450-	exit ;;
26451-    BeMac:BeOS:*:*)	# BeOS running on Mac or Mac clone, PPC only.
26452-	echo powerpc-apple-beos
26453-	exit ;;
26454-    BePC:BeOS:*:*)	# BeOS running on Intel PC compatible.
26455-	echo i586-pc-beos
26456-	exit ;;
26457-    BePC:Haiku:*:*)	# Haiku running on Intel PC compatible.
26458-	echo i586-pc-haiku
26459-	exit ;;
26460-    x86_64:Haiku:*:*)
26461-	echo x86_64-unknown-haiku
26462-	exit ;;
26463-    SX-4:SUPER-UX:*:*)
26464-	echo sx4-nec-superux"$UNAME_RELEASE"
26465-	exit ;;
26466-    SX-5:SUPER-UX:*:*)
26467-	echo sx5-nec-superux"$UNAME_RELEASE"
26468-	exit ;;
26469-    SX-6:SUPER-UX:*:*)
26470-	echo sx6-nec-superux"$UNAME_RELEASE"
26471-	exit ;;
26472-    SX-7:SUPER-UX:*:*)
26473-	echo sx7-nec-superux"$UNAME_RELEASE"
26474-	exit ;;
26475-    SX-8:SUPER-UX:*:*)
26476-	echo sx8-nec-superux"$UNAME_RELEASE"
26477-	exit ;;
26478-    SX-8R:SUPER-UX:*:*)
26479-	echo sx8r-nec-superux"$UNAME_RELEASE"
26480-	exit ;;
26481-    SX-ACE:SUPER-UX:*:*)
26482-	echo sxace-nec-superux"$UNAME_RELEASE"
26483-	exit ;;
26484-    Power*:Rhapsody:*:*)
26485-	echo powerpc-apple-rhapsody"$UNAME_RELEASE"
26486-	exit ;;
26487-    *:Rhapsody:*:*)
26488-	echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE"
26489-	exit ;;
26490-    arm64:Darwin:*:*)
26491-	echo aarch64-apple-darwin"$UNAME_RELEASE"
26492-	exit ;;
26493-    *:Darwin:*:*)
26494-	UNAME_PROCESSOR=$(uname -p)
26495-	case $UNAME_PROCESSOR in
26496-	    unknown) UNAME_PROCESSOR=powerpc ;;
26497-	esac
26498-	if command -v xcode-select > /dev/null 2> /dev/null && \
26499-		! xcode-select --print-path > /dev/null 2> /dev/null ; then
26500-	    # Avoid executing cc if there is no toolchain installed as
26501-	    # cc will be a stub that puts up a graphical alert
26502-	    # prompting the user to install developer tools.
26503-	    CC_FOR_BUILD=no_compiler_found
26504-	else
26505-	    set_cc_for_build
26506-	fi
26507-	if test "$CC_FOR_BUILD" != no_compiler_found; then
26508-	    if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
26509-		   (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
26510-		   grep IS_64BIT_ARCH >/dev/null
26511-	    then
26512-		case $UNAME_PROCESSOR in
26513-		    i386) UNAME_PROCESSOR=x86_64 ;;
26514-		    powerpc) UNAME_PROCESSOR=powerpc64 ;;
26515-		esac
26516-	    fi
26517-	    # On 10.4-10.6 one might compile for PowerPC via gcc -arch ppc
26518-	    if (echo '#ifdef __POWERPC__'; echo IS_PPC; echo '#endif') | \
26519-		   (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
26520-		   grep IS_PPC >/dev/null
26521-	    then
26522-		UNAME_PROCESSOR=powerpc
26523-	    fi
26524-	elif test "$UNAME_PROCESSOR" = i386 ; then
26525-	    # uname -m returns i386 or x86_64
26526-	    UNAME_PROCESSOR=$UNAME_MACHINE
26527-	fi
26528-	echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE"
26529-	exit ;;
26530-    *:procnto*:*:* | *:QNX:[0123456789]*:*)
26531-	UNAME_PROCESSOR=$(uname -p)
26532-	if test "$UNAME_PROCESSOR" = x86; then
26533-		UNAME_PROCESSOR=i386
26534-		UNAME_MACHINE=pc
26535-	fi
26536-	echo "$UNAME_PROCESSOR"-"$UNAME_MACHINE"-nto-qnx"$UNAME_RELEASE"
26537-	exit ;;
26538-    *:QNX:*:4*)
26539-	echo i386-pc-qnx
26540-	exit ;;
26541-    NEO-*:NONSTOP_KERNEL:*:*)
26542-	echo neo-tandem-nsk"$UNAME_RELEASE"
26543-	exit ;;
26544-    NSE-*:NONSTOP_KERNEL:*:*)
26545-	echo nse-tandem-nsk"$UNAME_RELEASE"
26546-	exit ;;
26547-    NSR-*:NONSTOP_KERNEL:*:*)
26548-	echo nsr-tandem-nsk"$UNAME_RELEASE"
26549-	exit ;;
26550-    NSV-*:NONSTOP_KERNEL:*:*)
26551-	echo nsv-tandem-nsk"$UNAME_RELEASE"
26552-	exit ;;
26553-    NSX-*:NONSTOP_KERNEL:*:*)
26554-	echo nsx-tandem-nsk"$UNAME_RELEASE"
26555-	exit ;;
26556-    *:NonStop-UX:*:*)
26557-	echo mips-compaq-nonstopux
26558-	exit ;;
26559-    BS2000:POSIX*:*:*)
26560-	echo bs2000-siemens-sysv
26561-	exit ;;
26562-    DS/*:UNIX_System_V:*:*)
26563-	echo "$UNAME_MACHINE"-"$UNAME_SYSTEM"-"$UNAME_RELEASE"
26564-	exit ;;
26565-    *:Plan9:*:*)
26566-	# "uname -m" is not consistent, so use $cputype instead. 386
26567-	# is converted to i386 for consistency with other x86
26568-	# operating systems.
26569-	# shellcheck disable=SC2154
26570-	if test "$cputype" = 386; then
26571-	    UNAME_MACHINE=i386
26572-	else
26573-	    UNAME_MACHINE="$cputype"
26574-	fi
26575-	echo "$UNAME_MACHINE"-unknown-plan9
26576-	exit ;;
26577-    *:TOPS-10:*:*)
26578-	echo pdp10-unknown-tops10
26579-	exit ;;
26580-    *:TENEX:*:*)
26581-	echo pdp10-unknown-tenex
26582-	exit ;;
26583-    KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
26584-	echo pdp10-dec-tops20
26585-	exit ;;
26586-    XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
26587-	echo pdp10-xkl-tops20
26588-	exit ;;
26589-    *:TOPS-20:*:*)
26590-	echo pdp10-unknown-tops20
26591-	exit ;;
26592-    *:ITS:*:*)
26593-	echo pdp10-unknown-its
26594-	exit ;;
26595-    SEI:*:*:SEIUX)
26596-	echo mips-sei-seiux"$UNAME_RELEASE"
26597-	exit ;;
26598-    *:DragonFly:*:*)
26599-	echo "$UNAME_MACHINE"-unknown-dragonfly"$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')"
26600-	exit ;;
26601-    *:*VMS:*:*)
26602-	UNAME_MACHINE=$( (uname -p) 2>/dev/null)
26603-	case "$UNAME_MACHINE" in
26604-	    A*) echo alpha-dec-vms ; exit ;;
26605-	    I*) echo ia64-dec-vms ; exit ;;
26606-	    V*) echo vax-dec-vms ; exit ;;
26607-	esac ;;
26608-    *:XENIX:*:SysV)
26609-	echo i386-pc-xenix
26610-	exit ;;
26611-    i*86:skyos:*:*)
26612-	echo "$UNAME_MACHINE"-pc-skyos"$(echo "$UNAME_RELEASE" | sed -e 's/ .*$//')"
26613-	exit ;;
26614-    i*86:rdos:*:*)
26615-	echo "$UNAME_MACHINE"-pc-rdos
26616-	exit ;;
26617-    i*86:AROS:*:*)
26618-	echo "$UNAME_MACHINE"-pc-aros
26619-	exit ;;
26620-    x86_64:VMkernel:*:*)
26621-	echo "$UNAME_MACHINE"-unknown-esx
26622-	exit ;;
26623-    amd64:Isilon\ OneFS:*:*)
26624-	echo x86_64-unknown-onefs
26625-	exit ;;
26626-    *:Unleashed:*:*)
26627-	echo "$UNAME_MACHINE"-unknown-unleashed"$UNAME_RELEASE"
26628-	exit ;;
26629-esac
26630-
26631-# No uname command or uname output not recognized.
26632-set_cc_for_build
26633-cat > "$dummy.c" <<EOF
26634-#ifdef _SEQUENT_
26635-#include <sys/types.h>
26636-#include <sys/utsname.h>
26637-#endif
26638-#if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__)
26639-#if defined (vax) || defined (__vax) || defined (__vax__) || defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__)
26640-#include <signal.h>
26641-#if defined(_SIZE_T_) || defined(SIGLOST)
26642-#include <sys/utsname.h>
26643-#endif
26644-#endif
26645-#endif
26646-main ()
26647-{
26648-#if defined (sony)
26649-#if defined (MIPSEB)
26650-  /* BFD wants "bsd" instead of "newsos".  Perhaps BFD should be changed,
26651-     I don't know....  */
26652-  printf ("mips-sony-bsd\n"); exit (0);
26653-#else
26654-#include <sys/param.h>
26655-  printf ("m68k-sony-newsos%s\n",
26656-#ifdef NEWSOS4
26657-  "4"
26658-#else
26659-  ""
26660-#endif
26661-  ); exit (0);
26662-#endif
26663-#endif
26664-
26665-#if defined (NeXT)
26666-#if !defined (__ARCHITECTURE__)
26667-#define __ARCHITECTURE__ "m68k"
26668-#endif
26669-  int version;
26670-  version=$( (hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null);
26671-  if (version < 4)
26672-    printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
26673-  else
26674-    printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
26675-  exit (0);
26676-#endif
26677-
26678-#if defined (MULTIMAX) || defined (n16)
26679-#if defined (UMAXV)
26680-  printf ("ns32k-encore-sysv\n"); exit (0);
26681-#else
26682-#if defined (CMU)
26683-  printf ("ns32k-encore-mach\n"); exit (0);
26684-#else
26685-  printf ("ns32k-encore-bsd\n"); exit (0);
26686-#endif
26687-#endif
26688-#endif
26689-
26690-#if defined (__386BSD__)
26691-  printf ("i386-pc-bsd\n"); exit (0);
26692-#endif
26693-
26694-#if defined (sequent)
26695-#if defined (i386)
26696-  printf ("i386-sequent-dynix\n"); exit (0);
26697-#endif
26698-#if defined (ns32000)
26699-  printf ("ns32k-sequent-dynix\n"); exit (0);
26700-#endif
26701-#endif
26702-
26703-#if defined (_SEQUENT_)
26704-  struct utsname un;
26705-
26706-  uname(&un);
26707-  if (strncmp(un.version, "V2", 2) == 0) {
26708-    printf ("i386-sequent-ptx2\n"); exit (0);
26709-  }
26710-  if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
26711-    printf ("i386-sequent-ptx1\n"); exit (0);
26712-  }
26713-  printf ("i386-sequent-ptx\n"); exit (0);
26714-#endif
26715-
26716-#if defined (vax)
26717-#if !defined (ultrix)
26718-#include <sys/param.h>
26719-#if defined (BSD)
26720-#if BSD == 43
26721-  printf ("vax-dec-bsd4.3\n"); exit (0);
26722-#else
26723-#if BSD == 199006
26724-  printf ("vax-dec-bsd4.3reno\n"); exit (0);
26725-#else
26726-  printf ("vax-dec-bsd\n"); exit (0);
26727-#endif
26728-#endif
26729-#else
26730-  printf ("vax-dec-bsd\n"); exit (0);
26731-#endif
26732-#else
26733-#if defined(_SIZE_T_) || defined(SIGLOST)
26734-  struct utsname un;
26735-  uname (&un);
26736-  printf ("vax-dec-ultrix%s\n", un.release); exit (0);
26737-#else
26738-  printf ("vax-dec-ultrix\n"); exit (0);
26739-#endif
26740-#endif
26741-#endif
26742-#if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__)
26743-#if defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__)
26744-#if defined(_SIZE_T_) || defined(SIGLOST)
26745-  struct utsname un;
26746-  uname (&un);
26747-  printf ("mips-dec-ultrix%s\n", un.release); exit (0);
26748-#else
26749-  printf ("mips-dec-ultrix\n"); exit (0);
26750-#endif
26751-#endif
26752-#endif
26753-
26754-#if defined (alliant) && defined (i860)
26755-  printf ("i860-alliant-bsd\n"); exit (0);
26756-#endif
26757-
26758-  exit (1);
26759-}
26760-EOF
26761-
26762-$CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null && SYSTEM_NAME=$($dummy) &&
26763-	{ echo "$SYSTEM_NAME"; exit; }
26764-
26765-# Apollos put the system type in the environment.
26766-test -d /usr/apollo && { echo "$ISP-apollo-$SYSTYPE"; exit; }
26767-
26768-echo "$0: unable to guess system type" >&2
26769-
26770-case "$UNAME_MACHINE:$UNAME_SYSTEM" in
26771-    mips:Linux | mips64:Linux)
26772-	# If we got here on MIPS GNU/Linux, output extra information.
26773-	cat >&2 <<EOF
26774-
26775-NOTE: MIPS GNU/Linux systems require a C compiler to fully recognize
26776-the system type. Please install a C compiler and try again.
26777-EOF
26778-	;;
26779-esac
26780-
26781-cat >&2 <<EOF
26782-
26783-This script (version $timestamp) has failed to recognize the
26784-operating system you are using. If your script is old, overwrite *all*
26785-copies of config.guess and config.sub with the latest versions from:
26786-
26787-  https://git.savannah.gnu.org/cgit/config.git/plain/config.guess
26788-and
26789-  https://git.savannah.gnu.org/cgit/config.git/plain/config.sub
26790-EOF
26791-
26792-year=$(echo $timestamp | sed 's,-.*,,')
26793-# shellcheck disable=SC2003
26794-if test "$(expr "$(date +%Y)" - "$year")" -lt 3 ; then
26795-   cat >&2 <<EOF
26796-
26797-If $0 has already been updated, send the following data and any
26798-information you think might be pertinent to config-patches@gnu.org to
26799-provide the necessary information to handle your system.
26800-
26801-config.guess timestamp = $timestamp
26802-
26803-uname -m = $( (uname -m) 2>/dev/null || echo unknown)
26804-uname -r = $( (uname -r) 2>/dev/null || echo unknown)
26805-uname -s = $( (uname -s) 2>/dev/null || echo unknown)
26806-uname -v = $( (uname -v) 2>/dev/null || echo unknown)
26807-
26808-/usr/bin/uname -p = $( (/usr/bin/uname -p) 2>/dev/null)
26809-/bin/uname -X     = $( (/bin/uname -X) 2>/dev/null)
26810-
26811-hostinfo               = $( (hostinfo) 2>/dev/null)
26812-/bin/universe          = $( (/bin/universe) 2>/dev/null)
26813-/usr/bin/arch -k       = $( (/usr/bin/arch -k) 2>/dev/null)
26814-/bin/arch              = $( (/bin/arch) 2>/dev/null)
26815-/usr/bin/oslevel       = $( (/usr/bin/oslevel) 2>/dev/null)
26816-/usr/convex/getsysinfo = $( (/usr/convex/getsysinfo) 2>/dev/null)
26817-
26818-UNAME_MACHINE = "$UNAME_MACHINE"
26819-UNAME_RELEASE = "$UNAME_RELEASE"
26820-UNAME_SYSTEM  = "$UNAME_SYSTEM"
26821-UNAME_VERSION = "$UNAME_VERSION"
26822-EOF
26823-fi
26824-
26825-exit 1
26826-
26827-# Local variables:
26828-# eval: (add-hook 'before-save-hook 'time-stamp)
26829-# time-stamp-start: "timestamp='"
26830-# time-stamp-format: "%:y-%02m-%02d"
26831-# time-stamp-end: "'"
26832-# End:
26833diff --git a/jemalloc/build-aux/config.sub b/jemalloc/build-aux/config.sub
26834deleted file mode 100755
26835index b0f8492..0000000
26836--- a/jemalloc/build-aux/config.sub
26837+++ /dev/null
26838@@ -1,1855 +0,0 @@
26839-#! /bin/sh
26840-# Configuration validation subroutine script.
26841-#   Copyright 1992-2021 Free Software Foundation, Inc.
26842-
26843-timestamp='2021-01-07'
26844-
26845-# This file is free software; you can redistribute it and/or modify it
26846-# under the terms of the GNU General Public License as published by
26847-# the Free Software Foundation; either version 3 of the License, or
26848-# (at your option) any later version.
26849-#
26850-# This program is distributed in the hope that it will be useful, but
26851-# WITHOUT ANY WARRANTY; without even the implied warranty of
26852-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
26853-# General Public License for more details.
26854-#
26855-# You should have received a copy of the GNU General Public License
26856-# along with this program; if not, see <https://www.gnu.org/licenses/>.
26857-#
26858-# As a special exception to the GNU General Public License, if you
26859-# distribute this file as part of a program that contains a
26860-# configuration script generated by Autoconf, you may include it under
26861-# the same distribution terms that you use for the rest of that
26862-# program.  This Exception is an additional permission under section 7
26863-# of the GNU General Public License, version 3 ("GPLv3").
26864-
26865-
26866-# Please send patches to <config-patches@gnu.org>.
26867-#
26868-# Configuration subroutine to validate and canonicalize a configuration type.
26869-# Supply the specified configuration type as an argument.
26870-# If it is invalid, we print an error message on stderr and exit with code 1.
26871-# Otherwise, we print the canonical config type on stdout and succeed.
26872-
26873-# You can get the latest version of this script from:
26874-# https://git.savannah.gnu.org/cgit/config.git/plain/config.sub
26875-
26876-# This file is supposed to be the same for all GNU packages
26877-# and recognize all the CPU types, system types and aliases
26878-# that are meaningful with *any* GNU software.
26879-# Each package is responsible for reporting which valid configurations
26880-# it does not support.  The user should be able to distinguish
26881-# a failure to support a valid configuration from a meaningless
26882-# configuration.
26883-
26884-# The goal of this file is to map all the various variations of a given
26885-# machine specification into a single specification in the form:
26886-#	CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
26887-# or in some cases, the newer four-part form:
26888-#	CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
26889-# It is wrong to echo any other type of specification.
26890-
26891-me=$(echo "$0" | sed -e 's,.*/,,')
26892-
26893-usage="\
26894-Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS
26895-
26896-Canonicalize a configuration name.
26897-
26898-Options:
26899-  -h, --help         print this help, then exit
26900-  -t, --time-stamp   print date of last modification, then exit
26901-  -v, --version      print version number, then exit
26902-
26903-Report bugs and patches to <config-patches@gnu.org>."
26904-
26905-version="\
26906-GNU config.sub ($timestamp)
26907-
26908-Copyright 1992-2021 Free Software Foundation, Inc.
26909-
26910-This is free software; see the source for copying conditions.  There is NO
26911-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
26912-
26913-help="
26914-Try \`$me --help' for more information."
26915-
26916-# Parse command line
26917-while test $# -gt 0 ; do
26918-  case $1 in
26919-    --time-stamp | --time* | -t )
26920-       echo "$timestamp" ; exit ;;
26921-    --version | -v )
26922-       echo "$version" ; exit ;;
26923-    --help | --h* | -h )
26924-       echo "$usage"; exit ;;
26925-    -- )     # Stop option processing
26926-       shift; break ;;
26927-    - )	# Use stdin as input.
26928-       break ;;
26929-    -* )
26930-       echo "$me: invalid option $1$help" >&2
26931-       exit 1 ;;
26932-
26933-    *local*)
26934-       # First pass through any local machine types.
26935-       echo "$1"
26936-       exit ;;
26937-
26938-    * )
26939-       break ;;
26940-  esac
26941-done
26942-
26943-case $# in
26944- 0) echo "$me: missing argument$help" >&2
26945-    exit 1;;
26946- 1) ;;
26947- *) echo "$me: too many arguments$help" >&2
26948-    exit 1;;
26949-esac
26950-
26951-# Split fields of configuration type
26952-# shellcheck disable=SC2162
26953-IFS="-" read field1 field2 field3 field4 <<EOF
26954-$1
26955-EOF
26956-
26957-# Separate into logical components for further validation
26958-case $1 in
26959-	*-*-*-*-*)
26960-		echo Invalid configuration \`"$1"\': more than four components >&2
26961-		exit 1
26962-		;;
26963-	*-*-*-*)
26964-		basic_machine=$field1-$field2
26965-		basic_os=$field3-$field4
26966-		;;
26967-	*-*-*)
26968-		# Ambiguous whether COMPANY is present, or skipped and KERNEL-OS is two
26969-		# parts
26970-		maybe_os=$field2-$field3
26971-		case $maybe_os in
26972-			nto-qnx* | linux-* | uclinux-uclibc* \
26973-			| uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \
26974-			| netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \
26975-			| storm-chaos* | os2-emx* | rtmk-nova*)
26976-				basic_machine=$field1
26977-				basic_os=$maybe_os
26978-				;;
26979-			android-linux)
26980-				basic_machine=$field1-unknown
26981-				basic_os=linux-android
26982-				;;
26983-			*)
26984-				basic_machine=$field1-$field2
26985-				basic_os=$field3
26986-				;;
26987-		esac
26988-		;;
26989-	*-*)
26990-		# A lone config we happen to match not fitting any pattern
26991-		case $field1-$field2 in
26992-			decstation-3100)
26993-				basic_machine=mips-dec
26994-				basic_os=
26995-				;;
26996-			*-*)
26997-				# Second component is usually, but not always the OS
26998-				case $field2 in
26999-					# Prevent following clause from handling this valid os
27000-					sun*os*)
27001-						basic_machine=$field1
27002-						basic_os=$field2
27003-						;;
27004-					# Manufacturers
27005-					dec* | mips* | sequent* | encore* | pc532* | sgi* | sony* \
27006-					| att* | 7300* | 3300* | delta* | motorola* | sun[234]* \
27007-					| unicom* | ibm* | next | hp | isi* | apollo | altos* \
27008-					| convergent* | ncr* | news | 32* | 3600* | 3100* \
27009-					| hitachi* | c[123]* | convex* | sun | crds | omron* | dg \
27010-					| ultra | tti* | harris | dolphin | highlevel | gould \
27011-					| cbm | ns | masscomp | apple | axis | knuth | cray \
27012-					| microblaze* | sim | cisco \
27013-					| oki | wec | wrs | winbond)
27014-						basic_machine=$field1-$field2
27015-						basic_os=
27016-						;;
27017-					*)
27018-						basic_machine=$field1
27019-						basic_os=$field2
27020-						;;
27021-				esac
27022-			;;
27023-		esac
27024-		;;
27025-	*)
27026-		# Convert single-component short-hands not valid as part of
27027-		# multi-component configurations.
27028-		case $field1 in
27029-			386bsd)
27030-				basic_machine=i386-pc
27031-				basic_os=bsd
27032-				;;
27033-			a29khif)
27034-				basic_machine=a29k-amd
27035-				basic_os=udi
27036-				;;
27037-			adobe68k)
27038-				basic_machine=m68010-adobe
27039-				basic_os=scout
27040-				;;
27041-			alliant)
27042-				basic_machine=fx80-alliant
27043-				basic_os=
27044-				;;
27045-			altos | altos3068)
27046-				basic_machine=m68k-altos
27047-				basic_os=
27048-				;;
27049-			am29k)
27050-				basic_machine=a29k-none
27051-				basic_os=bsd
27052-				;;
27053-			amdahl)
27054-				basic_machine=580-amdahl
27055-				basic_os=sysv
27056-				;;
27057-			amiga)
27058-				basic_machine=m68k-unknown
27059-				basic_os=
27060-				;;
27061-			amigaos | amigados)
27062-				basic_machine=m68k-unknown
27063-				basic_os=amigaos
27064-				;;
27065-			amigaunix | amix)
27066-				basic_machine=m68k-unknown
27067-				basic_os=sysv4
27068-				;;
27069-			apollo68)
27070-				basic_machine=m68k-apollo
27071-				basic_os=sysv
27072-				;;
27073-			apollo68bsd)
27074-				basic_machine=m68k-apollo
27075-				basic_os=bsd
27076-				;;
27077-			aros)
27078-				basic_machine=i386-pc
27079-				basic_os=aros
27080-				;;
27081-			aux)
27082-				basic_machine=m68k-apple
27083-				basic_os=aux
27084-				;;
27085-			balance)
27086-				basic_machine=ns32k-sequent
27087-				basic_os=dynix
27088-				;;
27089-			blackfin)
27090-				basic_machine=bfin-unknown
27091-				basic_os=linux
27092-				;;
27093-			cegcc)
27094-				basic_machine=arm-unknown
27095-				basic_os=cegcc
27096-				;;
27097-			convex-c1)
27098-				basic_machine=c1-convex
27099-				basic_os=bsd
27100-				;;
27101-			convex-c2)
27102-				basic_machine=c2-convex
27103-				basic_os=bsd
27104-				;;
27105-			convex-c32)
27106-				basic_machine=c32-convex
27107-				basic_os=bsd
27108-				;;
27109-			convex-c34)
27110-				basic_machine=c34-convex
27111-				basic_os=bsd
27112-				;;
27113-			convex-c38)
27114-				basic_machine=c38-convex
27115-				basic_os=bsd
27116-				;;
27117-			cray)
27118-				basic_machine=j90-cray
27119-				basic_os=unicos
27120-				;;
27121-			crds | unos)
27122-				basic_machine=m68k-crds
27123-				basic_os=
27124-				;;
27125-			da30)
27126-				basic_machine=m68k-da30
27127-				basic_os=
27128-				;;
27129-			decstation | pmax | pmin | dec3100 | decstatn)
27130-				basic_machine=mips-dec
27131-				basic_os=
27132-				;;
27133-			delta88)
27134-				basic_machine=m88k-motorola
27135-				basic_os=sysv3
27136-				;;
27137-			dicos)
27138-				basic_machine=i686-pc
27139-				basic_os=dicos
27140-				;;
27141-			djgpp)
27142-				basic_machine=i586-pc
27143-				basic_os=msdosdjgpp
27144-				;;
27145-			ebmon29k)
27146-				basic_machine=a29k-amd
27147-				basic_os=ebmon
27148-				;;
27149-			es1800 | OSE68k | ose68k | ose | OSE)
27150-				basic_machine=m68k-ericsson
27151-				basic_os=ose
27152-				;;
27153-			gmicro)
27154-				basic_machine=tron-gmicro
27155-				basic_os=sysv
27156-				;;
27157-			go32)
27158-				basic_machine=i386-pc
27159-				basic_os=go32
27160-				;;
27161-			h8300hms)
27162-				basic_machine=h8300-hitachi
27163-				basic_os=hms
27164-				;;
27165-			h8300xray)
27166-				basic_machine=h8300-hitachi
27167-				basic_os=xray
27168-				;;
27169-			h8500hms)
27170-				basic_machine=h8500-hitachi
27171-				basic_os=hms
27172-				;;
27173-			harris)
27174-				basic_machine=m88k-harris
27175-				basic_os=sysv3
27176-				;;
27177-			hp300 | hp300hpux)
27178-				basic_machine=m68k-hp
27179-				basic_os=hpux
27180-				;;
27181-			hp300bsd)
27182-				basic_machine=m68k-hp
27183-				basic_os=bsd
27184-				;;
27185-			hppaosf)
27186-				basic_machine=hppa1.1-hp
27187-				basic_os=osf
27188-				;;
27189-			hppro)
27190-				basic_machine=hppa1.1-hp
27191-				basic_os=proelf
27192-				;;
27193-			i386mach)
27194-				basic_machine=i386-mach
27195-				basic_os=mach
27196-				;;
27197-			isi68 | isi)
27198-				basic_machine=m68k-isi
27199-				basic_os=sysv
27200-				;;
27201-			m68knommu)
27202-				basic_machine=m68k-unknown
27203-				basic_os=linux
27204-				;;
27205-			magnum | m3230)
27206-				basic_machine=mips-mips
27207-				basic_os=sysv
27208-				;;
27209-			merlin)
27210-				basic_machine=ns32k-utek
27211-				basic_os=sysv
27212-				;;
27213-			mingw64)
27214-				basic_machine=x86_64-pc
27215-				basic_os=mingw64
27216-				;;
27217-			mingw32)
27218-				basic_machine=i686-pc
27219-				basic_os=mingw32
27220-				;;
27221-			mingw32ce)
27222-				basic_machine=arm-unknown
27223-				basic_os=mingw32ce
27224-				;;
27225-			monitor)
27226-				basic_machine=m68k-rom68k
27227-				basic_os=coff
27228-				;;
27229-			morphos)
27230-				basic_machine=powerpc-unknown
27231-				basic_os=morphos
27232-				;;
27233-			moxiebox)
27234-				basic_machine=moxie-unknown
27235-				basic_os=moxiebox
27236-				;;
27237-			msdos)
27238-				basic_machine=i386-pc
27239-				basic_os=msdos
27240-				;;
27241-			msys)
27242-				basic_machine=i686-pc
27243-				basic_os=msys
27244-				;;
27245-			mvs)
27246-				basic_machine=i370-ibm
27247-				basic_os=mvs
27248-				;;
27249-			nacl)
27250-				basic_machine=le32-unknown
27251-				basic_os=nacl
27252-				;;
27253-			ncr3000)
27254-				basic_machine=i486-ncr
27255-				basic_os=sysv4
27256-				;;
27257-			netbsd386)
27258-				basic_machine=i386-pc
27259-				basic_os=netbsd
27260-				;;
27261-			netwinder)
27262-				basic_machine=armv4l-rebel
27263-				basic_os=linux
27264-				;;
27265-			news | news700 | news800 | news900)
27266-				basic_machine=m68k-sony
27267-				basic_os=newsos
27268-				;;
27269-			news1000)
27270-				basic_machine=m68030-sony
27271-				basic_os=newsos
27272-				;;
27273-			necv70)
27274-				basic_machine=v70-nec
27275-				basic_os=sysv
27276-				;;
27277-			nh3000)
27278-				basic_machine=m68k-harris
27279-				basic_os=cxux
27280-				;;
27281-			nh[45]000)
27282-				basic_machine=m88k-harris
27283-				basic_os=cxux
27284-				;;
27285-			nindy960)
27286-				basic_machine=i960-intel
27287-				basic_os=nindy
27288-				;;
27289-			mon960)
27290-				basic_machine=i960-intel
27291-				basic_os=mon960
27292-				;;
27293-			nonstopux)
27294-				basic_machine=mips-compaq
27295-				basic_os=nonstopux
27296-				;;
27297-			os400)
27298-				basic_machine=powerpc-ibm
27299-				basic_os=os400
27300-				;;
27301-			OSE68000 | ose68000)
27302-				basic_machine=m68000-ericsson
27303-				basic_os=ose
27304-				;;
27305-			os68k)
27306-				basic_machine=m68k-none
27307-				basic_os=os68k
27308-				;;
27309-			paragon)
27310-				basic_machine=i860-intel
27311-				basic_os=osf
27312-				;;
27313-			parisc)
27314-				basic_machine=hppa-unknown
27315-				basic_os=linux
27316-				;;
27317-			psp)
27318-				basic_machine=mipsallegrexel-sony
27319-				basic_os=psp
27320-				;;
27321-			pw32)
27322-				basic_machine=i586-unknown
27323-				basic_os=pw32
27324-				;;
27325-			rdos | rdos64)
27326-				basic_machine=x86_64-pc
27327-				basic_os=rdos
27328-				;;
27329-			rdos32)
27330-				basic_machine=i386-pc
27331-				basic_os=rdos
27332-				;;
27333-			rom68k)
27334-				basic_machine=m68k-rom68k
27335-				basic_os=coff
27336-				;;
27337-			sa29200)
27338-				basic_machine=a29k-amd
27339-				basic_os=udi
27340-				;;
27341-			sei)
27342-				basic_machine=mips-sei
27343-				basic_os=seiux
27344-				;;
27345-			sequent)
27346-				basic_machine=i386-sequent
27347-				basic_os=
27348-				;;
27349-			sps7)
27350-				basic_machine=m68k-bull
27351-				basic_os=sysv2
27352-				;;
27353-			st2000)
27354-				basic_machine=m68k-tandem
27355-				basic_os=
27356-				;;
27357-			stratus)
27358-				basic_machine=i860-stratus
27359-				basic_os=sysv4
27360-				;;
27361-			sun2)
27362-				basic_machine=m68000-sun
27363-				basic_os=
27364-				;;
27365-			sun2os3)
27366-				basic_machine=m68000-sun
27367-				basic_os=sunos3
27368-				;;
27369-			sun2os4)
27370-				basic_machine=m68000-sun
27371-				basic_os=sunos4
27372-				;;
27373-			sun3)
27374-				basic_machine=m68k-sun
27375-				basic_os=
27376-				;;
27377-			sun3os3)
27378-				basic_machine=m68k-sun
27379-				basic_os=sunos3
27380-				;;
27381-			sun3os4)
27382-				basic_machine=m68k-sun
27383-				basic_os=sunos4
27384-				;;
27385-			sun4)
27386-				basic_machine=sparc-sun
27387-				basic_os=
27388-				;;
27389-			sun4os3)
27390-				basic_machine=sparc-sun
27391-				basic_os=sunos3
27392-				;;
27393-			sun4os4)
27394-				basic_machine=sparc-sun
27395-				basic_os=sunos4
27396-				;;
27397-			sun4sol2)
27398-				basic_machine=sparc-sun
27399-				basic_os=solaris2
27400-				;;
27401-			sun386 | sun386i | roadrunner)
27402-				basic_machine=i386-sun
27403-				basic_os=
27404-				;;
27405-			sv1)
27406-				basic_machine=sv1-cray
27407-				basic_os=unicos
27408-				;;
27409-			symmetry)
27410-				basic_machine=i386-sequent
27411-				basic_os=dynix
27412-				;;
27413-			t3e)
27414-				basic_machine=alphaev5-cray
27415-				basic_os=unicos
27416-				;;
27417-			t90)
27418-				basic_machine=t90-cray
27419-				basic_os=unicos
27420-				;;
27421-			toad1)
27422-				basic_machine=pdp10-xkl
27423-				basic_os=tops20
27424-				;;
27425-			tpf)
27426-				basic_machine=s390x-ibm
27427-				basic_os=tpf
27428-				;;
27429-			udi29k)
27430-				basic_machine=a29k-amd
27431-				basic_os=udi
27432-				;;
27433-			ultra3)
27434-				basic_machine=a29k-nyu
27435-				basic_os=sym1
27436-				;;
27437-			v810 | necv810)
27438-				basic_machine=v810-nec
27439-				basic_os=none
27440-				;;
27441-			vaxv)
27442-				basic_machine=vax-dec
27443-				basic_os=sysv
27444-				;;
27445-			vms)
27446-				basic_machine=vax-dec
27447-				basic_os=vms
27448-				;;
27449-			vsta)
27450-				basic_machine=i386-pc
27451-				basic_os=vsta
27452-				;;
27453-			vxworks960)
27454-				basic_machine=i960-wrs
27455-				basic_os=vxworks
27456-				;;
27457-			vxworks68)
27458-				basic_machine=m68k-wrs
27459-				basic_os=vxworks
27460-				;;
27461-			vxworks29k)
27462-				basic_machine=a29k-wrs
27463-				basic_os=vxworks
27464-				;;
27465-			xbox)
27466-				basic_machine=i686-pc
27467-				basic_os=mingw32
27468-				;;
27469-			ymp)
27470-				basic_machine=ymp-cray
27471-				basic_os=unicos
27472-				;;
27473-			*)
27474-				basic_machine=$1
27475-				basic_os=
27476-				;;
27477-		esac
27478-		;;
27479-esac
27480-
27481-# Decode 1-component or ad-hoc basic machines
27482-case $basic_machine in
27483-	# Here we handle the default manufacturer of certain CPU types.  It is in
27484-	# some cases the only manufacturer, in others, it is the most popular.
27485-	w89k)
27486-		cpu=hppa1.1
27487-		vendor=winbond
27488-		;;
27489-	op50n)
27490-		cpu=hppa1.1
27491-		vendor=oki
27492-		;;
27493-	op60c)
27494-		cpu=hppa1.1
27495-		vendor=oki
27496-		;;
27497-	ibm*)
27498-		cpu=i370
27499-		vendor=ibm
27500-		;;
27501-	orion105)
27502-		cpu=clipper
27503-		vendor=highlevel
27504-		;;
27505-	mac | mpw | mac-mpw)
27506-		cpu=m68k
27507-		vendor=apple
27508-		;;
27509-	pmac | pmac-mpw)
27510-		cpu=powerpc
27511-		vendor=apple
27512-		;;
27513-
27514-	# Recognize the various machine names and aliases which stand
27515-	# for a CPU type and a company and sometimes even an OS.
27516-	3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
27517-		cpu=m68000
27518-		vendor=att
27519-		;;
27520-	3b*)
27521-		cpu=we32k
27522-		vendor=att
27523-		;;
27524-	bluegene*)
27525-		cpu=powerpc
27526-		vendor=ibm
27527-		basic_os=cnk
27528-		;;
27529-	decsystem10* | dec10*)
27530-		cpu=pdp10
27531-		vendor=dec
27532-		basic_os=tops10
27533-		;;
27534-	decsystem20* | dec20*)
27535-		cpu=pdp10
27536-		vendor=dec
27537-		basic_os=tops20
27538-		;;
27539-	delta | 3300 | motorola-3300 | motorola-delta \
27540-	      | 3300-motorola | delta-motorola)
27541-		cpu=m68k
27542-		vendor=motorola
27543-		;;
27544-	dpx2*)
27545-		cpu=m68k
27546-		vendor=bull
27547-		basic_os=sysv3
27548-		;;
27549-	encore | umax | mmax)
27550-		cpu=ns32k
27551-		vendor=encore
27552-		;;
27553-	elxsi)
27554-		cpu=elxsi
27555-		vendor=elxsi
27556-		basic_os=${basic_os:-bsd}
27557-		;;
27558-	fx2800)
27559-		cpu=i860
27560-		vendor=alliant
27561-		;;
27562-	genix)
27563-		cpu=ns32k
27564-		vendor=ns
27565-		;;
27566-	h3050r* | hiux*)
27567-		cpu=hppa1.1
27568-		vendor=hitachi
27569-		basic_os=hiuxwe2
27570-		;;
27571-	hp3k9[0-9][0-9] | hp9[0-9][0-9])
27572-		cpu=hppa1.0
27573-		vendor=hp
27574-		;;
27575-	hp9k2[0-9][0-9] | hp9k31[0-9])
27576-		cpu=m68000
27577-		vendor=hp
27578-		;;
27579-	hp9k3[2-9][0-9])
27580-		cpu=m68k
27581-		vendor=hp
27582-		;;
27583-	hp9k6[0-9][0-9] | hp6[0-9][0-9])
27584-		cpu=hppa1.0
27585-		vendor=hp
27586-		;;
27587-	hp9k7[0-79][0-9] | hp7[0-79][0-9])
27588-		cpu=hppa1.1
27589-		vendor=hp
27590-		;;
27591-	hp9k78[0-9] | hp78[0-9])
27592-		# FIXME: really hppa2.0-hp
27593-		cpu=hppa1.1
27594-		vendor=hp
27595-		;;
27596-	hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
27597-		# FIXME: really hppa2.0-hp
27598-		cpu=hppa1.1
27599-		vendor=hp
27600-		;;
27601-	hp9k8[0-9][13679] | hp8[0-9][13679])
27602-		cpu=hppa1.1
27603-		vendor=hp
27604-		;;
27605-	hp9k8[0-9][0-9] | hp8[0-9][0-9])
27606-		cpu=hppa1.0
27607-		vendor=hp
27608-		;;
27609-	i*86v32)
27610-		cpu=$(echo "$1" | sed -e 's/86.*/86/')
27611-		vendor=pc
27612-		basic_os=sysv32
27613-		;;
27614-	i*86v4*)
27615-		cpu=$(echo "$1" | sed -e 's/86.*/86/')
27616-		vendor=pc
27617-		basic_os=sysv4
27618-		;;
27619-	i*86v)
27620-		cpu=$(echo "$1" | sed -e 's/86.*/86/')
27621-		vendor=pc
27622-		basic_os=sysv
27623-		;;
27624-	i*86sol2)
27625-		cpu=$(echo "$1" | sed -e 's/86.*/86/')
27626-		vendor=pc
27627-		basic_os=solaris2
27628-		;;
27629-	j90 | j90-cray)
27630-		cpu=j90
27631-		vendor=cray
27632-		basic_os=${basic_os:-unicos}
27633-		;;
27634-	iris | iris4d)
27635-		cpu=mips
27636-		vendor=sgi
27637-		case $basic_os in
27638-		    irix*)
27639-			;;
27640-		    *)
27641-			basic_os=irix4
27642-			;;
27643-		esac
27644-		;;
27645-	miniframe)
27646-		cpu=m68000
27647-		vendor=convergent
27648-		;;
27649-	*mint | mint[0-9]* | *MiNT | *MiNT[0-9]*)
27650-		cpu=m68k
27651-		vendor=atari
27652-		basic_os=mint
27653-		;;
27654-	news-3600 | risc-news)
27655-		cpu=mips
27656-		vendor=sony
27657-		basic_os=newsos
27658-		;;
27659-	next | m*-next)
27660-		cpu=m68k
27661-		vendor=next
27662-		case $basic_os in
27663-		    openstep*)
27664-		        ;;
27665-		    nextstep*)
27666-			;;
27667-		    ns2*)
27668-		      basic_os=nextstep2
27669-			;;
27670-		    *)
27671-		      basic_os=nextstep3
27672-			;;
27673-		esac
27674-		;;
27675-	np1)
27676-		cpu=np1
27677-		vendor=gould
27678-		;;
27679-	op50n-* | op60c-*)
27680-		cpu=hppa1.1
27681-		vendor=oki
27682-		basic_os=proelf
27683-		;;
27684-	pa-hitachi)
27685-		cpu=hppa1.1
27686-		vendor=hitachi
27687-		basic_os=hiuxwe2
27688-		;;
27689-	pbd)
27690-		cpu=sparc
27691-		vendor=tti
27692-		;;
27693-	pbb)
27694-		cpu=m68k
27695-		vendor=tti
27696-		;;
27697-	pc532)
27698-		cpu=ns32k
27699-		vendor=pc532
27700-		;;
27701-	pn)
27702-		cpu=pn
27703-		vendor=gould
27704-		;;
27705-	power)
27706-		cpu=power
27707-		vendor=ibm
27708-		;;
27709-	ps2)
27710-		cpu=i386
27711-		vendor=ibm
27712-		;;
27713-	rm[46]00)
27714-		cpu=mips
27715-		vendor=siemens
27716-		;;
27717-	rtpc | rtpc-*)
27718-		cpu=romp
27719-		vendor=ibm
27720-		;;
27721-	sde)
27722-		cpu=mipsisa32
27723-		vendor=sde
27724-		basic_os=${basic_os:-elf}
27725-		;;
27726-	simso-wrs)
27727-		cpu=sparclite
27728-		vendor=wrs
27729-		basic_os=vxworks
27730-		;;
27731-	tower | tower-32)
27732-		cpu=m68k
27733-		vendor=ncr
27734-		;;
27735-	vpp*|vx|vx-*)
27736-		cpu=f301
27737-		vendor=fujitsu
27738-		;;
27739-	w65)
27740-		cpu=w65
27741-		vendor=wdc
27742-		;;
27743-	w89k-*)
27744-		cpu=hppa1.1
27745-		vendor=winbond
27746-		basic_os=proelf
27747-		;;
27748-	none)
27749-		cpu=none
27750-		vendor=none
27751-		;;
27752-	leon|leon[3-9])
27753-		cpu=sparc
27754-		vendor=$basic_machine
27755-		;;
27756-	leon-*|leon[3-9]-*)
27757-		cpu=sparc
27758-		vendor=$(echo "$basic_machine" | sed 's/-.*//')
27759-		;;
27760-
27761-	*-*)
27762-		# shellcheck disable=SC2162
27763-		IFS="-" read cpu vendor <<EOF
27764-$basic_machine
27765-EOF
27766-		;;
27767-	# We use `pc' rather than `unknown'
27768-	# because (1) that's what they normally are, and
27769-	# (2) the word "unknown" tends to confuse beginning users.
27770-	i*86 | x86_64)
27771-		cpu=$basic_machine
27772-		vendor=pc
27773-		;;
27774-	# These rules are duplicated from below for the sake of the special case above;
27775-	# i.e. things that normalize to x86 arches should also default to "pc"
27776-	pc98)
27777-		cpu=i386
27778-		vendor=pc
27779-		;;
27780-	x64 | amd64)
27781-		cpu=x86_64
27782-		vendor=pc
27783-		;;
27784-	# Recognize the basic CPU types without company name.
27785-	*)
27786-		cpu=$basic_machine
27787-		vendor=unknown
27788-		;;
27789-esac
27790-
27791-unset -v basic_machine
27792-
27793-# Decode basic machines in the full and proper CPU-Company form.
27794-case $cpu-$vendor in
27795-	# Here we handle the default manufacturer of certain CPU types in canonical form. It is in
27796-	# some cases the only manufacturer, in others, it is the most popular.
27797-	craynv-unknown)
27798-		vendor=cray
27799-		basic_os=${basic_os:-unicosmp}
27800-		;;
27801-	c90-unknown | c90-cray)
27802-		vendor=cray
27803-		basic_os=${basic_os:-unicos}
27804-		;;
27805-	fx80-unknown)
27806-		vendor=alliant
27807-		;;
27808-	romp-unknown)
27809-		vendor=ibm
27810-		;;
27811-	mmix-unknown)
27812-		vendor=knuth
27813-		;;
27814-	microblaze-unknown | microblazeel-unknown)
27815-		vendor=xilinx
27816-		;;
27817-	rs6000-unknown)
27818-		vendor=ibm
27819-		;;
27820-	vax-unknown)
27821-		vendor=dec
27822-		;;
27823-	pdp11-unknown)
27824-		vendor=dec
27825-		;;
27826-	we32k-unknown)
27827-		vendor=att
27828-		;;
27829-	cydra-unknown)
27830-		vendor=cydrome
27831-		;;
27832-	i370-ibm*)
27833-		vendor=ibm
27834-		;;
27835-	orion-unknown)
27836-		vendor=highlevel
27837-		;;
27838-	xps-unknown | xps100-unknown)
27839-		cpu=xps100
27840-		vendor=honeywell
27841-		;;
27842-
27843-	# Here we normalize CPU types with a missing or matching vendor
27844-	dpx20-unknown | dpx20-bull)
27845-		cpu=rs6000
27846-		vendor=bull
27847-		basic_os=${basic_os:-bosx}
27848-		;;
27849-
27850-	# Here we normalize CPU types irrespective of the vendor
27851-	amd64-*)
27852-		cpu=x86_64
27853-		;;
27854-	blackfin-*)
27855-		cpu=bfin
27856-		basic_os=linux
27857-		;;
27858-	c54x-*)
27859-		cpu=tic54x
27860-		;;
27861-	c55x-*)
27862-		cpu=tic55x
27863-		;;
27864-	c6x-*)
27865-		cpu=tic6x
27866-		;;
27867-	e500v[12]-*)
27868-		cpu=powerpc
27869-		basic_os=${basic_os}"spe"
27870-		;;
27871-	mips3*-*)
27872-		cpu=mips64
27873-		;;
27874-	ms1-*)
27875-		cpu=mt
27876-		;;
27877-	m68knommu-*)
27878-		cpu=m68k
27879-		basic_os=linux
27880-		;;
27881-	m9s12z-* | m68hcs12z-* | hcs12z-* | s12z-*)
27882-		cpu=s12z
27883-		;;
27884-	openrisc-*)
27885-		cpu=or32
27886-		;;
27887-	parisc-*)
27888-		cpu=hppa
27889-		basic_os=linux
27890-		;;
27891-	pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
27892-		cpu=i586
27893-		;;
27894-	pentiumpro-* | p6-* | 6x86-* | athlon-* | athalon_*-*)
27895-		cpu=i686
27896-		;;
27897-	pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
27898-		cpu=i686
27899-		;;
27900-	pentium4-*)
27901-		cpu=i786
27902-		;;
27903-	pc98-*)
27904-		cpu=i386
27905-		;;
27906-	ppc-* | ppcbe-*)
27907-		cpu=powerpc
27908-		;;
27909-	ppcle-* | powerpclittle-*)
27910-		cpu=powerpcle
27911-		;;
27912-	ppc64-*)
27913-		cpu=powerpc64
27914-		;;
27915-	ppc64le-* | powerpc64little-*)
27916-		cpu=powerpc64le
27917-		;;
27918-	sb1-*)
27919-		cpu=mipsisa64sb1
27920-		;;
27921-	sb1el-*)
27922-		cpu=mipsisa64sb1el
27923-		;;
27924-	sh5e[lb]-*)
27925-		cpu=$(echo "$cpu" | sed 's/^\(sh.\)e\(.\)$/\1\2e/')
27926-		;;
27927-	spur-*)
27928-		cpu=spur
27929-		;;
27930-	strongarm-* | thumb-*)
27931-		cpu=arm
27932-		;;
27933-	tx39-*)
27934-		cpu=mipstx39
27935-		;;
27936-	tx39el-*)
27937-		cpu=mipstx39el
27938-		;;
27939-	x64-*)
27940-		cpu=x86_64
27941-		;;
27942-	xscale-* | xscalee[bl]-*)
27943-		cpu=$(echo "$cpu" | sed 's/^xscale/arm/')
27944-		;;
27945-	arm64-*)
27946-		cpu=aarch64
27947-		;;
27948-
27949-	# Recognize the canonical CPU Types that limit and/or modify the
27950-	# company names they are paired with.
27951-	cr16-*)
27952-		basic_os=${basic_os:-elf}
27953-		;;
27954-	crisv32-* | etraxfs*-*)
27955-		cpu=crisv32
27956-		vendor=axis
27957-		;;
27958-	cris-* | etrax*-*)
27959-		cpu=cris
27960-		vendor=axis
27961-		;;
27962-	crx-*)
27963-		basic_os=${basic_os:-elf}
27964-		;;
27965-	neo-tandem)
27966-		cpu=neo
27967-		vendor=tandem
27968-		;;
27969-	nse-tandem)
27970-		cpu=nse
27971-		vendor=tandem
27972-		;;
27973-	nsr-tandem)
27974-		cpu=nsr
27975-		vendor=tandem
27976-		;;
27977-	nsv-tandem)
27978-		cpu=nsv
27979-		vendor=tandem
27980-		;;
27981-	nsx-tandem)
27982-		cpu=nsx
27983-		vendor=tandem
27984-		;;
27985-	mipsallegrexel-sony)
27986-		cpu=mipsallegrexel
27987-		vendor=sony
27988-		;;
27989-	tile*-*)
27990-		basic_os=${basic_os:-linux-gnu}
27991-		;;
27992-
27993-	*)
27994-		# Recognize the canonical CPU types that are allowed with any
27995-		# company name.
27996-		case $cpu in
27997-			1750a | 580 \
27998-			| a29k \
27999-			| aarch64 | aarch64_be \
28000-			| abacus \
28001-			| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] \
28002-			| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] \
28003-			| alphapca5[67] | alpha64pca5[67] \
28004-			| am33_2.0 \
28005-			| amdgcn \
28006-			| arc | arceb \
28007-			| arm | arm[lb]e | arme[lb] | armv* \
28008-			| avr | avr32 \
28009-			| asmjs \
28010-			| ba \
28011-			| be32 | be64 \
28012-			| bfin | bpf | bs2000 \
28013-			| c[123]* | c30 | [cjt]90 | c4x \
28014-			| c8051 | clipper | craynv | csky | cydra \
28015-			| d10v | d30v | dlx | dsp16xx \
28016-			| e2k | elxsi | epiphany \
28017-			| f30[01] | f700 | fido | fr30 | frv | ft32 | fx80 \
28018-			| h8300 | h8500 \
28019-			| hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
28020-			| hexagon \
28021-			| i370 | i*86 | i860 | i960 | ia16 | ia64 \
28022-			| ip2k | iq2000 \
28023-			| k1om \
28024-			| le32 | le64 \
28025-			| lm32 \
28026-			| loongarch32 | loongarch64 | loongarchx32 \
28027-			| m32c | m32r | m32rle \
28028-			| m5200 | m68000 | m680[012346]0 | m68360 | m683?2 | m68k \
28029-			| m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x \
28030-			| m88110 | m88k | maxq | mb | mcore | mep | metag \
28031-			| microblaze | microblazeel \
28032-			| mips | mipsbe | mipseb | mipsel | mipsle \
28033-			| mips16 \
28034-			| mips64 | mips64eb | mips64el \
28035-			| mips64octeon | mips64octeonel \
28036-			| mips64orion | mips64orionel \
28037-			| mips64r5900 | mips64r5900el \
28038-			| mips64vr | mips64vrel \
28039-			| mips64vr4100 | mips64vr4100el \
28040-			| mips64vr4300 | mips64vr4300el \
28041-			| mips64vr5000 | mips64vr5000el \
28042-			| mips64vr5900 | mips64vr5900el \
28043-			| mipsisa32 | mipsisa32el \
28044-			| mipsisa32r2 | mipsisa32r2el \
28045-			| mipsisa32r6 | mipsisa32r6el \
28046-			| mipsisa64 | mipsisa64el \
28047-			| mipsisa64r2 | mipsisa64r2el \
28048-			| mipsisa64r6 | mipsisa64r6el \
28049-			| mipsisa64sb1 | mipsisa64sb1el \
28050-			| mipsisa64sr71k | mipsisa64sr71kel \
28051-			| mipsr5900 | mipsr5900el \
28052-			| mipstx39 | mipstx39el \
28053-			| mmix \
28054-			| mn10200 | mn10300 \
28055-			| moxie \
28056-			| mt \
28057-			| msp430 \
28058-			| nds32 | nds32le | nds32be \
28059-			| nfp \
28060-			| nios | nios2 | nios2eb | nios2el \
28061-			| none | np1 | ns16k | ns32k | nvptx \
28062-			| open8 \
28063-			| or1k* \
28064-			| or32 \
28065-			| orion \
28066-			| picochip \
28067-			| pdp10 | pdp11 | pj | pjl | pn | power \
28068-			| powerpc | powerpc64 | powerpc64le | powerpcle | powerpcspe \
28069-			| pru \
28070-			| pyramid \
28071-			| riscv | riscv32 | riscv32be | riscv64 | riscv64be \
28072-			| rl78 | romp | rs6000 | rx \
28073-			| s390 | s390x \
28074-			| score \
28075-			| sh | shl \
28076-			| sh[1234] | sh[24]a | sh[24]ae[lb] | sh[23]e | she[lb] | sh[lb]e \
28077-			| sh[1234]e[lb] |  sh[12345][lb]e | sh[23]ele | sh64 | sh64le \
28078-			| sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet \
28079-			| sparclite \
28080-			| sparcv8 | sparcv9 | sparcv9b | sparcv9v | sv1 | sx* \
28081-			| spu \
28082-			| tahoe \
28083-			| thumbv7* \
28084-			| tic30 | tic4x | tic54x | tic55x | tic6x | tic80 \
28085-			| tron \
28086-			| ubicom32 \
28087-			| v70 | v850 | v850e | v850e1 | v850es | v850e2 | v850e2v3 \
28088-			| vax \
28089-			| visium \
28090-			| w65 \
28091-			| wasm32 | wasm64 \
28092-			| we32k \
28093-			| x86 | x86_64 | xc16x | xgate | xps100 \
28094-			| xstormy16 | xtensa* \
28095-			| ymp \
28096-			| z8k | z80)
28097-				;;
28098-
28099-			*)
28100-				echo Invalid configuration \`"$1"\': machine \`"$cpu-$vendor"\' not recognized 1>&2
28101-				exit 1
28102-				;;
28103-		esac
28104-		;;
28105-esac
28106-
28107-# Here we canonicalize certain aliases for manufacturers.
28108-case $vendor in
28109-	digital*)
28110-		vendor=dec
28111-		;;
28112-	commodore*)
28113-		vendor=cbm
28114-		;;
28115-	*)
28116-		;;
28117-esac
28118-
28119-# Decode manufacturer-specific aliases for certain operating systems.
28120-
28121-if test x$basic_os != x
28122-then
28123-
28124-# First recognize some ad-hoc cases, or perhaps split kernel-os, or else just
28125-# set os.
28126-case $basic_os in
28127-	gnu/linux*)
28128-		kernel=linux
28129-		os=$(echo $basic_os | sed -e 's|gnu/linux|gnu|')
28130-		;;
28131-	os2-emx)
28132-		kernel=os2
28133-		os=$(echo $basic_os | sed -e 's|os2-emx|emx|')
28134-		;;
28135-	nto-qnx*)
28136-		kernel=nto
28137-		os=$(echo $basic_os | sed -e 's|nto-qnx|qnx|')
28138-		;;
28139-	*-*)
28140-		# shellcheck disable=SC2162
28141-		IFS="-" read kernel os <<EOF
28142-$basic_os
28143-EOF
28144-		;;
28145-	# Default OS when just kernel was specified
28146-	nto*)
28147-		kernel=nto
28148-		os=$(echo $basic_os | sed -e 's|nto|qnx|')
28149-		;;
28150-	linux*)
28151-		kernel=linux
28152-		os=$(echo $basic_os | sed -e 's|linux|gnu|')
28153-		;;
28154-	*)
28155-		kernel=
28156-		os=$basic_os
28157-		;;
28158-esac
28159-
28160-# Now, normalize the OS (knowing we just have one component, it's not a kernel,
28161-# etc.)
28162-case $os in
28163-	# First match some system type aliases that might get confused
28164-	# with valid system types.
28165-	# solaris* is a basic system type, with this one exception.
28166-	auroraux)
28167-		os=auroraux
28168-		;;
28169-	bluegene*)
28170-		os=cnk
28171-		;;
28172-	solaris1 | solaris1.*)
28173-		os=$(echo $os | sed -e 's|solaris1|sunos4|')
28174-		;;
28175-	solaris)
28176-		os=solaris2
28177-		;;
28178-	unixware*)
28179-		os=sysv4.2uw
28180-		;;
28181-	# es1800 is here to avoid being matched by es* (a different OS)
28182-	es1800*)
28183-		os=ose
28184-		;;
28185-	# Some version numbers need modification
28186-	chorusos*)
28187-		os=chorusos
28188-		;;
28189-	isc)
28190-		os=isc2.2
28191-		;;
28192-	sco6)
28193-		os=sco5v6
28194-		;;
28195-	sco5)
28196-		os=sco3.2v5
28197-		;;
28198-	sco4)
28199-		os=sco3.2v4
28200-		;;
28201-	sco3.2.[4-9]*)
28202-		os=$(echo $os | sed -e 's/sco3.2./sco3.2v/')
28203-		;;
28204-	sco*v* | scout)
28205-		# Don't match below
28206-		;;
28207-	sco*)
28208-		os=sco3.2v2
28209-		;;
28210-	psos*)
28211-		os=psos
28212-		;;
28213-	qnx*)
28214-		os=qnx
28215-		;;
28216-	hiux*)
28217-		os=hiuxwe2
28218-		;;
28219-	lynx*178)
28220-		os=lynxos178
28221-		;;
28222-	lynx*5)
28223-		os=lynxos5
28224-		;;
28225-	lynxos*)
28226-		# don't get caught up in next wildcard
28227-		;;
28228-	lynx*)
28229-		os=lynxos
28230-		;;
28231-	mac[0-9]*)
28232-		os=$(echo "$os" | sed -e 's|mac|macos|')
28233-		;;
28234-	opened*)
28235-		os=openedition
28236-		;;
28237-	os400*)
28238-		os=os400
28239-		;;
28240-	sunos5*)
28241-		os=$(echo "$os" | sed -e 's|sunos5|solaris2|')
28242-		;;
28243-	sunos6*)
28244-		os=$(echo "$os" | sed -e 's|sunos6|solaris3|')
28245-		;;
28246-	wince*)
28247-		os=wince
28248-		;;
28249-	utek*)
28250-		os=bsd
28251-		;;
28252-	dynix*)
28253-		os=bsd
28254-		;;
28255-	acis*)
28256-		os=aos
28257-		;;
28258-	atheos*)
28259-		os=atheos
28260-		;;
28261-	syllable*)
28262-		os=syllable
28263-		;;
28264-	386bsd)
28265-		os=bsd
28266-		;;
28267-	ctix* | uts*)
28268-		os=sysv
28269-		;;
28270-	nova*)
28271-		os=rtmk-nova
28272-		;;
28273-	ns2)
28274-		os=nextstep2
28275-		;;
28276-	# Preserve the version number of sinix5.
28277-	sinix5.*)
28278-		os=$(echo $os | sed -e 's|sinix|sysv|')
28279-		;;
28280-	sinix*)
28281-		os=sysv4
28282-		;;
28283-	tpf*)
28284-		os=tpf
28285-		;;
28286-	triton*)
28287-		os=sysv3
28288-		;;
28289-	oss*)
28290-		os=sysv3
28291-		;;
28292-	svr4*)
28293-		os=sysv4
28294-		;;
28295-	svr3)
28296-		os=sysv3
28297-		;;
28298-	sysvr4)
28299-		os=sysv4
28300-		;;
28301-	ose*)
28302-		os=ose
28303-		;;
28304-	*mint | mint[0-9]* | *MiNT | MiNT[0-9]*)
28305-		os=mint
28306-		;;
28307-	dicos*)
28308-		os=dicos
28309-		;;
28310-	pikeos*)
28311-		# Until a real need for OS-specific support for
28312-		# particular features comes up, bare-metal
28313-		# configurations are quite functional.
28314-		case $cpu in
28315-		    arm*)
28316-			os=eabi
28317-			;;
28318-		    *)
28319-			os=elf
28320-			;;
28321-		esac
28322-		;;
28323-	*)
28324-		# No normalization, but not necessarily accepted, that comes below.
28325-		;;
28326-esac
28327-
28328-else
28329-
28330-# Here we handle the default operating systems that come with various machines.
28331-# The value should be what the vendor currently ships out the door with their
28332-# machine, or put another way, the most popular os provided with the machine.
28333-
28334-# Note that if you're going to try to match "-MANUFACTURER" here (say,
28335-# "-sun"), then you have to tell the case statement up towards the top
28336-# that MANUFACTURER isn't an operating system.  Otherwise, code above
28337-# will signal an error saying that MANUFACTURER isn't an operating
28338-# system, and we'll never get to this point.
28339-
28340-kernel=
28341-case $cpu-$vendor in
28342-	score-*)
28343-		os=elf
28344-		;;
28345-	spu-*)
28346-		os=elf
28347-		;;
28348-	*-acorn)
28349-		os=riscix1.2
28350-		;;
28351-	arm*-rebel)
28352-		kernel=linux
28353-		os=gnu
28354-		;;
28355-	arm*-semi)
28356-		os=aout
28357-		;;
28358-	c4x-* | tic4x-*)
28359-		os=coff
28360-		;;
28361-	c8051-*)
28362-		os=elf
28363-		;;
28364-	clipper-intergraph)
28365-		os=clix
28366-		;;
28367-	hexagon-*)
28368-		os=elf
28369-		;;
28370-	tic54x-*)
28371-		os=coff
28372-		;;
28373-	tic55x-*)
28374-		os=coff
28375-		;;
28376-	tic6x-*)
28377-		os=coff
28378-		;;
28379-	# This must come before the *-dec entry.
28380-	pdp10-*)
28381-		os=tops20
28382-		;;
28383-	pdp11-*)
28384-		os=none
28385-		;;
28386-	*-dec | vax-*)
28387-		os=ultrix4.2
28388-		;;
28389-	m68*-apollo)
28390-		os=domain
28391-		;;
28392-	i386-sun)
28393-		os=sunos4.0.2
28394-		;;
28395-	m68000-sun)
28396-		os=sunos3
28397-		;;
28398-	m68*-cisco)
28399-		os=aout
28400-		;;
28401-	mep-*)
28402-		os=elf
28403-		;;
28404-	mips*-cisco)
28405-		os=elf
28406-		;;
28407-	mips*-*)
28408-		os=elf
28409-		;;
28410-	or32-*)
28411-		os=coff
28412-		;;
28413-	*-tti)	# must be before sparc entry or we get the wrong os.
28414-		os=sysv3
28415-		;;
28416-	sparc-* | *-sun)
28417-		os=sunos4.1.1
28418-		;;
28419-	pru-*)
28420-		os=elf
28421-		;;
28422-	*-be)
28423-		os=beos
28424-		;;
28425-	*-ibm)
28426-		os=aix
28427-		;;
28428-	*-knuth)
28429-		os=mmixware
28430-		;;
28431-	*-wec)
28432-		os=proelf
28433-		;;
28434-	*-winbond)
28435-		os=proelf
28436-		;;
28437-	*-oki)
28438-		os=proelf
28439-		;;
28440-	*-hp)
28441-		os=hpux
28442-		;;
28443-	*-hitachi)
28444-		os=hiux
28445-		;;
28446-	i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
28447-		os=sysv
28448-		;;
28449-	*-cbm)
28450-		os=amigaos
28451-		;;
28452-	*-dg)
28453-		os=dgux
28454-		;;
28455-	*-dolphin)
28456-		os=sysv3
28457-		;;
28458-	m68k-ccur)
28459-		os=rtu
28460-		;;
28461-	m88k-omron*)
28462-		os=luna
28463-		;;
28464-	*-next)
28465-		os=nextstep
28466-		;;
28467-	*-sequent)
28468-		os=ptx
28469-		;;
28470-	*-crds)
28471-		os=unos
28472-		;;
28473-	*-ns)
28474-		os=genix
28475-		;;
28476-	i370-*)
28477-		os=mvs
28478-		;;
28479-	*-gould)
28480-		os=sysv
28481-		;;
28482-	*-highlevel)
28483-		os=bsd
28484-		;;
28485-	*-encore)
28486-		os=bsd
28487-		;;
28488-	*-sgi)
28489-		os=irix
28490-		;;
28491-	*-siemens)
28492-		os=sysv4
28493-		;;
28494-	*-masscomp)
28495-		os=rtu
28496-		;;
28497-	f30[01]-fujitsu | f700-fujitsu)
28498-		os=uxpv
28499-		;;
28500-	*-rom68k)
28501-		os=coff
28502-		;;
28503-	*-*bug)
28504-		os=coff
28505-		;;
28506-	*-apple)
28507-		os=macos
28508-		;;
28509-	*-atari*)
28510-		os=mint
28511-		;;
28512-	*-wrs)
28513-		os=vxworks
28514-		;;
28515-	*)
28516-		os=none
28517-		;;
28518-esac
28519-
28520-fi
28521-
28522-# Now, validate our (potentially fixed-up) OS.
28523-case $os in
28524-	# Sometimes we do "kernel-abi", so those need to count as OSes.
28525-	musl* | newlib* | uclibc*)
28526-		;;
28527-	# Likewise for "kernel-libc"
28528-	eabi* | gnueabi*)
28529-		;;
28530-	# Now accept the basic system types.
28531-	# The portable systems come first.
28532-	# Each alternative MUST end in a * to match a version number.
28533-	gnu* | android* | bsd* | mach* | minix* | genix* | ultrix* | irix* \
28534-	     | *vms* | esix* | aix* | cnk* | sunos | sunos[34]* \
28535-	     | hpux* | unos* | osf* | luna* | dgux* | auroraux* | solaris* \
28536-	     | sym* |  plan9* | psp* | sim* | xray* | os68k* | v88r* \
28537-	     | hiux* | abug | nacl* | netware* | windows* \
28538-	     | os9* | macos* | osx* | ios* \
28539-	     | mpw* | magic* | mmixware* | mon960* | lnews* \
28540-	     | amigaos* | amigados* | msdos* | newsos* | unicos* | aof* \
28541-	     | aos* | aros* | cloudabi* | sortix* | twizzler* \
28542-	     | nindy* | vxsim* | vxworks* | ebmon* | hms* | mvs* \
28543-	     | clix* | riscos* | uniplus* | iris* | isc* | rtu* | xenix* \
28544-	     | mirbsd* | netbsd* | dicos* | openedition* | ose* \
28545-	     | bitrig* | openbsd* | solidbsd* | libertybsd* | os108* \
28546-	     | ekkobsd* | freebsd* | riscix* | lynxos* | os400* \
28547-	     | bosx* | nextstep* | cxux* | aout* | elf* | oabi* \
28548-	     | ptx* | coff* | ecoff* | winnt* | domain* | vsta* \
28549-	     | udi* | lites* | ieee* | go32* | aux* | hcos* \
28550-	     | chorusrdb* | cegcc* | glidix* \
28551-	     | cygwin* | msys* | pe* | moss* | proelf* | rtems* \
28552-	     | midipix* | mingw32* | mingw64* | mint* \
28553-	     | uxpv* | beos* | mpeix* | udk* | moxiebox* \
28554-	     | interix* | uwin* | mks* | rhapsody* | darwin* \
28555-	     | openstep* | oskit* | conix* | pw32* | nonstopux* \
28556-	     | storm-chaos* | tops10* | tenex* | tops20* | its* \
28557-	     | os2* | vos* | palmos* | uclinux* | nucleus* | morphos* \
28558-	     | scout* | superux* | sysv* | rtmk* | tpf* | windiss* \
28559-	     | powermax* | dnix* | nx6 | nx7 | sei* | dragonfly* \
28560-	     | skyos* | haiku* | rdos* | toppers* | drops* | es* \
28561-	     | onefs* | tirtos* | phoenix* | fuchsia* | redox* | bme* \
28562-	     | midnightbsd* | amdhsa* | unleashed* | emscripten* | wasi* \
28563-	     | nsk* | powerunix* | genode* | zvmoe* | qnx* | emx*)
28564-		;;
28565-	# This one is extra strict with allowed versions
28566-	sco3.2v2 | sco3.2v[4-9]* | sco5v6*)
28567-		# Don't forget version if it is 3.2v4 or newer.
28568-		;;
28569-	none)
28570-		;;
28571-	*)
28572-		echo Invalid configuration \`"$1"\': OS \`"$os"\' not recognized 1>&2
28573-		exit 1
28574-		;;
28575-esac
28576-
28577-# As a final step for OS-related things, validate the OS-kernel combination
28578-# (given a valid OS), if there is a kernel.
28579-case $kernel-$os in
28580-	linux-gnu* | linux-dietlibc* | linux-android* | linux-newlib* | linux-musl* | linux-uclibc* )
28581-		;;
28582-	uclinux-uclibc* )
28583-		;;
28584-	-dietlibc* | -newlib* | -musl* | -uclibc* )
28585-		# These are just libc implementations, not actual OSes, and thus
28586-		# require a kernel.
28587-		echo "Invalid configuration \`$1': libc \`$os' needs explicit kernel." 1>&2
28588-		exit 1
28589-		;;
28590-	kfreebsd*-gnu* | kopensolaris*-gnu*)
28591-		;;
28592-	nto-qnx*)
28593-		;;
28594-	os2-emx)
28595-		;;
28596-	*-eabi* | *-gnueabi*)
28597-		;;
28598-	-*)
28599-		# Blank kernel with real OS is always fine.
28600-		;;
28601-	*-*)
28602-		echo "Invalid configuration \`$1': Kernel \`$kernel' not known to work with OS \`$os'." 1>&2
28603-		exit 1
28604-		;;
28605-esac
28606-
28607-# Here we handle the case where we know the os and the CPU type, but not the
28608-# manufacturer.  We pick the logical manufacturer.
28609-case $vendor in
28610-	unknown)
28611-		case $cpu-$os in
28612-			*-riscix*)
28613-				vendor=acorn
28614-				;;
28615-			*-sunos*)
28616-				vendor=sun
28617-				;;
28618-			*-cnk* | *-aix*)
28619-				vendor=ibm
28620-				;;
28621-			*-beos*)
28622-				vendor=be
28623-				;;
28624-			*-hpux*)
28625-				vendor=hp
28626-				;;
28627-			*-mpeix*)
28628-				vendor=hp
28629-				;;
28630-			*-hiux*)
28631-				vendor=hitachi
28632-				;;
28633-			*-unos*)
28634-				vendor=crds
28635-				;;
28636-			*-dgux*)
28637-				vendor=dg
28638-				;;
28639-			*-luna*)
28640-				vendor=omron
28641-				;;
28642-			*-genix*)
28643-				vendor=ns
28644-				;;
28645-			*-clix*)
28646-				vendor=intergraph
28647-				;;
28648-			*-mvs* | *-opened*)
28649-				vendor=ibm
28650-				;;
28651-			*-os400*)
28652-				vendor=ibm
28653-				;;
28654-			s390-* | s390x-*)
28655-				vendor=ibm
28656-				;;
28657-			*-ptx*)
28658-				vendor=sequent
28659-				;;
28660-			*-tpf*)
28661-				vendor=ibm
28662-				;;
28663-			*-vxsim* | *-vxworks* | *-windiss*)
28664-				vendor=wrs
28665-				;;
28666-			*-aux*)
28667-				vendor=apple
28668-				;;
28669-			*-hms*)
28670-				vendor=hitachi
28671-				;;
28672-			*-mpw* | *-macos*)
28673-				vendor=apple
28674-				;;
28675-			*-*mint | *-mint[0-9]* | *-*MiNT | *-MiNT[0-9]*)
28676-				vendor=atari
28677-				;;
28678-			*-vos*)
28679-				vendor=stratus
28680-				;;
28681-		esac
28682-		;;
28683-esac
28684-
28685-echo "$cpu-$vendor-${kernel:+$kernel-}$os"
28686-exit
28687-
28688-# Local variables:
28689-# eval: (add-hook 'before-save-hook 'time-stamp)
28690-# time-stamp-start: "timestamp='"
28691-# time-stamp-format: "%:y-%02m-%02d"
28692-# time-stamp-end: "'"
28693-# End:
28694diff --git a/jemalloc/build-aux/install-sh b/jemalloc/build-aux/install-sh
28695deleted file mode 100755
28696index ebc6691..0000000
28697--- a/jemalloc/build-aux/install-sh
28698+++ /dev/null
28699@@ -1,250 +0,0 @@
28700-#! /bin/sh
28701-#
28702-# install - install a program, script, or datafile
28703-# This comes from X11R5 (mit/util/scripts/install.sh).
28704-#
28705-# Copyright 1991 by the Massachusetts Institute of Technology
28706-#
28707-# Permission to use, copy, modify, distribute, and sell this software and its
28708-# documentation for any purpose is hereby granted without fee, provided that
28709-# the above copyright notice appear in all copies and that both that
28710-# copyright notice and this permission notice appear in supporting
28711-# documentation, and that the name of M.I.T. not be used in advertising or
28712-# publicity pertaining to distribution of the software without specific,
28713-# written prior permission.  M.I.T. makes no representations about the
28714-# suitability of this software for any purpose.  It is provided "as is"
28715-# without express or implied warranty.
28716-#
28717-# Calling this script install-sh is preferred over install.sh, to prevent
28718-# `make' implicit rules from creating a file called install from it
28719-# when there is no Makefile.
28720-#
28721-# This script is compatible with the BSD install script, but was written
28722-# from scratch.  It can only install one file at a time, a restriction
28723-# shared with many OSes' install programs.
28724-
28725-
28726-# set DOITPROG to echo to test this script
28727-
28728-# Don't use :- since 4.3BSD and earlier shells don't like it.
28729-doit="${DOITPROG-}"
28730-
28731-
28732-# put in absolute paths if you don't have them in your path; or use env. vars.
28733-
28734-mvprog="${MVPROG-mv}"
28735-cpprog="${CPPROG-cp}"
28736-chmodprog="${CHMODPROG-chmod}"
28737-chownprog="${CHOWNPROG-chown}"
28738-chgrpprog="${CHGRPPROG-chgrp}"
28739-stripprog="${STRIPPROG-strip}"
28740-rmprog="${RMPROG-rm}"
28741-mkdirprog="${MKDIRPROG-mkdir}"
28742-
28743-transformbasename=""
28744-transform_arg=""
28745-instcmd="$mvprog"
28746-chmodcmd="$chmodprog 0755"
28747-chowncmd=""
28748-chgrpcmd=""
28749-stripcmd=""
28750-rmcmd="$rmprog -f"
28751-mvcmd="$mvprog"
28752-src=""
28753-dst=""
28754-dir_arg=""
28755-
28756-while [ x"$1" != x ]; do
28757-    case $1 in
28758-	-c) instcmd="$cpprog"
28759-	    shift
28760-	    continue;;
28761-
28762-	-d) dir_arg=true
28763-	    shift
28764-	    continue;;
28765-
28766-	-m) chmodcmd="$chmodprog $2"
28767-	    shift
28768-	    shift
28769-	    continue;;
28770-
28771-	-o) chowncmd="$chownprog $2"
28772-	    shift
28773-	    shift
28774-	    continue;;
28775-
28776-	-g) chgrpcmd="$chgrpprog $2"
28777-	    shift
28778-	    shift
28779-	    continue;;
28780-
28781-	-s) stripcmd="$stripprog"
28782-	    shift
28783-	    continue;;
28784-
28785-	-t=*) transformarg=`echo $1 | sed 's/-t=//'`
28786-	    shift
28787-	    continue;;
28788-
28789-	-b=*) transformbasename=`echo $1 | sed 's/-b=//'`
28790-	    shift
28791-	    continue;;
28792-
28793-	*)  if [ x"$src" = x ]
28794-	    then
28795-		src=$1
28796-	    else
28797-		# this colon is to work around a 386BSD /bin/sh bug
28798-		:
28799-		dst=$1
28800-	    fi
28801-	    shift
28802-	    continue;;
28803-    esac
28804-done
28805-
28806-if [ x"$src" = x ]
28807-then
28808-	echo "install:	no input file specified"
28809-	exit 1
28810-else
28811-	true
28812-fi
28813-
28814-if [ x"$dir_arg" != x ]; then
28815-	dst=$src
28816-	src=""
28817-
28818-	if [ -d $dst ]; then
28819-		instcmd=:
28820-	else
28821-		instcmd=mkdir
28822-	fi
28823-else
28824-
28825-# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
28826-# might cause directories to be created, which would be especially bad
28827-# if $src (and thus $dsttmp) contains '*'.
28828-
28829-	if [ -f $src -o -d $src ]
28830-	then
28831-		true
28832-	else
28833-		echo "install:  $src does not exist"
28834-		exit 1
28835-	fi
28836-
28837-	if [ x"$dst" = x ]
28838-	then
28839-		echo "install:	no destination specified"
28840-		exit 1
28841-	else
28842-		true
28843-	fi
28844-
28845-# If destination is a directory, append the input filename; if your system
28846-# does not like double slashes in filenames, you may need to add some logic
28847-
28848-	if [ -d $dst ]
28849-	then
28850-		dst="$dst"/`basename $src`
28851-	else
28852-		true
28853-	fi
28854-fi
28855-
28856-## this sed command emulates the dirname command
28857-dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
28858-
28859-# Make sure that the destination directory exists.
28860-#  this part is taken from Noah Friedman's mkinstalldirs script
28861-
28862-# Skip lots of stat calls in the usual case.
28863-if [ ! -d "$dstdir" ]; then
28864-defaultIFS='
28865-'
28866-IFS="${IFS-${defaultIFS}}"
28867-
28868-oIFS="${IFS}"
28869-# Some sh's can't handle IFS=/ for some reason.
28870-IFS='%'
28871-set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'`
28872-IFS="${oIFS}"
28873-
28874-pathcomp=''
28875-
28876-while [ $# -ne 0 ] ; do
28877-	pathcomp="${pathcomp}${1}"
28878-	shift
28879-
28880-	if [ ! -d "${pathcomp}" ] ;
28881-        then
28882-		$mkdirprog "${pathcomp}"
28883-	else
28884-		true
28885-	fi
28886-
28887-	pathcomp="${pathcomp}/"
28888-done
28889-fi
28890-
28891-if [ x"$dir_arg" != x ]
28892-then
28893-	$doit $instcmd $dst &&
28894-
28895-	if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi &&
28896-	if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi &&
28897-	if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi &&
28898-	if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi
28899-else
28900-
28901-# If we're going to rename the final executable, determine the name now.
28902-
28903-	if [ x"$transformarg" = x ]
28904-	then
28905-		dstfile=`basename $dst`
28906-	else
28907-		dstfile=`basename $dst $transformbasename |
28908-			sed $transformarg`$transformbasename
28909-	fi
28910-
28911-# don't allow the sed command to completely eliminate the filename
28912-
28913-	if [ x"$dstfile" = x ]
28914-	then
28915-		dstfile=`basename $dst`
28916-	else
28917-		true
28918-	fi
28919-
28920-# Make a temp file name in the proper directory.
28921-
28922-	dsttmp=$dstdir/#inst.$$#
28923-
28924-# Move or copy the file name to the temp name
28925-
28926-	$doit $instcmd $src $dsttmp &&
28927-
28928-	trap "rm -f ${dsttmp}" 0 &&
28929-
28930-# and set any options; do chmod last to preserve setuid bits
28931-
28932-# If any of these fail, we abort the whole thing.  If we want to
28933-# ignore errors from any of these, just make sure not to ignore
28934-# errors from the above "$doit $instcmd $src $dsttmp" command.
28935-
28936-	if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi &&
28937-	if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi &&
28938-	if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi &&
28939-	if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi &&
28940-
28941-# Now rename the file to the real destination.
28942-
28943-	$doit $rmcmd -f $dstdir/$dstfile &&
28944-	$doit $mvcmd $dsttmp $dstdir/$dstfile
28945-
28946-fi &&
28947-
28948-
28949-exit 0
28950diff --git a/jemalloc/config.stamp.in b/jemalloc/config.stamp.in
28951deleted file mode 100644
28952index e69de29..0000000
28953diff --git a/jemalloc/configure.ac b/jemalloc/configure.ac
28954deleted file mode 100644
28955index f6d25f3..0000000
28956--- a/jemalloc/configure.ac
28957+++ /dev/null
28958@@ -1,2669 +0,0 @@
28959-dnl Process this file with autoconf to produce a configure script.
28960-AC_PREREQ(2.68)
28961-AC_INIT([Makefile.in])
28962-
28963-AC_CONFIG_AUX_DIR([build-aux])
28964-
28965-dnl ============================================================================
28966-dnl Custom macro definitions.
28967-
28968-dnl JE_CONCAT_VVV(r, a, b)
28969-dnl
28970-dnl Set $r to the concatenation of $a and $b, with a space separating them iff
28971-dnl both $a and $b are non-empty.
28972-AC_DEFUN([JE_CONCAT_VVV],
28973-if test "x[$]{$2}" = "x" -o "x[$]{$3}" = "x" ; then
28974-  $1="[$]{$2}[$]{$3}"
28975-else
28976-  $1="[$]{$2} [$]{$3}"
28977-fi
28978-)
28979-
28980-dnl JE_APPEND_VS(a, b)
28981-dnl
28982-dnl Set $a to the concatenation of $a and b, with a space separating them iff
28983-dnl both $a and b are non-empty.
28984-AC_DEFUN([JE_APPEND_VS],
28985-  T_APPEND_V=$2
28986-  JE_CONCAT_VVV($1, $1, T_APPEND_V)
28987-)
28988-
28989-CONFIGURE_CFLAGS=
28990-SPECIFIED_CFLAGS="${CFLAGS}"
28991-dnl JE_CFLAGS_ADD(cflag)
28992-dnl
28993-dnl CFLAGS is the concatenation of CONFIGURE_CFLAGS and SPECIFIED_CFLAGS
28994-dnl (ignoring EXTRA_CFLAGS, which does not impact configure tests).  This macro
28995-dnl appends to CONFIGURE_CFLAGS and regenerates CFLAGS.
28996-AC_DEFUN([JE_CFLAGS_ADD],
28997-[
28998-AC_MSG_CHECKING([whether compiler supports $1])
28999-T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
29000-JE_APPEND_VS(CONFIGURE_CFLAGS, $1)
29001-JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
29002-AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
29003-[[
29004-]], [[
29005-    return 0;
29006-]])],
29007-              [je_cv_cflags_added=$1]
29008-              AC_MSG_RESULT([yes]),
29009-              [je_cv_cflags_added=]
29010-              AC_MSG_RESULT([no])
29011-              [CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"]
29012-)
29013-JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
29014-])
29015-
29016-dnl JE_CFLAGS_SAVE()
29017-dnl JE_CFLAGS_RESTORE()
29018-dnl
29019-dnl Save/restore CFLAGS.  Nesting is not supported.
29020-AC_DEFUN([JE_CFLAGS_SAVE],
29021-SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
29022-)
29023-AC_DEFUN([JE_CFLAGS_RESTORE],
29024-CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
29025-JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
29026-)
29027-
29028-CONFIGURE_CXXFLAGS=
29029-SPECIFIED_CXXFLAGS="${CXXFLAGS}"
29030-dnl JE_CXXFLAGS_ADD(cxxflag)
29031-AC_DEFUN([JE_CXXFLAGS_ADD],
29032-[
29033-AC_MSG_CHECKING([whether compiler supports $1])
29034-T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
29035-JE_APPEND_VS(CONFIGURE_CXXFLAGS, $1)
29036-JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS)
29037-AC_LANG_PUSH([C++])
29038-AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
29039-[[
29040-]], [[
29041-    return 0;
29042-]])],
29043-              [je_cv_cxxflags_added=$1]
29044-              AC_MSG_RESULT([yes]),
29045-              [je_cv_cxxflags_added=]
29046-              AC_MSG_RESULT([no])
29047-              [CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"]
29048-)
29049-AC_LANG_POP([C++])
29050-JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS)
29051-])
29052-
29053-dnl JE_COMPILABLE(label, hcode, mcode, rvar)
29054-dnl
29055-dnl Use AC_LINK_IFELSE() rather than AC_COMPILE_IFELSE() so that linker errors
29056-dnl cause failure.
29057-AC_DEFUN([JE_COMPILABLE],
29058-[
29059-AC_CACHE_CHECK([whether $1 is compilable],
29060-               [$4],
29061-               [AC_LINK_IFELSE([AC_LANG_PROGRAM([$2],
29062-                                                [$3])],
29063-                               [$4=yes],
29064-                               [$4=no])])
29065-])
29066-
29067-dnl ============================================================================
29068-
29069-CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'`
29070-AC_SUBST([CONFIG])
29071-
29072-dnl Library revision.
29073-rev=2
29074-AC_SUBST([rev])
29075-
29076-srcroot=$srcdir
29077-if test "x${srcroot}" = "x." ; then
29078-  srcroot=""
29079-else
29080-  srcroot="${srcroot}/"
29081-fi
29082-AC_SUBST([srcroot])
29083-abs_srcroot="`cd \"${srcdir}\"; pwd`/"
29084-AC_SUBST([abs_srcroot])
29085-
29086-objroot=""
29087-AC_SUBST([objroot])
29088-abs_objroot="`pwd`/"
29089-AC_SUBST([abs_objroot])
29090-
29091-dnl Munge install path variables.
29092-case "$prefix" in
29093-   *\ * ) AC_MSG_ERROR([Prefix should not contain spaces]) ;;
29094-   "NONE" ) prefix="/usr/local" ;;
29095-esac
29096-case "$exec_prefix" in
29097-   *\ * ) AC_MSG_ERROR([Exec prefix should not contain spaces]) ;;
29098-   "NONE" ) exec_prefix=$prefix ;;
29099-esac
29100-PREFIX=$prefix
29101-AC_SUBST([PREFIX])
29102-BINDIR=`eval echo $bindir`
29103-BINDIR=`eval echo $BINDIR`
29104-AC_SUBST([BINDIR])
29105-INCLUDEDIR=`eval echo $includedir`
29106-INCLUDEDIR=`eval echo $INCLUDEDIR`
29107-AC_SUBST([INCLUDEDIR])
29108-LIBDIR=`eval echo $libdir`
29109-LIBDIR=`eval echo $LIBDIR`
29110-AC_SUBST([LIBDIR])
29111-DATADIR=`eval echo $datadir`
29112-DATADIR=`eval echo $DATADIR`
29113-AC_SUBST([DATADIR])
29114-MANDIR=`eval echo $mandir`
29115-MANDIR=`eval echo $MANDIR`
29116-AC_SUBST([MANDIR])
29117-
29118-dnl Support for building documentation.
29119-AC_PATH_PROG([XSLTPROC], [xsltproc], [false], [$PATH])
29120-if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then
29121-  DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl"
29122-elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then
29123-  DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets"
29124-else
29125-  dnl Documentation building will fail if this default gets used.
29126-  DEFAULT_XSLROOT=""
29127-fi
29128-AC_ARG_WITH([xslroot],
29129-  [AS_HELP_STRING([--with-xslroot=<path>], [XSL stylesheet root path])], [
29130-if test "x$with_xslroot" = "xno" ; then
29131-  XSLROOT="${DEFAULT_XSLROOT}"
29132-else
29133-  XSLROOT="${with_xslroot}"
29134-fi
29135-],
29136-  XSLROOT="${DEFAULT_XSLROOT}"
29137-)
29138-if test "x$XSLTPROC" = "xfalse" ; then
29139-  XSLROOT=""
29140-fi
29141-AC_SUBST([XSLROOT])
29142-
29143-dnl If CFLAGS isn't defined, set CFLAGS to something reasonable.  Otherwise,
29144-dnl just prevent autoconf from molesting CFLAGS.
29145-CFLAGS=$CFLAGS
29146-AC_PROG_CC
29147-
29148-if test "x$GCC" != "xyes" ; then
29149-  AC_CACHE_CHECK([whether compiler is MSVC],
29150-                 [je_cv_msvc],
29151-                 [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
29152-                                                     [
29153-#ifndef _MSC_VER
29154-  int fail[-1];
29155-#endif
29156-])],
29157-                               [je_cv_msvc=yes],
29158-                               [je_cv_msvc=no])])
29159-fi
29160-
29161-dnl check if a cray prgenv wrapper compiler is being used
29162-je_cv_cray_prgenv_wrapper=""
29163-if test "x${PE_ENV}" != "x" ; then
29164-  case "${CC}" in
29165-    CC|cc)
29166-	je_cv_cray_prgenv_wrapper="yes"
29167-	;;
29168-    *)
29169-       ;;
29170-  esac
29171-fi
29172-
29173-AC_CACHE_CHECK([whether compiler is cray],
29174-              [je_cv_cray],
29175-              [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
29176-                                                  [
29177-#ifndef _CRAYC
29178-  int fail[-1];
29179-#endif
29180-])],
29181-                            [je_cv_cray=yes],
29182-                            [je_cv_cray=no])])
29183-
29184-if test "x${je_cv_cray}" = "xyes" ; then
29185-  AC_CACHE_CHECK([whether cray compiler version is 8.4],
29186-                [je_cv_cray_84],
29187-                [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
29188-                                                      [
29189-#if !(_RELEASE_MAJOR == 8 && _RELEASE_MINOR == 4)
29190-  int fail[-1];
29191-#endif
29192-])],
29193-                              [je_cv_cray_84=yes],
29194-                              [je_cv_cray_84=no])])
29195-fi
29196-
29197-if test "x$GCC" = "xyes" ; then
29198-  JE_CFLAGS_ADD([-std=gnu11])
29199-  if test "x$je_cv_cflags_added" = "x-std=gnu11" ; then
29200-    AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT], [ ], [ ])
29201-  else
29202-    JE_CFLAGS_ADD([-std=gnu99])
29203-    if test "x$je_cv_cflags_added" = "x-std=gnu99" ; then
29204-      AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT], [ ], [ ])
29205-    fi
29206-  fi
29207-  JE_CFLAGS_ADD([-Werror=unknown-warning-option])
29208-  JE_CFLAGS_ADD([-Wall])
29209-  JE_CFLAGS_ADD([-Wextra])
29210-  JE_CFLAGS_ADD([-Wshorten-64-to-32])
29211-  JE_CFLAGS_ADD([-Wsign-compare])
29212-  JE_CFLAGS_ADD([-Wundef])
29213-  JE_CFLAGS_ADD([-Wno-format-zero-length])
29214-  JE_CFLAGS_ADD([-Wpointer-arith])
29215-  dnl This warning triggers on the use of the universal zero initializer, which
29216-  dnl is a very handy idiom for things like the tcache static initializer (which
29217-  dnl has lots of nested structs).  See the discussion at:
29218-  dnl https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53119
29219-  JE_CFLAGS_ADD([-Wno-missing-braces])
29220-  dnl This one too.
29221-  JE_CFLAGS_ADD([-Wno-missing-field-initializers])
29222-  JE_CFLAGS_ADD([-Wno-missing-attributes])
29223-  JE_CFLAGS_ADD([-pipe])
29224-  JE_CFLAGS_ADD([-g3])
29225-elif test "x$je_cv_msvc" = "xyes" ; then
29226-  CC="$CC -nologo"
29227-  JE_CFLAGS_ADD([-Zi])
29228-  JE_CFLAGS_ADD([-MT])
29229-  JE_CFLAGS_ADD([-W3])
29230-  JE_CFLAGS_ADD([-FS])
29231-  JE_APPEND_VS(CPPFLAGS, -I${srcdir}/include/msvc_compat)
29232-fi
29233-if test "x$je_cv_cray" = "xyes" ; then
29234-  dnl cray compiler 8.4 has an inlining bug
29235-  if test "x$je_cv_cray_84" = "xyes" ; then
29236-    JE_CFLAGS_ADD([-hipa2])
29237-    JE_CFLAGS_ADD([-hnognu])
29238-  fi
29239-  dnl ignore unreachable code warning
29240-  JE_CFLAGS_ADD([-hnomessage=128])
29241-  dnl ignore redefinition of "malloc", "free", etc warning
29242-  JE_CFLAGS_ADD([-hnomessage=1357])
29243-fi
29244-AC_SUBST([CONFIGURE_CFLAGS])
29245-AC_SUBST([SPECIFIED_CFLAGS])
29246-AC_SUBST([EXTRA_CFLAGS])
29247-AC_PROG_CPP
29248-
29249-AC_ARG_ENABLE([cxx],
29250-  [AS_HELP_STRING([--disable-cxx], [Disable C++ integration])],
29251-if test "x$enable_cxx" = "xno" ; then
29252-  enable_cxx="0"
29253-else
29254-  enable_cxx="1"
29255-fi
29256-,
29257-enable_cxx="1"
29258-)
29259-if test "x$enable_cxx" = "x1" ; then
29260-  dnl Require at least c++14, which is the first version to support sized
29261-  dnl deallocation.  C++ support is not compiled otherwise.
29262-  m4_include([m4/ax_cxx_compile_stdcxx.m4])
29263-  AX_CXX_COMPILE_STDCXX([17], [noext], [optional])
29264-  if test "x${HAVE_CXX17}" != "x1"; then
29265-    AX_CXX_COMPILE_STDCXX([14], [noext], [optional])
29266-  fi
29267-  if test "x${HAVE_CXX14}" = "x1" -o "x${HAVE_CXX17}" = "x1"; then
29268-    JE_CXXFLAGS_ADD([-Wall])
29269-    JE_CXXFLAGS_ADD([-Wextra])
29270-    JE_CXXFLAGS_ADD([-g3])
29271-
29272-    SAVED_LIBS="${LIBS}"
29273-    JE_APPEND_VS(LIBS, -lstdc++)
29274-    JE_COMPILABLE([libstdc++ linkage], [
29275-#include <stdlib.h>
29276-], [[
29277-	int *arr = (int *)malloc(sizeof(int) * 42);
29278-	if (arr == NULL)
29279-		return 1;
29280-]], [je_cv_libstdcxx])
29281-    if test "x${je_cv_libstdcxx}" = "xno" ; then
29282-      LIBS="${SAVED_LIBS}"
29283-    fi
29284-  else
29285-    enable_cxx="0"
29286-  fi
29287-fi
29288-if test "x$enable_cxx" = "x1"; then
29289-  AC_DEFINE([JEMALLOC_ENABLE_CXX], [ ], [ ])
29290-fi
29291-AC_SUBST([enable_cxx])
29292-AC_SUBST([CONFIGURE_CXXFLAGS])
29293-AC_SUBST([SPECIFIED_CXXFLAGS])
29294-AC_SUBST([EXTRA_CXXFLAGS])
29295-
29296-AC_C_BIGENDIAN([ac_cv_big_endian=1], [ac_cv_big_endian=0])
29297-if test "x${ac_cv_big_endian}" = "x1" ; then
29298-  AC_DEFINE_UNQUOTED([JEMALLOC_BIG_ENDIAN], [ ], [ ])
29299-fi
29300-
29301-if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then
29302-  JE_APPEND_VS(CPPFLAGS, -I${srcdir}/include/msvc_compat/C99)
29303-fi
29304-
29305-if test "x${je_cv_msvc}" = "xyes" ; then
29306-  LG_SIZEOF_PTR=LG_SIZEOF_PTR_WIN
29307-  AC_MSG_RESULT([Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit])
29308-else
29309-  AC_CHECK_SIZEOF([void *])
29310-  if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
29311-    LG_SIZEOF_PTR=3
29312-  elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then
29313-    LG_SIZEOF_PTR=2
29314-  else
29315-    AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}])
29316-  fi
29317-fi
29318-AC_DEFINE_UNQUOTED([LG_SIZEOF_PTR], [$LG_SIZEOF_PTR], [ ])
29319-
29320-AC_CHECK_SIZEOF([int])
29321-if test "x${ac_cv_sizeof_int}" = "x8" ; then
29322-  LG_SIZEOF_INT=3
29323-elif test "x${ac_cv_sizeof_int}" = "x4" ; then
29324-  LG_SIZEOF_INT=2
29325-else
29326-  AC_MSG_ERROR([Unsupported int size: ${ac_cv_sizeof_int}])
29327-fi
29328-AC_DEFINE_UNQUOTED([LG_SIZEOF_INT], [$LG_SIZEOF_INT], [ ])
29329-
29330-AC_CHECK_SIZEOF([long])
29331-if test "x${ac_cv_sizeof_long}" = "x8" ; then
29332-  LG_SIZEOF_LONG=3
29333-elif test "x${ac_cv_sizeof_long}" = "x4" ; then
29334-  LG_SIZEOF_LONG=2
29335-else
29336-  AC_MSG_ERROR([Unsupported long size: ${ac_cv_sizeof_long}])
29337-fi
29338-AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG], [$LG_SIZEOF_LONG], [ ])
29339-
29340-AC_CHECK_SIZEOF([long long])
29341-if test "x${ac_cv_sizeof_long_long}" = "x8" ; then
29342-  LG_SIZEOF_LONG_LONG=3
29343-elif test "x${ac_cv_sizeof_long_long}" = "x4" ; then
29344-  LG_SIZEOF_LONG_LONG=2
29345-else
29346-  AC_MSG_ERROR([Unsupported long long size: ${ac_cv_sizeof_long_long}])
29347-fi
29348-AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG_LONG], [$LG_SIZEOF_LONG_LONG], [ ])
29349-
29350-AC_CHECK_SIZEOF([intmax_t])
29351-if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then
29352-  LG_SIZEOF_INTMAX_T=4
29353-elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then
29354-  LG_SIZEOF_INTMAX_T=3
29355-elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then
29356-  LG_SIZEOF_INTMAX_T=2
29357-else
29358-  AC_MSG_ERROR([Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}])
29359-fi
29360-AC_DEFINE_UNQUOTED([LG_SIZEOF_INTMAX_T], [$LG_SIZEOF_INTMAX_T], [ ])
29361-
29362-AC_CANONICAL_HOST
29363-dnl CPU-specific settings.
29364-CPU_SPINWAIT=""
29365-case "${host_cpu}" in
29366-  i686|x86_64)
29367-	HAVE_CPU_SPINWAIT=1
29368-	if test "x${je_cv_msvc}" = "xyes" ; then
29369-	    AC_CACHE_VAL([je_cv_pause_msvc],
29370-	      [JE_COMPILABLE([pause instruction MSVC], [],
29371-					[[_mm_pause(); return 0;]],
29372-					[je_cv_pause_msvc])])
29373-	    if test "x${je_cv_pause_msvc}" = "xyes" ; then
29374-		CPU_SPINWAIT='_mm_pause()'
29375-	    fi
29376-	else
29377-	    AC_CACHE_VAL([je_cv_pause],
29378-	      [JE_COMPILABLE([pause instruction], [],
29379-					[[__asm__ volatile("pause"); return 0;]],
29380-					[je_cv_pause])])
29381-	    if test "x${je_cv_pause}" = "xyes" ; then
29382-		CPU_SPINWAIT='__asm__ volatile("pause")'
29383-	    fi
29384-	fi
29385-	;;
29386-  aarch64|arm*)
29387-	HAVE_CPU_SPINWAIT=1
29388-	dnl isb is a better equivalent to the pause instruction on x86.
29389-	AC_CACHE_VAL([je_cv_isb],
29390-	  [JE_COMPILABLE([isb instruction], [],
29391-			[[__asm__ volatile("isb"); return 0;]],
29392-			[je_cv_isb])])
29393-	if test "x${je_cv_isb}" = "xyes" ; then
29394-	    CPU_SPINWAIT='__asm__ volatile("isb")'
29395-	fi
29396-	;;
29397-  *)
29398-	HAVE_CPU_SPINWAIT=0
29399-	;;
29400-esac
29401-AC_DEFINE_UNQUOTED([HAVE_CPU_SPINWAIT], [$HAVE_CPU_SPINWAIT], [ ])
29402-AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT], [ ])
29403-
29404-AC_ARG_WITH([lg_vaddr],
29405-  [AS_HELP_STRING([--with-lg-vaddr=<lg-vaddr>], [Number of significant virtual address bits])],
29406-  [LG_VADDR="$with_lg_vaddr"], [LG_VADDR="detect"])
29407-
29408-case "${host_cpu}" in
29409-  aarch64)
29410-    if test "x$LG_VADDR" = "xdetect"; then
29411-      AC_MSG_CHECKING([number of significant virtual address bits])
29412-      if test "x${LG_SIZEOF_PTR}" = "x2" ; then
29413-        #aarch64 ILP32
29414-        LG_VADDR=32
29415-      else
29416-        #aarch64 LP64
29417-        LG_VADDR=48
29418-      fi
29419-      AC_MSG_RESULT([$LG_VADDR])
29420-    fi
29421-    ;;
29422-  x86_64)
29423-    if test "x$LG_VADDR" = "xdetect"; then
29424-      AC_CACHE_CHECK([number of significant virtual address bits],
29425-                     [je_cv_lg_vaddr],
29426-                     AC_RUN_IFELSE([AC_LANG_PROGRAM(
29427-[[
29428-#include <stdio.h>
29429-#ifdef _WIN32
29430-#include <limits.h>
29431-#include <intrin.h>
29432-typedef unsigned __int32 uint32_t;
29433-#else
29434-#include <stdint.h>
29435-#endif
29436-]], [[
29437-	uint32_t r[[4]];
29438-	uint32_t eax_in = 0x80000008U;
29439-#ifdef _WIN32
29440-	__cpuid((int *)r, (int)eax_in);
29441-#else
29442-	asm volatile ("cpuid"
29443-	    : "=a" (r[[0]]), "=b" (r[[1]]), "=c" (r[[2]]), "=d" (r[[3]])
29444-	    : "a" (eax_in), "c" (0)
29445-	);
29446-#endif
29447-	uint32_t eax_out = r[[0]];
29448-	uint32_t vaddr = ((eax_out & 0x0000ff00U) >> 8);
29449-	FILE *f = fopen("conftest.out", "w");
29450-	if (f == NULL) {
29451-		return 1;
29452-	}
29453-	if (vaddr > (sizeof(void *) << 3)) {
29454-		vaddr = sizeof(void *) << 3;
29455-	}
29456-	fprintf(f, "%u", vaddr);
29457-	fclose(f);
29458-	return 0;
29459-]])],
29460-                   [je_cv_lg_vaddr=`cat conftest.out`],
29461-                   [je_cv_lg_vaddr=error],
29462-                   [je_cv_lg_vaddr=57]))
29463-      if test "x${je_cv_lg_vaddr}" != "x" ; then
29464-        LG_VADDR="${je_cv_lg_vaddr}"
29465-      fi
29466-      if test "x${LG_VADDR}" != "xerror" ; then
29467-        AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR], [ ])
29468-      else
29469-        AC_MSG_ERROR([cannot determine number of significant virtual address bits])
29470-      fi
29471-    fi
29472-    ;;
29473-  *)
29474-    if test "x$LG_VADDR" = "xdetect"; then
29475-      AC_MSG_CHECKING([number of significant virtual address bits])
29476-      if test "x${LG_SIZEOF_PTR}" = "x3" ; then
29477-        LG_VADDR=64
29478-      elif test "x${LG_SIZEOF_PTR}" = "x2" ; then
29479-        LG_VADDR=32
29480-      elif test "x${LG_SIZEOF_PTR}" = "xLG_SIZEOF_PTR_WIN" ; then
29481-        LG_VADDR="(1U << (LG_SIZEOF_PTR_WIN+3))"
29482-      else
29483-        AC_MSG_ERROR([Unsupported lg(pointer size): ${LG_SIZEOF_PTR}])
29484-      fi
29485-      AC_MSG_RESULT([$LG_VADDR])
29486-    fi
29487-    ;;
29488-esac
29489-AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR], [ ])
29490-
29491-LD_PRELOAD_VAR="LD_PRELOAD"
29492-so="so"
29493-importlib="${so}"
29494-o="$ac_objext"
29495-a="a"
29496-exe="$ac_exeext"
29497-libprefix="lib"
29498-link_whole_archive="0"
29499-DSO_LDFLAGS='-shared -Wl,-soname,$(@F)'
29500-RPATH='-Wl,-rpath,$(1)'
29501-SOREV="${so}.${rev}"
29502-PIC_CFLAGS='-fPIC -DPIC'
29503-CTARGET='-o $@'
29504-LDTARGET='-o $@'
29505-TEST_LD_MODE=
29506-EXTRA_LDFLAGS=
29507-ARFLAGS='crus'
29508-AROUT=' $@'
29509-CC_MM=1
29510-
29511-if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
29512-  TEST_LD_MODE='-dynamic'
29513-fi
29514-
29515-if test "x${je_cv_cray}" = "xyes" ; then
29516-  CC_MM=
29517-fi
29518-
29519-AN_MAKEVAR([AR], [AC_PROG_AR])
29520-AN_PROGRAM([ar], [AC_PROG_AR])
29521-AC_DEFUN([AC_PROG_AR], [AC_CHECK_TOOL(AR, ar, :)])
29522-AC_PROG_AR
29523-
29524-AN_MAKEVAR([NM], [AC_PROG_NM])
29525-AN_PROGRAM([nm], [AC_PROG_NM])
29526-AC_DEFUN([AC_PROG_NM], [AC_CHECK_TOOL(NM, nm, :)])
29527-AC_PROG_NM
29528-
29529-AC_PROG_AWK
29530-
29531-dnl ============================================================================
29532-dnl jemalloc version.
29533-dnl
29534-
29535-AC_ARG_WITH([version],
29536-  [AS_HELP_STRING([--with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid>],
29537-   [Version string])],
29538-  [
29539-    echo "${with_version}" | grep ['^[0-9]\+\.[0-9]\+\.[0-9]\+-[0-9]\+-g[0-9a-f]\+$'] 2>&1 1>/dev/null
29540-    if test $? -eq 0 ; then
29541-      echo "$with_version" > "${objroot}VERSION"
29542-    else
29543-      echo "${with_version}" | grep ['^VERSION$'] 2>&1 1>/dev/null
29544-      if test $? -ne 0 ; then
29545-        AC_MSG_ERROR([${with_version} does not match <major>.<minor>.<bugfix>-<nrev>-g<gid> or VERSION])
29546-      fi
29547-    fi
29548-  ], [
29549-    dnl Set VERSION if source directory is inside a git repository.
29550-    if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
29551-      dnl Pattern globs aren't powerful enough to match both single- and
29552-      dnl double-digit version numbers, so iterate over patterns to support up
29553-      dnl to version 99.99.99 without any accidental matches.
29554-      for pattern in ['[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \
29555-                     '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \
29556-                     '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \
29557-                     '[0-9][0-9].[0-9][0-9].[0-9]' \
29558-                     '[0-9][0-9].[0-9][0-9].[0-9][0-9]']; do
29559-        (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null
29560-        if test $? -eq 0 ; then
29561-          mv "${objroot}VERSION.tmp" "${objroot}VERSION"
29562-          break
29563-        fi
29564-      done
29565-    fi
29566-    rm -f "${objroot}VERSION.tmp"
29567-  ])
29568-
29569-if test ! -e "${objroot}VERSION" ; then
29570-  if test ! -e "${srcroot}VERSION" ; then
29571-    AC_MSG_RESULT(
29572-      [Missing VERSION file, and unable to generate it; creating bogus VERSION])
29573-    echo "0.0.0-0-g000000missing_version_try_git_fetch_tags" > "${objroot}VERSION"
29574-  else
29575-    cp ${srcroot}VERSION ${objroot}VERSION
29576-  fi
29577-fi
29578-jemalloc_version=`cat "${objroot}VERSION"`
29579-jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]1}'`
29580-jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]2}'`
29581-jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]3}'`
29582-jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]4}'`
29583-jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]5}'`
29584-AC_SUBST([jemalloc_version])
29585-AC_SUBST([jemalloc_version_major])
29586-AC_SUBST([jemalloc_version_minor])
29587-AC_SUBST([jemalloc_version_bugfix])
29588-AC_SUBST([jemalloc_version_nrev])
29589-AC_SUBST([jemalloc_version_gid])
29590-
29591-dnl Platform-specific settings.  abi and RPATH can probably be determined
29592-dnl programmatically, but doing so is error-prone, which makes it generally
29593-dnl not worth the trouble.
29594-dnl
29595-dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
29596-dnl definitions need to be seen before any headers are included, which is a pain
29597-dnl to make happen otherwise.
29598-default_retain="0"
29599-zero_realloc_default_free="0"
29600-maps_coalesce="1"
29601-DUMP_SYMS="${NM} -a"
29602-SYM_PREFIX=""
29603-case "${host}" in
29604-  *-*-darwin* | *-*-ios*)
29605-	abi="macho"
29606-	RPATH=""
29607-	LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES"
29608-	so="dylib"
29609-	importlib="${so}"
29610-	force_tls="0"
29611-	DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)'
29612-	SOREV="${rev}.${so}"
29613-	sbrk_deprecated="1"
29614-	SYM_PREFIX="_"
29615-	;;
29616-  *-*-freebsd*)
29617-	JE_APPEND_VS(CPPFLAGS, -D_BSD_SOURCE)
29618-	abi="elf"
29619-	AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ], [ ])
29620-	force_lazy_lock="1"
29621-	;;
29622-  *-*-dragonfly*)
29623-	abi="elf"
29624-	;;
29625-  *-*-openbsd*)
29626-	abi="elf"
29627-	force_tls="0"
29628-	;;
29629-  *-*-bitrig*)
29630-	abi="elf"
29631-	;;
29632-  *-*-linux-android*)
29633-	dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
29634-	JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
29635-	abi="elf"
29636-	glibc="0"
29637-	AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS], [ ], [ ])
29638-	AC_DEFINE([JEMALLOC_HAS_ALLOCA_H], [ ], [ ])
29639-	AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ], [ ])
29640-	AC_DEFINE([JEMALLOC_THREADED_INIT], [ ], [ ])
29641-	AC_DEFINE([JEMALLOC_C11_ATOMICS], [ ], [ ])
29642-	force_tls="0"
29643-	if test "${LG_SIZEOF_PTR}" = "3"; then
29644-	  default_retain="1"
29645-	fi
29646-	zero_realloc_default_free="1"
29647-	;;
29648-  *-*-linux*)
29649-	dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
29650-	JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
29651-	abi="elf"
29652-	glibc="1"
29653-	AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS], [ ], [ ])
29654-	AC_DEFINE([JEMALLOC_HAS_ALLOCA_H], [ ], [ ])
29655-	AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ], [ ])
29656-	AC_DEFINE([JEMALLOC_THREADED_INIT], [ ], [ ])
29657-	AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ], [ ])
29658-	if test "${LG_SIZEOF_PTR}" = "3"; then
29659-	  default_retain="1"
29660-	fi
29661-	zero_realloc_default_free="1"
29662-	;;
29663-  *-*-kfreebsd*)
29664-	dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
29665-	JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
29666-	abi="elf"
29667-	AC_DEFINE([JEMALLOC_HAS_ALLOCA_H], [ ], [ ])
29668-	AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ], [ ])
29669-	AC_DEFINE([JEMALLOC_THREADED_INIT], [ ], [ ])
29670-	AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ], [ ])
29671-	;;
29672-  *-*-netbsd*)
29673-	AC_MSG_CHECKING([ABI])
29674-        AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
29675-[[#ifdef __ELF__
29676-/* ELF */
29677-#else
29678-#error aout
29679-#endif
29680-]])],
29681-                          [abi="elf"],
29682-                          [abi="aout"])
29683-	AC_MSG_RESULT([$abi])
29684-	;;
29685-  *-*-solaris2*)
29686-	abi="elf"
29687-	RPATH='-Wl,-R,$(1)'
29688-	dnl Solaris needs this for sigwait().
29689-	JE_APPEND_VS(CPPFLAGS, -D_POSIX_PTHREAD_SEMANTICS)
29690-	JE_APPEND_VS(LIBS, -lposix4 -lsocket -lnsl)
29691-	;;
29692-  *-ibm-aix*)
29693-	if test "${LG_SIZEOF_PTR}" = "3"; then
29694-	  dnl 64bit AIX
29695-	  LD_PRELOAD_VAR="LDR_PRELOAD64"
29696-	else
29697-	  dnl 32bit AIX
29698-	  LD_PRELOAD_VAR="LDR_PRELOAD"
29699-	fi
29700-	abi="xcoff"
29701-	;;
29702-  *-*-mingw* | *-*-cygwin*)
29703-	abi="pecoff"
29704-	force_tls="0"
29705-	maps_coalesce="0"
29706-	RPATH=""
29707-	so="dll"
29708-	if test "x$je_cv_msvc" = "xyes" ; then
29709-	  importlib="lib"
29710-	  DSO_LDFLAGS="-LD"
29711-	  EXTRA_LDFLAGS="-link -DEBUG"
29712-	  CTARGET='-Fo$@'
29713-	  LDTARGET='-Fe$@'
29714-	  AR='lib'
29715-	  ARFLAGS='-nologo -out:'
29716-	  AROUT='$@'
29717-	  CC_MM=
29718-        else
29719-	  importlib="${so}"
29720-	  DSO_LDFLAGS="-shared"
29721-	  link_whole_archive="1"
29722-	fi
29723-	case "${host}" in
29724-	  *-*-cygwin*)
29725-	    DUMP_SYMS="dumpbin /SYMBOLS"
29726-	    ;;
29727-	  *)
29728-	    ;;
29729-	esac
29730-	a="lib"
29731-	libprefix=""
29732-	SOREV="${so}"
29733-	PIC_CFLAGS=""
29734-	if test "${LG_SIZEOF_PTR}" = "3"; then
29735-	  default_retain="1"
29736-	fi
29737-	zero_realloc_default_free="1"
29738-	;;
29739-  *-*-nto-qnx)
29740-	abi="elf"
29741-	force_tls="0"
29742-	AC_DEFINE([JEMALLOC_HAS_ALLOCA_H], [ ], [ ])
29743-	;;
29744-  *)
29745-	AC_MSG_RESULT([Unsupported operating system: ${host}])
29746-	abi="elf"
29747-	;;
29748-esac
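As a hedged illustration of why these feature-test macros are appended to CPPFLAGS (so they precede every header) rather than emitted via AC_DEFINE into a header that is included later: the following standalone C sketch compiles only because _GNU_SOURCE appears before the first libc include, which is what exposes syscall(2) and secure_getenv(3) on glibc. The program is illustrative and assumes glibc; it is not part of jemalloc.

    #define _GNU_SOURCE            /* must come before any libc header */
    #include <stdlib.h>            /* secure_getenv() is declared only under _GNU_SOURCE */
    #include <unistd.h>            /* syscall() likewise */
    #include <sys/syscall.h>

    int main(void) {
    	const char *home = secure_getenv("HOME");
    	syscall(SYS_write, 2, "ok\n", 3);
    	return home == NULL;
    }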
29749-
29750-JEMALLOC_USABLE_SIZE_CONST=const
29751-AC_CHECK_HEADERS([malloc.h], [
29752-  AC_MSG_CHECKING([whether malloc_usable_size definition can use const argument])
29753-  AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
29754-    [#include <malloc.h>
29755-     #include <stddef.h>
29756-    size_t malloc_usable_size(const void *ptr);
29757-    ],
29758-    [])],[
29759-                AC_MSG_RESULT([yes])
29760-         ],[
29761-                JEMALLOC_USABLE_SIZE_CONST=
29762-                AC_MSG_RESULT([no])
29763-         ])
29764-])
29765-AC_DEFINE_UNQUOTED([JEMALLOC_USABLE_SIZE_CONST], [$JEMALLOC_USABLE_SIZE_CONST], [ ])
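A minimal sketch (assuming the probe above succeeded, so the macro expands to const) of how the substituted JEMALLOC_USABLE_SIZE_CONST is typically consumed: it lets a prototype match whatever constness the platform's <malloc.h> uses, avoiding a conflicting-declaration error. The define below stands in for the configure result.

    #include <stddef.h>

    /* Value chosen by configure; empty when the platform prototype is not
     * const-qualified. */
    #define JEMALLOC_USABLE_SIZE_CONST const

    size_t malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr);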
29766-AC_SUBST([abi])
29767-AC_SUBST([RPATH])
29768-AC_SUBST([LD_PRELOAD_VAR])
29769-AC_SUBST([so])
29770-AC_SUBST([importlib])
29771-AC_SUBST([o])
29772-AC_SUBST([a])
29773-AC_SUBST([exe])
29774-AC_SUBST([libprefix])
29775-AC_SUBST([link_whole_archive])
29776-AC_SUBST([DSO_LDFLAGS])
29777-AC_SUBST([EXTRA_LDFLAGS])
29778-AC_SUBST([SOREV])
29779-AC_SUBST([PIC_CFLAGS])
29780-AC_SUBST([CTARGET])
29781-AC_SUBST([LDTARGET])
29782-AC_SUBST([TEST_LD_MODE])
29783-AC_SUBST([MKLIB])
29784-AC_SUBST([ARFLAGS])
29785-AC_SUBST([AROUT])
29786-AC_SUBST([DUMP_SYMS])
29787-AC_SUBST([CC_MM])
29788-
29789-dnl Determine whether libm must be linked to use e.g. log(3).
29790-AC_SEARCH_LIBS([log], [m], , [AC_MSG_ERROR([Missing math functions])])
29791-if test "x$ac_cv_search_log" != "xnone required" ; then
29792-  LM="$ac_cv_search_log"
29793-else
29794-  LM=
29795-fi
29796-AC_SUBST(LM)
29797-
29798-JE_COMPILABLE([__attribute__ syntax],
29799-              [static __attribute__((unused)) void foo(void){}],
29800-              [],
29801-              [je_cv_attribute])
29802-if test "x${je_cv_attribute}" = "xyes" ; then
29803-  AC_DEFINE([JEMALLOC_HAVE_ATTR], [ ], [ ])
29804-  if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then
29805-    JE_CFLAGS_ADD([-fvisibility=hidden])
29806-    JE_CXXFLAGS_ADD([-fvisibility=hidden])
29807-  fi
29808-fi
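A hedged sketch of the effect of pairing attribute support with -fvisibility=hidden on GCC/ELF: symbols default to hidden, so only those explicitly marked default-visible remain exported from the shared object. je_example_export and internal_detail are hypothetical names used purely for illustration.

    /* Compile with: cc -fvisibility=hidden -fPIC -shared example.c -o libexample.so */
    int internal_detail(void) {     /* hidden by -fvisibility=hidden; not exported */
    	return 1;
    }

    __attribute__((visibility("default")))
    int je_example_export(void) {   /* explicitly re-exported despite hidden default */
    	return internal_detail();
    }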
29809-dnl Check for tls_model attribute support (clang 3.0 still lacks support).
29810-JE_CFLAGS_SAVE()
29811-JE_CFLAGS_ADD([-Werror])
29812-JE_CFLAGS_ADD([-herror_on_warning])
29813-JE_COMPILABLE([tls_model attribute], [],
29814-              [static __thread int
29815-               __attribute__((tls_model("initial-exec"), unused)) foo;
29816-               foo = 0;],
29817-              [je_cv_tls_model])
29818-JE_CFLAGS_RESTORE()
29819-dnl (Setting of JEMALLOC_TLS_MODEL is done later, after we've checked for
29820-dnl --disable-initial-exec-tls)
29821-
29822-dnl Check for alloc_size attribute support.
29823-JE_CFLAGS_SAVE()
29824-JE_CFLAGS_ADD([-Werror])
29825-JE_CFLAGS_ADD([-herror_on_warning])
29826-JE_COMPILABLE([alloc_size attribute], [#include <stdlib.h>],
29827-              [void *foo(size_t size) __attribute__((alloc_size(1)));],
29828-              [je_cv_alloc_size])
29829-JE_CFLAGS_RESTORE()
29830-if test "x${je_cv_alloc_size}" = "xyes" ; then
29831-  AC_DEFINE([JEMALLOC_HAVE_ATTR_ALLOC_SIZE], [ ], [ ])
29832-fi
29833-dnl Check for format(gnu_printf, ...) attribute support.
29834-JE_CFLAGS_SAVE()
29835-JE_CFLAGS_ADD([-Werror])
29836-JE_CFLAGS_ADD([-herror_on_warning])
29837-JE_COMPILABLE([format(gnu_printf, ...) attribute], [#include <stdlib.h>],
29838-              [void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));],
29839-              [je_cv_format_gnu_printf])
29840-JE_CFLAGS_RESTORE()
29841-if test "x${je_cv_format_gnu_printf}" = "xyes" ; then
29842-  AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF], [ ], [ ])
29843-fi
29844-dnl Check for format(printf, ...) attribute support.
29845-JE_CFLAGS_SAVE()
29846-JE_CFLAGS_ADD([-Werror])
29847-JE_CFLAGS_ADD([-herror_on_warning])
29848-JE_COMPILABLE([format(printf, ...) attribute], [#include <stdlib.h>],
29849-              [void *foo(const char *format, ...) __attribute__((format(printf, 1, 2)));],
29850-              [je_cv_format_printf])
29851-JE_CFLAGS_RESTORE()
29852-if test "x${je_cv_format_printf}" = "xyes" ; then
29853-  AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_PRINTF], [ ], [ ])
29854-fi
29855-
29856-dnl Check for format_arg(...) attribute support.
29857-JE_CFLAGS_SAVE()
29858-JE_CFLAGS_ADD([-Werror])
29859-JE_CFLAGS_ADD([-herror_on_warning])
29860-JE_COMPILABLE([format_arg(...) attribute], [#include <stdlib.h>],
29861-              [const char * __attribute__((__format_arg__(1))) foo(const char *format);],
29862-              [je_cv_format_arg])
29863-JE_CFLAGS_RESTORE()
29864-if test "x${je_cv_format_arg}" = "xyes" ; then
29865-  AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_ARG], [ ], [ ])
29866-fi
29867-
29868-dnl Check for fallthrough attribute support.
29869-JE_CFLAGS_SAVE()
29870-JE_CFLAGS_ADD([-Wimplicit-fallthrough])
29871-JE_COMPILABLE([fallthrough attribute],
29872-              [#if !__has_attribute(fallthrough)
29873-               #error "foo"
29874-               #endif],
29875-              [int x = 0;
29876-               switch (x) {
29877-               case 0: __attribute__((__fallthrough__));
29878-               case 1: return 1;
29879-               }],
29880-              [je_cv_fallthrough])
29881-JE_CFLAGS_RESTORE()
29882-if test "x${je_cv_fallthrough}" = "xyes" ; then
29883-  AC_DEFINE([JEMALLOC_HAVE_ATTR_FALLTHROUGH], [ ], [ ])
29884-  JE_CFLAGS_ADD([-Wimplicit-fallthrough])
29885-  JE_CXXFLAGS_ADD([-Wimplicit-fallthrough])
29886-fi
29887-
29888-dnl Check for cold attribute support.
29889-JE_CFLAGS_SAVE()
29890-JE_CFLAGS_ADD([-Werror])
29891-JE_CFLAGS_ADD([-herror_on_warning])
29892-JE_COMPILABLE([cold attribute], [],
29893-              [__attribute__((__cold__)) void foo();],
29894-              [je_cv_cold])
29895-JE_CFLAGS_RESTORE()
29896-if test "x${je_cv_cold}" = "xyes" ; then
29897-  AC_DEFINE([JEMALLOC_HAVE_ATTR_COLD], [ ], [ ])
29898-fi
29899-
29900-dnl Check for VM_MAKE_TAG for mmap support.
29901-JE_COMPILABLE([vm_make_tag],
29902-	      [#include <sys/mman.h>
29903-	       #include <mach/vm_statistics.h>],
29904-	      [void *p;
29905-	       p = mmap(0, 16, PROT_READ, MAP_ANON|MAP_PRIVATE, VM_MAKE_TAG(1), 0);
29906-	       munmap(p, 16);],
29907-	      [je_cv_vm_make_tag])
29908-if test "x${je_cv_vm_make_tag}" = "xyes" ; then
29909-  AC_DEFINE([JEMALLOC_HAVE_VM_MAKE_TAG], [ ], [ ])
29910-fi
29911-
29912-dnl Support optional additions to rpath.
29913-AC_ARG_WITH([rpath],
29914-  [AS_HELP_STRING([--with-rpath=<rpath>], [Colon-separated rpath (ELF systems only)])],
29915-if test "x$with_rpath" = "xno" ; then
29916-  RPATH_EXTRA=
29917-else
29918-  RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`"
29919-fi,
29920-  RPATH_EXTRA=
29921-)
29922-AC_SUBST([RPATH_EXTRA])
29923-
29924-dnl Disable rules that do automatic regeneration of configure output by default.
29925-AC_ARG_ENABLE([autogen],
29926-  [AS_HELP_STRING([--enable-autogen], [Automatically regenerate configure output])],
29927-if test "x$enable_autogen" = "xno" ; then
29928-  enable_autogen="0"
29929-else
29930-  enable_autogen="1"
29931-fi
29932-,
29933-enable_autogen="0"
29934-)
29935-AC_SUBST([enable_autogen])
29936-
29937-AC_PROG_INSTALL
29938-AC_PROG_RANLIB
29939-AC_PATH_PROG([LD], [ld], [false], [$PATH])
29940-AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH])
29941-
29942-dnl Enable documentation
29943-AC_ARG_ENABLE([doc],
29944-	      [AS_HELP_STRING([--enable-doc], [Build documentation])],
29945-if test "x$enable_doc" = "xno" ; then
29946-  enable_doc="0"
29947-else
29948-  enable_doc="1"
29949-fi
29950-,
29951-enable_doc="1"
29952-)
29953-AC_SUBST([enable_doc])
29954-
29955-dnl Enable shared libs
29956-AC_ARG_ENABLE([shared],
29957-  [AS_HELP_STRING([--enable-shared], [Build shared libraries])],
29958-if test "x$enable_shared" = "xno" ; then
29959-  enable_shared="0"
29960-else
29961-  enable_shared="1"
29962-fi
29963-,
29964-enable_shared="1"
29965-)
29966-AC_SUBST([enable_shared])
29967-
29968-dnl Enable static libs
29969-AC_ARG_ENABLE([static],
29970-  [AS_HELP_STRING([--enable-static], [Build static libraries])],
29971-if test "x$enable_static" = "xno" ; then
29972-  enable_static="0"
29973-else
29974-  enable_static="1"
29975-fi
29976-,
29977-enable_static="1"
29978-)
29979-AC_SUBST([enable_static])
29980-
29981-if test "$enable_shared$enable_static" = "00" ; then
29982-  AC_MSG_ERROR([Please enable one of shared or static builds])
29983-fi
29984-
29985-dnl Perform no name mangling by default.
29986-AC_ARG_WITH([mangling],
29987-  [AS_HELP_STRING([--with-mangling=<map>], [Mangle symbols in <map>])],
29988-  [mangling_map="$with_mangling"], [mangling_map=""])
29989-
29990-dnl Do not prefix public APIs by default.
29991-AC_ARG_WITH([jemalloc_prefix],
29992-  [AS_HELP_STRING([--with-jemalloc-prefix=<prefix>], [Prefix to prepend to all public APIs])],
29993-  [JEMALLOC_PREFIX="$with_jemalloc_prefix"],
29994-  [if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then
29995-  JEMALLOC_PREFIX=""
29996-else
29997-  JEMALLOC_PREFIX="je_"
29998-fi]
29999-)
30000-if test "x$JEMALLOC_PREFIX" = "x" ; then
30001-  AC_DEFINE([JEMALLOC_IS_MALLOC], [ ], [ ])
30002-else
30003-  JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"`
30004-  AC_DEFINE_UNQUOTED([JEMALLOC_PREFIX], ["$JEMALLOC_PREFIX"], [ ])
30005-  AC_DEFINE_UNQUOTED([JEMALLOC_CPREFIX], ["$JEMALLOC_CPREFIX"], [ ])
30006-fi
30007-AC_SUBST([JEMALLOC_PREFIX])
30008-AC_SUBST([JEMALLOC_CPREFIX])
30009-
30010-AC_ARG_WITH([export],
30011-  [AS_HELP_STRING([--without-export], [disable exporting jemalloc public APIs])],
30012-  [if test "x$with_export" = "xno"; then
30013-  AC_DEFINE([JEMALLOC_EXPORT],[], [ ])
30014-fi]
30015-)
30016-
30017-public_syms="aligned_alloc calloc dallocx free mallctl mallctlbymib mallctlnametomib malloc malloc_conf malloc_conf_2_conf_harder malloc_message malloc_stats_print malloc_usable_size mallocx smallocx_${jemalloc_version_gid} nallocx posix_memalign rallocx realloc sallocx sdallocx xallocx"
30018-dnl Check for additional platform-specific public API functions.
30019-AC_CHECK_FUNC([memalign],
30020-	      [AC_DEFINE([JEMALLOC_OVERRIDE_MEMALIGN], [ ], [ ])
30021-	       public_syms="${public_syms} memalign"])
30022-AC_CHECK_FUNC([valloc],
30023-	      [AC_DEFINE([JEMALLOC_OVERRIDE_VALLOC], [ ], [ ])
30024-	       public_syms="${public_syms} valloc"])
30025-AC_CHECK_FUNC([malloc_size],
30026-	      [AC_DEFINE([JEMALLOC_HAVE_MALLOC_SIZE], [ ], [ ])
30027-	       public_syms="${public_syms} malloc_size"])
30028-
30029-dnl Check for allocator-related functions that should be wrapped.
30030-wrap_syms=
30031-if test "x${JEMALLOC_PREFIX}" = "x" ; then
30032-  AC_CHECK_FUNC([__libc_calloc],
30033-		[AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_CALLOC], [ ], [ ])
30034-		 wrap_syms="${wrap_syms} __libc_calloc"])
30035-  AC_CHECK_FUNC([__libc_free],
30036-		[AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_FREE], [ ], [ ])
30037-		 wrap_syms="${wrap_syms} __libc_free"])
30038-  AC_CHECK_FUNC([__libc_malloc],
30039-		[AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_MALLOC], [ ], [ ])
30040-		 wrap_syms="${wrap_syms} __libc_malloc"])
30041-  AC_CHECK_FUNC([__libc_memalign],
30042-		[AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_MEMALIGN], [ ], [ ])
30043-		 wrap_syms="${wrap_syms} __libc_memalign"])
30044-  AC_CHECK_FUNC([__libc_realloc],
30045-		[AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_REALLOC], [ ], [ ])
30046-		 wrap_syms="${wrap_syms} __libc_realloc"])
30047-  AC_CHECK_FUNC([__libc_valloc],
30048-		[AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_VALLOC], [ ], [ ])
30049-		 wrap_syms="${wrap_syms} __libc_valloc"])
30050-  AC_CHECK_FUNC([__posix_memalign],
30051-		[AC_DEFINE([JEMALLOC_OVERRIDE___POSIX_MEMALIGN], [ ], [ ])
30052-		 wrap_syms="${wrap_syms} __posix_memalign"])
30053-fi
30054-
30055-case "${host}" in
30056-  *-*-mingw* | *-*-cygwin*)
30057-    wrap_syms="${wrap_syms} tls_callback"
30058-    ;;
30059-  *)
30060-    ;;
30061-esac
30062-
30063-dnl Mangle library-private APIs.
30064-AC_ARG_WITH([private_namespace],
30065-  [AS_HELP_STRING([--with-private-namespace=<prefix>], [Prefix to prepend to all library-private APIs])],
30066-  [JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_"],
30067-  [JEMALLOC_PRIVATE_NAMESPACE="je_"]
30068-)
30069-AC_DEFINE_UNQUOTED([JEMALLOC_PRIVATE_NAMESPACE], [$JEMALLOC_PRIVATE_NAMESPACE], [ ])
30070-private_namespace="$JEMALLOC_PRIVATE_NAMESPACE"
30071-AC_SUBST([private_namespace])
30072-
30073-dnl Do not add suffix to installed files by default.
30074-AC_ARG_WITH([install_suffix],
30075-  [AS_HELP_STRING([--with-install-suffix=<suffix>], [Suffix to append to all installed files])],
30076-  [case "$with_install_suffix" in
30077-   *\ * ) AC_MSG_ERROR([Install suffix should not contain spaces]) ;;
30078-   * ) INSTALL_SUFFIX="$with_install_suffix" ;;
30079-esac],
30080-  [INSTALL_SUFFIX=]
30081-)
30082-install_suffix="$INSTALL_SUFFIX"
30083-AC_SUBST([install_suffix])
30084-
30085-dnl Specify default malloc_conf.
30086-AC_ARG_WITH([malloc_conf],
30087-  [AS_HELP_STRING([--with-malloc-conf=<malloc_conf>], [config.malloc_conf options string])],
30088-  [JEMALLOC_CONFIG_MALLOC_CONF="$with_malloc_conf"],
30089-  [JEMALLOC_CONFIG_MALLOC_CONF=""]
30090-)
30091-config_malloc_conf="$JEMALLOC_CONFIG_MALLOC_CONF"
30092-AC_DEFINE_UNQUOTED([JEMALLOC_CONFIG_MALLOC_CONF], ["$config_malloc_conf"], [ ])
30093-
30094-dnl Substitute @je_@ in jemalloc_protos.h.in, primarily to make generation of
30095-dnl jemalloc_protos_jet.h easy.
30096-je_="je_"
30097-AC_SUBST([je_])
30098-
30099-cfgoutputs_in="Makefile.in"
30100-cfgoutputs_in="${cfgoutputs_in} jemalloc.pc.in"
30101-cfgoutputs_in="${cfgoutputs_in} doc/html.xsl.in"
30102-cfgoutputs_in="${cfgoutputs_in} doc/manpages.xsl.in"
30103-cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in"
30104-cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in"
30105-cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in"
30106-cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in"
30107-cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_preamble.h.in"
30108-cfgoutputs_in="${cfgoutputs_in} test/test.sh.in"
30109-cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in"
30110-
30111-cfgoutputs_out="Makefile"
30112-cfgoutputs_out="${cfgoutputs_out} jemalloc.pc"
30113-cfgoutputs_out="${cfgoutputs_out} doc/html.xsl"
30114-cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl"
30115-cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml"
30116-cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h"
30117-cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h"
30118-cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h"
30119-cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_preamble.h"
30120-cfgoutputs_out="${cfgoutputs_out} test/test.sh"
30121-cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h"
30122-
30123-cfgoutputs_tup="Makefile"
30124-cfgoutputs_tup="${cfgoutputs_tup} jemalloc.pc:jemalloc.pc.in"
30125-cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in"
30126-cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in"
30127-cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in"
30128-cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in"
30129-cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in"
30130-cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in"
30131-cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_preamble.h"
30132-cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in"
30133-cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in"
30134-
30135-cfghdrs_in="include/jemalloc/jemalloc_defs.h.in"
30136-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in"
30137-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.sh"
30138-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh"
30139-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh"
30140-cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh"
30141-cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_rename.sh"
30142-cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_mangle.sh"
30143-cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc.sh"
30144-cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in"
30145-
30146-cfghdrs_out="include/jemalloc/jemalloc_defs.h"
30147-cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h"
30148-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols.awk"
30149-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols_jet.awk"
30150-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt"
30151-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h"
30152-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h"
30153-cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h"
30154-cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h"
30155-cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h"
30156-cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h"
30157-cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h"
30158-cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h"
30159-
30160-cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in"
30161-cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in"
30162-cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in"
30163-
30164-dnl ============================================================================
30165-dnl jemalloc build options.
30166-dnl
30167-
30168-dnl Do not compile with debugging by default.
30169-AC_ARG_ENABLE([debug],
30170-  [AS_HELP_STRING([--enable-debug],
30171-                  [Build debugging code])],
30172-[if test "x$enable_debug" = "xno" ; then
30173-  enable_debug="0"
30174-else
30175-  enable_debug="1"
30176-fi
30177-],
30178-[enable_debug="0"]
30179-)
30180-if test "x$enable_debug" = "x1" ; then
30181-  AC_DEFINE([JEMALLOC_DEBUG], [ ], [ ])
30182-fi
30183-AC_SUBST([enable_debug])
30184-
30185-dnl Only optimize if not debugging.
30186-if test "x$enable_debug" = "x0" ; then
30187-  if test "x$GCC" = "xyes" ; then
30188-    JE_CFLAGS_ADD([-O3])
30189-    JE_CXXFLAGS_ADD([-O3])
30190-    JE_CFLAGS_ADD([-funroll-loops])
30191-  elif test "x$je_cv_msvc" = "xyes" ; then
30192-    JE_CFLAGS_ADD([-O2])
30193-    JE_CXXFLAGS_ADD([-O2])
30194-  else
30195-    JE_CFLAGS_ADD([-O])
30196-    JE_CXXFLAGS_ADD([-O])
30197-  fi
30198-fi
30199-
30200-dnl Enable statistics calculation by default.
30201-AC_ARG_ENABLE([stats],
30202-  [AS_HELP_STRING([--disable-stats],
30203-                  [Disable statistics calculation/reporting])],
30204-[if test "x$enable_stats" = "xno" ; then
30205-  enable_stats="0"
30206-else
30207-  enable_stats="1"
30208-fi
30209-],
30210-[enable_stats="1"]
30211-)
30212-if test "x$enable_stats" = "x1" ; then
30213-  AC_DEFINE([JEMALLOC_STATS], [ ], [ ])
30214-fi
30215-AC_SUBST([enable_stats])
30216-
30217-dnl Do not enable smallocx by default.
30218-AC_ARG_ENABLE([experimental_smallocx],
30219-  [AS_HELP_STRING([--enable-experimental-smallocx], [Enable experimental smallocx API])],
30220-[if test "x$enable_experimental_smallocx" = "xno" ; then
30221-enable_experimental_smallocx="0"
30222-else
30223-enable_experimental_smallocx="1"
30224-fi
30225-],
30226-[enable_experimental_smallocx="0"]
30227-)
30228-if test "x$enable_experimental_smallocx" = "x1" ; then
30229-  AC_DEFINE([JEMALLOC_EXPERIMENTAL_SMALLOCX_API], [ ], [ ])
30230-fi
30231-AC_SUBST([enable_experimental_smallocx])
30232-
30233-dnl Do not enable profiling by default.
30234-AC_ARG_ENABLE([prof],
30235-  [AS_HELP_STRING([--enable-prof], [Enable allocation profiling])],
30236-[if test "x$enable_prof" = "xno" ; then
30237-  enable_prof="0"
30238-else
30239-  enable_prof="1"
30240-fi
30241-],
30242-[enable_prof="0"]
30243-)
30244-if test "x$enable_prof" = "x1" ; then
30245-  backtrace_method=""
30246-else
30247-  backtrace_method="N/A"
30248-fi
30249-
30250-AC_ARG_ENABLE([prof-libunwind],
30251-  [AS_HELP_STRING([--enable-prof-libunwind], [Use libunwind for backtracing])],
30252-[if test "x$enable_prof_libunwind" = "xno" ; then
30253-  enable_prof_libunwind="0"
30254-else
30255-  enable_prof_libunwind="1"
30256-  if test "x$enable_prof" = "x0" ; then
30257-    AC_MSG_ERROR([--enable-prof-libunwind should only be used with --enable-prof])
30258-  fi
30259-fi
30260-],
30261-[enable_prof_libunwind="0"]
30262-)
30263-AC_ARG_WITH([static_libunwind],
30264-  [AS_HELP_STRING([--with-static-libunwind=<libunwind.a>],
30265-  [Path to static libunwind library; use it rather than dynamically linking])],
30266-if test "x$with_static_libunwind" = "xno" ; then
30267-  LUNWIND="-lunwind"
30268-else
30269-  if test ! -f "$with_static_libunwind" ; then
30270-    AC_MSG_ERROR([Static libunwind not found: $with_static_libunwind])
30271-  fi
30272-  LUNWIND="$with_static_libunwind"
30273-fi,
30274-  LUNWIND="-lunwind"
30275-)
30276-if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then
30277-  AC_CHECK_HEADERS([libunwind.h], , [enable_prof_libunwind="0"])
30278-  if test "x$LUNWIND" = "x-lunwind" ; then
30279-    AC_CHECK_LIB([unwind], [unw_backtrace], [JE_APPEND_VS(LIBS, $LUNWIND)],
30280-                 [enable_prof_libunwind="0"])
30281-  else
30282-    JE_APPEND_VS(LIBS, $LUNWIND)
30283-  fi
30284-  if test "x${enable_prof_libunwind}" = "x1" ; then
30285-    backtrace_method="libunwind"
30286-    AC_DEFINE([JEMALLOC_PROF_LIBUNWIND], [ ], [ ])
30287-  fi
30288-fi
30289-
30290-AC_ARG_ENABLE([prof-libgcc],
30291-  [AS_HELP_STRING([--disable-prof-libgcc],
30292-  [Do not use libgcc for backtracing])],
30293-[if test "x$enable_prof_libgcc" = "xno" ; then
30294-  enable_prof_libgcc="0"
30295-else
30296-  enable_prof_libgcc="1"
30297-fi
30298-],
30299-[enable_prof_libgcc="1"]
30300-)
30301-if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \
30302-     -a "x$GCC" = "xyes" ; then
30303-  AC_CHECK_HEADERS([unwind.h], , [enable_prof_libgcc="0"])
30304-  if test "x${enable_prof_libgcc}" = "x1" ; then
30305-    AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [JE_APPEND_VS(LIBS, -lgcc)], [enable_prof_libgcc="0"])
30306-  fi
30307-  if test "x${enable_prof_libgcc}" = "x1" ; then
30308-    backtrace_method="libgcc"
30309-    AC_DEFINE([JEMALLOC_PROF_LIBGCC], [ ], [ ])
30310-  fi
30311-else
30312-  enable_prof_libgcc="0"
30313-fi
30314-
30315-AC_ARG_ENABLE([prof-gcc],
30316-  [AS_HELP_STRING([--disable-prof-gcc],
30317-  [Do not use gcc intrinsics for backtracing])],
30318-[if test "x$enable_prof_gcc" = "xno" ; then
30319-  enable_prof_gcc="0"
30320-else
30321-  enable_prof_gcc="1"
30322-fi
30323-],
30324-[enable_prof_gcc="1"]
30325-)
30326-if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \
30327-     -a "x$GCC" = "xyes" ; then
30328-  JE_CFLAGS_ADD([-fno-omit-frame-pointer])
30329-  backtrace_method="gcc intrinsics"
30330-  AC_DEFINE([JEMALLOC_PROF_GCC], [ ], [ ])
30331-else
30332-  enable_prof_gcc="0"
30333-fi
30334-
30335-if test "x$backtrace_method" = "x" ; then
30336-  backtrace_method="none (disabling profiling)"
30337-  enable_prof="0"
30338-fi
30339-AC_MSG_CHECKING([configured backtracing method])
30340-AC_MSG_RESULT([$backtrace_method])
30341-if test "x$enable_prof" = "x1" ; then
30342-  dnl Heap profiling uses the log(3) function.
30343-  JE_APPEND_VS(LIBS, $LM)
30344-
30345-  AC_DEFINE([JEMALLOC_PROF], [ ], [ ])
30346-fi
30347-AC_SUBST([enable_prof])
30348-
30349-dnl Indicate whether adjacent virtual memory mappings automatically coalesce
30350-dnl (and fragment on demand).
30351-if test "x${maps_coalesce}" = "x1" ; then
30352-  AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ], [ ])
30353-fi
30354-
30355-dnl Indicate whether to retain memory (rather than using munmap()) by default.
30356-if test "x$default_retain" = "x1" ; then
30357-  AC_DEFINE([JEMALLOC_RETAIN], [ ], [ ])
30358-fi
30359-
30360-dnl Indicate whether realloc(ptr, 0) defaults to the "free" behavior rather
30360-dnl than the "alloc" behavior.
30361-if test "x$zero_realloc_default_free" = "x1" ; then
30362-  AC_DEFINE([JEMALLOC_ZERO_REALLOC_DEFAULT_FREE], [ ], [ ])
30363-fi
30364-
30365-dnl Enable allocation from DSS if supported by the OS.
30366-have_dss="1"
30367-dnl Check whether the BSD/SUSv1 sbrk() exists.  If not, disable DSS support.
30368-AC_CHECK_FUNC([sbrk], [have_sbrk="1"], [have_sbrk="0"])
30369-if test "x$have_sbrk" = "x1" ; then
30370-  if test "x$sbrk_deprecated" = "x1" ; then
30371-    AC_MSG_RESULT([Disabling dss allocation because sbrk is deprecated])
30372-    have_dss="0"
30373-  fi
30374-else
30375-  have_dss="0"
30376-fi
30377-
30378-if test "x$have_dss" = "x1" ; then
30379-  AC_DEFINE([JEMALLOC_DSS], [ ], [ ])
30380-fi
30381-
30382-dnl Support the junk/zero filling option by default.
30383-AC_ARG_ENABLE([fill],
30384-  [AS_HELP_STRING([--disable-fill], [Disable support for junk/zero filling])],
30385-[if test "x$enable_fill" = "xno" ; then
30386-  enable_fill="0"
30387-else
30388-  enable_fill="1"
30389-fi
30390-],
30391-[enable_fill="1"]
30392-)
30393-if test "x$enable_fill" = "x1" ; then
30394-  AC_DEFINE([JEMALLOC_FILL], [ ], [ ])
30395-fi
30396-AC_SUBST([enable_fill])
30397-
30398-dnl Disable utrace(2)-based tracing by default.
30399-AC_ARG_ENABLE([utrace],
30400-  [AS_HELP_STRING([--enable-utrace], [Enable utrace(2)-based tracing])],
30401-[if test "x$enable_utrace" = "xno" ; then
30402-  enable_utrace="0"
30403-else
30404-  enable_utrace="1"
30405-fi
30406-],
30407-[enable_utrace="0"]
30408-)
30409-JE_COMPILABLE([utrace(2)], [
30410-#include <sys/types.h>
30411-#include <sys/param.h>
30412-#include <sys/time.h>
30413-#include <sys/uio.h>
30414-#include <sys/ktrace.h>
30415-], [
30416-	utrace((void *)0, 0);
30417-], [je_cv_utrace])
30418-if test "x${je_cv_utrace}" = "xno" ; then
30419-  JE_COMPILABLE([utrace(2) with label], [
30420-  #include <sys/types.h>
30421-  #include <sys/param.h>
30422-  #include <sys/time.h>
30423-  #include <sys/uio.h>
30424-  #include <sys/ktrace.h>
30425-  ], [
30426-	  utrace((void *)0, (void *)0, 0);
30427-  ], [je_cv_utrace_label])
30428-  if test "x${je_cv_utrace_label}" = "xno"; then
30429-    enable_utrace="0"
30430-  fi
30431-  if test "x$enable_utrace" = "x1" ; then
30432-    AC_DEFINE([JEMALLOC_UTRACE_LABEL], [ ], [ ])
30433-  fi
30434-else
30435-  if test "x$enable_utrace" = "x1" ; then
30436-    AC_DEFINE([JEMALLOC_UTRACE], [ ], [ ])
30437-  fi
30438-fi
30439-AC_SUBST([enable_utrace])
30440-
30441-dnl Do not support the xmalloc option by default.
30442-AC_ARG_ENABLE([xmalloc],
30443-  [AS_HELP_STRING([--enable-xmalloc], [Support xmalloc option])],
30444-[if test "x$enable_xmalloc" = "xno" ; then
30445-  enable_xmalloc="0"
30446-else
30447-  enable_xmalloc="1"
30448-fi
30449-],
30450-[enable_xmalloc="0"]
30451-)
30452-if test "x$enable_xmalloc" = "x1" ; then
30453-  AC_DEFINE([JEMALLOC_XMALLOC], [ ], [ ])
30454-fi
30455-AC_SUBST([enable_xmalloc])
30456-
30457-dnl Support cache-oblivious allocation alignment by default.
30458-AC_ARG_ENABLE([cache-oblivious],
30459-  [AS_HELP_STRING([--disable-cache-oblivious],
30460-                  [Disable support for cache-oblivious allocation alignment])],
30461-[if test "x$enable_cache_oblivious" = "xno" ; then
30462-  enable_cache_oblivious="0"
30463-else
30464-  enable_cache_oblivious="1"
30465-fi
30466-],
30467-[enable_cache_oblivious="1"]
30468-)
30469-if test "x$enable_cache_oblivious" = "x1" ; then
30470-  AC_DEFINE([JEMALLOC_CACHE_OBLIVIOUS], [ ], [ ])
30471-fi
30472-AC_SUBST([enable_cache_oblivious])
30473-
30474-dnl Do not log by default.
30475-AC_ARG_ENABLE([log],
30476-  [AS_HELP_STRING([--enable-log], [Support debug logging])],
30477-[if test "x$enable_log" = "xno" ; then
30478-  enable_log="0"
30479-else
30480-  enable_log="1"
30481-fi
30482-],
30483-[enable_log="0"]
30484-)
30485-if test "x$enable_log" = "x1" ; then
30486-  AC_DEFINE([JEMALLOC_LOG], [ ], [ ])
30487-fi
30488-AC_SUBST([enable_log])
30489-
30490-dnl Do not use readlinkat by default
30491-AC_ARG_ENABLE([readlinkat],
30492-  [AS_HELP_STRING([--enable-readlinkat], [Use readlinkat over readlink])],
30493-[if test "x$enable_readlinkat" = "xno" ; then
30494-  enable_readlinkat="0"
30495-else
30496-  enable_readlinkat="1"
30497-fi
30498-],
30499-[enable_readlinkat="0"]
30500-)
30501-if test "x$enable_readlinkat" = "x1" ; then
30502-  AC_DEFINE([JEMALLOC_READLINKAT], [ ], [ ])
30503-fi
30504-AC_SUBST([enable_readlinkat])
30505-
30506-dnl Avoid extra safety checks by default
30507-AC_ARG_ENABLE([opt-safety-checks],
30508-  [AS_HELP_STRING([--enable-opt-safety-checks],
30509-  [Perform certain low-overhead checks, even in opt mode])],
30510-[if test "x$enable_opt_safety_checks" = "xno" ; then
30511-  enable_opt_safety_checks="0"
30512-else
30513-  enable_opt_safety_checks="1"
30514-fi
30515-],
30516-[enable_opt_safety_checks="0"]
30517-)
30518-if test "x$enable_opt_safety_checks" = "x1" ; then
30519-  AC_DEFINE([JEMALLOC_OPT_SAFETY_CHECKS], [ ], [ ])
30520-fi
30521-AC_SUBST([enable_opt_safety_checks])
30522-
30523-dnl Look for sized-deallocation bugs while otherwise being in opt mode.
30524-AC_ARG_ENABLE([opt-size-checks],
30525-  [AS_HELP_STRING([--enable-opt-size-checks],
30526-  [Perform sized-deallocation argument checks, even in opt mode])],
30527-[if test "x$enable_opt_size_checks" = "xno" ; then
30528-  enable_opt_size_checks="0"
30529-else
30530-  enable_opt_size_checks="1"
30531-fi
30532-],
30533-[enable_opt_size_checks="0"]
30534-)
30535-if test "x$enable_opt_size_checks" = "x1" ; then
30536-  AC_DEFINE([JEMALLOC_OPT_SIZE_CHECKS], [ ], [ ])
30537-fi
30538-AC_SUBST([enable_opt_size_checks])
30539-
30540-dnl Do not check for use-after-free by default.
30541-AC_ARG_ENABLE([uaf-detection],
30542-  [AS_HELP_STRING([--enable-uaf-detection],
30543-  [Allow sampled junk-filling on deallocation to detect use-after-free])],
30544-[if test "x$enable_uaf_detection" = "xno" ; then
30545-  enable_uaf_detection="0"
30546-else
30547-  enable_uaf_detection="1"
30548-fi
30549-],
30550-[enable_uaf_detection="0"]
30551-)
30552-if test "x$enable_uaf_detection" = "x1" ; then
30553-  AC_DEFINE([JEMALLOC_UAF_DETECTION], [ ], [ ])
30554-fi
30555-AC_SUBST([enable_uaf_detection])
30556-
30557-JE_COMPILABLE([a program using __builtin_unreachable], [
30558-void foo (void) {
30559-  __builtin_unreachable();
30560-}
30561-], [
30562-	{
30563-		foo();
30564-	}
30565-], [je_cv_gcc_builtin_unreachable])
30566-if test "x${je_cv_gcc_builtin_unreachable}" = "xyes" ; then
30567-  AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [__builtin_unreachable], [ ])
30568-else
30569-  AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [abort], [ ])
30570-fi
30571-
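A hedged sketch of how the substituted JEMALLOC_INTERNAL_UNREACHABLE hook is typically used: it expands to __builtin_unreachable where the builtin compiles and to abort otherwise, so an impossible branch becomes either an optimizer hint or a hard stop. lg_of_small_pow2 is a hypothetical helper, not a jemalloc function.

    #include <stdlib.h>

    /* Chosen by configure: __builtin_unreachable if available, abort otherwise. */
    #define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable

    static int lg_of_small_pow2(unsigned v) {
    	switch (v) {
    	case 1: return 0;
    	case 2: return 1;
    	case 4: return 2;
    	default:
    		JEMALLOC_INTERNAL_UNREACHABLE();  /* caller guarantees v is 1, 2, or 4 */
    	}
    }

    int main(void) {
    	return lg_of_small_pow2(4) == 2 ? 0 : 1;
    }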
30572-dnl ============================================================================
30573-dnl Check for __builtin_ffsl(), then ffsl(3), and fail if neither is found.
30574-dnl One of those two functions should (theoretically) exist on all platforms
30575-dnl that jemalloc currently has a chance of functioning on without modification.
30576-dnl We additionally assume ffs[ll]() or __builtin_ffs[ll]() are defined if
30577-dnl ffsl() or __builtin_ffsl() are defined, respectively.
30578-JE_COMPILABLE([a program using __builtin_ffsl], [
30579-#include <stdio.h>
30580-#include <strings.h>
30581-#include <string.h>
30582-], [
30583-	{
30584-		int rv = __builtin_ffsl(0x08);
30585-		printf("%d\n", rv);
30586-	}
30587-], [je_cv_gcc_builtin_ffsl])
30588-if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then
30589-  AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [__builtin_ffsll], [ ])
30590-  AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [__builtin_ffsl], [ ])
30591-  AC_DEFINE([JEMALLOC_INTERNAL_FFS], [__builtin_ffs], [ ])
30592-else
30593-  JE_COMPILABLE([a program using ffsl], [
30594-  #include <stdio.h>
30595-  #include <strings.h>
30596-  #include <string.h>
30597-  ], [
30598-	{
30599-		int rv = ffsl(0x08);
30600-		printf("%d\n", rv);
30601-	}
30602-  ], [je_cv_function_ffsl])
30603-  if test "x${je_cv_function_ffsl}" = "xyes" ; then
30604-    AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [ffsll], [ ])
30605-    AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [ffsl], [ ])
30606-    AC_DEFINE([JEMALLOC_INTERNAL_FFS], [ffs], [ ])
30607-  else
30608-    AC_MSG_ERROR([Cannot build without ffsl(3) or __builtin_ffsl()])
30609-  fi
30610-fi
30611-
30612-JE_COMPILABLE([a program using __builtin_popcountl], [
30613-#include <stdio.h>
30614-#include <strings.h>
30615-#include <string.h>
30616-], [
30617-	{
30618-		int rv = __builtin_popcountl(0x08);
30619-		printf("%d\n", rv);
30620-	}
30621-], [je_cv_gcc_builtin_popcountl])
30622-if test "x${je_cv_gcc_builtin_popcountl}" = "xyes" ; then
30623-  AC_DEFINE([JEMALLOC_INTERNAL_POPCOUNT], [__builtin_popcount], [ ])
30624-  AC_DEFINE([JEMALLOC_INTERNAL_POPCOUNTL], [__builtin_popcountl], [ ])
30625-  AC_DEFINE([JEMALLOC_INTERNAL_POPCOUNTLL], [__builtin_popcountll], [ ])
30626-fi
30627-
30628-AC_ARG_WITH([lg_quantum],
30629-  [AS_HELP_STRING([--with-lg-quantum=<lg-quantum>],
30630-   [Base 2 log of minimum allocation alignment])])
30631-if test "x$with_lg_quantum" != "x" ; then
30632-  AC_DEFINE_UNQUOTED([LG_QUANTUM], [$with_lg_quantum], [ ])
30633-fi
30634-
30635-AC_ARG_WITH([lg_slab_maxregs],
30636-  [AS_HELP_STRING([--with-lg-slab-maxregs=<lg-slab-maxregs>],
30637-   [Base 2 log of maximum number of regions in a slab (used with malloc_conf slab_sizes)])],
30638-  [CONFIG_LG_SLAB_MAXREGS="$with_lg_slab_maxregs"],
30639-  [CONFIG_LG_SLAB_MAXREGS=""])
30640-if test "x$with_lg_slab_maxregs" != "x" ; then
30641-  AC_DEFINE_UNQUOTED([CONFIG_LG_SLAB_MAXREGS], [$with_lg_slab_maxregs], [ ])
30642-fi
30643-
30644-AC_ARG_WITH([lg_page],
30645-  [AS_HELP_STRING([--with-lg-page=<lg-page>], [Base 2 log of system page size])],
30646-  [LG_PAGE="$with_lg_page"], [LG_PAGE="detect"])
30647-case "${host}" in
30648-  aarch64-apple-darwin*)
30649-      dnl When cross-compiling for Apple M1 with no page size specified, use the
30650-      dnl default and skip page size detection (which would likely be incorrect).
30651-      if test "x${host}" != "x${build}" -a "x$LG_PAGE" = "xdetect"; then
30652-        LG_PAGE=14
30653-      fi
30654-      ;;
30655-esac
30656-if test "x$LG_PAGE" = "xdetect"; then
30657-  AC_CACHE_CHECK([LG_PAGE],
30658-               [je_cv_lg_page],
30659-               AC_RUN_IFELSE([AC_LANG_PROGRAM(
30660-[[
30661-#include <strings.h>
30662-#ifdef _WIN32
30663-#include <windows.h>
30664-#else
30665-#include <unistd.h>
30666-#endif
30667-#include <stdio.h>
30668-]],
30669-[[
30670-    int result;
30671-    FILE *f;
30672-
30673-#ifdef _WIN32
30674-    SYSTEM_INFO si;
30675-    GetSystemInfo(&si);
30676-    result = si.dwPageSize;
30677-#else
30678-    result = sysconf(_SC_PAGESIZE);
30679-#endif
30680-    if (result == -1) {
30681-	return 1;
30682-    }
30683-    result = JEMALLOC_INTERNAL_FFSL(result) - 1;
30684-
30685-    f = fopen("conftest.out", "w");
30686-    if (f == NULL) {
30687-	return 1;
30688-    }
30689-    fprintf(f, "%d", result);
30690-    fclose(f);
30691-
30692-    return 0;
30693-]])],
30694-                             [je_cv_lg_page=`cat conftest.out`],
30695-                             [je_cv_lg_page=undefined],
30696-                             [je_cv_lg_page=12]))
30697-fi
30698-if test "x${je_cv_lg_page}" != "x" ; then
30699-  LG_PAGE="${je_cv_lg_page}"
30700-fi
30701-if test "x${LG_PAGE}" != "xundefined" ; then
30702-   AC_DEFINE_UNQUOTED([LG_PAGE], [$LG_PAGE], [ ])
30703-else
30704-   AC_MSG_ERROR([cannot determine value for LG_PAGE])
30705-fi
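A worked check of the detection arithmetic above, assuming a 4 KiB page: 4096 is 2^12, ffsl-style functions return the 1-based index of the least significant set bit, so JEMALLOC_INTERNAL_FFSL(4096) is 13 and LG_PAGE comes out as 12.

    #include <stdio.h>

    int main(void) {
    	long pagesize = 4096;                      /* assumed 4 KiB page */
    	int lg_page = __builtin_ffsl(pagesize) - 1;
    	printf("LG_PAGE = %d\n", lg_page);         /* prints 12 */
    	return 0;
    }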
30706-
30707-AC_ARG_WITH([lg_hugepage],
30708-  [AS_HELP_STRING([--with-lg-hugepage=<lg-hugepage>],
30709-   [Base 2 log of system huge page size])],
30710-  [je_cv_lg_hugepage="${with_lg_hugepage}"],
30711-  [je_cv_lg_hugepage=""])
30712-if test "x${je_cv_lg_hugepage}" = "x" ; then
30713-  dnl Look in /proc/meminfo (Linux-specific) for information on the default huge
30714-  dnl page size, if any.  The relevant line looks like:
30715-  dnl
30716-  dnl   Hugepagesize:       2048 kB
30717-  if test -e "/proc/meminfo" ; then
30718-    hpsk=[`cat /proc/meminfo 2>/dev/null | \
30719-          grep -e '^Hugepagesize:[[:space:]]\+[0-9]\+[[:space:]]kB$' | \
30720-          awk '{print $2}'`]
30721-    if test "x${hpsk}" != "x" ; then
30722-      je_cv_lg_hugepage=10
30723-      while test "${hpsk}" -gt 1 ; do
30724-        hpsk="$((hpsk / 2))"
30725-        je_cv_lg_hugepage="$((je_cv_lg_hugepage + 1))"
30726-      done
30727-    fi
30728-  fi
30729-
30730-  dnl Set default if unable to automatically configure.
30731-  if test "x${je_cv_lg_hugepage}" = "x" ; then
30732-    je_cv_lg_hugepage=21
30733-  fi
30734-fi
30735-if test "x${LG_PAGE}" != "xundefined" -a \
30736-        "${je_cv_lg_hugepage}" -lt "${LG_PAGE}" ; then
30737-  AC_MSG_ERROR([Huge page size (2^${je_cv_lg_hugepage}) must be at least page size (2^${LG_PAGE})])
30738-fi
30739-AC_DEFINE_UNQUOTED([LG_HUGEPAGE], [${je_cv_lg_hugepage}], [ ])
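A worked example of the loop above, assuming the common "Hugepagesize: 2048 kB" line: hpsk starts at 2048, the counter starts at 10 because the value is expressed in KiB (2^10 bytes), and eleven halvings bring it to 21, i.e. a 2 MiB huge page.

    #include <stdio.h>

    int main(void) {
    	long hpsk = 2048;       /* kB value parsed from /proc/meminfo */
    	int lg_hugepage = 10;   /* log2(1024), accounting for the kB unit */
    	while (hpsk > 1) {
    		hpsk /= 2;
    		lg_hugepage++;
    	}
    	printf("LG_HUGEPAGE = %d\n", lg_hugepage);  /* prints 21 */
    	return 0;
    }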
30740-
30741-dnl ============================================================================
30742-dnl Enable libdl by default.
30743-AC_ARG_ENABLE([libdl],
30744-  [AS_HELP_STRING([--disable-libdl],
30745-  [Do not use libdl])],
30746-[if test "x$enable_libdl" = "xno" ; then
30747-  enable_libdl="0"
30748-else
30749-  enable_libdl="1"
30750-fi
30751-],
30752-[enable_libdl="1"]
30753-)
30754-AC_SUBST([libdl])
30755-
30756-dnl ============================================================================
30757-dnl Configure pthreads.
30758-
30759-if test "x$abi" != "xpecoff" ; then
30760-  AC_DEFINE([JEMALLOC_HAVE_PTHREAD], [ ], [ ])
30761-  AC_CHECK_HEADERS([pthread.h], , [AC_MSG_ERROR([pthread.h is missing])])
30762-  dnl Some systems may embed pthreads functionality in libc; check for libpthread
30763-  dnl first, but try libc too before failing.
30764-  AC_CHECK_LIB([pthread], [pthread_create], [JE_APPEND_VS(LIBS, -pthread)],
30765-               [AC_SEARCH_LIBS([pthread_create], , ,
30766-                               AC_MSG_ERROR([libpthread is missing]))])
30767-  wrap_syms="${wrap_syms} pthread_create"
30768-  have_pthread="1"
30769-
30770-dnl Check if we have dlsym support.
30771-  if test "x$enable_libdl" = "x1" ; then
30772-    have_dlsym="1"
30773-    AC_CHECK_HEADERS([dlfcn.h],
30774-      AC_CHECK_FUNC([dlsym], [],
30775-        [AC_CHECK_LIB([dl], [dlsym], [LIBS="$LIBS -ldl"], [have_dlsym="0"])]),
30776-      [have_dlsym="0"])
30777-    if test "x$have_dlsym" = "x1" ; then
30778-      AC_DEFINE([JEMALLOC_HAVE_DLSYM], [ ], [ ])
30779-    fi
30780-  else
30781-    have_dlsym="0"
30782-  fi
30783-
30784-  JE_COMPILABLE([pthread_atfork(3)], [
30785-#include <pthread.h>
30786-], [
30787-  pthread_atfork((void *)0, (void *)0, (void *)0);
30788-], [je_cv_pthread_atfork])
30789-  if test "x${je_cv_pthread_atfork}" = "xyes" ; then
30790-    AC_DEFINE([JEMALLOC_HAVE_PTHREAD_ATFORK], [ ], [ ])
30791-  fi
30792-  dnl Check if pthread_setname_np is available with the expected API.
30793-  JE_COMPILABLE([pthread_setname_np(3)], [
30794-#include <pthread.h>
30795-], [
30796-  pthread_setname_np(pthread_self(), "setname_test");
30797-], [je_cv_pthread_setname_np])
30798-  if test "x${je_cv_pthread_setname_np}" = "xyes" ; then
30799-    AC_DEFINE([JEMALLOC_HAVE_PTHREAD_SETNAME_NP], [ ], [ ])
30800-  fi
30801-  dnl Check for pthread_getname_np separately, since it is not necessarily
30802-  dnl present even when its pthread_setname_np counterpart is.
30803-  JE_COMPILABLE([pthread_getname_np(3)], [
30804-#include <pthread.h>
30805-#include <stdlib.h>
30806-], [
30807-  {
30808-  	char *name = malloc(16);
30809-  	pthread_getname_np(pthread_self(), name, 16);
30810-	free(name);
30811-  }
30812-], [je_cv_pthread_getname_np])
30813-  if test "x${je_cv_pthread_getname_np}" = "xyes" ; then
30814-    AC_DEFINE([JEMALLOC_HAVE_PTHREAD_GETNAME_NP], [ ], [ ])
30815-  fi
30816-  dnl Check for pthread_get_name_np separately, since it is not necessarily
30817-  dnl present even when its pthread_set_name_np counterpart is.
30818-  JE_COMPILABLE([pthread_get_name_np(3)], [
30819-#include <pthread.h>
30820-#include <pthread_np.h>
30821-#include <stdlib.h>
30822-], [
30823-  {
30824-  	char *name = malloc(16);
30825-  	pthread_get_name_np(pthread_self(), name, 16);
30826-	free(name);
30827-  }
30828-], [je_cv_pthread_get_name_np])
30829-  if test "x${je_cv_pthread_get_name_np}" = "xyes" ; then
30830-    AC_DEFINE([JEMALLOC_HAVE_PTHREAD_GET_NAME_NP], [ ], [ ])
30831-  fi
30832-fi
30833-
30834-JE_APPEND_VS(CPPFLAGS, -D_REENTRANT)
30835-
30836-dnl Check whether clock_gettime(2) is in libc or librt.
30837-AC_SEARCH_LIBS([clock_gettime], [rt])
30838-
30839-dnl Cray wrapper compiler often adds `-lrt` when using `-static`. Check with
30840-dnl `-dynamic` as well in case a user tries to dynamically link in jemalloc
30841-if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
30842-  if test "$ac_cv_search_clock_gettime" != "-lrt"; then
30843-    JE_CFLAGS_SAVE()
30844-
30845-    unset ac_cv_search_clock_gettime
30846-    JE_CFLAGS_ADD([-dynamic])
30847-    AC_SEARCH_LIBS([clock_gettime], [rt])
30848-
30849-    JE_CFLAGS_RESTORE()
30850-  fi
30851-fi
30852-
30853-dnl check for CLOCK_MONOTONIC_COARSE (Linux-specific).
30854-JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC_COARSE, ...)], [
30855-#include <time.h>
30856-], [
30857-	struct timespec ts;
30858-
30859-	clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
30860-], [je_cv_clock_monotonic_coarse])
30861-if test "x${je_cv_clock_monotonic_coarse}" = "xyes" ; then
30862-  AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE], [ ], [ ])
30863-fi
30864-
30865-dnl check for CLOCK_MONOTONIC.
30866-JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC, ...)], [
30867-#include <unistd.h>
30868-#include <time.h>
30869-], [
30870-	struct timespec ts;
30871-
30872-	clock_gettime(CLOCK_MONOTONIC, &ts);
30873-#if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0
30874-#  error _POSIX_MONOTONIC_CLOCK missing/invalid
30875-#endif
30876-], [je_cv_clock_monotonic])
30877-if test "x${je_cv_clock_monotonic}" = "xyes" ; then
30878-  AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC], [ ], [ ])
30879-fi
30880-
30881-dnl Check for mach_absolute_time().
30882-JE_COMPILABLE([mach_absolute_time()], [
30883-#include <mach/mach_time.h>
30884-], [
30885-	mach_absolute_time();
30886-], [je_cv_mach_absolute_time])
30887-if test "x${je_cv_mach_absolute_time}" = "xyes" ; then
30888-  AC_DEFINE([JEMALLOC_HAVE_MACH_ABSOLUTE_TIME], [ ], [ ])
30889-fi
30890-
30891-dnl check for CLOCK_REALTIME (always should be available on Linux)
30892-JE_COMPILABLE([clock_gettime(CLOCK_REALTIME, ...)], [
30893-#include <time.h>
30894-], [
30895-	struct timespec ts;
30896-
30897-	clock_gettime(CLOCK_REALTIME, &ts);
30898-], [je_cv_clock_realtime])
30899-if test "x${je_cv_clock_realtime}" = "xyes" ; then
30900-  AC_DEFINE([JEMALLOC_HAVE_CLOCK_REALTIME], [ ], [ ])
30901-fi
30902-
30903-dnl Use syscall(2) (if available) by default.
30904-AC_ARG_ENABLE([syscall],
30905-  [AS_HELP_STRING([--disable-syscall], [Disable use of syscall(2)])],
30906-[if test "x$enable_syscall" = "xno" ; then
30907-  enable_syscall="0"
30908-else
30909-  enable_syscall="1"
30910-fi
30911-],
30912-[enable_syscall="1"]
30913-)
30914-if test "x$enable_syscall" = "x1" ; then
30915-  dnl Check if syscall(2) is usable.  Treat warnings as errors, so that e.g. OS
30916-  dnl X 10.12's deprecation warning prevents use.
30917-  JE_CFLAGS_SAVE()
30918-  JE_CFLAGS_ADD([-Werror])
30919-  JE_COMPILABLE([syscall(2)], [
30920-#include <sys/syscall.h>
30921-#include <unistd.h>
30922-], [
30923-	syscall(SYS_write, 2, "hello", 5);
30924-],
30925-                [je_cv_syscall])
30926-  JE_CFLAGS_RESTORE()
30927-  if test "x$je_cv_syscall" = "xyes" ; then
30928-    AC_DEFINE([JEMALLOC_USE_SYSCALL], [ ], [ ])
30929-  fi
30930-fi
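A hedged sketch of the pattern this probe enables: call syscall(2) directly only where JEMALLOC_USE_SYSCALL ends up defined, and fall back to the ordinary libc wrapper elsewhere (for example on OS X 10.12+, where the deprecation warning makes the -Werror probe above fail). write_stderr is a hypothetical helper, not a jemalloc symbol.

    #include <unistd.h>
    #ifdef JEMALLOC_USE_SYSCALL
    #include <sys/syscall.h>
    #endif

    ssize_t write_stderr(const void *buf, size_t len) {
    #ifdef JEMALLOC_USE_SYSCALL
    	return syscall(SYS_write, STDERR_FILENO, buf, len);
    #else
    	return write(STDERR_FILENO, buf, len);
    #endif
    }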
30931-
30932-dnl Check if the GNU-specific secure_getenv function exists.
30933-AC_CHECK_FUNC([secure_getenv],
30934-              [have_secure_getenv="1"],
30935-              [have_secure_getenv="0"]
30936-             )
30937-if test "x$have_secure_getenv" = "x1" ; then
30938-  AC_DEFINE([JEMALLOC_HAVE_SECURE_GETENV], [ ], [ ])
30939-fi
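A hedged sketch of why this check matters: secure_getenv(3) is the GNU-specific variant that returns NULL in setuid/setgid (secure-execution) processes, so it is preferred where available, with plain getenv(3) as the usual fallback. getenv_checked is a hypothetical wrapper, not a jemalloc symbol.

    #define _GNU_SOURCE     /* needed for the secure_getenv() declaration on glibc */
    #include <stdlib.h>

    char *getenv_checked(const char *name) {
    #ifdef JEMALLOC_HAVE_SECURE_GETENV
    	return secure_getenv(name);
    #else
    	return getenv(name);
    #endif
    }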
30940-
30941-dnl Check if the GNU-specific sched_getcpu function exists.
30942-AC_CHECK_FUNC([sched_getcpu],
30943-              [have_sched_getcpu="1"],
30944-              [have_sched_getcpu="0"]
30945-             )
30946-if test "x$have_sched_getcpu" = "x1" ; then
30947-  AC_DEFINE([JEMALLOC_HAVE_SCHED_GETCPU], [ ], [ ])
30948-fi
30949-
30950-dnl Check if the GNU-specific sched_setaffinity function exists.
30951-AC_CHECK_FUNC([sched_setaffinity],
30952-              [have_sched_setaffinity="1"],
30953-              [have_sched_setaffinity="0"]
30954-             )
30955-if test "x$have_sched_setaffinity" = "x1" ; then
30956-  AC_DEFINE([JEMALLOC_HAVE_SCHED_SETAFFINITY], [ ], [ ])
30957-fi
30958-
30959-dnl Check if the Solaris/BSD issetugid function exists.
30960-AC_CHECK_FUNC([issetugid],
30961-              [have_issetugid="1"],
30962-              [have_issetugid="0"]
30963-             )
30964-if test "x$have_issetugid" = "x1" ; then
30965-  AC_DEFINE([JEMALLOC_HAVE_ISSETUGID], [ ], [ ])
30966-fi
30967-
30968-dnl Check whether the BSD-specific _malloc_thread_cleanup() exists.  If so, use
30969-dnl it rather than pthreads TSD cleanup functions to support cleanup during
30970-dnl thread exit, in order to avoid pthreads library recursion during
30971-dnl bootstrapping.
30972-AC_CHECK_FUNC([_malloc_thread_cleanup],
30973-              [have__malloc_thread_cleanup="1"],
30974-              [have__malloc_thread_cleanup="0"]
30975-             )
30976-if test "x$have__malloc_thread_cleanup" = "x1" ; then
30977-  AC_DEFINE([JEMALLOC_MALLOC_THREAD_CLEANUP], [ ], [ ])
30978-  wrap_syms="${wrap_syms} _malloc_thread_cleanup _malloc_tsd_cleanup_register"
30979-  force_tls="1"
30980-fi
30981-
30982-dnl Check whether the BSD-specific _pthread_mutex_init_calloc_cb() exists.  If
30983-dnl so, mutex initialization causes allocation, and we need to implement this
30984-dnl callback function in order to prevent recursive allocation.
30985-AC_CHECK_FUNC([_pthread_mutex_init_calloc_cb],
30986-              [have__pthread_mutex_init_calloc_cb="1"],
30987-              [have__pthread_mutex_init_calloc_cb="0"]
30988-             )
30989-if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then
30990-  AC_DEFINE([JEMALLOC_MUTEX_INIT_CB], [ ], [ ])
30991-  wrap_syms="${wrap_syms} _malloc_prefork _malloc_postfork"
30992-fi
30993-
30994-AC_CHECK_FUNC([memcntl],
30995-	      [have_memcntl="1"],
30996-	      [have_memcntl="0"])
30998-if test "x$have_memcntl" = "x1" ; then
30999-  AC_DEFINE([JEMALLOC_HAVE_MEMCNTL], [ ], [ ])
31000-fi
31001-
31002-dnl Disable lazy locking by default.
31003-AC_ARG_ENABLE([lazy_lock],
31004-  [AS_HELP_STRING([--enable-lazy-lock],
31005-  [Enable lazy locking (only lock when multi-threaded)])],
31006-[if test "x$enable_lazy_lock" = "xno" ; then
31007-  enable_lazy_lock="0"
31008-else
31009-  enable_lazy_lock="1"
31010-fi
31011-],
31012-[enable_lazy_lock=""]
31013-)
31014-if test "x${enable_lazy_lock}" = "x" ; then
31015-  if test "x${force_lazy_lock}" = "x1" ; then
31016-    AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues])
31017-    enable_lazy_lock="1"
31018-  else
31019-    enable_lazy_lock="0"
31020-  fi
31021-fi
31022-if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then
31023-  AC_MSG_RESULT([Forcing no lazy-lock because thread creation monitoring is unimplemented])
31024-  enable_lazy_lock="0"
31025-fi
31026-if test "x$enable_lazy_lock" = "x1" ; then
31027-  if test "x$have_dlsym" = "x1" ; then
31028-    AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ], [ ])
31029-  else
31030-    AC_MSG_ERROR([Missing dlsym support: lazy-lock cannot be enabled.])
31031-  fi
31032-fi
31033-AC_SUBST([enable_lazy_lock])
31034-
31035-dnl Automatically configure TLS.
31036-if test "x${force_tls}" = "x1" ; then
31037-  enable_tls="1"
31038-elif test "x${force_tls}" = "x0" ; then
31039-  enable_tls="0"
31040-else
31041-  enable_tls="1"
31042-fi
31043-if test "x${enable_tls}" = "x1" ; then
31044-AC_MSG_CHECKING([for TLS])
31045-AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
31046-[[
31047-    __thread int x;
31048-]], [[
31049-    x = 42;
31050-
31051-    return 0;
31052-]])],
31053-              AC_MSG_RESULT([yes]),
31054-              AC_MSG_RESULT([no])
31055-              enable_tls="0")
31056-else
31057-  enable_tls="0"
31058-fi
31059-AC_SUBST([enable_tls])
31060-if test "x${enable_tls}" = "x1" ; then
31061-  AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ], [ ])
31062-fi
31063-
31064-dnl ============================================================================
31065-dnl Check for C11 atomics.
31066-
31067-JE_COMPILABLE([C11 atomics], [
31068-#include <stdint.h>
31069-#if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
31070-#include <stdatomic.h>
31071-#else
31072-#error Atomics not available
31073-#endif
31074-], [
31075-    uint64_t *p = (uint64_t *)0;
31076-    uint64_t x = 1;
31077-    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
31078-    uint64_t r = atomic_fetch_add(a, x) + x;
31079-    return r == 0;
31080-], [je_cv_c11_atomics])
31081-if test "x${je_cv_c11_atomics}" = "xyes" ; then
31082-  AC_DEFINE([JEMALLOC_C11_ATOMICS], [ ], [ ])
31083-fi
31084-
31085-dnl ============================================================================
31086-dnl Check for GCC-style __atomic atomics.
31087-
31088-JE_COMPILABLE([GCC __atomic atomics], [
31089-], [
31090-    int x = 0;
31091-    int val = 1;
31092-    int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED);
31093-    int after_add = x;
31094-    return after_add == 1;
31095-], [je_cv_gcc_atomic_atomics])
31096-if test "x${je_cv_gcc_atomic_atomics}" = "xyes" ; then
31097-  AC_DEFINE([JEMALLOC_GCC_ATOMIC_ATOMICS], [ ], [ ])
31098-
31099-  dnl check for 8-bit atomic support
31100-  JE_COMPILABLE([GCC 8-bit __atomic atomics], [
31101-  ], [
31102-      unsigned char x = 0;
31103-      int val = 1;
31104-      int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED);
31105-      int after_add = (int)x;
31106-      return after_add == 1;
31107-  ], [je_cv_gcc_u8_atomic_atomics])
31108-  if test "x${je_cv_gcc_u8_atomic_atomics}" = "xyes" ; then
31109-    AC_DEFINE([JEMALLOC_GCC_U8_ATOMIC_ATOMICS], [ ], [ ])
31110-  fi
31111-fi
31112-
31113-dnl ============================================================================
31114-dnl Check for GCC-style __sync atomics.
31115-
31116-JE_COMPILABLE([GCC __sync atomics], [
31117-], [
31118-    int x = 0;
31119-    int before_add = __sync_fetch_and_add(&x, 1);
31120-    int after_add = x;
31121-    return (before_add == 0) && (after_add == 1);
31122-], [je_cv_gcc_sync_atomics])
31123-if test "x${je_cv_gcc_sync_atomics}" = "xyes" ; then
31124-  AC_DEFINE([JEMALLOC_GCC_SYNC_ATOMICS], [ ], [ ])
31125-
31126-  dnl check for 8-bit atomic support
31127-  JE_COMPILABLE([GCC 8-bit __sync atomics], [
31128-  ], [
31129-      unsigned char x = 0;
31130-      int before_add = __sync_fetch_and_add(&x, 1);
31131-      int after_add = (int)x;
31132-      return (before_add == 0) && (after_add == 1);
31133-  ], [je_cv_gcc_u8_sync_atomics])
31134-  if test "x${je_cv_gcc_u8_sync_atomics}" = "xyes" ; then
31135-    AC_DEFINE([JEMALLOC_GCC_U8_SYNC_ATOMICS], [ ], [ ])
31136-  fi
31137-fi
31138-
31139-dnl ============================================================================
31140-dnl Check for atomic(3) operations as provided on Darwin.
31141-dnl We need this not for the atomic operations (which are provided above), but
31142-dnl rather for the OS_unfair_lock type it exposes.
31143-
31144-JE_COMPILABLE([Darwin OSAtomic*()], [
31145-#include <libkern/OSAtomic.h>
31146-#include <inttypes.h>
31147-], [
31148-	{
31149-		int32_t x32 = 0;
31150-		volatile int32_t *x32p = &x32;
31151-		OSAtomicAdd32(1, x32p);
31152-	}
31153-	{
31154-		int64_t x64 = 0;
31155-		volatile int64_t *x64p = &x64;
31156-		OSAtomicAdd64(1, x64p);
31157-	}
31158-], [je_cv_osatomic])
31159-if test "x${je_cv_osatomic}" = "xyes" ; then
31160-  AC_DEFINE([JEMALLOC_OSATOMIC], [ ], [ ])
31161-fi
31162-
31163-dnl ============================================================================
31164-dnl Check for madvise(2).
31165-
31166-JE_COMPILABLE([madvise(2)], [
31167-#include <sys/mman.h>
31168-], [
31169-	madvise((void *)0, 0, 0);
31170-], [je_cv_madvise])
31171-if test "x${je_cv_madvise}" = "xyes" ; then
31172-  AC_DEFINE([JEMALLOC_HAVE_MADVISE], [ ], [ ])
31173-
31174-  dnl Check for madvise(..., MADV_FREE).
31175-  JE_COMPILABLE([madvise(..., MADV_FREE)], [
31176-#include <sys/mman.h>
31177-], [
31178-	madvise((void *)0, 0, MADV_FREE);
31179-], [je_cv_madv_free])
31180-  if test "x${je_cv_madv_free}" = "xyes" ; then
31181-    AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ], [ ])
31182-  elif test "x${je_cv_madvise}" = "xyes" ; then
31183-    case "${host_cpu}" in i686|x86_64)
31184-        case "${host}" in *-*-linux*)
31185-            AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ], [ ])
31186-            AC_DEFINE([JEMALLOC_DEFINE_MADVISE_FREE], [ ], [ ])
31187-	    ;;
31188-        esac
31189-        ;;
31190-    esac
31191-  fi
31192-
31193-  dnl Check for madvise(..., MADV_DONTNEED).
31194-  JE_COMPILABLE([madvise(..., MADV_DONTNEED)], [
31195-#include <sys/mman.h>
31196-], [
31197-	madvise((void *)0, 0, MADV_DONTNEED);
31198-], [je_cv_madv_dontneed])
31199-  if test "x${je_cv_madv_dontneed}" = "xyes" ; then
31200-    AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ], [ ])
31201-  fi
31202-
31203-  dnl Check for madvise(..., MADV_DO[NT]DUMP).
31204-  JE_COMPILABLE([madvise(..., MADV_DO[[NT]]DUMP)], [
31205-#include <sys/mman.h>
31206-], [
31207-	madvise((void *)0, 0, MADV_DONTDUMP);
31208-	madvise((void *)0, 0, MADV_DODUMP);
31209-], [je_cv_madv_dontdump])
31210-  if test "x${je_cv_madv_dontdump}" = "xyes" ; then
31211-    AC_DEFINE([JEMALLOC_MADVISE_DONTDUMP], [ ], [ ])
31212-  fi
31213-
31214-  dnl Check for madvise(..., MADV_[NO]HUGEPAGE).
31215-  JE_COMPILABLE([madvise(..., MADV_[[NO]]HUGEPAGE)], [
31216-#include <sys/mman.h>
31217-], [
31218-	madvise((void *)0, 0, MADV_HUGEPAGE);
31219-	madvise((void *)0, 0, MADV_NOHUGEPAGE);
31220-], [je_cv_thp])
31221-  dnl Check for madvise(..., MADV_[NO]CORE).
31222-  JE_COMPILABLE([madvise(..., MADV_[[NO]]CORE)], [
31223-#include <sys/mman.h>
31224-], [
31225-	madvise((void *)0, 0, MADV_NOCORE);
31226-	madvise((void *)0, 0, MADV_CORE);
31227-], [je_cv_madv_nocore])
31228-  if test "x${je_cv_madv_nocore}" = "xyes" ; then
31229-    AC_DEFINE([JEMALLOC_MADVISE_NOCORE], [ ], [ ])
31230-  fi
31231-  case "${host_cpu}" in
31232-    arm*)
31233-      ;;
31234-    *)
31235-      if test "x${je_cv_thp}" = "xyes" ; then
31236-        AC_DEFINE([JEMALLOC_HAVE_MADVISE_HUGE], [ ], [ ])
31237-      fi
31238-      ;;
31239-  esac
31240-else
31241-  dnl Check for posix_madvise.
31242-  JE_COMPILABLE([posix_madvise], [
31243-  #include <sys/mman.h>
31244-  ], [
31245-    posix_madvise((void *)0, 0, 0);
31246-  ], [je_cv_posix_madvise])
31247-  if test "x${je_cv_posix_madvise}" = "xyes" ; then
31248-    AC_DEFINE([JEMALLOC_HAVE_POSIX_MADVISE], [ ], [ ])
31249-
31250-    dnl Check for posix_madvise(..., POSIX_MADV_DONTNEED).
31251-    JE_COMPILABLE([posix_madvise(..., POSIX_MADV_DONTNEED)], [
31252-  #include <sys/mman.h>
31253-  ], [
31254-    posix_madvise((void *)0, 0, POSIX_MADV_DONTNEED);
31255-  ], [je_cv_posix_madv_dontneed])
31256-    if test "x${je_cv_posix_madv_dontneed}" = "xyes" ; then
31257-      AC_DEFINE([JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED], [ ], [ ])
31258-    fi
31259-  fi
31260-fi
31261-
31262-dnl ============================================================================
31263-dnl Check for mprotect(2).
31264-
31265-JE_COMPILABLE([mprotect(2)], [
31266-#include <sys/mman.h>
31267-], [
31268-	mprotect((void *)0, 0, PROT_NONE);
31269-], [je_cv_mprotect])
31270-if test "x${je_cv_mprotect}" = "xyes" ; then
31271-  AC_DEFINE([JEMALLOC_HAVE_MPROTECT], [ ], [ ])
31272-fi
31273-
31274-dnl ============================================================================
31275-dnl Check for __builtin_clz(), __builtin_clzl(), and __builtin_clzll().
31276-
31277-AC_CACHE_CHECK([for __builtin_clz],
31278-               [je_cv_builtin_clz],
31279-               [AC_LINK_IFELSE([AC_LANG_PROGRAM([],
31280-                                                [
31281-                                                {
31282-                                                        unsigned x = 0;
31283-                                                        int y = __builtin_clz(x);
31284-                                                }
31285-                                                {
31286-                                                        unsigned long x = 0;
31287-                                                        int y = __builtin_clzl(x);
31288-                                                }
31289-                                                {
31290-                                                        unsigned long long x = 0;
31291-                                                        int y = __builtin_clzll(x);
31292-                                                }
31293-                                                ])],
31294-                               [je_cv_builtin_clz=yes],
31295-                               [je_cv_builtin_clz=no])])
31296-
31297-if test "x${je_cv_builtin_clz}" = "xyes" ; then
31298-  AC_DEFINE([JEMALLOC_HAVE_BUILTIN_CLZ], [ ], [ ])
31299-fi
31300-
31301-dnl ============================================================================
31302-dnl Check for os_unfair_lock operations as provided on Darwin.
31303-
31304-JE_COMPILABLE([Darwin os_unfair_lock_*()], [
31305-#include <os/lock.h>
31306-#include <AvailabilityMacros.h>
31307-], [
31308-	#if MAC_OS_X_VERSION_MIN_REQUIRED < 101200
31309-	#error "os_unfair_lock is not supported"
31310-	#else
31311-	os_unfair_lock lock = OS_UNFAIR_LOCK_INIT;
31312-	os_unfair_lock_lock(&lock);
31313-	os_unfair_lock_unlock(&lock);
31314-	#endif
31315-], [je_cv_os_unfair_lock])
31316-if test "x${je_cv_os_unfair_lock}" = "xyes" ; then
31317-  AC_DEFINE([JEMALLOC_OS_UNFAIR_LOCK], [ ], [ ])
31318-fi
31319-
31320-dnl ============================================================================
31321-dnl Darwin-related configuration.
31322-
31323-AC_ARG_ENABLE([zone-allocator],
31324-  [AS_HELP_STRING([--disable-zone-allocator],
31325-                  [Disable zone allocator for Darwin])],
31326-[if test "x$enable_zone_allocator" = "xno" ; then
31327-  enable_zone_allocator="0"
31328-else
31329-  enable_zone_allocator="1"
31330-fi
31331-],
31332-[if test "x${abi}" = "xmacho"; then
31333-  enable_zone_allocator="1"
31334-fi
31335-]
31336-)
31337-AC_SUBST([enable_zone_allocator])
31338-
31339-if test "x${enable_zone_allocator}" = "x1" ; then
31340-  if test "x${abi}" != "xmacho"; then
31341-    AC_MSG_ERROR([--enable-zone-allocator is only supported on Darwin])
31342-  fi
31343-  AC_DEFINE([JEMALLOC_ZONE], [ ], [ ])
31344-fi
31345-
31346-dnl ============================================================================
31347-dnl Use initial-exec TLS by default.
31348-AC_ARG_ENABLE([initial-exec-tls],
31349-  [AS_HELP_STRING([--disable-initial-exec-tls],
31350-                  [Disable the initial-exec tls model])],
31351-[if test "x$enable_initial_exec_tls" = "xno" ; then
31352-  enable_initial_exec_tls="0"
31353-else
31354-  enable_initial_exec_tls="1"
31355-fi
31356-],
31357-[enable_initial_exec_tls="1"]
31358-)
31359-AC_SUBST([enable_initial_exec_tls])
31360-
31361-if test "x${je_cv_tls_model}" = "xyes" -a \
31362-       "x${enable_initial_exec_tls}" = "x1" ; then
31363-  AC_DEFINE([JEMALLOC_TLS_MODEL],
31364-            [__attribute__((tls_model("initial-exec")))],
31365-            [ ])
31366-else
31367-  AC_DEFINE([JEMALLOC_TLS_MODEL], [ ], [ ])
31368-fi
31369-
31370-dnl ============================================================================
31371-dnl Enable background threads if possible.
31372-
31373-if test "x${have_pthread}" = "x1" -a "x${je_cv_os_unfair_lock}" != "xyes" -a \
31374-       "x${abi}" != "xmacho" ; then
31375-  AC_DEFINE([JEMALLOC_BACKGROUND_THREAD], [ ], [ ])
31376-fi
31377-
31378-dnl ============================================================================
31379-dnl Check for glibc malloc hooks
31380-
31381-if test "x$glibc" = "x1" ; then
31382-  JE_COMPILABLE([glibc malloc hook], [
31383-  #include <stddef.h>
31384-
31385-  extern void (* __free_hook)(void *ptr);
31386-  extern void *(* __malloc_hook)(size_t size);
31387-  extern void *(* __realloc_hook)(void *ptr, size_t size);
31388-], [
31389-    void *ptr = 0L;
31390-    if (__malloc_hook) ptr = __malloc_hook(1);
31391-    if (__realloc_hook) ptr = __realloc_hook(ptr, 2);
31392-    if (__free_hook && ptr) __free_hook(ptr);
31393-], [je_cv_glibc_malloc_hook])
31394-  if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then
31395-    if test "x${JEMALLOC_PREFIX}" = "x" ; then
31396-      AC_DEFINE([JEMALLOC_GLIBC_MALLOC_HOOK], [ ], [ ])
31397-      wrap_syms="${wrap_syms} __free_hook __malloc_hook __realloc_hook"
31398-    fi
31399-  fi
31400-
31401-  JE_COMPILABLE([glibc memalign hook], [
31402-  #include <stddef.h>
31403-
31404-  extern void *(* __memalign_hook)(size_t alignment, size_t size);
31405-], [
31406-    void *ptr = 0L;
31407-    if (__memalign_hook) ptr = __memalign_hook(16, 7);
31408-], [je_cv_glibc_memalign_hook])
31409-  if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then
31410-    if test "x${JEMALLOC_PREFIX}" = "x" ; then
31411-      AC_DEFINE([JEMALLOC_GLIBC_MEMALIGN_HOOK], [ ], [ ])
31412-      wrap_syms="${wrap_syms} __memalign_hook"
31413-    fi
31414-  fi
31415-fi
31416-
31417-JE_COMPILABLE([pthreads adaptive mutexes], [
31418-#include <pthread.h>
31419-], [
31420-  pthread_mutexattr_t attr;
31421-  pthread_mutexattr_init(&attr);
31422-  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
31423-  pthread_mutexattr_destroy(&attr);
31424-], [je_cv_pthread_mutex_adaptive_np])
31425-if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then
31426-  AC_DEFINE([JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP], [ ], [ ])
31427-fi
31428-
31429-JE_CFLAGS_SAVE()
31430-JE_CFLAGS_ADD([-D_GNU_SOURCE])
31431-JE_CFLAGS_ADD([-Werror])
31432-JE_CFLAGS_ADD([-herror_on_warning])
31433-JE_COMPILABLE([strerror_r returns char with gnu source], [
31434-#include <errno.h>
31435-#include <stdio.h>
31436-#include <stdlib.h>
31437-#include <string.h>
31438-], [
31439-  char *buffer = (char *) malloc(100);
31440-  char *error = strerror_r(EINVAL, buffer, 100);
31441-  printf("%s\n", error);
31442-], [je_cv_strerror_r_returns_char_with_gnu_source])
31443-JE_CFLAGS_RESTORE()
31444-if test "x${je_cv_strerror_r_returns_char_with_gnu_source}" = "xyes" ; then
31445-  AC_DEFINE([JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE], [ ], [ ])
31446-fi
31447-
31448-dnl ============================================================================
31449-dnl Check for typedefs, structures, and compiler characteristics.
31450-AC_HEADER_STDBOOL
31451-
31452-dnl ============================================================================
31453-dnl Define commands that generate output files.
31454-
31455-AC_CONFIG_COMMANDS([include/jemalloc/internal/public_symbols.txt], [
31456-  f="${objroot}include/jemalloc/internal/public_symbols.txt"
31457-  mkdir -p "${objroot}include/jemalloc/internal"
31458-  cp /dev/null "${f}"
31459-  for nm in `echo ${mangling_map} |tr ',' ' '` ; do
31460-    n=`echo ${nm} |tr ':' ' ' |awk '{print $[]1}'`
31461-    m=`echo ${nm} |tr ':' ' ' |awk '{print $[]2}'`
31462-    echo "${n}:${m}" >> "${f}"
31463-    dnl Remove name from public_syms so that it isn't redefined later.
31464-    public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '`
31465-  done
31466-  for sym in ${public_syms} ; do
31467-    n="${sym}"
31468-    m="${JEMALLOC_PREFIX}${sym}"
31469-    echo "${n}:${m}" >> "${f}"
31470-  done
31471-], [
31472-  srcdir="${srcdir}"
31473-  objroot="${objroot}"
31474-  mangling_map="${mangling_map}"
31475-  public_syms="${public_syms}"
31476-  JEMALLOC_PREFIX="${JEMALLOC_PREFIX}"
31477-])
31478-AC_CONFIG_COMMANDS([include/jemalloc/internal/private_symbols.awk], [
31479-  f="${objroot}include/jemalloc/internal/private_symbols.awk"
31480-  mkdir -p "${objroot}include/jemalloc/internal"
31481-  export_syms=`for sym in ${public_syms}; do echo "${JEMALLOC_PREFIX}${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;`
31482-  "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols.awk"
31483-], [
31484-  srcdir="${srcdir}"
31485-  objroot="${objroot}"
31486-  public_syms="${public_syms}"
31487-  wrap_syms="${wrap_syms}"
31488-  SYM_PREFIX="${SYM_PREFIX}"
31489-  JEMALLOC_PREFIX="${JEMALLOC_PREFIX}"
31490-])
31491-AC_CONFIG_COMMANDS([include/jemalloc/internal/private_symbols_jet.awk], [
31492-  f="${objroot}include/jemalloc/internal/private_symbols_jet.awk"
31493-  mkdir -p "${objroot}include/jemalloc/internal"
31494-  export_syms=`for sym in ${public_syms}; do echo "jet_${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;`
31495-  "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols_jet.awk"
31496-], [
31497-  srcdir="${srcdir}"
31498-  objroot="${objroot}"
31499-  public_syms="${public_syms}"
31500-  wrap_syms="${wrap_syms}"
31501-  SYM_PREFIX="${SYM_PREFIX}"
31502-])
31503-AC_CONFIG_COMMANDS([include/jemalloc/internal/public_namespace.h], [
31504-  mkdir -p "${objroot}include/jemalloc/internal"
31505-  "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h"
31506-], [
31507-  srcdir="${srcdir}"
31508-  objroot="${objroot}"
31509-])
31510-AC_CONFIG_COMMANDS([include/jemalloc/internal/public_unnamespace.h], [
31511-  mkdir -p "${objroot}include/jemalloc/internal"
31512-  "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h"
31513-], [
31514-  srcdir="${srcdir}"
31515-  objroot="${objroot}"
31516-])
31517-AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_protos_jet.h], [
31518-  mkdir -p "${objroot}include/jemalloc"
31519-  cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h"
31520-], [
31521-  srcdir="${srcdir}"
31522-  objroot="${objroot}"
31523-])
31524-AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_rename.h], [
31525-  mkdir -p "${objroot}include/jemalloc"
31526-  "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h"
31527-], [
31528-  srcdir="${srcdir}"
31529-  objroot="${objroot}"
31530-])
31531-AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle.h], [
31532-  mkdir -p "${objroot}include/jemalloc"
31533-  "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h"
31534-], [
31535-  srcdir="${srcdir}"
31536-  objroot="${objroot}"
31537-])
31538-AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle_jet.h], [
31539-  mkdir -p "${objroot}include/jemalloc"
31540-  "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h"
31541-], [
31542-  srcdir="${srcdir}"
31543-  objroot="${objroot}"
31544-])
31545-AC_CONFIG_COMMANDS([include/jemalloc/jemalloc.h], [
31546-  mkdir -p "${objroot}include/jemalloc"
31547-  "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h"
31548-], [
31549-  srcdir="${srcdir}"
31550-  objroot="${objroot}"
31551-  install_suffix="${install_suffix}"
31552-])
31553-
31554-dnl Process .in files.
31555-AC_SUBST([cfghdrs_in])
31556-AC_SUBST([cfghdrs_out])
31557-AC_CONFIG_HEADERS([$cfghdrs_tup])
31558-
31559-dnl ============================================================================
31560-dnl Generate outputs.
31561-
31562-AC_CONFIG_FILES([$cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof])
31563-AC_SUBST([cfgoutputs_in])
31564-AC_SUBST([cfgoutputs_out])
31565-AC_OUTPUT
31566-
31567-dnl ============================================================================
31568-dnl Print out the results of configuration.
31569-AC_MSG_RESULT([===============================================================================])
31570-AC_MSG_RESULT([jemalloc version   : ${jemalloc_version}])
31571-AC_MSG_RESULT([library revision   : ${rev}])
31572-AC_MSG_RESULT([])
31573-AC_MSG_RESULT([CONFIG             : ${CONFIG}])
31574-AC_MSG_RESULT([CC                 : ${CC}])
31575-AC_MSG_RESULT([CONFIGURE_CFLAGS   : ${CONFIGURE_CFLAGS}])
31576-AC_MSG_RESULT([SPECIFIED_CFLAGS   : ${SPECIFIED_CFLAGS}])
31577-AC_MSG_RESULT([EXTRA_CFLAGS       : ${EXTRA_CFLAGS}])
31578-AC_MSG_RESULT([CPPFLAGS           : ${CPPFLAGS}])
31579-AC_MSG_RESULT([CXX                : ${CXX}])
31580-AC_MSG_RESULT([CONFIGURE_CXXFLAGS : ${CONFIGURE_CXXFLAGS}])
31581-AC_MSG_RESULT([SPECIFIED_CXXFLAGS : ${SPECIFIED_CXXFLAGS}])
31582-AC_MSG_RESULT([EXTRA_CXXFLAGS     : ${EXTRA_CXXFLAGS}])
31583-AC_MSG_RESULT([LDFLAGS            : ${LDFLAGS}])
31584-AC_MSG_RESULT([EXTRA_LDFLAGS      : ${EXTRA_LDFLAGS}])
31585-AC_MSG_RESULT([DSO_LDFLAGS        : ${DSO_LDFLAGS}])
31586-AC_MSG_RESULT([LIBS               : ${LIBS}])
31587-AC_MSG_RESULT([RPATH_EXTRA        : ${RPATH_EXTRA}])
31588-AC_MSG_RESULT([])
31589-AC_MSG_RESULT([XSLTPROC           : ${XSLTPROC}])
31590-AC_MSG_RESULT([XSLROOT            : ${XSLROOT}])
31591-AC_MSG_RESULT([])
31592-AC_MSG_RESULT([PREFIX             : ${PREFIX}])
31593-AC_MSG_RESULT([BINDIR             : ${BINDIR}])
31594-AC_MSG_RESULT([DATADIR            : ${DATADIR}])
31595-AC_MSG_RESULT([INCLUDEDIR         : ${INCLUDEDIR}])
31596-AC_MSG_RESULT([LIBDIR             : ${LIBDIR}])
31597-AC_MSG_RESULT([MANDIR             : ${MANDIR}])
31598-AC_MSG_RESULT([])
31599-AC_MSG_RESULT([srcroot            : ${srcroot}])
31600-AC_MSG_RESULT([abs_srcroot        : ${abs_srcroot}])
31601-AC_MSG_RESULT([objroot            : ${objroot}])
31602-AC_MSG_RESULT([abs_objroot        : ${abs_objroot}])
31603-AC_MSG_RESULT([])
31604-AC_MSG_RESULT([JEMALLOC_PREFIX    : ${JEMALLOC_PREFIX}])
31605-AC_MSG_RESULT([JEMALLOC_PRIVATE_NAMESPACE])
31606-AC_MSG_RESULT([                   : ${JEMALLOC_PRIVATE_NAMESPACE}])
31607-AC_MSG_RESULT([install_suffix     : ${install_suffix}])
31608-AC_MSG_RESULT([malloc_conf        : ${config_malloc_conf}])
31609-AC_MSG_RESULT([documentation      : ${enable_doc}])
31610-AC_MSG_RESULT([shared libs        : ${enable_shared}])
31611-AC_MSG_RESULT([static libs        : ${enable_static}])
31612-AC_MSG_RESULT([autogen            : ${enable_autogen}])
31613-AC_MSG_RESULT([debug              : ${enable_debug}])
31614-AC_MSG_RESULT([stats              : ${enable_stats}])
31615-AC_MSG_RESULT([experimental_smallocx : ${enable_experimental_smallocx}])
31616-AC_MSG_RESULT([prof               : ${enable_prof}])
31617-AC_MSG_RESULT([prof-libunwind     : ${enable_prof_libunwind}])
31618-AC_MSG_RESULT([prof-libgcc        : ${enable_prof_libgcc}])
31619-AC_MSG_RESULT([prof-gcc           : ${enable_prof_gcc}])
31620-AC_MSG_RESULT([fill               : ${enable_fill}])
31621-AC_MSG_RESULT([utrace             : ${enable_utrace}])
31622-AC_MSG_RESULT([xmalloc            : ${enable_xmalloc}])
31623-AC_MSG_RESULT([log                : ${enable_log}])
31624-AC_MSG_RESULT([lazy_lock          : ${enable_lazy_lock}])
31625-AC_MSG_RESULT([cache-oblivious    : ${enable_cache_oblivious}])
31626-AC_MSG_RESULT([cxx                : ${enable_cxx}])
31627-AC_MSG_RESULT([===============================================================================])
31628diff --git a/jemalloc/doc/html.xsl.in b/jemalloc/doc/html.xsl.in
31629deleted file mode 100644
31630index ec4fa65..0000000
31631--- a/jemalloc/doc/html.xsl.in
31632+++ /dev/null
31633@@ -1,5 +0,0 @@
31634-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
31635-  <xsl:import href="@XSLROOT@/html/docbook.xsl"/>
31636-  <xsl:import href="@abs_srcroot@doc/stylesheet.xsl"/>
31637-  <xsl:output method="xml" encoding="utf-8"/>
31638-</xsl:stylesheet>
31639diff --git a/jemalloc/doc/jemalloc.xml.in b/jemalloc/doc/jemalloc.xml.in
31640deleted file mode 100644
31641index e28e8f3..0000000
31642--- a/jemalloc/doc/jemalloc.xml.in
31643+++ /dev/null
31644@@ -1,3763 +0,0 @@
31645-<?xml version='1.0' encoding='UTF-8'?>
31646-<?xml-stylesheet type="text/xsl"
31647-        href="http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl"?>
31648-<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.4//EN"
31649-        "http://www.oasis-open.org/docbook/xml/4.4/docbookx.dtd" [
31650-]>
31651-
31652-<refentry>
31653-  <refentryinfo>
31654-    <title>User Manual</title>
31655-    <productname>jemalloc</productname>
31656-    <releaseinfo role="version">@jemalloc_version@</releaseinfo>
31657-    <authorgroup>
31658-      <author>
31659-        <firstname>Jason</firstname>
31660-        <surname>Evans</surname>
31661-        <personblurb>Author</personblurb>
31662-      </author>
31663-    </authorgroup>
31664-  </refentryinfo>
31665-  <refmeta>
31666-    <refentrytitle>JEMALLOC</refentrytitle>
31667-    <manvolnum>3</manvolnum>
31668-  </refmeta>
31669-  <refnamediv>
31670-    <refdescriptor>jemalloc</refdescriptor>
31671-    <refname>jemalloc</refname>
31672-    <!-- Each refname causes a man page file to be created.  Only if this were
31673-         the system malloc(3) implementation would these files be appropriate.
31674-    <refname>malloc</refname>
31675-    <refname>calloc</refname>
31676-    <refname>posix_memalign</refname>
31677-    <refname>aligned_alloc</refname>
31678-    <refname>realloc</refname>
31679-    <refname>free</refname>
31680-    <refname>mallocx</refname>
31681-    <refname>rallocx</refname>
31682-    <refname>xallocx</refname>
31683-    <refname>sallocx</refname>
31684-    <refname>dallocx</refname>
31685-    <refname>sdallocx</refname>
31686-    <refname>nallocx</refname>
31687-    <refname>mallctl</refname>
31688-    <refname>mallctlnametomib</refname>
31689-    <refname>mallctlbymib</refname>
31690-    <refname>malloc_stats_print</refname>
31691-    <refname>malloc_usable_size</refname>
31692-    -->
31693-    <refpurpose>general purpose memory allocation functions</refpurpose>
31694-  </refnamediv>
31695-  <refsect1 id="library">
31696-    <title>LIBRARY</title>
31697-    <para>This manual describes jemalloc @jemalloc_version@.  More information
31698-    can be found at the <ulink
31699-    url="http://jemalloc.net/">jemalloc website</ulink>.</para>
31700-  </refsect1>
31701-  <refsynopsisdiv>
31702-    <title>SYNOPSIS</title>
31703-    <funcsynopsis>
31704-      <funcsynopsisinfo>#include &lt;<filename class="headerfile">jemalloc/jemalloc.h</filename>&gt;</funcsynopsisinfo>
31705-      <refsect2>
31706-        <title>Standard API</title>
31707-        <funcprototype>
31708-          <funcdef>void *<function>malloc</function></funcdef>
31709-          <paramdef>size_t <parameter>size</parameter></paramdef>
31710-        </funcprototype>
31711-        <funcprototype>
31712-          <funcdef>void *<function>calloc</function></funcdef>
31713-          <paramdef>size_t <parameter>number</parameter></paramdef>
31714-          <paramdef>size_t <parameter>size</parameter></paramdef>
31715-        </funcprototype>
31716-        <funcprototype>
31717-          <funcdef>int <function>posix_memalign</function></funcdef>
31718-          <paramdef>void **<parameter>ptr</parameter></paramdef>
31719-          <paramdef>size_t <parameter>alignment</parameter></paramdef>
31720-          <paramdef>size_t <parameter>size</parameter></paramdef>
31721-        </funcprototype>
31722-        <funcprototype>
31723-          <funcdef>void *<function>aligned_alloc</function></funcdef>
31724-          <paramdef>size_t <parameter>alignment</parameter></paramdef>
31725-          <paramdef>size_t <parameter>size</parameter></paramdef>
31726-        </funcprototype>
31727-        <funcprototype>
31728-          <funcdef>void *<function>realloc</function></funcdef>
31729-          <paramdef>void *<parameter>ptr</parameter></paramdef>
31730-          <paramdef>size_t <parameter>size</parameter></paramdef>
31731-        </funcprototype>
31732-        <funcprototype>
31733-          <funcdef>void <function>free</function></funcdef>
31734-          <paramdef>void *<parameter>ptr</parameter></paramdef>
31735-        </funcprototype>
31736-      </refsect2>
31737-      <refsect2>
31738-        <title>Non-standard API</title>
31739-        <funcprototype>
31740-          <funcdef>void *<function>mallocx</function></funcdef>
31741-          <paramdef>size_t <parameter>size</parameter></paramdef>
31742-          <paramdef>int <parameter>flags</parameter></paramdef>
31743-        </funcprototype>
31744-        <funcprototype>
31745-          <funcdef>void *<function>rallocx</function></funcdef>
31746-          <paramdef>void *<parameter>ptr</parameter></paramdef>
31747-          <paramdef>size_t <parameter>size</parameter></paramdef>
31748-          <paramdef>int <parameter>flags</parameter></paramdef>
31749-        </funcprototype>
31750-        <funcprototype>
31751-          <funcdef>size_t <function>xallocx</function></funcdef>
31752-          <paramdef>void *<parameter>ptr</parameter></paramdef>
31753-          <paramdef>size_t <parameter>size</parameter></paramdef>
31754-          <paramdef>size_t <parameter>extra</parameter></paramdef>
31755-          <paramdef>int <parameter>flags</parameter></paramdef>
31756-        </funcprototype>
31757-        <funcprototype>
31758-          <funcdef>size_t <function>sallocx</function></funcdef>
31759-          <paramdef>void *<parameter>ptr</parameter></paramdef>
31760-          <paramdef>int <parameter>flags</parameter></paramdef>
31761-        </funcprototype>
31762-        <funcprototype>
31763-          <funcdef>void <function>dallocx</function></funcdef>
31764-          <paramdef>void *<parameter>ptr</parameter></paramdef>
31765-          <paramdef>int <parameter>flags</parameter></paramdef>
31766-        </funcprototype>
31767-        <funcprototype>
31768-          <funcdef>void <function>sdallocx</function></funcdef>
31769-          <paramdef>void *<parameter>ptr</parameter></paramdef>
31770-          <paramdef>size_t <parameter>size</parameter></paramdef>
31771-          <paramdef>int <parameter>flags</parameter></paramdef>
31772-        </funcprototype>
31773-        <funcprototype>
31774-          <funcdef>size_t <function>nallocx</function></funcdef>
31775-          <paramdef>size_t <parameter>size</parameter></paramdef>
31776-          <paramdef>int <parameter>flags</parameter></paramdef>
31777-        </funcprototype>
31778-        <funcprototype>
31779-          <funcdef>int <function>mallctl</function></funcdef>
31780-          <paramdef>const char *<parameter>name</parameter></paramdef>
31781-          <paramdef>void *<parameter>oldp</parameter></paramdef>
31782-          <paramdef>size_t *<parameter>oldlenp</parameter></paramdef>
31783-          <paramdef>void *<parameter>newp</parameter></paramdef>
31784-          <paramdef>size_t <parameter>newlen</parameter></paramdef>
31785-        </funcprototype>
31786-        <funcprototype>
31787-          <funcdef>int <function>mallctlnametomib</function></funcdef>
31788-          <paramdef>const char *<parameter>name</parameter></paramdef>
31789-          <paramdef>size_t *<parameter>mibp</parameter></paramdef>
31790-          <paramdef>size_t *<parameter>miblenp</parameter></paramdef>
31791-        </funcprototype>
31792-        <funcprototype>
31793-          <funcdef>int <function>mallctlbymib</function></funcdef>
31794-          <paramdef>const size_t *<parameter>mib</parameter></paramdef>
31795-          <paramdef>size_t <parameter>miblen</parameter></paramdef>
31796-          <paramdef>void *<parameter>oldp</parameter></paramdef>
31797-          <paramdef>size_t *<parameter>oldlenp</parameter></paramdef>
31798-          <paramdef>void *<parameter>newp</parameter></paramdef>
31799-          <paramdef>size_t <parameter>newlen</parameter></paramdef>
31800-        </funcprototype>
31801-        <funcprototype>
31802-          <funcdef>void <function>malloc_stats_print</function></funcdef>
31803-          <paramdef>void <parameter>(*write_cb)</parameter>
31804-            <funcparams>void *, const char *</funcparams>
31805-          </paramdef>
31806-          <paramdef>void *<parameter>cbopaque</parameter></paramdef>
31807-          <paramdef>const char *<parameter>opts</parameter></paramdef>
31808-        </funcprototype>
31809-        <funcprototype>
31810-          <funcdef>size_t <function>malloc_usable_size</function></funcdef>
31811-          <paramdef>const void *<parameter>ptr</parameter></paramdef>
31812-        </funcprototype>
31813-        <funcprototype>
31814-          <funcdef>void <function>(*malloc_message)</function></funcdef>
31815-          <paramdef>void *<parameter>cbopaque</parameter></paramdef>
31816-          <paramdef>const char *<parameter>s</parameter></paramdef>
31817-        </funcprototype>
31818-        <para><type>const char *</type><varname>malloc_conf</varname>;</para>
31819-      </refsect2>
31820-    </funcsynopsis>
31821-  </refsynopsisdiv>
31822-  <refsect1 id="description">
31823-    <title>DESCRIPTION</title>
31824-    <refsect2>
31825-      <title>Standard API</title>
31826-
31827-      <para>The <function>malloc()</function> function allocates
31828-      <parameter>size</parameter> bytes of uninitialized memory.  The allocated
31829-      space is suitably aligned (after possible pointer coercion) for storage
31830-      of any type of object.</para>
31831-
31832-      <para>The <function>calloc()</function> function allocates
31833-      space for <parameter>number</parameter> objects, each
31834-      <parameter>size</parameter> bytes in length.  The result is identical to
31835-      calling <function>malloc()</function> with an argument of
31836-      <parameter>number</parameter> * <parameter>size</parameter>, with the
31837-      exception that the allocated memory is explicitly initialized to zero
31838-      bytes.</para>
31839-
31840-      <para>The <function>posix_memalign()</function> function
31841-      allocates <parameter>size</parameter> bytes of memory such that the
31842-      allocation's base address is a multiple of
31843-      <parameter>alignment</parameter>, and returns the allocation in the value
31844-      pointed to by <parameter>ptr</parameter>.  The requested
31845-      <parameter>alignment</parameter> must be a power of 2 at least as large as
31846-      <code language="C">sizeof(<type>void *</type>)</code>.</para>
31847-
31848-      <para>The <function>aligned_alloc()</function> function
31849-      allocates <parameter>size</parameter> bytes of memory such that the
31850-      allocation's base address is a multiple of
31851-      <parameter>alignment</parameter>.  The requested
31852-      <parameter>alignment</parameter> must be a power of 2.  Behavior is
31853-      undefined if <parameter>size</parameter> is not an integral multiple of
31854-      <parameter>alignment</parameter>.</para>
31855-
31856-      <para>The <function>realloc()</function> function changes the
31857-      size of the previously allocated memory referenced by
31858-      <parameter>ptr</parameter> to <parameter>size</parameter> bytes.  The
31859-      contents of the memory are unchanged up to the lesser of the new and old
31860-      sizes.  If the new size is larger, the contents of the newly allocated
31861-      portion of the memory are undefined.  Upon success, the memory referenced
31862-      by <parameter>ptr</parameter> is freed and a pointer to the newly
31863-      allocated memory is returned.  Note that
31864-      <function>realloc()</function> may move the memory allocation,
31865-      resulting in a different return value than <parameter>ptr</parameter>.
31866-      If <parameter>ptr</parameter> is <constant>NULL</constant>, the
31867-      <function>realloc()</function> function behaves identically to
31868-      <function>malloc()</function> for the specified size.</para>
31869-
31870-      <para>The <function>free()</function> function causes the
31871-      allocated memory referenced by <parameter>ptr</parameter> to be made
31872-      available for future allocations.  If <parameter>ptr</parameter> is
31873-      <constant>NULL</constant>, no action occurs.</para>
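-
-      <para>As a minimal sketch of the functions above (the 64-byte alignment
-      and the sizes are arbitrary example values), an aligned allocation can be
-      obtained and later grown as follows: <programlisting
-      language="C"><![CDATA[
-#include <stdlib.h>
-
-int
-main(void) {
-	void *p;
-
-	/* 64-byte aligned allocation of 1000 bytes. */
-	if (posix_memalign(&p, 64, 1000) != 0) {
-		return 1;
-	}
-
-	/* Grow the allocation; realloc() may move it, so keep p valid until the
-	 * call is known to have succeeded. */
-	void *q = realloc(p, 4000);
-	if (q == NULL) {
-		free(p);
-		return 1;
-	}
-	free(q);
-	return 0;
-}]]></programlisting></para>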
31874-    </refsect2>
31875-    <refsect2>
31876-      <title>Non-standard API</title>
31877-      <para>The <function>mallocx()</function>,
31878-      <function>rallocx()</function>,
31879-      <function>xallocx()</function>,
31880-      <function>sallocx()</function>,
31881-      <function>dallocx()</function>,
31882-      <function>sdallocx()</function>, and
31883-      <function>nallocx()</function> functions all have a
31884-      <parameter>flags</parameter> argument that can be used to specify
31885-      options.  The functions only check the options that are contextually
31886-      relevant.  Use bitwise or (<code language="C">|</code>) operations to
31887-      specify one or more of the following:
31888-        <variablelist>
31889-          <varlistentry id="MALLOCX_LG_ALIGN">
31890-            <term><constant>MALLOCX_LG_ALIGN(<parameter>la</parameter>)
31891-            </constant></term>
31892-
31893-            <listitem><para>Align the memory allocation to start at an address
31894-            that is a multiple of <code language="C">(1 &lt;&lt;
31895-            <parameter>la</parameter>)</code>.  This macro does not validate
31896-            that <parameter>la</parameter> is within the valid
31897-            range.</para></listitem>
31898-          </varlistentry>
31899-          <varlistentry id="MALLOCX_ALIGN">
31900-            <term><constant>MALLOCX_ALIGN(<parameter>a</parameter>)
31901-            </constant></term>
31902-
31903-            <listitem><para>Align the memory allocation to start at an address
31904-            that is a multiple of <parameter>a</parameter>, where
31905-            <parameter>a</parameter> is a power of two.  This macro does not
31906-            validate that <parameter>a</parameter> is a power of 2.
31907-            </para></listitem>
31908-          </varlistentry>
31909-          <varlistentry id="MALLOCX_ZERO">
31910-            <term><constant>MALLOCX_ZERO</constant></term>
31911-
31912-            <listitem><para>Initialize newly allocated memory to contain zero
31913-            bytes.  In the growing reallocation case, the real size prior to
31914-            reallocation defines the boundary between untouched bytes and those
31915-            that are initialized to contain zero bytes.  If this macro is
31916-            absent, newly allocated memory is uninitialized.</para></listitem>
31917-          </varlistentry>
31918-          <varlistentry id="MALLOCX_TCACHE">
31919-            <term><constant>MALLOCX_TCACHE(<parameter>tc</parameter>)
31920-            </constant></term>
31921-
31922-            <listitem><para>Use the thread-specific cache (tcache) specified by
31923-            the identifier <parameter>tc</parameter>, which must have been
31924-            acquired via the <link
31925-            linkend="tcache.create"><mallctl>tcache.create</mallctl></link>
31926-            mallctl.  This macro does not validate that
31927-            <parameter>tc</parameter> specifies a valid
31928-            identifier.</para></listitem>
31929-          </varlistentry>
31930-          <varlistentry id="MALLOC_TCACHE_NONE">
31931-            <term><constant>MALLOCX_TCACHE_NONE</constant></term>
31932-
31933-            <listitem><para>Do not use a thread-specific cache (tcache).  Unless
31934-            <constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant> or
31935-            <constant>MALLOCX_TCACHE_NONE</constant> is specified, an
31936-            automatically managed tcache will be used under many circumstances.
31937-            This macro cannot be used in the same <parameter>flags</parameter>
31938-            argument as
31939-            <constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant>.</para></listitem>
31940-          </varlistentry>
31941-          <varlistentry id="MALLOCX_ARENA">
31942-            <term><constant>MALLOCX_ARENA(<parameter>a</parameter>)
31943-            </constant></term>
31944-
31945-            <listitem><para>Use the arena specified by the index
31946-            <parameter>a</parameter>.  This macro has no effect for regions that
31947-            were allocated via an arena other than the one specified.  This
31948-            macro does not validate that <parameter>a</parameter> specifies an
31949-            arena index in the valid range.</para></listitem>
31950-          </varlistentry>
31951-        </variablelist>
31952-      </para>
31953-
31954-      <para>The <function>mallocx()</function> function allocates at
31955-      least <parameter>size</parameter> bytes of memory, and returns a pointer
31956-      to the base address of the allocation.  Behavior is undefined if
31957-      <parameter>size</parameter> is <constant>0</constant>.</para>
31958-
31959-      <para>The <function>rallocx()</function> function resizes the
31960-      allocation at <parameter>ptr</parameter> to be at least
31961-      <parameter>size</parameter> bytes, and returns a pointer to the base
31962-      address of the resulting allocation, which may or may not have moved from
31963-      its original location.  Behavior is undefined if
31964-      <parameter>size</parameter> is <constant>0</constant>.</para>
31965-
31966-      <para>The <function>xallocx()</function> function resizes the
31967-      allocation at <parameter>ptr</parameter> in place to be at least
31968-      <parameter>size</parameter> bytes, and returns the real size of the
31969-      allocation.  If <parameter>extra</parameter> is non-zero, an attempt is
31970-      made to resize the allocation to be at least <code
31971-      language="C">(<parameter>size</parameter> +
31972-      <parameter>extra</parameter>)</code> bytes, though inability to allocate
31973-      the extra byte(s) will not by itself result in failure to resize.
31974-      Behavior is undefined if <parameter>size</parameter> is
31975-      <constant>0</constant>, or if <code
31976-      language="C">(<parameter>size</parameter> + <parameter>extra</parameter>
31977-      &gt; <constant>SIZE_T_MAX</constant>)</code>.</para>
31978-
31979-      <para>The <function>sallocx()</function> function returns the
31980-      real size of the allocation at <parameter>ptr</parameter>.</para>
31981-
31982-      <para>The <function>dallocx()</function> function causes the
31983-      memory referenced by <parameter>ptr</parameter> to be made available for
31984-      future allocations.</para>
31985-
31986-      <para>The <function>sdallocx()</function> function is an
31987-      extension of <function>dallocx()</function> with a
31988-      <parameter>size</parameter> parameter to allow the caller to pass in the
31989-      allocation size as an optimization.  The minimum valid input size is the
31990-      original requested size of the allocation, and the maximum valid input
31991-      size is the corresponding value returned by
31992-      <function>nallocx()</function> or
31993-      <function>sallocx()</function>.</para>
31994-
31995-      <para>The <function>nallocx()</function> function allocates no
31996-      memory, but it performs the same size computation as the
31997-      <function>mallocx()</function> function, and returns the real
31998-      size of the allocation that would result from the equivalent
31999-      <function>mallocx()</function> function call, or
32000-      <constant>0</constant> if the inputs exceed the maximum supported size
32001-      class and/or alignment.  Behavior is undefined if
32002-      <parameter>size</parameter> is <constant>0</constant>.</para>
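-
-      <para>For illustration only (the 100-byte size and 64-byte alignment are
-      arbitrary, and the <filename
-      class="headerfile">jemalloc/jemalloc.h</filename> header is assumed as in
-      the SYNOPSIS), the flags and size-aware functions above might be combined
-      as follows: <programlisting language="C"><![CDATA[
-#include <stdio.h>
-#include <jemalloc/jemalloc.h>
-
-int
-main(void) {
-	size_t sz = 100;
-	int flags = MALLOCX_ALIGN(64) | MALLOCX_ZERO;
-
-	size_t real = nallocx(sz, flags);	/* size mallocx() would really use */
-	void *p = mallocx(sz, flags);
-	if (p == NULL) {
-		return 1;
-	}
-	printf("requested %zu, real %zu, reported %zu\n", sz, real,
-	    sallocx(p, flags));
-
-	/* Size-aware deallocation; any size in [sz, real] is valid here. */
-	sdallocx(p, sz, flags);
-	return 0;
-}]]></programlisting></para>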
32003-
32004-      <para>The <function>mallctl()</function> function provides a
32005-      general interface for introspecting the memory allocator, as well as
32006-      setting modifiable parameters and triggering actions.  The
32007-      period-separated <parameter>name</parameter> argument specifies a
32008-      location in a tree-structured namespace; see the <xref
32009-      linkend="mallctl_namespace" xrefstyle="template:%t"/> section for
32010-      documentation on the tree contents.  To read a value, pass a pointer via
32011-      <parameter>oldp</parameter> to adequate space to contain the value, and a
32012-      pointer to its length via <parameter>oldlenp</parameter>; otherwise pass
32013-      <constant>NULL</constant> and <constant>NULL</constant>.  Similarly, to
32014-      write a value, pass a pointer to the value via
32015-      <parameter>newp</parameter>, and its length via
32016-      <parameter>newlen</parameter>; otherwise pass <constant>NULL</constant>
32017-      and <constant>0</constant>.</para>
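-
-      <para>As a sketch of both directions (the <mallctl>epoch</mallctl> name is
-      documented in the <xref linkend="mallctl_namespace"
-      xrefstyle="template:%t"/> section; <mallctl>stats.allocated</mallctl> is
-      assumed here and requires a build with <option>--enable-stats</option>):
-      <programlisting language="C"><![CDATA[
-#include <stdint.h>
-#include <stdio.h>
-#include <jemalloc/jemalloc.h>
-
-int
-main(void) {
-	/* Write (and read back) "epoch" to refresh cached statistics. */
-	uint64_t epoch = 1;
-	size_t sz = sizeof(epoch);
-	mallctl("epoch", &epoch, &sz, &epoch, sz);
-
-	/* Read "stats.allocated". */
-	size_t allocated;
-	sz = sizeof(allocated);
-	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
-		printf("allocated: %zu bytes\n", allocated);
-	}
-	return 0;
-}]]></programlisting></para>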
32018-
32019-      <para>The <function>mallctlnametomib()</function> function
32020-      provides a way to avoid repeated name lookups for applications that
32021-      repeatedly query the same portion of the namespace, by translating a name
32022-      to a <quote>Management Information Base</quote> (MIB) that can be passed
32023-      repeatedly to <function>mallctlbymib()</function>.  Upon
32024-      successful return from <function>mallctlnametomib()</function>,
32025-      <parameter>mibp</parameter> contains an array of
32026-      <parameter>*miblenp</parameter> integers, where
32027-      <parameter>*miblenp</parameter> is the lesser of the number of components
32028-      in <parameter>name</parameter> and the input value of
32029-      <parameter>*miblenp</parameter>.  Thus it is possible to pass a
32030-      <parameter>*miblenp</parameter> that is smaller than the number of
32031-      period-separated name components, which results in a partial MIB that can
32032-      be used as the basis for constructing a complete MIB.  For name
32033-      components that are integers (e.g. the 2 in
32034-      <link
32035-      linkend="arenas.bin.i.size"><mallctl>arenas.bin.2.size</mallctl></link>),
32036-      the corresponding MIB component will always be that integer.  Therefore,
32037-      it is legitimate to construct code like the following: <programlisting
32038-      language="C"><![CDATA[
32039-unsigned nbins, i;
32040-size_t mib[4];
32041-size_t len, miblen;
32042-
32043-len = sizeof(nbins);
32044-mallctl("arenas.nbins", &nbins, &len, NULL, 0);
32045-
32046-miblen = 4;
32047-mallctlnametomib("arenas.bin.0.size", mib, &miblen);
32048-for (i = 0; i < nbins; i++) {
32049-	size_t bin_size;
32050-
32051-	mib[2] = i;
32052-	len = sizeof(bin_size);
32053-	mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0);
32054-	/* Do something with bin_size... */
32055-}]]></programlisting></para>
32056-
32057-      <varlistentry id="malloc_stats_print_opts">
32058-      </varlistentry>
32059-      <para>The <function>malloc_stats_print()</function> function writes
32060-      summary statistics via the <parameter>write_cb</parameter> callback
32061-      function pointer and <parameter>cbopaque</parameter> data passed to
32062-      <parameter>write_cb</parameter>, or <function>malloc_message()</function>
32063-      if <parameter>write_cb</parameter> is <constant>NULL</constant>.  The
32064-      statistics are presented in human-readable form unless <quote>J</quote> is
32065-      specified as a character within the <parameter>opts</parameter> string, in
32066-      which case the statistics are presented in <ulink
32067-      url="http://www.json.org/">JSON format</ulink>.  This function can be
32068-      called repeatedly.  General information that never changes during
32069-      execution can be omitted by specifying <quote>g</quote> as a character
32070-      within the <parameter>opts</parameter> string.  Note that
32071-      <function>malloc_stats_print()</function> uses the
32072-      <function>mallctl*()</function> functions internally, so inconsistent
32073-      statistics can be reported if multiple threads use these functions
32074-      simultaneously.  If <option>--enable-stats</option> is specified during
32075-      configuration, <quote>m</quote>, <quote>d</quote>, and <quote>a</quote>
32076-      can be specified to omit merged arena, destroyed merged arena, and per
32077-      arena statistics, respectively; <quote>b</quote> and <quote>l</quote> can
32078-      be specified to omit per size class statistics for bins and large objects,
32079-      respectively; <quote>x</quote> can be specified to omit all mutex
32080-      statistics; <quote>e</quote> can be used to omit extent statistics.
32081-      Unrecognized characters are silently ignored.  Note that thread caching
32082-      may prevent some statistics from being completely up to date, since extra
32083-      locking would be required to merge counters that track thread cache
32084-      operations.</para>
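-
-      <para>For example (a sketch only; the callback and the <quote>g</quote>
-      and <quote>J</quote> option characters follow the description above):
-      <programlisting language="C"><![CDATA[
-#include <stdio.h>
-#include <jemalloc/jemalloc.h>
-
-/* Forward each chunk of statistics output to a caller-chosen stream. */
-static void
-write_cb(void *cbopaque, const char *msg) {
-	fputs(msg, (FILE *)cbopaque);
-}
-
-int
-main(void) {
-	/* Human-readable form, omitting general information ("g"). */
-	malloc_stats_print(write_cb, stderr, "g");
-
-	/* JSON form, written via the default malloc_message() sink. */
-	malloc_stats_print(NULL, NULL, "J");
-	return 0;
-}]]></programlisting></para>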
32085-
32086-      <para>The <function>malloc_usable_size()</function> function
32087-      returns the usable size of the allocation pointed to by
32088-      <parameter>ptr</parameter>.  The return value may be larger than the size
32089-      that was requested during allocation.  The
32090-      <function>malloc_usable_size()</function> function is not a
32091-      mechanism for in-place <function>realloc()</function>; rather
32092-      it is provided solely as a tool for introspection purposes.  Any
32093-      discrepancy between the requested allocation size and the size reported
32094-      by <function>malloc_usable_size()</function> should not be
32095-      depended on, since such behavior is entirely implementation-dependent.
32096-      </para>
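-
-      <para>A small sketch of such introspection (the 100-byte request is an
-      arbitrary example): <programlisting language="C"><![CDATA[
-#include <stdio.h>
-#include <stdlib.h>
-#include <jemalloc/jemalloc.h>
-
-int
-main(void) {
-	void *p = malloc(100);
-	if (p == NULL) {
-		return 1;
-	}
-	/* The reported size may exceed the 100 bytes requested; portable code
-	 * must not rely on the exact value. */
-	printf("usable size: %zu\n", malloc_usable_size(p));
-	free(p);
-	return 0;
-}]]></programlisting></para>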
32097-    </refsect2>
32098-  </refsect1>
32099-  <refsect1 id="tuning">
32100-    <title>TUNING</title>
32101-    <para>The allocator initializes its internals once, when the first call is
32102-    made to one of the memory allocation routines, based in part on various
32103-    options that can be specified at compile time or run time.</para>
32104-
32105-    <para>The string specified via <option>--with-malloc-conf</option>, the
32106-    string pointed to by the global variable <varname>malloc_conf</varname>, the
32107-    <quote>name</quote> of the file referenced by the symbolic link named
32108-    <filename class="symlink">/etc/malloc.conf</filename>, and the value of the
32109-    environment variable <envar>MALLOC_CONF</envar>, will be interpreted, in
32110-    that order, from left to right as options.  Note that
32111-    <varname>malloc_conf</varname> may be read before
32112-    <function>main()</function> is entered, so the declaration of
32113-    <varname>malloc_conf</varname> should specify an initializer that contains
32114-    the final value to be read by jemalloc.  <option>--with-malloc-conf</option>
32115-    and <varname>malloc_conf</varname> are compile-time mechanisms, whereas
32116-    <filename class="symlink">/etc/malloc.conf</filename> and
32117-    <envar>MALLOC_CONF</envar> can be safely set any time prior to program
32118-    invocation.</para>
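-
-    <para>As a sketch of the compile-time mechanism (assuming an unprefixed
-    build, and reusing the example option string shown below), the global can be
-    defined with its final value; the same options could instead be supplied at
-    run time via <envar>MALLOC_CONF</envar>: <programlisting
-    language="C"><![CDATA[
-#include <stdlib.h>
-#include <jemalloc/jemalloc.h>
-
-/* Read by jemalloc before main() is entered, so the initializer must already
- * hold the final value.  The options are example values only. */
-const char *malloc_conf = "abort:true,narenas:1";
-
-int
-main(void) {
-	free(malloc(1));	/* the first allocation triggers initialization */
-	return 0;
-}]]></programlisting></para>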
32119-
32120-    <para>An options string is a comma-separated list of option:value pairs.
32121-    There is one key corresponding to each <link
32122-    linkend="opt.abort"><mallctl>opt.*</mallctl></link> mallctl (see the <xref
32123-    linkend="mallctl_namespace" xrefstyle="template:%t"/> section for options
32124-    documentation).  For example, <literal>abort:true,narenas:1</literal> sets
32125-    the <link linkend="opt.abort"><mallctl>opt.abort</mallctl></link> and <link
32126-    linkend="opt.narenas"><mallctl>opt.narenas</mallctl></link> options.  Some
32127-    options have boolean values (true/false), others have integer values (base
32128-    8, 10, or 16, depending on prefix), and yet others have raw string
32129-    values.</para>
32130-  </refsect1>
32131-  <refsect1 id="implementation_notes">
32132-    <title>IMPLEMENTATION NOTES</title>
32133-    <para>Traditionally, allocators have used
32134-    <citerefentry><refentrytitle>sbrk</refentrytitle>
32135-    <manvolnum>2</manvolnum></citerefentry> to obtain memory, which is
32136-    suboptimal for several reasons, including race conditions, increased
32137-    fragmentation, and artificial limitations on maximum usable memory.  If
32138-    <citerefentry><refentrytitle>sbrk</refentrytitle>
32139-    <manvolnum>2</manvolnum></citerefentry> is supported by the operating
32140-    system, this allocator uses both
32141-    <citerefentry><refentrytitle>mmap</refentrytitle>
32142-    <manvolnum>2</manvolnum></citerefentry> and
32143-    <citerefentry><refentrytitle>sbrk</refentrytitle>
32144-    <manvolnum>2</manvolnum></citerefentry>, in that order of preference;
32145-    otherwise only <citerefentry><refentrytitle>mmap</refentrytitle>
32146-    <manvolnum>2</manvolnum></citerefentry> is used.</para>
32147-
32148-    <para>This allocator uses multiple arenas in order to reduce lock
32149-    contention for threaded programs on multi-processor systems.  This works
32150-    well with regard to threading scalability, but incurs some costs.  There is
32151-    a small fixed per-arena overhead, and additionally, arenas manage memory
32152-    completely independently of each other, which means a small fixed increase
32153-    in overall memory fragmentation.  These overheads are not generally an
32154-    issue, given the number of arenas normally used.  Note that using
32155-    substantially more arenas than the default is not likely to improve
32156-    performance, mainly due to reduced cache performance.  However, it may make
32157-    sense to reduce the number of arenas if an application does not make much
32158-    use of the allocation functions.</para>
32159-
32160-    <para>In addition to multiple arenas, this allocator supports
32161-    thread-specific caching, in order to make it possible to completely avoid
32162-    synchronization for most allocation requests.  Such caching allows very fast
32163-    allocation in the common case, but it increases memory usage and
32164-    fragmentation, since a bounded number of objects can remain allocated in
32165-    each thread cache.</para>
32166-
32167-    <para>Memory is conceptually broken into extents.  Extents are always
32168-    aligned to multiples of the page size.  This alignment makes it possible to
32169-    find metadata for user objects quickly.  User objects are broken into two
32170-    categories according to size: small and large.  Contiguous small objects
32171-    comprise a slab, which resides within a single extent, whereas large objects
32172-    each have their own extents backing them.</para>
32173-
32174-    <para>Small objects are managed in groups by slabs.  Each slab maintains
32175-    a bitmap to track which regions are in use.  Allocation requests that are no
32176-    more than half the quantum (8 or 16, depending on architecture) are rounded
32177-    up to the nearest power of two that is at least <code
32178-    language="C">sizeof(<type>double</type>)</code>.  All other object size
32179-    classes are multiples of the quantum, spaced such that there are four size
32180-    classes for each doubling in size, which limits internal fragmentation to
32181-    approximately 20% for all but the smallest size classes.  Small size classes
32182-    are smaller than four times the page size, and large size classes extend
32183-    from four times the page size up to the largest size class that does not
32184-    exceed <constant>PTRDIFF_MAX</constant>.</para>
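-
-    <para>As a worked sketch (assuming the 16-byte quantum and the size classes
-    tabulated below), <function>nallocx()</function> shows how requests round up
-    to their size class: <programlisting language="C"><![CDATA[
-#include <stdio.h>
-#include <jemalloc/jemalloc.h>
-
-int
-main(void) {
-	/* Expected with a 16-byte quantum: 100 -> 112, 1000 -> 1024,
-	 * 5000 -> 5120 (5 KiB). */
-	size_t requests[] = {100, 1000, 5000};
-	for (size_t i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
-		printf("%zu -> %zu\n", requests[i], nallocx(requests[i], 0));
-	}
-	return 0;
-}]]></programlisting></para>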
32185-
32186-    <para>Allocations are packed tightly together, which can be an issue for
32187-    multi-threaded applications.  If you need to assure that allocations do not
32188-    suffer from cacheline sharing, round your allocation requests up to the
32189-    nearest multiple of the cacheline size, or specify cacheline alignment when
32190-    allocating.</para>
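-
-    <para>For example (64 bytes is an assumed cacheline size; real code should
-    query the target platform): <programlisting language="C"><![CDATA[
-#include <stdlib.h>
-#include <jemalloc/jemalloc.h>
-
-#define CACHELINE 64	/* assumed cacheline size for this example */
-
-int
-main(void) {
-	size_t sz = 100;
-
-	/* Either round the request up to a cacheline multiple... */
-	void *a = malloc((sz + CACHELINE - 1) & ~((size_t)CACHELINE - 1));
-
-	/* ...or request cacheline alignment explicitly. */
-	void *b = mallocx(sz, MALLOCX_ALIGN(CACHELINE));
-
-	free(a);
-	if (b != NULL) {
-		dallocx(b, 0);
-	}
-	return 0;
-}]]></programlisting></para>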
32191-
32192-    <para>The <function>realloc()</function>,
32193-    <function>rallocx()</function>, and
32194-    <function>xallocx()</function> functions may resize allocations
32195-    without moving them under limited circumstances.  Unlike the
32196-    <function>*allocx()</function> API, the standard API does not
32197-    officially round up the usable size of an allocation to the nearest size
32198-    class, so technically it is necessary to call
32199-    <function>realloc()</function> to grow e.g. a 9-byte allocation to
32200-    16 bytes, or shrink a 16-byte allocation to 9 bytes.  Growth and shrinkage
32201-    trivially succeeds in place as long as the pre-size and post-size both round
32202-    up to the same size class.  No other API guarantees are made regarding
32203-    in-place resizing, but the current implementation also tries to resize large
32204-    allocations in place, as long as the pre-size and post-size are both large.
32205-    For shrinkage to succeed, the extent allocator must support splitting (see
32206-    <link
32207-    linkend="arena.i.extent_hooks"><mallctl>arena.&lt;i&gt;.extent_hooks</mallctl></link>).
32208-    Growth only succeeds if the trailing memory is currently available, and the
32209-    extent allocator supports merging.</para>
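-
-    <para>A sketch of the grow-in-place pattern described above (the sizes are
-    arbitrary example values): <programlisting language="C"><![CDATA[
-#include <jemalloc/jemalloc.h>
-
-int
-main(void) {
-	size_t sz = 4096;
-	void *p = mallocx(sz, 0);
-	if (p == NULL) {
-		return 1;
-	}
-
-	/* xallocx() never moves the allocation; it reports the resulting size. */
-	size_t new_sz = 2 * sz;
-	if (xallocx(p, new_sz, 0, 0) < new_sz) {
-		/* Could not grow in place; rallocx() may move the allocation. */
-		void *q = rallocx(p, new_sz, 0);
-		if (q == NULL) {
-			dallocx(p, 0);
-			return 1;
-		}
-		p = q;
-	}
-	dallocx(p, 0);
-	return 0;
-}]]></programlisting></para>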
32210-
32211-    <para>Assuming 4 KiB pages and a 16-byte quantum on a 64-bit system, the
32212-    size classes in each category are as shown in <xref linkend="size_classes"
32213-    xrefstyle="template:Table %n"/>.</para>
32214-
32215-    <table xml:id="size_classes" frame="all">
32216-      <title>Size classes</title>
32217-      <tgroup cols="3" colsep="1" rowsep="1">
32218-      <colspec colname="c1" align="left"/>
32219-      <colspec colname="c2" align="right"/>
32220-      <colspec colname="c3" align="left"/>
32221-      <thead>
32222-        <row>
32223-          <entry>Category</entry>
32224-          <entry>Spacing</entry>
32225-          <entry>Size</entry>
32226-        </row>
32227-      </thead>
32228-      <tbody>
32229-        <row>
32230-          <entry morerows="8">Small</entry>
32231-          <entry>lg</entry>
32232-          <entry>[8]</entry>
32233-        </row>
32234-        <row>
32235-          <entry>16</entry>
32236-          <entry>[16, 32, 48, 64, 80, 96, 112, 128]</entry>
32237-        </row>
32238-        <row>
32239-          <entry>32</entry>
32240-          <entry>[160, 192, 224, 256]</entry>
32241-        </row>
32242-        <row>
32243-          <entry>64</entry>
32244-          <entry>[320, 384, 448, 512]</entry>
32245-        </row>
32246-        <row>
32247-          <entry>128</entry>
32248-          <entry>[640, 768, 896, 1024]</entry>
32249-        </row>
32250-        <row>
32251-          <entry>256</entry>
32252-          <entry>[1280, 1536, 1792, 2048]</entry>
32253-        </row>
32254-        <row>
32255-          <entry>512</entry>
32256-          <entry>[2560, 3072, 3584, 4096]</entry>
32257-        </row>
32258-        <row>
32259-          <entry>1 KiB</entry>
32260-          <entry>[5 KiB, 6 KiB, 7 KiB, 8 KiB]</entry>
32261-        </row>
32262-        <row>
32263-          <entry>2 KiB</entry>
32264-          <entry>[10 KiB, 12 KiB, 14 KiB]</entry>
32265-        </row>
32266-        <row>
32267-          <entry morerows="15">Large</entry>
32268-          <entry>2 KiB</entry>
32269-          <entry>[16 KiB]</entry>
32270-        </row>
32271-        <row>
32272-          <entry>4 KiB</entry>
32273-          <entry>[20 KiB, 24 KiB, 28 KiB, 32 KiB]</entry>
32274-        </row>
32275-        <row>
32276-          <entry>8 KiB</entry>
32277-          <entry>[40 KiB, 48 KiB, 56 KiB, 64 KiB]</entry>
32278-        </row>
32279-        <row>
32280-          <entry>16 KiB</entry>
32281-          <entry>[80 KiB, 96 KiB, 112 KiB, 128 KiB]</entry>
32282-        </row>
32283-        <row>
32284-          <entry>32 KiB</entry>
32285-          <entry>[160 KiB, 192 KiB, 224 KiB, 256 KiB]</entry>
32286-        </row>
32287-        <row>
32288-          <entry>64 KiB</entry>
32289-          <entry>[320 KiB, 384 KiB, 448 KiB, 512 KiB]</entry>
32290-        </row>
32291-        <row>
32292-          <entry>128 KiB</entry>
32293-          <entry>[640 KiB, 768 KiB, 896 KiB, 1 MiB]</entry>
32294-        </row>
32295-        <row>
32296-          <entry>256 KiB</entry>
32297-          <entry>[1280 KiB, 1536 KiB, 1792 KiB, 2 MiB]</entry>
32298-        </row>
32299-        <row>
32300-          <entry>512 KiB</entry>
32301-          <entry>[2560 KiB, 3 MiB, 3584 KiB, 4 MiB]</entry>
32302-        </row>
32303-        <row>
32304-          <entry>1 MiB</entry>
32305-          <entry>[5 MiB, 6 MiB, 7 MiB, 8 MiB]</entry>
32306-        </row>
32307-        <row>
32308-          <entry>2 MiB</entry>
32309-          <entry>[10 MiB, 12 MiB, 14 MiB, 16 MiB]</entry>
32310-        </row>
32311-        <row>
32312-          <entry>4 MiB</entry>
32313-          <entry>[20 MiB, 24 MiB, 28 MiB, 32 MiB]</entry>
32314-        </row>
32315-        <row>
32316-          <entry>8 MiB</entry>
32317-          <entry>[40 MiB, 48 MiB, 56 MiB, 64 MiB]</entry>
32318-        </row>
32319-        <row>
32320-          <entry>...</entry>
32321-          <entry>...</entry>
32322-        </row>
32323-        <row>
32324-          <entry>512 PiB</entry>
32325-          <entry>[2560 PiB, 3 EiB, 3584 PiB, 4 EiB]</entry>
32326-        </row>
32327-        <row>
32328-          <entry>1 EiB</entry>
32329-          <entry>[5 EiB, 6 EiB, 7 EiB]</entry>
32330-        </row>
32331-      </tbody>
32332-      </tgroup>
32333-    </table>
32334-  </refsect1>
32335-  <refsect1 id="mallctl_namespace">
32336-    <title>MALLCTL NAMESPACE</title>
32337-    <para>The following names are defined in the namespace accessible via the
32338-    <function>mallctl*()</function> functions.  Value types are specified in
32339-    parentheses, their readable/writable statuses are encoded as
32340-    <literal>rw</literal>, <literal>r-</literal>, <literal>-w</literal>, or
32341-    <literal>--</literal>, and required build configuration flags follow, if
32342-    any.  A name element encoded as <literal>&lt;i&gt;</literal> or
32343-    <literal>&lt;j&gt;</literal> indicates an integer component, where the
32344-    integer varies from 0 to some upper value that must be determined via
32345-    introspection.  In the case of <mallctl>stats.arenas.&lt;i&gt;.*</mallctl>
32346-    and <mallctl>arena.&lt;i&gt;.{initialized,purge,decay,dss}</mallctl>,
32347-    <literal>&lt;i&gt;</literal> equal to
32348-    <constant>MALLCTL_ARENAS_ALL</constant> can be used to operate on all arenas
32349-    or access the summation of statistics from all arenas; similarly
32350-    <literal>&lt;i&gt;</literal> equal to
32351-    <constant>MALLCTL_ARENAS_DESTROYED</constant> can be used to access the
32352-    summation of statistics from all destroyed arenas.  These constants can be
32353-    utilized either via <function>mallctlnametomib()</function> followed by
32354-    <function>mallctlbymib()</function>, or via code such as the following:
32355-    <programlisting language="C"><![CDATA[
32356-#define STRINGIFY_HELPER(x) #x
32357-#define STRINGIFY(x) STRINGIFY_HELPER(x)
32358-
32359-mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
32360-    NULL, NULL, NULL, 0);]]></programlisting>
32361-    Take special note of the <link
32362-    linkend="epoch"><mallctl>epoch</mallctl></link> mallctl, which controls
32363-    refreshing of cached dynamic statistics.</para>
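-
-    <para>As an illustrative sketch, the MIB-based approach might look as
-    follows, here reading <link
-    linkend="arena.i.dirty_decay_ms"><mallctl>arena.&lt;i&gt;.dirty_decay_ms</mallctl></link>
-    for arena 0 (any other name is handled the same way):
-    <programlisting language="C"><![CDATA[
-/* Translate the name once, then query it repeatedly by MIB. */
-size_t mib[3];
-size_t miblen = sizeof(mib) / sizeof(size_t);
-ssize_t decay_ms;
-size_t sz = sizeof(decay_ms);
-
-mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen);
-mallctlbymib(mib, miblen, &decay_ms, &sz, NULL, 0);]]></programlisting>
-    </para>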
32364-
32365-    <variablelist>
32366-      <varlistentry id="version">
32367-        <term>
32368-          <mallctl>version</mallctl>
32369-          (<type>const char *</type>)
32370-          <literal>r-</literal>
32371-        </term>
32372-        <listitem><para>Return the jemalloc version string.</para></listitem>
32373-      </varlistentry>
32374-
32375-      <varlistentry id="epoch">
32376-        <term>
32377-          <mallctl>epoch</mallctl>
32378-          (<type>uint64_t</type>)
32379-          <literal>rw</literal>
32380-        </term>
32381-        <listitem><para>If a value is passed in, refresh the data from which
32382-        the <function>mallctl*()</function> functions report values,
32383-        and increment the epoch.  Return the current epoch.  This is useful for
32384-        detecting whether another thread caused a refresh.</para>
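-        <para>A minimal sketch of the usual idiom (write any value, then read
-        back the new epoch):
-        <programlisting language="C"><![CDATA[
-uint64_t epoch = 1;
-size_t sz = sizeof(epoch);
-/* Writing advances the epoch and refreshes cached statistics. */
-mallctl("epoch", &epoch, &sz, &epoch, sz);]]></programlisting>
-        </para></listitem>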
32385-      </varlistentry>
32386-
32387-      <varlistentry id="background_thread">
32388-        <term>
32389-          <mallctl>background_thread</mallctl>
32390-          (<type>bool</type>)
32391-          <literal>rw</literal>
32392-        </term>
32393-        <listitem><para>Enable/disable internal background worker threads.  When
32394-        set to true, background threads are created on demand (the number of
32395-        background threads will be no more than the number of CPUs or active
32396-        arenas).  Threads run periodically, and handle <link
32397-        linkend="arena.i.decay">purging</link> asynchronously.  When switching
32398-        off, background threads are terminated synchronously.  Note that after a
32399-        <citerefentry><refentrytitle>fork</refentrytitle><manvolnum>2</manvolnum></citerefentry>
32400-        call, the state in the child process will be disabled regardless of
32401-        the state in the parent process.  See <link
32402-        linkend="stats.background_thread.num_threads"><mallctl>stats.background_thread</mallctl></link>
32403-        for related stats.  <link
32404-        linkend="opt.background_thread"><mallctl>opt.background_thread</mallctl></link>
32405-        can be used to set the default option.  This option is only available on
32406-        selected pthread-based platforms.</para>
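-        <para>For example, on platforms where background threads are available,
-        they can be switched on at run time via:
-        <programlisting language="C"><![CDATA[
-/* Enable background purging threads; pass false instead to disable them. */
-bool enable = true;
-mallctl("background_thread", NULL, NULL, &enable, sizeof(bool));]]></programlisting>
-        </para></listitem>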
32407-      </varlistentry>
32408-
32409-      <varlistentry id="max_background_threads">
32410-        <term>
32411-          <mallctl>max_background_threads</mallctl>
32412-          (<type>size_t</type>)
32413-          <literal>rw</literal>
32414-        </term>
32415-        <listitem><para>Maximum number of background worker threads that will
32416-        be created.  This value is capped at <link
32417-        linkend="opt.max_background_threads"><mallctl>opt.max_background_threads</mallctl></link> at
32418-        startup.</para></listitem>
32419-      </varlistentry>
32420-
32421-      <varlistentry id="config.cache_oblivious">
32422-        <term>
32423-          <mallctl>config.cache_oblivious</mallctl>
32424-          (<type>bool</type>)
32425-          <literal>r-</literal>
32426-        </term>
32427-        <listitem><para><option>--enable-cache-oblivious</option> was specified
32428-        during build configuration.</para></listitem>
32429-      </varlistentry>
32430-
32431-      <varlistentry id="config.debug">
32432-        <term>
32433-          <mallctl>config.debug</mallctl>
32434-          (<type>bool</type>)
32435-          <literal>r-</literal>
32436-        </term>
32437-        <listitem><para><option>--enable-debug</option> was specified during
32438-        build configuration.</para></listitem>
32439-      </varlistentry>
32440-
32441-      <varlistentry id="config.fill">
32442-        <term>
32443-          <mallctl>config.fill</mallctl>
32444-          (<type>bool</type>)
32445-          <literal>r-</literal>
32446-        </term>
32447-        <listitem><para><option>--enable-fill</option> was specified during
32448-        build configuration.</para></listitem>
32449-      </varlistentry>
32450-
32451-      <varlistentry id="config.lazy_lock">
32452-        <term>
32453-          <mallctl>config.lazy_lock</mallctl>
32454-          (<type>bool</type>)
32455-          <literal>r-</literal>
32456-        </term>
32457-        <listitem><para><option>--enable-lazy-lock</option> was specified
32458-        during build configuration.</para></listitem>
32459-      </varlistentry>
32460-
32461-      <varlistentry id="config.malloc_conf">
32462-        <term>
32463-          <mallctl>config.malloc_conf</mallctl>
32464-          (<type>const char *</type>)
32465-          <literal>r-</literal>
32466-        </term>
32467-        <listitem><para>Embedded configure-time-specified run-time options
32468-        string, empty unless <option>--with-malloc-conf</option> was specified
32469-        during build configuration.</para></listitem>
32470-      </varlistentry>
32471-
32472-      <varlistentry id="config.prof">
32473-        <term>
32474-          <mallctl>config.prof</mallctl>
32475-          (<type>bool</type>)
32476-          <literal>r-</literal>
32477-        </term>
32478-        <listitem><para><option>--enable-prof</option> was specified during
32479-        build configuration.</para></listitem>
32480-      </varlistentry>
32481-
32482-      <varlistentry id="config.prof_libgcc">
32483-        <term>
32484-          <mallctl>config.prof_libgcc</mallctl>
32485-          (<type>bool</type>)
32486-          <literal>r-</literal>
32487-        </term>
32488-        <listitem><para><option>--disable-prof-libgcc</option> was not
32489-        specified during build configuration.</para></listitem>
32490-      </varlistentry>
32491-
32492-      <varlistentry id="config.prof_libunwind">
32493-        <term>
32494-          <mallctl>config.prof_libunwind</mallctl>
32495-          (<type>bool</type>)
32496-          <literal>r-</literal>
32497-        </term>
32498-        <listitem><para><option>--enable-prof-libunwind</option> was specified
32499-        during build configuration.</para></listitem>
32500-      </varlistentry>
32501-
32502-      <varlistentry id="config.stats">
32503-        <term>
32504-          <mallctl>config.stats</mallctl>
32505-          (<type>bool</type>)
32506-          <literal>r-</literal>
32507-        </term>
32508-        <listitem><para><option>--enable-stats</option> was specified during
32509-        build configuration.</para></listitem>
32510-      </varlistentry>
32511-
32513-      <varlistentry id="config.utrace">
32514-        <term>
32515-          <mallctl>config.utrace</mallctl>
32516-          (<type>bool</type>)
32517-          <literal>r-</literal>
32518-        </term>
32519-        <listitem><para><option>--enable-utrace</option> was specified during
32520-        build configuration.</para></listitem>
32521-      </varlistentry>
32522-
32523-      <varlistentry id="config.xmalloc">
32524-        <term>
32525-          <mallctl>config.xmalloc</mallctl>
32526-          (<type>bool</type>)
32527-          <literal>r-</literal>
32528-        </term>
32529-        <listitem><para><option>--enable-xmalloc</option> was specified during
32530-        build configuration.</para></listitem>
32531-      </varlistentry>
32532-
32533-      <varlistentry id="opt.abort">
32534-        <term>
32535-          <mallctl>opt.abort</mallctl>
32536-          (<type>bool</type>)
32537-          <literal>r-</literal>
32538-        </term>
32539-        <listitem><para>Abort-on-warning enabled/disabled.  If true, most
32540-        warnings are fatal.  Note that runtime option warnings are not included
32541-        (see <link
32542-        linkend="opt.abort_conf"><mallctl>opt.abort_conf</mallctl></link> for
32543-        that). The process will call
32544-        <citerefentry><refentrytitle>abort</refentrytitle>
32545-        <manvolnum>3</manvolnum></citerefentry> in these cases.  This option is
32546-        disabled by default unless <option>--enable-debug</option> is
32547-        specified during configuration, in which case it is enabled by default.
32548-        </para></listitem>
32549-      </varlistentry>
32550-
32551-      <varlistentry id="opt.confirm_conf">
32552-        <term>
32553-          <mallctl>opt.confirm_conf</mallctl>
32554-          (<type>bool</type>)
32555-          <literal>r-</literal>
32556-        </term>
32557-	<listitem><para>Confirm-runtime-options-when-program-starts
32558-	enabled/disabled.  If true, the string specified via
32559-	<option>--with-malloc-conf</option>, the string pointed to by the
32560-	global variable <varname>malloc_conf</varname>, the <quote>name</quote>
32561-	of the file referenced by the symbolic link named
32562-	<filename class="symlink">/etc/malloc.conf</filename>, and the value of
32563-	the environment variable <envar>MALLOC_CONF</envar>, will be printed in
32564-	order.  Then, each option being set will be individually printed.  This
32565-	option is disabled by default.</para></listitem>
32566-      </varlistentry>
32567-
32568-      <varlistentry id="opt.abort_conf">
32569-        <term>
32570-          <mallctl>opt.abort_conf</mallctl>
32571-          (<type>bool</type>)
32572-          <literal>r-</literal>
32573-        </term>
32574-        <listitem><para>Abort-on-invalid-configuration enabled/disabled.  If
32575-        true, invalid runtime options are fatal.  The process will call
32576-        <citerefentry><refentrytitle>abort</refentrytitle>
32577-        <manvolnum>3</manvolnum></citerefentry> in these cases.  This option is
32578-        disabled by default unless <option>--enable-debug</option> is
32579-        specified during configuration, in which case it is enabled by default.
32580-        </para></listitem>
32581-      </varlistentry>
32582-
32583-      <varlistentry id="opt.cache_oblivious">
32584-        <term>
32585-          <mallctl>opt.cache_oblivious</mallctl>
32586-          (<type>bool</type>)
32587-          <literal>r-</literal>
32588-        </term>
32589-        <listitem><para>Enable/disable cache-oblivious large allocation
32590-        alignment, for large requests with no alignment constraints.  If this
32591-        feature is disabled, all large allocations are page-aligned as an
32592-        implementation artifact, which can severely harm CPU cache utilization.
32593-        However, the cache-oblivious layout comes at the cost of one extra page
32594-        per large allocation, which in the most extreme case increases physical
32595-        memory usage for the 16 KiB size class to 20 KiB. This option is enabled
32596-        by default.</para></listitem>
32597-      </varlistentry>
32598-
32599-      <varlistentry id="opt.metadata_thp">
32600-        <term>
32601-          <mallctl>opt.metadata_thp</mallctl>
32602-          (<type>const char *</type>)
32603-          <literal>r-</literal>
32604-        </term>
32605-        <listitem><para>Controls whether jemalloc is allowed to use transparent
32606-        huge pages (THP) for internal metadata (see <link
32607-        linkend="stats.metadata">stats.metadata</link>).  <quote>always</quote>
32608-        allows such usage.  <quote>auto</quote> uses no THP initially, but may
32609-        begin to do so when metadata usage reaches a certain level.  The default
32610-        is <quote>disabled</quote>.</para></listitem>
32611-      </varlistentry>
32612-
32613-      <varlistentry id="opt.trust_madvise">
32614-        <term>
32615-          <mallctl>opt.trust_madvise</mallctl>
32616-          (<type>bool</type>)
32617-          <literal>r-</literal>
32618-        </term>
32619-        <listitem><para>If true, do not perform the runtime check verifying
32620-        that MADV_DONTNEED actually zeros pages.  The default is disabled on Linux
32621-        and enabled elsewhere.</para></listitem>
32622-      </varlistentry>
32623-
32624-      <varlistentry id="opt.retain">
32625-        <term>
32626-          <mallctl>opt.retain</mallctl>
32627-          (<type>bool</type>)
32628-          <literal>r-</literal>
32629-        </term>
32630-        <listitem><para>If true, retain unused virtual memory for later reuse
32631-        rather than discarding it by calling
32632-        <citerefentry><refentrytitle>munmap</refentrytitle>
32633-        <manvolnum>2</manvolnum></citerefentry> or equivalent (see <link
32634-        linkend="stats.retained">stats.retained</link> for related details).
32635-        It also makes jemalloc use <citerefentry>
32636-        <refentrytitle>mmap</refentrytitle><manvolnum>2</manvolnum>
32637-        </citerefentry> or equivalent in a more greedy way, mapping larger
32638-        chunks in one go.  This option is disabled by default unless discarding
32639-        virtual memory is known to trigger platform-specific performance
32640-        problems, namely 1) for [64-bit] Linux, which has a quirk in its virtual
32641-        memory allocation algorithm that causes semi-permanent VM map holes
32642-        under normal jemalloc operation; and 2) for [64-bit] Windows, which
32643-        disallows split / merged regions with
32644-        <parameter><constant>MEM_RELEASE</constant></parameter>.  Although the
32645-        same issues may be present on 32-bit platforms as well, retaining virtual
32646-        memory for 32-bit Linux and Windows is disabled by default due to the
32647-        practical possibility of address space exhaustion.  </para></listitem>
32648-      </varlistentry>
32649-
32650-      <varlistentry id="opt.dss">
32651-        <term>
32652-          <mallctl>opt.dss</mallctl>
32653-          (<type>const char *</type>)
32654-          <literal>r-</literal>
32655-        </term>
32656-        <listitem><para>dss (<citerefentry><refentrytitle>sbrk</refentrytitle>
32657-        <manvolnum>2</manvolnum></citerefentry>) allocation precedence as
32658-        related to <citerefentry><refentrytitle>mmap</refentrytitle>
32659-        <manvolnum>2</manvolnum></citerefentry> allocation.  The following
32660-        settings are supported if
32661-        <citerefentry><refentrytitle>sbrk</refentrytitle>
32662-        <manvolnum>2</manvolnum></citerefentry> is supported by the operating
32663-        system: <quote>disabled</quote>, <quote>primary</quote>, and
32664-        <quote>secondary</quote>; otherwise only <quote>disabled</quote> is
32665-        supported.  The default is <quote>secondary</quote> if
32666-        <citerefentry><refentrytitle>sbrk</refentrytitle>
32667-        <manvolnum>2</manvolnum></citerefentry> is supported by the operating
32668-        system; <quote>disabled</quote> otherwise.
32669-        </para></listitem>
32670-      </varlistentry>
32671-
32672-      <varlistentry id="opt.narenas">
32673-        <term>
32674-          <mallctl>opt.narenas</mallctl>
32675-          (<type>unsigned</type>)
32676-          <literal>r-</literal>
32677-        </term>
32678-        <listitem><para>Maximum number of arenas to use for automatic
32679-        multiplexing of threads and arenas.  The default is four times the
32680-        number of CPUs, or one if there is a single CPU.</para></listitem>
32681-      </varlistentry>
32682-
32683-      <varlistentry id="opt.oversize_threshold">
32684-        <term>
32685-          <mallctl>opt.oversize_threshold</mallctl>
32686-          (<type>size_t</type>)
32687-          <literal>r-</literal>
32688-        </term>
32689-        <listitem><para>The threshold in bytes above which requests are considered
32690-        oversize.  Allocation requests with greater sizes are fulfilled from a
32691-        dedicated arena (automatically managed, however not within
32692-        <literal>narenas</literal>), in order to reduce fragmentation by not
32693-        mixing huge allocations with small ones.  In addition, the decay API
32694-        guarantees for extents greater than the specified threshold may be
32695-        overridden.  Note that requests with an arena index specified via
32696-        <constant>MALLOCX_ARENA</constant>, or from threads associated with explicit
32697-        arenas, will not be considered.  The default threshold is 8 MiB.  Values
32698-        not within large size classes disable this feature.</para></listitem>
32699-      </varlistentry>
32700-
32701-      <varlistentry id="opt.percpu_arena">
32702-        <term>
32703-          <mallctl>opt.percpu_arena</mallctl>
32704-          (<type>const char *</type>)
32705-          <literal>r-</literal>
32706-        </term>
32707-        <listitem><para>Per-CPU arena mode.  Use the <quote>percpu</quote>
32708-        setting to enable this feature, which uses the number of CPUs to determine
32709-        the number of arenas and binds threads to arenas dynamically based on the
32710-        CPU the thread is currently running on.  The <quote>phycpu</quote> setting uses
32711-        one arena per physical CPU, which means the two hyperthreads on the
32712-        same CPU share one arena.  Note that no runtime checking regarding the
32713-        availability of hyperthreading is done at the moment.  When set to
32714-        <quote>disabled</quote>, narenas and thread-to-arena association will
32715-        not be impacted by this option.  The default is <quote>disabled</quote>.
32716-        </para></listitem>
32717-      </varlistentry>
32718-
32719-      <varlistentry id="opt.background_thread">
32720-        <term>
32721-          <mallctl>opt.background_thread</mallctl>
32722-          (<type>bool</type>)
32723-          <literal>r-</literal>
32724-        </term>
32725-        <listitem><para>Internal background worker threads enabled/disabled.
32726-        Because of potential circular dependencies, enabling background threads
32727-        using this option may cause a crash or deadlock during initialization. For
32728-        a reliable way to use this feature, see <link
32729-        linkend="background_thread">background_thread</link> for dynamic control
32730-        options and details.  This option is disabled by
32731-        default.</para></listitem>
32732-      </varlistentry>
32733-
32734-      <varlistentry id="opt.max_background_threads">
32735-        <term>
32736-          <mallctl>opt.max_background_threads</mallctl>
32737-          (<type>size_t</type>)
32738-          <literal>r-</literal>
32739-        </term>
32740-        <listitem><para>Maximum number of background threads that will be created
32741-        if <link linkend="background_thread">background_thread</link> is set.
32742-        Defaults to the number of CPUs.</para></listitem>
32743-      </varlistentry>
32744-
32745-      <varlistentry id="opt.dirty_decay_ms">
32746-        <term>
32747-          <mallctl>opt.dirty_decay_ms</mallctl>
32748-          (<type>ssize_t</type>)
32749-          <literal>r-</literal>
32750-        </term>
32751-        <listitem><para>Approximate time in milliseconds from the creation of a
32752-        set of unused dirty pages until an equivalent set of unused dirty pages
32753-        is purged (i.e. converted to muzzy via e.g.
32754-        <function>madvise(<parameter>...</parameter><parameter><constant>MADV_FREE</constant></parameter>)</function>
32755-        if supported by the operating system, or converted to clean otherwise)
32756-        and/or reused.  Dirty pages are defined as previously having been
32757-        potentially written to by the application, and therefore consuming
32758-        physical memory, yet having no current use.  The pages are incrementally
32759-        purged according to a sigmoidal decay curve that starts and ends with
32760-        zero purge rate.  A decay time of 0 causes all unused dirty pages to be
32761-        purged immediately upon creation.  A decay time of -1 disables purging.
32762-        The default decay time is 10 seconds.  See <link
32763-        linkend="arenas.dirty_decay_ms"><mallctl>arenas.dirty_decay_ms</mallctl></link>
32764-        and <link
32765-        linkend="arena.i.dirty_decay_ms"><mallctl>arena.&lt;i&gt;.dirty_decay_ms</mallctl></link>
32766-        for related dynamic control options.  See <link
32767-        linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
32768-        for a description of muzzy pages.  Note
32769-        that when the <link
32770-        linkend="opt.oversize_threshold"><mallctl>oversize_threshold</mallctl></link>
32771-        feature is enabled, the arenas reserved for oversize requests may have
32772-        their own default decay settings.</para>
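-        <para>For example (the values here are purely illustrative), both decay
-        times can be set at compile time via the <varname>malloc_conf</varname>
-        string:
-        <programlisting language="C"><![CDATA[
-malloc_conf = "dirty_decay_ms:5000,muzzy_decay_ms:0";]]></programlisting>
-        </para></listitem>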
32773-      </varlistentry>
32774-
32775-      <varlistentry id="opt.muzzy_decay_ms">
32776-        <term>
32777-          <mallctl>opt.muzzy_decay_ms</mallctl>
32778-          (<type>ssize_t</type>)
32779-          <literal>r-</literal>
32780-        </term>
32781-        <listitem><para>Approximate time in milliseconds from the creation of a
32782-        set of unused muzzy pages until an equivalent set of unused muzzy pages
32783-        is purged (i.e. converted to clean) and/or reused.  Muzzy pages are
32784-        defined as previously having been unused dirty pages that were
32785-        subsequently purged in a manner that left them subject to the
32786-        reclamation whims of the operating system (e.g.
32787-        <function>madvise(<parameter>...</parameter><parameter><constant>MADV_FREE</constant></parameter>)</function>),
32788-        and therefore in an indeterminate state.  The pages are incrementally
32789-        purged according to a sigmoidal decay curve that starts and ends with
32790-        zero purge rate.  A decay time of 0 causes all unused muzzy pages to be
32791-        purged immediately upon creation.  A decay time of -1 disables purging.
32792-        The default decay time is 10 seconds.  See <link
32793-        linkend="arenas.muzzy_decay_ms"><mallctl>arenas.muzzy_decay_ms</mallctl></link>
32794-        and <link
32795-        linkend="arena.i.muzzy_decay_ms"><mallctl>arena.&lt;i&gt;.muzzy_decay_ms</mallctl></link>
32796-        for related dynamic control options.</para></listitem>
32797-      </varlistentry>
32798-
32799-      <varlistentry id="opt.lg_extent_max_active_fit">
32800-        <term>
32801-          <mallctl>opt.lg_extent_max_active_fit</mallctl>
32802-          (<type>size_t</type>)
32803-          <literal>r-</literal>
32804-        </term>
32805-        <listitem><para>When reusing dirty extents, this determines the (log
32806-        base 2 of the) maximum ratio between the size of the active extent
32807-        selected (to split off from) and the size of the requested allocation.
32808-        This prevents the splitting of large active extents for smaller
32809-        allocations, which can reduce fragmentation over the long run
32810-        (especially for non-active extents).  A lower value may reduce
32811-        fragmentation, at the cost of extra active extents.  The default value
32812-        is 6, which gives a maximum ratio of 64 (2^6).</para></listitem>
32813-      </varlistentry>
32814-
32815-      <varlistentry id="opt.stats_print">
32816-        <term>
32817-          <mallctl>opt.stats_print</mallctl>
32818-          (<type>bool</type>)
32819-          <literal>r-</literal>
32820-        </term>
32821-        <listitem><para>Enable/disable statistics printing at exit.  If
32822-        enabled, the <function>malloc_stats_print()</function>
32823-        function is called at program exit via an
32824-        <citerefentry><refentrytitle>atexit</refentrytitle>
32825-        <manvolnum>3</manvolnum></citerefentry> function.  <link
32826-        linkend="opt.stats_print_opts"><mallctl>opt.stats_print_opts</mallctl></link>
32827-        can be combined to specify output options. If
32828-        <option>--enable-stats</option> is specified during configuration, this
32829-        has the potential to cause deadlock for a multi-threaded process that
32830-        exits while one or more threads are executing in the memory allocation
32831-        functions.  Furthermore, <function>atexit()</function> may
32832-        allocate memory during application initialization and then deadlock
32833-        internally when jemalloc in turn calls
32834-        <function>atexit()</function>, so this option is not
32835-        universally usable (though the application can register its own
32836-        <function>atexit()</function> function with equivalent
32837-        functionality).  Therefore, this option should only be used with care;
32838-        it is primarily intended as a performance tuning aid during application
32839-        development.  This option is disabled by default.</para></listitem>
32840-      </varlistentry>
32841-
32842-      <varlistentry id="opt.stats_print_opts">
32843-        <term>
32844-          <mallctl>opt.stats_print_opts</mallctl>
32845-          (<type>const char *</type>)
32846-          <literal>r-</literal>
32847-        </term>
32848-        <listitem><para>Options (the <parameter>opts</parameter> string) to pass
32849-        to the <function>malloc_stats_print()</function> at exit (enabled
32850-        through <link
32851-        linkend="opt.stats_print"><mallctl>opt.stats_print</mallctl></link>). See
32852-        available options in <link
32853-        linkend="malloc_stats_print_opts"><function>malloc_stats_print()</function></link>.
32854-        Has no effect unless <link
32855-        linkend="opt.stats_print"><mallctl>opt.stats_print</mallctl></link> is
32856-        enabled.  The default is <quote></quote>.</para></listitem>
32857-      </varlistentry>
32858-
32859-      <varlistentry id="opt.stats_interval">
32860-        <term>
32861-          <mallctl>opt.stats_interval</mallctl>
32862-          (<type>int64_t</type>)
32863-          <literal>r-</literal>
32864-        </term>
32865-        <listitem><para>Average interval between statistics outputs, as measured
32866-        in bytes of allocation activity.  The actual interval may be sporadic
32867-        because decentralized event counters are used to avoid synchronization
32868-        bottlenecks.  The output may be triggered on any thread, which then
32869-        calls <function>malloc_stats_print()</function>.  <link
32870-        linkend="opt.stats_interval_opts"><mallctl>opt.stats_interval_opts</mallctl></link>
32871-        can be combined to specify output options.  By default,
32872-        interval-triggered stats output is disabled (encoded as
32873-        -1).</para></listitem>
32874-      </varlistentry>
32875-
32876-      <varlistentry id="opt.stats_interval_opts">
32877-        <term>
32878-          <mallctl>opt.stats_interval_opts</mallctl>
32879-          (<type>const char *</type>)
32880-          <literal>r-</literal>
32881-        </term>
32882-        <listitem><para>Options (the <parameter>opts</parameter> string) to pass
32883-        to the <function>malloc_stats_print()</function> for interval-based
32884-        statistics printing (enabled
32885-        through <link
32886-        linkend="opt.stats_interval"><mallctl>opt.stats_interval</mallctl></link>). See
32887-        available options in <link
32888-        linkend="malloc_stats_print_opts"><function>malloc_stats_print()</function></link>.
32889-        Has no effect unless <link
32890-        linkend="opt.stats_interval"><mallctl>opt.stats_interval</mallctl></link> is
32891-        enabled.  The default is <quote></quote>.</para></listitem>
32892-      </varlistentry>
32893-
32894-      <varlistentry id="opt.junk">
32895-        <term>
32896-          <mallctl>opt.junk</mallctl>
32897-          (<type>const char *</type>)
32898-          <literal>r-</literal>
32899-          [<option>--enable-fill</option>]
32900-        </term>
32901-        <listitem><para>Junk filling.  If set to <quote>alloc</quote>, each byte
32902-        of uninitialized allocated memory will be initialized to
32903-        <literal>0xa5</literal>.  If set to <quote>free</quote>, all deallocated
32904-        memory will be initialized to <literal>0x5a</literal>.  If set to
32905-        <quote>true</quote>, both allocated and deallocated memory will be
32906-        initialized, and if set to <quote>false</quote>, junk filling will be
32907-        disabled entirely.  This is intended for debugging and will impact
32908-        performance negatively.  This option is <quote>false</quote> by default
32909-        unless <option>--enable-debug</option> is specified during
32910-        configuration, in which case it is <quote>true</quote> by
32911-        default.</para></listitem>
32912-      </varlistentry>
32913-
32914-      <varlistentry id="opt.zero">
32915-        <term>
32916-          <mallctl>opt.zero</mallctl>
32917-          (<type>bool</type>)
32918-          <literal>r-</literal>
32919-          [<option>--enable-fill</option>]
32920-        </term>
32921-        <listitem><para>Zero filling enabled/disabled.  If enabled, each byte
32922-        of uninitialized allocated memory will be initialized to 0.  Note that
32923-        this initialization only happens once for each byte, so
32924-        <function>realloc()</function> and
32925-        <function>rallocx()</function> calls do not zero memory that
32926-        was previously allocated.  This is intended for debugging and will
32927-        impact performance negatively.  This option is disabled by default.
32928-        </para></listitem>
32929-      </varlistentry>
32930-
32931-      <varlistentry id="opt.utrace">
32932-        <term>
32933-          <mallctl>opt.utrace</mallctl>
32934-          (<type>bool</type>)
32935-          <literal>r-</literal>
32936-          [<option>--enable-utrace</option>]
32937-        </term>
32938-        <listitem><para>Allocation tracing based on
32939-        <citerefentry><refentrytitle>utrace</refentrytitle>
32940-        <manvolnum>2</manvolnum></citerefentry> enabled/disabled.  This option
32941-        is disabled by default.</para></listitem>
32942-      </varlistentry>
32943-
32944-      <varlistentry id="opt.xmalloc">
32945-        <term>
32946-          <mallctl>opt.xmalloc</mallctl>
32947-          (<type>bool</type>)
32948-          <literal>r-</literal>
32949-          [<option>--enable-xmalloc</option>]
32950-        </term>
32951-        <listitem><para>Abort-on-out-of-memory enabled/disabled.  If enabled,
32952-        rather than returning failure for any allocation function, display a
32953-        diagnostic message on <constant>STDERR_FILENO</constant> and cause the
32954-        program to drop core (using
32955-        <citerefentry><refentrytitle>abort</refentrytitle>
32956-        <manvolnum>3</manvolnum></citerefentry>).  If an application is
32957-        designed to depend on this behavior, set the option at compile time by
32958-        including the following in the source code:
32959-        <programlisting language="C"><![CDATA[
32960-malloc_conf = "xmalloc:true";]]></programlisting>
32961-        This option is disabled by default.</para></listitem>
32962-      </varlistentry>
32963-
32964-      <varlistentry id="opt.tcache">
32965-        <term>
32966-          <mallctl>opt.tcache</mallctl>
32967-          (<type>bool</type>)
32968-          <literal>r-</literal>
32969-        </term>
32970-        <listitem><para>Thread-specific caching (tcache) enabled/disabled.  When
32971-        there are multiple threads, each thread uses a tcache for objects up to
32972-        a certain size.  Thread-specific caching allows many allocations to be
32973-        satisfied without performing any thread synchronization, at the cost of
32974-        increased memory use.  See the <link
32975-        linkend="opt.tcache_max"><mallctl>opt.tcache_max</mallctl></link>
32976-        option for related tuning information.  This option is enabled by
32977-        default.</para></listitem>
32978-      </varlistentry>
32979-
32980-      <varlistentry id="opt.tcache_max">
32981-        <term>
32982-          <mallctl>opt.tcache_max</mallctl>
32983-          (<type>size_t</type>)
32984-          <literal>r-</literal>
32985-        </term>
32986-        <listitem><para>Maximum size class to cache in the thread-specific cache
32987-        (tcache).  At a minimum, the first size class is cached; and at a
32988-        maximum, size classes up to 8 MiB can be cached.  The default maximum is
32989-        32 KiB (2^15).  As a convenience, this may also be set by specifying
32990-        lg_tcache_max, which will be taken to be the base-2 logarithm of the
32991-        setting of tcache_max.</para>
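-        <para>For instance (the value is illustrative), caching size classes up
-        to 64 KiB can be requested in either of the following equivalent ways:
-        <programlisting language="C"><![CDATA[
-malloc_conf = "tcache_max:65536";   /* or, equivalently, "lg_tcache_max:16" */]]></programlisting>
-        </para></listitem>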
32992-      </varlistentry>
32993-
32994-      <varlistentry id="opt.thp">
32995-        <term>
32996-          <mallctl>opt.thp</mallctl>
32997-          (<type>const char *</type>)
32998-          <literal>r-</literal>
32999-        </term>
33000-        <listitem><para>Transparent hugepage (THP) mode. Settings "always",
33001-        "never" and "default" are available if THP is supported by the operating
33002-        system.  The "always" setting enables transparent hugepage for all user
33003-        memory mappings with
33004-        <parameter><constant>MADV_HUGEPAGE</constant></parameter>; "never"
33005-        ensures no transparent hugepage with
33006-        <parameter><constant>MADV_NOHUGEPAGE</constant></parameter>; the default
33007-        setting "default" makes no changes.  Note that this option does not
33008-        affect THP for jemalloc internal metadata (see <link
33009-        linkend="opt.metadata_thp"><mallctl>opt.metadata_thp</mallctl></link>);
33010-        in addition, for arenas with customized <link
33011-        linkend="arena.i.extent_hooks"><mallctl>extent_hooks</mallctl></link>,
33012-        this option is bypassed as it is implemented as part of the default
33013-        extent hooks.</para></listitem>
33014-      </varlistentry>
33015-
33016-      <varlistentry id="opt.prof">
33017-        <term>
33018-          <mallctl>opt.prof</mallctl>
33019-          (<type>bool</type>)
33020-          <literal>r-</literal>
33021-          [<option>--enable-prof</option>]
33022-        </term>
33023-        <listitem><para>Memory profiling enabled/disabled.  If enabled, profile
33024-        memory allocation activity.  See the <link
33025-        linkend="opt.prof_active"><mallctl>opt.prof_active</mallctl></link>
33026-        option for on-the-fly activation/deactivation.  See the <link
33027-        linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>
33028-        option for probabilistic sampling control.  See the <link
33029-        linkend="opt.prof_accum"><mallctl>opt.prof_accum</mallctl></link>
33030-        option for control of cumulative sample reporting.  See the <link
33031-        linkend="opt.lg_prof_interval"><mallctl>opt.lg_prof_interval</mallctl></link>
33032-        option for information on interval-triggered profile dumping, the <link
33033-        linkend="opt.prof_gdump"><mallctl>opt.prof_gdump</mallctl></link>
33034-        option for information on high-water-triggered profile dumping, and the
33035-        <link linkend="opt.prof_final"><mallctl>opt.prof_final</mallctl></link>
33036-        option for final profile dumping.  Profile output is compatible with
33037-        the <command>jeprof</command> command, which is based on the
33038-        <command>pprof</command> that is developed as part of the <ulink
33039-        url="http://code.google.com/p/gperftools/">gperftools
33040-        package</ulink>.  See <link linkend="heap_profile_format">HEAP PROFILE
33041-        FORMAT</link> for heap profile format documentation.</para>
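-        <para>For example (the option values are arbitrary), profiling with a
-        final heap dump and the default 512 KiB sampling interval could be
-        requested via:
-        <programlisting language="C"><![CDATA[
-malloc_conf = "prof:true,prof_final:true,lg_prof_sample:19";]]></programlisting>
-        </para></listitem>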
33042-      </varlistentry>
33043-
33044-      <varlistentry id="opt.prof_prefix">
33045-        <term>
33046-          <mallctl>opt.prof_prefix</mallctl>
33047-          (<type>const char *</type>)
33048-          <literal>r-</literal>
33049-          [<option>--enable-prof</option>]
33050-        </term>
33051-        <listitem><para>Filename prefix for profile dumps.  If the prefix is
33052-        set to the empty string, no automatic dumps will occur; this is
33053-        primarily useful for disabling the automatic final heap dump (which
33054-        also disables leak reporting, if enabled).  The default prefix is
33055-        <filename>jeprof</filename>.  This prefix value can be overridden by
33056-        <link linkend="prof.prefix"><mallctl>prof.prefix</mallctl></link>.
33057-        </para></listitem>
33058-      </varlistentry>
33059-
33060-      <varlistentry id="opt.prof_active">
33061-        <term>
33062-          <mallctl>opt.prof_active</mallctl>
33063-          (<type>bool</type>)
33064-          <literal>r-</literal>
33065-          [<option>--enable-prof</option>]
33066-        </term>
33067-        <listitem><para>Profiling activated/deactivated.  This is a secondary
33068-        control mechanism that makes it possible to start the application with
33069-        profiling enabled (see the <link
33070-        linkend="opt.prof"><mallctl>opt.prof</mallctl></link> option) but
33071-        inactive, then toggle profiling at any time during program execution
33072-        with the <link
33073-        linkend="prof.active"><mallctl>prof.active</mallctl></link> mallctl.
33074-        This option is enabled by default.</para></listitem>
33075-      </varlistentry>
33076-
33077-      <varlistentry id="opt.prof_thread_active_init">
33078-        <term>
33079-          <mallctl>opt.prof_thread_active_init</mallctl>
33080-          (<type>bool</type>)
33081-          <literal>r-</literal>
33082-          [<option>--enable-prof</option>]
33083-        </term>
33084-        <listitem><para>Initial setting for <link
33085-        linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
33086-        in newly created threads.  The initial setting for newly created threads
33087-        can also be changed during execution via the <link
33088-        linkend="prof.thread_active_init"><mallctl>prof.thread_active_init</mallctl></link>
33089-        mallctl.  This option is enabled by default.</para></listitem>
33090-      </varlistentry>
33091-
33092-      <varlistentry id="opt.lg_prof_sample">
33093-        <term>
33094-          <mallctl>opt.lg_prof_sample</mallctl>
33095-          (<type>size_t</type>)
33096-          <literal>r-</literal>
33097-          [<option>--enable-prof</option>]
33098-        </term>
33099-        <listitem><para>Average interval (log base 2) between allocation
33100-        samples, as measured in bytes of allocation activity.  Increasing the
33101-        sampling interval decreases profile fidelity, but also decreases the
33102-        computational overhead.  The default sample interval is 512 KiB (2^19
33103-        B).</para></listitem>
33104-      </varlistentry>
33105-
33106-      <varlistentry id="opt.prof_accum">
33107-        <term>
33108-          <mallctl>opt.prof_accum</mallctl>
33109-          (<type>bool</type>)
33110-          <literal>r-</literal>
33111-          [<option>--enable-prof</option>]
33112-        </term>
33113-        <listitem><para>Reporting of cumulative object/byte counts in profile
33114-        dumps enabled/disabled.  If this option is enabled, every unique
33115-        backtrace must be stored for the duration of execution.  Depending on
33116-        the application, this can impose a large memory overhead, and the
33117-        cumulative counts are not always of interest.  This option is disabled
33118-        by default.</para></listitem>
33119-      </varlistentry>
33120-
33121-      <varlistentry id="opt.lg_prof_interval">
33122-        <term>
33123-          <mallctl>opt.lg_prof_interval</mallctl>
33124-          (<type>ssize_t</type>)
33125-          <literal>r-</literal>
33126-          [<option>--enable-prof</option>]
33127-        </term>
33128-        <listitem><para>Average interval (log base 2) between memory profile
33129-        dumps, as measured in bytes of allocation activity.  The actual
33130-        interval between dumps may be sporadic because decentralized allocation
33131-        counters are used to avoid synchronization bottlenecks.  Profiles are
33132-        dumped to files named according to the pattern
33133-        <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.i&lt;iseq&gt;.heap</filename>,
33134-        where <literal>&lt;prefix&gt;</literal> is controlled by the
33135-        <link
33136-        linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link> and
33137-        <link linkend="prof.prefix"><mallctl>prof.prefix</mallctl></link>
33138-        options.  By default, interval-triggered profile dumping is disabled
33139-        (encoded as -1).
33140-        </para></listitem>
33141-      </varlistentry>
33142-
33143-      <varlistentry id="opt.prof_gdump">
33144-        <term>
33145-          <mallctl>opt.prof_gdump</mallctl>
33146-          (<type>bool</type>)
33147-          <literal>r-</literal>
33148-          [<option>--enable-prof</option>]
33149-        </term>
33150-        <listitem><para>Set the initial state of <link
33151-        linkend="prof.gdump"><mallctl>prof.gdump</mallctl></link>, which when
33152-        enabled triggers a memory profile dump every time the total virtual
33153-        memory exceeds the previous maximum.  This option is disabled by
33154-        default.</para></listitem>
33155-      </varlistentry>
33156-
33157-      <varlistentry id="opt.prof_final">
33158-        <term>
33159-          <mallctl>opt.prof_final</mallctl>
33160-          (<type>bool</type>)
33161-          <literal>r-</literal>
33162-          [<option>--enable-prof</option>]
33163-        </term>
33164-        <listitem><para>Use an
33165-        <citerefentry><refentrytitle>atexit</refentrytitle>
33166-        <manvolnum>3</manvolnum></citerefentry> function to dump final memory
33167-        usage to a file named according to the pattern
33168-        <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.f.heap</filename>,
33169-        where <literal>&lt;prefix&gt;</literal> is controlled by the <link
33170-        linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link> and
33171-        <link linkend="prof.prefix"><mallctl>prof.prefix</mallctl></link>
33172-        options.  Note that <function>atexit()</function> may allocate
33173-        memory during application initialization and then deadlock internally
33174-        when jemalloc in turn calls <function>atexit()</function>, so
33175-        this option is not universally usable (though the application can
33176-        register its own <function>atexit()</function> function with
33177-        equivalent functionality).  This option is disabled by
33178-        default.</para></listitem>
33179-      </varlistentry>
33180-
33181-      <varlistentry id="opt.prof_leak">
33182-        <term>
33183-          <mallctl>opt.prof_leak</mallctl>
33184-          (<type>bool</type>)
33185-          <literal>r-</literal>
33186-          [<option>--enable-prof</option>]
33187-        </term>
33188-        <listitem><para>Leak reporting enabled/disabled.  If enabled, use an
33189-        <citerefentry><refentrytitle>atexit</refentrytitle>
33190-        <manvolnum>3</manvolnum></citerefentry> function to report memory leaks
33191-        detected by allocation sampling.  See the
33192-        <link linkend="opt.prof"><mallctl>opt.prof</mallctl></link> option for
33193-        information on analyzing heap profile output.  Works only when combined
33194-        with <link linkend="opt.prof_final"><mallctl>opt.prof_final</mallctl>
33195-        </link>, otherwise does nothing.  This option is disabled by default.
33196-        </para></listitem>
33197-      </varlistentry>
33198-
33199-      <varlistentry id="opt.prof_leak_error">
33200-        <term>
33201-          <mallctl>opt.prof_leak_error</mallctl>
33202-          (<type>bool</type>)
33203-          <literal>r-</literal>
33204-          [<option>--enable-prof</option>]
33205-        </term>
33206-        <listitem><para>Similar to <link linkend="opt.prof_leak"><mallctl>
33207-        opt.prof_leak</mallctl></link>, but makes the process exit with error
33208-        code 1 if a memory leak is detected.  This option supersedes
33209-        <link linkend="opt.prof_leak"><mallctl>opt.prof_leak</mallctl></link>,
33210-        meaning that if both are specified, this option takes precedence.  When
33211-        enabled, also enables <link linkend="opt.prof_leak"><mallctl>
33212-        opt.prof_leak</mallctl></link>.  Works only when combined with
33213-        <link linkend="opt.prof_final"><mallctl>opt.prof_final</mallctl></link>,
33214-        otherwise does nothing.  This option is disabled by default.
33215-        </para></listitem>
33216-      </varlistentry>
33217-
33218-      <varlistentry id="opt.zero_realloc">
33219-        <term>
33220-          <mallctl>opt.zero_realloc</mallctl>
33221-          (<type>const char *</type>)
33222-          <literal>r-</literal>
33223-        </term>
33224-        <listitem><para> Determines the behavior of
33225-        <function>realloc()</function> when passed a value of zero for the new
33226-        size.  <quote>alloc</quote> treats this as an allocation of size zero
33227-        (and returns a non-null result except in case of resource exhaustion).
33228-        <quote>free</quote> treats this as a deallocation of the pointer, and
33229-        returns <constant>NULL</constant> without setting
33230-        <varname>errno</varname>.  <quote>abort</quote> aborts the process if
33231-        zero is passed.  The default is <quote>free</quote> on Linux and
33232-        Windows, and <quote>alloc</quote> elsewhere.</para>
33233-
33234-	<para>There is considerable divergence of behaviors across
33235-	implementations in handling this case. Many have the behavior of
33236-	<quote>free</quote>. This can introduce security vulnerabilities, since
33237-	a <constant>NULL</constant> return value indicates failure, and the
33238-	continued validity of the passed-in pointer (per POSIX and C11).
33239-	<quote>alloc</quote> is safe, but can cause leaks in programs that
33240-	expect the common behavior.  Programs intended to be portable and
33241-	leak-free cannot assume either behavior, and must therefore never call
33242-	realloc with a size of 0.  The <quote>abort</quote> option enables
33243-	testing for this behavior.</para></listitem>
33244-      </varlistentry>
33245-
33246-      <varlistentry id="thread.arena">
33247-        <term>
33248-          <mallctl>thread.arena</mallctl>
33249-          (<type>unsigned</type>)
33250-          <literal>rw</literal>
33251-        </term>
33252-        <listitem><para>Get or set the arena associated with the calling
33253-        thread.  If the specified arena was not initialized beforehand (see the
33254-        <link
33255-        linkend="arena.i.initialized"><mallctl>arena.&lt;i&gt;.initialized</mallctl></link>
33256-        mallctl), it will be automatically initialized as a side effect of
33257-        calling this interface.</para>
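-        <para>A read of the current binding looks as follows; writing an arena
-        index instead rebinds the calling thread:
-        <programlisting language="C"><![CDATA[
-unsigned arena_ind;
-size_t sz = sizeof(arena_ind);
-mallctl("thread.arena", &arena_ind, &sz, NULL, 0);]]></programlisting>
-        </para></listitem>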
33258-      </varlistentry>
33259-
33260-      <varlistentry id="thread.allocated">
33261-        <term>
33262-          <mallctl>thread.allocated</mallctl>
33263-          (<type>uint64_t</type>)
33264-          <literal>r-</literal>
33265-          [<option>--enable-stats</option>]
33266-        </term>
33267-        <listitem><para>Get the total number of bytes ever allocated by the
33268-        calling thread.  This counter has the potential to wrap around; it is
33269-        up to the application to appropriately interpret the counter in such
33270-        cases.</para></listitem>
33271-      </varlistentry>
33272-
33273-      <varlistentry id="thread.allocatedp">
33274-        <term>
33275-          <mallctl>thread.allocatedp</mallctl>
33276-          (<type>uint64_t *</type>)
33277-          <literal>r-</literal>
33278-          [<option>--enable-stats</option>]
33279-        </term>
33280-        <listitem><para>Get a pointer to the value that is returned by the
33281-        <link
33282-        linkend="thread.allocated"><mallctl>thread.allocated</mallctl></link>
33283-        mallctl.  This is useful for avoiding the overhead of repeated
33284-        <function>mallctl*()</function> calls.  Note that the underlying counter
33285-        should not be modified by the application.</para>
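-        <para>For example, the pointer can be fetched once and dereferenced
-        directly thereafter:
-        <programlisting language="C"><![CDATA[
-uint64_t *allocatedp;
-size_t sz = sizeof(allocatedp);
-mallctl("thread.allocatedp", &allocatedp, &sz, NULL, 0);
-/* *allocatedp now tracks the calling thread's cumulative allocations. */]]></programlisting>
-        </para></listitem>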
33286-      </varlistentry>
33287-
33288-      <varlistentry id="thread.deallocated">
33289-        <term>
33290-          <mallctl>thread.deallocated</mallctl>
33291-          (<type>uint64_t</type>)
33292-          <literal>r-</literal>
33293-          [<option>--enable-stats</option>]
33294-        </term>
33295-        <listitem><para>Get the total number of bytes ever deallocated by the
33296-        calling thread.  This counter has the potential to wrap around; it is
33297-        up to the application to appropriately interpret the counter in such
33298-        cases.</para></listitem>
33299-      </varlistentry>
33300-
33301-      <varlistentry id="thread.deallocatedp">
33302-        <term>
33303-          <mallctl>thread.deallocatedp</mallctl>
33304-          (<type>uint64_t *</type>)
33305-          <literal>r-</literal>
33306-          [<option>--enable-stats</option>]
33307-        </term>
33308-        <listitem><para>Get a pointer to the value that is returned by the
33309-        <link
33310-        linkend="thread.deallocated"><mallctl>thread.deallocated</mallctl></link>
33311-        mallctl.  This is useful for avoiding the overhead of repeated
33312-        <function>mallctl*()</function> calls.  Note that the underlying counter
33313-        should not be modified by the application.</para></listitem>
33314-      </varlistentry>
33315-
33316-      <varlistentry id="thread.peak.read">
33317-        <term>
33318-          <mallctl>thread.peak.read</mallctl>
33319-          (<type>uint64_t</type>)
33320-          <literal>r-</literal>
33321-          [<option>--enable-stats</option>]
33322-        </term>
33323-        <listitem><para>Get an approximation of the maximum value of the
33324-        difference between the number of bytes allocated and the number of bytes
33325-        deallocated by the calling thread since the last call to <link
33326-        linkend="thread.peak.reset"><mallctl>thread.peak.reset</mallctl></link>,
33327-        or since the thread's creation if it has not called <link
33328-        linkend="thread.peak.reset"><mallctl>thread.peak.reset</mallctl></link>.
33329-        No guarantees are made about the quality of the approximation, but
33330-        jemalloc currently endeavors to maintain accuracy to within one hundred
33331-        kilobytes.
33332-        </para></listitem>
33333-      </varlistentry>
33334-
33335-      <varlistentry id="thread.peak.reset">
33336-        <term>
33337-          <mallctl>thread.peak.reset</mallctl>
33338-          (<type>void</type>)
33339-          <literal>--</literal>
33340-          [<option>--enable-stats</option>]
33341-        </term>
33342-        <listitem><para>Resets the counter for net bytes allocated in the calling
33343-        thread to zero. This affects subsequent calls to <link
33344-        linkend="thread.peak.read"><mallctl>thread.peak.read</mallctl></link>,
33345-        but not the values returned by <link
33346-        linkend="thread.allocated"><mallctl>thread.allocated</mallctl></link>
33347-        or <link
33348-        linkend="thread.deallocated"><mallctl>thread.deallocated</mallctl></link>.
33349-        </para>
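-        <para>Together with <link
-        linkend="thread.peak.read"><mallctl>thread.peak.read</mallctl></link>,
-        this allows bracketing a region of interest, roughly as follows:
-        <programlisting language="C"><![CDATA[
-uint64_t peak;
-size_t sz = sizeof(peak);
-mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
-/* ... run the allocation-heavy work of interest ... */
-mallctl("thread.peak.read", &peak, &sz, NULL, 0);]]></programlisting>
-        </para></listitem>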
33350-      </varlistentry>
33351-
33352-      <varlistentry id="thread.tcache.enabled">
33353-        <term>
33354-          <mallctl>thread.tcache.enabled</mallctl>
33355-          (<type>bool</type>)
33356-          <literal>rw</literal>
33357-        </term>
33358-        <listitem><para>Enable/disable calling thread's tcache.  The tcache is
33359-        implicitly flushed as a side effect of becoming
33360-        disabled (see <link
33361-        linkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>).
33362-        </para></listitem>
33363-      </varlistentry>
33364-
33365-      <varlistentry id="thread.tcache.flush">
33366-        <term>
33367-          <mallctl>thread.tcache.flush</mallctl>
33368-          (<type>void</type>)
33369-          <literal>--</literal>
33370-        </term>
33371-        <listitem><para>Flush calling thread's thread-specific cache (tcache).
33372-        This interface releases all cached objects and internal data structures
33373-        associated with the calling thread's tcache.  Ordinarily, this interface
33374-        need not be called, since automatic periodic incremental garbage
33375-        collection occurs, and the thread cache is automatically discarded when
33376-        a thread exits.  However, garbage collection is triggered by allocation
33377-        activity, so it is possible for a thread that stops
33378-        allocating/deallocating to retain its cache indefinitely, in which case
33379-        the developer may find manual flushing useful.</para>
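-        <para>Like the other <type>void</type> mallctls, it is invoked with no
-        input or output arguments:
-        <programlisting language="C"><![CDATA[
-mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);]]></programlisting>
-        </para></listitem>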
33380-      </varlistentry>
33381-
33382-      <varlistentry id="thread.prof.name">
33383-        <term>
33384-          <mallctl>thread.prof.name</mallctl>
33385-          (<type>const char *</type>)
33386-          <literal>r-</literal> or
33387-          <literal>-w</literal>
33388-          [<option>--enable-prof</option>]
33389-        </term>
33390-        <listitem><para>Get/set the descriptive name associated with the calling
33391-        thread in memory profile dumps.  An internal copy of the name string is
33392-        created, so the input string need not be maintained after this interface
33393-        completes execution.  The output string of this interface should be
33394-        copied for non-ephemeral uses, because multiple implementation details
33395-        can cause asynchronous string deallocation.  Furthermore, each
33396-        invocation of this interface can only read or write; simultaneous
33397-        read/write is not supported due to string lifetime limitations.  The
33398-        name string must be nil-terminated and comprised only of characters in
33399-        the sets recognized
33400-        by <citerefentry><refentrytitle>isgraph</refentrytitle>
33401-        <manvolnum>3</manvolnum></citerefentry> and
33402-        <citerefentry><refentrytitle>isblank</refentrytitle>
33403-        <manvolnum>3</manvolnum></citerefentry>.</para></listitem>
33404-      </varlistentry>
33405-
33406-      <varlistentry id="thread.prof.active">
33407-        <term>
33408-          <mallctl>thread.prof.active</mallctl>
33409-          (<type>bool</type>)
33410-          <literal>rw</literal>
33411-          [<option>--enable-prof</option>]
33412-        </term>
33413-        <listitem><para>Control whether sampling is currently active for the
33414-        calling thread.  This is an activation mechanism in addition to <link
33415-        linkend="prof.active"><mallctl>prof.active</mallctl></link>; both must
33416-        be active for the calling thread to sample.  This flag is enabled by
33417-        default.</para></listitem>
33418-      </varlistentry>
33419-
33420-      <varlistentry id="thread.idle">
33421-        <term>
33422-          <mallctl>thread.idle</mallctl>
33423-          (<type>void</type>)
33424-          <literal>--</literal>
33425-        </term>
33426-        <listitem><para>Hints to jemalloc that the calling thread will be idle
33427-	for some nontrivial period of time (say, on the order of seconds), and
33428-	that doing some cleanup operations may be beneficial.  There are no
33429-	guarantees as to what specific operations will be performed; currently
33430-	this flushes the caller's tcache and may (according to some heuristic)
33431-	purge its associated arena.</para>
33432-	<para>This is not intended to be a general-purpose background activity
33433-	mechanism, and threads should not wake up multiple times solely to call
33434-	it.  Rather, a thread waiting for a task should do a timed wait first,
33435-	call <link linkend="thread.idle"><mallctl>thread.idle</mallctl></link>
33436-	if no task appears in the timeout interval, and then do an untimed wait.
33437-	For such a background activity mechanism, see
33438-	<link linkend="background_thread"><mallctl>background_thread</mallctl></link>.
33439-	</para></listitem>
33440-      </varlistentry>
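        <para>A sketch of the waiting pattern described above, using a
        hypothetical POSIX condition-variable task queue
        (<varname>mtx</varname>, <varname>cv</varname> and
        <function>task_available()</function> are assumptions made for this
        example, not part of jemalloc).</para>
        <programlisting language="C"><![CDATA[
#include <pthread.h>
#include <stdbool.h>
#include <time.h>
#include <jemalloc/jemalloc.h>

extern pthread_mutex_t mtx;		/* hypothetical queue lock */
extern pthread_cond_t cv;		/* hypothetical queue condition */
extern bool task_available(void);	/* hypothetical queue predicate */

static void
wait_for_task(void) {
	struct timespec deadline;
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;	/* arbitrary timeout */

	pthread_mutex_lock(&mtx);
	if (!task_available()) {
		/* Timed wait first. */
		(void)pthread_cond_timedwait(&cv, &mtx, &deadline);
		if (!task_available()) {
			/* Nothing arrived: hint that the thread is going
			 * idle, then block without a timeout.  (Real code
			 * might drop the lock around the hint.) */
			(void)mallctl("thread.idle", NULL, NULL, NULL, 0);
			while (!task_available())
				pthread_cond_wait(&cv, &mtx);
		}
	}
	pthread_mutex_unlock(&mtx);
}]]></programlisting>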
33441-
33442-      <varlistentry id="tcache.create">
33443-        <term>
33444-          <mallctl>tcache.create</mallctl>
33445-          (<type>unsigned</type>)
33446-          <literal>r-</literal>
33447-        </term>
33448-        <listitem><para>Create an explicit thread-specific cache (tcache) and
33449-        return an identifier that can be passed to the <link
33450-        linkend="MALLOCX_TCACHE"><constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant></link>
33451-        macro to explicitly use the specified cache rather than the
33452-        automatically managed one that is used by default.  Each explicit cache
33453-        can be used by only one thread at a time; the application must ensure
33454-        that this constraint holds.
33455-        </para>
33456-
33457-        <para>If the amount of space supplied for storing the thread-specific
33458-        cache identifier does not equal
33459-        <code language="C">sizeof(<type>unsigned</type>)</code>, no
33460-        thread-specific cache will be created, no data will be written to the
33461-        space pointed to by <parameter>oldp</parameter>, and
33462-        <parameter>*oldlenp</parameter> will be set to 0.
33463-        </para></listitem>
33464-
33465-      </varlistentry>
33466-
33467-      <varlistentry id="tcache.flush">
33468-        <term>
33469-          <mallctl>tcache.flush</mallctl>
33470-          (<type>unsigned</type>)
33471-          <literal>-w</literal>
33472-        </term>
33473-        <listitem><para>Flush the specified thread-specific cache (tcache).  The
33474-        same considerations apply to this interface as to <link
33475-        linkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>,
33476-        except that the tcache will never be automatically discarded.
33477-        </para></listitem>
33478-      </varlistentry>
33479-
33480-      <varlistentry id="tcache.destroy">
33481-        <term>
33482-          <mallctl>tcache.destroy</mallctl>
33483-          (<type>unsigned</type>)
33484-          <literal>-w</literal>
33485-        </term>
33486-        <listitem><para>Flush the specified thread-specific cache (tcache) and
33487-        make the identifier available for use during a future tcache creation.
33488-        </para></listitem>
33489-      </varlistentry>
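        <para>A minimal sketch of the explicit tcache lifecycle described by
        the three preceding entries, assuming the
        <function>mallctl()</function>, <function>mallocx()</function> and
        <function>dallocx()</function> interfaces declared in
        <filename>&lt;jemalloc/jemalloc.h&gt;</filename>.  Remember that each
        explicit tcache may be used by only one thread at a time.</para>
        <programlisting language="C"><![CDATA[
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static void
explicit_tcache_example(void) {
	unsigned tc;
	size_t sz = sizeof(tc);

	/* Create an explicit tcache; its identifier is read via oldp. */
	if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
		return;

	/* Allocate and deallocate through the explicit tcache. */
	void *p = mallocx(4096, MALLOCX_TCACHE(tc));
	if (p != NULL)
		dallocx(p, MALLOCX_TCACHE(tc));

	/* Flush cached objects while keeping the identifier ... */
	(void)mallctl("tcache.flush", NULL, NULL, &tc, sizeof(tc));

	/* ... or flush and release the identifier for reuse. */
	(void)mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
}]]></programlisting>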
33490-
33491-      <varlistentry id="arena.i.initialized">
33492-        <term>
33493-          <mallctl>arena.&lt;i&gt;.initialized</mallctl>
33494-          (<type>bool</type>)
33495-          <literal>r-</literal>
33496-        </term>
33497-        <listitem><para>Get whether the specified arena's statistics are
33498-        initialized (i.e. the arena was initialized prior to the current epoch).
33499-        This interface can also be nominally used to query whether the merged
33500-        statistics corresponding to <constant>MALLCTL_ARENAS_ALL</constant> are
33501-        initialized (always true).</para></listitem>
33502-      </varlistentry>
33503-
33504-      <varlistentry id="arena.i.decay">
33505-        <term>
33506-          <mallctl>arena.&lt;i&gt;.decay</mallctl>
33507-          (<type>void</type>)
33508-          <literal>--</literal>
33509-        </term>
33510-        <listitem><para>Trigger decay-based purging of unused dirty/muzzy pages
33511-        for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
33512-        <constant>MALLCTL_ARENAS_ALL</constant>.  The proportion of unused
33513-        dirty/muzzy pages to be purged depends on the current time; see <link
33514-        linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
33515-        and <link
33516-        linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
33517-        for details.</para></listitem>
33518-      </varlistentry>
33519-
33520-      <varlistentry id="arena.i.purge">
33521-        <term>
33522-          <mallctl>arena.&lt;i&gt;.purge</mallctl>
33523-          (<type>void</type>)
33524-          <literal>--</literal>
33525-        </term>
33526-        <listitem><para>Purge all unused dirty pages for arena &lt;i&gt;, or for
33527-        all arenas if &lt;i&gt; equals <constant>MALLCTL_ARENAS_ALL</constant>.
33528-        </para></listitem>
33529-      </varlistentry>
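        <para>Because the arena index is embedded in the mallctl name, these
        controls are typically invoked by formatting the name at run time, as
        in the following sketch (the cast of
        <constant>MALLCTL_ARENAS_ALL</constant> is only a formatting
        convenience).</para>
        <programlisting language="C"><![CDATA[
#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Trigger decay-based purging for a single arena. */
static void
decay_arena(unsigned arena_ind) {
	char cmd[64];
	snprintf(cmd, sizeof(cmd), "arena.%u.decay", arena_ind);
	(void)mallctl(cmd, NULL, NULL, NULL, 0);
}

/* Force purging of all unused dirty pages across every arena. */
static void
purge_all_arenas(void) {
	char cmd[64];
	snprintf(cmd, sizeof(cmd), "arena.%u.purge",
	    (unsigned)MALLCTL_ARENAS_ALL);
	(void)mallctl(cmd, NULL, NULL, NULL, 0);
}]]></programlisting>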
33530-
33531-      <varlistentry id="arena.i.reset">
33532-        <term>
33533-          <mallctl>arena.&lt;i&gt;.reset</mallctl>
33534-          (<type>void</type>)
33535-          <literal>--</literal>
33536-        </term>
33537-        <listitem><para>Discard all of the arena's extant allocations.  This
33538-        interface can only be used with arenas explicitly created via <link
33539-        linkend="arenas.create"><mallctl>arenas.create</mallctl></link>.  None
33540-        of the arena's discarded/cached allocations may be accessed afterward.  As
33541-        part of this requirement, all thread caches which were used to
33542-        allocate/deallocate in conjunction with the arena must be flushed
33543-        beforehand.</para></listitem>
33544-      </varlistentry>
33545-
33546-      <varlistentry id="arena.i.destroy">
33547-        <term>
33548-          <mallctl>arena.&lt;i&gt;.destroy</mallctl>
33549-          (<type>void</type>)
33550-          <literal>--</literal>
33551-        </term>
33552-        <listitem><para>Destroy the arena.  Discard all of the arena's extant
33553-        allocations using the same mechanism as for <link
33554-        linkend="arena.i.reset"><mallctl>arena.&lt;i&gt;.reset</mallctl></link>
33555-        (with all the same constraints and side effects), merge the arena stats
33556-        into those accessible at arena index
33557-        <constant>MALLCTL_ARENAS_DESTROYED</constant>, and then completely
33558-        discard all metadata associated with the arena.  Future calls to <link
33559-        linkend="arenas.create"><mallctl>arenas.create</mallctl></link> may
33560-        recycle the arena index.  Destruction will fail if any threads are
33561-        currently associated with the arena as a result of calls to <link
33562-        linkend="thread.arena"><mallctl>thread.arena</mallctl></link>.</para></listitem>
33563-      </varlistentry>
33564-
33565-      <varlistentry id="arena.i.dss">
33566-        <term>
33567-          <mallctl>arena.&lt;i&gt;.dss</mallctl>
33568-          (<type>const char *</type>)
33569-          <literal>rw</literal>
33570-        </term>
33571-        <listitem><para>Set the precedence of dss allocation as related to mmap
33572-        allocation for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
33573-        <constant>MALLCTL_ARENAS_ALL</constant>.  See <link
33574-        linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
33575-        settings.</para></listitem>
33576-      </varlistentry>
33577-
33578-      <varlistentry id="arena.i.dirty_decay_ms">
33579-        <term>
33580-          <mallctl>arena.&lt;i&gt;.dirty_decay_ms</mallctl>
33581-          (<type>ssize_t</type>)
33582-          <literal>rw</literal>
33583-        </term>
33584-        <listitem><para>Current per-arena approximate time in milliseconds from
33585-        the creation of a set of unused dirty pages until an equivalent set of
33586-        unused dirty pages is purged and/or reused.  Each time this interface is
33587-        set, all currently unused dirty pages are considered to have fully
33588-        decayed, which causes immediate purging of all unused dirty pages unless
33589-        the decay time is set to -1 (i.e. purging disabled).  See <link
33590-        linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
33591-        for additional information.</para></listitem>
33592-      </varlistentry>
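        <para>Writing this control follows the same name-formatting pattern;
        the following sketch disables purging for one arena by writing -1
        (the arena index and values are illustrative).</para>
        <programlisting language="C"><![CDATA[
#include <stdio.h>
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

static void
set_dirty_decay_ms(unsigned arena_ind, ssize_t decay_ms) {
	char cmd[64];
	snprintf(cmd, sizeof(cmd), "arena.%u.dirty_decay_ms", arena_ind);
	(void)mallctl(cmd, NULL, NULL, &decay_ms, sizeof(decay_ms));
}

/* Example: set_dirty_decay_ms(0, -1) disables dirty-page purging for
 * arena 0; set_dirty_decay_ms(0, 10000) restores a 10 s decay time. */]]></programlisting>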
33593-
33594-      <varlistentry id="arena.i.muzzy_decay_ms">
33595-        <term>
33596-          <mallctl>arena.&lt;i&gt;.muzzy_decay_ms</mallctl>
33597-          (<type>ssize_t</type>)
33598-          <literal>rw</literal>
33599-        </term>
33600-        <listitem><para>Current per-arena approximate time in milliseconds from
33601-        the creation of a set of unused muzzy pages until an equivalent set of
33602-        unused muzzy pages is purged and/or reused.  Each time this interface is
33603-        set, all currently unused muzzy pages are considered to have fully
33604-        decayed, which causes immediate purging of all unused muzzy pages unless
33605-        the decay time is set to -1 (i.e. purging disabled).  See <link
33606-        linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
33607-        for additional information.</para></listitem>
33608-      </varlistentry>
33609-
33610-      <varlistentry id="arena.i.retain_grow_limit">
33611-        <term>
33612-          <mallctl>arena.&lt;i&gt;.retain_grow_limit</mallctl>
33613-          (<type>size_t</type>)
33614-          <literal>rw</literal>
33615-        </term>
33616-        <listitem><para>Maximum size to grow retained region (only relevant when
33617-        <link linkend="opt.retain"><mallctl>opt.retain</mallctl></link> is
33618-        enabled).  This controls the maximum increment used to expand virtual
33619-        memory, or to allocate through <link
33620-        linkend="arena.i.extent_hooks"><mallctl>arena.&lt;i&gt;.extent_hooks</mallctl></link>.
33621-        In particular, if customized extent hooks reserve physical memory
33622-        (e.g. 1G huge pages), this is useful to control the allocation hook's
33623-        input size.  The default is no limit.</para></listitem>
33624-      </varlistentry>
33625-
33626-      <varlistentry id="arena.i.extent_hooks">
33627-        <term>
33628-          <mallctl>arena.&lt;i&gt;.extent_hooks</mallctl>
33629-          (<type>extent_hooks_t *</type>)
33630-          <literal>rw</literal>
33631-        </term>
33632-        <listitem><para>Get or set the extent management hook functions for
33633-        arena &lt;i&gt;.  The functions must be capable of operating on all
33634-        extant extents associated with arena &lt;i&gt;, usually by passing
33635-        unknown extents to the replaced functions.  In practice, it is feasible
33636-        to control allocation for arenas explicitly created via <link
33637-        linkend="arenas.create"><mallctl>arenas.create</mallctl></link> such
33638-        that all extents originate from an application-supplied extent allocator
33639-        (by specifying the custom extent hook functions during arena creation).
33640-        However, the API guarantees for the automatically created arenas may be
33641-        relaxed -- hooks set there may be called in a "best effort" fashion; in
33642-        addition there may be extents created prior to the application having an
33643-        opportunity to take over extent allocation.</para>
33644-
33645-        <programlisting language="C"><![CDATA[
33646-typedef struct extent_hooks_s extent_hooks_t;
33647-struct extent_hooks_s {
33648-	extent_alloc_t		*alloc;
33649-	extent_dalloc_t		*dalloc;
33650-	extent_destroy_t	*destroy;
33651-	extent_commit_t		*commit;
33652-	extent_decommit_t	*decommit;
33653-	extent_purge_t		*purge_lazy;
33654-	extent_purge_t		*purge_forced;
33655-	extent_split_t		*split;
33656-	extent_merge_t		*merge;
33657-};]]></programlisting>
33658-        <para>The <type>extent_hooks_t</type> structure comprises function
33659-        pointers which are described individually below.  jemalloc uses these
33660-        functions to manage extent lifetime, which starts off with allocation of
33661-        mapped committed memory, in the simplest case followed by deallocation.
33662-        However, there are performance and platform reasons to retain extents
33663-        for later reuse.  Cleanup attempts cascade from deallocation to decommit
33664-        to forced purging to lazy purging, which gives the extent management
33665-        functions opportunities to reject the most permanent cleanup operations
33666-        in favor of less permanent (and often less costly) operations.  All
33667-        operations except allocation can be universally opted out of by setting
33668-        the hook pointers to <constant>NULL</constant>, or selectively opted out
33669-        of by returning failure.  Note that once the extent hook is set, the
33670-        structure is accessed directly by the associated arenas, so it must
33671-        remain valid for the entire lifetime of the arenas.</para>
33672-
33673-        <funcsynopsis><funcprototype>
33674-          <funcdef>typedef void *<function>(extent_alloc_t)</function></funcdef>
33675-          <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
33676-          <paramdef>void *<parameter>new_addr</parameter></paramdef>
33677-          <paramdef>size_t <parameter>size</parameter></paramdef>
33678-          <paramdef>size_t <parameter>alignment</parameter></paramdef>
33679-          <paramdef>bool *<parameter>zero</parameter></paramdef>
33680-          <paramdef>bool *<parameter>commit</parameter></paramdef>
33681-          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
33682-        </funcprototype></funcsynopsis>
33683-        <literallayout></literallayout>
33684-        <para>An extent allocation function conforms to the
33685-        <type>extent_alloc_t</type> type and upon success returns a pointer to
33686-        <parameter>size</parameter> bytes of mapped memory on behalf of arena
33687-        <parameter>arena_ind</parameter> such that the extent's base address is
33688-        a multiple of <parameter>alignment</parameter>, as well as setting
33689-        <parameter>*zero</parameter> to indicate whether the extent is zeroed
33690-        and <parameter>*commit</parameter> to indicate whether the extent is
33691-        committed.  Upon error the function returns <constant>NULL</constant>
33692-        and leaves <parameter>*zero</parameter> and
33693-        <parameter>*commit</parameter> unmodified.  The
33694-        <parameter>size</parameter> parameter is always a multiple of the page
33695-        size.  The <parameter>alignment</parameter> parameter is always a power
33696-        of two at least as large as the page size.  Zeroing is mandatory if
33697-        <parameter>*zero</parameter> is true upon function entry.  Committing is
33698-        mandatory if <parameter>*commit</parameter> is true upon function entry.
33699-        If <parameter>new_addr</parameter> is not <constant>NULL</constant>, the
33700-        returned pointer must be <parameter>new_addr</parameter> on success or
33701-        <constant>NULL</constant> on error.  Committed memory may be committed
33702-        in absolute terms as on a system that does not overcommit, or in
33703-        implicit terms as on a system that overcommits and satisfies physical
33704-        memory needs on demand via soft page faults.  Note that replacing the
33705-        default extent allocation function makes the arena's <link
33706-        linkend="arena.i.dss"><mallctl>arena.&lt;i&gt;.dss</mallctl></link>
33707-        setting irrelevant.</para>
33708-
33709-        <funcsynopsis><funcprototype>
33710-          <funcdef>typedef bool <function>(extent_dalloc_t)</function></funcdef>
33711-          <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
33712-          <paramdef>void *<parameter>addr</parameter></paramdef>
33713-          <paramdef>size_t <parameter>size</parameter></paramdef>
33714-          <paramdef>bool <parameter>committed</parameter></paramdef>
33715-          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
33716-        </funcprototype></funcsynopsis>
33717-        <literallayout></literallayout>
33718-        <para>
33719-        An extent deallocation function conforms to the
33720-        <type>extent_dalloc_t</type> type and deallocates an extent at given
33721-        <parameter>addr</parameter> and <parameter>size</parameter> with
33722-        <parameter>committed</parameter>/decommitted memory as indicated, on
33723-        behalf of arena <parameter>arena_ind</parameter>, returning false upon
33724-        success.  If the function returns true, this indicates opt-out from
33725-        deallocation; the virtual memory mapping associated with the extent
33726-        remains mapped, in the same commit state, and available for future use,
33727-        in which case it will be automatically retained for later reuse.</para>
33728-
33729-        <funcsynopsis><funcprototype>
33730-          <funcdef>typedef void <function>(extent_destroy_t)</function></funcdef>
33731-          <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
33732-          <paramdef>void *<parameter>addr</parameter></paramdef>
33733-          <paramdef>size_t <parameter>size</parameter></paramdef>
33734-          <paramdef>bool <parameter>committed</parameter></paramdef>
33735-          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
33736-        </funcprototype></funcsynopsis>
33737-        <literallayout></literallayout>
33738-        <para>
33739-        An extent destruction function conforms to the
33740-        <type>extent_destroy_t</type> type and unconditionally destroys an
33741-        extent at given <parameter>addr</parameter> and
33742-        <parameter>size</parameter> with
33743-        <parameter>committed</parameter>/decommitted memory as indicated, on
33744-        behalf of arena <parameter>arena_ind</parameter>.  This function may be
33745-        called to destroy retained extents during arena destruction (see <link
33746-        linkend="arena.i.destroy"><mallctl>arena.&lt;i&gt;.destroy</mallctl></link>).</para>
33747-
33748-        <funcsynopsis><funcprototype>
33749-          <funcdef>typedef bool <function>(extent_commit_t)</function></funcdef>
33750-          <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
33751-          <paramdef>void *<parameter>addr</parameter></paramdef>
33752-          <paramdef>size_t <parameter>size</parameter></paramdef>
33753-          <paramdef>size_t <parameter>offset</parameter></paramdef>
33754-          <paramdef>size_t <parameter>length</parameter></paramdef>
33755-          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
33756-        </funcprototype></funcsynopsis>
33757-        <literallayout></literallayout>
33758-        <para>An extent commit function conforms to the
33759-        <type>extent_commit_t</type> type and commits zeroed physical memory to
33760-        back pages within an extent at given <parameter>addr</parameter> and
33761-        <parameter>size</parameter> at <parameter>offset</parameter> bytes,
33762-        extending for <parameter>length</parameter> on behalf of arena
33763-        <parameter>arena_ind</parameter>, returning false upon success.
33764-        Committed memory may be committed in absolute terms as on a system that
33765-        does not overcommit, or in implicit terms as on a system that
33766-        overcommits and satisfies physical memory needs on demand via soft page
33767-        faults. If the function returns true, this indicates insufficient
33768-        physical memory to satisfy the request.</para>
33769-
33770-        <funcsynopsis><funcprototype>
33771-          <funcdef>typedef bool <function>(extent_decommit_t)</function></funcdef>
33772-          <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
33773-          <paramdef>void *<parameter>addr</parameter></paramdef>
33774-          <paramdef>size_t <parameter>size</parameter></paramdef>
33775-          <paramdef>size_t <parameter>offset</parameter></paramdef>
33776-          <paramdef>size_t <parameter>length</parameter></paramdef>
33777-          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
33778-        </funcprototype></funcsynopsis>
33779-        <literallayout></literallayout>
33780-        <para>An extent decommit function conforms to the
33781-        <type>extent_decommit_t</type> type and decommits any physical memory
33782-        that is backing pages within an extent at given
33783-        <parameter>addr</parameter> and <parameter>size</parameter> at
33784-        <parameter>offset</parameter> bytes, extending for
33785-        <parameter>length</parameter> on behalf of arena
33786-        <parameter>arena_ind</parameter>, returning false upon success, in which
33787-        case the pages will be committed via the extent commit function before
33788-        being reused.  If the function returns true, this indicates opt-out from
33789-        decommit; the memory remains committed and available for future use, in
33790-        which case it will be automatically retained for later reuse.</para>
33791-
33792-        <funcsynopsis><funcprototype>
33793-          <funcdef>typedef bool <function>(extent_purge_t)</function></funcdef>
33794-          <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
33795-          <paramdef>void *<parameter>addr</parameter></paramdef>
33796-          <paramdef>size_t <parameter>size</parameter></paramdef>
33797-          <paramdef>size_t <parameter>offset</parameter></paramdef>
33798-          <paramdef>size_t <parameter>length</parameter></paramdef>
33799-          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
33800-        </funcprototype></funcsynopsis>
33801-        <literallayout></literallayout>
33802-        <para>An extent purge function conforms to the
33803-        <type>extent_purge_t</type> type and discards physical pages
33804-        within the virtual memory mapping associated with an extent at given
33805-        <parameter>addr</parameter> and <parameter>size</parameter> at
33806-        <parameter>offset</parameter> bytes, extending for
33807-        <parameter>length</parameter> on behalf of arena
33808-        <parameter>arena_ind</parameter>.  A lazy extent purge function (e.g.
33809-        implemented via
33810-        <function>madvise(<parameter>...</parameter><parameter><constant>MADV_FREE</constant></parameter>)</function>)
33811-        can delay purging indefinitely and leave the pages within the purged
33812-        virtual memory range in an indeterminate state, whereas a forced extent
33813-        purge function immediately purges, and the pages within the virtual
33814-        memory range will be zero-filled the next time they are accessed.  If
33815-        the function returns true, this indicates failure to purge.</para>
33816-
33817-        <funcsynopsis><funcprototype>
33818-          <funcdef>typedef bool <function>(extent_split_t)</function></funcdef>
33819-          <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
33820-          <paramdef>void *<parameter>addr</parameter></paramdef>
33821-          <paramdef>size_t <parameter>size</parameter></paramdef>
33822-          <paramdef>size_t <parameter>size_a</parameter></paramdef>
33823-          <paramdef>size_t <parameter>size_b</parameter></paramdef>
33824-          <paramdef>bool <parameter>committed</parameter></paramdef>
33825-          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
33826-        </funcprototype></funcsynopsis>
33827-        <literallayout></literallayout>
33828-        <para>An extent split function conforms to the
33829-        <type>extent_split_t</type> type and optionally splits an extent at
33830-        given <parameter>addr</parameter> and <parameter>size</parameter> into
33831-        two adjacent extents, the first of <parameter>size_a</parameter> bytes,
33832-        and the second of <parameter>size_b</parameter> bytes, operating on
33833-        <parameter>committed</parameter>/decommitted memory as indicated, on
33834-        behalf of arena <parameter>arena_ind</parameter>, returning false upon
33835-        success.  If the function returns true, this indicates that the extent
33836-        remains unsplit and therefore should continue to be operated on as a
33837-        whole.</para>
33838-
33839-        <funcsynopsis><funcprototype>
33840-          <funcdef>typedef bool <function>(extent_merge_t)</function></funcdef>
33841-          <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
33842-          <paramdef>void *<parameter>addr_a</parameter></paramdef>
33843-          <paramdef>size_t <parameter>size_a</parameter></paramdef>
33844-          <paramdef>void *<parameter>addr_b</parameter></paramdef>
33845-          <paramdef>size_t <parameter>size_b</parameter></paramdef>
33846-          <paramdef>bool <parameter>committed</parameter></paramdef>
33847-          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
33848-        </funcprototype></funcsynopsis>
33849-        <literallayout></literallayout>
33850-        <para>An extent merge function conforms to the
33851-        <type>extent_merge_t</type> type and optionally merges adjacent extents,
33852-        at given <parameter>addr_a</parameter> and <parameter>size_a</parameter>
33853-        with given <parameter>addr_b</parameter> and
33854-        <parameter>size_b</parameter> into one contiguous extent, operating on
33855-        <parameter>committed</parameter>/decommitted memory as indicated, on
33856-        behalf of arena <parameter>arena_ind</parameter>, returning false upon
33857-        success.  If the function returns true, this indicates that the extents
33858-        remain distinct mappings and therefore should continue to be operated on
33859-        independently.</para>
33860-        </listitem>
33861-      </varlistentry>
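        <para>A sketch of one common pattern: read an arena's current hooks,
        then install a copy whose allocation hook wraps the original.  This
        is illustrative rather than a recommended implementation; in
        particular, hooks installed on automatically created arenas are only
        best-effort, and hook implementations should generally avoid
        allocating through jemalloc from within a hook.</para>
        <programlisting language="C"><![CDATA[
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Static storage: the hooks structure must remain valid for the entire
 * lifetime of the arena it is installed on. */
static extent_hooks_t *orig_hooks;
static extent_hooks_t logging_hooks;

static void *
logging_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	/* stderr is unbuffered, so this should not recurse into malloc. */
	fprintf(stderr, "extent alloc: %zu bytes for arena %u\n", size,
	    arena_ind);
	/* Delegate to the original hook so the contract is preserved. */
	return orig_hooks->alloc(orig_hooks, new_addr, size, alignment, zero,
	    commit, arena_ind);
}

static void
install_logging_hooks(void) {
	size_t sz = sizeof(orig_hooks);

	/* Read the current hooks of arena 0, then write back a copy with a
	 * replaced allocation hook. */
	if (mallctl("arena.0.extent_hooks", &orig_hooks, &sz, NULL, 0) != 0)
		return;
	logging_hooks = *orig_hooks;
	logging_hooks.alloc = logging_alloc;
	extent_hooks_t *new_hooks = &logging_hooks;
	(void)mallctl("arena.0.extent_hooks", NULL, NULL, &new_hooks,
	    sizeof(new_hooks));
}]]></programlisting>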
33862-
33863-      <varlistentry id="arenas.narenas">
33864-        <term>
33865-          <mallctl>arenas.narenas</mallctl>
33866-          (<type>unsigned</type>)
33867-          <literal>r-</literal>
33868-        </term>
33869-        <listitem><para>Current limit on number of arenas.</para></listitem>
33870-      </varlistentry>
33871-
33872-      <varlistentry id="arenas.dirty_decay_ms">
33873-        <term>
33874-          <mallctl>arenas.dirty_decay_ms</mallctl>
33875-          (<type>ssize_t</type>)
33876-          <literal>rw</literal>
33877-        </term>
33878-        <listitem><para>Current default per-arena approximate time in
33879-        milliseconds from the creation of a set of unused dirty pages until an
33880-        equivalent set of unused dirty pages is purged and/or reused, used to
33881-        initialize <link
33882-        linkend="arena.i.dirty_decay_ms"><mallctl>arena.&lt;i&gt;.dirty_decay_ms</mallctl></link>
33883-        during arena creation.  See <link
33884-        linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
33885-        for additional information.</para></listitem>
33886-      </varlistentry>
33887-
33888-      <varlistentry id="arenas.muzzy_decay_ms">
33889-        <term>
33890-          <mallctl>arenas.muzzy_decay_ms</mallctl>
33891-          (<type>ssize_t</type>)
33892-          <literal>rw</literal>
33893-        </term>
33894-        <listitem><para>Current default per-arena approximate time in
33895-        milliseconds from the creation of a set of unused muzzy pages until an
33896-        equivalent set of unused muzzy pages is purged and/or reused, used to
33897-        initialize <link
33898-        linkend="arena.i.muzzy_decay_ms"><mallctl>arena.&lt;i&gt;.muzzy_decay_ms</mallctl></link>
33899-        during arena creation.  See <link
33900-        linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
33901-        for additional information.</para></listitem>
33902-      </varlistentry>
33903-
33904-      <varlistentry id="arenas.quantum">
33905-        <term>
33906-          <mallctl>arenas.quantum</mallctl>
33907-          (<type>size_t</type>)
33908-          <literal>r-</literal>
33909-        </term>
33910-        <listitem><para>Quantum size.</para></listitem>
33911-      </varlistentry>
33912-
33913-      <varlistentry id="arenas.page">
33914-        <term>
33915-          <mallctl>arenas.page</mallctl>
33916-          (<type>size_t</type>)
33917-          <literal>r-</literal>
33918-        </term>
33919-        <listitem><para>Page size.</para></listitem>
33920-      </varlistentry>
33921-
33922-      <varlistentry id="arenas.tcache_max">
33923-        <term>
33924-          <mallctl>arenas.tcache_max</mallctl>
33925-          (<type>size_t</type>)
33926-          <literal>r-</literal>
33927-        </term>
33928-        <listitem><para>Maximum thread-cached size class.</para></listitem>
33929-      </varlistentry>
33930-
33931-      <varlistentry id="arenas.nbins">
33932-        <term>
33933-          <mallctl>arenas.nbins</mallctl>
33934-          (<type>unsigned</type>)
33935-          <literal>r-</literal>
33936-        </term>
33937-        <listitem><para>Number of bin size classes.</para></listitem>
33938-      </varlistentry>
33939-
33940-      <varlistentry id="arenas.nhbins">
33941-        <term>
33942-          <mallctl>arenas.nhbins</mallctl>
33943-          (<type>unsigned</type>)
33944-          <literal>r-</literal>
33945-        </term>
33946-        <listitem><para>Total number of thread cache bin size
33947-        classes.</para></listitem>
33948-      </varlistentry>
33949-
33950-      <varlistentry id="arenas.bin.i.size">
33951-        <term>
33952-          <mallctl>arenas.bin.&lt;i&gt;.size</mallctl>
33953-          (<type>size_t</type>)
33954-          <literal>r-</literal>
33955-        </term>
33956-        <listitem><para>Maximum size supported by size class.</para></listitem>
33957-      </varlistentry>
33958-
33959-      <varlistentry id="arenas.bin.i.nregs">
33960-        <term>
33961-          <mallctl>arenas.bin.&lt;i&gt;.nregs</mallctl>
33962-          (<type>uint32_t</type>)
33963-          <literal>r-</literal>
33964-        </term>
33965-        <listitem><para>Number of regions per slab.</para></listitem>
33966-      </varlistentry>
33967-
33968-      <varlistentry id="arenas.bin.i.slab_size">
33969-        <term>
33970-          <mallctl>arenas.bin.&lt;i&gt;.slab_size</mallctl>
33971-          (<type>size_t</type>)
33972-          <literal>r-</literal>
33973-        </term>
33974-        <listitem><para>Number of bytes per slab.</para></listitem>
33975-      </varlistentry>
33976-
33977-      <varlistentry id="arenas.nlextents">
33978-        <term>
33979-          <mallctl>arenas.nlextents</mallctl>
33980-          (<type>unsigned</type>)
33981-          <literal>r-</literal>
33982-        </term>
33983-        <listitem><para>Total number of large size classes.</para></listitem>
33984-      </varlistentry>
33985-
33986-      <varlistentry id="arenas.lextent.i.size">
33987-        <term>
33988-          <mallctl>arenas.lextent.&lt;i&gt;.size</mallctl>
33989-          (<type>size_t</type>)
33990-          <literal>r-</literal>
33991-        </term>
33992-        <listitem><para>Maximum size supported by this large size
33993-        class.</para></listitem>
33994-      </varlistentry>
33995-
33996-      <varlistentry id="arenas.create">
33997-        <term>
33998-          <mallctl>arenas.create</mallctl>
33999-          (<type>unsigned</type>, <type>extent_hooks_t *</type>)
34000-          <literal>rw</literal>
34001-        </term>
34002-        <listitem><para>Explicitly create a new arena outside the range of
34003-        automatically managed arenas, with optionally specified extent hooks,
34004-        and return the new arena index.</para>
34005-
34006-        <para>If the amount of space supplied for storing the arena index does
34007-        not equal <code language="C">sizeof(<type>unsigned</type>)</code>, no
34008-        arena will be created, no data will be written to the space pointed to by
34009-        <parameter>oldp</parameter>, and <parameter>*oldlenp</parameter> will
34010-        be set to 0.
34011-        </para></listitem>
34012-      </varlistentry>
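        <para>A sketch of creating a private arena and allocating from it
        explicitly.  Custom extent hooks could be supplied at creation time
        by passing an <type>extent_hooks_t *</type> via
        <parameter>newp</parameter>; that is omitted here.</para>
        <programlisting language="C"><![CDATA[
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static void
use_private_arena(void) {
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);

	/* Create an arena with the default extent hooks; the new index is
	 * read via oldp. */
	if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0)
		return;

	/* Allocate from that arena; the thread cache is bypassed here for
	 * clarity. */
	void *p = mallocx(1 << 20, MALLOCX_ARENA(arena_ind) |
	    MALLOCX_TCACHE_NONE);
	if (p != NULL)
		dallocx(p, MALLOCX_TCACHE_NONE);
}]]></programlisting>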
34013-
34014-      <varlistentry id="arenas.lookup">
34015-        <term>
34016-          <mallctl>arenas.lookup</mallctl>
34017-          (<type>unsigned</type>, <type>void*</type>)
34018-          <literal>rw</literal>
34019-        </term>
34020-        <listitem><para>Index of the arena to which an allocation belongs.</para></listitem>
34021-      </varlistentry>
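        <para>Given the <type>(unsigned, void*)</type> signature above, the
        expected calling convention is to pass the pointer via
        <parameter>newp</parameter> and read the owning arena index via
        <parameter>oldp</parameter>; the following sketch reflects that
        assumption.</para>
        <programlisting language="C"><![CDATA[
#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Return the index of the arena owning ptr; errors are ignored and 0 is
 * returned, so this is a sketch rather than production code. */
static unsigned
arena_of(void *ptr) {
	unsigned arena_ind = 0;
	size_t sz = sizeof(arena_ind);
	(void)mallctl("arenas.lookup", &arena_ind, &sz, &ptr, sizeof(ptr));
	return arena_ind;
}]]></programlisting>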
34022-
34023-      <varlistentry id="prof.thread_active_init">
34024-        <term>
34025-          <mallctl>prof.thread_active_init</mallctl>
34026-          (<type>bool</type>)
34027-          <literal>rw</literal>
34028-          [<option>--enable-prof</option>]
34029-        </term>
34030-        <listitem><para>Control the initial setting for <link
34031-        linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
34032-        in newly created threads.  See the <link
34033-        linkend="opt.prof_thread_active_init"><mallctl>opt.prof_thread_active_init</mallctl></link>
34034-        option for additional information.</para></listitem>
34035-      </varlistentry>
34036-
34037-      <varlistentry id="prof.active">
34038-        <term>
34039-          <mallctl>prof.active</mallctl>
34040-          (<type>bool</type>)
34041-          <literal>rw</literal>
34042-          [<option>--enable-prof</option>]
34043-        </term>
34044-        <listitem><para>Control whether sampling is currently active.  See the
34045-        <link
34046-        linkend="opt.prof_active"><mallctl>opt.prof_active</mallctl></link>
34047-        option for additional information, as well as the interrelated <link
34048-        linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
34049-        mallctl.</para></listitem>
34050-      </varlistentry>
34051-
34052-      <varlistentry id="prof.dump">
34053-        <term>
34054-          <mallctl>prof.dump</mallctl>
34055-          (<type>const char *</type>)
34056-          <literal>-w</literal>
34057-          [<option>--enable-prof</option>]
34058-        </term>
34059-        <listitem><para>Dump a memory profile to the specified file, or if NULL
34060-        is specified, to a file according to the pattern
34061-        <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.m&lt;mseq&gt;.heap</filename>,
34062-        where <literal>&lt;prefix&gt;</literal> is controlled by the
34063-        <link linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
34064-        and <link linkend="prof.prefix"><mallctl>prof.prefix</mallctl></link>
34065-        options.</para></listitem>
34066-      </varlistentry>
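        <para>A sketch of a write-only invocation that dumps to an explicitly
        named file (the path is illustrative; the build must be configured
        with <option>--enable-prof</option> and profiling must be
        active).</para>
        <programlisting language="C"><![CDATA[
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static void
dump_heap_profile(void) {
	const char *path = "/tmp/app.heap";	/* illustrative path */
	(void)mallctl("prof.dump", NULL, NULL, &path, sizeof(path));
}]]></programlisting>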
34067-
34068-      <varlistentry id="prof.prefix">
34069-        <term>
34070-          <mallctl>prof.prefix</mallctl>
34071-          (<type>const char *</type>)
34072-          <literal>-w</literal>
34073-          [<option>--enable-prof</option>]
34074-        </term>
34075-        <listitem><para>Set the filename prefix for profile dumps. See
34076-        <link
34077-        linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
34078-        for the default setting.  This can be useful for differentiating
34079-        profile dumps, such as those from forked processes.
34080-        </para></listitem>
34081-      </varlistentry>
34082-
34083-      <varlistentry id="prof.gdump">
34084-        <term>
34085-          <mallctl>prof.gdump</mallctl>
34086-          (<type>bool</type>)
34087-          <literal>rw</literal>
34088-          [<option>--enable-prof</option>]
34089-        </term>
34090-        <listitem><para>When enabled, trigger a memory profile dump every time
34091-        the total virtual memory exceeds the previous maximum.  Profiles are
34092-        dumped to files named according to the pattern
34093-        <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.u&lt;useq&gt;.heap</filename>,
34094-        where <literal>&lt;prefix&gt;</literal> is controlled by the <link
34095-        linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link> and
34096-        <link linkend="prof.prefix"><mallctl>prof.prefix</mallctl></link>
34097-        options.</para></listitem>
34098-      </varlistentry>
34099-
34100-      <varlistentry id="prof.reset">
34101-        <term>
34102-          <mallctl>prof.reset</mallctl>
34103-          (<type>size_t</type>)
34104-          <literal>-w</literal>
34105-          [<option>--enable-prof</option>]
34106-        </term>
34107-        <listitem><para>Reset all memory profile statistics, and optionally
34108-        update the sample rate (see <link
34109-        linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>
34110-        and <link
34111-        linkend="prof.lg_sample"><mallctl>prof.lg_sample</mallctl></link>).
34112-        </para></listitem>
34113-      </varlistentry>
34114-
34115-      <varlistentry id="prof.lg_sample">
34116-        <term>
34117-          <mallctl>prof.lg_sample</mallctl>
34118-          (<type>size_t</type>)
34119-          <literal>r-</literal>
34120-          [<option>--enable-prof</option>]
34121-        </term>
34122-        <listitem><para>Get the current sample rate (see <link
34123-        linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>).
34124-        </para></listitem>
34125-      </varlistentry>
34126-
34127-      <varlistentry id="prof.interval">
34128-        <term>
34129-          <mallctl>prof.interval</mallctl>
34130-          (<type>uint64_t</type>)
34131-          <literal>r-</literal>
34132-          [<option>--enable-prof</option>]
34133-        </term>
34134-        <listitem><para>Average number of bytes allocated between
34135-        interval-based profile dumps.  See the
34136-        <link
34137-        linkend="opt.lg_prof_interval"><mallctl>opt.lg_prof_interval</mallctl></link>
34138-        option for additional information.</para></listitem>
34139-      </varlistentry>
34140-
34141-      <varlistentry id="stats.allocated">
34142-        <term>
34143-          <mallctl>stats.allocated</mallctl>
34144-          (<type>size_t</type>)
34145-          <literal>r-</literal>
34146-          [<option>--enable-stats</option>]
34147-        </term>
34148-        <listitem><para>Total number of bytes allocated by the
34149-        application.</para></listitem>
34150-      </varlistentry>
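        <para>A sketch of reading global statistics.  Cached statistics are
        refreshed by bumping the <mallctl>epoch</mallctl> mallctl documented
        earlier in this manual; the build must be configured with
        <option>--enable-stats</option>.</para>
        <programlisting language="C"><![CDATA[
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
print_global_stats(void) {
	/* Bump the epoch so that cached statistics are refreshed. */
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	(void)mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

	size_t allocated, active;
	sz = sizeof(size_t);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0 &&
	    mallctl("stats.active", &active, &sz, NULL, 0) == 0) {
		printf("allocated: %zu, active: %zu\n", allocated, active);
	}
}]]></programlisting>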
34151-
34152-      <varlistentry id="stats.active">
34153-        <term>
34154-          <mallctl>stats.active</mallctl>
34155-          (<type>size_t</type>)
34156-          <literal>r-</literal>
34157-          [<option>--enable-stats</option>]
34158-        </term>
34159-        <listitem><para>Total number of bytes in active pages allocated by the
34160-        application.  This is a multiple of the page size, and greater than or
34161-        equal to <link
34162-        linkend="stats.allocated"><mallctl>stats.allocated</mallctl></link>.
34163-        This does not include <link linkend="stats.arenas.i.pdirty">
34164-        <mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl></link>,
34165-        <link linkend="stats.arenas.i.pmuzzy">
34166-        <mallctl>stats.arenas.&lt;i&gt;.pmuzzy</mallctl></link>, nor pages
34167-        entirely devoted to allocator metadata.</para></listitem>
34168-      </varlistentry>
34169-
34170-      <varlistentry id="stats.metadata">
34171-        <term>
34172-          <mallctl>stats.metadata</mallctl>
34173-          (<type>size_t</type>)
34174-          <literal>r-</literal>
34175-          [<option>--enable-stats</option>]
34176-        </term>
34177-        <listitem><para>Total number of bytes dedicated to metadata, which
34178-        comprise base allocations used for bootstrap-sensitive allocator
34179-        metadata structures (see <link
34180-        linkend="stats.arenas.i.base"><mallctl>stats.arenas.&lt;i&gt;.base</mallctl></link>)
34181-        and internal allocations (see <link
34182-        linkend="stats.arenas.i.internal"><mallctl>stats.arenas.&lt;i&gt;.internal</mallctl></link>).
34183-        Transparent huge page (enabled with <link
34184-        linkend="opt.metadata_thp">opt.metadata_thp</link>) usage is not
34185-        considered.</para></listitem>
34186-      </varlistentry>
34187-
34188-      <varlistentry id="stats.metadata_thp">
34189-        <term>
34190-          <mallctl>stats.metadata_thp</mallctl>
34191-          (<type>size_t</type>)
34192-          <literal>r-</literal>
34193-          [<option>--enable-stats</option>]
34194-        </term>
34195-        <listitem><para>Number of transparent huge pages (THP) used for
34196-        metadata.  See <link
34197-        linkend="stats.metadata"><mallctl>stats.metadata</mallctl></link> and
34198-        <link linkend="opt.metadata_thp">opt.metadata_thp</link> for
34199-        details.</para></listitem>
34200-      </varlistentry>
34201-
34202-      <varlistentry id="stats.resident">
34203-        <term>
34204-          <mallctl>stats.resident</mallctl>
34205-          (<type>size_t</type>)
34206-          <literal>r-</literal>
34207-          [<option>--enable-stats</option>]
34208-        </term>
34209-        <listitem><para>Maximum number of bytes in physically resident data
34210-        pages mapped by the allocator, comprising all pages dedicated to
34211-        allocator metadata, pages backing active allocations, and unused dirty
34212-        pages.  This is a maximum rather than a precise value because pages may not
34213-        actually be physically resident if they correspond to demand-zeroed
34214-        virtual memory that has not yet been touched.  This is a multiple of the
34215-        page size, and is larger than <link
34216-        linkend="stats.active"><mallctl>stats.active</mallctl></link>.</para></listitem>
34217-      </varlistentry>
34218-
34219-      <varlistentry id="stats.mapped">
34220-        <term>
34221-          <mallctl>stats.mapped</mallctl>
34222-          (<type>size_t</type>)
34223-          <literal>r-</literal>
34224-          [<option>--enable-stats</option>]
34225-        </term>
34226-        <listitem><para>Total number of bytes in active extents mapped by the
34227-        allocator.  This is larger than <link
34228-        linkend="stats.active"><mallctl>stats.active</mallctl></link>.  This
34229-        does not include inactive extents, even those that contain unused dirty
34230-        pages, which means that there is no strict ordering between this and
34231-        <link
34232-        linkend="stats.resident"><mallctl>stats.resident</mallctl></link>.</para></listitem>
34233-      </varlistentry>
34234-
34235-      <varlistentry id="stats.retained">
34236-        <term>
34237-          <mallctl>stats.retained</mallctl>
34238-          (<type>size_t</type>)
34239-          <literal>r-</literal>
34240-          [<option>--enable-stats</option>]
34241-        </term>
34242-        <listitem><para>Total number of bytes in virtual memory mappings that
34243-        were retained rather than being returned to the operating system via
34244-        e.g. <citerefentry><refentrytitle>munmap</refentrytitle>
34245-        <manvolnum>2</manvolnum></citerefentry> or similar.  Retained virtual
34246-        memory is typically untouched, decommitted, or purged, so it has no
34247-        strongly associated physical memory (see <link
34248-        linkend="arena.i.extent_hooks">extent hooks</link> for details).
34249-        Retained memory is excluded from mapped memory statistics, e.g. <link
34250-        linkend="stats.mapped"><mallctl>stats.mapped</mallctl></link>.
34251-        </para></listitem>
34252-      </varlistentry>
34253-
34254-      <varlistentry id="stats.zero_reallocs">
34255-        <term>
34256-          <mallctl>stats.zero_reallocs</mallctl>
34257-          (<type>size_t</type>)
34258-          <literal>r-</literal>
34259-          [<option>--enable-stats</option>]
34260-        </term>
34261-        <listitem><para>Number of times that the <function>realloc()</function>
34262-        was called with a non-<constant>NULL</constant> pointer argument and a
34263-        <constant>0</constant> size argument.  This is a fundamentally unsafe
34264-        pattern in portable programs; see <link linkend="opt.zero_realloc">
34265-        <mallctl>opt.zero_realloc</mallctl></link> for details.
34266-        </para></listitem>
34267-      </varlistentry>
34268-
34269-      <varlistentry id="stats.background_thread.num_threads">
34270-        <term>
34271-          <mallctl>stats.background_thread.num_threads</mallctl>
34272-          (<type>size_t</type>)
34273-          <literal>r-</literal>
34274-          [<option>--enable-stats</option>]
34275-        </term>
34276-        <listitem><para>Number of <link linkend="background_thread">background
34277-        threads</link> currently running.</para></listitem>
34278-      </varlistentry>
34279-
34280-      <varlistentry id="stats.background_thread.num_runs">
34281-        <term>
34282-          <mallctl>stats.background_thread.num_runs</mallctl>
34283-          (<type>uint64_t</type>)
34284-          <literal>r-</literal>
34285-          [<option>--enable-stats</option>]
34286-        </term>
34287-        <listitem><para> Total number of runs from all <link
34288-        linkend="background_thread">background threads</link>.</para></listitem>
34289-      </varlistentry>
34290-
34291-      <varlistentry id="stats.background_thread.run_interval">
34292-        <term>
34293-          <mallctl>stats.background_thread.run_interval</mallctl>
34294-          (<type>uint64_t</type>)
34295-          <literal>r-</literal>
34296-          [<option>--enable-stats</option>]
34297-        </term>
34298-        <listitem><para> Average run interval in nanoseconds of <link
34299-        linkend="background_thread">background threads</link>.</para></listitem>
34300-      </varlistentry>
34301-
34302-      <varlistentry id="stats.mutexes.ctl">
34303-        <term>
34304-          <mallctl>stats.mutexes.ctl.{counter}</mallctl>
34305-          (<type>counter specific type</type>)
34306-          <literal>r-</literal>
34307-          [<option>--enable-stats</option>]
34308-        </term>
34309-        <listitem><para>Statistics on <varname>ctl</varname> mutex (global
34310-        scope; mallctl related).  <mallctl>{counter}</mallctl> is one of the
34311-        counters below:</para>
34312-        <varlistentry id="mutex_counters">
34313-          <listitem><para><varname>num_ops</varname> (<type>uint64_t</type>):
34314-          Total number of lock acquisition operations on this mutex.</para>
34315-
34316-	  <para><varname>num_spin_acq</varname> (<type>uint64_t</type>): Number
34317-	  of times the mutex was spin-acquired.  When the mutex is currently
34318-	  locked and cannot be acquired immediately, a short period of
34319-	  spin-retry within jemalloc is performed.  Acquisition through spinning
34320-	  generally means the contention was lightweight and did not cause
34321-	  context switches.</para>
34322-
34323-	  <para><varname>num_wait</varname> (<type>uint64_t</type>): Number of
34324-	  times the mutex was wait-acquired, which means the mutex contention
34325-	  was not resolved by spin-retry, and a blocking operation was likely
34326-	  involved in order to acquire the mutex.  This event generally implies
34327-	  higher cost / longer delay, and should be investigated if it happens
34328-	  often.</para>
34329-
34330-	  <para><varname>max_wait_time</varname> (<type>uint64_t</type>):
34331-	  Maximum length of time in nanoseconds spent on a single wait-acquired
34332-	  lock operation.  Note that to avoid profiling overhead on the common
34333-	  path, this does not consider spin-acquired cases.</para>
34334-
34335-	  <para><varname>total_wait_time</varname> (<type>uint64_t</type>):
34336-	  Cumulative time in nanoseconds spent on wait-acquired lock operations.
34337-	  Similarly, spin-acquired cases are not considered.</para>
34338-
34339-	  <para><varname>max_num_thds</varname> (<type>uint32_t</type>): Maximum
34340-	  number of threads waiting on this mutex simultaneously.  Similarly,
34341-	  spin-acquired cases are not considered.</para>
34342-
34343-	  <para><varname>num_owner_switch</varname> (<type>uint64_t</type>):
34344-	  Number of times the current mutex owner is different from the previous
34345-	  one.  This event does not generally imply an issue; rather it is an
34346-	  indicator of how often the protected data are accessed by different
34347-	  threads.
34348-	  </para>
34349-	  </listitem>
34350-	</varlistentry>
34351-	</listitem>
34352-      </varlistentry>
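        <para>Counter names are appended to the mutex's mallctl prefix, so a
        specific counter is read as in the following sketch (requires
        <option>--enable-stats</option>).</para>
        <programlisting language="C"><![CDATA[
#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
report_ctl_mutex_waits(void) {
	uint64_t num_wait;
	size_t sz = sizeof(num_wait);

	if (mallctl("stats.mutexes.ctl.num_wait", &num_wait, &sz, NULL,
	    0) == 0) {
		printf("ctl mutex wait-acquisitions: %" PRIu64 "\n", num_wait);
	}
}]]></programlisting>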
34353-
34354-      <varlistentry id="stats.mutexes.background_thread">
34355-        <term>
34356-          <mallctl>stats.mutexes.background_thread.{counter}</mallctl>
34357-	  (<type>counter specific type</type>) <literal>r-</literal>
34358-          [<option>--enable-stats</option>]
34359-        </term>
34360-        <listitem><para>Statistics on <varname>background_thread</varname> mutex
34361-        (global scope; <link
34362-        linkend="background_thread"><mallctl>background_thread</mallctl></link>
34363-        related).  <mallctl>{counter}</mallctl> is one of the counters in <link
34364-        linkend="mutex_counters">mutex profiling
34365-        counters</link>.</para></listitem>
34366-      </varlistentry>
34367-
34368-      <varlistentry id="stats.mutexes.prof">
34369-        <term>
34370-          <mallctl>stats.mutexes.prof.{counter}</mallctl>
34371-	  (<type>counter specific type</type>) <literal>r-</literal>
34372-          [<option>--enable-stats</option>]
34373-        </term>
34374-        <listitem><para>Statistics on <varname>prof</varname> mutex (global
34375-        scope; profiling related).  <mallctl>{counter}</mallctl> is one of the
34376-        counters in <link linkend="mutex_counters">mutex profiling
34377-        counters</link>.</para></listitem>
34378-      </varlistentry>
34379-
34380-      <varlistentry id="stats.mutexes.prof_thds_data">
34381-        <term>
34382-          <mallctl>stats.mutexes.prof_thds_data.{counter}</mallctl>
34383-	  (<type>counter specific type</type>) <literal>r-</literal>
34384-          [<option>--enable-stats</option>]
34385-        </term>
34386-	<listitem><para>Statistics on <varname>prof</varname> threads data mutex
34387-	(global scope; profiling related).  <mallctl>{counter}</mallctl> is one
34388-	of the counters in <link linkend="mutex_counters">mutex profiling
34389-        counters</link>.</para></listitem>
34390-      </varlistentry>
34391-
34392-      <varlistentry id="stats.mutexes.prof_dump">
34393-        <term>
34394-          <mallctl>stats.mutexes.prof_dump.{counter}</mallctl>
34395-	  (<type>counter specific type</type>) <literal>r-</literal>
34396-          [<option>--enable-stats</option>]
34397-        </term>
34398-	<listitem><para>Statistics on <varname>prof</varname> dumping mutex
34399-	(global scope; profiling related).  <mallctl>{counter}</mallctl> is one
34400-	of the counters in <link linkend="mutex_counters">mutex profiling
34401-        counters</link>.</para></listitem>
34402-      </varlistentry>
34403-
34404-      <varlistentry id="stats.mutexes.reset">
34405-        <term>
34406-          <mallctl>stats.mutexes.reset</mallctl>
34407-	  (<type>void</type>) <literal>--</literal>
34408-          [<option>--enable-stats</option>]
34409-        </term>
34410-        <listitem><para>Reset all mutex profile statistics, including global
34411-        mutexes, arena mutexes and bin mutexes.</para></listitem>
34412-      </varlistentry>
34413-
34414-      <varlistentry id="stats.arenas.i.dss">
34415-        <term>
34416-          <mallctl>stats.arenas.&lt;i&gt;.dss</mallctl>
34417-          (<type>const char *</type>)
34418-          <literal>r-</literal>
34419-        </term>
34420-        <listitem><para>dss (<citerefentry><refentrytitle>sbrk</refentrytitle>
34421-        <manvolnum>2</manvolnum></citerefentry>) allocation precedence as
34422-        related to <citerefentry><refentrytitle>mmap</refentrytitle>
34423-        <manvolnum>2</manvolnum></citerefentry> allocation.  See <link
34424-        linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for details.
34425-        </para></listitem>
34426-      </varlistentry>
34427-
34428-      <varlistentry id="stats.arenas.i.dirty_decay_ms">
34429-        <term>
34430-          <mallctl>stats.arenas.&lt;i&gt;.dirty_decay_ms</mallctl>
34431-          (<type>ssize_t</type>)
34432-          <literal>r-</literal>
34433-        </term>
34434-        <listitem><para>Approximate time in milliseconds from the creation of a
34435-        set of unused dirty pages until an equivalent set of unused dirty pages
34436-        is purged and/or reused.  See <link
34437-        linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
34438-        for details.</para></listitem>
34439-      </varlistentry>
34440-
34441-      <varlistentry id="stats.arenas.i.muzzy_decay_ms">
34442-        <term>
34443-          <mallctl>stats.arenas.&lt;i&gt;.muzzy_decay_ms</mallctl>
34444-          (<type>ssize_t</type>)
34445-          <literal>r-</literal>
34446-        </term>
34447-        <listitem><para>Approximate time in milliseconds from the creation of a
34448-        set of unused muzzy pages until an equivalent set of unused muzzy pages
34449-        is purged and/or reused.  See <link
34450-        linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
34451-        for details.</para></listitem>
34452-      </varlistentry>
34453-
34454-      <varlistentry id="stats.arenas.i.nthreads">
34455-        <term>
34456-          <mallctl>stats.arenas.&lt;i&gt;.nthreads</mallctl>
34457-          (<type>unsigned</type>)
34458-          <literal>r-</literal>
34459-        </term>
34460-        <listitem><para>Number of threads currently assigned to
34461-        arena.</para></listitem>
34462-      </varlistentry>
34463-
34464-      <varlistentry id="stats.arenas.i.uptime">
34465-        <term>
34466-          <mallctl>stats.arenas.&lt;i&gt;.uptime</mallctl>
34467-          (<type>uint64_t</type>)
34468-          <literal>r-</literal>
34469-        </term>
34470-        <listitem><para>Time elapsed (in nanoseconds) since the arena was
34471-        created.  If &lt;i&gt; equals <constant>0</constant> or
34472-        <constant>MALLCTL_ARENAS_ALL</constant>, this is the uptime since malloc
34473-        initialization.</para></listitem>
34474-      </varlistentry>
34475-
34476-      <varlistentry id="stats.arenas.i.pactive">
34477-        <term>
34478-          <mallctl>stats.arenas.&lt;i&gt;.pactive</mallctl>
34479-          (<type>size_t</type>)
34480-          <literal>r-</literal>
34481-        </term>
34482-        <listitem><para>Number of pages in active extents.</para></listitem>
34483-      </varlistentry>
34484-
34485-      <varlistentry id="stats.arenas.i.pdirty">
34486-        <term>
34487-          <mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl>
34488-          (<type>size_t</type>)
34489-          <literal>r-</literal>
34490-        </term>
34491-        <listitem><para>Number of pages within unused extents that are
34492-        potentially dirty, and for which <function>madvise()</function> or
34493-        similar has not been called.  See <link
34494-        linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
34495-        for a description of dirty pages.</para></listitem>
34496-      </varlistentry>
34497-
34498-      <varlistentry id="stats.arenas.i.pmuzzy">
34499-        <term>
34500-          <mallctl>stats.arenas.&lt;i&gt;.pmuzzy</mallctl>
34501-          (<type>size_t</type>)
34502-          <literal>r-</literal>
34503-        </term>
34504-        <listitem><para>Number of pages within unused extents that are muzzy.
34505-        See <link
34506-        linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
34507-        for a description of muzzy pages.</para></listitem>
34508-      </varlistentry>
34509-
34510-      <varlistentry id="stats.arenas.i.mapped">
34511-        <term>
34512-          <mallctl>stats.arenas.&lt;i&gt;.mapped</mallctl>
34513-          (<type>size_t</type>)
34514-          <literal>r-</literal>
34515-          [<option>--enable-stats</option>]
34516-        </term>
34517-        <listitem><para>Number of mapped bytes.</para></listitem>
34518-      </varlistentry>
34519-
34520-      <varlistentry id="stats.arenas.i.retained">
34521-        <term>
34522-          <mallctl>stats.arenas.&lt;i&gt;.retained</mallctl>
34523-          (<type>size_t</type>)
34524-          <literal>r-</literal>
34525-          [<option>--enable-stats</option>]
34526-        </term>
34527-        <listitem><para>Number of retained bytes.  See <link
34528-        linkend="stats.retained"><mallctl>stats.retained</mallctl></link> for
34529-        details.</para></listitem>
34530-      </varlistentry>
34531-
34532-      <varlistentry id="stats.arenas.i.extent_avail">
34533-        <term>
34534-          <mallctl>stats.arenas.&lt;i&gt;.extent_avail</mallctl>
34535-          (<type>size_t</type>)
34536-          <literal>r-</literal>
34537-          [<option>--enable-stats</option>]
34538-        </term>
34539-        <listitem><para>Number of allocated (but unused) extent structs in this
34540-        arena.</para></listitem>
34541-      </varlistentry>
34542-
34543-      <varlistentry id="stats.arenas.i.base">
34544-        <term>
34545-          <mallctl>stats.arenas.&lt;i&gt;.base</mallctl>
34546-          (<type>size_t</type>)
34547-          <literal>r-</literal>
34548-          [<option>--enable-stats</option>]
34549-        </term>
34550-        <listitem><para>
34551-        Number of bytes dedicated to bootstrap-sensitive allocator metadata
34552-        structures.</para></listitem>
34553-      </varlistentry>
34554-
34555-      <varlistentry id="stats.arenas.i.internal">
34556-        <term>
34557-          <mallctl>stats.arenas.&lt;i&gt;.internal</mallctl>
34558-          (<type>size_t</type>)
34559-          <literal>r-</literal>
34560-          [<option>--enable-stats</option>]
34561-        </term>
34562-        <listitem><para>Number of bytes dedicated to internal allocations.
34563-        Internal allocations differ from application-originated allocations in
34564-        that they are for internal use, and that they are omitted from heap
34565-        profiles.</para></listitem>
34566-      </varlistentry>
34567-
34568-      <varlistentry id="stats.arenas.i.metadata_thp">
34569-        <term>
34570-          <mallctl>stats.arenas.&lt;i&gt;.metadata_thp</mallctl>
34571-          (<type>size_t</type>)
34572-          <literal>r-</literal>
34573-          [<option>--enable-stats</option>]
34574-        </term>
34575-        <listitem><para>Number of transparent huge pages (THP) used for
34576-        metadata.  See <link linkend="opt.metadata_thp">opt.metadata_thp</link>
34577-        for details.</para></listitem>
34578-      </varlistentry>
34579-
34580-      <varlistentry id="stats.arenas.i.resident">
34581-        <term>
34582-          <mallctl>stats.arenas.&lt;i&gt;.resident</mallctl>
34583-          (<type>size_t</type>)
34584-          <literal>r-</literal>
34585-          [<option>--enable-stats</option>]
34586-        </term>
34587-        <listitem><para>Maximum number of bytes in physically resident data
34588-        pages mapped by the arena, comprising all pages dedicated to allocator
34589-        metadata, pages backing active allocations, and unused dirty pages.
34590-        This is a maximum rather than a precise value because pages may not be
34591-        physically resident if they correspond to demand-zeroed virtual memory
34592-        that has not yet been touched.  This is a multiple of the page
34593-        size.</para></listitem>
34594-      </varlistentry>
34595-
34596-      <varlistentry id="stats.arenas.i.dirty_npurge">
34597-        <term>
34598-          <mallctl>stats.arenas.&lt;i&gt;.dirty_npurge</mallctl>
34599-          (<type>uint64_t</type>)
34600-          <literal>r-</literal>
34601-          [<option>--enable-stats</option>]
34602-        </term>
34603-        <listitem><para>Number of dirty page purge sweeps performed.
34604-        </para></listitem>
34605-      </varlistentry>
34606-
34607-      <varlistentry id="stats.arenas.i.dirty_nmadvise">
34608-        <term>
34609-          <mallctl>stats.arenas.&lt;i&gt;.dirty_nmadvise</mallctl>
34610-          (<type>uint64_t</type>)
34611-          <literal>r-</literal>
34612-          [<option>--enable-stats</option>]
34613-        </term>
34614-        <listitem><para>Number of <function>madvise()</function> or similar
34615-        calls made to purge dirty pages.</para></listitem>
34616-      </varlistentry>
34617-
34618-      <varlistentry id="stats.arenas.i.dirty_purged">
34619-        <term>
34620-          <mallctl>stats.arenas.&lt;i&gt;.dirty_purged</mallctl>
34621-          (<type>uint64_t</type>)
34622-          <literal>r-</literal>
34623-          [<option>--enable-stats</option>]
34624-        </term>
34625-        <listitem><para>Number of dirty pages purged.</para></listitem>
34626-      </varlistentry>
34627-
34628-      <varlistentry id="stats.arenas.i.muzzy_npurge">
34629-        <term>
34630-          <mallctl>stats.arenas.&lt;i&gt;.muzzy_npurge</mallctl>
34631-          (<type>uint64_t</type>)
34632-          <literal>r-</literal>
34633-          [<option>--enable-stats</option>]
34634-        </term>
34635-        <listitem><para>Number of muzzy page purge sweeps performed.
34636-        </para></listitem>
34637-      </varlistentry>
34638-
34639-      <varlistentry id="stats.arenas.i.muzzy_nmadvise">
34640-        <term>
34641-          <mallctl>stats.arenas.&lt;i&gt;.muzzy_nmadvise</mallctl>
34642-          (<type>uint64_t</type>)
34643-          <literal>r-</literal>
34644-          [<option>--enable-stats</option>]
34645-        </term>
34646-        <listitem><para>Number of <function>madvise()</function> or similar
34647-        calls made to purge muzzy pages.</para></listitem>
34648-      </varlistentry>
34649-
34650-      <varlistentry id="stats.arenas.i.muzzy_purged">
34651-        <term>
34652-          <mallctl>stats.arenas.&lt;i&gt;.muzzy_purged</mallctl>
34653-          (<type>uint64_t</type>)
34654-          <literal>r-</literal>
34655-          [<option>--enable-stats</option>]
34656-        </term>
34657-        <listitem><para>Number of muzzy pages purged.</para></listitem>
34658-      </varlistentry>
34659-
34660-      <varlistentry id="stats.arenas.i.small.allocated">
34661-        <term>
34662-          <mallctl>stats.arenas.&lt;i&gt;.small.allocated</mallctl>
34663-          (<type>size_t</type>)
34664-          <literal>r-</literal>
34665-          [<option>--enable-stats</option>]
34666-        </term>
34667-        <listitem><para>Number of bytes currently allocated by small objects.
34668-        </para></listitem>
34669-      </varlistentry>
34670-
34671-      <varlistentry id="stats.arenas.i.small.nmalloc">
34672-        <term>
34673-          <mallctl>stats.arenas.&lt;i&gt;.small.nmalloc</mallctl>
34674-          (<type>uint64_t</type>)
34675-          <literal>r-</literal>
34676-          [<option>--enable-stats</option>]
34677-        </term>
34678-        <listitem><para>Cumulative number of times a small allocation was
34679-        requested from the arena's bins, whether to fill the relevant tcache if
34680-        <link linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is
34681-        enabled, or to directly satisfy an allocation request
34682-        otherwise.</para></listitem>
34683-      </varlistentry>
34684-
34685-      <varlistentry id="stats.arenas.i.small.ndalloc">
34686-        <term>
34687-          <mallctl>stats.arenas.&lt;i&gt;.small.ndalloc</mallctl>
34688-          (<type>uint64_t</type>)
34689-          <literal>r-</literal>
34690-          [<option>--enable-stats</option>]
34691-        </term>
34692-        <listitem><para>Cumulative number of times a small allocation was
34693-        returned to the arena's bins, whether to flush the relevant tcache if
34694-        <link linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is
34695-        enabled, or to directly deallocate an allocation
34696-        otherwise.</para></listitem>
34697-      </varlistentry>
34698-
34699-      <varlistentry id="stats.arenas.i.small.nrequests">
34700-        <term>
34701-          <mallctl>stats.arenas.&lt;i&gt;.small.nrequests</mallctl>
34702-          (<type>uint64_t</type>)
34703-          <literal>r-</literal>
34704-          [<option>--enable-stats</option>]
34705-        </term>
34706-        <listitem><para>Cumulative number of allocation requests satisfied by
34707-        all bin size classes.</para></listitem>
34708-      </varlistentry>
34709-
34710-      <varlistentry id="stats.arenas.i.small.nfills">
34711-        <term>
34712-          <mallctl>stats.arenas.&lt;i&gt;.small.nfills</mallctl>
34713-          (<type>uint64_t</type>)
34714-          <literal>r-</literal>
34715-          [<option>--enable-stats</option>]
34716-        </term>
34717-        <listitem><para>Cumulative number of tcache fills by all small size
34718-        classes.</para></listitem>
34719-      </varlistentry>
34720-
34721-      <varlistentry id="stats.arenas.i.small.nflushes">
34722-        <term>
34723-          <mallctl>stats.arenas.&lt;i&gt;.small.nflushes</mallctl>
34724-          (<type>uint64_t</type>)
34725-          <literal>r-</literal>
34726-          [<option>--enable-stats</option>]
34727-        </term>
34728-        <listitem><para>Cumulative number of tcache flushes by all small size
34729-        classes.</para></listitem>
34730-      </varlistentry>
34731-
34732-      <varlistentry id="stats.arenas.i.large.allocated">
34733-        <term>
34734-          <mallctl>stats.arenas.&lt;i&gt;.large.allocated</mallctl>
34735-          (<type>size_t</type>)
34736-          <literal>r-</literal>
34737-          [<option>--enable-stats</option>]
34738-        </term>
34739-        <listitem><para>Number of bytes currently allocated by large objects.
34740-        </para></listitem>
34741-      </varlistentry>
34742-
34743-      <varlistentry id="stats.arenas.i.large.nmalloc">
34744-        <term>
34745-          <mallctl>stats.arenas.&lt;i&gt;.large.nmalloc</mallctl>
34746-          (<type>uint64_t</type>)
34747-          <literal>r-</literal>
34748-          [<option>--enable-stats</option>]
34749-        </term>
34750-        <listitem><para>Cumulative number of times a large extent was allocated
34751-        from the arena, whether to fill the relevant tcache if <link
34752-        linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled and
34753-        the size class is within the range being cached, or to directly satisfy
34754-        an allocation request otherwise.</para></listitem>
34755-      </varlistentry>
34756-
34757-      <varlistentry id="stats.arenas.i.large.ndalloc">
34758-        <term>
34759-          <mallctl>stats.arenas.&lt;i&gt;.large.ndalloc</mallctl>
34760-          (<type>uint64_t</type>)
34761-          <literal>r-</literal>
34762-          [<option>--enable-stats</option>]
34763-        </term>
34764-        <listitem><para>Cumulative number of times a large extent was returned
34765-        to the arena, whether to flush the relevant tcache if <link
34766-        linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled and
34767-        the size class is within the range being cached, or to directly
34768-        deallocate an allocation otherwise.</para></listitem>
34769-      </varlistentry>
34770-
34771-      <varlistentry id="stats.arenas.i.large.nrequests">
34772-        <term>
34773-          <mallctl>stats.arenas.&lt;i&gt;.large.nrequests</mallctl>
34774-          (<type>uint64_t</type>)
34775-          <literal>r-</literal>
34776-          [<option>--enable-stats</option>]
34777-        </term>
34778-        <listitem><para>Cumulative number of allocation requests satisfied by
34779-        all large size classes.</para></listitem>
34780-      </varlistentry>
34781-
34782-      <varlistentry id="stats.arenas.i.large.nfills">
34783-        <term>
34784-          <mallctl>stats.arenas.&lt;i&gt;.large.nfills</mallctl>
34785-          (<type>uint64_t</type>)
34786-          <literal>r-</literal>
34787-          [<option>--enable-stats</option>]
34788-        </term>
34789-        <listitem><para>Cumulative number of tcache fills by all large size
34790-        classes.</para></listitem>
34791-      </varlistentry>
34792-
34793-      <varlistentry id="stats.arenas.i.large.nflushes">
34794-        <term>
34795-          <mallctl>stats.arenas.&lt;i&gt;.large.nflushes</mallctl>
34796-          (<type>uint64_t</type>)
34797-          <literal>r-</literal>
34798-          [<option>--enable-stats</option>]
34799-        </term>
34800-        <listitem><para>Cumulative number of tcache flushes by all large size
34801-        classes.</para></listitem>
34802-      </varlistentry>
34803-
34804-      <varlistentry id="stats.arenas.i.bins.j.nmalloc">
34805-        <term>
34806-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nmalloc</mallctl>
34807-          (<type>uint64_t</type>)
34808-          <literal>r-</literal>
34809-          [<option>--enable-stats</option>]
34810-        </term>
34811-        <listitem><para>Cumulative number of times a bin region of the
34812-        corresponding size class was allocated from the arena, whether to fill
34813-        the relevant tcache if <link
34814-        linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled, or
34815-        to directly satisfy an allocation request otherwise.</para></listitem>
34816-      </varlistentry>
34817-
34818-      <varlistentry id="stats.arenas.i.bins.j.ndalloc">
34819-        <term>
34820-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.ndalloc</mallctl>
34821-          (<type>uint64_t</type>)
34822-          <literal>r-</literal>
34823-          [<option>--enable-stats</option>]
34824-        </term>
34825-        <listitem><para>Cumulative number of times a bin region of the
34826-        corresponding size class was returned to the arena, whether to flush the
34827-        relevant tcache if <link
34828-        linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled, or
34829-        to directly deallocate an allocation otherwise.</para></listitem>
34830-      </varlistentry>
34831-
34832-      <varlistentry id="stats.arenas.i.bins.j.nrequests">
34833-        <term>
34834-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nrequests</mallctl>
34835-          (<type>uint64_t</type>)
34836-          <literal>r-</literal>
34837-          [<option>--enable-stats</option>]
34838-        </term>
34839-        <listitem><para>Cumulative number of allocation requests satisfied by
34840-        bin regions of the corresponding size class.</para></listitem>
34841-      </varlistentry>
34842-
34843-      <varlistentry id="stats.arenas.i.bins.j.curregs">
34844-        <term>
34845-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curregs</mallctl>
34846-          (<type>size_t</type>)
34847-          <literal>r-</literal>
34848-          [<option>--enable-stats</option>]
34849-        </term>
34850-        <listitem><para>Current number of regions for this size
34851-        class.</para></listitem>
34852-      </varlistentry>
34853-
34854-      <varlistentry id="stats.arenas.i.bins.j.nfills">
34855-        <term>
34856-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nfills</mallctl>
34857-          (<type>uint64_t</type>)
34858-          <literal>r-</literal>
34859-        </term>
34860-        <listitem><para>Cumulative number of tcache fills.</para></listitem>
34861-      </varlistentry>
34862-
34863-      <varlistentry id="stats.arenas.i.bins.j.nflushes">
34864-        <term>
34865-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nflushes</mallctl>
34866-          (<type>uint64_t</type>)
34867-          <literal>r-</literal>
34868-        </term>
34869-        <listitem><para>Cumulative number of tcache flushes.</para></listitem>
34870-      </varlistentry>
34871-
34872-      <varlistentry id="stats.arenas.i.bins.j.nslabs">
34873-        <term>
34874-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nslabs</mallctl>
34875-          (<type>uint64_t</type>)
34876-          <literal>r-</literal>
34877-          [<option>--enable-stats</option>]
34878-        </term>
34879-        <listitem><para>Cumulative number of slabs created.</para></listitem>
34880-      </varlistentry>
34881-
34882-      <varlistentry id="stats.arenas.i.bins.j.nreslabs">
34883-        <term>
34884-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nreslabs</mallctl>
34885-          (<type>uint64_t</type>)
34886-          <literal>r-</literal>
34887-          [<option>--enable-stats</option>]
34888-        </term>
34889-        <listitem><para>Cumulative number of times the current slab from which
34890-        to allocate changed.</para></listitem>
34891-      </varlistentry>
34892-
34893-      <varlistentry id="stats.arenas.i.bins.j.curslabs">
34894-        <term>
34895-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curslabs</mallctl>
34896-          (<type>size_t</type>)
34897-          <literal>r-</literal>
34898-          [<option>--enable-stats</option>]
34899-        </term>
34900-        <listitem><para>Current number of slabs.</para></listitem>
34901-      </varlistentry>
34902-
34904-      <varlistentry id="stats.arenas.i.bins.j.nonfull_slabs">
34905-        <term>
34906-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nonfull_slabs</mallctl>
34907-          (<type>size_t</type>)
34908-          <literal>r-</literal>
34909-          [<option>--enable-stats</option>]
34910-        </term>
34911-        <listitem><para>Current number of nonfull slabs.</para></listitem>
34912-      </varlistentry>
34913-
34914-      <varlistentry id="stats.arenas.i.bins.mutex">
34915-        <term>
34916-          <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.mutex.{counter}</mallctl>
34917-          (<type>counter specific type</type>) <literal>r-</literal>
34918-          [<option>--enable-stats</option>]
34919-        </term>
34920-        <listitem><para>Statistics on
34921-        <varname>arena.&lt;i&gt;.bins.&lt;j&gt;</varname> mutex (arena bin
34922-        scope; bin operation related).  <mallctl>{counter}</mallctl> is one of
34923-        the counters in <link linkend="mutex_counters">mutex profiling
34924-        counters</link>.</para></listitem>
34925-      </varlistentry>
34926-
34927-      <varlistentry id="stats.arenas.i.extents.n">
34928-        <term>
34929-          <mallctl>stats.arenas.&lt;i&gt;.extents.&lt;j&gt;.n{extent_type}</mallctl>
34930-          (<type>size_t</type>)
34931-          <literal>r-</literal>
34932-          [<option>--enable-stats</option>]
34933-        </term>
34934-        <listitem><para>Number of extents of the given type in this arena in
34935-        the bucket corresponding to page size index &lt;j&gt;.  The extent type
34936-        is one of dirty, muzzy, or retained.</para></listitem>
34937-      </varlistentry>
34938-
34939-      <varlistentry id="stats.arenas.i.extents.bytes">
34940-        <term>
34941-          <mallctl>stats.arenas.&lt;i&gt;.extents.&lt;j&gt;.{extent_type}_bytes</mallctl>
34942-          (<type>size_t</type>)
34943-          <literal>r-</literal>
34944-          [<option>--enable-stats</option>]
34945-        </term>
34946-        <listitem><para>Sum of the bytes managed by extents of the given type
34947-        in this arena in the bucket corresponding to page size index &lt;j&gt;.
34948-        The extent type is one of dirty, muzzy, or retained.</para></listitem>
34949-      </varlistentry>
34950-
34951-      <varlistentry id="stats.arenas.i.lextents.j.nmalloc">
34952-        <term>
34953-          <mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.nmalloc</mallctl>
34954-          (<type>uint64_t</type>)
34955-          <literal>r-</literal>
34956-          [<option>--enable-stats</option>]
34957-        </term>
34958-        <listitem><para>Cumulative number of times a large extent of the
34959-        corresponding size class was allocated from the arena, whether to fill
34960-        the relevant tcache if <link
34961-        linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled and
34962-        the size class is within the range being cached, or to directly satisfy
34963-        an allocation request otherwise.</para></listitem>
34964-      </varlistentry>
34965-
34966-      <varlistentry id="stats.arenas.i.lextents.j.ndalloc">
34967-        <term>
34968-          <mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.ndalloc</mallctl>
34969-          (<type>uint64_t</type>)
34970-          <literal>r-</literal>
34971-          [<option>--enable-stats</option>]
34972-        </term>
34973-        <listitem><para>Cumulative number of times a large extent of the
34974-        corresponding size class was returned to the arena, whether to flush the
34975-        relevant tcache if <link
34976-        linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled and
34977-        the size class is within the range being cached, or to directly
34978-        deallocate an allocation otherwise.</para></listitem>
34979-      </varlistentry>
34980-
34981-      <varlistentry id="stats.arenas.i.lextents.j.nrequests">
34982-        <term>
34983-          <mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.nrequests</mallctl>
34984-          (<type>uint64_t</type>)
34985-          <literal>r-</literal>
34986-          [<option>--enable-stats</option>]
34987-        </term>
34988-        <listitem><para>Cumulative number of allocation requests satisfied by
34989-        large extents of the corresponding size class.</para></listitem>
34990-      </varlistentry>
34991-
34992-      <varlistentry id="stats.arenas.i.lextents.j.curlextents">
34993-        <term>
34994-          <mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.curlextents</mallctl>
34995-          (<type>size_t</type>)
34996-          <literal>r-</literal>
34997-          [<option>--enable-stats</option>]
34998-        </term>
34999-        <listitem><para>Current number of large allocations for this size class.
35000-        </para></listitem>
35001-      </varlistentry>
35002-
35003-      <varlistentry id="stats.arenas.i.mutexes.large">
35004-        <term>
35005-          <mallctl>stats.arenas.&lt;i&gt;.mutexes.large.{counter}</mallctl>
35006-          (<type>counter specific type</type>) <literal>r-</literal>
35007-          [<option>--enable-stats</option>]
35008-        </term>
35009-        <listitem><para>Statistics on <varname>arena.&lt;i&gt;.large</varname>
35010-        mutex (arena scope; large allocation related).
35011-        <mallctl>{counter}</mallctl> is one of the counters in <link
35012-        linkend="mutex_counters">mutex profiling
35013-        counters</link>.</para></listitem>
35014-      </varlistentry>
35015-
35016-      <varlistentry id="stats.arenas.i.mutexes.extent_avail">
35017-        <term>
35018-          <mallctl>stats.arenas.&lt;i&gt;.mutexes.extent_avail.{counter}</mallctl>
35019-          (<type>counter specific type</type>) <literal>r-</literal>
35020-          [<option>--enable-stats</option>]
35021-        </term>
35022-        <listitem><para>Statistics on <varname>arena.&lt;i&gt;.extent_avail
35023-        </varname> mutex (arena scope; extent avail related).
35024-        <mallctl>{counter}</mallctl> is one of the counters in <link
35025-        linkend="mutex_counters">mutex profiling
35026-        counters</link>.</para></listitem>
35027-      </varlistentry>
35028-
35029-      <varlistentry id="stats.arenas.i.mutexes.extents_dirty">
35030-        <term>
35031-          <mallctl>stats.arenas.&lt;i&gt;.mutexes.extents_dirty.{counter}</mallctl>
35032-          (<type>counter specific type</type>) <literal>r-</literal>
35033-          [<option>--enable-stats</option>]
35034-        </term>
35035-        <listitem><para>Statistics on <varname>arena.&lt;i&gt;.extents_dirty
35036-        </varname> mutex (arena scope; dirty extents related).
35037-        <mallctl>{counter}</mallctl> is one of the counters in <link
35038-        linkend="mutex_counters">mutex profiling
35039-        counters</link>.</para></listitem>
35040-      </varlistentry>
35041-
35042-      <varlistentry id="stats.arenas.i.mutexes.extents_muzzy">
35043-        <term>
35044-          <mallctl>stats.arenas.&lt;i&gt;.mutexes.extents_muzzy.{counter}</mallctl>
35045-          (<type>counter specific type</type>) <literal>r-</literal>
35046-          [<option>--enable-stats</option>]
35047-        </term>
35048-        <listitem><para>Statistics on <varname>arena.&lt;i&gt;.extents_muzzy
35049-        </varname> mutex (arena scope; muzzy extents related).
35050-        <mallctl>{counter}</mallctl> is one of the counters in <link
35051-        linkend="mutex_counters">mutex profiling
35052-        counters</link>.</para></listitem>
35053-      </varlistentry>
35054-
35055-      <varlistentry id="stats.arenas.i.mutexes.extents_retained">
35056-        <term>
35057-          <mallctl>stats.arenas.&lt;i&gt;.mutexes.extents_retained.{counter}</mallctl>
35058-          (<type>counter specific type</type>) <literal>r-</literal>
35059-          [<option>--enable-stats</option>]
35060-        </term>
35061-        <listitem><para>Statistics on <varname>arena.&lt;i&gt;.extents_retained
35062-        </varname> mutex (arena scope; retained extents related).
35063-        <mallctl>{counter}</mallctl> is one of the counters in <link
35064-        linkend="mutex_counters">mutex profiling
35065-        counters</link>.</para></listitem>
35066-      </varlistentry>
35067-
35068-      <varlistentry id="stats.arenas.i.mutexes.decay_dirty">
35069-        <term>
35070-          <mallctl>stats.arenas.&lt;i&gt;.mutexes.decay_dirty.{counter}</mallctl>
35071-          (<type>counter specific type</type>) <literal>r-</literal>
35072-          [<option>--enable-stats</option>]
35073-        </term>
35074-        <listitem><para>Statistics on <varname>arena.&lt;i&gt;.decay_dirty
35075-        </varname> mutex (arena scope; decay for dirty pages related).
35076-        <mallctl>{counter}</mallctl> is one of the counters in <link
35077-        linkend="mutex_counters">mutex profiling
35078-        counters</link>.</para></listitem>
35079-      </varlistentry>
35080-
35081-      <varlistentry id="stats.arenas.i.mutexes.decay_muzzy">
35082-        <term>
35083-          <mallctl>stats.arenas.&lt;i&gt;.mutexes.decay_muzzy.{counter}</mallctl>
35084-          (<type>counter specific type</type>) <literal>r-</literal>
35085-          [<option>--enable-stats</option>]
35086-        </term>
35087-        <listitem><para>Statistics on <varname>arena.&lt;i&gt;.decay_muzzy
35088-        </varname> mutex (arena scope; decay for muzzy pages related).
35089-        <mallctl>{counter}</mallctl> is one of the counters in <link
35090-        linkend="mutex_counters">mutex profiling
35091-        counters</link>.</para></listitem>
35092-      </varlistentry>
35093-
35094-      <varlistentry id="stats.arenas.i.mutexes.base">
35095-        <term>
35096-          <mallctl>stats.arenas.&lt;i&gt;.mutexes.base.{counter}</mallctl>
35097-          (<type>counter specific type</type>) <literal>r-</literal>
35098-          [<option>--enable-stats</option>]
35099-        </term>
35100-        <listitem><para>Statistics on <varname>arena.&lt;i&gt;.base</varname>
35101-        mutex (arena scope; base allocator related).
35102-        <mallctl>{counter}</mallctl> is one of the counters in <link
35103-        linkend="mutex_counters">mutex profiling
35104-        counters</link>.</para></listitem>
35105-      </varlistentry>
35106-
35107-      <varlistentry id="stats.arenas.i.mutexes.tcache_list">
35108-        <term>
35109-          <mallctl>stats.arenas.&lt;i&gt;.mutexes.tcache_list.{counter}</mallctl>
35110-          (<type>counter specific type</type>) <literal>r-</literal>
35111-          [<option>--enable-stats</option>]
35112-        </term>
35113-        <listitem><para>Statistics on
35114-        <varname>arena.&lt;i&gt;.tcache_list</varname> mutex (arena scope;
35115-        tcache to arena association related).  This mutex is expected to be
35116-        accessed relatively infrequently.  <mallctl>{counter}</mallctl> is one of the
35117-        counters in <link linkend="mutex_counters">mutex profiling
35118-        counters</link>.</para></listitem>
35119-      </varlistentry>
35120-
35121-    </variablelist>
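-
-    <para>The statistics above are read through the
-    <function>mallctl*()</function> interfaces.  As an illustrative sketch
-    only (not part of the reference above; the arena index is arbitrary and
-    error handling is simplified), the following C fragment refreshes the
-    statistics snapshot via the <mallctl>epoch</mallctl> control and then
-    reads <mallctl>stats.arenas.0.pdirty</mallctl>:
-    <programlisting language="C"><![CDATA[
-#include <stdint.h>
-#include <stdio.h>
-#include <jemalloc/jemalloc.h>
-
-int main(void) {
-    /* Refresh the statistics snapshot. */
-    uint64_t epoch = 1;
-    size_t sz = sizeof(epoch);
-    mallctl("epoch", &epoch, &sz, &epoch, sz);
-
-    /* Read the number of dirty pages in arena 0. */
-    size_t pdirty;
-    sz = sizeof(pdirty);
-    if (mallctl("stats.arenas.0.pdirty", &pdirty, &sz, NULL, 0) == 0) {
-        printf("arena 0 dirty pages: %zu\n", pdirty);
-    }
-    return 0;
-}]]></programlisting></para>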
35122-  </refsect1>
35123-  <refsect1 id="heap_profile_format">
35124-    <title>HEAP PROFILE FORMAT</title>
35125-    <para>Although the heap profiling functionality was originally designed to
35126-    be compatible with the
35127-    <command>pprof</command> command that is developed as part of the <ulink
35128-    url="http://code.google.com/p/gperftools/">gperftools
35129-    package</ulink>, the addition of per thread heap profiling functionality
35130-    required a different heap profile format.  The <command>jeprof</command>
35131-    command is derived from <command>pprof</command>, with enhancements to
35132-    support the heap profile format described here.</para>
35133-
35134-    <para>In the following hypothetical heap profile, <constant>[...]</constant>
35135-    indicates elision for the sake of compactness.  <programlisting><![CDATA[
35136-heap_v2/524288
35137-  t*: 28106: 56637512 [0: 0]
35138-  [...]
35139-  t3: 352: 16777344 [0: 0]
35140-  [...]
35141-  t99: 17754: 29341640 [0: 0]
35142-  [...]
35143-@ 0x5f86da8 0x5f5a1dc [...] 0x29e4d4e 0xa200316 0xabb2988 [...]
35144-  t*: 13: 6688 [0: 0]
35145-  t3: 12: 6496 [0: 0]
35146-  t99: 1: 192 [0: 0]
35147-[...]
35148-
35149-MAPPED_LIBRARIES:
35150-[...]]]></programlisting> The following matches the above heap profile, but most
35151-tokens are replaced with <constant>&lt;description&gt;</constant> to indicate
35152-descriptions of the corresponding fields.  <programlisting><![CDATA[
35153-<heap_profile_format_version>/<mean_sample_interval>
35154-  <aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
35155-  [...]
35156-  <thread_3_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
35157-  [...]
35158-  <thread_99_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
35159-  [...]
35160-@ <top_frame> <frame> [...] <frame> <frame> <frame> [...]
35161-  <backtrace_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
35162-  <backtrace_thread_3>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
35163-  <backtrace_thread_99>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
35164-[...]
35165-
35166-MAPPED_LIBRARIES:
35167-</proc/<pid>/maps>]]></programlisting></para>
35168-  </refsect1>
35169-
35170-  <refsect1 id="debugging_malloc_problems">
35171-    <title>DEBUGGING MALLOC PROBLEMS</title>
35172-    <para>When debugging, it is a good idea to configure/build jemalloc with
35173-    the <option>--enable-debug</option> and <option>--enable-fill</option>
35174-    options, and recompile the program with suitable options and symbols for
35175-    debugger support.  When so configured, jemalloc incorporates a wide variety
35176-    of run-time assertions that catch application errors such as double-free,
35177-    write-after-free, etc.</para>
35178-
35179-    <para>Programs often accidentally depend on <quote>uninitialized</quote>
35180-    memory actually being filled with zero bytes.  Junk filling
35181-    (see the <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link>
35182-    option) tends to expose such bugs in the form of obviously incorrect
35183-    results and/or coredumps.  Conversely, zero
35184-    filling (see the <link
35185-    linkend="opt.zero"><mallctl>opt.zero</mallctl></link> option) eliminates
35186-    the symptoms of such bugs.  Between these two options, it is usually
35187-    possible to quickly detect, diagnose, and eliminate such bugs.</para>
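-
-    <para>For example, assuming jemalloc was built with
-    <option>--enable-fill</option>, junk filling can be enabled for a single
-    run without rebuilding the application (the program name is
-    illustrative):
-      <screen>MALLOC_CONF="junk:true" ./a.out</screen>
-    </para>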
35188-
35189-    <para>This implementation does not provide much detail about the problems
35190-    it detects, because the performance impact for storing such information
35191-    would be prohibitive.</para>
35192-  </refsect1>
35193-  <refsect1 id="diagnostic_messages">
35194-    <title>DIAGNOSTIC MESSAGES</title>
35195-    <para>If any of the memory allocation/deallocation functions detect an
35196-    error or warning condition, a message will be printed to file descriptor
35197-    <constant>STDERR_FILENO</constant>.  Errors will result in the process
35198-    dumping core.  If the <link
35199-    linkend="opt.abort"><mallctl>opt.abort</mallctl></link> option is set, most
35200-    warnings are treated as errors.</para>
35201-
35202-    <para>The <varname>malloc_message</varname> variable allows the programmer
35203-    to override the function that emits the text strings forming the errors
35204-    and warnings, should the <constant>STDERR_FILENO</constant> file
35205-    descriptor be unsuitable for this purpose.
35206-    <function>malloc_message()</function> takes the
35207-    <parameter>cbopaque</parameter> pointer argument that is
35208-    <constant>NULL</constant> unless overridden by the arguments in a call to
35209-    <function>malloc_stats_print()</function>, followed by a string
35210-    pointer.  Please note that doing anything which tries to allocate memory in
35211-    this function is likely to result in a crash or deadlock.</para>
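-
-    <para>As a minimal sketch (assuming the default unprefixed symbol name;
-    the log path is illustrative), a replacement writer might look like the
-    following.  Note that it performs no allocation, per the caveat
-    above:
-    <programlisting language="C"><![CDATA[
-#include <fcntl.h>
-#include <string.h>
-#include <unistd.h>
-#include <jemalloc/jemalloc.h>
-
-static int log_fd = STDERR_FILENO;
-
-/* Forward allocator messages to log_fd without allocating. */
-static void
-write_cb(void *cbopaque, const char *s) {
-    (void)cbopaque; /* NULL unless supplied via malloc_stats_print(). */
-    (void)write(log_fd, s, strlen(s));
-}
-
-int main(void) {
-    int fd = open("jemalloc.log", O_WRONLY | O_CREAT | O_APPEND, 0644);
-    if (fd != -1) {
-        log_fd = fd;
-    }
-    malloc_message = write_cb;
-    /* ... application code ... */
-    return 0;
-}]]></programlisting></para>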
35212-
35213-    <para>All messages are prefixed by
35214-    <quote><computeroutput>&lt;jemalloc&gt;: </computeroutput></quote>.</para>
35215-  </refsect1>
35216-  <refsect1 id="return_values">
35217-    <title>RETURN VALUES</title>
35218-    <refsect2>
35219-      <title>Standard API</title>
35220-      <para>The <function>malloc()</function> and
35221-      <function>calloc()</function> functions return a pointer to the
35222-      allocated memory if successful; otherwise a <constant>NULL</constant>
35223-      pointer is returned and <varname>errno</varname> is set to
35224-      <errorname>ENOMEM</errorname>.</para>
35225-
35226-      <para>The <function>posix_memalign()</function> function
35227-      returns the value 0 if successful; otherwise it returns an error value.
35228-      The <function>posix_memalign()</function> function will fail
35229-      if:
35230-        <variablelist>
35231-          <varlistentry>
35232-            <term><errorname>EINVAL</errorname></term>
35233-
35234-            <listitem><para>The <parameter>alignment</parameter> parameter is
35235-            not a power of 2 at least as large as
35236-            <code language="C">sizeof(<type>void *</type>)</code>.
35237-            </para></listitem>
35238-          </varlistentry>
35239-          <varlistentry>
35240-            <term><errorname>ENOMEM</errorname></term>
35241-
35242-            <listitem><para>Memory allocation error.</para></listitem>
35243-          </varlistentry>
35244-        </variablelist>
35245-      </para>
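-
-      <para>For instance (a minimal usage sketch, not a normative example),
-      note that the error is reported via the return value rather than
-      <varname>errno</varname>:
-      <programlisting language="C"><![CDATA[
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-int main(void) {
-    void *p;
-    /* Request 1024 bytes aligned to a 64-byte boundary. */
-    int err = posix_memalign(&p, 64, 1024);
-    if (err == EINVAL) {
-        fprintf(stderr, "invalid alignment\n");
-    } else if (err == ENOMEM) {
-        fprintf(stderr, "allocation failed\n");
-    } else {
-        free(p);
-    }
-    return err;
-}]]></programlisting></para>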
35246-
35247-      <para>The <function>aligned_alloc()</function> function returns
35248-      a pointer to the allocated memory if successful; otherwise a
35249-      <constant>NULL</constant> pointer is returned and
35250-      <varname>errno</varname> is set.  The
35251-      <function>aligned_alloc()</function> function will fail if:
35252-        <variablelist>
35253-          <varlistentry>
35254-            <term><errorname>EINVAL</errorname></term>
35255-
35256-            <listitem><para>The <parameter>alignment</parameter> parameter is
35257-            not a power of 2.
35258-            </para></listitem>
35259-          </varlistentry>
35260-          <varlistentry>
35261-            <term><errorname>ENOMEM</errorname></term>
35262-
35263-            <listitem><para>Memory allocation error.</para></listitem>
35264-          </varlistentry>
35265-        </variablelist>
35266-      </para>
35267-
35268-      <para>The <function>realloc()</function> function returns a
35269-      pointer, possibly identical to <parameter>ptr</parameter>, to the
35270-      allocated memory if successful; otherwise a <constant>NULL</constant>
35271-      pointer is returned, and <varname>errno</varname> is set to
35272-      <errorname>ENOMEM</errorname> if the error was the result of an
35273-      allocation failure.  The <function>realloc()</function>
35274-      function always leaves the original buffer intact when an error occurs.
35275-      </para>
35276-
35277-      <para>The <function>free()</function> function returns no
35278-      value.</para>
35279-    </refsect2>
35280-    <refsect2>
35281-      <title>Non-standard API</title>
35282-      <para>The <function>mallocx()</function> and
35283-      <function>rallocx()</function> functions return a pointer to
35284-      the allocated memory if successful; otherwise a <constant>NULL</constant>
35285-      pointer is returned to indicate insufficient contiguous memory was
35286-      available to service the allocation request.  </para>
35287-
35288-      <para>The <function>xallocx()</function> function returns the
35289-      real size of the resulting resized allocation pointed to by
35290-      <parameter>ptr</parameter>, which is a value less than
35291-      <parameter>size</parameter> if the allocation could not be adequately
35292-      grown in place.  </para>
35293-
35294-      <para>The <function>sallocx()</function> function returns the
35295-      real size of the allocation pointed to by <parameter>ptr</parameter>.
35296-      </para>
35297-
35298-      <para>The <function>nallocx()</function> function returns the real size
35299-      that would result from a successful equivalent
35300-      <function>mallocx()</function> function call, or zero if
35301-      insufficient memory is available to perform the size computation.  </para>
35302-
35303-      <para>The <function>mallctl()</function>,
35304-      <function>mallctlnametomib()</function>, and
35305-      <function>mallctlbymib()</function> functions return 0 on
35306-      success; otherwise they return an error value.  The functions will fail
35307-      if:
35308-        <variablelist>
35309-          <varlistentry>
35310-            <term><errorname>EINVAL</errorname></term>
35311-
35312-            <listitem><para><parameter>newp</parameter> is not
35313-            <constant>NULL</constant>, and <parameter>newlen</parameter> is too
35314-            large or too small.  Alternatively, <parameter>*oldlenp</parameter>
35315-            is too large or too small; when this happens, except for a very few
35316-            cases explicitly documented otherwise, as much data as possible
35317-            is read despite the error, with the amount of data read being
35318-            recorded in <parameter>*oldlenp</parameter>.</para></listitem>
35319-          </varlistentry>
35320-          <varlistentry>
35321-            <term><errorname>ENOENT</errorname></term>
35322-
35323-            <listitem><para><parameter>name</parameter> or
35324-            <parameter>mib</parameter> specifies an unknown/invalid
35325-            value.</para></listitem>
35326-          </varlistentry>
35327-          <varlistentry>
35328-            <term><errorname>EPERM</errorname></term>
35329-
35330-            <listitem><para>Attempt to read or write a void value, or attempt
35331-            to write a read-only value.</para></listitem>
35332-          </varlistentry>
35333-          <varlistentry>
35334-            <term><errorname>EAGAIN</errorname></term>
35335-
35336-            <listitem><para>A memory allocation failure
35337-            occurred.</para></listitem>
35338-          </varlistentry>
35339-          <varlistentry>
35340-            <term><errorname>EFAULT</errorname></term>
35341-
35342-            <listitem><para>An interface with side effects failed in some way
35343-            not directly related to <function>mallctl*()</function>
35344-            read/write processing.</para></listitem>
35345-          </varlistentry>
35346-        </variablelist>
35347-      </para>
35348-
35349-      <para>The <function>malloc_usable_size()</function> function
35350-      returns the usable size of the allocation pointed to by
35351-      <parameter>ptr</parameter>.  </para>
35352-    </refsect2>
35353-  </refsect1>
35354-  <refsect1 id="environment">
35355-    <title>ENVIRONMENT</title>
35356-    <para>The following environment variable affects the execution of the
35357-    allocation functions:
35358-      <variablelist>
35359-        <varlistentry>
35360-          <term><envar>MALLOC_CONF</envar></term>
35361-
35362-          <listitem><para>If the environment variable
35363-          <envar>MALLOC_CONF</envar> is set, the characters it contains
35364-          will be interpreted as options.</para></listitem>
35365-        </varlistentry>
35366-      </variablelist>
35367-    </para>
35368-  </refsect1>
35369-  <refsect1 id="examples">
35370-    <title>EXAMPLES</title>
35371-    <para>To dump core whenever a problem occurs:
35372-      <screen>ln -s 'abort:true' /etc/malloc.conf</screen>
35373-    </para>
35374-    <para>To specify in the source that only one arena should be automatically
35375-    created:
35376-      <programlisting language="C"><![CDATA[
35377-malloc_conf = "narenas:1";]]></programlisting></para>
35378-  </refsect1>
35379-  <refsect1 id="see_also">
35380-    <title>SEE ALSO</title>
35381-    <para><citerefentry><refentrytitle>madvise</refentrytitle>
35382-    <manvolnum>2</manvolnum></citerefentry>,
35383-    <citerefentry><refentrytitle>mmap</refentrytitle>
35384-    <manvolnum>2</manvolnum></citerefentry>,
35385-    <citerefentry><refentrytitle>sbrk</refentrytitle>
35386-    <manvolnum>2</manvolnum></citerefentry>,
35387-    <citerefentry><refentrytitle>utrace</refentrytitle>
35388-    <manvolnum>2</manvolnum></citerefentry>,
35389-    <citerefentry><refentrytitle>alloca</refentrytitle>
35390-    <manvolnum>3</manvolnum></citerefentry>,
35391-    <citerefentry><refentrytitle>atexit</refentrytitle>
35392-    <manvolnum>3</manvolnum></citerefentry>,
35393-    <citerefentry><refentrytitle>getpagesize</refentrytitle>
35394-    <manvolnum>3</manvolnum></citerefentry></para>
35395-  </refsect1>
35396-  <refsect1 id="standards">
35397-    <title>STANDARDS</title>
35398-    <para>The <function>malloc()</function>,
35399-    <function>calloc()</function>,
35400-    <function>realloc()</function>, and
35401-    <function>free()</function> functions conform to ISO/IEC
35402-    9899:1990 (<quote>ISO C90</quote>).</para>
35403-
35404-    <para>The <function>posix_memalign()</function> function conforms
35405-    to IEEE Std 1003.1-2001 (<quote>POSIX.1</quote>).</para>
35406-  </refsect1>
35407-</refentry>
35408diff --git a/jemalloc/doc/manpages.xsl.in b/jemalloc/doc/manpages.xsl.in
35409deleted file mode 100644
35410index 88b2626..0000000
35411--- a/jemalloc/doc/manpages.xsl.in
35412+++ /dev/null
35413@@ -1,4 +0,0 @@
35414-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
35415-  <xsl:import href="@XSLROOT@/manpages/docbook.xsl"/>
35416-  <xsl:import href="@abs_srcroot@doc/stylesheet.xsl"/>
35417-</xsl:stylesheet>
35418diff --git a/jemalloc/doc/stylesheet.xsl b/jemalloc/doc/stylesheet.xsl
35419deleted file mode 100644
35420index 619365d..0000000
35421--- a/jemalloc/doc/stylesheet.xsl
35422+++ /dev/null
35423@@ -1,10 +0,0 @@
35424-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
35425-  <xsl:param name="funcsynopsis.style">ansi</xsl:param>
35426-  <xsl:param name="function.parens" select="0"/>
35427-  <xsl:template match="function">
35428-    <xsl:call-template name="inline.monoseq"/>
35429-  </xsl:template>
35430-  <xsl:template match="mallctl">
35431-    <quote><xsl:call-template name="inline.monoseq"/></quote>
35432-  </xsl:template>
35433-</xsl:stylesheet>
35434diff --git a/jemalloc/doc_internal/PROFILING_INTERNALS.md b/jemalloc/doc_internal/PROFILING_INTERNALS.md
35435deleted file mode 100644
35436index 0a9f31c..0000000
35437--- a/jemalloc/doc_internal/PROFILING_INTERNALS.md
35438+++ /dev/null
35439@@ -1,127 +0,0 @@
35440-# jemalloc profiling
35441-This describes the mathematical basis behind jemalloc's profiling implementation, as well as the implementation tricks that make it effective. Historically, the jemalloc profiling design simply copied tcmalloc's. The implementation has since diverged, due to both the desire to record additional information, and to correct some biasing bugs.
35442-
35443-Note: this document is markdown with embedded LaTeX; different markdown renderers may not produce the expected output.  Viewing with `pandoc -s PROFILING_INTERNALS.md -o PROFILING_INTERNALS.pdf` is recommended.
35444-
35445-## Some tricks in our implementation toolbag
35446-
35447-### Sampling
35448-Recording our metadata is quite expensive; we need to walk up the stack to get a stack trace. On top of that, we need to allocate storage to record that stack trace, and stick it somewhere where a profile-dumping call can find it. That call might happen on another thread, so we'll probably need to take a lock to do so. These costs are quite large compared to the average cost of an allocation. To manage this, we'll only sample some fraction of allocations. This will miss some of them, so our data will be incomplete, but we'll try to make up for it. We can tune our sampling rate to balance accuracy and performance.
35449-
35450-### Fast Bernoulli sampling
35451-Compared to our fast paths, even a `coinflip(p)` function can be quite expensive. Having to do a random-number generation and some floating point operations would be a sizeable relative cost. However (as pointed out in [[Vitter, 1987](https://dl.acm.org/doi/10.1145/23002.23003)]), if we can orchestrate our algorithm so that many of our `coinflip` calls share their parameter value, we can do better. We can sample from the geometric distribution, and initialize a counter with the result. When the counter hits 0, the `coinflip` function returns true (and reinitializes its internal counter).
35452-This can let us do a random-number generation once per (logical) coinflip that comes up heads, rather than once per (logical) coinflip. Since we expect to sample relatively rarely, this can be a large win.
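-
-As a rough, self-contained sketch of the counter trick (illustrative only; the RNG, names, and structure are made up for exposition and this is not jemalloc's code):
-
-```c
-#include <math.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-/* Draw from Geometric(p): number of coinflips up to and including the first heads. */
-static long
-geometric_draw(double p) {
-    double u = (rand() + 1.0) / ((double)RAND_MAX + 2.0); /* u in (0, 1) */
-    return (long)ceil(log(u) / log(1.0 - p));
-}
-
-static long counter = -1;
-
-/* Logically coinflip(p), but only touches the RNG on a heads. */
-static bool
-coinflip(double p) {
-    if (counter < 0) {
-        counter = geometric_draw(p);
-    }
-    if (--counter == 0) {
-        counter = geometric_draw(p);
-        return true;
-    }
-    return false;
-}
-
-int main(void) {
-    long heads = 0, n = 1000000;
-    for (long i = 0; i < n; i++) {
-        heads += coinflip(0.01);
-    }
-    printf("observed heads rate: %f (expected ~0.01)\n", (double)heads / n);
-    return 0;
-}
-```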
35453-
35454-### Fast-path / slow-path thinking
35455-Most programs have a skewed distribution of allocations. Smaller allocations are much more frequent than large ones, but they are shorter-lived and account for a smaller fraction of program memory. "Small" and "large" are necessarily sort of fuzzy terms, but if we define "small" as "allocations jemalloc puts into slabs" and "large" as the others, then it's not uncommon for small allocations to be hundreds of times more frequent than large ones, yet take up only around half as much heap space as large ones do. Moreover, small allocations tend to be much cheaper than large ones (often by a factor of 20-30): they're more likely to hit in thread caches, less likely to have to do an mmap, and cheaper to fill (by the user) once the allocation has been returned.
35456-
35457-## An unbiased estimator of space consumption from (almost) arbitrary sampling strategies
35458-Suppose we have a sampling strategy that meets the following criteria:
35459-
35460-  - One allocation being sampled is independent of other allocations being sampled.
35461-  - Each allocation has a non-zero probability of being sampled.
35462-
35463-We can then estimate the bytes in live allocations through some particular stack trace as:
35464-
35465-$$ \sum_i S_i I_i \frac{1}{\mathrm{E}[I_i]} $$
35466-
35467-where the sum ranges over some index variable of live allocations from that stack, $S_i$ is the size of the $i$'th allocation, and $I_i$ is an indicator random variable for whether or not the $i$'th allocation is sampled. $S_i$ and $\mathrm{E}[I_i]$ are constants (the program allocations are fixed; the random variables are the sampling decisions), so taking the expectation we get
35468-
35469-$$ \sum_i S_i \mathrm{E}[I_i] \frac{1}{\mathrm{E}[I_i]}.$$
35470-
35471-This is of course $\sum_i S_i$, as we want (and, a similar calculation could be done for allocation counts as well).
35472-This is a fairly general strategy; note that while we require that sampling decisions be independent of one another's outcomes, they don't have to be independent of previous allocations, total bytes allocated, etc. You can imagine strategies that:
35473-
35474-  - Sample allocations at program startup at a higher rate than subsequent allocations
35475-  - Sample even-indexed allocations more frequently than odd-indexed ones (so long as no allocation has zero sampling probability)
35476-  - Let threads declare themselves as high-sampling-priority, and sample their allocations at an increased rate.
35477-
35478-These can all be fit into this framework to give an unbiased estimator.
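-
-As a toy sanity check (not from the discussion above): if every allocation is sampled independently with probability $1/100$, then $\mathrm{E}[I_i] = 1/100$ for every $i$, and the estimator reduces to weighting each sampled allocation's size by 100:
-
-$$ \sum_i S_i I_i \frac{1}{\mathrm{E}[I_i]} = 100 \sum_{i : I_i = 1} S_i, $$
-
-whose expectation is $\sum_i S_i$, the true number of live bytes.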
35479-
35480-## Evaluating sampling strategies
35481-Not all strategies for picking allocations to sample are equally good, of course. Among unbiased estimators, the lower the variance, the lower the mean squared error. Using the estimator above, the variance is:
35482-
35483-$$
35484-\begin{aligned}
35485-& \mathrm{Var}[\sum_i S_i I_i \frac{1}{\mathrm{E}[I_i]}]  \\
35486-=& \sum_i \mathrm{Var}[S_i I_i \frac{1}{\mathrm{E}[I_i]}] \\
35487-=& \sum_i \frac{S_i^2}{\mathrm{E}[I_i]^2} \mathrm{Var}[I_i] \\
35489-=& \sum_i \frac{S_i^2}{\mathrm{E}[I_i]^2} \mathrm{E}[I_i](1 - \mathrm{E}[I_i]) \\
35490-=& \sum_i S_i^2 \frac{1 - \mathrm{E}[I_i]}{\mathrm{E}[I_i]}.
35491-\end{aligned}
35492-$$
35493-
35494-We can use this formula to compare various strategy choices. All else being equal, lower-variance strategies are better.
35495-
35496-## Possible sampling strategies
35497-Because of the desire to avoid the fast-path costs, we'd like to use our Bernoulli trick if possible. There are two obvious counters to use: a coinflip per allocation, and a coinflip per byte allocated.
35498-
35499-### Bernoulli sampling per-allocation
35500-An obvious strategy is to pick some large $N$, and give each allocation a $1/N$ chance of being sampled. This would let us use our Bernoulli-via-Geometric trick. Using the formula from above, we can compute the variance as:
35501-
35502-$$ \sum_i S_i^2 \frac{1 - \frac{1}{N}}{\frac{1}{N}}  = (N-1) \sum_i S_i^2.$$
35503-
35504-That is, an allocation of size $Z$ contributes a term of $(N-1)Z^2$ to the variance.
35505-
35506-### Bernoulli sampling per-byte
35507-Another option we have is to pick some rate $R$, and give each byte a $1/R$ chance of being picked for sampling (at which point we would sample its contained allocation). The chance of an allocation of size $Z$ being sampled, then, is
35508-
35509-$$1-(1-\frac{1}{R})^{Z}$$
35510-
35511-and an allocation of size $Z$ contributes a term of
35512-
35513-$$Z^2 \frac{(1-\frac{1}{R})^{Z}}{1-(1-\frac{1}{R})^{Z}}.$$
35514-
35515-In practical settings, $R$ is large, and so this is well-approximated by
35516-
35517-$$Z^2 \frac{e^{-Z/R}}{1 - e^{-Z/R}} .$$
35518-
35519-Just to get a sense of the dynamics here, let's look at the behavior for various values of $Z$. When $Z$ is small relative to $R$, we can use $e^x \approx 1 + x$, and conclude that the variance contributed by a small-$Z$ allocation is around
35520-
35521-$$Z^2 \frac{1-Z/R}{Z/R} \approx RZ.$$
35522-
35523-When $Z$ is comparable to $R$, the variance term is near $Z^2$ (we have $\frac{e^{-Z/R}}{1 - e^{-Z/R}} = 1$ when $Z/R = \ln 2 \approx 0.693$). When $Z$ is large relative to $R$, the variance term goes to zero.
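-
-A rough sketch of how a per-byte scheme can reuse the counter idea (illustrative only; it uses an exponential inter-sample gap, i.e. the Poisson-process approximation, and resets the gap at each sample for simplicity; this is not jemalloc's code):
-
-```c
-#include <math.h>
-#include <stdbool.h>
-#include <stddef.h>
-#include <stdlib.h>
-
-/* Bytes until the next sampled byte, exponentially distributed with mean R. */
-static double
-next_gap(double R) {
-    double u = (rand() + 1.0) / ((double)RAND_MAX + 2.0); /* u in (0, 1) */
-    return -R * log(u);
-}
-
-static double bytes_until_sample = -1.0;
-
-/* Decide whether an allocation of `size` bytes contains a sampled byte;
- * the chance is approximately 1 - exp(-size / R). */
-static bool
-should_sample(size_t size, double R) {
-    if (bytes_until_sample < 0.0) {
-        bytes_until_sample = next_gap(R);
-    }
-    bytes_until_sample -= (double)size;
-    if (bytes_until_sample <= 0.0) {
-        bytes_until_sample = next_gap(R);
-        return true;
-    }
-    return false;
-}
-```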
35524-
35525-## Picking a sampling strategy
35526-The fast-path/slow-path dynamics of allocation patterns point us towards the per-byte sampling approach:
35527-
35528-  - The quadratic increase in variance per allocation in the first approach is quite costly when heaps have a non-negligible portion of their bytes in those allocations, which is practically often the case.
35529-  - The Bernoulli-per-byte approach shifts more of its samples towards large allocations, which are already a slow-path.
35530-  - We drive several tickers (e.g. tcache gc) by bytes allocated, and report bytes-allocated as a user-visible statistic, so we have to do all the necessary bookkeeping anyways.
35531-
35532-Indeed, this is the approach we use in jemalloc. Our heap dumps record the size of the allocation and the sampling rate $R$, and jeprof unbiases by dividing by $1 - e^{-Z/R}$.  The framework above would suggest dividing by $1-(1-1/R)^Z$; instead, we use the fact that $R$ is large in practical situations, and so $e^{-Z/R}$ is a good approximation (and faster to compute).  (Equivalently, we may also see this as the factor that falls out from viewing sampling as a Poisson process directly).
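-
-As a toy illustration of the unbiasing step (numbers chosen for round arithmetic, not taken from a real dump): with $R = 2^{20}$ bytes and a sampled allocation of $Z = 4096$ bytes,
-
-$$ \frac{Z}{1 - e^{-Z/R}} = \frac{4096}{1 - e^{-4096/2^{20}}} \approx 2^{20} \ \text{bytes}, $$
-
-so each rarely-sampled small allocation stands in for roughly $R$ bytes of heap.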
35533-
35534-## Consequences for heap dump consumers
35535-Using this approach means that there are a few things users need to be aware of.
35536-
35537-### Stack counts are not proportional to allocation frequencies
35538-If one stack appears twice as often as another, this by itself does not imply that it allocates twice as often. Consider the case in which there are only two types of allocating call stacks in a program. Stack A allocates 8 bytes, and occurs a million times in a program. Stack B allocates 8 MB, and occurs just once in a program. If our sampling rate $R$ is about 1MB, we expect stack A to show up about 8 times, and stack B to show up once. Stack A isn't 8 times more frequent than stack B, though; it's a million times more frequent.
35539-
35540-### Aggregation must be done after unbiasing samples
35541-Some tools manually parse heap dump output, and aggregate across stacks (or across program runs) to provide wider-scale data analyses. When doing this aggregation, though, it's important to unbias-and-then-sum, rather than sum-and-then-unbias. Reusing our example from the previous section: suppose we collect heap dumps of the program from a million machines. We then have 8 million occurrences of stack A (each of 8 bytes), and a million occurrences of stack B (each of 8 MB). If we sum first, we'll attribute 64 MB to stack A, and 8 TB to stack B. Unbiasing changes these numbers by an infinitesimal amount, so that sum-then-unbias dramatically underreports the amount of memory allocated by stack A.
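-
-To make the arithmetic concrete (using the same toy numbers, with $R = 2^{20}$ bytes and rounding freely): unbias-then-sum attributes to stack A roughly
-
-$$ 8 \times 10^6 \ \text{samples} \times \frac{8}{1 - e^{-8/2^{20}}} \approx 8 \times 10^6 \times 2^{20} \ \text{bytes} \approx 8\ \text{TB}, $$
-
-which matches the true total of $10^6$ machines $\times$ $10^6$ allocations $\times$ 8 bytes, whereas sum-then-unbias leaves the 64 MB figure essentially unchanged.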
35542-
35543-## An avenue for future exploration
35544-While the framework we laid out above is pretty general, as an engineering decision we're only interested in fairly simple approaches (i.e. ones for which the chance of an allocation being sampled depends only on its size). Our job is then: for each size class $Z$, pick a probability $p_Z$ that an allocation of that size will be sampled. We made some handwave-y references to statistical distributions to justify our choices, but there's no reason we need to pick them that way. Any set of non-zero probabilities is a valid choice.
35545-The real limiting factor in our ability to reduce estimator variance is the fact that sampling is expensive; we want to make sure we only do it on a small fraction of allocations. Our goal, then, is to pick the $p_Z$ to minimize variance given some maximum sampling rate $P$. If we define $a_Z$ to be the fraction of allocations of size $Z$, and $l_Z$ to be the fraction of allocations of size $Z$ still alive at the time of a heap dump, then we can phrase this as an optimization problem over the choices of $p_Z$:
35546-
35547-Minimize
35548-
35549-$$ \sum_Z Z^2 l_Z \frac{1-p_Z}{p_Z} $$
35550-
35551-subject to
35552-
35553-$$ \sum_Z a_Z p_Z \leq P $$
35554-
35555-Ignoring a term that doesn't depend on $p_Z$, the objective is minimized whenever
35556-
35557-$$ \sum_Z Z^2 l_Z \frac{1}{p_Z} $$
35558-
35559-is. For a particular program, $l_Z$ and $a_Z$ are just numbers that can be obtained (exactly) from existing stats introspection facilities, and we have a fairly tractable convex optimization problem (it can be framed as a second-order cone program). It would be interesting to evaluate, for various common allocation patterns, how well our current strategy adapts. Do our actual choices for $p_Z$ closely correspond to the optimal ones? How close is the variance of our choices to the variance of the optimal strategy?
35560-You can imagine an implementation that goes all the way and makes the $p_Z$ selections a tuning parameter. I don't think this is a good use of development time for the foreseeable future, but I do wonder about the answers to some of these questions.
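
For intuition only: if we ignore the $p_Z \leq 1$ constraints, Lagrange multipliers give $p_Z \propto Z\sqrt{l_Z/a_Z}$, rescaled so the budget constraint binds. The sketch below computes that unclipped optimum for some made-up $a_Z$ and $l_Z$ values; a real implementation would need to handle the clipping properly (e.g. via the second-order cone formulation mentioned above).

```c
#include <math.h>
#include <stdio.h>

/* Made-up per-size-class data: fraction of allocations (a_Z) and fraction of
 * allocations still alive at dump time (l_Z), as one might read from stats. */
static const double Z[]  = {16, 64, 256, 4096, 65536};
static const double aZ[] = {0.50, 0.30, 0.15, 0.04, 0.01};
static const double lZ[] = {0.20, 0.30, 0.25, 0.15, 0.10};
#define NCLASSES (sizeof(Z) / sizeof(Z[0]))

int
main(void) {
	double P = 0.001;	/* overall sampling budget */
	double norm = 0.0;
	for (size_t i = 0; i < NCLASSES; i++) {
		norm += aZ[i] * Z[i] * sqrt(lZ[i] / aZ[i]);
	}
	for (size_t i = 0; i < NCLASSES; i++) {
		/* Unclipped optimum: p_Z proportional to Z * sqrt(l_Z / a_Z),
		 * scaled so that sum_Z a_Z * p_Z == P. */
		double p = P * Z[i] * sqrt(lZ[i] / aZ[i]) / norm;
		if (p > 1.0) {
			p = 1.0;	/* crude clip; a real solver rebalances */
		}
		printf("Z=%6.0f  p_Z=%.2e\n", Z[i], p);
	}
	return 0;
}
```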
35561-
35562-## Implementation realities
35563-
35564-The nice story above is at least partially a lie. Initially, jeprof (copying its logic from pprof) had the sum-then-unbias error described above. The current version of jemalloc does the unbiasing step on a per-allocation basis internally, so that we're always tracking what the unbiased numbers "should" be. The problem is that actually surfacing those unbiased numbers would require a breaking change to jeprof (and to the various already-deployed tools that have copied its logic). Instead, we use a little more trickery. Since we know at dump time the numbers we want jeprof to report, we simply choose the values we output so that the jeprof numbers will match the true numbers. The math is described in `src/prof_data.c` (where the only cleverness is a change of variables that lets the exponentials fall out).
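
The gist of the trick, as a hedged sketch rather than the actual change of variables in `src/prof_data.c`:

```c
#include <math.h>
#include <stdio.h>

/*
 * Idea only: jeprof will divide whatever we emit by (1 - exp(-z/R)), so emit
 * the internally tracked unbiased byte count pre-multiplied by that same
 * factor.  jeprof's division then cancels it, and the true value comes out.
 */
static double
bytes_to_emit(double unbiased_bytes, double z, double R) {
	return unbiased_bytes * (1.0 - exp(-z / R));
}

int
main(void) {
	double R = 1024.0 * 1024.0, z = 8.0, truth = 8.39e12;
	double emitted = bytes_to_emit(truth, z, R);
	/* What a jeprof-style consumer reconstructs from the emitted value. */
	printf("recovered = %.3g (true %.3g)\n",
	    emitted / (1.0 - exp(-z / R)), truth);
	return 0;
}
```

The round trip in `main` is the whole point: whatever factor jeprof divides by, we have already multiplied in.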
35565-
35566-This has the effect of making the output of jeprof (and related tools) correct, while making the raw dump values they consume incorrect. This can be annoying to human readers of raw profiling dump output.
35567diff --git a/jemalloc/doc_internal/jemalloc.svg b/jemalloc/doc_internal/jemalloc.svg
35568deleted file mode 100644
35569index 5e77327..0000000
35570--- a/jemalloc/doc_internal/jemalloc.svg
35571+++ /dev/null
35572@@ -1 +0,0 @@
35573-<svg id="Layer_3" data-name="Layer 3" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 499 184.27"><defs><style>.cls-1,.cls-3{fill:none;}.cls-2{clip-path:url(#clip-path);}.cls-3{stroke:#262262;stroke-linecap:round;stroke-linejoin:round;stroke-width:4px;}</style><clipPath id="clip-path" transform="translate(-100.66 -259.87)"><path class="cls-1" d="M144.57,396c0,18.2-9.37,27.83-37.33,23.55V400.1c11.11,2.14,12.18-.27,12.18-11.5V324.11h25Zm-12.71-78.66c-9,0-15.52-1.48-15.52-12.71S122.9,292,131.86,292s15.52,1.2,15.52,12.58C147.38,315.55,141,317.29,131.86,317.29Zm50.57,76.39c-30.64,0-35.85-18.86-35.85-35.59s5.61-35.32,35.72-35.32c35.32,0,33.44,28,33.44,40.67H170.12c.54,9.5,4,14.05,11.37,14.05,6.83,0,9.64-3.34,10-7.89l24.75.13C215.48,383.38,205.84,393.68,182.43,393.68Zm-1.47-55c-6.69,0-10,2.81-10.84,12h21.41C190.73,341.9,188.18,338.69,181,338.69Zm112.78,53.65V351.4c0-4.15-1.33-8.16-6-8.16-5,0-6,3.75-6,8.16v40.94H256.42V351.4c0-4.15-.81-8.16-5.89-8.16s-6.29,3.75-6.29,8.16v40.94H219.09V324.11h14l4.15,8c2.67-4.69,10.56-9.37,18.86-9.37,7.36,0,16.19,2.14,21,9.1,3.48-5.22,11.11-9.1,20.21-9.1,19.13,0,21.54,11.37,21.54,27.16v42.41Zm83.09,0L372.41,383c-5.48,7.22-13.11,10.7-24.35,10.7-14.85,0-26.75-6-26.75-19.93,0-15.26,12.44-20.88,44.28-23,0-9.5-1.61-12.57-8.83-12.57-6.69,0-8.56,3.48-8.56,9.9H323.45c0-12.85,6.82-25.29,32.64-25.29,30,0,34.65,14.45,34.65,31.17v38.4Zm-21.54-28.63c-6.29.94-8.3,4.28-8.3,7.36,0,4.28,2.41,6.69,8.3,6.69s10.17-4.82,10.17-15.12ZM396,392.34V297.75h24.75v94.59Zm30.77,0V297.75h24.75v94.59Zm62.21,1.34c-28.09,0-34.11-18.6-34.11-35.32s6.29-35.59,34.38-35.59c27.7,0,34.12,19,34.12,35.59C523.33,375.22,516.91,393.68,488.94,393.68Zm.27-50.84c-7.89,0-11.37,4.82-11.37,15.52s3.61,15.39,11.1,15.39c7.9,0,11.38-4.42,11.38-15.39C500.32,347.79,497.24,342.84,489.21,342.84Zm69.17,50.84c-28.9,0-34.52-18.6-34.52-35.32s5.76-35.59,34.12-35.59c21.14,0,34.52,10.84,34.52,31.17H568.42c0-9.23-5.49-11.23-10.17-11.23-7,0-11.11,4.54-11.11,15.38s4,15.52,11.11,15.52c4.81,0,10-2.41,10-10.57H592.5C592.5,383.38,579,393.68,558.38,393.68Z"/></clipPath></defs><title>jemalloc Final Logo</title><g class="cls-2"><line class="cls-3" x1="345" y1="182.27" x2="345" y2="2"/><line class="cls-3" x1="225" y1="182.27" x2="225" y2="2"/><line class="cls-3" x1="105" y1="182.27" x2="105" y2="2"/><line class="cls-3" x1="43" y1="182.27" x2="43" y2="2"/><line class="cls-3" x1="475" y1="182.27" x2="475" y2="2"/><line class="cls-3" x1="195" y1="182.27" x2="195" y2="2"/><line class="cls-3" x1="75" y1="182.27" x2="75" y2="2"/><line class="cls-3" x1="337" y1="182.27" x2="337" y2="2"/><line class="cls-3" x1="215" y1="182.27" x2="215" y2="2"/><line class="cls-3" x1="95" y1="182.27" x2="95" y2="2"/><line class="cls-3" x1="415" y1="182.27" x2="415" y2="2"/><line class="cls-3" x1="385" y1="182.27" x2="385" y2="2"/><line class="cls-3" x1="183" y1="182.27" x2="183" y2="2"/><line class="cls-3" x1="65" y1="182.27" x2="65" y2="2"/><line class="cls-3" x1="173" y1="182.27" x2="173" y2="2"/><line class="cls-3" x1="145" y1="182.27" x2="145" y2="2"/><line class="cls-3" x1="163" y1="182.27" x2="163" y2="2"/><line class="cls-3" x1="460" y1="182.27" x2="460" y2="2"/><line class="cls-3" x1="281" y1="182.27" x2="281" y2="2"/><line class="cls-3" x1="313" y1="182.27" x2="313" y2="2"/><line class="cls-3" x1="252" y1="182.27" x2="252" y2="2"/><line class="cls-3" x1="450" y1="182.27" x2="450" y2="2"/><line class="cls-3" x1="271" y1="182.27" x2="271" y2="2"/><line class="cls-3" x1="332" y1="182.27" x2="332" y2="2"/><line 
class="cls-3" x1="203" y1="182.27" x2="203" y2="2"/><line class="cls-3" x1="13" y1="182.27" x2="13" y2="2"/><line class="cls-3" x1="373" y1="182.27" x2="373" y2="2"/><line class="cls-3" x1="354" y1="182.27" x2="354" y2="2"/><line class="cls-3" x1="235" y1="182.27" x2="235" y2="2"/><line class="cls-3" x1="115" y1="182.27" x2="115" y2="2"/><line class="cls-3" x1="53" y1="182.27" x2="53" y2="2"/><line class="cls-3" x1="484" y1="182.27" x2="484" y2="2"/><line class="cls-3" x1="405" y1="182.27" x2="405" y2="2"/><line class="cls-3" x1="85" y1="182.27" x2="85" y2="2"/><line class="cls-3" x1="225" y1="182.27" x2="225" y2="2"/><line class="cls-3" x1="105" y1="182.27" x2="105" y2="2"/><line class="cls-3" x1="43" y1="182.27" x2="43" y2="2"/><line class="cls-3" x1="435" y1="182.27" x2="435" y2="2"/><line class="cls-3" x1="123" y1="182.27" x2="123" y2="2"/><line class="cls-3" x1="75" y1="182.27" x2="75" y2="2"/><line class="cls-3" x1="183" y1="182.27" x2="183" y2="2"/><line class="cls-3" x1="155" y1="182.27" x2="155" y2="2"/><line class="cls-3" x1="173" y1="182.27" x2="173" y2="2"/><line class="cls-3" x1="145" y1="182.27" x2="145" y2="2"/><line class="cls-3" x1="470" y1="182.27" x2="470" y2="2"/><line class="cls-3" x1="292" y1="182.27" x2="292" y2="2"/><line class="cls-3" x1="262" y1="182.27" x2="262" y2="2"/><line class="cls-3" x1="460" y1="182.27" x2="460" y2="2"/><line class="cls-3" x1="281" y1="182.27" x2="281" y2="2"/><line class="cls-3" x1="313" y1="182.27" x2="313" y2="2"/><line class="cls-3" x1="243" y1="182.27" x2="243" y2="2"/><line class="cls-3" x1="22" y1="182.27" x2="22" y2="2"/><line class="cls-3" x1="383" y1="182.27" x2="383" y2="2"/><line class="cls-3" x1="5" y1="182.27" x2="5" y2="2"/><line class="cls-3" x1="133" y1="182.27" x2="133" y2="2"/><line class="cls-3" x1="362" y1="182.27" x2="362" y2="2"/><line class="cls-3" x1="288" y1="182.27" x2="288" y2="2"/><line class="cls-3" x1="298" y1="182.27" x2="298" y2="2"/><line class="cls-3" x1="423" y1="182.27" x2="423" y2="2"/><line class="cls-3" x1="369" y1="182.27" x2="369" y2="2"/><line class="cls-3" x1="490" y1="182.27" x2="490" y2="2"/><line class="cls-3" x1="2" y1="182.27" x2="2" y2="2"/><line class="cls-3" x1="493" y1="182.27" x2="493" y2="2"/><line class="cls-3" x1="225" y1="182.27" x2="225" y2="2"/><line class="cls-3" x1="105" y1="182.27" x2="105" y2="2"/><line class="cls-3" x1="43" y1="182.27" x2="43" y2="2"/><line class="cls-3" x1="475" y1="182.27" x2="475" y2="2"/><line class="cls-3" x1="195" y1="182.27" x2="195" y2="2"/><line class="cls-3" x1="75" y1="182.27" x2="75" y2="2"/><line class="cls-3" x1="337" y1="182.27" x2="337" y2="2"/><line class="cls-3" x1="215" y1="182.27" x2="215" y2="2"/><line class="cls-3" x1="95" y1="182.27" x2="95" y2="2"/><line class="cls-3" x1="415" y1="182.27" x2="415" y2="2"/><line class="cls-3" x1="385" y1="182.27" x2="385" y2="2"/><line class="cls-3" x1="183" y1="182.27" x2="183" y2="2"/><line class="cls-3" x1="65" y1="182.27" x2="65" y2="2"/><line class="cls-3" x1="173" y1="182.27" x2="173" y2="2"/><line class="cls-3" x1="145" y1="182.27" x2="145" y2="2"/><line class="cls-3" x1="163" y1="182.27" x2="163" y2="2"/><line class="cls-3" x1="460" y1="182.27" x2="460" y2="2"/><line class="cls-3" x1="281" y1="182.27" x2="281" y2="2"/><line class="cls-3" x1="313" y1="182.27" x2="313" y2="2"/><line class="cls-3" x1="252" y1="182.27" x2="252" y2="2"/><line class="cls-3" x1="450" y1="182.27" x2="450" y2="2"/><line class="cls-3" x1="271" y1="182.27" x2="271" y2="2"/><line class="cls-3" x1="306" y1="182.27" x2="306" 
y2="2"/><line class="cls-3" x1="203" y1="182.27" x2="203" y2="2"/><line class="cls-3" x1="13" y1="182.27" x2="13" y2="2"/><line class="cls-3" x1="373" y1="182.27" x2="373" y2="2"/><line class="cls-3" x1="354" y1="182.27" x2="354" y2="2"/><line class="cls-3" x1="235" y1="182.27" x2="235" y2="2"/><line class="cls-3" x1="115" y1="182.27" x2="115" y2="2"/><line class="cls-3" x1="53" y1="182.27" x2="53" y2="2"/><line class="cls-3" x1="484" y1="182.27" x2="484" y2="2"/><line class="cls-3" x1="405" y1="182.27" x2="405" y2="2"/><line class="cls-3" x1="85" y1="182.27" x2="85" y2="2"/><line class="cls-3" x1="225" y1="182.27" x2="225" y2="2"/><line class="cls-3" x1="105" y1="182.27" x2="105" y2="2"/><line class="cls-3" x1="43" y1="182.27" x2="43" y2="2"/><line class="cls-3" x1="435" y1="182.27" x2="435" y2="2"/><line class="cls-3" x1="123" y1="182.27" x2="123" y2="2"/><line class="cls-3" x1="75" y1="182.27" x2="75" y2="2"/><line class="cls-3" x1="183" y1="182.27" x2="183" y2="2"/><line class="cls-3" x1="155" y1="182.27" x2="155" y2="2"/><line class="cls-3" x1="173" y1="182.27" x2="173" y2="2"/><line class="cls-3" x1="145" y1="182.27" x2="145" y2="2"/><line class="cls-3" x1="470" y1="182.27" x2="470" y2="2"/><line class="cls-3" x1="292" y1="182.27" x2="292" y2="2"/><line class="cls-3" x1="262" y1="182.27" x2="262" y2="2"/><line class="cls-3" x1="460" y1="182.27" x2="460" y2="2"/><line class="cls-3" x1="281" y1="182.27" x2="281" y2="2"/><line class="cls-3" x1="328" y1="182.27" x2="328" y2="2"/><line class="cls-3" x1="243" y1="182.27" x2="243" y2="2"/><line class="cls-3" x1="22" y1="182.27" x2="22" y2="2"/><line class="cls-3" x1="383" y1="182.27" x2="383" y2="2"/><line class="cls-3" x1="5" y1="182.27" x2="5" y2="2"/><line class="cls-3" x1="32" y1="182.27" x2="32" y2="2"/><line class="cls-3" x1="133" y1="182.27" x2="133" y2="2"/><line class="cls-3" x1="362" y1="182.27" x2="362" y2="2"/><line class="cls-3" x1="288" y1="182.27" x2="288" y2="2"/><line class="cls-3" x1="298" y1="182.27" x2="298" y2="2"/><line class="cls-3" x1="423" y1="182.27" x2="423" y2="2"/><line class="cls-3" x1="369" y1="182.27" x2="369" y2="2"/><line class="cls-3" x1="490" y1="182.27" x2="490" y2="2"/><line class="cls-3" x1="2" y1="182.27" x2="2" y2="2"/><line class="cls-3" x1="493" y1="182.27" x2="493" y2="2"/><line class="cls-3" x1="349" y1="182.27" x2="349" y2="2"/><line class="cls-3" x1="229" y1="182.27" x2="229" y2="2"/><line class="cls-3" x1="109" y1="182.27" x2="109" y2="2"/><line class="cls-3" x1="47" y1="182.27" x2="47" y2="2"/><line class="cls-3" x1="479" y1="182.27" x2="479" y2="2"/><line class="cls-3" x1="399" y1="182.27" x2="399" y2="2"/><line class="cls-3" x1="199" y1="182.27" x2="199" y2="2"/><line class="cls-3" x1="79" y1="182.27" x2="79" y2="2"/><line class="cls-3" x1="341" y1="182.27" x2="341" y2="2"/><line class="cls-3" x1="219" y1="182.27" x2="219" y2="2"/><line class="cls-3" x1="99" y1="182.27" x2="99" y2="2"/><line class="cls-3" x1="41" y1="182.27" x2="41" y2="2"/><line class="cls-3" x1="419" y1="182.27" x2="419" y2="2"/><line class="cls-3" x1="389" y1="182.27" x2="389" y2="2"/><line class="cls-3" x1="187" y1="182.27" x2="187" y2="2"/><line class="cls-3" x1="69" y1="182.27" x2="69" y2="2"/><line class="cls-3" x1="177" y1="182.27" x2="177" y2="2"/><line class="cls-3" x1="149" y1="182.27" x2="149" y2="2"/><line class="cls-3" x1="464" y1="182.27" x2="464" y2="2"/><line class="cls-3" x1="285" y1="182.27" x2="285" y2="2"/><line class="cls-3" x1="317" y1="182.27" x2="317" y2="2"/><line class="cls-3" x1="454" y1="182.27" 
x2="454" y2="2"/><line class="cls-3" x1="275" y1="182.27" x2="275" y2="2"/><line class="cls-3" x1="308" y1="182.27" x2="308" y2="2"/><line class="cls-3" x1="207" y1="182.27" x2="207" y2="2"/><line class="cls-3" x1="17" y1="182.27" x2="17" y2="2"/><line class="cls-3" x1="377" y1="182.27" x2="377" y2="2"/><line class="cls-3" x1="358" y1="182.27" x2="358" y2="2"/><line class="cls-3" x1="238" y1="182.27" x2="238" y2="2"/><line class="cls-3" x1="119" y1="182.27" x2="119" y2="2"/><line class="cls-3" x1="488" y1="182.27" x2="488" y2="2"/><line class="cls-3" x1="409" y1="182.27" x2="409" y2="2"/><line class="cls-3" x1="229" y1="182.27" x2="229" y2="2"/><line class="cls-3" x1="109" y1="182.27" x2="109" y2="2"/><line class="cls-3" x1="47" y1="182.27" x2="47" y2="2"/><line class="cls-3" x1="439" y1="182.27" x2="439" y2="2"/><line class="cls-3" x1="399" y1="182.27" x2="399" y2="2"/><line class="cls-3" x1="127" y1="182.27" x2="127" y2="2"/><line class="cls-3" x1="79" y1="182.27" x2="79" y2="2"/><line class="cls-3" x1="187" y1="182.27" x2="187" y2="2"/><line class="cls-3" x1="159" y1="182.27" x2="159" y2="2"/><line class="cls-3" x1="177" y1="182.27" x2="177" y2="2"/><line class="cls-3" x1="149" y1="182.27" x2="149" y2="2"/><line class="cls-3" x1="474" y1="182.27" x2="474" y2="2"/><line class="cls-3" x1="266" y1="182.27" x2="266" y2="2"/><line class="cls-3" x1="464" y1="182.27" x2="464" y2="2"/><line class="cls-3" x1="285" y1="182.27" x2="285" y2="2"/><line class="cls-3" x1="317" y1="182.27" x2="317" y2="2"/><line class="cls-3" x1="247" y1="182.27" x2="247" y2="2"/><line class="cls-3" x1="26" y1="182.27" x2="26" y2="2"/><line class="cls-3" x1="387" y1="182.27" x2="387" y2="2"/><line class="cls-3" x1="9" y1="182.27" x2="9" y2="2"/><line class="cls-3" x1="137" y1="182.27" x2="137" y2="2"/><line class="cls-3" x1="292" y1="182.27" x2="292" y2="2"/><line class="cls-3" x1="373" y1="182.27" x2="373" y2="2"/><line class="cls-3" x1="56" y1="182.27" x2="56" y2="2"/><line class="cls-3" x1="494" y1="182.27" x2="494" y2="2"/><line class="cls-3" x1="497" y1="182.27" x2="497" y2="2"/><line class="cls-3" x1="349" y1="182.27" x2="349" y2="2"/><line class="cls-3" x1="229" y1="182.27" x2="229" y2="2"/><line class="cls-3" x1="109" y1="182.27" x2="109" y2="2"/><line class="cls-3" x1="47" y1="182.27" x2="47" y2="2"/><line class="cls-3" x1="479" y1="182.27" x2="479" y2="2"/><line class="cls-3" x1="399" y1="182.27" x2="399" y2="2"/><line class="cls-3" x1="199" y1="182.27" x2="199" y2="2"/><line class="cls-3" x1="79" y1="182.27" x2="79" y2="2"/><line class="cls-3" x1="341" y1="182.27" x2="341" y2="2"/><line class="cls-3" x1="219" y1="182.27" x2="219" y2="2"/><line class="cls-3" x1="99" y1="182.27" x2="99" y2="2"/><line class="cls-3" x1="41" y1="182.27" x2="41" y2="2"/><line class="cls-3" x1="419" y1="182.27" x2="419" y2="2"/><line class="cls-3" x1="389" y1="182.27" x2="389" y2="2"/><line class="cls-3" x1="187" y1="182.27" x2="187" y2="2"/><line class="cls-3" x1="69" y1="182.27" x2="69" y2="2"/><line class="cls-3" x1="177" y1="182.27" x2="177" y2="2"/><line class="cls-3" x1="149" y1="182.27" x2="149" y2="2"/><line class="cls-3" x1="141" y1="182.27" x2="141" y2="2"/><line class="cls-3" x1="464" y1="182.27" x2="464" y2="2"/><line class="cls-3" x1="285" y1="182.27" x2="285" y2="2"/><line class="cls-3" x1="317" y1="182.27" x2="317" y2="2"/><line class="cls-3" x1="454" y1="182.27" x2="454" y2="2"/><line class="cls-3" x1="275" y1="182.27" x2="275" y2="2"/><line class="cls-3" x1="308" y1="182.27" x2="308" y2="2"/><line class="cls-3" 
x1="207" y1="182.27" x2="207" y2="2"/><line class="cls-3" x1="17" y1="182.27" x2="17" y2="2"/><line class="cls-3" x1="377" y1="182.27" x2="377" y2="2"/><line class="cls-3" x1="119" y1="182.27" x2="119" y2="2"/><line class="cls-3" x1="488" y1="182.27" x2="488" y2="2"/><line class="cls-3" x1="409" y1="182.27" x2="409" y2="2"/><line class="cls-3" x1="229" y1="182.27" x2="229" y2="2"/><line class="cls-3" x1="109" y1="182.27" x2="109" y2="2"/><line class="cls-3" x1="47" y1="182.27" x2="47" y2="2"/><line class="cls-3" x1="439" y1="182.27" x2="439" y2="2"/><line class="cls-3" x1="399" y1="182.27" x2="399" y2="2"/><line class="cls-3" x1="127" y1="182.27" x2="127" y2="2"/><line class="cls-3" x1="79" y1="182.27" x2="79" y2="2"/><line class="cls-3" x1="187" y1="182.27" x2="187" y2="2"/><line class="cls-3" x1="159" y1="182.27" x2="159" y2="2"/><line class="cls-3" x1="177" y1="182.27" x2="177" y2="2"/><line class="cls-3" x1="149" y1="182.27" x2="149" y2="2"/><line class="cls-3" x1="474" y1="182.27" x2="474" y2="2"/><line class="cls-3" x1="295" y1="182.27" x2="295" y2="2"/><line class="cls-3" x1="266" y1="182.27" x2="266" y2="2"/><line class="cls-3" x1="464" y1="182.27" x2="464" y2="2"/><line class="cls-3" x1="285" y1="182.27" x2="285" y2="2"/><line class="cls-3" x1="317" y1="182.27" x2="317" y2="2"/><line class="cls-3" x1="247" y1="182.27" x2="247" y2="2"/><line class="cls-3" x1="58" y1="182.27" x2="58" y2="2"/><line class="cls-3" x1="387" y1="182.27" x2="387" y2="2"/><line class="cls-3" x1="9" y1="182.27" x2="9" y2="2"/><line class="cls-3" x1="292" y1="182.27" x2="292" y2="2"/><line class="cls-3" x1="301" y1="182.27" x2="301" y2="2"/><line class="cls-3" x1="428" y1="182.27" x2="428" y2="2"/><line class="cls-3" x1="373" y1="182.27" x2="373" y2="2"/><line class="cls-3" x1="56" y1="182.27" x2="56" y2="2"/><line class="cls-3" x1="494" y1="182.27" x2="494" y2="2"/><line class="cls-3" x1="497" y1="182.27" x2="497" y2="2"/></g></svg>
35574\ No newline at end of file
35575diff --git a/jemalloc/include/jemalloc/internal/activity_callback.h b/jemalloc/include/jemalloc/internal/activity_callback.h
35576deleted file mode 100644
35577index 6c2e84e..0000000
35578--- a/jemalloc/include/jemalloc/internal/activity_callback.h
35579+++ /dev/null
35580@@ -1,23 +0,0 @@
35581-#ifndef JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H
35582-#define JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H
35583-
35584-/*
35585- * The callback to be executed "periodically", in response to some amount of
35586- * allocator activity.
35587- *
35588- * This callback need not be computing any sort of peak (although that's the
35589- * intended first use case), but we drive it from the peak counter, so it's
35590- * keeps things tidy to keep it here.
35591- *
35592- * The calls to this thunk get driven by the peak_event module.
35593- */
35594-#define ACTIVITY_CALLBACK_THUNK_INITIALIZER {NULL, NULL}
35595-typedef void (*activity_callback_t)(void *uctx, uint64_t allocated,
35596-    uint64_t deallocated);
35597-typedef struct activity_callback_thunk_s activity_callback_thunk_t;
35598-struct activity_callback_thunk_s {
35599-	activity_callback_t callback;
35600-	void *uctx;
35601-};
35602-
35603-#endif /* JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H */
35604diff --git a/jemalloc/include/jemalloc/internal/arena_externs.h b/jemalloc/include/jemalloc/internal/arena_externs.h
35605deleted file mode 100644
35606index e6fceaa..0000000
35607--- a/jemalloc/include/jemalloc/internal/arena_externs.h
35608+++ /dev/null
35609@@ -1,121 +0,0 @@
35610-#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
35611-#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H
35612-
35613-#include "jemalloc/internal/bin.h"
35614-#include "jemalloc/internal/div.h"
35615-#include "jemalloc/internal/extent_dss.h"
35616-#include "jemalloc/internal/hook.h"
35617-#include "jemalloc/internal/pages.h"
35618-#include "jemalloc/internal/stats.h"
35619-
35620-/*
35621- * When the amount of pages to be purged exceeds this amount, deferred purge
35622- * should happen.
35623- */
35624-#define ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD UINT64_C(1024)
35625-
35626-extern ssize_t opt_dirty_decay_ms;
35627-extern ssize_t opt_muzzy_decay_ms;
35628-
35629-extern percpu_arena_mode_t opt_percpu_arena;
35630-extern const char *percpu_arena_mode_names[];
35631-
35632-extern div_info_t arena_binind_div_info[SC_NBINS];
35633-
35634-extern malloc_mutex_t arenas_lock;
35635-extern emap_t arena_emap_global;
35636-
35637-extern size_t opt_oversize_threshold;
35638-extern size_t oversize_threshold;
35639-
35640-/*
35641- * arena_bin_offsets[binind] is the offset of the first bin shard for size class
35642- * binind.
35643- */
35644-extern uint32_t arena_bin_offsets[SC_NBINS];
35645-
35646-void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
35647-    unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
35648-    ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
35649-void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
35650-    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
35651-    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
35652-    bin_stats_data_t *bstats, arena_stats_large_t *lstats,
35653-    pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats);
35654-void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena);
35655-edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
35656-    size_t usize, size_t alignment, bool zero);
35657-void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
35658-    edata_t *edata);
35659-void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
35660-    edata_t *edata, size_t oldsize);
35661-void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
35662-    edata_t *edata, size_t oldsize);
35663-bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
35664-    ssize_t decay_ms);
35665-ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state);
35666-void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
35667-    bool all);
35668-uint64_t arena_time_until_deferred(tsdn_t *tsdn, arena_t *arena);
35669-void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena);
35670-void arena_reset(tsd_t *tsd, arena_t *arena);
35671-void arena_destroy(tsd_t *tsd, arena_t *arena);
35672-void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
35673-    cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
35674-    const unsigned nfill);
35675-
35676-void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
35677-    szind_t ind, bool zero);
35678-void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
35679-    size_t alignment, bool zero, tcache_t *tcache);
35680-void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize);
35681-void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
35682-    bool slow_path);
35683-void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab);
35684-
35685-void arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena,
35686-    edata_t *slab, bin_t *bin);
35687-void arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena,
35688-    edata_t *slab, bin_t *bin);
35689-void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
35690-bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
35691-    size_t extra, bool zero, size_t *newsize);
35692-void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
35693-    size_t size, size_t alignment, bool zero, tcache_t *tcache,
35694-    hook_ralloc_args_t *hook_args);
35695-dss_prec_t arena_dss_prec_get(arena_t *arena);
35696-ehooks_t *arena_get_ehooks(arena_t *arena);
35697-extent_hooks_t *arena_set_extent_hooks(tsd_t *tsd, arena_t *arena,
35698-    extent_hooks_t *extent_hooks);
35699-bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
35700-ssize_t arena_dirty_decay_ms_default_get(void);
35701-bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
35702-ssize_t arena_muzzy_decay_ms_default_get(void);
35703-bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
35704-bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
35705-    size_t *old_limit, size_t *new_limit);
35706-unsigned arena_nthreads_get(arena_t *arena, bool internal);
35707-void arena_nthreads_inc(arena_t *arena, bool internal);
35708-void arena_nthreads_dec(arena_t *arena, bool internal);
35709-arena_t *arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);
35710-bool arena_init_huge(void);
35711-bool arena_is_huge(unsigned arena_ind);
35712-arena_t *arena_choose_huge(tsd_t *tsd);
35713-bin_t *arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
35714-    unsigned *binshard);
35715-size_t arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
35716-    void **ptrs, size_t nfill, bool zero);
35717-bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa);
35718-void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
35719-void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
35720-void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
35721-void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
35722-void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
35723-void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
35724-void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
35725-void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
35726-void arena_prefork8(tsdn_t *tsdn, arena_t *arena);
35727-void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
35728-void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
35729-
35730-#endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */
35731diff --git a/jemalloc/include/jemalloc/internal/arena_inlines_a.h b/jemalloc/include/jemalloc/internal/arena_inlines_a.h
35732deleted file mode 100644
35733index 8568358..0000000
35734--- a/jemalloc/include/jemalloc/internal/arena_inlines_a.h
35735+++ /dev/null
35736@@ -1,24 +0,0 @@
35737-#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H
35738-#define JEMALLOC_INTERNAL_ARENA_INLINES_A_H
35739-
35740-static inline unsigned
35741-arena_ind_get(const arena_t *arena) {
35742-	return arena->ind;
35743-}
35744-
35745-static inline void
35746-arena_internal_add(arena_t *arena, size_t size) {
35747-	atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
35748-}
35749-
35750-static inline void
35751-arena_internal_sub(arena_t *arena, size_t size) {
35752-	atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED);
35753-}
35754-
35755-static inline size_t
35756-arena_internal_get(arena_t *arena) {
35757-	return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
35758-}
35759-
35760-#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */
35761diff --git a/jemalloc/include/jemalloc/internal/arena_inlines_b.h b/jemalloc/include/jemalloc/internal/arena_inlines_b.h
35762deleted file mode 100644
35763index fa81537..0000000
35764--- a/jemalloc/include/jemalloc/internal/arena_inlines_b.h
35765+++ /dev/null
35766@@ -1,550 +0,0 @@
35767-#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
35768-#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
35769-
35770-#include "jemalloc/internal/div.h"
35771-#include "jemalloc/internal/emap.h"
35772-#include "jemalloc/internal/jemalloc_internal_types.h"
35773-#include "jemalloc/internal/mutex.h"
35774-#include "jemalloc/internal/rtree.h"
35775-#include "jemalloc/internal/safety_check.h"
35776-#include "jemalloc/internal/sc.h"
35777-#include "jemalloc/internal/sz.h"
35778-#include "jemalloc/internal/ticker.h"
35779-
35780-static inline arena_t *
35781-arena_get_from_edata(edata_t *edata) {
35782-	return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(edata)],
35783-	    ATOMIC_RELAXED);
35784-}
35785-
35786-JEMALLOC_ALWAYS_INLINE arena_t *
35787-arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
35788-	if (arena != NULL) {
35789-		return arena;
35790-	}
35791-
35792-	/*
35793-	 * For huge allocations, use the dedicated huge arena if both are true:
35794-	 * 1) is using auto arena selection (i.e. arena == NULL), and 2) the
35795-	 * thread is not assigned to a manual arena.
35796-	 */
35797-	if (unlikely(size >= oversize_threshold)) {
35798-		arena_t *tsd_arena = tsd_arena_get(tsd);
35799-		if (tsd_arena == NULL || arena_is_auto(tsd_arena)) {
35800-			return arena_choose_huge(tsd);
35801-		}
35802-	}
35803-
35804-	return arena_choose(tsd, NULL);
35805-}
35806-
35807-JEMALLOC_ALWAYS_INLINE void
35808-arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
35809-    prof_info_t *prof_info, bool reset_recent) {
35810-	cassert(config_prof);
35811-	assert(ptr != NULL);
35812-	assert(prof_info != NULL);
35813-
35814-	edata_t *edata = NULL;
35815-	bool is_slab;
35816-
35817-	/* Static check. */
35818-	if (alloc_ctx == NULL) {
35819-		edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
35820-		    ptr);
35821-		is_slab = edata_slab_get(edata);
35822-	} else if (unlikely(!(is_slab = alloc_ctx->slab))) {
35823-		edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
35824-		    ptr);
35825-	}
35826-
35827-	if (unlikely(!is_slab)) {
35828-		/* edata must have been initialized at this point. */
35829-		assert(edata != NULL);
35830-		large_prof_info_get(tsd, edata, prof_info, reset_recent);
35831-	} else {
35832-		prof_info->alloc_tctx = (prof_tctx_t *)(uintptr_t)1U;
35833-		/*
35834-		 * No need to set other fields in prof_info; they will never be
35835-		 * accessed if (uintptr_t)alloc_tctx == (uintptr_t)1U.
35836-		 */
35837-	}
35838-}
35839-
35840-JEMALLOC_ALWAYS_INLINE void
35841-arena_prof_tctx_reset(tsd_t *tsd, const void *ptr,
35842-    emap_alloc_ctx_t *alloc_ctx) {
35843-	cassert(config_prof);
35844-	assert(ptr != NULL);
35845-
35846-	/* Static check. */
35847-	if (alloc_ctx == NULL) {
35848-		edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
35849-		    &arena_emap_global, ptr);
35850-		if (unlikely(!edata_slab_get(edata))) {
35851-			large_prof_tctx_reset(edata);
35852-		}
35853-	} else {
35854-		if (unlikely(!alloc_ctx->slab)) {
35855-			edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
35856-			    &arena_emap_global, ptr);
35857-			large_prof_tctx_reset(edata);
35858-		}
35859-	}
35860-}
35861-
35862-JEMALLOC_ALWAYS_INLINE void
35863-arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
35864-	cassert(config_prof);
35865-	assert(ptr != NULL);
35866-
35867-	edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
35868-	    ptr);
35869-	assert(!edata_slab_get(edata));
35870-
35871-	large_prof_tctx_reset(edata);
35872-}
35873-
35874-JEMALLOC_ALWAYS_INLINE void
35875-arena_prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx,
35876-    size_t size) {
35877-	cassert(config_prof);
35878-
35879-	assert(!edata_slab_get(edata));
35880-	large_prof_info_set(edata, tctx, size);
35881-}
35882-
35883-JEMALLOC_ALWAYS_INLINE void
35884-arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
35885-	if (unlikely(tsdn_null(tsdn))) {
35886-		return;
35887-	}
35888-	tsd_t *tsd = tsdn_tsd(tsdn);
35889-	/*
35890-	 * We use the ticker_geom_t to avoid having per-arena state in the tsd.
35891-	 * Instead of having a countdown-until-decay timer running for every
35892-	 * arena in every thread, we flip a coin once per tick, whose
35893-	 * probability of coming up heads is 1/nticks; this is effectively the
35894-	 * operation of the ticker_geom_t.  Each arena has the same chance of a
35895-	 * coinflip coming up heads (1/ARENA_DECAY_NTICKS_PER_UPDATE), so we can
35896-	 * use a single ticker for all of them.
35897-	 */
35898-	ticker_geom_t *decay_ticker = tsd_arena_decay_tickerp_get(tsd);
35899-	uint64_t *prng_state = tsd_prng_statep_get(tsd);
35900-	if (unlikely(ticker_geom_ticks(decay_ticker, prng_state, nticks))) {
35901-		arena_decay(tsdn, arena, false, false);
35902-	}
35903-}
35904-
35905-JEMALLOC_ALWAYS_INLINE void
35906-arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
35907-	arena_decay_ticks(tsdn, arena, 1);
35908-}
35909-
35910-JEMALLOC_ALWAYS_INLINE void *
35911-arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
35912-    tcache_t *tcache, bool slow_path) {
35913-	assert(!tsdn_null(tsdn) || tcache == NULL);
35914-
35915-	if (likely(tcache != NULL)) {
35916-		if (likely(size <= SC_SMALL_MAXCLASS)) {
35917-			return tcache_alloc_small(tsdn_tsd(tsdn), arena,
35918-			    tcache, size, ind, zero, slow_path);
35919-		}
35920-		if (likely(size <= tcache_maxclass)) {
35921-			return tcache_alloc_large(tsdn_tsd(tsdn), arena,
35922-			    tcache, size, ind, zero, slow_path);
35923-		}
35924-		/* (size > tcache_maxclass) case falls through. */
35925-		assert(size > tcache_maxclass);
35926-	}
35927-
35928-	return arena_malloc_hard(tsdn, arena, size, ind, zero);
35929-}
35930-
35931-JEMALLOC_ALWAYS_INLINE arena_t *
35932-arena_aalloc(tsdn_t *tsdn, const void *ptr) {
35933-	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
35934-	unsigned arena_ind = edata_arena_ind_get(edata);
35935-	return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_RELAXED);
35936-}
35937-
35938-JEMALLOC_ALWAYS_INLINE size_t
35939-arena_salloc(tsdn_t *tsdn, const void *ptr) {
35940-	assert(ptr != NULL);
35941-	emap_alloc_ctx_t alloc_ctx;
35942-	emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
35943-	assert(alloc_ctx.szind != SC_NSIZES);
35944-
35945-	return sz_index2size(alloc_ctx.szind);
35946-}
35947-
35948-JEMALLOC_ALWAYS_INLINE size_t
35949-arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
35950-	/*
35951-	 * Return 0 if ptr is not within an extent managed by jemalloc.  This
35952-	 * function has two extra costs relative to isalloc():
35953-	 * - The rtree calls cannot claim to be dependent lookups, which induces
35954-	 *   rtree lookup load dependencies.
35955-	 * - The lookup may fail, so there is an extra branch to check for
35956-	 *   failure.
35957-	 */
35958-
35959-	emap_full_alloc_ctx_t full_alloc_ctx;
35960-	bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global,
35961-	    ptr, &full_alloc_ctx);
35962-	if (missing) {
35963-		return 0;
35964-	}
35965-
35966-	if (full_alloc_ctx.edata == NULL) {
35967-		return 0;
35968-	}
35969-	assert(edata_state_get(full_alloc_ctx.edata) == extent_state_active);
35970-	/* Only slab members should be looked up via interior pointers. */
35971-	assert(edata_addr_get(full_alloc_ctx.edata) == ptr
35972-	    || edata_slab_get(full_alloc_ctx.edata));
35973-
35974-	assert(full_alloc_ctx.szind != SC_NSIZES);
35975-
35976-	return sz_index2size(full_alloc_ctx.szind);
35977-}
35978-
35979-JEMALLOC_ALWAYS_INLINE bool
35980-large_dalloc_safety_checks(edata_t *edata, void *ptr, szind_t szind) {
35981-	if (!config_opt_safety_checks) {
35982-		return false;
35983-	}
35984-
35985-	/*
35986-	 * Eagerly detect double free and sized dealloc bugs for large sizes.
35987-	 * The cost is low enough (as edata will be accessed anyway) to be
35988-	 * enabled all the time.
35989-	 */
35990-	if (unlikely(edata == NULL ||
35991-	    edata_state_get(edata) != extent_state_active)) {
35992-		safety_check_fail("Invalid deallocation detected: "
35993-		    "pages being freed (%p) not currently active, "
35994-		    "possibly caused by double free bugs.",
35995-		    (uintptr_t)edata_addr_get(edata));
35996-		return true;
35997-	}
35998-	size_t input_size = sz_index2size(szind);
35999-	if (unlikely(input_size != edata_usize_get(edata))) {
36000-		safety_check_fail_sized_dealloc(/* current_dealloc */ true, ptr,
36001-		    /* true_size */ edata_usize_get(edata), input_size);
36002-		return true;
36003-	}
36004-
36005-	return false;
36006-}
36007-
36008-static inline void
36009-arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
36010-	if (config_prof && unlikely(szind < SC_NBINS)) {
36011-		arena_dalloc_promoted(tsdn, ptr, NULL, true);
36012-	} else {
36013-		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
36014-		    ptr);
36015-		if (large_dalloc_safety_checks(edata, ptr, szind)) {
36016-			/* See the comment in isfree. */
36017-			return;
36018-		}
36019-		large_dalloc(tsdn, edata);
36020-	}
36021-}
36022-
36023-static inline void
36024-arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
36025-	assert(ptr != NULL);
36026-
36027-	emap_alloc_ctx_t alloc_ctx;
36028-	emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
36029-
36030-	if (config_debug) {
36031-		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
36032-		    ptr);
36033-		assert(alloc_ctx.szind == edata_szind_get(edata));
36034-		assert(alloc_ctx.szind < SC_NSIZES);
36035-		assert(alloc_ctx.slab == edata_slab_get(edata));
36036-	}
36037-
36038-	if (likely(alloc_ctx.slab)) {
36039-		/* Small allocation. */
36040-		arena_dalloc_small(tsdn, ptr);
36041-	} else {
36042-		arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
36043-	}
36044-}
36045-
36046-JEMALLOC_ALWAYS_INLINE void
36047-arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
36048-    bool slow_path) {
36049-	if (szind < nhbins) {
36050-		if (config_prof && unlikely(szind < SC_NBINS)) {
36051-			arena_dalloc_promoted(tsdn, ptr, tcache, slow_path);
36052-		} else {
36053-			tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind,
36054-			    slow_path);
36055-		}
36056-	} else {
36057-		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
36058-		    ptr);
36059-		if (large_dalloc_safety_checks(edata, ptr, szind)) {
36060-			/* See the comment in isfree. */
36061-			return;
36062-		}
36063-		large_dalloc(tsdn, edata);
36064-	}
36065-}
36066-
36067-JEMALLOC_ALWAYS_INLINE void
36068-arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
36069-    emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
36070-	assert(!tsdn_null(tsdn) || tcache == NULL);
36071-	assert(ptr != NULL);
36072-
36073-	if (unlikely(tcache == NULL)) {
36074-		arena_dalloc_no_tcache(tsdn, ptr);
36075-		return;
36076-	}
36077-
36078-	emap_alloc_ctx_t alloc_ctx;
36079-	if (caller_alloc_ctx != NULL) {
36080-		alloc_ctx = *caller_alloc_ctx;
36081-	} else {
36082-		util_assume(!tsdn_null(tsdn));
36083-		emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
36084-		    &alloc_ctx);
36085-	}
36086-
36087-	if (config_debug) {
36088-		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
36089-		    ptr);
36090-		assert(alloc_ctx.szind == edata_szind_get(edata));
36091-		assert(alloc_ctx.szind < SC_NSIZES);
36092-		assert(alloc_ctx.slab == edata_slab_get(edata));
36093-	}
36094-
36095-	if (likely(alloc_ctx.slab)) {
36096-		/* Small allocation. */
36097-		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
36098-		    alloc_ctx.szind, slow_path);
36099-	} else {
36100-		arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
36101-		    slow_path);
36102-	}
36103-}
36104-
36105-static inline void
36106-arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
36107-	assert(ptr != NULL);
36108-	assert(size <= SC_LARGE_MAXCLASS);
36109-
36110-	emap_alloc_ctx_t alloc_ctx;
36111-	if (!config_prof || !opt_prof) {
36112-		/*
36113-		 * There is no risk of being confused by a promoted sampled
36114-		 * object, so base szind and slab on the given size.
36115-		 */
36116-		alloc_ctx.szind = sz_size2index(size);
36117-		alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
36118-	}
36119-
36120-	if ((config_prof && opt_prof) || config_debug) {
36121-		emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
36122-		    &alloc_ctx);
36123-
36124-		assert(alloc_ctx.szind == sz_size2index(size));
36125-		assert((config_prof && opt_prof)
36126-		    || alloc_ctx.slab == (alloc_ctx.szind < SC_NBINS));
36127-
36128-		if (config_debug) {
36129-			edata_t *edata = emap_edata_lookup(tsdn,
36130-			    &arena_emap_global, ptr);
36131-			assert(alloc_ctx.szind == edata_szind_get(edata));
36132-			assert(alloc_ctx.slab == edata_slab_get(edata));
36133-		}
36134-	}
36135-
36136-	if (likely(alloc_ctx.slab)) {
36137-		/* Small allocation. */
36138-		arena_dalloc_small(tsdn, ptr);
36139-	} else {
36140-		arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
36141-	}
36142-}
36143-
36144-JEMALLOC_ALWAYS_INLINE void
36145-arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
36146-    emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
36147-	assert(!tsdn_null(tsdn) || tcache == NULL);
36148-	assert(ptr != NULL);
36149-	assert(size <= SC_LARGE_MAXCLASS);
36150-
36151-	if (unlikely(tcache == NULL)) {
36152-		arena_sdalloc_no_tcache(tsdn, ptr, size);
36153-		return;
36154-	}
36155-
36156-	emap_alloc_ctx_t alloc_ctx;
36157-	if (config_prof && opt_prof) {
36158-		if (caller_alloc_ctx == NULL) {
36159-			/* Uncommon case and should be a static check. */
36160-			emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
36161-			    &alloc_ctx);
36162-			assert(alloc_ctx.szind == sz_size2index(size));
36163-		} else {
36164-			alloc_ctx = *caller_alloc_ctx;
36165-		}
36166-	} else {
36167-		/*
36168-		 * There is no risk of being confused by a promoted sampled
36169-		 * object, so base szind and slab on the given size.
36170-		 */
36171-		alloc_ctx.szind = sz_size2index(size);
36172-		alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
36173-	}
36174-
36175-	if (config_debug) {
36176-		edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
36177-		    ptr);
36178-		assert(alloc_ctx.szind == edata_szind_get(edata));
36179-		assert(alloc_ctx.slab == edata_slab_get(edata));
36180-	}
36181-
36182-	if (likely(alloc_ctx.slab)) {
36183-		/* Small allocation. */
36184-		tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
36185-		    alloc_ctx.szind, slow_path);
36186-	} else {
36187-		arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
36188-		    slow_path);
36189-	}
36190-}
36191-
36192-static inline void
36193-arena_cache_oblivious_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
36194-    size_t alignment) {
36195-	assert(edata_base_get(edata) == edata_addr_get(edata));
36196-
36197-	if (alignment < PAGE) {
36198-		unsigned lg_range = LG_PAGE -
36199-		    lg_floor(CACHELINE_CEILING(alignment));
36200-		size_t r;
36201-		if (!tsdn_null(tsdn)) {
36202-			tsd_t *tsd = tsdn_tsd(tsdn);
36203-			r = (size_t)prng_lg_range_u64(
36204-			    tsd_prng_statep_get(tsd), lg_range);
36205-		} else {
36206-			uint64_t stack_value = (uint64_t)(uintptr_t)&r;
36207-			r = (size_t)prng_lg_range_u64(&stack_value, lg_range);
36208-		}
36209-		uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
36210-		    lg_range);
36211-		edata->e_addr = (void *)((uintptr_t)edata->e_addr +
36212-		    random_offset);
36213-		assert(ALIGNMENT_ADDR2BASE(edata->e_addr, alignment) ==
36214-		    edata->e_addr);
36215-	}
36216-}
36217-
36218-/*
36219- * The dalloc bin info contains just the information that the common paths need
36220- * during tcache flushes.  By force-inlining these paths, and using local copies
36221- * of data (so that the compiler knows it's constant), we avoid a whole bunch of
36222- * redundant loads and stores by leaving this information in registers.
36223- */
36224-typedef struct arena_dalloc_bin_locked_info_s arena_dalloc_bin_locked_info_t;
36225-struct arena_dalloc_bin_locked_info_s {
36226-	div_info_t div_info;
36227-	uint32_t nregs;
36228-	uint64_t ndalloc;
36229-};
36230-
36231-JEMALLOC_ALWAYS_INLINE size_t
36232-arena_slab_regind(arena_dalloc_bin_locked_info_t *info, szind_t binind,
36233-    edata_t *slab, const void *ptr) {
36234-	size_t diff, regind;
36235-
36236-	/* Freeing a pointer outside the slab can cause assertion failure. */
36237-	assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(slab));
36238-	assert((uintptr_t)ptr < (uintptr_t)edata_past_get(slab));
36239-	/* Freeing an interior pointer can cause assertion failure. */
36240-	assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)) %
36241-	    (uintptr_t)bin_infos[binind].reg_size == 0);
36242-
36243-	diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab));
36244-
36245-	/* Avoid doing division with a variable divisor. */
36246-	regind = div_compute(&info->div_info, diff);
36247-
36248-	assert(regind < bin_infos[binind].nregs);
36249-
36250-	return regind;
36251-}
36252-
36253-JEMALLOC_ALWAYS_INLINE void
36254-arena_dalloc_bin_locked_begin(arena_dalloc_bin_locked_info_t *info,
36255-    szind_t binind) {
36256-	info->div_info = arena_binind_div_info[binind];
36257-	info->nregs = bin_infos[binind].nregs;
36258-	info->ndalloc = 0;
36259-}
36260-
36261-/*
36262- * Does the deallocation work associated with freeing a single pointer (a
36263- * "step") in between a arena_dalloc_bin_locked begin and end call.
36264- *
36265- * Returns true if arena_slab_dalloc must be called on slab.  Doesn't do
36266- * stats updates, which happen during finish (this lets running counts get left
36267- * in a register).
36268- */
36269-JEMALLOC_ALWAYS_INLINE bool
36270-arena_dalloc_bin_locked_step(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
36271-    arena_dalloc_bin_locked_info_t *info, szind_t binind, edata_t *slab,
36272-    void *ptr) {
36273-	const bin_info_t *bin_info = &bin_infos[binind];
36274-	size_t regind = arena_slab_regind(info, binind, slab, ptr);
36275-	slab_data_t *slab_data = edata_slab_data_get(slab);
36276-
36277-	assert(edata_nfree_get(slab) < bin_info->nregs);
36278-	/* Freeing an unallocated pointer can cause assertion failure. */
36279-	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
36280-
36281-	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
36282-	edata_nfree_inc(slab);
36283-
36284-	if (config_stats) {
36285-		info->ndalloc++;
36286-	}
36287-
36288-	unsigned nfree = edata_nfree_get(slab);
36289-	if (nfree == bin_info->nregs) {
36290-		arena_dalloc_bin_locked_handle_newly_empty(tsdn, arena, slab,
36291-		    bin);
36292-		return true;
36293-	} else if (nfree == 1 && slab != bin->slabcur) {
36294-		arena_dalloc_bin_locked_handle_newly_nonempty(tsdn, arena, slab,
36295-		    bin);
36296-	}
36297-	return false;
36298-}
36299-
36300-JEMALLOC_ALWAYS_INLINE void
36301-arena_dalloc_bin_locked_finish(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
36302-    arena_dalloc_bin_locked_info_t *info) {
36303-	if (config_stats) {
36304-		bin->stats.ndalloc += info->ndalloc;
36305-		assert(bin->stats.curregs >= (size_t)info->ndalloc);
36306-		bin->stats.curregs -= (size_t)info->ndalloc;
36307-	}
36308-}
36309-
36310-static inline bin_t *
36311-arena_get_bin(arena_t *arena, szind_t binind, unsigned binshard) {
36312-	bin_t *shard0 = (bin_t *)((uintptr_t)arena + arena_bin_offsets[binind]);
36313-	return shard0 + binshard;
36314-}
36315-
36316-#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */
36317diff --git a/jemalloc/include/jemalloc/internal/arena_stats.h b/jemalloc/include/jemalloc/internal/arena_stats.h
36318deleted file mode 100644
36319index 15f1d34..0000000
36320--- a/jemalloc/include/jemalloc/internal/arena_stats.h
36321+++ /dev/null
36322@@ -1,114 +0,0 @@
36323-#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H
36324-#define JEMALLOC_INTERNAL_ARENA_STATS_H
36325-
36326-#include "jemalloc/internal/atomic.h"
36327-#include "jemalloc/internal/lockedint.h"
36328-#include "jemalloc/internal/mutex.h"
36329-#include "jemalloc/internal/mutex_prof.h"
36330-#include "jemalloc/internal/pa.h"
36331-#include "jemalloc/internal/sc.h"
36332-
36333-JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
36334-
36335-typedef struct arena_stats_large_s arena_stats_large_t;
36336-struct arena_stats_large_s {
36337-	/*
36338-	 * Total number of allocation/deallocation requests served directly by
36339-	 * the arena.
36340-	 */
36341-	locked_u64_t	nmalloc;
36342-	locked_u64_t	ndalloc;
36343-
36344-	/*
36345-	 * Number of allocation requests that correspond to this size class.
36346-	 * This includes requests served by tcache, though tcache only
36347-	 * periodically merges into this counter.
36348-	 */
36349-	locked_u64_t	nrequests; /* Partially derived. */
36350-	/*
36351-	 * Number of tcache fills / flushes for large (similarly, periodically
36352-	 * merged).  Note that there is no large tcache batch-fill currently
36353-	 * (i.e. only fill 1 at a time); however flush may be batched.
36354-	 */
36355-	locked_u64_t	nfills; /* Partially derived. */
36356-	locked_u64_t	nflushes; /* Partially derived. */
36357-
36358-	/* Current number of allocations of this size class. */
36359-	size_t		curlextents; /* Derived. */
36360-};
36361-
36362-/*
36363- * Arena stats.  Note that fields marked "derived" are not directly maintained
36364- * within the arena code; rather their values are derived during stats merge
36365- * requests.
36366- */
36367-typedef struct arena_stats_s arena_stats_t;
36368-struct arena_stats_s {
36369-	LOCKEDINT_MTX_DECLARE(mtx)
36370-
36371-	/*
36372-	 * resident includes the base stats -- that's why it lives here and not
36373-	 * in pa_shard_stats_t.
36374-	 */
36375-	size_t			base; /* Derived. */
36376-	size_t			resident; /* Derived. */
36377-	size_t			metadata_thp; /* Derived. */
36378-	size_t			mapped; /* Derived. */
36379-
36380-	atomic_zu_t		internal;
36381-
36382-	size_t			allocated_large; /* Derived. */
36383-	uint64_t		nmalloc_large; /* Derived. */
36384-	uint64_t		ndalloc_large; /* Derived. */
36385-	uint64_t		nfills_large; /* Derived. */
36386-	uint64_t		nflushes_large; /* Derived. */
36387-	uint64_t		nrequests_large; /* Derived. */
36388-
36389-	/*
36390-	 * The stats logically owned by the pa_shard in the same arena.  This
36391-	 * lives here only because it's convenient for the purposes of the ctl
36392-	 * module -- it only knows about the single arena_stats.
36393-	 */
36394-	pa_shard_stats_t	pa_shard_stats;
36395-
36396-	/* Number of bytes cached in tcache associated with this arena. */
36397-	size_t			tcache_bytes; /* Derived. */
36398-	size_t			tcache_stashed_bytes; /* Derived. */
36399-
36400-	mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
36401-
36402-	/* One element for each large size class. */
36403-	arena_stats_large_t	lstats[SC_NSIZES - SC_NBINS];
36404-
36405-	/* Arena uptime. */
36406-	nstime_t		uptime;
36407-};
36408-
36409-static inline bool
36410-arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
36411-	if (config_debug) {
36412-		for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
36413-			assert(((char *)arena_stats)[i] == 0);
36414-		}
36415-	}
36416-	if (LOCKEDINT_MTX_INIT(arena_stats->mtx, "arena_stats",
36417-	    WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
36418-		return true;
36419-	}
36420-	/* Memory is zeroed, so there is no need to clear stats. */
36421-	return false;
36422-}
36423-
36424-static inline void
36425-arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
36426-    szind_t szind, uint64_t nrequests) {
36427-	LOCKEDINT_MTX_LOCK(tsdn, arena_stats->mtx);
36428-	arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS];
36429-	locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
36430-	    &lstats->nrequests, nrequests);
36431-	locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
36432-	    &lstats->nflushes, 1);
36433-	LOCKEDINT_MTX_UNLOCK(tsdn, arena_stats->mtx);
36434-}
36435-
36436-#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */
36437diff --git a/jemalloc/include/jemalloc/internal/arena_structs.h b/jemalloc/include/jemalloc/internal/arena_structs.h
36438deleted file mode 100644
36439index e2a5a40..0000000
36440--- a/jemalloc/include/jemalloc/internal/arena_structs.h
36441+++ /dev/null
36442@@ -1,101 +0,0 @@
36443-#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_H
36444-#define JEMALLOC_INTERNAL_ARENA_STRUCTS_H
36445-
36446-#include "jemalloc/internal/arena_stats.h"
36447-#include "jemalloc/internal/atomic.h"
36448-#include "jemalloc/internal/bin.h"
36449-#include "jemalloc/internal/bitmap.h"
36450-#include "jemalloc/internal/counter.h"
36451-#include "jemalloc/internal/ecache.h"
36452-#include "jemalloc/internal/edata_cache.h"
36453-#include "jemalloc/internal/extent_dss.h"
36454-#include "jemalloc/internal/jemalloc_internal_types.h"
36455-#include "jemalloc/internal/mutex.h"
36456-#include "jemalloc/internal/nstime.h"
36457-#include "jemalloc/internal/pa.h"
36458-#include "jemalloc/internal/ql.h"
36459-#include "jemalloc/internal/sc.h"
36460-#include "jemalloc/internal/ticker.h"
36461-
36462-struct arena_s {
36463-	/*
36464-	 * Number of threads currently assigned to this arena.  Each thread has
36465-	 * two distinct assignments, one for application-serving allocation, and
36466-	 * the other for internal metadata allocation.  Internal metadata must
36467-	 * not be allocated from arenas explicitly created via the arenas.create
36468-	 * mallctl, because the arena.<i>.reset mallctl indiscriminately
36469-	 * discards all allocations for the affected arena.
36470-	 *
36471-	 *   0: Application allocation.
36472-	 *   1: Internal metadata allocation.
36473-	 *
36474-	 * Synchronization: atomic.
36475-	 */
36476-	atomic_u_t		nthreads[2];
36477-
36478-	/* Next bin shard for binding new threads. Synchronization: atomic. */
36479-	atomic_u_t		binshard_next;
36480-
36481-	/*
36482-	 * When percpu_arena is enabled, to amortize the cost of reading /
36483-	 * updating the current CPU id, track the most recent thread accessing
36484-	 * this arena, and only read CPU if there is a mismatch.
36485-	 */
36486-	tsdn_t		*last_thd;
36487-
36488-	/* Synchronization: internal. */
36489-	arena_stats_t		stats;
36490-
36491-	/*
36492-	 * Lists of tcaches and cache_bin_array_descriptors for extant threads
36493-	 * associated with this arena.  Stats from these are merged
36494-	 * incrementally, and at exit if opt_stats_print is enabled.
36495-	 *
36496-	 * Synchronization: tcache_ql_mtx.
36497-	 */
36498-	ql_head(tcache_slow_t)			tcache_ql;
36499-	ql_head(cache_bin_array_descriptor_t)	cache_bin_array_descriptor_ql;
36500-	malloc_mutex_t				tcache_ql_mtx;
36501-
36502-	/*
36503-	 * Represents a dss_prec_t, but atomically.
36504-	 *
36505-	 * Synchronization: atomic.
36506-	 */
36507-	atomic_u_t		dss_prec;
36508-
36509-	/*
36510-	 * Extant large allocations.
36511-	 *
36512-	 * Synchronization: large_mtx.
36513-	 */
36514-	edata_list_active_t	large;
36515-	/* Synchronizes all large allocation/update/deallocation. */
36516-	malloc_mutex_t		large_mtx;
36517-
36518-	/* The page-level allocator shard this arena uses. */
36519-	pa_shard_t		pa_shard;
36520-
36521-	/*
36522-	 * A cached copy of base->ind.  This can get accessed on hot paths;
36523-	 * looking it up in base requires an extra pointer hop / cache miss.
36524-	 */
36525-	unsigned ind;
36526-
36527-	/*
36528-	 * Base allocator, from which arena metadata are allocated.
36529-	 *
36530-	 * Synchronization: internal.
36531-	 */
36532-	base_t			*base;
36533-	/* Used to determine uptime.  Read-only after initialization. */
36534-	nstime_t		create_time;
36535-
36536-	/*
36537-	 * The arena is allocated alongside its bins; really this is a
36538-	 * dynamically sized array determined by the binshard settings.
36539-	 */
36540-	bin_t			bins[0];
36541-};
36542-
36543-#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_H */
36544diff --git a/jemalloc/include/jemalloc/internal/arena_types.h b/jemalloc/include/jemalloc/internal/arena_types.h
36545deleted file mode 100644
36546index d0e1291..0000000
36547--- a/jemalloc/include/jemalloc/internal/arena_types.h
36548+++ /dev/null
36549@@ -1,58 +0,0 @@
36550-#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H
36551-#define JEMALLOC_INTERNAL_ARENA_TYPES_H
36552-
36553-#include "jemalloc/internal/sc.h"
36554-
36555-/* Default decay times in milliseconds. */
36556-#define DIRTY_DECAY_MS_DEFAULT	ZD(10 * 1000)
36557-#define MUZZY_DECAY_MS_DEFAULT	(0)
36558-/* Number of event ticks between time checks. */
36559-#define ARENA_DECAY_NTICKS_PER_UPDATE	1000
36560-
36561-typedef struct arena_decay_s arena_decay_t;
36562-typedef struct arena_s arena_t;
36563-
36564-typedef enum {
36565-	percpu_arena_mode_names_base   = 0, /* Used for options processing. */
36566-
36567-	/*
36568-	 * *_uninit are used only during bootstrapping, and must correspond
36569-	 * to initialized variant plus percpu_arena_mode_enabled_base.
36570-	 */
36571-	percpu_arena_uninit            = 0,
36572-	per_phycpu_arena_uninit        = 1,
36573-
36574-	/* All non-disabled modes must come after percpu_arena_disabled. */
36575-	percpu_arena_disabled          = 2,
36576-
36577-	percpu_arena_mode_names_limit  = 3, /* Used for options processing. */
36578-	percpu_arena_mode_enabled_base = 3,
36579-
36580-	percpu_arena                   = 3,
36581-	per_phycpu_arena               = 4  /* Hyper threads share arena. */
36582-} percpu_arena_mode_t;
36583-
36584-#define PERCPU_ARENA_ENABLED(m)	((m) >= percpu_arena_mode_enabled_base)
36585-#define PERCPU_ARENA_DEFAULT	percpu_arena_disabled
36586-
36587-/*
36588- * When allocation_size >= oversize_threshold, use the dedicated huge arena
36589- * (unless an arena index has been explicitly specified).  0 disables the feature.
36590- */
36591-#define OVERSIZE_THRESHOLD_DEFAULT (8 << 20)
36592-
36593-struct arena_config_s {
36594-	/* extent hooks to be used for the arena */
36595-	extent_hooks_t *extent_hooks;
36596-
36597-	/*
36598-	 * Use extent hooks for metadata (base) allocations when true.
36599-	 */
36600-	bool metadata_use_hooks;
36601-};
36602-
36603-typedef struct arena_config_s arena_config_t;
36604-
36605-extern const arena_config_t arena_config_default;
36606-
36607-#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */
36608diff --git a/jemalloc/include/jemalloc/internal/assert.h b/jemalloc/include/jemalloc/internal/assert.h
36609deleted file mode 100644
36610index be4d45b..0000000
36611--- a/jemalloc/include/jemalloc/internal/assert.h
36612+++ /dev/null
36613@@ -1,56 +0,0 @@
36614-#include "jemalloc/internal/malloc_io.h"
36615-#include "jemalloc/internal/util.h"
36616-
36617-/*
36618- * Define a custom assert() in order to reduce the chances of deadlock during
36619- * assertion failure.
36620- */
36621-#ifndef assert
36622-#define assert(e) do {							\
36623-	if (unlikely(config_debug && !(e))) {				\
36624-		malloc_printf(						\
36625-		    "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n",	\
36626-		    __FILE__, __LINE__, #e);				\
36627-		abort();						\
36628-	}								\
36629-} while (0)
36630-#endif
36631-
36632-#ifndef not_reached
36633-#define not_reached() do {						\
36634-	if (config_debug) {						\
36635-		malloc_printf(						\
36636-		    "<jemalloc>: %s:%d: Unreachable code reached\n",	\
36637-		    __FILE__, __LINE__);				\
36638-		abort();						\
36639-	}								\
36640-	unreachable();							\
36641-} while (0)
36642-#endif
36643-
36644-#ifndef not_implemented
36645-#define not_implemented() do {						\
36646-	if (config_debug) {						\
36647-		malloc_printf("<jemalloc>: %s:%d: Not implemented\n",	\
36648-		    __FILE__, __LINE__);				\
36649-		abort();						\
36650-	}								\
36651-} while (0)
36652-#endif
36653-
36654-#ifndef assert_not_implemented
36655-#define assert_not_implemented(e) do {					\
36656-	if (unlikely(config_debug && !(e))) {				\
36657-		not_implemented();					\
36658-	}								\
36659-} while (0)
36660-#endif
36661-
36662-/* Use to assert a particular configuration, e.g., cassert(config_debug). */
36663-#ifndef cassert
36664-#define cassert(c) do {							\
36665-	if (unlikely(!(c))) {						\
36666-		not_reached();						\
36667-	}								\
36668-} while (0)
36669-#endif
36670diff --git a/jemalloc/include/jemalloc/internal/atomic.h b/jemalloc/include/jemalloc/internal/atomic.h
36671deleted file mode 100644
36672index c0f7312..0000000
36673--- a/jemalloc/include/jemalloc/internal/atomic.h
36674+++ /dev/null
36675@@ -1,107 +0,0 @@
36676-#ifndef JEMALLOC_INTERNAL_ATOMIC_H
36677-#define JEMALLOC_INTERNAL_ATOMIC_H
36678-
36679-#define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE
36680-
36681-#define JEMALLOC_U8_ATOMICS
36682-#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS)
36683-#  include "jemalloc/internal/atomic_gcc_atomic.h"
36684-#  if !defined(JEMALLOC_GCC_U8_ATOMIC_ATOMICS)
36685-#    undef JEMALLOC_U8_ATOMICS
36686-#  endif
36687-#elif defined(JEMALLOC_GCC_SYNC_ATOMICS)
36688-#  include "jemalloc/internal/atomic_gcc_sync.h"
36689-#  if !defined(JEMALLOC_GCC_U8_SYNC_ATOMICS)
36690-#    undef JEMALLOC_U8_ATOMICS
36691-#  endif
36692-#elif defined(_MSC_VER)
36693-#  include "jemalloc/internal/atomic_msvc.h"
36694-#elif defined(JEMALLOC_C11_ATOMICS)
36695-#  include "jemalloc/internal/atomic_c11.h"
36696-#else
36697-#  error "Don't have atomics implemented on this platform."
36698-#endif
36699-
36700-/*
36701- * This header gives more or less a backport of C11 atomics. The user can write
36702- * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate
36703- * counterparts of the C11 atomic functions for type, as so:
36704- *   JEMALLOC_GENERATE_ATOMICS(int *, pi, 3);
36705- * and then write things like:
36706- *   int *some_ptr;
36707- *   atomic_pi_t atomic_ptr_to_int;
36708- *   atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED);
36709- *   int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL);
36710- *   assert(some_ptr == prev_value);
36711- * and expect things to work in the obvious way.
36712- *
36713- * Also included (with naming differences to avoid conflicts with the standard
36714- * library):
36715- *   atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence).
36716- *   ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT).
36717- */
36718-
36719-/*
36720- * Pure convenience, so that we don't have to type "atomic_memory_order_"
36721- * quite so often.
36722- */
36723-#define ATOMIC_RELAXED atomic_memory_order_relaxed
36724-#define ATOMIC_ACQUIRE atomic_memory_order_acquire
36725-#define ATOMIC_RELEASE atomic_memory_order_release
36726-#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
36727-#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
36728-
36729-/*
36730- * Another convenience -- simple atomic helper functions.
36731- */
36732-#define JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(type, short_type,	\
36733-    lg_size)								\
36734-    JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size)		\
36735-    ATOMIC_INLINE void							\
36736-    atomic_load_add_store_##short_type(atomic_##short_type##_t *a,	\
36737-	type inc) {							\
36738-	    type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED);	\
36739-	    type newval = oldval + inc;					\
36740-	    atomic_store_##short_type(a, newval, ATOMIC_RELAXED);	\
36741-	}								\
36742-    ATOMIC_INLINE void							\
36743-    atomic_load_sub_store_##short_type(atomic_##short_type##_t *a,	\
36744-	type inc) {							\
36745-	    type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED);	\
36746-	    type newval = oldval - inc;					\
36747-	    atomic_store_##short_type(a, newval, ATOMIC_RELAXED);	\
36748-	}
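/*
 * Illustrative expansion (a sketch, not part of the original header): for the
 * unsigned instantiation below, the helper above generates roughly the
 * following.  Note that the relaxed load/add/store sequence is not an atomic
 * read-modify-write; it is only safe for counters with a single writer or
 * counters protected by an external lock.
 */
#if 0
ATOMIC_INLINE void
atomic_load_add_store_u(atomic_u_t *a, unsigned inc) {
	unsigned oldval = atomic_load_u(a, ATOMIC_RELAXED);
	unsigned newval = oldval + inc;
	atomic_store_u(a, newval, ATOMIC_RELAXED);
}
#endif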
36749-
36750-/*
36751- * Not all platforms have 64-bit atomics.  If we do, this #define exposes that
36752- * fact.
36753- */
36754-#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
36755-#  define JEMALLOC_ATOMIC_U64
36756-#endif
36757-
36758-JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
36759-
36760-/*
36761- * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only
36762- * platform that actually needs to know the size, MSVC.
36763- */
36764-JEMALLOC_GENERATE_ATOMICS(bool, b, 0)
36765-
36766-JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
36767-
36768-JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
36769-
36770-JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
36771-
36772-JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint8_t, u8, 0)
36773-
36774-JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint32_t, u32, 2)
36775-
36776-#ifdef JEMALLOC_ATOMIC_U64
36777-JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint64_t, u64, 3)
36778-#endif
36779-
36780-#undef ATOMIC_INLINE
36781-
36782-#endif /* JEMALLOC_INTERNAL_ATOMIC_H */
36783diff --git a/jemalloc/include/jemalloc/internal/atomic_c11.h b/jemalloc/include/jemalloc/internal/atomic_c11.h
36784deleted file mode 100644
36785index a5f9313..0000000
36786--- a/jemalloc/include/jemalloc/internal/atomic_c11.h
36787+++ /dev/null
36788@@ -1,97 +0,0 @@
36789-#ifndef JEMALLOC_INTERNAL_ATOMIC_C11_H
36790-#define JEMALLOC_INTERNAL_ATOMIC_C11_H
36791-
36792-#include <stdatomic.h>
36793-
36794-#define ATOMIC_INIT(...) ATOMIC_VAR_INIT(__VA_ARGS__)
36795-
36796-#define atomic_memory_order_t memory_order
36797-#define atomic_memory_order_relaxed memory_order_relaxed
36798-#define atomic_memory_order_acquire memory_order_acquire
36799-#define atomic_memory_order_release memory_order_release
36800-#define atomic_memory_order_acq_rel memory_order_acq_rel
36801-#define atomic_memory_order_seq_cst memory_order_seq_cst
36802-
36803-#define atomic_fence atomic_thread_fence
36804-
36805-#define JEMALLOC_GENERATE_ATOMICS(type, short_type,			\
36806-    /* unused */ lg_size)						\
36807-typedef _Atomic(type) atomic_##short_type##_t;				\
36808-									\
36809-ATOMIC_INLINE type							\
36810-atomic_load_##short_type(const atomic_##short_type##_t *a,		\
36811-    atomic_memory_order_t mo) {						\
36812-	/*								\
36813-	 * A strict interpretation of the C standard prevents		\
36814-	 * atomic_load from taking a const argument, but it's		\
36815-	 * convenient for our purposes. This cast is a workaround.	\
36816-	 */								\
36817-	atomic_##short_type##_t* a_nonconst =				\
36818-	    (atomic_##short_type##_t*)a;				\
36819-	return atomic_load_explicit(a_nonconst, mo);			\
36820-}									\
36821-									\
36822-ATOMIC_INLINE void							\
36823-atomic_store_##short_type(atomic_##short_type##_t *a,			\
36824-    type val, atomic_memory_order_t mo) {				\
36825-	atomic_store_explicit(a, val, mo);				\
36826-}									\
36827-									\
36828-ATOMIC_INLINE type							\
36829-atomic_exchange_##short_type(atomic_##short_type##_t *a, type val,	\
36830-    atomic_memory_order_t mo) {						\
36831-	return atomic_exchange_explicit(a, val, mo);			\
36832-}									\
36833-									\
36834-ATOMIC_INLINE bool							\
36835-atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a,	\
36836-    type *expected, type desired, atomic_memory_order_t success_mo,	\
36837-    atomic_memory_order_t failure_mo) {					\
36838-	return atomic_compare_exchange_weak_explicit(a, expected,	\
36839-	    desired, success_mo, failure_mo);				\
36840-}									\
36841-									\
36842-ATOMIC_INLINE bool							\
36843-atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a,	\
36844-    type *expected, type desired, atomic_memory_order_t success_mo,	\
36845-    atomic_memory_order_t failure_mo) {					\
36846-	return atomic_compare_exchange_strong_explicit(a, expected,	\
36847-	    desired, success_mo, failure_mo);				\
36848-}
36849-
36850-/*
36851- * Integral types have some special operations available that non-integral ones
36852- * lack.
36853- */
36854-#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, 		\
36855-    /* unused */ lg_size)						\
36856-JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size)	\
36857-									\
36858-ATOMIC_INLINE type							\
36859-atomic_fetch_add_##short_type(atomic_##short_type##_t *a,		\
36860-    type val, atomic_memory_order_t mo) {				\
36861-	return atomic_fetch_add_explicit(a, val, mo);			\
36862-}									\
36863-									\
36864-ATOMIC_INLINE type							\
36865-atomic_fetch_sub_##short_type(atomic_##short_type##_t *a,		\
36866-    type val, atomic_memory_order_t mo) {				\
36867-	return atomic_fetch_sub_explicit(a, val, mo);			\
36868-}									\
36869-ATOMIC_INLINE type							\
36870-atomic_fetch_and_##short_type(atomic_##short_type##_t *a,		\
36871-    type val, atomic_memory_order_t mo) {				\
36872-	return atomic_fetch_and_explicit(a, val, mo);			\
36873-}									\
36874-ATOMIC_INLINE type							\
36875-atomic_fetch_or_##short_type(atomic_##short_type##_t *a,		\
36876-    type val, atomic_memory_order_t mo) {				\
36877-	return atomic_fetch_or_explicit(a, val, mo);			\
36878-}									\
36879-ATOMIC_INLINE type							\
36880-atomic_fetch_xor_##short_type(atomic_##short_type##_t *a,		\
36881-    type val, atomic_memory_order_t mo) {				\
36882-	return atomic_fetch_xor_explicit(a, val, mo);			\
36883-}
36884-
36885-#endif /* JEMALLOC_INTERNAL_ATOMIC_C11_H */
36886diff --git a/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h b/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h
36887deleted file mode 100644
36888index 471515e..0000000
36889--- a/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h
36890+++ /dev/null
36891@@ -1,129 +0,0 @@
36892-#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
36893-#define JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H
36894-
36895-#include "jemalloc/internal/assert.h"
36896-
36897-#define ATOMIC_INIT(...) {__VA_ARGS__}
36898-
36899-typedef enum {
36900-	atomic_memory_order_relaxed,
36901-	atomic_memory_order_acquire,
36902-	atomic_memory_order_release,
36903-	atomic_memory_order_acq_rel,
36904-	atomic_memory_order_seq_cst
36905-} atomic_memory_order_t;
36906-
36907-ATOMIC_INLINE int
36908-atomic_enum_to_builtin(atomic_memory_order_t mo) {
36909-	switch (mo) {
36910-	case atomic_memory_order_relaxed:
36911-		return __ATOMIC_RELAXED;
36912-	case atomic_memory_order_acquire:
36913-		return __ATOMIC_ACQUIRE;
36914-	case atomic_memory_order_release:
36915-		return __ATOMIC_RELEASE;
36916-	case atomic_memory_order_acq_rel:
36917-		return __ATOMIC_ACQ_REL;
36918-	case atomic_memory_order_seq_cst:
36919-		return __ATOMIC_SEQ_CST;
36920-	}
36921-	/* Can't happen; the switch is exhaustive. */
36922-	not_reached();
36923-}
36924-
36925-ATOMIC_INLINE void
36926-atomic_fence(atomic_memory_order_t mo) {
36927-	__atomic_thread_fence(atomic_enum_to_builtin(mo));
36928-}
36929-
36930-#define JEMALLOC_GENERATE_ATOMICS(type, short_type,			\
36931-    /* unused */ lg_size)						\
36932-typedef struct {							\
36933-	type repr;							\
36934-} atomic_##short_type##_t;						\
36935-									\
36936-ATOMIC_INLINE type							\
36937-atomic_load_##short_type(const atomic_##short_type##_t *a,		\
36938-    atomic_memory_order_t mo) {						\
36939-	type result;							\
36940-	__atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo));	\
36941-	return result;							\
36942-}									\
36943-									\
36944-ATOMIC_INLINE void							\
36945-atomic_store_##short_type(atomic_##short_type##_t *a, type val,		\
36946-    atomic_memory_order_t mo) {						\
36947-	__atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo));	\
36948-}									\
36949-									\
36950-ATOMIC_INLINE type							\
36951-atomic_exchange_##short_type(atomic_##short_type##_t *a, type val,	\
36952-    atomic_memory_order_t mo) {						\
36953-	type result;							\
36954-	__atomic_exchange(&a->repr, &val, &result,			\
36955-	    atomic_enum_to_builtin(mo));				\
36956-	return result;							\
36957-}									\
36958-									\
36959-ATOMIC_INLINE bool							\
36960-atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a,	\
36961-    UNUSED type *expected, type desired,				\
36962-    atomic_memory_order_t success_mo,					\
36963-    atomic_memory_order_t failure_mo) {					\
36964-	return __atomic_compare_exchange(&a->repr, expected, &desired,	\
36965-	    true, atomic_enum_to_builtin(success_mo),			\
36966-	    atomic_enum_to_builtin(failure_mo));			\
36967-}									\
36968-									\
36969-ATOMIC_INLINE bool							\
36970-atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a,	\
36971-    UNUSED type *expected, type desired,				\
36972-    atomic_memory_order_t success_mo,					\
36973-    atomic_memory_order_t failure_mo) {					\
36974-	return __atomic_compare_exchange(&a->repr, expected, &desired,	\
36975-	    false,							\
36976-	    atomic_enum_to_builtin(success_mo),				\
36977-	    atomic_enum_to_builtin(failure_mo));			\
36978-}
36979-
36980-
36981-#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type,			\
36982-    /* unused */ lg_size)						\
36983-JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size)	\
36984-									\
36985-ATOMIC_INLINE type							\
36986-atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val,	\
36987-    atomic_memory_order_t mo) {						\
36988-	return __atomic_fetch_add(&a->repr, val,			\
36989-	    atomic_enum_to_builtin(mo));				\
36990-}									\
36991-									\
36992-ATOMIC_INLINE type							\
36993-atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val,	\
36994-    atomic_memory_order_t mo) {						\
36995-	return __atomic_fetch_sub(&a->repr, val,			\
36996-	    atomic_enum_to_builtin(mo));				\
36997-}									\
36998-									\
36999-ATOMIC_INLINE type							\
37000-atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val,	\
37001-    atomic_memory_order_t mo) {						\
37002-	return __atomic_fetch_and(&a->repr, val,			\
37003-	    atomic_enum_to_builtin(mo));				\
37004-}									\
37005-									\
37006-ATOMIC_INLINE type							\
37007-atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val,	\
37008-    atomic_memory_order_t mo) {						\
37009-	return __atomic_fetch_or(&a->repr, val,				\
37010-	    atomic_enum_to_builtin(mo));				\
37011-}									\
37012-									\
37013-ATOMIC_INLINE type							\
37014-atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val,	\
37015-    atomic_memory_order_t mo) {						\
37016-	return __atomic_fetch_xor(&a->repr, val,			\
37017-	    atomic_enum_to_builtin(mo));				\
37018-}
37019-
37020-#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H */
37021diff --git a/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h b/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h
37022deleted file mode 100644
37023index e02b7cb..0000000
37024--- a/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h
37025+++ /dev/null
37026@@ -1,195 +0,0 @@
37027-#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H
37028-#define JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H
37029-
37030-#define ATOMIC_INIT(...) {__VA_ARGS__}
37031-
37032-typedef enum {
37033-	atomic_memory_order_relaxed,
37034-	atomic_memory_order_acquire,
37035-	atomic_memory_order_release,
37036-	atomic_memory_order_acq_rel,
37037-	atomic_memory_order_seq_cst
37038-} atomic_memory_order_t;
37039-
37040-ATOMIC_INLINE void
37041-atomic_fence(atomic_memory_order_t mo) {
37042-	/* Easy cases first: no barrier, and full barrier. */
37043-	if (mo == atomic_memory_order_relaxed) {
37044-		asm volatile("" ::: "memory");
37045-		return;
37046-	}
37047-	if (mo == atomic_memory_order_seq_cst) {
37048-		asm volatile("" ::: "memory");
37049-		__sync_synchronize();
37050-		asm volatile("" ::: "memory");
37051-		return;
37052-	}
37053-	asm volatile("" ::: "memory");
37054-#  if defined(__i386__) || defined(__x86_64__)
37055-	/* This is implicit on x86. */
37056-#  elif defined(__ppc64__)
37057-	asm volatile("lwsync");
37058-#  elif defined(__ppc__)
37059-	asm volatile("sync");
37060-#  elif defined(__sparc__) && defined(__arch64__)
37061-	if (mo == atomic_memory_order_acquire) {
37062-		asm volatile("membar #LoadLoad | #LoadStore");
37063-	} else if (mo == atomic_memory_order_release) {
37064-		asm volatile("membar #LoadStore | #StoreStore");
37065-	} else {
37066-		asm volatile("membar #LoadLoad | #LoadStore | #StoreStore");
37067-	}
37068-#  else
37069-	__sync_synchronize();
37070-#  endif
37071-	asm volatile("" ::: "memory");
37072-}
37073-
37074-/*
37075- * A correct implementation of seq_cst loads and stores on weakly ordered
37076- * architectures could do either of the following:
37077- *   1. store() is weak-fence -> store -> strong fence, load() is load ->
37078- *      strong-fence.
37079- *   2. store() is strong-fence -> store, load() is strong-fence -> load ->
37080- *      weak-fence.
37081- * The tricky thing is, load() and store() above can be the load or store
37082- * portions of a gcc __sync builtin, so we have to follow GCC's lead, which
37083- * means going with strategy 2.
37084- * On strongly ordered architectures, the natural strategy is to stick a strong
37085- * fence after seq_cst stores, and have naked loads.  So we want the strong
37086- * fences in different places on different architectures.
37087- * atomic_pre_sc_load_fence and atomic_post_sc_store_fence allow us to
37088- * accomplish this.
37089- */
37090-
37091-ATOMIC_INLINE void
37092-atomic_pre_sc_load_fence() {
37093-#  if defined(__i386__) || defined(__x86_64__) ||			\
37094-    (defined(__sparc__) && defined(__arch64__))
37095-	atomic_fence(atomic_memory_order_relaxed);
37096-#  else
37097-	atomic_fence(atomic_memory_order_seq_cst);
37098-#  endif
37099-}
37100-
37101-ATOMIC_INLINE void
37102-atomic_post_sc_store_fence() {
37103-#  if defined(__i386__) || defined(__x86_64__) ||			\
37104-    (defined(__sparc__) && defined(__arch64__))
37105-	atomic_fence(atomic_memory_order_seq_cst);
37106-#  else
37107-	atomic_fence(atomic_memory_order_relaxed);
37108-#  endif
37109-
37110-}
37111-
37112-#define JEMALLOC_GENERATE_ATOMICS(type, short_type,			\
37113-    /* unused */ lg_size)						\
37114-typedef struct {							\
37115-	type volatile repr;						\
37116-} atomic_##short_type##_t;						\
37117-									\
37118-ATOMIC_INLINE type							\
37119-atomic_load_##short_type(const atomic_##short_type##_t *a,		\
37120-    atomic_memory_order_t mo) {						\
37121-	if (mo == atomic_memory_order_seq_cst) {			\
37122-		atomic_pre_sc_load_fence();				\
37123-	}								\
37124-	type result = a->repr;						\
37125-	if (mo != atomic_memory_order_relaxed) {			\
37126-		atomic_fence(atomic_memory_order_acquire);		\
37127-	}								\
37128-	return result;							\
37129-}									\
37130-									\
37131-ATOMIC_INLINE void							\
37132-atomic_store_##short_type(atomic_##short_type##_t *a,			\
37133-    type val, atomic_memory_order_t mo) {				\
37134-	if (mo != atomic_memory_order_relaxed) {			\
37135-		atomic_fence(atomic_memory_order_release);		\
37136-	}								\
37137-	a->repr = val;							\
37138-	if (mo == atomic_memory_order_seq_cst) {			\
37139-		atomic_post_sc_store_fence();				\
37140-	}								\
37141-}									\
37142-									\
37143-ATOMIC_INLINE type							\
37144-atomic_exchange_##short_type(atomic_##short_type##_t *a, type val,	\
37145-    atomic_memory_order_t mo) {						\
37146-	/*								\
37147-	 * Because of FreeBSD, we care about gcc 4.2, which doesn't have\
37148-	 * an atomic exchange builtin.  We fake it with a CAS loop.	\
37149-	 */								\
37150-	while (true) {							\
37151-		type old = a->repr;					\
37152-		if (__sync_bool_compare_and_swap(&a->repr, old, val)) {	\
37153-			return old;					\
37154-		}							\
37155-	}								\
37156-}									\
37157-									\
37158-ATOMIC_INLINE bool							\
37159-atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a,	\
37160-    type *expected, type desired,					\
37161-    atomic_memory_order_t success_mo,					\
37162-    atomic_memory_order_t failure_mo) {					\
37163-	type prev = __sync_val_compare_and_swap(&a->repr, *expected,	\
37164-	    desired);							\
37165-	if (prev == *expected) {					\
37166-		return true;						\
37167-	} else {							\
37168-		*expected = prev;					\
37169-		return false;						\
37170-	}								\
37171-}									\
37172-ATOMIC_INLINE bool							\
37173-atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a,	\
37174-    type *expected, type desired,					\
37175-    atomic_memory_order_t success_mo,					\
37176-    atomic_memory_order_t failure_mo) {					\
37177-	type prev = __sync_val_compare_and_swap(&a->repr, *expected,	\
37178-	    desired);							\
37179-	if (prev == *expected) {					\
37180-		return true;						\
37181-	} else {							\
37182-		*expected = prev;					\
37183-		return false;						\
37184-	}								\
37185-}
37186-
37187-#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type,			\
37188-    /* unused */ lg_size)						\
37189-JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size)	\
37190-									\
37191-ATOMIC_INLINE type							\
37192-atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val,	\
37193-    atomic_memory_order_t mo) {						\
37194-	return __sync_fetch_and_add(&a->repr, val);			\
37195-}									\
37196-									\
37197-ATOMIC_INLINE type							\
37198-atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val,	\
37199-    atomic_memory_order_t mo) {						\
37200-	return __sync_fetch_and_sub(&a->repr, val);			\
37201-}									\
37202-									\
37203-ATOMIC_INLINE type							\
37204-atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val,	\
37205-    atomic_memory_order_t mo) {						\
37206-	return __sync_fetch_and_and(&a->repr, val);			\
37207-}									\
37208-									\
37209-ATOMIC_INLINE type							\
37210-atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val,	\
37211-    atomic_memory_order_t mo) {						\
37212-	return __sync_fetch_and_or(&a->repr, val);			\
37213-}									\
37214-									\
37215-ATOMIC_INLINE type							\
37216-atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val,	\
37217-    atomic_memory_order_t mo) {						\
37218-	return __sync_fetch_and_xor(&a->repr, val);			\
37219-}
37220-
37221-#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H */
37222diff --git a/jemalloc/include/jemalloc/internal/atomic_msvc.h b/jemalloc/include/jemalloc/internal/atomic_msvc.h
37223deleted file mode 100644
37224index 67057ce..0000000
37225--- a/jemalloc/include/jemalloc/internal/atomic_msvc.h
37226+++ /dev/null
37227@@ -1,158 +0,0 @@
37228-#ifndef JEMALLOC_INTERNAL_ATOMIC_MSVC_H
37229-#define JEMALLOC_INTERNAL_ATOMIC_MSVC_H
37230-
37231-#define ATOMIC_INIT(...) {__VA_ARGS__}
37232-
37233-typedef enum {
37234-	atomic_memory_order_relaxed,
37235-	atomic_memory_order_acquire,
37236-	atomic_memory_order_release,
37237-	atomic_memory_order_acq_rel,
37238-	atomic_memory_order_seq_cst
37239-} atomic_memory_order_t;
37240-
37241-typedef char atomic_repr_0_t;
37242-typedef short atomic_repr_1_t;
37243-typedef long atomic_repr_2_t;
37244-typedef __int64 atomic_repr_3_t;
37245-
37246-ATOMIC_INLINE void
37247-atomic_fence(atomic_memory_order_t mo) {
37248-	_ReadWriteBarrier();
37249-#  if defined(_M_ARM) || defined(_M_ARM64)
37250-	/* ARM needs a barrier for everything but relaxed. */
37251-	if (mo != atomic_memory_order_relaxed) {
37252-		MemoryBarrier();
37253-	}
37254-#  elif defined(_M_IX86) || defined (_M_X64)
37255-	/* x86 needs a barrier only for seq_cst. */
37256-	if (mo == atomic_memory_order_seq_cst) {
37257-		MemoryBarrier();
37258-	}
37259-#  else
37260-#  error "Don't know how to create atomics for this platform for MSVC."
37261-#  endif
37262-	_ReadWriteBarrier();
37263-}
37264-
37265-#define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t
37266-
37267-#define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b)
37268-#define ATOMIC_RAW_CONCAT(a, b) a ## b
37269-
37270-#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT(	\
37271-    base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size))
37272-
37273-#define ATOMIC_INTERLOCKED_SUFFIX(lg_size)				\
37274-    ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size)
37275-
37276-#define ATOMIC_INTERLOCKED_SUFFIX_0 8
37277-#define ATOMIC_INTERLOCKED_SUFFIX_1 16
37278-#define ATOMIC_INTERLOCKED_SUFFIX_2
37279-#define ATOMIC_INTERLOCKED_SUFFIX_3 64
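/*
 * Illustrative expansion (a sketch, not part of the original header): with
 * lg_size == 3, ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, 3) pastes to
 * _InterlockedExchange64, while lg_size == 2 selects the empty suffix and
 * yields plain _InterlockedExchange, MSVC's 32-bit (long) variant.
 */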
37280-
37281-#define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size)		\
37282-typedef struct {							\
37283-	ATOMIC_INTERLOCKED_REPR(lg_size) repr;				\
37284-} atomic_##short_type##_t;						\
37285-									\
37286-ATOMIC_INLINE type							\
37287-atomic_load_##short_type(const atomic_##short_type##_t *a,		\
37288-    atomic_memory_order_t mo) {						\
37289-	ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr;			\
37290-	if (mo != atomic_memory_order_relaxed) {			\
37291-		atomic_fence(atomic_memory_order_acquire);		\
37292-	}								\
37293-	return (type) ret;						\
37294-}									\
37295-									\
37296-ATOMIC_INLINE void							\
37297-atomic_store_##short_type(atomic_##short_type##_t *a,			\
37298-    type val, atomic_memory_order_t mo) {				\
37299-	if (mo != atomic_memory_order_relaxed) {			\
37300-		atomic_fence(atomic_memory_order_release);		\
37301-	}								\
37302-	a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size)) val;		\
37303-	if (mo == atomic_memory_order_seq_cst) {			\
37304-		atomic_fence(atomic_memory_order_seq_cst);		\
37305-	}								\
37306-}									\
37307-									\
37308-ATOMIC_INLINE type							\
37309-atomic_exchange_##short_type(atomic_##short_type##_t *a, type val,	\
37310-    atomic_memory_order_t mo) {						\
37311-	return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange,	\
37312-	    lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val);	\
37313-}									\
37314-									\
37315-ATOMIC_INLINE bool							\
37316-atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a,	\
37317-    type *expected, type desired, atomic_memory_order_t success_mo,	\
37318-    atomic_memory_order_t failure_mo) {					\
37319-	ATOMIC_INTERLOCKED_REPR(lg_size) e =				\
37320-	    (ATOMIC_INTERLOCKED_REPR(lg_size))*expected;		\
37321-	ATOMIC_INTERLOCKED_REPR(lg_size) d =				\
37322-	    (ATOMIC_INTERLOCKED_REPR(lg_size))desired;			\
37323-	ATOMIC_INTERLOCKED_REPR(lg_size) old =				\
37324-	    ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange, 	\
37325-		lg_size)(&a->repr, d, e);				\
37326-	if (old == e) {							\
37327-		return true;						\
37328-	} else {							\
37329-		*expected = (type)old;					\
37330-		return false;						\
37331-	}								\
37332-}									\
37333-									\
37334-ATOMIC_INLINE bool							\
37335-atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a,	\
37336-    type *expected, type desired, atomic_memory_order_t success_mo,	\
37337-    atomic_memory_order_t failure_mo) {					\
37338-	/* We implement the weak version with strong semantics. */	\
37339-	return atomic_compare_exchange_weak_##short_type(a, expected,	\
37340-	    desired, success_mo, failure_mo);				\
37341-}
37342-
37343-
37344-#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size)	\
37345-JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size)			\
37346-									\
37347-ATOMIC_INLINE type							\
37348-atomic_fetch_add_##short_type(atomic_##short_type##_t *a,		\
37349-    type val, atomic_memory_order_t mo) {				\
37350-	return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd,	\
37351-	    lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val);	\
37352-}									\
37353-									\
37354-ATOMIC_INLINE type							\
37355-atomic_fetch_sub_##short_type(atomic_##short_type##_t *a,		\
37356-    type val, atomic_memory_order_t mo) {				\
37357-	/*								\
37358-	 * MSVC warns on negation of unsigned operands, but for us it	\
37359-	 * gives exactly the right semantics (MAX_TYPE + 1 - operand).	\
37360-	 */								\
37361-	__pragma(warning(push))						\
37362-	__pragma(warning(disable: 4146))				\
37363-	return atomic_fetch_add_##short_type(a, -val, mo);		\
37364-	__pragma(warning(pop))						\
37365-}									\
37366-ATOMIC_INLINE type							\
37367-atomic_fetch_and_##short_type(atomic_##short_type##_t *a,		\
37368-    type val, atomic_memory_order_t mo) {				\
37369-	return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedAnd, lg_size)(	\
37370-	    &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val);		\
37371-}									\
37372-ATOMIC_INLINE type							\
37373-atomic_fetch_or_##short_type(atomic_##short_type##_t *a,		\
37374-    type val, atomic_memory_order_t mo) {				\
37375-	return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedOr, lg_size)(	\
37376-	    &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val);		\
37377-}									\
37378-ATOMIC_INLINE type							\
37379-atomic_fetch_xor_##short_type(atomic_##short_type##_t *a,		\
37380-    type val, atomic_memory_order_t mo) {				\
37381-	return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedXor, lg_size)(	\
37382-	    &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val);		\
37383-}
37384-
37385-#endif /* JEMALLOC_INTERNAL_ATOMIC_MSVC_H */
37386diff --git a/jemalloc/include/jemalloc/internal/background_thread_externs.h b/jemalloc/include/jemalloc/internal/background_thread_externs.h
37387deleted file mode 100644
37388index 6ae3c8d..0000000
37389--- a/jemalloc/include/jemalloc/internal/background_thread_externs.h
37390+++ /dev/null
37391@@ -1,33 +0,0 @@
37392-#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
37393-#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
37394-
37395-extern bool opt_background_thread;
37396-extern size_t opt_max_background_threads;
37397-extern malloc_mutex_t background_thread_lock;
37398-extern atomic_b_t background_thread_enabled_state;
37399-extern size_t n_background_threads;
37400-extern size_t max_background_threads;
37401-extern background_thread_info_t *background_thread_info;
37402-
37403-bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
37404-bool background_threads_enable(tsd_t *tsd);
37405-bool background_threads_disable(tsd_t *tsd);
37406-bool background_thread_is_started(background_thread_info_t* info);
37407-void background_thread_wakeup_early(background_thread_info_t *info,
37408-    nstime_t *remaining_sleep);
37409-void background_thread_prefork0(tsdn_t *tsdn);
37410-void background_thread_prefork1(tsdn_t *tsdn);
37411-void background_thread_postfork_parent(tsdn_t *tsdn);
37412-void background_thread_postfork_child(tsdn_t *tsdn);
37413-bool background_thread_stats_read(tsdn_t *tsdn,
37414-    background_thread_stats_t *stats);
37415-void background_thread_ctl_init(tsdn_t *tsdn);
37416-
37417-#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
37418-extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *,
37419-    void *(*)(void *), void *__restrict);
37420-#endif
37421-bool background_thread_boot0(void);
37422-bool background_thread_boot1(tsdn_t *tsdn, base_t *base);
37423-
37424-#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */
37425diff --git a/jemalloc/include/jemalloc/internal/background_thread_inlines.h b/jemalloc/include/jemalloc/internal/background_thread_inlines.h
37426deleted file mode 100644
37427index 92c5feb..0000000
37428--- a/jemalloc/include/jemalloc/internal/background_thread_inlines.h
37429+++ /dev/null
37430@@ -1,48 +0,0 @@
37431-#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
37432-#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H
37433-
37434-JEMALLOC_ALWAYS_INLINE bool
37435-background_thread_enabled(void) {
37436-	return atomic_load_b(&background_thread_enabled_state, ATOMIC_RELAXED);
37437-}
37438-
37439-JEMALLOC_ALWAYS_INLINE void
37440-background_thread_enabled_set(tsdn_t *tsdn, bool state) {
37441-	malloc_mutex_assert_owner(tsdn, &background_thread_lock);
37442-	atomic_store_b(&background_thread_enabled_state, state, ATOMIC_RELAXED);
37443-}
37444-
37445-JEMALLOC_ALWAYS_INLINE background_thread_info_t *
37446-arena_background_thread_info_get(arena_t *arena) {
37447-	unsigned arena_ind = arena_ind_get(arena);
37448-	return &background_thread_info[arena_ind % max_background_threads];
37449-}
37450-
37451-JEMALLOC_ALWAYS_INLINE background_thread_info_t *
37452-background_thread_info_get(size_t ind) {
37453-	return &background_thread_info[ind % max_background_threads];
37454-}
37455-
37456-JEMALLOC_ALWAYS_INLINE uint64_t
37457-background_thread_wakeup_time_get(background_thread_info_t *info) {
37458-	uint64_t next_wakeup = nstime_ns(&info->next_wakeup);
37459-	assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) ==
37460-	    (next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP));
37461-	return next_wakeup;
37462-}
37463-
37464-JEMALLOC_ALWAYS_INLINE void
37465-background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info,
37466-    uint64_t wakeup_time) {
37467-	malloc_mutex_assert_owner(tsdn, &info->mtx);
37468-	atomic_store_b(&info->indefinite_sleep,
37469-	    wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE);
37470-	nstime_init(&info->next_wakeup, wakeup_time);
37471-}
37472-
37473-JEMALLOC_ALWAYS_INLINE bool
37474-background_thread_indefinite_sleep(background_thread_info_t *info) {
37475-	return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE);
37476-}
37477-
37478-#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */
37479diff --git a/jemalloc/include/jemalloc/internal/background_thread_structs.h b/jemalloc/include/jemalloc/internal/background_thread_structs.h
37480deleted file mode 100644
37481index 83a9198..0000000
37482--- a/jemalloc/include/jemalloc/internal/background_thread_structs.h
37483+++ /dev/null
37484@@ -1,66 +0,0 @@
37485-#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
37486-#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H
37487-
37488-/* This file really combines "structs" and "types", but only transitionally. */
37489-
37490-#if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK)
37491-#  define JEMALLOC_PTHREAD_CREATE_WRAPPER
37492-#endif
37493-
37494-#define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX
37495-#define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT
37496-#define DEFAULT_NUM_BACKGROUND_THREAD 4
37497-
37498-/*
37499- * These exist only as a transitional state.  Eventually, deferral should be
37500- * part of the PAI, and each implementation can indicate wait times with more
37501- * specificity.
37502- */
37503-#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_UNINITIALIZED (-2)
37504-#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_DEFAULT_WHEN_ENABLED 5000
37505-
37506-#define BACKGROUND_THREAD_DEFERRED_MIN UINT64_C(0)
37507-#define BACKGROUND_THREAD_DEFERRED_MAX UINT64_MAX
37508-
37509-typedef enum {
37510-	background_thread_stopped,
37511-	background_thread_started,
37512-	/* Thread waits on the global lock when paused (for arena_reset). */
37513-	background_thread_paused,
37514-} background_thread_state_t;
37515-
37516-struct background_thread_info_s {
37517-#ifdef JEMALLOC_BACKGROUND_THREAD
37518-	/* Background thread is pthread specific. */
37519-	pthread_t		thread;
37520-	pthread_cond_t		cond;
37521-#endif
37522-	malloc_mutex_t		mtx;
37523-	background_thread_state_t	state;
37524-	/* When true, it means no wakeup scheduled. */
37525-	atomic_b_t		indefinite_sleep;
37526-	/* Next scheduled wakeup time (absolute time in ns). */
37527-	nstime_t		next_wakeup;
37528-	/*
37529-	 *  Number of pages newly added since the last background thread run
37530-	 *  that need to be purged by the next wakeup.  This is adjusted on
37531-	 *  epoch advance, and is used to determine whether we should signal the
37532-	 *  background thread to wake up earlier.
37533-	 */
37534-	size_t			npages_to_purge_new;
37535-	/* Stats: total number of runs since started. */
37536-	uint64_t		tot_n_runs;
37537-	/* Stats: total sleep time since started. */
37538-	nstime_t		tot_sleep_time;
37539-};
37540-typedef struct background_thread_info_s background_thread_info_t;
37541-
37542-struct background_thread_stats_s {
37543-	size_t num_threads;
37544-	uint64_t num_runs;
37545-	nstime_t run_interval;
37546-	mutex_prof_data_t max_counter_per_bg_thd;
37547-};
37548-typedef struct background_thread_stats_s background_thread_stats_t;
37549-
37550-#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H */
37551diff --git a/jemalloc/include/jemalloc/internal/base.h b/jemalloc/include/jemalloc/internal/base.h
37552deleted file mode 100644
37553index 9b2c9fb..0000000
37554--- a/jemalloc/include/jemalloc/internal/base.h
37555+++ /dev/null
37556@@ -1,110 +0,0 @@
37557-#ifndef JEMALLOC_INTERNAL_BASE_H
37558-#define JEMALLOC_INTERNAL_BASE_H
37559-
37560-#include "jemalloc/internal/edata.h"
37561-#include "jemalloc/internal/ehooks.h"
37562-#include "jemalloc/internal/mutex.h"
37563-
37564-enum metadata_thp_mode_e {
37565-	metadata_thp_disabled   = 0,
37566-	/*
37567-	 * Lazily enable hugepages for metadata.  To avoid high RSS caused by THP
37568-	 * in low-usage arenas (i.e. THP becoming a significant percentage), the
37569-	 * "auto" option only starts using THP after a base allocator has used up
37570-	 * the first THP region.  Starting from the second hugepage (in a single
37571-	 * arena), "auto" behaves the same as "always", i.e. madvise hugepage
37572-	 * right away.
37573-	 */
37574-	metadata_thp_auto       = 1,
37575-	metadata_thp_always     = 2,
37576-	metadata_thp_mode_limit = 3
37577-};
37578-typedef enum metadata_thp_mode_e metadata_thp_mode_t;
37579-
37580-#define METADATA_THP_DEFAULT metadata_thp_disabled
37581-extern metadata_thp_mode_t opt_metadata_thp;
37582-extern const char *metadata_thp_mode_names[];
37583-
37584-
37585-/* Embedded at the beginning of every block of base-managed virtual memory. */
37586-typedef struct base_block_s base_block_t;
37587-struct base_block_s {
37588-	/* Total size of block's virtual memory mapping. */
37589-	size_t size;
37590-
37591-	/* Next block in list of base's blocks. */
37592-	base_block_t *next;
37593-
37594-	/* Tracks unused trailing space. */
37595-	edata_t edata;
37596-};
37597-
37598-typedef struct base_s base_t;
37599-struct base_s {
37600-	/*
37601-	 * User-configurable extent hook functions.
37602-	 */
37603-	ehooks_t ehooks;
37604-
37605-	/*
37606-	 * User-configurable extent hook functions for metadata allocations.
37607-	 */
37608-	ehooks_t ehooks_base;
37609-
37610-	/* Protects base_alloc() and base_stats_get() operations. */
37611-	malloc_mutex_t mtx;
37612-
37613-	/* Using THP when true (metadata_thp auto mode). */
37614-	bool auto_thp_switched;
37615-	/*
37616-	 * Most recent size class in the series of increasingly large base
37617-	 * extents.  Logarithmic spacing between subsequent allocations ensures
37618-	 * that the total number of distinct mappings remains small.
37619-	 */
37620-	pszind_t pind_last;
37621-
37622-	/* Serial number generation state. */
37623-	size_t extent_sn_next;
37624-
37625-	/* Chain of all blocks associated with base. */
37626-	base_block_t *blocks;
37627-
37628-	/* Heap of extents that track unused trailing space within blocks. */
37629-	edata_heap_t avail[SC_NSIZES];
37630-
37631-	/* Stats, only maintained if config_stats. */
37632-	size_t allocated;
37633-	size_t resident;
37634-	size_t mapped;
37635-	/* Number of THP regions touched. */
37636-	size_t n_thp;
37637-};
37638-
37639-static inline unsigned
37640-base_ind_get(const base_t *base) {
37641-	return ehooks_ind_get(&base->ehooks);
37642-}
37643-
37644-static inline bool
37645-metadata_thp_enabled(void) {
37646-	return (opt_metadata_thp != metadata_thp_disabled);
37647-}
37648-
37649-base_t *b0get(void);
37650-base_t *base_new(tsdn_t *tsdn, unsigned ind,
37651-    const extent_hooks_t *extent_hooks, bool metadata_use_hooks);
37652-void base_delete(tsdn_t *tsdn, base_t *base);
37653-ehooks_t *base_ehooks_get(base_t *base);
37654-ehooks_t *base_ehooks_get_for_metadata(base_t *base);
37655-extent_hooks_t *base_extent_hooks_set(base_t *base,
37656-    extent_hooks_t *extent_hooks);
37657-void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
37658-edata_t *base_alloc_edata(tsdn_t *tsdn, base_t *base);
37659-void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
37660-    size_t *resident, size_t *mapped, size_t *n_thp);
37661-void base_prefork(tsdn_t *tsdn, base_t *base);
37662-void base_postfork_parent(tsdn_t *tsdn, base_t *base);
37663-void base_postfork_child(tsdn_t *tsdn, base_t *base);
37664-bool base_boot(tsdn_t *tsdn);
37665-
37666-#endif /* JEMALLOC_INTERNAL_BASE_H */
37667diff --git a/jemalloc/include/jemalloc/internal/bin.h b/jemalloc/include/jemalloc/internal/bin.h
37668deleted file mode 100644
37669index 63f9739..0000000
37670--- a/jemalloc/include/jemalloc/internal/bin.h
37671+++ /dev/null
37672@@ -1,82 +0,0 @@
37673-#ifndef JEMALLOC_INTERNAL_BIN_H
37674-#define JEMALLOC_INTERNAL_BIN_H
37675-
37676-#include "jemalloc/internal/bin_stats.h"
37677-#include "jemalloc/internal/bin_types.h"
37678-#include "jemalloc/internal/edata.h"
37679-#include "jemalloc/internal/mutex.h"
37680-#include "jemalloc/internal/sc.h"
37681-
37682-/*
37683- * A bin contains a set of extents that are currently being used for slab
37684- * allocations.
37685- */
37686-typedef struct bin_s bin_t;
37687-struct bin_s {
37688-	/* All operations on bin_t fields require lock ownership. */
37689-	malloc_mutex_t		lock;
37690-
37691-	/*
37692-	 * Bin statistics.  These get touched every time the lock is acquired,
37693-	 * so put them close by in the hopes of getting some cache locality.
37694-	 */
37695-	bin_stats_t	stats;
37696-
37697-	/*
37698-	 * Current slab being used to service allocations of this bin's size
37699-	 * class.  slabcur is independent of slabs_{nonfull,full}; whenever
37700-	 * slabcur is reassigned, the previous slab must be deallocated or
37701-	 * inserted into slabs_{nonfull,full}.
37702-	 */
37703-	edata_t			*slabcur;
37704-
37705-	/*
37706-	 * Heap of non-full slabs.  This heap is used to ensure that new
37707-	 * allocations come from the non-full slab that is oldest/lowest in
37708-	 * memory.
37709-	 */
37710-	edata_heap_t		slabs_nonfull;
37711-
37712-	/* List used to track full slabs. */
37713-	edata_list_active_t	slabs_full;
37714-};
37715-
37716-/* A set of sharded bins of the same size class. */
37717-typedef struct bins_s bins_t;
37718-struct bins_s {
37719-	/* Sharded bins.  Dynamically sized. */
37720-	bin_t *bin_shards;
37721-};
37722-
37723-void bin_shard_sizes_boot(unsigned bin_shards[SC_NBINS]);
37724-bool bin_update_shard_size(unsigned bin_shards[SC_NBINS], size_t start_size,
37725-    size_t end_size, size_t nshards);
37726-
37727-/* Initializes a bin to empty.  Returns true on error. */
37728-bool bin_init(bin_t *bin);
37729-
37730-/* Forking. */
37731-void bin_prefork(tsdn_t *tsdn, bin_t *bin);
37732-void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin);
37733-void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);
37734-
37735-/* Stats. */
37736-static inline void
37737-bin_stats_merge(tsdn_t *tsdn, bin_stats_data_t *dst_bin_stats, bin_t *bin) {
37738-	malloc_mutex_lock(tsdn, &bin->lock);
37739-	malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
37740-	bin_stats_t *stats = &dst_bin_stats->stats_data;
37741-	stats->nmalloc += bin->stats.nmalloc;
37742-	stats->ndalloc += bin->stats.ndalloc;
37743-	stats->nrequests += bin->stats.nrequests;
37744-	stats->curregs += bin->stats.curregs;
37745-	stats->nfills += bin->stats.nfills;
37746-	stats->nflushes += bin->stats.nflushes;
37747-	stats->nslabs += bin->stats.nslabs;
37748-	stats->reslabs += bin->stats.reslabs;
37749-	stats->curslabs += bin->stats.curslabs;
37750-	stats->nonfull_slabs += bin->stats.nonfull_slabs;
37751-	malloc_mutex_unlock(tsdn, &bin->lock);
37752-}
37753-
37754-#endif /* JEMALLOC_INTERNAL_BIN_H */
37755diff --git a/jemalloc/include/jemalloc/internal/bin_info.h b/jemalloc/include/jemalloc/internal/bin_info.h
37756deleted file mode 100644
37757index 7fe65c8..0000000
37758--- a/jemalloc/include/jemalloc/internal/bin_info.h
37759+++ /dev/null
37760@@ -1,50 +0,0 @@
37761-#ifndef JEMALLOC_INTERNAL_BIN_INFO_H
37762-#define JEMALLOC_INTERNAL_BIN_INFO_H
37763-
37764-#include "jemalloc/internal/bitmap.h"
37765-
37766-/*
37767- * Read-only information associated with each element of arena_t's bins array
37768- * is stored separately, partly to reduce memory usage (only one copy, rather
37769- * than one per arena), but mainly to avoid false cacheline sharing.
37770- *
37771- * Each slab has the following layout:
37772- *
37773- *   /--------------------\
37774- *   | region 0           |
37775- *   |--------------------|
37776- *   | region 1           |
37777- *   |--------------------|
37778- *   | ...                |
37779- *   | ...                |
37780- *   | ...                |
37781- *   |--------------------|
37782- *   | region nregs-1     |
37783- *   \--------------------/
37784- */
37785-typedef struct bin_info_s bin_info_t;
37786-struct bin_info_s {
37787-	/* Size of regions in a slab for this bin's size class. */
37788-	size_t			reg_size;
37789-
37790-	/* Total size of a slab for this bin's size class. */
37791-	size_t			slab_size;
37792-
37793-	/* Total number of regions in a slab for this bin's size class. */
37794-	uint32_t		nregs;
37795-
37796-	/* Number of sharded bins in each arena for this size class. */
37797-	uint32_t		n_shards;
37798-
37799-	/*
37800-	 * Metadata used to manipulate bitmaps for slabs associated with this
37801-	 * bin.
37802-	 */
37803-	bitmap_info_t		bitmap_info;
37804-};
37805-
37806-extern bin_info_t bin_infos[SC_NBINS];
37807-
37808-void bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]);
37809-
37810-#endif /* JEMALLOC_INTERNAL_BIN_INFO_H */
37811diff --git a/jemalloc/include/jemalloc/internal/bin_stats.h b/jemalloc/include/jemalloc/internal/bin_stats.h
37812deleted file mode 100644
37813index 0b99297..0000000
37814--- a/jemalloc/include/jemalloc/internal/bin_stats.h
37815+++ /dev/null
37816@@ -1,57 +0,0 @@
37817-#ifndef JEMALLOC_INTERNAL_BIN_STATS_H
37818-#define JEMALLOC_INTERNAL_BIN_STATS_H
37819-
37820-#include "jemalloc/internal/mutex_prof.h"
37821-
37822-typedef struct bin_stats_s bin_stats_t;
37823-struct bin_stats_s {
37824-	/*
37825-	 * Total number of allocation/deallocation requests served directly by
37826-	 * the bin.  Note that tcache may allocate an object, then recycle it
37827-	 * many times, resulting in many increments to nrequests, but only one
37828-	 * each to nmalloc and ndalloc.
37829-	 */
37830-	uint64_t	nmalloc;
37831-	uint64_t	ndalloc;
37832-
37833-	/*
37834-	 * Number of allocation requests that correspond to the size of this
37835-	 * bin.  This includes requests served by tcache, though tcache only
37836-	 * periodically merges into this counter.
37837-	 */
37838-	uint64_t	nrequests;
37839-
37840-	/*
37841-	 * Current number of regions of this size class, including regions
37842-	 * currently cached by tcache.
37843-	 */
37844-	size_t		curregs;
37845-
37846-	/* Number of tcache fills from this bin. */
37847-	uint64_t	nfills;
37848-
37849-	/* Number of tcache flushes to this bin. */
37850-	uint64_t	nflushes;
37851-
37852-	/* Total number of slabs created for this bin's size class. */
37853-	uint64_t	nslabs;
37854-
37855-	/*
37856-	 * Total number of slabs reused by extracting them from the slabs heap
37857-	 * for this bin's size class.
37858-	 */
37859-	uint64_t	reslabs;
37860-
37861-	/* Current number of slabs in this bin. */
37862-	size_t		curslabs;
37863-
37864-	/* Current size of nonfull slabs heap in this bin. */
37865-	size_t		nonfull_slabs;
37866-};
37867-
37868-typedef struct bin_stats_data_s bin_stats_data_t;
37869-struct bin_stats_data_s {
37870-	bin_stats_t stats_data;
37871-	mutex_prof_data_t mutex_data;
37872-};
37873-#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */
37874diff --git a/jemalloc/include/jemalloc/internal/bin_types.h b/jemalloc/include/jemalloc/internal/bin_types.h
37875deleted file mode 100644
37876index 945e832..0000000
37877--- a/jemalloc/include/jemalloc/internal/bin_types.h
37878+++ /dev/null
37879@@ -1,17 +0,0 @@
37880-#ifndef JEMALLOC_INTERNAL_BIN_TYPES_H
37881-#define JEMALLOC_INTERNAL_BIN_TYPES_H
37882-
37883-#include "jemalloc/internal/sc.h"
37884-
37885-#define BIN_SHARDS_MAX (1 << EDATA_BITS_BINSHARD_WIDTH)
37886-#define N_BIN_SHARDS_DEFAULT 1
37887-
37888-/* Used in TSD static initializer only. Real init in arena_bind(). */
37889-#define TSD_BINSHARDS_ZERO_INITIALIZER {{UINT8_MAX}}
37890-
37891-typedef struct tsd_binshards_s tsd_binshards_t;
37892-struct tsd_binshards_s {
37893-	uint8_t binshard[SC_NBINS];
37894-};
37895-
37896-#endif /* JEMALLOC_INTERNAL_BIN_TYPES_H */
37897diff --git a/jemalloc/include/jemalloc/internal/bit_util.h b/jemalloc/include/jemalloc/internal/bit_util.h
37898deleted file mode 100644
37899index bac5914..0000000
37900--- a/jemalloc/include/jemalloc/internal/bit_util.h
37901+++ /dev/null
37902@@ -1,422 +0,0 @@
37903-#ifndef JEMALLOC_INTERNAL_BIT_UTIL_H
37904-#define JEMALLOC_INTERNAL_BIT_UTIL_H
37905-
37906-#include "jemalloc/internal/assert.h"
37907-
37908-/* Sanity check. */
37909-#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
37910-    || !defined(JEMALLOC_INTERNAL_FFS)
37911-#  error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
37912-#endif
37913-
37914-/*
37915- * Unlike the builtins and posix ffs functions, our ffs requires a non-zero
37916- * input, and returns the position of the lowest bit set (as opposed to the
37917- * posix versions, which return 1 larger than that position and use a return
37918- * value of zero as a sentinel).  This tends to simplify logic in callers, and
37919- * allows for consistency with the builtins we build fls on top of.
37920- */
37921-static inline unsigned
37922-ffs_llu(unsigned long long x) {
37923-	util_assume(x != 0);
37924-	return JEMALLOC_INTERNAL_FFSLL(x) - 1;
37925-}
37926-
37927-static inline unsigned
37928-ffs_lu(unsigned long x) {
37929-	util_assume(x != 0);
37930-	return JEMALLOC_INTERNAL_FFSL(x) - 1;
37931-}
37932-
37933-static inline unsigned
37934-ffs_u(unsigned x) {
37935-	util_assume(x != 0);
37936-	return JEMALLOC_INTERNAL_FFS(x) - 1;
37937-}
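/*
 * Standalone illustration of the 0-based contract described above (a sketch,
 * not part of the original header; demo_ffs_u is a hypothetical name and the
 * GCC/Clang __builtin_ffs is assumed).
 */
#if 0
#include <assert.h>
static unsigned demo_ffs_u(unsigned x) {
	/* Mirrors ffs_u(): the builtin is 1-based, so subtract 1. */
	return (unsigned)__builtin_ffs((int)x) - 1;
}
int main(void) {
	assert(demo_ffs_u(0x18u) == 3); /* 0b11000: lowest set bit is bit 3. */
	assert(demo_ffs_u(1u) == 0);    /* POSIX ffs() would return 1 here. */
	return 0;
}
#endif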
37938-
37939-#define DO_FLS_SLOW(x, suffix) do {					\
37940-	util_assume(x != 0);						\
37941-	x |= (x >> 1);							\
37942-	x |= (x >> 2);							\
37943-	x |= (x >> 4);							\
37944-	x |= (x >> 8);							\
37945-	x |= (x >> 16);							\
37946-	if (sizeof(x) > 4) {						\
37947-		/*							\
37948-		 * If sizeof(x) is 4, then the expression "x >> 32"	\
37949-		 * will generate compiler warnings even if the code	\
37950-		 * never executes.  This circumvents the warning, and	\
37951-		 * gets compiled out in optimized builds.		\
37952-		 */							\
37953-		int constant_32 = sizeof(x) * 4;			\
37954-		x |= (x >> constant_32);				\
37955-	}								\
37956-	x++;								\
37957-	if (x == 0) {							\
37958-		return 8 * sizeof(x) - 1;				\
37959-	}								\
37960-	return ffs_##suffix(x) - 1;					\
37961-} while(0)
37962-
37963-static inline unsigned
37964-fls_llu_slow(unsigned long long x) {
37965-	DO_FLS_SLOW(x, llu);
37966-}
37967-
37968-static inline unsigned
37969-fls_lu_slow(unsigned long x) {
37970-	DO_FLS_SLOW(x, lu);
37971-}
37972-
37973-static inline unsigned
37974-fls_u_slow(unsigned x) {
37975-	DO_FLS_SLOW(x, u);
37976-}
37977-
37978-#undef DO_FLS_SLOW
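/*
 * Worked example of the fallback above (a sketch, not part of the original
 * header): for x = 0x16 (0b10110), the shifts smear the highest set bit
 * downward to give 0x1f, x++ yields 0x20, and ffs_u(0x20) - 1 == 5 - 1 == 4,
 * the index of the highest set bit.  If the type's top bit was set, the
 * increment wraps to 0 and the early return reports 8 * sizeof(x) - 1.
 */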
37979-
37980-#ifdef JEMALLOC_HAVE_BUILTIN_CLZ
37981-static inline unsigned
37982-fls_llu(unsigned long long x) {
37983-	util_assume(x != 0);
37984-	/*
37985-	 * Note that the xor here is more naturally written as subtraction; the
37986-	 * last bit set is the number of bits in the type minus the number of
37987-	 * leading zero bits.  But GCC implements that as:
37988-	 *    bsr     edi, edi
37989-	 *    mov     eax, 31
37990-	 *    xor     edi, 31
37991-	 *    sub     eax, edi
37992-	 * If we write it as xor instead, then we get
37993-	 *    bsr     eax, edi
37994-	 * as desired.
37995-	 */
37996-	return (8 * sizeof(x) - 1) ^ __builtin_clzll(x);
37997-}
37998-
37999-static inline unsigned
38000-fls_lu(unsigned long x) {
38001-	util_assume(x != 0);
38002-	return (8 * sizeof(x) - 1) ^ __builtin_clzl(x);
38003-}
38004-
38005-static inline unsigned
38006-fls_u(unsigned x) {
38007-	util_assume(x != 0);
38008-	return (8 * sizeof(x) - 1) ^ __builtin_clz(x);
38009-}
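/*
 * Worked example of the identity above (a sketch, not part of the original
 * header): for 32-bit x = 0x18, __builtin_clz(x) == 27 and 31 ^ 27 ==
 * 31 - 27 == 4, the index of the highest set bit.  The xor matches the
 * subtraction because clz is at most 31, and 31 is all-ones in its low five
 * bits.
 */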
38010-#elif defined(_MSC_VER)
38011-
38012-#if LG_SIZEOF_PTR == 3
38013-#define DO_BSR64(bit, x) _BitScanReverse64(&bit, x)
38014-#else
38015-/*
38016- * This never actually runs; we're just dodging a compiler error for the
38017- * never-taken branch where sizeof(void *) == 8.
38018- */
38019-#define DO_BSR64(bit, x) bit = 0; unreachable()
38020-#endif
38021-
38022-#define DO_FLS(x) do {							\
38023-	if (x == 0) {							\
38024-		return 8 * sizeof(x);					\
38025-	}								\
38026-	unsigned long bit;						\
38027-	if (sizeof(x) == 4) {						\
38028-		_BitScanReverse(&bit, (unsigned)x);			\
38029-		return (unsigned)bit;					\
38030-	}								\
38031-	if (sizeof(x) == 8 && sizeof(void *) == 8) {			\
38032-		DO_BSR64(bit, x);					\
38033-		return (unsigned)bit;					\
38034-	}								\
38035-	if (sizeof(x) == 8 && sizeof(void *) == 4) {			\
38036-		/* Dodge a compiler warning, as above. */		\
38037-		int constant_32 = sizeof(x) * 4;			\
38038-		if (_BitScanReverse(&bit,				\
38039-		    (unsigned)(x >> constant_32))) {			\
38040-			return 32 + (unsigned)bit;			\
38041-		} else {						\
38042-			_BitScanReverse(&bit, (unsigned)x);		\
38043-			return (unsigned)bit;				\
38044-		}							\
38045-	}								\
38046-	unreachable();							\
38047-} while (0)
38048-
38049-static inline unsigned
38050-fls_llu(unsigned long long x) {
38051-	DO_FLS(x);
38052-}
38053-
38054-static inline unsigned
38055-fls_lu(unsigned long x) {
38056-	DO_FLS(x);
38057-}
38058-
38059-static inline unsigned
38060-fls_u(unsigned x) {
38061-	DO_FLS(x);
38062-}
38063-
38064-#undef DO_FLS
38065-#undef DO_BSR64
38066-#else
38067-
38068-static inline unsigned
38069-fls_llu(unsigned long long x) {
38070-	return fls_llu_slow(x);
38071-}
38072-
38073-static inline unsigned
38074-fls_lu(unsigned long x) {
38075-	return fls_lu_slow(x);
38076-}
38077-
38078-static inline unsigned
38079-fls_u(unsigned x) {
38080-	return fls_u_slow(x);
38081-}
38082-#endif
38083-
38084-#if LG_SIZEOF_LONG_LONG > 3
38085-#  error "Haven't implemented popcount for 16-byte ints."
38086-#endif
38087-
38088-#define DO_POPCOUNT(x, type) do {					\
38089-	/*								\
38090-	 * Algorithm from an old AMD optimization reference manual.	\
38091-	 * We're putting a little bit more work than you might expect	\
38092-	 * into the no-intrinsic case, since we only support the	\
38093-	 * GCC intrinsics spelling of popcount (for now).  Detecting	\
38094-	 * whether or not the popcount builtin is actually usable in	\
38095-	 * MSVC is nontrivial.						\
38096-	 */								\
38097-									\
38098-	type bmul = (type)0x0101010101010101ULL;			\
38099-									\
38100-	/*								\
38101-	 * Replace each 2 bits with the sideways sum of the original	\
38102-	 * values.  0x5 = 0b0101.					\
38103-	 *								\
38104-	 * You might expect this to be:					\
38105-	 *   x = (x & 0x55...) + ((x >> 1) & 0x55...).			\
38106-	 * That costs an extra mask relative to this, though.		\
38107-	 */								\
38108-	x = x - ((x >> 1) & (0x55U * bmul));				\
38109-	/* Replace each 4 bits with their sideways sum.  0x3 = 0b0011. */\
38110-	x = (x & (bmul * 0x33U)) + ((x >> 2) & (bmul * 0x33U));		\
38111-	/*								\
38112-	 * Replace each 8 bits with their sideways sum.  Note that we	\
38113-	 * can't overflow within each 4-bit sum here, so we can skip	\
38114-	 * the initial mask.						\
38115-	 */								\
38116-	x = (x + (x >> 4)) & (bmul * 0x0FU);				\
38117-	/*								\
38118-	 * None of the partial sums in this multiplication (viewed in	\
38119-	 * base-256) can overflow into the next digit.  So the least	\
38120-	 * significant byte of the product will be the least		\
38121-	 * significant byte of the original value, the second least	\
38122-	 * significant byte will be the sum of the two least		\
38123-	 * significant bytes of the original value, and so on.		\
38124-	 * Importantly, the high byte will be the byte-wise sum of all	\
38125-	 * the bytes of the original value.				\
38126-	 */								\
38127-	x = x * bmul;							\
38128-	x >>= ((sizeof(x) - 1) * 8);					\
38129-	return (unsigned)x;						\
38130-} while(0)
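/*
 * Editor's sketch (illustration only; the macro above is generic over the
 * type): the same sideways-sum steps specialized to uint32_t, so the
 * intermediate values are easy to trace.  For x == 0xF0F0F0F0 the steps
 * produce 0xA0A0A0A0, 0x40404040, 0x04040404, and finally a high byte of
 * 0x10 == 16, which is the popcount.
 */
#include <stdint.h>

static inline unsigned
popcount_u32_sketch(uint32_t x) {
	uint32_t bmul = 0x01010101U;
	x = x - ((x >> 1) & (0x55U * bmul));                     /* 2-bit sums */
	x = (x & (bmul * 0x33U)) + ((x >> 2) & (bmul * 0x33U));  /* 4-bit sums */
	x = (x + (x >> 4)) & (bmul * 0x0FU);                     /* per-byte sums */
	x = x * bmul;          /* high byte accumulates the byte-wise total */
	return (unsigned)(x >> 24);
}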
38131-
38132-static inline unsigned
38133-popcount_u_slow(unsigned bitmap) {
38134-	DO_POPCOUNT(bitmap, unsigned);
38135-}
38136-
38137-static inline unsigned
38138-popcount_lu_slow(unsigned long bitmap) {
38139-	DO_POPCOUNT(bitmap, unsigned long);
38140-}
38141-
38142-static inline unsigned
38143-popcount_llu_slow(unsigned long long bitmap) {
38144-	DO_POPCOUNT(bitmap, unsigned long long);
38145-}
38146-
38147-#undef DO_POPCOUNT
38148-
38149-static inline unsigned
38150-popcount_u(unsigned bitmap) {
38151-#ifdef JEMALLOC_INTERNAL_POPCOUNT
38152-	return JEMALLOC_INTERNAL_POPCOUNT(bitmap);
38153-#else
38154-	return popcount_u_slow(bitmap);
38155-#endif
38156-}
38157-
38158-static inline unsigned
38159-popcount_lu(unsigned long bitmap) {
38160-#ifdef JEMALLOC_INTERNAL_POPCOUNTL
38161-	return JEMALLOC_INTERNAL_POPCOUNTL(bitmap);
38162-#else
38163-	return popcount_lu_slow(bitmap);
38164-#endif
38165-}
38166-
38167-static inline unsigned
38168-popcount_llu(unsigned long long bitmap) {
38169-#ifdef JEMALLOC_INTERNAL_POPCOUNTLL
38170-	return JEMALLOC_INTERNAL_POPCOUNTLL(bitmap);
38171-#else
38172-	return popcount_llu_slow(bitmap);
38173-#endif
38174-}
38175-
38176-/*
38177- * Clears the lowest set bit in *bitmap and returns that bit's index.
38178- * *bitmap *must not* be 0.
38179- */
38180-
38181-static inline size_t
38182-cfs_lu(unsigned long* bitmap) {
38183-	util_assume(*bitmap != 0);
38184-	size_t bit = ffs_lu(*bitmap);
38185-	*bitmap ^= ZU(1) << bit;
38186-	return bit;
38187-}
38188-
38189-static inline unsigned
38190-ffs_zu(size_t x) {
38191-#if LG_SIZEOF_PTR == LG_SIZEOF_INT
38192-	return ffs_u(x);
38193-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
38194-	return ffs_lu(x);
38195-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
38196-	return ffs_llu(x);
38197-#else
38198-#error No implementation for size_t ffs()
38199-#endif
38200-}
38201-
38202-static inline unsigned
38203-fls_zu(size_t x) {
38204-#if LG_SIZEOF_PTR == LG_SIZEOF_INT
38205-	return fls_u(x);
38206-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
38207-	return fls_lu(x);
38208-#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
38209-	return fls_llu(x);
38210-#else
38211-#error No implementation for size_t fls()
38212-#endif
38213-}
38214-
38215-
38216-static inline unsigned
38217-ffs_u64(uint64_t x) {
38218-#if LG_SIZEOF_LONG == 3
38219-	return ffs_lu(x);
38220-#elif LG_SIZEOF_LONG_LONG == 3
38221-	return ffs_llu(x);
38222-#else
38223-#error No implementation for 64-bit ffs()
38224-#endif
38225-}
38226-
38227-static inline unsigned
38228-fls_u64(uint64_t x) {
38229-#if LG_SIZEOF_LONG == 3
38230-	return fls_lu(x);
38231-#elif LG_SIZEOF_LONG_LONG == 3
38232-	return fls_llu(x);
38233-#else
38234-#error No implementation for 64-bit fls()
38235-#endif
38236-}
38237-
38238-static inline unsigned
38239-ffs_u32(uint32_t x) {
38240-#if LG_SIZEOF_INT == 2
38241-	return ffs_u(x);
38242-#else
38243-#error No implementation for 32-bit ffs()
38244-#endif
38245-	return ffs_u(x);
38246-}
38247-
38248-static inline unsigned
38249-fls_u32(uint32_t x) {
38250-#if LG_SIZEOF_INT == 2
38251-	return fls_u(x);
38252-#else
38253-#error No implementation for 32-bit fls()
38254-#endif
38255-	return fls_u(x);
38256-}
38257-
38258-static inline uint64_t
38259-pow2_ceil_u64(uint64_t x) {
38260-	if (unlikely(x <= 1)) {
38261-		return x;
38262-	}
38263-	size_t msb_on_index = fls_u64(x - 1);
38264-	/*
38265-	 * Range-check; it's on the callers to ensure that the result of this
38266-	 * call won't overflow.
38267-	 */
38268-	assert(msb_on_index < 63);
38269-	return 1ULL << (msb_on_index + 1);
38270-}
38271-
38272-static inline uint32_t
38273-pow2_ceil_u32(uint32_t x) {
38274-	if (unlikely(x <= 1)) {
38275-	    return x;
38276-	}
38277-	size_t msb_on_index = fls_u32(x - 1);
38278-	/* As above. */
38279-	assert(msb_on_index < 31);
38280-	return 1U << (msb_on_index + 1);
38281-}
38282-
38283-/* Compute the smallest power of 2 that is >= x. */
38284-static inline size_t
38285-pow2_ceil_zu(size_t x) {
38286-#if (LG_SIZEOF_PTR == 3)
38287-	return pow2_ceil_u64(x);
38288-#else
38289-	return pow2_ceil_u32(x);
38290-#endif
38291-}
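/*
 * Editor's note (illustration only, assuming the functions above are in
 * scope): pow2_ceil_u32(5) computes fls_u32(4) == 2 and returns 1 << 3 == 8,
 * while exact powers of two come back unchanged.
 */
#include <assert.h>

static void
pow2_ceil_examples(void) {
	assert(pow2_ceil_u32(1) == 1);   /* early return for x <= 1 */
	assert(pow2_ceil_u32(5) == 8);   /* fls_u32(4) == 2, so 1 << 3 */
	assert(pow2_ceil_u32(8) == 8);   /* already a power of two */
	assert(pow2_ceil_u32(9) == 16);
}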
38292-
38293-static inline unsigned
38294-lg_floor(size_t x) {
38295-	util_assume(x != 0);
38296-#if (LG_SIZEOF_PTR == 3)
38297-	return fls_u64(x);
38298-#else
38299-	return fls_u32(x);
38300-#endif
38301-}
38302-
38303-static inline unsigned
38304-lg_ceil(size_t x) {
38305-	return lg_floor(x) + ((x & (x - 1)) == 0 ? 0 : 1);
38306-}
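/*
 * Editor's note (illustration only): lg_floor rounds log2 down, and lg_ceil
 * adds one exactly when x is not a power of two.
 */
#include <assert.h>

static void
lg_examples(void) {
	assert(lg_floor(8) == 3);
	assert(lg_floor(12) == 3);  /* floor(log2(12)) */
	assert(lg_ceil(8) == 3);    /* exact power of two: no rounding up */
	assert(lg_ceil(12) == 4);
}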
38307-
38308-/* Compile-time versions of lg_floor and lg_ceil. */
38309-#define LG_FLOOR_1(x) 0
38310-#define LG_FLOOR_2(x) (x < (1ULL << 1) ? LG_FLOOR_1(x) : 1 + LG_FLOOR_1(x >> 1))
38311-#define LG_FLOOR_4(x) (x < (1ULL << 2) ? LG_FLOOR_2(x) : 2 + LG_FLOOR_2(x >> 2))
38312-#define LG_FLOOR_8(x) (x < (1ULL << 4) ? LG_FLOOR_4(x) : 4 + LG_FLOOR_4(x >> 4))
38313-#define LG_FLOOR_16(x) (x < (1ULL << 8) ? LG_FLOOR_8(x) : 8 + LG_FLOOR_8(x >> 8))
38314-#define LG_FLOOR_32(x) (x < (1ULL << 16) ? LG_FLOOR_16(x) : 16 + LG_FLOOR_16(x >> 16))
38315-#define LG_FLOOR_64(x) (x < (1ULL << 32) ? LG_FLOOR_32(x) : 32 + LG_FLOOR_32(x >> 32))
38316-#if LG_SIZEOF_PTR == 2
38317-#  define LG_FLOOR(x) LG_FLOOR_32((x))
38318-#else
38319-#  define LG_FLOOR(x) LG_FLOOR_64((x))
38320-#endif
38321-
38322-#define LG_CEIL(x) (LG_FLOOR(x) + (((x) & ((x) - 1)) == 0 ? 0 : 1))
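/*
 * Editor's note (illustration only): because LG_FLOOR/LG_CEIL expand to
 * integer constant expressions, they can be used where a function call
 * cannot, e.g. to size arrays or feed other #defines.  EXAMPLE_NELEMS is a
 * hypothetical constant, not part of jemalloc.
 */
#define EXAMPLE_NELEMS 100
/* LG_CEIL(100) == 7, so this declares 8 entries, one per possible lg value. */
static char example_lg_table[LG_CEIL(EXAMPLE_NELEMS) + 1];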
38323-
38324-#endif /* JEMALLOC_INTERNAL_BIT_UTIL_H */
38325diff --git a/jemalloc/include/jemalloc/internal/bitmap.h b/jemalloc/include/jemalloc/internal/bitmap.h
38326deleted file mode 100644
38327index dc19454..0000000
38328--- a/jemalloc/include/jemalloc/internal/bitmap.h
38329+++ /dev/null
38330@@ -1,368 +0,0 @@
38331-#ifndef JEMALLOC_INTERNAL_BITMAP_H
38332-#define JEMALLOC_INTERNAL_BITMAP_H
38333-
38334-#include "jemalloc/internal/bit_util.h"
38335-#include "jemalloc/internal/sc.h"
38336-
38337-typedef unsigned long bitmap_t;
38338-#define LG_SIZEOF_BITMAP	LG_SIZEOF_LONG
38339-
38340-/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
38341-#if SC_LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
38342-/* Maximum bitmap bit count is determined by maximum regions per slab. */
38343-#  define LG_BITMAP_MAXBITS	SC_LG_SLAB_MAXREGS
38344-#else
38345-/* Maximum bitmap bit count is determined by number of extent size classes. */
38346-#  define LG_BITMAP_MAXBITS	LG_CEIL(SC_NSIZES)
38347-#endif
38348-#define BITMAP_MAXBITS		(ZU(1) << LG_BITMAP_MAXBITS)
38349-
38350-/* Number of bits per group. */
38351-#define LG_BITMAP_GROUP_NBITS		(LG_SIZEOF_BITMAP + 3)
38352-#define BITMAP_GROUP_NBITS		(1U << LG_BITMAP_GROUP_NBITS)
38353-#define BITMAP_GROUP_NBITS_MASK		(BITMAP_GROUP_NBITS-1)
38354-
38355-/*
38356- * Decide whether the bitmap is big enough to warrant a tree: if a brute-force
38357- * linear search would have to call ffs_lu() more than 2^3 times, use a tree
38358- * instead.
38359- */
38360-#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
38361-#  define BITMAP_USE_TREE
38362-#endif
38363-
38364-/* Number of groups required to store a given number of bits. */
38365-#define BITMAP_BITS2GROUPS(nbits)					\
38366-    (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
38367-
38368-/*
38369- * Number of groups required at a particular level for a given number of bits.
38370- */
38371-#define BITMAP_GROUPS_L0(nbits)						\
38372-    BITMAP_BITS2GROUPS(nbits)
38373-#define BITMAP_GROUPS_L1(nbits)						\
38374-    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits))
38375-#define BITMAP_GROUPS_L2(nbits)						\
38376-    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))
38377-#define BITMAP_GROUPS_L3(nbits)						\
38378-    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(		\
38379-	BITMAP_BITS2GROUPS((nbits)))))
38380-#define BITMAP_GROUPS_L4(nbits)						\
38381-    BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(		\
38382-	BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits))))))
38383-
38384-/*
38385- * Number of groups required for a given number of bits, assuming a particular
38386- * number of levels.
38387- */
38388-#define BITMAP_GROUPS_1_LEVEL(nbits)					\
38389-    BITMAP_GROUPS_L0(nbits)
38390-#define BITMAP_GROUPS_2_LEVEL(nbits)					\
38391-    (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits))
38392-#define BITMAP_GROUPS_3_LEVEL(nbits)					\
38393-    (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits))
38394-#define BITMAP_GROUPS_4_LEVEL(nbits)					\
38395-    (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits))
38396-#define BITMAP_GROUPS_5_LEVEL(nbits)					\
38397-    (BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits))
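/*
 * Editor's worked example, assuming a 64-bit bitmap_t (so
 * BITMAP_GROUP_NBITS == 64), for nbits == 512:
 *   BITMAP_GROUPS_L0(512) == (512 + 63) >> 6 == 8
 *   BITMAP_GROUPS_L1(512) == BITMAP_BITS2GROUPS(8) == 1
 *   BITMAP_GROUPS_2_LEVEL(512) == 8 + 1 == 9 groups in total
 */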
38398-
38399-/*
38400- * Maximum number of groups required to support LG_BITMAP_MAXBITS.
38401- */
38402-#ifdef BITMAP_USE_TREE
38403-
38404-#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS
38405-#  define BITMAP_GROUPS(nbits)	BITMAP_GROUPS_1_LEVEL(nbits)
38406-#  define BITMAP_GROUPS_MAX	BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS)
38407-#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2
38408-#  define BITMAP_GROUPS(nbits)	BITMAP_GROUPS_2_LEVEL(nbits)
38409-#  define BITMAP_GROUPS_MAX	BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS)
38410-#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3
38411-#  define BITMAP_GROUPS(nbits)	BITMAP_GROUPS_3_LEVEL(nbits)
38412-#  define BITMAP_GROUPS_MAX	BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS)
38413-#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4
38414-#  define BITMAP_GROUPS(nbits)	BITMAP_GROUPS_4_LEVEL(nbits)
38415-#  define BITMAP_GROUPS_MAX	BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS)
38416-#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5
38417-#  define BITMAP_GROUPS(nbits)	BITMAP_GROUPS_5_LEVEL(nbits)
38418-#  define BITMAP_GROUPS_MAX	BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS)
38419-#else
38420-#  error "Unsupported bitmap size"
38421-#endif
38422-
38423-/*
38424- * Maximum number of levels possible.  This could be statically computed based
38425- * on LG_BITMAP_MAXBITS:
38426- *
38427- * #define BITMAP_MAX_LEVELS \
38428- *     (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
38429- *     + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
38430- *
38431- * However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so
38432- * instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the
38433- * various cascading macros.  The only additional cost this incurs is some
38434- * unused trailing entries in bitmap_info_t structures; the bitmaps themselves
38435- * are not impacted.
38436- */
38437-#define BITMAP_MAX_LEVELS	5
38438-
38439-#define BITMAP_INFO_INITIALIZER(nbits) {				\
38440-	/* nbits. */							\
38441-	nbits,								\
38442-	/* nlevels. */							\
38443-	(BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) +		\
38444-	    (BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) +	\
38445-	    (BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) +	\
38446-	    (BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1,	\
38447-	/* levels. */							\
38448-	{								\
38449-		{0},							\
38450-		{BITMAP_GROUPS_L0(nbits)},				\
38451-		{BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)},	\
38452-		{BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) +	\
38453-		    BITMAP_GROUPS_L0(nbits)},				\
38454-		{BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) +	\
38455-		    BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)},	\
38456-		{BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) +	\
38457-		     BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits)	\
38458-		     + BITMAP_GROUPS_L0(nbits)}				\
38459-	}								\
38460-}
38461-
38462-#else /* BITMAP_USE_TREE */
38463-
38464-#define BITMAP_GROUPS(nbits)	BITMAP_BITS2GROUPS(nbits)
38465-#define BITMAP_GROUPS_MAX	BITMAP_BITS2GROUPS(BITMAP_MAXBITS)
38466-
38467-#define BITMAP_INFO_INITIALIZER(nbits) {				\
38468-	/* nbits. */							\
38469-	nbits,								\
38470-	/* ngroups. */							\
38471-	BITMAP_BITS2GROUPS(nbits)					\
38472-}
38473-
38474-#endif /* BITMAP_USE_TREE */
38475-
38476-typedef struct bitmap_level_s {
38477-	/* Offset of this level's groups within the array of groups. */
38478-	size_t group_offset;
38479-} bitmap_level_t;
38480-
38481-typedef struct bitmap_info_s {
38482-	/* Logical number of bits in bitmap (stored at bottom level). */
38483-	size_t nbits;
38484-
38485-#ifdef BITMAP_USE_TREE
38486-	/* Number of levels necessary for nbits. */
38487-	unsigned nlevels;
38488-
38489-	/*
38490-	 * Only the first (nlevels+1) elements are used, and levels are ordered
38491-	 * bottom to top (e.g. the bottom level is stored in levels[0]).
38492-	 */
38493-	bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
38494-#else /* BITMAP_USE_TREE */
38495-	/* Number of groups necessary for nbits. */
38496-	size_t ngroups;
38497-#endif /* BITMAP_USE_TREE */
38498-} bitmap_info_t;
38499-
38500-void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
38501-void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill);
38502-size_t bitmap_size(const bitmap_info_t *binfo);
38503-
38504-static inline bool
38505-bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
38506-#ifdef BITMAP_USE_TREE
38507-	size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
38508-	bitmap_t rg = bitmap[rgoff];
38509-	/* The bitmap is full iff the root group is 0. */
38510-	return (rg == 0);
38511-#else
38512-	size_t i;
38513-
38514-	for (i = 0; i < binfo->ngroups; i++) {
38515-		if (bitmap[i] != 0) {
38516-			return false;
38517-		}
38518-	}
38519-	return true;
38520-#endif
38521-}
38522-
38523-static inline bool
38524-bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
38525-	size_t goff;
38526-	bitmap_t g;
38527-
38528-	assert(bit < binfo->nbits);
38529-	goff = bit >> LG_BITMAP_GROUP_NBITS;
38530-	g = bitmap[goff];
38531-	return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
38532-}
38533-
38534-static inline void
38535-bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
38536-	size_t goff;
38537-	bitmap_t *gp;
38538-	bitmap_t g;
38539-
38540-	assert(bit < binfo->nbits);
38541-	assert(!bitmap_get(bitmap, binfo, bit));
38542-	goff = bit >> LG_BITMAP_GROUP_NBITS;
38543-	gp = &bitmap[goff];
38544-	g = *gp;
38545-	assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
38546-	g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
38547-	*gp = g;
38548-	assert(bitmap_get(bitmap, binfo, bit));
38549-#ifdef BITMAP_USE_TREE
38550-	/* Propagate group state transitions up the tree. */
38551-	if (g == 0) {
38552-		unsigned i;
38553-		for (i = 1; i < binfo->nlevels; i++) {
38554-			bit = goff;
38555-			goff = bit >> LG_BITMAP_GROUP_NBITS;
38556-			gp = &bitmap[binfo->levels[i].group_offset + goff];
38557-			g = *gp;
38558-			assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
38559-			g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
38560-			*gp = g;
38561-			if (g != 0) {
38562-				break;
38563-			}
38564-		}
38565-	}
38566-#endif
38567-}
38568-
38569-/* ffu: find first unset >= bit. */
38570-static inline size_t
38571-bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
38572-	assert(min_bit < binfo->nbits);
38573-
38574-#ifdef BITMAP_USE_TREE
38575-	size_t bit = 0;
38576-	for (unsigned level = binfo->nlevels; level--;) {
38577-		size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level +
38578-		    1));
38579-		bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit
38580-		    >> lg_bits_per_group)];
38581-		unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit -
38582-		    bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS));
38583-		assert(group_nmask <= BITMAP_GROUP_NBITS);
38584-		bitmap_t group_mask = ~((1LU << group_nmask) - 1);
38585-		bitmap_t group_masked = group & group_mask;
38586-		if (group_masked == 0LU) {
38587-			if (group == 0LU) {
38588-				return binfo->nbits;
38589-			}
38590-			/*
38591-			 * min_bit was preceded by one or more unset bits in
38592-			 * this group, but there are no other unset bits in this
38593-			 * group.  Try again starting at the first bit of the
38594-			 * next sibling.  This will recurse at most once per
38595-			 * non-root level.
38596-			 */
38597-			size_t sib_base = bit + (ZU(1) << lg_bits_per_group);
38598-			assert(sib_base > min_bit);
38599-			assert(sib_base > bit);
38600-			if (sib_base >= binfo->nbits) {
38601-				return binfo->nbits;
38602-			}
38603-			return bitmap_ffu(bitmap, binfo, sib_base);
38604-		}
38605-		bit += ((size_t)ffs_lu(group_masked)) <<
38606-		    (lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
38607-	}
38608-	assert(bit >= min_bit);
38609-	assert(bit < binfo->nbits);
38610-	return bit;
38611-#else
38612-	size_t i = min_bit >> LG_BITMAP_GROUP_NBITS;
38613-	bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK))
38614-	    - 1);
38615-	size_t bit;
38616-	do {
38617-		if (g != 0) {
38618-			bit = ffs_lu(g);
38619-			return (i << LG_BITMAP_GROUP_NBITS) + bit;
38620-		}
38621-		i++;
38622-		g = bitmap[i];
38623-	} while (i < binfo->ngroups);
38624-	return binfo->nbits;
38625-#endif
38626-}
38627-
38628-/* sfu: set first unset. */
38629-static inline size_t
38630-bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
38631-	size_t bit;
38632-	bitmap_t g;
38633-	unsigned i;
38634-
38635-	assert(!bitmap_full(bitmap, binfo));
38636-
38637-#ifdef BITMAP_USE_TREE
38638-	i = binfo->nlevels - 1;
38639-	g = bitmap[binfo->levels[i].group_offset];
38640-	bit = ffs_lu(g);
38641-	while (i > 0) {
38642-		i--;
38643-		g = bitmap[binfo->levels[i].group_offset + bit];
38644-		bit = (bit << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
38645-	}
38646-#else
38647-	i = 0;
38648-	g = bitmap[0];
38649-	while (g == 0) {
38650-		i++;
38651-		g = bitmap[i];
38652-	}
38653-	bit = (i << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
38654-#endif
38655-	bitmap_set(bitmap, binfo, bit);
38656-	return bit;
38657-}
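/*
 * Editor's usage sketch (illustration only; assumes the jemalloc internal
 * headers are available): allocate exactly bitmap_size() bytes of groups,
 * claim the first free slot with bitmap_sfu(), and release it with
 * bitmap_unset().
 */
#include <assert.h>
#include <stdlib.h>

static void
bitmap_usage_sketch(void) {
	bitmap_info_t binfo;
	bitmap_info_init(&binfo, 100);
	bitmap_t *groups = (bitmap_t *)malloc(bitmap_size(&binfo));
	if (groups == NULL) {
		return;
	}
	bitmap_init(groups, &binfo, /* fill */ false);  /* all 100 slots free */
	size_t slot = bitmap_sfu(groups, &binfo);       /* claims slot 0 */
	assert(slot == 0);
	assert(bitmap_get(groups, &binfo, slot));
	bitmap_unset(groups, &binfo, slot);             /* slot 0 is free again */
	free(groups);
}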
38658-
38659-static inline void
38660-bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
38661-	size_t goff;
38662-	bitmap_t *gp;
38663-	bitmap_t g;
38664-	UNUSED bool propagate;
38665-
38666-	assert(bit < binfo->nbits);
38667-	assert(bitmap_get(bitmap, binfo, bit));
38668-	goff = bit >> LG_BITMAP_GROUP_NBITS;
38669-	gp = &bitmap[goff];
38670-	g = *gp;
38671-	propagate = (g == 0);
38672-	assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
38673-	g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
38674-	*gp = g;
38675-	assert(!bitmap_get(bitmap, binfo, bit));
38676-#ifdef BITMAP_USE_TREE
38677-	/* Propagate group state transitions up the tree. */
38678-	if (propagate) {
38679-		unsigned i;
38680-		for (i = 1; i < binfo->nlevels; i++) {
38681-			bit = goff;
38682-			goff = bit >> LG_BITMAP_GROUP_NBITS;
38683-			gp = &bitmap[binfo->levels[i].group_offset + goff];
38684-			g = *gp;
38685-			propagate = (g == 0);
38686-			assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
38687-			    == 0);
38688-			g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
38689-			*gp = g;
38690-			if (!propagate) {
38691-				break;
38692-			}
38693-		}
38694-	}
38695-#endif /* BITMAP_USE_TREE */
38696-}
38697-
38698-#endif /* JEMALLOC_INTERNAL_BITMAP_H */
38699diff --git a/jemalloc/include/jemalloc/internal/buf_writer.h b/jemalloc/include/jemalloc/internal/buf_writer.h
38700deleted file mode 100644
38701index 37aa6de..0000000
38702--- a/jemalloc/include/jemalloc/internal/buf_writer.h
38703+++ /dev/null
38704@@ -1,32 +0,0 @@
38705-#ifndef JEMALLOC_INTERNAL_BUF_WRITER_H
38706-#define JEMALLOC_INTERNAL_BUF_WRITER_H
38707-
38708-/*
38709- * Note: when using the buffered writer, cbopaque is passed to write_cb only
38710- * when the buffer is flushed.  This would matter if cbopaque pointed to
38711- * something that changes between write_cb calls, or to something that affects
38712- * write_cb in a way that depends on the content of the output string.
38713- * In practice, however, the typical use case is that cbopaque points to some
38714- * "option-like" configuration for write_cb, so it doesn't matter.
38715- */
38716-
38717-typedef struct {
38718-	write_cb_t *write_cb;
38719-	void *cbopaque;
38720-	char *buf;
38721-	size_t buf_size;
38722-	size_t buf_end;
38723-	bool internal_buf;
38724-} buf_writer_t;
38725-
38726-bool buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer,
38727-    write_cb_t *write_cb, void *cbopaque, char *buf, size_t buf_len);
38728-void buf_writer_flush(buf_writer_t *buf_writer);
38729-write_cb_t buf_writer_cb;
38730-void buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer);
38731-
38732-typedef ssize_t (read_cb_t)(void *read_cbopaque, void *buf, size_t limit);
38733-void buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb,
38734-    void *read_cbopaque);
38735-
38736-#endif /* JEMALLOC_INTERNAL_BUF_WRITER_H */
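/*
 * Editor's usage sketch (illustration only; my_write_cb, the stderr target,
 * and the 4 KiB buffer size are arbitrary choices, not part of jemalloc):
 * wrap a write callback with a stack buffer so many small writes get flushed
 * in larger chunks, then hand buf_writer_cb and the buf_writer_t to code that
 * expects a (write_cb, cbopaque) pair.
 */
#include <stdio.h>

static void
my_write_cb(void *cbopaque, const char *s) {
	fputs(s, (FILE *)cbopaque);
}

static void
buf_writer_sketch(tsdn_t *tsdn) {
	char buf[4096];
	buf_writer_t bw;
	/* Assumed (usual jemalloc convention): a true return indicates failure. */
	if (buf_writer_init(tsdn, &bw, my_write_cb, stderr, buf, sizeof(buf))) {
		return;
	}
	buf_writer_cb(&bw, "hello ");
	buf_writer_cb(&bw, "world\n");
	buf_writer_terminate(tsdn, &bw);  /* flushes any remaining buffered data */
}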
38737diff --git a/jemalloc/include/jemalloc/internal/cache_bin.h b/jemalloc/include/jemalloc/internal/cache_bin.h
38738deleted file mode 100644
38739index caf5be3..0000000
38740--- a/jemalloc/include/jemalloc/internal/cache_bin.h
38741+++ /dev/null
38742@@ -1,670 +0,0 @@
38743-#ifndef JEMALLOC_INTERNAL_CACHE_BIN_H
38744-#define JEMALLOC_INTERNAL_CACHE_BIN_H
38745-
38746-#include "jemalloc/internal/ql.h"
38747-#include "jemalloc/internal/sz.h"
38748-
38749-/*
38750- * The cache_bins are the mechanism that the tcache and the arena use to
38751- * communicate.  The tcache fills from and flushes to the arena by passing a
38752- * cache_bin_t to fill/flush.  When the arena needs to pull stats from the
38753- * tcaches associated with it, it does so by iterating over its
38754- * cache_bin_array_descriptor_t objects and reading out per-bin stats it
38755- * contains.  This makes it so that the arena need not know about the existence
38756- * of the tcache at all.
38757- */
38758-
38759-/*
38760- * The type used for the size in bytes of each cache bin stack; we also use it
38761- * to indicate *counts* of individual objects.
38762- */
38763-typedef uint16_t cache_bin_sz_t;
38764-
38765-/*
38766- * Leave a noticeable mark pattern on the cache bin stack boundaries, in case a
38767- * bug starts leaking those.  Make it look like the junk pattern but be distinct
38768- * from it.
38769- */
38770-static const uintptr_t cache_bin_preceding_junk =
38771-    (uintptr_t)0x7a7a7a7a7a7a7a7aULL;
38772-/* Note: a7 vs. 7a above -- this tells you which pointer leaked. */
38773-static const uintptr_t cache_bin_trailing_junk =
38774-    (uintptr_t)0xa7a7a7a7a7a7a7a7ULL;
38775-
38776-/*
38777- * That implies the following value for the maximum number of items in any
38778- * individual bin.  The cache bins track their bounds by looking only at the
38779- * low bits of a pointer, compared against a cache_bin_sz_t.  So that's
38780- *   1 << (sizeof(cache_bin_sz_t) * 8)
38781- * bytes spread across pointer-sized objects to get the maximum.
38782- */
38783-#define CACHE_BIN_NCACHED_MAX (((size_t)1 << sizeof(cache_bin_sz_t) * 8) \
38784-    / sizeof(void *) - 1)
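/*
 * Editor's worked example, assuming 8-byte pointers:
 *   1 << (sizeof(cache_bin_sz_t) * 8)  == 65536 bytes of stack
 *   65536 / sizeof(void *)             == 8192 pointer slots
 *   8192 - 1                           == 8191 == CACHE_BIN_NCACHED_MAX
 * (one slot stays reserved; see the note in cache_bin_alloc_impl below).
 */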
38785-
38786-/*
38787- * This lives inside the cache_bin (for locality reasons), and is initialized
38788- * alongside it, but is otherwise not modified by any cache bin operations.
38789- * It's logically public and maintained by its callers.
38790- */
38791-typedef struct cache_bin_stats_s cache_bin_stats_t;
38792-struct cache_bin_stats_s {
38793-	/*
38794-	 * Number of allocation requests that corresponded to the size of this
38795-	 * bin.
38796-	 */
38797-	uint64_t nrequests;
38798-};
38799-
38800-/*
38801- * Read-only information associated with each element of tcache_t's tbins array
38802- * is stored separately, mainly to reduce memory usage.
38803- */
38804-typedef struct cache_bin_info_s cache_bin_info_t;
38805-struct cache_bin_info_s {
38806-	cache_bin_sz_t ncached_max;
38807-};
38808-
38809-/*
38810- * Responsible for caching allocations associated with a single size.
38811- *
38812- * Several pointers are used to track the stack.  To save on metadata bytes,
38813- * only the stack_head is a full sized pointer (which is dereferenced on the
38814- * fastpath), while the others store only the low 16 bits -- this is correct
38815- * because a single stack never takes more space than 2^16 bytes, and at the
38816- * same time only equality checks are performed on the low bits.
38817- *
38818- * (low addr)                                                  (high addr)
38819- * |------stashed------|------available------|------cached-----|
38820- * ^                   ^                     ^                 ^
38821- * low_bound(derived)  low_bits_full         stack_head        low_bits_empty
38822- */
38823-typedef struct cache_bin_s cache_bin_t;
38824-struct cache_bin_s {
38825-	/*
38826-	 * The stack grows down.  Whenever the bin is nonempty, the head points
38827-	 * to an array entry containing a valid allocation.  When it is empty,
38828-	 * the head points to one element past the owned array.
38829-	 */
38830-	void **stack_head;
38831-	/*
38832-	 * stack_head and tstats are both modified frequently.  Keep them close
38833-	 * so that they have a higher chance of being on the same cacheline,
38834-	 * and thus fewer write-backs.
38835-	 */
38836-	cache_bin_stats_t tstats;
38837-
38838-	/*
38839-	 * The low bits of the address of the first item in the stack that
38840-	 * hasn't been used since the last GC, to track the low water mark (min
38841-	 * # of cached items).
38842-	 *
38843-	 * Since the stack grows down, this is a higher address than
38844-	 * low_bits_full.
38845-	 */
38846-	uint16_t low_bits_low_water;
38847-
38848-	/*
38849-	 * The low bits of the value that stack_head will take on when the array
38850-	 * is full (of cached & stashed items).  But remember that stack_head
38851-	 * always points to a valid item when the array is nonempty -- this is
38852-	 * in the array.
38853-	 *
38854-	 * Recall that since the stack grows down, this is the lowest available
38855-	 * address in the array for caching.  Only adjusted when stashing items.
38856-	 */
38857-	uint16_t low_bits_full;
38858-
38859-	/*
38860-	 * The low bits of the value that stack_head will take on when the array
38861-	 * is empty.
38862-	 *
38863-	 * The stack grows down -- this is one past the highest address in the
38864-	 * array.  Immutable after initialization.
38865-	 */
38866-	uint16_t low_bits_empty;
38867-};
38868-
38869-/*
38870- * The cache_bins live inside the tcache, but the arena (by design) isn't
38871- * supposed to know much about tcache internals.  To let the arena iterate over
38872- * associated bins, we keep (with the tcache) a linked list of
38873- * cache_bin_array_descriptor_ts that tell the arena how to find the bins.
38874- */
38875-typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t;
38876-struct cache_bin_array_descriptor_s {
38877-	/*
38878-	 * The arena keeps a list of the cache bins associated with it, for
38879-	 * stats collection.
38880-	 */
38881-	ql_elm(cache_bin_array_descriptor_t) link;
38882-	/* Pointers to the tcache bins. */
38883-	cache_bin_t *bins;
38884-};
38885-
38886-static inline void
38887-cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
38888-    cache_bin_t *bins) {
38889-	ql_elm_new(descriptor, link);
38890-	descriptor->bins = bins;
38891-}
38892-
38893-JEMALLOC_ALWAYS_INLINE bool
38894-cache_bin_nonfast_aligned(const void *ptr) {
38895-	if (!config_uaf_detection) {
38896-		return false;
38897-	}
38898-	/*
38899-	 * Currently we use alignment to decide which pointer to junk & stash on
38900-	 * dealloc (for catching use-after-free).  In some common cases a
38901-	 * page-aligned check is needed already (sdalloc w/ config_prof), so we
38902-	 * are getting it more or less for free -- no added instructions on
38903-	 * free_fastpath.
38904-	 *
38905-	 * Another way of deciding which pointer to sample, is adding another
38906-	 * thread_event to pick one every N bytes.  That also adds no cost on
38907-	 * the fastpath, however it will tend to pick large allocations which is
38908-	 * not the desired behavior.
38909-	 */
38910-	return ((uintptr_t)ptr & san_cache_bin_nonfast_mask) == 0;
38911-}
38912-
38913-/* Returns ncached_max: Upper limit on ncached. */
38914-static inline cache_bin_sz_t
38915-cache_bin_info_ncached_max(cache_bin_info_t *info) {
38916-	return info->ncached_max;
38917-}
38918-
38919-/*
38920- * Internal.
38921- *
38922- * Asserts that the pointer associated with earlier is <= the one associated
38923- * with later.
38924- */
38925-static inline void
38926-cache_bin_assert_earlier(cache_bin_t *bin, uint16_t earlier, uint16_t later) {
38927-	if (earlier > later) {
38928-		assert(bin->low_bits_full > bin->low_bits_empty);
38929-	}
38930-}
38931-
38932-/*
38933- * Internal.
38934- *
38935- * Does difference calculations that handle wraparound correctly.  Earlier must
38936- * be associated with the position earlier in memory.
38937- */
38938-static inline uint16_t
38939-cache_bin_diff(cache_bin_t *bin, uint16_t earlier, uint16_t later, bool racy) {
38940-	/*
38941-	 * When it's racy, bin->low_bits_full can be modified concurrently. It
38942-	 * can cross the uint16_t max value and become less than
38943-	 * bin->low_bits_empty at the time of the check.
38944-	 */
38945-	if (!racy) {
38946-		cache_bin_assert_earlier(bin, earlier, later);
38947-	}
38948-	return later - earlier;
38949-}
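/*
 * Editor's worked example of the wraparound case (illustration only): if the
 * backing array happens to straddle a 64 KiB alignment boundary, the low 16
 * bits wrap, but plain uint16_t subtraction still yields the right distance:
 *   earlier == 0xFFF0, later == 0x0010
 *   (uint16_t)(0x0010 - 0xFFF0) == 0x0020
 * i.e. 32 bytes == 4 pointer-sized slots apart (with 8-byte pointers), with
 * no special casing required.
 */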
38950-
38951-/*
38952- * Number of items currently cached in the bin, without checking ncached_max.
38953- * The caller must specify whether the request is racy (i.e. whether concurrent
38954- * modifications are possible).
38955- */
38956-static inline cache_bin_sz_t
38957-cache_bin_ncached_get_internal(cache_bin_t *bin, bool racy) {
38958-	cache_bin_sz_t diff = cache_bin_diff(bin,
38959-	    (uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty, racy);
38960-	cache_bin_sz_t n = diff / sizeof(void *);
38961-	/*
38962-	 * We have undefined behavior here; if this function is called from the
38963-	 * arena stats updating code, then stack_head could change from the
38964-	 * first line to the next one.  Morally, these loads should be atomic,
38965-	 * but compilers won't currently generate comparisons with in-memory
38966-	 * operands against atomics, and these variables get accessed on the
38967-	 * fast paths.  This should still be "safe" in the sense of generating
38968-	 * the correct assembly for the foreseeable future, though.
38969-	 */
38970-	assert(n == 0 || *(bin->stack_head) != NULL || racy);
38971-	return n;
38972-}
38973-
38974-/*
38975- * Number of items currently cached in the bin, with checking ncached_max.  The
38976- * caller must know that no concurrent modification of the cache_bin is
38977- * possible.
38978- */
38979-static inline cache_bin_sz_t
38980-cache_bin_ncached_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
38981-	cache_bin_sz_t n = cache_bin_ncached_get_internal(bin,
38982-	    /* racy */ false);
38983-	assert(n <= cache_bin_info_ncached_max(info));
38984-	return n;
38985-}
38986-
38987-/*
38988- * Internal.
38989- *
38990- * A pointer to the position one past the end of the backing array.
38991- *
38992- * Do not call if racy, because both 'bin->stack_head' and 'bin->low_bits_full'
38993- * are subject to concurrent modifications.
38994- */
38995-static inline void **
38996-cache_bin_empty_position_get(cache_bin_t *bin) {
38997-	cache_bin_sz_t diff = cache_bin_diff(bin,
38998-	    (uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty,
38999-	    /* racy */ false);
39000-	uintptr_t empty_bits = (uintptr_t)bin->stack_head + diff;
39001-	void **ret = (void **)empty_bits;
39002-
39003-	assert(ret >= bin->stack_head);
39004-
39005-	return ret;
39006-}
39007-
39008-/*
39009- * Internal.
39010- *
39011- * Calculates low bits of the lower bound of the usable cache bin's range (see
39012- * cache_bin_t visual representation above).
39013- *
39014- * No values are concurrently modified, so it should be safe to read in a
39015- * multithreaded environment.  Currently, concurrent access happens only during
39016- * arena statistics collection.
39017- */
39018-static inline uint16_t
39019-cache_bin_low_bits_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
39020-	return (uint16_t)bin->low_bits_empty -
39021-	    info->ncached_max * sizeof(void *);
39022-}
39023-
39024-/*
39025- * Internal.
39026- *
39027- * A pointer to the position with the lowest address of the backing array.
39028- */
39029-static inline void **
39030-cache_bin_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
39031-	cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info);
39032-	void **ret = cache_bin_empty_position_get(bin) - ncached_max;
39033-	assert(ret <= bin->stack_head);
39034-
39035-	return ret;
39036-}
39037-
39038-/*
39039- * As the name implies.  This is important since it's not correct to try to
39040- * batch fill a nonempty cache bin.
39041- */
39042-static inline void
39043-cache_bin_assert_empty(cache_bin_t *bin, cache_bin_info_t *info) {
39044-	assert(cache_bin_ncached_get_local(bin, info) == 0);
39045-	assert(cache_bin_empty_position_get(bin) == bin->stack_head);
39046-}
39047-
39048-/*
39049- * Get low water, but without any of the correctness checking we do for the
39050- * caller-usable version, if we are temporarily breaking invariants (like
39051- * ncached >= low_water during flush).
39052- */
39053-static inline cache_bin_sz_t
39054-cache_bin_low_water_get_internal(cache_bin_t *bin) {
39055-	return cache_bin_diff(bin, bin->low_bits_low_water,
39056-	    bin->low_bits_empty, /* racy */ false) / sizeof(void *);
39057-}
39058-
39059-/* Returns the numeric value of low water in [0, ncached]. */
39060-static inline cache_bin_sz_t
39061-cache_bin_low_water_get(cache_bin_t *bin, cache_bin_info_t *info) {
39062-	cache_bin_sz_t low_water = cache_bin_low_water_get_internal(bin);
39063-	assert(low_water <= cache_bin_info_ncached_max(info));
39064-	assert(low_water <= cache_bin_ncached_get_local(bin, info));
39065-
39066-	cache_bin_assert_earlier(bin, (uint16_t)(uintptr_t)bin->stack_head,
39067-	    bin->low_bits_low_water);
39068-
39069-	return low_water;
39070-}
39071-
39072-/*
39073- * Indicates that the current cache bin position should be the low water mark
39074- * going forward.
39075- */
39076-static inline void
39077-cache_bin_low_water_set(cache_bin_t *bin) {
39078-	bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head;
39079-}
39080-
39081-static inline void
39082-cache_bin_low_water_adjust(cache_bin_t *bin) {
39083-	if (cache_bin_ncached_get_internal(bin, /* racy */ false)
39084-	    < cache_bin_low_water_get_internal(bin)) {
39085-		cache_bin_low_water_set(bin);
39086-	}
39087-}
39088-
39089-JEMALLOC_ALWAYS_INLINE void *
39090-cache_bin_alloc_impl(cache_bin_t *bin, bool *success, bool adjust_low_water) {
39091-	/*
39092-	 * success (instead of ret) should be checked upon the return of this
39093-	 * function.  We avoid checking (ret == NULL) because there is never a
39094-	 * null stored on the avail stack (which is unknown to the compiler),
39095-	 * and eagerly checking ret would cause pipeline stall (waiting for the
39096-	 * cacheline).
39097-	 */
39098-
39099-	/*
39100-	 * This may read from the empty position; however the loaded value won't
39101-	 * be used.  It's safe because the stack has one more slot reserved.
39102-	 */
39103-	void *ret = *bin->stack_head;
39104-	uint16_t low_bits = (uint16_t)(uintptr_t)bin->stack_head;
39105-	void **new_head = bin->stack_head + 1;
39106-
39107-	/*
39108-	 * Note that the low water mark is at most empty; if we pass this check,
39109-	 * we know we're non-empty.
39110-	 */
39111-	if (likely(low_bits != bin->low_bits_low_water)) {
39112-		bin->stack_head = new_head;
39113-		*success = true;
39114-		return ret;
39115-	}
39116-	if (!adjust_low_water) {
39117-		*success = false;
39118-		return NULL;
39119-	}
39120-	/*
39121-	 * In the fast-path case where we call alloc_easy and then alloc, the
39122-	 * previous checking and computation is optimized away -- we didn't
39123-	 * actually commit any of our operations.
39124-	 */
39125-	if (likely(low_bits != bin->low_bits_empty)) {
39126-		bin->stack_head = new_head;
39127-		bin->low_bits_low_water = (uint16_t)(uintptr_t)new_head;
39128-		*success = true;
39129-		return ret;
39130-	}
39131-	*success = false;
39132-	return NULL;
39133-}
39134-
39135-/*
39136- * Allocate an item out of the bin, failing if we're at the low-water mark.
39137- */
39138-JEMALLOC_ALWAYS_INLINE void *
39139-cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
39140-	/* We don't look at info if we're not adjusting low-water. */
39141-	return cache_bin_alloc_impl(bin, success, false);
39142-}
39143-
39144-/*
39145- * Allocate an item out of the bin, even if we're currently at the low-water
39146- * mark (and failing only if the bin is empty).
39147- */
39148-JEMALLOC_ALWAYS_INLINE void *
39149-cache_bin_alloc(cache_bin_t *bin, bool *success) {
39150-	return cache_bin_alloc_impl(bin, success, true);
39151-}
39152-
39153-JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
39154-cache_bin_alloc_batch(cache_bin_t *bin, size_t num, void **out) {
39155-	cache_bin_sz_t n = cache_bin_ncached_get_internal(bin,
39156-	    /* racy */ false);
39157-	if (n > num) {
39158-		n = (cache_bin_sz_t)num;
39159-	}
39160-	memcpy(out, bin->stack_head, n * sizeof(void *));
39161-	bin->stack_head += n;
39162-	cache_bin_low_water_adjust(bin);
39163-
39164-	return n;
39165-}
39166-
39167-JEMALLOC_ALWAYS_INLINE bool
39168-cache_bin_full(cache_bin_t *bin) {
39169-	return ((uint16_t)(uintptr_t)bin->stack_head == bin->low_bits_full);
39170-}
39171-
39172-/*
39173- * Free an object into the given bin.  Fails only if the bin is full.
39174- */
39175-JEMALLOC_ALWAYS_INLINE bool
39176-cache_bin_dalloc_easy(cache_bin_t *bin, void *ptr) {
39177-	if (unlikely(cache_bin_full(bin))) {
39178-		return false;
39179-	}
39180-
39181-	bin->stack_head--;
39182-	*bin->stack_head = ptr;
39183-	cache_bin_assert_earlier(bin, bin->low_bits_full,
39184-	    (uint16_t)(uintptr_t)bin->stack_head);
39185-
39186-	return true;
39187-}
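/*
 * Editor's usage sketch (illustration only): try the low-water-respecting
 * allocation path first, fall back to the unconditional one, and return the
 * object with cache_bin_dalloc_easy (which fails only when the bin is full).
 */
static void
cache_bin_usage_sketch(cache_bin_t *bin) {
	bool success;
	void *ptr = cache_bin_alloc_easy(bin, &success);  /* stops at low water */
	if (!success) {
		ptr = cache_bin_alloc(bin, &success);     /* fails only if empty */
	}
	if (!success) {
		return;  /* bin empty; a real caller would refill from the arena */
	}
	/* ... use ptr ... */
	if (!cache_bin_dalloc_easy(bin, ptr)) {
		/* Bin full; a real caller would flush to the arena instead. */
	}
}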
39188-
39189-/* Returns false if the stash failed (i.e. the bin is full). */
39190-JEMALLOC_ALWAYS_INLINE bool
39191-cache_bin_stash(cache_bin_t *bin, void *ptr) {
39192-	if (cache_bin_full(bin)) {
39193-		return false;
39194-	}
39195-
39196-	/* Stash at the full position, in the [full, head) range. */
39197-	uint16_t low_bits_head = (uint16_t)(uintptr_t)bin->stack_head;
39198-	/* Wraparound handled as well. */
39199-	uint16_t diff = cache_bin_diff(bin, bin->low_bits_full, low_bits_head,
39200-	    /* racy */ false);
39201-	*(void **)((uintptr_t)bin->stack_head - diff) = ptr;
39202-
39203-	assert(!cache_bin_full(bin));
39204-	bin->low_bits_full += sizeof(void *);
39205-	cache_bin_assert_earlier(bin, bin->low_bits_full, low_bits_head);
39206-
39207-	return true;
39208-}
39209-
39210-/*
39211- * Get the number of stashed pointers.
39212- *
39213- * When called from a thread not owning the TLS (i.e. racy = true), it's
39214- * important to keep in mind that 'bin->stack_head' and 'bin->low_bits_full' can
39215- * be modified concurrently and almost none assertions about their values can be
39216- * made.
39217- */
39218-JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
39219-cache_bin_nstashed_get_internal(cache_bin_t *bin, cache_bin_info_t *info,
39220-    bool racy) {
39221-	cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info);
39222-	uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin,
39223-	    info);
39224-
39225-	cache_bin_sz_t n = cache_bin_diff(bin, low_bits_low_bound,
39226-	    bin->low_bits_full, racy) / sizeof(void *);
39227-	assert(n <= ncached_max);
39228-
39229-	if (!racy) {
39230-		/* Below are for assertions only. */
39231-		void **low_bound = cache_bin_low_bound_get(bin, info);
39232-
39233-		assert((uint16_t)(uintptr_t)low_bound == low_bits_low_bound);
39234-		void *stashed = *(low_bound + n - 1);
39235-		bool aligned = cache_bin_nonfast_aligned(stashed);
39236-#ifdef JEMALLOC_JET
39237-		/* Allow arbitrary pointers to be stashed in tests. */
39238-		aligned = true;
39239-#endif
39240-		assert(n == 0 || (stashed != NULL && aligned));
39241-	}
39242-
39243-	return n;
39244-}
39245-
39246-JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
39247-cache_bin_nstashed_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
39248-	cache_bin_sz_t n = cache_bin_nstashed_get_internal(bin, info,
39249-	    /* racy */ false);
39250-	assert(n <= cache_bin_info_ncached_max(info));
39251-	return n;
39252-}
39253-
39254-/*
39255- * Obtain a racy view of the number of items currently in the cache bin, in the
39256- * presence of possible concurrent modifications.
39257- */
39258-static inline void
39259-cache_bin_nitems_get_remote(cache_bin_t *bin, cache_bin_info_t *info,
39260-    cache_bin_sz_t *ncached, cache_bin_sz_t *nstashed) {
39261-	cache_bin_sz_t n = cache_bin_ncached_get_internal(bin, /* racy */ true);
39262-	assert(n <= cache_bin_info_ncached_max(info));
39263-	*ncached = n;
39264-
39265-	n = cache_bin_nstashed_get_internal(bin, info, /* racy */ true);
39266-	assert(n <= cache_bin_info_ncached_max(info));
39267-	*nstashed = n;
39268-	/* Note that cannot assert ncached + nstashed <= ncached_max (racy). */
39269-}
39270-
39271-/*
39272- * Filling and flushing are done in batch, on arrays of void *s.  For filling,
39273- * the arrays go forward, and can be accessed with ordinary array arithmetic.
39274- * For flushing, we work from the end backwards, and so need to use special
39275- * accessors that invert the usual ordering.
39276- *
39277- * This is important for maintaining first-fit; the arena code fills with
39278- * earliest objects first, and so those are the ones we should return first for
39279- * cache_bin_alloc calls.  When flushing, we should flush the objects that we
39280- * wish to return later; those at the end of the array.  This is better for the
39281- * first-fit heuristic as well as for cache locality; the most recently freed
39282- * objects are the ones most likely to still be in cache.
39283- *
39284- * This all sounds very hand-wavey and theoretical, but reverting the ordering
39285- * on one or the other pathway leads to measurable slowdowns.
39286- */
39287-
39288-typedef struct cache_bin_ptr_array_s cache_bin_ptr_array_t;
39289-struct cache_bin_ptr_array_s {
39290-	cache_bin_sz_t n;
39291-	void **ptr;
39292-};
39293-
39294-/*
39295- * Declare a cache_bin_ptr_array_t sufficient for nval items.
39296- *
39297- * In the current implementation, this could be just part of a
39298- * cache_bin_ptr_array_init_... call, since we reuse the cache bin stack memory.
39299- * Indirecting behind a macro, though, means experimenting with linked-list
39300- * representations is easy (since they'll require an alloca in the calling
39301- * frame).
39302- */
39303-#define CACHE_BIN_PTR_ARRAY_DECLARE(name, nval)				\
39304-    cache_bin_ptr_array_t name;						\
39305-    name.n = (nval)
39306-
39307-/*
39308- * Start a fill.  The bin must be empty, and This must be followed by a
39309- * finish_fill call before doing any alloc/dalloc operations on the bin.
39310- */
39311-static inline void
39312-cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_info_t *info,
39313-    cache_bin_ptr_array_t *arr, cache_bin_sz_t nfill) {
39314-	cache_bin_assert_empty(bin, info);
39315-	arr->ptr = cache_bin_empty_position_get(bin) - nfill;
39316-}
39317-
39318-/*
39319- * While nfill in cache_bin_init_ptr_array_for_fill is the number we *intend* to
39320- * fill, nfilled here is the number we actually filled (which may be less, in
39321- * case of OOM).
39322- */
39323-static inline void
39324-cache_bin_finish_fill(cache_bin_t *bin, cache_bin_info_t *info,
39325-    cache_bin_ptr_array_t *arr, cache_bin_sz_t nfilled) {
39326-	cache_bin_assert_empty(bin, info);
39327-	void **empty_position = cache_bin_empty_position_get(bin);
39328-	if (nfilled < arr->n) {
39329-		memmove(empty_position - nfilled, empty_position - arr->n,
39330-		    nfilled * sizeof(void *));
39331-	}
39332-	bin->stack_head = empty_position - nfilled;
39333-}
39334-
39335-/*
39336- * Same deal, but with flush.  Unlike fill (which can fail), the user must flush
39337- * everything we give them.
39338- */
39339-static inline void
39340-cache_bin_init_ptr_array_for_flush(cache_bin_t *bin, cache_bin_info_t *info,
39341-    cache_bin_ptr_array_t *arr, cache_bin_sz_t nflush) {
39342-	arr->ptr = cache_bin_empty_position_get(bin) - nflush;
39343-	assert(cache_bin_ncached_get_local(bin, info) == 0
39344-	    || *arr->ptr != NULL);
39345-}
39346-
39347-static inline void
39348-cache_bin_finish_flush(cache_bin_t *bin, cache_bin_info_t *info,
39349-    cache_bin_ptr_array_t *arr, cache_bin_sz_t nflushed) {
39350-	unsigned rem = cache_bin_ncached_get_local(bin, info) - nflushed;
39351-	memmove(bin->stack_head + nflushed, bin->stack_head,
39352-	    rem * sizeof(void *));
39353-	bin->stack_head = bin->stack_head + nflushed;
39354-	cache_bin_low_water_adjust(bin);
39355-}
39356-
39357-static inline void
39358-cache_bin_init_ptr_array_for_stashed(cache_bin_t *bin, szind_t binind,
39359-    cache_bin_info_t *info, cache_bin_ptr_array_t *arr,
39360-    cache_bin_sz_t nstashed) {
39361-	assert(nstashed > 0);
39362-	assert(cache_bin_nstashed_get_local(bin, info) == nstashed);
39363-
39364-	void **low_bound = cache_bin_low_bound_get(bin, info);
39365-	arr->ptr = low_bound;
39366-	assert(*arr->ptr != NULL);
39367-}
39368-
39369-static inline void
39370-cache_bin_finish_flush_stashed(cache_bin_t *bin, cache_bin_info_t *info) {
39371-	void **low_bound = cache_bin_low_bound_get(bin, info);
39372-
39373-	/* Reset the bin local full position. */
39374-	bin->low_bits_full = (uint16_t)(uintptr_t)low_bound;
39375-	assert(cache_bin_nstashed_get_local(bin, info) == 0);
39376-}
39377-
39378-/*
39379- * Initialize a cache_bin_info to represent up to the given number of items in
39380- * the cache_bins it is associated with.
39381- */
39382-void cache_bin_info_init(cache_bin_info_t *bin_info,
39383-    cache_bin_sz_t ncached_max);
39384-/*
39385- * Given an array of initialized cache_bin_info_ts, determine how big an
39386- * allocation is required to initialize a full set of cache_bin_ts.
39387- */
39388-void cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
39389-    size_t *size, size_t *alignment);
39390-
39391-/*
39392- * Actually initialize some cache bins.  Callers should allocate the backing
39393- * memory indicated by a call to cache_bin_info_compute_alloc.  They should then
39394- * preincrement, call init once for each bin and info, and then call
39395- * cache_bin_postincrement.  *alloc_cur will then point immediately past the end
39396- * of the allocation.
39397- */
39398-void cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos,
39399-    void *alloc, size_t *cur_offset);
39400-void cache_bin_postincrement(cache_bin_info_t *infos, szind_t ninfos,
39401-    void *alloc, size_t *cur_offset);
39402-void cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
39403-    size_t *cur_offset);
39404-
39405-/*
39406- * If a cache bin was zero initialized (either because it lives in static or
39407- * thread-local storage, or was memset to 0), this function indicates whether or
39408- * not cache_bin_init was called on it.
39409- */
39410-bool cache_bin_still_zero_initialized(cache_bin_t *bin);
39411-
39412-#endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */
39413diff --git a/jemalloc/include/jemalloc/internal/ckh.h b/jemalloc/include/jemalloc/internal/ckh.h
39414deleted file mode 100644
39415index 7b3850b..0000000
39416--- a/jemalloc/include/jemalloc/internal/ckh.h
39417+++ /dev/null
39418@@ -1,101 +0,0 @@
39419-#ifndef JEMALLOC_INTERNAL_CKH_H
39420-#define JEMALLOC_INTERNAL_CKH_H
39421-
39422-#include "jemalloc/internal/tsd.h"
39423-
39424-/* Cuckoo hashing implementation.  Skip to the end for the interface. */
39425-
39426-/******************************************************************************/
39427-/* INTERNAL DEFINITIONS -- IGNORE */
39428-/******************************************************************************/
39429-
39430-/* Maintain counters used to get an idea of performance. */
39431-/* #define CKH_COUNT */
39432-/* Print counter values in ckh_delete() (requires CKH_COUNT). */
39433-/* #define CKH_VERBOSE */
39434-
39435-/*
39436- * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket.  Try to fit
39437- * one bucket per L1 cache line.
39438- */
39439-#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
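/*
 * Editor's worked example, assuming a 64-byte cache line (LG_CACHELINE == 6)
 * and 8-byte pointers (LG_SIZEOF_PTR == 3):
 *   LG_CKH_BUCKET_CELLS == 6 - 3 - 1 == 2, i.e. 4 cells per bucket.
 * Each ckhc_t cell holds two pointers (key, data) == 16 bytes, so a bucket is
 * 4 * 16 == 64 bytes -- exactly one cache line.
 */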
39440-
39441-/* Typedefs to allow easy function pointer passing. */
39442-typedef void ckh_hash_t (const void *, size_t[2]);
39443-typedef bool ckh_keycomp_t (const void *, const void *);
39444-
39445-/* Hash table cell. */
39446-typedef struct {
39447-	const void *key;
39448-	const void *data;
39449-} ckhc_t;
39450-
39451-/* The hash table itself. */
39452-typedef struct {
39453-#ifdef CKH_COUNT
39454-	/* Counters used to get an idea of performance. */
39455-	uint64_t ngrows;
39456-	uint64_t nshrinks;
39457-	uint64_t nshrinkfails;
39458-	uint64_t ninserts;
39459-	uint64_t nrelocs;
39460-#endif
39461-
39462-	/* Used for pseudo-random number generation. */
39463-	uint64_t prng_state;
39464-
39465-	/* Total number of items. */
39466-	size_t count;
39467-
39468-	/*
39469-	 * Minimum and current number of hash table buckets.  There are
39470-	 * 2^LG_CKH_BUCKET_CELLS cells per bucket.
39471-	 */
39472-	unsigned lg_minbuckets;
39473-	unsigned lg_curbuckets;
39474-
39475-	/* Hash and comparison functions. */
39476-	ckh_hash_t *hash;
39477-	ckh_keycomp_t *keycomp;
39478-
39479-	/* Hash table with 2^lg_curbuckets buckets. */
39480-	ckhc_t *tab;
39481-} ckh_t;
39482-
39483-/******************************************************************************/
39484-/* BEGIN PUBLIC API */
39485-/******************************************************************************/
39486-
39487-/* Lifetime management.  Minitems is the initial capacity. */
39488-bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
39489-    ckh_keycomp_t *keycomp);
39490-void ckh_delete(tsd_t *tsd, ckh_t *ckh);
39491-
39492-/* Get the number of elements in the set. */
39493-size_t ckh_count(ckh_t *ckh);
39494-
39495-/*
39496- * To iterate over the elements in the table, initialize *tabind to 0 and call
39497- * this function until it returns true.  Each call that returns false will
39498- * update *key and *data to the next element in the table, assuming the pointers
39499- * are non-NULL.
39500- */
39501-bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
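/*
 * Editor's iteration sketch following the contract above (illustration only;
 * assumes a populated ckh_t).
 */
static void
ckh_iterate_sketch(ckh_t *ckh) {
	size_t tabind = 0;
	void *key, *data;
	while (!ckh_iter(ckh, &tabind, &key, &data)) {
		/* Each false return has updated key/data to the next element. */
	}
}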
39502-
39503-/*
39504- * Basic hash table operations -- insert, removal, lookup.  For ckh_remove and
39505- * ckh_search, key or data can be NULL.  The hash-table only stores pointers to
39506- * the key and value, and doesn't do any lifetime management.
39507- */
39508-bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
39509-bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
39510-    void **data);
39511-bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
39512-
39513-/* Some useful hash and comparison functions for strings and pointers. */
39514-void ckh_string_hash(const void *key, size_t r_hash[2]);
39515-bool ckh_string_keycomp(const void *k1, const void *k2);
39516-void ckh_pointer_hash(const void *key, size_t r_hash[2]);
39517-bool ckh_pointer_keycomp(const void *k1, const void *k2);
39518-
39519-#endif /* JEMALLOC_INTERNAL_CKH_H */
39520diff --git a/jemalloc/include/jemalloc/internal/counter.h b/jemalloc/include/jemalloc/internal/counter.h
39521deleted file mode 100644
39522index 79abf06..0000000
39523--- a/jemalloc/include/jemalloc/internal/counter.h
39524+++ /dev/null
39525@@ -1,34 +0,0 @@
39526-#ifndef JEMALLOC_INTERNAL_COUNTER_H
39527-#define JEMALLOC_INTERNAL_COUNTER_H
39528-
39529-#include "jemalloc/internal/mutex.h"
39530-
39531-typedef struct counter_accum_s {
39532-	LOCKEDINT_MTX_DECLARE(mtx)
39533-	locked_u64_t accumbytes;
39534-	uint64_t interval;
39535-} counter_accum_t;
39536-
39537-JEMALLOC_ALWAYS_INLINE bool
39538-counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t bytes) {
39539-	uint64_t interval = counter->interval;
39540-	assert(interval > 0);
39541-	LOCKEDINT_MTX_LOCK(tsdn, counter->mtx);
39542-	/*
39543-	 * If events arrive fast enough (and/or if event handling is slow
39544-	 * enough), the accumulated bytes can overshoot the interval by several
39545-	 * multiples at once, coalescing multiple counter triggers into one.
39546-	 * This is an intentional mechanism that avoids rate-limiting allocation.
39547-	 */
39548-	bool overflow = locked_inc_mod_u64(tsdn, LOCKEDINT_MTX(counter->mtx),
39549-	    &counter->accumbytes, bytes, interval);
39550-	LOCKEDINT_MTX_UNLOCK(tsdn, counter->mtx);
39551-	return overflow;
39552-}
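/*
 * Editor's usage sketch (illustration only; the interval is configured at
 * init time, e.g. counter_accum_init(&counter, 1 << 20) for roughly one
 * trigger per 1 MiB accumulated): each accumulation reports whether the
 * interval was crossed, so the caller can run its periodic action.
 */
static void
counter_usage_sketch(tsdn_t *tsdn, counter_accum_t *counter, uint64_t nbytes) {
	if (counter_accum(tsdn, counter, nbytes)) {
		/* The configured interval was crossed; run the periodic action. */
	}
}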
39553-
39554-bool counter_accum_init(counter_accum_t *counter, uint64_t interval);
39555-void counter_prefork(tsdn_t *tsdn, counter_accum_t *counter);
39556-void counter_postfork_parent(tsdn_t *tsdn, counter_accum_t *counter);
39557-void counter_postfork_child(tsdn_t *tsdn, counter_accum_t *counter);
39558-
39559-#endif /* JEMALLOC_INTERNAL_COUNTER_H */
39560diff --git a/jemalloc/include/jemalloc/internal/ctl.h b/jemalloc/include/jemalloc/internal/ctl.h
39561deleted file mode 100644
39562index 63d27f8..0000000
39563--- a/jemalloc/include/jemalloc/internal/ctl.h
39564+++ /dev/null
39565@@ -1,159 +0,0 @@
39566-#ifndef JEMALLOC_INTERNAL_CTL_H
39567-#define JEMALLOC_INTERNAL_CTL_H
39568-
39569-#include "jemalloc/internal/jemalloc_internal_types.h"
39570-#include "jemalloc/internal/malloc_io.h"
39571-#include "jemalloc/internal/mutex_prof.h"
39572-#include "jemalloc/internal/ql.h"
39573-#include "jemalloc/internal/sc.h"
39574-#include "jemalloc/internal/stats.h"
39575-
39576-/* Maximum ctl tree depth. */
39577-#define CTL_MAX_DEPTH	7
39578-
39579-typedef struct ctl_node_s {
39580-	bool named;
39581-} ctl_node_t;
39582-
39583-typedef struct ctl_named_node_s {
39584-	ctl_node_t node;
39585-	const char *name;
39586-	/* If (nchildren == 0), this is a terminal node. */
39587-	size_t nchildren;
39588-	const ctl_node_t *children;
39589-	int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
39590-	    size_t);
39591-} ctl_named_node_t;
39592-
39593-typedef struct ctl_indexed_node_s {
39594-	struct ctl_node_s node;
39595-	const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
39596-	    size_t);
39597-} ctl_indexed_node_t;
39598-
39599-typedef struct ctl_arena_stats_s {
39600-	arena_stats_t astats;
39601-
39602-	/* Aggregate stats for small size classes, based on bin stats. */
39603-	size_t allocated_small;
39604-	uint64_t nmalloc_small;
39605-	uint64_t ndalloc_small;
39606-	uint64_t nrequests_small;
39607-	uint64_t nfills_small;
39608-	uint64_t nflushes_small;
39609-
39610-	bin_stats_data_t bstats[SC_NBINS];
39611-	arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
39612-	pac_estats_t estats[SC_NPSIZES];
39613-	hpa_shard_stats_t hpastats;
39614-	sec_stats_t secstats;
39615-} ctl_arena_stats_t;
39616-
39617-typedef struct ctl_stats_s {
39618-	size_t allocated;
39619-	size_t active;
39620-	size_t metadata;
39621-	size_t metadata_thp;
39622-	size_t resident;
39623-	size_t mapped;
39624-	size_t retained;
39625-
39626-	background_thread_stats_t background_thread;
39627-	mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes];
39628-} ctl_stats_t;
39629-
39630-typedef struct ctl_arena_s ctl_arena_t;
39631-struct ctl_arena_s {
39632-	unsigned arena_ind;
39633-	bool initialized;
39634-	ql_elm(ctl_arena_t) destroyed_link;
39635-
39636-	/* Basic stats, supported even if !config_stats. */
39637-	unsigned nthreads;
39638-	const char *dss;
39639-	ssize_t dirty_decay_ms;
39640-	ssize_t muzzy_decay_ms;
39641-	size_t pactive;
39642-	size_t pdirty;
39643-	size_t pmuzzy;
39644-
39645-	/* NULL if !config_stats. */
39646-	ctl_arena_stats_t *astats;
39647-};
39648-
39649-typedef struct ctl_arenas_s {
39650-	uint64_t epoch;
39651-	unsigned narenas;
39652-	ql_head(ctl_arena_t) destroyed;
39653-
39654-	/*
39655-	 * Element 0 corresponds to merged stats for extant arenas (accessed via
39656-	 * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
39657-	 * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the
39658-	 * remaining MALLOCX_ARENA_LIMIT elements correspond to arenas.
39659-	 */
39660-	ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT];
39661-} ctl_arenas_t;
39662-
39663-int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
39664-    void *newp, size_t newlen);
39665-int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);
39666-int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
39667-    size_t *oldlenp, void *newp, size_t newlen);
39668-int ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
39669-    size_t *miblenp);
39670-int ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
39671-    size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
39672-bool ctl_boot(void);
39673-void ctl_prefork(tsdn_t *tsdn);
39674-void ctl_postfork_parent(tsdn_t *tsdn);
39675-void ctl_postfork_child(tsdn_t *tsdn);
39676-void ctl_mtx_assert_held(tsdn_t *tsdn);
39677-
39678-#define xmallctl(name, oldp, oldlenp, newp, newlen) do {		\
39679-	if (je_mallctl(name, oldp, oldlenp, newp, newlen)		\
39680-	    != 0) {							\
39681-		malloc_printf(						\
39682-		    "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n",	\
39683-		    name);						\
39684-		abort();						\
39685-	}								\
39686-} while (0)
39687-
39688-#define xmallctlnametomib(name, mibp, miblenp) do {			\
39689-	if (je_mallctlnametomib(name, mibp, miblenp) != 0) {		\
39690-		malloc_printf("<jemalloc>: Failure in "			\
39691-		    "xmallctlnametomib(\"%s\", ...)\n", name);		\
39692-		abort();						\
39693-	}								\
39694-} while (0)
39695-
39696-#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do {	\
39697-	if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp,		\
39698-	    newlen) != 0) {						\
39699-		malloc_write(						\
39700-		    "<jemalloc>: Failure in xmallctlbymib()\n");	\
39701-		abort();						\
39702-	}								\
39703-} while (0)
39704-
39705-#define xmallctlmibnametomib(mib, miblen, name, miblenp) do {		\
39706-	if (ctl_mibnametomib(tsd_fetch(), mib, miblen, name, miblenp)	\
39707-	    != 0) {							\
39708-		malloc_write(						\
39709-		    "<jemalloc>: Failure in ctl_mibnametomib()\n");	\
39710-		abort();						\
39711-	}								\
39712-} while (0)
39713-
39714-#define xmallctlbymibname(mib, miblen, name, miblenp, oldp, oldlenp,	\
39715-    newp, newlen) do {							\
39716-	if (ctl_bymibname(tsd_fetch(), mib, miblen, name, miblenp,	\
39717-	    oldp, oldlenp, newp, newlen) != 0) {			\
39718-		malloc_write(						\
39719-		    "<jemalloc>: Failure in ctl_bymibname()\n");	\
39720-		abort();						\
39721-	}								\
39722-} while (0)
39723-
39724-#endif /* JEMALLOC_INTERNAL_CTL_H */
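
The ctl tree above implements the public mallctl namespace (CTL_MAX_DEPTH bounds how deep a resolved MIB can get), and the x* macros are simply abort-on-failure wrappers used internally. A hedged sketch of the public side, using jemalloc's documented API; depending on build configuration the symbols may carry a je_ prefix (e.g. je_mallctl).

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/* Advance the epoch so that stats.* reflect current state. */
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	size_t allocated;
	sz = sizeof(allocated);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
		printf("allocated: %zu bytes\n", allocated);
	}

	/* MIB form: resolve the name once, then query repeatedly by index. */
	size_t mib[4], miblen = sizeof(mib) / sizeof(mib[0]);
	if (mallctlnametomib("stats.allocated", mib, &miblen) == 0) {
		sz = sizeof(allocated);
		mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0);
	}
	return 0;
}
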
39725diff --git a/jemalloc/include/jemalloc/internal/decay.h b/jemalloc/include/jemalloc/internal/decay.h
39726deleted file mode 100644
39727index cf6a9d2..0000000
39728--- a/jemalloc/include/jemalloc/internal/decay.h
39729+++ /dev/null
39730@@ -1,186 +0,0 @@
39731-#ifndef JEMALLOC_INTERNAL_DECAY_H
39732-#define JEMALLOC_INTERNAL_DECAY_H
39733-
39734-#include "jemalloc/internal/smoothstep.h"
39735-
39736-#define DECAY_UNBOUNDED_TIME_TO_PURGE ((uint64_t)-1)
39737-
39738-/*
39739- * The decay_t computes the number of pages we should purge at any given time.
39740- * Page allocators inform a decay object when pages enter a decay-able state
39741- * (i.e. dirty or muzzy), and query it to determine how many pages should be
39742- * purged at any given time.
39743- *
39744- * This is mostly a single-threaded data structure and doesn't care about
39745- * synchronization at all; it's the caller's responsibility to manage their
39746- * synchronization on their own.  There are two exceptions:
39747- * 1) It's OK to racily call decay_ms_read (i.e. just the simplest state query).
39748- * 2) The mtx and purging fields live (and are initialized) here, but are
39749- *    logically owned by the page allocator.  This is just a convenience (since
39750- *    those fields would be duplicated for both the dirty and muzzy states
39751- *    otherwise).
39752- */
39753-typedef struct decay_s decay_t;
39754-struct decay_s {
39755-	/* Synchronizes all non-atomic fields. */
39756-	malloc_mutex_t mtx;
39757-	/*
39758-	 * True if a thread is currently purging the extents associated with
39759-	 * this decay structure.
39760-	 */
39761-	bool purging;
39762-	/*
39763-	 * Approximate time in milliseconds from the creation of a set of unused
39764-	 * dirty pages until an equivalent set of unused dirty pages is purged
39765-	 * and/or reused.
39766-	 */
39767-	atomic_zd_t time_ms;
39768-	/* time / SMOOTHSTEP_NSTEPS. */
39769-	nstime_t interval;
39770-	/*
39771-	 * Time at which the current decay interval logically started.  We do
39772-	 * not actually advance to a new epoch until sometime after it starts
39773-	 * because of scheduling and computation delays, and it is even possible
39774-	 * to completely skip epochs.  In all cases, during epoch advancement we
39775-	 * merge all relevant activity into the most recently recorded epoch.
39776-	 */
39777-	nstime_t epoch;
39778-	/* Deadline randomness generator. */
39779-	uint64_t jitter_state;
39780-	/*
39781-	 * Deadline for current epoch.  This is the sum of interval and per
39782-	 * epoch jitter which is a uniform random variable in [0..interval).
39783-	 * Epochs always advance by precise multiples of interval, but we
39784-	 * randomize the deadline to reduce the likelihood of arenas purging in
39785-	 * lockstep.
39786-	 */
39787-	nstime_t deadline;
39788-	/*
39789-	 * The number of pages we cap ourselves at in the current epoch, per
39790-	 * decay policies.  Updated on an epoch change.  After an epoch change,
39791-	 * the caller should take steps to try to purge down to this amount.
39792-	 */
39793-	size_t npages_limit;
39794-	/*
39795-	 * Number of unpurged pages at beginning of current epoch.  During epoch
39796-	 * advancement we use the delta between arena->decay_*.nunpurged and
39797-	 * ecache_npages_get(&arena->ecache_*) to determine how many dirty pages,
39798-	 * if any, were generated.
39799-	 */
39800-	size_t nunpurged;
39801-	/*
39802-	 * Trailing log of how many unused dirty pages were generated during
39803-	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
39804-	 * element is the most recent epoch.  Corresponding epoch times are
39805-	 * relative to epoch.
39806-	 *
39807-	 * Updated only on epoch advance, triggered by
39808-	 * decay_maybe_advance_epoch, below.
39809-	 */
39810-	size_t backlog[SMOOTHSTEP_NSTEPS];
39811-
39812-	/* Peak number of pages in associated extents.  Used for debug only. */
39813-	uint64_t ceil_npages;
39814-};
39815-
39816-/*
39817- * The current decay time setting.  This is the only public access to a decay_t
39818- * that's allowed without holding mtx.
39819- */
39820-static inline ssize_t
39821-decay_ms_read(const decay_t *decay) {
39822-	return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
39823-}
39824-
39825-/*
39826- * See the comment on the struct field -- the limit on pages we should allow in
39827- * this decay state this epoch.
39828- */
39829-static inline size_t
39830-decay_npages_limit_get(const decay_t *decay) {
39831-	return decay->npages_limit;
39832-}
39833-
39834-/* How many unused dirty pages were generated during the last epoch. */
39835-static inline size_t
39836-decay_epoch_npages_delta(const decay_t *decay) {
39837-	return decay->backlog[SMOOTHSTEP_NSTEPS - 1];
39838-}
39839-
39840-/*
39841- * Current epoch duration, in nanoseconds.  Given that new epochs are started
39842- * somewhat haphazardly, this is not necessarily exactly the time between any
39843- * two calls to decay_maybe_advance_epoch; see the comments on fields in the
39844- * decay_t.
39845- */
39846-static inline uint64_t
39847-decay_epoch_duration_ns(const decay_t *decay) {
39848-	return nstime_ns(&decay->interval);
39849-}
39850-
39851-static inline bool
39852-decay_immediately(const decay_t *decay) {
39853-	ssize_t decay_ms = decay_ms_read(decay);
39854-	return decay_ms == 0;
39855-}
39856-
39857-static inline bool
39858-decay_disabled(const decay_t *decay) {
39859-	ssize_t decay_ms = decay_ms_read(decay);
39860-	return decay_ms < 0;
39861-}
39862-
39863-/* Returns true if decay is enabled and done gradually. */
39864-static inline bool
39865-decay_gradually(const decay_t *decay) {
39866-	ssize_t decay_ms = decay_ms_read(decay);
39867-	return decay_ms > 0;
39868-}
39869-
39870-/*
39871- * Returns true if the passed in decay time setting is valid.
39872- * < -1 : invalid
39873- * -1   : never decay
39874- *  0   : decay immediately
39875- *  > 0 : some positive decay time, up to a maximum allowed value of
39876- *  NSTIME_SEC_MAX * 1000, which corresponds to decaying somewhere in the early
39877- *  27th century.  By that time, we expect to have implemented alternate purging
39878- *  strategies.
39879- */
39880-bool decay_ms_valid(ssize_t decay_ms);
39881-
39882-/*
39883- * As a precondition, the decay_t must be zeroed out (as if with memset).
39884- *
39885- * Returns true on error.
39886- */
39887-bool decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms);
39888-
39889-/*
39890- * Given an already-initialized decay_t, reinitialize it with the given decay
39891- * time.  The decay_t must have previously been initialized (and should not then
39892- * be zeroed).
39893- */
39894-void decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms);
39895-
39896-/*
39897- * Compute how many of 'npages_new' pages we would need to purge in 'time'.
39898- */
39899-uint64_t decay_npages_purge_in(decay_t *decay, nstime_t *time,
39900-    size_t npages_new);
39901-
39902-/* Returns true if the epoch advanced and there are pages to purge. */
39903-bool decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
39904-    size_t current_npages);
39905-
39906-/*
39907- * Calculates wait time until a number of pages in the interval
39908- * [0.5 * npages_threshold .. 1.5 * npages_threshold] should be purged.
39909- *
39910- * Returns number of nanoseconds or DECAY_UNBOUNDED_TIME_TO_PURGE in case of
39911- * indefinite wait.
39912- */
39913-uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current,
39914-    uint64_t npages_threshold);
39915-
39916-#endif /* JEMALLOC_INTERNAL_DECAY_H */
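
The decay_ms convention spelled out above (-1 never decays, 0 purges immediately, positive values decay gradually) is the same value the public dirty/muzzy decay controls accept. A hedged public-API sketch (symbols may be je_-prefixed):

#include <jemalloc/jemalloc.h>

static void
configure_decay(void) {
	/* Decay unused dirty pages gradually over roughly ten seconds. */
	ssize_t dirty_ms = 10 * 1000;
	mallctl("arenas.dirty_decay_ms", NULL, NULL, &dirty_ms,
	    sizeof(dirty_ms));

	/* Disable purging of muzzy pages entirely. */
	ssize_t muzzy_ms = -1;
	mallctl("arenas.muzzy_decay_ms", NULL, NULL, &muzzy_ms,
	    sizeof(muzzy_ms));
}
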
39917diff --git a/jemalloc/include/jemalloc/internal/div.h b/jemalloc/include/jemalloc/internal/div.h
39918deleted file mode 100644
39919index aebae93..0000000
39920--- a/jemalloc/include/jemalloc/internal/div.h
39921+++ /dev/null
39922@@ -1,41 +0,0 @@
39923-#ifndef JEMALLOC_INTERNAL_DIV_H
39924-#define JEMALLOC_INTERNAL_DIV_H
39925-
39926-#include "jemalloc/internal/assert.h"
39927-
39928-/*
39929- * This module does the division that computes the index of a region in a slab,
39930- * given its offset relative to the base.
39931- * That is, given a divisor d, an n = i * d (all integers), we'll return i.
39932- * We do some pre-computation to do this more quickly than a CPU division
39933- * instruction.
39934- * We bound n < 2^32, and don't support dividing by one.
39935- */
39936-
39937-typedef struct div_info_s div_info_t;
39938-struct div_info_s {
39939-	uint32_t magic;
39940-#ifdef JEMALLOC_DEBUG
39941-	size_t d;
39942-#endif
39943-};
39944-
39945-void div_init(div_info_t *div_info, size_t divisor);
39946-
39947-static inline size_t
39948-div_compute(div_info_t *div_info, size_t n) {
39949-	assert(n <= (uint32_t)-1);
39950-	/*
39951-	 * This generates, e.g. mov; imul; shr on x86-64. On a 32-bit machine,
39952-	 * the compilers I tried were all smart enough to turn this into the
39953-	 * appropriate "get the high 32 bits of the result of a multiply" (e.g.
39954-	 * mul; mov edx eax; on x86, umull on arm, etc.).
39955-	 */
39956-	size_t i = ((uint64_t)n * (uint64_t)div_info->magic) >> 32;
39957-#ifdef JEMALLOC_DEBUG
39958-	assert(i * div_info->d == n);
39959-#endif
39960-	return i;
39961-}
39962-
39963-#endif /* JEMALLOC_INTERNAL_DIV_H */
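
div_init() lives elsewhere, but the contract above pins down the precomputation: with magic = ceil(2^32 / d), the high 32 bits of n * magic equal n / d exactly for every n that is a multiple of d below 2^32 (d > 1). A standalone reconstruction under that assumption, for illustration only:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t
magic_for(uint32_t d) {
	uint64_t two_to_32 = (uint64_t)1 << 32;
	uint32_t magic = (uint32_t)(two_to_32 / d);
	if (two_to_32 % d != 0) {
		magic++;	/* round up to ceil(2^32 / d) */
	}
	return magic;
}

int
main(void) {
	uint32_t d = 224;	/* e.g. a small region size */
	uint32_t magic = magic_for(d);
	for (uint64_t i = 0; i < 100000; i++) {
		uint64_t n = i * d;
		/* The div_compute() step: high 32 bits of the product. */
		assert(((n * magic) >> 32) == i);
	}
	printf("magic for %u: %u\n", (unsigned)d, (unsigned)magic);
	return 0;
}

The bound n < 2^32 matters: writing magic = (2^32 + e) / d with 0 <= e < d, the product is i * 2^32 + i * e, and i * e stays below 2^32 precisely because i < 2^32 / d.
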
39964diff --git a/jemalloc/include/jemalloc/internal/ecache.h b/jemalloc/include/jemalloc/internal/ecache.h
39965deleted file mode 100644
39966index 71cae3e..0000000
39967--- a/jemalloc/include/jemalloc/internal/ecache.h
39968+++ /dev/null
39969@@ -1,55 +0,0 @@
39970-#ifndef JEMALLOC_INTERNAL_ECACHE_H
39971-#define JEMALLOC_INTERNAL_ECACHE_H
39972-
39973-#include "jemalloc/internal/eset.h"
39974-#include "jemalloc/internal/san.h"
39975-#include "jemalloc/internal/mutex.h"
39976-
39977-typedef struct ecache_s ecache_t;
39978-struct ecache_s {
39979-	malloc_mutex_t mtx;
39980-	eset_t eset;
39981-	eset_t guarded_eset;
39982-	/* All stored extents must be in the same state. */
39983-	extent_state_t state;
39984-	/* The index of the ehooks the ecache is associated with. */
39985-	unsigned ind;
39986-	/*
39987-	 * If true, delay coalescing until eviction; otherwise coalesce during
39988-	 * deallocation.
39989-	 */
39990-	bool delay_coalesce;
39991-};
39992-
39993-static inline size_t
39994-ecache_npages_get(ecache_t *ecache) {
39995-	return eset_npages_get(&ecache->eset) +
39996-	    eset_npages_get(&ecache->guarded_eset);
39997-}
39998-
39999-/* Get the number of extents in the given page size index. */
40000-static inline size_t
40001-ecache_nextents_get(ecache_t *ecache, pszind_t ind) {
40002-	return eset_nextents_get(&ecache->eset, ind) +
40003-	    eset_nextents_get(&ecache->guarded_eset, ind);
40004-}
40005-
40006-/* Get the sum total bytes of the extents in the given page size index. */
40007-static inline size_t
40008-ecache_nbytes_get(ecache_t *ecache, pszind_t ind) {
40009-	return eset_nbytes_get(&ecache->eset, ind) +
40010-	    eset_nbytes_get(&ecache->guarded_eset, ind);
40011-}
40012-
40013-static inline unsigned
40014-ecache_ind_get(ecache_t *ecache) {
40015-	return ecache->ind;
40016-}
40017-
40018-bool ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state,
40019-    unsigned ind, bool delay_coalesce);
40020-void ecache_prefork(tsdn_t *tsdn, ecache_t *ecache);
40021-void ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache);
40022-void ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache);
40023-
40024-#endif /* JEMALLOC_INTERNAL_ECACHE_H */
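
The getters above always sum the plain and guarded esets, so a consumer never has to treat guarded extents specially when reporting occupancy. A hypothetical debugging helper built only on those getters (malloc_printf and pszind_t come from the internal headers; the function itself is invented):

static void
ecache_dump(ecache_t *ecache, pszind_t npsizes /* e.g. SC_NPSIZES */) {
	malloc_printf("ecache %u: %zu cached pages\n",
	    ecache_ind_get(ecache), ecache_npages_get(ecache));
	for (pszind_t i = 0; i < npsizes; i++) {
		size_t n = ecache_nextents_get(ecache, i);
		if (n != 0) {
			malloc_printf("  pszind %u: %zu extents, %zu bytes\n",
			    (unsigned)i, n, ecache_nbytes_get(ecache, i));
		}
	}
}
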
40025diff --git a/jemalloc/include/jemalloc/internal/edata.h b/jemalloc/include/jemalloc/internal/edata.h
40026deleted file mode 100644
40027index af039ea..0000000
40028--- a/jemalloc/include/jemalloc/internal/edata.h
40029+++ /dev/null
40030@@ -1,698 +0,0 @@
40031-#ifndef JEMALLOC_INTERNAL_EDATA_H
40032-#define JEMALLOC_INTERNAL_EDATA_H
40033-
40034-#include "jemalloc/internal/atomic.h"
40035-#include "jemalloc/internal/bin_info.h"
40036-#include "jemalloc/internal/bit_util.h"
40037-#include "jemalloc/internal/hpdata.h"
40038-#include "jemalloc/internal/nstime.h"
40039-#include "jemalloc/internal/ph.h"
40040-#include "jemalloc/internal/ql.h"
40041-#include "jemalloc/internal/sc.h"
40042-#include "jemalloc/internal/slab_data.h"
40043-#include "jemalloc/internal/sz.h"
40044-#include "jemalloc/internal/typed_list.h"
40045-
40046-/*
40047- * sizeof(edata_t) is 128 bytes on 64-bit architectures.  Ensure the alignment
40048- * to free up the low bits in the rtree leaf.
40049- */
40050-#define EDATA_ALIGNMENT 128
40051-
40052-enum extent_state_e {
40053-	extent_state_active   = 0,
40054-	extent_state_dirty    = 1,
40055-	extent_state_muzzy    = 2,
40056-	extent_state_retained = 3,
40057-	extent_state_transition = 4, /* States below are intermediate. */
40058-	extent_state_merging = 5,
40059-	extent_state_max = 5 /* Sanity checking only. */
40060-};
40061-typedef enum extent_state_e extent_state_t;
40062-
40063-enum extent_head_state_e {
40064-	EXTENT_NOT_HEAD,
40065-	EXTENT_IS_HEAD   /* See comments in ehooks_default_merge_impl(). */
40066-};
40067-typedef enum extent_head_state_e extent_head_state_t;
40068-
40069-/*
40070- * Which implementation of the page allocator interface (PAI, defined in
40071- * pai.h) owns the given extent?
40072- */
40073-enum extent_pai_e {
40074-	EXTENT_PAI_PAC = 0,
40075-	EXTENT_PAI_HPA = 1
40076-};
40077-typedef enum extent_pai_e extent_pai_t;
40078-
40079-struct e_prof_info_s {
40080-	/* Time when this was allocated. */
40081-	nstime_t	e_prof_alloc_time;
40082-	/* Allocation request size. */
40083-	size_t		e_prof_alloc_size;
40084-	/* Points to a prof_tctx_t. */
40085-	atomic_p_t	e_prof_tctx;
40086-	/*
40087-	 * Points to a prof_recent_t for the allocation; NULL
40088-	 * means the recent allocation record no longer exists.
40089-	 * Protected by prof_recent_alloc_mtx.
40090-	 */
40091-	atomic_p_t	e_prof_recent_alloc;
40092-};
40093-typedef struct e_prof_info_s e_prof_info_t;
40094-
40095-/*
40096- * The information about a particular edata that lives in an emap.  Space is
40097- * more precious there (the information, plus the edata pointer, has to live in
40098- * a 64-bit word if we want to enable a packed representation).
40099- *
40100- * There are two things that are special about the information here:
40101- * - It's quicker to access.  You have one fewer pointer hop, since finding the
40102- *   edata_t associated with an item always requires accessing the rtree leaf in
40103- *   which this data is stored.
40104- * - It can be read unsynchronized, and without worrying about lifetime issues.
40105- */
40106-typedef struct edata_map_info_s edata_map_info_t;
40107-struct edata_map_info_s {
40108-	bool slab;
40109-	szind_t szind;
40110-};
40111-
40112-typedef struct edata_cmp_summary_s edata_cmp_summary_t;
40113-struct edata_cmp_summary_s {
40114-	uint64_t sn;
40115-	uintptr_t addr;
40116-};
40117-
40118-/* Extent (span of pages).  Use accessor functions for e_* fields. */
40119-typedef struct edata_s edata_t;
40120-ph_structs(edata_avail, edata_t);
40121-ph_structs(edata_heap, edata_t);
40122-struct edata_s {
40123-	/*
40124-	 * Bitfield containing several fields:
40125-	 *
40126-	 * a: arena_ind
40127-	 * b: slab
40128-	 * c: committed
40129-	 * p: pai
40130-	 * z: zeroed
40131-	 * g: guarded
40132-	 * t: state
40133-	 * i: szind
40134-	 * f: nfree
40135-	 * s: bin_shard
40136-	 *
40137-	 * 00000000 ... 0000ssss ssffffff ffffiiii iiiitttg zpcbaaaa aaaaaaaa
40138-	 *
40139-	 * arena_ind: Arena from which this extent came, or all 1 bits if
40140-	 *            unassociated.
40141-	 *
40142-	 * slab: The slab flag indicates whether the extent is used for a slab
40143-	 *       of small regions.  This helps differentiate small size classes,
40144-	 *       and it indicates whether interior pointers can be looked up via
40145-	 *       iealloc().
40146-	 *
40147-	 * committed: The committed flag indicates whether physical memory is
40148-	 *            committed to the extent, whether explicitly or implicitly
40149-	 *            as on a system that overcommits and satisfies physical
40150-	 *            memory needs on demand via soft page faults.
40151-	 *
40152-	 * pai: The pai flag is an extent_pai_t.
40153-	 *
40154-	 * zeroed: The zeroed flag is used by extent recycling code to track
40155-	 *         whether memory is zero-filled.
40156-	 *
40157-	 * guarded: The guarded flag is used by the sanitizer to track whether
40158-	 *          the extent has page guards around it.
40159-	 *
40160-	 * state: The state flag is an extent_state_t.
40161-	 *
40162-	 * szind: The szind flag indicates usable size class index for
40163-	 *        allocations residing in this extent, regardless of whether the
40164-	 *        extent is a slab.  Extent size and usable size often differ
40165-	 *        even for non-slabs, either due to sz_large_pad or promotion of
40166-	 *        sampled small regions.
40167-	 *
40168-	 * nfree: Number of free regions in slab.
40169-	 *
40170-	 * bin_shard: the shard of the bin from which this extent came.
40171-	 */
40172-	uint64_t		e_bits;
40173-#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
40174-
40175-#define EDATA_BITS_ARENA_WIDTH  MALLOCX_ARENA_BITS
40176-#define EDATA_BITS_ARENA_SHIFT  0
40177-#define EDATA_BITS_ARENA_MASK  MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT)
40178-
40179-#define EDATA_BITS_SLAB_WIDTH  1
40180-#define EDATA_BITS_SLAB_SHIFT  (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT)
40181-#define EDATA_BITS_SLAB_MASK  MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT)
40182-
40183-#define EDATA_BITS_COMMITTED_WIDTH  1
40184-#define EDATA_BITS_COMMITTED_SHIFT  (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
40185-#define EDATA_BITS_COMMITTED_MASK  MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)
40186-
40187-#define EDATA_BITS_PAI_WIDTH  1
40188-#define EDATA_BITS_PAI_SHIFT  (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
40189-#define EDATA_BITS_PAI_MASK  MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT)
40190-
40191-#define EDATA_BITS_ZEROED_WIDTH  1
40192-#define EDATA_BITS_ZEROED_SHIFT  (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT)
40193-#define EDATA_BITS_ZEROED_MASK  MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)
40194-
40195-#define EDATA_BITS_GUARDED_WIDTH  1
40196-#define EDATA_BITS_GUARDED_SHIFT  (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
40197-#define EDATA_BITS_GUARDED_MASK  MASK(EDATA_BITS_GUARDED_WIDTH, EDATA_BITS_GUARDED_SHIFT)
40198-
40199-#define EDATA_BITS_STATE_WIDTH  3
40200-#define EDATA_BITS_STATE_SHIFT  (EDATA_BITS_GUARDED_WIDTH + EDATA_BITS_GUARDED_SHIFT)
40201-#define EDATA_BITS_STATE_MASK  MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT)
40202-
40203-#define EDATA_BITS_SZIND_WIDTH  LG_CEIL(SC_NSIZES)
40204-#define EDATA_BITS_SZIND_SHIFT  (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT)
40205-#define EDATA_BITS_SZIND_MASK  MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT)
40206-
40207-#define EDATA_BITS_NFREE_WIDTH  (SC_LG_SLAB_MAXREGS + 1)
40208-#define EDATA_BITS_NFREE_SHIFT  (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT)
40209-#define EDATA_BITS_NFREE_MASK  MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT)
40210-
40211-#define EDATA_BITS_BINSHARD_WIDTH  6
40212-#define EDATA_BITS_BINSHARD_SHIFT  (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT)
40213-#define EDATA_BITS_BINSHARD_MASK  MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT)
40214-
40215-#define EDATA_BITS_IS_HEAD_WIDTH 1
40216-#define EDATA_BITS_IS_HEAD_SHIFT  (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT)
40217-#define EDATA_BITS_IS_HEAD_MASK  MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT)
40218-
40219-	/* Pointer to the extent that this structure is responsible for. */
40220-	void			*e_addr;
40221-
40222-	union {
40223-		/*
40224-		 * Extent size and serial number associated with the extent
40225-		 * structure (different than the serial number for the extent at
40226-		 * e_addr).
40227-		 *
40228-		 * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
40229-		 */
40230-		size_t			e_size_esn;
40231-	#define EDATA_SIZE_MASK	((size_t)~(PAGE-1))
40232-	#define EDATA_ESN_MASK		((size_t)PAGE-1)
40233-		/* Base extent size, which may not be a multiple of PAGE. */
40234-		size_t			e_bsize;
40235-	};
40236-
40237-	/*
40238-	 * If this edata is a user allocation from an HPA, it comes out of some
40239-	 * pageslab (we don't yet support hugepage allocations that don't fit
40240-	 * into pageslabs).  This tracks it.
40241-	 */
40242-	hpdata_t *e_ps;
40243-
40244-	/*
40245-	 * Serial number.  These are not necessarily unique; splitting an extent
40246-	 * results in two extents with the same serial number.
40247-	 */
40248-	uint64_t e_sn;
40249-
40250-	union {
40251-		/*
40252-		 * List linkage used when the edata_t is active; either in
40253-		 * arena's large allocations or bin_t's slabs_full.
40254-		 */
40255-		ql_elm(edata_t)	ql_link_active;
40256-		/*
40257-		 * Pairing heap linkage.  Used whenever the extent is inactive
40258-		 * (in the page allocators), or when it is active and in
40259-		 * slabs_nonfull, or when the edata_t is unassociated with an
40260-		 * extent and sitting in an edata_cache.
40261-		 */
40262-		union {
40263-			edata_heap_link_t heap_link;
40264-			edata_avail_link_t avail_link;
40265-		};
40266-	};
40267-
40268-	union {
40269-		/*
40270-		 * List linkage used when the extent is inactive:
40271-		 * - Stashed dirty extents
40272-		 * - Ecache LRU functionality.
40273-		 */
40274-		ql_elm(edata_t) ql_link_inactive;
40275-		/* Small region slab metadata. */
40276-		slab_data_t	e_slab_data;
40277-
40278-		/* Profiling data, used for large objects. */
40279-		e_prof_info_t	e_prof_info;
40280-	};
40281-};
40282-
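Every accessor below follows the same recipe against e_bits: mask then shift down to read; clear the field's mask and OR in the shifted value to write. A standalone toy version of that WIDTH/SHIFT/MASK scheme (the widths are demo values, not a claim about the real layout beyond what the comment above states):

#include <assert.h>
#include <stdint.h>

#define DEMO_MASK(width, shift) (((((uint64_t)0x1U) << (width)) - 1) << (shift))

#define DEMO_ARENA_WIDTH 12
#define DEMO_ARENA_SHIFT 0
#define DEMO_ARENA_MASK  DEMO_MASK(DEMO_ARENA_WIDTH, DEMO_ARENA_SHIFT)

#define DEMO_SLAB_WIDTH  1
#define DEMO_SLAB_SHIFT  (DEMO_ARENA_WIDTH + DEMO_ARENA_SHIFT)
#define DEMO_SLAB_MASK   DEMO_MASK(DEMO_SLAB_WIDTH, DEMO_SLAB_SHIFT)

int
main(void) {
	uint64_t bits = 0;

	/* Set arena_ind = 7 and slab = true, edata_*_set() style. */
	bits = (bits & ~DEMO_ARENA_MASK) | ((uint64_t)7 << DEMO_ARENA_SHIFT);
	bits = (bits & ~DEMO_SLAB_MASK) | ((uint64_t)1 << DEMO_SLAB_SHIFT);

	/* Read them back, edata_*_get() style. */
	assert(((bits & DEMO_ARENA_MASK) >> DEMO_ARENA_SHIFT) == 7);
	assert(((bits & DEMO_SLAB_MASK) >> DEMO_SLAB_SHIFT) == 1);
	return 0;
}
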
40283-TYPED_LIST(edata_list_active, edata_t, ql_link_active)
40284-TYPED_LIST(edata_list_inactive, edata_t, ql_link_inactive)
40285-
40286-static inline unsigned
40287-edata_arena_ind_get(const edata_t *edata) {
40288-	unsigned arena_ind = (unsigned)((edata->e_bits &
40289-	    EDATA_BITS_ARENA_MASK) >> EDATA_BITS_ARENA_SHIFT);
40290-	assert(arena_ind < MALLOCX_ARENA_LIMIT);
40291-
40292-	return arena_ind;
40293-}
40294-
40295-static inline szind_t
40296-edata_szind_get_maybe_invalid(const edata_t *edata) {
40297-	szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
40298-	    EDATA_BITS_SZIND_SHIFT);
40299-	assert(szind <= SC_NSIZES);
40300-	return szind;
40301-}
40302-
40303-static inline szind_t
40304-edata_szind_get(const edata_t *edata) {
40305-	szind_t szind = edata_szind_get_maybe_invalid(edata);
40306-	assert(szind < SC_NSIZES); /* Never call when "invalid". */
40307-	return szind;
40308-}
40309-
40310-static inline size_t
40311-edata_usize_get(const edata_t *edata) {
40312-	return sz_index2size(edata_szind_get(edata));
40313-}
40314-
40315-static inline unsigned
40316-edata_binshard_get(const edata_t *edata) {
40317-	unsigned binshard = (unsigned)((edata->e_bits &
40318-	    EDATA_BITS_BINSHARD_MASK) >> EDATA_BITS_BINSHARD_SHIFT);
40319-	assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
40320-	return binshard;
40321-}
40322-
40323-static inline uint64_t
40324-edata_sn_get(const edata_t *edata) {
40325-	return edata->e_sn;
40326-}
40327-
40328-static inline extent_state_t
40329-edata_state_get(const edata_t *edata) {
40330-	return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >>
40331-	    EDATA_BITS_STATE_SHIFT);
40332-}
40333-
40334-static inline bool
40335-edata_guarded_get(const edata_t *edata) {
40336-	return (bool)((edata->e_bits & EDATA_BITS_GUARDED_MASK) >>
40337-	    EDATA_BITS_GUARDED_SHIFT);
40338-}
40339-
40340-static inline bool
40341-edata_zeroed_get(const edata_t *edata) {
40342-	return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >>
40343-	    EDATA_BITS_ZEROED_SHIFT);
40344-}
40345-
40346-static inline bool
40347-edata_committed_get(const edata_t *edata) {
40348-	return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >>
40349-	    EDATA_BITS_COMMITTED_SHIFT);
40350-}
40351-
40352-static inline extent_pai_t
40353-edata_pai_get(const edata_t *edata) {
40354-	return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK) >>
40355-	    EDATA_BITS_PAI_SHIFT);
40356-}
40357-
40358-static inline bool
40359-edata_slab_get(const edata_t *edata) {
40360-	return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >>
40361-	    EDATA_BITS_SLAB_SHIFT);
40362-}
40363-
40364-static inline unsigned
40365-edata_nfree_get(const edata_t *edata) {
40366-	assert(edata_slab_get(edata));
40367-	return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >>
40368-	    EDATA_BITS_NFREE_SHIFT);
40369-}
40370-
40371-static inline void *
40372-edata_base_get(const edata_t *edata) {
40373-	assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
40374-	    !edata_slab_get(edata));
40375-	return PAGE_ADDR2BASE(edata->e_addr);
40376-}
40377-
40378-static inline void *
40379-edata_addr_get(const edata_t *edata) {
40380-	assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
40381-	    !edata_slab_get(edata));
40382-	return edata->e_addr;
40383-}
40384-
40385-static inline size_t
40386-edata_size_get(const edata_t *edata) {
40387-	return (edata->e_size_esn & EDATA_SIZE_MASK);
40388-}
40389-
40390-static inline size_t
40391-edata_esn_get(const edata_t *edata) {
40392-	return (edata->e_size_esn & EDATA_ESN_MASK);
40393-}
40394-
40395-static inline size_t
40396-edata_bsize_get(const edata_t *edata) {
40397-	return edata->e_bsize;
40398-}
40399-
40400-static inline hpdata_t *
40401-edata_ps_get(const edata_t *edata) {
40402-	assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
40403-	return edata->e_ps;
40404-}
40405-
40406-static inline void *
40407-edata_before_get(const edata_t *edata) {
40408-	return (void *)((uintptr_t)edata_base_get(edata) - PAGE);
40409-}
40410-
40411-static inline void *
40412-edata_last_get(const edata_t *edata) {
40413-	return (void *)((uintptr_t)edata_base_get(edata) +
40414-	    edata_size_get(edata) - PAGE);
40415-}
40416-
40417-static inline void *
40418-edata_past_get(const edata_t *edata) {
40419-	return (void *)((uintptr_t)edata_base_get(edata) +
40420-	    edata_size_get(edata));
40421-}
40422-
40423-static inline slab_data_t *
40424-edata_slab_data_get(edata_t *edata) {
40425-	assert(edata_slab_get(edata));
40426-	return &edata->e_slab_data;
40427-}
40428-
40429-static inline const slab_data_t *
40430-edata_slab_data_get_const(const edata_t *edata) {
40431-	assert(edata_slab_get(edata));
40432-	return &edata->e_slab_data;
40433-}
40434-
40435-static inline prof_tctx_t *
40436-edata_prof_tctx_get(const edata_t *edata) {
40437-	return (prof_tctx_t *)atomic_load_p(&edata->e_prof_info.e_prof_tctx,
40438-	    ATOMIC_ACQUIRE);
40439-}
40440-
40441-static inline const nstime_t *
40442-edata_prof_alloc_time_get(const edata_t *edata) {
40443-	return &edata->e_prof_info.e_prof_alloc_time;
40444-}
40445-
40446-static inline size_t
40447-edata_prof_alloc_size_get(const edata_t *edata) {
40448-	return edata->e_prof_info.e_prof_alloc_size;
40449-}
40450-
40451-static inline prof_recent_t *
40452-edata_prof_recent_alloc_get_dont_call_directly(const edata_t *edata) {
40453-	return (prof_recent_t *)atomic_load_p(
40454-	    &edata->e_prof_info.e_prof_recent_alloc, ATOMIC_RELAXED);
40455-}
40456-
40457-static inline void
40458-edata_arena_ind_set(edata_t *edata, unsigned arena_ind) {
40459-	edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) |
40460-	    ((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT);
40461-}
40462-
40463-static inline void
40464-edata_binshard_set(edata_t *edata, unsigned binshard) {
40465-	/* The assertion assumes szind is set already. */
40466-	assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
40467-	edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) |
40468-	    ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT);
40469-}
40470-
40471-static inline void
40472-edata_addr_set(edata_t *edata, void *addr) {
40473-	edata->e_addr = addr;
40474-}
40475-
40476-static inline void
40477-edata_size_set(edata_t *edata, size_t size) {
40478-	assert((size & ~EDATA_SIZE_MASK) == 0);
40479-	edata->e_size_esn = size | (edata->e_size_esn & ~EDATA_SIZE_MASK);
40480-}
40481-
40482-static inline void
40483-edata_esn_set(edata_t *edata, size_t esn) {
40484-	edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) | (esn &
40485-	    EDATA_ESN_MASK);
40486-}
40487-
40488-static inline void
40489-edata_bsize_set(edata_t *edata, size_t bsize) {
40490-	edata->e_bsize = bsize;
40491-}
40492-
40493-static inline void
40494-edata_ps_set(edata_t *edata, hpdata_t *ps) {
40495-	assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
40496-	edata->e_ps = ps;
40497-}
40498-
40499-static inline void
40500-edata_szind_set(edata_t *edata, szind_t szind) {
40501-	assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
40502-	edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) |
40503-	    ((uint64_t)szind << EDATA_BITS_SZIND_SHIFT);
40504-}
40505-
40506-static inline void
40507-edata_nfree_set(edata_t *edata, unsigned nfree) {
40508-	assert(edata_slab_get(edata));
40509-	edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) |
40510-	    ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
40511-}
40512-
40513-static inline void
40514-edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) {
40515-	/* The assertion assumes szind is set already. */
40516-	assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
40517-	edata->e_bits = (edata->e_bits &
40518-	    (~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) |
40519-	    ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) |
40520-	    ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
40521-}
40522-
40523-static inline void
40524-edata_nfree_inc(edata_t *edata) {
40525-	assert(edata_slab_get(edata));
40526-	edata->e_bits += ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
40527-}
40528-
40529-static inline void
40530-edata_nfree_dec(edata_t *edata) {
40531-	assert(edata_slab_get(edata));
40532-	edata->e_bits -= ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
40533-}
40534-
40535-static inline void
40536-edata_nfree_sub(edata_t *edata, uint64_t n) {
40537-	assert(edata_slab_get(edata));
40538-	edata->e_bits -= (n << EDATA_BITS_NFREE_SHIFT);
40539-}
40540-
40541-static inline void
40542-edata_sn_set(edata_t *edata, uint64_t sn) {
40543-	edata->e_sn = sn;
40544-}
40545-
40546-static inline void
40547-edata_state_set(edata_t *edata, extent_state_t state) {
40548-	edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) |
40549-	    ((uint64_t)state << EDATA_BITS_STATE_SHIFT);
40550-}
40551-
40552-static inline void
40553-edata_guarded_set(edata_t *edata, bool guarded) {
40554-	edata->e_bits = (edata->e_bits & ~EDATA_BITS_GUARDED_MASK) |
40555-	    ((uint64_t)guarded << EDATA_BITS_GUARDED_SHIFT);
40556-}
40557-
40558-static inline void
40559-edata_zeroed_set(edata_t *edata, bool zeroed) {
40560-	edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) |
40561-	    ((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT);
40562-}
40563-
40564-static inline void
40565-edata_committed_set(edata_t *edata, bool committed) {
40566-	edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) |
40567-	    ((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT);
40568-}
40569-
40570-static inline void
40571-edata_pai_set(edata_t *edata, extent_pai_t pai) {
40572-	edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK) |
40573-	    ((uint64_t)pai << EDATA_BITS_PAI_SHIFT);
40574-}
40575-
40576-static inline void
40577-edata_slab_set(edata_t *edata, bool slab) {
40578-	edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) |
40579-	    ((uint64_t)slab << EDATA_BITS_SLAB_SHIFT);
40580-}
40581-
40582-static inline void
40583-edata_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
40584-	atomic_store_p(&edata->e_prof_info.e_prof_tctx, tctx, ATOMIC_RELEASE);
40585-}
40586-
40587-static inline void
40588-edata_prof_alloc_time_set(edata_t *edata, nstime_t *t) {
40589-	nstime_copy(&edata->e_prof_info.e_prof_alloc_time, t);
40590-}
40591-
40592-static inline void
40593-edata_prof_alloc_size_set(edata_t *edata, size_t size) {
40594-	edata->e_prof_info.e_prof_alloc_size = size;
40595-}
40596-
40597-static inline void
40598-edata_prof_recent_alloc_set_dont_call_directly(edata_t *edata,
40599-    prof_recent_t *recent_alloc) {
40600-	atomic_store_p(&edata->e_prof_info.e_prof_recent_alloc, recent_alloc,
40601-	    ATOMIC_RELAXED);
40602-}
40603-
40604-static inline bool
40605-edata_is_head_get(edata_t *edata) {
40606-	return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >>
40607-	    EDATA_BITS_IS_HEAD_SHIFT);
40608-}
40609-
40610-static inline void
40611-edata_is_head_set(edata_t *edata, bool is_head) {
40612-	edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) |
40613-	    ((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT);
40614-}
40615-
40616-static inline bool
40617-edata_state_in_transition(extent_state_t state) {
40618-	return state >= extent_state_transition;
40619-}
40620-
40621-/*
40622- * Because this function is implemented as a sequence of bitfield modifications,
40623- * even though each individual bit is properly initialized, we technically read
40624- * uninitialized data within it.  This is mostly fine, since most callers get
40625- * their edatas from zeroing sources, but callers who make stack edata_ts need
40626- * to manually zero them.
40627- */
40628-static inline void
40629-edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
40630-    bool slab, szind_t szind, uint64_t sn, extent_state_t state, bool zeroed,
40631-    bool committed, extent_pai_t pai, extent_head_state_t is_head) {
40632-	assert(addr == PAGE_ADDR2BASE(addr) || !slab);
40633-
40634-	edata_arena_ind_set(edata, arena_ind);
40635-	edata_addr_set(edata, addr);
40636-	edata_size_set(edata, size);
40637-	edata_slab_set(edata, slab);
40638-	edata_szind_set(edata, szind);
40639-	edata_sn_set(edata, sn);
40640-	edata_state_set(edata, state);
40641-	edata_guarded_set(edata, false);
40642-	edata_zeroed_set(edata, zeroed);
40643-	edata_committed_set(edata, committed);
40644-	edata_pai_set(edata, pai);
40645-	edata_is_head_set(edata, is_head == EXTENT_IS_HEAD);
40646-	if (config_prof) {
40647-		edata_prof_tctx_set(edata, NULL);
40648-	}
40649-}
40650-
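Per the warning above edata_init(), an edata_t built on the stack has to be zeroed before initialization, since the setters only OR bits into e_bits. A hedged sketch with placeholder argument values (it assumes the usual internal includes, e.g. for memset):

static void
example_stack_edata(void *addr, size_t size, uint64_t sn) {
	edata_t edata;
	memset(&edata, 0, sizeof(edata));	/* required for stack edata_t */
	edata_init(&edata, /* arena_ind */ 0, addr, size, /* slab */ false,
	    SC_NSIZES, sn, extent_state_active, /* zeroed */ false,
	    /* committed */ true, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
	/* ... use edata locally ... */
}
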
40651-static inline void
40652-edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn) {
40653-	edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1);
40654-	edata_addr_set(edata, addr);
40655-	edata_bsize_set(edata, bsize);
40656-	edata_slab_set(edata, false);
40657-	edata_szind_set(edata, SC_NSIZES);
40658-	edata_sn_set(edata, sn);
40659-	edata_state_set(edata, extent_state_active);
40660-	edata_guarded_set(edata, false);
40661-	edata_zeroed_set(edata, true);
40662-	edata_committed_set(edata, true);
40663-	/*
40664-	 * This isn't strictly true, but base allocated extents never get
40665-	 * deallocated and can't be looked up in the emap, so there's no sense in
40666-	 * wasting a state bit to encode this fact.
40667-	 */
40668-	edata_pai_set(edata, EXTENT_PAI_PAC);
40669-}
40670-
40671-static inline int
40672-edata_esn_comp(const edata_t *a, const edata_t *b) {
40673-	size_t a_esn = edata_esn_get(a);
40674-	size_t b_esn = edata_esn_get(b);
40675-
40676-	return (a_esn > b_esn) - (a_esn < b_esn);
40677-}
40678-
40679-static inline int
40680-edata_ead_comp(const edata_t *a, const edata_t *b) {
40681-	uintptr_t a_eaddr = (uintptr_t)a;
40682-	uintptr_t b_eaddr = (uintptr_t)b;
40683-
40684-	return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
40685-}
40686-
40687-static inline edata_cmp_summary_t
40688-edata_cmp_summary_get(const edata_t *edata) {
40689-	return (edata_cmp_summary_t){edata_sn_get(edata),
40690-		(uintptr_t)edata_addr_get(edata)};
40691-}
40692-
40693-static inline int
40694-edata_cmp_summary_comp(edata_cmp_summary_t a, edata_cmp_summary_t b) {
40695-	int ret;
40696-	ret = (a.sn > b.sn) - (a.sn < b.sn);
40697-	if (ret != 0) {
40698-		return ret;
40699-	}
40700-	ret = (a.addr > b.addr) - (a.addr < b.addr);
40701-	return ret;
40702-}
40703-
40704-static inline int
40705-edata_snad_comp(const edata_t *a, const edata_t *b) {
40706-	edata_cmp_summary_t a_cmp = edata_cmp_summary_get(a);
40707-	edata_cmp_summary_t b_cmp = edata_cmp_summary_get(b);
40708-
40709-	return edata_cmp_summary_comp(a_cmp, b_cmp);
40710-}
40711-
40712-static inline int
40713-edata_esnead_comp(const edata_t *a, const edata_t *b) {
40714-	int ret;
40715-
40716-	ret = edata_esn_comp(a, b);
40717-	if (ret != 0) {
40718-		return ret;
40719-	}
40720-
40721-	ret = edata_ead_comp(a, b);
40722-	return ret;
40723-}
40724-
40725-ph_proto(, edata_avail, edata_t)
40726-ph_proto(, edata_heap, edata_t)
40727-
40728-#endif /* JEMALLOC_INTERNAL_EDATA_H */
40729diff --git a/jemalloc/include/jemalloc/internal/edata_cache.h b/jemalloc/include/jemalloc/internal/edata_cache.h
40730deleted file mode 100644
40731index 8b6c0ef..0000000
40732--- a/jemalloc/include/jemalloc/internal/edata_cache.h
40733+++ /dev/null
40734@@ -1,49 +0,0 @@
40735-#ifndef JEMALLOC_INTERNAL_EDATA_CACHE_H
40736-#define JEMALLOC_INTERNAL_EDATA_CACHE_H
40737-
40738-#include "jemalloc/internal/base.h"
40739-
40740-/* For tests only. */
40741-#define EDATA_CACHE_FAST_FILL 4
40742-
40743-/*
40744- * A cache of edata_t structures allocated via base_alloc_edata (as opposed to
40745- * the underlying extents they describe).  The contents of returned edata_t
40746- * objects are garbage and cannot be relied upon.
40747- */
40748-
40749-typedef struct edata_cache_s edata_cache_t;
40750-struct edata_cache_s {
40751-	edata_avail_t avail;
40752-	atomic_zu_t count;
40753-	malloc_mutex_t mtx;
40754-	base_t *base;
40755-};
40756-
40757-bool edata_cache_init(edata_cache_t *edata_cache, base_t *base);
40758-edata_t *edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache);
40759-void edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata);
40760-
40761-void edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache);
40762-void edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache);
40763-void edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache);
40764-
40765-/*
40766- * An edata_cache_fast is like an edata_cache, but it relies on external
40767- * synchronization and avoids first-fit strategies.
40768- */
40769-
40770-typedef struct edata_cache_fast_s edata_cache_fast_t;
40771-struct edata_cache_fast_s {
40772-	edata_list_inactive_t list;
40773-	edata_cache_t *fallback;
40774-	bool disabled;
40775-};
40776-
40777-void edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback);
40778-edata_t *edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs);
40779-void edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs,
40780-    edata_t *edata);
40781-void edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs);
40782-
40783-#endif /* JEMALLOC_INTERNAL_EDATA_CACHE_H */
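
Because edata_cache_fast_t leaves locking to its owner, the usual pattern is for a component that already holds its own mutex to pull edata_t metadata through the fast cache, with the locked edata_cache_t it was initialized against serving as the fallback path. A hypothetical sketch (the shard type is invented for illustration):

typedef struct shard_s {
	malloc_mutex_t mtx;
	edata_cache_fast_t ecf;	/* set up with edata_cache_fast_init() */
} shard_t;

static edata_t *
shard_edata_get(tsdn_t *tsdn, shard_t *shard) {
	malloc_mutex_assert_owner(tsdn, &shard->mtx);
	return edata_cache_fast_get(tsdn, &shard->ecf);
}

static void
shard_edata_put(tsdn_t *tsdn, shard_t *shard, edata_t *edata) {
	malloc_mutex_assert_owner(tsdn, &shard->mtx);
	edata_cache_fast_put(tsdn, &shard->ecf, edata);
}
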
40784diff --git a/jemalloc/include/jemalloc/internal/ehooks.h b/jemalloc/include/jemalloc/internal/ehooks.h
40785deleted file mode 100644
40786index 8d9513e..0000000
40787--- a/jemalloc/include/jemalloc/internal/ehooks.h
40788+++ /dev/null
40789@@ -1,412 +0,0 @@
40790-#ifndef JEMALLOC_INTERNAL_EHOOKS_H
40791-#define JEMALLOC_INTERNAL_EHOOKS_H
40792-
40793-#include "jemalloc/internal/atomic.h"
40794-#include "jemalloc/internal/extent_mmap.h"
40795-
40796-/*
40797- * This module is the internal interface to the extent hooks (both
40798- * user-specified and external).  Eventually, this will give us the flexibility
40799- * to use multiple different versions of user-visible extent-hook APIs under a
40800- * single user interface.
40801- *
40802- * Current API expansions (not available to anyone but the default hooks yet):
40803- *   - Head state tracking.  Hooks can decide whether or not to merge two
40804- *     extents based on whether or not one of them is the head (i.e. was
40805- *     allocated on its own).  The later extent loses its "head" status.
40806- */
40807-
40808-extern const extent_hooks_t ehooks_default_extent_hooks;
40809-
40810-typedef struct ehooks_s ehooks_t;
40811-struct ehooks_s {
40812-	/*
40813-	 * The user-visible id that goes with the ehooks (i.e. that of the base
40814-	 * they're a part of, the associated arena's index within the arenas
40815-	 * array).
40816-	 */
40817-	unsigned ind;
40818-	/* Logically an extent_hooks_t *. */
40819-	atomic_p_t ptr;
40820-};
40821-
40822-extern const extent_hooks_t ehooks_default_extent_hooks;
40823-
40824-/*
40825- * These are not really part of the public API.  Each hook has a fast-path for
40826- * the default-hooks case that can avoid various small inefficiencies:
40827- *   - Forgetting tsd and then calling tsd_get within the hook.
40828- *   - Getting more state than necessary out of the extent_t.
40829- *   - Doing arena_ind -> arena -> arena_ind lookups.
40830- * By making the calls to these functions visible to the compiler, it can move
40831- * those extra bits of computation down below the fast-paths where they get ignored.
40832- */
40833-void *ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
40834-    size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
40835-bool ehooks_default_dalloc_impl(void *addr, size_t size);
40836-void ehooks_default_destroy_impl(void *addr, size_t size);
40837-bool ehooks_default_commit_impl(void *addr, size_t offset, size_t length);
40838-bool ehooks_default_decommit_impl(void *addr, size_t offset, size_t length);
40839-#ifdef PAGES_CAN_PURGE_LAZY
40840-bool ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length);
40841-#endif
40842-#ifdef PAGES_CAN_PURGE_FORCED
40843-bool ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length);
40844-#endif
40845-bool ehooks_default_split_impl();
40846-/*
40847- * Merge is the only default extent hook we declare -- see the comment in
40848- * ehooks_merge.
40849- */
40850-bool ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a,
40851-    size_t size_a, void *addr_b, size_t size_b, bool committed,
40852-    unsigned arena_ind);
40853-bool ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b);
40854-void ehooks_default_zero_impl(void *addr, size_t size);
40855-void ehooks_default_guard_impl(void *guard1, void *guard2);
40856-void ehooks_default_unguard_impl(void *guard1, void *guard2);
40857-
40858-/*
40859- * We don't officially support reentrancy from within the extent hooks.  But
40860- * various people who sit within throwing distance of the jemalloc team want
40861- * that functionality in certain limited cases.  The default reentrancy guards
40862- * assert that we're not reentrant from a0 (since it's the bootstrap arena,
40863- * where reentrant allocations would be redirected), which we would incorrectly
40864- * trigger in cases where a0 has extent hooks (those hooks themselves can't be
40865- * reentrant, then, but there are reasonable uses for such functionality, like
40866- * putting internal metadata on hugepages).  Therefore, we use the raw
40867- * reentrancy guards.
40868- *
40869- * Eventually, we need to think more carefully about whether and where we
40870- * support allocating from within extent hooks (and what that means for things
40871- * like profiling, stats collection, etc.), and document what the guarantee is.
40872- */
40873-static inline void
40874-ehooks_pre_reentrancy(tsdn_t *tsdn) {
40875-	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
40876-	tsd_pre_reentrancy_raw(tsd);
40877-}
40878-
40879-static inline void
40880-ehooks_post_reentrancy(tsdn_t *tsdn) {
40881-	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
40882-	tsd_post_reentrancy_raw(tsd);
40883-}
40884-
40885-/* Beginning of the public API. */
40886-void ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind);
40887-
40888-static inline unsigned
40889-ehooks_ind_get(const ehooks_t *ehooks) {
40890-	return ehooks->ind;
40891-}
40892-
40893-static inline void
40894-ehooks_set_extent_hooks_ptr(ehooks_t *ehooks, extent_hooks_t *extent_hooks) {
40895-	atomic_store_p(&ehooks->ptr, extent_hooks, ATOMIC_RELEASE);
40896-}
40897-
40898-static inline extent_hooks_t *
40899-ehooks_get_extent_hooks_ptr(ehooks_t *ehooks) {
40900-	return (extent_hooks_t *)atomic_load_p(&ehooks->ptr, ATOMIC_ACQUIRE);
40901-}
40902-
40903-static inline bool
40904-ehooks_are_default(ehooks_t *ehooks) {
40905-	return ehooks_get_extent_hooks_ptr(ehooks) ==
40906-	    &ehooks_default_extent_hooks;
40907-}
40908-
40909-/*
40910- * In some cases, a caller needs to allocate resources before attempting to call
40911- * a hook.  If that hook is doomed to fail, this is wasteful.  We therefore
40912- * include some checks for such cases.
40913- */
40914-static inline bool
40915-ehooks_dalloc_will_fail(ehooks_t *ehooks) {
40916-	if (ehooks_are_default(ehooks)) {
40917-		return opt_retain;
40918-	} else {
40919-		return ehooks_get_extent_hooks_ptr(ehooks)->dalloc == NULL;
40920-	}
40921-}
40922-
40923-static inline bool
40924-ehooks_split_will_fail(ehooks_t *ehooks) {
40925-	return ehooks_get_extent_hooks_ptr(ehooks)->split == NULL;
40926-}
40927-
40928-static inline bool
40929-ehooks_merge_will_fail(ehooks_t *ehooks) {
40930-	return ehooks_get_extent_hooks_ptr(ehooks)->merge == NULL;
40931-}
40932-
40933-static inline bool
40934-ehooks_guard_will_fail(ehooks_t *ehooks) {
40935-	/*
40936-	 * Before the guard hooks are officially introduced, limit the use to
40937-	 * the default hooks only.
40938-	 */
40939-	return !ehooks_are_default(ehooks);
40940-}
40941-
40942-/*
40943- * Some hooks are required to return zeroed memory in certain situations.  In
40944- * debug mode, we do some heuristic checks that they did what they were supposed
40945- * to.
40946- *
40947- * This isn't really ehooks-specific (i.e. anyone can check for zeroed memory).
40948- * But incorrect zero information indicates an ehook bug.
40949- */
40950-static inline void
40951-ehooks_debug_zero_check(void *addr, size_t size) {
40952-	assert(((uintptr_t)addr & PAGE_MASK) == 0);
40953-	assert((size & PAGE_MASK) == 0);
40954-	assert(size > 0);
40955-	if (config_debug) {
40956-		/* Check the whole first page. */
40957-		size_t *p = (size_t *)addr;
40958-		for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
40959-			assert(p[i] == 0);
40960-		}
40961-		/*
40962-		 * And 4 spots within.  There's a tradeoff here; the larger
40963-		 * this number, the more likely it is that we'll catch a bug
40964-		 * where ehooks return a sparsely non-zero range.  But
40965-		 * increasing the number of checks also increases the number of
40966-		 * page faults in debug mode.  FreeBSD does much of their
40967-		 * day-to-day development work in debug mode, so we don't want
40968-		 * even the debug builds to be too slow.
40969-		 */
40970-		const size_t nchecks = 4;
40971-		assert(PAGE >= sizeof(size_t) * nchecks);
40972-		for (size_t i = 0; i < nchecks; ++i) {
40973-			assert(p[i * (size / sizeof(size_t) / nchecks)] == 0);
40974-		}
40975-	}
40976-}
40977-
40978-
40979-static inline void *
40980-ehooks_alloc(tsdn_t *tsdn, ehooks_t *ehooks, void *new_addr, size_t size,
40981-    size_t alignment, bool *zero, bool *commit) {
40982-	bool orig_zero = *zero;
40983-	void *ret;
40984-	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
40985-	if (extent_hooks == &ehooks_default_extent_hooks) {
40986-		ret = ehooks_default_alloc_impl(tsdn, new_addr, size,
40987-		    alignment, zero, commit, ehooks_ind_get(ehooks));
40988-	} else {
40989-		ehooks_pre_reentrancy(tsdn);
40990-		ret = extent_hooks->alloc(extent_hooks, new_addr, size,
40991-		    alignment, zero, commit, ehooks_ind_get(ehooks));
40992-		ehooks_post_reentrancy(tsdn);
40993-	}
40994-	assert(new_addr == NULL || ret == NULL || new_addr == ret);
40995-	assert(!orig_zero || *zero);
40996-	if (*zero && ret != NULL) {
40997-		ehooks_debug_zero_check(ret, size);
40998-	}
40999-	return ret;
41000-}
41001-
41002-static inline bool
41003-ehooks_dalloc(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
41004-    bool committed) {
41005-	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
41006-	if (extent_hooks == &ehooks_default_extent_hooks) {
41007-		return ehooks_default_dalloc_impl(addr, size);
41008-	} else if (extent_hooks->dalloc == NULL) {
41009-		return true;
41010-	} else {
41011-		ehooks_pre_reentrancy(tsdn);
41012-		bool err = extent_hooks->dalloc(extent_hooks, addr, size,
41013-		    committed, ehooks_ind_get(ehooks));
41014-		ehooks_post_reentrancy(tsdn);
41015-		return err;
41016-	}
41017-}
41018-
41019-static inline void
41020-ehooks_destroy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
41021-    bool committed) {
41022-	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
41023-	if (extent_hooks == &ehooks_default_extent_hooks) {
41024-		ehooks_default_destroy_impl(addr, size);
41025-	} else if (extent_hooks->destroy == NULL) {
41026-		/* Do nothing. */
41027-	} else {
41028-		ehooks_pre_reentrancy(tsdn);
41029-		extent_hooks->destroy(extent_hooks, addr, size, committed,
41030-		    ehooks_ind_get(ehooks));
41031-		ehooks_post_reentrancy(tsdn);
41032-	}
41033-}
41034-
41035-static inline bool
41036-ehooks_commit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
41037-    size_t offset, size_t length) {
41038-	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
41039-	bool err;
41040-	if (extent_hooks == &ehooks_default_extent_hooks) {
41041-		err = ehooks_default_commit_impl(addr, offset, length);
41042-	} else if (extent_hooks->commit == NULL) {
41043-		err = true;
41044-	} else {
41045-		ehooks_pre_reentrancy(tsdn);
41046-		err = extent_hooks->commit(extent_hooks, addr, size,
41047-		    offset, length, ehooks_ind_get(ehooks));
41048-		ehooks_post_reentrancy(tsdn);
41049-	}
41050-	if (!err) {
41051-		ehooks_debug_zero_check(addr, size);
41052-	}
41053-	return err;
41054-}
41055-
41056-static inline bool
41057-ehooks_decommit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
41058-    size_t offset, size_t length) {
41059-	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
41060-	if (extent_hooks == &ehooks_default_extent_hooks) {
41061-		return ehooks_default_decommit_impl(addr, offset, length);
41062-	} else if (extent_hooks->decommit == NULL) {
41063-		return true;
41064-	} else {
41065-		ehooks_pre_reentrancy(tsdn);
41066-		bool err = extent_hooks->decommit(extent_hooks, addr, size,
41067-		    offset, length, ehooks_ind_get(ehooks));
41068-		ehooks_post_reentrancy(tsdn);
41069-		return err;
41070-	}
41071-}
41072-
41073-static inline bool
41074-ehooks_purge_lazy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
41075-    size_t offset, size_t length) {
41076-	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
41077-#ifdef PAGES_CAN_PURGE_LAZY
41078-	if (extent_hooks == &ehooks_default_extent_hooks) {
41079-		return ehooks_default_purge_lazy_impl(addr, offset, length);
41080-	}
41081-#endif
41082-	if (extent_hooks->purge_lazy == NULL) {
41083-		return true;
41084-	} else {
41085-		ehooks_pre_reentrancy(tsdn);
41086-		bool err = extent_hooks->purge_lazy(extent_hooks, addr, size,
41087-		    offset, length, ehooks_ind_get(ehooks));
41088-		ehooks_post_reentrancy(tsdn);
41089-		return err;
41090-	}
41091-}
41092-
41093-static inline bool
41094-ehooks_purge_forced(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
41095-    size_t offset, size_t length) {
41096-	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
41097-	/*
41098-	 * It would be correct to have an ehooks_debug_zero_check call at the end
41099-	 * of this function; purge_forced is required to zero.  But checking
41100-	 * would touch the page in question, which may have performance
41101-	 * consequences (imagine the hooks are using hugepages, with a global
41102-	 * zero page off).  Even in debug mode, it's usually a good idea to
41103-	 * avoid cases that can dramatically increase memory consumption.
41104-	 */
41105-#ifdef PAGES_CAN_PURGE_FORCED
41106-	if (extent_hooks == &ehooks_default_extent_hooks) {
41107-		return ehooks_default_purge_forced_impl(addr, offset, length);
41108-	}
41109-#endif
41110-	if (extent_hooks->purge_forced == NULL) {
41111-		return true;
41112-	} else {
41113-		ehooks_pre_reentrancy(tsdn);
41114-		bool err = extent_hooks->purge_forced(extent_hooks, addr, size,
41115-		    offset, length, ehooks_ind_get(ehooks));
41116-		ehooks_post_reentrancy(tsdn);
41117-		return err;
41118-	}
41119-}
41120-
41121-static inline bool
41122-ehooks_split(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
41123-    size_t size_a, size_t size_b, bool committed) {
41124-	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
41125-	if (ehooks_are_default(ehooks)) {
41126-		return ehooks_default_split_impl();
41127-	} else if (extent_hooks->split == NULL) {
41128-		return true;
41129-	} else {
41130-		ehooks_pre_reentrancy(tsdn);
41131-		bool err = extent_hooks->split(extent_hooks, addr, size, size_a,
41132-		    size_b, committed, ehooks_ind_get(ehooks));
41133-		ehooks_post_reentrancy(tsdn);
41134-		return err;
41135-	}
41136-}
41137-
41138-static inline bool
41139-ehooks_merge(tsdn_t *tsdn, ehooks_t *ehooks, void *addr_a, size_t size_a,
41140-    void *addr_b, size_t size_b, bool committed) {
41141-	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
41142-	if (extent_hooks == &ehooks_default_extent_hooks) {
41143-		return ehooks_default_merge_impl(tsdn, addr_a, addr_b);
41144-	} else if (extent_hooks->merge == NULL) {
41145-		return true;
41146-	} else {
41147-		ehooks_pre_reentrancy(tsdn);
41148-		bool err = extent_hooks->merge(extent_hooks, addr_a, size_a,
41149-		    addr_b, size_b, committed, ehooks_ind_get(ehooks));
41150-		ehooks_post_reentrancy(tsdn);
41151-		return err;
41152-	}
41153-}
41154-
41155-static inline void
41156-ehooks_zero(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size) {
41157-	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
41158-	if (extent_hooks == &ehooks_default_extent_hooks) {
41159-		ehooks_default_zero_impl(addr, size);
41160-	} else {
41161-		/*
41162-		 * It would be correct to try using the user-provided purge
41163-		 * hooks (since they are required to have zeroed the extent if
41164-		 * they indicate success), but we don't necessarily know their
41165-		 * cost.  We'll be conservative and use memset.
41166-		 */
41167-		memset(addr, 0, size);
41168-	}
41169-}
41170-
41171-static inline bool
41172-ehooks_guard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
41173-	bool err;
41174-	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
41175-
41176-	if (extent_hooks == &ehooks_default_extent_hooks) {
41177-		ehooks_default_guard_impl(guard1, guard2);
41178-		err = false;
41179-	} else {
41180-		err = true;
41181-	}
41182-
41183-	return err;
41184-}
41185-
41186-static inline bool
41187-ehooks_unguard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
41188-	bool err;
41189-	extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
41190-
41191-	if (extent_hooks == &ehooks_default_extent_hooks) {
41192-		ehooks_default_unguard_impl(guard1, guard2);
41193-		err = false;
41194-	} else {
41195-		err = true;
41196-	}
41197-
41198-	return err;
41199-}
41200-
41201-#endif /* JEMALLOC_INTERNAL_EHOOKS_H */
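
The wrappers above (commit, decommit, purge, split, merge) all share one dispatch shape: the default hook table takes an internal fast path, a NULL user hook means the operation is unsupported and is reported as an error (true), and any other user hook runs between the reentrancy guards. A minimal standalone sketch of that shape, using reduced hypothetical types rather than jemalloc's:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical, reduced hook table; false == success, as in the real hooks. */
typedef struct {
	bool (*decommit)(void *addr, size_t size);	/* NULL => unsupported */
} hooks_t;

static bool
default_decommit(void *addr, size_t size) {
	(void)addr; (void)size;
	return false;
}

static const hooks_t default_hooks = {default_decommit};

static bool
dispatch_decommit(const hooks_t *hooks, void *addr, size_t size) {
	if (hooks == &default_hooks) {
		/* Fast path: call the internal implementation directly. */
		return default_decommit(addr, size);
	} else if (hooks->decommit == NULL) {
		/* Unsupported by the user's hooks: report failure. */
		return true;
	}
	/* The real code brackets this call with pre/post reentrancy guards. */
	return hooks->decommit(addr, size);
}

int
main(void) {
	char buf[16];
	hooks_t custom = {NULL};
	printf("default: %d\n", dispatch_decommit(&default_hooks, buf, sizeof(buf)));
	printf("custom:  %d\n", dispatch_decommit(&custom, buf, sizeof(buf)));
	return 0;
}
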
41202diff --git a/jemalloc/include/jemalloc/internal/emap.h b/jemalloc/include/jemalloc/internal/emap.h
41203deleted file mode 100644
41204index 847af32..0000000
41205--- a/jemalloc/include/jemalloc/internal/emap.h
41206+++ /dev/null
41207@@ -1,357 +0,0 @@
41208-#ifndef JEMALLOC_INTERNAL_EMAP_H
41209-#define JEMALLOC_INTERNAL_EMAP_H
41210-
41211-#include "jemalloc/internal/base.h"
41212-#include "jemalloc/internal/rtree.h"
41213-
41214-/*
41215- * Note: Ends without a semicolon, so that
41216- *     EMAP_DECLARE_RTREE_CTX;
41217- * in uses will avoid empty-statement warnings.
41218- */
41219-#define EMAP_DECLARE_RTREE_CTX						\
41220-    rtree_ctx_t rtree_ctx_fallback;					\
41221-    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback)
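
The missing trailing semicolon is what lets call sites write `EMAP_DECLARE_RTREE_CTX;` without leaving an empty statement behind after expansion. A reduced, self-contained sketch of the same idiom (the types and names here are stand-ins, not jemalloc's):

#include <stdio.h>

typedef struct { int dummy; } ctx_t;

static ctx_t *
ctx_get(ctx_t *fallback) {
	return fallback;
}

/* No trailing ';' here, so the caller's ';' completes the declaration. */
#define DECLARE_CTX							\
	ctx_t ctx_fallback;						\
	ctx_t *ctx = ctx_get(&ctx_fallback)

int
main(void) {
	DECLARE_CTX;	/* reads like a normal declaration at the call site */
	printf("%p\n", (void *)ctx);
	return 0;
}
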
41222-
41223-typedef struct emap_s emap_t;
41224-struct emap_s {
41225-	rtree_t rtree;
41226-};
41227-
41228-/* Used to pass rtree lookup context down the path. */
41229-typedef struct emap_alloc_ctx_t emap_alloc_ctx_t;
41230-struct emap_alloc_ctx_t {
41231-	szind_t szind;
41232-	bool slab;
41233-};
41234-
41235-typedef struct emap_full_alloc_ctx_s emap_full_alloc_ctx_t;
41236-struct emap_full_alloc_ctx_s {
41237-	szind_t szind;
41238-	bool slab;
41239-	edata_t *edata;
41240-};
41241-
41242-bool emap_init(emap_t *emap, base_t *base, bool zeroed);
41243-
41244-void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
41245-    bool slab);
41246-
41247-void emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
41248-    extent_state_t state);
41249-
41250-/*
41251- * The two acquire functions below allow accessing neighbor edatas, if it's safe
41252- * and valid to do so (i.e. from the same arena, of the same state, etc.).  This
41253- * is necessary because the ecache locks are state based, and only protect
41254- * edatas with the same state.  Therefore the neighbor edata's state needs to be
41255- * verified first, before chasing the edata pointer.  The returned edata will be
41256- * in an acquired state, meaning other threads will be prevented from accessing
41257- * it, even if technically the edata can still be discovered from the rtree.
41258- *
41259- * This means, at any moment when holding pointers to edata, either one of the
41260- * state based locks is held (and the edatas are all of the protected state), or
41261- * the edatas are in an acquired state (e.g. in active or merging state).  The
41262- * acquire operation itself (changing the edata to an acquired state) is done
41263- * under the state locks.
41264- */
41265-edata_t *emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap,
41266-    edata_t *edata, extent_pai_t pai, extent_state_t expected_state,
41267-    bool forward);
41268-edata_t *emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap,
41269-    edata_t *edata, extent_pai_t pai, extent_state_t expected_state);
41270-void emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
41271-    extent_state_t new_state);
41272-
41273-/*
41274- * Associate the given edata with its beginning and end address, setting the
41275- * szind and slab info appropriately.
41276- * Returns true on error (i.e. resource exhaustion).
41277- */
41278-bool emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
41279-    szind_t szind, bool slab);
41280-
41281-/*
41282- * Does the same thing, but with the interior of the range, for slab
41283- * allocations.
41284- *
41285- * You might wonder why we don't just have a single emap_register function that
41286- * does both depending on the value of 'slab'.  The answer is twofold:
41287- * - As a practical matter, in places like the extract->split->commit pathway,
41288- *   we defer the interior operation until we're sure that the commit won't fail
41289- *   (but we have to register the split boundaries there).
41290- * - In general, we're trying to move to a world where the page-specific
41291- *   allocator doesn't know as much about how the pages it allocates will be
41292- *   used, and passing a 'slab' parameter everywhere makes that more
41293- *   complicated.
41294- *
41295- * Unlike the boundary version, this function can't fail; this is because slabs
41296- * can't get big enough to touch a new page that neither of the boundaries
41297- * touched, so no allocation is necessary to fill the interior once the boundary
41298- * has been touched.
41299- */
41300-void emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
41301-    szind_t szind);
41302-
41303-void emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
41304-void emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
41305-
41306-typedef struct emap_prepare_s emap_prepare_t;
41307-struct emap_prepare_s {
41308-	rtree_leaf_elm_t *lead_elm_a;
41309-	rtree_leaf_elm_t *lead_elm_b;
41310-	rtree_leaf_elm_t *trail_elm_a;
41311-	rtree_leaf_elm_t *trail_elm_b;
41312-};
41313-
41314-/**
41315- * These functions do the emap metadata management for merging, splitting, and
41316- * reusing extents.  In particular, they set the boundary mappings from
41317- * addresses to edatas.  If the result is going to be used as a slab, you
41318- * still need to call emap_register_interior on it, though.
41319- *
41320- * Remap simply changes the szind and slab status of an extent's boundary
41321- * mappings.  If the extent is not a slab, it doesn't bother with updating the
41322- * end mapping (since lookups only occur in the interior of an extent for
41323- * slabs).  Since the szind and slab status only make sense for active extents,
41324- * this should only be called while activating or deactivating an extent.
41325- *
41326- * Split and merge have a "prepare" and a "commit" portion.  The prepare portion
41327- * does the operations that can be done without exclusive access to the extent
41328- * in question, while the commit variant requires exclusive access to maintain
41329- * the emap invariants.  The only function that can fail is emap_split_prepare,
41330- * and it returns true on failure (at which point the caller shouldn't commit).
41331- *
41332- * In all cases, "lead" refers to the lower-addressed extent, and trail to the
41333- * higher-addressed one.  It's the caller's responsibility to set the edata
41334- * state appropriately.
41335- */
41336-bool emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
41337-    edata_t *edata, size_t size_a, edata_t *trail, size_t size_b);
41338-void emap_split_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
41339-    edata_t *lead, size_t size_a, edata_t *trail, size_t size_b);
41340-void emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
41341-    edata_t *lead, edata_t *trail);
41342-void emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
41343-    edata_t *lead, edata_t *trail);
41344-
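
The prepare/commit split described above follows a common discipline: the fallible work runs before exclusive access is taken, and the commit step, which must not fail, runs under it. A generic, self-contained sketch of that calling pattern (names are illustrative, not the emap API):

#include <stdbool.h>
#include <stdio.h>

typedef struct { int reserved; } prepare_t;

static bool
split_prepare(prepare_t *p) {	/* may fail; no exclusive access needed */
	p->reserved = 1;
	return false;		/* false == success */
}

static void
split_commit(prepare_t *p) {	/* cannot fail; needs exclusive access */
	printf("committed with reserved=%d\n", p->reserved);
}

int
main(void) {
	prepare_t prep;
	if (split_prepare(&prep)) {
		return 1;	/* on failure, the caller must not commit */
	}
	/* ... exclusive access to the extent would be acquired here ... */
	split_commit(&prep);
	return 0;
}
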
41345-/* Assert that the emap's view of the given edata matches the edata's view. */
41346-void emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
41347-static inline void
41348-emap_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
41349-	if (config_debug) {
41350-		emap_do_assert_mapped(tsdn, emap, edata);
41351-	}
41352-}
41353-
41354-/* Assert that the given edata isn't in the map. */
41355-void emap_do_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
41356-static inline void
41357-emap_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
41358-	if (config_debug) {
41359-		emap_do_assert_not_mapped(tsdn, emap, edata);
41360-	}
41361-}
41362-
41363-JEMALLOC_ALWAYS_INLINE bool
41364-emap_edata_in_transition(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
41365-	assert(config_debug);
41366-	emap_assert_mapped(tsdn, emap, edata);
41367-
41368-	EMAP_DECLARE_RTREE_CTX;
41369-	rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
41370-	    (uintptr_t)edata_base_get(edata));
41371-
41372-	return edata_state_in_transition(contents.metadata.state);
41373-}
41374-
41375-JEMALLOC_ALWAYS_INLINE bool
41376-emap_edata_is_acquired(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
41377-	if (!config_debug) {
41378-		/* For assertions only. */
41379-		return false;
41380-	}
41381-
41382-	/*
41383-	 * The edata is considered acquired if no other threads will attempt to
41384-	 * read / write any fields from it.  This includes a few cases:
41385-	 *
41386-	 * 1) edata not hooked into emap yet -- This implies the edata just got
41387-	 * allocated or initialized.
41388-	 *
41389-	 * 2) in an active or transition state -- In both cases, the edata can
41390-	 * be discovered from the emap; however, the state tracked in the rtree
41391-	 * will prevent other threads from accessing the actual edata.
41392-	 */
41393-	EMAP_DECLARE_RTREE_CTX;
41394-	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
41395-	    rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true,
41396-	    /* init_missing */ false);
41397-	if (elm == NULL) {
41398-		return true;
41399-	}
41400-	rtree_contents_t contents = rtree_leaf_elm_read(tsdn, &emap->rtree, elm,
41401-	    /* dependent */ true);
41402-	if (contents.edata == NULL ||
41403-	    contents.metadata.state == extent_state_active ||
41404-	    edata_state_in_transition(contents.metadata.state)) {
41405-		return true;
41406-	}
41407-
41408-	return false;
41409-}
41410-
41411-JEMALLOC_ALWAYS_INLINE void
41412-extent_assert_can_coalesce(const edata_t *inner, const edata_t *outer) {
41413-	assert(edata_arena_ind_get(inner) == edata_arena_ind_get(outer));
41414-	assert(edata_pai_get(inner) == edata_pai_get(outer));
41415-	assert(edata_committed_get(inner) == edata_committed_get(outer));
41416-	assert(edata_state_get(inner) == extent_state_active);
41417-	assert(edata_state_get(outer) == extent_state_merging);
41418-	assert(!edata_guarded_get(inner) && !edata_guarded_get(outer));
41419-	assert(edata_base_get(inner) == edata_past_get(outer) ||
41420-	    edata_base_get(outer) == edata_past_get(inner));
41421-}
41422-
41423-JEMALLOC_ALWAYS_INLINE void
41424-extent_assert_can_expand(const edata_t *original, const edata_t *expand) {
41425-	assert(edata_arena_ind_get(original) == edata_arena_ind_get(expand));
41426-	assert(edata_pai_get(original) == edata_pai_get(expand));
41427-	assert(edata_state_get(original) == extent_state_active);
41428-	assert(edata_state_get(expand) == extent_state_merging);
41429-	assert(edata_past_get(original) == edata_base_get(expand));
41430-}
41431-
41432-JEMALLOC_ALWAYS_INLINE edata_t *
41433-emap_edata_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr) {
41434-	EMAP_DECLARE_RTREE_CTX;
41435-
41436-	return rtree_read(tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr).edata;
41437-}
41438-
41439-/* Fills in alloc_ctx with the info in the map. */
41440-JEMALLOC_ALWAYS_INLINE void
41441-emap_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
41442-    emap_alloc_ctx_t *alloc_ctx) {
41443-	EMAP_DECLARE_RTREE_CTX;
41444-
41445-	rtree_metadata_t metadata = rtree_metadata_read(tsdn, &emap->rtree,
41446-	    rtree_ctx, (uintptr_t)ptr);
41447-	alloc_ctx->szind = metadata.szind;
41448-	alloc_ctx->slab = metadata.slab;
41449-}
41450-
41451-/* The pointer must be mapped. */
41452-JEMALLOC_ALWAYS_INLINE void
41453-emap_full_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
41454-    emap_full_alloc_ctx_t *full_alloc_ctx) {
41455-	EMAP_DECLARE_RTREE_CTX;
41456-
41457-	rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
41458-	    (uintptr_t)ptr);
41459-	full_alloc_ctx->edata = contents.edata;
41460-	full_alloc_ctx->szind = contents.metadata.szind;
41461-	full_alloc_ctx->slab = contents.metadata.slab;
41462-}
41463-
41464-/*
41465- * The pointer is allowed to not be mapped.
41466- *
41467- * Returns true when the pointer is not present.
41468- */
41469-JEMALLOC_ALWAYS_INLINE bool
41470-emap_full_alloc_ctx_try_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
41471-    emap_full_alloc_ctx_t *full_alloc_ctx) {
41472-	EMAP_DECLARE_RTREE_CTX;
41473-
41474-	rtree_contents_t contents;
41475-	bool err = rtree_read_independent(tsdn, &emap->rtree, rtree_ctx,
41476-	    (uintptr_t)ptr, &contents);
41477-	if (err) {
41478-		return true;
41479-	}
41480-	full_alloc_ctx->edata = contents.edata;
41481-	full_alloc_ctx->szind = contents.metadata.szind;
41482-	full_alloc_ctx->slab = contents.metadata.slab;
41483-	return false;
41484-}
41485-
41486-/*
41487- * Only used on the fast path of free.  Returns true when the lookup cannot be
41488- * fulfilled by the fast path, e.g. when the metadata key is not cached.
41489- */
41490-JEMALLOC_ALWAYS_INLINE bool
41491-emap_alloc_ctx_try_lookup_fast(tsd_t *tsd, emap_t *emap, const void *ptr,
41492-    emap_alloc_ctx_t *alloc_ctx) {
41493-	/* Use the unsafe getter since this may get called during exit. */
41494-	rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get_unsafe(tsd);
41495-
41496-	rtree_metadata_t metadata;
41497-	bool err = rtree_metadata_try_read_fast(tsd_tsdn(tsd), &emap->rtree,
41498-	    rtree_ctx, (uintptr_t)ptr, &metadata);
41499-	if (err) {
41500-		return true;
41501-	}
41502-	alloc_ctx->szind = metadata.szind;
41503-	alloc_ctx->slab = metadata.slab;
41504-	return false;
41505-}
41506-
41507-/*
41508- * We want to do batch lookups out of the cache bins, which use
41509- * cache_bin_ptr_array_get to access the i'th element of the bin (since they
41510- * invert the usual ordering in deciding what to flush).  This lets the emap avoid
41511- * caring about its caller's ordering.
41512- */
41513-typedef const void *(*emap_ptr_getter)(void *ctx, size_t ind);
41514-/*
41515- * This allows size-checking assertions, which we can only do while we're in the
41516- * process of edata lookups.
41517- */
41518-typedef void (*emap_metadata_visitor)(void *ctx, emap_full_alloc_ctx_t *alloc_ctx);
41519-
41520-typedef union emap_batch_lookup_result_u emap_batch_lookup_result_t;
41521-union emap_batch_lookup_result_u {
41522-	edata_t *edata;
41523-	rtree_leaf_elm_t *rtree_leaf;
41524-};
41525-
41526-JEMALLOC_ALWAYS_INLINE void
41527-emap_edata_lookup_batch(tsd_t *tsd, emap_t *emap, size_t nptrs,
41528-    emap_ptr_getter ptr_getter, void *ptr_getter_ctx,
41529-    emap_metadata_visitor metadata_visitor, void *metadata_visitor_ctx,
41530-    emap_batch_lookup_result_t *result) {
41531-	/* Avoids null-checking tsdn in the loop below. */
41532-	util_assume(tsd != NULL);
41533-	rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get(tsd);
41534-
41535-	for (size_t i = 0; i < nptrs; i++) {
41536-		const void *ptr = ptr_getter(ptr_getter_ctx, i);
41537-		/*
41538-		 * Reuse the edatas array as a temp buffer, lying a little about
41539-		 * the types.
41540-		 */
41541-		result[i].rtree_leaf = rtree_leaf_elm_lookup(tsd_tsdn(tsd),
41542-		    &emap->rtree, rtree_ctx, (uintptr_t)ptr,
41543-		    /* dependent */ true, /* init_missing */ false);
41544-	}
41545-
41546-	for (size_t i = 0; i < nptrs; i++) {
41547-		rtree_leaf_elm_t *elm = result[i].rtree_leaf;
41548-		rtree_contents_t contents = rtree_leaf_elm_read(tsd_tsdn(tsd),
41549-		    &emap->rtree, elm, /* dependent */ true);
41550-		result[i].edata = contents.edata;
41551-		emap_full_alloc_ctx_t alloc_ctx;
41552-		/*
41553-		 * Not all these fields are read in practice by the metadata
41554-		 * visitor.  But the compiler can easily optimize away the ones
41555-		 * that aren't, so no sense in being incomplete.
41556-		 */
41557-		alloc_ctx.szind = contents.metadata.szind;
41558-		alloc_ctx.slab = contents.metadata.slab;
41559-		alloc_ctx.edata = contents.edata;
41560-		metadata_visitor(metadata_visitor_ctx, &alloc_ctx);
41561-	}
41562-}
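
The getter/visitor indirection lets the batch lookup walk any container layout without knowing it. A standalone sketch of the same callback shape, with a plain pointer array standing in for a cache bin (the driver loop here is illustrative, not emap_edata_lookup_batch itself):

#include <stddef.h>
#include <stdio.h>

typedef const void *(*ptr_getter_t)(void *ctx, size_t ind);
typedef void (*visitor_t)(void *ctx, const void *ptr);

/* A getter that reads the i'th element of a plain array of pointers. */
static const void *
array_getter(void *ctx, size_t ind) {
	const void **arr = (const void **)ctx;
	return arr[ind];
}

static void
print_visitor(void *ctx, const void *ptr) {
	size_t *count = (size_t *)ctx;
	printf("visit %zu: %p\n", (*count)++, ptr);
}

/* Driver analogous in shape to the batch lookup above. */
static void
lookup_batch(size_t nptrs, ptr_getter_t get, void *get_ctx, visitor_t visit,
    void *visit_ctx) {
	for (size_t i = 0; i < nptrs; i++) {
		visit(visit_ctx, get(get_ctx, i));
	}
}

int
main(void) {
	int a, b;
	const void *ptrs[] = {&a, &b};
	size_t count = 0;
	lookup_batch(2, array_getter, (void *)ptrs, print_visitor, &count);
	return 0;
}
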
41563-
41564-#endif /* JEMALLOC_INTERNAL_EMAP_H */
41565diff --git a/jemalloc/include/jemalloc/internal/emitter.h b/jemalloc/include/jemalloc/internal/emitter.h
41566deleted file mode 100644
41567index 9482f68..0000000
41568--- a/jemalloc/include/jemalloc/internal/emitter.h
41569+++ /dev/null
41570@@ -1,510 +0,0 @@
41571-#ifndef JEMALLOC_INTERNAL_EMITTER_H
41572-#define JEMALLOC_INTERNAL_EMITTER_H
41573-
41574-#include "jemalloc/internal/ql.h"
41575-
41576-typedef enum emitter_output_e emitter_output_t;
41577-enum emitter_output_e {
41578-	emitter_output_json,
41579-	emitter_output_json_compact,
41580-	emitter_output_table
41581-};
41582-
41583-typedef enum emitter_justify_e emitter_justify_t;
41584-enum emitter_justify_e {
41585-	emitter_justify_left,
41586-	emitter_justify_right,
41587-	/* Not for users; just to pass to internal functions. */
41588-	emitter_justify_none
41589-};
41590-
41591-typedef enum emitter_type_e emitter_type_t;
41592-enum emitter_type_e {
41593-	emitter_type_bool,
41594-	emitter_type_int,
41595-	emitter_type_int64,
41596-	emitter_type_unsigned,
41597-	emitter_type_uint32,
41598-	emitter_type_uint64,
41599-	emitter_type_size,
41600-	emitter_type_ssize,
41601-	emitter_type_string,
41602-	/*
41603-	 * A title is a column title in a table; it's just a string, but it's
41604-	 * not quoted.
41605-	 */
41606-	emitter_type_title,
41607-};
41608-
41609-typedef struct emitter_col_s emitter_col_t;
41610-struct emitter_col_s {
41611-	/* Filled in by the user. */
41612-	emitter_justify_t justify;
41613-	int width;
41614-	emitter_type_t type;
41615-	union {
41616-		bool bool_val;
41617-		int int_val;
41618-		unsigned unsigned_val;
41619-		uint32_t uint32_val;
41620-		uint32_t uint32_t_val;
41621-		uint64_t uint64_val;
41622-		uint64_t uint64_t_val;
41623-		size_t size_val;
41624-		ssize_t ssize_val;
41625-		const char *str_val;
41626-	};
41627-
41628-	/* Filled in by initialization. */
41629-	ql_elm(emitter_col_t) link;
41630-};
41631-
41632-typedef struct emitter_row_s emitter_row_t;
41633-struct emitter_row_s {
41634-	ql_head(emitter_col_t) cols;
41635-};
41636-
41637-typedef struct emitter_s emitter_t;
41638-struct emitter_s {
41639-	emitter_output_t output;
41640-	/* The output information. */
41641-	write_cb_t *write_cb;
41642-	void *cbopaque;
41643-	int nesting_depth;
41644-	/* True if we've already emitted a value at the given depth. */
41645-	bool item_at_depth;
41646-	/* True if we emitted a key and will emit corresponding value next. */
41647-	bool emitted_key;
41648-};
41649-
41650-static inline bool
41651-emitter_outputs_json(emitter_t *emitter) {
41652-	return emitter->output == emitter_output_json ||
41653-	    emitter->output == emitter_output_json_compact;
41654-}
41655-
41656-/* Internal convenience function.  Write to the emitter the given string. */
41657-JEMALLOC_FORMAT_PRINTF(2, 3)
41658-static inline void
41659-emitter_printf(emitter_t *emitter, const char *format, ...) {
41660-	va_list ap;
41661-
41662-	va_start(ap, format);
41663-	malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
41664-	va_end(ap);
41665-}
41666-
41667-static inline const char * JEMALLOC_FORMAT_ARG(3)
41668-emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier,
41669-    emitter_justify_t justify, int width) {
41670-	size_t written;
41671-	fmt_specifier++;
41672-	if (justify == emitter_justify_none) {
41673-		written = malloc_snprintf(out_fmt, out_size,
41674-		    "%%%s", fmt_specifier);
41675-	} else if (justify == emitter_justify_left) {
41676-		written = malloc_snprintf(out_fmt, out_size,
41677-		    "%%-%d%s", width, fmt_specifier);
41678-	} else {
41679-		written = malloc_snprintf(out_fmt, out_size,
41680-		    "%%%d%s", width, fmt_specifier);
41681-	}
41682-	/* Only happens in case of bad format string, which *we* choose. */
41683-	assert(written < out_size);
41684-	return out_fmt;
41685-}
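
For example, the generator above turns a base specifier plus a justification and width into an ordinary printf format string. A standalone approximation using plain snprintf (the emitter types and malloc_snprintf are not needed for the idea; the justify constants here are stand-ins):

#include <stdio.h>

enum { JUSTIFY_NONE, JUSTIFY_LEFT, JUSTIFY_RIGHT };

static const char *
gen_fmt(char *out, size_t out_size, const char *spec, int justify, int width) {
	spec++;				/* skip the leading '%' */
	if (justify == JUSTIFY_NONE) {
		snprintf(out, out_size, "%%%s", spec);
	} else if (justify == JUSTIFY_LEFT) {
		snprintf(out, out_size, "%%-%d%s", width, spec);
	} else {
		snprintf(out, out_size, "%%%d%s", width, spec);
	}
	return out;
}

int
main(void) {
	char fmt[16];
	printf("[%s]\n", gen_fmt(fmt, sizeof(fmt), "%s", JUSTIFY_LEFT, 8));	/* [%-8s] */
	printf("[%s]\n", gen_fmt(fmt, sizeof(fmt), "%zu", JUSTIFY_RIGHT, 6));	/* [%6zu] */
	return 0;
}
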
41686-
41687-/*
41688- * Internal.  Emit the given value type in the relevant encoding (so that the
41689- * bool true gets mapped to json "true", but the string "true" gets mapped to
41690- * json "\"true\"", for instance).
41691- *
41692- * Width is ignored if justify is emitter_justify_none.
41693- */
41694-static inline void
41695-emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
41696-    emitter_type_t value_type, const void *value) {
41697-	size_t str_written;
41698-#define BUF_SIZE 256
41699-#define FMT_SIZE 10
41700-	/*
41701-	 * We dynamically generate a format string to emit, to let us use the
41702-	 * snprintf machinery.  This is kinda hacky, but gets the job done
41703-	 * quickly without having to think about the various snprintf edge
41704-	 * cases.
41705-	 */
41706-	char fmt[FMT_SIZE];
41707-	char buf[BUF_SIZE];
41708-
41709-#define EMIT_SIMPLE(type, format)					\
41710-	emitter_printf(emitter,						\
41711-	    emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width),	\
41712-	    *(const type *)value);
41713-
41714-	switch (value_type) {
41715-	case emitter_type_bool:
41716-		emitter_printf(emitter,
41717-		    emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width),
41718-		    *(const bool *)value ?  "true" : "false");
41719-		break;
41720-	case emitter_type_int:
41721-		EMIT_SIMPLE(int, "%d")
41722-		break;
41723-	case emitter_type_int64:
41724-		EMIT_SIMPLE(int64_t, "%" FMTd64)
41725-		break;
41726-	case emitter_type_unsigned:
41727-		EMIT_SIMPLE(unsigned, "%u")
41728-		break;
41729-	case emitter_type_ssize:
41730-		EMIT_SIMPLE(ssize_t, "%zd")
41731-		break;
41732-	case emitter_type_size:
41733-		EMIT_SIMPLE(size_t, "%zu")
41734-		break;
41735-	case emitter_type_string:
41736-		str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"",
41737-		    *(const char *const *)value);
41738-		/*
41739-		 * We control the strings we output; we shouldn't get anything
41740-		 * anywhere near the fmt size.
41741-		 */
41742-		assert(str_written < BUF_SIZE);
41743-		emitter_printf(emitter,
41744-		    emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), buf);
41745-		break;
41746-	case emitter_type_uint32:
41747-		EMIT_SIMPLE(uint32_t, "%" FMTu32)
41748-		break;
41749-	case emitter_type_uint64:
41750-		EMIT_SIMPLE(uint64_t, "%" FMTu64)
41751-		break;
41752-	case emitter_type_title:
41753-		EMIT_SIMPLE(char *const, "%s");
41754-		break;
41755-	default:
41756-		unreachable();
41757-	}
41758-#undef BUF_SIZE
41759-#undef FMT_SIZE
41760-}
41761-
41762-
41763-/* Internal functions.  In json mode, tracks nesting state. */
41764-static inline void
41765-emitter_nest_inc(emitter_t *emitter) {
41766-	emitter->nesting_depth++;
41767-	emitter->item_at_depth = false;
41768-}
41769-
41770-static inline void
41771-emitter_nest_dec(emitter_t *emitter) {
41772-	emitter->nesting_depth--;
41773-	emitter->item_at_depth = true;
41774-}
41775-
41776-static inline void
41777-emitter_indent(emitter_t *emitter) {
41778-	int amount = emitter->nesting_depth;
41779-	const char *indent_str;
41780-	assert(emitter->output != emitter_output_json_compact);
41781-	if (emitter->output == emitter_output_json) {
41782-		indent_str = "\t";
41783-	} else {
41784-		amount *= 2;
41785-		indent_str = " ";
41786-	}
41787-	for (int i = 0; i < amount; i++) {
41788-		emitter_printf(emitter, "%s", indent_str);
41789-	}
41790-}
41791-
41792-static inline void
41793-emitter_json_key_prefix(emitter_t *emitter) {
41794-	assert(emitter_outputs_json(emitter));
41795-	if (emitter->emitted_key) {
41796-		emitter->emitted_key = false;
41797-		return;
41798-	}
41799-	if (emitter->item_at_depth) {
41800-		emitter_printf(emitter, ",");
41801-	}
41802-	if (emitter->output != emitter_output_json_compact) {
41803-		emitter_printf(emitter, "\n");
41804-		emitter_indent(emitter);
41805-	}
41806-}
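
Those two flags drive all of the separator logic: emitted_key suppresses the prefix for the value that immediately follows its key, and item_at_depth inserts the comma between siblings. A compact-mode-only, self-contained sketch of that state machine (reduced types, not the emitter API):

#include <stdbool.h>
#include <stdio.h>

typedef struct { bool item_at_depth, emitted_key; } em_t;

static void
key_prefix(em_t *e) {
	if (e->emitted_key) {		/* value follows its key: no separator */
		e->emitted_key = false;
		return;
	}
	if (e->item_at_depth) {		/* separate siblings */
		printf(",");
	}
}

static void
json_key(em_t *e, const char *k) {
	key_prefix(e);
	printf("\"%s\":", k);
	e->emitted_key = true;
}

static void
json_int(em_t *e, int v) {
	key_prefix(e);
	printf("%d", v);
	e->item_at_depth = true;
}

int
main(void) {
	em_t e = {false, false};
	printf("{");
	json_key(&e, "a"); json_int(&e, 1);
	json_key(&e, "b"); json_int(&e, 2);
	printf("}\n");			/* prints {"a":1,"b":2} */
	return 0;
}
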
41807-
41808-/******************************************************************************/
41809-/* Public functions for emitter_t. */
41810-
41811-static inline void
41812-emitter_init(emitter_t *emitter, emitter_output_t emitter_output,
41813-    write_cb_t *write_cb, void *cbopaque) {
41814-	emitter->output = emitter_output;
41815-	emitter->write_cb = write_cb;
41816-	emitter->cbopaque = cbopaque;
41817-	emitter->item_at_depth = false;
41818-	emitter->emitted_key = false;
41819-	emitter->nesting_depth = 0;
41820-}
41821-
41822-/******************************************************************************/
41823-/* JSON public API. */
41824-
41825-/*
41826- * Emits a key (e.g. as appears in an object). The next json entity emitted will
41827- * be the corresponding value.
41828- */
41829-static inline void
41830-emitter_json_key(emitter_t *emitter, const char *json_key) {
41831-	if (emitter_outputs_json(emitter)) {
41832-		emitter_json_key_prefix(emitter);
41833-		emitter_printf(emitter, "\"%s\":%s", json_key,
41834-		    emitter->output == emitter_output_json_compact ? "" : " ");
41835-		emitter->emitted_key = true;
41836-	}
41837-}
41838-
41839-static inline void
41840-emitter_json_value(emitter_t *emitter, emitter_type_t value_type,
41841-    const void *value) {
41842-	if (emitter_outputs_json(emitter)) {
41843-		emitter_json_key_prefix(emitter);
41844-		emitter_print_value(emitter, emitter_justify_none, -1,
41845-		    value_type, value);
41846-		emitter->item_at_depth = true;
41847-	}
41848-}
41849-
41850-/* Shorthand for calling emitter_json_key and then emitter_json_value. */
41851-static inline void
41852-emitter_json_kv(emitter_t *emitter, const char *json_key,
41853-    emitter_type_t value_type, const void *value) {
41854-	emitter_json_key(emitter, json_key);
41855-	emitter_json_value(emitter, value_type, value);
41856-}
41857-
41858-static inline void
41859-emitter_json_array_begin(emitter_t *emitter) {
41860-	if (emitter_outputs_json(emitter)) {
41861-		emitter_json_key_prefix(emitter);
41862-		emitter_printf(emitter, "[");
41863-		emitter_nest_inc(emitter);
41864-	}
41865-}
41866-
41867-/* Shorthand for calling emitter_json_key and then emitter_json_array_begin. */
41868-static inline void
41869-emitter_json_array_kv_begin(emitter_t *emitter, const char *json_key) {
41870-	emitter_json_key(emitter, json_key);
41871-	emitter_json_array_begin(emitter);
41872-}
41873-
41874-static inline void
41875-emitter_json_array_end(emitter_t *emitter) {
41876-	if (emitter_outputs_json(emitter)) {
41877-		assert(emitter->nesting_depth > 0);
41878-		emitter_nest_dec(emitter);
41879-		if (emitter->output != emitter_output_json_compact) {
41880-			emitter_printf(emitter, "\n");
41881-			emitter_indent(emitter);
41882-		}
41883-		emitter_printf(emitter, "]");
41884-	}
41885-}
41886-
41887-static inline void
41888-emitter_json_object_begin(emitter_t *emitter) {
41889-	if (emitter_outputs_json(emitter)) {
41890-		emitter_json_key_prefix(emitter);
41891-		emitter_printf(emitter, "{");
41892-		emitter_nest_inc(emitter);
41893-	}
41894-}
41895-
41896-/* Shorthand for calling emitter_json_key and then emitter_json_object_begin. */
41897-static inline void
41898-emitter_json_object_kv_begin(emitter_t *emitter, const char *json_key) {
41899-	emitter_json_key(emitter, json_key);
41900-	emitter_json_object_begin(emitter);
41901-}
41902-
41903-static inline void
41904-emitter_json_object_end(emitter_t *emitter) {
41905-	if (emitter_outputs_json(emitter)) {
41906-		assert(emitter->nesting_depth > 0);
41907-		emitter_nest_dec(emitter);
41908-		if (emitter->output != emitter_output_json_compact) {
41909-			emitter_printf(emitter, "\n");
41910-			emitter_indent(emitter);
41911-		}
41912-		emitter_printf(emitter, "}");
41913-	}
41914-}
41915-
41916-
41917-/******************************************************************************/
41918-/* Table public API. */
41919-
41920-static inline void
41921-emitter_table_dict_begin(emitter_t *emitter, const char *table_key) {
41922-	if (emitter->output == emitter_output_table) {
41923-		emitter_indent(emitter);
41924-		emitter_printf(emitter, "%s\n", table_key);
41925-		emitter_nest_inc(emitter);
41926-	}
41927-}
41928-
41929-static inline void
41930-emitter_table_dict_end(emitter_t *emitter) {
41931-	if (emitter->output == emitter_output_table) {
41932-		emitter_nest_dec(emitter);
41933-	}
41934-}
41935-
41936-static inline void
41937-emitter_table_kv_note(emitter_t *emitter, const char *table_key,
41938-    emitter_type_t value_type, const void *value,
41939-    const char *table_note_key, emitter_type_t table_note_value_type,
41940-    const void *table_note_value) {
41941-	if (emitter->output == emitter_output_table) {
41942-		emitter_indent(emitter);
41943-		emitter_printf(emitter, "%s: ", table_key);
41944-		emitter_print_value(emitter, emitter_justify_none, -1,
41945-		    value_type, value);
41946-		if (table_note_key != NULL) {
41947-			emitter_printf(emitter, " (%s: ", table_note_key);
41948-			emitter_print_value(emitter, emitter_justify_none, -1,
41949-			    table_note_value_type, table_note_value);
41950-			emitter_printf(emitter, ")");
41951-		}
41952-		emitter_printf(emitter, "\n");
41953-	}
41954-	emitter->item_at_depth = true;
41955-}
41956-
41957-static inline void
41958-emitter_table_kv(emitter_t *emitter, const char *table_key,
41959-    emitter_type_t value_type, const void *value) {
41960-	emitter_table_kv_note(emitter, table_key, value_type, value, NULL,
41961-	    emitter_type_bool, NULL);
41962-}
41963-
41964-
41965-/* Write to the emitter the given string, but only in table mode. */
41966-JEMALLOC_FORMAT_PRINTF(2, 3)
41967-static inline void
41968-emitter_table_printf(emitter_t *emitter, const char *format, ...) {
41969-	if (emitter->output == emitter_output_table) {
41970-		va_list ap;
41971-		va_start(ap, format);
41972-		malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap);
41973-		va_end(ap);
41974-	}
41975-}
41976-
41977-static inline void
41978-emitter_table_row(emitter_t *emitter, emitter_row_t *row) {
41979-	if (emitter->output != emitter_output_table) {
41980-		return;
41981-	}
41982-	emitter_col_t *col;
41983-	ql_foreach(col, &row->cols, link) {
41984-		emitter_print_value(emitter, col->justify, col->width,
41985-		    col->type, (const void *)&col->bool_val);
41986-	}
41987-	emitter_table_printf(emitter, "\n");
41988-}
41989-
41990-static inline void
41991-emitter_row_init(emitter_row_t *row) {
41992-	ql_new(&row->cols);
41993-}
41994-
41995-static inline void
41996-emitter_col_init(emitter_col_t *col, emitter_row_t *row) {
41997-	ql_elm_new(col, link);
41998-	ql_tail_insert(&row->cols, col, link);
41999-}
42000-
42001-
42002-/******************************************************************************/
42003-/*
42004- * Generalized public API. Emits using either JSON or table, according to
42005- * settings in the emitter_t. */
42006-
42007-/*
42008- * The "note" variant emits an additional kv pair, but only in table mode.  Omits the
42009- * note if table_note_key is NULL.
42010- */
42011-static inline void
42012-emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key,
42013-    emitter_type_t value_type, const void *value,
42014-    const char *table_note_key, emitter_type_t table_note_value_type,
42015-    const void *table_note_value) {
42016-	if (emitter_outputs_json(emitter)) {
42017-		emitter_json_key(emitter, json_key);
42018-		emitter_json_value(emitter, value_type, value);
42019-	} else {
42020-		emitter_table_kv_note(emitter, table_key, value_type, value,
42021-		    table_note_key, table_note_value_type, table_note_value);
42022-	}
42023-	emitter->item_at_depth = true;
42024-}
42025-
42026-static inline void
42027-emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key,
42028-    emitter_type_t value_type, const void *value) {
42029-	emitter_kv_note(emitter, json_key, table_key, value_type, value, NULL,
42030-	    emitter_type_bool, NULL);
42031-}
42032-
42033-static inline void
42034-emitter_dict_begin(emitter_t *emitter, const char *json_key,
42035-    const char *table_header) {
42036-	if (emitter_outputs_json(emitter)) {
42037-		emitter_json_key(emitter, json_key);
42038-		emitter_json_object_begin(emitter);
42039-	} else {
42040-		emitter_table_dict_begin(emitter, table_header);
42041-	}
42042-}
42043-
42044-static inline void
42045-emitter_dict_end(emitter_t *emitter) {
42046-	if (emitter_outputs_json(emitter)) {
42047-		emitter_json_object_end(emitter);
42048-	} else {
42049-		emitter_table_dict_end(emitter);
42050-	}
42051-}
42052-
42053-static inline void
42054-emitter_begin(emitter_t *emitter) {
42055-	if (emitter_outputs_json(emitter)) {
42056-		assert(emitter->nesting_depth == 0);
42057-		emitter_printf(emitter, "{");
42058-		emitter_nest_inc(emitter);
42059-	} else {
42060-		/*
42061-		 * This guarantees that we always call write_cb at least once.
42062-		 * This is useful if some invariant is established by each call
42063-		 * to write_cb, but doesn't hold initially: e.g., some buffer
42064-		 * holds a null-terminated string.
42065-		 */
42066-		emitter_printf(emitter, "%s", "");
42067-	}
42068-}
42069-
42070-static inline void
42071-emitter_end(emitter_t *emitter) {
42072-	if (emitter_outputs_json(emitter)) {
42073-		assert(emitter->nesting_depth == 1);
42074-		emitter_nest_dec(emitter);
42075-		emitter_printf(emitter, "%s", emitter->output ==
42076-		    emitter_output_json_compact ? "}" : "\n}\n");
42077-	}
42078-}
42079-
42080-#endif /* JEMALLOC_INTERNAL_EMITTER_H */
42081diff --git a/jemalloc/include/jemalloc/internal/eset.h b/jemalloc/include/jemalloc/internal/eset.h
42082deleted file mode 100644
42083index 4f689b4..0000000
42084--- a/jemalloc/include/jemalloc/internal/eset.h
42085+++ /dev/null
42086@@ -1,77 +0,0 @@
42087-#ifndef JEMALLOC_INTERNAL_ESET_H
42088-#define JEMALLOC_INTERNAL_ESET_H
42089-
42090-#include "jemalloc/internal/atomic.h"
42091-#include "jemalloc/internal/fb.h"
42092-#include "jemalloc/internal/edata.h"
42093-#include "jemalloc/internal/mutex.h"
42094-
42095-/*
42096- * An eset ("extent set") is a quantized collection of extents, with built-in
42097- * LRU queue.
42098- *
42099- * This class is not thread-safe; synchronization must be done externally if
42100- * there are mutating operations.  One exception is the stats counters, which
42101- * may be read without any locking.
42102- */
42103-
42104-typedef struct eset_bin_s eset_bin_t;
42105-struct eset_bin_s {
42106-	edata_heap_t heap;
42107-	/*
42108-	 * We do first-fit across multiple size classes.  If we compared against
42109-	 * the min element in each heap directly, we'd take a cache miss per
42110-	 * extent we looked at.  If we co-locate the edata summaries, we only
42111-	 * take a miss on the edata we're actually going to return (which is
42112-	 * inevitable anyways).
42113-	 */
42114-	edata_cmp_summary_t heap_min;
42115-};
42116-
42117-typedef struct eset_bin_stats_s eset_bin_stats_t;
42118-struct eset_bin_stats_s {
42119-	atomic_zu_t nextents;
42120-	atomic_zu_t nbytes;
42121-};
42122-
42123-typedef struct eset_s eset_t;
42124-struct eset_s {
42125-	/* Bitmap for which set bits correspond to non-empty heaps. */
42126-	fb_group_t bitmap[FB_NGROUPS(SC_NPSIZES + 1)];
42127-
42128-	/* Quantized per size class heaps of extents. */
42129-	eset_bin_t bins[SC_NPSIZES + 1];
42130-
42131-	eset_bin_stats_t bin_stats[SC_NPSIZES + 1];
42132-
42133-	/* LRU of all extents in heaps. */
42134-	edata_list_inactive_t lru;
42135-
42136-	/* Page sum for all extents in heaps. */
42137-	atomic_zu_t npages;
42138-
42139-	/*
42140-	 * A duplication of the data in the containing ecache.  We use this only
42141-	 * for assertions on the states of the passed-in extents.
42142-	 */
42143-	extent_state_t state;
42144-};
42145-
42146-void eset_init(eset_t *eset, extent_state_t state);
42147-
42148-size_t eset_npages_get(eset_t *eset);
42149-/* Get the number of extents in the given page size index. */
42150-size_t eset_nextents_get(eset_t *eset, pszind_t ind);
42151-/* Get the sum total bytes of the extents in the given page size index. */
42152-size_t eset_nbytes_get(eset_t *eset, pszind_t ind);
42153-
42154-void eset_insert(eset_t *eset, edata_t *edata);
42155-void eset_remove(eset_t *eset, edata_t *edata);
42156-/*
42157- * Select an extent from this eset of the given size and alignment.  Returns
42158- * null if no such item could be found.
42159- */
42160-edata_t *eset_fit(eset_t *eset, size_t esize, size_t alignment, bool exact_only,
42161-    unsigned lg_max_fit);
42162-
42163-#endif /* JEMALLOC_INTERNAL_ESET_H */
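
The bitmap of non-empty bins is what keeps the quantized first-fit cheap: rather than probing every heap, the search masks off bins below the needed size class and jumps to the first set bit. A one-word sketch of that lookup, assuming a POSIX ffs() (the real code uses the fb module over many groups):

#include <stdio.h>
#include <strings.h>	/* ffs */

int
main(void) {
	unsigned nonempty = 0;		/* bit i set => bin i is non-empty */
	nonempty |= 1u << 3;
	nonempty |= 1u << 9;

	unsigned want = 5;		/* need a bin of index >= 5 */
	unsigned masked = nonempty & ~((1u << want) - 1u);
	int hit = ffs((int)masked);	/* 1-based, 0 if none */
	if (hit != 0) {
		printf("first usable bin: %d\n", hit - 1);	/* prints 9 */
	}
	return 0;
}
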
42164diff --git a/jemalloc/include/jemalloc/internal/exp_grow.h b/jemalloc/include/jemalloc/internal/exp_grow.h
42165deleted file mode 100644
42166index 8566b8a..0000000
42167--- a/jemalloc/include/jemalloc/internal/exp_grow.h
42168+++ /dev/null
42169@@ -1,50 +0,0 @@
42170-#ifndef JEMALLOC_INTERNAL_EXP_GROW_H
42171-#define JEMALLOC_INTERNAL_EXP_GROW_H
42172-
42173-typedef struct exp_grow_s exp_grow_t;
42174-struct exp_grow_s {
42175-	/*
42176-	 * Next extent size class in a growing series to use when satisfying a
42177-	 * request via the extent hooks (only if opt_retain).  This limits the
42178-	 * number of disjoint virtual memory ranges so that extent merging can
42179-	 * be effective even if multiple arenas' extent allocation requests are
42180-	 * highly interleaved.
42181-	 *
42182-	 * retain_grow_limit is the max allowed size ind to expand (unless the
42183-	 * required size is greater).  Default is no limit, and controlled
42184-	 * through mallctl only.
42185-	 */
42186-	pszind_t next;
42187-	pszind_t limit;
42188-};
42189-
42190-static inline bool
42191-exp_grow_size_prepare(exp_grow_t *exp_grow, size_t alloc_size_min,
42192-    size_t *r_alloc_size, pszind_t *r_skip) {
42193-	*r_skip = 0;
42194-	*r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip);
42195-	while (*r_alloc_size < alloc_size_min) {
42196-		(*r_skip)++;
42197-		if (exp_grow->next + *r_skip >=
42198-		    sz_psz2ind(SC_LARGE_MAXCLASS)) {
42199-			/* Outside legal range. */
42200-			return true;
42201-		}
42202-		*r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip);
42203-	}
42204-	return false;
42205-}
42206-
42207-static inline void
42208-exp_grow_size_commit(exp_grow_t *exp_grow, pszind_t skip) {
42209-	if (exp_grow->next + skip + 1 <= exp_grow->limit) {
42210-		exp_grow->next += skip + 1;
42211-	} else {
42212-		exp_grow->next = exp_grow->limit;
42213-	}
42214-
42215-}
42216-
42217-void exp_grow_init(exp_grow_t *exp_grow);
42218-
42219-#endif /* JEMALLOC_INTERNAL_EXP_GROW_H */
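
A self-contained walk-through of the prepare/commit pair above: prepare finds the first size class in the series large enough for the request (possibly skipping ahead), and commit advances the series, capped at the limit. The 4 KiB << ind mapping and the class count here are stand-ins for sz_pind2sz and the real bounds:

#include <stdbool.h>
#include <stdio.h>

#define NPSIZES 16
static size_t pind2sz(unsigned ind) { return (size_t)4096 << ind; }

typedef struct { unsigned next, limit; } grow_t;

static bool
grow_prepare(grow_t *g, size_t min, size_t *sz, unsigned *skip) {
	*skip = 0;
	*sz = pind2sz(g->next);
	while (*sz < min) {
		(*skip)++;
		if (g->next + *skip >= NPSIZES) {
			return true;	/* outside legal range */
		}
		*sz = pind2sz(g->next + *skip);
	}
	return false;
}

static void
grow_commit(grow_t *g, unsigned skip) {
	g->next = (g->next + skip + 1 <= g->limit) ? g->next + skip + 1 : g->limit;
}

int
main(void) {
	grow_t g = {0, NPSIZES - 1};
	size_t sz;
	unsigned skip;
	if (!grow_prepare(&g, 3 * 4096, &sz, &skip)) {	/* want >= 12 KiB */
		printf("alloc %zu bytes (skip %u)\n", sz, skip);
		grow_commit(&g, skip);	/* the next request starts larger */
	}
	printf("next index now %u\n", g.next);
	return 0;
}
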
42220diff --git a/jemalloc/include/jemalloc/internal/extent.h b/jemalloc/include/jemalloc/internal/extent.h
42221deleted file mode 100644
42222index 1d51d41..0000000
42223--- a/jemalloc/include/jemalloc/internal/extent.h
42224+++ /dev/null
42225@@ -1,137 +0,0 @@
42226-#ifndef JEMALLOC_INTERNAL_EXTENT_H
42227-#define JEMALLOC_INTERNAL_EXTENT_H
42228-
42229-#include "jemalloc/internal/ecache.h"
42230-#include "jemalloc/internal/ehooks.h"
42231-#include "jemalloc/internal/ph.h"
42232-#include "jemalloc/internal/rtree.h"
42233-
42234-/*
42235- * This module contains the page-level allocator.  It chooses the addresses that
42236- * allocations requested by other modules will inhabit, and updates the global
42237- * metadata to reflect allocation/deallocation/purging decisions.
42238- */
42239-
42240-/*
42241- * When reusing (and splitting) an active extent, (1U << opt_lg_extent_max_active_fit)
42242- * is the max ratio between the size of the active extent and the new extent.
42243- */
42244-#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
42245-extern size_t opt_lg_extent_max_active_fit;
42246-
42247-edata_t *ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
42248-    ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
42249-    bool zero, bool guarded);
42250-edata_t *ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
42251-    ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
42252-    bool zero, bool guarded);
42253-void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
42254-    ecache_t *ecache, edata_t *edata);
42255-edata_t *ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
42256-    ecache_t *ecache, size_t npages_min);
42257-
42258-void extent_gdump_add(tsdn_t *tsdn, const edata_t *edata);
42259-void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
42260-    edata_t *edata);
42261-void extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
42262-    edata_t *edata);
42263-edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
42264-    void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
42265-    bool growing_retained);
42266-void extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
42267-    edata_t *edata);
42268-void extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
42269-    edata_t *edata);
42270-bool extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
42271-    size_t offset, size_t length);
42272-bool extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
42273-    size_t offset, size_t length);
42274-bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
42275-    size_t offset, size_t length);
42276-bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
42277-    size_t offset, size_t length);
42278-edata_t *extent_split_wrapper(tsdn_t *tsdn, pac_t *pac,
42279-    ehooks_t *ehooks, edata_t *edata, size_t size_a, size_t size_b,
42280-    bool holding_core_locks);
42281-bool extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
42282-    edata_t *a, edata_t *b);
42283-bool extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
42284-    bool commit, bool zero, bool growing_retained);
42285-size_t extent_sn_next(pac_t *pac);
42286-bool extent_boot(void);
42287-
42288-JEMALLOC_ALWAYS_INLINE bool
42289-extent_neighbor_head_state_mergeable(bool edata_is_head,
42290-    bool neighbor_is_head, bool forward) {
42291-	/*
42292-	 * Head states checking: disallow merging if the higher addr extent is a
42293-	 * head extent.  This helps preserve first-fit, and more importantly
42294-	 * ensures that no merge happens across arenas.
42295-	 */
42296-	if (forward) {
42297-		if (neighbor_is_head) {
42298-			return false;
42299-		}
42300-	} else {
42301-		if (edata_is_head) {
42302-			return false;
42303-		}
42304-	}
42305-	return true;
42306-}
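
Restated as a single predicate, the rule is that the higher-addressed side of the merge must not be a head extent. A tiny standalone check of that same predicate (plain bools, no jemalloc types):

#include <assert.h>
#include <stdbool.h>

static bool
mergeable(bool edata_is_head, bool neighbor_is_head, bool forward) {
	/* The higher-addressed extent must not be a head extent. */
	return forward ? !neighbor_is_head : !edata_is_head;
}

int
main(void) {
	/* Merging forward into a head extent would cross an allocation boundary. */
	assert(!mergeable(false, true, /* forward */ true));
	/* Merging backward is allowed as long as we are not a head ourselves. */
	assert(mergeable(false, true, /* forward */ false));
	return 0;
}
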
42307-
42308-JEMALLOC_ALWAYS_INLINE bool
42309-extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents,
42310-    extent_pai_t pai, extent_state_t expected_state, bool forward,
42311-    bool expanding) {
42312-	edata_t *neighbor = contents.edata;
42313-	if (neighbor == NULL) {
42314-		return false;
42315-	}
42316-	/* It's not safe to access *neighbor yet; must verify states first. */
42317-	bool neighbor_is_head = contents.metadata.is_head;
42318-	if (!extent_neighbor_head_state_mergeable(edata_is_head_get(edata),
42319-	    neighbor_is_head, forward)) {
42320-		return false;
42321-	}
42322-	extent_state_t neighbor_state = contents.metadata.state;
42323-	if (pai == EXTENT_PAI_PAC) {
42324-		if (neighbor_state != expected_state) {
42325-			return false;
42326-		}
42327-		/* From this point, it's safe to access *neighbor. */
42328-		if (!expanding && (edata_committed_get(edata) !=
42329-		    edata_committed_get(neighbor))) {
42330-			/*
42331-			 * Some platforms (e.g. Windows) require an explicit
42332-			 * commit step (and writing to uncommitted memory is not
42333-			 * allowed).
42334-			 */
42335-			return false;
42336-		}
42337-	} else {
42338-		if (neighbor_state == extent_state_active) {
42339-			return false;
42340-		}
42341-		/* From this point, it's safe to access *neighbor. */
42342-	}
42343-
42344-	assert(edata_pai_get(edata) == pai);
42345-	if (edata_pai_get(neighbor) != pai) {
42346-		return false;
42347-	}
42348-	if (opt_retain) {
42349-		assert(edata_arena_ind_get(edata) ==
42350-		    edata_arena_ind_get(neighbor));
42351-	} else {
42352-		if (edata_arena_ind_get(edata) !=
42353-		    edata_arena_ind_get(neighbor)) {
42354-			return false;
42355-		}
42356-	}
42357-	assert(!edata_guarded_get(edata) && !edata_guarded_get(neighbor));
42358-
42359-	return true;
42360-}
42361-
42362-#endif /* JEMALLOC_INTERNAL_EXTENT_H */
42363diff --git a/jemalloc/include/jemalloc/internal/extent_dss.h b/jemalloc/include/jemalloc/internal/extent_dss.h
42364deleted file mode 100644
42365index e8f02ce..0000000
42366--- a/jemalloc/include/jemalloc/internal/extent_dss.h
42367+++ /dev/null
42368@@ -1,26 +0,0 @@
42369-#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H
42370-#define JEMALLOC_INTERNAL_EXTENT_DSS_H
42371-
42372-typedef enum {
42373-	dss_prec_disabled  = 0,
42374-	dss_prec_primary   = 1,
42375-	dss_prec_secondary = 2,
42376-
42377-	dss_prec_limit     = 3
42378-} dss_prec_t;
42379-#define DSS_PREC_DEFAULT dss_prec_secondary
42380-#define DSS_DEFAULT "secondary"
42381-
42382-extern const char *dss_prec_names[];
42383-
42384-extern const char *opt_dss;
42385-
42386-dss_prec_t extent_dss_prec_get(void);
42387-bool extent_dss_prec_set(dss_prec_t dss_prec);
42388-void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
42389-    size_t size, size_t alignment, bool *zero, bool *commit);
42390-bool extent_in_dss(void *addr);
42391-bool extent_dss_mergeable(void *addr_a, void *addr_b);
42392-void extent_dss_boot(void);
42393-
42394-#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */
42395diff --git a/jemalloc/include/jemalloc/internal/extent_mmap.h b/jemalloc/include/jemalloc/internal/extent_mmap.h
42396deleted file mode 100644
42397index 55f17ee..0000000
42398--- a/jemalloc/include/jemalloc/internal/extent_mmap.h
42399+++ /dev/null
42400@@ -1,10 +0,0 @@
42401-#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
42402-#define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H
42403-
42404-extern bool opt_retain;
42405-
42406-void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment,
42407-    bool *zero, bool *commit);
42408-bool extent_dalloc_mmap(void *addr, size_t size);
42409-
42410-#endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */
42411diff --git a/jemalloc/include/jemalloc/internal/fb.h b/jemalloc/include/jemalloc/internal/fb.h
42412deleted file mode 100644
42413index 90c4091..0000000
42414--- a/jemalloc/include/jemalloc/internal/fb.h
42415+++ /dev/null
42416@@ -1,373 +0,0 @@
42417-#ifndef JEMALLOC_INTERNAL_FB_H
42418-#define JEMALLOC_INTERNAL_FB_H
42419-
42420-/*
42421- * The flat bitmap module.  This has a larger API relative to the bitmap module
42422- * (supporting things like backwards searches, and searching for both set and
42423- * unset bits), at the cost of slower operations for very large bitmaps.
42424- *
42425- * Initialized flat bitmaps start at all-zeros (all bits unset).
42426- */
42427-
42428-typedef unsigned long fb_group_t;
42429-#define FB_GROUP_BITS (ZU(1) << (LG_SIZEOF_LONG + 3))
42430-#define FB_NGROUPS(nbits) ((nbits) / FB_GROUP_BITS \
42431-    + ((nbits) % FB_GROUP_BITS == 0 ? 0 : 1))
42432-
42433-static inline void
42434-fb_init(fb_group_t *fb, size_t nbits) {
42435-	size_t ngroups = FB_NGROUPS(nbits);
42436-	memset(fb, 0, ngroups * sizeof(fb_group_t));
42437-}
42438-
42439-static inline bool
42440-fb_empty(fb_group_t *fb, size_t nbits) {
42441-	size_t ngroups = FB_NGROUPS(nbits);
42442-	for (size_t i = 0; i < ngroups; i++) {
42443-		if (fb[i] != 0) {
42444-			return false;
42445-		}
42446-	}
42447-	return true;
42448-}
42449-
42450-static inline bool
42451-fb_full(fb_group_t *fb, size_t nbits) {
42452-	size_t ngroups = FB_NGROUPS(nbits);
42453-	size_t trailing_bits = nbits % FB_GROUP_BITS;
42454-	size_t limit = (trailing_bits == 0 ? ngroups : ngroups - 1);
42455-	for (size_t i = 0; i < limit; i++) {
42456-		if (fb[i] != ~(fb_group_t)0) {
42457-			return false;
42458-		}
42459-	}
42460-	if (trailing_bits == 0) {
42461-		return true;
42462-	}
42463-	return fb[ngroups - 1] == ((fb_group_t)1 << trailing_bits) - 1;
42464-}
42465-
42466-static inline bool
42467-fb_get(fb_group_t *fb, size_t nbits, size_t bit) {
42468-	assert(bit < nbits);
42469-	size_t group_ind = bit / FB_GROUP_BITS;
42470-	size_t bit_ind = bit % FB_GROUP_BITS;
42471-	return (bool)(fb[group_ind] & ((fb_group_t)1 << bit_ind));
42472-}
42473-
42474-static inline void
42475-fb_set(fb_group_t *fb, size_t nbits, size_t bit) {
42476-	assert(bit < nbits);
42477-	size_t group_ind = bit / FB_GROUP_BITS;
42478-	size_t bit_ind = bit % FB_GROUP_BITS;
42479-	fb[group_ind] |= ((fb_group_t)1 << bit_ind);
42480-}
42481-
42482-static inline void
42483-fb_unset(fb_group_t *fb, size_t nbits, size_t bit) {
42484-	assert(bit < nbits);
42485-	size_t group_ind = bit / FB_GROUP_BITS;
42486-	size_t bit_ind = bit % FB_GROUP_BITS;
42487-	fb[group_ind] &= ~((fb_group_t)1 << bit_ind);
42488-}
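
The get/set/unset trio above all reduce to the same decomposition: bit / FB_GROUP_BITS selects the word, bit % FB_GROUP_BITS selects the bit within it. A reduced standalone reimplementation of that indexing (not the fb_* API itself):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define GROUP_BITS (sizeof(unsigned long) * CHAR_BIT)
#define NGROUPS(nbits) (((nbits) + GROUP_BITS - 1) / GROUP_BITS)

static void
bm_set(unsigned long *bm, size_t bit) {
	bm[bit / GROUP_BITS] |= 1UL << (bit % GROUP_BITS);
}

static bool
bm_get(const unsigned long *bm, size_t bit) {
	return (bm[bit / GROUP_BITS] >> (bit % GROUP_BITS)) & 1UL;
}

int
main(void) {
	enum { NBITS = 200 };
	unsigned long bm[NGROUPS(NBITS)] = {0};	/* starts all-zeros, like fb_init */
	bm_set(bm, 3);
	bm_set(bm, 130);			/* lands in a later group */
	printf("%d %d %d\n", bm_get(bm, 3), bm_get(bm, 4), bm_get(bm, 130));
	return 0;
}
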
42489-
42490-
42491-/*
42492- * Some implementation details.  This visitation function lets us apply a group
42493- * visitor to each group in the bitmap (potentially modifying it).  The mask
42494- * indicates which bits are logically part of the visitation.
42495- */
42496-typedef void (*fb_group_visitor_t)(void *ctx, fb_group_t *fb, fb_group_t mask);
42497-JEMALLOC_ALWAYS_INLINE void
42498-fb_visit_impl(fb_group_t *fb, size_t nbits, fb_group_visitor_t visit, void *ctx,
42499-    size_t start, size_t cnt) {
42500-	assert(cnt > 0);
42501-	assert(start + cnt <= nbits);
42502-	size_t group_ind = start / FB_GROUP_BITS;
42503-	size_t start_bit_ind = start % FB_GROUP_BITS;
42504-	/*
42505-	 * The first group is special; it's the only one we don't start writing
42506-	 * to from bit 0.
42507-	 */
42508-	size_t first_group_cnt = (start_bit_ind + cnt > FB_GROUP_BITS
42509-		? FB_GROUP_BITS - start_bit_ind : cnt);
42510-	/*
42511-	 * We can basically split affected words into:
42512-	 *   - The first group, where we touch only the high bits
42513-	 *   - The last group, where we touch only the low bits
42514-	 *   - The middle, where we set all the bits to the same thing.
42515-	 * We treat each case individually.  The last two could be merged, but
42516-	 * this can lead to bad codegen for those middle words.
42517-	 */
42518-	/* First group */
42519-	fb_group_t mask = ((~(fb_group_t)0)
42520-	    >> (FB_GROUP_BITS - first_group_cnt))
42521-	    << start_bit_ind;
42522-	visit(ctx, &fb[group_ind], mask);
42523-
42524-	cnt -= first_group_cnt;
42525-	group_ind++;
42526-	/* Middle groups */
42527-	while (cnt > FB_GROUP_BITS) {
42528-		visit(ctx, &fb[group_ind], ~(fb_group_t)0);
42529-		cnt -= FB_GROUP_BITS;
42530-		group_ind++;
42531-	}
42532-	/* Last group */
42533-	if (cnt != 0) {
42534-		mask = (~(fb_group_t)0) >> (FB_GROUP_BITS - cnt);
42535-		visit(ctx, &fb[group_ind], mask);
42536-	}
42537-}
42538-
42539-JEMALLOC_ALWAYS_INLINE void
42540-fb_assign_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) {
42541-	bool val = *(bool *)ctx;
42542-	if (val) {
42543-		*fb |= mask;
42544-	} else {
42545-		*fb &= ~mask;
42546-	}
42547-}
42548-
42549-/* Sets the cnt bits starting at position start.  Must not have a 0 count. */
42550-static inline void
42551-fb_set_range(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
42552-	bool val = true;
42553-	fb_visit_impl(fb, nbits, &fb_assign_visitor, &val, start, cnt);
42554-}
42555-
42556-/* Unsets the cnt bits starting at position start.  Must not have a 0 count. */
42557-static inline void
42558-fb_unset_range(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
42559-	bool val = false;
42560-	fb_visit_impl(fb, nbits, &fb_assign_visitor, &val, start, cnt);
42561-}
42562-
42563-JEMALLOC_ALWAYS_INLINE void
42564-fb_scount_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) {
42565-	size_t *scount = (size_t *)ctx;
42566-	*scount += popcount_lu(*fb & mask);
42567-}
42568-
42569-/* Finds the number of set bits in the range of length cnt starting at start. */
42570-JEMALLOC_ALWAYS_INLINE size_t
42571-fb_scount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
42572-	size_t scount = 0;
42573-	fb_visit_impl(fb, nbits, &fb_scount_visitor, &scount, start, cnt);
42574-	return scount;
42575-}
42576-
42577-/* Finds the number of unset bits in the range of length cnt starting at start. */
42578-JEMALLOC_ALWAYS_INLINE size_t
42579-fb_ucount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
42580-	size_t scount = fb_scount(fb, nbits, start, cnt);
42581-	return cnt - scount;
42582-}
42583-
42584-/*
42585- * An implementation detail; find the first bit at position >= start (if
42586- * searching forward) or the last bit at position <= start (if searching
42587- * backward) with the value val.  Returns the number of bits in the bitmap
42588- * (forward) or -1 (backward) if no such bit exists.
42589- */
42590-JEMALLOC_ALWAYS_INLINE ssize_t
42591-fb_find_impl(fb_group_t *fb, size_t nbits, size_t start, bool val,
42592-    bool forward) {
42593-	assert(start < nbits);
42594-	size_t ngroups = FB_NGROUPS(nbits);
42595-	ssize_t group_ind = start / FB_GROUP_BITS;
42596-	size_t bit_ind = start % FB_GROUP_BITS;
42597-
42598-	fb_group_t maybe_invert = (val ? 0 : (fb_group_t)-1);
42599-
42600-	fb_group_t group = fb[group_ind];
42601-	group ^= maybe_invert;
42602-	if (forward) {
42603-		/* Only keep ones in bits bit_ind and above. */
42604-		group &= ~((1LU << bit_ind) - 1);
42605-	} else {
42606-		/*
42607-		 * Only keep ones in bits bit_ind and below.  You might more
42608-		 * naturally express this as (1 << (bit_ind + 1)) - 1, but
42609-		 * that shifts by an invalid amount if bit_ind is one less than
42610-		 * FB_GROUP_BITS.
42611-		 */
42612-		group &= ((2LU << bit_ind) - 1);
42613-	}
42614-	ssize_t group_ind_bound = forward ? (ssize_t)ngroups : -1;
42615-	while (group == 0) {
42616-		group_ind += forward ? 1 : -1;
42617-		if (group_ind == group_ind_bound) {
42618-			return forward ? (ssize_t)nbits : (ssize_t)-1;
42619-		}
42620-		group = fb[group_ind];
42621-		group ^= maybe_invert;
42622-	}
42623-	assert(group != 0);
42624-	size_t bit = forward ? ffs_lu(group) : fls_lu(group);
42625-	size_t pos = group_ind * FB_GROUP_BITS + bit;
42626-	/*
42627-	 * The high bits of a partially filled last group are zeros, so if we're
42628-	 * looking for zeros we don't want to report an invalid result.
42629-	 */
42630-	if (forward && !val && pos > nbits) {
42631-		return nbits;
42632-	}
42633-	return pos;
42634-}
42635-
42636-/*
42637- * Find the first unset bit in the bitmap with an index >= min_bit.  Returns the
42638- * number of bits in the bitmap if no such bit exists.
42639- */
42640-static inline size_t
42641-fb_ffu(fb_group_t *fb, size_t nbits, size_t min_bit) {
42642-	return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ false,
42643-	    /* forward */ true);
42644-}
42645-
42646-/* The same, but looks for a set bit. */
42647-static inline size_t
42648-fb_ffs(fb_group_t *fb, size_t nbits, size_t min_bit) {
42649-	return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ true,
42650-	    /* forward */ true);
42651-}
42652-
42653-/*
42654- * Find the last unset bit in the bitmap with an index <= max_bit.  Returns -1 if
42655- * no such bit exists.
42656- */
42657-static inline ssize_t
42658-fb_flu(fb_group_t *fb, size_t nbits, size_t max_bit) {
42659-	return fb_find_impl(fb, nbits, max_bit, /* val */ false,
42660-	    /* forward */ false);
42661-}
42662-
42663-static inline ssize_t
42664-fb_fls(fb_group_t *fb, size_t nbits, size_t max_bit) {
42665-	return fb_find_impl(fb, nbits, max_bit, /* val */ true,
42666-	    /* forward */ false);
42667-}
42668-
42669-/* Returns whether or not we found a range. */
42670-JEMALLOC_ALWAYS_INLINE bool
42671-fb_iter_range_impl(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
42672-    size_t *r_len, bool val, bool forward) {
42673-	assert(start < nbits);
42674-	ssize_t next_range_begin = fb_find_impl(fb, nbits, start, val, forward);
42675-	if ((forward && next_range_begin == (ssize_t)nbits)
42676-	    || (!forward && next_range_begin == (ssize_t)-1)) {
42677-		return false;
42678-	}
42679-	/* Half open range; the set bits are [begin, end). */
42680-	ssize_t next_range_end = fb_find_impl(fb, nbits, next_range_begin, !val,
42681-	    forward);
42682-	if (forward) {
42683-		*r_begin = next_range_begin;
42684-		*r_len = next_range_end - next_range_begin;
42685-	} else {
42686-		*r_begin = next_range_end + 1;
42687-		*r_len = next_range_begin - next_range_end;
42688-	}
42689-	return true;
42690-}
42691-
42692-/*
42693- * Used to iterate through ranges of set bits.
42694- *
42695- * Tries to find the next contiguous sequence of set bits with a first index >=
42696- * start.  If one exists, puts the earliest bit of the range in *r_begin, its
42697- * length in *r_len, and returns true.  Otherwise, returns false (without
42698- * touching *r_begin or *r_len).
42699- */
42700-static inline bool
42701-fb_srange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
42702-    size_t *r_len) {
42703-	return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
42704-	    /* val */ true, /* forward */ true);
42705-}
42706-
42707-/*
42708- * The same as fb_srange_iter, but searches backwards from start rather than
42709- * forwards.  (The position returned is still the earliest bit in the range).
42710- */
42711-static inline bool
42712-fb_srange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
42713-    size_t *r_len) {
42714-	return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
42715-	    /* val */ true, /* forward */ false);
42716-}
42717-
42718-/* Similar to fb_srange_iter, but searches for unset bits. */
42719-static inline bool
42720-fb_urange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
42721-    size_t *r_len) {
42722-	return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
42723-	    /* val */ false, /* forward */ true);
42724-}
42725-
42726-/* Similar to fb_srange_riter, but searches for unset bits. */
42727-static inline bool
42728-fb_urange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
42729-    size_t *r_len) {
42730-	return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
42731-	    /* val */ false, /* forward */ false);
42732-}
42733-
42734-JEMALLOC_ALWAYS_INLINE size_t
42735-fb_range_longest_impl(fb_group_t *fb, size_t nbits, bool val) {
42736-	size_t begin = 0;
42737-	size_t longest_len = 0;
42738-	size_t len = 0;
42739-	while (begin < nbits && fb_iter_range_impl(fb, nbits, begin, &begin,
42740-	    &len, val, /* forward */ true)) {
42741-		if (len > longest_len) {
42742-			longest_len = len;
42743-		}
42744-		begin += len;
42745-	}
42746-	return longest_len;
42747-}
42748-
42749-static inline size_t
42750-fb_srange_longest(fb_group_t *fb, size_t nbits) {
42751-	return fb_range_longest_impl(fb, nbits, /* val */ true);
42752-}
42753-
42754-static inline size_t
42755-fb_urange_longest(fb_group_t *fb, size_t nbits) {
42756-	return fb_range_longest_impl(fb, nbits, /* val */ false);
42757-}
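fb_srange_longest/fb_urange_longest simply walk the ranges reported by the iterator and keep the maximum length. The same "scan runs, track the longest" pattern is written out bit-by-bit over a single 64-bit word below, as a standalone sketch with illustrative names (not jemalloc's API):

```c
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Length of the longest run of bits equal to `val` in the low nbits of x. */
static size_t
longest_run(uint64_t x, size_t nbits, int val) {
	size_t longest = 0;
	size_t len = 0;
	for (size_t i = 0; i < nbits; i++) {
		if (((x >> i) & 1) == (uint64_t)val) {
			len++;
			if (len > longest) {
				longest = len;
			}
		} else {
			len = 0;
		}
	}
	return longest;
}

int main(void) {
	uint64_t x = 0x00F6;	/* set runs of length 2 and 4; zero run of length 8 */
	printf("%zu %zu\n", longest_run(x, 16, 1), longest_run(x, 16, 0));	/* 4 8 */
	return 0;
}
```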
42758-
42759-/*
42760- * Initializes each bit of dst with the bitwise-AND of the corresponding bits of
42761- * src1 and src2.  All bitmaps must be the same size.
42762- */
42763-static inline void
42764-fb_bit_and(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) {
42765-	size_t ngroups = FB_NGROUPS(nbits);
42766-	for (size_t i = 0; i < ngroups; i++) {
42767-		dst[i] = src1[i] & src2[i];
42768-	}
42769-}
42770-
42771-/* Like fb_bit_and, but with bitwise-OR. */
42772-static inline void
42773-fb_bit_or(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) {
42774-	size_t ngroups = FB_NGROUPS(nbits);
42775-	for (size_t i = 0; i < ngroups; i++) {
42776-		dst[i] = src1[i] | src2[i];
42777-	}
42778-}
42779-
42780-/* Initializes dst bit i to the negation of source bit i. */
42781-static inline void
42782-fb_bit_not(fb_group_t *dst, fb_group_t *src, size_t nbits) {
42783-	size_t ngroups = FB_NGROUPS(nbits);
42784-	for (size_t i = 0; i < ngroups; i++) {
42785-		dst[i] = ~src[i];
42786-	}
42787-}
42788-
42789-#endif /* JEMALLOC_INTERNAL_FB_H */
42790diff --git a/jemalloc/include/jemalloc/internal/fxp.h b/jemalloc/include/jemalloc/internal/fxp.h
42791deleted file mode 100644
42792index 415a982..0000000
42793--- a/jemalloc/include/jemalloc/internal/fxp.h
42794+++ /dev/null
42795@@ -1,126 +0,0 @@
42796-#ifndef JEMALLOC_INTERNAL_FXP_H
42797-#define JEMALLOC_INTERNAL_FXP_H
42798-
42799-/*
42800- * A simple fixed-point math implementation, supporting only unsigned values
42801- * (with overflow being an error).
42802- *
42803- * It's not in general safe to use floating point in core code, because various
42804- * libc implementations we get linked against can assume that malloc won't touch
42805- * floating point state and call it with an unusual calling convention.
42806- */
42807-
42808-/*
42809- * High 16 bits are the integer part, low 16 are the fractional part.  Or
42810- * equivalently, repr == 2**16 * val, where we use "val" to refer to the
42811- * (imaginary) fractional representation of the true value.
42812- *
42813- * We pick a uint32_t here since it's convenient in some places to
42814- * double the representation size (i.e. multiplication and division use
42815- * 64-bit integer types), and a uint64_t is the largest type we're
42816- * certain is available.
42817- */
42818-typedef uint32_t fxp_t;
42819-#define FXP_INIT_INT(x) ((x) << 16)
42820-#define FXP_INIT_PERCENT(pct) (((pct) << 16) / 100)
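Since repr == 2**16 * val, converting integers and percentages and multiplying two values are all plain integer operations plus a final 16-bit shift. A standalone worked example of the representation follows; the MYFXP_* names are local re-definitions for the demo, not jemalloc's symbols:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint32_t myfxp_t;	/* 16.16 fixed point, mirroring the header */
#define MYFXP_INIT_INT(x) ((x) << 16)
#define MYFXP_INIT_PERCENT(pct) (((pct) << 16) / 100)

int main(void) {
	myfxp_t three = MYFXP_INIT_INT(3);		/* 3.0  -> 0x00030000 */
	myfxp_t quarter = MYFXP_INIT_PERCENT(25);	/* 0.25 -> 0x00004000 */
	/* repr == val * 2**16, so one >>16 after the widened multiply. */
	myfxp_t prod = (myfxp_t)(((uint64_t)three * quarter) >> 16);
	printf("0x%08x 0x%08x 0x%08x\n", three, quarter, prod);	/* prod == 0.75 */
	return 0;
}
```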
42821-
42822-/*
42823- * Amount of precision used in parsing and printing numbers.  The integer bound
42824- * is simply because the integer part of the number gets 16 bits, and so is
42825- * bounded by 65536.
42826- *
42827- * We use a lot of precision for the fractional part, even though most of it
42828- * gets rounded off; this lets us get exact values for the important special
42829- * case where the denominator is a small power of 2 (for instance,
42830- * 1/512 == 0.001953125 is exactly representable even with only 16 bits of
42831- * fractional precision).  We need to left-shift by 16 before dividing by
42832- * 10**precision, so we pick precision to be floor(log10(2**48)) = 14.
42833- */
42834-#define FXP_INTEGER_PART_DIGITS 5
42835-#define FXP_FRACTIONAL_PART_DIGITS 14
42836-
42837-/*
42838- * In addition to the integer and fractional parts of the number, we need to
42839- * include a null character and (possibly) a decimal point.
42840- */
42841-#define FXP_BUF_SIZE (FXP_INTEGER_PART_DIGITS + FXP_FRACTIONAL_PART_DIGITS + 2)
42842-
42843-static inline fxp_t
42844-fxp_add(fxp_t a, fxp_t b) {
42845-	return a + b;
42846-}
42847-
42848-static inline fxp_t
42849-fxp_sub(fxp_t a, fxp_t b) {
42850-	assert(a >= b);
42851-	return a - b;
42852-}
42853-
42854-static inline fxp_t
42855-fxp_mul(fxp_t a, fxp_t b) {
42856-	uint64_t unshifted = (uint64_t)a * (uint64_t)b;
42857-	/*
42858-	 * Unshifted is (a.val * 2**16) * (b.val * 2**16)
42859-	 *   == (a.val * b.val) * 2**32, but we want
42860-	 * (a.val * b.val) * 2 ** 16.
42861-	 */
42862-	return (uint32_t)(unshifted >> 16);
42863-}
42864-
42865-static inline fxp_t
42866-fxp_div(fxp_t a, fxp_t b) {
42867-	assert(b != 0);
42868-	uint64_t unshifted = ((uint64_t)a << 32) / (uint64_t)b;
42869-	/*
42870-	 * Unshifted is (a.val * 2**16) * (2**32) / (b.val * 2**16)
42871-	 *   == (a.val / b.val) * (2 ** 32), which again corresponds to a right
42872-	 *   shift of 16.
42873-	 */
42874-	return (uint32_t)(unshifted >> 16);
42875-}
42876-
42877-static inline uint32_t
42878-fxp_round_down(fxp_t a) {
42879-	return a >> 16;
42880-}
42881-
42882-static inline uint32_t
42883-fxp_round_nearest(fxp_t a) {
42884-	uint32_t fractional_part = (a  & ((1U << 16) - 1));
42885-	uint32_t increment = (uint32_t)(fractional_part >= (1U << 15));
42886-	return (a >> 16) + increment;
42887-}
42888-
42889-/*
42890- * Approximately computes x * frac, without the size limitations that would be
42891- * imposed by converting x to an fxp_t.
42892- */
42893-static inline size_t
42894-fxp_mul_frac(size_t x_orig, fxp_t frac) {
42895-	assert(frac <= (1U << 16));
42896-	/*
42897-	 * Work around an over-enthusiastic warning about type limits below (on
42898-	 * 32-bit platforms, a size_t is always less than 1ULL << 48).
42899-	 */
42900-	uint64_t x = (uint64_t)x_orig;
42901-	/*
42902-	 * If we can guarantee no overflow, multiply first before shifting, to
42903-	 * preserve some precision.  Otherwise, shift first and then multiply.
42904-	 * In the latter case, we only lose the low 16 bits of a 48-bit number,
42905-	 * so we're still accurate to within 1/2**32.
42906-	 */
42907-	if (x < (1ULL << 48)) {
42908-		return (size_t)((x * frac) >> 16);
42909-	} else {
42910-		return (size_t)((x >> 16) * (uint64_t)frac);
42911-	}
42912-}
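The branch in fxp_mul_frac trades a little precision for overflow safety: when x already uses more than 48 bits, shifting first can only lose the low 16 bits of x. A standalone restatement of the same logic (illustrative; not the jemalloc symbol):

```c
#include <stdint.h>
#include <stdio.h>

/* Multiply a size by a 16.16 fraction (frac <= 1 << 16), as sketched above. */
static size_t
mul_frac(size_t x_orig, uint32_t frac) {
	uint64_t x = (uint64_t)x_orig;
	if (x < (1ULL << 48)) {
		/* The 64-bit product cannot overflow; keep full precision. */
		return (size_t)((x * frac) >> 16);
	}
	/* Shift first; only the low 16 bits of x are lost. */
	return (size_t)((x >> 16) * (uint64_t)frac);
}

int main(void) {
	uint32_t half = 1U << 15;	/* 0.5 in 16.16 */
	printf("%zu\n", mul_frac(1000, half));			/* 500 */
	printf("%zu\n", mul_frac((size_t)1 << 50, half));	/* 1 << 49 on 64-bit */
	return 0;
}
```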
42913-
42914-/*
42915- * Returns true on error.  Otherwise, returns false and updates *end to point to
42916- * the first character not parsed (because it wasn't a digit).
42917- */
42918-bool fxp_parse(fxp_t *a, const char *ptr, char **end);
42919-void fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]);
42920-
42921-#endif /* JEMALLOC_INTERNAL_FXP_H */
42922diff --git a/jemalloc/include/jemalloc/internal/hash.h b/jemalloc/include/jemalloc/internal/hash.h
42923deleted file mode 100644
42924index 7f94567..0000000
42925--- a/jemalloc/include/jemalloc/internal/hash.h
42926+++ /dev/null
42927@@ -1,320 +0,0 @@
42928-#ifndef JEMALLOC_INTERNAL_HASH_H
42929-#define JEMALLOC_INTERNAL_HASH_H
42930-
42931-#include "jemalloc/internal/assert.h"
42932-
42933-/*
42934- * The following hash function is based on MurmurHash3, placed into the public
42935- * domain by Austin Appleby.  See https://github.com/aappleby/smhasher for
42936- * details.
42937- */
42938-
42939-/******************************************************************************/
42940-/* Internal implementation. */
42941-static inline uint32_t
42942-hash_rotl_32(uint32_t x, int8_t r) {
42943-	return ((x << r) | (x >> (32 - r)));
42944-}
42945-
42946-static inline uint64_t
42947-hash_rotl_64(uint64_t x, int8_t r) {
42948-	return ((x << r) | (x >> (64 - r)));
42949-}
42950-
42951-static inline uint32_t
42952-hash_get_block_32(const uint32_t *p, int i) {
42953-	/* Handle unaligned read. */
42954-	if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
42955-		uint32_t ret;
42956-
42957-		memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
42958-		return ret;
42959-	}
42960-
42961-	return p[i];
42962-}
42963-
42964-static inline uint64_t
42965-hash_get_block_64(const uint64_t *p, int i) {
42966-	/* Handle unaligned read. */
42967-	if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
42968-		uint64_t ret;
42969-
42970-		memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
42971-		return ret;
42972-	}
42973-
42974-	return p[i];
42975-}
42976-
42977-static inline uint32_t
42978-hash_fmix_32(uint32_t h) {
42979-	h ^= h >> 16;
42980-	h *= 0x85ebca6b;
42981-	h ^= h >> 13;
42982-	h *= 0xc2b2ae35;
42983-	h ^= h >> 16;
42984-
42985-	return h;
42986-}
42987-
42988-static inline uint64_t
42989-hash_fmix_64(uint64_t k) {
42990-	k ^= k >> 33;
42991-	k *= KQU(0xff51afd7ed558ccd);
42992-	k ^= k >> 33;
42993-	k *= KQU(0xc4ceb9fe1a85ec53);
42994-	k ^= k >> 33;
42995-
42996-	return k;
42997-}
42998-
42999-static inline uint32_t
43000-hash_x86_32(const void *key, int len, uint32_t seed) {
43001-	const uint8_t *data = (const uint8_t *) key;
43002-	const int nblocks = len / 4;
43003-
43004-	uint32_t h1 = seed;
43005-
43006-	const uint32_t c1 = 0xcc9e2d51;
43007-	const uint32_t c2 = 0x1b873593;
43008-
43009-	/* body */
43010-	{
43011-		const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
43012-		int i;
43013-
43014-		for (i = -nblocks; i; i++) {
43015-			uint32_t k1 = hash_get_block_32(blocks, i);
43016-
43017-			k1 *= c1;
43018-			k1 = hash_rotl_32(k1, 15);
43019-			k1 *= c2;
43020-
43021-			h1 ^= k1;
43022-			h1 = hash_rotl_32(h1, 13);
43023-			h1 = h1*5 + 0xe6546b64;
43024-		}
43025-	}
43026-
43027-	/* tail */
43028-	{
43029-		const uint8_t *tail = (const uint8_t *) (data + nblocks*4);
43030-
43031-		uint32_t k1 = 0;
43032-
43033-		switch (len & 3) {
43034-		case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH;
43035-		case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH;
43036-		case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
43037-			k1 *= c2; h1 ^= k1;
43038-		}
43039-	}
43040-
43041-	/* finalization */
43042-	h1 ^= len;
43043-
43044-	h1 = hash_fmix_32(h1);
43045-
43046-	return h1;
43047-}
43048-
43049-static inline void
43050-hash_x86_128(const void *key, const int len, uint32_t seed,
43051-    uint64_t r_out[2]) {
43052-	const uint8_t * data = (const uint8_t *) key;
43053-	const int nblocks = len / 16;
43054-
43055-	uint32_t h1 = seed;
43056-	uint32_t h2 = seed;
43057-	uint32_t h3 = seed;
43058-	uint32_t h4 = seed;
43059-
43060-	const uint32_t c1 = 0x239b961b;
43061-	const uint32_t c2 = 0xab0e9789;
43062-	const uint32_t c3 = 0x38b34ae5;
43063-	const uint32_t c4 = 0xa1e38b93;
43064-
43065-	/* body */
43066-	{
43067-		const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
43068-		int i;
43069-
43070-		for (i = -nblocks; i; i++) {
43071-			uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
43072-			uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
43073-			uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
43074-			uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);
43075-
43076-			k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
43077-
43078-			h1 = hash_rotl_32(h1, 19); h1 += h2;
43079-			h1 = h1*5 + 0x561ccd1b;
43080-
43081-			k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
43082-
43083-			h2 = hash_rotl_32(h2, 17); h2 += h3;
43084-			h2 = h2*5 + 0x0bcaa747;
43085-
43086-			k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
43087-
43088-			h3 = hash_rotl_32(h3, 15); h3 += h4;
43089-			h3 = h3*5 + 0x96cd1c35;
43090-
43091-			k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
43092-
43093-			h4 = hash_rotl_32(h4, 13); h4 += h1;
43094-			h4 = h4*5 + 0x32ac3b17;
43095-		}
43096-	}
43097-
43098-	/* tail */
43099-	{
43100-		const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
43101-		uint32_t k1 = 0;
43102-		uint32_t k2 = 0;
43103-		uint32_t k3 = 0;
43104-		uint32_t k4 = 0;
43105-
43106-		switch (len & 15) {
43107-		case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH;
43108-		case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH;
43109-		case 13: k4 ^= tail[12] << 0;
43110-			k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
43111-			JEMALLOC_FALLTHROUGH;
43112-		case 12: k3 ^= (uint32_t) tail[11] << 24; JEMALLOC_FALLTHROUGH;
43113-		case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH;
43114-		case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH;
43115-		case  9: k3 ^= tail[ 8] << 0;
43116-			k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
43117-			JEMALLOC_FALLTHROUGH;
43118-		case  8: k2 ^= (uint32_t) tail[ 7] << 24; JEMALLOC_FALLTHROUGH;
43119-		case  7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH;
43120-		case  6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH;
43121-		case  5: k2 ^= tail[ 4] << 0;
43122-			k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
43123-			JEMALLOC_FALLTHROUGH;
43124-		case  4: k1 ^= (uint32_t) tail[ 3] << 24; JEMALLOC_FALLTHROUGH;
43125-		case  3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH;
43126-		case  2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH;
43127-		case  1: k1 ^= tail[ 0] << 0;
43128-			k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
43129-			break;
43130-		}
43131-	}
43132-
43133-	/* finalization */
43134-	h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
43135-
43136-	h1 += h2; h1 += h3; h1 += h4;
43137-	h2 += h1; h3 += h1; h4 += h1;
43138-
43139-	h1 = hash_fmix_32(h1);
43140-	h2 = hash_fmix_32(h2);
43141-	h3 = hash_fmix_32(h3);
43142-	h4 = hash_fmix_32(h4);
43143-
43144-	h1 += h2; h1 += h3; h1 += h4;
43145-	h2 += h1; h3 += h1; h4 += h1;
43146-
43147-	r_out[0] = (((uint64_t) h2) << 32) | h1;
43148-	r_out[1] = (((uint64_t) h4) << 32) | h3;
43149-}
43150-
43151-static inline void
43152-hash_x64_128(const void *key, const int len, const uint32_t seed,
43153-    uint64_t r_out[2]) {
43154-	const uint8_t *data = (const uint8_t *) key;
43155-	const int nblocks = len / 16;
43156-
43157-	uint64_t h1 = seed;
43158-	uint64_t h2 = seed;
43159-
43160-	const uint64_t c1 = KQU(0x87c37b91114253d5);
43161-	const uint64_t c2 = KQU(0x4cf5ad432745937f);
43162-
43163-	/* body */
43164-	{
43165-		const uint64_t *blocks = (const uint64_t *) (data);
43166-		int i;
43167-
43168-		for (i = 0; i < nblocks; i++) {
43169-			uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
43170-			uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);
43171-
43172-			k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
43173-
43174-			h1 = hash_rotl_64(h1, 27); h1 += h2;
43175-			h1 = h1*5 + 0x52dce729;
43176-
43177-			k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
43178-
43179-			h2 = hash_rotl_64(h2, 31); h2 += h1;
43180-			h2 = h2*5 + 0x38495ab5;
43181-		}
43182-	}
43183-
43184-	/* tail */
43185-	{
43186-		const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
43187-		uint64_t k1 = 0;
43188-		uint64_t k2 = 0;
43189-
43190-		switch (len & 15) {
43191-		case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH;
43192-		case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH;
43193-		case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH;
43194-		case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH;
43195-		case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH;
43196-		case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;  JEMALLOC_FALLTHROUGH;
43197-		case  9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
43198-			k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
43199-			JEMALLOC_FALLTHROUGH;
43200-		case  8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH;
43201-		case  7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH;
43202-		case  6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH;
43203-		case  5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH;
43204-		case  4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH;
43205-		case  3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH;
43206-		case  2: k1 ^= ((uint64_t)(tail[ 1])) << 8;  JEMALLOC_FALLTHROUGH;
43207-		case  1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
43208-			k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
43209-			break;
43210-		}
43211-	}
43212-
43213-	/* finalization */
43214-	h1 ^= len; h2 ^= len;
43215-
43216-	h1 += h2;
43217-	h2 += h1;
43218-
43219-	h1 = hash_fmix_64(h1);
43220-	h2 = hash_fmix_64(h2);
43221-
43222-	h1 += h2;
43223-	h2 += h1;
43224-
43225-	r_out[0] = h1;
43226-	r_out[1] = h2;
43227-}
43228-
43229-/******************************************************************************/
43230-/* API. */
43231-static inline void
43232-hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
43233-	assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
43234-
43235-#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
43236-	hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
43237-#else
43238-	{
43239-		uint64_t hashes[2];
43240-		hash_x86_128(key, (int)len, seed, hashes);
43241-		r_hash[0] = (size_t)hashes[0];
43242-		r_hash[1] = (size_t)hashes[1];
43243-	}
43244-#endif
43245-}
43246-
43247-#endif /* JEMALLOC_INTERNAL_HASH_H */
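The only entry point the rest of the allocator uses here is hash(), which dispatches to the 128-bit x64 or x86 variant; everything else is MurmurHash3 internals. The 64-bit finalizer is easy to exercise on its own; a standalone copy of it (the constants match the ones above), showing how nearby inputs avalanche to very different outputs:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* The MurmurHash3 64-bit finalizer, copied here purely for illustration. */
static uint64_t
fmix64(uint64_t k) {
	k ^= k >> 33;
	k *= 0xff51afd7ed558ccdULL;
	k ^= k >> 33;
	k *= 0xc4ceb9fe1a85ec53ULL;
	k ^= k >> 33;
	return k;
}

int main(void) {
	/* Nearby inputs map to very different outputs. */
	for (uint64_t i = 0; i < 4; i++) {
		printf("%" PRIu64 " -> %016" PRIx64 "\n", i, fmix64(i));
	}
	return 0;
}
```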
43248diff --git a/jemalloc/include/jemalloc/internal/hook.h b/jemalloc/include/jemalloc/internal/hook.h
43249deleted file mode 100644
43250index ee246b1..0000000
43251--- a/jemalloc/include/jemalloc/internal/hook.h
43252+++ /dev/null
43253@@ -1,163 +0,0 @@
43254-#ifndef JEMALLOC_INTERNAL_HOOK_H
43255-#define JEMALLOC_INTERNAL_HOOK_H
43256-
43257-#include "jemalloc/internal/tsd.h"
43258-
43259-/*
43260- * This API is *extremely* experimental, and may get ripped out, changed in API-
43261- * and ABI-incompatible ways, be insufficiently or incorrectly documented, etc.
43262- *
43263- * It allows hooking the stateful parts of the API to see changes as they
43264- * happen.
43265- *
43266- * Allocation hooks are called after the allocation is done, free hooks are
43267- * called before the free is done, and expand hooks are called after the
43268- * allocation is expanded.
43269- *
43270- * For realloc and rallocx, if the expansion happens in place, the expansion
43271- * hook is called.  If it is moved, then the alloc hook is called on the new
43272- * location, and then the free hook is called on the old location (i.e. both
43273- * hooks are invoked in between the alloc and the dalloc).
43274- *
43275- * If we return NULL from OOM, then usize might not be trustworthy.  Calling
43276- * realloc(NULL, size) only calls the alloc hook, and calling realloc(ptr, 0)
43277- * only calls the free hook.  (Calling realloc(NULL, 0) is treated as malloc(0),
43278- * and only calls the alloc hook).
43279- *
43280- * Reentrancy:
43281- *   Reentrancy is guarded against from within the hook implementation.  If you
43282- *   call allocator functions from within a hook, the hooks will not be invoked
43283- *   again.
43284- * Threading:
43285- *   The installation of a hook synchronizes with all its uses.  If you can
43286- *   prove the installation of a hook happens-before a jemalloc entry point,
43287- *   then the hook will get invoked (unless there's a racing removal).
43288- *
43289- *   Hook insertion appears to be atomic at a per-thread level (i.e. if a thread
43290- *   allocates and has the alloc hook invoked, then a subsequent free on the
43291- *   same thread will also have the free hook invoked).
43292- *
43293- *   The *removal* of a hook does *not* block until all threads are done with
43294- *   the hook.  Hook authors have to be resilient to this, and need some
43295- *   out-of-band mechanism for cleaning up any dynamically allocated memory
43296- *   associated with their hook.
43297- * Ordering:
43298- *   Order of hook execution is unspecified, and may be different than insertion
43299- *   order.
43300- */
43301-
43302-#define HOOK_MAX 4
43303-
43304-enum hook_alloc_e {
43305-	hook_alloc_malloc,
43306-	hook_alloc_posix_memalign,
43307-	hook_alloc_aligned_alloc,
43308-	hook_alloc_calloc,
43309-	hook_alloc_memalign,
43310-	hook_alloc_valloc,
43311-	hook_alloc_mallocx,
43312-
43313-	/* The reallocating functions have both alloc and dalloc variants */
43314-	hook_alloc_realloc,
43315-	hook_alloc_rallocx,
43316-};
43317-/*
43318- * We put the enum typedef after the enum, since this file may get included by
43319- * jemalloc_cpp.cpp, and C++ disallows enum forward declarations.
43320- */
43321-typedef enum hook_alloc_e hook_alloc_t;
43322-
43323-enum hook_dalloc_e {
43324-	hook_dalloc_free,
43325-	hook_dalloc_dallocx,
43326-	hook_dalloc_sdallocx,
43327-
43328-	/*
43329-	 * The dalloc halves of reallocation (not called if in-place expansion
43330-	 * happens).
43331-	 */
43332-	hook_dalloc_realloc,
43333-	hook_dalloc_rallocx,
43334-};
43335-typedef enum hook_dalloc_e hook_dalloc_t;
43336-
43337-
43338-enum hook_expand_e {
43339-	hook_expand_realloc,
43340-	hook_expand_rallocx,
43341-	hook_expand_xallocx,
43342-};
43343-typedef enum hook_expand_e hook_expand_t;
43344-
43345-typedef void (*hook_alloc)(
43346-    void *extra, hook_alloc_t type, void *result, uintptr_t result_raw,
43347-    uintptr_t args_raw[3]);
43348-
43349-typedef void (*hook_dalloc)(
43350-    void *extra, hook_dalloc_t type, void *address, uintptr_t args_raw[3]);
43351-
43352-typedef void (*hook_expand)(
43353-    void *extra, hook_expand_t type, void *address, size_t old_usize,
43354-    size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]);
43355-
43356-typedef struct hooks_s hooks_t;
43357-struct hooks_s {
43358-	hook_alloc alloc_hook;
43359-	hook_dalloc dalloc_hook;
43360-	hook_expand expand_hook;
43361-	void *extra;
43362-};
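A sketch of what a hooks_t instance looks like in practice: three callbacks with the signatures above plus an extra pointer. This assumes the hook.h declarations are in scope (it is an internal-only API); the counting hooks here are purely illustrative, not jemalloc code:

```c
/* Minimal sketch: a hooks_t whose callbacks just count events. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

static atomic_uint_fast64_t n_allocs, n_dallocs, n_expands;

static void
count_alloc(void *extra, hook_alloc_t type, void *result,
    uintptr_t result_raw, uintptr_t args_raw[3]) {
	(void)extra; (void)type; (void)result; (void)result_raw; (void)args_raw;
	atomic_fetch_add(&n_allocs, 1);
}

static void
count_dalloc(void *extra, hook_dalloc_t type, void *address,
    uintptr_t args_raw[3]) {
	(void)extra; (void)type; (void)address; (void)args_raw;
	atomic_fetch_add(&n_dallocs, 1);
}

static void
count_expand(void *extra, hook_expand_t type, void *address,
    size_t old_usize, size_t new_usize, uintptr_t result_raw,
    uintptr_t args_raw[4]) {
	(void)extra; (void)type; (void)address; (void)old_usize;
	(void)new_usize; (void)result_raw; (void)args_raw;
	atomic_fetch_add(&n_expands, 1);
}

static hooks_t count_hooks = {
	.alloc_hook = count_alloc,
	.dalloc_hook = count_dalloc,
	.expand_hook = count_expand,
	.extra = NULL,
};
```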
43363-
43364-/*
43365- * Begin implementation details; everything above this point might one day live
43366- * in a public API.  Everything below this point never will.
43367- */
43368-
43369-/*
43370- * The realloc pathways haven't gotten any refactoring love in a while, and it's
43371- * fairly difficult to pass information from the entry point to the hooks.  We
43372- * put the information the hooks will need into a struct to encapsulate
43373- * everything.
43374- *
43375- * Much of these pathways are force-inlined, so that the compiler can avoid
43376- * materializing this struct until we hit an extern arena function.  For fairly
43377- * goofy reasons, *many* of the realloc paths hit an extern arena function.
43378- * These paths are cold enough that it doesn't matter; eventually, we should
43379- * rewrite the realloc code to make the expand-in-place and the
43380- * free-then-realloc paths more orthogonal, at which point we don't need to
43381- * spread the hook logic all over the place.
43382- */
43383-typedef struct hook_ralloc_args_s hook_ralloc_args_t;
43384-struct hook_ralloc_args_s {
43385-	/* I.e. as opposed to rallocx. */
43386-	bool is_realloc;
43387-	/*
43388-	 * The expand hook takes 4 arguments, even if only 3 are actually used;
43389-	 * we add an extra one in case the user decides to memcpy without
43390-	 * looking too closely at the hooked function.
43391-	 */
43392-	uintptr_t args[4];
43393-};
43394-
43395-bool hook_boot();
43396-
43397-/*
43398- * Returns an opaque handle to be used when removing the hook.  NULL means that
43399- * we couldn't install the hook.
43400- */
43401-void *hook_install(tsdn_t *tsdn, hooks_t *hooks);
43402-/* Uninstalls the hook with the handle previously returned from hook_install. */
43403-void hook_remove(tsdn_t *tsdn, void *opaque);
43404-
43405-/* Hooks */
43406-
43407-void hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
43408-    uintptr_t args_raw[3]);
43409-
43410-void hook_invoke_dalloc(hook_dalloc_t type, void *address,
43411-    uintptr_t args_raw[3]);
43412-
43413-void hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
43414-    size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]);
43415-
43416-#endif /* JEMALLOC_INTERNAL_HOOK_H */
43417diff --git a/jemalloc/include/jemalloc/internal/hpa.h b/jemalloc/include/jemalloc/internal/hpa.h
43418deleted file mode 100644
43419index f356285..0000000
43420--- a/jemalloc/include/jemalloc/internal/hpa.h
43421+++ /dev/null
43422@@ -1,182 +0,0 @@
43423-#ifndef JEMALLOC_INTERNAL_HPA_H
43424-#define JEMALLOC_INTERNAL_HPA_H
43425-
43426-#include "jemalloc/internal/exp_grow.h"
43427-#include "jemalloc/internal/hpa_hooks.h"
43428-#include "jemalloc/internal/hpa_opts.h"
43429-#include "jemalloc/internal/pai.h"
43430-#include "jemalloc/internal/psset.h"
43431-
43432-typedef struct hpa_central_s hpa_central_t;
43433-struct hpa_central_s {
43434-	/*
43435-	 * The mutex guarding most of the operations on the central data
43436-	 * structure.
43437-	 */
43438-	malloc_mutex_t mtx;
43439-	/*
43440-	 * Guards expansion of eden.  We separate this from the regular mutex so
43441-	 * that cheaper operations can still continue while we're doing the OS
43442-	 * call.
43443-	 */
43444-	malloc_mutex_t grow_mtx;
43445-	/*
43446-	 * Either NULL (if empty), or some integer multiple of a
43447-	 * hugepage-aligned number of hugepages.  We carve them off one at a
43448-	 * time to satisfy new pageslab requests.
43449-	 *
43450-	 * Guarded by grow_mtx.
43451-	 */
43452-	void *eden;
43453-	size_t eden_len;
43454-	/* Source for metadata. */
43455-	base_t *base;
43456-	/* Number of grow operations done on this hpa_central_t. */
43457-	uint64_t age_counter;
43458-
43459-	/* The HPA hooks. */
43460-	hpa_hooks_t hooks;
43461-};
43462-
43463-typedef struct hpa_shard_nonderived_stats_s hpa_shard_nonderived_stats_t;
43464-struct hpa_shard_nonderived_stats_s {
43465-	/*
43466-	 * The number of times we've purged within a hugepage.
43467-	 *
43468-	 * Guarded by mtx.
43469-	 */
43470-	uint64_t npurge_passes;
43471-	/*
43472-	 * The number of individual purge calls we perform (which should always
43473-	 * be bigger than npurge_passes, since each pass purges at least one
43474- * extent within a hugepage).
43475-	 *
43476-	 * Guarded by mtx.
43477-	 */
43478-	uint64_t npurges;
43479-
43480-	/*
43481-	 * The number of times we've hugified a pageslab.
43482-	 *
43483-	 * Guarded by mtx.
43484-	 */
43485-	uint64_t nhugifies;
43486-	/*
43487-	 * The number of times we've dehugified a pageslab.
43488-	 *
43489-	 * Guarded by mtx.
43490-	 */
43491-	uint64_t ndehugifies;
43492-};
43493-
43494-/* Completely derived; only used by CTL. */
43495-typedef struct hpa_shard_stats_s hpa_shard_stats_t;
43496-struct hpa_shard_stats_s {
43497-	psset_stats_t psset_stats;
43498-	hpa_shard_nonderived_stats_t nonderived_stats;
43499-};
43500-
43501-typedef struct hpa_shard_s hpa_shard_t;
43502-struct hpa_shard_s {
43503-	/*
43504-	 * pai must be the first member; we cast from a pointer to it to a
43505-	 * pointer to the hpa_shard_t.
43506-	 */
43507-	pai_t pai;
43508-
43509-	/* The central allocator we get our hugepages from. */
43510-	hpa_central_t *central;
43511-	/* Protects most of this shard's state. */
43512-	malloc_mutex_t mtx;
43513-	/*
43514-	 * Guards the shard's access to the central allocator (preventing
43515-	 * multiple threads operating on this shard from accessing the central
43516-	 * allocator).
43517-	 */
43518-	malloc_mutex_t grow_mtx;
43519-	/* The base metadata allocator. */
43520-	base_t *base;
43521-
43522-	/*
43523-	 * This edata cache is the one we use when allocating a small extent
43524-	 * from a pageslab.  The pageslab itself comes from the centralized
43525-	 * allocator, and so will use its edata_cache.
43526-	 */
43527-	edata_cache_fast_t ecf;
43528-
43529-	psset_t psset;
43530-
43531-	/*
43532-	 * How many grow operations have occurred.
43533-	 *
43534-	 * Guarded by grow_mtx.
43535-	 */
43536-	uint64_t age_counter;
43537-
43538-	/* The arena ind we're associated with. */
43539-	unsigned ind;
43540-
43541-	/*
43542-	 * Our emap.  This is just a cache of the emap pointer in the associated
43543-	 * hpa_central.
43544-	 */
43545-	emap_t *emap;
43546-
43547-	/* The configuration choices for this hpa shard. */
43548-	hpa_shard_opts_t opts;
43549-
43550-	/*
43551-	 * How many pages have we started but not yet finished purging in this
43552-	 * hpa shard.
43553-	 */
43554-	size_t npending_purge;
43555-
43556-	/*
43557-	 * Those stats which are copied directly into the CTL-centric hpa shard
43558-	 * stats.
43559-	 */
43560-	hpa_shard_nonderived_stats_t stats;
43561-
43562-	/*
43563-	 * Last time we performed purge on this shard.
43564-	 */
43565-	nstime_t last_purge;
43566-};
43567-
43568-/*
43569- * Whether or not the HPA can be used given the current configuration.  This is
43570- * not necessarily a guarantee that it backs its allocations by hugepages,
43571- * just that it can function properly given the system it's running on.
43572- */
43573-bool hpa_supported();
43574-bool hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks);
43575-bool hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
43576-    base_t *base, edata_cache_t *edata_cache, unsigned ind,
43577-    const hpa_shard_opts_t *opts);
43578-
43579-void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src);
43580-void hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
43581-    hpa_shard_stats_t *dst);
43582-
43583-/*
43584- * Notify the shard that we won't use it for allocations much longer.  Due to
43585- * the possibility of races, we don't actually prevent allocations; just flush
43586- * and disable the embedded edata cache.
43587- */
43588-void hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard);
43589-void hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard);
43590-
43591-void hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard,
43592-    bool deferral_allowed);
43593-void hpa_shard_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard);
43594-
43595-/*
43596- * We share the fork ordering with the PA and arena prefork handling; that's why
43597- * these are 3 and 4 rather than 0 and 1.
43598- */
43599-void hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard);
43600-void hpa_shard_prefork4(tsdn_t *tsdn, hpa_shard_t *shard);
43601-void hpa_shard_postfork_parent(tsdn_t *tsdn, hpa_shard_t *shard);
43602-void hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard);
43603-
43604-#endif /* JEMALLOC_INTERNAL_HPA_H */
43605diff --git a/jemalloc/include/jemalloc/internal/hpa_hooks.h b/jemalloc/include/jemalloc/internal/hpa_hooks.h
43606deleted file mode 100644
43607index 4ea221c..0000000
43608--- a/jemalloc/include/jemalloc/internal/hpa_hooks.h
43609+++ /dev/null
43610@@ -1,17 +0,0 @@
43611-#ifndef JEMALLOC_INTERNAL_HPA_HOOKS_H
43612-#define JEMALLOC_INTERNAL_HPA_HOOKS_H
43613-
43614-typedef struct hpa_hooks_s hpa_hooks_t;
43615-struct hpa_hooks_s {
43616-	void *(*map)(size_t size);
43617-	void (*unmap)(void *ptr, size_t size);
43618-	void (*purge)(void *ptr, size_t size);
43619-	void (*hugify)(void *ptr, size_t size);
43620-	void (*dehugify)(void *ptr, size_t size);
43621-	void (*curtime)(nstime_t *r_time, bool first_reading);
43622-	uint64_t (*ms_since)(nstime_t *r_time);
43623-};
43624-
43625-extern hpa_hooks_t hpa_hooks_default;
43626-
43627-#endif /* JEMALLOC_INTERNAL_HPA_HOOKS_H */
43628diff --git a/jemalloc/include/jemalloc/internal/hpa_opts.h b/jemalloc/include/jemalloc/internal/hpa_opts.h
43629deleted file mode 100644
43630index ee84fea..0000000
43631--- a/jemalloc/include/jemalloc/internal/hpa_opts.h
43632+++ /dev/null
43633@@ -1,74 +0,0 @@
43634-#ifndef JEMALLOC_INTERNAL_HPA_OPTS_H
43635-#define JEMALLOC_INTERNAL_HPA_OPTS_H
43636-
43637-#include "jemalloc/internal/fxp.h"
43638-
43639-/*
43640- * This file is morally part of hpa.h, but is split out for header-ordering
43641- * reasons.
43642- */
43643-
43644-typedef struct hpa_shard_opts_s hpa_shard_opts_t;
43645-struct hpa_shard_opts_s {
43646-	/*
43647-	 * The largest size we'll allocate out of the shard.  For those
43648-	 * allocations refused, the caller (in practice, the PA module) will
43649-	 * fall back to the more general (for now) PAC, which can always handle
43650-	 * any allocation request.
43651-	 */
43652-	size_t slab_max_alloc;
43653-
43654-	/*
43655-	 * When the number of active bytes in a hugepage is >=
43656-	 * hugification_threshold, we force hugify it.
43657-	 */
43658-	size_t hugification_threshold;
43659-
43660-	/*
43661-	 * The HPA purges whenever the number of pages exceeds dirty_mult *
43662-	 * active_pages.  This may be set to (fxp_t)-1 to disable purging.
43663-	 */
43664-	fxp_t dirty_mult;
43665-
43666-	/*
43667-	 * Whether or not the PAI methods are allowed to defer work to a
43668-	 * subsequent hpa_shard_do_deferred_work() call.  Practically, this
43669-	 * corresponds to background threads being enabled.  We track this
43670-	 * ourselves for encapsulation purposes.
43671-	 */
43672-	bool deferral_allowed;
43673-
43674-	/*
43675-	 * How long a hugepage has to be a hugification candidate before it will
43676-	 * actually get hugified.
43677-	 */
43678-	uint64_t hugify_delay_ms;
43679-
43680-	/*
43681-	 * Minimum amount of time between purges.
43682-	 */
43683-	uint64_t min_purge_interval_ms;
43684-};
43685-
43686-#define HPA_SHARD_OPTS_DEFAULT {					\
43687-	/* slab_max_alloc */						\
43688-	64 * 1024,							\
43689-	/* hugification_threshold */					\
43690-	HUGEPAGE * 95 / 100,						\
43691-	/* dirty_mult */						\
43692-	FXP_INIT_PERCENT(25),						\
43693-	/*								\
43694-	 * deferral_allowed						\
43695-	 * 								\
43696-	 * Really, this is always set by the arena during creation	\
43697-	 * or by an hpa_shard_set_deferral_allowed call, so the value	\
43698-	 * we put here doesn't matter.					\
43699-	 */								\
43700-	false,								\
43701-	/* hugify_delay_ms */						\
43702-	10 * 1000,							\
43703-	/* min_purge_interval_ms */					\
43704-	5 * 1000							\
43705-}
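To make the defaults concrete: dirty_mult is a 16.16 fixed-point value, so the default FXP_INIT_PERCENT(25) means purging is considered once dirty pages exceed a quarter of the active pages. A standalone arithmetic check of that reading (illustrative only; the real code goes through the fxp helpers):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: a 16.16 dirty_mult of 25% translated into a page count. */
int main(void) {
	uint32_t dirty_mult = (25u << 16) / 100;	/* FXP_INIT_PERCENT(25) */
	size_t active_pages = 1000;
	/* Purging kicks in once dirty pages exceed dirty_mult * active_pages. */
	size_t threshold = (size_t)(((uint64_t)active_pages * dirty_mult) >> 16);
	printf("purge above %zu dirty pages\n", threshold);	/* 250 */
	return 0;
}
```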
43706-
43707-#endif /* JEMALLOC_INTERNAL_HPA_OPTS_H */
43708diff --git a/jemalloc/include/jemalloc/internal/hpdata.h b/jemalloc/include/jemalloc/internal/hpdata.h
43709deleted file mode 100644
43710index 1fb534d..0000000
43711--- a/jemalloc/include/jemalloc/internal/hpdata.h
43712+++ /dev/null
43713@@ -1,413 +0,0 @@
43714-#ifndef JEMALLOC_INTERNAL_HPDATA_H
43715-#define JEMALLOC_INTERNAL_HPDATA_H
43716-
43717-#include "jemalloc/internal/fb.h"
43718-#include "jemalloc/internal/ph.h"
43719-#include "jemalloc/internal/ql.h"
43720-#include "jemalloc/internal/typed_list.h"
43721-
43722-/*
43723- * The metadata representation we use for extents in hugepages.  While the PAC
43724- * uses the edata_t to represent both active and inactive extents, the HP only
43725- * uses the edata_t for active ones; instead, inactive extent state is tracked
43726- * within hpdata associated with the enclosing hugepage-sized, hugepage-aligned
43727- * region of virtual address space.
43728- *
43729- * An hpdata need not be "truly" backed by a hugepage (which is not necessarily
43730- * an observable property of any given region of address space).  It's just
43731- * hugepage-sized and hugepage-aligned; it's *potentially* huge.
43732- */
43733-typedef struct hpdata_s hpdata_t;
43734-ph_structs(hpdata_age_heap, hpdata_t);
43735-struct hpdata_s {
43736-	/*
43737-	 * We likewise follow the edata convention of mangling names and forcing
43738-	 * the use of accessors -- this lets us add some consistency checks on
43739-	 * access.
43740-	 */
43741-
43742-	/*
43743-	 * The address of the hugepage in question.  This can't be named h_addr,
43744-	 * since that conflicts with a macro defined in Windows headers.
43745-	 */
43746-	void *h_address;
43747-	/* Its age (measured in psset operations). */
43748-	uint64_t h_age;
43749-	/* Whether or not we think the hugepage is mapped that way by the OS. */
43750-	bool h_huge;
43751-
43752-	/*
43753-	 * For some properties, we keep parallel sets of bools; h_foo_allowed
43754-	 * and h_in_psset_foo_container.  This is a decoupling mechanism to
43755-	 * avoid bothering the hpa (which manages policies) from the psset
43756-	 * (which is the mechanism used to enforce those policies).  This allows
43757-	 * all the container management logic to live in one place, without the
43758-	 * HPA needing to know or care how that happens.
43759-	 */
43760-
43761-	/*
43762-	 * Whether or not the hpdata is allowed to be used to serve allocations,
43763-	 * and whether or not the psset is currently tracking it as such.
43764-	 */
43765-	bool h_alloc_allowed;
43766-	bool h_in_psset_alloc_container;
43767-
43768-	/*
43769-	 * The same, but with purging.  There's no corresponding
43770-	 * h_in_psset_purge_container, because the psset (currently) always
43771-	 * removes hpdatas from their containers during updates (to implement
43772-	 * LRU for purging).
43773-	 */
43774-	bool h_purge_allowed;
43775-
43776-	/* And with hugifying. */
43777-	bool h_hugify_allowed;
43778-	/* When we became a hugification candidate. */
43779-	nstime_t h_time_hugify_allowed;
43780-	bool h_in_psset_hugify_container;
43781-
43782-	/* Whether or not a purge or hugify is currently happening. */
43783-	bool h_mid_purge;
43784-	bool h_mid_hugify;
43785-
43786-	/*
43787-	 * Whether or not the hpdata is being updated in the psset (i.e. if
43788-	 * there has been a psset_update_begin call issued without a matching
43789-	 * psset_update_end call).  Eventually this will expand to other types
43790-	 * of updates.
43791-	 */
43792-	bool h_updating;
43793-
43794-	/* Whether or not the hpdata is in a psset. */
43795-	bool h_in_psset;
43796-
43797-	union {
43798-		/* When nonempty (and also nonfull), used by the psset bins. */
43799-		hpdata_age_heap_link_t age_link;
43800-		/*
43801-		 * When empty (or not corresponding to any hugepage), list
43802-		 * linkage.
43803-		 */
43804-		ql_elm(hpdata_t) ql_link_empty;
43805-	};
43806-
43807-	/*
43808-	 * Linkage for the psset to track candidates for purging and hugifying.
43809-	 */
43810-	ql_elm(hpdata_t) ql_link_purge;
43811-	ql_elm(hpdata_t) ql_link_hugify;
43812-
43813-	/* The length of the largest contiguous sequence of inactive pages. */
43814-	size_t h_longest_free_range;
43815-
43816-	/* Number of active pages. */
43817-	size_t h_nactive;
43818-
43819-	/* A bitmap with bits set in the active pages. */
43820-	fb_group_t active_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
43821-
43822-	/*
43823-	 * Number of dirty or active pages, and a bitmap tracking them.  One
43824-	 * way to think of this is as which pages are dirty from the OS's
43825-	 * perspective.
43826-	 */
43827-	size_t h_ntouched;
43828-
43829-	/* The touched pages (using the same definition as above). */
43830-	fb_group_t touched_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
43831-};
43832-
43833-TYPED_LIST(hpdata_empty_list, hpdata_t, ql_link_empty)
43834-TYPED_LIST(hpdata_purge_list, hpdata_t, ql_link_purge)
43835-TYPED_LIST(hpdata_hugify_list, hpdata_t, ql_link_hugify)
43836-
43837-ph_proto(, hpdata_age_heap, hpdata_t);
43838-
43839-static inline void *
43840-hpdata_addr_get(const hpdata_t *hpdata) {
43841-	return hpdata->h_address;
43842-}
43843-
43844-static inline void
43845-hpdata_addr_set(hpdata_t *hpdata, void *addr) {
43846-	assert(HUGEPAGE_ADDR2BASE(addr) == addr);
43847-	hpdata->h_address = addr;
43848-}
43849-
43850-static inline uint64_t
43851-hpdata_age_get(const hpdata_t *hpdata) {
43852-	return hpdata->h_age;
43853-}
43854-
43855-static inline void
43856-hpdata_age_set(hpdata_t *hpdata, uint64_t age) {
43857-	hpdata->h_age = age;
43858-}
43859-
43860-static inline bool
43861-hpdata_huge_get(const hpdata_t *hpdata) {
43862-	return hpdata->h_huge;
43863-}
43864-
43865-static inline bool
43866-hpdata_alloc_allowed_get(const hpdata_t *hpdata) {
43867-	return hpdata->h_alloc_allowed;
43868-}
43869-
43870-static inline void
43871-hpdata_alloc_allowed_set(hpdata_t *hpdata, bool alloc_allowed) {
43872-	hpdata->h_alloc_allowed = alloc_allowed;
43873-}
43874-
43875-static inline bool
43876-hpdata_in_psset_alloc_container_get(const hpdata_t *hpdata) {
43877-	return hpdata->h_in_psset_alloc_container;
43878-}
43879-
43880-static inline void
43881-hpdata_in_psset_alloc_container_set(hpdata_t *hpdata, bool in_container) {
43882-	assert(in_container != hpdata->h_in_psset_alloc_container);
43883-	hpdata->h_in_psset_alloc_container = in_container;
43884-}
43885-
43886-static inline bool
43887-hpdata_purge_allowed_get(const hpdata_t *hpdata) {
43888-	return hpdata->h_purge_allowed;
43889-}
43890-
43891-static inline void
43892-hpdata_purge_allowed_set(hpdata_t *hpdata, bool purge_allowed) {
43893-	assert(purge_allowed == false || !hpdata->h_mid_purge);
43894-	hpdata->h_purge_allowed = purge_allowed;
43895-}
43896-
43897-static inline bool
43898-hpdata_hugify_allowed_get(const hpdata_t *hpdata) {
43899-	return hpdata->h_hugify_allowed;
43900-}
43901-
43902-static inline void
43903-hpdata_allow_hugify(hpdata_t *hpdata, nstime_t now) {
43904-	assert(!hpdata->h_mid_hugify);
43905-	hpdata->h_hugify_allowed = true;
43906-	hpdata->h_time_hugify_allowed = now;
43907-}
43908-
43909-static inline nstime_t
43910-hpdata_time_hugify_allowed(hpdata_t *hpdata) {
43911-	return hpdata->h_time_hugify_allowed;
43912-}
43913-
43914-static inline void
43915-hpdata_disallow_hugify(hpdata_t *hpdata) {
43916-	hpdata->h_hugify_allowed = false;
43917-}
43918-
43919-static inline bool
43920-hpdata_in_psset_hugify_container_get(const hpdata_t *hpdata) {
43921-	return hpdata->h_in_psset_hugify_container;
43922-}
43923-
43924-static inline void
43925-hpdata_in_psset_hugify_container_set(hpdata_t *hpdata, bool in_container) {
43926-	assert(in_container != hpdata->h_in_psset_hugify_container);
43927-	hpdata->h_in_psset_hugify_container = in_container;
43928-}
43929-
43930-static inline bool
43931-hpdata_mid_purge_get(const hpdata_t *hpdata) {
43932-	return hpdata->h_mid_purge;
43933-}
43934-
43935-static inline void
43936-hpdata_mid_purge_set(hpdata_t *hpdata, bool mid_purge) {
43937-	assert(mid_purge != hpdata->h_mid_purge);
43938-	hpdata->h_mid_purge = mid_purge;
43939-}
43940-
43941-static inline bool
43942-hpdata_mid_hugify_get(const hpdata_t *hpdata) {
43943-	return hpdata->h_mid_hugify;
43944-}
43945-
43946-static inline void
43947-hpdata_mid_hugify_set(hpdata_t *hpdata, bool mid_hugify) {
43948-	assert(mid_hugify != hpdata->h_mid_hugify);
43949-	hpdata->h_mid_hugify = mid_hugify;
43950-}
43951-
43952-static inline bool
43953-hpdata_changing_state_get(const hpdata_t *hpdata) {
43954-	return hpdata->h_mid_purge || hpdata->h_mid_hugify;
43955-}
43956-
43957-
43958-static inline bool
43959-hpdata_updating_get(const hpdata_t *hpdata) {
43960-	return hpdata->h_updating;
43961-}
43962-
43963-static inline void
43964-hpdata_updating_set(hpdata_t *hpdata, bool updating) {
43965-	assert(updating != hpdata->h_updating);
43966-	hpdata->h_updating = updating;
43967-}
43968-
43969-static inline bool
43970-hpdata_in_psset_get(const hpdata_t *hpdata) {
43971-	return hpdata->h_in_psset;
43972-}
43973-
43974-static inline void
43975-hpdata_in_psset_set(hpdata_t *hpdata, bool in_psset) {
43976-	assert(in_psset != hpdata->h_in_psset);
43977-	hpdata->h_in_psset = in_psset;
43978-}
43979-
43980-static inline size_t
43981-hpdata_longest_free_range_get(const hpdata_t *hpdata) {
43982-	return hpdata->h_longest_free_range;
43983-}
43984-
43985-static inline void
43986-hpdata_longest_free_range_set(hpdata_t *hpdata, size_t longest_free_range) {
43987-	assert(longest_free_range <= HUGEPAGE_PAGES);
43988-	hpdata->h_longest_free_range = longest_free_range;
43989-}
43990-
43991-static inline size_t
43992-hpdata_nactive_get(hpdata_t *hpdata) {
43993-	return hpdata->h_nactive;
43994-}
43995-
43996-static inline size_t
43997-hpdata_ntouched_get(hpdata_t *hpdata) {
43998-	return hpdata->h_ntouched;
43999-}
44000-
44001-static inline size_t
44002-hpdata_ndirty_get(hpdata_t *hpdata) {
44003-	return hpdata->h_ntouched - hpdata->h_nactive;
44004-}
44005-
44006-static inline size_t
44007-hpdata_nretained_get(hpdata_t *hpdata) {
44008-	return HUGEPAGE_PAGES - hpdata->h_ntouched;
44009-}
44010-
44011-static inline void
44012-hpdata_assert_empty(hpdata_t *hpdata) {
44013-	assert(fb_empty(hpdata->active_pages, HUGEPAGE_PAGES));
44014-	assert(hpdata->h_nactive == 0);
44015-}
44016-
44017-/*
44018- * Only used in tests, and in hpdata_assert_consistent, below.  Verifies some
44019- * consistency properties of the hpdata (e.g. that cached counts of page stats
44020- * match computed ones).
44021- */
44022-static inline bool
44023-hpdata_consistent(hpdata_t *hpdata) {
44024-	if (fb_urange_longest(hpdata->active_pages, HUGEPAGE_PAGES)
44025-	    != hpdata_longest_free_range_get(hpdata)) {
44026-		return false;
44027-	}
44028-	if (fb_scount(hpdata->active_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)
44029-	    != hpdata->h_nactive) {
44030-		return false;
44031-	}
44032-	if (fb_scount(hpdata->touched_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)
44033-	    != hpdata->h_ntouched) {
44034-		return false;
44035-	}
44036-	if (hpdata->h_ntouched < hpdata->h_nactive) {
44037-		return false;
44038-	}
44039-	if (hpdata->h_huge && hpdata->h_ntouched != HUGEPAGE_PAGES) {
44040-		return false;
44041-	}
44042-	if (hpdata_changing_state_get(hpdata)
44043-	    && ((hpdata->h_purge_allowed) || hpdata->h_hugify_allowed)) {
44044-		return false;
44045-	}
44046-	if (hpdata_hugify_allowed_get(hpdata)
44047-	    != hpdata_in_psset_hugify_container_get(hpdata)) {
44048-		return false;
44049-	}
44050-	return true;
44051-}
44052-
44053-static inline void
44054-hpdata_assert_consistent(hpdata_t *hpdata) {
44055-	assert(hpdata_consistent(hpdata));
44056-}
44057-
44058-static inline bool
44059-hpdata_empty(hpdata_t *hpdata) {
44060-	return hpdata->h_nactive == 0;
44061-}
44062-
44063-static inline bool
44064-hpdata_full(hpdata_t *hpdata) {
44065-	return hpdata->h_nactive == HUGEPAGE_PAGES;
44066-}
44067-
44068-void hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age);
44069-
44070-/*
44071- * Given an hpdata which can serve an allocation request, pick and reserve an
44072- * offset within that allocation.
44073- */
44074-void *hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz);
44075-void hpdata_unreserve(hpdata_t *hpdata, void *begin, size_t sz);
44076-
44077-/*
44078- * The hpdata_purge_state_t allows grabbing the metadata required to purge
44079- * subranges of a hugepage while holding a lock, dropping the lock during the
44080- * actual purging, and reacquiring it to update the metadata again.
44081- */
44082-typedef struct hpdata_purge_state_s hpdata_purge_state_t;
44083-struct hpdata_purge_state_s {
44084-	size_t npurged;
44085-	size_t ndirty_to_purge;
44086-	fb_group_t to_purge[FB_NGROUPS(HUGEPAGE_PAGES)];
44087-	size_t next_purge_search_begin;
44088-};
44089-
44090-/*
44091- * Initializes purge state.  The access to hpdata must be externally
44092- * synchronized with other hpdata_* calls.
44093- *
44094- * You can tell whether or not a thread is purging or hugifying a given hpdata
44095- * via hpdata_changing_state_get(hpdata).  Racing hugification or purging
44096- * operations aren't allowed.
44097- *
44098- * Once you begin purging, you have to follow through and call hpdata_purge_next
44099- * until you're done, and then end.  Allocating out of an hpdata undergoing
44100- * purging is not allowed.
44101- *
44102- * Returns the number of dirty pages that will be purged.
44103- */
44104-size_t hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state);
44105-
44106-/*
44107- * If there are more extents to purge, sets *r_purge_addr and *r_purge_size to
44108- * the address and size of the next range to purge, and returns true.
44109- * Otherwise, returns false to indicate that we're done.
44110- *
44111- * This requires exclusive access to the purge state, but *not* to the hpdata.
44112- * In particular, unreserve calls are allowed while purging (i.e. you can dalloc
44113- * into one part of the hpdata while purging a different part).
44114- */
44115-bool hpdata_purge_next(hpdata_t *hpdata, hpdata_purge_state_t *purge_state,
44116-    void **r_purge_addr, size_t *r_purge_size);
44117-/*
44118- * Updates the hpdata metadata after all purging is done.  Needs external
44119- * synchronization.
44120- */
44121-void hpdata_purge_end(hpdata_t *hpdata, hpdata_purge_state_t *purge_state);
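Putting the three calls together, the intended sequence is: begin once, call next in a loop (possibly with the hpdata's lock dropped around the actual purge syscalls), then end. Below is a sketch under the assumption that the declarations above are in scope and that the caller handles the required external synchronization; this is not jemalloc's own code:

```c
static void
purge_whole_hpdata(hpdata_t *hpdata) {
	hpdata_purge_state_t purge_state;
	size_t to_purge = hpdata_purge_begin(hpdata, &purge_state);
	(void)to_purge;	/* number of dirty pages that will be purged */

	void *purge_addr;
	size_t purge_size;
	/* The lock protecting hpdata may be dropped around the actual purges. */
	while (hpdata_purge_next(hpdata, &purge_state, &purge_addr,
	    &purge_size)) {
		/* e.g. madvise(purge_addr, purge_size, MADV_DONTNEED); */
	}

	hpdata_purge_end(hpdata, &purge_state);
}
```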
44122-
44123-void hpdata_hugify(hpdata_t *hpdata);
44124-void hpdata_dehugify(hpdata_t *hpdata);
44125-
44126-#endif /* JEMALLOC_INTERNAL_HPDATA_H */
44127diff --git a/jemalloc/include/jemalloc/internal/inspect.h b/jemalloc/include/jemalloc/internal/inspect.h
44128deleted file mode 100644
44129index 65fef51..0000000
44130--- a/jemalloc/include/jemalloc/internal/inspect.h
44131+++ /dev/null
44132@@ -1,40 +0,0 @@
44133-#ifndef JEMALLOC_INTERNAL_INSPECT_H
44134-#define JEMALLOC_INTERNAL_INSPECT_H
44135-
44136-/*
44137- * This module contains the heap introspection capabilities.  For now they are
44138- * exposed purely through mallctl APIs in the experimental namespace, but this
44139- * may change over time.
44140- */
44141-
44142-/*
44143- * The following two structs are for experimental purposes. See
44144- * experimental_utilization_query_ctl and
44145- * experimental_utilization_batch_query_ctl in src/ctl.c.
44146- */
44147-typedef struct inspect_extent_util_stats_s inspect_extent_util_stats_t;
44148-struct inspect_extent_util_stats_s {
44149-	size_t nfree;
44150-	size_t nregs;
44151-	size_t size;
44152-};
44153-
44154-typedef struct inspect_extent_util_stats_verbose_s
44155-    inspect_extent_util_stats_verbose_t;
44156-
44157-struct inspect_extent_util_stats_verbose_s {
44158-	void *slabcur_addr;
44159-	size_t nfree;
44160-	size_t nregs;
44161-	size_t size;
44162-	size_t bin_nfree;
44163-	size_t bin_nregs;
44164-};
44165-
44166-void inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
44167-    size_t *nfree, size_t *nregs, size_t *size);
44168-void inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
44169-    size_t *nfree, size_t *nregs, size_t *size,
44170-    size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr);
44171-
44172-#endif /* JEMALLOC_INTERNAL_INSPECT_H */
44173diff --git a/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h b/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
44174deleted file mode 100644
44175index 983027c..0000000
44176--- a/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
44177+++ /dev/null
44178@@ -1,108 +0,0 @@
44179-#ifndef JEMALLOC_INTERNAL_DECLS_H
44180-#define JEMALLOC_INTERNAL_DECLS_H
44181-
44182-#include <math.h>
44183-#ifdef _WIN32
44184-#  include <windows.h>
44185-#  include "msvc_compat/windows_extra.h"
44186-#  include "msvc_compat/strings.h"
44187-#  ifdef _WIN64
44188-#    if LG_VADDR <= 32
44189-#      error Generate the headers using x64 vcargs
44190-#    endif
44191-#  else
44192-#    if LG_VADDR > 32
44193-#      undef LG_VADDR
44194-#      define LG_VADDR 32
44195-#    endif
44196-#  endif
44197-#else
44198-#  include <sys/param.h>
44199-#  include <sys/mman.h>
44200-#  if !defined(__pnacl__) && !defined(__native_client__)
44201-#    include <sys/syscall.h>
44202-#    if !defined(SYS_write) && defined(__NR_write)
44203-#      define SYS_write __NR_write
44204-#    endif
44205-#    if defined(SYS_open) && defined(__aarch64__)
44206-       /* Android headers may define SYS_open to __NR_open even though
44207-        * __NR_open may not exist on AArch64 (superseded by __NR_openat). */
44208-#      undef SYS_open
44209-#    endif
44210-#    include <sys/uio.h>
44211-#  endif
44212-#  include <pthread.h>
44213-#  if defined(__FreeBSD__) || defined(__DragonFly__)
44214-#  include <pthread_np.h>
44215-#  include <sched.h>
44216-#  if defined(__FreeBSD__)
44217-#    define cpu_set_t cpuset_t
44218-#  endif
44219-#  endif
44220-#  include <signal.h>
44221-#  ifdef JEMALLOC_OS_UNFAIR_LOCK
44222-#    include <os/lock.h>
44223-#  endif
44224-#  ifdef JEMALLOC_GLIBC_MALLOC_HOOK
44225-#    include <sched.h>
44226-#  endif
44227-#  include <errno.h>
44228-#  include <sys/time.h>
44229-#  include <time.h>
44230-#  ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
44231-#    include <mach/mach_time.h>
44232-#  endif
44233-#endif
44234-#include <sys/types.h>
44235-
44236-#include <limits.h>
44237-#ifndef SIZE_T_MAX
44238-#  define SIZE_T_MAX	SIZE_MAX
44239-#endif
44240-#ifndef SSIZE_MAX
44241-#  define SSIZE_MAX	((ssize_t)(SIZE_T_MAX >> 1))
44242-#endif
44243-#include <stdarg.h>
44244-#include <stdbool.h>
44245-#include <stdio.h>
44246-#include <stdlib.h>
44247-#include <stdint.h>
44248-#include <stddef.h>
44249-#ifndef offsetof
44250-#  define offsetof(type, member)	((size_t)&(((type *)NULL)->member))
44251-#endif
44252-#include <string.h>
44253-#include <strings.h>
44254-#include <ctype.h>
44255-#ifdef _MSC_VER
44256-#  include <io.h>
44257-typedef intptr_t ssize_t;
44258-#  define PATH_MAX 1024
44259-#  define STDERR_FILENO 2
44260-#  define __func__ __FUNCTION__
44261-#  ifdef JEMALLOC_HAS_RESTRICT
44262-#    define restrict __restrict
44263-#  endif
44264-/* Disable warnings about deprecated system functions. */
44265-#  pragma warning(disable: 4996)
44266-#if _MSC_VER < 1800
44267-static int
44268-isblank(int c) {
44269-	return (c == '\t' || c == ' ');
44270-}
44271-#endif
44272-#else
44273-#  include <unistd.h>
44274-#endif
44275-#include <fcntl.h>
44276-
44277-/*
44278- * The Win32 midl compiler has #define small char; we don't use midl, but
44279- * "small" is a nice identifier to have available when talking about size
44280- * classes.
44281- */
44282-#ifdef small
44283-#  undef small
44284-#endif
44285-
44286-#endif /* JEMALLOC_INTERNAL_H */
44287diff --git a/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in b/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
44288deleted file mode 100644
44289index 3588072..0000000
44290--- a/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
44291+++ /dev/null
44292@@ -1,427 +0,0 @@
44293-#ifndef JEMALLOC_INTERNAL_DEFS_H_
44294-#define JEMALLOC_INTERNAL_DEFS_H_
44295-/*
44296- * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
44297- * public APIs to be prefixed.  This makes it possible, with some care, to use
44298- * multiple allocators simultaneously.
44299- */
44300-#undef JEMALLOC_PREFIX
44301-#undef JEMALLOC_CPREFIX
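
As a quick illustration of the prefixed-API mode this knob enables — a minimal sketch, assuming the library was configured with --with-jemalloc-prefix=je_ (the je_ prefix is only an assumption for the example):

    /* Sketch: with --with-jemalloc-prefix=je_, jemalloc exports je_malloc()
     * et al. and leaves the unprefixed malloc() to the system allocator, so
     * both can coexist in one process. */
    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        void *a = je_malloc(64);  /* served by jemalloc */
        void *b = malloc(64);     /* served by the libc allocator */
        je_free(a);
        free(b);
        return 0;
    }
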
44302-
44303-/*
44304- * Define overrides for non-standard allocator-related functions if they are
44305- * present on the system.
44306- */
44307-#undef JEMALLOC_OVERRIDE___LIBC_CALLOC
44308-#undef JEMALLOC_OVERRIDE___LIBC_FREE
44309-#undef JEMALLOC_OVERRIDE___LIBC_MALLOC
44310-#undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
44311-#undef JEMALLOC_OVERRIDE___LIBC_REALLOC
44312-#undef JEMALLOC_OVERRIDE___LIBC_VALLOC
44313-#undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
44314-
44315-/*
44316- * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
44317- * For shared libraries, symbol visibility mechanisms prevent these symbols
44318- * from being exported, but for static libraries, naming collisions are a real
44319- * possibility.
44320- */
44321-#undef JEMALLOC_PRIVATE_NAMESPACE
44322-
44323-/*
44324- * Hyper-threaded CPUs may need a special instruction inside spin loops in
44325- * order to yield to another virtual CPU.
44326- */
44327-#undef CPU_SPINWAIT
44328-/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
44329-#undef HAVE_CPU_SPINWAIT
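
For context, a minimal sketch of the spin loop this hint is meant for; the x86 PAUSE definition and the flag variable below are assumptions for the example, not necessarily what configure detects:

    #include <stdatomic.h>

    /* On x86, configure would typically detect the PAUSE instruction here. */
    #define CPU_SPINWAIT __asm__ volatile("pause")

    static atomic_bool ready;

    static void spin_until_ready(void) {
        /* The hint tells a hyper-threaded sibling that this logical CPU is
         * merely spinning, so the shared execution resources can go to it. */
        while (!atomic_load_explicit(&ready, memory_order_acquire)) {
            CPU_SPINWAIT;
        }
    }
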
44330-
44331-/*
44332- * Number of significant bits in virtual addresses.  This may be less than the
44333- * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
44334- * bits are the same as bit 47.
44335- */
44336-#undef LG_VADDR
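
A worked illustration of the comment above, assuming a 64-bit target with LG_VADDR = 48 (the common x86-64 case); the helper name is made up for the sketch:

    #include <stdbool.h>
    #include <stdint.h>

    #define LG_VADDR 48  /* assumption for the sketch */

    /* Bits 48..63 of a canonical x86-64 address are copies of bit 47, so the
     * top (64 - LG_VADDR) bits carry no extra information. */
    static bool vaddr_is_canonical(uintptr_t p) {
        uintptr_t high = p >> (LG_VADDR - 1);  /* bit 47 and everything above */
        return high == 0 || high == (UINTPTR_MAX >> (LG_VADDR - 1));
    }
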
44337-
44338-/* Defined if C11 atomics are available. */
44339-#undef JEMALLOC_C11_ATOMICS
44340-
44341-/* Defined if GCC __atomic atomics are available. */
44342-#undef JEMALLOC_GCC_ATOMIC_ATOMICS
44343-/* and the 8-bit variant support. */
44344-#undef JEMALLOC_GCC_U8_ATOMIC_ATOMICS
44345-
44346-/* Defined if GCC __sync atomics are available. */
44347-#undef JEMALLOC_GCC_SYNC_ATOMICS
44348-/* and the 8-bit variant support. */
44349-#undef JEMALLOC_GCC_U8_SYNC_ATOMICS
44350-
44351-/*
44352- * Defined if __builtin_clz() and __builtin_clzl() are available.
44353- */
44354-#undef JEMALLOC_HAVE_BUILTIN_CLZ
44355-
44356-/*
44357- * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
44358- */
44359-#undef JEMALLOC_OS_UNFAIR_LOCK
44360-
44361-/* Defined if syscall(2) is usable. */
44362-#undef JEMALLOC_USE_SYSCALL
44363-
44364-/*
44365- * Defined if secure_getenv(3) is available.
44366- */
44367-#undef JEMALLOC_HAVE_SECURE_GETENV
44368-
44369-/*
44370- * Defined if issetugid(2) is available.
44371- */
44372-#undef JEMALLOC_HAVE_ISSETUGID
44373-
44374-/* Defined if pthread_atfork(3) is available. */
44375-#undef JEMALLOC_HAVE_PTHREAD_ATFORK
44376-
44377-/* Defined if pthread_setname_np(3) is available. */
44378-#undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
44379-
44380-/* Defined if pthread_getname_np(3) is available. */
44381-#undef JEMALLOC_HAVE_PTHREAD_GETNAME_NP
44382-
44383-/* Defined if pthread_get_name_np(3) is available. */
44384-#undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP
44385-
44386-/*
44387- * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
44388- */
44389-#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
44390-
44391-/*
44392- * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
44393- */
44394-#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
44395-
44396-/*
44397- * Defined if mach_absolute_time() is available.
44398- */
44399-#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
44400-
44401-/*
44402- * Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
44403- */
44404-#undef JEMALLOC_HAVE_CLOCK_REALTIME
44405-
44406-/*
44407- * Defined if _malloc_thread_cleanup() exists.  At least in the case of
44408- * FreeBSD, pthread_key_create() allocates, which if used during malloc
44409- * bootstrapping will cause recursion into the pthreads library.  Therefore, if
44410- * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
44411- * malloc_tsd.
44412- */
44413-#undef JEMALLOC_MALLOC_THREAD_CLEANUP
44414-
44415-/*
44416- * Defined if threaded initialization is known to be safe on this platform.
44417- * Among other things, it must be possible to initialize a mutex without
44418- * triggering allocation in order for threaded allocation to be safe.
44419- */
44420-#undef JEMALLOC_THREADED_INIT
44421-
44422-/*
44423- * Defined if the pthreads implementation defines
44424- * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
44425- * to avoid recursive allocation during mutex initialization.
44426- */
44427-#undef JEMALLOC_MUTEX_INIT_CB
44428-
44429-/* Non-empty if the tls_model attribute is supported. */
44430-#undef JEMALLOC_TLS_MODEL
44431-
44432-/*
44433- * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
44434- * inline functions.
44435- */
44436-#undef JEMALLOC_DEBUG
44437-
44438-/* JEMALLOC_STATS enables statistics calculation. */
44439-#undef JEMALLOC_STATS
44440-
44441-/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
44442-#undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API
44443-
44444-/* JEMALLOC_PROF enables allocation profiling. */
44445-#undef JEMALLOC_PROF
44446-
44447-/* Use libunwind for profile backtracing if defined. */
44448-#undef JEMALLOC_PROF_LIBUNWIND
44449-
44450-/* Use libgcc for profile backtracing if defined. */
44451-#undef JEMALLOC_PROF_LIBGCC
44452-
44453-/* Use gcc intrinsics for profile backtracing if defined. */
44454-#undef JEMALLOC_PROF_GCC
44455-
44456-/*
44457- * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
44458- * segment (DSS).
44459- */
44460-#undef JEMALLOC_DSS
44461-
44462-/* Support memory filling (junk/zero). */
44463-#undef JEMALLOC_FILL
44464-
44465-/* Support utrace(2)-based tracing. */
44466-#undef JEMALLOC_UTRACE
44467-
44468-/* Support utrace(2)-based tracing (label based signature). */
44469-#undef JEMALLOC_UTRACE_LABEL
44470-
44471-/* Support optional abort() on OOM. */
44472-#undef JEMALLOC_XMALLOC
44473-
44474-/* Support lazy locking (avoid locking unless a second thread is launched). */
44475-#undef JEMALLOC_LAZY_LOCK
44476-
44477-/*
44478- * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
44479- * classes).
44480- */
44481-#undef LG_QUANTUM
44482-
44483-/* One page is 2^LG_PAGE bytes. */
44484-#undef LG_PAGE
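
To make the two log-scale knobs concrete — a worked example with typical values (16-byte quantum, 4 KiB pages); the real numbers are filled in at configure time, so these are only assumptions:

    #include <stdio.h>

    #define LG_QUANTUM 4   /* 1 << 4  = 16-byte minimum alignment (assumed) */
    #define LG_PAGE    12  /* 1 << 12 = 4096-byte pages (assumed)           */

    int main(void) {
        size_t quantum = (size_t)1 << LG_QUANTUM;
        size_t page = (size_t)1 << LG_PAGE;
        /* Quantum rounding (ignoring tiny classes, as the comment above does):
         * a 17-byte request lands in the 32-byte, i.e. two-quantum, class. */
        size_t rounded = (17 + quantum - 1) & ~(quantum - 1);
        printf("quantum=%zu page=%zu rounded(17)=%zu\n", quantum, page, rounded);
        return 0;
    }
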
44485-
44486-/* Base 2 log of the maximum number of regions in a slab. */
44487-#undef CONFIG_LG_SLAB_MAXREGS
44488-
44489-/*
44490- * One huge page is 2^LG_HUGEPAGE bytes.  Note that this is defined even if the
44491- * system does not explicitly support huge pages; system calls that require
44492- * explicit huge page support are separately configured.
44493- */
44494-#undef LG_HUGEPAGE
44495-
44496-/*
44497- * If defined, adjacent virtual memory mappings with identical attributes
44498- * automatically coalesce, and they fragment when changes are made to subranges.
44499- * This is the normal order of things for mmap()/munmap(), but on Windows
44500- * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
44501- * mappings do *not* coalesce/fragment.
44502- */
44503-#undef JEMALLOC_MAPS_COALESCE
44504-
44505-/*
44506- * If defined, retain memory for later reuse by default rather than using e.g.
44507- * munmap() to unmap freed extents.  This is enabled on 64-bit Linux because
44508- * common sequences of mmap()/munmap() calls will cause virtual memory map
44509- * holes.
44510- */
44511-#undef JEMALLOC_RETAIN
44512-
44513-/* TLS is used to map arenas and magazine caches to threads. */
44514-#undef JEMALLOC_TLS
44515-
44516-/*
44517- * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
44518- * Don't use this directly; instead use unreachable() from util.h
44519- */
44520-#undef JEMALLOC_INTERNAL_UNREACHABLE
44521-
44522-/*
44523- * ffs*() functions to use for bitmapping.  Don't use these directly; instead,
44524- * use ffs_*() from util.h.
44525- */
44526-#undef JEMALLOC_INTERNAL_FFSLL
44527-#undef JEMALLOC_INTERNAL_FFSL
44528-#undef JEMALLOC_INTERNAL_FFS
44529-
44530-/*
44531- * popcount*() functions to use for bitmapping.
44532- */
44533-#undef JEMALLOC_INTERNAL_POPCOUNTL
44534-#undef JEMALLOC_INTERNAL_POPCOUNT
44535-
44536-/*
44537- * If defined, explicitly attempt to more uniformly distribute large allocation
44538- * pointer alignments across all cache indices.
44539- */
44540-#undef JEMALLOC_CACHE_OBLIVIOUS
44541-
44542-/*
44543- * If defined, enable logging facilities.  We make this a configure option to
44544- * avoid taking extra branches everywhere.
44545- */
44546-#undef JEMALLOC_LOG
44547-
44548-/*
44549- * If defined, use readlinkat() (instead of readlink()) to follow
44550- * /etc/malloc_conf.
44551- */
44552-#undef JEMALLOC_READLINKAT
44553-
44554-/*
44555- * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
44556- */
44557-#undef JEMALLOC_ZONE
44558-
44559-/*
44560- * Methods for determining whether the OS overcommits.
44561- * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
44562- *                                         /proc/sys/vm/overcommit_memory file.
44563- * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
44564- */
44565-#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT
44566-#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
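
A minimal sketch of the Linux probe the first macro refers to (jemalloc's real version lives in pages.c): read /proc/sys/vm/overcommit_memory and treat the heuristic modes 0 and 1 as "the OS overcommits".

    #include <fcntl.h>
    #include <stdbool.h>
    #include <unistd.h>

    static bool os_overcommits_proc_sketch(void) {
        char mode;
        int fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
        if (fd == -1) {
            return true;  /* sketch choice: assume the common default on error */
        }
        ssize_t nread = read(fd, &mode, 1);
        close(fd);
        /* 0 = heuristic overcommit, 1 = always overcommit, 2 = never. */
        return nread != 1 || mode == '0' || mode == '1';
    }
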
44567-
44568-/* Defined if madvise(2) is available. */
44569-#undef JEMALLOC_HAVE_MADVISE
44570-
44571-/*
44572- * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
44573- * arguments to madvise(2).
44574- */
44575-#undef JEMALLOC_HAVE_MADVISE_HUGE
44576-
44577-/*
44578- * Methods for purging unused pages differ between operating systems.
44579- *
44580- *   madvise(..., MADV_FREE) : This marks pages as being unused, such that they
44581- *                             will be discarded rather than swapped out.
44582- *   madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
44583- *                                 defined, this immediately discards pages,
44584- *                                 such that new pages will be demand-zeroed if
44585- *                                 the address region is later touched;
44586- *                                 otherwise this behaves similarly to
44587- *                                 MADV_FREE, though typically with higher
44588- *                                 system overhead.
44589- */
44590-#undef JEMALLOC_PURGE_MADVISE_FREE
44591-#undef JEMALLOC_PURGE_MADVISE_DONTNEED
44592-#undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
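
To ground the two purge flavors described above — a small standalone sketch that maps an anonymous region and then lets the kernel reclaim it with MADV_DONTNEED (the eager variant); swapping in MADV_FREE, where available, gives the lazy behavior.

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void) {
        size_t len = 1 << 20;  /* 1 MiB; mmap returns page-aligned memory */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return 1;
        }
        /* The pages are dropped immediately; touching them again demand-zeroes
         * fresh pages, which is what the *_DONTNEED_ZEROS macro above records. */
        if (madvise(p, len, MADV_DONTNEED) != 0) {
            perror("madvise");
        }
        munmap(p, len);
        return 0;
    }
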
44593-
44594-/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
44595-#undef JEMALLOC_DEFINE_MADVISE_FREE
44596-
44597-/*
44598- * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
44599- */
44600-#undef JEMALLOC_MADVISE_DONTDUMP
44601-
44602-/*
44603- * Defined if MADV_[NO]CORE is supported as an argument to madvise.
44604- */
44605-#undef JEMALLOC_MADVISE_NOCORE
44606-
44607-/* Defined if mprotect(2) is available. */
44608-#undef JEMALLOC_HAVE_MPROTECT
44609-
44610-/*
44611- * Defined if transparent huge pages (THPs) are supported via the
44612- * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
44613- */
44614-#undef JEMALLOC_THP
44615-
44616-/* Defined if posix_madvise is available. */
44617-#undef JEMALLOC_HAVE_POSIX_MADVISE
44618-
44619-/*
44620- * Method for purging unused pages using posix_madvise.
44621- *
44622- *   posix_madvise(..., POSIX_MADV_DONTNEED)
44623- */
44624-#undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED
44625-#undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS
44626-
44627-/*
44628- * Defined if memcntl page admin call is supported
44629- */
44630-#undef JEMALLOC_HAVE_MEMCNTL
44631-
44632-/*
44633- * Defined if malloc_size is supported
44634- */
44635-#undef JEMALLOC_HAVE_MALLOC_SIZE
44636-
44637-/* Define if operating system has alloca.h header. */
44638-#undef JEMALLOC_HAS_ALLOCA_H
44639-
44640-/* C99 restrict keyword supported. */
44641-#undef JEMALLOC_HAS_RESTRICT
44642-
44643-/* For use by hash code. */
44644-#undef JEMALLOC_BIG_ENDIAN
44645-
44646-/* sizeof(int) == 2^LG_SIZEOF_INT. */
44647-#undef LG_SIZEOF_INT
44648-
44649-/* sizeof(long) == 2^LG_SIZEOF_LONG. */
44650-#undef LG_SIZEOF_LONG
44651-
44652-/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
44653-#undef LG_SIZEOF_LONG_LONG
44654-
44655-/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
44656-#undef LG_SIZEOF_INTMAX_T
44657-
44658-/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
44659-#undef JEMALLOC_GLIBC_MALLOC_HOOK
44660-
44661-/* glibc memalign hook. */
44662-#undef JEMALLOC_GLIBC_MEMALIGN_HOOK
44663-
44664-/* pthread support */
44665-#undef JEMALLOC_HAVE_PTHREAD
44666-
44667-/* dlsym() support */
44668-#undef JEMALLOC_HAVE_DLSYM
44669-
44670-/* Adaptive mutex support in pthreads. */
44671-#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
44672-
44673-/* GNU specific sched_getcpu support */
44674-#undef JEMALLOC_HAVE_SCHED_GETCPU
44675-
44676-/* GNU specific sched_setaffinity support */
44677-#undef JEMALLOC_HAVE_SCHED_SETAFFINITY
44678-
44679-/*
44680- * If defined, all the features necessary for background threads are present.
44681- */
44682-#undef JEMALLOC_BACKGROUND_THREAD
44683-
44684-/*
44685- * If defined, jemalloc symbols are not exported (doesn't work when
44686- * JEMALLOC_PREFIX is not defined).
44687- */
44688-#undef JEMALLOC_EXPORT
44689-
44690-/* config.malloc_conf options string. */
44691-#undef JEMALLOC_CONFIG_MALLOC_CONF
44692-
44693-/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
44694-#undef JEMALLOC_IS_MALLOC
44695-
44696-/*
44697- * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
44698- */
44699-#undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
44700-
44701-/* Performs additional safety checks when defined. */
44702-#undef JEMALLOC_OPT_SAFETY_CHECKS
44703-
44704-/* Is C++ support being built? */
44705-#undef JEMALLOC_ENABLE_CXX
44706-
44707-/* Performs additional size checks when defined. */
44708-#undef JEMALLOC_OPT_SIZE_CHECKS
44709-
44710-/* Allows sampled junk and stash for checking use-after-free when defined. */
44711-#undef JEMALLOC_UAF_DETECTION
44712-
44713-/* Darwin VM_MAKE_TAG support */
44714-#undef JEMALLOC_HAVE_VM_MAKE_TAG
44715-
44716-/* If defined, realloc(ptr, 0) defaults to "free" instead of "alloc". */
44717-#undef JEMALLOC_ZERO_REALLOC_DEFAULT_FREE
44718-
44719-#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
44720diff --git a/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h b/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h
44721deleted file mode 100644
44722index fc834c6..0000000
44723--- a/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h
44724+++ /dev/null
44725@@ -1,75 +0,0 @@
44726-#ifndef JEMALLOC_INTERNAL_EXTERNS_H
44727-#define JEMALLOC_INTERNAL_EXTERNS_H
44728-
44729-#include "jemalloc/internal/atomic.h"
44730-#include "jemalloc/internal/hpa_opts.h"
44731-#include "jemalloc/internal/sec_opts.h"
44732-#include "jemalloc/internal/tsd_types.h"
44733-#include "jemalloc/internal/nstime.h"
44734-
44735-/* TSD checks this to set thread local slow state accordingly. */
44736-extern bool malloc_slow;
44737-
44738-/* Run-time options. */
44739-extern bool opt_abort;
44740-extern bool opt_abort_conf;
44741-extern bool opt_trust_madvise;
44742-extern bool opt_confirm_conf;
44743-extern bool opt_hpa;
44744-extern hpa_shard_opts_t opt_hpa_opts;
44745-extern sec_opts_t opt_hpa_sec_opts;
44746-
44747-extern const char *opt_junk;
44748-extern bool opt_junk_alloc;
44749-extern bool opt_junk_free;
44750-extern void (*junk_free_callback)(void *ptr, size_t size);
44751-extern void (*junk_alloc_callback)(void *ptr, size_t size);
44752-extern bool opt_utrace;
44753-extern bool opt_xmalloc;
44754-extern bool opt_experimental_infallible_new;
44755-extern bool opt_zero;
44756-extern unsigned opt_narenas;
44757-extern zero_realloc_action_t opt_zero_realloc_action;
44758-extern malloc_init_t malloc_init_state;
44759-extern const char *zero_realloc_mode_names[];
44760-extern atomic_zu_t zero_realloc_count;
44761-extern bool opt_cache_oblivious;
44762-
44763-/* Escape free-fastpath when ptr & mask == 0 (for sanitization purpose). */
44764-extern uintptr_t san_cache_bin_nonfast_mask;
44765-
44766-/* Number of CPUs. */
44767-extern unsigned ncpus;
44768-
44769-/* Number of arenas used for automatic multiplexing of threads and arenas. */
44770-extern unsigned narenas_auto;
44771-
44772-/* Base index for manual arenas. */
44773-extern unsigned manual_arena_base;
44774-
44775-/*
44776- * Arenas that are used to service external requests.  Not all elements of the
44777- * arenas array are necessarily used; arenas are created lazily as needed.
44778- */
44779-extern atomic_p_t arenas[];
44780-
44781-void *a0malloc(size_t size);
44782-void a0dalloc(void *ptr);
44783-void *bootstrap_malloc(size_t size);
44784-void *bootstrap_calloc(size_t num, size_t size);
44785-void bootstrap_free(void *ptr);
44786-void arena_set(unsigned ind, arena_t *arena);
44787-unsigned narenas_total_get(void);
44788-arena_t *arena_init(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);
44789-arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
44790-void arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena);
44791-void iarena_cleanup(tsd_t *tsd);
44792-void arena_cleanup(tsd_t *tsd);
44793-size_t batch_alloc(void **ptrs, size_t num, size_t size, int flags);
44794-void jemalloc_prefork(void);
44795-void jemalloc_postfork_parent(void);
44796-void jemalloc_postfork_child(void);
44797-void je_sdallocx_noflags(void *ptr, size_t size);
44798-void *malloc_default(size_t size);
44799-
44800-#endif /* JEMALLOC_INTERNAL_EXTERNS_H */
44801diff --git a/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h b/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h
44802deleted file mode 100644
44803index 751c112..0000000
44804--- a/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h
44805+++ /dev/null
44806@@ -1,84 +0,0 @@
44807-#ifndef JEMALLOC_INTERNAL_INCLUDES_H
44808-#define JEMALLOC_INTERNAL_INCLUDES_H
44809-
44810-/*
44811- * jemalloc can conceptually be broken into components (arena, tcache, etc.),
44812- * but there are circular dependencies that cannot be broken without
44813- * substantial performance degradation.
44814- *
44815- * Historically, we dealt with this by splitting each header into four sections (types,
44816- * structs, externs, and inlines), and included each header file multiple times
44817- * in this file, picking out the portion we want on each pass using the
44818- * following #defines:
44819- *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
44820- *                        types.
44821- *   JEMALLOC_H_STRUCTS : Data structures.
44822- *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
44823- *   JEMALLOC_H_INLINES : Inline functions.
44824- *
44825- * We're moving toward a world in which the dependencies are explicit; each file
44826- * will #include the headers it depends on (rather than relying on them being
44827- * implicitly available via this file including every header file in the
44828- * project).
44829- *
44830- * We're now in an intermediate state: we've broken up the header files to avoid
44831- * having to include each one multiple times, but have not yet moved the
44832- * dependency information into the header files (i.e. we still rely on the
44833- * ordering in this file to ensure all a header's dependencies are available in
44834- * its translation unit).  Each component is now broken up into multiple header
44835- * files, corresponding to the sections above (e.g. instead of "foo.h", we now
44836- * have "foo_types.h", "foo_structs.h", "foo_externs.h", "foo_inlines.h").
44837- *
44838- * Those files which have been converted to explicitly include their
44839- * inter-component dependencies are now in the initial HERMETIC HEADERS
44840- * section.  All headers may still rely on jemalloc_preamble.h (which, by fiat,
44841- * must be included first in every translation unit) for system headers and
44842- * global jemalloc definitions, however.
44843- */
44844-
44845-/******************************************************************************/
44846-/* TYPES */
44847-/******************************************************************************/
44848-
44849-#include "jemalloc/internal/arena_types.h"
44850-#include "jemalloc/internal/tcache_types.h"
44851-#include "jemalloc/internal/prof_types.h"
44852-
44853-/******************************************************************************/
44854-/* STRUCTS */
44855-/******************************************************************************/
44856-
44857-#include "jemalloc/internal/prof_structs.h"
44858-#include "jemalloc/internal/arena_structs.h"
44859-#include "jemalloc/internal/tcache_structs.h"
44860-#include "jemalloc/internal/background_thread_structs.h"
44861-
44862-/******************************************************************************/
44863-/* EXTERNS */
44864-/******************************************************************************/
44865-
44866-#include "jemalloc/internal/jemalloc_internal_externs.h"
44867-#include "jemalloc/internal/arena_externs.h"
44868-#include "jemalloc/internal/large_externs.h"
44869-#include "jemalloc/internal/tcache_externs.h"
44870-#include "jemalloc/internal/prof_externs.h"
44871-#include "jemalloc/internal/background_thread_externs.h"
44872-
44873-/******************************************************************************/
44874-/* INLINES */
44875-/******************************************************************************/
44876-
44877-#include "jemalloc/internal/jemalloc_internal_inlines_a.h"
44878-/*
44879- * Include portions of arena code interleaved with tcache code in order to
44880- * resolve circular dependencies.
44881- */
44882-#include "jemalloc/internal/arena_inlines_a.h"
44883-#include "jemalloc/internal/jemalloc_internal_inlines_b.h"
44884-#include "jemalloc/internal/tcache_inlines.h"
44885-#include "jemalloc/internal/arena_inlines_b.h"
44886-#include "jemalloc/internal/jemalloc_internal_inlines_c.h"
44887-#include "jemalloc/internal/prof_inlines.h"
44888-#include "jemalloc/internal/background_thread_inlines.h"
44889-
44890-#endif /* JEMALLOC_INTERNAL_INCLUDES_H */
44891diff --git a/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h b/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h
44892deleted file mode 100644
44893index 9e27cc3..0000000
44894--- a/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h
44895+++ /dev/null
44896@@ -1,122 +0,0 @@
44897-#ifndef JEMALLOC_INTERNAL_INLINES_A_H
44898-#define JEMALLOC_INTERNAL_INLINES_A_H
44899-
44900-#include "jemalloc/internal/atomic.h"
44901-#include "jemalloc/internal/bit_util.h"
44902-#include "jemalloc/internal/jemalloc_internal_types.h"
44903-#include "jemalloc/internal/sc.h"
44904-#include "jemalloc/internal/ticker.h"
44905-
44906-JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
44907-malloc_getcpu(void) {
44908-	assert(have_percpu_arena);
44909-#if defined(_WIN32)
44910-	return GetCurrentProcessorNumber();
44911-#elif defined(JEMALLOC_HAVE_SCHED_GETCPU)
44912-	return (malloc_cpuid_t)sched_getcpu();
44913-#else
44914-	not_reached();
44915-	return -1;
44916-#endif
44917-}
44918-
44919-/* Return the chosen arena index based on current cpu. */
44920-JEMALLOC_ALWAYS_INLINE unsigned
44921-percpu_arena_choose(void) {
44922-	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena));
44923-
44924-	malloc_cpuid_t cpuid = malloc_getcpu();
44925-	assert(cpuid >= 0);
44926-
44927-	unsigned arena_ind;
44928-	if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus /
44929-	    2)) {
44930-		arena_ind = cpuid;
44931-	} else {
44932-		assert(opt_percpu_arena == per_phycpu_arena);
44933-		/* Hyper-threads on the same physical CPU share an arena. */
44934-		arena_ind = cpuid - ncpus / 2;
44935-	}
44936-
44937-	return arena_ind;
44938-}
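
A worked example of the index math above, assuming 8 logical CPUs numbered so that hyper-thread siblings are (i, i + 4), which is the layout the halving relies on:

    /* Sketch only; mirrors the branch above with the mode as a plain flag. */
    static unsigned
    percpu_arena_choose_sketch(unsigned cpuid, unsigned ncpus, int per_phycpu) {
        if (!per_phycpu || cpuid < ncpus / 2) {
            return cpuid;          /* per-CPU mode: cpuid 2 -> arena 2 */
        }
        return cpuid - ncpus / 2;  /* per-physical-CPU mode: cpuid 6 -> arena 2,
                                    * shared with its sibling, cpuid 2 */
    }
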
44939-
44940-/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */
44941-JEMALLOC_ALWAYS_INLINE unsigned
44942-percpu_arena_ind_limit(percpu_arena_mode_t mode) {
44943-	assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode));
44944-	if (mode == per_phycpu_arena && ncpus > 1) {
44945-		if (ncpus % 2) {
44946-			/* This likely means a misconfig. */
44947-			return ncpus / 2 + 1;
44948-		}
44949-		return ncpus / 2;
44950-	} else {
44951-		return ncpus;
44952-	}
44953-}
44954-
44955-static inline arena_t *
44956-arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
44957-	arena_t *ret;
44958-
44959-	assert(ind < MALLOCX_ARENA_LIMIT);
44960-
44961-	ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
44962-	if (unlikely(ret == NULL)) {
44963-		if (init_if_missing) {
44964-			ret = arena_init(tsdn, ind, &arena_config_default);
44965-		}
44966-	}
44967-	return ret;
44968-}
44969-
44970-JEMALLOC_ALWAYS_INLINE bool
44971-tcache_available(tsd_t *tsd) {
44972-	/*
44973-	 * The thread-specific auto tcache might be unavailable if: 1) tcache
44974-	 * initialization is still in progress, or 2) it was disabled through the
44975-	 * thread.tcache.enabled mallctl or config options.  This covers both cases.
44976-	 */
44977-	if (likely(tsd_tcache_enabled_get(tsd))) {
44978-		/* Associated arena == NULL implies tcache init in progress. */
44979-		if (config_debug && tsd_tcache_slowp_get(tsd)->arena != NULL) {
44980-			tcache_assert_initialized(tsd_tcachep_get(tsd));
44981-		}
44982-		return true;
44983-	}
44984-
44985-	return false;
44986-}
44987-
44988-JEMALLOC_ALWAYS_INLINE tcache_t *
44989-tcache_get(tsd_t *tsd) {
44990-	if (!tcache_available(tsd)) {
44991-		return NULL;
44992-	}
44993-
44994-	return tsd_tcachep_get(tsd);
44995-}
44996-
44997-JEMALLOC_ALWAYS_INLINE tcache_slow_t *
44998-tcache_slow_get(tsd_t *tsd) {
44999-	if (!tcache_available(tsd)) {
45000-		return NULL;
45001-	}
45002-
45003-	return tsd_tcache_slowp_get(tsd);
45004-}
45005-
45006-static inline void
45007-pre_reentrancy(tsd_t *tsd, arena_t *arena) {
45008-	/* arena is the current context.  Reentry from a0 is not allowed. */
45009-	assert(arena != arena_get(tsd_tsdn(tsd), 0, false));
45010-	tsd_pre_reentrancy_raw(tsd);
45011-}
45012-
45013-static inline void
45014-post_reentrancy(tsd_t *tsd) {
45015-	tsd_post_reentrancy_raw(tsd);
45016-}
45017-
45018-#endif /* JEMALLOC_INTERNAL_INLINES_A_H */
45019diff --git a/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h b/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h
45020deleted file mode 100644
45021index 152f8a0..0000000
45022--- a/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h
45023+++ /dev/null
45024@@ -1,103 +0,0 @@
45025-#ifndef JEMALLOC_INTERNAL_INLINES_B_H
45026-#define JEMALLOC_INTERNAL_INLINES_B_H
45027-
45028-#include "jemalloc/internal/extent.h"
45029-
45030-static inline void
45031-percpu_arena_update(tsd_t *tsd, unsigned cpu) {
45032-	assert(have_percpu_arena);
45033-	arena_t *oldarena = tsd_arena_get(tsd);
45034-	assert(oldarena != NULL);
45035-	unsigned oldind = arena_ind_get(oldarena);
45036-
45037-	if (oldind != cpu) {
45038-		unsigned newind = cpu;
45039-		arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
45040-		assert(newarena != NULL);
45041-
45042-		/* Set new arena/tcache associations. */
45043-		arena_migrate(tsd, oldarena, newarena);
45044-		tcache_t *tcache = tcache_get(tsd);
45045-		if (tcache != NULL) {
45046-			tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
45047-			tcache_arena_reassociate(tsd_tsdn(tsd), tcache_slow,
45048-			    tcache, newarena);
45049-		}
45050-	}
45051-}
45052-
45053-
45054-/* Choose an arena based on a per-thread value. */
45055-static inline arena_t *
45056-arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
45057-	arena_t *ret;
45058-
45059-	if (arena != NULL) {
45060-		return arena;
45061-	}
45062-
45063-	/* During reentrancy, arena 0 is the safest bet. */
45064-	if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
45065-		return arena_get(tsd_tsdn(tsd), 0, true);
45066-	}
45067-
45068-	ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
45069-	if (unlikely(ret == NULL)) {
45070-		ret = arena_choose_hard(tsd, internal);
45071-		assert(ret);
45072-		if (tcache_available(tsd)) {
45073-			tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
45074-			tcache_t *tcache = tsd_tcachep_get(tsd);
45075-			if (tcache_slow->arena != NULL) {
45076-				/* See comments in tsd_tcache_data_init(). */
45077-				assert(tcache_slow->arena ==
45078-				    arena_get(tsd_tsdn(tsd), 0, false));
45079-				if (tcache_slow->arena != ret) {
45080-					tcache_arena_reassociate(tsd_tsdn(tsd),
45081-					    tcache_slow, tcache, ret);
45082-				}
45083-			} else {
45084-				tcache_arena_associate(tsd_tsdn(tsd),
45085-				    tcache_slow, tcache, ret);
45086-			}
45087-		}
45088-	}
45089-
45090-	/*
45091-	 * Note that for percpu arena, if the current arena is outside of the
45092-	 * auto percpu arena range, (i.e. thread is assigned to a manually
45093-	 * managed arena), then percpu arena is skipped.
45094-	 */
45095-	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) &&
45096-	    !internal && (arena_ind_get(ret) <
45097-	    percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd !=
45098-	    tsd_tsdn(tsd))) {
45099-		unsigned ind = percpu_arena_choose();
45100-		if (arena_ind_get(ret) != ind) {
45101-			percpu_arena_update(tsd, ind);
45102-			ret = tsd_arena_get(tsd);
45103-		}
45104-		ret->last_thd = tsd_tsdn(tsd);
45105-	}
45106-
45107-	return ret;
45108-}
45109-
45110-static inline arena_t *
45111-arena_choose(tsd_t *tsd, arena_t *arena) {
45112-	return arena_choose_impl(tsd, arena, false);
45113-}
45114-
45115-static inline arena_t *
45116-arena_ichoose(tsd_t *tsd, arena_t *arena) {
45117-	return arena_choose_impl(tsd, arena, true);
45118-}
45119-
45120-static inline bool
45121-arena_is_auto(arena_t *arena) {
45122-	assert(narenas_auto > 0);
45123-
45124-	return (arena_ind_get(arena) < manual_arena_base);
45125-}
45126-
45127-#endif /* JEMALLOC_INTERNAL_INLINES_B_H */
45128diff --git a/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
45129deleted file mode 100644
45130index b0868b7..0000000
45131--- a/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
45132+++ /dev/null
45133@@ -1,340 +0,0 @@
45134-#ifndef JEMALLOC_INTERNAL_INLINES_C_H
45135-#define JEMALLOC_INTERNAL_INLINES_C_H
45136-
45137-#include "jemalloc/internal/hook.h"
45138-#include "jemalloc/internal/jemalloc_internal_types.h"
45139-#include "jemalloc/internal/log.h"
45140-#include "jemalloc/internal/sz.h"
45141-#include "jemalloc/internal/thread_event.h"
45142-#include "jemalloc/internal/witness.h"
45143-
45144-/*
45145- * Translating the names of the 'i' functions:
45146- *   Abbreviations used in the first part of the function name (before
45147- *   alloc/dalloc) describe what that function accomplishes:
45148- *     a: arena (query)
45149- *     s: size (query, or sized deallocation)
45150- *     e: extent (query)
45151- *     p: aligned (allocates)
45152- *     vs: size (query, without knowing that the pointer is into the heap)
45153- *     r: rallocx implementation
45154- *     x: xallocx implementation
45155- *   Abbreviations used in the second part of the function name (after
45156- *   alloc/dalloc) describe the arguments it takes
45157- *     z: whether to return zeroed memory
45158- *     t: accepts a tcache_t * parameter
45159- *     m: accepts an arena_t * parameter
45160- */
45161-
45162-JEMALLOC_ALWAYS_INLINE arena_t *
45163-iaalloc(tsdn_t *tsdn, const void *ptr) {
45164-	assert(ptr != NULL);
45165-
45166-	return arena_aalloc(tsdn, ptr);
45167-}
45168-
45169-JEMALLOC_ALWAYS_INLINE size_t
45170-isalloc(tsdn_t *tsdn, const void *ptr) {
45171-	assert(ptr != NULL);
45172-
45173-	return arena_salloc(tsdn, ptr);
45174-}
45175-
45176-JEMALLOC_ALWAYS_INLINE void *
45177-iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
45178-    bool is_internal, arena_t *arena, bool slow_path) {
45179-	void *ret;
45180-
45181-	assert(!is_internal || tcache == NULL);
45182-	assert(!is_internal || arena == NULL || arena_is_auto(arena));
45183-	if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) {
45184-		witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
45185-		    WITNESS_RANK_CORE, 0);
45186-	}
45187-
45188-	ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
45189-	if (config_stats && is_internal && likely(ret != NULL)) {
45190-		arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
45191-	}
45192-	return ret;
45193-}
45194-
45195-JEMALLOC_ALWAYS_INLINE void *
45196-ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
45197-	return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false,
45198-	    NULL, slow_path);
45199-}
45200-
45201-JEMALLOC_ALWAYS_INLINE void *
45202-ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
45203-    tcache_t *tcache, bool is_internal, arena_t *arena) {
45204-	void *ret;
45205-
45206-	assert(usize != 0);
45207-	assert(usize == sz_sa2u(usize, alignment));
45208-	assert(!is_internal || tcache == NULL);
45209-	assert(!is_internal || arena == NULL || arena_is_auto(arena));
45210-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
45211-	    WITNESS_RANK_CORE, 0);
45212-
45213-	ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
45214-	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
45215-	if (config_stats && is_internal && likely(ret != NULL)) {
45216-		arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
45217-	}
45218-	return ret;
45219-}
45220-
45221-JEMALLOC_ALWAYS_INLINE void *
45222-ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
45223-    tcache_t *tcache, arena_t *arena) {
45224-	return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena);
45225-}
45226-
45227-JEMALLOC_ALWAYS_INLINE void *
45228-ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
45229-	return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
45230-	    tcache_get(tsd), false, NULL);
45231-}
45232-
45233-JEMALLOC_ALWAYS_INLINE size_t
45234-ivsalloc(tsdn_t *tsdn, const void *ptr) {
45235-	return arena_vsalloc(tsdn, ptr);
45236-}
45237-
45238-JEMALLOC_ALWAYS_INLINE void
45239-idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
45240-    emap_alloc_ctx_t *alloc_ctx, bool is_internal, bool slow_path) {
45241-	assert(ptr != NULL);
45242-	assert(!is_internal || tcache == NULL);
45243-	assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
45244-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
45245-	    WITNESS_RANK_CORE, 0);
45246-	if (config_stats && is_internal) {
45247-		arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
45248-	}
45249-	if (!is_internal && !tsdn_null(tsdn) &&
45250-	    tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
45251-		assert(tcache == NULL);
45252-	}
45253-	arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);
45254-}
45255-
45256-JEMALLOC_ALWAYS_INLINE void
45257-idalloc(tsd_t *tsd, void *ptr) {
45258-	idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true);
45259-}
45260-
45261-JEMALLOC_ALWAYS_INLINE void
45262-isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
45263-    emap_alloc_ctx_t *alloc_ctx, bool slow_path) {
45264-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
45265-	    WITNESS_RANK_CORE, 0);
45266-	arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
45267-}
45268-
45269-JEMALLOC_ALWAYS_INLINE void *
45270-iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
45271-    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
45272-    hook_ralloc_args_t *hook_args) {
45273-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
45274-	    WITNESS_RANK_CORE, 0);
45275-	void *p;
45276-	size_t usize, copysize;
45277-
45278-	usize = sz_sa2u(size, alignment);
45279-	if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
45280-		return NULL;
45281-	}
45282-	p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
45283-	if (p == NULL) {
45284-		return NULL;
45285-	}
45286-	/*
45287-	 * Copy at most size bytes (not size+extra), since the caller has no
45288-	 * expectation that the extra bytes will be reliably preserved.
45289-	 */
45290-	copysize = (size < oldsize) ? size : oldsize;
45291-	memcpy(p, ptr, copysize);
45292-	hook_invoke_alloc(hook_args->is_realloc
45293-	    ? hook_alloc_realloc : hook_alloc_rallocx, p, (uintptr_t)p,
45294-	    hook_args->args);
45295-	hook_invoke_dalloc(hook_args->is_realloc
45296-	    ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
45297-	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
45298-	return p;
45299-}
45300-
45301-/*
45302- * is_realloc threads through the knowledge of whether or not this call comes
45303- * from je_realloc (as opposed to je_rallocx); this ensures that we pass the
45304- * correct entry point into any hooks.
45305- * Note that these functions are all force-inlined, so no actual bool gets
45306- * passed-around anywhere.
45307- */
45308-JEMALLOC_ALWAYS_INLINE void *
45309-iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
45310-    bool zero, tcache_t *tcache, arena_t *arena, hook_ralloc_args_t *hook_args)
45311-{
45312-	assert(ptr != NULL);
45313-	assert(size != 0);
45314-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
45315-	    WITNESS_RANK_CORE, 0);
45316-
45317-	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
45318-	    != 0) {
45319-		/*
45320-		 * Existing object alignment is inadequate; allocate new space
45321-		 * and copy.
45322-		 */
45323-		return iralloct_realign(tsdn, ptr, oldsize, size, alignment,
45324-		    zero, tcache, arena, hook_args);
45325-	}
45326-
45327-	return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero,
45328-	    tcache, hook_args);
45329-}
45330-
45331-JEMALLOC_ALWAYS_INLINE void *
45332-iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
45333-    bool zero, hook_ralloc_args_t *hook_args) {
45334-	return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero,
45335-	    tcache_get(tsd), NULL, hook_args);
45336-}
45337-
45338-JEMALLOC_ALWAYS_INLINE bool
45339-ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
45340-    size_t alignment, bool zero, size_t *newsize) {
45341-	assert(ptr != NULL);
45342-	assert(size != 0);
45343-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
45344-	    WITNESS_RANK_CORE, 0);
45345-
45346-	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
45347-	    != 0) {
45348-		/* Existing object alignment is inadequate. */
45349-		*newsize = oldsize;
45350-		return true;
45351-	}
45352-
45353-	return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero,
45354-	    newsize);
45355-}
45356-
45357-JEMALLOC_ALWAYS_INLINE void
45358-fastpath_success_finish(tsd_t *tsd, uint64_t allocated_after,
45359-    cache_bin_t *bin, void *ret) {
45360-	thread_allocated_set(tsd, allocated_after);
45361-	if (config_stats) {
45362-		bin->tstats.nrequests++;
45363-	}
45364-
45365-	LOG("core.malloc.exit", "result: %p", ret);
45366-}
45367-
45368-JEMALLOC_ALWAYS_INLINE bool
45369-malloc_initialized(void) {
45370-	return (malloc_init_state == malloc_init_initialized);
45371-}
45372-
45373-/*
45374- * malloc() fastpath.  Included here so that we can inline it into operator new;
45375- * function call overhead there is non-negligible as a fraction of total CPU in
45376- * allocation-heavy C++ programs.  We take the fallback alloc to allow malloc
45377- * (which can return NULL) to differ in its behavior from operator new (which
45378- * can't).  It matches the signature of malloc / operator new so that we can
45379- * tail-call the fallback allocator, allowing us to avoid setting up the call
45380- * frame in the common case.
45381- *
45382- * Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit
45383- * tcache.  If either of these is false, we tail-call to the slowpath,
45384- * malloc_default().  Tail-calling is used to avoid any caller-saved
45385- * registers.
45386- *
45387- * fastpath supports ticker and profiling, both of which will also
45388- * tail-call to the slowpath if they fire.
45389- */
45390-JEMALLOC_ALWAYS_INLINE void *
45391-imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) {
45392-	LOG("core.malloc.entry", "size: %zu", size);
45393-	if (tsd_get_allocates() && unlikely(!malloc_initialized())) {
45394-		return fallback_alloc(size);
45395-	}
45396-
45397-	tsd_t *tsd = tsd_get(false);
45398-	if (unlikely((size > SC_LOOKUP_MAXCLASS) || tsd == NULL)) {
45399-		return fallback_alloc(size);
45400-	}
45401-	/*
45402-	 * The code below, up to the branch checking the next_event threshold, may
45403-	 * execute before malloc_init(), in which case the threshold is 0 to
45404-	 * trigger slow path and initialization.
45405-	 *
45406-	 * Note that when uninitialized, only the fast-path variants of the sz /
45407-	 * tsd facilities may be called.
45408-	 */
45409-	szind_t ind;
45410-	/*
45411-	 * The thread_allocated counter in tsd serves as a general purpose
45412-	 * accumulator for bytes of allocation to trigger different types of
45413-	 * events.  usize is always needed to advance thread_allocated, though
45414-	 * it's not always needed in the core allocation logic.
45415-	 */
45416-	size_t usize;
45417-	sz_size2index_usize_fastpath(size, &ind, &usize);
45418-	/* Fast path relies on size being a bin. */
45419-	assert(ind < SC_NBINS);
45420-	assert((SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS) &&
45421-	    (size <= SC_SMALL_MAXCLASS));
45422-
45423-	uint64_t allocated, threshold;
45424-	te_malloc_fastpath_ctx(tsd, &allocated, &threshold);
45425-	uint64_t allocated_after = allocated + usize;
45426-	/*
45427-	 * The ind and usize might be uninitialized (or partially) before
45428-	 * malloc_init().  The assertions check for: 1) full correctness (usize
45429-	 * & ind) when initialized; and 2) guaranteed slow-path (threshold == 0)
45430-	 * when !initialized.
45431-	 */
45432-	if (!malloc_initialized()) {
45433-		assert(threshold == 0);
45434-	} else {
45435-		assert(ind == sz_size2index(size));
45436-		assert(usize > 0 && usize == sz_index2size(ind));
45437-	}
45438-	/*
45439-	 * Check for events and tsd non-nominal (fast_threshold will be set to
45440-	 * 0) in a single branch.
45441-	 */
45442-	if (unlikely(allocated_after >= threshold)) {
45443-		return fallback_alloc(size);
45444-	}
45445-	assert(tsd_fast(tsd));
45446-
45447-	tcache_t *tcache = tsd_tcachep_get(tsd);
45448-	assert(tcache == tcache_get(tsd));
45449-	cache_bin_t *bin = &tcache->bins[ind];
45450-	bool tcache_success;
45451-	void *ret;
45452-
45453-	/*
45454-	 * We split up the code this way so that redundant low-water
45455-	 * computation doesn't happen on the (more common) case in which we
45456-	 * don't touch the low water mark.  The compiler won't do this
45457-	 * duplication on its own.
45458-	 */
45459-	ret = cache_bin_alloc_easy(bin, &tcache_success);
45460-	if (tcache_success) {
45461-		fastpath_success_finish(tsd, allocated_after, bin, ret);
45462-		return ret;
45463-	}
45464-	ret = cache_bin_alloc(bin, &tcache_success);
45465-	if (tcache_success) {
45466-		fastpath_success_finish(tsd, allocated_after, bin, ret);
45467-		return ret;
45468-	}
45469-
45470-	return fallback_alloc(size);
45471-}
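
For orientation, this is roughly how the fastpath above is consumed (the real wrapper lives in jemalloc.c): the public entry point passes the slow path, malloc_default(), as the fallback so that a miss becomes a plain tail call.

    /* Sketch of the intended call pattern; relies on the declarations above
     * (imalloc_fastpath) and in jemalloc_internal_externs.h (malloc_default). */
    void *
    je_malloc(size_t size) {
        return imalloc_fastpath(size, &malloc_default);
    }
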
45472-
45473-#endif /* JEMALLOC_INTERNAL_INLINES_C_H */
45474diff --git a/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h b/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
45475deleted file mode 100644
45476index e97b5f9..0000000
45477--- a/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
45478+++ /dev/null
45479@@ -1,111 +0,0 @@
45480-#ifndef JEMALLOC_INTERNAL_MACROS_H
45481-#define JEMALLOC_INTERNAL_MACROS_H
45482-
45483-#ifdef JEMALLOC_DEBUG
45484-#  define JEMALLOC_ALWAYS_INLINE static inline
45485-#else
45486-#  ifdef _MSC_VER
45487-#    define JEMALLOC_ALWAYS_INLINE static __forceinline
45488-#  else
45489-#    define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
45490-#  endif
45491-#endif
45492-#ifdef _MSC_VER
45493-#  define inline _inline
45494-#endif
45495-
45496-#define UNUSED JEMALLOC_ATTR(unused)
45497-
45498-#define ZU(z)	((size_t)z)
45499-#define ZD(z)	((ssize_t)z)
45500-#define QU(q)	((uint64_t)q)
45501-#define QD(q)	((int64_t)q)
45502-
45503-#define KZU(z)	ZU(z##ULL)
45504-#define KZD(z)	ZD(z##LL)
45505-#define KQU(q)	QU(q##ULL)
45506-#define KQD(q)	QD(q##LL)
45507-
45508-#ifndef __DECONST
45509-#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
45510-#endif
45511-
45512-#if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus)
45513-#  define restrict
45514-#endif
45515-
45516-/* Various function pointers are static and immutable except during testing. */
45517-#ifdef JEMALLOC_JET
45518-#  define JET_MUTABLE
45519-#else
45520-#  define JET_MUTABLE const
45521-#endif
45522-
45523-#define JEMALLOC_VA_ARGS_HEAD(head, ...) head
45524-#define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__
45525-
45526-/* Diagnostic suppression macros */
45527-#if defined(_MSC_VER) && !defined(__clang__)
45528-#  define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push))
45529-#  define JEMALLOC_DIAGNOSTIC_POP __pragma(warning(pop))
45530-#  define JEMALLOC_DIAGNOSTIC_IGNORE(W) __pragma(warning(disable:W))
45531-#  define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
45532-#  define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
45533-#  define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
45534-#  define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
45535-/* #pragma GCC diagnostic first appeared in gcc 4.6. */
45536-#elif (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && \
45537-  (__GNUC_MINOR__ > 5)))) || defined(__clang__)
45538-/*
45539- * The JEMALLOC_PRAGMA__ macro is an implementation detail of the GCC and Clang
45540- * diagnostic suppression macros and should not be used anywhere else.
45541- */
45542-#  define JEMALLOC_PRAGMA__(X) _Pragma(#X)
45543-#  define JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_PRAGMA__(GCC diagnostic push)
45544-#  define JEMALLOC_DIAGNOSTIC_POP JEMALLOC_PRAGMA__(GCC diagnostic pop)
45545-#  define JEMALLOC_DIAGNOSTIC_IGNORE(W) \
45546-     JEMALLOC_PRAGMA__(GCC diagnostic ignored W)
45547-
45548-/*
45549- * The -Wmissing-field-initializers warning is buggy in GCC versions < 5.1 and
45550- * all clang versions up to version 7 (currently trunk, unreleased).  This macro
45551- * suppresses the warning for the affected compiler versions only.
45552- */
45553-#  if ((defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ < 5)) || \
45554-     defined(__clang__)
45555-#    define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS  \
45556-          JEMALLOC_DIAGNOSTIC_IGNORE("-Wmissing-field-initializers")
45557-#  else
45558-#    define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
45559-#  endif
45560-
45561-#  define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS  \
45562-     JEMALLOC_DIAGNOSTIC_IGNORE("-Wtype-limits")
45563-#  define JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER \
45564-     JEMALLOC_DIAGNOSTIC_IGNORE("-Wunused-parameter")
45565-#  if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 7)
45566-#    define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN \
45567-       JEMALLOC_DIAGNOSTIC_IGNORE("-Walloc-size-larger-than=")
45568-#  else
45569-#    define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
45570-#  endif
45571-#  define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS \
45572-  JEMALLOC_DIAGNOSTIC_PUSH \
45573-  JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER
45574-#else
45575-#  define JEMALLOC_DIAGNOSTIC_PUSH
45576-#  define JEMALLOC_DIAGNOSTIC_POP
45577-#  define JEMALLOC_DIAGNOSTIC_IGNORE(W)
45578-#  define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
45579-#  define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
45580-#  define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
45581-#  define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
45582-#endif
45583-
45584-/*
45585- * Disables spurious diagnostics for all headers.  Since these headers are not
45586- * included by users directly, it does not affect their diagnostic settings.
45587- */
45588-JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
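
A small usage sketch of the push/ignore/pop trio defined above, silencing the buggy -Wmissing-field-initializers warning around a partial initializer; the struct here is hypothetical.

    /* Hypothetical struct, purely to have something that would trigger the
     * warning on the affected compiler versions. */
    typedef struct { int a; int b; int c; } example_cfg_t;

    JEMALLOC_DIAGNOSTIC_PUSH
    JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
    static const example_cfg_t example_cfg_default = {0};
    JEMALLOC_DIAGNOSTIC_POP
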
45589-
45590-#endif /* JEMALLOC_INTERNAL_MACROS_H */
45591diff --git a/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h b/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h
45592deleted file mode 100644
45593index 62c2b59..0000000
45594--- a/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h
45595+++ /dev/null
45596@@ -1,130 +0,0 @@
45597-#ifndef JEMALLOC_INTERNAL_TYPES_H
45598-#define JEMALLOC_INTERNAL_TYPES_H
45599-
45600-#include "jemalloc/internal/quantum.h"
45601-
45602-/* Processor / core id type. */
45603-typedef int malloc_cpuid_t;
45604-
45605-/* When realloc(non-null-ptr, 0) is called, what happens? */
45606-enum zero_realloc_action_e {
45607-	/* Realloc(ptr, 0) is free(ptr); return malloc(0); */
45608-	zero_realloc_action_alloc = 0,
45609-	/* Realloc(ptr, 0) is free(ptr); */
45610-	zero_realloc_action_free = 1,
45611-	/* Realloc(ptr, 0) aborts. */
45612-	zero_realloc_action_abort = 2
45613-};
45614-typedef enum zero_realloc_action_e zero_realloc_action_t;
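
To spell out the difference between the first two actions — a sketch of what realloc(ptr, 0) boils down to under each; plain libc calls stand in for jemalloc's internals here.

    #include <stdlib.h>

    static void *
    zero_realloc_sketch(void *ptr, zero_realloc_action_t action) {
        if (action == zero_realloc_action_alloc) {
            free(ptr);
            return malloc(0);  /* caller still gets a pointer it must free */
        }
        /* zero_realloc_action_free: the NULL result means "freed", not OOM. */
        free(ptr);
        return NULL;
    }
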
45615-
45616-/* Signature of write callback. */
45617-typedef void (write_cb_t)(void *, const char *);
45618-
45619-enum malloc_init_e {
45620-	malloc_init_uninitialized	= 3,
45621-	malloc_init_a0_initialized	= 2,
45622-	malloc_init_recursible		= 1,
45623-	malloc_init_initialized		= 0 /* Common case --> jnz. */
45624-};
45625-typedef enum malloc_init_e malloc_init_t;
45626-
45627-/*
45628- * Flags bits:
45629- *
45630- * a: arena
45631- * t: tcache
45632- * 0: unused
45633- * z: zero
45634- * n: alignment
45635- *
45636- * aaaaaaaa aaaatttt tttttttt 0znnnnnn
45637- */
45638-#define MALLOCX_ARENA_BITS	12
45639-#define MALLOCX_TCACHE_BITS	12
45640-#define MALLOCX_LG_ALIGN_BITS	6
45641-#define MALLOCX_ARENA_SHIFT	20
45642-#define MALLOCX_TCACHE_SHIFT	8
45643-#define MALLOCX_ARENA_MASK \
45644-    (((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
45645-/* NB: Arena index bias decreases the maximum number of arenas by 1. */
45646-#define MALLOCX_ARENA_LIMIT	((1 << MALLOCX_ARENA_BITS) - 1)
45647-#define MALLOCX_TCACHE_MASK \
45648-    (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT)
45649-#define MALLOCX_TCACHE_MAX	((1 << MALLOCX_TCACHE_BITS) - 3)
45650-#define MALLOCX_LG_ALIGN_MASK	((1 << MALLOCX_LG_ALIGN_BITS) - 1)
45651-/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
45652-#define MALLOCX_ALIGN_GET_SPECIFIED(flags)				\
45653-    (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
45654-#define MALLOCX_ALIGN_GET(flags)					\
45655-    (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
45656-#define MALLOCX_ZERO_GET(flags)						\
45657-    ((bool)(flags & MALLOCX_ZERO))
45658-
45659-#define MALLOCX_TCACHE_GET(flags)					\
45660-    (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2)
45661-#define MALLOCX_ARENA_GET(flags)					\
45662-    (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1)
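
A worked example of the bit layout above, composed with the public MALLOCX_* macros from jemalloc.h and decoded by hand following the masks defined here; the arena index 3 is arbitrary.

    #include <assert.h>
    #include <jemalloc/jemalloc.h>  /* MALLOCX_LG_ALIGN, MALLOCX_ZERO, MALLOCX_ARENA */

    int main(void) {
        int flags = MALLOCX_LG_ALIGN(4) | MALLOCX_ZERO | MALLOCX_ARENA(3);
        assert((1u << (flags & 0x3f)) == 16);      /* nnnnnn: lg(alignment) = 4  */
        assert((flags & 0x40) != 0);               /* z: request zeroed memory   */
        assert(((unsigned)flags >> 20) - 1 == 3);  /* a: arena index, stored + 1 */
        return 0;
    }
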
45663-
45664-/* Smallest size class to support. */
45665-#define TINY_MIN		(1U << LG_TINY_MIN)
45666-
45667-#define LONG			((size_t)(1U << LG_SIZEOF_LONG))
45668-#define LONG_MASK		(LONG - 1)
45669-
45670-/* Return the smallest long multiple that is >= a. */
45671-#define LONG_CEILING(a)							\
45672-	(((a) + LONG_MASK) & ~LONG_MASK)
45673-
45674-#define SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
45675-#define PTR_MASK		(SIZEOF_PTR - 1)
45676-
45677-/* Return the smallest (void *) multiple that is >= a. */
45678-#define PTR_CEILING(a)							\
45679-	(((a) + PTR_MASK) & ~PTR_MASK)
45680-
45681-/*
45682- * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
45683- * In addition, this controls the spacing of cacheline-spaced size classes.
45684- *
45685- * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
45686- * only handle raw constants.
45687- */
45688-#define LG_CACHELINE		6
45689-#define CACHELINE		64
45690-#define CACHELINE_MASK		(CACHELINE - 1)
45691-
45692-/* Return the smallest cacheline multiple that is >= s. */
45693-#define CACHELINE_CEILING(s)						\
45694-	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
45695-
45696-/* Return the nearest aligned address at or below a. */
45697-#define ALIGNMENT_ADDR2BASE(a, alignment)				\
45698-	((void *)((uintptr_t)(a) & ((~(alignment)) + 1)))
45699-
45700-/* Return the offset between a and the nearest aligned address at or below a. */
45701-#define ALIGNMENT_ADDR2OFFSET(a, alignment)				\
45702-	((size_t)((uintptr_t)(a) & (alignment - 1)))
45703-
45704-/* Return the smallest alignment multiple that is >= s. */
45705-#define ALIGNMENT_CEILING(s, alignment)					\
45706-	(((s) + (alignment - 1)) & ((~(alignment)) + 1))
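
A few concrete values for the three helpers above (the alignment must be a power of two for the bit tricks to hold); the wrapper function exists only to host the asserts.

    #include <assert.h>

    static void
    alignment_macros_example(void) {
        /* alignment = 16: round an address down, get its offset, round a size up. */
        assert(ALIGNMENT_ADDR2BASE((void *)0x1007, 16) == (void *)0x1000);
        assert(ALIGNMENT_ADDR2OFFSET((void *)0x1007, 16) == 0x7);
        assert(ALIGNMENT_CEILING(13, 16) == 16);
    }
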
45707-
45708-/* Declare a variable-length array. */
45709-#if __STDC_VERSION__ < 199901L
45710-#  ifdef _MSC_VER
45711-#    include <malloc.h>
45712-#    define alloca _alloca
45713-#  else
45714-#    ifdef JEMALLOC_HAS_ALLOCA_H
45715-#      include <alloca.h>
45716-#    else
45717-#      include <stdlib.h>
45718-#    endif
45719-#  endif
45720-#  define VARIABLE_ARRAY(type, name, count) \
45721-	type *name = alloca(sizeof(type) * (count))
45722-#else
45723-#  define VARIABLE_ARRAY(type, name, count) type name[(count)]
45724-#endif
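
A short usage sketch of VARIABLE_ARRAY — the call site reads the same whether it expands to a C99 VLA or to the alloca() fallback; the function itself is hypothetical.

    /* Hypothetical helper: stack-allocated scratch space sized at run time. */
    static unsigned
    sum_of_squares(unsigned count) {
        VARIABLE_ARRAY(unsigned, squares, count);
        unsigned i, total = 0;
        for (i = 0; i < count; i++) {
            squares[i] = i * i;
            total += squares[i];
        }
        return total;  /* squares[] vanishes when the function returns */
    }
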
45725-
45726-#endif /* JEMALLOC_INTERNAL_TYPES_H */
45727diff --git a/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in b/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in
45728deleted file mode 100644
45729index 5ce77d9..0000000
45730--- a/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in
45731+++ /dev/null
45732@@ -1,263 +0,0 @@
45733-#ifndef JEMALLOC_PREAMBLE_H
45734-#define JEMALLOC_PREAMBLE_H
45735-
45736-#include "jemalloc_internal_defs.h"
45737-#include "jemalloc/internal/jemalloc_internal_decls.h"
45738-
45739-#if defined(JEMALLOC_UTRACE) || defined(JEMALLOC_UTRACE_LABEL)
45740-#include <sys/ktrace.h>
45741-#  if defined(JEMALLOC_UTRACE)
45742-#    define UTRACE_CALL(p, l) utrace(p, l)
45743-#  else
45744-#    define UTRACE_CALL(p, l) utrace("jemalloc_process", p, l)
45745-#    define JEMALLOC_UTRACE
45746-#  endif
45747-#endif
45748-
45749-#define JEMALLOC_NO_DEMANGLE
45750-#ifdef JEMALLOC_JET
45751-#  undef JEMALLOC_IS_MALLOC
45752-#  define JEMALLOC_N(n) jet_##n
45753-#  include "jemalloc/internal/public_namespace.h"
45754-#  define JEMALLOC_NO_RENAME
45755-#  include "../jemalloc@[email protected]"
45756-#  undef JEMALLOC_NO_RENAME
45757-#else
45758-#  define JEMALLOC_N(n) @private_namespace@##n
45759-#  include "../jemalloc@[email protected]"
45760-#endif
45761-
45762-#if defined(JEMALLOC_OSATOMIC)
45763-#include <libkern/OSAtomic.h>
45764-#endif
45765-
45766-#ifdef JEMALLOC_ZONE
45767-#include <mach/mach_error.h>
45768-#include <mach/mach_init.h>
45769-#include <mach/vm_map.h>
45770-#endif
45771-
45772-#include "jemalloc/internal/jemalloc_internal_macros.h"
45773-
45774-/*
45775- * Note that the ordering matters here; the hook itself is name-mangled.  We
45776- * want the inclusion of hooks to happen early, so that we hook as much as
45777- * possible.
45778- */
45779-#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
45780-#  ifndef JEMALLOC_JET
45781-#    include "jemalloc/internal/private_namespace.h"
45782-#  else
45783-#    include "jemalloc/internal/private_namespace_jet.h"
45784-#  endif
45785-#endif
45786-#include "jemalloc/internal/test_hooks.h"
45787-
45788-#ifdef JEMALLOC_DEFINE_MADVISE_FREE
45789-#  define JEMALLOC_MADV_FREE 8
45790-#endif
45791-
45792-static const bool config_debug =
45793-#ifdef JEMALLOC_DEBUG
45794-    true
45795-#else
45796-    false
45797-#endif
45798-    ;
45799-static const bool have_dss =
45800-#ifdef JEMALLOC_DSS
45801-    true
45802-#else
45803-    false
45804-#endif
45805-    ;
45806-static const bool have_madvise_huge =
45807-#ifdef JEMALLOC_HAVE_MADVISE_HUGE
45808-    true
45809-#else
45810-    false
45811-#endif
45812-    ;
45813-static const bool config_fill =
45814-#ifdef JEMALLOC_FILL
45815-    true
45816-#else
45817-    false
45818-#endif
45819-    ;
45820-static const bool config_lazy_lock =
45821-#ifdef JEMALLOC_LAZY_LOCK
45822-    true
45823-#else
45824-    false
45825-#endif
45826-    ;
45827-static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
45828-static const bool config_prof =
45829-#ifdef JEMALLOC_PROF
45830-    true
45831-#else
45832-    false
45833-#endif
45834-    ;
45835-static const bool config_prof_libgcc =
45836-#ifdef JEMALLOC_PROF_LIBGCC
45837-    true
45838-#else
45839-    false
45840-#endif
45841-    ;
45842-static const bool config_prof_libunwind =
45843-#ifdef JEMALLOC_PROF_LIBUNWIND
45844-    true
45845-#else
45846-    false
45847-#endif
45848-    ;
45849-static const bool maps_coalesce =
45850-#ifdef JEMALLOC_MAPS_COALESCE
45851-    true
45852-#else
45853-    false
45854-#endif
45855-    ;
45856-static const bool config_stats =
45857-#ifdef JEMALLOC_STATS
45858-    true
45859-#else
45860-    false
45861-#endif
45862-    ;
45863-static const bool config_tls =
45864-#ifdef JEMALLOC_TLS
45865-    true
45866-#else
45867-    false
45868-#endif
45869-    ;
45870-static const bool config_utrace =
45871-#ifdef JEMALLOC_UTRACE
45872-    true
45873-#else
45874-    false
45875-#endif
45876-    ;
45877-static const bool config_xmalloc =
45878-#ifdef JEMALLOC_XMALLOC
45879-    true
45880-#else
45881-    false
45882-#endif
45883-    ;
45884-static const bool config_cache_oblivious =
45885-#ifdef JEMALLOC_CACHE_OBLIVIOUS
45886-    true
45887-#else
45888-    false
45889-#endif
45890-    ;
45891-/*
45892- * Undocumented, for jemalloc development use only at the moment.  See the note
45893- * in jemalloc/internal/log.h.
45894- */
45895-static const bool config_log =
45896-#ifdef JEMALLOC_LOG
45897-    true
45898-#else
45899-    false
45900-#endif
45901-    ;
45902-/*
45903- * Whether extra safety checks are enabled: things like checking the size of
45904- * sized deallocations, double-frees, etc.
45905- */
45906-static const bool config_opt_safety_checks =
45907-#ifdef JEMALLOC_OPT_SAFETY_CHECKS
45908-    true
45909-#elif defined(JEMALLOC_DEBUG)
45910-    /*
45911-     * This lets us only guard safety checks by one flag instead of two; fast
45912-     * checks can guard solely by config_opt_safety_checks and run in debug mode
45913-     * too.
45914-     */
45915-    true
45916-#else
45917-    false
45918-#endif
45919-    ;
45920-
45921-/*
45922- * Extra debugging of sized deallocations too onerous to be included in the
45923- * general safety checks.
45924- */
45925-static const bool config_opt_size_checks =
45926-#if defined(JEMALLOC_OPT_SIZE_CHECKS) || defined(JEMALLOC_DEBUG)
45927-    true
45928-#else
45929-    false
45930-#endif
45931-    ;
45932-
45933-static const bool config_uaf_detection =
45934-#if defined(JEMALLOC_UAF_DETECTION) || defined(JEMALLOC_DEBUG)
45935-    true
45936-#else
45937-    false
45938-#endif
45939-    ;
45940-
45941-/* Whether or not the C++ extensions are enabled. */
45942-static const bool config_enable_cxx =
45943-#ifdef JEMALLOC_ENABLE_CXX
45944-    true
45945-#else
45946-    false
45947-#endif
45948-;
45949-
45950-#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
45951-/* Currently percpu_arena depends on sched_getcpu. */
45952-#define JEMALLOC_PERCPU_ARENA
45953-#endif
45954-static const bool have_percpu_arena =
45955-#ifdef JEMALLOC_PERCPU_ARENA
45956-    true
45957-#else
45958-    false
45959-#endif
45960-    ;
45961-/*
45962- * Undocumented, and not recommended; the application should take full
45963- * responsibility for tracking provenance.
45964- */
45965-static const bool force_ivsalloc =
45966-#ifdef JEMALLOC_FORCE_IVSALLOC
45967-    true
45968-#else
45969-    false
45970-#endif
45971-    ;
45972-static const bool have_background_thread =
45973-#ifdef JEMALLOC_BACKGROUND_THREAD
45974-    true
45975-#else
45976-    false
45977-#endif
45978-    ;
45979-static const bool config_high_res_timer =
45980-#ifdef JEMALLOC_HAVE_CLOCK_REALTIME
45981-    true
45982-#else
45983-    false
45984-#endif
45985-    ;
45986-
45987-static const bool have_memcntl =
45988-#ifdef JEMALLOC_HAVE_MEMCNTL
45989-    true
45990-#else
45991-    false
45992-#endif
45993-    ;
45994-
45995-#endif /* JEMALLOC_PREAMBLE_H */
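The preamble above turns each JEMALLOC_* feature macro into a `static const bool config_*` constant, so feature-dependent code is written as plain `if` statements: both branches are always parsed and type-checked, and the compiler folds the dead one away. A standalone illustration of the pattern, using a hypothetical FOO_DEBUG macro rather than any real jemalloc flag:

    #include <stdbool.h>
    #include <stdio.h>

    /* Feature macro -> compile-time constant, in the style of config_debug above. */
    static const bool config_debug =
    #ifdef FOO_DEBUG
        true
    #else
        false
    #endif
        ;

    int
    main(void) {
            /*
             * Both branches compile; the unused one is eliminated by constant
             * folding, so there is no runtime cost and no #ifdef at the call site.
             */
            if (config_debug) {
                    puts("debug checks enabled");
            } else {
                    puts("debug checks compiled out");
            }
            return 0;
    }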
45996diff --git a/jemalloc/include/jemalloc/internal/large_externs.h b/jemalloc/include/jemalloc/internal/large_externs.h
45997deleted file mode 100644
45998index 8e09122..0000000
45999--- a/jemalloc/include/jemalloc/internal/large_externs.h
46000+++ /dev/null
46001@@ -1,24 +0,0 @@
46002-#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H
46003-#define JEMALLOC_INTERNAL_LARGE_EXTERNS_H
46004-
46005-#include "jemalloc/internal/hook.h"
46006-
46007-void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
46008-void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
46009-    bool zero);
46010-bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
46011-    size_t usize_max, bool zero);
46012-void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
46013-    size_t alignment, bool zero, tcache_t *tcache,
46014-    hook_ralloc_args_t *hook_args);
46015-
46016-void large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata);
46017-void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata);
46018-void large_dalloc(tsdn_t *tsdn, edata_t *edata);
46019-size_t large_salloc(tsdn_t *tsdn, const edata_t *edata);
46020-void large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info,
46021-    bool reset_recent);
46022-void large_prof_tctx_reset(edata_t *edata);
46023-void large_prof_info_set(edata_t *edata, prof_tctx_t *tctx, size_t size);
46024-
46025-#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */
46026diff --git a/jemalloc/include/jemalloc/internal/lockedint.h b/jemalloc/include/jemalloc/internal/lockedint.h
46027deleted file mode 100644
46028index d020ebe..0000000
46029--- a/jemalloc/include/jemalloc/internal/lockedint.h
46030+++ /dev/null
46031@@ -1,204 +0,0 @@
46032-#ifndef JEMALLOC_INTERNAL_LOCKEDINT_H
46033-#define JEMALLOC_INTERNAL_LOCKEDINT_H
46034-
46035-/*
46036- * On architectures that support 64-bit atomics, we use atomic updates for
46037- * our 64-bit values.  Otherwise, we use a plain uint64_t and synchronize
46038- * externally.
46039- */
46040-
46041-typedef struct locked_u64_s locked_u64_t;
46042-#ifdef JEMALLOC_ATOMIC_U64
46043-struct locked_u64_s {
46044-	atomic_u64_t val;
46045-};
46046-#else
46047-/* Must hold the associated mutex. */
46048-struct locked_u64_s {
46049-	uint64_t val;
46050-};
46051-#endif
46052-
46053-typedef struct locked_zu_s locked_zu_t;
46054-struct locked_zu_s {
46055-	atomic_zu_t val;
46056-};
46057-
46058-#ifndef JEMALLOC_ATOMIC_U64
46059-#  define LOCKEDINT_MTX_DECLARE(name) malloc_mutex_t name;
46060-#  define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode)			\
46061-    malloc_mutex_init(&(mu), name, rank, rank_mode)
46062-#  define LOCKEDINT_MTX(mtx) (&(mtx))
46063-#  define LOCKEDINT_MTX_LOCK(tsdn, mu) malloc_mutex_lock(tsdn, &(mu))
46064-#  define LOCKEDINT_MTX_UNLOCK(tsdn, mu) malloc_mutex_unlock(tsdn, &(mu))
46065-#  define LOCKEDINT_MTX_PREFORK(tsdn, mu) malloc_mutex_prefork(tsdn, &(mu))
46066-#  define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu)			\
46067-    malloc_mutex_postfork_parent(tsdn, &(mu))
46068-#  define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu)			\
46069-    malloc_mutex_postfork_child(tsdn, &(mu))
46070-#else
46071-#  define LOCKEDINT_MTX_DECLARE(name)
46072-#  define LOCKEDINT_MTX(mtx) NULL
46073-#  define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) false
46074-#  define LOCKEDINT_MTX_LOCK(tsdn, mu)
46075-#  define LOCKEDINT_MTX_UNLOCK(tsdn, mu)
46076-#  define LOCKEDINT_MTX_PREFORK(tsdn, mu)
46077-#  define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu)
46078-#  define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu)
46079-#endif
46080-
46081-#ifdef JEMALLOC_ATOMIC_U64
46082-#  define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) assert((mtx) == NULL)
46083-#else
46084-#  define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx)			\
46085-    malloc_mutex_assert_owner(tsdn, (mtx))
46086-#endif
46087-
46088-static inline uint64_t
46089-locked_read_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p) {
46090-	LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
46091-#ifdef JEMALLOC_ATOMIC_U64
46092-	return atomic_load_u64(&p->val, ATOMIC_RELAXED);
46093-#else
46094-	return p->val;
46095-#endif
46096-}
46097-
46098-static inline void
46099-locked_inc_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
46100-    uint64_t x) {
46101-	LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
46102-#ifdef JEMALLOC_ATOMIC_U64
46103-	atomic_fetch_add_u64(&p->val, x, ATOMIC_RELAXED);
46104-#else
46105-	p->val += x;
46106-#endif
46107-}
46108-
46109-static inline void
46110-locked_dec_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
46111-    uint64_t x) {
46112-	LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
46113-#ifdef JEMALLOC_ATOMIC_U64
46114-	uint64_t r = atomic_fetch_sub_u64(&p->val, x, ATOMIC_RELAXED);
46115-	assert(r - x <= r);
46116-#else
46117-	p->val -= x;
46118-	assert(p->val + x >= p->val);
46119-#endif
46120-}
46121-
46122-/* Increment and take modulus.  Returns whether the modulo made any change.  */
46123-static inline bool
46124-locked_inc_mod_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
46125-    const uint64_t x, const uint64_t modulus) {
46126-	LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
46127-	uint64_t before, after;
46128-	bool overflow;
46129-#ifdef JEMALLOC_ATOMIC_U64
46130-	before = atomic_load_u64(&p->val, ATOMIC_RELAXED);
46131-	do {
46132-		after = before + x;
46133-		assert(after >= before);
46134-		overflow = (after >= modulus);
46135-		if (overflow) {
46136-			after %= modulus;
46137-		}
46138-	} while (!atomic_compare_exchange_weak_u64(&p->val, &before, after,
46139-	    ATOMIC_RELAXED, ATOMIC_RELAXED));
46140-#else
46141-	before = p->val;
46142-	after = before + x;
46143-	overflow = (after >= modulus);
46144-	if (overflow) {
46145-		after %= modulus;
46146-	}
46147-	p->val = after;
46148-#endif
46149-	return overflow;
46150-}
46151-
46152-/*
46153- * Non-atomically sets *dst += src.  *dst needs external synchronization.
46154- * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
46155- * the types here are atomic).
46156- */
46157-static inline void
46158-locked_inc_u64_unsynchronized(locked_u64_t *dst, uint64_t src) {
46159-#ifdef JEMALLOC_ATOMIC_U64
46160-	uint64_t cur_dst = atomic_load_u64(&dst->val, ATOMIC_RELAXED);
46161-	atomic_store_u64(&dst->val, src + cur_dst, ATOMIC_RELAXED);
46162-#else
46163-	dst->val += src;
46164-#endif
46165-}
46166-
46167-static inline uint64_t
46168-locked_read_u64_unsynchronized(locked_u64_t *p) {
46169-#ifdef JEMALLOC_ATOMIC_U64
46170-	return atomic_load_u64(&p->val, ATOMIC_RELAXED);
46171-#else
46172-	return p->val;
46173-#endif
46174-}
46175-
46176-static inline void
46177-locked_init_u64_unsynchronized(locked_u64_t *p, uint64_t x) {
46178-#ifdef JEMALLOC_ATOMIC_U64
46179-	atomic_store_u64(&p->val, x, ATOMIC_RELAXED);
46180-#else
46181-	p->val = x;
46182-#endif
46183-}
46184-
46185-static inline size_t
46186-locked_read_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p) {
46187-	LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
46188-#ifdef JEMALLOC_ATOMIC_U64
46189-	return atomic_load_zu(&p->val, ATOMIC_RELAXED);
46190-#else
46191-	return atomic_load_zu(&p->val, ATOMIC_RELAXED);
46192-#endif
46193-}
46194-
46195-static inline void
46196-locked_inc_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
46197-    size_t x) {
46198-	LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
46199-#ifdef JEMALLOC_ATOMIC_U64
46200-	atomic_fetch_add_zu(&p->val, x, ATOMIC_RELAXED);
46201-#else
46202-	size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED);
46203-	atomic_store_zu(&p->val, cur + x, ATOMIC_RELAXED);
46204-#endif
46205-}
46206-
46207-static inline void
46208-locked_dec_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
46209-    size_t x) {
46210-	LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
46211-#ifdef JEMALLOC_ATOMIC_U64
46212-	size_t r = atomic_fetch_sub_zu(&p->val, x, ATOMIC_RELAXED);
46213-	assert(r - x <= r);
46214-#else
46215-	size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED);
46216-	atomic_store_zu(&p->val, cur - x, ATOMIC_RELAXED);
46217-#endif
46218-}
46219-
46220-/* Like the _u64 variant, needs an externally synchronized *dst. */
46221-static inline void
46222-locked_inc_zu_unsynchronized(locked_zu_t *dst, size_t src) {
46223-	size_t cur_dst = atomic_load_zu(&dst->val, ATOMIC_RELAXED);
46224-	atomic_store_zu(&dst->val, src + cur_dst, ATOMIC_RELAXED);
46225-}
46226-
46227-/*
46228- * Unlike the _u64 variant, this is safe to call unconditionally.
46229- */
46230-static inline size_t
46231-locked_read_atomic_zu(locked_zu_t *p) {
46232-	return atomic_load_zu(&p->val, ATOMIC_RELAXED);
46233-}
46234-
46235-#endif /* JEMALLOC_INTERNAL_LOCKEDINT_H */
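lockedint.h above offers one counter API with two backends: lock-free 64-bit atomics when JEMALLOC_ATOMIC_U64 is available, otherwise a plain field that the caller protects with an external mutex. A reduced standalone sketch of the same split using standard <stdatomic.h> and pthreads (HAVE_ATOMIC_U64 is a stand-in flag for this sketch, not a jemalloc macro):

    #include <pthread.h>
    #include <stdint.h>

    #ifdef HAVE_ATOMIC_U64
    #include <stdatomic.h>
    typedef struct { _Atomic uint64_t val; } locked_u64_demo_t;

    /* Atomic backend: the mutex argument is unused; relaxed RMWs suffice. */
    static inline void
    locked_inc_u64_demo(pthread_mutex_t *mtx, locked_u64_demo_t *p, uint64_t x) {
            (void)mtx;
            atomic_fetch_add_explicit(&p->val, x, memory_order_relaxed);
    }

    static inline uint64_t
    locked_read_u64_demo(pthread_mutex_t *mtx, locked_u64_demo_t *p) {
            (void)mtx;
            return atomic_load_explicit(&p->val, memory_order_relaxed);
    }
    #else
    typedef struct { uint64_t val; } locked_u64_demo_t;

    /* Mutex backend: the caller must already hold mtx (jemalloc asserts this). */
    static inline void
    locked_inc_u64_demo(pthread_mutex_t *mtx, locked_u64_demo_t *p, uint64_t x) {
            (void)mtx;
            p->val += x;
    }

    static inline uint64_t
    locked_read_u64_demo(pthread_mutex_t *mtx, locked_u64_demo_t *p) {
            (void)mtx;
            return p->val;
    }
    #endif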
46236diff --git a/jemalloc/include/jemalloc/internal/log.h b/jemalloc/include/jemalloc/internal/log.h
46237deleted file mode 100644
46238index 6420858..0000000
46239--- a/jemalloc/include/jemalloc/internal/log.h
46240+++ /dev/null
46241@@ -1,115 +0,0 @@
46242-#ifndef JEMALLOC_INTERNAL_LOG_H
46243-#define JEMALLOC_INTERNAL_LOG_H
46244-
46245-#include "jemalloc/internal/atomic.h"
46246-#include "jemalloc/internal/malloc_io.h"
46247-#include "jemalloc/internal/mutex.h"
46248-
46249-#ifdef JEMALLOC_LOG
46250-#  define JEMALLOC_LOG_VAR_BUFSIZE 1000
46251-#else
46252-#  define JEMALLOC_LOG_VAR_BUFSIZE 1
46253-#endif
46254-
46255-#define JEMALLOC_LOG_BUFSIZE 4096
46256-
46257-/*
46258- * The log malloc_conf option is a '|'-delimited list of log_var name segments
46259- * which should be logged.  The names are themselves hierarchical, with '.' as
46260- * the delimiter (a "segment" is just a prefix in the log namespace).  So, if
46261- * you have:
46262- *
46263- * log("arena", "log msg for arena"); // 1
46264- * log("arena.a", "log msg for arena.a"); // 2
46265- * log("arena.b", "log msg for arena.b"); // 3
46266- * log("arena.a.a", "log msg for arena.a.a"); // 4
46267- * log("extent.a", "log msg for extent.a"); // 5
46268- * log("extent.b", "log msg for extent.b"); // 6
46269- *
46270- * And your malloc_conf option is "log=arena.a|extent", then lines 2, 4, 5, and
46271- * 6 will print at runtime.  You can enable logging from all log vars by
46272- * writing "log=.".
46273- *
46274- * None of this should be regarded as a stable API for right now.  It's intended
46275- * as a debugging interface, to let us keep around some of our printf-debugging
46276- * statements.
46277- */
46278-
46279-extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
46280-extern atomic_b_t log_init_done;
46281-
46282-typedef struct log_var_s log_var_t;
46283-struct log_var_s {
46284-	/*
46285-	 * Lowest bit is "inited", second lowest is "enabled".  Putting them in
46286-	 * a single word lets us avoid any fences on weak architectures.
46287-	 */
46288-	atomic_u_t state;
46289-	const char *name;
46290-};
46291-
46292-#define LOG_NOT_INITIALIZED 0U
46293-#define LOG_INITIALIZED_NOT_ENABLED 1U
46294-#define LOG_ENABLED 2U
46295-
46296-#define LOG_VAR_INIT(name_str) {ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str}
46297-
46298-/*
46299- * Returns the value we should assume for state (which is not necessarily
46300- * accurate; if logging is done before logging has finished initializing, then
46301- * we default to doing the safe thing by logging everything).
46302- */
46303-unsigned log_var_update_state(log_var_t *log_var);
46304-
46305-/* We factor out the metadata management to allow us to test more easily. */
46306-#define log_do_begin(log_var)						\
46307-if (config_log) {							\
46308-	unsigned log_state = atomic_load_u(&(log_var).state,		\
46309-	    ATOMIC_RELAXED);						\
46310-	if (unlikely(log_state == LOG_NOT_INITIALIZED)) {		\
46311-		log_state = log_var_update_state(&(log_var));		\
46312-		assert(log_state != LOG_NOT_INITIALIZED);		\
46313-	}								\
46314-	if (log_state == LOG_ENABLED) {					\
46315-		{
46316-			/* User code executes here. */
46317-#define log_do_end(log_var)						\
46318-		}							\
46319-	}								\
46320-}
46321-
46322-/*
46323- * MSVC has some preprocessor bugs in its expansion of __VA_ARGS__ during
46324- * preprocessing.  To work around this, we take all potential extra arguments in
46325- * a varargs function.  Since a varargs macro needs at least one argument in
46326- * the "...", we accept the format string there, and require that the first
46327- * argument in this "..." is a const char *.
46328- */
46329-static inline void
46330-log_impl_varargs(const char *name, ...) {
46331-	char buf[JEMALLOC_LOG_BUFSIZE];
46332-	va_list ap;
46333-
46334-	va_start(ap, name);
46335-	const char *format = va_arg(ap, const char *);
46336-	size_t dst_offset = 0;
46337-	dst_offset += malloc_snprintf(buf, JEMALLOC_LOG_BUFSIZE, "%s: ", name);
46338-	dst_offset += malloc_vsnprintf(buf + dst_offset,
46339-	    JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap);
46340-	dst_offset += malloc_snprintf(buf + dst_offset,
46341-	    JEMALLOC_LOG_BUFSIZE - dst_offset, "\n");
46342-	va_end(ap);
46343-
46344-	malloc_write(buf);
46345-}
46346-
46347-/* Call as log("log.var.str", "format_string %d", arg_for_format_string); */
46348-#define LOG(log_var_str, ...)						\
46349-do {									\
46350-	static log_var_t log_var = LOG_VAR_INIT(log_var_str);		\
46351-	log_do_begin(log_var)						\
46352-		log_impl_varargs((log_var).name, __VA_ARGS__);		\
46353-	log_do_end(log_var)						\
46354-} while (0)
46355-
46356-#endif /* JEMALLOC_INTERNAL_LOG_H */
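The filter rule documented above (a '|'-separated list of '.'-delimited name prefixes, with "." enabling everything) can be checked in isolation. The sketch below is not jemalloc's parser, just a standalone implementation of the rule as described:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Return true if `name` ("arena.a.a") is enabled by `filter`
     * ("arena.a|extent"): some filter entry must match a whole-segment prefix
     * of name, and "." enables everything.
     */
    static bool
    log_enabled(const char *filter, const char *name) {
            const char *entry = filter;
            while (*entry != '\0') {
                    const char *end = strchr(entry, '|');
                    size_t len = end ? (size_t)(end - entry) : strlen(entry);
                    if (len == 1 && entry[0] == '.') {
                            return true;
                    }
                    if (strncmp(name, entry, len) == 0 &&
                        (name[len] == '\0' || name[len] == '.')) {
                            return true;
                    }
                    entry = end ? end + 1 : entry + len;
            }
            return false;
    }

    int
    main(void) {
            printf("%d\n", log_enabled("arena.a|extent", "arena.a.a")); /* 1 */
            printf("%d\n", log_enabled("arena.a|extent", "arena.b"));   /* 0 */
            printf("%d\n", log_enabled("arena.a|extent", "extent.b"));  /* 1 */
            return 0;
    }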
46357diff --git a/jemalloc/include/jemalloc/internal/malloc_io.h b/jemalloc/include/jemalloc/internal/malloc_io.h
46358deleted file mode 100644
46359index a375bda..0000000
46360--- a/jemalloc/include/jemalloc/internal/malloc_io.h
46361+++ /dev/null
46362@@ -1,105 +0,0 @@
46363-#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H
46364-#define JEMALLOC_INTERNAL_MALLOC_IO_H
46365-
46366-#include "jemalloc/internal/jemalloc_internal_types.h"
46367-
46368-#ifdef _WIN32
46369-#  ifdef _WIN64
46370-#    define FMT64_PREFIX "ll"
46371-#    define FMTPTR_PREFIX "ll"
46372-#  else
46373-#    define FMT64_PREFIX "ll"
46374-#    define FMTPTR_PREFIX ""
46375-#  endif
46376-#  define FMTd32 "d"
46377-#  define FMTu32 "u"
46378-#  define FMTx32 "x"
46379-#  define FMTd64 FMT64_PREFIX "d"
46380-#  define FMTu64 FMT64_PREFIX "u"
46381-#  define FMTx64 FMT64_PREFIX "x"
46382-#  define FMTdPTR FMTPTR_PREFIX "d"
46383-#  define FMTuPTR FMTPTR_PREFIX "u"
46384-#  define FMTxPTR FMTPTR_PREFIX "x"
46385-#else
46386-#  include <inttypes.h>
46387-#  define FMTd32 PRId32
46388-#  define FMTu32 PRIu32
46389-#  define FMTx32 PRIx32
46390-#  define FMTd64 PRId64
46391-#  define FMTu64 PRIu64
46392-#  define FMTx64 PRIx64
46393-#  define FMTdPTR PRIdPTR
46394-#  define FMTuPTR PRIuPTR
46395-#  define FMTxPTR PRIxPTR
46396-#endif
46397-
46398-/* Size of stack-allocated buffer passed to buferror(). */
46399-#define BUFERROR_BUF		64
46400-
46401-/*
46402- * Size of stack-allocated buffer used by malloc_{,v,vc}printf().  This must be
46403- * large enough for all possible uses within jemalloc.
46404- */
46405-#define MALLOC_PRINTF_BUFSIZE	4096
46406-
46407-write_cb_t wrtmessage;
46408-int buferror(int err, char *buf, size_t buflen);
46409-uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
46410-    int base);
46411-void malloc_write(const char *s);
46412-
46413-/*
46414- * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
46415- * point math.
46416- */
46417-size_t malloc_vsnprintf(char *str, size_t size, const char *format,
46418-    va_list ap);
46419-size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
46420-    JEMALLOC_FORMAT_PRINTF(3, 4);
46421-/*
46422- * The caller can set write_cb to null to choose to print with the
46423- * je_malloc_message hook.
46424- */
46425-void malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
46426-    va_list ap);
46427-void malloc_cprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
46428-    ...) JEMALLOC_FORMAT_PRINTF(3, 4);
46429-void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
46430-
46431-static inline ssize_t
46432-malloc_write_fd(int fd, const void *buf, size_t count) {
46433-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
46434-	/*
46435-	 * Use syscall(2) rather than write(2) when possible in order to avoid
46436-	 * the possibility of memory allocation within libc.  This is necessary
46437-	 * on FreeBSD; most operating systems do not have this problem though.
46438-	 *
46439-	 * syscall() returns long or int, depending on platform, so capture the
46440-	 * result in the widest plausible type to avoid compiler warnings.
46441-	 */
46442-	long result = syscall(SYS_write, fd, buf, count);
46443-#else
46444-	ssize_t result = (ssize_t)write(fd, buf,
46445-#ifdef _WIN32
46446-	    (unsigned int)
46447-#endif
46448-	    count);
46449-#endif
46450-	return (ssize_t)result;
46451-}
46452-
46453-static inline ssize_t
46454-malloc_read_fd(int fd, void *buf, size_t count) {
46455-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
46456-	long result = syscall(SYS_read, fd, buf, count);
46457-#else
46458-	ssize_t result = read(fd, buf,
46459-#ifdef _WIN32
46460-	    (unsigned int)
46461-#endif
46462-	    count);
46463-#endif
46464-	return (ssize_t)result;
46465-}
46466-
46467-#endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */
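The FMT* macros above give a portable spelling for fixed-width and pointer-sized format specifiers: on most platforms they simply forward to <inttypes.h>, while the Windows branch spells the prefixes out by hand (older MSVC toolchains shipped without <inttypes.h>). A usage sketch with plain snprintf (jemalloc itself goes through malloc_snprintf, which avoids floating point):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Non-Windows definitions, matching the #else branch above. */
    #define FMTu64 PRIu64
    #define FMTxPTR PRIxPTR

    int
    main(void) {
            uint64_t nmalloc = 123456789012345ULL;
            uintptr_t addr = (uintptr_t)&nmalloc;
            char buf[128];

            snprintf(buf, sizeof(buf), "nmalloc: %" FMTu64 ", at 0x%" FMTxPTR "\n",
                nmalloc, addr);
            fputs(buf, stdout);
            return 0;
    }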
46468diff --git a/jemalloc/include/jemalloc/internal/mpsc_queue.h b/jemalloc/include/jemalloc/internal/mpsc_queue.h
46469deleted file mode 100644
46470index 316ea9b..0000000
46471--- a/jemalloc/include/jemalloc/internal/mpsc_queue.h
46472+++ /dev/null
46473@@ -1,134 +0,0 @@
46474-#ifndef JEMALLOC_INTERNAL_MPSC_QUEUE_H
46475-#define JEMALLOC_INTERNAL_MPSC_QUEUE_H
46476-
46477-#include "jemalloc/internal/atomic.h"
46478-
46479-/*
46480- * A concurrent implementation of a multi-producer, single-consumer queue.  It
46481- * supports three concurrent operations:
46482- * - Push
46483- * - Push batch
46484- * - Pop batch
46485- *
46486- * These operations are all lock-free.
46487- *
46488- * The implementation is the simple two-stack queue built on a Treiber stack.
46489- * It's not terribly efficient, but this isn't expected to be used anywhere with
46490- * hot code.  In fact, we don't really even need queue semantics in any
46491- * anticipated use cases; we could get away with just the stack.  But this way
46492- * lets us frame the API in terms of the existing list types, which is a nice
46493- * convenience.  We can save on cache misses by introducing our own (parallel)
46494- * single-linked list type here, and dropping FIFO semantics, if we need this to
46495- * get faster.  Since we're currently providing queue semantics though, we use
46496- * the prev field in the link rather than the next field for Treiber-stack
46497- * linkage, so that we can preserve order for batch-pushed lists (recall that the
46498- * two-stack trick reverses order in the lock-free first stack).
46499- */
46500-
46501-#define mpsc_queue(a_type)						\
46502-struct {								\
46503-	atomic_p_t tail;						\
46504-}
46505-
46506-#define mpsc_queue_proto(a_attr, a_prefix, a_queue_type, a_type,	\
46507-    a_list_type)							\
46508-/* Initialize a queue. */						\
46509-a_attr void								\
46510-a_prefix##new(a_queue_type *queue);					\
46511-/* Insert all items in src into the queue, clearing src. */		\
46512-a_attr void								\
46513-a_prefix##push_batch(a_queue_type *queue, a_list_type *src);		\
46514-/* Insert node into the queue. */					\
46515-a_attr void								\
46516-a_prefix##push(a_queue_type *queue, a_type *node);			\
46517-/*									\
46518- * Pop all items in the queue into the list at dst.  dst should already	\
46519- * be initialized (and may contain existing items, which then remain	\
46520- * in dst).								\
46521- */									\
46522-a_attr void								\
46523-a_prefix##pop_batch(a_queue_type *queue, a_list_type *dst);
46524-
46525-#define mpsc_queue_gen(a_attr, a_prefix, a_queue_type, a_type,		\
46526-    a_list_type, a_link)						\
46527-a_attr void								\
46528-a_prefix##new(a_queue_type *queue) {					\
46529-	atomic_store_p(&queue->tail, NULL, ATOMIC_RELAXED);		\
46530-}									\
46531-a_attr void								\
46532-a_prefix##push_batch(a_queue_type *queue, a_list_type *src) {		\
46533-	/*								\
46534-	 * Reuse the ql list next field as the Treiber stack next	\
46535-	 * field.							\
46536-	 */								\
46537-	a_type *first = ql_first(src);					\
46538-	a_type *last = ql_last(src, a_link);				\
46539-	void* cur_tail = atomic_load_p(&queue->tail, ATOMIC_RELAXED);	\
46540-	do {								\
46541-		/*							\
46542-		 * Note that this breaks the queue ring structure;	\
46543-		 * it's not a ring any more!				\
46544-		 */							\
46545-		first->a_link.qre_prev = cur_tail;			\
46546-		/*							\
46547-		 * Note: the upcoming CAS doesn't need acquire ordering; every	\
46548-		 * push only needs to synchronize with the next pop,	\
46549-		 * which we get from the release sequence rules.	\
46550-		 */							\
46551-	} while (!atomic_compare_exchange_weak_p(&queue->tail,		\
46552-	    &cur_tail, last, ATOMIC_RELEASE, ATOMIC_RELAXED));		\
46553-	ql_new(src);							\
46554-}									\
46555-a_attr void								\
46556-a_prefix##push(a_queue_type *queue, a_type *node) {			\
46557-	ql_elm_new(node, a_link);					\
46558-	a_list_type list;						\
46559-	ql_new(&list);							\
46560-	ql_head_insert(&list, node, a_link);				\
46561-	a_prefix##push_batch(queue, &list);				\
46562-}									\
46563-a_attr void								\
46564-a_prefix##pop_batch(a_queue_type *queue, a_list_type *dst) {		\
46565-	a_type *tail = atomic_load_p(&queue->tail, ATOMIC_RELAXED);	\
46566-	if (tail == NULL) {						\
46567-		/*							\
46568-		 * In the common special case where there are no	\
46569-		 * pending elements, bail early without a costly RMW.	\
46570-		 */							\
46571-		return;							\
46572-	}								\
46573-	tail = atomic_exchange_p(&queue->tail, NULL, ATOMIC_ACQUIRE);	\
46574-	/*								\
46575-	 * It's a single-consumer queue, so if cur started non-NULL,	\
46576-	 * it'd better stay non-NULL.					\
46577-	 */								\
46578-	assert(tail != NULL);						\
46579-	/*								\
46580-	 * We iterate through the stack and both fix up the link	\
46581-	 * structure (stack insertion broke the list requirement that	\
46582-	 * the list be circularly linked).  It's just as efficient at	\
46583-	 * this point to make the queue a "real" queue, so do that as	\
46584-	 * well.							\
46585-	 * If this ever gets to be a hot spot, we can omit this fixup	\
46586-	 * and make the queue a bag (i.e. not necessarily ordered), but	\
46587-	 * that would mean jettisoning the existing list API as the 	\
46588-	 * batch pushing/popping interface.				\
46589-	 */								\
46590-	a_list_type reversed;						\
46591-	ql_new(&reversed);						\
46592-	while (tail != NULL) {						\
46593-		/*							\
46594-		 * Pop an item off the stack, prepend it onto the list	\
46595-		 * (reversing the order).  Recall that we use the	\
46596-		 * list prev field as the Treiber stack next field to	\
46597-		 * preserve order of batch-pushed items when reversed.	\
46598-		 */							\
46599-		a_type *next = tail->a_link.qre_prev;			\
46600-		ql_elm_new(tail, a_link);				\
46601-		ql_head_insert(&reversed, tail, a_link);		\
46602-		tail = next;						\
46603-	}								\
46604-	ql_concat(dst, &reversed, a_link);				\
46605-}
46606-
46607-#endif /* JEMALLOC_INTERNAL_MPSC_QUEUE_H */
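Underneath, the queue above is a Treiber stack: producers CAS themselves onto `tail` with release ordering, and the single consumer exchanges `tail` for NULL with acquire ordering, then reverses the chain to recover FIFO order. A reduced standalone sketch with C11 atomics and an ordinary intrusive `next` pointer (jemalloc instead reuses the ql_* list's prev field, as the comments explain):

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct node_s node_t;
    struct node_s {
            node_t *stack_next; /* Treiber-stack linkage. */
            int payload;
    };

    typedef struct { _Atomic(node_t *) tail; } mpsc_demo_t;

    static void
    mpsc_demo_push(mpsc_demo_t *q, node_t *n) {
            node_t *cur = atomic_load_explicit(&q->tail, memory_order_relaxed);
            do {
                    n->stack_next = cur;
                    /* Release: the consumer's acquire exchange sees the payload. */
            } while (!atomic_compare_exchange_weak_explicit(&q->tail, &cur, n,
                memory_order_release, memory_order_relaxed));
    }

    /* Single consumer: grab the whole stack, then reverse it into FIFO order. */
    static node_t *
    mpsc_demo_pop_batch(mpsc_demo_t *q) {
            node_t *top = atomic_exchange_explicit(&q->tail, NULL,
                memory_order_acquire);
            node_t *fifo = NULL;
            while (top != NULL) {
                    node_t *next = top->stack_next;
                    top->stack_next = fifo;
                    fifo = top;
                    top = next;
            }
            return fifo; /* Oldest element first. */
    }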
46608diff --git a/jemalloc/include/jemalloc/internal/mutex.h b/jemalloc/include/jemalloc/internal/mutex.h
46609deleted file mode 100644
46610index 63a0b1b..0000000
46611--- a/jemalloc/include/jemalloc/internal/mutex.h
46612+++ /dev/null
46613@@ -1,319 +0,0 @@
46614-#ifndef JEMALLOC_INTERNAL_MUTEX_H
46615-#define JEMALLOC_INTERNAL_MUTEX_H
46616-
46617-#include "jemalloc/internal/atomic.h"
46618-#include "jemalloc/internal/mutex_prof.h"
46619-#include "jemalloc/internal/tsd.h"
46620-#include "jemalloc/internal/witness.h"
46621-
46622-extern int64_t opt_mutex_max_spin;
46623-
46624-typedef enum {
46625-	/* Can only acquire one mutex of a given witness rank at a time. */
46626-	malloc_mutex_rank_exclusive,
46627-	/*
46628-	 * Can acquire multiple mutexes of the same witness rank, but in
46629-	 * address-ascending order only.
46630-	 */
46631-	malloc_mutex_address_ordered
46632-} malloc_mutex_lock_order_t;
46633-
46634-typedef struct malloc_mutex_s malloc_mutex_t;
46635-struct malloc_mutex_s {
46636-	union {
46637-		struct {
46638-			/*
46639-			 * prof_data is defined first to reduce cacheline
46640-			 * bouncing: the data is not touched by the mutex holder
46641-			 * during unlocking, while it might be modified by
46642-			 * contenders.  Having it before the mutex itself could
46643-			 * avoid prefetching a modified cacheline (for the
46644-			 * unlocking thread).
46645-			 */
46646-			mutex_prof_data_t	prof_data;
46647-#ifdef _WIN32
46648-#  if _WIN32_WINNT >= 0x0600
46649-			SRWLOCK         	lock;
46650-#  else
46651-			CRITICAL_SECTION	lock;
46652-#  endif
46653-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
46654-			os_unfair_lock		lock;
46655-#elif (defined(JEMALLOC_MUTEX_INIT_CB))
46656-			pthread_mutex_t		lock;
46657-			malloc_mutex_t		*postponed_next;
46658-#else
46659-			pthread_mutex_t		lock;
46660-#endif
46661-			/*
46662-			 * Hint flag to avoid exclusive cache line contention
46663-			 * during spin waiting
46664-			 */
46665-			atomic_b_t		locked;
46666-		};
46667-		/*
46668-		 * We only touch witness when configured w/ debug.  However we
46669-		 * keep the field in a union when !debug so that we don't have
46670-		 * to pollute the code base with #ifdefs, while avoid paying the
46671-		 * to pollute the code base with #ifdefs, while avoiding the
46672-		 */
46673-#if !defined(JEMALLOC_DEBUG)
46674-		witness_t			witness;
46675-		malloc_mutex_lock_order_t	lock_order;
46676-#endif
46677-	};
46678-
46679-#if defined(JEMALLOC_DEBUG)
46680-	witness_t			witness;
46681-	malloc_mutex_lock_order_t	lock_order;
46682-#endif
46683-};
46684-
46685-#ifdef _WIN32
46686-#  if _WIN32_WINNT >= 0x0600
46687-#    define MALLOC_MUTEX_LOCK(m)    AcquireSRWLockExclusive(&(m)->lock)
46688-#    define MALLOC_MUTEX_UNLOCK(m)  ReleaseSRWLockExclusive(&(m)->lock)
46689-#    define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
46690-#  else
46691-#    define MALLOC_MUTEX_LOCK(m)    EnterCriticalSection(&(m)->lock)
46692-#    define MALLOC_MUTEX_UNLOCK(m)  LeaveCriticalSection(&(m)->lock)
46693-#    define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
46694-#  endif
46695-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
46696-#    define MALLOC_MUTEX_LOCK(m)    os_unfair_lock_lock(&(m)->lock)
46697-#    define MALLOC_MUTEX_UNLOCK(m)  os_unfair_lock_unlock(&(m)->lock)
46698-#    define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
46699-#else
46700-#    define MALLOC_MUTEX_LOCK(m)    pthread_mutex_lock(&(m)->lock)
46701-#    define MALLOC_MUTEX_UNLOCK(m)  pthread_mutex_unlock(&(m)->lock)
46702-#    define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
46703-#endif
46704-
46705-#define LOCK_PROF_DATA_INITIALIZER					\
46706-    {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0,		\
46707-	    ATOMIC_INIT(0), 0, NULL, 0}
46708-
46709-#ifdef _WIN32
46710-#  define MALLOC_MUTEX_INITIALIZER
46711-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
46712-#  if defined(JEMALLOC_DEBUG)
46713-#    define MALLOC_MUTEX_INITIALIZER					\
46714-  {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
46715-         WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
46716-#  else
46717-#    define MALLOC_MUTEX_INITIALIZER                      \
46718-  {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}},  \
46719-      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
46720-#  endif
46721-#elif (defined(JEMALLOC_MUTEX_INIT_CB))
46722-#  if (defined(JEMALLOC_DEBUG))
46723-#     define MALLOC_MUTEX_INITIALIZER					\
46724-      {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}},	\
46725-           WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
46726-#  else
46727-#     define MALLOC_MUTEX_INITIALIZER					\
46728-      {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}},	\
46729-           WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
46730-#  endif
46731-
46732-#else
46733-#    define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
46734-#  if defined(JEMALLOC_DEBUG)
46735-#    define MALLOC_MUTEX_INITIALIZER					\
46736-     {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
46737-           WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
46738-#  else
46739-#    define MALLOC_MUTEX_INITIALIZER                          \
46740-     {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}},	\
46741-      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
46742-#  endif
46743-#endif
46744-
46745-#ifdef JEMALLOC_LAZY_LOCK
46746-extern bool isthreaded;
46747-#else
46748-#  undef isthreaded /* Undo private_namespace.h definition. */
46749-#  define isthreaded true
46750-#endif
46751-
46752-bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
46753-    witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
46754-void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
46755-void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
46756-void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
46757-bool malloc_mutex_boot(void);
46758-void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
46759-
46760-void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
46761-
46762-static inline void
46763-malloc_mutex_lock_final(malloc_mutex_t *mutex) {
46764-	MALLOC_MUTEX_LOCK(mutex);
46765-	atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
46766-}
46767-
46768-static inline bool
46769-malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
46770-	return MALLOC_MUTEX_TRYLOCK(mutex);
46771-}
46772-
46773-static inline void
46774-mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
46775-	if (config_stats) {
46776-		mutex_prof_data_t *data = &mutex->prof_data;
46777-		data->n_lock_ops++;
46778-		if (data->prev_owner != tsdn) {
46779-			data->prev_owner = tsdn;
46780-			data->n_owner_switches++;
46781-		}
46782-	}
46783-}
46784-
46785-/* Trylock: return false if the lock is successfully acquired. */
46786-static inline bool
46787-malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
46788-	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
46789-	if (isthreaded) {
46790-		if (malloc_mutex_trylock_final(mutex)) {
46791-			atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
46792-			return true;
46793-		}
46794-		mutex_owner_stats_update(tsdn, mutex);
46795-	}
46796-	witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
46797-
46798-	return false;
46799-}
46800-
46801-/* Aggregate lock prof data. */
46802-static inline void
46803-malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
46804-	nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
46805-	if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
46806-		nstime_copy(&sum->max_wait_time, &data->max_wait_time);
46807-	}
46808-
46809-	sum->n_wait_times += data->n_wait_times;
46810-	sum->n_spin_acquired += data->n_spin_acquired;
46811-
46812-	if (sum->max_n_thds < data->max_n_thds) {
46813-		sum->max_n_thds = data->max_n_thds;
46814-	}
46815-	uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
46816-	    ATOMIC_RELAXED);
46817-	uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
46818-	    &data->n_waiting_thds, ATOMIC_RELAXED);
46819-	atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
46820-	    ATOMIC_RELAXED);
46821-	sum->n_owner_switches += data->n_owner_switches;
46822-	sum->n_lock_ops += data->n_lock_ops;
46823-}
46824-
46825-static inline void
46826-malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
46827-	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
46828-	if (isthreaded) {
46829-		if (malloc_mutex_trylock_final(mutex)) {
46830-			malloc_mutex_lock_slow(mutex);
46831-			atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
46832-		}
46833-		mutex_owner_stats_update(tsdn, mutex);
46834-	}
46835-	witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
46836-}
46837-
46838-static inline void
46839-malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
46840-	atomic_store_b(&mutex->locked, false, ATOMIC_RELAXED);
46841-	witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
46842-	if (isthreaded) {
46843-		MALLOC_MUTEX_UNLOCK(mutex);
46844-	}
46845-}
46846-
46847-static inline void
46848-malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
46849-	witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
46850-}
46851-
46852-static inline void
46853-malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
46854-	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
46855-}
46856-
46857-static inline void
46858-malloc_mutex_prof_copy(mutex_prof_data_t *dst, mutex_prof_data_t *source) {
46859-	/*
46860-	 * Not *really* allowed (we shouldn't be doing non-atomic loads of
46861-	 * atomic data), but the mutex protection makes this safe, and writing
46862-	 * a member-for-member copy is tedious for this situation.
46863-	 */
46864-	*dst = *source;
46865-	/* n_wait_thds is not reported (modified w/o locking). */
46866-	atomic_store_u32(&dst->n_waiting_thds, 0, ATOMIC_RELAXED);
46867-}
46868-
46869-/* Copy the prof data from mutex for processing. */
46870-static inline void
46871-malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
46872-    malloc_mutex_t *mutex) {
46873-	/* Can only read holding the mutex. */
46874-	malloc_mutex_assert_owner(tsdn, mutex);
46875-	malloc_mutex_prof_copy(data, &mutex->prof_data);
46876-}
46877-
46878-static inline void
46879-malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data,
46880-    malloc_mutex_t *mutex) {
46881-	mutex_prof_data_t *source = &mutex->prof_data;
46882-	/* Can only read holding the mutex. */
46883-	malloc_mutex_assert_owner(tsdn, mutex);
46884-
46885-	nstime_add(&data->tot_wait_time, &source->tot_wait_time);
46886-	if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
46887-		nstime_copy(&data->max_wait_time, &source->max_wait_time);
46888-	}
46889-	data->n_wait_times += source->n_wait_times;
46890-	data->n_spin_acquired += source->n_spin_acquired;
46891-	if (data->max_n_thds < source->max_n_thds) {
46892-		data->max_n_thds = source->max_n_thds;
46893-	}
46894-	/* n_wait_thds is not reported. */
46895-	atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
46896-	data->n_owner_switches += source->n_owner_switches;
46897-	data->n_lock_ops += source->n_lock_ops;
46898-}
46899-
46900-/* Compare the prof data and update to the maximum. */
46901-static inline void
46902-malloc_mutex_prof_max_update(tsdn_t *tsdn, mutex_prof_data_t *data,
46903-    malloc_mutex_t *mutex) {
46904-	mutex_prof_data_t *source = &mutex->prof_data;
46905-	/* Can only read holding the mutex. */
46906-	malloc_mutex_assert_owner(tsdn, mutex);
46907-
46908-	if (nstime_compare(&source->tot_wait_time, &data->tot_wait_time) > 0) {
46909-		nstime_copy(&data->tot_wait_time, &source->tot_wait_time);
46910-	}
46911-	if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
46912-		nstime_copy(&data->max_wait_time, &source->max_wait_time);
46913-	}
46914-	if (source->n_wait_times > data->n_wait_times) {
46915-		data->n_wait_times = source->n_wait_times;
46916-	}
46917-	if (source->n_spin_acquired > data->n_spin_acquired) {
46918-		data->n_spin_acquired = source->n_spin_acquired;
46919-	}
46920-	if (source->max_n_thds > data->max_n_thds) {
46921-		data->max_n_thds = source->max_n_thds;
46922-	}
46923-	if (source->n_owner_switches > data->n_owner_switches) {
46924-		data->n_owner_switches = source->n_owner_switches;
46925-	}
46926-	if (source->n_lock_ops > data->n_lock_ops) {
46927-		data->n_lock_ops = source->n_lock_ops;
46928-	}
46929-	/* n_wait_thds is not reported. */
46930-}
46931-
46932-#endif /* JEMALLOC_INTERNAL_MUTEX_H */
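The `locked` field above is only a hint: threads that spin before blocking poll it with relaxed loads instead of hammering the underlying lock, which keeps the lock's cache line from bouncing between spinners (note also the inverted convention above: malloc_mutex_trylock() returns false on success). A standalone sketch of that test-and-test-and-set idea over a pthread mutex; this is illustrative only, not jemalloc's malloc_mutex_lock_slow:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct {
            pthread_mutex_t lock;
            /* Mirrors malloc_mutex_t.locked: a relaxed hint for spinners. */
            atomic_bool locked;
    } hinted_mutex_t;

    bool
    hinted_trylock(hinted_mutex_t *m) {
            if (pthread_mutex_trylock(&m->lock) != 0) {
                    return false;
            }
            atomic_store_explicit(&m->locked, true, memory_order_relaxed);
            return true;
    }

    void
    hinted_lock(hinted_mutex_t *m) {
            for (int spins = 0; spins < 1000; spins++) {
                    /*
                     * Read the shared hint instead of retrying trylock, so
                     * spinners do not contend for the lock's cache line.
                     */
                    if (!atomic_load_explicit(&m->locked, memory_order_relaxed) &&
                        hinted_trylock(m)) {
                            return;
                    }
            }
            pthread_mutex_lock(&m->lock); /* Give up spinning and block. */
            atomic_store_explicit(&m->locked, true, memory_order_relaxed);
    }

    void
    hinted_unlock(hinted_mutex_t *m) {
            atomic_store_explicit(&m->locked, false, memory_order_relaxed);
            pthread_mutex_unlock(&m->lock);
    }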
46933diff --git a/jemalloc/include/jemalloc/internal/mutex_prof.h b/jemalloc/include/jemalloc/internal/mutex_prof.h
46934deleted file mode 100644
46935index 4a526a5..0000000
46936--- a/jemalloc/include/jemalloc/internal/mutex_prof.h
46937+++ /dev/null
46938@@ -1,117 +0,0 @@
46939-#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H
46940-#define JEMALLOC_INTERNAL_MUTEX_PROF_H
46941-
46942-#include "jemalloc/internal/atomic.h"
46943-#include "jemalloc/internal/nstime.h"
46944-#include "jemalloc/internal/tsd_types.h"
46945-
46946-#define MUTEX_PROF_GLOBAL_MUTEXES					\
46947-    OP(background_thread)						\
46948-    OP(max_per_bg_thd)							\
46949-    OP(ctl)								\
46950-    OP(prof)								\
46951-    OP(prof_thds_data)							\
46952-    OP(prof_dump)							\
46953-    OP(prof_recent_alloc)						\
46954-    OP(prof_recent_dump)						\
46955-    OP(prof_stats)
46956-
46957-typedef enum {
46958-#define OP(mtx) global_prof_mutex_##mtx,
46959-	MUTEX_PROF_GLOBAL_MUTEXES
46960-#undef OP
46961-	mutex_prof_num_global_mutexes
46962-} mutex_prof_global_ind_t;
46963-
46964-#define MUTEX_PROF_ARENA_MUTEXES					\
46965-    OP(large)								\
46966-    OP(extent_avail)							\
46967-    OP(extents_dirty)							\
46968-    OP(extents_muzzy)							\
46969-    OP(extents_retained)						\
46970-    OP(decay_dirty)							\
46971-    OP(decay_muzzy)							\
46972-    OP(base)								\
46973-    OP(tcache_list)							\
46974-    OP(hpa_shard)							\
46975-    OP(hpa_shard_grow)							\
46976-    OP(hpa_sec)
46977-
46978-typedef enum {
46979-#define OP(mtx) arena_prof_mutex_##mtx,
46980-	MUTEX_PROF_ARENA_MUTEXES
46981-#undef OP
46982-	mutex_prof_num_arena_mutexes
46983-} mutex_prof_arena_ind_t;
46984-
46985-/*
46986- * The fourth parameter is a boolean value that is true for derived rate counters
46987- * and false for real ones.
46988- */
46989-#define MUTEX_PROF_UINT64_COUNTERS					\
46990-    OP(num_ops, uint64_t, "n_lock_ops", false, num_ops)					\
46991-    OP(num_ops_ps, uint64_t, "(#/sec)", true, num_ops)				\
46992-    OP(num_wait, uint64_t, "n_waiting", false, num_wait)				\
46993-    OP(num_wait_ps, uint64_t, "(#/sec)", true, num_wait)				\
46994-    OP(num_spin_acq, uint64_t, "n_spin_acq", false, num_spin_acq)			\
46995-    OP(num_spin_acq_ps, uint64_t, "(#/sec)", true, num_spin_acq)			\
46996-    OP(num_owner_switch, uint64_t, "n_owner_switch", false, num_owner_switch)		\
46997-    OP(num_owner_switch_ps, uint64_t, "(#/sec)", true, num_owner_switch)	\
46998-    OP(total_wait_time, uint64_t, "total_wait_ns", false, total_wait_time)		\
46999-    OP(total_wait_time_ps, uint64_t, "(#/sec)", true, total_wait_time)		\
47000-    OP(max_wait_time, uint64_t, "max_wait_ns", false, max_wait_time)
47001-
47002-#define MUTEX_PROF_UINT32_COUNTERS					\
47003-    OP(max_num_thds, uint32_t, "max_n_thds", false, max_num_thds)
47004-
47005-#define MUTEX_PROF_COUNTERS						\
47006-		MUTEX_PROF_UINT64_COUNTERS				\
47007-		MUTEX_PROF_UINT32_COUNTERS
47008-
47009-#define OP(counter, type, human, derived, base_counter) mutex_counter_##counter,
47010-
47011-#define COUNTER_ENUM(counter_list, t)					\
47012-		typedef enum {						\
47013-			counter_list					\
47014-			mutex_prof_num_##t##_counters			\
47015-		} mutex_prof_##t##_counter_ind_t;
47016-
47017-COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t)
47018-COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t)
47019-
47020-#undef COUNTER_ENUM
47021-#undef OP
47022-
47023-typedef struct {
47024-	/*
47025-	 * Counters touched on the slow path, i.e. when there is lock
47026-	 * contention.  We update them once we have the lock.
47027-	 */
47028-	/* Total time (in nanoseconds) spent waiting on this mutex. */
47029-	nstime_t		tot_wait_time;
47030-	/* Max time (in nanoseconds) spent on a single lock operation. */
47031-	nstime_t		max_wait_time;
47032-	/* # of times we had to wait for this mutex (after spinning). */
47033-	uint64_t		n_wait_times;
47034-	/* # of times acquired the mutex through local spinning. */
47035-	uint64_t		n_spin_acquired;
47036-	/* Max # of threads waiting for the mutex at the same time. */
47037-	uint32_t		max_n_thds;
47038-	/* Current # of threads waiting on the lock.  Atomic synced. */
47039-	atomic_u32_t		n_waiting_thds;
47040-
47041-	/*
47042-	 * Data touched on the fast path.  These are modified right after we
47043-	 * grab the lock, so they're placed closest to the end (i.e. right before
47044-	 * the lock) so that we have a higher chance of them being on the same
47045-	 * cacheline.
47046-	 */
47047-	/* # of times the mutex holder is different than the previous one. */
47048-	uint64_t		n_owner_switches;
47049-	/* Previous mutex holder, to facilitate n_owner_switches. */
47050-	tsdn_t			*prev_owner;
47051-	/* # of lock() operations in total. */
47052-	uint64_t		n_lock_ops;
47053-} mutex_prof_data_t;
47054-
47055-#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */
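The OP(...) lists above are X-macros: one list is expanded several times, once to build an enum of counter indices and again to build parallel tables of types and human-readable names. A standalone miniature of the idiom (the counters here are made up for illustration):

    #include <stdio.h>

    /* One list, many expansions. */
    #define DEMO_COUNTERS          \
        OP(num_ops,  "n_lock_ops") \
        OP(num_wait, "n_waiting")  \
        OP(max_wait, "max_wait_ns")

    /* Expansion 1: an enum of counter indices. */
    typedef enum {
    #define OP(name, human) counter_##name,
            DEMO_COUNTERS
    #undef OP
            num_counters
    } counter_ind_t;

    /* Expansion 2: a parallel table of human-readable names. */
    static const char *counter_names[num_counters] = {
    #define OP(name, human) human,
            DEMO_COUNTERS
    #undef OP
    };

    int
    main(void) {
            for (int i = 0; i < num_counters; i++) {
                    printf("%d: %s\n", i, counter_names[i]);
            }
            return 0;
    }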
47056diff --git a/jemalloc/include/jemalloc/internal/nstime.h b/jemalloc/include/jemalloc/internal/nstime.h
47057deleted file mode 100644
47058index 486e5cc..0000000
47059--- a/jemalloc/include/jemalloc/internal/nstime.h
47060+++ /dev/null
47061@@ -1,73 +0,0 @@
47062-#ifndef JEMALLOC_INTERNAL_NSTIME_H
47063-#define JEMALLOC_INTERNAL_NSTIME_H
47064-
47065-/* Maximum supported number of seconds (~584 years). */
47066-#define NSTIME_SEC_MAX KQU(18446744072)
47067-
47068-#define NSTIME_MAGIC ((uint32_t)0xb8a9ce37)
47069-#ifdef JEMALLOC_DEBUG
47070-#  define NSTIME_ZERO_INITIALIZER {0, NSTIME_MAGIC}
47071-#else
47072-#  define NSTIME_ZERO_INITIALIZER {0}
47073-#endif
47074-
47075-typedef struct {
47076-	uint64_t ns;
47077-#ifdef JEMALLOC_DEBUG
47078-	uint32_t magic; /* Tracks if initialized. */
47079-#endif
47080-} nstime_t;
47081-
47082-static const nstime_t nstime_zero = NSTIME_ZERO_INITIALIZER;
47083-
47084-void nstime_init(nstime_t *time, uint64_t ns);
47085-void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
47086-uint64_t nstime_ns(const nstime_t *time);
47087-uint64_t nstime_sec(const nstime_t *time);
47088-uint64_t nstime_msec(const nstime_t *time);
47089-uint64_t nstime_nsec(const nstime_t *time);
47090-void nstime_copy(nstime_t *time, const nstime_t *source);
47091-int nstime_compare(const nstime_t *a, const nstime_t *b);
47092-void nstime_add(nstime_t *time, const nstime_t *addend);
47093-void nstime_iadd(nstime_t *time, uint64_t addend);
47094-void nstime_subtract(nstime_t *time, const nstime_t *subtrahend);
47095-void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
47096-void nstime_imultiply(nstime_t *time, uint64_t multiplier);
47097-void nstime_idivide(nstime_t *time, uint64_t divisor);
47098-uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
47099-uint64_t nstime_ns_since(const nstime_t *past);
47100-
47101-typedef bool (nstime_monotonic_t)(void);
47102-extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic;
47103-
47104-typedef void (nstime_update_t)(nstime_t *);
47105-extern nstime_update_t *JET_MUTABLE nstime_update;
47106-
47107-typedef void (nstime_prof_update_t)(nstime_t *);
47108-extern nstime_prof_update_t *JET_MUTABLE nstime_prof_update;
47109-
47110-void nstime_init_update(nstime_t *time);
47111-void nstime_prof_init_update(nstime_t *time);
47112-
47113-enum prof_time_res_e {
47114-	prof_time_res_default = 0,
47115-	prof_time_res_high = 1
47116-};
47117-typedef enum prof_time_res_e prof_time_res_t;
47118-
47119-extern prof_time_res_t opt_prof_time_res;
47120-extern const char *prof_time_res_mode_names[];
47121-
47122-JEMALLOC_ALWAYS_INLINE void
47123-nstime_init_zero(nstime_t *time) {
47124-	nstime_copy(time, &nstime_zero);
47125-}
47126-
47127-JEMALLOC_ALWAYS_INLINE bool
47128-nstime_equals_zero(nstime_t *time) {
47129-	int diff = nstime_compare(time, &nstime_zero);
47130-	assert(diff >= 0);
47131-	return diff == 0;
47132-}
47133-
47134-#endif /* JEMALLOC_INTERNAL_NSTIME_H */
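NSTIME_SEC_MAX above follows from the 64-bit nanosecond representation: it is the largest whole-second count for which sec * 10^9 plus a sub-second remainder of up to 999999999 ns still fits in a uint64_t, which works out to 18446744072 seconds, roughly 584 years. The arithmetic is easy to check:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void) {
            /* Largest sec with sec * 1e9 + 999999999 still representable. */
            uint64_t sec_max = (UINT64_MAX - 999999999ULL) / 1000000000ULL;
            uint64_t years = sec_max / (365ULL * 24 * 60 * 60);

            printf("NSTIME_SEC_MAX = %" PRIu64 " (~%" PRIu64 " years)\n",
                sec_max, years);
            return 0;
    }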
47135diff --git a/jemalloc/include/jemalloc/internal/pa.h b/jemalloc/include/jemalloc/internal/pa.h
47136deleted file mode 100644
47137index 4748a05..0000000
47138--- a/jemalloc/include/jemalloc/internal/pa.h
47139+++ /dev/null
47140@@ -1,243 +0,0 @@
47141-#ifndef JEMALLOC_INTERNAL_PA_H
47142-#define JEMALLOC_INTERNAL_PA_H
47143-
47144-#include "jemalloc/internal/base.h"
47145-#include "jemalloc/internal/decay.h"
47146-#include "jemalloc/internal/ecache.h"
47147-#include "jemalloc/internal/edata_cache.h"
47148-#include "jemalloc/internal/emap.h"
47149-#include "jemalloc/internal/hpa.h"
47150-#include "jemalloc/internal/lockedint.h"
47151-#include "jemalloc/internal/pac.h"
47152-#include "jemalloc/internal/pai.h"
47153-#include "jemalloc/internal/sec.h"
47154-
47155-/*
47156- * The page allocator; responsible for acquiring pages of memory for
47157- * allocations.  It picks the implementation of the page allocator interface
47158- * (i.e. a pai_t) to handle a given page-level allocation request.  For now, the
47159- * only such implementation is the PAC code ("page allocator classic"), but
47160- * others will be coming soon.
47161- */
47162-
47163-typedef struct pa_central_s pa_central_t;
47164-struct pa_central_s {
47165-	hpa_central_t hpa;
47166-};
47167-
47168-/*
47169- * The stats for a particular pa_shard.  Because of the way the ctl module
47170- * handles stats epoch data collection (it has its own arena_stats, and merges
47171- * the stats from each arena into it), this needs to live in the arena_stats_t;
47172- * hence we define it here and let the pa_shard have a pointer (rather than the
47173- * more natural approach of just embedding it in the pa_shard itself).
47174- *
47175- * We follow the arena_stats_t approach of marking the derived fields.  These
47176- * are the ones that are not maintained on their own; instead, their values are
47177- * derived during those stats merges.
47178- */
47179-typedef struct pa_shard_stats_s pa_shard_stats_t;
47180-struct pa_shard_stats_s {
47181-	/* Number of edata_t structs allocated by base, but not being used. */
47182-	size_t edata_avail; /* Derived. */
47183-	/*
47184-	 * Stats specific to the PAC.  For now, these are the only stats that
47185-	 * exist, but there will eventually be other page allocators.  Things
47186-	 * like edata_avail make sense in a cross-PA sense, but things like
47187-	 * npurges don't.
47188-	 */
47189-	pac_stats_t pac_stats;
47190-};
47191-
47192-/*
47193- * The local allocator handle.  Keeps the state necessary to satisfy page-sized
47194- * allocations.
47195- *
47196- * The contents are mostly internal to the PA module.  The key exception is that
47197- * arena decay code is allowed to grab pointers to the dirty and muzzy ecaches
47198- * decay_ts, for a couple of queries, passing them back to a PA function, or
47199- * acquiring decay.mtx and looking at decay.purging.  The reasoning is that,
47200- * while PA decides what and how to purge, the arena code decides when and where
47201- * (e.g. on what thread).  It's allowed to use the presence of another purger to
47202- * decide.
47203- * (The background thread code also touches some other decay internals, but
47204- * that's not fundamental; it's just an artifact of a partial refactoring, and
47205- * its accesses could be straightforwardly moved inside the decay module).
47206- */
47207-typedef struct pa_shard_s pa_shard_t;
47208-struct pa_shard_s {
47209-	/* The central PA this shard is associated with. */
47210-	pa_central_t *central;
47211-
47212-	/*
47213-	 * Number of pages in active extents.
47214-	 *
47215-	 * Synchronization: atomic.
47216-	 */
47217-	atomic_zu_t nactive;
47218-
47219-	/*
47220-	 * Whether or not we should prefer the hugepage allocator.  Atomic since
47221-	 * it may be concurrently modified by a thread setting extent hooks.
47222-	 * Note that we still may do HPA operations in this arena; if use_hpa is
47223-	 * changed from true to false, we'll free back to the hugepage allocator
47224-	 * for those allocations.
47225-	 */
47226-	atomic_b_t use_hpa;
47227-
47228-	/*
47229-	 * If we never used the HPA to begin with, it wasn't initialized, and so
47230-	 * we shouldn't try to e.g. acquire its mutexes during fork.  This
47231-	 * tracks that knowledge.
47232-	 */
47233-	bool ever_used_hpa;
47234-
47235-	/* Allocates from a PAC. */
47236-	pac_t pac;
47237-
47238-	/*
47239-	 * We place a small extent cache in front of the HPA, since we intend
47240-	 * these configurations to use many fewer arenas, and therefore have a
47241-	 * higher risk of hot locks.
47242-	 */
47243-	sec_t hpa_sec;
47244-	hpa_shard_t hpa_shard;
47245-
47246-	/* The source of edata_t objects. */
47247-	edata_cache_t edata_cache;
47248-
47249-	unsigned ind;
47250-
47251-	malloc_mutex_t *stats_mtx;
47252-	pa_shard_stats_t *stats;
47253-
47254-	/* The emap this shard is tied to. */
47255-	emap_t *emap;
47256-
47257-	/* The base from which we get the ehooks and allocate metadata. */
47258-	base_t *base;
47259-};
47260-
47261-static inline bool
47262-pa_shard_dont_decay_muzzy(pa_shard_t *shard) {
47263-	return ecache_npages_get(&shard->pac.ecache_muzzy) == 0 &&
47264-	    pac_decay_ms_get(&shard->pac, extent_state_muzzy) <= 0;
47265-}
47266-
47267-static inline ehooks_t *
47268-pa_shard_ehooks_get(pa_shard_t *shard) {
47269-	return base_ehooks_get(shard->base);
47270-}
47271-
47272-/* Returns true on error. */
47273-bool pa_central_init(pa_central_t *central, base_t *base, bool hpa,
47274-    hpa_hooks_t *hpa_hooks);
47275-
47276-/* Returns true on error. */
47277-bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
47278-    emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats,
47279-    malloc_mutex_t *stats_mtx, nstime_t *cur_time, size_t oversize_threshold,
47280-    ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms);
47281-
47282-/*
47283- * This isn't exposed to users; we allow late enablement of the HPA shard so
47284- * that we can boot without worrying about the HPA, then turn it on in a0.
47285- */
47286-bool pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
47287-    const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts);
47288-
47289-/*
47290- * We stop using the HPA when custom extent hooks are installed, but still
47291- * redirect deallocations to it.
47292- */
47293-void pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard);
47294-
47295-/*
47296- * This does the PA-specific parts of arena reset (i.e. freeing all active
47297- * allocations).
47298- */
47299-void pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard);
47300-
47301-/*
47302- * Destroy all the remaining retained extents.  Should only be called after
47303- * decaying all active, dirty, and muzzy extents to the retained state, as the
47304- * last step in destroying the shard.
47305- */
47306-void pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard);
47307-
47308-/* Gets an edata for the given allocation. */
47309-edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
47310-    size_t alignment, bool slab, szind_t szind, bool zero, bool guarded,
47311-    bool *deferred_work_generated);
47312-/* Returns true on error, in which case nothing changed. */
47313-bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
47314-    size_t new_size, szind_t szind, bool zero, bool *deferred_work_generated);
47315-/*
47316- * The same.  Sets *generated_dirty to true if we produced new dirty pages, and
47317- * false otherwise.
47318- */
47319-bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
47320-    size_t new_size, szind_t szind, bool *deferred_work_generated);
47321-/*
47322- * Frees the given edata back to the pa.  Sets *generated_dirty if we produced
47323- * new dirty pages (well, we always set it for now; but this need not be the
47324- * case).
47325- * (We could make generated_dirty the return value of course, but this is more
47326- * consistent with the shrink pathway and our error codes here).
47327- */
47328-void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
47329-    bool *deferred_work_generated);
47330-bool pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state,
47331-    ssize_t decay_ms, pac_purge_eagerness_t eagerness);
47332-ssize_t pa_decay_ms_get(pa_shard_t *shard, extent_state_t state);
47333-
47334-/*
47335- * Do deferred work on this PA shard.
47336- *
47337- * Morally, this should do both PAC decay and the HPA deferred work.  For now,
47338- * though, the arena, background thread, and PAC modules are tightly interwoven
47339- * in a way that's tricky to extricate, so we only do the HPA-specific parts.
47340- */
47341-void pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard,
47342-    bool deferral_allowed);
47343-void pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
47344-void pa_shard_try_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
47345-uint64_t pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
47346-
47347-/******************************************************************************/
47348-/*
47349- * Various bits of "boring" functionality that are still part of this module,
47350- * but that we relegate to pa_extra.c, to keep the core logic in pa.c as
47351- * readable as possible.
47352- */
47353-
47354-/*
47355- * These fork phases are numbered to match the arena fork phases, to keep the
47356- * two easy to correlate.  That is why there is no prefork1.
47357- */
47358-void pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard);
47359-void pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard);
47360-void pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard);
47361-void pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard);
47362-void pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard);
47363-void pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard);
47364-void pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard);
47365-
47366-void pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive,
47367-    size_t *ndirty, size_t *nmuzzy);
47368-
47369-void pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
47370-    pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out,
47371-    hpa_shard_stats_t *hpa_stats_out, sec_stats_t *sec_stats_out,
47372-    size_t *resident);
47373-
47374-/*
47375- * Reads the PA-owned mutex stats into the output stats array, at the
47376- * appropriate positions.  Morally, these stats should really live in
47377- * pa_shard_stats_t, but the indices are sort of baked into the various mutex
47378- * prof macros.  Moving them there would be a good cleanup at some point.
47379- */
47380-void pa_shard_mtx_stats_read(tsdn_t *tsdn, pa_shard_t *shard,
47381-    mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]);
47382-
47383-#endif /* JEMALLOC_INTERNAL_PA_H */
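The pa_* prototypes above share one calling convention: functions return a bool that is true on
error, and operations that may leave cleanup for later report it through a bool
*deferred_work_generated out-parameter that the caller is expected to check before running
deferred work.  The standalone sketch below is not jemalloc code; toy_shard_t, toy_alloc, and
toy_do_deferred_work are invented stand-ins that only illustrate that flow.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for a shard: just counts work that was left for later. */
typedef struct toy_shard_s {
	int pending_deferred_work;
} toy_shard_t;

/* Returns true on error; reports deferred work through the out-parameter. */
static bool
toy_alloc(toy_shard_t *shard, size_t size, bool *deferred_work_generated) {
	if (size == 0) {
		return true; /* Error: nothing changed. */
	}
	/* Pretend this allocation left cleanup for later. */
	shard->pending_deferred_work++;
	*deferred_work_generated = true;
	return false;
}

static void
toy_do_deferred_work(toy_shard_t *shard) {
	shard->pending_deferred_work = 0;
}

int
main(void) {
	toy_shard_t shard = {0};
	bool deferred = false;

	if (toy_alloc(&shard, 4096, &deferred)) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	if (deferred) {
		/* Plays the role pa_shard_do_deferred_work() has in the real API. */
		toy_do_deferred_work(&shard);
	}
	printf("pending deferred work: %d\n", shard.pending_deferred_work);
	return 0;
}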
47384diff --git a/jemalloc/include/jemalloc/internal/pac.h b/jemalloc/include/jemalloc/internal/pac.h
47385deleted file mode 100644
47386index 01c4e6a..0000000
47387--- a/jemalloc/include/jemalloc/internal/pac.h
47388+++ /dev/null
47389@@ -1,179 +0,0 @@
47390-#ifndef JEMALLOC_INTERNAL_PAC_H
47391-#define JEMALLOC_INTERNAL_PAC_H
47392-
47393-#include "jemalloc/internal/exp_grow.h"
47394-#include "jemalloc/internal/pai.h"
47395-#include "san_bump.h"
47396-
47397-
47398-/*
47399- * Page allocator classic; an implementation of the PAI interface that:
47400- * - Can be used for arenas with custom extent hooks.
47401- * - Can always satisfy any allocation request (including highly-fragmentary
47402- *   ones).
47403- * - Can use efficient OS-level zeroing primitives for demand-filled pages.
47404- */
47405-
47406-/* How "eager" decay/purging should be. */
47407-enum pac_purge_eagerness_e {
47408-	PAC_PURGE_ALWAYS,
47409-	PAC_PURGE_NEVER,
47410-	PAC_PURGE_ON_EPOCH_ADVANCE
47411-};
47412-typedef enum pac_purge_eagerness_e pac_purge_eagerness_t;
47413-
47414-typedef struct pac_decay_stats_s pac_decay_stats_t;
47415-struct pac_decay_stats_s {
47416-	/* Total number of purge sweeps. */
47417-	locked_u64_t npurge;
47418-	/* Total number of madvise calls made. */
47419-	locked_u64_t nmadvise;
47420-	/* Total number of pages purged. */
47421-	locked_u64_t purged;
47422-};
47423-
47424-typedef struct pac_estats_s pac_estats_t;
47425-struct pac_estats_s {
47426-	/*
47427-	 * Stats for a given index in the range [0, SC_NPSIZES] in the various
47428-	 * ecache_ts.
47429-	 * We track both bytes and # of extents: two extents in the same bucket
47430-	 * may have different sizes if adjacent size classes differ by more than
47431-	 * a page, so bytes cannot always be derived from # of extents.
47432-	 */
47433-	size_t ndirty;
47434-	size_t dirty_bytes;
47435-	size_t nmuzzy;
47436-	size_t muzzy_bytes;
47437-	size_t nretained;
47438-	size_t retained_bytes;
47439-};
47440-
47441-typedef struct pac_stats_s pac_stats_t;
47442-struct pac_stats_s {
47443-	pac_decay_stats_t decay_dirty;
47444-	pac_decay_stats_t decay_muzzy;
47445-
47446-	/*
47447-	 * Number of unused virtual memory bytes currently retained.  Retained
47448-	 * bytes are technically mapped (though always decommitted or purged),
47449- * but they are excluded from the mapped statistic (pac_mapped, below).
47450-	 */
47451-	size_t retained; /* Derived. */
47452-
47453-	/*
47454-	 * Number of bytes currently mapped, excluding retained memory (and any
47455-	 * base-allocated memory, which is tracked by the arena stats).
47456-	 *
47457-	 * We name this "pac_mapped" to avoid confusion with the arena_stats
47458-	 * "mapped".
47459-	 */
47460-	atomic_zu_t pac_mapped;
47461-
47462-	/* VM space had to be leaked (undocumented).  Normally 0. */
47463-	atomic_zu_t abandoned_vm;
47464-};
47465-
47466-typedef struct pac_s pac_t;
47467-struct pac_s {
47468-	/*
47469-	 * Must be the first member (we convert it to a PAC given only a
47470-	 * pointer).  The handle to the allocation interface.
47471-	 */
47472-	pai_t pai;
47473-	/*
47474-	 * Collections of extents that were previously allocated.  These are
47475-	 * used when allocating extents, in an attempt to re-use address space.
47476-	 *
47477-	 * Synchronization: internal.
47478-	 */
47479-	ecache_t ecache_dirty;
47480-	ecache_t ecache_muzzy;
47481-	ecache_t ecache_retained;
47482-
47483-	base_t *base;
47484-	emap_t *emap;
47485-	edata_cache_t *edata_cache;
47486-
47487-	/* The grow info for the retained ecache. */
47488-	exp_grow_t exp_grow;
47489-	malloc_mutex_t grow_mtx;
47490-
47491-	/* Special allocator for guarded frequently reused extents. */
47492-	san_bump_alloc_t sba;
47493-
47494-	/* How large extents should be before getting auto-purged. */
47495-	atomic_zu_t oversize_threshold;
47496-
47497-	/*
47498-	 * Decay-based purging state, responsible for scheduling extent state
47499-	 * transitions.
47500-	 *
47501-	 * Synchronization: via the internal mutex.
47502-	 */
47503-	decay_t decay_dirty; /* dirty --> muzzy */
47504-	decay_t decay_muzzy; /* muzzy --> retained */
47505-
47506-	malloc_mutex_t *stats_mtx;
47507-	pac_stats_t *stats;
47508-
47509-	/* Extent serial number generator state. */
47510-	atomic_zu_t extent_sn_next;
47511-};
47512-
47513-bool pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
47514-    edata_cache_t *edata_cache, nstime_t *cur_time, size_t oversize_threshold,
47515-    ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats,
47516-    malloc_mutex_t *stats_mtx);
47517-
47518-static inline size_t
47519-pac_mapped(pac_t *pac) {
47520-	return atomic_load_zu(&pac->stats->pac_mapped, ATOMIC_RELAXED);
47521-}
47522-
47523-static inline ehooks_t *
47524-pac_ehooks_get(pac_t *pac) {
47525-	return base_ehooks_get(pac->base);
47526-}
47527-
47528-/*
47529- * All purging functions require holding decay->mtx.  This is one of the few
47530- * places external modules are allowed to peek inside pa_shard_t internals.
47531- */
47532-
47533-/*
47534- * Decays the number of pages currently in the ecache.  This might not leave the
47535- * ecache empty if other threads are inserting dirty objects into it
47536- * concurrently with the call.
47537- */
47538-void pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
47539-    pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay);
47540-/*
47541- * Updates decay settings for the current time, and conditionally purges in
47542- * response (depending on the eagerness setting).  Returns whether or not the
47543- * epoch advanced.
47544- */
47545-bool pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
47546-    pac_decay_stats_t *decay_stats, ecache_t *ecache,
47547-    pac_purge_eagerness_t eagerness);
47548-
47549-/*
47550- * Gets / sets the maximum amount that we'll grow an arena down the
47551- * grow-retained pathways (unless forced to by an allocation request).
47552- *
47553- * Set new_limit to NULL if it's just a query, or old_limit to NULL if you don't
47554- * care about the previous value.
47555- *
47556- * Returns true on error (if the new limit is not valid).
47557- */
47558-bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
47559-    size_t *new_limit);
47560-
47561-bool pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
47562-    ssize_t decay_ms, pac_purge_eagerness_t eagerness);
47563-ssize_t pac_decay_ms_get(pac_t *pac, extent_state_t state);
47564-
47565-void pac_reset(tsdn_t *tsdn, pac_t *pac);
47566-void pac_destroy(tsdn_t *tsdn, pac_t *pac);
47567-
47568-#endif /* JEMALLOC_INTERNAL_PAC_H */
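As the decay_dirty / decay_muzzy comments above indicate, the classic page allocator moves
extents dirty --> muzzy --> retained, counting purge sweeps, madvise-style calls, and purged
pages along the way.  The toy model below uses invented names (it is not jemalloc code) and
assumes one madvise-style call per sweep; it is only a minimal sketch of that bookkeeping.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Counters mirroring the shape of pac_decay_stats_s. */
typedef struct toy_decay_stats_s {
	uint64_t npurge;   /* Purge sweeps. */
	uint64_t nmadvise; /* madvise-style calls (pretend one per sweep). */
	uint64_t purged;   /* Pages moved out of the source state. */
} toy_decay_stats_t;

/* One sweep: move every page from *src to *dst and record stats. */
static void
toy_decay_sweep(size_t *src, size_t *dst, toy_decay_stats_t *stats) {
	size_t n = *src;
	*src = 0;
	*dst += n;
	stats->npurge++;
	stats->nmadvise++;
	stats->purged += n;
}

int
main(void) {
	size_t ndirty = 100, nmuzzy = 0, nretained = 0;
	toy_decay_stats_t decay_dirty = {0}, decay_muzzy = {0};

	/* dirty --> muzzy, then muzzy --> retained, as in pac_t's two decay_t's. */
	toy_decay_sweep(&ndirty, &nmuzzy, &decay_dirty);
	toy_decay_sweep(&nmuzzy, &nretained, &decay_muzzy);

	printf("dirty=%zu muzzy=%zu retained=%zu purged(dirty)=%llu\n",
	    ndirty, nmuzzy, nretained, (unsigned long long)decay_dirty.purged);
	return 0;
}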
47569diff --git a/jemalloc/include/jemalloc/internal/pages.h b/jemalloc/include/jemalloc/internal/pages.h
47570deleted file mode 100644
47571index ad1f606..0000000
47572--- a/jemalloc/include/jemalloc/internal/pages.h
47573+++ /dev/null
47574@@ -1,119 +0,0 @@
47575-#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H
47576-#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H
47577-
47578-/* Page size.  LG_PAGE is determined by the configure script. */
47579-#ifdef PAGE_MASK
47580-#  undef PAGE_MASK
47581-#endif
47582-#define PAGE		((size_t)(1U << LG_PAGE))
47583-#define PAGE_MASK	((size_t)(PAGE - 1))
47584-/* Return the page base address for the page containing address a. */
47585-#define PAGE_ADDR2BASE(a)						\
47586-	((void *)((uintptr_t)(a) & ~PAGE_MASK))
47587-/* Return the smallest pagesize multiple that is >= s. */
47588-#define PAGE_CEILING(s)							\
47589-	(((s) + PAGE_MASK) & ~PAGE_MASK)
47590-/* Return the largest pagesize multiple that is <= s. */
47591-#define PAGE_FLOOR(s) 							\
47592-	((s) & ~PAGE_MASK)
47593-
47594-/* Huge page size.  LG_HUGEPAGE is determined by the configure script. */
47595-#define HUGEPAGE	((size_t)(1U << LG_HUGEPAGE))
47596-#define HUGEPAGE_MASK	((size_t)(HUGEPAGE - 1))
47597-
47598-#if LG_HUGEPAGE != 0
47599-#  define HUGEPAGE_PAGES (HUGEPAGE / PAGE)
47600-#else
47601-/*
47602- * It's convenient to define arrays (or bitmaps) of HUGEPAGE_PAGES lengths.  If
47603- * we can't autodetect the hugepage size, it gets treated as 0, in which case
47604- * we'll trigger a compiler error in those arrays.  Avoid this case by ensuring
47605- * that this value is at least 1.  (We won't ever run in this degraded state;
47606- * hpa_supported() returns false in this case.)
47607- */
47608-#  define HUGEPAGE_PAGES 1
47609-#endif
47610-
47611-/* Return the huge page base address for the huge page containing address a. */
47612-#define HUGEPAGE_ADDR2BASE(a)						\
47613-	((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
47614-/* Return the smallest huge page size multiple that is >= s. */
47615-#define HUGEPAGE_CEILING(s)						\
47616-	(((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK)
47617-
47618-/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */
47619-#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE)
47620-#  define PAGES_CAN_PURGE_LAZY
47621-#endif
47622-/*
47623- * PAGES_CAN_PURGE_FORCED is defined if forced purging is supported.
47624- *
47625- * The only supported way to hard-purge on Windows is to decommit and then
47626- * re-commit, but doing so is racy, and if re-commit fails it's a pain to
47627- * propagate the "poisoned" memory state.  Since we typically decommit as the
47628- * next step after purging on Windows anyway, there's no point in adding such
47629- * complexity.
47630- */
47631-#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
47632-    defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \
47633-    defined(JEMALLOC_MAPS_COALESCE))
47634-#  define PAGES_CAN_PURGE_FORCED
47635-#endif
47636-
47637-static const bool pages_can_purge_lazy =
47638-#ifdef PAGES_CAN_PURGE_LAZY
47639-    true
47640-#else
47641-    false
47642-#endif
47643-    ;
47644-static const bool pages_can_purge_forced =
47645-#ifdef PAGES_CAN_PURGE_FORCED
47646-    true
47647-#else
47648-    false
47649-#endif
47650-    ;
47651-
47652-#if defined(JEMALLOC_HAVE_MADVISE_HUGE) || defined(JEMALLOC_HAVE_MEMCNTL)
47653-#  define PAGES_CAN_HUGIFY
47654-#endif
47655-
47656-static const bool pages_can_hugify =
47657-#ifdef PAGES_CAN_HUGIFY
47658-    true
47659-#else
47660-    false
47661-#endif
47662-    ;
47663-
47664-typedef enum {
47665-	thp_mode_default       = 0, /* Do not change hugepage settings. */
47666-	thp_mode_always        = 1, /* Always set MADV_HUGEPAGE. */
47667-	thp_mode_never         = 2, /* Always set MADV_NOHUGEPAGE. */
47668-
47669-	thp_mode_names_limit   = 3, /* Used for option processing. */
47670-	thp_mode_not_supported = 3  /* No THP support detected. */
47671-} thp_mode_t;
47672-
47673-#define THP_MODE_DEFAULT thp_mode_default
47674-extern thp_mode_t opt_thp;
47675-extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */
47676-extern const char *thp_mode_names[];
47677-
47678-void *pages_map(void *addr, size_t size, size_t alignment, bool *commit);
47679-void pages_unmap(void *addr, size_t size);
47680-bool pages_commit(void *addr, size_t size);
47681-bool pages_decommit(void *addr, size_t size);
47682-bool pages_purge_lazy(void *addr, size_t size);
47683-bool pages_purge_forced(void *addr, size_t size);
47684-bool pages_huge(void *addr, size_t size);
47685-bool pages_nohuge(void *addr, size_t size);
47686-bool pages_dontdump(void *addr, size_t size);
47687-bool pages_dodump(void *addr, size_t size);
47688-bool pages_boot(void);
47689-void pages_set_thp_state(void *ptr, size_t size);
47690-void pages_mark_guards(void *head, void *tail);
47691-void pages_unmark_guards(void *head, void *tail);
47692-
47693-#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
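The PAGE_* macros above are plain mask arithmetic.  The standalone sketch below assumes
LG_PAGE == 12 (4 KiB pages; in a real build LG_PAGE comes from the configure script) and shows
what PAGE_CEILING, PAGE_FLOOR, and PAGE_ADDR2BASE compute.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Assume 4 KiB pages; in jemalloc LG_PAGE is set by the configure script. */
#define LG_PAGE		12
#define PAGE		((size_t)(1U << LG_PAGE))
#define PAGE_MASK	((size_t)(PAGE - 1))
#define PAGE_ADDR2BASE(a)	((void *)((uintptr_t)(a) & ~PAGE_MASK))
#define PAGE_CEILING(s)		(((s) + PAGE_MASK) & ~PAGE_MASK)
#define PAGE_FLOOR(s)		((s) & ~PAGE_MASK)

int
main(void) {
	size_t s = 5000;
	printf("PAGE_CEILING(%zu) = %zu\n", s, PAGE_CEILING(s)); /* 8192 */
	printf("PAGE_FLOOR(%zu)   = %zu\n", s, PAGE_FLOOR(s));   /* 4096 */

	char buf[2 * 4096];
	void *base = PAGE_ADDR2BASE(&buf[4097]);
	printf("addr %p rounds down to page base %p\n", (void *)&buf[4097], base);
	return 0;
}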
47694diff --git a/jemalloc/include/jemalloc/internal/pai.h b/jemalloc/include/jemalloc/internal/pai.h
47695deleted file mode 100644
47696index d978cd7..0000000
47697--- a/jemalloc/include/jemalloc/internal/pai.h
47698+++ /dev/null
47699@@ -1,95 +0,0 @@
47700-#ifndef JEMALLOC_INTERNAL_PAI_H
47701-#define JEMALLOC_INTERNAL_PAI_H
47702-
47703-/* An interface for page allocation. */
47704-
47705-typedef struct pai_s pai_t;
47706-struct pai_s {
47707-	/* Returns NULL on failure. */
47708-	edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size,
47709-	    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
47710-	    bool *deferred_work_generated);
47711-	/*
47712-	 * Returns the number of extents added to the list (which may be fewer
47713-	 * than requested, in case of OOM).  The list should already be
47714-	 * initialized.  The only alignment guarantee is page-alignment, and
47715-	 * the results are not necessarily zeroed.
47716-	 */
47717-	size_t (*alloc_batch)(tsdn_t *tsdn, pai_t *self, size_t size,
47718-	    size_t nallocs, edata_list_active_t *results,
47719-	    bool *deferred_work_generated);
47720-	bool (*expand)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
47721-	    size_t old_size, size_t new_size, bool zero,
47722-	    bool *deferred_work_generated);
47723-	bool (*shrink)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
47724-	    size_t old_size, size_t new_size, bool *deferred_work_generated);
47725-	void (*dalloc)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
47726-	    bool *deferred_work_generated);
47727-	/* This function empties out the list as a side-effect of being called. */
47728-	void (*dalloc_batch)(tsdn_t *tsdn, pai_t *self,
47729-	    edata_list_active_t *list, bool *deferred_work_generated);
47730-	uint64_t (*time_until_deferred_work)(tsdn_t *tsdn, pai_t *self);
47731-};
47732-
47733-/*
47734- * These are just simple convenience functions to avoid having to reference the
47735- * same pai_t twice on every invocation.
47736- */
47737-
47738-static inline edata_t *
47739-pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
47740-    bool zero, bool guarded, bool frequent_reuse,
47741-    bool *deferred_work_generated) {
47742-	return self->alloc(tsdn, self, size, alignment, zero, guarded,
47743-	    frequent_reuse, deferred_work_generated);
47744-}
47745-
47746-static inline size_t
47747-pai_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
47748-    edata_list_active_t *results, bool *deferred_work_generated) {
47749-	return self->alloc_batch(tsdn, self, size, nallocs, results,
47750-	    deferred_work_generated);
47751-}
47752-
47753-static inline bool
47754-pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
47755-    size_t new_size, bool zero, bool *deferred_work_generated) {
47756-	return self->expand(tsdn, self, edata, old_size, new_size, zero,
47757-	    deferred_work_generated);
47758-}
47759-
47760-static inline bool
47761-pai_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
47762-    size_t new_size, bool *deferred_work_generated) {
47763-	return self->shrink(tsdn, self, edata, old_size, new_size,
47764-	    deferred_work_generated);
47765-}
47766-
47767-static inline void
47768-pai_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
47769-    bool *deferred_work_generated) {
47770-	self->dalloc(tsdn, self, edata, deferred_work_generated);
47771-}
47772-
47773-static inline void
47774-pai_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
47775-    bool *deferred_work_generated) {
47776-	self->dalloc_batch(tsdn, self, list, deferred_work_generated);
47777-}
47778-
47779-static inline uint64_t
47780-pai_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
47781-	return self->time_until_deferred_work(tsdn, self);
47782-}
47783-
47784-/*
47785- * An implementation of batch allocation that simply calls alloc once per
47786- * requested allocation, appending each result to the list.
47787- */
47788-size_t pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size,
47789-    size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated);
47790-/* Ditto, for dalloc. */
47791-void pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self,
47792-    edata_list_active_t *list, bool *deferred_work_generated);
47793-
47794-#endif /* JEMALLOC_INTERNAL_PAI_H */
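pai_t is a small vtable: each operation takes the interface object itself as its first argument,
and the default batch allocation just loops over the single-item alloc.  The sketch below is an
illustrative analogue with invented, simplified types (toy_pai_t and friends); it is not
jemalloc's actual interface, only the same shape.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the page-allocator-interface vtable. */
typedef struct toy_pai_s toy_pai_t;
struct toy_pai_s {
	void *(*alloc)(toy_pai_t *self, size_t size);
	void (*dalloc)(toy_pai_t *self, void *ptr);
};

/* Convenience wrappers, in the spirit of pai_alloc()/pai_dalloc(). */
static inline void *
toy_pai_alloc(toy_pai_t *self, size_t size) {
	return self->alloc(self, size);
}

static inline void
toy_pai_dalloc(toy_pai_t *self, void *ptr) {
	self->dalloc(self, ptr);
}

/* Default batch allocation: call alloc once per requested item. */
static size_t
toy_pai_alloc_batch_default(toy_pai_t *self, size_t size, size_t nallocs,
    void **results) {
	size_t i;
	for (i = 0; i < nallocs; i++) {
		results[i] = toy_pai_alloc(self, size);
		if (results[i] == NULL) {
			break; /* Return fewer than requested on OOM. */
		}
	}
	return i;
}

/* A trivial malloc-backed implementation of the interface. */
static void *
impl_alloc(toy_pai_t *self, size_t size) {
	(void)self;
	return malloc(size);
}

static void
impl_dalloc(toy_pai_t *self, void *ptr) {
	(void)self;
	free(ptr);
}

int
main(void) {
	toy_pai_t pai = {impl_alloc, impl_dalloc};
	void *results[4];
	size_t n = toy_pai_alloc_batch_default(&pai, 64, 4, results);

	printf("allocated %zu of 4\n", n);
	for (size_t i = 0; i < n; i++) {
		toy_pai_dalloc(&pai, results[i]);
	}
	return 0;
}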
47795diff --git a/jemalloc/include/jemalloc/internal/peak.h b/jemalloc/include/jemalloc/internal/peak.h
47796deleted file mode 100644
47797index 59da3e4..0000000
47798--- a/jemalloc/include/jemalloc/internal/peak.h
47799+++ /dev/null
47800@@ -1,37 +0,0 @@
47801-#ifndef JEMALLOC_INTERNAL_PEAK_H
47802-#define JEMALLOC_INTERNAL_PEAK_H
47803-
47804-typedef struct peak_s peak_t;
47805-struct peak_s {
47806-	/* The highest recorded peak value, after adjustment (see below). */
47807-	uint64_t cur_max;
47808-	/*
47809-	 * The difference between alloc and dalloc at the last set_zero call;
47810-	 * this lets us cancel out the appropriate amount of excess.
47811-	 */
47812-	uint64_t adjustment;
47813-};
47814-
47815-#define PEAK_INITIALIZER {0, 0}
47816-
47817-static inline uint64_t
47818-peak_max(peak_t *peak) {
47819-	return peak->cur_max;
47820-}
47821-
47822-static inline void
47823-peak_update(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
47824-	int64_t candidate_max = (int64_t)(alloc - dalloc - peak->adjustment);
47825-	if (candidate_max > (int64_t)peak->cur_max) {
47826-		peak->cur_max = candidate_max;
47827-	}
47828-}
47829-
47830-/* Resets the counter to zero; all peaks are now relative to this point. */
47831-static inline void
47832-peak_set_zero(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
47833-	peak->cur_max = 0;
47834-	peak->adjustment = alloc - dalloc;
47835-}
47836-
47837-#endif /* JEMALLOC_INTERNAL_PEAK_H */
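peak_update() and peak_set_zero() above are pure arithmetic on two counters, so their behavior
is easy to trace in isolation.  The standalone restatement below keeps the same logic with
JEMALLOC_ALWAYS_INLINE replaced by static inline; the alloc/dalloc totals are made up.

#include <stdint.h>
#include <stdio.h>

typedef struct peak_s {
	uint64_t cur_max;
	uint64_t adjustment;
} peak_t;

#define PEAK_INITIALIZER {0, 0}

static inline void
peak_update(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
	int64_t candidate_max = (int64_t)(alloc - dalloc - peak->adjustment);
	if (candidate_max > (int64_t)peak->cur_max) {
		peak->cur_max = (uint64_t)candidate_max;
	}
}

static inline void
peak_set_zero(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
	peak->cur_max = 0;
	peak->adjustment = alloc - dalloc;
}

int
main(void) {
	peak_t peak = PEAK_INITIALIZER;

	peak_update(&peak, 1000, 200);  /* 800 bytes live -> peak 800. */
	peak_update(&peak, 1500, 1000); /* 500 bytes live -> peak stays 800. */
	printf("peak = %llu\n", (unsigned long long)peak.cur_max);

	/* Reset: future peaks are measured relative to the current 500 live. */
	peak_set_zero(&peak, 1500, 1000);
	peak_update(&peak, 1800, 1000); /* 800 live, 500 cancelled -> peak 300. */
	printf("peak after reset = %llu\n", (unsigned long long)peak.cur_max);
	return 0;
}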
47838diff --git a/jemalloc/include/jemalloc/internal/peak_event.h b/jemalloc/include/jemalloc/internal/peak_event.h
47839deleted file mode 100644
47840index b808ce0..0000000
47841--- a/jemalloc/include/jemalloc/internal/peak_event.h
47842+++ /dev/null
47843@@ -1,24 +0,0 @@
47844-#ifndef JEMALLOC_INTERNAL_PEAK_EVENT_H
47845-#define JEMALLOC_INTERNAL_PEAK_EVENT_H
47846-
47847-/*
47848- * While peak.h contains the simple helper struct that tracks state, this
47849- * contains the allocator tie-ins (and knows about tsd, the event module, etc.).
47850- */
47851-
47852-/* Update the peak with current tsd state. */
47853-void peak_event_update(tsd_t *tsd);
47854-/* Set current state to zero. */
47855-void peak_event_zero(tsd_t *tsd);
47856-uint64_t peak_event_max(tsd_t *tsd);
47857-
47858-/* Manual hooks. */
47859-/* The activity-triggered hooks. */
47860-uint64_t peak_alloc_new_event_wait(tsd_t *tsd);
47861-uint64_t peak_alloc_postponed_event_wait(tsd_t *tsd);
47862-void peak_alloc_event_handler(tsd_t *tsd, uint64_t elapsed);
47863-uint64_t peak_dalloc_new_event_wait(tsd_t *tsd);
47864-uint64_t peak_dalloc_postponed_event_wait(tsd_t *tsd);
47865-void peak_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed);
47866-
47867-#endif /* JEMALLOC_INTERNAL_PEAK_EVENT_H */
47868diff --git a/jemalloc/include/jemalloc/internal/ph.h b/jemalloc/include/jemalloc/internal/ph.h
47869deleted file mode 100644
47870index 5f091c5..0000000
47871--- a/jemalloc/include/jemalloc/internal/ph.h
47872+++ /dev/null
47873@@ -1,520 +0,0 @@
47874-#ifndef JEMALLOC_INTERNAL_PH_H
47875-#define JEMALLOC_INTERNAL_PH_H
47876-
47877-/*
47878- * A Pairing Heap implementation.
47879- *
47880- * "The Pairing Heap: A New Form of Self-Adjusting Heap"
47881- * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
47882- *
47883- * With an auxiliary two-pass list, described in a follow-on paper.
47884- *
47885- * "Pairing Heaps: Experiments and Analysis"
47886- * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
47887- *
47888- *******************************************************************************
47889- *
47890- * We include a non-obvious optimization:
47891- * - First, we introduce a new pop-and-link operation; pop the two most
47892- *   recently-inserted items off the aux-list, link them, and push the resulting
47893- *   heap.
47894- * - We maintain a count of the number of insertions since the last time we
47895- *   merged the aux-list (i.e. via first() or remove_first()).  After N inserts,
47896- *   we do ffs(N) pop-and-link operations.
47897- *
47898- * One way to think of this is that we're progressively building up a tree in
47899- * the aux-list, rather than a linked-list (think of the series of merges that
47900- * will be performed as the aux-count grows).
47901- *
47902- * There are a couple of reasons we benefit from this:
47903- * - Ordinarily, after N insertions, the aux-list is of size N.  With our
47904- *   strategy, it's of size O(log(N)).  So we decrease the worst-case time of
47905- *   first() calls, and reduce the average cost of remove_first calls.  Since
47906- *   these almost always occur while holding a lock, we practically reduce the
47907- *   frequency of unusually long hold times.
47908- * - This moves the bulk of the work of merging the aux-list onto the threads
47909- *   that are inserting into the heap.  In some common scenarios, insertions
47910- *   happen in bulk, from a single thread (think tcache flushing; we potentially
47911- *   move many slabs from slabs_full to slabs_nonfull).  All the nodes in this
47912- *   case are in the inserting thread's cache, and linking them is very cheap
47913- *   (cache misses dominate linking cost).  Without this optimization, linking
47914- *   happens on the next call to remove_first.  Since that remove_first call
47915- *   likely happens on a different thread (or at least, after the cache has
47916- *   gotten cold if done on the same thread), deferring linking trades cheap
47917- *   link operations now for expensive ones later.
47918- *
47919- * The ffs trick keeps amortized insert cost at constant time.  Similar
47920- * strategies based on periodically sorting the list after a batch of operations
47921- * perform worse than this in practice, even with various fancy tricks; they
47922- * all raised the amortized complexity of an insert from O(1) to O(log(n)).
47923- */
47924-
47925-typedef int (*ph_cmp_t)(void *, void *);
47926-
47927-/* Node structure. */
47928-typedef struct phn_link_s phn_link_t;
47929-struct phn_link_s {
47930-	void *prev;
47931-	void *next;
47932-	void *lchild;
47933-};
47934-
47935-typedef struct ph_s ph_t;
47936-struct ph_s {
47937-	void *root;
47938-	/*
47939-	 * Inserts done since the last aux-list merge.  This is not necessarily
47940-	 * the size of the aux-list, since it's possible that removals have
47941-	 * happened since, and we don't track whether or not those removals are
47942-	 * from the aux list.
47943-	 */
47944-	size_t auxcount;
47945-};
47946-
47947-JEMALLOC_ALWAYS_INLINE phn_link_t *
47948-phn_link_get(void *phn, size_t offset) {
47949-	return (phn_link_t *)(((uintptr_t)phn) + offset);
47950-}
47951-
47952-JEMALLOC_ALWAYS_INLINE void
47953-phn_link_init(void *phn, size_t offset) {
47954-	phn_link_get(phn, offset)->prev = NULL;
47955-	phn_link_get(phn, offset)->next = NULL;
47956-	phn_link_get(phn, offset)->lchild = NULL;
47957-}
47958-
47959-/* Internal utility helpers. */
47960-JEMALLOC_ALWAYS_INLINE void *
47961-phn_lchild_get(void *phn, size_t offset) {
47962-	return phn_link_get(phn, offset)->lchild;
47963-}
47964-
47965-JEMALLOC_ALWAYS_INLINE void
47966-phn_lchild_set(void *phn, void *lchild, size_t offset) {
47967-	phn_link_get(phn, offset)->lchild = lchild;
47968-}
47969-
47970-JEMALLOC_ALWAYS_INLINE void *
47971-phn_next_get(void *phn, size_t offset) {
47972-	return phn_link_get(phn, offset)->next;
47973-}
47974-
47975-JEMALLOC_ALWAYS_INLINE void
47976-phn_next_set(void *phn, void *next, size_t offset) {
47977-	phn_link_get(phn, offset)->next = next;
47978-}
47979-
47980-JEMALLOC_ALWAYS_INLINE void *
47981-phn_prev_get(void *phn, size_t offset) {
47982-	return phn_link_get(phn, offset)->prev;
47983-}
47984-
47985-JEMALLOC_ALWAYS_INLINE void
47986-phn_prev_set(void *phn, void *prev, size_t offset) {
47987-	phn_link_get(phn, offset)->prev = prev;
47988-}
47989-
47990-JEMALLOC_ALWAYS_INLINE void
47991-phn_merge_ordered(void *phn0, void *phn1, size_t offset,
47992-    ph_cmp_t cmp) {
47993-	void *phn0child;
47994-
47995-	assert(phn0 != NULL);
47996-	assert(phn1 != NULL);
47997-	assert(cmp(phn0, phn1) <= 0);
47998-
47999-	phn_prev_set(phn1, phn0, offset);
48000-	phn0child = phn_lchild_get(phn0, offset);
48001-	phn_next_set(phn1, phn0child, offset);
48002-	if (phn0child != NULL) {
48003-		phn_prev_set(phn0child, phn1, offset);
48004-	}
48005-	phn_lchild_set(phn0, phn1, offset);
48006-}
48007-
48008-JEMALLOC_ALWAYS_INLINE void *
48009-phn_merge(void *phn0, void *phn1, size_t offset, ph_cmp_t cmp) {
48010-	void *result;
48011-	if (phn0 == NULL) {
48012-		result = phn1;
48013-	} else if (phn1 == NULL) {
48014-		result = phn0;
48015-	} else if (cmp(phn0, phn1) < 0) {
48016-		phn_merge_ordered(phn0, phn1, offset, cmp);
48017-		result = phn0;
48018-	} else {
48019-		phn_merge_ordered(phn1, phn0, offset, cmp);
48020-		result = phn1;
48021-	}
48022-	return result;
48023-}
48024-
48025-JEMALLOC_ALWAYS_INLINE void *
48026-phn_merge_siblings(void *phn, size_t offset, ph_cmp_t cmp) {
48027-	void *head = NULL;
48028-	void *tail = NULL;
48029-	void *phn0 = phn;
48030-	void *phn1 = phn_next_get(phn0, offset);
48031-
48032-	/*
48033-	 * Multipass merge, wherein the first two elements of a FIFO
48034-	 * are repeatedly merged, and each result is appended to the
48035-	 * singly linked FIFO, until the FIFO contains only a single
48036-	 * element.  We start with a sibling list but no reference to
48037-	 * its tail, so we do a single pass over the sibling list to
48038-	 * populate the FIFO.
48039-	 */
48040-	if (phn1 != NULL) {
48041-		void *phnrest = phn_next_get(phn1, offset);
48042-		if (phnrest != NULL) {
48043-			phn_prev_set(phnrest, NULL, offset);
48044-		}
48045-		phn_prev_set(phn0, NULL, offset);
48046-		phn_next_set(phn0, NULL, offset);
48047-		phn_prev_set(phn1, NULL, offset);
48048-		phn_next_set(phn1, NULL, offset);
48049-		phn0 = phn_merge(phn0, phn1, offset, cmp);
48050-		head = tail = phn0;
48051-		phn0 = phnrest;
48052-		while (phn0 != NULL) {
48053-			phn1 = phn_next_get(phn0, offset);
48054-			if (phn1 != NULL) {
48055-				phnrest = phn_next_get(phn1, offset);
48056-				if (phnrest != NULL) {
48057-					phn_prev_set(phnrest, NULL, offset);
48058-				}
48059-				phn_prev_set(phn0, NULL, offset);
48060-				phn_next_set(phn0, NULL, offset);
48061-				phn_prev_set(phn1, NULL, offset);
48062-				phn_next_set(phn1, NULL, offset);
48063-				phn0 = phn_merge(phn0, phn1, offset, cmp);
48064-				phn_next_set(tail, phn0, offset);
48065-				tail = phn0;
48066-				phn0 = phnrest;
48067-			} else {
48068-				phn_next_set(tail, phn0, offset);
48069-				tail = phn0;
48070-				phn0 = NULL;
48071-			}
48072-		}
48073-		phn0 = head;
48074-		phn1 = phn_next_get(phn0, offset);
48075-		if (phn1 != NULL) {
48076-			while (true) {
48077-				head = phn_next_get(phn1, offset);
48078-				assert(phn_prev_get(phn0, offset) == NULL);
48079-				phn_next_set(phn0, NULL, offset);
48080-				assert(phn_prev_get(phn1, offset) == NULL);
48081-				phn_next_set(phn1, NULL, offset);
48082-				phn0 = phn_merge(phn0, phn1, offset, cmp);
48083-				if (head == NULL) {
48084-					break;
48085-				}
48086-				phn_next_set(tail, phn0, offset);
48087-				tail = phn0;
48088-				phn0 = head;
48089-				phn1 = phn_next_get(phn0, offset);
48090-			}
48091-		}
48092-	}
48093-	return phn0;
48094-}
48095-
48096-JEMALLOC_ALWAYS_INLINE void
48097-ph_merge_aux(ph_t *ph, size_t offset, ph_cmp_t cmp) {
48098-	ph->auxcount = 0;
48099-	void *phn = phn_next_get(ph->root, offset);
48100-	if (phn != NULL) {
48101-		phn_prev_set(ph->root, NULL, offset);
48102-		phn_next_set(ph->root, NULL, offset);
48103-		phn_prev_set(phn, NULL, offset);
48104-		phn = phn_merge_siblings(phn, offset, cmp);
48105-		assert(phn_next_get(phn, offset) == NULL);
48106-		ph->root = phn_merge(ph->root, phn, offset, cmp);
48107-	}
48108-}
48109-
48110-JEMALLOC_ALWAYS_INLINE void *
48111-ph_merge_children(void *phn, size_t offset, ph_cmp_t cmp) {
48112-	void *result;
48113-	void *lchild = phn_lchild_get(phn, offset);
48114-	if (lchild == NULL) {
48115-		result = NULL;
48116-	} else {
48117-		result = phn_merge_siblings(lchild, offset, cmp);
48118-	}
48119-	return result;
48120-}
48121-
48122-JEMALLOC_ALWAYS_INLINE void
48123-ph_new(ph_t *ph) {
48124-	ph->root = NULL;
48125-	ph->auxcount = 0;
48126-}
48127-
48128-JEMALLOC_ALWAYS_INLINE bool
48129-ph_empty(ph_t *ph) {
48130-	return ph->root == NULL;
48131-}
48132-
48133-JEMALLOC_ALWAYS_INLINE void *
48134-ph_first(ph_t *ph, size_t offset, ph_cmp_t cmp) {
48135-	if (ph->root == NULL) {
48136-		return NULL;
48137-	}
48138-	ph_merge_aux(ph, offset, cmp);
48139-	return ph->root;
48140-}
48141-
48142-JEMALLOC_ALWAYS_INLINE void *
48143-ph_any(ph_t *ph, size_t offset) {
48144-	if (ph->root == NULL) {
48145-		return NULL;
48146-	}
48147-	void *aux = phn_next_get(ph->root, offset);
48148-	if (aux != NULL) {
48149-		return aux;
48150-	}
48151-	return ph->root;
48152-}
48153-
48154-/* Returns true if we should stop trying to merge. */
48155-JEMALLOC_ALWAYS_INLINE bool
48156-ph_try_aux_merge_pair(ph_t *ph, size_t offset, ph_cmp_t cmp) {
48157-	assert(ph->root != NULL);
48158-	void *phn0 = phn_next_get(ph->root, offset);
48159-	if (phn0 == NULL) {
48160-		return true;
48161-	}
48162-	void *phn1 = phn_next_get(phn0, offset);
48163-	if (phn1 == NULL) {
48164-		return true;
48165-	}
48166-	void *next_phn1 = phn_next_get(phn1, offset);
48167-	phn_next_set(phn0, NULL, offset);
48168-	phn_prev_set(phn0, NULL, offset);
48169-	phn_next_set(phn1, NULL, offset);
48170-	phn_prev_set(phn1, NULL, offset);
48171-	phn0 = phn_merge(phn0, phn1, offset, cmp);
48172-	phn_next_set(phn0, next_phn1, offset);
48173-	if (next_phn1 != NULL) {
48174-		phn_prev_set(next_phn1, phn0, offset);
48175-	}
48176-	phn_next_set(ph->root, phn0, offset);
48177-	phn_prev_set(phn0, ph->root, offset);
48178-	return next_phn1 == NULL;
48179-}
48180-
48181-JEMALLOC_ALWAYS_INLINE void
48182-ph_insert(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) {
48183-	phn_link_init(phn, offset);
48184-
48185-	/*
48186-	 * Treat the root as an aux list during insertion, and lazily merge
48187-	 * during a_prefix##remove_first().  For elements that are inserted,
48188-	 * then removed via a_prefix##remove() before the aux list is ever
48189-	 * processed, this makes insert/remove constant-time, whereas eager
48190-	 * merging would make insert O(log n).
48191-	 */
48192-	if (ph->root == NULL) {
48193-		ph->root = phn;
48194-	} else {
48195-		/*
48196-		 * As a special case, check to see if we can replace the root.
48197-		 * This is common in practice in some important cases, and lets
48198-		 * us defer some insertions (hopefully, until the point where
48199-		 * some of the items in the aux list have been removed, saving
48200-		 * us from linking them at all).
48201-		 */
48202-		if (cmp(phn, ph->root) < 0) {
48203-			phn_lchild_set(phn, ph->root, offset);
48204-			phn_prev_set(ph->root, phn, offset);
48205-			ph->root = phn;
48206-			ph->auxcount = 0;
48207-			return;
48208-		}
48209-		ph->auxcount++;
48210-		phn_next_set(phn, phn_next_get(ph->root, offset), offset);
48211-		if (phn_next_get(ph->root, offset) != NULL) {
48212-			phn_prev_set(phn_next_get(ph->root, offset), phn,
48213-			    offset);
48214-		}
48215-		phn_prev_set(phn, ph->root, offset);
48216-		phn_next_set(ph->root, phn, offset);
48217-	}
48218-	if (ph->auxcount > 1) {
48219-		unsigned nmerges = ffs_zu(ph->auxcount - 1);
48220-		bool done = false;
48221-		for (unsigned i = 0; i < nmerges && !done; i++) {
48222-			done = ph_try_aux_merge_pair(ph, offset, cmp);
48223-		}
48224-	}
48225-}
48226-
48227-JEMALLOC_ALWAYS_INLINE void *
48228-ph_remove_first(ph_t *ph, size_t offset, ph_cmp_t cmp) {
48229-	void *ret;
48230-
48231-	if (ph->root == NULL) {
48232-		return NULL;
48233-	}
48234-	ph_merge_aux(ph, offset, cmp);
48235-	ret = ph->root;
48236-	ph->root = ph_merge_children(ph->root, offset, cmp);
48237-
48238-	return ret;
48239-
48240-}
48241-
48242-JEMALLOC_ALWAYS_INLINE void
48243-ph_remove(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) {
48244-	void *replace;
48245-	void *parent;
48246-
48247-	if (ph->root == phn) {
48248-		/*
48249-		 * We can delete from aux list without merging it, but we need
48250-		 * to merge if we are dealing with the root node and it has
48251-		 * children.
48252-		 */
48253-		if (phn_lchild_get(phn, offset) == NULL) {
48254-			ph->root = phn_next_get(phn, offset);
48255-			if (ph->root != NULL) {
48256-				phn_prev_set(ph->root, NULL, offset);
48257-			}
48258-			return;
48259-		}
48260-		ph_merge_aux(ph, offset, cmp);
48261-		if (ph->root == phn) {
48262-			ph->root = ph_merge_children(ph->root, offset, cmp);
48263-			return;
48264-		}
48265-	}
48266-
48267-	/* Get parent (if phn is leftmost child) before mutating. */
48268-	if ((parent = phn_prev_get(phn, offset)) != NULL) {
48269-		if (phn_lchild_get(parent, offset) != phn) {
48270-			parent = NULL;
48271-		}
48272-	}
48273-	/* Find a possible replacement node, and link to parent. */
48274-	replace = ph_merge_children(phn, offset, cmp);
48275-	/* Set next/prev for sibling linked list. */
48276-	if (replace != NULL) {
48277-		if (parent != NULL) {
48278-			phn_prev_set(replace, parent, offset);
48279-			phn_lchild_set(parent, replace, offset);
48280-		} else {
48281-			phn_prev_set(replace, phn_prev_get(phn, offset),
48282-			    offset);
48283-			if (phn_prev_get(phn, offset) != NULL) {
48284-				phn_next_set(phn_prev_get(phn, offset), replace,
48285-				    offset);
48286-			}
48287-		}
48288-		phn_next_set(replace, phn_next_get(phn, offset), offset);
48289-		if (phn_next_get(phn, offset) != NULL) {
48290-			phn_prev_set(phn_next_get(phn, offset), replace,
48291-			    offset);
48292-		}
48293-	} else {
48294-		if (parent != NULL) {
48295-			void *next = phn_next_get(phn, offset);
48296-			phn_lchild_set(parent, next, offset);
48297-			if (next != NULL) {
48298-				phn_prev_set(next, parent, offset);
48299-			}
48300-		} else {
48301-			assert(phn_prev_get(phn, offset) != NULL);
48302-			phn_next_set(
48303-			    phn_prev_get(phn, offset),
48304-			    phn_next_get(phn, offset), offset);
48305-		}
48306-		if (phn_next_get(phn, offset) != NULL) {
48307-			phn_prev_set(
48308-			    phn_next_get(phn, offset),
48309-			    phn_prev_get(phn, offset), offset);
48310-		}
48311-	}
48312-}
48313-
48314-#define ph_structs(a_prefix, a_type)					\
48315-typedef struct {							\
48316-	phn_link_t link;						\
48317-} a_prefix##_link_t;							\
48318-									\
48319-typedef struct {							\
48320-	ph_t ph;							\
48321-} a_prefix##_t;
48322-
48323-/*
48324- * The ph_proto() macro generates function prototypes that correspond to the
48325- * functions generated by an equivalently parameterized call to ph_gen().
48326- */
48327-#define ph_proto(a_attr, a_prefix, a_type)				\
48328-									\
48329-a_attr void a_prefix##_new(a_prefix##_t *ph);				\
48330-a_attr bool a_prefix##_empty(a_prefix##_t *ph);				\
48331-a_attr a_type *a_prefix##_first(a_prefix##_t *ph);			\
48332-a_attr a_type *a_prefix##_any(a_prefix##_t *ph);			\
48333-a_attr void a_prefix##_insert(a_prefix##_t *ph, a_type *phn);		\
48334-a_attr a_type *a_prefix##_remove_first(a_prefix##_t *ph);		\
48335-a_attr void a_prefix##_remove(a_prefix##_t *ph, a_type *phn);		\
48336-a_attr a_type *a_prefix##_remove_any(a_prefix##_t *ph);
48337-
48338-/* The ph_gen() macro generates a type-specific pairing heap implementation. */
48339-#define ph_gen(a_attr, a_prefix, a_type, a_field, a_cmp)		\
48340-JEMALLOC_ALWAYS_INLINE int						\
48341-a_prefix##_ph_cmp(void *a, void *b) {					\
48342-	return a_cmp((a_type *)a, (a_type *)b);				\
48343-}									\
48344-									\
48345-a_attr void								\
48346-a_prefix##_new(a_prefix##_t *ph) {					\
48347-	ph_new(&ph->ph);						\
48348-}									\
48349-									\
48350-a_attr bool								\
48351-a_prefix##_empty(a_prefix##_t *ph) {					\
48352-	return ph_empty(&ph->ph);					\
48353-}									\
48354-									\
48355-a_attr a_type *								\
48356-a_prefix##_first(a_prefix##_t *ph) {					\
48357-	return ph_first(&ph->ph, offsetof(a_type, a_field),		\
48358-	    &a_prefix##_ph_cmp);					\
48359-}									\
48360-									\
48361-a_attr a_type *								\
48362-a_prefix##_any(a_prefix##_t *ph) {					\
48363-	return ph_any(&ph->ph, offsetof(a_type, a_field));		\
48364-}									\
48365-									\
48366-a_attr void								\
48367-a_prefix##_insert(a_prefix##_t *ph, a_type *phn) {			\
48368-	ph_insert(&ph->ph, phn, offsetof(a_type, a_field),		\
48369-	    a_prefix##_ph_cmp);						\
48370-}									\
48371-									\
48372-a_attr a_type *								\
48373-a_prefix##_remove_first(a_prefix##_t *ph) {				\
48374-	return ph_remove_first(&ph->ph, offsetof(a_type, a_field),	\
48375-	    a_prefix##_ph_cmp);						\
48376-}									\
48377-									\
48378-a_attr void								\
48379-a_prefix##_remove(a_prefix##_t *ph, a_type *phn) {			\
48380-	ph_remove(&ph->ph, phn, offsetof(a_type, a_field),		\
48381-	    a_prefix##_ph_cmp);						\
48382-}									\
48383-									\
48384-a_attr a_type *								\
48385-a_prefix##_remove_any(a_prefix##_t *ph) {				\
48386-	a_type *ret = a_prefix##_any(ph);				\
48387-	if (ret != NULL) {						\
48388-		a_prefix##_remove(ph, ret);				\
48389-	}								\
48390-	return ret;							\
48391-}
48392-
48393-#endif /* JEMALLOC_INTERNAL_PH_H */
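Every ph_* function above takes a size_t offset so that one untyped implementation can locate
the phn_link_t embedded in any node type; the ph_gen() wrappers pass offsetof(a_type, a_field)
for it.  The standalone sketch below shows that offset-based access on its own; node_t and
link_t are invented stand-ins, not jemalloc types.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as phn_link_t. */
typedef struct link_s {
	void *prev;
	void *next;
	void *lchild;
} link_t;

/* An invented node type with the link embedded at some offset. */
typedef struct node_s {
	int key;
	link_t heap_link;
} node_t;

/* Mirrors phn_link_get(): recover the link from an untyped node pointer. */
static inline link_t *
link_get(void *phn, size_t offset) {
	return (link_t *)((uintptr_t)phn + offset);
}

int
main(void) {
	node_t a = {.key = 1}, b = {.key = 2};
	size_t offset = offsetof(node_t, heap_link);

	/* What a_prefix##_insert() effectively passes down to ph_insert(). */
	link_get(&a, offset)->next = &b;
	link_get(&b, offset)->prev = &a;

	node_t *next = (node_t *)link_get(&a, offset)->next;
	printf("a.next->key = %d\n", next->key);
	return 0;
}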
48394diff --git a/jemalloc/include/jemalloc/internal/private_namespace.sh b/jemalloc/include/jemalloc/internal/private_namespace.sh
48395deleted file mode 100755
48396index 6ef1346..0000000
48397--- a/jemalloc/include/jemalloc/internal/private_namespace.sh
48398+++ /dev/null
48399@@ -1,5 +0,0 @@
48400-#!/bin/sh
48401-
48402-for symbol in `cat "$@"` ; do
48403-  echo "#define ${symbol} JEMALLOC_N(${symbol})"
48404-done
48405diff --git a/jemalloc/include/jemalloc/internal/private_symbols.sh b/jemalloc/include/jemalloc/internal/private_symbols.sh
48406deleted file mode 100755
48407index 442a259..0000000
48408--- a/jemalloc/include/jemalloc/internal/private_symbols.sh
48409+++ /dev/null
48410@@ -1,51 +0,0 @@
48411-#!/bin/sh
48412-#
48413-# Generate private_symbols[_jet].awk.
48414-#
48415-# Usage: private_symbols.sh <sym_prefix> <sym>*
48416-#
48417-# <sym_prefix> is typically "" or "_".
48418-
48419-sym_prefix=$1
48420-shift
48421-
48422-cat <<EOF
48423-#!/usr/bin/env awk -f
48424-
48425-BEGIN {
48426-  sym_prefix = "${sym_prefix}"
48427-  split("\\
48428-EOF
48429-
48430-for public_sym in "$@" ; do
48431-  cat <<EOF
48432-        ${sym_prefix}${public_sym} \\
48433-EOF
48434-done
48435-
48436-cat <<"EOF"
48437-        ", exported_symbol_names)
48438-  # Store exported symbol names as keys in exported_symbols.
48439-  for (i in exported_symbol_names) {
48440-    exported_symbols[exported_symbol_names[i]] = 1
48441-  }
48442-}
48443-
48444-# Process 'nm -a <c_source.o>' output.
48445-#
48446-# Handle lines like:
48447-#   0000000000000008 D opt_junk
48448-#   0000000000007574 T malloc_initialized
48449-(NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) && $3 ~ /^[A-Za-z0-9_]+$/) {
48450-  print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix))
48451-}
48452-
48453-# Process 'dumpbin /SYMBOLS <c_source.obj>' output.
48454-#
48455-# Handle lines like:
48456-#   353 00008098 SECT4  notype       External     | opt_junk
48457-#   3F1 00000000 SECT7  notype ()    External     | malloc_initialized
48458-($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) {
48459-  print $NF
48460-}
48461-EOF
48462diff --git a/jemalloc/include/jemalloc/internal/prng.h b/jemalloc/include/jemalloc/internal/prng.h
48463deleted file mode 100644
48464index 14542aa..0000000
48465--- a/jemalloc/include/jemalloc/internal/prng.h
48466+++ /dev/null
48467@@ -1,168 +0,0 @@
48468-#ifndef JEMALLOC_INTERNAL_PRNG_H
48469-#define JEMALLOC_INTERNAL_PRNG_H
48470-
48471-#include "jemalloc/internal/bit_util.h"
48472-
48473-/*
48474- * Simple linear congruential pseudo-random number generator:
48475- *
48476- *   prng(x) = (a*x + c) % m
48477- *
48478- * where the following constants ensure maximal period:
48479- *
48480- *   a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
48481- *   c == Odd number (relatively prime to 2^n).
48482- *   m == 2^32 (or 2^64 for the 64-bit variant)
48483- *
48484- * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
48485- *
48486- * This choice of m has the disadvantage that the quality of the bits is
48487- * proportional to bit position.  For example, the lowest bit has a cycle of 2,
48488- * the next has a cycle of 4, etc.  For this reason, we prefer to use the upper
48489- * bits.
48490- */
48491-
48492-/******************************************************************************/
48493-/* INTERNAL DEFINITIONS -- IGNORE */
48494-/******************************************************************************/
48495-#define PRNG_A_32	UINT32_C(1103515241)
48496-#define PRNG_C_32	UINT32_C(12347)
48497-
48498-#define PRNG_A_64	UINT64_C(6364136223846793005)
48499-#define PRNG_C_64	UINT64_C(1442695040888963407)
48500-
48501-JEMALLOC_ALWAYS_INLINE uint32_t
48502-prng_state_next_u32(uint32_t state) {
48503-	return (state * PRNG_A_32) + PRNG_C_32;
48504-}
48505-
48506-JEMALLOC_ALWAYS_INLINE uint64_t
48507-prng_state_next_u64(uint64_t state) {
48508-	return (state * PRNG_A_64) + PRNG_C_64;
48509-}
48510-
48511-JEMALLOC_ALWAYS_INLINE size_t
48512-prng_state_next_zu(size_t state) {
48513-#if LG_SIZEOF_PTR == 2
48514-	return (state * PRNG_A_32) + PRNG_C_32;
48515-#elif LG_SIZEOF_PTR == 3
48516-	return (state * PRNG_A_64) + PRNG_C_64;
48517-#else
48518-#error Unsupported pointer size
48519-#endif
48520-}
48521-
48522-/******************************************************************************/
48523-/* BEGIN PUBLIC API */
48524-/******************************************************************************/
48525-
48526-/*
48527- * The prng_lg_range functions give a uniform int in the half-open range [0,
48528- * 2**lg_range).
48529- */
48530-
48531-JEMALLOC_ALWAYS_INLINE uint32_t
48532-prng_lg_range_u32(uint32_t *state, unsigned lg_range) {
48533-	assert(lg_range > 0);
48534-	assert(lg_range <= 32);
48535-
48536-	*state = prng_state_next_u32(*state);
48537-	uint32_t ret = *state >> (32 - lg_range);
48538-
48539-	return ret;
48540-}
48541-
48542-JEMALLOC_ALWAYS_INLINE uint64_t
48543-prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
48544-	assert(lg_range > 0);
48545-	assert(lg_range <= 64);
48546-
48547-	*state = prng_state_next_u64(*state);
48548-	uint64_t ret = *state >> (64 - lg_range);
48549-
48550-	return ret;
48551-}
48552-
48553-JEMALLOC_ALWAYS_INLINE size_t
48554-prng_lg_range_zu(size_t *state, unsigned lg_range) {
48555-	assert(lg_range > 0);
48556-	assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
48557-
48558-	*state = prng_state_next_zu(*state);
48559-	size_t ret = *state >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
48560-
48561-	return ret;
48562-}
48563-
48564-/*
48565- * The prng_range functions behave like the prng_lg_range, but return a result
48566- * in [0, range) instead of [0, 2**lg_range).
48567- */
48568-
48569-JEMALLOC_ALWAYS_INLINE uint32_t
48570-prng_range_u32(uint32_t *state, uint32_t range) {
48571-	assert(range != 0);
48572-	/*
48573-	 * If range were 1, lg_range would be 0, so the shift in
48574-	 * prng_lg_range_u32 would be a shift of a 32-bit variable by 32 bits,
48575-	 * which is UB.  Just handle this case as a one-off.
48576-	 */
48577-	if (range == 1) {
48578-		return 0;
48579-	}
48580-
48581-	/* Compute the ceiling of lg(range). */
48582-	unsigned lg_range = ffs_u32(pow2_ceil_u32(range));
48583-
48584-	/* Generate a result in [0..range) via repeated trial. */
48585-	uint32_t ret;
48586-	do {
48587-		ret = prng_lg_range_u32(state, lg_range);
48588-	} while (ret >= range);
48589-
48590-	return ret;
48591-}
48592-
48593-JEMALLOC_ALWAYS_INLINE uint64_t
48594-prng_range_u64(uint64_t *state, uint64_t range) {
48595-	assert(range != 0);
48596-
48597-	/* See the note in prng_range_u32. */
48598-	if (range == 1) {
48599-		return 0;
48600-	}
48601-
48602-	/* Compute the ceiling of lg(range). */
48603-	unsigned lg_range = ffs_u64(pow2_ceil_u64(range));
48604-
48605-	/* Generate a result in [0..range) via repeated trial. */
48606-	uint64_t ret;
48607-	do {
48608-		ret = prng_lg_range_u64(state, lg_range);
48609-	} while (ret >= range);
48610-
48611-	return ret;
48612-}
48613-
48614-JEMALLOC_ALWAYS_INLINE size_t
48615-prng_range_zu(size_t *state, size_t range) {
48616-	assert(range != 0);
48617-
48618-	/* See the note in prng_range_u32. */
48619-	if (range == 1) {
48620-		return 0;
48621-	}
48622-
48623-	/* Compute the ceiling of lg(range). */
48624-	unsigned lg_range = ffs_u64(pow2_ceil_u64(range));
48625-
48626-	/* Generate a result in [0..range) via repeated trial. */
48627-	size_t ret;
48628-	do {
48629-		ret = prng_lg_range_zu(state, lg_range);
48630-	} while (ret >= range);
48631-
48632-	return ret;
48633-}
48634-
48635-#endif /* JEMALLOC_INTERNAL_PRNG_H */
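The generator above is just a 64-bit LCG step plus a shift that keeps only the upper bits,
which the header notes are the higher-quality ones.  The standalone restatement below reuses
the PRNG_A_64/PRNG_C_64 constants given above; the seed value is arbitrary.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PRNG_A_64	UINT64_C(6364136223846793005)
#define PRNG_C_64	UINT64_C(1442695040888963407)

static inline uint64_t
prng_state_next_u64(uint64_t state) {
	return (state * PRNG_A_64) + PRNG_C_64;
}

/* Uniform value in [0, 2**lg_range), taken from the upper bits. */
static inline uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
	assert(lg_range > 0 && lg_range <= 64);
	*state = prng_state_next_u64(*state);
	return *state >> (64 - lg_range);
}

int
main(void) {
	uint64_t state = 42; /* Arbitrary seed. */
	for (int i = 0; i < 4; i++) {
		/* Each draw is in [0, 1024). */
		printf("%llu\n", (unsigned long long)prng_lg_range_u64(&state, 10));
	}
	return 0;
}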
48636diff --git a/jemalloc/include/jemalloc/internal/prof_data.h b/jemalloc/include/jemalloc/internal/prof_data.h
48637deleted file mode 100644
48638index 4c8e22c..0000000
48639--- a/jemalloc/include/jemalloc/internal/prof_data.h
48640+++ /dev/null
48641@@ -1,37 +0,0 @@
48642-#ifndef JEMALLOC_INTERNAL_PROF_DATA_H
48643-#define JEMALLOC_INTERNAL_PROF_DATA_H
48644-
48645-#include "jemalloc/internal/mutex.h"
48646-
48647-extern malloc_mutex_t bt2gctx_mtx;
48648-extern malloc_mutex_t tdatas_mtx;
48649-extern malloc_mutex_t prof_dump_mtx;
48650-
48651-extern malloc_mutex_t *gctx_locks;
48652-extern malloc_mutex_t *tdata_locks;
48653-
48654-extern size_t prof_unbiased_sz[PROF_SC_NSIZES];
48655-extern size_t prof_shifted_unbiased_cnt[PROF_SC_NSIZES];
48656-
48657-void prof_bt_hash(const void *key, size_t r_hash[2]);
48658-bool prof_bt_keycomp(const void *k1, const void *k2);
48659-
48660-bool prof_data_init(tsd_t *tsd);
48661-prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
48662-char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
48663-int prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name);
48664-void prof_unbias_map_init();
48665-void prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque,
48666-    prof_tdata_t *tdata, bool leakcheck);
48667-prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid,
48668-    uint64_t thr_discrim, char *thread_name, bool active);
48669-void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata);
48670-void prof_reset(tsd_t *tsd, size_t lg_sample);
48671-void prof_tctx_try_destroy(tsd_t *tsd, prof_tctx_t *tctx);
48672-
48673-/* Used in unit tests. */
48674-size_t prof_tdata_count(void);
48675-size_t prof_bt_count(void);
48676-void prof_cnt_all(prof_cnt_t *cnt_all);
48677-
48678-#endif /* JEMALLOC_INTERNAL_PROF_DATA_H */
48679diff --git a/jemalloc/include/jemalloc/internal/prof_externs.h b/jemalloc/include/jemalloc/internal/prof_externs.h
48680deleted file mode 100644
48681index bdff134..0000000
48682--- a/jemalloc/include/jemalloc/internal/prof_externs.h
48683+++ /dev/null
48684@@ -1,95 +0,0 @@
48685-#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H
48686-#define JEMALLOC_INTERNAL_PROF_EXTERNS_H
48687-
48688-#include "jemalloc/internal/mutex.h"
48689-#include "jemalloc/internal/prof_hook.h"
48690-
48691-extern bool opt_prof;
48692-extern bool opt_prof_active;
48693-extern bool opt_prof_thread_active_init;
48694-extern size_t opt_lg_prof_sample;    /* Mean bytes between samples. */
48695-extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
48696-extern bool opt_prof_gdump;          /* High-water memory dumping. */
48697-extern bool opt_prof_final;          /* Final profile dumping. */
48698-extern bool opt_prof_leak;           /* Dump leak summary at exit. */
48699-extern bool opt_prof_leak_error;     /* Exit with error code if memory leaked */
48700-extern bool opt_prof_accum;          /* Report cumulative bytes. */
48701-extern bool opt_prof_log;            /* Turn logging on at boot. */
48702-extern char opt_prof_prefix[
48703-    /* Minimize memory bloat for non-prof builds. */
48704-#ifdef JEMALLOC_PROF
48705-    PATH_MAX +
48706-#endif
48707-    1];
48708-extern bool opt_prof_unbias;
48709-
48710-/* For recording recent allocations */
48711-extern ssize_t opt_prof_recent_alloc_max;
48712-
48713-/* Whether to use thread name provided by the system or by mallctl. */
48714-extern bool opt_prof_sys_thread_name;
48715-
48716-/* Whether to record per size class counts and request size totals. */
48717-extern bool opt_prof_stats;
48718-
48719-/* Accessed via prof_active_[gs]et{_unlocked,}(). */
48720-extern bool prof_active_state;
48721-
48722-/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
48723-extern bool prof_gdump_val;
48724-
48725-/* Profile dump interval, measured in bytes allocated. */
48726-extern uint64_t prof_interval;
48727-
48728-/*
48729- * Initialized as opt_lg_prof_sample, and potentially modified during profiling
48730- * resets.
48731- */
48732-extern size_t lg_prof_sample;
48733-
48734-extern bool prof_booted;
48735-
48736-void prof_backtrace_hook_set(prof_backtrace_hook_t hook);
48737-prof_backtrace_hook_t prof_backtrace_hook_get();
48738-
48739-void prof_dump_hook_set(prof_dump_hook_t hook);
48740-prof_dump_hook_t prof_dump_hook_get();
48741-
48742-/* Functions only accessed in prof_inlines.h */
48743-prof_tdata_t *prof_tdata_init(tsd_t *tsd);
48744-prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
48745-
48746-void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx);
48747-void prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size,
48748-    size_t usize, prof_tctx_t *tctx);
48749-void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_info_t *prof_info);
48750-prof_tctx_t *prof_tctx_create(tsd_t *tsd);
48751-void prof_idump(tsdn_t *tsdn);
48752-bool prof_mdump(tsd_t *tsd, const char *filename);
48753-void prof_gdump(tsdn_t *tsdn);
48754-
48755-void prof_tdata_cleanup(tsd_t *tsd);
48756-bool prof_active_get(tsdn_t *tsdn);
48757-bool prof_active_set(tsdn_t *tsdn, bool active);
48758-const char *prof_thread_name_get(tsd_t *tsd);
48759-int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
48760-bool prof_thread_active_get(tsd_t *tsd);
48761-bool prof_thread_active_set(tsd_t *tsd, bool active);
48762-bool prof_thread_active_init_get(tsdn_t *tsdn);
48763-bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
48764-bool prof_gdump_get(tsdn_t *tsdn);
48765-bool prof_gdump_set(tsdn_t *tsdn, bool active);
48766-void prof_boot0(void);
48767-void prof_boot1(void);
48768-bool prof_boot2(tsd_t *tsd, base_t *base);
48769-void prof_prefork0(tsdn_t *tsdn);
48770-void prof_prefork1(tsdn_t *tsdn);
48771-void prof_postfork_parent(tsdn_t *tsdn);
48772-void prof_postfork_child(tsdn_t *tsdn);
48773-
48774-/* Only accessed by thread event. */
48775-uint64_t prof_sample_new_event_wait(tsd_t *tsd);
48776-uint64_t prof_sample_postponed_event_wait(tsd_t *tsd);
48777-void prof_sample_event_handler(tsd_t *tsd, uint64_t elapsed);
48778-
48779-#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */
48780diff --git a/jemalloc/include/jemalloc/internal/prof_hook.h b/jemalloc/include/jemalloc/internal/prof_hook.h
48781deleted file mode 100644
48782index 150d19d..0000000
48783--- a/jemalloc/include/jemalloc/internal/prof_hook.h
48784+++ /dev/null
48785@@ -1,21 +0,0 @@
48786-#ifndef JEMALLOC_INTERNAL_PROF_HOOK_H
48787-#define JEMALLOC_INTERNAL_PROF_HOOK_H
48788-
48789-/*
48790- * The hook types declared in this file are experimental and undocumented,
48791- * which is why the typedefs are located in an 'internal' header.
48792- */
48793-
48794-/*
48795- * A hook to mock out backtrace functionality.  This can be handy, since it's
48796- * otherwise difficult to guarantee that two allocations are reported as coming
48797- * from the exact same stack trace in the presence of an optimizing compiler.
48798- */
48799-typedef void (*prof_backtrace_hook_t)(void **, unsigned *, unsigned);
48800-
48801-/*
48802- * A callback hook that notifies about a recently dumped heap profile.
48803- */
48804-typedef void (*prof_dump_hook_t)(const char *filename);
48805-
48806-#endif /* JEMALLOC_INTERNAL_PROF_HOOK_H */
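The backtrace hook exists precisely so tests can substitute a deterministic trace.  Below is a
minimal, standalone sketch of such a mock.  The header does not name the parameters, so
treating them as (frame vector, out frame count, max frames) is an assumption, and the frame
addresses are made up.

#include <stdint.h>
#include <stdio.h>

/* Same shape as prof_backtrace_hook_t. */
typedef void (*backtrace_hook_t)(void **, unsigned *, unsigned);

/*
 * Assumed parameter meaning: (frame vector, out frame count, max frames).
 * Fill in a fixed, fake stack so every "captured" trace compares equal.
 */
static void
mock_backtrace_hook(void **vec, unsigned *len, unsigned max_len) {
	unsigned n = (max_len < 2) ? max_len : 2;
	for (unsigned i = 0; i < n; i++) {
		vec[i] = (void *)(uintptr_t)(0x1000 + i);
	}
	*len = n;
}

int
main(void) {
	void *vec[8];
	unsigned len = 0;
	backtrace_hook_t hook = mock_backtrace_hook;

	hook(vec, &len, 8);
	printf("captured %u fake frames, first = %p\n", len, vec[0]);
	return 0;
}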
48807diff --git a/jemalloc/include/jemalloc/internal/prof_inlines.h b/jemalloc/include/jemalloc/internal/prof_inlines.h
48808deleted file mode 100644
48809index a8e7e7f..0000000
48810--- a/jemalloc/include/jemalloc/internal/prof_inlines.h
48811+++ /dev/null
48812@@ -1,261 +0,0 @@
48813-#ifndef JEMALLOC_INTERNAL_PROF_INLINES_H
48814-#define JEMALLOC_INTERNAL_PROF_INLINES_H
48815-
48816-#include "jemalloc/internal/safety_check.h"
48817-#include "jemalloc/internal/sz.h"
48818-#include "jemalloc/internal/thread_event.h"
48819-
48820-JEMALLOC_ALWAYS_INLINE void
48821-prof_active_assert() {
48822-	cassert(config_prof);
48823-	/*
48824-	 * If opt_prof is off, then prof_active must always be off, regardless
48825-	 * of whether prof_active_mtx is held or not.
48826-	 */
48827-	assert(opt_prof || !prof_active_state);
48828-}
48829-
48830-JEMALLOC_ALWAYS_INLINE bool
48831-prof_active_get_unlocked(void) {
48832-	prof_active_assert();
48833-	/*
48834-	 * Even if opt_prof is true, sampling can be temporarily disabled by
48835-	 * setting prof_active to false.  No locking is used when reading
48836-	 * prof_active in the fast path, so there are no guarantees regarding
48837-	 * how long it will take for all threads to notice state changes.
48838-	 */
48839-	return prof_active_state;
48840-}
48841-
48842-JEMALLOC_ALWAYS_INLINE bool
48843-prof_gdump_get_unlocked(void) {
48844-	/*
48845-	 * No locking is used when reading prof_gdump_val in the fast path, so
48846-	 * there are no guarantees regarding how long it will take for all
48847-	 * threads to notice state changes.
48848-	 */
48849-	return prof_gdump_val;
48850-}
48851-
48852-JEMALLOC_ALWAYS_INLINE prof_tdata_t *
48853-prof_tdata_get(tsd_t *tsd, bool create) {
48854-	prof_tdata_t *tdata;
48855-
48856-	cassert(config_prof);
48857-
48858-	tdata = tsd_prof_tdata_get(tsd);
48859-	if (create) {
48860-		assert(tsd_reentrancy_level_get(tsd) == 0);
48861-		if (unlikely(tdata == NULL)) {
48862-			if (tsd_nominal(tsd)) {
48863-				tdata = prof_tdata_init(tsd);
48864-				tsd_prof_tdata_set(tsd, tdata);
48865-			}
48866-		} else if (unlikely(tdata->expired)) {
48867-			tdata = prof_tdata_reinit(tsd, tdata);
48868-			tsd_prof_tdata_set(tsd, tdata);
48869-		}
48870-		assert(tdata == NULL || tdata->attached);
48871-	}
48872-
48873-	return tdata;
48874-}
48875-
48876-JEMALLOC_ALWAYS_INLINE void
48877-prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
48878-    prof_info_t *prof_info) {
48879-	cassert(config_prof);
48880-	assert(ptr != NULL);
48881-	assert(prof_info != NULL);
48882-
48883-	arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, false);
48884-}
48885-
48886-JEMALLOC_ALWAYS_INLINE void
48887-prof_info_get_and_reset_recent(tsd_t *tsd, const void *ptr,
48888-    emap_alloc_ctx_t *alloc_ctx, prof_info_t *prof_info) {
48889-	cassert(config_prof);
48890-	assert(ptr != NULL);
48891-	assert(prof_info != NULL);
48892-
48893-	arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, true);
48894-}
48895-
48896-JEMALLOC_ALWAYS_INLINE void
48897-prof_tctx_reset(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx) {
48898-	cassert(config_prof);
48899-	assert(ptr != NULL);
48900-
48901-	arena_prof_tctx_reset(tsd, ptr, alloc_ctx);
48902-}
48903-
48904-JEMALLOC_ALWAYS_INLINE void
48905-prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
48906-	cassert(config_prof);
48907-	assert(ptr != NULL);
48908-
48909-	arena_prof_tctx_reset_sampled(tsd, ptr);
48910-}
48911-
48912-JEMALLOC_ALWAYS_INLINE void
48913-prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx, size_t size) {
48914-	cassert(config_prof);
48915-	assert(edata != NULL);
48916-	assert((uintptr_t)tctx > (uintptr_t)1U);
48917-
48918-	arena_prof_info_set(tsd, edata, tctx, size);
48919-}
48920-
48921-JEMALLOC_ALWAYS_INLINE bool
48922-prof_sample_should_skip(tsd_t *tsd, bool sample_event) {
48923-	cassert(config_prof);
48924-
48925-	/* Fastpath: no need to load tdata */
48926-	if (likely(!sample_event)) {
48927-		return true;
48928-	}
48929-
48930-	/*
48931-	 * sample_event is always obtained from the thread event module, and
48932-	 * whenever it's true, it means that the thread event module has
48933-	 * already checked the reentrancy level.
48934-	 */
48935-	assert(tsd_reentrancy_level_get(tsd) == 0);
48936-
48937-	prof_tdata_t *tdata = prof_tdata_get(tsd, true);
48938-	if (unlikely(tdata == NULL)) {
48939-		return true;
48940-	}
48941-
48942-	return !tdata->active;
48943-}
48944-
48945-JEMALLOC_ALWAYS_INLINE prof_tctx_t *
48946-prof_alloc_prep(tsd_t *tsd, bool prof_active, bool sample_event) {
48947-	prof_tctx_t *ret;
48948-
48949-	if (!prof_active ||
48950-	    likely(prof_sample_should_skip(tsd, sample_event))) {
48951-		ret = (prof_tctx_t *)(uintptr_t)1U;
48952-	} else {
48953-		ret = prof_tctx_create(tsd);
48954-	}
48955-
48956-	return ret;
48957-}
48958-
48959-JEMALLOC_ALWAYS_INLINE void
48960-prof_malloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
48961-    emap_alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
48962-	cassert(config_prof);
48963-	assert(ptr != NULL);
48964-	assert(usize == isalloc(tsd_tsdn(tsd), ptr));
48965-
48966-	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
48967-		prof_malloc_sample_object(tsd, ptr, size, usize, tctx);
48968-	} else {
48969-		prof_tctx_reset(tsd, ptr, alloc_ctx);
48970-	}
48971-}
48972-
48973-JEMALLOC_ALWAYS_INLINE void
48974-prof_realloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
48975-    prof_tctx_t *tctx, bool prof_active, const void *old_ptr, size_t old_usize,
48976-    prof_info_t *old_prof_info, bool sample_event) {
48977-	bool sampled, old_sampled, moved;
48978-
48979-	cassert(config_prof);
48980-	assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
48981-
48982-	if (prof_active && ptr != NULL) {
48983-		assert(usize == isalloc(tsd_tsdn(tsd), ptr));
48984-		if (prof_sample_should_skip(tsd, sample_event)) {
48985-			/*
48986-			 * Don't sample.  The usize passed to prof_alloc_prep()
48987-			 * was larger than what actually got allocated, so a
48988-			 * backtrace was captured for this allocation, even
48989-			 * though its actual usize was insufficient to cross the
48990-			 * sample threshold.
48991-			 */
48992-			prof_alloc_rollback(tsd, tctx);
48993-			tctx = (prof_tctx_t *)(uintptr_t)1U;
48994-		}
48995-	}
48996-
48997-	sampled = ((uintptr_t)tctx > (uintptr_t)1U);
48998-	old_sampled = ((uintptr_t)old_prof_info->alloc_tctx > (uintptr_t)1U);
48999-	moved = (ptr != old_ptr);
49000-
49001-	if (unlikely(sampled)) {
49002-		prof_malloc_sample_object(tsd, ptr, size, usize, tctx);
49003-	} else if (moved) {
49004-		prof_tctx_reset(tsd, ptr, NULL);
49005-	} else if (unlikely(old_sampled)) {
49006-		/*
49007-		 * prof_tctx_reset() would work for the !moved case as well,
49008-		 * but prof_tctx_reset_sampled() is slightly cheaper, and it is
49009-		 * the proper thing to do here given our explicit knowledge of
49010-		 * the moved state.
49011-		 */
49012-		prof_tctx_reset_sampled(tsd, ptr);
49013-	} else {
49014-		prof_info_t prof_info;
49015-		prof_info_get(tsd, ptr, NULL, &prof_info);
49016-		assert((uintptr_t)prof_info.alloc_tctx == (uintptr_t)1U);
49017-	}
49018-
49019-	/*
49020-	 * The prof_free_sampled_object() call must come after the
49021-	 * prof_malloc_sample_object() call, because tctx and old_tctx may be
49022-	 * the same, in which case reversing the call order could cause the tctx
49023-	 * to be prematurely destroyed as a side effect of momentarily zeroed
49024-	 * counters.
49025-	 */
49026-	if (unlikely(old_sampled)) {
49027-		prof_free_sampled_object(tsd, old_usize, old_prof_info);
49028-	}
49029-}
49030-
49031-JEMALLOC_ALWAYS_INLINE size_t
49032-prof_sample_align(size_t orig_align) {
49033-	/*
49034-	 * Enforce page alignment, so that sampled allocations can be identified
49035-	 * w/o metadata lookup.
49036-	 */
49037-	assert(opt_prof);
49038-	return (opt_cache_oblivious && orig_align < PAGE) ? PAGE :
49039-	    orig_align;
49040-}
49041-
49042-JEMALLOC_ALWAYS_INLINE bool
49043-prof_sample_aligned(const void *ptr) {
49044-	return ((uintptr_t)ptr & PAGE_MASK) == 0;
49045-}
49046-
49047-JEMALLOC_ALWAYS_INLINE bool
49048-prof_sampled(tsd_t *tsd, const void *ptr) {
49049-	prof_info_t prof_info;
49050-	prof_info_get(tsd, ptr, NULL, &prof_info);
49051-	bool sampled = (uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U;
49052-	if (sampled) {
49053-		assert(prof_sample_aligned(ptr));
49054-	}
49055-	return sampled;
49056-}
49057-
49058-JEMALLOC_ALWAYS_INLINE void
49059-prof_free(tsd_t *tsd, const void *ptr, size_t usize,
49060-    emap_alloc_ctx_t *alloc_ctx) {
49061-	prof_info_t prof_info;
49062-	prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info);
49063-
49064-	cassert(config_prof);
49065-	assert(usize == isalloc(tsd_tsdn(tsd), ptr));
49066-
49067-	if (unlikely((uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U)) {
49068-		assert(prof_sample_aligned(ptr));
49069-		prof_free_sampled_object(tsd, usize, &prof_info);
49070-	}
49071-}
49072-
49073-#endif /* JEMALLOC_INTERNAL_PROF_INLINES_H */
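
Two conventions in the inlines above are worth spelling out: the literal (prof_tctx_t *)(uintptr_t)1U serves as a sentinel meaning "allocation not sampled", and prof_sample_align() forces sampled allocations to page alignment so that prof_sampled()/prof_free() can rule sampling out with a single mask test instead of a metadata lookup. A standalone sketch of that mask test; the 4 KiB page size is an assumption made for the example, whereas jemalloc takes PAGE/PAGE_MASK from its build configuration.

    #include <assert.h>
    #include <stdint.h>

    #define EXAMPLE_PAGE      ((uintptr_t)4096)   /* assumed 4 KiB pages */
    #define EXAMPLE_PAGE_MASK (EXAMPLE_PAGE - 1)

    /* Mirrors prof_sample_aligned() above: only page-aligned pointers can
     * possibly refer to sampled allocations. */
    static int
    sample_aligned(const void *ptr) {
        return ((uintptr_t)ptr & EXAMPLE_PAGE_MASK) == 0;
    }

    int
    main(void) {
        assert(sample_aligned((void *)(EXAMPLE_PAGE * 3)));       /* 0x3000 */
        assert(!sample_aligned((void *)(EXAMPLE_PAGE * 3 + 16))); /* 0x3010 */
        return 0;
    }
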
49074diff --git a/jemalloc/include/jemalloc/internal/prof_log.h b/jemalloc/include/jemalloc/internal/prof_log.h
49075deleted file mode 100644
49076index ccb557d..0000000
49077--- a/jemalloc/include/jemalloc/internal/prof_log.h
49078+++ /dev/null
49079@@ -1,22 +0,0 @@
49080-#ifndef JEMALLOC_INTERNAL_PROF_LOG_H
49081-#define JEMALLOC_INTERNAL_PROF_LOG_H
49082-
49083-#include "jemalloc/internal/mutex.h"
49084-
49085-extern malloc_mutex_t log_mtx;
49086-
49087-void prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info);
49088-bool prof_log_init(tsd_t *tsdn);
49089-
49090-/* Used in unit tests. */
49091-size_t prof_log_bt_count(void);
49092-size_t prof_log_alloc_count(void);
49093-size_t prof_log_thr_count(void);
49094-bool prof_log_is_logging(void);
49095-bool prof_log_rep_check(void);
49096-void prof_log_dummy_set(bool new_value);
49097-
49098-bool prof_log_start(tsdn_t *tsdn, const char *filename);
49099-bool prof_log_stop(tsdn_t *tsdn);
49100-
49101-#endif /* JEMALLOC_INTERNAL_PROF_LOG_H */
49102diff --git a/jemalloc/include/jemalloc/internal/prof_recent.h b/jemalloc/include/jemalloc/internal/prof_recent.h
49103deleted file mode 100644
49104index df41023..0000000
49105--- a/jemalloc/include/jemalloc/internal/prof_recent.h
49106+++ /dev/null
49107@@ -1,23 +0,0 @@
49108-#ifndef JEMALLOC_INTERNAL_PROF_RECENT_H
49109-#define JEMALLOC_INTERNAL_PROF_RECENT_H
49110-
49111-extern malloc_mutex_t prof_recent_alloc_mtx;
49112-extern malloc_mutex_t prof_recent_dump_mtx;
49113-
49114-bool prof_recent_alloc_prepare(tsd_t *tsd, prof_tctx_t *tctx);
49115-void prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize);
49116-void prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata);
49117-bool prof_recent_init();
49118-void edata_prof_recent_alloc_init(edata_t *edata);
49119-
49120-/* Used in unit tests. */
49121-typedef ql_head(prof_recent_t) prof_recent_list_t;
49122-extern prof_recent_list_t prof_recent_alloc_list;
49123-edata_t *prof_recent_alloc_edata_get_no_lock_test(const prof_recent_t *node);
49124-prof_recent_t *edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata);
49125-
49126-ssize_t prof_recent_alloc_max_ctl_read();
49127-ssize_t prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max);
49128-void prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque);
49129-
49130-#endif /* JEMALLOC_INTERNAL_PROF_RECENT_H */
49131diff --git a/jemalloc/include/jemalloc/internal/prof_stats.h b/jemalloc/include/jemalloc/internal/prof_stats.h
49132deleted file mode 100644
49133index 7954e82..0000000
49134--- a/jemalloc/include/jemalloc/internal/prof_stats.h
49135+++ /dev/null
49136@@ -1,17 +0,0 @@
49137-#ifndef JEMALLOC_INTERNAL_PROF_STATS_H
49138-#define JEMALLOC_INTERNAL_PROF_STATS_H
49139-
49140-typedef struct prof_stats_s prof_stats_t;
49141-struct prof_stats_s {
49142-	uint64_t req_sum;
49143-	uint64_t count;
49144-};
49145-
49146-extern malloc_mutex_t prof_stats_mtx;
49147-
49148-void prof_stats_inc(tsd_t *tsd, szind_t ind, size_t size);
49149-void prof_stats_dec(tsd_t *tsd, szind_t ind, size_t size);
49150-void prof_stats_get_live(tsd_t *tsd, szind_t ind, prof_stats_t *stats);
49151-void prof_stats_get_accum(tsd_t *tsd, szind_t ind, prof_stats_t *stats);
49152-
49153-#endif /* JEMALLOC_INTERNAL_PROF_STATS_H */
49154diff --git a/jemalloc/include/jemalloc/internal/prof_structs.h b/jemalloc/include/jemalloc/internal/prof_structs.h
49155deleted file mode 100644
49156index dd22115..0000000
49157--- a/jemalloc/include/jemalloc/internal/prof_structs.h
49158+++ /dev/null
49159@@ -1,221 +0,0 @@
49160-#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H
49161-#define JEMALLOC_INTERNAL_PROF_STRUCTS_H
49162-
49163-#include "jemalloc/internal/ckh.h"
49164-#include "jemalloc/internal/edata.h"
49165-#include "jemalloc/internal/mutex.h"
49166-#include "jemalloc/internal/prng.h"
49167-#include "jemalloc/internal/rb.h"
49168-
49169-struct prof_bt_s {
49170-	/* Backtrace, stored as len program counters. */
49171-	void		**vec;
49172-	unsigned	len;
49173-};
49174-
49175-#ifdef JEMALLOC_PROF_LIBGCC
49176-/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
49177-typedef struct {
49178-	void 		**vec;
49179-	unsigned	*len;
49180-	unsigned	max;
49181-} prof_unwind_data_t;
49182-#endif
49183-
49184-struct prof_cnt_s {
49185-	/* Profiling counters. */
49186-	uint64_t	curobjs;
49187-	uint64_t	curobjs_shifted_unbiased;
49188-	uint64_t	curbytes;
49189-	uint64_t	curbytes_unbiased;
49190-	uint64_t	accumobjs;
49191-	uint64_t	accumobjs_shifted_unbiased;
49192-	uint64_t	accumbytes;
49193-	uint64_t	accumbytes_unbiased;
49194-};
49195-
49196-typedef enum {
49197-	prof_tctx_state_initializing,
49198-	prof_tctx_state_nominal,
49199-	prof_tctx_state_dumping,
49200-	prof_tctx_state_purgatory /* Dumper must finish destroying. */
49201-} prof_tctx_state_t;
49202-
49203-struct prof_tctx_s {
49204-	/* Thread data for thread that performed the allocation. */
49205-	prof_tdata_t		*tdata;
49206-
49207-	/*
49208-	 * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
49209-	 * defunct during teardown.
49210-	 */
49211-	uint64_t		thr_uid;
49212-	uint64_t		thr_discrim;
49213-
49214-	/*
49215-	 * Reference count of how many times this tctx object is referenced in
49216-	 * recent allocation / deallocation records, protected by tdata->lock.
49217-	 */
49218-	uint64_t		recent_count;
49219-
49220-	/* Profiling counters, protected by tdata->lock. */
49221-	prof_cnt_t		cnts;
49222-
49223-	/* Associated global context. */
49224-	prof_gctx_t		*gctx;
49225-
49226-	/*
49227-	 * UID that distinguishes multiple tctx's created by the same thread,
49228-	 * but coexisting in gctx->tctxs.  There are two ways that such
49229-	 * coexistence can occur:
49230-	 * - A dumper thread can cause a tctx to be retained in the purgatory
49231-	 *   state.
49232-	 * - Although a single "producer" thread must create all tctx's which
49233-	 *   share the same thr_uid, multiple "consumers" can each concurrently
49234-	 *   execute portions of prof_tctx_destroy().  prof_tctx_destroy() only
49235-	 *   gets called once each time cnts.cur{objs,bytes} drop to 0, but this
49236-	 *   threshold can be hit again before the first consumer finishes
49237-	 *   executing prof_tctx_destroy().
49238-	 */
49239-	uint64_t		tctx_uid;
49240-
49241-	/* Linkage into gctx's tctxs. */
49242-	rb_node(prof_tctx_t)	tctx_link;
49243-
49244-	/*
49245-	 * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents
49246-	 * sample vs destroy race.
49247-	 */
49248-	bool			prepared;
49249-
49250-	/* Current dump-related state, protected by gctx->lock. */
49251-	prof_tctx_state_t	state;
49252-
49253-	/*
49254-	 * Copy of cnts snapshotted during early dump phase, protected by
49255-	 * dump_mtx.
49256-	 */
49257-	prof_cnt_t		dump_cnts;
49258-};
49259-typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
49260-
49261-struct prof_info_s {
49262-	/* Time when the allocation was made. */
49263-	nstime_t		alloc_time;
49264-	/* Points to the prof_tctx_t corresponding to the allocation. */
49265-	prof_tctx_t		*alloc_tctx;
49266-	/* Allocation request size. */
49267-	size_t			alloc_size;
49268-};
49269-
49270-struct prof_gctx_s {
49271-	/* Protects nlimbo, cnt_summed, and tctxs. */
49272-	malloc_mutex_t		*lock;
49273-
49274-	/*
49275-	 * Number of threads that currently cause this gctx to be in a state of
49276-	 * limbo due to one of:
49277-	 *   - Initializing this gctx.
49278-	 *   - Initializing per thread counters associated with this gctx.
49279-	 *   - Preparing to destroy this gctx.
49280-	 *   - Dumping a heap profile that includes this gctx.
49281-	 * nlimbo must be 1 (single destroyer) in order to safely destroy the
49282-	 * gctx.
49283-	 */
49284-	unsigned		nlimbo;
49285-
49286-	/*
49287-	 * Tree of profile counters, one for each thread that has allocated in
49288-	 * this context.
49289-	 */
49290-	prof_tctx_tree_t	tctxs;
49291-
49292-	/* Linkage for tree of contexts to be dumped. */
49293-	rb_node(prof_gctx_t)	dump_link;
49294-
49295-	/* Temporary storage for summation during dump. */
49296-	prof_cnt_t		cnt_summed;
49297-
49298-	/* Associated backtrace. */
49299-	prof_bt_t		bt;
49300-
49301-	/* Backtrace vector, variable size, referred to by bt. */
49302-	void			*vec[1];
49303-};
49304-typedef rb_tree(prof_gctx_t) prof_gctx_tree_t;
49305-
49306-struct prof_tdata_s {
49307-	malloc_mutex_t		*lock;
49308-
49309-	/* Monotonically increasing unique thread identifier. */
49310-	uint64_t		thr_uid;
49311-
49312-	/*
49313-	 * Monotonically increasing discriminator among tdata structures
49314-	 * associated with the same thr_uid.
49315-	 */
49316-	uint64_t		thr_discrim;
49317-
49318-	/* Included in heap profile dumps if non-NULL. */
49319-	char			*thread_name;
49320-
49321-	bool			attached;
49322-	bool			expired;
49323-
49324-	rb_node(prof_tdata_t)	tdata_link;
49325-
49326-	/*
49327-	 * Counter used to initialize prof_tctx_t's tctx_uid.  No locking is
49328-	 * necessary when incrementing this field, because only one thread ever
49329-	 * does so.
49330-	 */
49331-	uint64_t		tctx_uid_next;
49332-
49333-	/*
49334-	 * Hash of (prof_bt_t *)-->(prof_tctx_t *).  Each thread tracks
49335-	 * backtraces for which it has non-zero allocation/deallocation counters
49336-	 * associated with thread-specific prof_tctx_t objects.  Other threads
49337-	 * may write to prof_tctx_t contents when freeing associated objects.
49338-	 */
49339-	ckh_t			bt2tctx;
49340-
49341-	/* State used to avoid dumping while operating on prof internals. */
49342-	bool			enq;
49343-	bool			enq_idump;
49344-	bool			enq_gdump;
49345-
49346-	/*
49347-	 * Set to true during an early dump phase for tdata's which are
49348-	 * currently being dumped.  New threads' tdata's have this initialized
49349-	 * to false so that they aren't accidentally included in later dump
49350-	 * phases.
49351-	 */
49352-	bool			dumping;
49353-
49354-	/*
49355-	 * True if profiling is active for this tdata's thread
49356-	 * (thread.prof.active mallctl).
49357-	 */
49358-	bool			active;
49359-
49360-	/* Temporary storage for summation during dump. */
49361-	prof_cnt_t		cnt_summed;
49362-
49363-	/* Backtrace vector, used for calls to prof_backtrace(). */
49364-	void			*vec[PROF_BT_MAX];
49365-};
49366-typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
49367-
49368-struct prof_recent_s {
49369-	nstime_t alloc_time;
49370-	nstime_t dalloc_time;
49371-
49372-	ql_elm(prof_recent_t) link;
49373-	size_t size;
49374-	size_t usize;
49375-	atomic_p_t alloc_edata; /* NULL means allocation has been freed. */
49376-	prof_tctx_t *alloc_tctx;
49377-	prof_tctx_t *dalloc_tctx;
49378-};
49379-
49380-#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */
49381diff --git a/jemalloc/include/jemalloc/internal/prof_sys.h b/jemalloc/include/jemalloc/internal/prof_sys.h
49382deleted file mode 100644
49383index 3d25a42..0000000
49384--- a/jemalloc/include/jemalloc/internal/prof_sys.h
49385+++ /dev/null
49386@@ -1,30 +0,0 @@
49387-#ifndef JEMALLOC_INTERNAL_PROF_SYS_H
49388-#define JEMALLOC_INTERNAL_PROF_SYS_H
49389-
49390-extern malloc_mutex_t prof_dump_filename_mtx;
49391-extern base_t *prof_base;
49392-
49393-void bt_init(prof_bt_t *bt, void **vec);
49394-void prof_backtrace(tsd_t *tsd, prof_bt_t *bt);
49395-void prof_hooks_init();
49396-void prof_unwind_init();
49397-void prof_sys_thread_name_fetch(tsd_t *tsd);
49398-int prof_getpid(void);
49399-void prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind);
49400-bool prof_prefix_set(tsdn_t *tsdn, const char *prefix);
49401-void prof_fdump_impl(tsd_t *tsd);
49402-void prof_idump_impl(tsd_t *tsd);
49403-bool prof_mdump_impl(tsd_t *tsd, const char *filename);
49404-void prof_gdump_impl(tsd_t *tsd);
49405-
49406-/* Used in unit tests. */
49407-typedef int (prof_sys_thread_name_read_t)(char *buf, size_t limit);
49408-extern prof_sys_thread_name_read_t *JET_MUTABLE prof_sys_thread_name_read;
49409-typedef int (prof_dump_open_file_t)(const char *, int);
49410-extern prof_dump_open_file_t *JET_MUTABLE prof_dump_open_file;
49411-typedef ssize_t (prof_dump_write_file_t)(int, const void *, size_t);
49412-extern prof_dump_write_file_t *JET_MUTABLE prof_dump_write_file;
49413-typedef int (prof_dump_open_maps_t)();
49414-extern prof_dump_open_maps_t *JET_MUTABLE prof_dump_open_maps;
49415-
49416-#endif /* JEMALLOC_INTERNAL_PROF_SYS_H */
49417diff --git a/jemalloc/include/jemalloc/internal/prof_types.h b/jemalloc/include/jemalloc/internal/prof_types.h
49418deleted file mode 100644
49419index ba62865..0000000
49420--- a/jemalloc/include/jemalloc/internal/prof_types.h
49421+++ /dev/null
49422@@ -1,75 +0,0 @@
49423-#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H
49424-#define JEMALLOC_INTERNAL_PROF_TYPES_H
49425-
49426-typedef struct prof_bt_s prof_bt_t;
49427-typedef struct prof_cnt_s prof_cnt_t;
49428-typedef struct prof_tctx_s prof_tctx_t;
49429-typedef struct prof_info_s prof_info_t;
49430-typedef struct prof_gctx_s prof_gctx_t;
49431-typedef struct prof_tdata_s prof_tdata_t;
49432-typedef struct prof_recent_s prof_recent_t;
49433-
49434-/* Option defaults. */
49435-#ifdef JEMALLOC_PROF
49436-#  define PROF_PREFIX_DEFAULT		"jeprof"
49437-#else
49438-#  define PROF_PREFIX_DEFAULT		""
49439-#endif
49440-#define LG_PROF_SAMPLE_DEFAULT		19
49441-#define LG_PROF_INTERVAL_DEFAULT	-1
49442-
49443-/*
49444- * Hard limit on stack backtrace depth.  The version of prof_backtrace() that
49445- * is based on __builtin_return_address() necessarily has a hard-coded number
49446- * of backtrace frame handlers, and should be kept in sync with this setting.
49447- */
49448-#define PROF_BT_MAX			128
49449-
49450-/* Initial hash table size. */
49451-#define PROF_CKH_MINITEMS		64
49452-
49453-/* Size of memory buffer to use when writing dump files. */
49454-#ifndef JEMALLOC_PROF
49455-/* Minimize memory bloat for non-prof builds. */
49456-#  define PROF_DUMP_BUFSIZE		1
49457-#elif defined(JEMALLOC_DEBUG)
49458-/* Use a small buffer size in debug build, mainly to facilitate testing. */
49459-#  define PROF_DUMP_BUFSIZE		16
49460-#else
49461-#  define PROF_DUMP_BUFSIZE		65536
49462-#endif
49463-
49464-/* Size of size class related tables */
49465-#ifdef JEMALLOC_PROF
49466-#  define PROF_SC_NSIZES		SC_NSIZES
49467-#else
49468-/* Minimize memory bloat for non-prof builds. */
49469-#  define PROF_SC_NSIZES		1
49470-#endif
49471-
49472-/* Size of stack-allocated buffer used by prof_printf(). */
49473-#define PROF_PRINTF_BUFSIZE		128
49474-
49475-/*
49476- * Number of mutexes shared among all gctx's.  No space is allocated for these
49477- * unless profiling is enabled, so it's okay to over-provision.
49478- */
49479-#define PROF_NCTX_LOCKS			1024
49480-
49481-/*
49482- * Number of mutexes shared among all tdata's.  No space is allocated for these
49483- * unless profiling is enabled, so it's okay to over-provision.
49484- */
49485-#define PROF_NTDATA_LOCKS		256
49486-
49487-/* Minimize memory bloat for non-prof builds. */
49488-#ifdef JEMALLOC_PROF
49489-#define PROF_DUMP_FILENAME_LEN (PATH_MAX + 1)
49490-#else
49491-#define PROF_DUMP_FILENAME_LEN 1
49492-#endif
49493-
49494-/* Default number of recent allocations to record. */
49495-#define PROF_RECENT_ALLOC_MAX_DEFAULT 0
49496-
49497-#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */
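
To put the sampling default above in concrete terms: lg_prof_sample is the base-2 log of the average number of allocated bytes between samples, so LG_PROF_SAMPLE_DEFAULT of 19 corresponds to roughly one sampled allocation per 2^19 = 524288 bytes (512 KiB) of allocation activity. A trivial check:

    #include <stdio.h>

    #define LG_PROF_SAMPLE_DEFAULT 19   /* as defined in prof_types.h above */

    int
    main(void) {
        unsigned long long interval = 1ULL << LG_PROF_SAMPLE_DEFAULT;
        /* Prints: default sample interval: 524288 bytes (512 KiB) */
        printf("default sample interval: %llu bytes (%llu KiB)\n",
            interval, interval >> 10);
        return 0;
    }
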
49498diff --git a/jemalloc/include/jemalloc/internal/psset.h b/jemalloc/include/jemalloc/internal/psset.h
49499deleted file mode 100644
49500index e1d6497..0000000
49501--- a/jemalloc/include/jemalloc/internal/psset.h
49502+++ /dev/null
49503@@ -1,131 +0,0 @@
49504-#ifndef JEMALLOC_INTERNAL_PSSET_H
49505-#define JEMALLOC_INTERNAL_PSSET_H
49506-
49507-#include "jemalloc/internal/hpdata.h"
49508-
49509-/*
49510- * A page-slab set.  What the eset is to PAC, the psset is to HPA.  It maintains
49511- * a collection of page-slabs (the intent being that they are backed by
49512- * hugepages, or at least could be), and handles allocation and deallocation
49513- * requests.
49514- */
49515-
49516-/*
49517- * One more than the maximum pszind_t we will serve out of the HPA.
49518- * Practically, we expect only the first few to be actually used.  This
49519- * corresponds to a maximum size of 512MB on systems with 4k pages and
49520- * SC_NGROUP == 4, which is already an unreasonably large maximum.  Morally, you
49521- * can think of this as being SC_NPSIZES, but there's no sense in wasting that
49522- * much space in the arena, making bitmaps that much larger, etc.
49523- */
49524-#define PSSET_NPSIZES 64
49525-
49526-/*
49527- * We keep two purge lists per page size class; one for hugified hpdatas (at
49528- * index 2*pszind), and one for the non-hugified hpdatas (at index 2*pszind +
49529- * 1).  This lets us implement a preference for purging non-hugified hpdatas
49530- * among similarly-dirty ones.
49531- * We reserve the last two indices for empty slabs; in that case we purge
49532- * hugified ones (which are definitionally all waste) before non-hugified ones
49533- * (i.e. the order is reversed).
49534- */
49535-#define PSSET_NPURGE_LISTS (2 * PSSET_NPSIZES)
49536-
49537-typedef struct psset_bin_stats_s psset_bin_stats_t;
49538-struct psset_bin_stats_s {
49539-	/* How many pageslabs are in this bin? */
49540-	size_t npageslabs;
49541-	/* Of them, how many pages are active? */
49542-	size_t nactive;
49543-	/* And how many are dirty? */
49544-	size_t ndirty;
49545-};
49546-
49547-typedef struct psset_stats_s psset_stats_t;
49548-struct psset_stats_s {
49549-	/*
49550-	 * The second index is huge stats; nonfull_slabs[pszind][0] contains
49551-	 * stats for the non-huge slabs in bucket pszind, while
49552-	 * nonfull_slabs[pszind][1] contains stats for the huge slabs.
49553-	 */
49554-	psset_bin_stats_t nonfull_slabs[PSSET_NPSIZES][2];
49555-
49556-	/*
49557-	 * Full slabs don't live in any edata heap, but we still track their
49558-	 * stats.
49559-	 */
49560-	psset_bin_stats_t full_slabs[2];
49561-
49562-	/* Empty slabs are similar. */
49563-	psset_bin_stats_t empty_slabs[2];
49564-};
49565-
49566-typedef struct psset_s psset_t;
49567-struct psset_s {
49568-	/*
49569-	 * The pageslabs, quantized by the size class of the largest contiguous
49570-	 * free run of pages in a pageslab.
49571-	 */
49572-	hpdata_age_heap_t pageslabs[PSSET_NPSIZES];
49573-	/* Bitmap for which set bits correspond to non-empty heaps. */
49574-	fb_group_t pageslab_bitmap[FB_NGROUPS(PSSET_NPSIZES)];
49575-	/*
49576-	 * The sum of all bin stats in stats.  This lets us quickly answer
49577-	 * queries for the number of dirty, active, and retained pages in the
49578-	 * entire set.
49579-	 */
49580-	psset_bin_stats_t merged_stats;
49581-	psset_stats_t stats;
49582-	/*
49583-	 * Slabs with no active allocations, but which are allowed to serve new
49584-	 * allocations.
49585-	 */
49586-	hpdata_empty_list_t empty;
49587-	/*
49588-	 * Slabs which are available to be purged, ordered by how much we want
49589-	 * to purge them (with later indices indicating slabs we want to purge
49590-	 * more).
49591-	 */
49592-	hpdata_purge_list_t to_purge[PSSET_NPURGE_LISTS];
49593-	/* Bitmap for which set bits correspond to non-empty purge lists. */
49594-	fb_group_t purge_bitmap[FB_NGROUPS(PSSET_NPURGE_LISTS)];
49595-	/* Slabs which are available to be hugified. */
49596-	hpdata_hugify_list_t to_hugify;
49597-};
49598-
49599-void psset_init(psset_t *psset);
49600-void psset_stats_accum(psset_stats_t *dst, psset_stats_t *src);
49601-
49602-/*
49603- * Begin or end updating the given pageslab's metadata.  While the pageslab is
49604- * being updated, it won't be returned from psset_fit calls.
49605- */
49606-void psset_update_begin(psset_t *psset, hpdata_t *ps);
49607-void psset_update_end(psset_t *psset, hpdata_t *ps);
49608-
49609-/* Analogous to the eset_fit; pick a hpdata to serve the request. */
49610-hpdata_t *psset_pick_alloc(psset_t *psset, size_t size);
49611-/* Pick one to purge. */
49612-hpdata_t *psset_pick_purge(psset_t *psset);
49613-/* Pick one to hugify. */
49614-hpdata_t *psset_pick_hugify(psset_t *psset);
49615-
49616-void psset_insert(psset_t *psset, hpdata_t *ps);
49617-void psset_remove(psset_t *psset, hpdata_t *ps);
49618-
49619-static inline size_t
49620-psset_npageslabs(psset_t *psset) {
49621-	return psset->merged_stats.npageslabs;
49622-}
49623-
49624-static inline size_t
49625-psset_nactive(psset_t *psset) {
49626-	return psset->merged_stats.nactive;
49627-}
49628-
49629-static inline size_t
49630-psset_ndirty(psset_t *psset) {
49631-	return psset->merged_stats.ndirty;
49632-}
49633-
49634-#endif /* JEMALLOC_INTERNAL_PSSET_H */
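
The purge-list layout described in the comments above is easy to make concrete: each page size class owns a pair of adjacent lists, hugified at 2*pszind and non-hugified at 2*pszind + 1, so the non-hugified list gets the higher (more eagerly purged) index, and the final pair is reserved for empty slabs with the roles swapped. A small sketch of just the non-empty mapping; the helper name is made up, and the real placement policy lives in psset.c.

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define PSSET_NPSIZES 64                      /* from psset.h above */
    #define PSSET_NPURGE_LISTS (2 * PSSET_NPSIZES)

    /* Illustrative only: index of the purge list for a non-empty slab. */
    static size_t
    purge_list_index(size_t pszind, bool hugified) {
        assert(pszind < PSSET_NPSIZES);
        return 2 * pszind + (hugified ? 0 : 1);
    }

    int
    main(void) {
        assert(purge_list_index(0, true) == 0);
        assert(purge_list_index(0, false) == 1);
        assert(purge_list_index(3, true) == 6);
        assert(purge_list_index(3, false) == 7);
        assert(PSSET_NPURGE_LISTS == 128);
        return 0;
    }
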
49635diff --git a/jemalloc/include/jemalloc/internal/public_namespace.sh b/jemalloc/include/jemalloc/internal/public_namespace.sh
49636deleted file mode 100755
49637index 4d415ba..0000000
49638--- a/jemalloc/include/jemalloc/internal/public_namespace.sh
49639+++ /dev/null
49640@@ -1,6 +0,0 @@
49641-#!/bin/sh
49642-
49643-for nm in `cat $1` ; do
49644-  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
49645-  echo "#define je_${n} JEMALLOC_N(${n})"
49646-done
49647diff --git a/jemalloc/include/jemalloc/internal/public_unnamespace.sh b/jemalloc/include/jemalloc/internal/public_unnamespace.sh
49648deleted file mode 100755
49649index 4239d17..0000000
49650--- a/jemalloc/include/jemalloc/internal/public_unnamespace.sh
49651+++ /dev/null
49652@@ -1,6 +0,0 @@
49653-#!/bin/sh
49654-
49655-for nm in `cat $1` ; do
49656-  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
49657-  echo "#undef je_${n}"
49658-done
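
For context on what these two generators produced: each input line is split at ':' and only the leading symbol name is kept, so a hypothetical input entry beginning with "malloc:" yields one #define from public_namespace.sh and one matching #undef from public_unnamespace.sh:

    /* public_namespace.sh output for a hypothetical "malloc:..." entry: */
    #define je_malloc JEMALLOC_N(malloc)

    /* public_unnamespace.sh output for the same entry: */
    #undef je_malloc
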
49659diff --git a/jemalloc/include/jemalloc/internal/ql.h b/jemalloc/include/jemalloc/internal/ql.h
49660deleted file mode 100644
49661index c7f52f8..0000000
49662--- a/jemalloc/include/jemalloc/internal/ql.h
49663+++ /dev/null
49664@@ -1,197 +0,0 @@
49665-#ifndef JEMALLOC_INTERNAL_QL_H
49666-#define JEMALLOC_INTERNAL_QL_H
49667-
49668-#include "jemalloc/internal/qr.h"
49669-
49670-/*
49671- * A linked-list implementation.
49672- *
49673- * This is built on top of the ring implementation, but that can be viewed as an
49674- * implementation detail (i.e. trying to advance past the tail of the list
49675- * doesn't wrap around).
49676- *
49677- * You define a struct like so:
49678- * typedef struct my_s my_t;
49679- * struct my_s {
49680- *   int data;
49681- *   ql_elm(my_t) my_link;
49682- * };
49683- *
49684- * // We wobble between "list" and "head" for this type; we're now mostly
49685- * // heading towards "list".
49686- * typedef ql_head(my_t) my_list_t;
49687- *
49688- * You then pass a my_list_t * for a_head arguments, a my_t * for a_elm
49689- * arguments, the token "my_link" for a_field arguments, and the token "my_t"
49690- * for a_type arguments.
49691- */
49692-
49693-/* List definitions. */
49694-#define ql_head(a_type)							\
49695-struct {								\
49696-	a_type *qlh_first;						\
49697-}
49698-
49699-/* Static initializer for an empty list. */
49700-#define ql_head_initializer(a_head) {NULL}
49701-
49702-/* The field definition. */
49703-#define ql_elm(a_type)	qr(a_type)
49704-
49705-/* A pointer to the first element in the list, or NULL if the list is empty. */
49706-#define ql_first(a_head) ((a_head)->qlh_first)
49707-
49708-/* Dynamically initializes a list. */
49709-#define ql_new(a_head) do {						\
49710-	ql_first(a_head) = NULL;					\
49711-} while (0)
49712-
49713-/*
49714- * Sets dest to be the contents of src (overwriting any elements there), leaving
49715- * src empty.
49716- */
49717-#define ql_move(a_head_dest, a_head_src) do {				\
49718-	ql_first(a_head_dest) = ql_first(a_head_src);			\
49719-	ql_new(a_head_src);						\
49720-} while (0)
49721-
49722-/* True if the list is empty, otherwise false. */
49723-#define ql_empty(a_head) (ql_first(a_head) == NULL)
49724-
49725-/*
49726- * Initializes a ql_elm.  Must be called even if the field is about to be
49727- * overwritten.
49728- */
49729-#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
49730-
49731-/*
49732- * Obtains the last item in the list.
49733- */
49734-#define ql_last(a_head, a_field)					\
49735-	(ql_empty(a_head) ? NULL : qr_prev(ql_first(a_head), a_field))
49736-
49737-/*
49738- * Gets a pointer to the next/prev element in the list.  Trying to advance past
49739- * the end or retreat before the beginning of the list returns NULL.
49740- */
49741-#define ql_next(a_head, a_elm, a_field)					\
49742-	((ql_last(a_head, a_field) != (a_elm))				\
49743-	    ? qr_next((a_elm), a_field)	: NULL)
49744-#define ql_prev(a_head, a_elm, a_field)					\
49745-	((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field)	\
49746-				       : NULL)
49747-
49748-/* Inserts a_elm before a_qlelm in the list. */
49749-#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do {		\
49750-	qr_before_insert((a_qlelm), (a_elm), a_field);			\
49751-	if (ql_first(a_head) == (a_qlelm)) {				\
49752-		ql_first(a_head) = (a_elm);				\
49753-	}								\
49754-} while (0)
49755-
49756-/* Inserts a_elm after a_qlelm in the list. */
49757-#define ql_after_insert(a_qlelm, a_elm, a_field)			\
49758-	qr_after_insert((a_qlelm), (a_elm), a_field)
49759-
49760-/* Inserts a_elm as the first item in the list. */
49761-#define ql_head_insert(a_head, a_elm, a_field) do {			\
49762-	if (!ql_empty(a_head)) {					\
49763-		qr_before_insert(ql_first(a_head), (a_elm), a_field);	\
49764-	}								\
49765-	ql_first(a_head) = (a_elm);					\
49766-} while (0)
49767-
49768-/* Inserts a_elm as the last item in the list. */
49769-#define ql_tail_insert(a_head, a_elm, a_field) do {			\
49770-	if (!ql_empty(a_head)) {					\
49771-		qr_before_insert(ql_first(a_head), (a_elm), a_field);	\
49772-	}								\
49773-	ql_first(a_head) = qr_next((a_elm), a_field);			\
49774-} while (0)
49775-
49776-/*
49777- * Given lists a = [a_1, ..., a_n] and b = [b_1, ..., b_n], results in:
49778- * a = [a_1, ..., a_n, b_1, ..., b_n] and b = [].
49779- */
49780-#define ql_concat(a_head_a, a_head_b, a_field) do {			\
49781-	if (ql_empty(a_head_a)) {					\
49782-		ql_move(a_head_a, a_head_b);				\
49783-	} else if (!ql_empty(a_head_b)) {				\
49784-		qr_meld(ql_first(a_head_a), ql_first(a_head_b),		\
49785-		    a_field);						\
49786-		ql_new(a_head_b);					\
49787-	}								\
49788-} while (0)
49789-
49790-/* Removes a_elm from the list. */
49791-#define ql_remove(a_head, a_elm, a_field) do {				\
49792-	if (ql_first(a_head) == (a_elm)) {				\
49793-		ql_first(a_head) = qr_next(ql_first(a_head), a_field);	\
49794-	}								\
49795-	if (ql_first(a_head) != (a_elm)) {				\
49796-		qr_remove((a_elm), a_field);				\
49797-	} else {							\
49798-		ql_new(a_head);						\
49799-	}								\
49800-} while (0)
49801-
49802-/* Removes the first item in the list. */
49803-#define ql_head_remove(a_head, a_type, a_field) do {			\
49804-	a_type *t = ql_first(a_head);					\
49805-	ql_remove((a_head), t, a_field);				\
49806-} while (0)
49807-
49808-/* Removes the last item in the list. */
49809-#define ql_tail_remove(a_head, a_type, a_field) do {			\
49810-	a_type *t = ql_last(a_head, a_field);				\
49811-	ql_remove((a_head), t, a_field);				\
49812-} while (0)
49813-
49814-/*
49815- * Given a = [a_1, a_2, ..., a_n-1, a_n, a_n+1, ...],
49816- * ql_split(a, a_n, b, some_field) results in
49817- *   a = [a_1, a_2, ..., a_n-1]
49818- * and replaces b's contents with:
49819- *   b = [a_n, a_n+1, ...]
49820- */
49821-#define ql_split(a_head_a, a_elm, a_head_b, a_field) do {		\
49822-	if (ql_first(a_head_a) == (a_elm)) {				\
49823-		ql_move(a_head_b, a_head_a);				\
49824-	} else {							\
49825-		qr_split(ql_first(a_head_a), (a_elm), a_field);		\
49826-		ql_first(a_head_b) = (a_elm);				\
49827-	}								\
49828-} while (0)
49829-
49830-/*
49831- * An optimized version of:
49832- *	a_type *t = ql_first(a_head);
49833- *	ql_remove((a_head), t, a_field);
49834- *	ql_tail_insert((a_head), t, a_field);
49835- */
49836-#define ql_rotate(a_head, a_field) do {					\
49837-	ql_first(a_head) = qr_next(ql_first(a_head), a_field);		\
49838-} while (0)
49839-
49840-/*
49841- * Helper macro to iterate over each element in a list in order, starting from
49842- * the head (or in reverse order, starting from the tail).  The usage is
49843- * (assuming my_t and my_list_t defined as above).
49844- *
49845- * int sum(my_list_t *list) {
49846- *   int sum = 0;
49847- *   my_t *iter;
49848- *   ql_foreach(iter, list, link) {
49849- *     sum += iter->data;
49850- *   }
49851- *   return sum;
49852- * }
49853- */
49854-
49855-#define ql_foreach(a_var, a_head, a_field)				\
49856-	qr_foreach((a_var), ql_first(a_head), a_field)
49857-
49858-#define ql_reverse_foreach(a_var, a_head, a_field)			\
49859-	qr_reverse_foreach((a_var), ql_first(a_head), a_field)
49860-
49861-#endif /* JEMALLOC_INTERNAL_QL_H */
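
Putting the macros above together, here is a short usage sketch in the spirit of the header comment's my_t example. It assumes the jemalloc/include directory is on the include path so that ql.h (and the qr.h it builds on) can still be found; everything else is standard C.

    #include <assert.h>

    #include "jemalloc/internal/ql.h"   /* the header removed above */

    typedef struct my_s my_t;
    struct my_s {
        int data;
        ql_elm(my_t) my_link;
    };
    typedef ql_head(my_t) my_list_t;

    int
    main(void) {
        my_list_t list = ql_head_initializer(list);
        my_t a = { .data = 1 };
        my_t b = { .data = 2 };

        /* Linkage fields must be initialized before first use. */
        ql_elm_new(&a, my_link);
        ql_elm_new(&b, my_link);

        ql_tail_insert(&list, &a, my_link);
        ql_tail_insert(&list, &b, my_link);
        assert(ql_first(&list) == &a);
        assert(ql_last(&list, my_link) == &b);

        int sum = 0;
        my_t *iter;
        ql_foreach(iter, &list, my_link) {
            sum += iter->data;
        }
        assert(sum == 3);

        ql_remove(&list, &a, my_link);
        assert(ql_first(&list) == &b);
        return 0;
    }
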
49862diff --git a/jemalloc/include/jemalloc/internal/qr.h b/jemalloc/include/jemalloc/internal/qr.h
49863deleted file mode 100644
49864index ece4f55..0000000
49865--- a/jemalloc/include/jemalloc/internal/qr.h
49866+++ /dev/null
49867@@ -1,140 +0,0 @@
49868-#ifndef JEMALLOC_INTERNAL_QR_H
49869-#define JEMALLOC_INTERNAL_QR_H
49870-
49871-/*
49872- * A ring implementation based on an embedded circular doubly-linked list.
49873- *
49874- * You define your struct like so:
49875- *
49876- * typedef struct my_s my_t;
49877- * struct my_s {
49878- *   int data;
49879- *   qr(my_t) my_link;
49880- * };
49881- *
49882- * And then pass a my_t * into macros for a_qr arguments, and the token
49883- * "my_link" into a_field fields.
49884- */
49885-
49886-/* Ring definitions. */
49887-#define qr(a_type)							\
49888-struct {								\
49889-	a_type	*qre_next;						\
49890-	a_type	*qre_prev;						\
49891-}
49892-
49893-/*
49894- * Initialize a qr link.  Every link must be initialized before being used, even
49895- * if that initialization is going to be immediately overwritten (say, by being
49896- * passed into an insertion macro).
49897- */
49898-#define qr_new(a_qr, a_field) do {					\
49899-	(a_qr)->a_field.qre_next = (a_qr);				\
49900-	(a_qr)->a_field.qre_prev = (a_qr);				\
49901-} while (0)
49902-
49903-/*
49904- * Go forwards or backwards in the ring.  Note that (the ring being circular) this
49905- * always succeeds -- you just keep looping around and around the ring if you
49906- * chase pointers without end.
49907- */
49908-#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
49909-#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
49910-
49911-/*
49912- * Given two rings:
49913- *    a -> a_1 -> ... -> a_n --
49914- *    ^                       |
49915- *    |------------------------
49916- *
49917- *    b -> b_1 -> ... -> b_n --
49918- *    ^                       |
49919- *    |------------------------
49920- *
49921- * Results in the ring:
49922- *   a -> a_1 -> ... -> a_n -> b -> b_1 -> ... -> b_n --
49923- *   ^                                                 |
49924- *   |-------------------------------------------------|
49925- *
49926- * a_qr_a can directly be a qr_next() macro, but a_qr_b cannot.
49927- */
49928-#define qr_meld(a_qr_a, a_qr_b, a_field) do {				\
49929-	(a_qr_b)->a_field.qre_prev->a_field.qre_next =			\
49930-	    (a_qr_a)->a_field.qre_prev;					\
49931-	(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev;	\
49932-	(a_qr_b)->a_field.qre_prev =					\
49933-	    (a_qr_b)->a_field.qre_prev->a_field.qre_next;		\
49934-	(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_a);	\
49935-	(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_b);	\
49936-} while (0)
49937-
49938-/*
49939- * Logically, this is just a meld.  The intent, though, is that a_qr is a
49940- * single-element ring being inserted before a_qrelm, so that "before" has a
49941- * more obvious interpretation than meld.
49942- */
49943-#define qr_before_insert(a_qrelm, a_qr, a_field)			\
49944-	qr_meld((a_qrelm), (a_qr), a_field)
49945-
49946-/* Ditto, but inserting after rather than before. */
49947-#define qr_after_insert(a_qrelm, a_qr, a_field)				\
49948-	qr_before_insert(qr_next(a_qrelm, a_field), (a_qr), a_field)
49949-
49950-/*
49951- * Inverts meld; given the ring:
49952- *   a -> a_1 -> ... -> a_n -> b -> b_1 -> ... -> b_n --
49953- *   ^                                                 |
49954- *   |-------------------------------------------------|
49955- *
49956- * Results in two rings:
49957- *    a -> a_1 -> ... -> a_n --
49958- *    ^                       |
49959- *    |------------------------
49960- *
49961- *    b -> b_1 -> ... -> b_n --
49962- *    ^                       |
49963- *    |------------------------
49964- *
49965- * qr_meld() and qr_split() are functionally equivalent, so there's no need to
49966- * have two copies of the code.
49967- */
49968-#define qr_split(a_qr_a, a_qr_b, a_field)				\
49969-	qr_meld((a_qr_a), (a_qr_b), a_field)
49970-
49971-/*
49972- * Splits off a_qr from the rest of its ring, so that it becomes a
49973- * single-element ring.
49974- */
49975-#define qr_remove(a_qr, a_field)					\
49976-	qr_split(qr_next(a_qr, a_field), (a_qr), a_field)
49977-
49978-/*
49979- * Helper macro to iterate over each element in a ring exactly once, starting
49980- * with a_qr.  The usage is (assuming my_t defined as above):
49981- *
49982- * int sum(my_t *item) {
49983- *   int sum = 0;
49984- *   my_t *iter;
49985- *   qr_foreach(iter, item, link) {
49986- *     sum += iter->data;
49987- *   }
49988- *   return sum;
49989- * }
49990- */
49991-#define qr_foreach(var, a_qr, a_field)					\
49992-	for ((var) = (a_qr);						\
49993-	    (var) != NULL;						\
49994-	    (var) = (((var)->a_field.qre_next != (a_qr))		\
49995-	    ? (var)->a_field.qre_next : NULL))
49996-
49997-/*
49998- * The same (and with the same usage) as qr_foreach, but in the opposite order,
49999- * ending with a_qr.
50000- */
50001-#define qr_reverse_foreach(var, a_qr, a_field)				\
50002-	for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL;	\
50003-	    (var) != NULL;						\
50004-	    (var) = (((var) != (a_qr))					\
50005-	    ? (var)->a_field.qre_prev : NULL))
50006-
50007-#endif /* JEMALLOC_INTERNAL_QR_H */
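
A short, self-contained sketch of the ring semantics documented above (it assumes jemalloc/include is on the include path so the removed qr.h can still be found): every element starts as a single-element ring, insertion melds rings, traversal wraps around, and removal splits an element back out into a singleton.

    #include <assert.h>

    #include "jemalloc/internal/qr.h"   /* the header removed above */

    typedef struct node_s node_t;
    struct node_s {
        int data;
        qr(node_t) link;
    };

    int
    main(void) {
        node_t a = { .data = 1 };
        node_t b = { .data = 2 };
        node_t c = { .data = 3 };

        /* Every element must start life as a single-element ring. */
        qr_new(&a, link);
        qr_new(&b, link);
        qr_new(&c, link);

        /* Build the ring a -> b -> c -> a. */
        qr_after_insert(&a, &b, link);
        qr_after_insert(&b, &c, link);
        assert(qr_next(&a, link) == &b);
        assert(qr_next(&c, link) == &a);  /* circular: wraps back around */
        assert(qr_prev(&a, link) == &c);

        /* Removing b turns it back into a one-element ring. */
        qr_remove(&b, link);
        assert(qr_next(&a, link) == &c);
        assert(qr_next(&b, link) == &b);
        return 0;
    }
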
50008diff --git a/jemalloc/include/jemalloc/internal/quantum.h b/jemalloc/include/jemalloc/internal/quantum.h
50009deleted file mode 100644
50010index c22d753..0000000
50011--- a/jemalloc/include/jemalloc/internal/quantum.h
50012+++ /dev/null
50013@@ -1,87 +0,0 @@
50014-#ifndef JEMALLOC_INTERNAL_QUANTUM_H
50015-#define JEMALLOC_INTERNAL_QUANTUM_H
50016-
50017-/*
50018- * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
50019- * classes).
50020- */
50021-#ifndef LG_QUANTUM
50022-#  if (defined(__i386__) || defined(_M_IX86))
50023-#    define LG_QUANTUM		4
50024-#  endif
50025-#  ifdef __ia64__
50026-#    define LG_QUANTUM		4
50027-#  endif
50028-#  ifdef __alpha__
50029-#    define LG_QUANTUM		4
50030-#  endif
50031-#  if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__))
50032-#    define LG_QUANTUM		4
50033-#  endif
50034-#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
50035-#    define LG_QUANTUM		4
50036-#  endif
50037-#  ifdef __arm__
50038-#    define LG_QUANTUM		3
50039-#  endif
50040-#  ifdef __aarch64__
50041-#    define LG_QUANTUM		4
50042-#  endif
50043-#  ifdef __hppa__
50044-#    define LG_QUANTUM		4
50045-#  endif
50046-#  ifdef __loongarch__
50047-#    define LG_QUANTUM		4
50048-#  endif
50049-#  ifdef __m68k__
50050-#    define LG_QUANTUM		3
50051-#  endif
50052-#  ifdef __mips__
50053-#    if defined(__mips_n32) || defined(__mips_n64)
50054-#      define LG_QUANTUM		4
50055-#    else
50056-#      define LG_QUANTUM		3
50057-#    endif
50058-#  endif
50059-#  ifdef __nios2__
50060-#    define LG_QUANTUM		3
50061-#  endif
50062-#  ifdef __or1k__
50063-#    define LG_QUANTUM		3
50064-#  endif
50065-#  ifdef __powerpc__
50066-#    define LG_QUANTUM		4
50067-#  endif
50068-#  if defined(__riscv) || defined(__riscv__)
50069-#    define LG_QUANTUM		4
50070-#  endif
50071-#  ifdef __s390__
50072-#    define LG_QUANTUM		4
50073-#  endif
50074-#  if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \
50075-	defined(__SH4_SINGLE_ONLY__))
50076-#    define LG_QUANTUM		4
50077-#  endif
50078-#  ifdef __tile__
50079-#    define LG_QUANTUM		4
50080-#  endif
50081-#  ifdef __le32__
50082-#    define LG_QUANTUM		4
50083-#  endif
50084-#  ifdef __arc__
50085-#    define LG_QUANTUM		3
50086-#  endif
50087-#  ifndef LG_QUANTUM
50088-#    error "Unknown minimum alignment for architecture; specify via "
50089-	 "--with-lg-quantum"
50090-#  endif
50091-#endif
50092-
50093-#define QUANTUM			((size_t)(1U << LG_QUANTUM))
50094-#define QUANTUM_MASK		(QUANTUM - 1)
50095-
50096-/* Return the smallest quantum multiple that is >= a. */
50097-#define QUANTUM_CEILING(a)						\
50098-	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
50099-
50100-#endif /* JEMALLOC_INTERNAL_QUANTUM_H */
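
QUANTUM_CEILING() above is the standard round-up-to-a-power-of-two-multiple trick. A self-contained check, with LG_QUANTUM pinned to 4 (the x86-64 value from the table) purely for the sake of the example:

    #include <assert.h>
    #include <stddef.h>

    #define LG_QUANTUM          4     /* assumed: the x86-64 entry above */
    #define QUANTUM             ((size_t)(1U << LG_QUANTUM))
    #define QUANTUM_MASK        (QUANTUM - 1)
    #define QUANTUM_CEILING(a)  (((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

    int
    main(void) {
        assert(QUANTUM == 16);
        assert(QUANTUM_CEILING(1) == 16);   /* round up to one quantum */
        assert(QUANTUM_CEILING(16) == 16);  /* already a multiple */
        assert(QUANTUM_CEILING(17) == 32);  /* next multiple of 16 */
        return 0;
    }
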
50101diff --git a/jemalloc/include/jemalloc/internal/rb.h b/jemalloc/include/jemalloc/internal/rb.h
50102deleted file mode 100644
50103index a9a51cb..0000000
50104--- a/jemalloc/include/jemalloc/internal/rb.h
50105+++ /dev/null
50106@@ -1,1856 +0,0 @@
50107-#ifndef JEMALLOC_INTERNAL_RB_H
50108-#define JEMALLOC_INTERNAL_RB_H
50109-
50110-/*-
50111- *******************************************************************************
50112- *
50113- * cpp macro implementation of left-leaning 2-3 red-black trees.  Parent
50114- * pointers are not used, and color bits are stored in the least significant
50115- * bit of right-child pointers (if RB_COMPACT is defined), thus making node
50116- * linkage as compact as is possible for red-black trees.
50117- *
50118- * Usage:
50119- *
50120- *   #include <stdint.h>
50121- *   #include <stdbool.h>
50122- *   #define NDEBUG // (Optional, see assert(3).)
50123- *   #include <assert.h>
50124- *   #define RB_COMPACT // (Optional, embed color bits in right-child pointers.)
50125- *   #include <rb.h>
50126- *   ...
50127- *
50128- *******************************************************************************
50129- */
50130-
50131-#ifndef __PGI
50132-#define RB_COMPACT
50133-#endif
50134-
50135-/*
50136- * Each node in the RB tree consumes at least 1 byte of space (for the linkage,
50137- * if nothing else), so there are at most 2^(sizeof(void *) << 3) rb tree nodes
50138- * in any process (and thus at most that many nodes in any rb tree).
50139- * The choice of algorithm bounds the depth of a tree to twice the binary log of
50140- * the number of elements in the tree; the following bound follows.
50141- */
50142-#define RB_MAX_DEPTH (sizeof(void *) << 4)
50143-
50144-#ifdef RB_COMPACT
50145-/* Node structure. */
50146-#define rb_node(a_type)							\
50147-struct {								\
50148-    a_type *rbn_left;							\
50149-    a_type *rbn_right_red;						\
50150-}
50151-#else
50152-#define rb_node(a_type)							\
50153-struct {								\
50154-    a_type *rbn_left;							\
50155-    a_type *rbn_right;							\
50156-    bool rbn_red;							\
50157-}
50158-#endif
50159-
50160-/* Root structure. */
50161-#define rb_tree(a_type)							\
50162-struct {								\
50163-    a_type *rbt_root;							\
50164-}
50165-
50166-/* Left accessors. */
50167-#define rbtn_left_get(a_type, a_field, a_node)				\
50168-    ((a_node)->a_field.rbn_left)
50169-#define rbtn_left_set(a_type, a_field, a_node, a_left) do {		\
50170-    (a_node)->a_field.rbn_left = a_left;				\
50171-} while (0)
50172-
50173-#ifdef RB_COMPACT
50174-/* Right accessors. */
50175-#define rbtn_right_get(a_type, a_field, a_node)				\
50176-    ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red)		\
50177-      & ((ssize_t)-2)))
50178-#define rbtn_right_set(a_type, a_field, a_node, a_right) do {		\
50179-    (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right)	\
50180-      | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1)));	\
50181-} while (0)
50182-
50183-/* Color accessors. */
50184-#define rbtn_red_get(a_type, a_field, a_node)				\
50185-    ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red)		\
50186-      & ((size_t)1)))
50187-#define rbtn_color_set(a_type, a_field, a_node, a_red) do {		\
50188-    (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t)		\
50189-      (a_node)->a_field.rbn_right_red) & ((ssize_t)-2))			\
50190-      | ((ssize_t)a_red));						\
50191-} while (0)
50192-#define rbtn_red_set(a_type, a_field, a_node) do {			\
50193-    (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t)		\
50194-      (a_node)->a_field.rbn_right_red) | ((size_t)1));			\
50195-} while (0)
50196-#define rbtn_black_set(a_type, a_field, a_node) do {			\
50197-    (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t)		\
50198-      (a_node)->a_field.rbn_right_red) & ((ssize_t)-2));		\
50199-} while (0)
50200-
50201-/* Node initializer. */
50202-#define rbt_node_new(a_type, a_field, a_rbt, a_node) do {		\
50203-    /* Bookkeeping bit cannot be used by node pointer. */		\
50204-    assert(((uintptr_t)(a_node) & 0x1) == 0);				\
50205-    rbtn_left_set(a_type, a_field, (a_node), NULL);	\
50206-    rbtn_right_set(a_type, a_field, (a_node), NULL);	\
50207-    rbtn_red_set(a_type, a_field, (a_node));				\
50208-} while (0)
50209-#else
50210-/* Right accessors. */
50211-#define rbtn_right_get(a_type, a_field, a_node)				\
50212-    ((a_node)->a_field.rbn_right)
50213-#define rbtn_right_set(a_type, a_field, a_node, a_right) do {		\
50214-    (a_node)->a_field.rbn_right = a_right;				\
50215-} while (0)
50216-
50217-/* Color accessors. */
50218-#define rbtn_red_get(a_type, a_field, a_node)				\
50219-    ((a_node)->a_field.rbn_red)
50220-#define rbtn_color_set(a_type, a_field, a_node, a_red) do {		\
50221-    (a_node)->a_field.rbn_red = (a_red);				\
50222-} while (0)
50223-#define rbtn_red_set(a_type, a_field, a_node) do {			\
50224-    (a_node)->a_field.rbn_red = true;					\
50225-} while (0)
50226-#define rbtn_black_set(a_type, a_field, a_node) do {			\
50227-    (a_node)->a_field.rbn_red = false;					\
50228-} while (0)
50229-
50230-/* Node initializer. */
50231-#define rbt_node_new(a_type, a_field, a_rbt, a_node) do {		\
50232-    rbtn_left_set(a_type, a_field, (a_node), NULL);	\
50233-    rbtn_right_set(a_type, a_field, (a_node), NULL);	\
50234-    rbtn_red_set(a_type, a_field, (a_node));				\
50235-} while (0)
50236-#endif
50237-
50238-/* Tree initializer. */
50239-#define rb_new(a_type, a_field, a_rbt) do {				\
50240-    (a_rbt)->rbt_root = NULL;						\
50241-} while (0)
50242-
50243-/* Internal utility macros. */
50244-#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do {		\
50245-    (r_node) = (a_root);						\
50246-    if ((r_node) != NULL) {						\
50247-	for (;								\
50248-	  rbtn_left_get(a_type, a_field, (r_node)) != NULL;		\
50249-	  (r_node) = rbtn_left_get(a_type, a_field, (r_node))) {	\
50250-	}								\
50251-    }									\
50252-} while (0)
50253-
50254-#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do {		\
50255-    (r_node) = (a_root);						\
50256-    if ((r_node) != NULL) {						\
50257-	for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL;	\
50258-	  (r_node) = rbtn_right_get(a_type, a_field, (r_node))) {	\
50259-	}								\
50260-    }									\
50261-} while (0)
50262-
50263-#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do {		\
50264-    (r_node) = rbtn_right_get(a_type, a_field, (a_node));		\
50265-    rbtn_right_set(a_type, a_field, (a_node),				\
50266-      rbtn_left_get(a_type, a_field, (r_node)));			\
50267-    rbtn_left_set(a_type, a_field, (r_node), (a_node));			\
50268-} while (0)
50269-
50270-#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do {		\
50271-    (r_node) = rbtn_left_get(a_type, a_field, (a_node));		\
50272-    rbtn_left_set(a_type, a_field, (a_node),				\
50273-      rbtn_right_get(a_type, a_field, (r_node)));			\
50274-    rbtn_right_set(a_type, a_field, (r_node), (a_node));		\
50275-} while (0)
50276-
50277-#define rb_summarized_only_false(...)
50278-#define rb_summarized_only_true(...) __VA_ARGS__
50279-#define rb_empty_summarize(a_node, a_lchild, a_rchild) false
50280-
50281-/*
50282- * The rb_proto() and rb_summarized_proto() macros generate function prototypes
50283- * that correspond to the functions generated by an equivalently parameterized
50284- * call to rb_gen() or rb_summarized_gen(), respectively.
50285- */
50286-
50287-#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type)			\
50288-    rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, false)
50289-#define rb_summarized_proto(a_attr, a_prefix, a_rbt_type, a_type)	\
50290-    rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, true)
50291-#define rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type,		\
50292-    a_is_summarized)							\
50293-a_attr void								\
50294-a_prefix##new(a_rbt_type *rbtree);					\
50295-a_attr bool								\
50296-a_prefix##empty(a_rbt_type *rbtree);					\
50297-a_attr a_type *								\
50298-a_prefix##first(a_rbt_type *rbtree);					\
50299-a_attr a_type *								\
50300-a_prefix##last(a_rbt_type *rbtree);					\
50301-a_attr a_type *								\
50302-a_prefix##next(a_rbt_type *rbtree, a_type *node);			\
50303-a_attr a_type *								\
50304-a_prefix##prev(a_rbt_type *rbtree, a_type *node);			\
50305-a_attr a_type *								\
50306-a_prefix##search(a_rbt_type *rbtree, const a_type *key);		\
50307-a_attr a_type *								\
50308-a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key);		\
50309-a_attr a_type *								\
50310-a_prefix##psearch(a_rbt_type *rbtree, const a_type *key);		\
50311-a_attr void								\
50312-a_prefix##insert(a_rbt_type *rbtree, a_type *node);			\
50313-a_attr void								\
50314-a_prefix##remove(a_rbt_type *rbtree, a_type *node);			\
50315-a_attr a_type *								\
50316-a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)(	\
50317-  a_rbt_type *, a_type *, void *), void *arg);				\
50318-a_attr a_type *								\
50319-a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start,		\
50320-  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg);		\
50321-a_attr void								\
50322-a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *),	\
50323-  void *arg);								\
50324-/* Extended API */							\
50325-rb_summarized_only_##a_is_summarized(					\
50326-a_attr void								\
50327-a_prefix##update_summaries(a_rbt_type *rbtree, a_type *node);		\
50328-a_attr bool								\
50329-a_prefix##empty_filtered(a_rbt_type *rbtree,				\
50330-    bool (*filter_node)(void *, a_type *),				\
50331-    bool (*filter_subtree)(void *, a_type *),				\
50332-    void *filter_ctx);							\
50333-a_attr a_type *								\
50334-a_prefix##first_filtered(a_rbt_type *rbtree,				\
50335-    bool (*filter_node)(void *, a_type *),				\
50336-    bool (*filter_subtree)(void *, a_type *),				\
50337-    void *filter_ctx);							\
50338-a_attr a_type *								\
50339-a_prefix##last_filtered(a_rbt_type *rbtree,				\
50340-    bool (*filter_node)(void *, a_type *),				\
50341-    bool (*filter_subtree)(void *, a_type *),				\
50342-    void *filter_ctx);							\
50343-a_attr a_type *								\
50344-a_prefix##next_filtered(a_rbt_type *rbtree, a_type *node,		\
50345-    bool (*filter_node)(void *, a_type *),				\
50346-    bool (*filter_subtree)(void *, a_type *),				\
50347-    void *filter_ctx);							\
50348-a_attr a_type *								\
50349-a_prefix##prev_filtered(a_rbt_type *rbtree, a_type *node,		\
50350-    bool (*filter_node)(void *, a_type *),				\
50351-    bool (*filter_subtree)(void *, a_type *),				\
50352-    void *filter_ctx);							\
50353-a_attr a_type *								\
50354-a_prefix##search_filtered(a_rbt_type *rbtree, const a_type *key,	\
50355-    bool (*filter_node)(void *, a_type *),				\
50356-    bool (*filter_subtree)(void *, a_type *),				\
50357-    void *filter_ctx);							\
50358-a_attr a_type *								\
50359-a_prefix##nsearch_filtered(a_rbt_type *rbtree, const a_type *key,	\
50360-    bool (*filter_node)(void *, a_type *),				\
50361-    bool (*filter_subtree)(void *, a_type *),				\
50362-    void *filter_ctx);							\
50363-a_attr a_type *								\
50364-a_prefix##psearch_filtered(a_rbt_type *rbtree, const a_type *key,	\
50365-    bool (*filter_node)(void *, a_type *),				\
50366-    bool (*filter_subtree)(void *, a_type *),				\
50367-    void *filter_ctx);							\
50368-a_attr a_type *								\
50369-a_prefix##iter_filtered(a_rbt_type *rbtree, a_type *start,		\
50370-    a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg,		\
50371-    bool (*filter_node)(void *, a_type *),				\
50372-    bool (*filter_subtree)(void *, a_type *),				\
50373-    void *filter_ctx);							\
50374-a_attr a_type *								\
50375-a_prefix##reverse_iter_filtered(a_rbt_type *rbtree, a_type *start,	\
50376-  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg,		\
50377-    bool (*filter_node)(void *, a_type *),				\
50378-    bool (*filter_subtree)(void *, a_type *),				\
50379-    void *filter_ctx);							\
50380-)
50381-
50382-/*
50383- * The rb_gen() macro generates a type-specific red-black tree implementation,
50384- * based on the above cpp macros.
50385- * Arguments:
50386- *
50387- *   a_attr:
50388- *     Function attribute for generated functions (ex: static).
50389- *   a_prefix:
50390- *     Prefix for generated functions (ex: ex_).
50391- *   a_rb_type:
50392- *     Type for red-black tree data structure (ex: ex_t).
50393- *   a_type:
50394- *     Type for red-black tree node data structure (ex: ex_node_t).
50395- *   a_field:
50396- *     Name of red-black tree node linkage (ex: ex_link).
50397- *   a_cmp:
50398- *     Node comparison function name, with the following prototype:
50399- *
50400- *     int a_cmp(a_type *a_node, a_type *a_other);
50401- *                        ^^^^^^
50402- *                        or a_key
50403- *     Interpretation of comparison function return values:
50404- *       -1 : a_node <  a_other
50405- *        0 : a_node == a_other
50406- *        1 : a_node >  a_other
50407- *     In all cases, the a_node or a_key macro argument is the first argument to
50408- *     the comparison function, which makes it possible to write comparison
50409- *     functions that treat the first argument specially.  a_cmp must be a total
50410- *     order on values inserted into the tree -- duplicates are not allowed.
50411- *
50412- * Assuming the following setup:
50413- *
50414- *   typedef struct ex_node_s ex_node_t;
50415- *   struct ex_node_s {
50416- *       rb_node(ex_node_t) ex_link;
50417- *   };
50418- *   typedef rb_tree(ex_node_t) ex_t;
50419- *   rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)
50420- *
50421- * The following API is generated:
50422- *
50423- *   static void
50424- *   ex_new(ex_t *tree);
50425- *       Description: Initialize a red-black tree structure.
50426- *       Args:
50427- *         tree: Pointer to an uninitialized red-black tree object.
50428- *
50429- *   static bool
50430- *   ex_empty(ex_t *tree);
50431- *       Description: Determine whether tree is empty.
50432- *       Args:
50433- *         tree: Pointer to an initialized red-black tree object.
50434- *       Ret: True if tree is empty, false otherwise.
50435- *
50436- *   static ex_node_t *
50437- *   ex_first(ex_t *tree);
50438- *   static ex_node_t *
50439- *   ex_last(ex_t *tree);
50440- *       Description: Get the first/last node in tree.
50441- *       Args:
50442- *         tree: Pointer to an initialized red-black tree object.
50443- *       Ret: First/last node in tree, or NULL if tree is empty.
50444- *
50445- *   static ex_node_t *
50446- *   ex_next(ex_t *tree, ex_node_t *node);
50447- *   static ex_node_t *
50448- *   ex_prev(ex_t *tree, ex_node_t *node);
50449- *       Description: Get node's successor/predecessor.
50450- *       Args:
50451- *         tree: Pointer to an initialized red-black tree object.
50452- *         node: A node in tree.
50453- *       Ret: node's successor/predecessor in tree, or NULL if node is
50454- *            last/first.
50455- *
50456- *   static ex_node_t *
50457- *   ex_search(ex_t *tree, const ex_node_t *key);
50458- *       Description: Search for node that matches key.
50459- *       Args:
50460- *         tree: Pointer to an initialized red-black tree object.
50461- *         key : Search key.
50462- *       Ret: Node in tree that matches key, or NULL if no match.
50463- *
50464- *   static ex_node_t *
50465- *   ex_nsearch(ex_t *tree, const ex_node_t *key);
50466- *   static ex_node_t *
50467- *   ex_psearch(ex_t *tree, const ex_node_t *key);
50468- *       Description: Search for node that matches key.  If no match is found,
50469- *                    return what would be key's successor/predecessor, were
50470- *                    key in tree.
50471- *       Args:
50472- *         tree: Pointer to an initialized red-black tree object.
50473- *         key : Search key.
50474- *       Ret: Node in tree that matches key, or if no match, hypothetical node's
50475- *            successor/predecessor (NULL if no successor/predecessor).
50476- *
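For instance, ex_nsearch gives a lower-bound style lookup. A sketch, again assuming the unsigned `data` key field; the key node lives on the stack and only its key field needs to be filled in:

static ex_node_t *
ex_lower_bound(ex_t *tree, unsigned lo) {
    ex_node_t key;
    key.data = lo;
    /* Smallest node with data >= lo, or NULL if every node is smaller. */
    return ex_nsearch(tree, &key);
}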
50477- *   static void
50478- *   ex_insert(ex_t *tree, ex_node_t *node);
50479- *       Description: Insert node into tree.
50480- *       Args:
50481- *         tree: Pointer to an initialized red-black tree object.
50482- *         node: Node to be inserted into tree.
50483- *
50484- *   static void
50485- *   ex_remove(ex_t *tree, ex_node_t *node);
50486- *       Description: Remove node from tree.
50487- *       Args:
50488- *         tree: Pointer to an initialized red-black tree object.
50489- *         node: Node in tree to be removed.
50490- *
50491- *   static ex_node_t *
50492- *   ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
50493- *     ex_node_t *, void *), void *arg);
50494- *   static ex_node_t *
50495- *   ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
50496- *     ex_node_t *, void *), void *arg);
50497- *       Description: Iterate forward/backward over tree, starting at node.  If
50498- *                    tree is modified, iteration must be immediately
50499- *                    terminated by the callback function that causes the
50500- *                    modification.
50501- *       Args:
50502- *         tree : Pointer to an initialized red-black tree object.
50503- *         start: Node at which to start iteration, or NULL to start at
50504- *                first/last node.
50505- *         cb   : Callback function, which is called for each node during
50506- *                iteration.  Under normal circumstances the callback function
50507- *                should return NULL, which causes iteration to continue.  If a
50508- *                callback function returns non-NULL, iteration is immediately
50509- *                terminated and the non-NULL return value is returned by the
50510- *                iterator.  This is useful for re-starting iteration after
50511- *                modifying tree.
50512- *         arg  : Opaque pointer passed to cb().
50513- *       Ret: NULL if iteration completed, or the non-NULL callback return value
50514- *            that caused termination of the iteration.
50515- *
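As a sketch of the early-termination convention (same assumed `data` field): a callback can return the current node to stop the walk and hand that node back to the caller.

static ex_node_t *
first_above_cb(ex_t *tree, ex_node_t *node, void *arg) {
    (void)tree;
    /* A non-NULL return stops the iteration and becomes ex_iter()'s result. */
    return node->data > *(unsigned *)arg ? node : NULL;
}

static ex_node_t *
first_above(ex_t *tree, unsigned threshold) {
    return ex_iter(tree, NULL, &first_above_cb, &threshold);
}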
50516- *   static void
50517- *   ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg);
50518- *       Description: Iterate over the tree with post-order traversal, remove
50519- *                    each node, and run the callback if non-null.  This is
50520- *                    used for destroying a tree without paying the cost to
50521- *                    rebalance it.  The tree must not be otherwise altered
50522- *                    during traversal.
50523- *       Args:
50524- *         tree: Pointer to an initialized red-black tree object.
50525- *         cb  : Callback function, which, if non-null, is called for each node
50526- *               during iteration.  There is no way to stop iteration once it
50527- *               has begun.
50528- *         arg : Opaque pointer passed to cb().
50529- *
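For heap-allocated nodes, ex_destroy pairs naturally with a freeing callback. A sketch, assuming the nodes came from malloc and <stdlib.h> is included:

static void
node_free_cb(ex_node_t *node, void *arg) {
    (void)arg;
    free(node);                 /* already unlinked by ex_destroy() */
}

static void
ex_free_all(ex_t *tree) {
    ex_destroy(tree, &node_free_cb, NULL);
}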
50530- * The rb_summarized_gen() macro generates all the functions above, but has an
50531- * expanded interface.  It introduces the notion of summarizing subtrees, and of
50532- * filtering searches in the tree according to the information contained in
50533- * those summaries.
50534- * The extra macro argument is:
50535- *   a_summarize:
50536- *     Tree summarization function name, with the following prototype:
50537- *
50538- *     bool a_summarize(a_type *a_node, const a_type *a_left_child,
50539- *         const a_type *a_right_child);
50540- *
50541- *     This function should update a_node with the summary of the subtree rooted
50542- *     there, using the data contained in it and the summaries in a_left_child
50543- *     and a_right_child.  One or both of them may be NULL.  When the tree
50544- *     changes due to an insertion or removal, it updates the summaries of all
50545- *     nodes whose subtrees have changed (always updating the summaries of
50546- *     children before their parents).  If the user alters a node in the tree in
50547- *     a way that may change its summary, they can call the generated
50548- *     update_summaries function to bubble up the summary changes to the root.
50549- *     It should return true if the summary changed (or may have changed), and
50550- *     false if it didn't (which will allow the implementation to terminate
50551- *     "bubbling up" the summaries early).
50552- *     As the parameter names indicate, the children are ordered as they are in
50553- *     the tree: a_left_child, if it is not NULL, compares less than a_node,
50554- *     which in turn compares less than a_right_child (if a_right_child is not
50555- *     NULL).
50556- *
50557- * Using the same setup as above but replacing the macro with
50558- *   rb_summarized_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp,
50559- *       ex_summarize)
50560- *
50561- * Generates all the previous functions, but adds some more:
50562- *
50563- *   static void
50564- *   ex_update_summaries(ex_t *tree, ex_node_t *node);
50565- *       Description: Recompute all summaries of ancestors of node.
50566- *       Args:
50567- *         tree: Pointer to an initialized red-black tree object.
50568- *         node: The element of the tree whose summary may have changed.
50569- *
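For example, with a hypothetical `payload` field that feeds ex_summarize but not ex_cmp (a sketch, not part of the generated API), an in-place mutation is followed by a single bubble-up call:

static void
ex_set_payload(ex_t *tree, ex_node_t *node, unsigned payload) {
    node->payload = payload;            /* hypothetical summarized field */
    ex_update_summaries(tree, node);    /* recompute ancestor summaries */
}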
50570- * For each of ex_empty, ex_first, ex_last, ex_next, ex_prev, ex_search,
50571- * ex_nsearch, ex_psearch, ex_iter, and ex_reverse_iter, an additional function
50572- * is generated as well, with the suffix _filtered (e.g. ex_empty_filtered,
50573- * ex_first_filtered, etc.).  These use the concept of a "filter": a binary
50574- * property some node either satisfies or does not satisfy.  Clever use of the
50575- * a_summarize argument to rb_summarized_gen can allow efficient computation of
50576- * these predicates across whole subtrees of the tree.
50577- * The extended API functions accept three additional arguments after the
50578- * arguments to the corresponding non-extended equivalent.
50579- *
50580- * ex_fn(..., bool (*filter_node)(void *, ex_node_t *),
50581- *     bool (*filter_subtree)(void *, ex_node_t *), void *filter_ctx);
50582- *         filter_node    : Returns true if the node passes the filter.
50583- *         filter_subtree : Returns true if some node in the subtree rooted at
50584- *                          node passes the filter.
50585- *         filter_ctx     : A context argument passed to the filters.
50586- *
50587- * For a more concrete example of summarizing and filtering, suppose we're using
50588- * the red-black tree to track a set of integers:
50589- *
50590- * struct ex_node_s {
50591- *     rb_node(ex_node_t) ex_link;
50592- *     unsigned data;
50593- * };
50594- *
50595- * Suppose, for some application-specific reason, we want to be able to quickly
50596- * find numbers in the set which are divisible by large powers of 2 (say, for
50597- * aligned allocation purposes).  We augment the node with a summary field:
50598- *
50599- * struct ex_node_s {
50600- *     rb_node(ex_node_t) ex_link;
50601- *     unsigned data;
50602- *     unsigned max_subtree_ffs;
50603- * };
50604- *
50605- * and define our summarization function as follows:
50606- *
50607- * bool
50608- * ex_summarize(ex_node_t *node, const ex_node_t *lchild,
50609- *   const ex_node_t *rchild) {
50610- *     unsigned new_max_subtree_ffs = ffs(node->data);
50611- *     if (lchild != NULL && lchild->max_subtree_ffs > new_max_subtree_ffs) {
50612- *         new_max_subtree_ffs = lchild->max_subtree_ffs;
50613- *     }
50614- *     if (rchild != NULL && rchild->max_subtree_ffs > new_max_subtree_ffs) {
50615- *         new_max_subtree_ffs = rchild->max_subtree_ffs;
50616- *     }
50617- *     bool changed = (node->max_subtree_ffs != new_max_subtree_ffs);
50618- *     node->max_subtree_ffs = new_max_subtree_ffs;
50619- *     // This could be "return true" without any correctness or big-O
50620- *     // performance changes; but practically, precisely reporting summary
50621- *     // changes reduces the amount of work that has to be done when "bubbling
50622- *     // up" summary changes.
50623- *     return changed;
50624- * }
50625- *
50626- * We can now implement our filter functions as follows:
50627- * bool
50628- * ex_filter_node(void *filter_ctx, ex_node_t *node) {
50629- *     unsigned required_ffs = *(unsigned *)filter_ctx;
50630- *     return ffs(node->data) >= required_ffs;
50631- * }
50632- * bool
50633- * ex_filter_subtree(void *filter_ctx, ex_node_t *node) {
50634- *     unsigned required_ffs = *(unsigned *)filter_ctx;
50635- *     return node->max_subtree_ffs >= required_ffs;
50636- * }
50637- *
50638- * We can now easily search for, e.g., the smallest integer in the set that's
50639- * divisible by 128:
50640- * ex_node_t *
50641- * find_div_128(ex_t *tree) {
50642- *     unsigned min_ffs = 8;
50643- *     return ex_first_filtered(tree, &ex_filter_node, &ex_filter_subtree,
50644- *         &min_ffs);
50645- * }
50646- *
50647- * We could with similar ease:
50648- * - Find the next multiple of 128 in the set that's larger than 12345 (with
50649- *   ex_nsearch_filtered)
50650- * - Iterate over just those multiples of 64 that are in the set (with
50651- *   ex_iter_filtered; sketched after this list)
50652- * - Determine if the set contains any multiples of 1024 (with
50653- *   ex_empty_filtered).
50654- *
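The ex_iter_filtered case mentioned in the list above might look like this sketch, reusing ex_filter_node and ex_filter_subtree; ffs(64) == 7, so the filter context asks for ffs >= 7:

static ex_node_t *
count_cb(ex_t *tree, ex_node_t *node, void *arg) {
    (void)tree; (void)node;
    (*(unsigned *)arg)++;
    return NULL;                        /* NULL => keep iterating */
}

static unsigned
count_multiples_of_64(ex_t *tree) {
    unsigned count = 0;
    unsigned min_ffs = 7;
    ex_iter_filtered(tree, NULL, &count_cb, &count, &ex_filter_node,
        &ex_filter_subtree, &min_ffs);
    return count;
}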
50655- * Some possibly subtle API notes:
50656- * - The node argument to ex_next_filtered and ex_prev_filtered need not pass
50657- *   the filter; it will find the next/prev node that passes the filter.
50658- * - ex_search_filtered will fail even for a node in the tree, if that node does
50659- *   not pass the filter.  ex_psearch_filtered and ex_nsearch_filtered behave
50660- *   similarly; they may return a node larger/smaller than the key, even if a
50661- *   node equivalent to the key is in the tree (but does not pass the filter).
50662- * - Similarly, if the start argument to a filtered iteration function does not
50663- *   pass the filter, the callback won't be invoked on it.
50664- *
50665- * These should make sense after a moment's reflection; each post-condition is
50666- * the same as with the unfiltered version, with the added constraint that the
50667- * returned node must pass the filter.
50668- */
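To make the second of the subtle API notes above concrete, here is a sketch reusing the filter helpers from the example: even when `value` is present in the set, the lookup reports false if that node fails the filter.

static bool
contains_with_min_ffs(ex_t *tree, unsigned value, unsigned required_ffs) {
    ex_node_t key;
    key.data = value;
    /* NULL either if value is absent or if its node fails the filter. */
    return ex_search_filtered(tree, &key, &ex_filter_node,
        &ex_filter_subtree, &required_ffs) != NULL;
}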
50669-#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp)	\
50670-    rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp,	\
50671-	rb_empty_summarize, false)
50672-#define rb_summarized_gen(a_attr, a_prefix, a_rbt_type, a_type,		\
50673-    a_field, a_cmp, a_summarize)					\
50674-    rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp,	\
50675-	a_summarize, true)
50676-
50677-#define rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type,		\
50678-    a_field, a_cmp, a_summarize, a_is_summarized)			\
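/* One entry per level of the search path: the node visited and the   */\
/* sign of the comparison that chose the direction taken from it.  The */\
/* wind/unwind passes use this to rewrite child pointers and to re-run */\
/* a_summarize bottom-up.                                              */\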
50679-typedef struct {							\
50680-    a_type *node;							\
50681-    int cmp;								\
50682-} a_prefix##path_entry_t;						\
50683-static inline void							\
50684-a_prefix##summarize_range(a_prefix##path_entry_t *rfirst,		\
50685-    a_prefix##path_entry_t *rlast) {					\
50686-    while ((uintptr_t)rlast >= (uintptr_t)rfirst) {			\
50687-	a_type *node = rlast->node;					\
50688-	/* Avoid a warning when a_summarize is rb_empty_summarize. */	\
50689-	(void)node;							\
50690-	bool changed = a_summarize(node, rbtn_left_get(a_type, a_field,	\
50691-	    node), rbtn_right_get(a_type, a_field, node));		\
50692-	if (!changed) {							\
50693-		break;							\
50694-	}								\
50695-	rlast--;							\
50696-    }									\
50697-}									\
50698-/* On the remove pathways, we sometimes swap the node being removed   */\
50699-/* and its first successor; in such cases we need to do two range     */\
50700-/* updates: one from the node to its (former) swapped successor, the  */\
50701-/* next from that successor to the root (with either allowed to       */\
50702-/* bail out early if appropriate).                                    */\
50703-static inline void							\
50704-a_prefix##summarize_swapped_range(a_prefix##path_entry_t *rfirst,	\
50705-    a_prefix##path_entry_t *rlast, a_prefix##path_entry_t *swap_loc) {	\
50706-	if (swap_loc == NULL || rlast <= swap_loc) {			\
50707-		a_prefix##summarize_range(rfirst, rlast);		\
50708-	} else {							\
50709-		a_prefix##summarize_range(swap_loc + 1, rlast);		\
50710-		(void)a_summarize(swap_loc->node,			\
50711-		    rbtn_left_get(a_type, a_field, swap_loc->node),	\
50712-		    rbtn_right_get(a_type, a_field, swap_loc->node));	\
50713-		a_prefix##summarize_range(rfirst, swap_loc - 1);	\
50714-	}								\
50715-}									\
50716-a_attr void								\
50717-a_prefix##new(a_rbt_type *rbtree) {					\
50718-    rb_new(a_type, a_field, rbtree);					\
50719-}									\
50720-a_attr bool								\
50721-a_prefix##empty(a_rbt_type *rbtree) {					\
50722-    return (rbtree->rbt_root == NULL);					\
50723-}									\
50724-a_attr a_type *								\
50725-a_prefix##first(a_rbt_type *rbtree) {					\
50726-    a_type *ret;							\
50727-    rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret);		\
50728-    return ret;								\
50729-}									\
50730-a_attr a_type *								\
50731-a_prefix##last(a_rbt_type *rbtree) {					\
50732-    a_type *ret;							\
50733-    rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret);		\
50734-    return ret;								\
50735-}									\
50736-a_attr a_type *								\
50737-a_prefix##next(a_rbt_type *rbtree, a_type *node) {			\
50738-    a_type *ret;							\
50739-    if (rbtn_right_get(a_type, a_field, node) != NULL) {		\
50740-	rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type,	\
50741-	  a_field, node), ret);						\
50742-    } else {								\
50743-	a_type *tnode = rbtree->rbt_root;				\
50744-	assert(tnode != NULL);						\
50745-	ret = NULL;							\
50746-	while (true) {							\
50747-	    int cmp = (a_cmp)(node, tnode);				\
50748-	    if (cmp < 0) {						\
50749-		ret = tnode;						\
50750-		tnode = rbtn_left_get(a_type, a_field, tnode);		\
50751-	    } else if (cmp > 0) {					\
50752-		tnode = rbtn_right_get(a_type, a_field, tnode);		\
50753-	    } else {							\
50754-		break;							\
50755-	    }								\
50756-	    assert(tnode != NULL);					\
50757-	}								\
50758-    }									\
50759-    return ret;								\
50760-}									\
50761-a_attr a_type *								\
50762-a_prefix##prev(a_rbt_type *rbtree, a_type *node) {			\
50763-    a_type *ret;							\
50764-    if (rbtn_left_get(a_type, a_field, node) != NULL) {			\
50765-	rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type,	\
50766-	  a_field, node), ret);						\
50767-    } else {								\
50768-	a_type *tnode = rbtree->rbt_root;				\
50769-	assert(tnode != NULL);						\
50770-	ret = NULL;							\
50771-	while (true) {							\
50772-	    int cmp = (a_cmp)(node, tnode);				\
50773-	    if (cmp < 0) {						\
50774-		tnode = rbtn_left_get(a_type, a_field, tnode);		\
50775-	    } else if (cmp > 0) {					\
50776-		ret = tnode;						\
50777-		tnode = rbtn_right_get(a_type, a_field, tnode);		\
50778-	    } else {							\
50779-		break;							\
50780-	    }								\
50781-	    assert(tnode != NULL);					\
50782-	}								\
50783-    }									\
50784-    return ret;								\
50785-}									\
50786-a_attr a_type *								\
50787-a_prefix##search(a_rbt_type *rbtree, const a_type *key) {		\
50788-    a_type *ret;							\
50789-    int cmp;								\
50790-    ret = rbtree->rbt_root;						\
50791-    while (ret != NULL							\
50792-      && (cmp = (a_cmp)(key, ret)) != 0) {				\
50793-	if (cmp < 0) {							\
50794-	    ret = rbtn_left_get(a_type, a_field, ret);			\
50795-	} else {							\
50796-	    ret = rbtn_right_get(a_type, a_field, ret);			\
50797-	}								\
50798-    }									\
50799-    return ret;								\
50800-}									\
50801-a_attr a_type *								\
50802-a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) {		\
50803-    a_type *ret;							\
50804-    a_type *tnode = rbtree->rbt_root;					\
50805-    ret = NULL;								\
50806-    while (tnode != NULL) {						\
50807-	int cmp = (a_cmp)(key, tnode);					\
50808-	if (cmp < 0) {							\
50809-	    ret = tnode;						\
50810-	    tnode = rbtn_left_get(a_type, a_field, tnode);		\
50811-	} else if (cmp > 0) {						\
50812-	    tnode = rbtn_right_get(a_type, a_field, tnode);		\
50813-	} else {							\
50814-	    ret = tnode;						\
50815-	    break;							\
50816-	}								\
50817-    }									\
50818-    return ret;								\
50819-}									\
50820-a_attr a_type *								\
50821-a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) {		\
50822-    a_type *ret;							\
50823-    a_type *tnode = rbtree->rbt_root;					\
50824-    ret = NULL;								\
50825-    while (tnode != NULL) {						\
50826-	int cmp = (a_cmp)(key, tnode);					\
50827-	if (cmp < 0) {							\
50828-	    tnode = rbtn_left_get(a_type, a_field, tnode);		\
50829-	} else if (cmp > 0) {						\
50830-	    ret = tnode;						\
50831-	    tnode = rbtn_right_get(a_type, a_field, tnode);		\
50832-	} else {							\
50833-	    ret = tnode;						\
50834-	    break;							\
50835-	}								\
50836-    }									\
50837-    return ret;								\
50838-}									\
50839-a_attr void								\
50840-a_prefix##insert(a_rbt_type *rbtree, a_type *node) {			\
50841-    a_prefix##path_entry_t path[RB_MAX_DEPTH];			\
50842-    a_prefix##path_entry_t *pathp;					\
50843-    rbt_node_new(a_type, a_field, rbtree, node);			\
50844-    /* Wind. */								\
50845-    path->node = rbtree->rbt_root;					\
50846-    for (pathp = path; pathp->node != NULL; pathp++) {			\
50847-	int cmp = pathp->cmp = a_cmp(node, pathp->node);		\
50848-	assert(cmp != 0);						\
50849-	if (cmp < 0) {							\
50850-	    pathp[1].node = rbtn_left_get(a_type, a_field,		\
50851-	      pathp->node);						\
50852-	} else {							\
50853-	    pathp[1].node = rbtn_right_get(a_type, a_field,		\
50854-	      pathp->node);						\
50855-	}								\
50856-    }									\
50857-    pathp->node = node;							\
50858-    /* A loop invariant we maintain is that all nodes with            */\
50859-    /* out-of-date summaries live in path[0], path[1], ..., *pathp.   */\
50860-    /* To maintain this, we have to summarize node, since we          */\
50861-    /* decrement pathp before the first iteration.                    */\
50862-    assert(rbtn_left_get(a_type, a_field, node) == NULL);		\
50863-    assert(rbtn_right_get(a_type, a_field, node) == NULL);		\
50864-    (void)a_summarize(node, NULL, NULL);				\
50865-    /* Unwind. */							\
50866-    for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) {	\
50867-	a_type *cnode = pathp->node;					\
50868-	if (pathp->cmp < 0) {						\
50869-	    a_type *left = pathp[1].node;				\
50870-	    rbtn_left_set(a_type, a_field, cnode, left);		\
50871-	    if (rbtn_red_get(a_type, a_field, left)) {			\
50872-		a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
50873-		if (leftleft != NULL && rbtn_red_get(a_type, a_field,	\
50874-		  leftleft)) {						\
50875-		    /* Fix up 4-node. */				\
50876-		    a_type *tnode;					\
50877-		    rbtn_black_set(a_type, a_field, leftleft);		\
50878-		    rbtn_rotate_right(a_type, a_field, cnode, tnode);	\
50879-		    (void)a_summarize(cnode,				\
50880-			rbtn_left_get(a_type, a_field, cnode),		\
50881-			rbtn_right_get(a_type, a_field, cnode));	\
50882-		    cnode = tnode;					\
50883-		}							\
50884-	    } else {							\
50885-		a_prefix##summarize_range(path, pathp);			\
50886-		return;							\
50887-	    }								\
50888-	} else {							\
50889-	    a_type *right = pathp[1].node;				\
50890-	    rbtn_right_set(a_type, a_field, cnode, right);		\
50891-	    if (rbtn_red_get(a_type, a_field, right)) {			\
50892-		a_type *left = rbtn_left_get(a_type, a_field, cnode);	\
50893-		if (left != NULL && rbtn_red_get(a_type, a_field,	\
50894-		  left)) {						\
50895-		    /* Split 4-node. */					\
50896-		    rbtn_black_set(a_type, a_field, left);		\
50897-		    rbtn_black_set(a_type, a_field, right);		\
50898-		    rbtn_red_set(a_type, a_field, cnode);		\
50899-		} else {						\
50900-		    /* Lean left. */					\
50901-		    a_type *tnode;					\
50902-		    bool tred = rbtn_red_get(a_type, a_field, cnode);	\
50903-		    rbtn_rotate_left(a_type, a_field, cnode, tnode);	\
50904-		    rbtn_color_set(a_type, a_field, tnode, tred);	\
50905-		    rbtn_red_set(a_type, a_field, cnode);		\
50906-		    (void)a_summarize(cnode,				\
50907-			rbtn_left_get(a_type, a_field, cnode),		\
50908-			rbtn_right_get(a_type, a_field, cnode));	\
50909-		    cnode = tnode;					\
50910-		}							\
50911-	    } else {							\
50912-		a_prefix##summarize_range(path, pathp);			\
50913-		return;							\
50914-	    }								\
50915-	}								\
50916-	pathp->node = cnode;						\
50917-	(void)a_summarize(cnode,					\
50918-	    rbtn_left_get(a_type, a_field, cnode),			\
50919-	    rbtn_right_get(a_type, a_field, cnode));			\
50920-    }									\
50921-    /* Set root, and make it black. */					\
50922-    rbtree->rbt_root = path->node;					\
50923-    rbtn_black_set(a_type, a_field, rbtree->rbt_root);			\
50924-}									\
50925-a_attr void								\
50926-a_prefix##remove(a_rbt_type *rbtree, a_type *node) {			\
50927-    a_prefix##path_entry_t path[RB_MAX_DEPTH];				\
50928-    a_prefix##path_entry_t *pathp;					\
50929-    a_prefix##path_entry_t *nodep;					\
50930-    a_prefix##path_entry_t *swap_loc;					\
50931-    /* This is a "real" sentinel -- NULL means we didn't swap the     */\
50932-    /* node to be pruned with one of its successors, and so           */\
50933-    /* summarization can terminate early whenever some summary        */\
50934-    /* doesn't change.                                                */\
50935-    swap_loc = NULL;							\
50936-    /* This is just to silence a compiler warning. */			\
50937-    nodep = NULL;							\
50938-    /* Wind. */								\
50939-    path->node = rbtree->rbt_root;					\
50940-    for (pathp = path; pathp->node != NULL; pathp++) {			\
50941-	int cmp = pathp->cmp = a_cmp(node, pathp->node);		\
50942-	if (cmp < 0) {							\
50943-	    pathp[1].node = rbtn_left_get(a_type, a_field,		\
50944-	      pathp->node);						\
50945-	} else {							\
50946-	    pathp[1].node = rbtn_right_get(a_type, a_field,		\
50947-	      pathp->node);						\
50948-	    if (cmp == 0) {						\
50949-	        /* Find node's successor, in preparation for swap. */	\
50950-		pathp->cmp = 1;						\
50951-		nodep = pathp;						\
50952-		for (pathp++; pathp->node != NULL; pathp++) {		\
50953-		    pathp->cmp = -1;					\
50954-		    pathp[1].node = rbtn_left_get(a_type, a_field,	\
50955-		      pathp->node);					\
50956-		}							\
50957-		break;							\
50958-	    }								\
50959-	}								\
50960-    }									\
50961-    assert(nodep->node == node);					\
50962-    pathp--;								\
50963-    if (pathp->node != node) {						\
50964-	/* Swap node with its successor. */				\
50965-	swap_loc = nodep;						\
50966-	bool tred = rbtn_red_get(a_type, a_field, pathp->node);		\
50967-	rbtn_color_set(a_type, a_field, pathp->node,			\
50968-	  rbtn_red_get(a_type, a_field, node));				\
50969-	rbtn_left_set(a_type, a_field, pathp->node,			\
50970-	  rbtn_left_get(a_type, a_field, node));			\
50971-	/* If node's successor is its right child, the following code */\
50972-	/* will do the wrong thing for the right child pointer.       */\
50973-	/* However, it doesn't matter, because the pointer will be    */\
50974-	/* properly set when the successor is pruned.                 */\
50975-	rbtn_right_set(a_type, a_field, pathp->node,			\
50976-	  rbtn_right_get(a_type, a_field, node));			\
50977-	rbtn_color_set(a_type, a_field, node, tred);			\
50978-	/* The pruned leaf node's child pointers are never accessed   */\
50979-	/* again, so don't bother setting them to nil.                */\
50980-	nodep->node = pathp->node;					\
50981-	pathp->node = node;						\
50982-	if (nodep == path) {						\
50983-	    rbtree->rbt_root = nodep->node;				\
50984-	} else {							\
50985-	    if (nodep[-1].cmp < 0) {					\
50986-		rbtn_left_set(a_type, a_field, nodep[-1].node,		\
50987-		  nodep->node);						\
50988-	    } else {							\
50989-		rbtn_right_set(a_type, a_field, nodep[-1].node,		\
50990-		  nodep->node);						\
50991-	    }								\
50992-	}								\
50993-    } else {								\
50994-	a_type *left = rbtn_left_get(a_type, a_field, node);		\
50995-	if (left != NULL) {						\
50996-	    /* node has no successor, but it has a left child.        */\
50997-	    /* Splice node out, without losing the left child.        */\
50998-	    assert(!rbtn_red_get(a_type, a_field, node));		\
50999-	    assert(rbtn_red_get(a_type, a_field, left));		\
51000-	    rbtn_black_set(a_type, a_field, left);			\
51001-	    if (pathp == path) {					\
51002-		rbtree->rbt_root = left;				\
51003-		/* Nothing to summarize -- the subtree rooted at the  */\
51004-		/* node's left child hasn't changed, and it's now the */\
51005-		/* root.					      */\
51006-	    } else {							\
51007-		if (pathp[-1].cmp < 0) {				\
51008-		    rbtn_left_set(a_type, a_field, pathp[-1].node,	\
51009-		      left);						\
51010-		} else {						\
51011-		    rbtn_right_set(a_type, a_field, pathp[-1].node,	\
51012-		      left);						\
51013-		}							\
51014-		a_prefix##summarize_swapped_range(path, &pathp[-1],	\
51015-		    swap_loc);						\
51016-	    }								\
51017-	    return;							\
51018-	} else if (pathp == path) {					\
51019-	    /* The tree only contained one node. */			\
51020-	    rbtree->rbt_root = NULL;					\
51021-	    return;							\
51022-	}								\
51023-    }									\
51024-    /* We've now established the invariant that the node has no right */\
51025-    /* child (well, morally; we didn't bother nulling it out if we    */\
51026-    /* swapped it with its successor), and that the only nodes with   */\
51027-    /* out-of-date summaries live in path[0], path[1], ..., pathp[-1].*/\
51028-    if (rbtn_red_get(a_type, a_field, pathp->node)) {			\
51029-	/* Prune red node, which requires no fixup. */			\
51030-	assert(pathp[-1].cmp < 0);					\
51031-	rbtn_left_set(a_type, a_field, pathp[-1].node, NULL);		\
51032-	a_prefix##summarize_swapped_range(path, &pathp[-1], swap_loc);	\
51033-	return;								\
51034-    }									\
51035-    /* The node to be pruned is black, so unwind until balance is     */\
51036-    /* restored.                                                      */\
51037-    pathp->node = NULL;							\
51038-    for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) {	\
51039-	assert(pathp->cmp != 0);					\
51040-	if (pathp->cmp < 0) {						\
51041-	    rbtn_left_set(a_type, a_field, pathp->node,			\
51042-	      pathp[1].node);						\
51043-	    if (rbtn_red_get(a_type, a_field, pathp->node)) {		\
51044-		a_type *right = rbtn_right_get(a_type, a_field,		\
51045-		  pathp->node);						\
51046-		a_type *rightleft = rbtn_left_get(a_type, a_field,	\
51047-		  right);						\
51048-		a_type *tnode;						\
51049-		if (rightleft != NULL && rbtn_red_get(a_type, a_field,	\
51050-		  rightleft)) {						\
51051-		    /* In the following diagrams, ||, //, and \\      */\
51052-		    /* indicate the path to the removed node.         */\
51053-		    /*                                                */\
51054-		    /*      ||                                        */\
51055-		    /*    pathp(r)                                    */\
51056-		    /*  //        \                                   */\
51057-		    /* (b)        (b)                                 */\
51058-		    /*           /                                    */\
51059-		    /*          (r)                                   */\
51060-		    /*                                                */\
51061-		    rbtn_black_set(a_type, a_field, pathp->node);	\
51062-		    rbtn_rotate_right(a_type, a_field, right, tnode);	\
51063-		    rbtn_right_set(a_type, a_field, pathp->node, tnode);\
51064-		    rbtn_rotate_left(a_type, a_field, pathp->node,	\
51065-		      tnode);						\
51066-		    (void)a_summarize(pathp->node,			\
51067-			rbtn_left_get(a_type, a_field, pathp->node),	\
51068-			rbtn_right_get(a_type, a_field, pathp->node));	\
51069-		    (void)a_summarize(right,				\
51070-			rbtn_left_get(a_type, a_field, right),		\
51071-			rbtn_right_get(a_type, a_field, right));	\
51072-		} else {						\
51073-		    /*      ||                                        */\
51074-		    /*    pathp(r)                                    */\
51075-		    /*  //        \                                   */\
51076-		    /* (b)        (b)                                 */\
51077-		    /*           /                                    */\
51078-		    /*          (b)                                   */\
51079-		    /*                                                */\
51080-		    rbtn_rotate_left(a_type, a_field, pathp->node,	\
51081-		      tnode);						\
51082-		    (void)a_summarize(pathp->node,			\
51083-			rbtn_left_get(a_type, a_field, pathp->node),	\
51084-			rbtn_right_get(a_type, a_field, pathp->node));	\
51085-		}							\
51086-		(void)a_summarize(tnode, rbtn_left_get(a_type, a_field,	\
51087-		    tnode), rbtn_right_get(a_type, a_field, tnode));	\
51088-		/* Balance restored, but rotation modified subtree    */\
51089-		/* root.                                              */\
51090-		assert((uintptr_t)pathp > (uintptr_t)path);		\
51091-		if (pathp[-1].cmp < 0) {				\
51092-		    rbtn_left_set(a_type, a_field, pathp[-1].node,	\
51093-		      tnode);						\
51094-		} else {						\
51095-		    rbtn_right_set(a_type, a_field, pathp[-1].node,	\
51096-		      tnode);						\
51097-		}							\
51098-		a_prefix##summarize_swapped_range(path, &pathp[-1],	\
51099-		    swap_loc);						\
51100-		return;							\
51101-	    } else {							\
51102-		a_type *right = rbtn_right_get(a_type, a_field,		\
51103-		  pathp->node);						\
51104-		a_type *rightleft = rbtn_left_get(a_type, a_field,	\
51105-		  right);						\
51106-		if (rightleft != NULL && rbtn_red_get(a_type, a_field,	\
51107-		  rightleft)) {						\
51108-		    /*      ||                                        */\
51109-		    /*    pathp(b)                                    */\
51110-		    /*  //        \                                   */\
51111-		    /* (b)        (b)                                 */\
51112-		    /*           /                                    */\
51113-		    /*          (r)                                   */\
51114-		    a_type *tnode;					\
51115-		    rbtn_black_set(a_type, a_field, rightleft);		\
51116-		    rbtn_rotate_right(a_type, a_field, right, tnode);	\
51117-		    rbtn_right_set(a_type, a_field, pathp->node, tnode);\
51118-		    rbtn_rotate_left(a_type, a_field, pathp->node,	\
51119-		      tnode);						\
51120-		    (void)a_summarize(pathp->node,			\
51121-			rbtn_left_get(a_type, a_field, pathp->node),	\
51122-			rbtn_right_get(a_type, a_field, pathp->node));	\
51123-		    (void)a_summarize(right,				\
51124-			rbtn_left_get(a_type, a_field, right),		\
51125-			rbtn_right_get(a_type, a_field, right));	\
51126-		    (void)a_summarize(tnode,				\
51127-			rbtn_left_get(a_type, a_field, tnode),		\
51128-			rbtn_right_get(a_type, a_field, tnode));	\
51129-		    /* Balance restored, but rotation modified        */\
51130-		    /* subtree root, which may actually be the tree   */\
51131-		    /* root.                                          */\
51132-		    if (pathp == path) {				\
51133-			/* Set root. */					\
51134-			rbtree->rbt_root = tnode;			\
51135-		    } else {						\
51136-			if (pathp[-1].cmp < 0) {			\
51137-			    rbtn_left_set(a_type, a_field,		\
51138-			      pathp[-1].node, tnode);			\
51139-			} else {					\
51140-			    rbtn_right_set(a_type, a_field,		\
51141-			      pathp[-1].node, tnode);			\
51142-			}						\
51143-			a_prefix##summarize_swapped_range(path,		\
51144-			    &pathp[-1], swap_loc);			\
51145-		    }							\
51146-		    return;						\
51147-		} else {						\
51148-		    /*      ||                                        */\
51149-		    /*    pathp(b)                                    */\
51150-		    /*  //        \                                   */\
51151-		    /* (b)        (b)                                 */\
51152-		    /*           /                                    */\
51153-		    /*          (b)                                   */\
51154-		    a_type *tnode;					\
51155-		    rbtn_red_set(a_type, a_field, pathp->node);		\
51156-		    rbtn_rotate_left(a_type, a_field, pathp->node,	\
51157-		      tnode);						\
51158-		    (void)a_summarize(pathp->node,			\
51159-			rbtn_left_get(a_type, a_field, pathp->node),	\
51160-			rbtn_right_get(a_type, a_field, pathp->node));	\
51161-		    (void)a_summarize(tnode,				\
51162-			rbtn_left_get(a_type, a_field, tnode),		\
51163-			rbtn_right_get(a_type, a_field, tnode));	\
51164-		    pathp->node = tnode;				\
51165-		}							\
51166-	    }								\
51167-	} else {							\
51168-	    a_type *left;						\
51169-	    rbtn_right_set(a_type, a_field, pathp->node,		\
51170-	      pathp[1].node);						\
51171-	    left = rbtn_left_get(a_type, a_field, pathp->node);		\
51172-	    if (rbtn_red_get(a_type, a_field, left)) {			\
51173-		a_type *tnode;						\
51174-		a_type *leftright = rbtn_right_get(a_type, a_field,	\
51175-		  left);						\
51176-		a_type *leftrightleft = rbtn_left_get(a_type, a_field,	\
51177-		  leftright);						\
51178-		if (leftrightleft != NULL && rbtn_red_get(a_type,	\
51179-		  a_field, leftrightleft)) {				\
51180-		    /*      ||                                        */\
51181-		    /*    pathp(b)                                    */\
51182-		    /*   /        \\                                  */\
51183-		    /* (r)        (b)                                 */\
51184-		    /*   \                                            */\
51185-		    /*   (b)                                          */\
51186-		    /*   /                                            */\
51187-		    /* (r)                                            */\
51188-		    a_type *unode;					\
51189-		    rbtn_black_set(a_type, a_field, leftrightleft);	\
51190-		    rbtn_rotate_right(a_type, a_field, pathp->node,	\
51191-		      unode);						\
51192-		    rbtn_rotate_right(a_type, a_field, pathp->node,	\
51193-		      tnode);						\
51194-		    rbtn_right_set(a_type, a_field, unode, tnode);	\
51195-		    rbtn_rotate_left(a_type, a_field, unode, tnode);	\
51196-		    (void)a_summarize(pathp->node,			\
51197-			rbtn_left_get(a_type, a_field, pathp->node),	\
51198-			rbtn_right_get(a_type, a_field, pathp->node));	\
51199-		    (void)a_summarize(unode,				\
51200-			rbtn_left_get(a_type, a_field, unode),		\
51201-			rbtn_right_get(a_type, a_field, unode));	\
51202-		} else {						\
51203-		    /*      ||                                        */\
51204-		    /*    pathp(b)                                    */\
51205-		    /*   /        \\                                  */\
51206-		    /* (r)        (b)                                 */\
51207-		    /*   \                                            */\
51208-		    /*   (b)                                          */\
51209-		    /*   /                                            */\
51210-		    /* (b)                                            */\
51211-		    assert(leftright != NULL);				\
51212-		    rbtn_red_set(a_type, a_field, leftright);		\
51213-		    rbtn_rotate_right(a_type, a_field, pathp->node,	\
51214-		      tnode);						\
51215-		    rbtn_black_set(a_type, a_field, tnode);		\
51216-		    (void)a_summarize(pathp->node,			\
51217-			rbtn_left_get(a_type, a_field, pathp->node),	\
51218-			rbtn_right_get(a_type, a_field, pathp->node));	\
51219-		}							\
51220-		(void)a_summarize(tnode,				\
51221-		    rbtn_left_get(a_type, a_field, tnode),		\
51222-		    rbtn_right_get(a_type, a_field, tnode));		\
51223-		/* Balance restored, but rotation modified subtree    */\
51224-		/* root, which may actually be the tree root.         */\
51225-		if (pathp == path) {					\
51226-		    /* Set root. */					\
51227-		    rbtree->rbt_root = tnode;				\
51228-		} else {						\
51229-		    if (pathp[-1].cmp < 0) {				\
51230-			rbtn_left_set(a_type, a_field, pathp[-1].node,	\
51231-			  tnode);					\
51232-		    } else {						\
51233-			rbtn_right_set(a_type, a_field, pathp[-1].node,	\
51234-			  tnode);					\
51235-		    }							\
51236-		    a_prefix##summarize_swapped_range(path, &pathp[-1],	\
51237-			swap_loc);					\
51238-		}							\
51239-		return;							\
51240-	    } else if (rbtn_red_get(a_type, a_field, pathp->node)) {	\
51241-		a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
51242-		if (leftleft != NULL && rbtn_red_get(a_type, a_field,	\
51243-		  leftleft)) {						\
51244-		    /*        ||                                      */\
51245-		    /*      pathp(r)                                  */\
51246-		    /*     /        \\                                */\
51247-		    /*   (b)        (b)                               */\
51248-		    /*   /                                            */\
51249-		    /* (r)                                            */\
51250-		    a_type *tnode;					\
51251-		    rbtn_black_set(a_type, a_field, pathp->node);	\
51252-		    rbtn_red_set(a_type, a_field, left);		\
51253-		    rbtn_black_set(a_type, a_field, leftleft);		\
51254-		    rbtn_rotate_right(a_type, a_field, pathp->node,	\
51255-		      tnode);						\
51256-		    (void)a_summarize(pathp->node,			\
51257-			rbtn_left_get(a_type, a_field, pathp->node),	\
51258-			rbtn_right_get(a_type, a_field, pathp->node));	\
51259-		    (void)a_summarize(tnode,				\
51260-			rbtn_left_get(a_type, a_field, tnode),		\
51261-			rbtn_right_get(a_type, a_field, tnode));	\
51262-		    /* Balance restored, but rotation modified        */\
51263-		    /* subtree root.                                  */\
51264-		    assert((uintptr_t)pathp > (uintptr_t)path);		\
51265-		    if (pathp[-1].cmp < 0) {				\
51266-			rbtn_left_set(a_type, a_field, pathp[-1].node,	\
51267-			  tnode);					\
51268-		    } else {						\
51269-			rbtn_right_set(a_type, a_field, pathp[-1].node,	\
51270-			  tnode);					\
51271-		    }							\
51272-		    a_prefix##summarize_swapped_range(path, &pathp[-1],	\
51273-			swap_loc);					\
51274-		    return;						\
51275-		} else {						\
51276-		    /*        ||                                      */\
51277-		    /*      pathp(r)                                  */\
51278-		    /*     /        \\                                */\
51279-		    /*   (b)        (b)                               */\
51280-		    /*   /                                            */\
51281-		    /* (b)                                            */\
51282-		    rbtn_red_set(a_type, a_field, left);		\
51283-		    rbtn_black_set(a_type, a_field, pathp->node);	\
51284-		    /* Balance restored. */				\
51285-		    a_prefix##summarize_swapped_range(path, pathp,	\
51286-			swap_loc);					\
51287-		    return;						\
51288-		}							\
51289-	    } else {							\
51290-		a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
51291-		if (leftleft != NULL && rbtn_red_get(a_type, a_field,	\
51292-		  leftleft)) {						\
51293-		    /*               ||                               */\
51294-		    /*             pathp(b)                           */\
51295-		    /*            /        \\                         */\
51296-		    /*          (b)        (b)                        */\
51297-		    /*          /                                     */\
51298-		    /*        (r)                                     */\
51299-		    a_type *tnode;					\
51300-		    rbtn_black_set(a_type, a_field, leftleft);		\
51301-		    rbtn_rotate_right(a_type, a_field, pathp->node,	\
51302-		      tnode);						\
51303-		    (void)a_summarize(pathp->node,			\
51304-			rbtn_left_get(a_type, a_field, pathp->node),	\
51305-			rbtn_right_get(a_type, a_field, pathp->node));	\
51306-		    (void)a_summarize(tnode,				\
51307-			rbtn_left_get(a_type, a_field, tnode),		\
51308-			rbtn_right_get(a_type, a_field, tnode));	\
51309-		    /* Balance restored, but rotation modified        */\
51310-		    /* subtree root, which may actually be the tree   */\
51311-		    /* root.                                          */\
51312-		    if (pathp == path) {				\
51313-			/* Set root. */					\
51314-			rbtree->rbt_root = tnode;			\
51315-		    } else {						\
51316-			if (pathp[-1].cmp < 0) {			\
51317-			    rbtn_left_set(a_type, a_field,		\
51318-			      pathp[-1].node, tnode);			\
51319-			} else {					\
51320-			    rbtn_right_set(a_type, a_field,		\
51321-			      pathp[-1].node, tnode);			\
51322-			}						\
51323-		        a_prefix##summarize_swapped_range(path,		\
51324-			    &pathp[-1], swap_loc);			\
51325-		    }							\
51326-		    return;						\
51327-		} else {						\
51328-		    /*               ||                               */\
51329-		    /*             pathp(b)                           */\
51330-		    /*            /        \\                         */\
51331-		    /*          (b)        (b)                        */\
51332-		    /*          /                                     */\
51333-		    /*        (b)                                     */\
51334-		    rbtn_red_set(a_type, a_field, left);		\
51335-		    (void)a_summarize(pathp->node,			\
51336-			rbtn_left_get(a_type, a_field, pathp->node),	\
51337-			rbtn_right_get(a_type, a_field, pathp->node));	\
51338-		}							\
51339-	    }								\
51340-	}								\
51341-    }									\
51342-    /* Set root. */							\
51343-    rbtree->rbt_root = path->node;					\
51344-    assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root));		\
51345-}									\
51346-a_attr a_type *								\
51347-a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node,		\
51348-  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) {		\
51349-    if (node == NULL) {							\
51350-	return NULL;							\
51351-    } else {								\
51352-	a_type *ret;							\
51353-	if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type,	\
51354-	  a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node,	\
51355-	  arg)) != NULL) {						\
51356-	    return ret;							\
51357-	}								\
51358-	return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type,	\
51359-	  a_field, node), cb, arg);					\
51360-    }									\
51361-}									\
51362-a_attr a_type *								\
51363-a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node,	\
51364-  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) {		\
51365-    int cmp = a_cmp(start, node);					\
51366-    if (cmp < 0) {							\
51367-	a_type *ret;							\
51368-	if ((ret = a_prefix##iter_start(rbtree, start,			\
51369-	  rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL ||	\
51370-	  (ret = cb(rbtree, node, arg)) != NULL) {			\
51371-	    return ret;							\
51372-	}								\
51373-	return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type,	\
51374-	  a_field, node), cb, arg);					\
51375-    } else if (cmp > 0) {						\
51376-	return a_prefix##iter_start(rbtree, start,			\
51377-	  rbtn_right_get(a_type, a_field, node), cb, arg);		\
51378-    } else {								\
51379-	a_type *ret;							\
51380-	if ((ret = cb(rbtree, node, arg)) != NULL) {			\
51381-	    return ret;							\
51382-	}								\
51383-	return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type,	\
51384-	  a_field, node), cb, arg);					\
51385-    }									\
51386-}									\
51387-a_attr a_type *								\
51388-a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)(	\
51389-  a_rbt_type *, a_type *, void *), void *arg) {				\
51390-    a_type *ret;							\
51391-    if (start != NULL) {						\
51392-	ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root,	\
51393-	  cb, arg);							\
51394-    } else {								\
51395-	ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
51396-    }									\
51397-    return ret;								\
51398-}									\
51399-a_attr a_type *								\
51400-a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node,	\
51401-  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) {		\
51402-    if (node == NULL) {							\
51403-	return NULL;							\
51404-    } else {								\
51405-	a_type *ret;							\
51406-	if ((ret = a_prefix##reverse_iter_recurse(rbtree,		\
51407-	  rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL ||	\
51408-	  (ret = cb(rbtree, node, arg)) != NULL) {			\
51409-	    return ret;							\
51410-	}								\
51411-	return a_prefix##reverse_iter_recurse(rbtree,			\
51412-	  rbtn_left_get(a_type, a_field, node), cb, arg);		\
51413-    }									\
51414-}									\
51415-a_attr a_type *								\
51416-a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start,		\
51417-  a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *),		\
51418-  void *arg) {								\
51419-    int cmp = a_cmp(start, node);					\
51420-    if (cmp > 0) {							\
51421-	a_type *ret;							\
51422-	if ((ret = a_prefix##reverse_iter_start(rbtree, start,		\
51423-	  rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL ||	\
51424-	  (ret = cb(rbtree, node, arg)) != NULL) {			\
51425-	    return ret;							\
51426-	}								\
51427-	return a_prefix##reverse_iter_recurse(rbtree,			\
51428-	  rbtn_left_get(a_type, a_field, node), cb, arg);		\
51429-    } else if (cmp < 0) {						\
51430-	return a_prefix##reverse_iter_start(rbtree, start,		\
51431-	  rbtn_left_get(a_type, a_field, node), cb, arg);		\
51432-    } else {								\
51433-	a_type *ret;							\
51434-	if ((ret = cb(rbtree, node, arg)) != NULL) {			\
51435-	    return ret;							\
51436-	}								\
51437-	return a_prefix##reverse_iter_recurse(rbtree,			\
51438-	  rbtn_left_get(a_type, a_field, node), cb, arg);		\
51439-    }									\
51440-}									\
51441-a_attr a_type *								\
51442-a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start,		\
51443-  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) {		\
51444-    a_type *ret;							\
51445-    if (start != NULL) {						\
51446-	ret = a_prefix##reverse_iter_start(rbtree, start,		\
51447-	  rbtree->rbt_root, cb, arg);					\
51448-    } else {								\
51449-	ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root,	\
51450-	  cb, arg);							\
51451-    }									\
51452-    return ret;								\
51453-}									\
51454-a_attr void								\
51455-a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)(	\
51456-  a_type *, void *), void *arg) {					\
51457-    if (node == NULL) {							\
51458-	return;								\
51459-    }									\
51460-    a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field,	\
51461-      node), cb, arg);							\
51462-    rbtn_left_set(a_type, a_field, (node), NULL);			\
51463-    a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field,	\
51464-      node), cb, arg);							\
51465-    rbtn_right_set(a_type, a_field, (node), NULL);			\
51466-    if (cb) {								\
51467-	cb(node, arg);							\
51468-    }									\
51469-}									\
51470-a_attr void								\
51471-a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *),	\
51472-  void *arg) {								\
51473-    a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg);	\
51474-    rbtree->rbt_root = NULL;						\
51475-}									\
51476-/* BEGIN SUMMARIZED-ONLY IMPLEMENTATION */				\
51477-rb_summarized_only_##a_is_summarized(					\
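/* Record the search path from the root down to node (which must be   */\
/* in the tree); returns the path entry whose ->node equals node.      */\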
51478-static inline a_prefix##path_entry_t *					\
51479-a_prefix##wind(a_rbt_type *rbtree,					\
51480-    a_prefix##path_entry_t path[RB_MAX_DEPTH], a_type *node) {		\
51481-    a_prefix##path_entry_t *pathp;					\
51482-    path->node = rbtree->rbt_root;					\
51483-    for (pathp = path; ; pathp++) {					\
51484-	assert((size_t)(pathp - path) < RB_MAX_DEPTH);			\
51485-	pathp->cmp = a_cmp(node, pathp->node);				\
51486-	if (pathp->cmp < 0) {						\
51487-	    pathp[1].node = rbtn_left_get(a_type, a_field,		\
51488-		pathp->node);						\
51489-	} else if (pathp->cmp == 0) {					\
51490-	    return pathp;						\
51491-	} else {							\
51492-	    pathp[1].node = rbtn_right_get(a_type, a_field,		\
51493-		pathp->node);						\
51494-	}								\
51495-    }									\
51496-    unreachable();							\
51497-}									\
51498-a_attr void								\
51499-a_prefix##update_summaries(a_rbt_type *rbtree, a_type *node) {		\
51500-    a_prefix##path_entry_t path[RB_MAX_DEPTH];				\
51501-    a_prefix##path_entry_t *pathp = a_prefix##wind(rbtree, path, node);	\
51502-    a_prefix##summarize_range(path, pathp);				\
51503-}									\
51504-a_attr bool								\
51505-a_prefix##empty_filtered(a_rbt_type *rbtree,				\
51506-  bool (*filter_node)(void *, a_type *),				\
51507-  bool (*filter_subtree)(void *, a_type *),				\
51508-  void *filter_ctx) {							\
51509-    a_type *node = rbtree->rbt_root;					\
51510-    return node == NULL || !filter_subtree(filter_ctx, node);		\
51511-}									\
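/* Return the smallest node passing the filter in the subtree rooted  */\
/* at node; the caller guarantees that filter_subtree holds for node.  */\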
51512-static inline a_type *							\
51513-a_prefix##first_filtered_from_node(a_type *node,			\
51514-  bool (*filter_node)(void *, a_type *),				\
51515-  bool (*filter_subtree)(void *, a_type *),				\
51516-  void *filter_ctx) {							\
51517-    assert(node != NULL && filter_subtree(filter_ctx, node));		\
51518-    while (true) {							\
51519-	a_type *left = rbtn_left_get(a_type, a_field, node);		\
51520-	a_type *right = rbtn_right_get(a_type, a_field, node);		\
51521-	if (left != NULL && filter_subtree(filter_ctx, left)) {		\
51522-	    node = left;						\
51523-	} else if (filter_node(filter_ctx, node)) {			\
51524-	    return node;						\
51525-	} else {							\
51526-		assert(right != NULL					\
51527-		    && filter_subtree(filter_ctx, right));		\
51528-		node = right;						\
51529-	}								\
51530-    }									\
51531-    unreachable();							\
51532-}									\
51533-a_attr a_type *								\
51534-a_prefix##first_filtered(a_rbt_type *rbtree,				\
51535-  bool (*filter_node)(void *, a_type *),				\
51536-  bool (*filter_subtree)(void *, a_type *),				\
51537-  void *filter_ctx) {							\
51538-    a_type *node = rbtree->rbt_root;					\
51539-    if (node == NULL || !filter_subtree(filter_ctx, node)) {		\
51540-	return NULL;							\
51541-    }									\
51542-    return a_prefix##first_filtered_from_node(node, filter_node,	\
51543-	filter_subtree, filter_ctx);					\
51544-}									\
51545-static inline a_type *							\
51546-a_prefix##last_filtered_from_node(a_type *node,				\
51547-  bool (*filter_node)(void *, a_type *),				\
51548-  bool (*filter_subtree)(void *, a_type *),				\
51549-  void *filter_ctx) {							\
51550-    assert(node != NULL && filter_subtree(filter_ctx, node));		\
51551-    while (true) {							\
51552-	a_type *left = rbtn_left_get(a_type, a_field, node);		\
51553-	a_type *right = rbtn_right_get(a_type, a_field, node);		\
51554-	if (right != NULL && filter_subtree(filter_ctx, right)) {	\
51555-	    node = right;						\
51556-	} else if (filter_node(filter_ctx, node)) {			\
51557-	    return node;						\
51558-	} else {							\
51559-		assert(left != NULL					\
51560-		    && filter_subtree(filter_ctx, left));		\
51561-		node = left;						\
51562-	}								\
51563-    }									\
51564-    unreachable();							\
51565-}									\
51566-a_attr a_type *								\
51567-a_prefix##last_filtered(a_rbt_type *rbtree,				\
51568-  bool (*filter_node)(void *, a_type *),				\
51569-  bool (*filter_subtree)(void *, a_type *),				\
51570-  void *filter_ctx) {							\
51571-    a_type *node = rbtree->rbt_root;					\
51572-    if (node == NULL || !filter_subtree(filter_ctx, node)) {		\
51573-	return NULL;							\
51574-    }									\
51575-    return a_prefix##last_filtered_from_node(node, filter_node,		\
51576-	filter_subtree, filter_ctx);					\
51577-}									\
51578-/* Internal implementation function.  Search for a node comparing     */\
51579-/* equal to key matching the filter.  If such a node is in the tree,  */\
51580-/* return it.  Additionally, the caller has the option to ask for     */\
51581-/* bounds on the next / prev node in the tree passing the filter.     */\
51582-/* If nextbound is true, then this function will do one of the        */\
51583-/* following:                                                         */\
51584-/* - Fill in *nextbound_node with the smallest node in the tree       */\
51585-/*   greater than key passing the filter, and NULL-out                */\
51586-/*   *nextbound_subtree.                                              */\
51587-/* - Fill in *nextbound_subtree with a parent of that node which is   */\
51588-/*   not a parent of the searched-for node, and NULL-out              */\
51589-/*   *nextbound_node.                                                 */\
51590-/* - NULL-out both *nextbound_node and *nextbound_subtree, in which   */\
51591-/*   case no node greater than key but passing the filter is in the   */\
51592-/*   tree.                                                            */\
51593-/* The prevbound case is similar.  If the caller knows that key is in */\
51594-/* the tree and that the subtree rooted at key does not contain a     */\
51595-/* node satisfying the bound being searched for, then they can pass   */\
51596-/* false for include_subtree, in which case we won't bother searching */\
51597-/* there (risking a cache miss).                                      */\
51598-/*                                                                    */\
51599-/* This API is unfortunately complex; but the logic for filtered      */\
51600-/* searches is very subtle, and otherwise we would have to repeat it  */\
51601-/* multiple times for filtered search, nsearch, psearch, next, and    */\
51602-/* prev.                                                              */\
51603-static inline a_type *							\
51604-a_prefix##search_with_filter_bounds(a_rbt_type *rbtree,			\
51605-  const a_type *key,							\
51606-  bool (*filter_node)(void *, a_type *),				\
51607-  bool (*filter_subtree)(void *, a_type *),				\
51608-  void *filter_ctx,							\
51609-  bool include_subtree,							\
51610-  bool nextbound, a_type **nextbound_node, a_type **nextbound_subtree,	\
51611-  bool prevbound, a_type **prevbound_node, a_type **prevbound_subtree) {\
51612-    if (nextbound) {							\
51613-	    *nextbound_node = NULL;					\
51614-	    *nextbound_subtree = NULL;					\
51615-    }									\
51616-    if (prevbound) {							\
51617-	    *prevbound_node = NULL;					\
51618-	    *prevbound_subtree = NULL;					\
51619-    }									\
51620-    a_type *tnode = rbtree->rbt_root;					\
51621-    while (tnode != NULL && filter_subtree(filter_ctx, tnode)) {	\
51622-	int cmp = a_cmp(key, tnode);					\
51623-	a_type *tleft = rbtn_left_get(a_type, a_field, tnode);		\
51624-	a_type *tright = rbtn_right_get(a_type, a_field, tnode);	\
51625-	if (cmp < 0) {							\
51626-	    if (nextbound) {						\
51627-		if (filter_node(filter_ctx, tnode)) {			\
51628-		    *nextbound_node = tnode;				\
51629-		    *nextbound_subtree = NULL;				\
51630-		} else if (tright != NULL && filter_subtree(		\
51631-		    filter_ctx, tright)) {				\
51632-		    *nextbound_node = NULL;				\
51633-		    *nextbound_subtree = tright;			\
51634-		}							\
51635-	    }								\
51636-	    tnode = tleft;						\
51637-	} else if (cmp > 0) {						\
51638-	    if (prevbound) {						\
51639-		if (filter_node(filter_ctx, tnode)) {			\
51640-		    *prevbound_node = tnode;				\
51641-		    *prevbound_subtree = NULL;				\
51642-		} else if (tleft != NULL && filter_subtree(		\
51643-		    filter_ctx, tleft)) {				\
51644-		    *prevbound_node = NULL;				\
51645-		    *prevbound_subtree = tleft;				\
51646-		}							\
51647-	    }								\
51648-	    tnode = tright;						\
51649-	} else {							\
51650-	    if (filter_node(filter_ctx, tnode)) {			\
51651-		return tnode;						\
51652-	    }								\
51653-	    if (include_subtree) {					\
51654-		if (prevbound && tleft != NULL && filter_subtree(	\
51655-		    filter_ctx, tleft)) {				\
51656-		    *prevbound_node = NULL;				\
51657-		    *prevbound_subtree = tleft;				\
51658-		}							\
51659-		if (nextbound && tright != NULL && filter_subtree(	\
51660-		    filter_ctx, tright)) {				\
51661-		    *nextbound_node = NULL;				\
51662-		    *nextbound_subtree = tright;			\
51663-		}							\
51664-	    }								\
51665-	    return NULL;						\
51666-	}								\
51667-    }									\
51668-    return NULL;							\
51669-}									\
51670-a_attr a_type *								\
51671-a_prefix##next_filtered(a_rbt_type *rbtree, a_type *node,		\
51672-  bool (*filter_node)(void *, a_type *),				\
51673-  bool (*filter_subtree)(void *, a_type *),				\
51674-  void *filter_ctx) {							\
51675-    a_type *nright = rbtn_right_get(a_type, a_field, node);		\
51676-    if (nright != NULL && filter_subtree(filter_ctx, nright)) {		\
51677-	return a_prefix##first_filtered_from_node(nright, filter_node,	\
51678-	    filter_subtree, filter_ctx);				\
51679-    }									\
51680-    a_type *node_candidate;						\
51681-    a_type *subtree_candidate;						\
51682-    a_type *search_result = a_prefix##search_with_filter_bounds(	\
51683-	rbtree, node, filter_node, filter_subtree, filter_ctx,		\
51684-	/* include_subtree */ false,					\
51685-	/* nextbound */ true, &node_candidate, &subtree_candidate,	\
51686-	/* prevbound */ false, NULL, NULL);				\
51687-    assert(node == search_result					\
51688-	|| !filter_node(filter_ctx, node));				\
51689-    if (node_candidate != NULL) {					\
51690-	return node_candidate;						\
51691-    }									\
51692-    if (subtree_candidate != NULL) {					\
51693-	return a_prefix##first_filtered_from_node(			\
51694-	    subtree_candidate, filter_node, filter_subtree,		\
51695-	    filter_ctx);						\
51696-    }									\
51697-    return NULL;							\
51698-}									\
51699-a_attr a_type *								\
51700-a_prefix##prev_filtered(a_rbt_type *rbtree, a_type *node,		\
51701-  bool (*filter_node)(void *, a_type *),				\
51702-  bool (*filter_subtree)(void *, a_type *),				\
51703-  void *filter_ctx) {							\
51704-    a_type *nleft = rbtn_left_get(a_type, a_field, node);		\
51705-    if (nleft != NULL && filter_subtree(filter_ctx, nleft)) {		\
51706-	return a_prefix##last_filtered_from_node(nleft, filter_node,	\
51707-	    filter_subtree, filter_ctx);				\
51708-    }									\
51709-    a_type *node_candidate;						\
51710-    a_type *subtree_candidate;						\
51711-    a_type *search_result = a_prefix##search_with_filter_bounds(	\
51712-	rbtree, node, filter_node, filter_subtree, filter_ctx,		\
51713-	/* include_subtree */ false,					\
51714-	/* nextbound */ false, NULL, NULL,				\
51715-	/* prevbound */ true, &node_candidate, &subtree_candidate);	\
51716-    assert(node == search_result					\
51717-	|| !filter_node(filter_ctx, node));				\
51718-    if (node_candidate != NULL) {					\
51719-	return node_candidate;						\
51720-    }									\
51721-    if (subtree_candidate != NULL) {					\
51722-	return a_prefix##last_filtered_from_node(			\
51723-	    subtree_candidate, filter_node, filter_subtree,		\
51724-	    filter_ctx);						\
51725-    }									\
51726-    return NULL;							\
51727-}									\
51728-a_attr a_type *								\
51729-a_prefix##search_filtered(a_rbt_type *rbtree, const a_type *key,	\
51730-  bool (*filter_node)(void *, a_type *),				\
51731-  bool (*filter_subtree)(void *, a_type *),				\
51732-  void *filter_ctx) {							\
51733-    a_type *result = a_prefix##search_with_filter_bounds(rbtree, key,	\
51734-	filter_node, filter_subtree, filter_ctx,			\
51735-	/* include_subtree */ false,					\
51736-	/* nextbound */ false, NULL, NULL,				\
51737-	/* prevbound */ false, NULL, NULL);				\
51738-    return result;							\
51739-}									\
51740-a_attr a_type *								\
51741-a_prefix##nsearch_filtered(a_rbt_type *rbtree, const a_type *key,	\
51742-  bool (*filter_node)(void *, a_type *),				\
51743-  bool (*filter_subtree)(void *, a_type *),				\
51744-  void *filter_ctx) {							\
51745-    a_type *node_candidate;						\
51746-    a_type *subtree_candidate;						\
51747-    a_type *result = a_prefix##search_with_filter_bounds(rbtree, key,	\
51748-	filter_node, filter_subtree, filter_ctx,			\
51749-	/* include_subtree */ true,					\
51750-	/* nextbound */ true, &node_candidate, &subtree_candidate,	\
51751-	/* prevbound */ false, NULL, NULL);				\
51752-    if (result != NULL) {						\
51753-	return result;							\
51754-    }									\
51755-    if (node_candidate != NULL) {					\
51756-	return node_candidate;						\
51757-    }									\
51758-    if (subtree_candidate != NULL) {					\
51759-	return a_prefix##first_filtered_from_node(			\
51760-	    subtree_candidate, filter_node, filter_subtree,		\
51761-	    filter_ctx);						\
51762-    }									\
51763-    return NULL;							\
51764-}									\
51765-a_attr a_type *								\
51766-a_prefix##psearch_filtered(a_rbt_type *rbtree, const a_type *key,	\
51767-  bool (*filter_node)(void *, a_type *),				\
51768-  bool (*filter_subtree)(void *, a_type *),				\
51769-  void *filter_ctx) {							\
51770-    a_type *node_candidate;						\
51771-    a_type *subtree_candidate;						\
51772-    a_type *result = a_prefix##search_with_filter_bounds(rbtree, key,	\
51773-	filter_node, filter_subtree, filter_ctx,			\
51774-	/* include_subtree */ true,					\
51775-	/* nextbound */ false, NULL, NULL,				\
51776-	/* prevbound */ true, &node_candidate, &subtree_candidate);	\
51777-    if (result != NULL) {						\
51778-	return result;							\
51779-    }									\
51780-    if (node_candidate != NULL) {					\
51781-	return node_candidate;						\
51782-    }									\
51783-    if (subtree_candidate != NULL) {					\
51784-	return a_prefix##last_filtered_from_node(			\
51785-	    subtree_candidate, filter_node, filter_subtree,		\
51786-	    filter_ctx);						\
51787-    }									\
51788-    return NULL;							\
51789-}									\
51790-a_attr a_type *								\
51791-a_prefix##iter_recurse_filtered(a_rbt_type *rbtree, a_type *node,	\
51792-  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg,		\
51793-  bool (*filter_node)(void *, a_type *),				\
51794-  bool (*filter_subtree)(void *, a_type *),				\
51795-  void *filter_ctx) {							\
51796-    if (node == NULL || !filter_subtree(filter_ctx, node)) {		\
51797-	return NULL;							\
51798-    }									\
51799-    a_type *ret;							\
51800-    a_type *left = rbtn_left_get(a_type, a_field, node);		\
51801-    a_type *right = rbtn_right_get(a_type, a_field, node);		\
51802-    ret = a_prefix##iter_recurse_filtered(rbtree, left, cb, arg,	\
51803-      filter_node, filter_subtree, filter_ctx);				\
51804-    if (ret != NULL) {							\
51805-	return ret;							\
51806-    }									\
51807-    if (filter_node(filter_ctx, node)) {				\
51808-	ret = cb(rbtree, node, arg);					\
51809-    }									\
51810-    if (ret != NULL) {							\
51811-	return ret;							\
51812-    }									\
51813-    return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg,	\
51814-      filter_node, filter_subtree, filter_ctx);				\
51815-}									\
51816-a_attr a_type *								\
51817-a_prefix##iter_start_filtered(a_rbt_type *rbtree, a_type *start,	\
51818-  a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *),		\
51819-  void *arg, bool (*filter_node)(void *, a_type *),			\
51820-  bool (*filter_subtree)(void *, a_type *),				\
51821-  void *filter_ctx) {							\
51822-    if (!filter_subtree(filter_ctx, node)) {				\
51823-	return NULL;							\
51824-    }									\
51825-    int cmp = a_cmp(start, node);					\
51826-    a_type *ret;							\
51827-    a_type *left = rbtn_left_get(a_type, a_field, node);		\
51828-    a_type *right = rbtn_right_get(a_type, a_field, node);		\
51829-    if (cmp < 0) {							\
51830-	ret = a_prefix##iter_start_filtered(rbtree, start, left, cb,	\
51831-	    arg, filter_node, filter_subtree, filter_ctx);		\
51832-	if (ret != NULL) {						\
51833-	    return ret;							\
51834-	}								\
51835-	if (filter_node(filter_ctx, node)) {				\
51836-	    ret = cb(rbtree, node, arg);				\
51837-	    if (ret != NULL) {						\
51838-		return ret;						\
51839-	    }								\
51840-	}								\
51841-	return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg,	\
51842-	    filter_node, filter_subtree, filter_ctx);			\
51843-    } else if (cmp > 0) {						\
51844-	return a_prefix##iter_start_filtered(rbtree, start, right,	\
51845-	  cb, arg, filter_node, filter_subtree, filter_ctx);		\
51846-    } else {								\
51847-	if (filter_node(filter_ctx, node)) {				\
51848-	    ret = cb(rbtree, node, arg);				\
51849-	    if (ret != NULL) {						\
51850-		return ret;						\
51851-	    }								\
51852-	}								\
51853-	return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg,	\
51854-	  filter_node, filter_subtree, filter_ctx);			\
51855-    }									\
51856-}									\
51857-a_attr a_type *								\
51858-a_prefix##iter_filtered(a_rbt_type *rbtree, a_type *start,		\
51859-  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg,		\
51860-  bool (*filter_node)(void *, a_type *),				\
51861-  bool (*filter_subtree)(void *, a_type *),				\
51862-  void *filter_ctx) {							\
51863-    a_type *ret;							\
51864-    if (start != NULL) {						\
51865-	ret = a_prefix##iter_start_filtered(rbtree, start,		\
51866-	    rbtree->rbt_root, cb, arg, filter_node, filter_subtree,	\
51867-	    filter_ctx);						\
51868-    } else {								\
51869-	ret = a_prefix##iter_recurse_filtered(rbtree, rbtree->rbt_root,	\
51870-	    cb, arg, filter_node, filter_subtree, filter_ctx);		\
51871-    }									\
51872-    return ret;								\
51873-}									\
51874-a_attr a_type *								\
51875-a_prefix##reverse_iter_recurse_filtered(a_rbt_type *rbtree,		\
51876-  a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *),		\
51877-  void *arg,								\
51878-  bool (*filter_node)(void *, a_type *),				\
51879-  bool (*filter_subtree)(void *, a_type *),				\
51880-  void *filter_ctx) {							\
51881-    if (node == NULL || !filter_subtree(filter_ctx, node)) {		\
51882-	return NULL;							\
51883-    }									\
51884-    a_type *ret;							\
51885-    a_type *left = rbtn_left_get(a_type, a_field, node);		\
51886-    a_type *right = rbtn_right_get(a_type, a_field, node);		\
51887-    ret = a_prefix##reverse_iter_recurse_filtered(rbtree, right, cb,	\
51888-	arg, filter_node, filter_subtree, filter_ctx);			\
51889-    if (ret != NULL) {							\
51890-	return ret;							\
51891-    }									\
51892-    if (filter_node(filter_ctx, node)) {				\
51893-	ret = cb(rbtree, node, arg);					\
51894-    }									\
51895-    if (ret != NULL) {							\
51896-	return ret;							\
51897-    }									\
51898-    return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb,	\
51899-      arg, filter_node, filter_subtree, filter_ctx);			\
51900-}									\
51901-a_attr a_type *								\
51902-a_prefix##reverse_iter_start_filtered(a_rbt_type *rbtree, a_type *start,\
51903-  a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *),		\
51904-  void *arg, bool (*filter_node)(void *, a_type *),			\
51905-  bool (*filter_subtree)(void *, a_type *),				\
51906-  void *filter_ctx) {							\
51907-    if (!filter_subtree(filter_ctx, node)) {				\
51908-	return NULL;							\
51909-    }									\
51910-    int cmp = a_cmp(start, node);					\
51911-    a_type *ret;							\
51912-    a_type *left = rbtn_left_get(a_type, a_field, node);		\
51913-    a_type *right = rbtn_right_get(a_type, a_field, node);		\
51914-    if (cmp > 0) {							\
51915-	ret = a_prefix##reverse_iter_start_filtered(rbtree, start,	\
51916-	    right, cb, arg, filter_node, filter_subtree, filter_ctx);	\
51917-	if (ret != NULL) {						\
51918-	    return ret;							\
51919-	}								\
51920-	if (filter_node(filter_ctx, node)) {				\
51921-	    ret = cb(rbtree, node, arg);				\
51922-	    if (ret != NULL) {						\
51923-		return ret;						\
51924-	    }								\
51925-	}								\
51926-	return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb,\
51927-	    arg, filter_node, filter_subtree, filter_ctx);		\
51928-    } else if (cmp < 0) {						\
51929-	return a_prefix##reverse_iter_start_filtered(rbtree, start,	\
51930-	  left, cb, arg, filter_node, filter_subtree, filter_ctx);	\
51931-    } else {								\
51932-	if (filter_node(filter_ctx, node)) {				\
51933-	    ret = cb(rbtree, node, arg);				\
51934-	    if (ret != NULL) {						\
51935-		return ret;						\
51936-	    }								\
51937-	}								\
51938-	return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb,\
51939-	  arg, filter_node, filter_subtree, filter_ctx);		\
51940-    }									\
51941-}									\
51942-a_attr a_type *								\
51943-a_prefix##reverse_iter_filtered(a_rbt_type *rbtree, a_type *start,	\
51944-  a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg,		\
51945-  bool (*filter_node)(void *, a_type *),				\
51946-  bool (*filter_subtree)(void *, a_type *),				\
51947-  void *filter_ctx) {							\
51948-    a_type *ret;							\
51949-    if (start != NULL) {						\
51950-	ret = a_prefix##reverse_iter_start_filtered(rbtree, start,	\
51951-	    rbtree->rbt_root, cb, arg, filter_node, filter_subtree,	\
51952-	    filter_ctx);						\
51953-    } else {								\
51954-	ret = a_prefix##reverse_iter_recurse_filtered(rbtree,		\
51955-	    rbtree->rbt_root, cb, arg, filter_node, filter_subtree,	\
51956-	    filter_ctx);						\
51957-    }									\
51958-    return ret;								\
51959-}									\
51960-) /* end rb_summarized_only */
51961-
51962-#endif /* JEMALLOC_INTERNAL_RB_H */
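To see how the filtered API generated above might be used, here is a minimal sketch. Every name in it is hypothetical (ex_tree_t, ex_node_t, the ex_ prefix, the size field): it assumes a tree whose functions were generated earlier in this header with the prefix ex_, and it only calls forms whose generation appears above (ex_iter_filtered). The subtree filter is deliberately conservative: filter_subtree only has to return true whenever the subtree could contain a passing node, so always returning true is legal and merely skips pruning.

#include <stdbool.h>
#include <stddef.h>

typedef struct ex_tree_s ex_tree_t;	/* assumed: generated tree type */
typedef struct ex_node_s ex_node_t;	/* assumed: node type */
struct ex_node_s {
	size_t size;	/* payload consulted by the node filter */
	/* ... rb link field wired into the generation macro, elided ... */
};

/* A node passes if it is at least as large as the threshold in filter_ctx. */
static bool
ex_filter_node(void *filter_ctx, ex_node_t *node) {
	return node->size >= *(size_t *)filter_ctx;
}

/* Conservative subtree filter: never prunes. */
static bool
ex_filter_subtree(void *filter_ctx, ex_node_t *node) {
	(void)filter_ctx;
	(void)node;
	return true;
}

/* Iteration callback; returning NULL keeps the iteration going. */
static ex_node_t *
ex_count_cb(ex_tree_t *tree, ex_node_t *node, void *arg) {
	(void)tree;
	(void)node;
	(*(size_t *)arg)++;
	return NULL;
}

/* Count the nodes whose size is at least min_size. */
static size_t
ex_count_at_least(ex_tree_t *tree, size_t min_size) {
	size_t count = 0;
	ex_iter_filtered(tree, /* start */ NULL, ex_count_cb, &count,
	    ex_filter_node, ex_filter_subtree, &min_size);
	return count;
}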
51963diff --git a/jemalloc/include/jemalloc/internal/rtree.h b/jemalloc/include/jemalloc/internal/rtree.h
51964deleted file mode 100644
51965index a00adb2..0000000
51966--- a/jemalloc/include/jemalloc/internal/rtree.h
51967+++ /dev/null
51968@@ -1,554 +0,0 @@
51969-#ifndef JEMALLOC_INTERNAL_RTREE_H
51970-#define JEMALLOC_INTERNAL_RTREE_H
51971-
51972-#include "jemalloc/internal/atomic.h"
51973-#include "jemalloc/internal/mutex.h"
51974-#include "jemalloc/internal/rtree_tsd.h"
51975-#include "jemalloc/internal/sc.h"
51976-#include "jemalloc/internal/tsd.h"
51977-
51978-/*
51979- * This radix tree implementation is tailored to the singular purpose of
51980- * associating metadata with extents that are currently owned by jemalloc.
51981- *
51982- *******************************************************************************
51983- */
51984-
51985-/* Number of high insignificant bits. */
51986-#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR)
51987-/* Number of low insignificant bits. */
51988-#define RTREE_NLIB LG_PAGE
51989-/* Number of significant bits. */
51990-#define RTREE_NSB (LG_VADDR - RTREE_NLIB)
51991-/* Number of levels in radix tree. */
51992-#if RTREE_NSB <= 10
51993-#  define RTREE_HEIGHT 1
51994-#elif RTREE_NSB <= 36
51995-#  define RTREE_HEIGHT 2
51996-#elif RTREE_NSB <= 52
51997-#  define RTREE_HEIGHT 3
51998-#else
51999-#  error Unsupported number of significant virtual address bits
52000-#endif
52001-/* Use compact leaf representation if virtual address encoding allows. */
52002-#if RTREE_NHIB >= LG_CEIL(SC_NSIZES)
52003-#  define RTREE_LEAF_COMPACT
52004-#endif
52005-
52006-typedef struct rtree_node_elm_s rtree_node_elm_t;
52007-struct rtree_node_elm_s {
52008-	atomic_p_t	child; /* (rtree_{node,leaf}_elm_t *) */
52009-};
52010-
52011-typedef struct rtree_metadata_s rtree_metadata_t;
52012-struct rtree_metadata_s {
52013-	szind_t szind;
52014-	extent_state_t state; /* Mirrors edata->state. */
52015-	bool is_head; /* Mirrors edata->is_head. */
52016-	bool slab;
52017-};
52018-
52019-typedef struct rtree_contents_s rtree_contents_t;
52020-struct rtree_contents_s {
52021-	edata_t *edata;
52022-	rtree_metadata_t metadata;
52023-};
52024-
52025-#define RTREE_LEAF_STATE_WIDTH EDATA_BITS_STATE_WIDTH
52026-#define RTREE_LEAF_STATE_SHIFT 2
52027-#define RTREE_LEAF_STATE_MASK MASK(RTREE_LEAF_STATE_WIDTH, RTREE_LEAF_STATE_SHIFT)
52028-
52029-struct rtree_leaf_elm_s {
52030-#ifdef RTREE_LEAF_COMPACT
52031-	/*
52032-	 * Single pointer-width field containing all of the leaf element fields.
52033-	 * For example, on a 64-bit x64 system with 48 significant virtual
52034-	 * memory address bits, the index, edata, state, is_head, and slab
52035-	 * fields are packed as follows:
52036-	 *
52037-	 * x: index
52038-	 * e: edata
52039-	 * s: state
52040-	 * h: is_head
52041-	 * b: slab
52042-	 *
52043-	 *   00000000 xxxxxxxx eeeeeeee [...] eeeeeeee e00ssshb
52044-	 */
52045-	atomic_p_t	le_bits;
52046-#else
52047-	atomic_p_t	le_edata; /* (edata_t *) */
52048-	/*
52049-	 * From high to low bits: szind (8 bits), state (4 bits), is_head, slab
52050-	 */
52051-	atomic_u_t	le_metadata;
52052-#endif
52053-};
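The compact packing sketched in the comment above can be demonstrated with a small self-contained example. The constants are assumptions made for the illustration only (a 64-bit platform, 48 significant address bits, 128-byte edata alignment); the real code derives them from LG_VADDR, EDATA_ALIGNMENT, and the RTREE_LEAF_STATE_* macros.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define DEMO_LG_VADDR 48			/* assumed */
#define DEMO_EDATA_ALIGNMENT ((uintptr_t)128)	/* assumed */
#define DEMO_NHIB (64 - DEMO_LG_VADDR)

/* Pack pointer + metadata into one word, following the diagram above. */
static uintptr_t
demo_pack(void *edata, unsigned szind, unsigned state, bool is_head,
    bool slab) {
	assert(((uintptr_t)edata & (DEMO_EDATA_ALIGNMENT - 1)) == 0);
	uintptr_t ptr_bits = (uintptr_t)edata &
	    (((uintptr_t)1 << DEMO_LG_VADDR) - 1);
	return ptr_bits | ((uintptr_t)szind << DEMO_LG_VADDR) |
	    ((uintptr_t)state << 2) | ((uintptr_t)is_head << 1) |
	    (uintptr_t)slab;
}

/* Recover the pointer: sign-extend the high bits, drop the metadata bits. */
static void *
demo_unpack_edata(uintptr_t bits) {
	uintptr_t ptr = (uintptr_t)((intptr_t)(bits << DEMO_NHIB) >> DEMO_NHIB);
	return (void *)(ptr & ~(DEMO_EDATA_ALIGNMENT - 1));
}

int
main(void) {
	static _Alignas(128) unsigned char buf[128];
	uintptr_t bits = demo_pack(buf, /* szind */ 17, /* state */ 1,
	    /* is_head */ true, /* slab */ false);
	assert(demo_unpack_edata(bits) == (void *)buf);
	assert((bits >> DEMO_LG_VADDR) == 17);	/* szind in the high bits */
	return 0;
}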
52054-
52055-typedef struct rtree_level_s rtree_level_t;
52056-struct rtree_level_s {
52057-	/* Number of key bits distinguished by this level. */
52058-	unsigned		bits;
52059-	/*
52060-	 * Cumulative number of key bits distinguished by traversing to
52061-	 * corresponding tree level.
52062-	 */
52063-	unsigned		cumbits;
52064-};
52065-
52066-typedef struct rtree_s rtree_t;
52067-struct rtree_s {
52068-	base_t			*base;
52069-	malloc_mutex_t		init_lock;
52070-	/* Number of elements based on rtree_levels[0].bits. */
52071-#if RTREE_HEIGHT > 1
52072-	rtree_node_elm_t	root[1U << (RTREE_NSB/RTREE_HEIGHT)];
52073-#else
52074-	rtree_leaf_elm_t	root[1U << (RTREE_NSB/RTREE_HEIGHT)];
52075-#endif
52076-};
52077-
52078-/*
52079- * Split the bits into one to three partitions depending on the number of
52080- * significant bits.  If the number of bits does not divide evenly into the
52081- * number of levels, place one remainder bit per level starting at the leaf
52082- * level.
52083- */
52084-static const rtree_level_t rtree_levels[] = {
52085-#if RTREE_HEIGHT == 1
52086-	{RTREE_NSB, RTREE_NHIB + RTREE_NSB}
52087-#elif RTREE_HEIGHT == 2
52088-	{RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2},
52089-	{RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB}
52090-#elif RTREE_HEIGHT == 3
52091-	{RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3},
52092-	{RTREE_NSB/3 + RTREE_NSB%3/2,
52093-	    RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2},
52094-	{RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB}
52095-#else
52096-#  error Unsupported rtree height
52097-#endif
52098-};
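As a concrete instance of the split described above (a standalone sketch, not part of the header): assume 64-bit pointers (LG_SIZEOF_PTR == 3), 48 significant virtual-address bits (LG_VADDR == 48), and 4 KiB pages (LG_PAGE == 12). Under those assumptions RTREE_NHIB == 16, RTREE_NSB == 36, the tree has two levels, and each level consumes 18 key bits, with cumbits of 34 and 52.

#include <assert.h>
#include <stdio.h>

/* Assumed-for-illustration configuration; real values are platform-derived. */
#define DEMO_LG_SIZEOF_PTR 3	/* 64-bit pointers */
#define DEMO_LG_VADDR 48	/* significant virtual address bits */
#define DEMO_LG_PAGE 12		/* 4 KiB pages */

int
main(void) {
	unsigned nhib = (1U << (DEMO_LG_SIZEOF_PTR + 3)) - DEMO_LG_VADDR;
	unsigned nsb = DEMO_LG_VADDR - DEMO_LG_PAGE;
	unsigned height = (nsb <= 10) ? 1 : (nsb <= 36) ? 2 : 3;

	assert(nhib == 16 && nsb == 36 && height == 2);
	/* Height 2: per-level bits are {nsb/2, nsb/2 + nsb%2} == {18, 18}. */
	printf("level 0: %u bits (cumbits %u), level 1: %u bits (cumbits %u)\n",
	    nsb / 2, nhib + nsb / 2, nsb / 2 + nsb % 2, nhib + nsb);
	return 0;
}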
52099-
52100-bool rtree_new(rtree_t *rtree, base_t *base, bool zeroed);
52101-
52102-rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
52103-    rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
52104-
52105-JEMALLOC_ALWAYS_INLINE unsigned
52106-rtree_leaf_maskbits(void) {
52107-	unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
52108-	unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
52109-	    rtree_levels[RTREE_HEIGHT-1].bits);
52110-	return ptrbits - cumbits;
52111-}
52112-
52113-JEMALLOC_ALWAYS_INLINE uintptr_t
52114-rtree_leafkey(uintptr_t key) {
52115-	uintptr_t mask = ~((ZU(1) << rtree_leaf_maskbits()) - 1);
52116-	return (key & mask);
52117-}
52118-
52119-JEMALLOC_ALWAYS_INLINE size_t
52120-rtree_cache_direct_map(uintptr_t key) {
52121-	return (size_t)((key >> rtree_leaf_maskbits()) &
52122-	    (RTREE_CTX_NCACHE - 1));
52123-}
52124-
52125-JEMALLOC_ALWAYS_INLINE uintptr_t
52126-rtree_subkey(uintptr_t key, unsigned level) {
52127-	unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
52128-	unsigned cumbits = rtree_levels[level].cumbits;
52129-	unsigned shiftbits = ptrbits - cumbits;
52130-	unsigned maskbits = rtree_levels[level].bits;
52131-	uintptr_t mask = (ZU(1) << maskbits) - 1;
52132-	return ((key >> shiftbits) & mask);
52133-}
52134-
52135-/*
52136- * Atomic getters.
52137- *
52138- * dependent: Reading a value on behalf of a pointer to a valid allocation
52139- *            is guaranteed to be a clean read even without synchronization,
52140- *            because the rtree update became visible in memory before the
52141- *            pointer came into existence.
52142- * !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be
52143- *             dependent on a previous rtree write, which means a stale read
52144- *             could result if synchronization were omitted here.
52145- */
52146-#  ifdef RTREE_LEAF_COMPACT
52147-JEMALLOC_ALWAYS_INLINE uintptr_t
52148-rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
52149-    rtree_leaf_elm_t *elm, bool dependent) {
52150-	return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
52151-	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
52152-}
52153-
52154-JEMALLOC_ALWAYS_INLINE uintptr_t
52155-rtree_leaf_elm_bits_encode(rtree_contents_t contents) {
52156-	assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
52157-	uintptr_t edata_bits = (uintptr_t)contents.edata
52158-	    & (((uintptr_t)1 << LG_VADDR) - 1);
52159-
52160-	uintptr_t szind_bits = (uintptr_t)contents.metadata.szind << LG_VADDR;
52161-	uintptr_t slab_bits = (uintptr_t)contents.metadata.slab;
52162-	uintptr_t is_head_bits = (uintptr_t)contents.metadata.is_head << 1;
52163-	uintptr_t state_bits = (uintptr_t)contents.metadata.state <<
52164-	    RTREE_LEAF_STATE_SHIFT;
52165-	uintptr_t metadata_bits = szind_bits | state_bits | is_head_bits |
52166-	    slab_bits;
52167-	assert((edata_bits & metadata_bits) == 0);
52168-
52169-	return edata_bits | metadata_bits;
52170-}
52171-
52172-JEMALLOC_ALWAYS_INLINE rtree_contents_t
52173-rtree_leaf_elm_bits_decode(uintptr_t bits) {
52174-	rtree_contents_t contents;
52175-	/* Do the easy things first. */
52176-	contents.metadata.szind = bits >> LG_VADDR;
52177-	contents.metadata.slab = (bool)(bits & 1);
52178-	contents.metadata.is_head = (bool)(bits & (1 << 1));
52179-
52180-	uintptr_t state_bits = (bits & RTREE_LEAF_STATE_MASK) >>
52181-	    RTREE_LEAF_STATE_SHIFT;
52182-	assert(state_bits <= extent_state_max);
52183-	contents.metadata.state = (extent_state_t)state_bits;
52184-
52185-	uintptr_t low_bit_mask = ~((uintptr_t)EDATA_ALIGNMENT - 1);
52186-#    ifdef __aarch64__
52187-	/*
52188-	 * aarch64 doesn't sign extend the highest virtual address bit to set
52189-	 * the higher ones.  Instead, the high bits get zeroed.
52190-	 */
52191-	uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
52192-	/* Mask off metadata. */
52193-	uintptr_t mask = high_bit_mask & low_bit_mask;
52194-	contents.edata = (edata_t *)(bits & mask);
52195-#    else
52196-	/* Restore sign-extended high bits, mask metadata bits. */
52197-	contents.edata = (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB)
52198-	    >> RTREE_NHIB) & low_bit_mask);
52199-#    endif
52200-	assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
52201-	return contents;
52202-}
52203-
52204-#  endif /* RTREE_LEAF_COMPACT */
52205-
52206-JEMALLOC_ALWAYS_INLINE rtree_contents_t
52207-rtree_leaf_elm_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
52208-    bool dependent) {
52209-#ifdef RTREE_LEAF_COMPACT
52210-	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
52211-	rtree_contents_t contents = rtree_leaf_elm_bits_decode(bits);
52212-	return contents;
52213-#else
52214-	rtree_contents_t contents;
52215-	unsigned metadata_bits = atomic_load_u(&elm->le_metadata, dependent
52216-	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
52217-	contents.metadata.slab = (bool)(metadata_bits & 1);
52218-	contents.metadata.is_head = (bool)(metadata_bits & (1 << 1));
52219-
52220-	uintptr_t state_bits = (metadata_bits & RTREE_LEAF_STATE_MASK) >>
52221-	    RTREE_LEAF_STATE_SHIFT;
52222-	assert(state_bits <= extent_state_max);
52223-	contents.metadata.state = (extent_state_t)state_bits;
52224-	contents.metadata.szind = metadata_bits >> (RTREE_LEAF_STATE_SHIFT +
52225-	    RTREE_LEAF_STATE_WIDTH);
52226-
52227-	contents.edata = (edata_t *)atomic_load_p(&elm->le_edata, dependent
52228-	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
52229-
52230-	return contents;
52231-#endif
52232-}
52233-
52234-JEMALLOC_ALWAYS_INLINE void
52235-rtree_contents_encode(rtree_contents_t contents, void **bits,
52236-    unsigned *additional) {
52237-#ifdef RTREE_LEAF_COMPACT
52238-	*bits = (void *)rtree_leaf_elm_bits_encode(contents);
52239-#else
52240-	*additional = (unsigned)contents.metadata.slab
52241-	    | ((unsigned)contents.metadata.is_head << 1)
52242-	    | ((unsigned)contents.metadata.state << RTREE_LEAF_STATE_SHIFT)
52243-	    | ((unsigned)contents.metadata.szind << (RTREE_LEAF_STATE_SHIFT +
52244-	    RTREE_LEAF_STATE_WIDTH));
52245-	*bits = contents.edata;
52246-#endif
52247-}
52248-
52249-JEMALLOC_ALWAYS_INLINE void
52250-rtree_leaf_elm_write_commit(tsdn_t *tsdn, rtree_t *rtree,
52251-    rtree_leaf_elm_t *elm, void *bits, unsigned additional) {
52252-#ifdef RTREE_LEAF_COMPACT
52253-	atomic_store_p(&elm->le_bits, bits, ATOMIC_RELEASE);
52254-#else
52255-	atomic_store_u(&elm->le_metadata, additional, ATOMIC_RELEASE);
52256-	/*
52257-	 * Write edata last, since the element is atomically considered valid
52258-	 * as soon as the edata field is non-NULL.
52259-	 */
52260-	atomic_store_p(&elm->le_edata, bits, ATOMIC_RELEASE);
52261-#endif
52262-}
52263-
52264-JEMALLOC_ALWAYS_INLINE void
52265-rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
52266-    rtree_leaf_elm_t *elm, rtree_contents_t contents) {
52267-	assert((uintptr_t)contents.edata % EDATA_ALIGNMENT == 0);
52268-	void *bits;
52269-	unsigned additional;
52270-
52271-	rtree_contents_encode(contents, &bits, &additional);
52272-	rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
52273-}
52274-
52275-/* The state field can be updated independently (and more frequently). */
52276-JEMALLOC_ALWAYS_INLINE void
52277-rtree_leaf_elm_state_update(tsdn_t *tsdn, rtree_t *rtree,
52278-    rtree_leaf_elm_t *elm1, rtree_leaf_elm_t *elm2, extent_state_t state) {
52279-	assert(elm1 != NULL);
52280-#ifdef RTREE_LEAF_COMPACT
52281-	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm1,
52282-	    /* dependent */ true);
52283-	bits &= ~RTREE_LEAF_STATE_MASK;
52284-	bits |= state << RTREE_LEAF_STATE_SHIFT;
52285-	atomic_store_p(&elm1->le_bits, (void *)bits, ATOMIC_RELEASE);
52286-	if (elm2 != NULL) {
52287-		atomic_store_p(&elm2->le_bits, (void *)bits, ATOMIC_RELEASE);
52288-	}
52289-#else
52290-	unsigned bits = atomic_load_u(&elm1->le_metadata, ATOMIC_RELAXED);
52291-	bits &= ~RTREE_LEAF_STATE_MASK;
52292-	bits |= state << RTREE_LEAF_STATE_SHIFT;
52293-	atomic_store_u(&elm1->le_metadata, bits, ATOMIC_RELEASE);
52294-	if (elm2 != NULL) {
52295-		atomic_store_u(&elm2->le_metadata, bits, ATOMIC_RELEASE);
52296-	}
52297-#endif
52298-}
52299-
52300-/*
52301- * Tries to look up the key in the L1 cache, returning false if there's a hit, or
52302- * true if there's a miss.
52303- * Key is allowed to be NULL; returns true in this case.
52304- */
52305-JEMALLOC_ALWAYS_INLINE bool
52306-rtree_leaf_elm_lookup_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
52307-    uintptr_t key, rtree_leaf_elm_t **elm) {
52308-	size_t slot = rtree_cache_direct_map(key);
52309-	uintptr_t leafkey = rtree_leafkey(key);
52310-	assert(leafkey != RTREE_LEAFKEY_INVALID);
52311-
52312-	if (unlikely(rtree_ctx->cache[slot].leafkey != leafkey)) {
52313-		return true;
52314-	}
52315-
52316-	rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
52317-	assert(leaf != NULL);
52318-	uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
52319-	*elm = &leaf[subkey];
52320-
52321-	return false;
52322-}
52323-
52324-JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
52325-rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
52326-    uintptr_t key, bool dependent, bool init_missing) {
52327-	assert(key != 0);
52328-	assert(!dependent || !init_missing);
52329-
52330-	size_t slot = rtree_cache_direct_map(key);
52331-	uintptr_t leafkey = rtree_leafkey(key);
52332-	assert(leafkey != RTREE_LEAFKEY_INVALID);
52333-
52334-	/* Fast path: L1 direct mapped cache. */
52335-	if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
52336-		rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
52337-		assert(leaf != NULL);
52338-		uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
52339-		return &leaf[subkey];
52340-	}
52341-	/*
52342-	 * Search the L2 LRU cache.  On hit, swap the matching element into the
52343-	 * slot in L1 cache, and move the position in L2 up by 1.
52344-	 */
52345-#define RTREE_CACHE_CHECK_L2(i) do {					\
52346-	if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) {	\
52347-		rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf;	\
52348-		assert(leaf != NULL);					\
52349-		if (i > 0) {						\
52350-			/* Bubble up by one. */				\
52351-			rtree_ctx->l2_cache[i].leafkey =		\
52352-				rtree_ctx->l2_cache[i - 1].leafkey;	\
52353-			rtree_ctx->l2_cache[i].leaf =			\
52354-				rtree_ctx->l2_cache[i - 1].leaf;	\
52355-			rtree_ctx->l2_cache[i - 1].leafkey =		\
52356-			    rtree_ctx->cache[slot].leafkey;		\
52357-			rtree_ctx->l2_cache[i - 1].leaf =		\
52358-			    rtree_ctx->cache[slot].leaf;		\
52359-		} else {						\
52360-			rtree_ctx->l2_cache[0].leafkey =		\
52361-			    rtree_ctx->cache[slot].leafkey;		\
52362-			rtree_ctx->l2_cache[0].leaf =			\
52363-			    rtree_ctx->cache[slot].leaf;		\
52364-		}							\
52365-		rtree_ctx->cache[slot].leafkey = leafkey;		\
52366-		rtree_ctx->cache[slot].leaf = leaf;			\
52367-		uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);	\
52368-		return &leaf[subkey];					\
52369-	}								\
52370-} while (0)
52371-	/* Check the first cache entry. */
52372-	RTREE_CACHE_CHECK_L2(0);
52373-	/* Search the remaining cache elements. */
52374-	for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) {
52375-		RTREE_CACHE_CHECK_L2(i);
52376-	}
52377-#undef RTREE_CACHE_CHECK_L2
52378-
52379-	return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key,
52380-	    dependent, init_missing);
52381-}
52382-
52383-/*
52384- * Returns true on lookup failure.
52385- */
52386-static inline bool
52387-rtree_read_independent(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
52388-    uintptr_t key, rtree_contents_t *r_contents) {
52389-	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
52390-	    key, /* dependent */ false, /* init_missing */ false);
52391-	if (elm == NULL) {
52392-		return true;
52393-	}
52394-	*r_contents = rtree_leaf_elm_read(tsdn, rtree, elm,
52395-	    /* dependent */ false);
52396-	return false;
52397-}
52398-
52399-static inline rtree_contents_t
52400-rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
52401-    uintptr_t key) {
52402-	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
52403-	    key, /* dependent */ true, /* init_missing */ false);
52404-	assert(elm != NULL);
52405-	return rtree_leaf_elm_read(tsdn, rtree, elm, /* dependent */ true);
52406-}
52407-
52408-static inline rtree_metadata_t
52409-rtree_metadata_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
52410-    uintptr_t key) {
52411-	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
52412-	    key, /* dependent */ true, /* init_missing */ false);
52413-	assert(elm != NULL);
52414-	return rtree_leaf_elm_read(tsdn, rtree, elm,
52415-	    /* dependent */ true).metadata;
52416-}
52417-
52418-/*
52419- * Returns true when the request cannot be fulfilled by fastpath.
52420- */
52421-static inline bool
52422-rtree_metadata_try_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
52423-    uintptr_t key, rtree_metadata_t *r_rtree_metadata) {
52424-	rtree_leaf_elm_t *elm;
52425-	/*
52426-	 * Check the bool return value (lookup success or not) instead of
52427-	 * elm == NULL, which would cost an extra branch: when the cache lookup
52428-	 * succeeds, the returned pointer is never NULL, but the compiler cannot
52429-	 * know that.
52430-	 */
52431-	if (rtree_leaf_elm_lookup_fast(tsdn, rtree, rtree_ctx, key, &elm)) {
52432-		return true;
52433-	}
52434-	assert(elm != NULL);
52435-	*r_rtree_metadata = rtree_leaf_elm_read(tsdn, rtree, elm,
52436-	    /* dependent */ true).metadata;
52437-	return false;
52438-}
52439-
52440-JEMALLOC_ALWAYS_INLINE void
52441-rtree_write_range_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
52442-    uintptr_t base, uintptr_t end, rtree_contents_t contents, bool clearing) {
52443-	assert((base & PAGE_MASK) == 0 && (end & PAGE_MASK) == 0);
52444-	/*
52445-	 * Only used for emap_(de)register_interior, which implies the
52446-	 * boundaries have been registered already.  Therefore all the lookups
52447-	 * are dependent w/o init_missing, assuming the range spans across at
52448-	 * most 2 rtree leaf nodes (each covers 1 GiB of vaddr).
52449-	 */
52450-	void *bits;
52451-	unsigned additional;
52452-	rtree_contents_encode(contents, &bits, &additional);
52453-
52454-	rtree_leaf_elm_t *elm = NULL; /* Dead store. */
52455-	for (uintptr_t addr = base; addr <= end; addr += PAGE) {
52456-		if (addr == base ||
52457-		    (addr & ((ZU(1) << rtree_leaf_maskbits()) - 1)) == 0) {
52458-			elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
52459-			    /* dependent */ true, /* init_missing */ false);
52460-			assert(elm != NULL);
52461-		}
52462-		assert(elm == rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
52463-		    /* dependent */ true, /* init_missing */ false));
52464-		assert(!clearing || rtree_leaf_elm_read(tsdn, rtree, elm,
52465-		    /* dependent */ true).edata != NULL);
52466-		rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
52467-		elm++;
52468-	}
52469-}
52470-
52471-JEMALLOC_ALWAYS_INLINE void
52472-rtree_write_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
52473-    uintptr_t base, uintptr_t end, rtree_contents_t contents) {
52474-	rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents,
52475-	    /* clearing */ false);
52476-}
52477-
52478-JEMALLOC_ALWAYS_INLINE bool
52479-rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
52480-    rtree_contents_t contents) {
52481-	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
52482-	    key, /* dependent */ false, /* init_missing */ true);
52483-	if (elm == NULL) {
52484-		return true;
52485-	}
52486-
52487-	rtree_leaf_elm_write(tsdn, rtree, elm, contents);
52488-
52489-	return false;
52490-}
52491-
52492-static inline void
52493-rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
52494-    uintptr_t key) {
52495-	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
52496-	    key, /* dependent */ true, /* init_missing */ false);
52497-	assert(elm != NULL);
52498-	assert(rtree_leaf_elm_read(tsdn, rtree, elm,
52499-	    /* dependent */ true).edata != NULL);
52500-	rtree_contents_t contents;
52501-	contents.edata = NULL;
52502-	contents.metadata.szind = SC_NSIZES;
52503-	contents.metadata.slab = false;
52504-	contents.metadata.is_head = false;
52505-	contents.metadata.state = (extent_state_t)0;
52506-	rtree_leaf_elm_write(tsdn, rtree, elm, contents);
52507-}
52508-
52509-static inline void
52510-rtree_clear_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
52511-    uintptr_t base, uintptr_t end) {
52512-	rtree_contents_t contents;
52513-	contents.edata = NULL;
52514-	contents.metadata.szind = SC_NSIZES;
52515-	contents.metadata.slab = false;
52516-	contents.metadata.is_head = false;
52517-	contents.metadata.state = (extent_state_t)0;
52518-	rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents,
52519-	    /* clearing */ true);
52520-}
52521-
52522-#endif /* JEMALLOC_INTERNAL_RTREE_H */
52523diff --git a/jemalloc/include/jemalloc/internal/rtree_tsd.h b/jemalloc/include/jemalloc/internal/rtree_tsd.h
52524deleted file mode 100644
52525index e45525c..0000000
52526--- a/jemalloc/include/jemalloc/internal/rtree_tsd.h
52527+++ /dev/null
52528@@ -1,62 +0,0 @@
52529-#ifndef JEMALLOC_INTERNAL_RTREE_CTX_H
52530-#define JEMALLOC_INTERNAL_RTREE_CTX_H
52531-
52532-/*
52533- * Number of leafkey/leaf pairs to cache in the L1 and L2 levels, respectively.  Each
52534- * entry supports an entire leaf, so the cache hit rate is typically high even
52535- * with a small number of entries.  In rare cases extent activity will straddle
52536- * the boundary between two leaf nodes.  Furthermore, an arena may use a
52537- * combination of dss and mmap.  Note that as memory usage grows past the amount
52538- * that this cache can directly cover, the cache will become less effective if
52539- * locality of reference is low, but the consequence is merely cache misses
52540- * while traversing the tree nodes.
52541- *
52542- * The L1 direct-mapped cache offers consistent, low cost on a cache hit.
52543- * However, collisions could affect the hit rate negatively.  This is resolved
52544- * by combining it with an L2 LRU cache, which requires linear search and
52545- * re-ordering on access but suffers no collisions.  Note that the cache will
52546- * itself suffer cache misses if made overly large, plus the cost of linear
52547- * search in the LRU cache.
52548- */
52549-#define RTREE_CTX_NCACHE 16
52550-#define RTREE_CTX_NCACHE_L2 8
52551-
52552-/* Needed for initialization only. */
52553-#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
52554-#define RTREE_CTX_CACHE_ELM_INVALID {RTREE_LEAFKEY_INVALID, NULL}
52555-
52556-#define RTREE_CTX_INIT_ELM_1 RTREE_CTX_CACHE_ELM_INVALID
52557-#define RTREE_CTX_INIT_ELM_2 RTREE_CTX_INIT_ELM_1, RTREE_CTX_INIT_ELM_1
52558-#define RTREE_CTX_INIT_ELM_4 RTREE_CTX_INIT_ELM_2, RTREE_CTX_INIT_ELM_2
52559-#define RTREE_CTX_INIT_ELM_8 RTREE_CTX_INIT_ELM_4, RTREE_CTX_INIT_ELM_4
52560-#define RTREE_CTX_INIT_ELM_16 RTREE_CTX_INIT_ELM_8, RTREE_CTX_INIT_ELM_8
52561-
52562-#define _RTREE_CTX_INIT_ELM_DATA(n) RTREE_CTX_INIT_ELM_##n
52563-#define RTREE_CTX_INIT_ELM_DATA(n) _RTREE_CTX_INIT_ELM_DATA(n)
52564-
52565-/*
52566- * Static initializer (to invalidate the cache entries) is required because the
52567- * free fastpath may access the rtree cache before a full tsd initialization.
52568- */
52569-#define RTREE_CTX_INITIALIZER {{RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE)}, \
52570-			       {RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE_L2)}}
52571-
52572-typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;
52573-
52574-typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t;
52575-struct rtree_ctx_cache_elm_s {
52576-	uintptr_t		leafkey;
52577-	rtree_leaf_elm_t	*leaf;
52578-};
52579-
52580-typedef struct rtree_ctx_s rtree_ctx_t;
52581-struct rtree_ctx_s {
52582-	/* Direct mapped cache. */
52583-	rtree_ctx_cache_elm_t	cache[RTREE_CTX_NCACHE];
52584-	/* L2 LRU cache. */
52585-	rtree_ctx_cache_elm_t	l2_cache[RTREE_CTX_NCACHE_L2];
52586-};
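To make the L1/L2 interplay described at the top of this file concrete, here is a simplified sketch of a lookup over the two arrays above. It mirrors the structure of rtree_leaf_elm_lookup in rtree.h (one direct-mapped L1 probe, then a linear L2 scan that promotes a hit into L1 and slides the displaced entries down). demo_cache_lookup is not a jemalloc function, and leafkey / l1_slot are assumed to be computed by the caller with rtree_leafkey and rtree_cache_direct_map.

static rtree_leaf_elm_t *
demo_cache_lookup(rtree_ctx_t *ctx, uintptr_t leafkey, size_t l1_slot) {
	/* L1: direct-mapped, one comparison. */
	if (ctx->cache[l1_slot].leafkey == leafkey) {
		return ctx->cache[l1_slot].leaf;
	}
	/* L2: LRU, linear scan. */
	for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
		if (ctx->l2_cache[i].leafkey != leafkey) {
			continue;
		}
		rtree_leaf_elm_t *leaf = ctx->l2_cache[i].leaf;
		if (i > 0) {
			/* Slide the (i-1) entry down into the vacated slot
			 * and put the displaced L1 entry at (i-1). */
			ctx->l2_cache[i] = ctx->l2_cache[i - 1];
			ctx->l2_cache[i - 1] = ctx->cache[l1_slot];
		} else {
			/* Hit at the front: swap with the L1 entry. */
			ctx->l2_cache[0] = ctx->cache[l1_slot];
		}
		/* Promote the hit into L1. */
		ctx->cache[l1_slot].leafkey = leafkey;
		ctx->cache[l1_slot].leaf = leaf;
		return leaf;
	}
	return NULL;	/* miss: the caller falls back to the tree walk */
}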
52587-
52588-void rtree_ctx_data_init(rtree_ctx_t *ctx);
52589-
52590-#endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */
52591diff --git a/jemalloc/include/jemalloc/internal/safety_check.h b/jemalloc/include/jemalloc/internal/safety_check.h
52592deleted file mode 100644
52593index f1a74f1..0000000
52594--- a/jemalloc/include/jemalloc/internal/safety_check.h
52595+++ /dev/null
52596@@ -1,31 +0,0 @@
52597-#ifndef JEMALLOC_INTERNAL_SAFETY_CHECK_H
52598-#define JEMALLOC_INTERNAL_SAFETY_CHECK_H
52599-
52600-void safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr,
52601-    size_t true_size, size_t input_size);
52602-void safety_check_fail(const char *format, ...);
52603-
52604-typedef void (*safety_check_abort_hook_t)(const char *message);
52605-
52606-/* Can set to NULL for a default. */
52607-void safety_check_set_abort(safety_check_abort_hook_t abort_fn);
52608-
52609-JEMALLOC_ALWAYS_INLINE void
52610-safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) {
52611-	assert(usize < bumped_usize);
52612-	for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
52613-		*((unsigned char *)ptr + i) = 0xBC;
52614-	}
52615-}
52616-
52617-JEMALLOC_ALWAYS_INLINE void
52618-safety_check_verify_redzone(const void *ptr, size_t usize, size_t bumped_usize)
52619-{
52620-	for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
52621-		if (unlikely(*((unsigned char *)ptr + i) != 0xBC)) {
52622-			safety_check_fail("Use after free error\n");
52623-		}
52624-	}
52625-}
52626-
52627-#endif /*JEMALLOC_INTERNAL_SAFETY_CHECK_H */
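As a usage illustration of the two inline helpers above (a sketch that assumes the surrounding jemalloc-internal environment, and an allocation whose usable size was bumped from usize to bumped_usize): the tail past usize is filled with the 0xBC pattern and re-checked later; any byte that no longer matches makes safety_check_verify_redzone report the problem through safety_check_fail.

/* Illustrative only; requires usize < bumped_usize. */
static void
demo_redzone_roundtrip(void *ptr, size_t usize, size_t bumped_usize) {
	safety_check_set_redzone(ptr, usize, bumped_usize);
	/* ... the application writes only the first usize bytes ... */
	safety_check_verify_redzone(ptr, usize, bumped_usize);	/* clean */

	/* A stray write past usize is caught on the next verification. */
	((unsigned char *)ptr)[usize] = 0;
	safety_check_verify_redzone(ptr, usize, bumped_usize);	/* reports */
}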
52628diff --git a/jemalloc/include/jemalloc/internal/san.h b/jemalloc/include/jemalloc/internal/san.h
52629deleted file mode 100644
52630index 8813d6b..0000000
52631--- a/jemalloc/include/jemalloc/internal/san.h
52632+++ /dev/null
52633@@ -1,191 +0,0 @@
52634-#ifndef JEMALLOC_INTERNAL_GUARD_H
52635-#define JEMALLOC_INTERNAL_GUARD_H
52636-
52637-#include "jemalloc/internal/ehooks.h"
52638-#include "jemalloc/internal/emap.h"
52639-
52640-#define SAN_PAGE_GUARD PAGE
52641-#define SAN_PAGE_GUARDS_SIZE (SAN_PAGE_GUARD * 2)
52642-
52643-#define SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT 0
52644-#define SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT 0
52645-
52646-#define SAN_LG_UAF_ALIGN_DEFAULT (-1)
52647-#define SAN_CACHE_BIN_NONFAST_MASK_DEFAULT (uintptr_t)(-1)
52648-
52649-static const uintptr_t uaf_detect_junk = (uintptr_t)0x5b5b5b5b5b5b5b5bULL;
52650-
52651-/* 0 means disabled, i.e. never guarded. */
52652-extern size_t opt_san_guard_large;
52653-extern size_t opt_san_guard_small;
52654-/* -1 means disabled, i.e. never check for use-after-free. */
52655-extern ssize_t opt_lg_san_uaf_align;
52656-
52657-void san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
52658-    emap_t *emap, bool left, bool right, bool remap);
52659-void san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
52660-    emap_t *emap, bool left, bool right);
52661-/*
52662- * Unguard the extent, but don't modify emap boundaries. Must be called on an
52663- * extent that has been erased from emap and shouldn't be placed back.
52664- */
52665-void san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks,
52666-    edata_t *edata, emap_t *emap);
52667-void san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize);
52668-
52669-void tsd_san_init(tsd_t *tsd);
52670-void san_init(ssize_t lg_san_uaf_align);
52671-
52672-static inline void
52673-san_guard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
52674-    emap_t *emap, bool remap) {
52675-	san_guard_pages(tsdn, ehooks, edata, emap, true, true, remap);
52676-}
52677-
52678-static inline void
52679-san_unguard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
52680-    emap_t *emap) {
52681-	san_unguard_pages(tsdn, ehooks, edata, emap, true, true);
52682-}
52683-
52684-static inline size_t
52685-san_two_side_unguarded_sz(size_t size) {
52686-	assert(size % PAGE == 0);
52687-	assert(size >= SAN_PAGE_GUARDS_SIZE);
52688-	return size - SAN_PAGE_GUARDS_SIZE;
52689-}
52690-
52691-static inline size_t
52692-san_two_side_guarded_sz(size_t size) {
52693-	assert(size % PAGE == 0);
52694-	return size + SAN_PAGE_GUARDS_SIZE;
52695-}
52696-
52697-static inline size_t
52698-san_one_side_unguarded_sz(size_t size) {
52699-	assert(size % PAGE == 0);
52700-	assert(size >= SAN_PAGE_GUARD);
52701-	return size - SAN_PAGE_GUARD;
52702-}
52703-
52704-static inline size_t
52705-san_one_side_guarded_sz(size_t size) {
52706-	assert(size % PAGE == 0);
52707-	return size + SAN_PAGE_GUARD;
52708-}
52709-
52710-static inline bool
52711-san_guard_enabled(void) {
52712-	return (opt_san_guard_large != 0 || opt_san_guard_small != 0);
52713-}
52714-
52715-static inline bool
52716-san_large_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks, size_t size,
52717-    size_t alignment) {
52718-	if (opt_san_guard_large == 0 || ehooks_guard_will_fail(ehooks) ||
52719-	    tsdn_null(tsdn)) {
52720-		return false;
52721-	}
52722-
52723-	tsd_t *tsd = tsdn_tsd(tsdn);
52724-	uint64_t n = tsd_san_extents_until_guard_large_get(tsd);
52725-	assert(n >= 1);
52726-	if (n > 1) {
52727-		/*
52728-		 * Subtract conditionally because the guard may not happen due
52729-		 * to alignment or size restriction below.
52730-		 */
52731-		*tsd_san_extents_until_guard_largep_get(tsd) = n - 1;
52732-	}
52733-
52734-	if (n == 1 && (alignment <= PAGE) &&
52735-	    (san_two_side_guarded_sz(size) <= SC_LARGE_MAXCLASS)) {
52736-		*tsd_san_extents_until_guard_largep_get(tsd) =
52737-		    opt_san_guard_large;
52738-		return true;
52739-	} else {
52740-		assert(tsd_san_extents_until_guard_large_get(tsd) >= 1);
52741-		return false;
52742-	}
52743-}
52744-
52745-static inline bool
52746-san_slab_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks) {
52747-	if (opt_san_guard_small == 0 || ehooks_guard_will_fail(ehooks) ||
52748-	    tsdn_null(tsdn)) {
52749-		return false;
52750-	}
52751-
52752-	tsd_t *tsd = tsdn_tsd(tsdn);
52753-	uint64_t n = tsd_san_extents_until_guard_small_get(tsd);
52754-	assert(n >= 1);
52755-	if (n == 1) {
52756-		*tsd_san_extents_until_guard_smallp_get(tsd) =
52757-		    opt_san_guard_small;
52758-		return true;
52759-	} else {
52760-		*tsd_san_extents_until_guard_smallp_get(tsd) = n - 1;
52761-		assert(tsd_san_extents_until_guard_small_get(tsd) >= 1);
52762-		return false;
52763-	}
52764-}
52765-
52766-static inline void
52767-san_junk_ptr_locations(void *ptr, size_t usize, void **first, void **mid,
52768-    void **last) {
52769-	size_t ptr_sz = sizeof(void *);
52770-
52771-	*first = ptr;
52772-
52773-	*mid = (void *)((uintptr_t)ptr + ((usize >> 1) & ~(ptr_sz - 1)));
52774-	assert(*first != *mid || usize == ptr_sz);
52775-	assert((uintptr_t)*first <= (uintptr_t)*mid);
52776-
52777-	/*
52778-	 * When usize > 32K, the gap between requested_size and usize might be
52779-	 * greater than 4K -- this means the last write may access an
52780-	 * greater than 4K -- this means the last write may access a
52781-	 * likely-untouched page (default settings w/ 4K pages).  However, by
52782-	 * default the tcache only goes up to the 32K size class, and is usually
52783-	 */
52784-	*last = (void *)((uintptr_t)ptr + usize - sizeof(uaf_detect_junk));
52785-	assert(*first != *last || usize == ptr_sz);
52786-	assert(*mid != *last || usize <= ptr_sz * 2);
52787-	assert((uintptr_t)*mid <= (uintptr_t)*last);
52788-}
52789-
52790-static inline bool
52791-san_junk_ptr_should_slow(void) {
52792-	/*
52793-	 * The latter condition (pointer size greater than the min size class)
52794-	 * is not expected -- fall back to the slow path for simplicity.
52795-	 */
52796-	return config_debug || (LG_SIZEOF_PTR > SC_LG_TINY_MIN);
52797-}
52798-
52799-static inline void
52800-san_junk_ptr(void *ptr, size_t usize) {
52801-	if (san_junk_ptr_should_slow()) {
52802-		memset(ptr, (char)uaf_detect_junk, usize);
52803-		return;
52804-	}
52805-
52806-	void *first, *mid, *last;
52807-	san_junk_ptr_locations(ptr, usize, &first, &mid, &last);
52808-	*(uintptr_t *)first = uaf_detect_junk;
52809-	*(uintptr_t *)mid = uaf_detect_junk;
52810-	*(uintptr_t *)last = uaf_detect_junk;
52811-}
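For a concrete case (assuming 64-bit pointers, so ptr_sz == 8): with usize == 48 the three sentinel words land at offsets 0, 24 ((48 >> 1) & ~7), and 40 (48 - 8), so the fast path writes three words instead of memset-ing all 48 bytes. The sketch below just re-derives those offsets.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int
main(void) {
	size_t usize = 48;			/* assumed example size */
	size_t ptr_sz = sizeof(void *);		/* 8 on a 64-bit platform */
	size_t first = 0;
	size_t mid = (usize >> 1) & ~(ptr_sz - 1);
	size_t last = usize - sizeof(uintptr_t);
	assert(first == 0 && mid == 24 && last == 40);
	return 0;
}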
52812-
52813-static inline bool
52814-san_uaf_detection_enabled(void) {
52815-	bool ret = config_uaf_detection && (opt_lg_san_uaf_align != -1);
52816-	if (config_uaf_detection && ret) {
52817-		assert(san_cache_bin_nonfast_mask == ((uintptr_t)1 <<
52818-		    opt_lg_san_uaf_align) - 1);
52819-	}
52820-
52821-	return ret;
52822-}
52823-
52824-#endif /* JEMALLOC_INTERNAL_GUARD_H */
52825diff --git a/jemalloc/include/jemalloc/internal/san_bump.h b/jemalloc/include/jemalloc/internal/san_bump.h
52826deleted file mode 100644
52827index 8ec4a71..0000000
52828--- a/jemalloc/include/jemalloc/internal/san_bump.h
52829+++ /dev/null
52830@@ -1,52 +0,0 @@
52831-#ifndef JEMALLOC_INTERNAL_SAN_BUMP_H
52832-#define JEMALLOC_INTERNAL_SAN_BUMP_H
52833-
52834-#include "jemalloc/internal/edata.h"
52835-#include "jemalloc/internal/exp_grow.h"
52836-#include "jemalloc/internal/mutex.h"
52837-
52838-#define SBA_RETAINED_ALLOC_SIZE ((size_t)4 << 20)
52839-
52840-extern bool opt_retain;
52841-
52842-typedef struct ehooks_s ehooks_t;
52843-typedef struct pac_s pac_t;
52844-
52845-typedef struct san_bump_alloc_s san_bump_alloc_t;
52846-struct san_bump_alloc_s {
52847-	malloc_mutex_t mtx;
52848-
52849-	edata_t *curr_reg;
52850-};
52851-
52852-static inline bool
52853-san_bump_enabled() {
52854-	 * We enable the san_bump allocator only when it's possible to break up a
52855-	 * We enable san_bump allocator only when it's possible to break up a
52856-	 * mapping and unmap a part of it (maps_coalesce). This is needed to
52857-	 * ensure the arena destruction process can destroy all retained guarded
52858-	 * extents one by one and to unmap a trailing part of a retained guarded
52859-	 * region when it's too small to fit a pending allocation.
52860-	 * opt_retain is required, because this allocator retains a large
52861-	 * virtual memory mapping and returns smaller parts of it.
52862-	 */
52863-	return maps_coalesce && opt_retain;
52864-}
52865-
52866-static inline bool
52867-san_bump_alloc_init(san_bump_alloc_t* sba) {
52868-	bool err = malloc_mutex_init(&sba->mtx, "sanitizer_bump_allocator",
52869-	    WITNESS_RANK_SAN_BUMP_ALLOC, malloc_mutex_rank_exclusive);
52870-	if (err) {
52871-		return true;
52872-	}
52873-	sba->curr_reg = NULL;
52874-
52875-	return false;
52876-}
52877-
52878-edata_t *
52879-san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac, ehooks_t *ehooks,
52880-    size_t size, bool zero);
52881-
52882-#endif /* JEMALLOC_INTERNAL_SAN_BUMP_H */
52883diff --git a/jemalloc/include/jemalloc/internal/sc.h b/jemalloc/include/jemalloc/internal/sc.h
52884deleted file mode 100644
52885index 9bab347..0000000
52886--- a/jemalloc/include/jemalloc/internal/sc.h
52887+++ /dev/null
52888@@ -1,357 +0,0 @@
52889-#ifndef JEMALLOC_INTERNAL_SC_H
52890-#define JEMALLOC_INTERNAL_SC_H
52891-
52892-#include "jemalloc/internal/jemalloc_internal_types.h"
52893-
52894-/*
52895- * Size class computations:
52896- *
52897- * These are a little tricky; we'll first start by describing how things
52898- * generally work, and then describe some of the details.
52899- *
52900- * Ignore the first few size classes for a moment. We can then split all the
52901- * remaining size classes into groups. The size classes in a group are spaced
52902- * such that they cover allocation request sizes in a power-of-2 range. The
52903- * power of two is called the base of the group, and the size classes in it
52904- * satisfy allocations in the half-open range (base, base * 2]. There are
52905- * SC_NGROUP size classes in each group, equally spaced in the range, so that
52906- * each one covers allocations for base / SC_NGROUP possible allocation sizes.
52907- * We call that value (base / SC_NGROUP) the delta of the group. Each size class
52908- * is delta larger than the one before it (including the initial size class in a
52909- * group, which is delta larger than base, the largest size class in the
52910- * previous group).
52911- * To make the math all work out nicely, we require that SC_NGROUP is a power of
52912- * two, and define it in terms of SC_LG_NGROUP. We'll often talk in terms of
52913- * lg_base and lg_delta. For each of these groups then, we have that
52914- * lg_delta == lg_base - SC_LG_NGROUP.
52915- * The size classes in a group with a given lg_base and lg_delta (which, recall,
52916- * can be computed from lg_base for these groups) are therefore:
52917- *   base + 1 * delta
52918- *     which covers allocations in (base, base + 1 * delta]
52919- *   base + 2 * delta
52920- *     which covers allocations in (base + 1 * delta, base + 2 * delta].
52921- *   base + 3 * delta
52922- *     which covers allocations in (base + 2 * delta, base + 3 * delta].
52923- *   ...
52924- *   base + SC_NGROUP * delta ( == 2 * base)
52925- *     which covers allocations in (base + (SC_NGROUP - 1) * delta, 2 * base].
52926- * (Note that currently SC_NGROUP is always 4, so the "..." is empty in
52927- * practice.)
52928- * Note that the last size class in the group is the next power of two (after
52929- * base), so that we've set up the induction correctly for the next group's
52930- * selection of delta.
52931- *
52932- * Now, let's start considering the first few size classes. Two extra constants
52933- * come into play here: LG_QUANTUM and SC_LG_TINY_MIN. LG_QUANTUM ensures
52934- * correct platform alignment; all objects of size (1 << LG_QUANTUM) or larger
52935- * are at least (1 << LG_QUANTUM) aligned; this can be used to ensure that we
52936- * never return improperly aligned memory, by making (1 << LG_QUANTUM) equal the
52937- * highest required alignment of a platform. For allocation sizes smaller than
52938- * (1 << LG_QUANTUM) though, we can be more relaxed (since we don't support
52939- * platforms with types with alignment larger than their size). To allow such
52940- * allocations (without wasting space unnecessarily), we introduce tiny size
52941- * classes; one per power of two, up until we hit the quantum size. There are
52942- * therefore LG_QUANTUM - SC_LG_TINY_MIN such size classes.
52943- *
52944- * Next, we have a size class of size (1 << LG_QUANTUM).  This can't be the
52945- * start of a group in the sense we described above (covering a power of two
52946- * range) since, if we divided into it to pick a value of delta, we'd get a
52947- * delta smaller than (1 << LG_QUANTUM) for sizes >= (1 << LG_QUANTUM), which
52948- * is against the rules.
52949- *
52950- * The first base we can divide by SC_NGROUP while still being at least
52951- * (1 << LG_QUANTUM) is SC_NGROUP * (1 << LG_QUANTUM). We can get there by
52952- * having SC_NGROUP size classes, spaced (1 << LG_QUANTUM) apart. These size
52953- * classes are:
52954- *   1 * (1 << LG_QUANTUM)
52955- *   2 * (1 << LG_QUANTUM)
52956- *   3 * (1 << LG_QUANTUM)
52957- *   ... (although, as above, this "..." is empty in practice)
52958- *   SC_NGROUP * (1 << LG_QUANTUM).
52959- *
52960- * There are SC_NGROUP of these size classes, so we can regard it as a sort of
52961- * pseudo-group, even though it spans multiple powers of 2, is divided
52962- * differently, and both starts and ends on a power of 2 (as opposed to just
52963- * ending). SC_NGROUP is itself a power of two, so the first group after the
52964- * pseudo-group has the power-of-two base SC_NGROUP * (1 << LG_QUANTUM), for a
52965- * lg_base of LG_QUANTUM + SC_LG_NGROUP. We can divide this base into SC_NGROUP
52966- * sizes without violating our LG_QUANTUM requirements, so we can safely set
52967- * lg_delta = lg_base - SC_LG_NGROUP (== LG_QUANTUM).
52968- *
52969- * So, in order, the size classes are:
52970- *
52971- * Tiny size classes:
52972- * - Count: LG_QUANTUM - SC_LG_TINY_MIN.
52973- * - Sizes:
52974- *     1 << SC_LG_TINY_MIN
52975- *     1 << (SC_LG_TINY_MIN + 1)
52976- *     1 << (SC_LG_TINY_MIN + 2)
52977- *     ...
52978- *     1 << (LG_QUANTUM - 1)
52979- *
52980- * Initial pseudo-group:
52981- * - Count: SC_NGROUP
52982- * - Sizes:
52983- *     1 * (1 << LG_QUANTUM)
52984- *     2 * (1 << LG_QUANTUM)
52985- *     3 * (1 << LG_QUANTUM)
52986- *     ...
52987- *     SC_NGROUP * (1 << LG_QUANTUM)
52988- *
52989- * Regular group 0:
52990- * - Count: SC_NGROUP
52991- * - Sizes:
52992- *   (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP and lg_delta of
52993- *   lg_base - SC_LG_NGROUP)
52994- *     (1 << lg_base) + 1 * (1 << lg_delta)
52995- *     (1 << lg_base) + 2 * (1 << lg_delta)
52996- *     (1 << lg_base) + 3 * (1 << lg_delta)
52997- *     ...
52998- *     (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
52999- *
53000- * Regular group 1:
53001- * - Count: SC_NGROUP
53002- * - Sizes:
53003- *   (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + 1 and lg_delta of
53004- *   lg_base - SC_LG_NGROUP)
53005- *     (1 << lg_base) + 1 * (1 << lg_delta)
53006- *     (1 << lg_base) + 2 * (1 << lg_delta)
53007- *     (1 << lg_base) + 3 * (1 << lg_delta)
53008- *     ...
53009- *     (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
53010- *
53011- * ...
53012- *
53013- * Regular group N:
53014- * - Count: SC_NGROUP
53015- * - Sizes:
53016- *   (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + N and lg_delta of
53017- *   lg_base - SC_LG_NGROUP)
53018- *     (1 << lg_base) + 1 * (1 << lg_delta)
53019- *     (1 << lg_base) + 2 * (1 << lg_delta)
53020- *     (1 << lg_base) + 3 * (1 << lg_delta)
53021- *     ...
53022- *     (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
53023- *
53024- *
53025- * Representation of metadata:
53026- * To make the math easy, we'll mostly work in lg quantities. We record lg_base,
53027- * lg_delta, and ndelta (i.e. number of deltas above the base) on a
53028- * per-size-class basis, and maintain the invariant that, across all size
53029- * classes, size == (1 << lg_base) + ndelta * (1 << lg_delta).
53030- *
53031- * For regular groups (i.e. those with lg_base >= LG_QUANTUM + SC_LG_NGROUP),
53032- * lg_delta is lg_base - SC_LG_NGROUP, and ndelta goes from 1 to SC_NGROUP.
53033- *
53034- * For the initial tiny size classes (if any), lg_base is lg(size class size).
53035- * lg_delta is lg_base for the first size class, and lg_base - 1 for all
53036- * subsequent ones. ndelta is always 0.
53037- *
53038- * For the pseudo-group, if there are no tiny size classes, then we set
53039- * lg_base == LG_QUANTUM, lg_delta == LG_QUANTUM, and have ndelta range from 0
53040- * to SC_NGROUP - 1. (Note that delta == base, so base + (SC_NGROUP - 1) * delta
53041- * is just SC_NGROUP * base, or (1 << (SC_LG_NGROUP + LG_QUANTUM)), so we do
53042- * indeed get a power of two that way). If there *are* tiny size classes, then
53043- * the first size class needs to have lg_delta relative to the largest tiny size
53044- * class. We therefore set lg_base == LG_QUANTUM - 1,
53045- * lg_delta == LG_QUANTUM - 1, and ndelta == 1, keeping the rest of the
53046- * pseudo-group the same.
53047- *
53048- *
53049- * Other terminology:
53050- * "Small" size classes mean those that are allocated out of bins, which is the
53051- * same as those that are slab allocated.
53052- * "Large" size classes are those that are not small. The cutoff for counting as
53053- * large is page size * group size.
53054- */
53055-
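To make the layout above concrete, here is how the first few size classes come out under one representative configuration (LG_QUANTUM == 4, i.e. a 16-byte quantum, SC_LG_TINY_MIN == 3, and SC_LG_NGROUP == 2, so SC_NGROUP == 4; these values are assumptions for the example, not requirements):

    Tiny classes:            8                      (LG_QUANTUM - SC_LG_TINY_MIN == 1 class)
    Initial pseudo-group:   16   32   48   64       (spaced (1 << LG_QUANTUM) == 16 apart)
    Regular group 0:        80   96  112  128       (lg_base == 6, lg_delta == 4)
    Regular group 1:       160  192  224  256       (lg_base == 7, lg_delta == 5)
    Regular group 2:       320  384  448  512       (lg_base == 8, lg_delta == 6)

Each regular group ends exactly on the next power of two, which then becomes the following group's base.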
53056-/*
53057- * Size class N + (1 << SC_LG_NGROUP) is twice the size of size class N.
53058- */
53059-#define SC_LG_NGROUP 2
53060-#define SC_LG_TINY_MIN 3
53061-
53062-#if SC_LG_TINY_MIN == 0
53063-/* The div module doesn't support division by 1, which this would require. */
53064-#error "Unsupported SC_LG_TINY_MIN"
53065-#endif
53066-
53067-/*
53068- * The definitions below are all determined by the above settings and system
53069- * characteristics.
53070- */
53071-#define SC_NGROUP (1ULL << SC_LG_NGROUP)
53072-#define SC_PTR_BITS ((1ULL << LG_SIZEOF_PTR) * 8)
53073-#define SC_NTINY (LG_QUANTUM - SC_LG_TINY_MIN)
53074-#define SC_LG_TINY_MAXCLASS (LG_QUANTUM > SC_LG_TINY_MIN ? LG_QUANTUM - 1 : -1)
53075-#define SC_NPSEUDO SC_NGROUP
53076-#define SC_LG_FIRST_REGULAR_BASE (LG_QUANTUM + SC_LG_NGROUP)
53077-/*
53078- * We cap allocations to be less than 2 ** (ptr_bits - 1), so the highest base
53079- * we need is 2 ** (ptr_bits - 2). (This also means that the last group is 1
53080- * size class shorter than the others).
53081- * We could probably save some space in arenas by capping this at LG_VADDR size.
53082- */
53083-#define SC_LG_BASE_MAX (SC_PTR_BITS - 2)
53084-#define SC_NREGULAR (SC_NGROUP * 					\
53085-    (SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1)
53086-#define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR)
53087-
53088-/*
53089- * The number of size classes that are a multiple of the page size.
53090- *
53091- * Here are the first few bases that have a page-sized SC.
53092- *
53093- *      lg(base) |     base | highest SC | page-multiple SCs
53094- * --------------|------------------------------------------
53095- *   LG_PAGE - 1 | PAGE / 2 |       PAGE | 1
53096- *       LG_PAGE |     PAGE |   2 * PAGE | 1
53097- *   LG_PAGE + 1 | 2 * PAGE |   4 * PAGE | 2
53098- *   LG_PAGE + 2 | 4 * PAGE |   8 * PAGE | 4
53099- *
53100- * The number of page-multiple SCs continues to grow in powers of two, up until
53101- * lg_delta == lg_page, which corresponds to setting lg_base to lg_page +
53102- * SC_LG_NGROUP.  So, then, the number of size classes that are multiples of the
53103- * page size and whose lg_delta is less than lg_page is
53104- * 1 + (2**0 + 2**1 + ... + 2**(lg_ngroup - 1)) == 2**lg_ngroup.
53105- *
53106- * For each base with lg_base in [lg_page + lg_ngroup, lg_base_max), there are
53107- * NGROUP page-sized size classes, and when lg_base == lg_base_max, there are
53108- * NGROUP - 1.
53109- *
53110- * This gives us the quantity we seek.
53111- */
53112-#define SC_NPSIZES (							\
53113-    SC_NGROUP								\
53114-    + (SC_LG_BASE_MAX - (LG_PAGE + SC_LG_NGROUP)) * SC_NGROUP		\
53115-    + SC_NGROUP - 1)
53116-
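Plugging representative values into the formula above (LG_PAGE == 12, SC_LG_NGROUP == 2, and 64-bit pointers so SC_LG_BASE_MAX == 62; example values only):

    SC_NPSIZES = 4 + (62 - (12 + 2)) * 4 + 4 - 1
               = 4 + 48 * 4 + 3
               = 199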
53117-/*
53118- * We say a size class is binnable if size < page size * group size; in other
53119- * words, if lg(size) < lg(page size) + lg(group size).
53120- */
53121-#define SC_NBINS (							\
53122-    /* Sub-regular size classes. */					\
53123-    SC_NTINY + SC_NPSEUDO						\
53124-    /* Groups with lg_regular_min_base <= lg_base <= lg_base_max */	\
53125-    + SC_NGROUP * (LG_PAGE + SC_LG_NGROUP - SC_LG_FIRST_REGULAR_BASE)	\
53126-    /* Last SC of the last group hits the bound exactly; exclude it. */	\
53127-    - 1)
53128-
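With the same example configuration (LG_QUANTUM == 4, LG_PAGE == 12, SC_LG_NGROUP == 2), SC_NTINY == 1, SC_NPSEUDO == 4, and SC_LG_FIRST_REGULAR_BASE == 6, so the bin count works out as:

    SC_NBINS = 1 + 4 + 4 * (12 + 2 - 6) - 1
             = 5 + 32 - 1
             = 36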
53129-/*
53130- * The size2index_tab lookup table uses uint8_t to encode each bin index, so we
53131- * cannot support more than 256 small size classes.
53132- */
53133-#if (SC_NBINS > 256)
53134-#  error "Too many small size classes"
53135-#endif
53136-
53137-/* The largest size class in the lookup table, and its binary log. */
53138-#define SC_LG_MAX_LOOKUP 12
53139-#define SC_LOOKUP_MAXCLASS (1 << SC_LG_MAX_LOOKUP)
53140-
53141-/* Internal, only used for the definition of SC_SMALL_MAXCLASS. */
53142-#define SC_SMALL_MAX_BASE (1 << (LG_PAGE + SC_LG_NGROUP - 1))
53143-#define SC_SMALL_MAX_DELTA (1 << (LG_PAGE - 1))
53144-
53145-/* The largest size class allocated out of a slab. */
53146-#define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE				\
53147-    + (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA)
53148-
53149-/* The fastpath assumes all lookup-able sizes are small. */
53150-#if (SC_SMALL_MAXCLASS < SC_LOOKUP_MAXCLASS)
53151-#  error "Lookup table sizes must be small"
53152-#endif
53153-
53154-/* The smallest size class not allocated out of a slab. */
53155-#define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP))
53156-#define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP)
53157-
53158-/* Internal; only used for the definition of SC_LARGE_MAXCLASS. */
53159-#define SC_MAX_BASE ((size_t)1 << (SC_PTR_BITS - 2))
53160-#define SC_MAX_DELTA ((size_t)1 << (SC_PTR_BITS - 2 - SC_LG_NGROUP))
53161-
53162-/* The largest size class supported. */
53163-#define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA)
53164-
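Continuing with the example values LG_PAGE == 12, SC_LG_NGROUP == 2, and SC_PTR_BITS == 64, the boundary constants defined above evaluate to:

    SC_LOOKUP_MAXCLASS = 1 << 12                    = 4096
    SC_SMALL_MAXCLASS  = (1 << 13) + 3 * (1 << 11)  = 14336
    SC_LARGE_MINCLASS  = 1 << 14                    = 16384
    SC_LARGE_MAXCLASS  = (1 << 62) + 3 * (1 << 60)  = 0x7000000000000000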
53165-/* Maximum number of regions in one slab. */
53166-#ifndef CONFIG_LG_SLAB_MAXREGS
53167-#  define SC_LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN)
53168-#else
53169-#  if CONFIG_LG_SLAB_MAXREGS < (LG_PAGE - SC_LG_TINY_MIN)
53170-#    error "Unsupported SC_LG_SLAB_MAXREGS"
53171-#  else
53172-#    define SC_LG_SLAB_MAXREGS CONFIG_LG_SLAB_MAXREGS
53173-#  endif
53174-#endif
53175-
53176-#define SC_SLAB_MAXREGS (1U << SC_LG_SLAB_MAXREGS)
53177-
53178-typedef struct sc_s sc_t;
53179-struct sc_s {
53180-	/* Size class index, or -1 if not a valid size class. */
53181-	int index;
53182-	/* Lg group base size (no deltas added). */
53183-	int lg_base;
53184-	/* Lg delta to previous size class. */
53185-	int lg_delta;
53186-	/* Delta multiplier.  size == (1 << lg_base) + ndelta * (1 << lg_delta) */
53187-	int ndelta;
53188-	/*
53189-	 * True if the size class is a multiple of the page size, false
53190-	 * otherwise.
53191-	 */
53192-	bool psz;
53193-	/*
53194-	 * True if the size class is a small (binned) size class, false otherwise.
53195-	 */
53196-	bool bin;
53197-	/* The slab page count if a small bin size class, 0 otherwise. */
53198-	int pgs;
53199-	/* Same as lg_delta if a lookup table size class, 0 otherwise. */
53200-	int lg_delta_lookup;
53201-};
53202-
53203-typedef struct sc_data_s sc_data_t;
53204-struct sc_data_s {
53205-	/* Number of tiny size classes. */
53206-	unsigned ntiny;
53207-	/* Number of bins supported by the lookup table. */
53208-	int nlbins;
53209-	/* Number of small size class bins. */
53210-	int nbins;
53211-	/* Number of size classes. */
53212-	int nsizes;
53213-	/* Number of bits required to store NSIZES. */
53214-	int lg_ceil_nsizes;
53215-	/* Number of size classes that are a multiple of (1U << LG_PAGE). */
53216-	unsigned npsizes;
53217-	/* Lg of maximum tiny size class (or -1, if none). */
53218-	int lg_tiny_maxclass;
53219-	/* Maximum size class included in lookup table. */
53220-	size_t lookup_maxclass;
53221-	/* Maximum small size class. */
53222-	size_t small_maxclass;
53223-	/* Lg of minimum large size class. */
53224-	int lg_large_minclass;
53225-	/* The minimum large size class. */
53226-	size_t large_minclass;
53227-	/* Maximum (large) size class. */
53228-	size_t large_maxclass;
53229-	/* True if the sc_data_t has been initialized (for debugging only). */
53230-	bool initialized;
53231-
53232-	sc_t sc[SC_NSIZES];
53233-};
53234-
53235-size_t reg_size_compute(int lg_base, int lg_delta, int ndelta);
53236-void sc_data_init(sc_data_t *data);
53237-/*
53238- * Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
53239- * Otherwise, does its best to accommodate the request.
53240- */
53241-void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end,
53242-    int pgs);
53243-void sc_boot(sc_data_t *data);
53244-
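A minimal sketch of the size computation implied by the lg_base/lg_delta/ndelta invariant documented above; the real reg_size_compute() is defined in sc.c, and this illustrative version simply restates the invariant:

    size_t
    reg_size_compute(int lg_base, int lg_delta, int ndelta) {
        /* size == (1 << lg_base) + ndelta * (1 << lg_delta) */
        return ((size_t)1 << lg_base) + ((size_t)ndelta << lg_delta);
    }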
53245-#endif /* JEMALLOC_INTERNAL_SC_H */
53246diff --git a/jemalloc/include/jemalloc/internal/sec.h b/jemalloc/include/jemalloc/internal/sec.h
53247deleted file mode 100644
53248index fa86338..0000000
53249--- a/jemalloc/include/jemalloc/internal/sec.h
53250+++ /dev/null
53251@@ -1,120 +0,0 @@
53252-#ifndef JEMALLOC_INTERNAL_SEC_H
53253-#define JEMALLOC_INTERNAL_SEC_H
53254-
53255-#include "jemalloc/internal/atomic.h"
53256-#include "jemalloc/internal/pai.h"
53257-
53258-/*
53259- * Small extent cache.
53260- *
53261- * This includes some utilities to cache small extents.  We have a per-pszind
53262- * bin with its own list of extents of that size.  We don't try to do any
53263- * coalescing of extents (since it would in general require cross-shard locks or
53264- * knowledge of the underlying PAI implementation).
53265- */
53266-
53267-/*
53268- * For now, this is just one field; eventually, we'll probably want to get more
53269- * fine-grained data out (like per-size class statistics).
53270- */
53271-typedef struct sec_stats_s sec_stats_t;
53272-struct sec_stats_s {
53273-	/* Sum of bytes_cur across all shards. */
53274-	size_t bytes;
53275-};
53276-
53277-static inline void
53278-sec_stats_accum(sec_stats_t *dst, sec_stats_t *src) {
53279-	dst->bytes += src->bytes;
53280-}
53281-
53282-/* A collection of free extents, all of the same size. */
53283-typedef struct sec_bin_s sec_bin_t;
53284-struct sec_bin_s {
53285-	/*
53286-	 * When we fail to fulfill an allocation, we do a batch-alloc on the
53287-	 * underlying allocator to fill extra items, as well.  We drop the SEC
53288-	 * lock while doing so, to allow operations on other bins to succeed.
53289-	 * That introduces the possibility of other threads also trying to
53290-	 * allocate out of this bin, failing, and also going to the backing
53291-	 * allocator.  To avoid a thundering herd problem in which lots of
53292-	 * threads do batch allocs and overfill this bin as a result, we only
53293-	 * allow one batch allocation at a time for a bin.  This bool tracks
53294-	 * whether or not some thread is already batch allocating.
53295-	 *
53296-	 * Eventually, the right answer may be a smarter sharding policy for the
53297-	 * bins (e.g. a mutex per bin, which would also be more scalable
53298-	 * generally; the batch-allocating thread could hold it while
53299-	 * batch-allocating).
53300-	 */
53301-	bool being_batch_filled;
53302-
53303-	/*
53304-	 * Number of bytes in this particular bin (as opposed to the
53305-	 * sec_shard_t's bytes_cur).  This isn't user visible or reported in
53306-	 * stats; rather, it allows us to quickly determine the change in the
53307-	 * centralized counter when flushing.
53308-	 */
53309-	size_t bytes_cur;
53310-	edata_list_active_t freelist;
53311-};
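A hedged sketch of the claim/release handshake that being_batch_filled is described as providing; the helper names here are hypothetical, and the real logic (in sec.c) is interleaved with the allocation path:

    /* Caller holds the owning shard's mtx.  Returns true iff this thread won
     * the right to batch-fill the bin; at most one filler runs at a time. */
    static bool
    sec_bin_claim_batch_fill(sec_bin_t *bin) {
        if (bin->being_batch_filled) {
            return false;   /* Another thread is already filling. */
        }
        bin->being_batch_filled = true;
        return true;
    }

    /* Caller re-acquires the mtx once its batch allocation has finished. */
    static void
    sec_bin_finish_batch_fill(sec_bin_t *bin) {
        bin->being_batch_filled = false;
    }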
53312-
53313-typedef struct sec_shard_s sec_shard_t;
53314-struct sec_shard_s {
53315-	/*
53316-	 * We don't keep per-bin mutexes, even though that would allow more
53317-	 * sharding; this allows global cache-eviction, which in turn allows for
53318-	 * better balancing across free lists.
53319-	 */
53320-	malloc_mutex_t mtx;
53321-	/*
53322-	 * A SEC may need to be shut down (i.e. flushed of its contents and
53323-	 * prevented from further caching).  To avoid tricky synchronization
53324-	 * issues, we just track enabled-status in each shard, guarded by a
53325-	 * mutex.  In practice, this is only ever checked during brief races,
53326-	 * since the arena-level atomic boolean tracking HPA enabled-ness means
53327-	 * that we won't go down these pathways very often after custom extent
53328-	 * hooks are installed.
53329-	 */
53330-	bool enabled;
53331-	sec_bin_t *bins;
53332-	/* Number of bytes in all bins in the shard. */
53333-	size_t bytes_cur;
53334-	/* The next pszind to flush in the flush-some pathways. */
53335-	pszind_t to_flush_next;
53336-};
53337-
53338-typedef struct sec_s sec_t;
53339-struct sec_s {
53340-	pai_t pai;
53341-	pai_t *fallback;
53342-
53343-	sec_opts_t opts;
53344-	sec_shard_t *shards;
53345-	pszind_t npsizes;
53346-};
53347-
53348-bool sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback,
53349-    const sec_opts_t *opts);
53350-void sec_flush(tsdn_t *tsdn, sec_t *sec);
53351-void sec_disable(tsdn_t *tsdn, sec_t *sec);
53352-
53353-/*
53354- * Morally, these two stats methods probably ought to be a single one (and the
53355- * mutex_prof_data ought to live in the sec_stats_t).  But splitting them apart
53356- * lets them fit easily into the pa_shard stats framework (which also has this
53357- * split), which simplifies the stats management.
53358- */
53359-void sec_stats_merge(tsdn_t *tsdn, sec_t *sec, sec_stats_t *stats);
53360-void sec_mutex_stats_read(tsdn_t *tsdn, sec_t *sec,
53361-    mutex_prof_data_t *mutex_prof_data);
53362-
53363-/*
53364- * We use the arena lock ordering; these are acquired in phase 2 of forking, but
53365- * should be acquired before the underlying allocator mutexes.
53366- */
53367-void sec_prefork2(tsdn_t *tsdn, sec_t *sec);
53368-void sec_postfork_parent(tsdn_t *tsdn, sec_t *sec);
53369-void sec_postfork_child(tsdn_t *tsdn, sec_t *sec);
53370-
53371-#endif /* JEMALLOC_INTERNAL_SEC_H */
53372diff --git a/jemalloc/include/jemalloc/internal/sec_opts.h b/jemalloc/include/jemalloc/internal/sec_opts.h
53373deleted file mode 100644
53374index a3ad72f..0000000
53375--- a/jemalloc/include/jemalloc/internal/sec_opts.h
53376+++ /dev/null
53377@@ -1,59 +0,0 @@
53378-#ifndef JEMALLOC_INTERNAL_SEC_OPTS_H
53379-#define JEMALLOC_INTERNAL_SEC_OPTS_H
53380-
53381-/*
53382- * The configuration settings used by an sec_t.  Morally, this is part of the
53383- * SEC interface, but we put it here for header-ordering reasons.
53384- */
53385-
53386-typedef struct sec_opts_s sec_opts_t;
53387-struct sec_opts_s {
53388-	/*
53389-	 * We don't necessarily always use all the shards; requests are
53390-	 * distributed across shards [0, nshards - 1).
53391-	 */
53392-	size_t nshards;
53393-	/*
53394-	 * We'll automatically refuse to cache any objects in this sec if
53395-	 * they're larger than max_alloc bytes, instead forwarding such objects
53396-	 * directly to the fallback.
53397-	 */
53398-	size_t max_alloc;
53399-	/*
53400-	 * Exceeding this amount of cached extents in a shard causes us to start
53401-	 * flushing bins in that shard until we fall below bytes_after_flush.
53402-	 */
53403-	size_t max_bytes;
53404-	/*
53405-	 * The number of bytes (in all bins) we flush down to when we exceed
53406-	 * max_bytes.  We want this to be less than max_bytes, because
53407-	 * otherwise we could get into situations where a shard undergoing
53408-	 * net-deallocation keeps bytes_cur very near to max_bytes, so that
53409-	 * most deallocations get immediately forwarded to the underlying PAI
53410-	 * implementation, defeating the point of the SEC.
53411-	 */
53412-	size_t bytes_after_flush;
53413-	/*
53414-	 * When we can't satisfy an allocation out of the SEC because there are
53415-	 * no available ones cached, we allocate several extents of that size out of
53416-	 * the fallback allocator.  Eventually we might want to do something
53417-	 * cleverer, but for now we just grab a fixed number.
53418-	 */
53419-	size_t batch_fill_extra;
53420-};
53421-
53422-#define SEC_OPTS_DEFAULT {						\
53423-	/* nshards */							\
53424-	4,								\
53425-	/* max_alloc */							\
53426-	(32 * 1024) < PAGE ? PAGE : (32 * 1024),			\
53427-	/* max_bytes */							\
53428-	256 * 1024,							\
53429-	/* bytes_after_flush */						\
53430-	128 * 1024,							\
53431-	/* batch_fill_extra */						\
53432-	0								\
53433-}
53434-
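For readability, the positional initializer above corresponds to the following designated-initializer form (illustrative only; it assumes PAGE <= 32 KiB, so the max_alloc expression resolves to 32 KiB):

    static const sec_opts_t sec_opts_default = {
        .nshards = 4,
        .max_alloc = 32 * 1024,
        .max_bytes = 256 * 1024,
        .bytes_after_flush = 128 * 1024,
        .batch_fill_extra = 0,
    };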
53435-
53436-#endif /* JEMALLOC_INTERNAL_SEC_OPTS_H */
53437diff --git a/jemalloc/include/jemalloc/internal/seq.h b/jemalloc/include/jemalloc/internal/seq.h
53438deleted file mode 100644
53439index ef2df4c..0000000
53440--- a/jemalloc/include/jemalloc/internal/seq.h
53441+++ /dev/null
53442@@ -1,55 +0,0 @@
53443-#ifndef JEMALLOC_INTERNAL_SEQ_H
53444-#define JEMALLOC_INTERNAL_SEQ_H
53445-
53446-#include "jemalloc/internal/atomic.h"
53447-
53448-/*
53449- * A simple seqlock implementation.
53450- */
53451-
53452-#define seq_define(type, short_type)					\
53453-typedef struct {							\
53454-	atomic_zu_t seq;						\
53455-	atomic_zu_t data[						\
53456-	    (sizeof(type) + sizeof(size_t) - 1) / sizeof(size_t)];	\
53457-} seq_##short_type##_t;							\
53458-									\
53459-/*									\
53460- * No internal synchronization -- the caller must ensure that there's	\
53461- * only a single writer at a time.					\
53462- */									\
53463-static inline void							\
53464-seq_store_##short_type(seq_##short_type##_t *dst, type *src) {		\
53465-	size_t buf[sizeof(dst->data) / sizeof(size_t)];			\
53466-	buf[sizeof(buf) / sizeof(size_t) - 1] = 0;			\
53467-	memcpy(buf, src, sizeof(type));					\
53468-	size_t old_seq = atomic_load_zu(&dst->seq, ATOMIC_RELAXED);	\
53469-	atomic_store_zu(&dst->seq, old_seq + 1, ATOMIC_RELAXED);	\
53470-	atomic_fence(ATOMIC_RELEASE);					\
53471-	for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) {	\
53472-		atomic_store_zu(&dst->data[i], buf[i], ATOMIC_RELAXED);	\
53473-	}								\
53474-	atomic_store_zu(&dst->seq, old_seq + 2, ATOMIC_RELEASE);	\
53475-}									\
53476-									\
53477-/* Returns whether or not the read was consistent. */			\
53478-static inline bool							\
53479-seq_try_load_##short_type(type *dst, seq_##short_type##_t *src) {	\
53480-	size_t buf[sizeof(src->data) / sizeof(size_t)];			\
53481-	size_t seq1 = atomic_load_zu(&src->seq, ATOMIC_ACQUIRE);	\
53482-	if (seq1 % 2 != 0) {						\
53483-		return false;						\
53484-	}								\
53485-	for (size_t i = 0; i < sizeof(buf) / sizeof(size_t); i++) {	\
53486-		buf[i] = atomic_load_zu(&src->data[i], ATOMIC_RELAXED);	\
53487-	}								\
53488-	atomic_fence(ATOMIC_ACQUIRE);					\
53489-	size_t seq2 = atomic_load_zu(&src->seq, ATOMIC_RELAXED);	\
53490-	if (seq1 != seq2) {						\
53491-		return false;						\
53492-	}								\
53493-	memcpy(dst, buf, sizeof(type));					\
53494-	return true;							\
53495-}
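A hypothetical use of the generator above: instantiate it for a small counters struct, then spin until a consistent snapshot is observed (the struct and function names are invented for illustration):

    typedef struct {
        uint64_t nmalloc;
        uint64_t ndalloc;
    } counters_t;
    /* Defines seq_counters_t, seq_store_counters(), seq_try_load_counters(). */
    seq_define(counters_t, counters)

    static counters_t
    counters_snapshot(seq_counters_t *src) {
        counters_t snap;
        while (!seq_try_load_counters(&snap, src)) {
            /* A writer was mid-update; retry. */
        }
        return snap;
    }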
53496-
53497-#endif /* JEMALLOC_INTERNAL_SEQ_H */
53498diff --git a/jemalloc/include/jemalloc/internal/slab_data.h b/jemalloc/include/jemalloc/internal/slab_data.h
53499deleted file mode 100644
53500index e821863..0000000
53501--- a/jemalloc/include/jemalloc/internal/slab_data.h
53502+++ /dev/null
53503@@ -1,12 +0,0 @@
53504-#ifndef JEMALLOC_INTERNAL_SLAB_DATA_H
53505-#define JEMALLOC_INTERNAL_SLAB_DATA_H
53506-
53507-#include "jemalloc/internal/bitmap.h"
53508-
53509-typedef struct slab_data_s slab_data_t;
53510-struct slab_data_s {
53511-	/* Per region allocated/deallocated bitmap. */
53512-	bitmap_t bitmap[BITMAP_GROUPS_MAX];
53513-};
53514-
53515-#endif /* JEMALLOC_INTERNAL_SLAB_DATA_H */
53516diff --git a/jemalloc/include/jemalloc/internal/smoothstep.h b/jemalloc/include/jemalloc/internal/smoothstep.h
53517deleted file mode 100644
53518index 2e14430..0000000
53519--- a/jemalloc/include/jemalloc/internal/smoothstep.h
53520+++ /dev/null
53521@@ -1,232 +0,0 @@
53522-#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
53523-#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
53524-
53525-/*
53526- * This file was generated by the following command:
53527- *   sh smoothstep.sh smoother 200 24 3 15
53528- */
53529-/******************************************************************************/
53530-
53531-/*
53532- * This header defines a precomputed table based on the smoothstep family of
53533- * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
53534- * to 1 in 0 <= x <= 1.  The table is stored as integer fixed point values so
53535- * that floating point math can be avoided.
53536- *
53537- *                      3     2
53538- *   smoothstep(x) = -2x  + 3x
53539- *
53540- *                       5      4      3
53541- *   smootherstep(x) = 6x  - 15x  + 10x
53542- *
53543- *                          7      6      5      4
53544- *   smootheststep(x) = -20x  + 70x  - 84x  + 35x
53545- */
53546-
53547-#define SMOOTHSTEP_VARIANT	"smoother"
53548-#define SMOOTHSTEP_NSTEPS	200
53549-#define SMOOTHSTEP_BFP		24
53550-#define SMOOTHSTEP \
53551- /* STEP(step, h,                            x,     y) */ \
53552-    STEP(   1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \
53553-    STEP(   2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \
53554-    STEP(   3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \
53555-    STEP(   4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \
53556-    STEP(   5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \
53557-    STEP(   6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \
53558-    STEP(   7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \
53559-    STEP(   8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \
53560-    STEP(   9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \
53561-    STEP(  10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \
53562-    STEP(  11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \
53563-    STEP(  12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \
53564-    STEP(  13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \
53565-    STEP(  14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \
53566-    STEP(  15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \
53567-    STEP(  16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \
53568-    STEP(  17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \
53569-    STEP(  18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \
53570-    STEP(  19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \
53571-    STEP(  20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \
53572-    STEP(  21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \
53573-    STEP(  22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \
53574-    STEP(  23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \
53575-    STEP(  24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \
53576-    STEP(  25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \
53577-    STEP(  26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \
53578-    STEP(  27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \
53579-    STEP(  28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \
53580-    STEP(  29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \
53581-    STEP(  30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \
53582-    STEP(  31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \
53583-    STEP(  32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \
53584-    STEP(  33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \
53585-    STEP(  34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \
53586-    STEP(  35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \
53587-    STEP(  36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \
53588-    STEP(  37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \
53589-    STEP(  38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \
53590-    STEP(  39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \
53591-    STEP(  40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \
53592-    STEP(  41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \
53593-    STEP(  42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \
53594-    STEP(  43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \
53595-    STEP(  44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \
53596-    STEP(  45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \
53597-    STEP(  46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \
53598-    STEP(  47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \
53599-    STEP(  48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \
53600-    STEP(  49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \
53601-    STEP(  50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \
53602-    STEP(  51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \
53603-    STEP(  52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \
53604-    STEP(  53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \
53605-    STEP(  54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \
53606-    STEP(  55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \
53607-    STEP(  56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \
53608-    STEP(  57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \
53609-    STEP(  58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \
53610-    STEP(  59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \
53611-    STEP(  60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \
53612-    STEP(  61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \
53613-    STEP(  62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \
53614-    STEP(  63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \
53615-    STEP(  64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \
53616-    STEP(  65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \
53617-    STEP(  66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \
53618-    STEP(  67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \
53619-    STEP(  68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \
53620-    STEP(  69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \
53621-    STEP(  70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \
53622-    STEP(  71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \
53623-    STEP(  72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \
53624-    STEP(  73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \
53625-    STEP(  74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \
53626-    STEP(  75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \
53627-    STEP(  76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \
53628-    STEP(  77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \
53629-    STEP(  78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \
53630-    STEP(  79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \
53631-    STEP(  80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \
53632-    STEP(  81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \
53633-    STEP(  82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \
53634-    STEP(  83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \
53635-    STEP(  84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \
53636-    STEP(  85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \
53637-    STEP(  86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \
53638-    STEP(  87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \
53639-    STEP(  88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \
53640-    STEP(  89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \
53641-    STEP(  90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \
53642-    STEP(  91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \
53643-    STEP(  92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \
53644-    STEP(  93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \
53645-    STEP(  94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \
53646-    STEP(  95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \
53647-    STEP(  96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \
53648-    STEP(  97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \
53649-    STEP(  98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \
53650-    STEP(  99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \
53651-    STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \
53652-    STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \
53653-    STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \
53654-    STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \
53655-    STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \
53656-    STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \
53657-    STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \
53658-    STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \
53659-    STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \
53660-    STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \
53661-    STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \
53662-    STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \
53663-    STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \
53664-    STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \
53665-    STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \
53666-    STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \
53667-    STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \
53668-    STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \
53669-    STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \
53670-    STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \
53671-    STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \
53672-    STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \
53673-    STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \
53674-    STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \
53675-    STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \
53676-    STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \
53677-    STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \
53678-    STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \
53679-    STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \
53680-    STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \
53681-    STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \
53682-    STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \
53683-    STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \
53684-    STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \
53685-    STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \
53686-    STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \
53687-    STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \
53688-    STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \
53689-    STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \
53690-    STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \
53691-    STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \
53692-    STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \
53693-    STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \
53694-    STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \
53695-    STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \
53696-    STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \
53697-    STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \
53698-    STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \
53699-    STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \
53700-    STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \
53701-    STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \
53702-    STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \
53703-    STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \
53704-    STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \
53705-    STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \
53706-    STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \
53707-    STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \
53708-    STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \
53709-    STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \
53710-    STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \
53711-    STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \
53712-    STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \
53713-    STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \
53714-    STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \
53715-    STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \
53716-    STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \
53717-    STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \
53718-    STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \
53719-    STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \
53720-    STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \
53721-    STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \
53722-    STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \
53723-    STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \
53724-    STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \
53725-    STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \
53726-    STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \
53727-    STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \
53728-    STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \
53729-    STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \
53730-    STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \
53731-    STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \
53732-    STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \
53733-    STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \
53734-    STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \
53735-    STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \
53736-    STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \
53737-    STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \
53738-    STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \
53739-    STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \
53740-    STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \
53741-    STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \
53742-    STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \
53743-    STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \
53744-    STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \
53745-    STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \
53746-    STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \
53747-    STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \
53748-    STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \
53749-    STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \
53750-    STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \
53751-    STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \
53752-
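One way to consume the table above (an illustrative pattern, similar in spirit to how decay code expands such X-macros): instantiate SMOOTHSTEP into an array of fixed-point values, then divide by 2**SMOOTHSTEP_BFP to recover y.  For example, step 100 gives 0x0000000000800000 / 2**24 == 0.5.

    static const uint64_t smoothstep_tab[SMOOTHSTEP_NSTEPS] = {
    #define STEP(step, h, x, y) h,
        SMOOTHSTEP
    #undef STEP
    };

    static inline double
    smoothstep_y(size_t i) {    /* i in [0, SMOOTHSTEP_NSTEPS). */
        return (double)smoothstep_tab[i] / (double)(1ULL << SMOOTHSTEP_BFP);
    }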
53753-#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
53754diff --git a/jemalloc/include/jemalloc/internal/smoothstep.sh b/jemalloc/include/jemalloc/internal/smoothstep.sh
53755deleted file mode 100755
53756index 65de97b..0000000
53757--- a/jemalloc/include/jemalloc/internal/smoothstep.sh
53758+++ /dev/null
53759@@ -1,101 +0,0 @@
53760-#!/bin/sh
53761-#
53762-# Generate a discrete lookup table for a sigmoid function in the smoothstep
53763-# family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table
53764-# entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps].  Encode
53765-# the entries using a binary fixed point representation.
53766-#
53767-# Usage: smoothstep.sh <variant> <nsteps> <bfp> <xprec> <yprec>
53768-#
53769-#        <variant> is in {smooth, smoother, smoothest}.
53770-#        <nsteps> must be greater than zero.
53771-#        <bfp> must be in [0..62]; reasonable values are roughly [10..30].
53772-#        <xprec> is x decimal precision.
53773-#        <yprec> is y decimal precision.
53774-
53775-#set -x
53776-
53777-cmd="sh smoothstep.sh $*"
53778-variant=$1
53779-nsteps=$2
53780-bfp=$3
53781-xprec=$4
53782-yprec=$5
53783-
53784-case "${variant}" in
53785-  smooth)
53786-    ;;
53787-  smoother)
53788-    ;;
53789-  smoothest)
53790-    ;;
53791-  *)
53792-    echo "Unsupported variant"
53793-    exit 1
53794-    ;;
53795-esac
53796-
53797-smooth() {
53798-  step=$1
53799-  y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
53800-  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
53801-}
53802-
53803-smoother() {
53804-  step=$1
53805-  y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
53806-  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
53807-}
53808-
53809-smoothest() {
53810-  step=$1
53811-  y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
53812-  h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
53813-}
53814-
53815-cat <<EOF
53816-#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
53817-#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
53818-
53819-/*
53820- * This file was generated by the following command:
53821- *   $cmd
53822- */
53823-/******************************************************************************/
53824-
53825-/*
53826- * This header defines a precomputed table based on the smoothstep family of
53827- * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
53828- * to 1 in 0 <= x <= 1.  The table is stored as integer fixed point values so
53829- * that floating point math can be avoided.
53830- *
53831- *                      3     2
53832- *   smoothstep(x) = -2x  + 3x
53833- *
53834- *                       5      4      3
53835- *   smootherstep(x) = 6x  - 15x  + 10x
53836- *
53837- *                          7      6      5      4
53838- *   smootheststep(x) = -20x  + 70x  - 84x  + 35x
53839- */
53840-
53841-#define SMOOTHSTEP_VARIANT	"${variant}"
53842-#define SMOOTHSTEP_NSTEPS	${nsteps}
53843-#define SMOOTHSTEP_BFP		${bfp}
53844-#define SMOOTHSTEP \\
53845- /* STEP(step, h,                            x,     y) */ \\
53846-EOF
53847-
53848-s=1
53849-while [ $s -le $nsteps ] ; do
53850-  $variant ${s}
53851-  x=`echo ${xprec} k ${s} ${nsteps} / p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
53852-  printf '    STEP(%4d, UINT64_C(0x%016x), %s, %s) \\\n' ${s} ${h} ${x} ${y}
53853-
53854-  s=$((s+1))
53855-done
53856-echo
53857-
53858-cat <<EOF
53859-#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
53860-EOF
53861diff --git a/jemalloc/include/jemalloc/internal/spin.h b/jemalloc/include/jemalloc/internal/spin.h
53862deleted file mode 100644
53863index 22804c6..0000000
53864--- a/jemalloc/include/jemalloc/internal/spin.h
53865+++ /dev/null
53866@@ -1,40 +0,0 @@
53867-#ifndef JEMALLOC_INTERNAL_SPIN_H
53868-#define JEMALLOC_INTERNAL_SPIN_H
53869-
53870-#define SPIN_INITIALIZER {0U}
53871-
53872-typedef struct {
53873-	unsigned iteration;
53874-} spin_t;
53875-
53876-static inline void
53877-spin_cpu_spinwait() {
53878-#  if HAVE_CPU_SPINWAIT
53879-	CPU_SPINWAIT;
53880-#  else
53881-	volatile int x = 0;
53882-	x = x;
53883-#  endif
53884-}
53885-
53886-static inline void
53887-spin_adaptive(spin_t *spin) {
53888-	volatile uint32_t i;
53889-
53890-	if (spin->iteration < 5) {
53891-		for (i = 0; i < (1U << spin->iteration); i++) {
53892-			spin_cpu_spinwait();
53893-		}
53894-		spin->iteration++;
53895-	} else {
53896-#ifdef _WIN32
53897-		SwitchToThread();
53898-#else
53899-		sched_yield();
53900-#endif
53901-	}
53902-}
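A hypothetical caller of spin_adaptive(): spin with exponential backoff (yielding to the scheduler after the fifth round) until another thread publishes a flag.  The atomic_b_t type and atomic_load_b() are assumed to come from jemalloc's atomic.h.

    static void
    wait_until_set(atomic_b_t *flag) {
        spin_t spin = SPIN_INITIALIZER;
        while (!atomic_load_b(flag, ATOMIC_ACQUIRE)) {
            spin_adaptive(&spin);
        }
    }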
53903-
53904-#undef SPIN_INLINE
53905-
53906-#endif /* JEMALLOC_INTERNAL_SPIN_H */
53907diff --git a/jemalloc/include/jemalloc/internal/stats.h b/jemalloc/include/jemalloc/internal/stats.h
53908deleted file mode 100644
53909index 727f7dc..0000000
53910--- a/jemalloc/include/jemalloc/internal/stats.h
53911+++ /dev/null
53912@@ -1,54 +0,0 @@
53913-#ifndef JEMALLOC_INTERNAL_STATS_H
53914-#define JEMALLOC_INTERNAL_STATS_H
53915-
53916-/*  OPTION(opt,		var_name,	default,	set_value_to) */
53917-#define STATS_PRINT_OPTIONS						\
53918-    OPTION('J',		json,		false,		true)		\
53919-    OPTION('g',		general,	true,		false)		\
53920-    OPTION('m',		merged,		config_stats,	false)		\
53921-    OPTION('d',		destroyed,	config_stats,	false)		\
53922-    OPTION('a',		unmerged,	config_stats,	false)		\
53923-    OPTION('b',		bins,		true,		false)		\
53924-    OPTION('l',		large,		true,		false)		\
53925-    OPTION('x',		mutex,		true,		false)		\
53926-    OPTION('e',		extents,	true,		false)		\
53927-    OPTION('h',		hpa,		config_stats,	false)
53928-
53929-enum {
53930-#define OPTION(o, v, d, s) stats_print_option_num_##v,
53931-    STATS_PRINT_OPTIONS
53932-#undef OPTION
53933-    stats_print_tot_num_options
53934-};
53935-
53936-/* Options for stats_print. */
53937-extern bool opt_stats_print;
53938-extern char opt_stats_print_opts[stats_print_tot_num_options+1];
53939-
53940-/* Utilities for stats_interval. */
53941-extern int64_t opt_stats_interval;
53942-extern char opt_stats_interval_opts[stats_print_tot_num_options+1];
53943-
53944-#define STATS_INTERVAL_DEFAULT -1
53945-/*
53946- * Batch-increment the counter to reduce synchronization overhead.  Each thread
53947- * merges after (interval >> LG_BATCH_SIZE) bytes of allocations; the batch is
53948- * also capped at BATCH_MAX for accuracy when the interval is huge (which is expected).
53949- */
53950-#define STATS_INTERVAL_ACCUM_LG_BATCH_SIZE 6
53951-#define STATS_INTERVAL_ACCUM_BATCH_MAX (4 << 20)
53952-
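A worked example of the batching rule above (interval value chosen only for illustration): with a stats interval of 1 GiB, each thread would locally accumulate

    1 GiB >> STATS_INTERVAL_ACCUM_LG_BATCH_SIZE = 1 GiB >> 6 = 16 MiB

before merging, which the cap then reduces to STATS_INTERVAL_ACCUM_BATCH_MAX == 4 << 20 == 4 MiB.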
53953-/* Only accessed by thread event. */
53954-uint64_t stats_interval_new_event_wait(tsd_t *tsd);
53955-uint64_t stats_interval_postponed_event_wait(tsd_t *tsd);
53956-void stats_interval_event_handler(tsd_t *tsd, uint64_t elapsed);
53957-
53958-/* Implements je_malloc_stats_print. */
53959-void stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts);
53960-
53961-bool stats_boot(void);
53962-void stats_prefork(tsdn_t *tsdn);
53963-void stats_postfork_parent(tsdn_t *tsdn);
53964-void stats_postfork_child(tsdn_t *tsdn);
53965-
53966-#endif /* JEMALLOC_INTERNAL_STATS_H */
53967diff --git a/jemalloc/include/jemalloc/internal/sz.h b/jemalloc/include/jemalloc/internal/sz.h
53968deleted file mode 100644
53969index 3c0fc1d..0000000
53970--- a/jemalloc/include/jemalloc/internal/sz.h
53971+++ /dev/null
53972@@ -1,371 +0,0 @@
53973-#ifndef JEMALLOC_INTERNAL_SIZE_H
53974-#define JEMALLOC_INTERNAL_SIZE_H
53975-
53976-#include "jemalloc/internal/bit_util.h"
53977-#include "jemalloc/internal/pages.h"
53978-#include "jemalloc/internal/sc.h"
53979-#include "jemalloc/internal/util.h"
53980-
53981-/*
53982- * sz module: Size computations.
53983- *
53984- * Some abbreviations used here:
53985- *   p: Page
53986- *   ind: Index
53987- *   s, sz: Size
53988- *   u: Usable size
53989- *   a: Aligned
53990- *
53991- * These are not always used completely consistently, but should be enough to
53992- * interpret function names.  E.g. sz_psz2ind converts page size to page size
53993- * index; sz_sa2u converts a (size, alignment) allocation request to the usable
53994- * size that would result from such an allocation.
53995- */
53996-
53997-/* Page size index type. */
53998-typedef unsigned pszind_t;
53999-
54000-/* Size class index type. */
54001-typedef unsigned szind_t;
54002-
54003-/*
54004- * sz_pind2sz_tab encodes the same information as could be computed by
54005- * sz_pind2sz_compute().
54006- */
54007-extern size_t sz_pind2sz_tab[SC_NPSIZES + 1];
54008-/*
54009- * sz_index2size_tab encodes the same information as could be computed (at
54010- * unacceptable cost in some code paths) by sz_index2size_compute().
54011- */
54012-extern size_t sz_index2size_tab[SC_NSIZES];
54013-/*
54014- * sz_size2index_tab is a compact lookup table that rounds request sizes up to
54015- * size classes.  In order to reduce cache footprint, the table is compressed,
54016- * and all accesses are via sz_size2index().
54017- */
54018-extern uint8_t sz_size2index_tab[];
54019-
54020-/*
54021- * Padding for large allocations: PAGE when opt_cache_oblivious == true (to
54022- * enable cache index randomization); 0 otherwise.
54023- */
54024-extern size_t sz_large_pad;
54025-
54026-extern void sz_boot(const sc_data_t *sc_data, bool cache_oblivious);
54027-
54028-JEMALLOC_ALWAYS_INLINE pszind_t
54029-sz_psz2ind(size_t psz) {
54030-	assert(psz > 0);
54031-	if (unlikely(psz > SC_LARGE_MAXCLASS)) {
54032-		return SC_NPSIZES;
54033-	}
54034-	/* x is the lg of the first base >= psz. */
54035-	pszind_t x = lg_ceil(psz);
54036-	/*
54037-	 * sc.h introduces a lot of size classes. These size classes are divided
54038-	 * into different size class groups. There is a very special size class
54039-	 * group; every size class in or after it is an integer multiple of PAGE.
54040-	 * We call it first_ps_rg. It means first page size regular group. The
54041-	 * range of first_ps_rg is (base, base * 2], and base == PAGE *
54042-	 * SC_NGROUP. off_to_first_ps_rg begins from 1, instead of 0. e.g.
54043-	 * off_to_first_ps_rg is 1 when psz is (PAGE * SC_NGROUP + 1).
54044-	 */
54045-	pszind_t off_to_first_ps_rg = (x < SC_LG_NGROUP + LG_PAGE) ?
54046-	    0 : x - (SC_LG_NGROUP + LG_PAGE);
54047-
54048-	/*
54049-	 * Same as sc_s::lg_delta.
54050-	 * Delta for off_to_first_ps_rg == 1 is PAGE,
54051-	 * for each increase in offset, it's multiplied by two.
54052-	 * Therefore, lg_delta = LG_PAGE + (off_to_first_ps_rg - 1).
54053-	 */
54054-	pszind_t lg_delta = (off_to_first_ps_rg == 0) ?
54055-	    LG_PAGE : LG_PAGE + (off_to_first_ps_rg - 1);
54056-
54057-	/*
54058-	 * Let's write psz in binary, e.g. 0011 for 0x3, 0111 for 0x7.
54059-	 * The leftmost bits whose len is lg_base decide the base of psz.
54060-	 * The rightmost bits whose len is lg_delta decide (psz % (1 << lg_delta)).
54061-	 * The middle bits whose len is SC_LG_NGROUP decide ndelta.
54062-	 * ndelta is the index of the size class within its group,
54063-	 * and it starts from 1.
54064-	 * If you don't know lg_base, ndelta or lg_delta, see sc.h.
54065-	 * |xxxxxxxxxxxxxxxxxxxx|------------------------|yyyyyyyyyyyyyyyyyyyyy|
54066-	 * |<-- len: lg_base -->|<-- len: SC_LG_NGROUP-->|<-- len: lg_delta -->|
54067-	 *                      |<--      ndelta      -->|
54068-	 * rg_inner_off = ndelta - 1
54069-	 * Why use (psz - 1)?
54070-	 * To handle case: psz % (1 << lg_delta) == 0.
54071-	 * To handle the case where psz % (1 << lg_delta) == 0.
54072-	pszind_t rg_inner_off = (((psz - 1)) >> lg_delta) & (SC_NGROUP - 1);
54073-
54074-	pszind_t base_ind = off_to_first_ps_rg << SC_LG_NGROUP;
54075-	pszind_t ind = base_ind + rg_inner_off;
54076-	return ind;
54077-}
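A worked example of the computation above, assuming LG_PAGE == 12 and SC_LG_NGROUP == 2 (so first_ps_rg covers (4 * PAGE, 8 * PAGE]): take psz = 4 * PAGE + 1 = 16385.

    x                  = lg_ceil(16385)             = 15
    off_to_first_ps_rg = 15 - (2 + 12)              = 1
    lg_delta           = 12 + (1 - 1)               = 12
    rg_inner_off       = ((16385 - 1) >> 12) & 3    = 4 & 3 = 0
    ind                = (1 << 2) + 0               = 4

Index 4 is the first size class in first_ps_rg, i.e. the 5 * PAGE class (sz_pind2sz_compute(4) below returns 20480 when LG_PAGE == 12).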
54078-
54079-static inline size_t
54080-sz_pind2sz_compute(pszind_t pind) {
54081-	if (unlikely(pind == SC_NPSIZES)) {
54082-		return SC_LARGE_MAXCLASS + PAGE;
54083-	}
54084-	size_t grp = pind >> SC_LG_NGROUP;
54085-	size_t mod = pind & ((ZU(1) << SC_LG_NGROUP) - 1);
54086-
54087-	size_t grp_size_mask = ~((!!grp)-1);
54088-	size_t grp_size = ((ZU(1) << (LG_PAGE + (SC_LG_NGROUP-1))) << grp)
54089-	    & grp_size_mask;
54090-
54091-	size_t shift = (grp == 0) ? 1 : grp;
54092-	size_t lg_delta = shift + (LG_PAGE-1);
54093-	size_t mod_size = (mod+1) << lg_delta;
54094-
54095-	size_t sz = grp_size + mod_size;
54096-	return sz;
54097-}
54098-
54099-static inline size_t
54100-sz_pind2sz_lookup(pszind_t pind) {
54101-	size_t ret = (size_t)sz_pind2sz_tab[pind];
54102-	assert(ret == sz_pind2sz_compute(pind));
54103-	return ret;
54104-}
54105-
54106-static inline size_t
54107-sz_pind2sz(pszind_t pind) {
54108-	assert(pind < SC_NPSIZES + 1);
54109-	return sz_pind2sz_lookup(pind);
54110-}
54111-
54112-static inline size_t
54113-sz_psz2u(size_t psz) {
54114-	if (unlikely(psz > SC_LARGE_MAXCLASS)) {
54115-		return SC_LARGE_MAXCLASS + PAGE;
54116-	}
54117-	size_t x = lg_floor((psz<<1)-1);
54118-	size_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ?
54119-	    LG_PAGE : x - SC_LG_NGROUP - 1;
54120-	size_t delta = ZU(1) << lg_delta;
54121-	size_t delta_mask = delta - 1;
54122-	size_t usize = (psz + delta_mask) & ~delta_mask;
54123-	return usize;
54124-}
54125-
54126-static inline szind_t
54127-sz_size2index_compute(size_t size) {
54128-	if (unlikely(size > SC_LARGE_MAXCLASS)) {
54129-		return SC_NSIZES;
54130-	}
54131-
54132-	if (size == 0) {
54133-		return 0;
54134-	}
54135-#if (SC_NTINY != 0)
54136-	if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) {
54137-		szind_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
54138-		szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
54139-		return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
54140-	}
54141-#endif
54142-	{
54143-		szind_t x = lg_floor((size<<1)-1);
54144-		szind_t shift = (x < SC_LG_NGROUP + LG_QUANTUM) ? 0 :
54145-		    x - (SC_LG_NGROUP + LG_QUANTUM);
54146-		szind_t grp = shift << SC_LG_NGROUP;
54147-
54148-		szind_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1)
54149-		    ? LG_QUANTUM : x - SC_LG_NGROUP - 1;
54150-
54151-		size_t delta_inverse_mask = ZU(-1) << lg_delta;
54152-		szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
54153-		    ((ZU(1) << SC_LG_NGROUP) - 1);
54154-
54155-		szind_t index = SC_NTINY + grp + mod;
54156-		return index;
54157-	}
54158-}
54159-
54160-JEMALLOC_ALWAYS_INLINE szind_t
54161-sz_size2index_lookup_impl(size_t size) {
54162-	assert(size <= SC_LOOKUP_MAXCLASS);
54163-	return sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1)
54164-	    >> SC_LG_TINY_MIN];
54165-}
54166-
54167-JEMALLOC_ALWAYS_INLINE szind_t
54168-sz_size2index_lookup(size_t size) {
54169-	szind_t ret = sz_size2index_lookup_impl(size);
54170-	assert(ret == sz_size2index_compute(size));
54171-	return ret;
54172-}
54173-
54174-JEMALLOC_ALWAYS_INLINE szind_t
54175-sz_size2index(size_t size) {
54176-	if (likely(size <= SC_LOOKUP_MAXCLASS)) {
54177-		return sz_size2index_lookup(size);
54178-	}
54179-	return sz_size2index_compute(size);
54180-}
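For instance, with SC_LG_TINY_MIN == 3 the lookup path above advances through the compressed table in 8-byte steps; the indices shown below assume the usual LG_QUANTUM == 4 layout, where class 0 is 8 bytes and class 1 is 16 bytes (example values, not a requirement):

    size in [1, 8]    ->  (size + 7) >> 3 == 1  ->  index 0   (8-byte class)
    size in [9, 16]   ->  (size + 7) >> 3 == 2  ->  index 1   (16-byte class)
    size in [17, 24]  ->  (size + 7) >> 3 == 3  ->  index 2   (32-byte class)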
54181-
54182-static inline size_t
54183-sz_index2size_compute(szind_t index) {
54184-#if (SC_NTINY > 0)
54185-	if (index < SC_NTINY) {
54186-		return (ZU(1) << (SC_LG_TINY_MAXCLASS - SC_NTINY + 1 + index));
54187-	}
54188-#endif
54189-	{
54190-		size_t reduced_index = index - SC_NTINY;
54191-		size_t grp = reduced_index >> SC_LG_NGROUP;
54192-		size_t mod = reduced_index & ((ZU(1) << SC_LG_NGROUP) -
54193-		    1);
54194-
54195-		size_t grp_size_mask = ~((!!grp)-1);
54196-		size_t grp_size = ((ZU(1) << (LG_QUANTUM +
54197-		    (SC_LG_NGROUP-1))) << grp) & grp_size_mask;
54198-
54199-		size_t shift = (grp == 0) ? 1 : grp;
54200-		size_t lg_delta = shift + (LG_QUANTUM-1);
54201-		size_t mod_size = (mod+1) << lg_delta;
54202-
54203-		size_t usize = grp_size + mod_size;
54204-		return usize;
54205-	}
54206-}
54207-
54208-JEMALLOC_ALWAYS_INLINE size_t
54209-sz_index2size_lookup_impl(szind_t index) {
54210-	return sz_index2size_tab[index];
54211-}
54212-
54213-JEMALLOC_ALWAYS_INLINE size_t
54214-sz_index2size_lookup(szind_t index) {
54215-	size_t ret = sz_index2size_lookup_impl(index);
54216-	assert(ret == sz_index2size_compute(index));
54217-	return ret;
54218-}
54219-
54220-JEMALLOC_ALWAYS_INLINE size_t
54221-sz_index2size(szind_t index) {
54222-	assert(index < SC_NSIZES);
54223-	return sz_index2size_lookup(index);
54224-}
54225-
54226-JEMALLOC_ALWAYS_INLINE void
54227-sz_size2index_usize_fastpath(size_t size, szind_t *ind, size_t *usize) {
54228-	*ind = sz_size2index_lookup_impl(size);
54229-	*usize = sz_index2size_lookup_impl(*ind);
54230-}
54231-
54232-JEMALLOC_ALWAYS_INLINE size_t
54233-sz_s2u_compute(size_t size) {
54234-	if (unlikely(size > SC_LARGE_MAXCLASS)) {
54235-		return 0;
54236-	}
54237-
54238-	if (size == 0) {
54239-		size++;
54240-	}
54241-#if (SC_NTINY > 0)
54242-	if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) {
54243-		size_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
54244-		size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
54245-		return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
54246-		    (ZU(1) << lg_ceil));
54247-	}
54248-#endif
54249-	{
54250-		size_t x = lg_floor((size<<1)-1);
54251-		size_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1)
54252-		    ?  LG_QUANTUM : x - SC_LG_NGROUP - 1;
54253-		size_t delta = ZU(1) << lg_delta;
54254-		size_t delta_mask = delta - 1;
54255-		size_t usize = (size + delta_mask) & ~delta_mask;
54256-		return usize;
54257-	}
54258-}
54259-
54260-JEMALLOC_ALWAYS_INLINE size_t
54261-sz_s2u_lookup(size_t size) {
54262-	size_t ret = sz_index2size_lookup(sz_size2index_lookup(size));
54263-
54264-	assert(ret == sz_s2u_compute(size));
54265-	return ret;
54266-}
54267-
54268-/*
54269- * Compute usable size that would result from allocating an object with the
54270- * specified size.
54271- */
54272-JEMALLOC_ALWAYS_INLINE size_t
54273-sz_s2u(size_t size) {
54274-	if (likely(size <= SC_LOOKUP_MAXCLASS)) {
54275-		return sz_s2u_lookup(size);
54276-	}
54277-	return sz_s2u_compute(size);
54278-}
54279-
54280-/*
54281- * Compute usable size that would result from allocating an object with the
54282- * specified size and alignment.
54283- */
54284-JEMALLOC_ALWAYS_INLINE size_t
54285-sz_sa2u(size_t size, size_t alignment) {
54286-	size_t usize;
54287-
54288-	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
54289-
54290-	/* Try for a small size class. */
54291-	if (size <= SC_SMALL_MAXCLASS && alignment <= PAGE) {
54292-		/*
54293-		 * Round size up to the nearest multiple of alignment.
54294-		 *
54295-		 * This done, we can take advantage of the fact that for each
54296-		 * small size class, every object is aligned at the smallest
54297-		 * power of two that is non-zero in the base two representation
54298-		 * of the size.  For example:
54299-		 *
54300-		 *   Size |   Base 2 | Minimum alignment
54301-		 *   -----+----------+------------------
54302-		 *     96 |  1100000 |  32
54303-		 *    144 | 10100000 |  32
54304-		 *    192 | 11000000 |  64
54305-		 */
54306-		usize = sz_s2u(ALIGNMENT_CEILING(size, alignment));
54307-		if (usize < SC_LARGE_MINCLASS) {
54308-			return usize;
54309-		}
54310-	}
54311-
54312-	/* Large size class.  Beware of overflow. */
54313-
54314-	if (unlikely(alignment > SC_LARGE_MAXCLASS)) {
54315-		return 0;
54316-	}
54317-
54318-	/* Make sure result is a large size class. */
54319-	if (size <= SC_LARGE_MINCLASS) {
54320-		usize = SC_LARGE_MINCLASS;
54321-	} else {
54322-		usize = sz_s2u(size);
54323-		if (usize < size) {
54324-			/* size_t overflow. */
54325-			return 0;
54326-		}
54327-	}
54328-
54329-	/*
54330-	 * Calculate the multi-page mapping that large_palloc() would need in
54331-	 * order to guarantee the alignment.
54332-	 */
54333-	if (usize + sz_large_pad + PAGE_CEILING(alignment) - PAGE < usize) {
54334-		/* size_t overflow. */
54335-		return 0;
54336-	}
54337-	return usize;
54338-}
54339-
54340-size_t sz_psz_quantize_floor(size_t size);
54341-size_t sz_psz_quantize_ceil(size_t size);
54342-
54343-#endif /* JEMALLOC_INTERNAL_SIZE_H */
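The rounding arithmetic in sz_s2u_compute() above is easier to follow with concrete numbers. Below is a small standalone sketch of the same general-case computation under assumed values LG_QUANTUM = 4 and LG_NGROUP = 2 (a common 64-bit configuration); it ignores tiny classes and the SC_LARGE_MAXCLASS check, and all names are illustrative rather than jemalloc's.

```c
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Assumed values for a common 64-bit build: 16-byte quantum, 4 size
 * classes per doubling.  Real builds take these from sc.h. */
#define DEMO_LG_QUANTUM 4
#define DEMO_LG_NGROUP  2

static size_t
demo_lg_floor(size_t x) {
    size_t lg = 0;
    while (x >>= 1) {
        lg++;
    }
    return lg;
}

/* Same arithmetic as the general case of sz_s2u_compute(): pick the
 * spacing (delta) of the group the request falls into, then round the
 * request up to a multiple of that spacing. */
static size_t
demo_s2u(size_t size) {
    size_t x = demo_lg_floor((size << 1) - 1);
    size_t lg_delta = (x < DEMO_LG_NGROUP + DEMO_LG_QUANTUM + 1)
        ? DEMO_LG_QUANTUM : x - DEMO_LG_NGROUP - 1;
    size_t delta_mask = ((size_t)1 << lg_delta) - 1;
    return (size + delta_mask) & ~delta_mask;
}

int
main(void) {
    assert(demo_s2u(17) == 32);     /* classes 16, 32, 48, 64, ...  */
    assert(demo_s2u(161) == 192);   /* classes 160, 192, 224, 256   */
    assert(demo_s2u(257) == 320);   /* spacing doubles to 64        */
    printf("usize(1000) = %zu\n", demo_s2u(1000));   /* 1024 */
    return 0;
}
```

With these constants the class spacing doubles every four classes, which is the same grouped layout the index/size lookup tables above are built from.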
54344diff --git a/jemalloc/include/jemalloc/internal/tcache_externs.h b/jemalloc/include/jemalloc/internal/tcache_externs.h
54345deleted file mode 100644
54346index a2ab710..0000000
54347--- a/jemalloc/include/jemalloc/internal/tcache_externs.h
54348+++ /dev/null
54349@@ -1,75 +0,0 @@
54350-#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
54351-#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
54352-
54353-extern bool opt_tcache;
54354-extern size_t opt_tcache_max;
54355-extern ssize_t	opt_lg_tcache_nslots_mul;
54356-extern unsigned opt_tcache_nslots_small_min;
54357-extern unsigned opt_tcache_nslots_small_max;
54358-extern unsigned opt_tcache_nslots_large;
54359-extern ssize_t opt_lg_tcache_shift;
54360-extern size_t opt_tcache_gc_incr_bytes;
54361-extern size_t opt_tcache_gc_delay_bytes;
54362-extern unsigned opt_lg_tcache_flush_small_div;
54363-extern unsigned opt_lg_tcache_flush_large_div;
54364-
54365-/*
54366- * Number of tcache bins.  There are SC_NBINS small-object bins, plus 0 or more
54367- * large-object bins.
54368- */
54369-extern unsigned	nhbins;
54370-
54371-/* Maximum cached size class. */
54372-extern size_t	tcache_maxclass;
54373-
54374-extern cache_bin_info_t *tcache_bin_info;
54375-
54376-/*
54377- * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
54378- * usable via the MALLOCX_TCACHE() flag.  The automatic per thread tcaches are
54379- * completely disjoint from this data structure.  tcaches starts off as a sparse
54380- * array, so it has no physical memory footprint until individual pages are
54381- * touched.  This allows the entire array to be allocated the first time an
54382- * explicit tcache is created without a disproportionate impact on memory usage.
54383- */
54384-extern tcaches_t	*tcaches;
54385-
54386-size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
54387-void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
54388-    cache_bin_t *tbin, szind_t binind, bool *tcache_success);
54389-
54390-void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
54391-    szind_t binind, unsigned rem);
54392-void tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
54393-    szind_t binind, unsigned rem);
54394-void tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *bin,
54395-    szind_t binind, bool is_small);
54396-void tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
54397-    tcache_t *tcache, arena_t *arena);
54398-tcache_t *tcache_create_explicit(tsd_t *tsd);
54399-void tcache_cleanup(tsd_t *tsd);
54400-void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
54401-bool tcaches_create(tsd_t *tsd, base_t *base, unsigned *r_ind);
54402-void tcaches_flush(tsd_t *tsd, unsigned ind);
54403-void tcaches_destroy(tsd_t *tsd, unsigned ind);
54404-bool tcache_boot(tsdn_t *tsdn, base_t *base);
54405-void tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
54406-    tcache_t *tcache, arena_t *arena);
54407-void tcache_prefork(tsdn_t *tsdn);
54408-void tcache_postfork_parent(tsdn_t *tsdn);
54409-void tcache_postfork_child(tsdn_t *tsdn);
54410-void tcache_flush(tsd_t *tsd);
54411-bool tsd_tcache_data_init(tsd_t *tsd);
54412-bool tsd_tcache_enabled_data_init(tsd_t *tsd);
54413-
54414-void tcache_assert_initialized(tcache_t *tcache);
54415-
54416-/* Only accessed by thread event. */
54417-uint64_t tcache_gc_new_event_wait(tsd_t *tsd);
54418-uint64_t tcache_gc_postponed_event_wait(tsd_t *tsd);
54419-void tcache_gc_event_handler(tsd_t *tsd, uint64_t elapsed);
54420-uint64_t tcache_gc_dalloc_new_event_wait(tsd_t *tsd);
54421-uint64_t tcache_gc_dalloc_postponed_event_wait(tsd_t *tsd);
54422-void tcache_gc_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed);
54423-
54424-#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
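For context, a sketch of how the explicit-tcache machinery declared above is typically driven from application code through jemalloc's public API (mallctl / mallocx). Error handling is abbreviated, and the include path assumes the usual installed header.

```c
#include <jemalloc/jemalloc.h>
#include <stddef.h>

void
explicit_tcache_demo(void) {
    unsigned tc;
    size_t sz = sizeof(tc);

    /* Backed by tcaches_create(): hands out an index into the sparse
     * `tcaches` array declared above. */
    if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0) {
        return;
    }

    /* Route allocations through the explicit cache instead of the
     * automatic per-thread tcache. */
    void *p = mallocx(64, MALLOCX_TCACHE(tc));
    if (p != NULL) {
        dallocx(p, MALLOCX_TCACHE(tc));
    }

    /* Backed by tcaches_flush() / tcaches_destroy(). */
    mallctl("tcache.flush", NULL, NULL, &tc, sizeof(tc));
    mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
}
```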
54425diff --git a/jemalloc/include/jemalloc/internal/tcache_inlines.h b/jemalloc/include/jemalloc/internal/tcache_inlines.h
54426deleted file mode 100644
54427index 2634f14..0000000
54428--- a/jemalloc/include/jemalloc/internal/tcache_inlines.h
54429+++ /dev/null
54430@@ -1,193 +0,0 @@
54431-#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
54432-#define JEMALLOC_INTERNAL_TCACHE_INLINES_H
54433-
54434-#include "jemalloc/internal/bin.h"
54435-#include "jemalloc/internal/jemalloc_internal_types.h"
54436-#include "jemalloc/internal/san.h"
54437-#include "jemalloc/internal/sc.h"
54438-#include "jemalloc/internal/sz.h"
54439-#include "jemalloc/internal/util.h"
54440-
54441-static inline bool
54442-tcache_enabled_get(tsd_t *tsd) {
54443-	return tsd_tcache_enabled_get(tsd);
54444-}
54445-
54446-static inline void
54447-tcache_enabled_set(tsd_t *tsd, bool enabled) {
54448-	bool was_enabled = tsd_tcache_enabled_get(tsd);
54449-
54450-	if (!was_enabled && enabled) {
54451-		tsd_tcache_data_init(tsd);
54452-	} else if (was_enabled && !enabled) {
54453-		tcache_cleanup(tsd);
54454-	}
54455-	/* Commit the state last.  Above calls check current state. */
54456-	tsd_tcache_enabled_set(tsd, enabled);
54457-	tsd_slow_update(tsd);
54458-}
54459-
54460-JEMALLOC_ALWAYS_INLINE bool
54461-tcache_small_bin_disabled(szind_t ind, cache_bin_t *bin) {
54462-	assert(ind < SC_NBINS);
54463-	bool ret = (cache_bin_info_ncached_max(&tcache_bin_info[ind]) == 0);
54464-	if (ret && bin != NULL) {
54465-		/* small size class but cache bin disabled. */
54466-		assert(ind >= nhbins);
54467-		assert((uintptr_t)(*bin->stack_head) ==
54468-		    cache_bin_preceding_junk);
54469-	}
54470-
54471-	return ret;
54472-}
54473-
54474-JEMALLOC_ALWAYS_INLINE void *
54475-tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
54476-    size_t size, szind_t binind, bool zero, bool slow_path) {
54477-	void *ret;
54478-	bool tcache_success;
54479-
54480-	assert(binind < SC_NBINS);
54481-	cache_bin_t *bin = &tcache->bins[binind];
54482-	ret = cache_bin_alloc(bin, &tcache_success);
54483-	assert(tcache_success == (ret != NULL));
54484-	if (unlikely(!tcache_success)) {
54485-		bool tcache_hard_success;
54486-		arena = arena_choose(tsd, arena);
54487-		if (unlikely(arena == NULL)) {
54488-			return NULL;
54489-		}
54490-		if (unlikely(tcache_small_bin_disabled(binind, bin))) {
54491-			/* stats and zero are handled directly by the arena. */
54492-			return arena_malloc_hard(tsd_tsdn(tsd), arena, size,
54493-			    binind, zero);
54494-		}
54495-		tcache_bin_flush_stashed(tsd, tcache, bin, binind,
54496-		    /* is_small */ true);
54497-
54498-		ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
54499-		    bin, binind, &tcache_hard_success);
54500-		if (tcache_hard_success == false) {
54501-			return NULL;
54502-		}
54503-	}
54504-
54505-	assert(ret);
54506-	if (unlikely(zero)) {
54507-		size_t usize = sz_index2size(binind);
54508-		assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
54509-		memset(ret, 0, usize);
54510-	}
54511-	if (config_stats) {
54512-		bin->tstats.nrequests++;
54513-	}
54514-	return ret;
54515-}
54516-
54517-JEMALLOC_ALWAYS_INLINE void *
54518-tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
54519-    szind_t binind, bool zero, bool slow_path) {
54520-	void *ret;
54521-	bool tcache_success;
54522-
54523-	assert(binind >= SC_NBINS && binind < nhbins);
54524-	cache_bin_t *bin = &tcache->bins[binind];
54525-	ret = cache_bin_alloc(bin, &tcache_success);
54526-	assert(tcache_success == (ret != NULL));
54527-	if (unlikely(!tcache_success)) {
54528-		/*
54529-		 * Only allocate one large object at a time, because it's quite
54530-		 * expensive to create one and not use it.
54531-		 */
54532-		arena = arena_choose(tsd, arena);
54533-		if (unlikely(arena == NULL)) {
54534-			return NULL;
54535-		}
54536-		tcache_bin_flush_stashed(tsd, tcache, bin, binind,
54537-		    /* is_small */ false);
54538-
54539-		ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
54540-		if (ret == NULL) {
54541-			return NULL;
54542-		}
54543-	} else {
54544-		if (unlikely(zero)) {
54545-			size_t usize = sz_index2size(binind);
54546-			assert(usize <= tcache_maxclass);
54547-			memset(ret, 0, usize);
54548-		}
54549-
54550-		if (config_stats) {
54551-			bin->tstats.nrequests++;
54552-		}
54553-	}
54554-
54555-	return ret;
54556-}
54557-
54558-JEMALLOC_ALWAYS_INLINE void
54559-tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
54560-    bool slow_path) {
54561-	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SC_SMALL_MAXCLASS);
54562-
54563-	cache_bin_t *bin = &tcache->bins[binind];
54564-	/*
54565-	 * Not marking the branch unlikely because this is past free_fastpath()
54566-	 * (which handles the most common cases), i.e. at this point it's often
54567-	 * (which handles the most common cases), i.e. by this point we are often
54568-	 * in the uncommon cases.
54569-	if (cache_bin_nonfast_aligned(ptr)) {
54570-		/* Junk unconditionally, even if bin is full. */
54571-		san_junk_ptr(ptr, sz_index2size(binind));
54572-		if (cache_bin_stash(bin, ptr)) {
54573-			return;
54574-		}
54575-		assert(cache_bin_full(bin));
54576-		/* Bin full; fall through into the flush branch. */
54577-	}
54578-
54579-	if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
54580-		if (unlikely(tcache_small_bin_disabled(binind, bin))) {
54581-			arena_dalloc_small(tsd_tsdn(tsd), ptr);
54582-			return;
54583-		}
54584-		cache_bin_sz_t max = cache_bin_info_ncached_max(
54585-		    &tcache_bin_info[binind]);
54586-		unsigned remain = max >> opt_lg_tcache_flush_small_div;
54587-		tcache_bin_flush_small(tsd, tcache, bin, binind, remain);
54588-		bool ret = cache_bin_dalloc_easy(bin, ptr);
54589-		assert(ret);
54590-	}
54591-}
54592-
54593-JEMALLOC_ALWAYS_INLINE void
54594-tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
54595-    bool slow_path) {
54596-
54597-	assert(tcache_salloc(tsd_tsdn(tsd), ptr)
54598-	    > SC_SMALL_MAXCLASS);
54599-	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
54600-
54601-	cache_bin_t *bin = &tcache->bins[binind];
54602-	if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
54603-		unsigned remain = cache_bin_info_ncached_max(
54604-		    &tcache_bin_info[binind]) >> opt_lg_tcache_flush_large_div;
54605-		tcache_bin_flush_large(tsd, tcache, bin, binind, remain);
54606-		bool ret = cache_bin_dalloc_easy(bin, ptr);
54607-		assert(ret);
54608-	}
54609-}
54610-
54611-JEMALLOC_ALWAYS_INLINE tcache_t *
54612-tcaches_get(tsd_t *tsd, unsigned ind) {
54613-	tcaches_t *elm = &tcaches[ind];
54614-	if (unlikely(elm->tcache == NULL)) {
54615-		malloc_printf("<jemalloc>: invalid tcache id (%u).\n", ind);
54616-		abort();
54617-	} else if (unlikely(elm->tcache == TCACHES_ELM_NEED_REINIT)) {
54618-		elm->tcache = tcache_create_explicit(tsd);
54619-	}
54620-	return elm->tcache;
54621-}
54622-
54623-#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */
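The dalloc paths above share one shape: try the cheap bin insert first, and only when the bin is full flush a fraction of it and retry. A stripped-down standalone sketch of that shape follows; the names are illustrative, and plain free() stands in for the arena flush path.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

#define BIN_NCACHED_MAX 64
#define LG_FLUSH_DIV    1   /* a flush keeps max >> 1 entries */

typedef struct {
    void  *slots[BIN_NCACHED_MAX];
    size_t ncached;
} demo_bin_t;

static void
backend_free(void *ptr) {
    free(ptr);   /* stands in for tcache_bin_flush_small/large() */
}

static bool
demo_bin_dalloc_easy(demo_bin_t *bin, void *ptr) {
    if (bin->ncached == BIN_NCACHED_MAX) {
        return false;            /* bin full; caller must flush */
    }
    bin->slots[bin->ncached++] = ptr;
    return true;
}

static void
demo_bin_flush(demo_bin_t *bin, size_t rem) {
    while (bin->ncached > rem) {
        backend_free(bin->slots[--bin->ncached]);
    }
}

void
demo_dalloc(demo_bin_t *bin, void *ptr) {
    if (!demo_bin_dalloc_easy(bin, ptr)) {
        demo_bin_flush(bin, BIN_NCACHED_MAX >> LG_FLUSH_DIV);
        bool ok = demo_bin_dalloc_easy(bin, ptr);
        (void)ok;                /* cannot fail right after a flush */
    }
}
```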
54624diff --git a/jemalloc/include/jemalloc/internal/tcache_structs.h b/jemalloc/include/jemalloc/internal/tcache_structs.h
54625deleted file mode 100644
54626index 176d73d..0000000
54627--- a/jemalloc/include/jemalloc/internal/tcache_structs.h
54628+++ /dev/null
54629@@ -1,68 +0,0 @@
54630-#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
54631-#define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
54632-
54633-#include "jemalloc/internal/cache_bin.h"
54634-#include "jemalloc/internal/ql.h"
54635-#include "jemalloc/internal/sc.h"
54636-#include "jemalloc/internal/ticker.h"
54637-#include "jemalloc/internal/tsd_types.h"
54638-
54639-/*
54640- * The tcache state is split into the slow and hot path data.  Each has a
54641- * pointer to the other, and the data always comes in pairs.  The layout of each
54642- * of them varies in practice; tcache_slow lives in the TSD for the automatic
54643- * tcache, and as part of a dynamic allocation for manual allocations.  Keeping
54644- * a pointer to tcache_slow lets us treat these cases uniformly, rather than
54645- * splitting up the tcache [de]allocation code into those paths called with the
54646- * TSD tcache and those called with a manual tcache.
54647- */
54648-
54649-struct tcache_slow_s {
54650-	/* Lets us track all the tcaches in an arena. */
54651-	ql_elm(tcache_slow_t) link;
54652-
54653-	/*
54654-	 * The descriptor lets the arena find our cache bins without seeing the
54655-	 * tcache definition.  This enables arenas to aggregate stats across
54656-	 * tcaches without having a tcache dependency.
54657-	 */
54658-	cache_bin_array_descriptor_t cache_bin_array_descriptor;
54659-
54660-	/* The arena this tcache is associated with. */
54661-	arena_t		*arena;
54662-	/* Next bin to GC. */
54663-	szind_t		next_gc_bin;
54664-	/* For small bins, fill (ncached_max >> lg_fill_div). */
54665-	uint8_t		lg_fill_div[SC_NBINS];
54666-	/* For small bins, whether it has been refilled since last GC. */
54667-	bool		bin_refilled[SC_NBINS];
54668-	/*
54669-	 * For small bins, the number of items we can pretend to flush before
54670-	 * actually flushing.
54671-	 */
54672-	uint8_t		bin_flush_delay_items[SC_NBINS];
54673-	/*
54674-	 * The start of the allocation containing the dynamic allocation for
54675-	 * either the cache bins alone, or the cache bin memory as well as this
54676-	 * tcache_slow_t and its associated tcache_t.
54677-	 */
54678-	void		*dyn_alloc;
54679-
54680-	/* The associated bins. */
54681-	tcache_t	*tcache;
54682-};
54683-
54684-struct tcache_s {
54685-	tcache_slow_t	*tcache_slow;
54686-	cache_bin_t	bins[TCACHE_NBINS_MAX];
54687-};
54688-
54689-/* Linkage for list of available (previously used) explicit tcache IDs. */
54690-struct tcaches_s {
54691-	union {
54692-		tcache_t	*tcache;
54693-		tcaches_t	*next;
54694-	};
54695-};
54696-
54697-#endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */
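The cache_bin_array_descriptor held by tcache_slow_s is a small "descriptor" that exposes only the bins plus a list link, so the arena can walk and aggregate caches without ever seeing the tcache definition. A self-contained sketch of that pattern, with illustrative (non-jemalloc) names:

```c
#include <stddef.h>

#define DEMO_NBINS 4

typedef struct demo_bin_descriptor_s demo_bin_descriptor_t;
struct demo_bin_descriptor_s {
    demo_bin_descriptor_t *next;   /* intrusive list link */
    unsigned              *bins;   /* points at the owner's bin counters */
};

/* The "arena" side only ever sees descriptors. */
unsigned
demo_aggregate(const demo_bin_descriptor_t *head, size_t bin_ix) {
    unsigned total = 0;
    for (const demo_bin_descriptor_t *d = head; d != NULL; d = d->next) {
        total += d->bins[bin_ix];
    }
    return total;
}

/* The "tcache" side owns the data and registers a descriptor for it. */
typedef struct {
    unsigned              bins[DEMO_NBINS];
    demo_bin_descriptor_t desc;
} demo_cache_t;

void
demo_cache_register(demo_cache_t *cache, demo_bin_descriptor_t **head) {
    cache->desc.bins = cache->bins;
    cache->desc.next = *head;
    *head = &cache->desc;
}
```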
54698diff --git a/jemalloc/include/jemalloc/internal/tcache_types.h b/jemalloc/include/jemalloc/internal/tcache_types.h
54699deleted file mode 100644
54700index 583677e..0000000
54701--- a/jemalloc/include/jemalloc/internal/tcache_types.h
54702+++ /dev/null
54703@@ -1,35 +0,0 @@
54704-#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H
54705-#define JEMALLOC_INTERNAL_TCACHE_TYPES_H
54706-
54707-#include "jemalloc/internal/sc.h"
54708-
54709-typedef struct tcache_slow_s tcache_slow_t;
54710-typedef struct tcache_s tcache_t;
54711-typedef struct tcaches_s tcaches_t;
54712-
54713-/*
54714- * tcache pointers close to NULL are used to encode state information that is
54715- * used for two purposes: preventing thread caching on a per thread basis and
54716- * cleaning up during thread shutdown.
54717- */
54718-#define TCACHE_STATE_DISABLED		((tcache_t *)(uintptr_t)1)
54719-#define TCACHE_STATE_REINCARNATED	((tcache_t *)(uintptr_t)2)
54720-#define TCACHE_STATE_PURGATORY		((tcache_t *)(uintptr_t)3)
54721-#define TCACHE_STATE_MAX		TCACHE_STATE_PURGATORY
54722-
54723-/* Used in TSD static initializer only. Real init in tsd_tcache_data_init(). */
54724-#define TCACHE_ZERO_INITIALIZER {0}
54725-#define TCACHE_SLOW_ZERO_INITIALIZER {0}
54726-
54727-/* Used in TSD static initializer only. Will be initialized to opt_tcache. */
54728-#define TCACHE_ENABLED_ZERO_INITIALIZER false
54729-
54730-/* Used for explicit tcache only. Means flushed but not destroyed. */
54731-#define TCACHES_ELM_NEED_REINIT ((tcache_t *)(uintptr_t)1)
54732-
54733-#define TCACHE_LG_MAXCLASS_LIMIT 23 /* tcache_maxclass = 8M */
54734-#define TCACHE_MAXCLASS_LIMIT ((size_t)1 << TCACHE_LG_MAXCLASS_LIMIT)
54735-#define TCACHE_NBINS_MAX (SC_NBINS + SC_NGROUP *			\
54736-    (TCACHE_LG_MAXCLASS_LIMIT - SC_LG_LARGE_MINCLASS) + 1)
54737-
54738-#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */
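Two things above are worth spelling out. First, as a worked example of TCACHE_NBINS_MAX: with typical 64-bit, 4 KiB-page values (SC_NBINS = 36, SC_NGROUP = 4, SC_LG_LARGE_MINCLASS = 14 -- assumed here, the real values are configure-dependent) it comes to 36 + 4 * (23 - 14) + 1 = 73 bins. Second, the TCACHE_STATE_* sentinels rely on the fact that tiny integer values can never be valid pointers, so a single comparison separates "real tcache" from every special state. A sketch of that encoding with hypothetical names:

```c
#include <stdbool.h>
#include <stdint.h>

typedef struct demo_cache_s demo_cache_t;

#define DEMO_CACHE_DISABLED     ((demo_cache_t *)(uintptr_t)1)
#define DEMO_CACHE_REINCARNATED ((demo_cache_t *)(uintptr_t)2)
#define DEMO_CACHE_PURGATORY    ((demo_cache_t *)(uintptr_t)3)
#define DEMO_CACHE_STATE_MAX    DEMO_CACHE_PURGATORY

static inline bool
demo_cache_available(demo_cache_t *cache) {
    /* NULL and all sentinel states fall at or below the threshold. */
    return (uintptr_t)cache > (uintptr_t)DEMO_CACHE_STATE_MAX;
}
```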
54739diff --git a/jemalloc/include/jemalloc/internal/test_hooks.h b/jemalloc/include/jemalloc/internal/test_hooks.h
54740deleted file mode 100644
54741index 3d530b5..0000000
54742--- a/jemalloc/include/jemalloc/internal/test_hooks.h
54743+++ /dev/null
54744@@ -1,24 +0,0 @@
54745-#ifndef JEMALLOC_INTERNAL_TEST_HOOKS_H
54746-#define JEMALLOC_INTERNAL_TEST_HOOKS_H
54747-
54748-extern JEMALLOC_EXPORT void (*test_hooks_arena_new_hook)();
54749-extern JEMALLOC_EXPORT void (*test_hooks_libc_hook)();
54750-
54751-#if defined(JEMALLOC_JET) || defined(JEMALLOC_UNIT_TEST)
54752-#  define JEMALLOC_TEST_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
54753-
54754-#  define open JEMALLOC_TEST_HOOK(open, test_hooks_libc_hook)
54755-#  define read JEMALLOC_TEST_HOOK(read, test_hooks_libc_hook)
54756-#  define write JEMALLOC_TEST_HOOK(write, test_hooks_libc_hook)
54757-#  define readlink JEMALLOC_TEST_HOOK(readlink, test_hooks_libc_hook)
54758-#  define close JEMALLOC_TEST_HOOK(close, test_hooks_libc_hook)
54759-#  define creat JEMALLOC_TEST_HOOK(creat, test_hooks_libc_hook)
54760-#  define secure_getenv JEMALLOC_TEST_HOOK(secure_getenv, test_hooks_libc_hook)
54761-/* Note that this is undef'd and re-define'd in src/prof.c. */
54762-#  define _Unwind_Backtrace JEMALLOC_TEST_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
54763-#else
54764-#  define JEMALLOC_TEST_HOOK(fn, hook) fn
54765-#endif
54766-
54767-
54768-#endif /* JEMALLOC_INTERNAL_TEST_HOOKS_H */
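The JEMALLOC_TEST_HOOK macro uses the comma operator so a wrapped name still evaluates to the original function while optionally firing a test callback first; because the macro is self-referential, the name inside its own expansion is not re-expanded. A self-contained sketch of the same trick on a made-up write_log() function (not one of the libc symbols wrapped above):

```c
#include <stdio.h>

static void (*demo_test_hook)(void) = NULL;

/* Evaluates to `fn` itself, after invoking `hook` if one is installed. */
#define DEMO_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)

static void
write_log(const char *msg) {
    fprintf(stderr, "%s\n", msg);
}

/* From here on, every write_log() call site goes through the hook; the
 * write_log inside the expansion is left alone (self-referential macro). */
#define write_log DEMO_HOOK(write_log, demo_test_hook)

static void
observed(void) {
    fputs("hook fired\n", stderr);
}

int
main(void) {
    write_log("no hook installed");
    demo_test_hook = observed;
    write_log("hook installed");    /* prints "hook fired" first */
    return 0;
}
```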
54769diff --git a/jemalloc/include/jemalloc/internal/thread_event.h b/jemalloc/include/jemalloc/internal/thread_event.h
54770deleted file mode 100644
54771index 2f4e1b3..0000000
54772--- a/jemalloc/include/jemalloc/internal/thread_event.h
54773+++ /dev/null
54774@@ -1,301 +0,0 @@
54775-#ifndef JEMALLOC_INTERNAL_THREAD_EVENT_H
54776-#define JEMALLOC_INTERNAL_THREAD_EVENT_H
54777-
54778-#include "jemalloc/internal/tsd.h"
54779-
54780-/* "te" is short for "thread_event" */
54781-
54782-/*
54783- * TE_MIN_START_WAIT should not exceed the minimal allocation usize.
54784- */
54785-#define TE_MIN_START_WAIT ((uint64_t)1U)
54786-#define TE_MAX_START_WAIT UINT64_MAX
54787-
54788-/*
54789- * Maximum threshold on thread_(de)allocated_next_event_fast, so that there is
54790- * no need to check overflow in malloc fast path. (The allocation size in malloc
54791- * fast path never exceeds SC_LOOKUP_MAXCLASS.)
54792- */
54793-#define TE_NEXT_EVENT_FAST_MAX (UINT64_MAX - SC_LOOKUP_MAXCLASS + 1U)
54794-
54795-/*
54796- * The max interval helps make sure that malloc stays on the fast path in the
54797- * common case, i.e. thread_allocated < thread_allocated_next_event_fast.  When
54798- * thread_allocated is within an event's distance to TE_NEXT_EVENT_FAST_MAX
54799- * above, thread_allocated_next_event_fast is wrapped around and we fall back to
54800- * the medium-fast path. The max interval makes sure that we're not staying on
54801- * the fallback case for too long, even if there's no active event or if all
54802- * active events have long wait times.
54803- */
54804-#define TE_MAX_INTERVAL ((uint64_t)(4U << 20))
54805-
54806-/*
54807- * Invalid elapsed time, for situations where elapsed time is not needed.  See
54808- * comments in thread_event.c for more info.
54809- */
54810-#define TE_INVALID_ELAPSED UINT64_MAX
54811-
54812-typedef struct te_ctx_s {
54813-	bool is_alloc;
54814-	uint64_t *current;
54815-	uint64_t *last_event;
54816-	uint64_t *next_event;
54817-	uint64_t *next_event_fast;
54818-} te_ctx_t;
54819-
54820-void te_assert_invariants_debug(tsd_t *tsd);
54821-void te_event_trigger(tsd_t *tsd, te_ctx_t *ctx);
54822-void te_recompute_fast_threshold(tsd_t *tsd);
54823-void tsd_te_init(tsd_t *tsd);
54824-
54825-/*
54826- * List of all events, in the following format:
54827- *  E(event,		(condition), is_alloc_event)
54828- */
54829-#define ITERATE_OVER_ALL_EVENTS						\
54830-    E(tcache_gc,		(opt_tcache_gc_incr_bytes > 0), true)	\
54831-    E(prof_sample,		(config_prof && opt_prof), true)  	\
54832-    E(stats_interval,		(opt_stats_interval >= 0), true)   	\
54833-    E(tcache_gc_dalloc,		(opt_tcache_gc_incr_bytes > 0), false)	\
54834-    E(peak_alloc,		config_stats, true)			\
54835-    E(peak_dalloc,		config_stats, false)
54836-
54837-#define E(event, condition_unused, is_alloc_event_unused)		\
54838-    C(event##_event_wait)
54839-
54840-/* List of all thread event counters. */
54841-#define ITERATE_OVER_ALL_COUNTERS					\
54842-    C(thread_allocated)							\
54843-    C(thread_allocated_last_event)					\
54844-    ITERATE_OVER_ALL_EVENTS						\
54845-    C(prof_sample_last_event)						\
54846-    C(stats_interval_last_event)
54847-
54848-/* Getters directly wrap TSD getters. */
54849-#define C(counter)							\
54850-JEMALLOC_ALWAYS_INLINE uint64_t						\
54851-counter##_get(tsd_t *tsd) {						\
54852-	return tsd_##counter##_get(tsd);				\
54853-}
54854-
54855-ITERATE_OVER_ALL_COUNTERS
54856-#undef C
54857-
54858-/*
54859- * Setters call the TSD pointer getters rather than the TSD setters, so that
54860- * the counters can be modified even when TSD state is reincarnated or
54861- * minimal_initialized: if an event is triggered in such cases, we will
54862- * temporarily delay the event and let it be immediately triggered at the next
54863- * allocation call.
54864- */
54865-#define C(counter)							\
54866-JEMALLOC_ALWAYS_INLINE void						\
54867-counter##_set(tsd_t *tsd, uint64_t v) {					\
54868-	*tsd_##counter##p_get(tsd) = v;					\
54869-}
54870-
54871-ITERATE_OVER_ALL_COUNTERS
54872-#undef C
54873-
54874-/*
54875- * For generating _event_wait getter / setter functions for each individual
54876- * event.
54877- */
54878-#undef E
54879-
54880-/*
54881- * The malloc and free fastpath getters -- use the unsafe getters since tsd may
54882- * be non-nominal, in which case the fast_threshold will be set to 0.  This
54883- * allows checking for events and tsd non-nominal in a single branch.
54884- *
54885- * Note that these can only be used on the fastpath.
54886- */
54887-JEMALLOC_ALWAYS_INLINE void
54888-te_malloc_fastpath_ctx(tsd_t *tsd, uint64_t *allocated, uint64_t *threshold) {
54889-	*allocated = *tsd_thread_allocatedp_get_unsafe(tsd);
54890-	*threshold = *tsd_thread_allocated_next_event_fastp_get_unsafe(tsd);
54891-	assert(*threshold <= TE_NEXT_EVENT_FAST_MAX);
54892-}
54893-
54894-JEMALLOC_ALWAYS_INLINE void
54895-te_free_fastpath_ctx(tsd_t *tsd, uint64_t *deallocated, uint64_t *threshold) {
54896-	/* Unsafe getters since this may happen before tsd_init. */
54897-	*deallocated = *tsd_thread_deallocatedp_get_unsafe(tsd);
54898-	*threshold = *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd);
54899-	assert(*threshold <= TE_NEXT_EVENT_FAST_MAX);
54900-}
54901-
54902-JEMALLOC_ALWAYS_INLINE bool
54903-te_ctx_is_alloc(te_ctx_t *ctx) {
54904-	return ctx->is_alloc;
54905-}
54906-
54907-JEMALLOC_ALWAYS_INLINE uint64_t
54908-te_ctx_current_bytes_get(te_ctx_t *ctx) {
54909-	return *ctx->current;
54910-}
54911-
54912-JEMALLOC_ALWAYS_INLINE void
54913-te_ctx_current_bytes_set(te_ctx_t *ctx, uint64_t v) {
54914-	*ctx->current = v;
54915-}
54916-
54917-JEMALLOC_ALWAYS_INLINE uint64_t
54918-te_ctx_last_event_get(te_ctx_t *ctx) {
54919-	return *ctx->last_event;
54920-}
54921-
54922-JEMALLOC_ALWAYS_INLINE void
54923-te_ctx_last_event_set(te_ctx_t *ctx, uint64_t v) {
54924-	*ctx->last_event = v;
54925-}
54926-
54927-/* Below 3 for next_event_fast. */
54928-JEMALLOC_ALWAYS_INLINE uint64_t
54929-te_ctx_next_event_fast_get(te_ctx_t *ctx) {
54930-	uint64_t v = *ctx->next_event_fast;
54931-	assert(v <= TE_NEXT_EVENT_FAST_MAX);
54932-	return v;
54933-}
54934-
54935-JEMALLOC_ALWAYS_INLINE void
54936-te_ctx_next_event_fast_set(te_ctx_t *ctx, uint64_t v) {
54937-	assert(v <= TE_NEXT_EVENT_FAST_MAX);
54938-	*ctx->next_event_fast = v;
54939-}
54940-
54941-JEMALLOC_ALWAYS_INLINE void
54942-te_next_event_fast_set_non_nominal(tsd_t *tsd) {
54943-	/*
54944-	 * Set the fast thresholds to zero when tsd is non-nominal.  Use the
54945-	 * unsafe getter as this may get called during tsd init and clean up.
54946-	 */
54947-	*tsd_thread_allocated_next_event_fastp_get_unsafe(tsd) = 0;
54948-	*tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd) = 0;
54949-}
54950-
54951-/* For next_event.  Setter also updates the fast threshold. */
54952-JEMALLOC_ALWAYS_INLINE uint64_t
54953-te_ctx_next_event_get(te_ctx_t *ctx) {
54954-	return *ctx->next_event;
54955-}
54956-
54957-JEMALLOC_ALWAYS_INLINE void
54958-te_ctx_next_event_set(tsd_t *tsd, te_ctx_t *ctx, uint64_t v) {
54959-	*ctx->next_event = v;
54960-	te_recompute_fast_threshold(tsd);
54961-}
54962-
54963-/*
54964- * In debug mode, this function checks that the thread event counters are in
54965- * a consistent state; this consistency forms the invariant that we rely on
54966- * and must re-establish before and after each round of thread event handling.
54967- * The invariants are only temporarily violated in the middle of
54968- * te_event_advance() if an event is triggered (the te_event_trigger() call at
54969- * the end will restore the invariants).
54970- */
54971-JEMALLOC_ALWAYS_INLINE void
54972-te_assert_invariants(tsd_t *tsd) {
54973-	if (config_debug) {
54974-		te_assert_invariants_debug(tsd);
54975-	}
54976-}
54977-
54978-JEMALLOC_ALWAYS_INLINE void
54979-te_ctx_get(tsd_t *tsd, te_ctx_t *ctx, bool is_alloc) {
54980-	ctx->is_alloc = is_alloc;
54981-	if (is_alloc) {
54982-		ctx->current = tsd_thread_allocatedp_get(tsd);
54983-		ctx->last_event = tsd_thread_allocated_last_eventp_get(tsd);
54984-		ctx->next_event = tsd_thread_allocated_next_eventp_get(tsd);
54985-		ctx->next_event_fast =
54986-		    tsd_thread_allocated_next_event_fastp_get(tsd);
54987-	} else {
54988-		ctx->current = tsd_thread_deallocatedp_get(tsd);
54989-		ctx->last_event = tsd_thread_deallocated_last_eventp_get(tsd);
54990-		ctx->next_event = tsd_thread_deallocated_next_eventp_get(tsd);
54991-		ctx->next_event_fast =
54992-		    tsd_thread_deallocated_next_event_fastp_get(tsd);
54993-	}
54994-}
54995-
54996-/*
54997- * The lookahead functionality lets an event look ahead, i.e. determine,
54998- * without touching the event counters, whether it would be triggered.  The
54999- * event counters are not advanced until the end of the
55000- * allocation / deallocation calls, so the lookahead can be useful if some
55001- * preparation work for some event must be done early in the allocation /
55002- * deallocation calls.
55003- *
55004- * Currently only the profiling sampling event needs the lookahead
55005- * functionality, so we don't yet define general purpose lookahead functions.
55006- *
55007- * "Surplus" refers to the number of bytes beyond what is needed to trigger
55008- * an event, which is generally a useful quantity to have on hand when the
55009- * lookahead is performed.
55010- */
55011-
55012-JEMALLOC_ALWAYS_INLINE bool
55013-te_prof_sample_event_lookahead_surplus(tsd_t *tsd, size_t usize,
55014-    size_t *surplus) {
55015-	if (surplus != NULL) {
55016-		/*
55017-		 * This is a dead store: the surplus will be overwritten before
55018-		 * any read.  The initialization suppresses compiler warnings.
55019-		 * Meanwhile, using SIZE_MAX to initialize is good for
55020-		 * debugging purpose, because a valid surplus value is strictly
55021-		 * less than usize, which is at most SIZE_MAX.
55022-		 */
55023-		*surplus = SIZE_MAX;
55024-	}
55025-	if (unlikely(!tsd_nominal(tsd) || tsd_reentrancy_level_get(tsd) > 0)) {
55026-		return false;
55027-	}
55028-	/* The subtraction is intentionally susceptible to underflow. */
55029-	uint64_t accumbytes = tsd_thread_allocated_get(tsd) + usize -
55030-	    tsd_thread_allocated_last_event_get(tsd);
55031-	uint64_t sample_wait = tsd_prof_sample_event_wait_get(tsd);
55032-	if (accumbytes < sample_wait) {
55033-		return false;
55034-	}
55035-	assert(accumbytes - sample_wait < (uint64_t)usize);
55036-	if (surplus != NULL) {
55037-		*surplus = (size_t)(accumbytes - sample_wait);
55038-	}
55039-	return true;
55040-}
55041-
55042-JEMALLOC_ALWAYS_INLINE bool
55043-te_prof_sample_event_lookahead(tsd_t *tsd, size_t usize) {
55044-	return te_prof_sample_event_lookahead_surplus(tsd, usize, NULL);
55045-}
55046-
55047-JEMALLOC_ALWAYS_INLINE void
55048-te_event_advance(tsd_t *tsd, size_t usize, bool is_alloc) {
55049-	te_assert_invariants(tsd);
55050-
55051-	te_ctx_t ctx;
55052-	te_ctx_get(tsd, &ctx, is_alloc);
55053-
55054-	uint64_t bytes_before = te_ctx_current_bytes_get(&ctx);
55055-	te_ctx_current_bytes_set(&ctx, bytes_before + usize);
55056-
55057-	/* The subtraction is intentionally susceptible to underflow. */
55058-	if (likely(usize < te_ctx_next_event_get(&ctx) - bytes_before)) {
55059-		te_assert_invariants(tsd);
55060-	} else {
55061-		te_event_trigger(tsd, &ctx);
55062-	}
55063-}
55064-
55065-JEMALLOC_ALWAYS_INLINE void
55066-thread_dalloc_event(tsd_t *tsd, size_t usize) {
55067-	te_event_advance(tsd, usize, false);
55068-}
55069-
55070-JEMALLOC_ALWAYS_INLINE void
55071-thread_alloc_event(tsd_t *tsd, size_t usize) {
55072-	te_event_advance(tsd, usize, true);
55073-}
55074-
55075-#endif /* JEMALLOC_INTERNAL_THREAD_EVENT_H */
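The counter scheme above boils down to: accumulate bytes on every (de)allocation, and fire once the running total crosses next_event. A standalone sketch of that loop, including the deliberately underflow-tolerant unsigned comparison from te_event_advance(); the 64 KiB wait and all names are illustrative.

```c
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t current;      /* bytes allocated so far */
    uint64_t last_event;   /* total at the previous trigger */
    uint64_t next_event;   /* total at which the next trigger fires */
} demo_te_ctx_t;

#define DEMO_EVENT_WAIT ((uint64_t)64 * 1024)   /* fire every 64 KiB */

static void
demo_event_trigger(demo_te_ctx_t *ctx) {
    uint64_t elapsed = ctx->current - ctx->last_event;
    printf("event: %llu bytes since last trigger\n",
        (unsigned long long)elapsed);
    ctx->last_event = ctx->current;
    ctx->next_event = ctx->current + DEMO_EVENT_WAIT;
}

static void
demo_event_advance(demo_te_ctx_t *ctx, uint64_t usize) {
    uint64_t bytes_before = ctx->current;
    ctx->current = bytes_before + usize;
    /* Unsigned subtraction; mirrors the comparison in te_event_advance(). */
    if (usize >= ctx->next_event - bytes_before) {
        demo_event_trigger(ctx);
    }
}

int
main(void) {
    demo_te_ctx_t ctx = {0, 0, DEMO_EVENT_WAIT};
    for (int i = 0; i < 2000; i++) {
        demo_event_advance(&ctx, 100);  /* fires roughly every 655 calls */
    }
    return 0;
}
```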
55076diff --git a/jemalloc/include/jemalloc/internal/ticker.h b/jemalloc/include/jemalloc/internal/ticker.h
55077deleted file mode 100644
55078index 6b51dde..0000000
55079--- a/jemalloc/include/jemalloc/internal/ticker.h
55080+++ /dev/null
55081@@ -1,175 +0,0 @@
55082-#ifndef JEMALLOC_INTERNAL_TICKER_H
55083-#define JEMALLOC_INTERNAL_TICKER_H
55084-
55085-#include "jemalloc/internal/prng.h"
55086-#include "jemalloc/internal/util.h"
55087-
55088-/**
55089- * A ticker makes it easy to count-down events until some limit.  You
55090- * ticker_init the ticker to trigger every nticks events.  You then notify it
55091- * that an event has occurred with calls to ticker_tick (or that nticks events
55092- * have occurred with a call to ticker_ticks), which will return true (and reset
55093- * the counter) if the countdown hit zero.
55094- */
55095-typedef struct ticker_s ticker_t;
55096-struct ticker_s {
55097-	int32_t tick;
55098-	int32_t nticks;
55099-};
55100-
55101-static inline void
55102-ticker_init(ticker_t *ticker, int32_t nticks) {
55103-	ticker->tick = nticks;
55104-	ticker->nticks = nticks;
55105-}
55106-
55107-static inline void
55108-ticker_copy(ticker_t *ticker, const ticker_t *other) {
55109-	*ticker = *other;
55110-}
55111-
55112-static inline int32_t
55113-ticker_read(const ticker_t *ticker) {
55114-	return ticker->tick;
55115-}
55116-
55117-/*
55118- * Not intended to be a public API.  Unfortunately, on x86, neither gcc nor
55119- * clang seems smart enough to turn
55120- *   ticker->tick -= nticks;
55121- *   if (unlikely(ticker->tick < 0)) {
55122- *     fixup ticker
55123- *     return true;
55124- *   }
55125- *   return false;
55126- * into
55127- *   subq %nticks_reg, (%ticker_reg)
55128- *   js fixup ticker
55129- *
55130- * unless we force "fixup ticker" out of line.  In that case, gcc gets it right,
55131- * but clang now does worse than before.  So, on x86 with gcc, we force it out
55132- * of line, but otherwise let the inlining occur.  Ordinarily this wouldn't be
55133- * worth the hassle, but this is on the fast path of both malloc and free (via
55134- * tcache_event).
55135- */
55136-#if defined(__GNUC__) && !defined(__clang__)				\
55137-    && (defined(__x86_64__) || defined(__i386__))
55138-JEMALLOC_NOINLINE
55139-#endif
55140-static bool
55141-ticker_fixup(ticker_t *ticker) {
55142-	ticker->tick = ticker->nticks;
55143-	return true;
55144-}
55145-
55146-static inline bool
55147-ticker_ticks(ticker_t *ticker, int32_t nticks) {
55148-	ticker->tick -= nticks;
55149-	if (unlikely(ticker->tick < 0)) {
55150-		return ticker_fixup(ticker);
55151-	}
55152-	return false;
55153-}
55154-
55155-static inline bool
55156-ticker_tick(ticker_t *ticker) {
55157-	return ticker_ticks(ticker, 1);
55158-}
55159-
55160-/*
55161- * Try to tick.  If ticker would fire, return true, but rely on
55162- * slowpath to reset ticker.
55163- */
55164-static inline bool
55165-ticker_trytick(ticker_t *ticker) {
55166-	--ticker->tick;
55167-	if (unlikely(ticker->tick < 0)) {
55168-		return true;
55169-	}
55170-	return false;
55171-}
55172-
55173-/*
55174- * The ticker_geom_t is much like the ticker_t, except that instead of ticker
55175- * having a constant countdown, it has an approximate one; each tick has
55176- * approximately a 1/nticks chance of triggering the count.
55177- *
55178- * The motivation is in triggering arena decay.  With a naive strategy, each
55179- * thread would maintain a ticker per arena, and check if decay is necessary
55180- * each time that the arena's ticker fires.  This has two costs:
55181- * - Since under reasonable assumptions both threads and arenas can scale
55182- *   linearly with the number of CPUs, maintaining per-arena data in each thread
55183- *   scales quadratically with the number of CPUs.
55184- * - These tickers are often a cache miss down tcache flush pathways.
55185- *
55186- * By giving each tick a 1/nticks chance of firing, we still maintain the same
55187- * average number of ticks-until-firing per arena, with only a single ticker's
55188- * worth of metadata.
55189- */
55190-
55191-/* See ticker.c for an explanation of these constants. */
55192-#define TICKER_GEOM_NBITS 6
55193-#define TICKER_GEOM_MUL 61
55194-extern const uint8_t ticker_geom_table[1 << TICKER_GEOM_NBITS];
55195-
55196-/* Not actually any different from ticker_t; just for type safety. */
55197-typedef struct ticker_geom_s ticker_geom_t;
55198-struct ticker_geom_s {
55199-	int32_t tick;
55200-	int32_t nticks;
55201-};
55202-
55203-/*
55204- * Just pick the average delay for the first counter.  We're more concerned with
55205- * the behavior over long periods of time rather than the exact timing of the
55206- * initial ticks.
55207- */
55208-#define TICKER_GEOM_INIT(nticks) {nticks, nticks}
55209-
55210-static inline void
55211-ticker_geom_init(ticker_geom_t *ticker, int32_t nticks) {
55212-	/*
55213-	 * Make sure there's no overflow possible.  This shouldn't really be a
55214-	 * problem for reasonable nticks choices, which are all static and
55215-	 * relatively small.
55216-	 */
55217-	assert((uint64_t)nticks * (uint64_t)255 / (uint64_t)TICKER_GEOM_MUL
55218-	    <= (uint64_t)INT32_MAX);
55219-	ticker->tick = nticks;
55220-	ticker->nticks = nticks;
55221-}
55222-
55223-static inline int32_t
55224-ticker_geom_read(const ticker_geom_t *ticker) {
55225-	return ticker->tick;
55226-}
55227-
55228-/* Same deal as above. */
55229-#if defined(__GNUC__) && !defined(__clang__)				\
55230-    && (defined(__x86_64__) || defined(__i386__))
55231-JEMALLOC_NOINLINE
55232-#endif
55233-static bool
55234-ticker_geom_fixup(ticker_geom_t *ticker, uint64_t *prng_state) {
55235-	uint64_t idx = prng_lg_range_u64(prng_state, TICKER_GEOM_NBITS);
55236-	ticker->tick = (uint32_t)(
55237-	    (uint64_t)ticker->nticks * (uint64_t)ticker_geom_table[idx]
55238-	    / (uint64_t)TICKER_GEOM_MUL);
55239-	return true;
55240-}
55241-
55242-static inline bool
55243-ticker_geom_ticks(ticker_geom_t *ticker, uint64_t *prng_state, int32_t nticks) {
55244-	ticker->tick -= nticks;
55245-	if (unlikely(ticker->tick < 0)) {
55246-		return ticker_geom_fixup(ticker, prng_state);
55247-	}
55248-	return false;
55249-}
55250-
55251-static inline bool
55252-ticker_geom_tick(ticker_geom_t *ticker, uint64_t *prng_state) {
55253-	return ticker_geom_ticks(ticker, prng_state, 1);
55254-}
55255-
55256-#endif /* JEMALLOC_INTERNAL_TICKER_H */
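A minimal sketch of how callers use the two tickers above: a fixed countdown for "do X every N events", and the randomized variant where each event has roughly a 1/N chance of firing so many logical tickers can share one counter's worth of state. The reset distribution here is a uniform stand-in with the right mean, not the table-driven geometric distribution the real ticker_geom uses, and the PRNG is an arbitrary xorshift rather than prng_lg_range_u64().

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t tick, nticks; } demo_ticker_t;

static bool
demo_ticker_tick(demo_ticker_t *t) {
    if (--t->tick < 0) {
        t->tick = t->nticks;    /* fixed reset */
        return true;
    }
    return false;
}

static uint64_t
demo_prng(uint64_t *state) {
    *state ^= *state << 13;
    *state ^= *state >> 7;
    *state ^= *state << 17;
    return *state;
}

/* Randomized flavor: reset to a value whose mean is nticks. */
static bool
demo_ticker_rand_tick(demo_ticker_t *t, uint64_t *prng_state) {
    if (--t->tick < 0) {
        /* Uniform in [0, 2*nticks) keeps the average period near nticks. */
        t->tick = (int32_t)(demo_prng(prng_state)
            % (2 * (uint64_t)t->nticks));
        return true;
    }
    return false;
}

int
main(void) {
    demo_ticker_t fixed = {100, 100}, rnd = {100, 100};
    uint64_t prng = 0x9e3779b97f4a7c15ULL;
    int fixed_fires = 0, rand_fires = 0;
    for (int i = 0; i < 100000; i++) {
        fixed_fires += demo_ticker_tick(&fixed);
        rand_fires += demo_ticker_rand_tick(&rnd, &prng);
    }
    printf("fixed: %d fires, randomized: %d fires (both roughly 1000)\n",
        fixed_fires, rand_fires);
    return 0;
}
```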
55257diff --git a/jemalloc/include/jemalloc/internal/tsd.h b/jemalloc/include/jemalloc/internal/tsd.h
55258deleted file mode 100644
55259index 66d6882..0000000
55260--- a/jemalloc/include/jemalloc/internal/tsd.h
55261+++ /dev/null
55262@@ -1,518 +0,0 @@
55263-#ifndef JEMALLOC_INTERNAL_TSD_H
55264-#define JEMALLOC_INTERNAL_TSD_H
55265-
55266-#include "jemalloc/internal/activity_callback.h"
55267-#include "jemalloc/internal/arena_types.h"
55268-#include "jemalloc/internal/assert.h"
55269-#include "jemalloc/internal/bin_types.h"
55270-#include "jemalloc/internal/jemalloc_internal_externs.h"
55271-#include "jemalloc/internal/peak.h"
55272-#include "jemalloc/internal/prof_types.h"
55273-#include "jemalloc/internal/ql.h"
55274-#include "jemalloc/internal/rtree_tsd.h"
55275-#include "jemalloc/internal/tcache_types.h"
55276-#include "jemalloc/internal/tcache_structs.h"
55277-#include "jemalloc/internal/util.h"
55278-#include "jemalloc/internal/witness.h"
55279-
55280-/*
55281- * Thread-Specific-Data layout
55282- *
55283- * At least some thread-local data gets touched on the fast-path of almost all
55284- * malloc operations.  But much of it is only necessary down slow-paths, or
55285- * testing.  We want to colocate the fast-path data so that it can live on the
55286- * same cacheline if possible.  So we define three tiers of hotness:
55287- * TSD_DATA_FAST: Touched on the alloc/dalloc fast paths.
55288- * TSD_DATA_SLOW: Touched down slow paths.  "Slow" here is sort of general;
55289- *     there are "semi-slow" paths like "not a sized deallocation, but can still
55290- *     live in the tcache".  We'll want to keep these closer to the fast-path
55291- *     data.
55292- * TSD_DATA_SLOWER: Only touched in test or debug modes, or not touched at all.
55293- *
55294- * An additional concern is that the larger tcache bins won't be used (we have a
55295- * bin per size class, but by default only cache relatively small objects).  So
55296- * the earlier bins are in the TSD_DATA_FAST tier, but the later ones are in the
55297- * TSD_DATA_SLOWER tier.
55298- *
55299- * As a result of all this, we put the slow data first, then the fast data, then
55300- * the slower data, while keeping the tcache as the last element of the fast
55301- * data (so that the fast -> slower transition happens midway through the
55302- * tcache).  While we don't yet play alignment tricks to guarantee it, this
55303- * increases our odds of getting some cache/page locality on fast paths.
55304- */
55305-
55306-#ifdef JEMALLOC_JET
55307-typedef void (*test_callback_t)(int *);
55308-#  define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10
55309-#  define MALLOC_TEST_TSD \
55310-    O(test_data,		int,			int)		\
55311-    O(test_callback,		test_callback_t,	int)
55312-#  define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL
55313-#else
55314-#  define MALLOC_TEST_TSD
55315-#  define MALLOC_TEST_TSD_INITIALIZER
55316-#endif
55317-
55318-typedef ql_elm(tsd_t) tsd_link_t;
55319-
55320-/*  O(name,			type,			nullable type) */
55321-#define TSD_DATA_SLOW							\
55322-    O(tcache_enabled,		bool,			bool)		\
55323-    O(reentrancy_level,		int8_t,			int8_t)		\
55324-    O(thread_allocated_last_event,	uint64_t,	uint64_t)	\
55325-    O(thread_allocated_next_event,	uint64_t,	uint64_t)	\
55326-    O(thread_deallocated_last_event,	uint64_t,	uint64_t)	\
55327-    O(thread_deallocated_next_event,	uint64_t,	uint64_t)	\
55328-    O(tcache_gc_event_wait,	uint64_t,		uint64_t)	\
55329-    O(tcache_gc_dalloc_event_wait,	uint64_t,	uint64_t)	\
55330-    O(prof_sample_event_wait,	uint64_t,		uint64_t)	\
55331-    O(prof_sample_last_event,	uint64_t,		uint64_t)	\
55332-    O(stats_interval_event_wait,	uint64_t,	uint64_t)	\
55333-    O(stats_interval_last_event,	uint64_t,	uint64_t)	\
55334-    O(peak_alloc_event_wait,	uint64_t,		uint64_t)	\
55335-    O(peak_dalloc_event_wait,	uint64_t,	uint64_t)		\
55336-    O(prof_tdata,		prof_tdata_t *,		prof_tdata_t *)	\
55337-    O(prng_state,		uint64_t,		uint64_t)	\
55338-    O(san_extents_until_guard_small,	uint64_t,	uint64_t)	\
55339-    O(san_extents_until_guard_large,	uint64_t,	uint64_t)	\
55340-    O(iarena,			arena_t *,		arena_t *)	\
55341-    O(arena,			arena_t *,		arena_t *)	\
55342-    O(arena_decay_ticker,	ticker_geom_t,		ticker_geom_t)	\
55343-    O(sec_shard,		uint8_t,		uint8_t)	\
55344-    O(binshards,		tsd_binshards_t,	tsd_binshards_t)\
55345-    O(tsd_link,			tsd_link_t,		tsd_link_t)	\
55346-    O(in_hook,			bool,			bool)		\
55347-    O(peak,			peak_t,			peak_t)		\
55348-    O(activity_callback_thunk,	activity_callback_thunk_t,		\
55349-	activity_callback_thunk_t)					\
55350-    O(tcache_slow,		tcache_slow_t,		tcache_slow_t)	\
55351-    O(rtree_ctx,		rtree_ctx_t,		rtree_ctx_t)
55352-
55353-#define TSD_DATA_SLOW_INITIALIZER					\
55354-    /* tcache_enabled */	TCACHE_ENABLED_ZERO_INITIALIZER,	\
55355-    /* reentrancy_level */	0,					\
55356-    /* thread_allocated_last_event */	0,				\
55357-    /* thread_allocated_next_event */	0,				\
55358-    /* thread_deallocated_last_event */	0,				\
55359-    /* thread_deallocated_next_event */	0,				\
55360-    /* tcache_gc_event_wait */		0,				\
55361-    /* tcache_gc_dalloc_event_wait */	0,				\
55362-    /* prof_sample_event_wait */	0,				\
55363-    /* prof_sample_last_event */	0,				\
55364-    /* stats_interval_event_wait */	0,				\
55365-    /* stats_interval_last_event */	0,				\
55366-    /* peak_alloc_event_wait */		0,				\
55367-    /* peak_dalloc_event_wait */	0,				\
55368-    /* prof_tdata */		NULL,					\
55369-    /* prng_state */		0,					\
55370-    /* san_extents_until_guard_small */	0,				\
55371-    /* san_extents_until_guard_large */	0,				\
55372-    /* iarena */		NULL,					\
55373-    /* arena */			NULL,					\
55374-    /* arena_decay_ticker */						\
55375-	TICKER_GEOM_INIT(ARENA_DECAY_NTICKS_PER_UPDATE),		\
55376-    /* sec_shard */		(uint8_t)-1,				\
55377-    /* binshards */		TSD_BINSHARDS_ZERO_INITIALIZER,		\
55378-    /* tsd_link */		{NULL},					\
55379-    /* in_hook */		false,					\
55380-    /* peak */			PEAK_INITIALIZER,			\
55381-    /* activity_callback_thunk */					\
55382-	ACTIVITY_CALLBACK_THUNK_INITIALIZER,				\
55383-    /* tcache_slow */		TCACHE_SLOW_ZERO_INITIALIZER,		\
55384-    /* rtree_ctx */		RTREE_CTX_INITIALIZER,
55385-
55386-/*  O(name,			type,			nullable type) */
55387-#define TSD_DATA_FAST							\
55388-    O(thread_allocated,		uint64_t,		uint64_t)	\
55389-    O(thread_allocated_next_event_fast,	uint64_t,	uint64_t)	\
55390-    O(thread_deallocated,	uint64_t,		uint64_t)	\
55391-    O(thread_deallocated_next_event_fast, uint64_t,	uint64_t)	\
55392-    O(tcache,			tcache_t,		tcache_t)
55393-
55394-#define TSD_DATA_FAST_INITIALIZER					\
55395-    /* thread_allocated */	0,					\
55396-    /* thread_allocated_next_event_fast */ 0, 				\
55397-    /* thread_deallocated */	0,					\
55398-    /* thread_deallocated_next_event_fast */	0,			\
55399-    /* tcache */		TCACHE_ZERO_INITIALIZER,
55400-
55401-/*  O(name,			type,			nullable type) */
55402-#define TSD_DATA_SLOWER							\
55403-    O(witness_tsd,              witness_tsd_t,		witness_tsdn_t)	\
55404-    MALLOC_TEST_TSD
55405-
55406-#define TSD_DATA_SLOWER_INITIALIZER					\
55407-    /* witness */		WITNESS_TSD_INITIALIZER			\
55408-    /* test data */		MALLOC_TEST_TSD_INITIALIZER
55409-
55410-
55411-#define TSD_INITIALIZER {						\
55412-    				TSD_DATA_SLOW_INITIALIZER		\
55413-    /* state */			ATOMIC_INIT(tsd_state_uninitialized),	\
55414-    				TSD_DATA_FAST_INITIALIZER		\
55415-    				TSD_DATA_SLOWER_INITIALIZER		\
55416-}
55417-
55418-#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
55419-void _malloc_tsd_cleanup_register(bool (*f)(void));
55420-#endif
55421-
55422-void *malloc_tsd_malloc(size_t size);
55423-void malloc_tsd_dalloc(void *wrapper);
55424-tsd_t *malloc_tsd_boot0(void);
55425-void malloc_tsd_boot1(void);
55426-void tsd_cleanup(void *arg);
55427-tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal);
55428-void tsd_state_set(tsd_t *tsd, uint8_t new_state);
55429-void tsd_slow_update(tsd_t *tsd);
55430-void tsd_prefork(tsd_t *tsd);
55431-void tsd_postfork_parent(tsd_t *tsd);
55432-void tsd_postfork_child(tsd_t *tsd);
55433-
55434-/*
55435- * Call ..._inc when your module wants to take all threads down the slow paths,
55436- * and ..._dec when it no longer needs to.
55437- */
55438-void tsd_global_slow_inc(tsdn_t *tsdn);
55439-void tsd_global_slow_dec(tsdn_t *tsdn);
55440-bool tsd_global_slow();
55441-
55442-enum {
55443-	/* Common case --> jnz. */
55444-	tsd_state_nominal = 0,
55445-	/* Initialized but on slow path. */
55446-	tsd_state_nominal_slow = 1,
55447-	/*
55448-	 * Some thread has changed global state in such a way that all nominal
55449-	 * threads need to recompute their fast / slow status the next time they
55450-	 * get a chance.
55451-	 *
55452-	 * Any thread can change another thread's status *to* recompute, but
55453-	 * threads are the only ones who can change their status *from*
55454-	 * recompute.
55455-	 */
55456-	tsd_state_nominal_recompute = 2,
55457-	/*
55458-	 * The above nominal states should be lower values.  We use
55459-	 * tsd_nominal_max to separate nominal states from threads in the
55460-	 * process of being born / dying.
55461-	 */
55462-	tsd_state_nominal_max = 2,
55463-
55464-	/*
55465-	 * A thread might free() during its death as its only allocator action;
55466-	 * in such scenarios, we need tsd, but set up in such a way that no
55467-	 * cleanup is necessary.
55468-	 */
55469-	tsd_state_minimal_initialized = 3,
55470-	/* States during which we know we're in thread death. */
55471-	tsd_state_purgatory = 4,
55472-	tsd_state_reincarnated = 5,
55473-	/*
55474-	 * What it says on the tin; tsd that hasn't been initialized.  Note
55475-	 * that even when the tsd struct lives in TLS, we need to keep track
55476-	 * of stuff like whether or not our pthread destructors have been
55477-	 * scheduled, so this really truly is different than the nominal state.
55478-	 */
55479-	tsd_state_uninitialized = 6
55480-};
55481-
55482-/*
55483- * Some TSD accesses can only be done in a nominal state.  To enforce this, we
55484- * wrap TSD member access in a function that asserts on TSD state, and mangle
55485- * field names to prevent touching them accidentally.
55486- */
55487-#define TSD_MANGLE(n) cant_access_tsd_items_directly_use_a_getter_or_setter_##n
55488-
55489-#ifdef JEMALLOC_U8_ATOMICS
55490-#  define tsd_state_t atomic_u8_t
55491-#  define tsd_atomic_load atomic_load_u8
55492-#  define tsd_atomic_store atomic_store_u8
55493-#  define tsd_atomic_exchange atomic_exchange_u8
55494-#else
55495-#  define tsd_state_t atomic_u32_t
55496-#  define tsd_atomic_load atomic_load_u32
55497-#  define tsd_atomic_store atomic_store_u32
55498-#  define tsd_atomic_exchange atomic_exchange_u32
55499-#endif
55500-
55501-/* The actual tsd. */
55502-struct tsd_s {
55503-	/*
55504-	 * The contents should be treated as totally opaque outside the tsd
55505-	 * module.  Access any thread-local state through the getters and
55506-	 * setters below.
55507-	 */
55508-
55509-#define O(n, t, nt)							\
55510-	t TSD_MANGLE(n);
55511-
55512-	TSD_DATA_SLOW
55513-	/*
55514-	 * We manually limit the state to just a single byte, unless the 8-bit
55515-	 * atomics are unavailable (which is rare).
55516-	 */
55517-	tsd_state_t state;
55518-	TSD_DATA_FAST
55519-	TSD_DATA_SLOWER
55520-#undef O
55521-};
55522-
55523-JEMALLOC_ALWAYS_INLINE uint8_t
55524-tsd_state_get(tsd_t *tsd) {
55525-	/*
55526-	 * This should be atomic.  Unfortunately, compilers right now can't tell
55527-	 * that this can be done as a memory comparison, and forces a load into
55528-	 * a register that hurts fast-path performance.
55529-	 */
55530-	/* return atomic_load_u8(&tsd->state, ATOMIC_RELAXED); */
55531-	return *(uint8_t *)&tsd->state;
55532-}
55533-
55534-/*
55535- * Wrapper around tsd_t that makes it possible to avoid implicit conversion
55536- * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
55537- * explicitly converted to tsd_t, which is non-nullable.
55538- */
55539-struct tsdn_s {
55540-	tsd_t tsd;
55541-};
55542-#define TSDN_NULL ((tsdn_t *)0)
55543-JEMALLOC_ALWAYS_INLINE tsdn_t *
55544-tsd_tsdn(tsd_t *tsd) {
55545-	return (tsdn_t *)tsd;
55546-}
55547-
55548-JEMALLOC_ALWAYS_INLINE bool
55549-tsdn_null(const tsdn_t *tsdn) {
55550-	return tsdn == NULL;
55551-}
55552-
55553-JEMALLOC_ALWAYS_INLINE tsd_t *
55554-tsdn_tsd(tsdn_t *tsdn) {
55555-	assert(!tsdn_null(tsdn));
55556-
55557-	return &tsdn->tsd;
55558-}
55559-
55560-/*
55561- * We put the platform-specific data declarations and inlines into their own
55562- * header files to avoid cluttering this file.  They define tsd_boot0,
55563- * tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set.
55564- */
55565-#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
55566-#include "jemalloc/internal/tsd_malloc_thread_cleanup.h"
55567-#elif (defined(JEMALLOC_TLS))
55568-#include "jemalloc/internal/tsd_tls.h"
55569-#elif (defined(_WIN32))
55570-#include "jemalloc/internal/tsd_win.h"
55571-#else
55572-#include "jemalloc/internal/tsd_generic.h"
55573-#endif
55574-
55575-/*
55576- * tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of
55577- * foo.  This omits some safety checks, and so can be used during tsd
55578- * initialization and cleanup.
55579- */
55580-#define O(n, t, nt)							\
55581-JEMALLOC_ALWAYS_INLINE t *						\
55582-tsd_##n##p_get_unsafe(tsd_t *tsd) {					\
55583-	return &tsd->TSD_MANGLE(n);					\
55584-}
55585-TSD_DATA_SLOW
55586-TSD_DATA_FAST
55587-TSD_DATA_SLOWER
55588-#undef O
55589-
55590-/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */
55591-#define O(n, t, nt)							\
55592-JEMALLOC_ALWAYS_INLINE t *						\
55593-tsd_##n##p_get(tsd_t *tsd) {						\
55594-	/*								\
55595-	 * Because the state might change asynchronously if it's	\
55596-	 * nominal, we need to make sure that we only read it once.	\
55597-	 */								\
55598-	uint8_t state = tsd_state_get(tsd);				\
55599-	assert(state == tsd_state_nominal ||				\
55600-	    state == tsd_state_nominal_slow ||				\
55601-	    state == tsd_state_nominal_recompute ||			\
55602-	    state == tsd_state_reincarnated ||				\
55603-	    state == tsd_state_minimal_initialized);			\
55604-	return tsd_##n##p_get_unsafe(tsd);				\
55605-}
55606-TSD_DATA_SLOW
55607-TSD_DATA_FAST
55608-TSD_DATA_SLOWER
55609-#undef O
55610-
55611-/*
55612- * tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn
55613- * isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type.
55614- */
55615-#define O(n, t, nt)							\
55616-JEMALLOC_ALWAYS_INLINE nt *						\
55617-tsdn_##n##p_get(tsdn_t *tsdn) {						\
55618-	if (tsdn_null(tsdn)) {						\
55619-		return NULL;						\
55620-	}								\
55621-	tsd_t *tsd = tsdn_tsd(tsdn);					\
55622-	return (nt *)tsd_##n##p_get(tsd);				\
55623-}
55624-TSD_DATA_SLOW
55625-TSD_DATA_FAST
55626-TSD_DATA_SLOWER
55627-#undef O
55628-
55629-/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */
55630-#define O(n, t, nt)							\
55631-JEMALLOC_ALWAYS_INLINE t						\
55632-tsd_##n##_get(tsd_t *tsd) {						\
55633-	return *tsd_##n##p_get(tsd);					\
55634-}
55635-TSD_DATA_SLOW
55636-TSD_DATA_FAST
55637-TSD_DATA_SLOWER
55638-#undef O
55639-
55640-/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */
55641-#define O(n, t, nt)							\
55642-JEMALLOC_ALWAYS_INLINE void						\
55643-tsd_##n##_set(tsd_t *tsd, t val) {					\
55644-	assert(tsd_state_get(tsd) != tsd_state_reincarnated &&		\
55645-	    tsd_state_get(tsd) != tsd_state_minimal_initialized);	\
55646-	*tsd_##n##p_get(tsd) = val;					\
55647-}
55648-TSD_DATA_SLOW
55649-TSD_DATA_FAST
55650-TSD_DATA_SLOWER
55651-#undef O
55652-
55653-JEMALLOC_ALWAYS_INLINE void
55654-tsd_assert_fast(tsd_t *tsd) {
55655-	/*
55656-	 * Note that our fastness assertion does *not* include global slowness
55657-	 * counters; it's not in general possible to ensure that they won't
55658-	 * change asynchronously from underneath us.
55659-	 */
55660-	assert(!malloc_slow && tsd_tcache_enabled_get(tsd) &&
55661-	    tsd_reentrancy_level_get(tsd) == 0);
55662-}
55663-
55664-JEMALLOC_ALWAYS_INLINE bool
55665-tsd_fast(tsd_t *tsd) {
55666-	bool fast = (tsd_state_get(tsd) == tsd_state_nominal);
55667-	if (fast) {
55668-		tsd_assert_fast(tsd);
55669-	}
55670-
55671-	return fast;
55672-}
55673-
55674-JEMALLOC_ALWAYS_INLINE tsd_t *
55675-tsd_fetch_impl(bool init, bool minimal) {
55676-	tsd_t *tsd = tsd_get(init);
55677-
55678-	if (!init && tsd_get_allocates() && tsd == NULL) {
55679-		return NULL;
55680-	}
55681-	assert(tsd != NULL);
55682-
55683-	if (unlikely(tsd_state_get(tsd) != tsd_state_nominal)) {
55684-		return tsd_fetch_slow(tsd, minimal);
55685-	}
55686-	assert(tsd_fast(tsd));
55687-	tsd_assert_fast(tsd);
55688-
55689-	return tsd;
55690-}
55691-
55692-/* Get a minimal TSD that requires no cleanup.  See comments in free(). */
55693-JEMALLOC_ALWAYS_INLINE tsd_t *
55694-tsd_fetch_min(void) {
55695-	return tsd_fetch_impl(true, true);
55696-}
55697-
55698-/* For internal background threads use only. */
55699-JEMALLOC_ALWAYS_INLINE tsd_t *
55700-tsd_internal_fetch(void) {
55701-	tsd_t *tsd = tsd_fetch_min();
55702-	/* Use reincarnated state to prevent full initialization. */
55703-	tsd_state_set(tsd, tsd_state_reincarnated);
55704-
55705-	return tsd;
55706-}
55707-
55708-JEMALLOC_ALWAYS_INLINE tsd_t *
55709-tsd_fetch(void) {
55710-	return tsd_fetch_impl(true, false);
55711-}
55712-
55713-static inline bool
55714-tsd_nominal(tsd_t *tsd) {
55715-	bool nominal = tsd_state_get(tsd) <= tsd_state_nominal_max;
55716-	assert(nominal || tsd_reentrancy_level_get(tsd) > 0);
55717-
55718-	return nominal;
55719-}
55720-
55721-JEMALLOC_ALWAYS_INLINE tsdn_t *
55722-tsdn_fetch(void) {
55723-	if (!tsd_booted_get()) {
55724-		return NULL;
55725-	}
55726-
55727-	return tsd_tsdn(tsd_fetch_impl(false, false));
55728-}
55729-
55730-JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
55731-tsd_rtree_ctx(tsd_t *tsd) {
55732-	return tsd_rtree_ctxp_get(tsd);
55733-}
55734-
55735-JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
55736-tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
55737-	/*
55738-	 * If tsd cannot be accessed, initialize the fallback rtree_ctx and
55739-	 * return a pointer to it.
55740-	 */
55741-	if (unlikely(tsdn_null(tsdn))) {
55742-		rtree_ctx_data_init(fallback);
55743-		return fallback;
55744-	}
55745-	return tsd_rtree_ctx(tsdn_tsd(tsdn));
55746-}
55747-
55748-static inline bool
55749-tsd_state_nocleanup(tsd_t *tsd) {
55750-	return tsd_state_get(tsd) == tsd_state_reincarnated ||
55751-	    tsd_state_get(tsd) == tsd_state_minimal_initialized;
55752-}
55753-
55754-/*
55755- * These "raw" tsd reentrancy functions don't have any debug checking to make
55756- * sure that we're not touching arena 0.  Better is to call pre_reentrancy and
55757- * post_reentrancy if this is possible.
55758- */
55759-static inline void
55760-tsd_pre_reentrancy_raw(tsd_t *tsd) {
55761-	bool fast = tsd_fast(tsd);
55762-	assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
55763-	++*tsd_reentrancy_levelp_get(tsd);
55764-	if (fast) {
55765-		/* Prepare slow path for reentrancy. */
55766-		tsd_slow_update(tsd);
55767-		assert(tsd_state_get(tsd) == tsd_state_nominal_slow);
55768-	}
55769-}
55770-
55771-static inline void
55772-tsd_post_reentrancy_raw(tsd_t *tsd) {
55773-	int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
55774-	assert(*reentrancy_level > 0);
55775-	if (--*reentrancy_level == 0) {
55776-		tsd_slow_update(tsd);
55777-	}
55778-}
55779-
55780-#endif /* JEMALLOC_INTERNAL_TSD_H */
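The O()/TSD_DATA_* machinery above is a classic X-macro: one field list drives both the struct layout and the generated accessors. A minimal standalone sketch of the same technique, with illustrative field names:

```c
#include <stdint.h>

/*  O(name,               type) */
#define DEMO_TSD_FIELDS                                                 \
    O(thread_allocated,   uint64_t)                                     \
    O(thread_deallocated, uint64_t)                                     \
    O(reentrancy_level,   int8_t)

/* Expansion 1: the struct itself. */
typedef struct {
#define O(n, t) t n;
    DEMO_TSD_FIELDS
#undef O
} demo_tsd_t;

/* Expansion 2: one typed getter/setter pair per field. */
#define O(n, t)                                                         \
static inline t demo_tsd_##n##_get(const demo_tsd_t *tsd) {            \
    return tsd->n;                                                      \
}                                                                       \
static inline void demo_tsd_##n##_set(demo_tsd_t *tsd, t val) {        \
    tsd->n = val;                                                       \
}
DEMO_TSD_FIELDS
#undef O
```

Adding a field to DEMO_TSD_FIELDS automatically produces the matching _get/_set pair, which is how the real tsd.h keeps its several dozen fields, initializers, and accessors in sync.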
55781diff --git a/jemalloc/include/jemalloc/internal/tsd_generic.h b/jemalloc/include/jemalloc/internal/tsd_generic.h
55782deleted file mode 100644
55783index a718472..0000000
55784--- a/jemalloc/include/jemalloc/internal/tsd_generic.h
55785+++ /dev/null
55786@@ -1,182 +0,0 @@
55787-#ifdef JEMALLOC_INTERNAL_TSD_GENERIC_H
55788-#error This file should be included only once, by tsd.h.
55789-#endif
55790-#define JEMALLOC_INTERNAL_TSD_GENERIC_H
55791-
55792-typedef struct tsd_init_block_s tsd_init_block_t;
55793-struct tsd_init_block_s {
55794-	ql_elm(tsd_init_block_t) link;
55795-	pthread_t thread;
55796-	void *data;
55797-};
55798-
55799-/* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. */
55800-typedef struct tsd_init_head_s tsd_init_head_t;
55801-
55802-typedef struct {
55803-	bool initialized;
55804-	tsd_t val;
55805-} tsd_wrapper_t;
55806-
55807-void *tsd_init_check_recursion(tsd_init_head_t *head,
55808-    tsd_init_block_t *block);
55809-void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
55810-
55811-extern pthread_key_t tsd_tsd;
55812-extern tsd_init_head_t tsd_init_head;
55813-extern tsd_wrapper_t tsd_boot_wrapper;
55814-extern bool tsd_booted;
55815-
55816-/* Initialization/cleanup. */
55817-JEMALLOC_ALWAYS_INLINE void
55818-tsd_cleanup_wrapper(void *arg) {
55819-	tsd_wrapper_t *wrapper = (tsd_wrapper_t *)arg;
55820-
55821-	if (wrapper->initialized) {
55822-		wrapper->initialized = false;
55823-		tsd_cleanup(&wrapper->val);
55824-		if (wrapper->initialized) {
55825-			/* Trigger another cleanup round. */
55826-			if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0)
55827-			{
55828-				malloc_write("<jemalloc>: Error setting TSD\n");
55829-				if (opt_abort) {
55830-					abort();
55831-				}
55832-			}
55833-			return;
55834-		}
55835-	}
55836-	malloc_tsd_dalloc(wrapper);
55837-}
55838-
55839-JEMALLOC_ALWAYS_INLINE void
55840-tsd_wrapper_set(tsd_wrapper_t *wrapper) {
55841-	if (unlikely(!tsd_booted)) {
55842-		return;
55843-	}
55844-	if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) {
55845-		malloc_write("<jemalloc>: Error setting TSD\n");
55846-		abort();
55847-	}
55848-}
55849-
55850-JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
55851-tsd_wrapper_get(bool init) {
55852-	tsd_wrapper_t *wrapper;
55853-
55854-	if (unlikely(!tsd_booted)) {
55855-		return &tsd_boot_wrapper;
55856-	}
55857-
55858-	wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd);
55859-
55860-	if (init && unlikely(wrapper == NULL)) {
55861-		tsd_init_block_t block;
55862-		wrapper = (tsd_wrapper_t *)
55863-		    tsd_init_check_recursion(&tsd_init_head, &block);
55864-		if (wrapper) {
55865-			return wrapper;
55866-		}
55867-		wrapper = (tsd_wrapper_t *)
55868-		    malloc_tsd_malloc(sizeof(tsd_wrapper_t));
55869-		block.data = (void *)wrapper;
55870-		if (wrapper == NULL) {
55871-			malloc_write("<jemalloc>: Error allocating TSD\n");
55872-			abort();
55873-		} else {
55874-			wrapper->initialized = false;
55875-			JEMALLOC_DIAGNOSTIC_PUSH
55876-			JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
55877-			tsd_t initializer = TSD_INITIALIZER;
55878-			JEMALLOC_DIAGNOSTIC_POP
55879-			wrapper->val = initializer;
55880-		}
55881-		tsd_wrapper_set(wrapper);
55882-		tsd_init_finish(&tsd_init_head, &block);
55883-	}
55884-	return wrapper;
55885-}
55886-
55887-JEMALLOC_ALWAYS_INLINE bool
55888-tsd_boot0(void) {
55889-	tsd_wrapper_t *wrapper;
55890-	tsd_init_block_t block;
55891-
55892-	wrapper = (tsd_wrapper_t *)
55893-	    tsd_init_check_recursion(&tsd_init_head, &block);
55894-	if (wrapper) {
55895-		return false;
55896-	}
55897-	block.data = &tsd_boot_wrapper;
55898-	if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) {
55899-		return true;
55900-	}
55901-	tsd_booted = true;
55902-	tsd_wrapper_set(&tsd_boot_wrapper);
55903-	tsd_init_finish(&tsd_init_head, &block);
55904-	return false;
55905-}
55906-
55907-JEMALLOC_ALWAYS_INLINE void
55908-tsd_boot1(void) {
55909-	tsd_wrapper_t *wrapper;
55910-	wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t));
55911-	if (wrapper == NULL) {
55912-		malloc_write("<jemalloc>: Error allocating TSD\n");
55913-		abort();
55914-	}
55915-	tsd_boot_wrapper.initialized = false;
55916-	tsd_cleanup(&tsd_boot_wrapper.val);
55917-	wrapper->initialized = false;
55918-	JEMALLOC_DIAGNOSTIC_PUSH
55919-	JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
55920-	tsd_t initializer = TSD_INITIALIZER;
55921-	JEMALLOC_DIAGNOSTIC_POP
55922-	wrapper->val = initializer;
55923-	tsd_wrapper_set(wrapper);
55924-}
55925-
55926-JEMALLOC_ALWAYS_INLINE bool
55927-tsd_boot(void) {
55928-	if (tsd_boot0()) {
55929-		return true;
55930-	}
55931-	tsd_boot1();
55932-	return false;
55933-}
55934-
55935-JEMALLOC_ALWAYS_INLINE bool
55936-tsd_booted_get(void) {
55937-	return tsd_booted;
55938-}
55939-
55940-JEMALLOC_ALWAYS_INLINE bool
55941-tsd_get_allocates(void) {
55942-	return true;
55943-}
55944-
55945-/* Get/set. */
55946-JEMALLOC_ALWAYS_INLINE tsd_t *
55947-tsd_get(bool init) {
55948-	tsd_wrapper_t *wrapper;
55949-
55950-	assert(tsd_booted);
55951-	wrapper = tsd_wrapper_get(init);
55952-	if (tsd_get_allocates() && !init && wrapper == NULL) {
55953-		return NULL;
55954-	}
55955-	return &wrapper->val;
55956-}
55957-
55958-JEMALLOC_ALWAYS_INLINE void
55959-tsd_set(tsd_t *val) {
55960-	tsd_wrapper_t *wrapper;
55961-
55962-	assert(tsd_booted);
55963-	wrapper = tsd_wrapper_get(true);
55964-	if (likely(&wrapper->val != val)) {
55965-		wrapper->val = *(val);
55966-	}
55967-	wrapper->initialized = true;
55968-}
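
The wrapper scheme above is the generic pthread_key_t fallback: a heap-allocated
tsd_wrapper_t lives behind the key so the destructor can run cleanup and, if
needed, re-arm itself.  A stripped-down sketch of the same lazy-wrapper pattern
outside jemalloc, using only standard POSIX calls and hypothetical names:

#include <pthread.h>
#include <stdlib.h>

typedef struct { int initialized; long val; } demo_wrapper_t;

static pthread_key_t demo_key;

static void
demo_cleanup(void *arg) {
	free(arg);	/* Destructor runs once per thread at exit. */
}

/* Call once at startup: pthread_key_create(&demo_key, demo_cleanup). */
static demo_wrapper_t *
demo_get(void) {
	demo_wrapper_t *w = pthread_getspecific(demo_key);
	if (w == NULL) {
		/* Lazily allocate on first access from this thread. */
		w = calloc(1, sizeof(*w));
		if (w != NULL) {
			pthread_setspecific(demo_key, w);
		}
	}
	return w;
}
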
55969diff --git a/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h b/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h
55970deleted file mode 100644
55971index d8f3ef1..0000000
55972--- a/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h
55973+++ /dev/null
55974@@ -1,61 +0,0 @@
55975-#ifdef JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H
55976-#error This file should be included only once, by tsd.h.
55977-#endif
55978-#define JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H
55979-
55980-#define JEMALLOC_TSD_TYPE_ATTR(type) __thread type JEMALLOC_TLS_MODEL
55981-
55982-extern JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls;
55983-extern JEMALLOC_TSD_TYPE_ATTR(bool) tsd_initialized;
55984-extern bool tsd_booted;
55985-
55986-/* Initialization/cleanup. */
55987-JEMALLOC_ALWAYS_INLINE bool
55988-tsd_cleanup_wrapper(void) {
55989-	if (tsd_initialized) {
55990-		tsd_initialized = false;
55991-		tsd_cleanup(&tsd_tls);
55992-	}
55993-	return tsd_initialized;
55994-}
55995-
55996-JEMALLOC_ALWAYS_INLINE bool
55997-tsd_boot0(void) {
55998-	_malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
55999-	tsd_booted = true;
56000-	return false;
56001-}
56002-
56003-JEMALLOC_ALWAYS_INLINE void
56004-tsd_boot1(void) {
56005-	/* Do nothing. */
56006-}
56007-
56008-JEMALLOC_ALWAYS_INLINE bool
56009-tsd_boot(void) {
56010-	return tsd_boot0();
56011-}
56012-
56013-JEMALLOC_ALWAYS_INLINE bool
56014-tsd_booted_get(void) {
56015-	return tsd_booted;
56016-}
56017-
56018-JEMALLOC_ALWAYS_INLINE bool
56019-tsd_get_allocates(void) {
56020-	return false;
56021-}
56022-
56023-/* Get/set. */
56024-JEMALLOC_ALWAYS_INLINE tsd_t *
56025-tsd_get(bool init) {
56026-	return &tsd_tls;
56027-}
56028-JEMALLOC_ALWAYS_INLINE void
56029-tsd_set(tsd_t *val) {
56030-	assert(tsd_booted);
56031-	if (likely(&tsd_tls != val)) {
56032-		tsd_tls = (*val);
56033-	}
56034-	tsd_initialized = true;
56035-}
56036diff --git a/jemalloc/include/jemalloc/internal/tsd_tls.h b/jemalloc/include/jemalloc/internal/tsd_tls.h
56037deleted file mode 100644
56038index 7d6c805..0000000
56039--- a/jemalloc/include/jemalloc/internal/tsd_tls.h
56040+++ /dev/null
56041@@ -1,60 +0,0 @@
56042-#ifdef JEMALLOC_INTERNAL_TSD_TLS_H
56043-#error This file should be included only once, by tsd.h.
56044-#endif
56045-#define JEMALLOC_INTERNAL_TSD_TLS_H
56046-
56047-#define JEMALLOC_TSD_TYPE_ATTR(type) __thread type JEMALLOC_TLS_MODEL
56048-
56049-extern JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls;
56050-extern pthread_key_t tsd_tsd;
56051-extern bool tsd_booted;
56052-
56053-/* Initialization/cleanup. */
56054-JEMALLOC_ALWAYS_INLINE bool
56055-tsd_boot0(void) {
56056-	if (pthread_key_create(&tsd_tsd, &tsd_cleanup) != 0) {
56057-		return true;
56058-	}
56059-	tsd_booted = true;
56060-	return false;
56061-}
56062-
56063-JEMALLOC_ALWAYS_INLINE void
56064-tsd_boot1(void) {
56065-	/* Do nothing. */
56066-}
56067-
56068-JEMALLOC_ALWAYS_INLINE bool
56069-tsd_boot(void) {
56070-	return tsd_boot0();
56071-}
56072-
56073-JEMALLOC_ALWAYS_INLINE bool
56074-tsd_booted_get(void) {
56075-	return tsd_booted;
56076-}
56077-
56078-JEMALLOC_ALWAYS_INLINE bool
56079-tsd_get_allocates(void) {
56080-	return false;
56081-}
56082-
56083-/* Get/set. */
56084-JEMALLOC_ALWAYS_INLINE tsd_t *
56085-tsd_get(bool init) {
56086-	return &tsd_tls;
56087-}
56088-
56089-JEMALLOC_ALWAYS_INLINE void
56090-tsd_set(tsd_t *val) {
56091-	assert(tsd_booted);
56092-	if (likely(&tsd_tls != val)) {
56093-		tsd_tls = (*val);
56094-	}
56095-	if (pthread_setspecific(tsd_tsd, (void *)(&tsd_tls)) != 0) {
56096-		malloc_write("<jemalloc>: Error setting tsd.\n");
56097-		if (opt_abort) {
56098-			abort();
56099-		}
56100-	}
56101-}
56102diff --git a/jemalloc/include/jemalloc/internal/tsd_types.h b/jemalloc/include/jemalloc/internal/tsd_types.h
56103deleted file mode 100644
56104index a6ae37d..0000000
56105--- a/jemalloc/include/jemalloc/internal/tsd_types.h
56106+++ /dev/null
56107@@ -1,10 +0,0 @@
56108-#ifndef JEMALLOC_INTERNAL_TSD_TYPES_H
56109-#define JEMALLOC_INTERNAL_TSD_TYPES_H
56110-
56111-#define MALLOC_TSD_CLEANUPS_MAX	4
56112-
56113-typedef struct tsd_s tsd_t;
56114-typedef struct tsdn_s tsdn_t;
56115-typedef bool (*malloc_tsd_cleanup_t)(void);
56116-
56117-#endif /* JEMALLOC_INTERNAL_TSD_TYPES_H */
56118diff --git a/jemalloc/include/jemalloc/internal/tsd_win.h b/jemalloc/include/jemalloc/internal/tsd_win.h
56119deleted file mode 100644
56120index a91dac8..0000000
56121--- a/jemalloc/include/jemalloc/internal/tsd_win.h
56122+++ /dev/null
56123@@ -1,139 +0,0 @@
56124-#ifdef JEMALLOC_INTERNAL_TSD_WIN_H
56125-#error This file should be included only once, by tsd.h.
56126-#endif
56127-#define JEMALLOC_INTERNAL_TSD_WIN_H
56128-
56129-typedef struct {
56130-	bool initialized;
56131-	tsd_t val;
56132-} tsd_wrapper_t;
56133-
56134-extern DWORD tsd_tsd;
56135-extern tsd_wrapper_t tsd_boot_wrapper;
56136-extern bool tsd_booted;
56137-
56138-/* Initialization/cleanup. */
56139-JEMALLOC_ALWAYS_INLINE bool
56140-tsd_cleanup_wrapper(void) {
56141-	DWORD error = GetLastError();
56142-	tsd_wrapper_t *wrapper = (tsd_wrapper_t *)TlsGetValue(tsd_tsd);
56143-	SetLastError(error);
56144-
56145-	if (wrapper == NULL) {
56146-		return false;
56147-	}
56148-
56149-	if (wrapper->initialized) {
56150-		wrapper->initialized = false;
56151-		tsd_cleanup(&wrapper->val);
56152-		if (wrapper->initialized) {
56153-			/* Trigger another cleanup round. */
56154-			return true;
56155-		}
56156-	}
56157-	malloc_tsd_dalloc(wrapper);
56158-	return false;
56159-}
56160-
56161-JEMALLOC_ALWAYS_INLINE void
56162-tsd_wrapper_set(tsd_wrapper_t *wrapper) {
56163-	if (!TlsSetValue(tsd_tsd, (void *)wrapper)) {
56164-		malloc_write("<jemalloc>: Error setting TSD\n");
56165-		abort();
56166-	}
56167-}
56168-
56169-JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
56170-tsd_wrapper_get(bool init) {
56171-	DWORD error = GetLastError();
56172-	tsd_wrapper_t *wrapper = (tsd_wrapper_t *) TlsGetValue(tsd_tsd);
56173-	SetLastError(error);
56174-
56175-	if (init && unlikely(wrapper == NULL)) {
56176-		wrapper = (tsd_wrapper_t *)
56177-		    malloc_tsd_malloc(sizeof(tsd_wrapper_t));
56178-		if (wrapper == NULL) {
56179-			malloc_write("<jemalloc>: Error allocating TSD\n");
56180-			abort();
56181-		} else {
56182-			wrapper->initialized = false;
56183-			/* MSVC is finicky about aggregate initialization. */
56184-			tsd_t tsd_initializer = TSD_INITIALIZER;
56185-			wrapper->val = tsd_initializer;
56186-		}
56187-		tsd_wrapper_set(wrapper);
56188-	}
56189-	return wrapper;
56190-}
56191-
56192-JEMALLOC_ALWAYS_INLINE bool
56193-tsd_boot0(void) {
56194-	tsd_tsd = TlsAlloc();
56195-	if (tsd_tsd == TLS_OUT_OF_INDEXES) {
56196-		return true;
56197-	}
56198-	_malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
56199-	tsd_wrapper_set(&tsd_boot_wrapper);
56200-	tsd_booted = true;
56201-	return false;
56202-}
56203-
56204-JEMALLOC_ALWAYS_INLINE void
56205-tsd_boot1(void) {
56206-	tsd_wrapper_t *wrapper;
56207-	wrapper = (tsd_wrapper_t *)
56208-	    malloc_tsd_malloc(sizeof(tsd_wrapper_t));
56209-	if (wrapper == NULL) {
56210-		malloc_write("<jemalloc>: Error allocating TSD\n");
56211-		abort();
56212-	}
56213-	tsd_boot_wrapper.initialized = false;
56214-	tsd_cleanup(&tsd_boot_wrapper.val);
56215-	wrapper->initialized = false;
56216-	tsd_t initializer = TSD_INITIALIZER;
56217-	wrapper->val = initializer;
56218-	tsd_wrapper_set(wrapper);
56219-}
56220-JEMALLOC_ALWAYS_INLINE bool
56221-tsd_boot(void) {
56222-	if (tsd_boot0()) {
56223-		return true;
56224-	}
56225-	tsd_boot1();
56226-	return false;
56227-}
56228-
56229-JEMALLOC_ALWAYS_INLINE bool
56230-tsd_booted_get(void) {
56231-	return tsd_booted;
56232-}
56233-
56234-JEMALLOC_ALWAYS_INLINE bool
56235-tsd_get_allocates(void) {
56236-	return true;
56237-}
56238-
56239-/* Get/set. */
56240-JEMALLOC_ALWAYS_INLINE tsd_t *
56241-tsd_get(bool init) {
56242-	tsd_wrapper_t *wrapper;
56243-
56244-	assert(tsd_booted);
56245-	wrapper = tsd_wrapper_get(init);
56246-	if (tsd_get_allocates() && !init && wrapper == NULL) {
56247-		return NULL;
56248-	}
56249-	return &wrapper->val;
56250-}
56251-
56252-JEMALLOC_ALWAYS_INLINE void
56253-tsd_set(tsd_t *val) {
56254-	tsd_wrapper_t *wrapper;
56255-
56256-	assert(tsd_booted);
56257-	wrapper = tsd_wrapper_get(true);
56258-	if (likely(&wrapper->val != val)) {
56259-		wrapper->val = *(val);
56260-	}
56261-	wrapper->initialized = true;
56262-}
56263diff --git a/jemalloc/include/jemalloc/internal/typed_list.h b/jemalloc/include/jemalloc/internal/typed_list.h
56264deleted file mode 100644
56265index 6535055..0000000
56266--- a/jemalloc/include/jemalloc/internal/typed_list.h
56267+++ /dev/null
56268@@ -1,55 +0,0 @@
56269-#ifndef JEMALLOC_INTERNAL_TYPED_LIST_H
56270-#define JEMALLOC_INTERNAL_TYPED_LIST_H
56271-
56272-/*
56273- * This wraps the ql module to implement a list class in a way that's a little
56274- * bit easier to use; it handles ql_elm_new calls and provides type safety.
56275- */
56276-
56277-#define TYPED_LIST(list_type, el_type, linkage)				\
56278-typedef struct {							\
56279-	ql_head(el_type) head;						\
56280-} list_type##_t;							\
56281-static inline void							\
56282-list_type##_init(list_type##_t *list) {					\
56283-	ql_new(&list->head);						\
56284-}									\
56285-static inline el_type *							\
56286-list_type##_first(const list_type##_t *list) {				\
56287-	return ql_first(&list->head);					\
56288-}									\
56289-static inline el_type *							\
56290-list_type##_last(const list_type##_t *list) {				\
56291-	return ql_last(&list->head, linkage);				\
56292-}									\
56293-static inline void							\
56294-list_type##_append(list_type##_t *list, el_type *item) {		\
56295-	ql_elm_new(item, linkage);					\
56296-	ql_tail_insert(&list->head, item, linkage);			\
56297-}									\
56298-static inline void							\
56299-list_type##_prepend(list_type##_t *list, el_type *item) {		\
56300-	ql_elm_new(item, linkage);					\
56301-	ql_head_insert(&list->head, item, linkage);			\
56302-}									\
56303-static inline void							\
56304-list_type##_replace(list_type##_t *list, el_type *to_remove,		\
56305-    el_type *to_insert) {						\
56306-	ql_elm_new(to_insert, linkage);					\
56307-	ql_after_insert(to_remove, to_insert, linkage);			\
56308-	ql_remove(&list->head, to_remove, linkage);			\
56309-}									\
56310-static inline void							\
56311-list_type##_remove(list_type##_t *list, el_type *item) {		\
56312-	ql_remove(&list->head, item, linkage);				\
56313-}									\
56314-static inline bool							\
56315-list_type##_empty(list_type##_t *list) {				\
56316-	return ql_empty(&list->head);					\
56317-}									\
56318-static inline void							\
56319-list_type##_concat(list_type##_t *list_a, list_type##_t *list_b) {	\
56320-	ql_concat(&list_a->head, &list_b->head, linkage);		\
56321-}
56322-
56323-#endif /* JEMALLOC_INTERNAL_TYPED_LIST_H */
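
TYPED_LIST is instantiated once per element type; the sketch below shows the
intended usage with a hypothetical node type (the third argument names the
ql_elm linkage field inside the element), assuming ql.h and the macro above
are in scope:

#include <assert.h>

typedef struct node_s node_t;
struct node_s {
	int value;
	ql_elm(node_t) link;	/* Intrusive linkage named in the macro call. */
};

/* Generates node_list_t plus node_list_init/first/last/append/remove/etc. */
TYPED_LIST(node_list, node_t, link)

static void
node_list_demo(node_t *a, node_t *b) {
	node_list_t list;
	node_list_init(&list);
	node_list_append(&list, a);
	node_list_prepend(&list, b);
	assert(node_list_first(&list) == b && node_list_last(&list) == a);
	node_list_remove(&list, b);
	assert(!node_list_empty(&list));
}
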
56324diff --git a/jemalloc/include/jemalloc/internal/util.h b/jemalloc/include/jemalloc/internal/util.h
56325deleted file mode 100644
56326index dcb1c0a..0000000
56327--- a/jemalloc/include/jemalloc/internal/util.h
56328+++ /dev/null
56329@@ -1,123 +0,0 @@
56330-#ifndef JEMALLOC_INTERNAL_UTIL_H
56331-#define JEMALLOC_INTERNAL_UTIL_H
56332-
56333-#define UTIL_INLINE static inline
56334-
56335-/* Junk fill patterns. */
56336-#ifndef JEMALLOC_ALLOC_JUNK
56337-#  define JEMALLOC_ALLOC_JUNK	((uint8_t)0xa5)
56338-#endif
56339-#ifndef JEMALLOC_FREE_JUNK
56340-#  define JEMALLOC_FREE_JUNK	((uint8_t)0x5a)
56341-#endif
56342-
56343-/*
56344- * Wrap a cpp argument that contains commas such that it isn't broken up into
56345- * multiple arguments.
56346- */
56347-#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
56348-
56349-/* cpp macro definition stringification. */
56350-#define STRINGIFY_HELPER(x) #x
56351-#define STRINGIFY(x) STRINGIFY_HELPER(x)
56352-
56353-/*
56354- * Silence compiler warnings due to uninitialized values.  This is used
56355- * wherever the compiler fails to recognize that the variable is never used
56356- * uninitialized.
56357- */
56358-#define JEMALLOC_CC_SILENCE_INIT(v) = v
56359-
56360-#ifdef __GNUC__
56361-#  define likely(x)   __builtin_expect(!!(x), 1)
56362-#  define unlikely(x) __builtin_expect(!!(x), 0)
56363-#else
56364-#  define likely(x)   !!(x)
56365-#  define unlikely(x) !!(x)
56366-#endif
56367-
56368-#if !defined(JEMALLOC_INTERNAL_UNREACHABLE)
56369-#  error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure
56370-#endif
56371-
56372-#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()
56373-
56374-/* Set error code. */
56375-UTIL_INLINE void
56376-set_errno(int errnum) {
56377-#ifdef _WIN32
56378-	SetLastError(errnum);
56379-#else
56380-	errno = errnum;
56381-#endif
56382-}
56383-
56384-/* Get last error code. */
56385-UTIL_INLINE int
56386-get_errno(void) {
56387-#ifdef _WIN32
56388-	return GetLastError();
56389-#else
56390-	return errno;
56391-#endif
56392-}
56393-
56394-JEMALLOC_ALWAYS_INLINE void
56395-util_assume(bool b) {
56396-	if (!b) {
56397-		unreachable();
56398-	}
56399-}
56400-
56401-/* ptr should be valid. */
56402-JEMALLOC_ALWAYS_INLINE void
56403-util_prefetch_read(void *ptr) {
56404-	/*
56405-	 * This should arguably be a config check; but any version of GCC so old
56406-	 * that it doesn't support __builtin_prefetch is also too old to build
56407-	 * jemalloc.
56408-	 */
56409-#ifdef __GNUC__
56410-	if (config_debug) {
56411-		/* Enforce the "valid ptr" requirement. */
56412-		*(volatile char *)ptr;
56413-	}
56414-	__builtin_prefetch(ptr, /* read or write */ 0, /* locality hint */ 3);
56415-#else
56416-	*(volatile char *)ptr;
56417-#endif
56418-}
56419-
56420-JEMALLOC_ALWAYS_INLINE void
56421-util_prefetch_write(void *ptr) {
56422-#ifdef __GNUC__
56423-	if (config_debug) {
56424-		*(volatile char *)ptr;
56425-	}
56426-	/*
56427-	 * The only difference from the read variant is that this has a 1 as the
56428-	 * second argument (the write hint).
56429-	 */
56430-	__builtin_prefetch(ptr, 1, 3);
56431-#else
56432-	*(volatile char *)ptr;
56433-#endif
56434-}
56435-
56436-JEMALLOC_ALWAYS_INLINE void
56437-util_prefetch_read_range(void *ptr, size_t sz) {
56438-	for (size_t i = 0; i < sz; i += CACHELINE) {
56439-		util_prefetch_read((void *)((uintptr_t)ptr + i));
56440-	}
56441-}
56442-
56443-JEMALLOC_ALWAYS_INLINE void
56444-util_prefetch_write_range(void *ptr, size_t sz) {
56445-	for (size_t i = 0; i < sz; i += CACHELINE) {
56446-		util_prefetch_write((void *)((uintptr_t)ptr + i));
56447-	}
56448-}
56449-
56450-#undef UTIL_INLINE
56451-
56452-#endif /* JEMALLOC_INTERNAL_UTIL_H */
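
The prefetch helpers are intended to be issued just ahead of a linear scan.  A
small hedged sketch with a hypothetical caller, assuming CACHELINE and the
declarations above are in scope:

#include <stddef.h>
#include <stdint.h>

static size_t
sum_bytes(const uint8_t *buf, size_t len) {
	/* Hint the whole range into cache before touching it. */
	util_prefetch_read_range((void *)(uintptr_t)buf, len);
	size_t total = 0;
	for (size_t i = 0; i < len; i++) {
		total += buf[i];
	}
	return total;
}
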
56453diff --git a/jemalloc/include/jemalloc/internal/witness.h b/jemalloc/include/jemalloc/internal/witness.h
56454deleted file mode 100644
56455index e81b9a0..0000000
56456--- a/jemalloc/include/jemalloc/internal/witness.h
56457+++ /dev/null
56458@@ -1,378 +0,0 @@
56459-#ifndef JEMALLOC_INTERNAL_WITNESS_H
56460-#define JEMALLOC_INTERNAL_WITNESS_H
56461-
56462-#include "jemalloc/internal/ql.h"
56463-
56464-/******************************************************************************/
56465-/* LOCK RANKS */
56466-/******************************************************************************/
56467-
56468-enum witness_rank_e {
56469-	/*
56470-	 * Order matters within this enum listing -- higher-valued locks can
56471-	 * only be acquired after lower-valued ones.  We rely on the
56472-	 * auto-incrementing nature of enum values to enforce this.
56473-	 */
56474-
56475-	/*
56476-	 * Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the
56477-	 * witness machinery.
56478-	 */
56479-	WITNESS_RANK_OMIT,
56480-	WITNESS_RANK_MIN,
56481-	WITNESS_RANK_INIT = WITNESS_RANK_MIN,
56482-	WITNESS_RANK_CTL,
56483-	WITNESS_RANK_TCACHES,
56484-	WITNESS_RANK_ARENAS,
56485-	WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
56486-	WITNESS_RANK_PROF_DUMP,
56487-	WITNESS_RANK_PROF_BT2GCTX,
56488-	WITNESS_RANK_PROF_TDATAS,
56489-	WITNESS_RANK_PROF_TDATA,
56490-	WITNESS_RANK_PROF_LOG,
56491-	WITNESS_RANK_PROF_GCTX,
56492-	WITNESS_RANK_PROF_RECENT_DUMP,
56493-	WITNESS_RANK_BACKGROUND_THREAD,
56494-	/*
56495-	 * Used as an argument to witness_assert_depth_to_rank() in order to
56496-	 * validate depth excluding non-core locks with lower ranks.  Since the
56497-	 * rank argument to witness_assert_depth_to_rank() is inclusive rather
56498-	 * than exclusive, this definition can have the same value as the
56499-	 * minimally ranked core lock.
56500-	 */
56501-	WITNESS_RANK_CORE,
56502-	WITNESS_RANK_DECAY = WITNESS_RANK_CORE,
56503-	WITNESS_RANK_TCACHE_QL,
56504-
56505-	WITNESS_RANK_SEC_SHARD,
56506-
56507-	WITNESS_RANK_EXTENT_GROW,
56508-	WITNESS_RANK_HPA_SHARD_GROW = WITNESS_RANK_EXTENT_GROW,
56509-	WITNESS_RANK_SAN_BUMP_ALLOC = WITNESS_RANK_EXTENT_GROW,
56510-
56511-	WITNESS_RANK_EXTENTS,
56512-	WITNESS_RANK_HPA_SHARD = WITNESS_RANK_EXTENTS,
56513-
56514-	WITNESS_RANK_HPA_CENTRAL_GROW,
56515-	WITNESS_RANK_HPA_CENTRAL,
56516-
56517-	WITNESS_RANK_EDATA_CACHE,
56518-
56519-	WITNESS_RANK_RTREE,
56520-	WITNESS_RANK_BASE,
56521-	WITNESS_RANK_ARENA_LARGE,
56522-	WITNESS_RANK_HOOK,
56523-
56524-	WITNESS_RANK_LEAF = 0x1000,
56525-	WITNESS_RANK_BIN = WITNESS_RANK_LEAF,
56526-	WITNESS_RANK_ARENA_STATS = WITNESS_RANK_LEAF,
56527-	WITNESS_RANK_COUNTER_ACCUM = WITNESS_RANK_LEAF,
56528-	WITNESS_RANK_DSS = WITNESS_RANK_LEAF,
56529-	WITNESS_RANK_PROF_ACTIVE = WITNESS_RANK_LEAF,
56530-	WITNESS_RANK_PROF_DUMP_FILENAME = WITNESS_RANK_LEAF,
56531-	WITNESS_RANK_PROF_GDUMP = WITNESS_RANK_LEAF,
56532-	WITNESS_RANK_PROF_NEXT_THR_UID = WITNESS_RANK_LEAF,
56533-	WITNESS_RANK_PROF_RECENT_ALLOC = WITNESS_RANK_LEAF,
56534-	WITNESS_RANK_PROF_STATS = WITNESS_RANK_LEAF,
56535-	WITNESS_RANK_PROF_THREAD_ACTIVE_INIT = WITNESS_RANK_LEAF,
56536-};
56537-typedef enum witness_rank_e witness_rank_t;
56538-
56539-/******************************************************************************/
56540-/* PER-WITNESS DATA */
56541-/******************************************************************************/
56542-#if defined(JEMALLOC_DEBUG)
56543-#  define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}}
56544-#else
56545-#  define WITNESS_INITIALIZER(name, rank)
56546-#endif
56547-
56548-typedef struct witness_s witness_t;
56549-typedef ql_head(witness_t) witness_list_t;
56550-typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
56551-    void *);
56552-
56553-struct witness_s {
56554-	/* Name, used for printing lock order reversal messages. */
56555-	const char		*name;
56556-
56557-	/*
56558-	 * Witness rank, where 0 is lowest and WITNESS_RANK_LEAF is highest.
56559-	 * Witnesses must be acquired in order of increasing rank.
56560-	 */
56561-	witness_rank_t		rank;
56562-
56563-	/*
56564-	 * If two witnesses are of equal rank and have the same comp
56565-	 * function pointer, it is called as a last attempt to differentiate
56566-	 * between witnesses of equal rank.
56567-	 */
56568-	witness_comp_t		*comp;
56569-
56570-	/* Opaque data, passed to comp(). */
56571-	void			*opaque;
56572-
56573-	/* Linkage for thread's currently owned locks. */
56574-	ql_elm(witness_t)	link;
56575-};
56576-
56577-/******************************************************************************/
56578-/* PER-THREAD DATA */
56579-/******************************************************************************/
56580-typedef struct witness_tsd_s witness_tsd_t;
56581-struct witness_tsd_s {
56582-	witness_list_t witnesses;
56583-	bool forking;
56584-};
56585-
56586-#define WITNESS_TSD_INITIALIZER { ql_head_initializer(witnesses), false }
56587-#define WITNESS_TSDN_NULL ((witness_tsdn_t *)0)
56588-
56589-/******************************************************************************/
56590-/* (PER-THREAD) NULLABILITY HELPERS */
56591-/******************************************************************************/
56592-typedef struct witness_tsdn_s witness_tsdn_t;
56593-struct witness_tsdn_s {
56594-	witness_tsd_t witness_tsd;
56595-};
56596-
56597-JEMALLOC_ALWAYS_INLINE witness_tsdn_t *
56598-witness_tsd_tsdn(witness_tsd_t *witness_tsd) {
56599-	return (witness_tsdn_t *)witness_tsd;
56600-}
56601-
56602-JEMALLOC_ALWAYS_INLINE bool
56603-witness_tsdn_null(witness_tsdn_t *witness_tsdn) {
56604-	return witness_tsdn == NULL;
56605-}
56606-
56607-JEMALLOC_ALWAYS_INLINE witness_tsd_t *
56608-witness_tsdn_tsd(witness_tsdn_t *witness_tsdn) {
56609-	assert(!witness_tsdn_null(witness_tsdn));
56610-	return &witness_tsdn->witness_tsd;
56611-}
56612-
56613-/******************************************************************************/
56614-/* API */
56615-/******************************************************************************/
56616-void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
56617-    witness_comp_t *comp, void *opaque);
56618-
56619-typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
56620-extern witness_lock_error_t *JET_MUTABLE witness_lock_error;
56621-
56622-typedef void (witness_owner_error_t)(const witness_t *);
56623-extern witness_owner_error_t *JET_MUTABLE witness_owner_error;
56624-
56625-typedef void (witness_not_owner_error_t)(const witness_t *);
56626-extern witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error;
56627-
56628-typedef void (witness_depth_error_t)(const witness_list_t *,
56629-    witness_rank_t rank_inclusive, unsigned depth);
56630-extern witness_depth_error_t *JET_MUTABLE witness_depth_error;
56631-
56632-void witnesses_cleanup(witness_tsd_t *witness_tsd);
56633-void witness_prefork(witness_tsd_t *witness_tsd);
56634-void witness_postfork_parent(witness_tsd_t *witness_tsd);
56635-void witness_postfork_child(witness_tsd_t *witness_tsd);
56636-
56637-/* Helper, not intended for direct use. */
56638-static inline bool
56639-witness_owner(witness_tsd_t *witness_tsd, const witness_t *witness) {
56640-	witness_list_t *witnesses;
56641-	witness_t *w;
56642-
56643-	cassert(config_debug);
56644-
56645-	witnesses = &witness_tsd->witnesses;
56646-	ql_foreach(w, witnesses, link) {
56647-		if (w == witness) {
56648-			return true;
56649-		}
56650-	}
56651-
56652-	return false;
56653-}
56654-
56655-static inline void
56656-witness_assert_owner(witness_tsdn_t *witness_tsdn, const witness_t *witness) {
56657-	witness_tsd_t *witness_tsd;
56658-
56659-	if (!config_debug) {
56660-		return;
56661-	}
56662-
56663-	if (witness_tsdn_null(witness_tsdn)) {
56664-		return;
56665-	}
56666-	witness_tsd = witness_tsdn_tsd(witness_tsdn);
56667-	if (witness->rank == WITNESS_RANK_OMIT) {
56668-		return;
56669-	}
56670-
56671-	if (witness_owner(witness_tsd, witness)) {
56672-		return;
56673-	}
56674-	witness_owner_error(witness);
56675-}
56676-
56677-static inline void
56678-witness_assert_not_owner(witness_tsdn_t *witness_tsdn,
56679-    const witness_t *witness) {
56680-	witness_tsd_t *witness_tsd;
56681-	witness_list_t *witnesses;
56682-	witness_t *w;
56683-
56684-	if (!config_debug) {
56685-		return;
56686-	}
56687-
56688-	if (witness_tsdn_null(witness_tsdn)) {
56689-		return;
56690-	}
56691-	witness_tsd = witness_tsdn_tsd(witness_tsdn);
56692-	if (witness->rank == WITNESS_RANK_OMIT) {
56693-		return;
56694-	}
56695-
56696-	witnesses = &witness_tsd->witnesses;
56697-	ql_foreach(w, witnesses, link) {
56698-		if (w == witness) {
56699-			witness_not_owner_error(witness);
56700-		}
56701-	}
56702-}
56703-
56704-/* Returns depth.  Not intended for direct use. */
56705-static inline unsigned
56706-witness_depth_to_rank(witness_list_t *witnesses, witness_rank_t rank_inclusive)
56707-{
56708-	unsigned d = 0;
56709-	witness_t *w = ql_last(witnesses, link);
56710-
56711-	if (w != NULL) {
56712-		ql_reverse_foreach(w, witnesses, link) {
56713-			if (w->rank < rank_inclusive) {
56714-				break;
56715-			}
56716-			d++;
56717-		}
56718-	}
56719-
56720-	return d;
56721-}
56722-
56723-static inline void
56724-witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn,
56725-    witness_rank_t rank_inclusive, unsigned depth) {
56726-	if (!config_debug || witness_tsdn_null(witness_tsdn)) {
56727-		return;
56728-	}
56729-
56730-	witness_list_t *witnesses = &witness_tsdn_tsd(witness_tsdn)->witnesses;
56731-	unsigned d = witness_depth_to_rank(witnesses, rank_inclusive);
56732-
56733-	if (d != depth) {
56734-		witness_depth_error(witnesses, rank_inclusive, depth);
56735-	}
56736-}
56737-
56738-static inline void
56739-witness_assert_depth(witness_tsdn_t *witness_tsdn, unsigned depth) {
56740-	witness_assert_depth_to_rank(witness_tsdn, WITNESS_RANK_MIN, depth);
56741-}
56742-
56743-static inline void
56744-witness_assert_lockless(witness_tsdn_t *witness_tsdn) {
56745-	witness_assert_depth(witness_tsdn, 0);
56746-}
56747-
56748-static inline void
56749-witness_assert_positive_depth_to_rank(witness_tsdn_t *witness_tsdn,
56750-    witness_rank_t rank_inclusive) {
56751-	if (!config_debug || witness_tsdn_null(witness_tsdn)) {
56752-		return;
56753-	}
56754-
56755-	witness_list_t *witnesses = &witness_tsdn_tsd(witness_tsdn)->witnesses;
56756-	unsigned d = witness_depth_to_rank(witnesses, rank_inclusive);
56757-
56758-	if (d == 0) {
56759-		witness_depth_error(witnesses, rank_inclusive, 1);
56760-	}
56761-}
56762-
56763-static inline void
56764-witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
56765-	witness_tsd_t *witness_tsd;
56766-	witness_list_t *witnesses;
56767-	witness_t *w;
56768-
56769-	if (!config_debug) {
56770-		return;
56771-	}
56772-
56773-	if (witness_tsdn_null(witness_tsdn)) {
56774-		return;
56775-	}
56776-	witness_tsd = witness_tsdn_tsd(witness_tsdn);
56777-	if (witness->rank == WITNESS_RANK_OMIT) {
56778-		return;
56779-	}
56780-
56781-	witness_assert_not_owner(witness_tsdn, witness);
56782-
56783-	witnesses = &witness_tsd->witnesses;
56784-	w = ql_last(witnesses, link);
56785-	if (w == NULL) {
56786-		/* No other locks; do nothing. */
56787-	} else if (witness_tsd->forking && w->rank <= witness->rank) {
56788-		/* Forking, and relaxed ranking satisfied. */
56789-	} else if (w->rank > witness->rank) {
56790-		/* Not forking, rank order reversal. */
56791-		witness_lock_error(witnesses, witness);
56792-	} else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
56793-	    witness->comp || w->comp(w, w->opaque, witness, witness->opaque) >
56794-	    0)) {
56795-		/*
56796-		 * Missing/incompatible comparison function, or comparison
56797-		 * function indicates rank order reversal.
56798-		 */
56799-		witness_lock_error(witnesses, witness);
56800-	}
56801-
56802-	ql_elm_new(witness, link);
56803-	ql_tail_insert(witnesses, witness, link);
56804-}
56805-
56806-static inline void
56807-witness_unlock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
56808-	witness_tsd_t *witness_tsd;
56809-	witness_list_t *witnesses;
56810-
56811-	if (!config_debug) {
56812-		return;
56813-	}
56814-
56815-	if (witness_tsdn_null(witness_tsdn)) {
56816-		return;
56817-	}
56818-	witness_tsd = witness_tsdn_tsd(witness_tsdn);
56819-	if (witness->rank == WITNESS_RANK_OMIT) {
56820-		return;
56821-	}
56822-
56823-	/*
56824-	 * Check ownership before removal, rather than relying on
56825-	 * witness_assert_owner() to abort, so that unit tests can test this
56826-	 * function's failure mode without causing undefined behavior.
56827-	 */
56828-	if (witness_owner(witness_tsd, witness)) {
56829-		witnesses = &witness_tsd->witnesses;
56830-		ql_remove(witnesses, witness, link);
56831-	} else {
56832-		witness_assert_owner(witness_tsdn, witness);
56833-	}
56834-}
56835-
56836-#endif /* JEMALLOC_INTERNAL_WITNESS_H */
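
In debug builds each lock carries a witness; the expected pattern is to record
the witness right after acquiring the underlying lock and to drop it just
before releasing, so rank-order reversals abort deterministically.  A hedged
sketch with a hypothetical pthread-based wrapper (jemalloc's own mutex.h wires
this up differently):

#include <pthread.h>

typedef struct {
	pthread_mutex_t lock;
	witness_t witness;
} demo_mutex_t;

static void
demo_mutex_init(demo_mutex_t *m, const char *name, witness_rank_t rank) {
	pthread_mutex_init(&m->lock, NULL);
	witness_init(&m->witness, name, rank, NULL, NULL);
}

static void
demo_mutex_lock(witness_tsdn_t *witness_tsdn, demo_mutex_t *m) {
	pthread_mutex_lock(&m->lock);
	/* Aborts (in debug builds) on lock-order reversal. */
	witness_lock(witness_tsdn, &m->witness);
}

static void
demo_mutex_unlock(witness_tsdn_t *witness_tsdn, demo_mutex_t *m) {
	witness_unlock(witness_tsdn, &m->witness);
	pthread_mutex_unlock(&m->lock);
}
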
56837diff --git a/jemalloc/include/jemalloc/jemalloc.sh b/jemalloc/include/jemalloc/jemalloc.sh
56838deleted file mode 100755
56839index b19b154..0000000
56840--- a/jemalloc/include/jemalloc/jemalloc.sh
56841+++ /dev/null
56842@@ -1,27 +0,0 @@
56843-#!/bin/sh
56844-
56845-objroot=$1
56846-
56847-cat <<EOF
56848-#ifndef JEMALLOC_H_
56849-#define JEMALLOC_H_
56850-#ifdef __cplusplus
56851-extern "C" {
56852-#endif
56853-
56854-EOF
56855-
56856-for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \
56857-           jemalloc_protos.h jemalloc_typedefs.h jemalloc_mangle.h ; do
56858-  cat "${objroot}include/jemalloc/${hdr}" \
56859-      | grep -v 'Generated from .* by configure\.' \
56860-      | sed -e 's/ $//g'
56861-  echo
56862-done
56863-
56864-cat <<EOF
56865-#ifdef __cplusplus
56866-}
56867-#endif
56868-#endif /* JEMALLOC_H_ */
56869-EOF
56870diff --git a/jemalloc/include/jemalloc/jemalloc_defs.h.in b/jemalloc/include/jemalloc/jemalloc_defs.h.in
56871deleted file mode 100644
56872index cbe2fca..0000000
56873--- a/jemalloc/include/jemalloc/jemalloc_defs.h.in
56874+++ /dev/null
56875@@ -1,54 +0,0 @@
56876-/* Defined if __attribute__((...)) syntax is supported. */
56877-#undef JEMALLOC_HAVE_ATTR
56878-
56879-/* Defined if alloc_size attribute is supported. */
56880-#undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
56881-
56882-/* Defined if format_arg(...) attribute is supported. */
56883-#undef JEMALLOC_HAVE_ATTR_FORMAT_ARG
56884-
56885-/* Defined if format(gnu_printf, ...) attribute is supported. */
56886-#undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
56887-
56888-/* Defined if format(printf, ...) attribute is supported. */
56889-#undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
56890-
56891-/* Defined if fallthrough attribute is supported. */
56892-#undef JEMALLOC_HAVE_ATTR_FALLTHROUGH
56893-
56894-/* Defined if cold attribute is supported. */
56895-#undef JEMALLOC_HAVE_ATTR_COLD
56896-
56897-/*
56898- * Define overrides for non-standard allocator-related functions if they are
56899- * present on the system.
56900- */
56901-#undef JEMALLOC_OVERRIDE_MEMALIGN
56902-#undef JEMALLOC_OVERRIDE_VALLOC
56903-
56904-/*
56905- * At least Linux omits the "const" in:
56906- *
56907- *   size_t malloc_usable_size(const void *ptr);
56908- *
56909- * Match the operating system's prototype.
56910- */
56911-#undef JEMALLOC_USABLE_SIZE_CONST
56912-
56913-/*
56914- * If defined, specify throw() for the public function prototypes when compiling
56915- * with C++.  The only justification for this is to match the prototypes that
56916- * glibc defines.
56917- */
56918-#undef JEMALLOC_USE_CXX_THROW
56919-
56920-#ifdef _MSC_VER
56921-#  ifdef _WIN64
56922-#    define LG_SIZEOF_PTR_WIN 3
56923-#  else
56924-#    define LG_SIZEOF_PTR_WIN 2
56925-#  endif
56926-#endif
56927-
56928-/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
56929-#undef LG_SIZEOF_PTR
56930diff --git a/jemalloc/include/jemalloc/jemalloc_macros.h.in b/jemalloc/include/jemalloc/jemalloc_macros.h.in
56931deleted file mode 100644
56932index ebb3137..0000000
56933--- a/jemalloc/include/jemalloc/jemalloc_macros.h.in
56934+++ /dev/null
56935@@ -1,149 +0,0 @@
56936-#include <stdlib.h>
56937-#include <stdbool.h>
56938-#include <stdint.h>
56939-#include <limits.h>
56940-#include <strings.h>
56941-
56942-#define JEMALLOC_VERSION "@jemalloc_version@"
56943-#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@
56944-#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@
56945-#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@
56946-#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
56947-#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
56948-#define JEMALLOC_VERSION_GID_IDENT @jemalloc_version_gid@
56949-
56950-#define MALLOCX_LG_ALIGN(la)	((int)(la))
56951-#if LG_SIZEOF_PTR == 2
56952-#  define MALLOCX_ALIGN(a)	((int)(ffs((int)(a))-1))
56953-#else
56954-#  define MALLOCX_ALIGN(a)						\
56955-     ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 :	\
56956-     ffs((int)(((size_t)(a))>>32))+31))
56957-#endif
56958-#define MALLOCX_ZERO	((int)0x40)
56959-/*
56960- * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
56961- * encodes MALLOCX_TCACHE_NONE.
56962- */
56963-#define MALLOCX_TCACHE(tc)	((int)(((tc)+2) << 8))
56964-#define MALLOCX_TCACHE_NONE	MALLOCX_TCACHE(-1)
56965-/*
56966- * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
56967- */
56968-#define MALLOCX_ARENA(a)	((((int)(a))+1) << 20)
56969-
56970-/*
56971- * Use as arena index in "arena.<i>.{purge,decay,dss}" and
56972- * "stats.arenas.<i>.*" mallctl interfaces to select all arenas.  This
56973- * definition is intentionally specified in raw decimal format to support
56974- * cpp-based string concatenation, e.g.
56975- *
56976- *   #define STRINGIFY_HELPER(x) #x
56977- *   #define STRINGIFY(x) STRINGIFY_HELPER(x)
56978- *
56979- *   mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
56980- *       0);
56981- */
56982-#define MALLCTL_ARENAS_ALL	4096
56983-/*
56984- * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
56985- * destroyed arenas.
56986- */
56987-#define MALLCTL_ARENAS_DESTROYED	4097
56988-
56989-#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
56990-#  define JEMALLOC_CXX_THROW throw()
56991-#else
56992-#  define JEMALLOC_CXX_THROW
56993-#endif
56994-
56995-#if defined(_MSC_VER)
56996-#  define JEMALLOC_ATTR(s)
56997-#  define JEMALLOC_ALIGNED(s) __declspec(align(s))
56998-#  define JEMALLOC_ALLOC_SIZE(s)
56999-#  define JEMALLOC_ALLOC_SIZE2(s1, s2)
57000-#  ifndef JEMALLOC_EXPORT
57001-#    ifdef DLLEXPORT
57002-#      define JEMALLOC_EXPORT __declspec(dllexport)
57003-#    else
57004-#      define JEMALLOC_EXPORT __declspec(dllimport)
57005-#    endif
57006-#  endif
57007-#  define JEMALLOC_FORMAT_ARG(i)
57008-#  define JEMALLOC_FORMAT_PRINTF(s, i)
57009-#  define JEMALLOC_FALLTHROUGH
57010-#  define JEMALLOC_NOINLINE __declspec(noinline)
57011-#  ifdef __cplusplus
57012-#    define JEMALLOC_NOTHROW __declspec(nothrow)
57013-#  else
57014-#    define JEMALLOC_NOTHROW
57015-#  endif
57016-#  define JEMALLOC_SECTION(s) __declspec(allocate(s))
57017-#  define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
57018-#  if _MSC_VER >= 1900 && !defined(__EDG__)
57019-#    define JEMALLOC_ALLOCATOR __declspec(allocator)
57020-#  else
57021-#    define JEMALLOC_ALLOCATOR
57022-#  endif
57023-#  define JEMALLOC_COLD
57024-#elif defined(JEMALLOC_HAVE_ATTR)
57025-#  define JEMALLOC_ATTR(s) __attribute__((s))
57026-#  define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
57027-#  ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
57028-#    define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
57029-#    define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
57030-#  else
57031-#    define JEMALLOC_ALLOC_SIZE(s)
57032-#    define JEMALLOC_ALLOC_SIZE2(s1, s2)
57033-#  endif
57034-#  ifndef JEMALLOC_EXPORT
57035-#    define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
57036-#  endif
57037-#  ifdef JEMALLOC_HAVE_ATTR_FORMAT_ARG
57038-#    define JEMALLOC_FORMAT_ARG(i) JEMALLOC_ATTR(__format_arg__(3))
57039-#  else
57040-#    define JEMALLOC_FORMAT_ARG(i)
57041-#  endif
57042-#  ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
57043-#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
57044-#  elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
57045-#    define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
57046-#  else
57047-#    define JEMALLOC_FORMAT_PRINTF(s, i)
57048-#  endif
57049-#  ifdef JEMALLOC_HAVE_ATTR_FALLTHROUGH
57050-#    define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough)
57051-#  else
57052-#    define JEMALLOC_FALLTHROUGH
57053-#  endif
57054-#  define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
57055-#  define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
57056-#  define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
57057-#  define JEMALLOC_RESTRICT_RETURN
57058-#  define JEMALLOC_ALLOCATOR
57059-#  ifdef JEMALLOC_HAVE_ATTR_COLD
57060-#    define JEMALLOC_COLD JEMALLOC_ATTR(__cold__)
57061-#  else
57062-#    define JEMALLOC_COLD
57063-#  endif
57064-#else
57065-#  define JEMALLOC_ATTR(s)
57066-#  define JEMALLOC_ALIGNED(s)
57067-#  define JEMALLOC_ALLOC_SIZE(s)
57068-#  define JEMALLOC_ALLOC_SIZE2(s1, s2)
57069-#  define JEMALLOC_EXPORT
57070-#  define JEMALLOC_FORMAT_PRINTF(s, i)
57071-#  define JEMALLOC_FALLTHROUGH
57072-#  define JEMALLOC_NOINLINE
57073-#  define JEMALLOC_NOTHROW
57074-#  define JEMALLOC_SECTION(s)
57075-#  define JEMALLOC_RESTRICT_RETURN
57076-#  define JEMALLOC_ALLOCATOR
57077-#  define JEMALLOC_COLD
57078-#endif
57079-
57080-#if (defined(__APPLE__) || defined(__FreeBSD__)) && !defined(JEMALLOC_NO_RENAME)
57081-#  define JEMALLOC_SYS_NOTHROW
57082-#else
57083-#  define JEMALLOC_SYS_NOTHROW JEMALLOC_NOTHROW
57084-#endif
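
The MALLOCX_* encodings above compose by bitwise OR into the flags argument of
the *allocx() entry points.  A small example against the generated public
header (standard jemalloc API, shown here only for illustration):

#include <jemalloc/jemalloc.h>

int
main(void) {
	/* 4 KiB, zero-filled, 64-byte aligned, bypassing the thread cache. */
	void *p = mallocx(4096,
	    MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE);
	if (p == NULL) {
		return 1;
	}
	dallocx(p, MALLOCX_TCACHE_NONE);
	return 0;
}
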
57085diff --git a/jemalloc/include/jemalloc/jemalloc_mangle.sh b/jemalloc/include/jemalloc/jemalloc_mangle.sh
57086deleted file mode 100755
57087index c675bb4..0000000
57088--- a/jemalloc/include/jemalloc/jemalloc_mangle.sh
57089+++ /dev/null
57090@@ -1,45 +0,0 @@
57091-#!/bin/sh -eu
57092-
57093-public_symbols_txt=$1
57094-symbol_prefix=$2
57095-
57096-cat <<EOF
57097-/*
57098- * By default application code must explicitly refer to mangled symbol names,
57099- * so that it is possible to use jemalloc in conjunction with another allocator
57100- * in the same application.  Define JEMALLOC_MANGLE in order to cause automatic
57101- * name mangling that matches the API prefixing that happened as a result of
57102- * --with-mangling and/or --with-jemalloc-prefix configuration settings.
57103- */
57104-#ifdef JEMALLOC_MANGLE
57105-#  ifndef JEMALLOC_NO_DEMANGLE
57106-#    define JEMALLOC_NO_DEMANGLE
57107-#  endif
57108-EOF
57109-
57110-for nm in `cat ${public_symbols_txt}` ; do
57111-  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
57112-  echo "#  define ${n} ${symbol_prefix}${n}"
57113-done
57114-
57115-cat <<EOF
57116-#endif
57117-
57118-/*
57119- * The ${symbol_prefix}* macros can be used as stable alternative names for the
57120- * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined.  This is primarily
57121- * meant for use in jemalloc itself, but it can be used by application code to
57122- * provide isolation from the name mangling specified via --with-mangling
57123- * and/or --with-jemalloc-prefix.
57124- */
57125-#ifndef JEMALLOC_NO_DEMANGLE
57126-EOF
57127-
57128-for nm in `cat ${public_symbols_txt}` ; do
57129-  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
57130-  echo "#  undef ${symbol_prefix}${n}"
57131-done
57132-
57133-cat <<EOF
57134-#endif
57135-EOF
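
With the generated jemalloc_mangle.h in place, defining JEMALLOC_MANGLE before
the public include makes plain allocator calls resolve to the prefixed symbols.
A hedged sketch, assuming a build configured with --with-jemalloc-prefix=je_:

/* Opt in to automatic name mangling before including the public header. */
#define JEMALLOC_MANGLE
#include <jemalloc/jemalloc.h>

int
main(void) {
	void *p = malloc(64);	/* Expands to je_malloc(64) under this prefix. */
	if (p == NULL) {
		return 1;
	}
	free(p);		/* Expands to je_free(p). */
	return 0;
}
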
57136diff --git a/jemalloc/include/jemalloc/jemalloc_protos.h.in b/jemalloc/include/jemalloc/jemalloc_protos.h.in
57137deleted file mode 100644
57138index 356221c..0000000
57139--- a/jemalloc/include/jemalloc/jemalloc_protos.h.in
57140+++ /dev/null
57141@@ -1,71 +0,0 @@
57142-/*
57143- * The @je_@ prefix on the following public symbol declarations is an artifact
57144- * of namespace management, and should be omitted in application code unless
57145- * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@install_suffix@.h).
57146- */
57147-extern JEMALLOC_EXPORT const char	*@je_@malloc_conf;
57148-extern JEMALLOC_EXPORT void		(*@je_@malloc_message)(void *cbopaque,
57149-    const char *s);
57150-
57151-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
57152-    void JEMALLOC_SYS_NOTHROW	*@je_@malloc(size_t size)
57153-    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
57154-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
57155-    void JEMALLOC_SYS_NOTHROW	*@je_@calloc(size_t num, size_t size)
57156-    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
57157-JEMALLOC_EXPORT int JEMALLOC_SYS_NOTHROW @je_@posix_memalign(
57158-    void **memptr, size_t alignment, size_t size) JEMALLOC_CXX_THROW
57159-    JEMALLOC_ATTR(nonnull(1));
57160-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
57161-    void JEMALLOC_SYS_NOTHROW	*@je_@aligned_alloc(size_t alignment,
57162-    size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
57163-    JEMALLOC_ALLOC_SIZE(2);
57164-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
57165-    void JEMALLOC_SYS_NOTHROW	*@je_@realloc(void *ptr, size_t size)
57166-    JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
57167-JEMALLOC_EXPORT void JEMALLOC_SYS_NOTHROW	@je_@free(void *ptr)
57168-    JEMALLOC_CXX_THROW;
57169-
57170-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
57171-    void JEMALLOC_NOTHROW	*@je_@mallocx(size_t size, int flags)
57172-    JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
57173-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
57174-    void JEMALLOC_NOTHROW	*@je_@rallocx(void *ptr, size_t size,
57175-    int flags) JEMALLOC_ALLOC_SIZE(2);
57176-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	@je_@xallocx(void *ptr, size_t size,
57177-    size_t extra, int flags);
57178-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	@je_@sallocx(const void *ptr,
57179-    int flags) JEMALLOC_ATTR(pure);
57180-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	@je_@dallocx(void *ptr, int flags);
57181-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	@je_@sdallocx(void *ptr, size_t size,
57182-    int flags);
57183-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	@je_@nallocx(size_t size, int flags)
57184-    JEMALLOC_ATTR(pure);
57185-
57186-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	@je_@mallctl(const char *name,
57187-    void *oldp, size_t *oldlenp, void *newp, size_t newlen);
57188-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	@je_@mallctlnametomib(const char *name,
57189-    size_t *mibp, size_t *miblenp);
57190-JEMALLOC_EXPORT int JEMALLOC_NOTHROW	@je_@mallctlbymib(const size_t *mib,
57191-    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
57192-JEMALLOC_EXPORT void JEMALLOC_NOTHROW	@je_@malloc_stats_print(
57193-    void (*write_cb)(void *, const char *), void *@je_@cbopaque,
57194-    const char *opts);
57195-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	@je_@malloc_usable_size(
57196-    JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
57197-#ifdef JEMALLOC_HAVE_MALLOC_SIZE
57198-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW	@je_@malloc_size(
57199-    const void *ptr);
57200-#endif
57201-
57202-#ifdef JEMALLOC_OVERRIDE_MEMALIGN
57203-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
57204-    void JEMALLOC_SYS_NOTHROW	*@je_@memalign(size_t alignment, size_t size)
57205-    JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
57206-#endif
57207-
57208-#ifdef JEMALLOC_OVERRIDE_VALLOC
57209-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
57210-    void JEMALLOC_SYS_NOTHROW	*@je_@valloc(size_t size) JEMALLOC_CXX_THROW
57211-    JEMALLOC_ATTR(malloc);
57212-#endif
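
The mallctl* declarations above form jemalloc's introspection interface.  A
short example reading two documented read-only values through the public
header (assumes the default unprefixed build):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	const char *version;
	size_t len = sizeof(version);
	if (mallctl("version", &version, &len, NULL, 0) == 0) {
		printf("jemalloc %s\n", version);
	}

	unsigned narenas;
	len = sizeof(narenas);
	if (mallctl("arenas.narenas", &narenas, &len, NULL, 0) == 0) {
		printf("arenas: %u\n", narenas);
	}
	return 0;
}
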
57213diff --git a/jemalloc/include/jemalloc/jemalloc_rename.sh b/jemalloc/include/jemalloc/jemalloc_rename.sh
57214deleted file mode 100755
57215index f943891..0000000
57216--- a/jemalloc/include/jemalloc/jemalloc_rename.sh
57217+++ /dev/null
57218@@ -1,22 +0,0 @@
57219-#!/bin/sh
57220-
57221-public_symbols_txt=$1
57222-
57223-cat <<EOF
57224-/*
57225- * Name mangling for public symbols is controlled by --with-mangling and
57226- * --with-jemalloc-prefix.  With default settings the je_ prefix is stripped by
57227- * these macro definitions.
57228- */
57229-#ifndef JEMALLOC_NO_RENAME
57230-EOF
57231-
57232-for nm in `cat ${public_symbols_txt}` ; do
57233-  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
57234-  m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'`
57235-  echo "#  define je_${n} ${m}"
57236-done
57237-
57238-cat <<EOF
57239-#endif
57240-EOF
57241diff --git a/jemalloc/include/jemalloc/jemalloc_typedefs.h.in b/jemalloc/include/jemalloc/jemalloc_typedefs.h.in
57242deleted file mode 100644
57243index 1a58874..0000000
57244--- a/jemalloc/include/jemalloc/jemalloc_typedefs.h.in
57245+++ /dev/null
57246@@ -1,77 +0,0 @@
57247-typedef struct extent_hooks_s extent_hooks_t;
57248-
57249-/*
57250- * void *
57251- * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
57252- *     size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
57253- */
57254-typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
57255-    bool *, unsigned);
57256-
57257-/*
57258- * bool
57259- * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
57260- *     bool committed, unsigned arena_ind);
57261- */
57262-typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
57263-    unsigned);
57264-
57265-/*
57266- * void
57267- * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
57268- *     bool committed, unsigned arena_ind);
57269- */
57270-typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool,
57271-    unsigned);
57272-
57273-/*
57274- * bool
57275- * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
57276- *     size_t offset, size_t length, unsigned arena_ind);
57277- */
57278-typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
57279-    unsigned);
57280-
57281-/*
57282- * bool
57283- * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
57284- *     size_t offset, size_t length, unsigned arena_ind);
57285- */
57286-typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
57287-    size_t, unsigned);
57288-
57289-/*
57290- * bool
57291- * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
57292- *     size_t offset, size_t length, unsigned arena_ind);
57293- */
57294-typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
57295-    unsigned);
57296-
57297-/*
57298- * bool
57299- * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
57300- *     size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
57301- */
57302-typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
57303-    bool, unsigned);
57304-
57305-/*
57306- * bool
57307- * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
57308- *     void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
57309- */
57310-typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
57311-    bool, unsigned);
57312-
57313-struct extent_hooks_s {
57314-	extent_alloc_t		*alloc;
57315-	extent_dalloc_t		*dalloc;
57316-	extent_destroy_t	*destroy;
57317-	extent_commit_t		*commit;
57318-	extent_decommit_t	*decommit;
57319-	extent_purge_t		*purge_lazy;
57320-	extent_purge_t		*purge_forced;
57321-	extent_split_t		*split;
57322-	extent_merge_t		*merge;
57323-};
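
extent_hooks_t is the extension point for custom extent management; the usual
pattern is to populate the struct and install it per arena through the
documented "arena.<i>.extent_hooks" mallctl.  A hedged sketch in which the
hook table itself (my_extent_hooks) is a hypothetical user-provided object:

#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Hypothetical fully-populated hook table supplied elsewhere. */
extern extent_hooks_t my_extent_hooks;

int
main(void) {
	extent_hooks_t *new_hooks = &my_extent_hooks;
	extent_hooks_t *old_hooks;
	size_t old_len = sizeof(old_hooks);
	/* Swap the hooks for arena 0, keeping the previous table around. */
	if (mallctl("arena.0.extent_hooks", &old_hooks, &old_len,
	    &new_hooks, sizeof(new_hooks)) != 0) {
		return 1;
	}
	return 0;
}
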
57324diff --git a/jemalloc/include/msvc_compat/C99/stdbool.h b/jemalloc/include/msvc_compat/C99/stdbool.h
57325deleted file mode 100644
57326index d92160e..0000000
57327--- a/jemalloc/include/msvc_compat/C99/stdbool.h
57328+++ /dev/null
57329@@ -1,20 +0,0 @@
57330-#ifndef stdbool_h
57331-#define stdbool_h
57332-
57333-#include <wtypes.h>
57334-
57335-/* MSVC doesn't define _Bool or bool in C, but does have BOOL */
57336-/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */
57337-/* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as
57338- * a built-in type. */
57339-#ifndef __clang__
57340-typedef BOOL _Bool;
57341-#endif
57342-
57343-#define bool _Bool
57344-#define true 1
57345-#define false 0
57346-
57347-#define __bool_true_false_are_defined 1
57348-
57349-#endif /* stdbool_h */
57350diff --git a/jemalloc/include/msvc_compat/C99/stdint.h b/jemalloc/include/msvc_compat/C99/stdint.h
57351deleted file mode 100644
57352index d02608a..0000000
57353--- a/jemalloc/include/msvc_compat/C99/stdint.h
57354+++ /dev/null
57355@@ -1,247 +0,0 @@
57356-// ISO C9x  compliant stdint.h for Microsoft Visual Studio
57357-// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
57358-//
57359-//  Copyright (c) 2006-2008 Alexander Chemeris
57360-//
57361-// Redistribution and use in source and binary forms, with or without
57362-// modification, are permitted provided that the following conditions are met:
57363-//
57364-//   1. Redistributions of source code must retain the above copyright notice,
57365-//      this list of conditions and the following disclaimer.
57366-//
57367-//   2. Redistributions in binary form must reproduce the above copyright
57368-//      notice, this list of conditions and the following disclaimer in the
57369-//      documentation and/or other materials provided with the distribution.
57370-//
57371-//   3. The name of the author may be used to endorse or promote products
57372-//      derived from this software without specific prior written permission.
57373-//
57374-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
57375-// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
57376-// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
57377-// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57378-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
57379-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
57380-// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
57381-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
57382-// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
57383-// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57384-//
57385-///////////////////////////////////////////////////////////////////////////////
57386-
57387-#ifndef _MSC_VER // [
57388-#error "Use this header only with Microsoft Visual C++ compilers!"
57389-#endif // _MSC_VER ]
57390-
57391-#ifndef _MSC_STDINT_H_ // [
57392-#define _MSC_STDINT_H_
57393-
57394-#if _MSC_VER > 1000
57395-#pragma once
57396-#endif
57397-
57398-#include <limits.h>
57399-
57400-// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
57401-// compiling for ARM, we should wrap the <wchar.h> include with 'extern "C++" {}'
57402-// or the compiler gives many errors like this:
57403-//   error C2733: second C linkage of overloaded function 'wmemchr' not allowed
57404-#ifdef __cplusplus
57405-extern "C" {
57406-#endif
57407-#  include <wchar.h>
57408-#ifdef __cplusplus
57409-}
57410-#endif
57411-
57412-// Define _W64 macros to mark types changing their size, like intptr_t.
57413-#ifndef _W64
57414-#  if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
57415-#     define _W64 __w64
57416-#  else
57417-#     define _W64
57418-#  endif
57419-#endif
57420-
57421-
57422-// 7.18.1 Integer types
57423-
57424-// 7.18.1.1 Exact-width integer types
57425-
57426-// Visual Studio 6 and Embedded Visual C++ 4 don't
57427-// realize that, e.g., char has the same size as __int8,
57428-// so we give up on __intX for them.
57429-#if (_MSC_VER < 1300)
57430-   typedef signed char       int8_t;
57431-   typedef signed short      int16_t;
57432-   typedef signed int        int32_t;
57433-   typedef unsigned char     uint8_t;
57434-   typedef unsigned short    uint16_t;
57435-   typedef unsigned int      uint32_t;
57436-#else
57437-   typedef signed __int8     int8_t;
57438-   typedef signed __int16    int16_t;
57439-   typedef signed __int32    int32_t;
57440-   typedef unsigned __int8   uint8_t;
57441-   typedef unsigned __int16  uint16_t;
57442-   typedef unsigned __int32  uint32_t;
57443-#endif
57444-typedef signed __int64       int64_t;
57445-typedef unsigned __int64     uint64_t;
57446-
57447-
57448-// 7.18.1.2 Minimum-width integer types
57449-typedef int8_t    int_least8_t;
57450-typedef int16_t   int_least16_t;
57451-typedef int32_t   int_least32_t;
57452-typedef int64_t   int_least64_t;
57453-typedef uint8_t   uint_least8_t;
57454-typedef uint16_t  uint_least16_t;
57455-typedef uint32_t  uint_least32_t;
57456-typedef uint64_t  uint_least64_t;
57457-
57458-// 7.18.1.3 Fastest minimum-width integer types
57459-typedef int8_t    int_fast8_t;
57460-typedef int16_t   int_fast16_t;
57461-typedef int32_t   int_fast32_t;
57462-typedef int64_t   int_fast64_t;
57463-typedef uint8_t   uint_fast8_t;
57464-typedef uint16_t  uint_fast16_t;
57465-typedef uint32_t  uint_fast32_t;
57466-typedef uint64_t  uint_fast64_t;
57467-
57468-// 7.18.1.4 Integer types capable of holding object pointers
57469-#ifdef _WIN64 // [
57470-   typedef signed __int64    intptr_t;
57471-   typedef unsigned __int64  uintptr_t;
57472-#else // _WIN64 ][
57473-   typedef _W64 signed int   intptr_t;
57474-   typedef _W64 unsigned int uintptr_t;
57475-#endif // _WIN64 ]
57476-
57477-// 7.18.1.5 Greatest-width integer types
57478-typedef int64_t   intmax_t;
57479-typedef uint64_t  uintmax_t;
57480-
57481-
57482-// 7.18.2 Limits of specified-width integer types
57483-
57484-#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [   See footnote 220 at page 257 and footnote 221 at page 259
57485-
57486-// 7.18.2.1 Limits of exact-width integer types
57487-#define INT8_MIN     ((int8_t)_I8_MIN)
57488-#define INT8_MAX     _I8_MAX
57489-#define INT16_MIN    ((int16_t)_I16_MIN)
57490-#define INT16_MAX    _I16_MAX
57491-#define INT32_MIN    ((int32_t)_I32_MIN)
57492-#define INT32_MAX    _I32_MAX
57493-#define INT64_MIN    ((int64_t)_I64_MIN)
57494-#define INT64_MAX    _I64_MAX
57495-#define UINT8_MAX    _UI8_MAX
57496-#define UINT16_MAX   _UI16_MAX
57497-#define UINT32_MAX   _UI32_MAX
57498-#define UINT64_MAX   _UI64_MAX
57499-
57500-// 7.18.2.2 Limits of minimum-width integer types
57501-#define INT_LEAST8_MIN    INT8_MIN
57502-#define INT_LEAST8_MAX    INT8_MAX
57503-#define INT_LEAST16_MIN   INT16_MIN
57504-#define INT_LEAST16_MAX   INT16_MAX
57505-#define INT_LEAST32_MIN   INT32_MIN
57506-#define INT_LEAST32_MAX   INT32_MAX
57507-#define INT_LEAST64_MIN   INT64_MIN
57508-#define INT_LEAST64_MAX   INT64_MAX
57509-#define UINT_LEAST8_MAX   UINT8_MAX
57510-#define UINT_LEAST16_MAX  UINT16_MAX
57511-#define UINT_LEAST32_MAX  UINT32_MAX
57512-#define UINT_LEAST64_MAX  UINT64_MAX
57513-
57514-// 7.18.2.3 Limits of fastest minimum-width integer types
57515-#define INT_FAST8_MIN    INT8_MIN
57516-#define INT_FAST8_MAX    INT8_MAX
57517-#define INT_FAST16_MIN   INT16_MIN
57518-#define INT_FAST16_MAX   INT16_MAX
57519-#define INT_FAST32_MIN   INT32_MIN
57520-#define INT_FAST32_MAX   INT32_MAX
57521-#define INT_FAST64_MIN   INT64_MIN
57522-#define INT_FAST64_MAX   INT64_MAX
57523-#define UINT_FAST8_MAX   UINT8_MAX
57524-#define UINT_FAST16_MAX  UINT16_MAX
57525-#define UINT_FAST32_MAX  UINT32_MAX
57526-#define UINT_FAST64_MAX  UINT64_MAX
57527-
57528-// 7.18.2.4 Limits of integer types capable of holding object pointers
57529-#ifdef _WIN64 // [
57530-#  define INTPTR_MIN   INT64_MIN
57531-#  define INTPTR_MAX   INT64_MAX
57532-#  define UINTPTR_MAX  UINT64_MAX
57533-#else // _WIN64 ][
57534-#  define INTPTR_MIN   INT32_MIN
57535-#  define INTPTR_MAX   INT32_MAX
57536-#  define UINTPTR_MAX  UINT32_MAX
57537-#endif // _WIN64 ]
57538-
57539-// 7.18.2.5 Limits of greatest-width integer types
57540-#define INTMAX_MIN   INT64_MIN
57541-#define INTMAX_MAX   INT64_MAX
57542-#define UINTMAX_MAX  UINT64_MAX
57543-
57544-// 7.18.3 Limits of other integer types
57545-
57546-#ifdef _WIN64 // [
57547-#  define PTRDIFF_MIN  _I64_MIN
57548-#  define PTRDIFF_MAX  _I64_MAX
57549-#else  // _WIN64 ][
57550-#  define PTRDIFF_MIN  _I32_MIN
57551-#  define PTRDIFF_MAX  _I32_MAX
57552-#endif  // _WIN64 ]
57553-
57554-#define SIG_ATOMIC_MIN  INT_MIN
57555-#define SIG_ATOMIC_MAX  INT_MAX
57556-
57557-#ifndef SIZE_MAX // [
57558-#  ifdef _WIN64 // [
57559-#     define SIZE_MAX  _UI64_MAX
57560-#  else // _WIN64 ][
57561-#     define SIZE_MAX  _UI32_MAX
57562-#  endif // _WIN64 ]
57563-#endif // SIZE_MAX ]
57564-
57565-// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
57566-#ifndef WCHAR_MIN // [
57567-#  define WCHAR_MIN  0
57568-#endif  // WCHAR_MIN ]
57569-#ifndef WCHAR_MAX // [
57570-#  define WCHAR_MAX  _UI16_MAX
57571-#endif  // WCHAR_MAX ]
57572-
57573-#define WINT_MIN  0
57574-#define WINT_MAX  _UI16_MAX
57575-
57576-#endif // __STDC_LIMIT_MACROS ]
57577-
57578-
57579-// 7.18.4 Macros for integer constants
57580-
57581-#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [   See footnote 224 at page 260
57582-
57583-// 7.18.4.1 Macros for minimum-width integer constants
57584-
57585-#define INT8_C(val)  val##i8
57586-#define INT16_C(val) val##i16
57587-#define INT32_C(val) val##i32
57588-#define INT64_C(val) val##i64
57589-
57590-#define UINT8_C(val)  val##ui8
57591-#define UINT16_C(val) val##ui16
57592-#define UINT32_C(val) val##ui32
57593-#define UINT64_C(val) val##ui64
57594-
57595-// 7.18.4.2 Macros for greatest-width integer constants
57596-#define INTMAX_C   INT64_C
57597-#define UINTMAX_C  UINT64_C
57598-
57599-#endif // __STDC_CONSTANT_MACROS ]
57600-
57601-
57602-#endif // _MSC_STDINT_H_ ]
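The header just removed reimplements C99 <stdint.h> for older MSVC versions that ship without one. A minimal usage sketch follows (not part of the jemalloc tree; compiled as C on a modern toolchain, where the system <stdint.h> supplies the same names) to illustrate the exact-width types and the constant/limit macros the replacement defines:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint64_t big = UINT64_C(1) << 40;   /* 64-bit constant built via the suffix macro */
        intptr_t  pi = (intptr_t)&big;      /* integer type wide enough to hold a pointer */
        printf("big       = %llu\n", (unsigned long long)big);
        printf("INT64_MAX = %lld\n", (long long)INT64_MAX);
        printf("&big      = %llu\n", (unsigned long long)(uintptr_t)pi);
        return 0;
    }

In C++ translation units this particular header only exposes the limit and constant macros when __STDC_LIMIT_MACROS and __STDC_CONSTANT_MACROS are defined, as its guards above show.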
57603diff --git a/jemalloc/include/msvc_compat/strings.h b/jemalloc/include/msvc_compat/strings.h
57604deleted file mode 100644
57605index 996f256..0000000
57606--- a/jemalloc/include/msvc_compat/strings.h
57607+++ /dev/null
57608@@ -1,58 +0,0 @@
57609-#ifndef strings_h
57610-#define strings_h
57611-
57612-/* MSVC doesn't define ffs/ffsl. This dummy strings.h header provides
57613- * implementations of both (plus ffsll). */
57614-#ifdef _MSC_VER
57615-#  include <intrin.h>
57616-#  pragma intrinsic(_BitScanForward)
57617-static __forceinline int ffsl(long x) {
57618-	unsigned long i;
57619-
57620-	if (_BitScanForward(&i, x)) {
57621-		return i + 1;
57622-	}
57623-	return 0;
57624-}
57625-
57626-static __forceinline int ffs(int x) {
57627-	return ffsl(x);
57628-}
57629-
57630-#  ifdef  _M_X64
57631-#    pragma intrinsic(_BitScanForward64)
57632-#  endif
57633-
57634-static __forceinline int ffsll(unsigned __int64 x) {
57635-	unsigned long i;
57636-#ifdef  _M_X64
57637-	if (_BitScanForward64(&i, x)) {
57638-		return i + 1;
57639-	}
57640-	return 0;
57641-#else
57642-// Fallback for 32-bit builds where the 64-bit intrinsic is not available;
57643-// assumes the two 32-bit halves are laid out little endian.
57644-	union {
57645-		unsigned __int64 ll;
57646-		unsigned   long l[2];
57647-	} s;
57648-
57649-	s.ll = x;
57650-
57651-	if (_BitScanForward(&i, s.l[0])) {
57652-		return i + 1;
57653-	} else if(_BitScanForward(&i, s.l[1])) {
57654-		return i + 33;
57655-	}
57656-	return 0;
57657-#endif
57658-}
57659-
57660-#else
57661-#  define ffsll(x) __builtin_ffsll(x)
57662-#  define ffsl(x) __builtin_ffsl(x)
57663-#  define ffs(x) __builtin_ffs(x)
57664-#endif
57665-
57666-#endif /* strings_h */
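The shim above fills in the ffs family with the _BitScanForward intrinsics when building under MSVC and falls back to the GCC/Clang builtins otherwise. A small sketch of the semantics it has to preserve (not from the jemalloc tree; <strings.h> is the POSIX home of ffs, with the shim standing in on Windows):

    #include <stdio.h>
    #include <strings.h>   /* declares ffs() on POSIX; the msvc_compat shim stands in on MSVC */

    int main(void) {
        printf("%d\n", ffs(0));     /* 0 - no bit set              */
        printf("%d\n", ffs(1));     /* 1 - lowest set bit is bit 0 */
        printf("%d\n", ffs(8));     /* 4 - lowest set bit is bit 3 */
        printf("%d\n", ffs(0x50));  /* 5 - lowest set bit is bit 4 */
        /* ffsl/ffsll apply the same 1-based convention to long and 64-bit values. */
        return 0;
    }

The 32-bit ffsll fallback above scans the low word first and offsets the result by 32 when the set bit lives in the high word, which matches this convention.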
57667diff --git a/jemalloc/include/msvc_compat/windows_extra.h b/jemalloc/include/msvc_compat/windows_extra.h
57668deleted file mode 100644
57669index a6ebb93..0000000
57670--- a/jemalloc/include/msvc_compat/windows_extra.h
57671+++ /dev/null
57672@@ -1,6 +0,0 @@
57673-#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
57674-#define MSVC_COMPAT_WINDOWS_EXTRA_H
57675-
57676-#include <errno.h>
57677-
57678-#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */
57679diff --git a/jemalloc/jemalloc.pc.in b/jemalloc/jemalloc.pc.in
57680deleted file mode 100644
57681index c428a86..0000000
57682--- a/jemalloc/jemalloc.pc.in
57683+++ /dev/null
57684@@ -1,12 +0,0 @@
57685-prefix=@prefix@
57686-exec_prefix=@exec_prefix@
57687-libdir=@libdir@
57688-includedir=@includedir@
57689-install_suffix=@install_suffix@
57690-
57691-Name: jemalloc
57692-Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support.
57693-URL: http://jemalloc.net/
57694-Version: @jemalloc_version_major@.@jemalloc_version_minor@.@jemalloc_version_bugfix@_@jemalloc_version_nrev@
57695-Cflags: -I${includedir}
57696-Libs: -L${libdir} -ljemalloc${install_suffix}
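jemalloc.pc.in is the pkg-config template; the build substitutes the @...@ placeholders and installs the result as jemalloc.pc. A consumer sketch, assuming a default installation with an empty install_suffix (the source file name, compile line, and use of plain malloc/free here are illustrative only):

    /* build: cc demo.c $(pkg-config --cflags --libs jemalloc) -o demo */
    #include <stdlib.h>
    #include <stdio.h>

    int main(void) {
        void *p = malloc(4096);   /* served by jemalloc once linked via the Libs line above */
        if (p == NULL) {
            return 1;
        }
        printf("allocated 4096 bytes at %p\n", p);
        free(p);
        return 0;
    }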
57697diff --git a/jemalloc/m4/ax_cxx_compile_stdcxx.m4 b/jemalloc/m4/ax_cxx_compile_stdcxx.m4
57698deleted file mode 100644
57699index 43087b2..0000000
57700--- a/jemalloc/m4/ax_cxx_compile_stdcxx.m4
57701+++ /dev/null
57702@@ -1,951 +0,0 @@
57703-# ===========================================================================
57704-#  https://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html
57705-# ===========================================================================
57706-#
57707-# SYNOPSIS
57708-#
57709-#   AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional])
57710-#
57711-# DESCRIPTION
57712-#
57713-#   Check for baseline language coverage in the compiler for the specified
57714-#   version of the C++ standard.  If necessary, add switches to CXX and
57715-#   CXXCPP to enable support.  VERSION may be '11' (for the C++11 standard),
57716-#   '14' (for the C++14 standard) or '17' (for the C++17 standard).
57717-#
57718-#   The second argument, if specified, indicates whether you insist on an
57719-#   extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g.
57720-#   -std=c++11).  If neither is specified, you get whatever works, with
57721-#   preference for an extended mode.
57722-#
57723-#   The third argument, if specified 'mandatory' or if left unspecified,
57724-#   indicates that baseline support for the specified C++ standard is
57725-#   required and that the macro should error out if no mode with that
57726-#   support is found.  If specified 'optional', then configuration proceeds
57727-#   regardless, after defining HAVE_CXX${VERSION} if and only if a
57728-#   supporting mode is found.
57729-#
57730-# LICENSE
57731-#
57732-#   Copyright (c) 2008 Benjamin Kosnik <[email protected]>
57733-#   Copyright (c) 2012 Zack Weinberg <[email protected]>
57734-#   Copyright (c) 2013 Roy Stogner <[email protected]>
57735-#   Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov <[email protected]>
57736-#   Copyright (c) 2015 Paul Norman <[email protected]>
57737-#   Copyright (c) 2015 Moritz Klammler <[email protected]>
57738-#   Copyright (c) 2016, 2018 Krzesimir Nowak <[email protected]>
57739-#   Copyright (c) 2019 Enji Cooper <[email protected]>
57740-#
57741-#   Copying and distribution of this file, with or without modification, are
57742-#   permitted in any medium without royalty provided the copyright notice
57743-#   and this notice are preserved.  This file is offered as-is, without any
57744-#   warranty.
57745-
57746-#serial 11
57747-
57748-dnl  This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro
57749-dnl  (serial version number 13).
57750-
57751-AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl
57752-  m4_if([$1], [11], [ax_cxx_compile_alternatives="11 0x"],
57753-        [$1], [14], [ax_cxx_compile_alternatives="14 1y"],
57754-        [$1], [17], [ax_cxx_compile_alternatives="17 1z"],
57755-        [m4_fatal([invalid first argument `$1' to AX_CXX_COMPILE_STDCXX])])dnl
57756-  m4_if([$2], [], [],
57757-        [$2], [ext], [],
57758-        [$2], [noext], [],
57759-        [m4_fatal([invalid second argument `$2' to AX_CXX_COMPILE_STDCXX])])dnl
57760-  m4_if([$3], [], [ax_cxx_compile_cxx$1_required=true],
57761-        [$3], [mandatory], [ax_cxx_compile_cxx$1_required=true],
57762-        [$3], [optional], [ax_cxx_compile_cxx$1_required=false],
57763-        [m4_fatal([invalid third argument `$3' to AX_CXX_COMPILE_STDCXX])])
57764-  AC_LANG_PUSH([C++])dnl
57765-  ac_success=no
57766-
57767-  m4_if([$2], [noext], [], [dnl
57768-  if test x$ac_success = xno; then
57769-    for alternative in ${ax_cxx_compile_alternatives}; do
57770-      switch="-std=gnu++${alternative}"
57771-      cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
57772-      AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
57773-                     $cachevar,
57774-        [ac_save_CXX="$CXX"
57775-         CXX="$CXX $switch"
57776-         AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
57777-          [eval $cachevar=yes],
57778-          [eval $cachevar=no])
57779-         CXX="$ac_save_CXX"])
57780-      if eval test x\$$cachevar = xyes; then
57781-        CXX="$CXX $switch"
57782-        if test -n "$CXXCPP" ; then
57783-          CXXCPP="$CXXCPP $switch"
57784-        fi
57785-        ac_success=yes
57786-        break
57787-      fi
57788-    done
57789-  fi])
57790-
57791-  m4_if([$2], [ext], [], [dnl
57792-  if test x$ac_success = xno; then
57793-    dnl HP's aCC needs +std=c++11 according to:
57794-    dnl http://h21007.www2.hp.com/portal/download/files/unprot/aCxx/PDF_Release_Notes/769149-001.pdf
57795-    dnl Cray's crayCC needs "-h std=c++11"
57796-    for alternative in ${ax_cxx_compile_alternatives}; do
57797-      for switch in -std=c++${alternative} +std=c++${alternative} "-h std=c++${alternative}"; do
57798-        cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
57799-        AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
57800-                       $cachevar,
57801-          [ac_save_CXX="$CXX"
57802-           CXX="$CXX $switch"
57803-           AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
57804-            [eval $cachevar=yes],
57805-            [eval $cachevar=no])
57806-           CXX="$ac_save_CXX"])
57807-        if eval test x\$$cachevar = xyes; then
57808-          CXX="$CXX $switch"
57809-          if test -n "$CXXCPP" ; then
57810-            CXXCPP="$CXXCPP $switch"
57811-          fi
57812-          ac_success=yes
57813-          break
57814-        fi
57815-      done
57816-      if test x$ac_success = xyes; then
57817-        break
57818-      fi
57819-    done
57820-  fi])
57821-  AC_LANG_POP([C++])
57822-  if test x$ax_cxx_compile_cxx$1_required = xtrue; then
57823-    if test x$ac_success = xno; then
57824-      AC_MSG_ERROR([*** A compiler with support for C++$1 language features is required.])
57825-    fi
57826-  fi
57827-  if test x$ac_success = xno; then
57828-    HAVE_CXX$1=0
57829-    AC_MSG_NOTICE([No compiler with C++$1 support was found])
57830-  else
57831-    HAVE_CXX$1=1
57832-    AC_DEFINE(HAVE_CXX$1,1,
57833-              [define if the compiler supports basic C++$1 syntax])
57834-  fi
57835-  AC_SUBST(HAVE_CXX$1)
57836-])
57837-
57838-
57839-dnl  Test body for checking C++11 support
57840-
57841-m4_define([_AX_CXX_COMPILE_STDCXX_testbody_11],
57842-  _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
57843-)
57844-
57845-
57846-dnl  Test body for checking C++14 support
57847-
57848-m4_define([_AX_CXX_COMPILE_STDCXX_testbody_14],
57849-  _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
57850-  _AX_CXX_COMPILE_STDCXX_testbody_new_in_14
57851-)
57852-
57853-m4_define([_AX_CXX_COMPILE_STDCXX_testbody_17],
57854-  _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
57855-  _AX_CXX_COMPILE_STDCXX_testbody_new_in_14
57856-  _AX_CXX_COMPILE_STDCXX_testbody_new_in_17
57857-)
57858-
57859-dnl  Tests for new features in C++11
57860-
57861-m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_11], [[
57862-
57863-// If the compiler admits that it is not ready for C++11, why torture it?
57864-// Hopefully, this will speed up the test.
57865-
57866-#ifndef __cplusplus
57867-
57868-#error "This is not a C++ compiler"
57869-
57870-#elif __cplusplus < 201103L
57871-
57872-#error "This is not a C++11 compiler"
57873-
57874-#else
57875-
57876-namespace cxx11
57877-{
57878-
57879-  namespace test_static_assert
57880-  {
57881-
57882-    template <typename T>
57883-    struct check
57884-    {
57885-      static_assert(sizeof(int) <= sizeof(T), "not big enough");
57886-    };
57887-
57888-  }
57889-
57890-  namespace test_final_override
57891-  {
57892-
57893-    struct Base
57894-    {
57895-      virtual ~Base() {}
57896-      virtual void f() {}
57897-    };
57898-
57899-    struct Derived : public Base
57900-    {
57901-      virtual ~Derived() override {}
57902-      virtual void f() override {}
57903-    };
57904-
57905-  }
57906-
57907-  namespace test_double_right_angle_brackets
57908-  {
57909-
57910-    template < typename T >
57911-    struct check {};
57912-
57913-    typedef check<void> single_type;
57914-    typedef check<check<void>> double_type;
57915-    typedef check<check<check<void>>> triple_type;
57916-    typedef check<check<check<check<void>>>> quadruple_type;
57917-
57918-  }
57919-
57920-  namespace test_decltype
57921-  {
57922-
57923-    int
57924-    f()
57925-    {
57926-      int a = 1;
57927-      decltype(a) b = 2;
57928-      return a + b;
57929-    }
57930-
57931-  }
57932-
57933-  namespace test_type_deduction
57934-  {
57935-
57936-    template < typename T1, typename T2 >
57937-    struct is_same
57938-    {
57939-      static const bool value = false;
57940-    };
57941-
57942-    template < typename T >
57943-    struct is_same<T, T>
57944-    {
57945-      static const bool value = true;
57946-    };
57947-
57948-    template < typename T1, typename T2 >
57949-    auto
57950-    add(T1 a1, T2 a2) -> decltype(a1 + a2)
57951-    {
57952-      return a1 + a2;
57953-    }
57954-
57955-    int
57956-    test(const int c, volatile int v)
57957-    {
57958-      static_assert(is_same<int, decltype(0)>::value == true, "");
57959-      static_assert(is_same<int, decltype(c)>::value == false, "");
57960-      static_assert(is_same<int, decltype(v)>::value == false, "");
57961-      auto ac = c;
57962-      auto av = v;
57963-      auto sumi = ac + av + 'x';
57964-      auto sumf = ac + av + 1.0;
57965-      static_assert(is_same<int, decltype(ac)>::value == true, "");
57966-      static_assert(is_same<int, decltype(av)>::value == true, "");
57967-      static_assert(is_same<int, decltype(sumi)>::value == true, "");
57968-      static_assert(is_same<int, decltype(sumf)>::value == false, "");
57969-      static_assert(is_same<int, decltype(add(c, v))>::value == true, "");
57970-      return (sumf > 0.0) ? sumi : add(c, v);
57971-    }
57972-
57973-  }
57974-
57975-  namespace test_noexcept
57976-  {
57977-
57978-    int f() { return 0; }
57979-    int g() noexcept { return 0; }
57980-
57981-    static_assert(noexcept(f()) == false, "");
57982-    static_assert(noexcept(g()) == true, "");
57983-
57984-  }
57985-
57986-  namespace test_constexpr
57987-  {
57988-
57989-    template < typename CharT >
57990-    unsigned long constexpr
57991-    strlen_c_r(const CharT *const s, const unsigned long acc) noexcept
57992-    {
57993-      return *s ? strlen_c_r(s + 1, acc + 1) : acc;
57994-    }
57995-
57996-    template < typename CharT >
57997-    unsigned long constexpr
57998-    strlen_c(const CharT *const s) noexcept
57999-    {
58000-      return strlen_c_r(s, 0UL);
58001-    }
58002-
58003-    static_assert(strlen_c("") == 0UL, "");
58004-    static_assert(strlen_c("1") == 1UL, "");
58005-    static_assert(strlen_c("example") == 7UL, "");
58006-    static_assert(strlen_c("another\0example") == 7UL, "");
58007-
58008-  }
58009-
58010-  namespace test_rvalue_references
58011-  {
58012-
58013-    template < int N >
58014-    struct answer
58015-    {
58016-      static constexpr int value = N;
58017-    };
58018-
58019-    answer<1> f(int&)       { return answer<1>(); }
58020-    answer<2> f(const int&) { return answer<2>(); }
58021-    answer<3> f(int&&)      { return answer<3>(); }
58022-
58023-    void
58024-    test()
58025-    {
58026-      int i = 0;
58027-      const int c = 0;
58028-      static_assert(decltype(f(i))::value == 1, "");
58029-      static_assert(decltype(f(c))::value == 2, "");
58030-      static_assert(decltype(f(0))::value == 3, "");
58031-    }
58032-
58033-  }
58034-
58035-  namespace test_uniform_initialization
58036-  {
58037-
58038-    struct test
58039-    {
58040-      static const int zero {};
58041-      static const int one {1};
58042-    };
58043-
58044-    static_assert(test::zero == 0, "");
58045-    static_assert(test::one == 1, "");
58046-
58047-  }
58048-
58049-  namespace test_lambdas
58050-  {
58051-
58052-    void
58053-    test1()
58054-    {
58055-      auto lambda1 = [](){};
58056-      auto lambda2 = lambda1;
58057-      lambda1();
58058-      lambda2();
58059-    }
58060-
58061-    int
58062-    test2()
58063-    {
58064-      auto a = [](int i, int j){ return i + j; }(1, 2);
58065-      auto b = []() -> int { return '0'; }();
58066-      auto c = [=](){ return a + b; }();
58067-      auto d = [&](){ return c; }();
58068-      auto e = [a, &b](int x) mutable {
58069-        const auto identity = [](int y){ return y; };
58070-        for (auto i = 0; i < a; ++i)
58071-          a += b--;
58072-        return x + identity(a + b);
58073-      }(0);
58074-      return a + b + c + d + e;
58075-    }
58076-
58077-    int
58078-    test3()
58079-    {
58080-      const auto nullary = [](){ return 0; };
58081-      const auto unary = [](int x){ return x; };
58082-      using nullary_t = decltype(nullary);
58083-      using unary_t = decltype(unary);
58084-      const auto higher1st = [](nullary_t f){ return f(); };
58085-      const auto higher2nd = [unary](nullary_t f1){
58086-        return [unary, f1](unary_t f2){ return f2(unary(f1())); };
58087-      };
58088-      return higher1st(nullary) + higher2nd(nullary)(unary);
58089-    }
58090-
58091-  }
58092-
58093-  namespace test_variadic_templates
58094-  {
58095-
58096-    template <int...>
58097-    struct sum;
58098-
58099-    template <int N0, int... N1toN>
58100-    struct sum<N0, N1toN...>
58101-    {
58102-      static constexpr auto value = N0 + sum<N1toN...>::value;
58103-    };
58104-
58105-    template <>
58106-    struct sum<>
58107-    {
58108-      static constexpr auto value = 0;
58109-    };
58110-
58111-    static_assert(sum<>::value == 0, "");
58112-    static_assert(sum<1>::value == 1, "");
58113-    static_assert(sum<23>::value == 23, "");
58114-    static_assert(sum<1, 2>::value == 3, "");
58115-    static_assert(sum<5, 5, 11>::value == 21, "");
58116-    static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, "");
58117-
58118-  }
58119-
58120-  // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae
58121-// Clang 3.1 fails with headers of libstdc++ 4.8.3 when using std::function
58122-  // because of this.
58123-  namespace test_template_alias_sfinae
58124-  {
58125-
58126-    struct foo {};
58127-
58128-    template<typename T>
58129-    using member = typename T::member_type;
58130-
58131-    template<typename T>
58132-    void func(...) {}
58133-
58134-    template<typename T>
58135-    void func(member<T>*) {}
58136-
58137-    void test();
58138-
58139-    void test() { func<foo>(0); }
58140-
58141-  }
58142-
58143-}  // namespace cxx11
58144-
58145-#endif  // __cplusplus >= 201103L
58146-
58147-]])
58148-
58149-
58150-dnl  Tests for new features in C++14
58151-
58152-m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_14], [[
58153-
58154-// If the compiler admits that it is not ready for C++14, why torture it?
58155-// Hopefully, this will speed up the test.
58156-
58157-#ifndef __cplusplus
58158-
58159-#error "This is not a C++ compiler"
58160-
58161-#elif __cplusplus < 201402L
58162-
58163-#error "This is not a C++14 compiler"
58164-
58165-#else
58166-
58167-namespace cxx14
58168-{
58169-
58170-  namespace test_polymorphic_lambdas
58171-  {
58172-
58173-    int
58174-    test()
58175-    {
58176-      const auto lambda = [](auto&&... args){
58177-        const auto istiny = [](auto x){
58178-          return (sizeof(x) == 1UL) ? 1 : 0;
58179-        };
58180-        const int aretiny[] = { istiny(args)... };
58181-        return aretiny[0];
58182-      };
58183-      return lambda(1, 1L, 1.0f, '1');
58184-    }
58185-
58186-  }
58187-
58188-  namespace test_binary_literals
58189-  {
58190-
58191-    constexpr auto ivii = 0b0000000000101010;
58192-    static_assert(ivii == 42, "wrong value");
58193-
58194-  }
58195-
58196-  namespace test_generalized_constexpr
58197-  {
58198-
58199-    template < typename CharT >
58200-    constexpr unsigned long
58201-    strlen_c(const CharT *const s) noexcept
58202-    {
58203-      auto length = 0UL;
58204-      for (auto p = s; *p; ++p)
58205-        ++length;
58206-      return length;
58207-    }
58208-
58209-    static_assert(strlen_c("") == 0UL, "");
58210-    static_assert(strlen_c("x") == 1UL, "");
58211-    static_assert(strlen_c("test") == 4UL, "");
58212-    static_assert(strlen_c("another\0test") == 7UL, "");
58213-
58214-  }
58215-
58216-  namespace test_lambda_init_capture
58217-  {
58218-
58219-    int
58220-    test()
58221-    {
58222-      auto x = 0;
58223-      const auto lambda1 = [a = x](int b){ return a + b; };
58224-      const auto lambda2 = [a = lambda1(x)](){ return a; };
58225-      return lambda2();
58226-    }
58227-
58228-  }
58229-
58230-  namespace test_digit_separators
58231-  {
58232-
58233-    constexpr auto hundred_million = 100'000'000;
58234-    static_assert(hundred_million == 100000000, "wrong value");
58235-
58236-  }
58237-
58238-  namespace test_return_type_deduction
58239-  {
58240-
58241-    auto f(int& x) { return x; }
58242-    decltype(auto) g(int& x) { return x; }
58243-
58244-    template < typename T1, typename T2 >
58245-    struct is_same
58246-    {
58247-      static constexpr auto value = false;
58248-    };
58249-
58250-    template < typename T >
58251-    struct is_same<T, T>
58252-    {
58253-      static constexpr auto value = true;
58254-    };
58255-
58256-    int
58257-    test()
58258-    {
58259-      auto x = 0;
58260-      static_assert(is_same<int, decltype(f(x))>::value, "");
58261-      static_assert(is_same<int&, decltype(g(x))>::value, "");
58262-      return x;
58263-    }
58264-
58265-  }
58266-
58267-}  // namespace cxx14
58268-
58269-#endif  // __cplusplus >= 201402L
58270-
58271-]])
58272-
58273-
58274-dnl  Tests for new features in C++17
58275-
58276-m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_17], [[
58277-
58278-// If the compiler admits that it is not ready for C++17, why torture it?
58279-// Hopefully, this will speed up the test.
58280-
58281-#ifndef __cplusplus
58282-
58283-#error "This is not a C++ compiler"
58284-
58285-#elif __cplusplus < 201703L
58286-
58287-#error "This is not a C++17 compiler"
58288-
58289-#else
58290-
58291-#include <initializer_list>
58292-#include <utility>
58293-#include <type_traits>
58294-
58295-namespace cxx17
58296-{
58297-
58298-  namespace test_constexpr_lambdas
58299-  {
58300-
58301-    constexpr int foo = [](){return 42;}();
58302-
58303-  }
58304-
58305-  namespace test::nested_namespace::definitions
58306-  {
58307-
58308-  }
58309-
58310-  namespace test_fold_expression
58311-  {
58312-
58313-    template<typename... Args>
58314-    int multiply(Args... args)
58315-    {
58316-      return (args * ... * 1);
58317-    }
58318-
58319-    template<typename... Args>
58320-    bool all(Args... args)
58321-    {
58322-      return (args && ...);
58323-    }
58324-
58325-  }
58326-
58327-  namespace test_extended_static_assert
58328-  {
58329-
58330-    static_assert (true);
58331-
58332-  }
58333-
58334-  namespace test_auto_brace_init_list
58335-  {
58336-
58337-    auto foo = {5};
58338-    auto bar {5};
58339-
58340-    static_assert(std::is_same<std::initializer_list<int>, decltype(foo)>::value);
58341-    static_assert(std::is_same<int, decltype(bar)>::value);
58342-  }
58343-
58344-  namespace test_typename_in_template_template_parameter
58345-  {
58346-
58347-    template<template<typename> typename X> struct D;
58348-
58349-  }
58350-
58351-  namespace test_fallthrough_nodiscard_maybe_unused_attributes
58352-  {
58353-
58354-    int f1()
58355-    {
58356-      return 42;
58357-    }
58358-
58359-    [[nodiscard]] int f2()
58360-    {
58361-      [[maybe_unused]] auto unused = f1();
58362-
58363-      switch (f1())
58364-      {
58365-      case 17:
58366-        f1();
58367-        [[fallthrough]];
58368-      case 42:
58369-        f1();
58370-      }
58371-      return f1();
58372-    }
58373-
58374-  }
58375-
58376-  namespace test_extended_aggregate_initialization
58377-  {
58378-
58379-    struct base1
58380-    {
58381-      int b1, b2 = 42;
58382-    };
58383-
58384-    struct base2
58385-    {
58386-      base2() {
58387-        b3 = 42;
58388-      }
58389-      int b3;
58390-    };
58391-
58392-    struct derived : base1, base2
58393-    {
58394-        int d;
58395-    };
58396-
58397-    derived d1 {{1, 2}, {}, 4};  // full initialization
58398-    derived d2 {{}, {}, 4};      // value-initialized bases
58399-
58400-  }
58401-
58402-  namespace test_general_range_based_for_loop
58403-  {
58404-
58405-    struct iter
58406-    {
58407-      int i;
58408-
58409-      int& operator* ()
58410-      {
58411-        return i;
58412-      }
58413-
58414-      const int& operator* () const
58415-      {
58416-        return i;
58417-      }
58418-
58419-      iter& operator++()
58420-      {
58421-        ++i;
58422-        return *this;
58423-      }
58424-    };
58425-
58426-    struct sentinel
58427-    {
58428-      int i;
58429-    };
58430-
58431-    bool operator== (const iter& i, const sentinel& s)
58432-    {
58433-      return i.i == s.i;
58434-    }
58435-
58436-    bool operator!= (const iter& i, const sentinel& s)
58437-    {
58438-      return !(i == s);
58439-    }
58440-
58441-    struct range
58442-    {
58443-      iter begin() const
58444-      {
58445-        return {0};
58446-      }
58447-
58448-      sentinel end() const
58449-      {
58450-        return {5};
58451-      }
58452-    };
58453-
58454-    void f()
58455-    {
58456-      range r {};
58457-
58458-      for (auto i : r)
58459-      {
58460-        [[maybe_unused]] auto v = i;
58461-      }
58462-    }
58463-
58464-  }
58465-
58466-  namespace test_lambda_capture_asterisk_this_by_value
58467-  {
58468-
58469-    struct t
58470-    {
58471-      int i;
58472-      int foo()
58473-      {
58474-        return [*this]()
58475-        {
58476-          return i;
58477-        }();
58478-      }
58479-    };
58480-
58481-  }
58482-
58483-  namespace test_enum_class_construction
58484-  {
58485-
58486-    enum class byte : unsigned char
58487-    {};
58488-
58489-    byte foo {42};
58490-
58491-  }
58492-
58493-  namespace test_constexpr_if
58494-  {
58495-
58496-    template <bool cond>
58497-    int f ()
58498-    {
58499-      if constexpr(cond)
58500-      {
58501-        return 13;
58502-      }
58503-      else
58504-      {
58505-        return 42;
58506-      }
58507-    }
58508-
58509-  }
58510-
58511-  namespace test_selection_statement_with_initializer
58512-  {
58513-
58514-    int f()
58515-    {
58516-      return 13;
58517-    }
58518-
58519-    int f2()
58520-    {
58521-      if (auto i = f(); i > 0)
58522-      {
58523-        return 3;
58524-      }
58525-
58526-      switch (auto i = f(); i + 4)
58527-      {
58528-      case 17:
58529-        return 2;
58530-
58531-      default:
58532-        return 1;
58533-      }
58534-    }
58535-
58536-  }
58537-
58538-  namespace test_template_argument_deduction_for_class_templates
58539-  {
58540-
58541-    template <typename T1, typename T2>
58542-    struct pair
58543-    {
58544-      pair (T1 p1, T2 p2)
58545-        : m1 {p1},
58546-          m2 {p2}
58547-      {}
58548-
58549-      T1 m1;
58550-      T2 m2;
58551-    };
58552-
58553-    void f()
58554-    {
58555-      [[maybe_unused]] auto p = pair{13, 42u};
58556-    }
58557-
58558-  }
58559-
58560-  namespace test_non_type_auto_template_parameters
58561-  {
58562-
58563-    template <auto n>
58564-    struct B
58565-    {};
58566-
58567-    B<5> b1;
58568-    B<'a'> b2;
58569-
58570-  }
58571-
58572-  namespace test_structured_bindings
58573-  {
58574-
58575-    int arr[2] = { 1, 2 };
58576-    std::pair<int, int> pr = { 1, 2 };
58577-
58578-    auto f1() -> int(&)[2]
58579-    {
58580-      return arr;
58581-    }
58582-
58583-    auto f2() -> std::pair<int, int>&
58584-    {
58585-      return pr;
58586-    }
58587-
58588-    struct S
58589-    {
58590-      int x1 : 2;
58591-      volatile double y1;
58592-    };
58593-
58594-    S f3()
58595-    {
58596-      return {};
58597-    }
58598-
58599-    auto [ x1, y1 ] = f1();
58600-    auto& [ xr1, yr1 ] = f1();
58601-    auto [ x2, y2 ] = f2();
58602-    auto& [ xr2, yr2 ] = f2();
58603-    const auto [ x3, y3 ] = f3();
58604-
58605-  }
58606-
58607-  namespace test_exception_spec_type_system
58608-  {
58609-
58610-    struct Good {};
58611-    struct Bad {};
58612-
58613-    void g1() noexcept;
58614-    void g2();
58615-
58616-    template<typename T>
58617-    Bad
58618-    f(T*, T*);
58619-
58620-    template<typename T1, typename T2>
58621-    Good
58622-    f(T1*, T2*);
58623-
58624-    static_assert (std::is_same_v<Good, decltype(f(g1, g2))>);
58625-
58626-  }
58627-
58628-  namespace test_inline_variables
58629-  {
58630-
58631-    template<class T> void f(T)
58632-    {}
58633-
58634-    template<class T> inline T g(T)
58635-    {
58636-      return T{};
58637-    }
58638-
58639-    template<> inline void f<>(int)
58640-    {}
58641-
58642-    template<> int g<>(int)
58643-    {
58644-      return 5;
58645-    }
58646-
58647-  }
58648-
58649-}  // namespace cxx17
58650-
58651-#endif  // __cplusplus < 201703L
58652-
58653-]])
58654diff --git a/jemalloc/msvc/ReadMe.txt b/jemalloc/msvc/ReadMe.txt
58655deleted file mode 100644
58656index 633a7d4..0000000
58657--- a/jemalloc/msvc/ReadMe.txt
58658+++ /dev/null
58659@@ -1,23 +0,0 @@
58660-
58661-How to build jemalloc for Windows
58662-=================================
58663-
58664-1. Install Cygwin with at least the following packages:
58665-   * autoconf
58666-   * autogen
58667-   * gawk
58668-   * grep
58669-   * sed
58670-
58671-2. Install Visual Studio 2015 or 2017 with Visual C++
58672-
58673-3. Add Cygwin\bin to the PATH environment variable
58674-
58675-4. Open "x64 Native Tools Command Prompt for VS 2017"
58676-   (note: x86/x64 doesn't matter at this point)
58677-
58678-5. Generate header files:
58679-   sh -c "CC=cl ./autogen.sh"
58680-
58681-6. Now the project can be opened and built in Visual Studio:
58682-   msvc\jemalloc_vc2017.sln
58683diff --git a/jemalloc/msvc/jemalloc_vc2015.sln b/jemalloc/msvc/jemalloc_vc2015.sln
58684deleted file mode 100644
58685index aedd5e5..0000000
58686--- a/jemalloc/msvc/jemalloc_vc2015.sln
58687+++ /dev/null
58688@@ -1,63 +0,0 @@
58689-
58690-Microsoft Visual Studio Solution File, Format Version 12.00
58691-# Visual Studio 14
58692-VisualStudioVersion = 14.0.24720.0
58693-MinimumVisualStudioVersion = 10.0.40219.1
58694-Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{70A99006-6DE9-472B-8F83-4CEE6C616DF3}"
58695-	ProjectSection(SolutionItems) = preProject
58696-		ReadMe.txt = ReadMe.txt
58697-	EndProjectSection
58698-EndProject
58699-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jemalloc", "projects\vc2015\jemalloc\jemalloc.vcxproj", "{8D6BB292-9E1C-413D-9F98-4864BDC1514A}"
58700-EndProject
58701-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_threads", "projects\vc2015\test_threads\test_threads.vcxproj", "{09028CFD-4EB7-491D-869C-0708DB97ED44}"
58702-EndProject
58703-Global
58704-	GlobalSection(SolutionConfigurationPlatforms) = preSolution
58705-		Debug|x64 = Debug|x64
58706-		Debug|x86 = Debug|x86
58707-		Debug-static|x64 = Debug-static|x64
58708-		Debug-static|x86 = Debug-static|x86
58709-		Release|x64 = Release|x64
58710-		Release|x86 = Release|x86
58711-		Release-static|x64 = Release-static|x64
58712-		Release-static|x86 = Release-static|x86
58713-	EndGlobalSection
58714-	GlobalSection(ProjectConfigurationPlatforms) = postSolution
58715-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.ActiveCfg = Debug|x64
58716-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.Build.0 = Debug|x64
58717-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.ActiveCfg = Debug|Win32
58718-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.Build.0 = Debug|Win32
58719-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.ActiveCfg = Debug-static|x64
58720-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.Build.0 = Debug-static|x64
58721-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.ActiveCfg = Debug-static|Win32
58722-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.Build.0 = Debug-static|Win32
58723-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.ActiveCfg = Release|x64
58724-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.Build.0 = Release|x64
58725-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.ActiveCfg = Release|Win32
58726-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.Build.0 = Release|Win32
58727-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.ActiveCfg = Release-static|x64
58728-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.Build.0 = Release-static|x64
58729-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.ActiveCfg = Release-static|Win32
58730-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.Build.0 = Release-static|Win32
58731-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.ActiveCfg = Debug|x64
58732-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.Build.0 = Debug|x64
58733-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.ActiveCfg = Debug|Win32
58734-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.Build.0 = Debug|Win32
58735-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.ActiveCfg = Debug-static|x64
58736-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.Build.0 = Debug-static|x64
58737-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.ActiveCfg = Debug-static|Win32
58738-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.Build.0 = Debug-static|Win32
58739-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.ActiveCfg = Release|x64
58740-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.Build.0 = Release|x64
58741-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.ActiveCfg = Release|Win32
58742-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.Build.0 = Release|Win32
58743-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.ActiveCfg = Release-static|x64
58744-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.Build.0 = Release-static|x64
58745-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.ActiveCfg = Release-static|Win32
58746-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.Build.0 = Release-static|Win32
58747-	EndGlobalSection
58748-	GlobalSection(SolutionProperties) = preSolution
58749-		HideSolutionNode = FALSE
58750-	EndGlobalSection
58751-EndGlobal
58752diff --git a/jemalloc/msvc/jemalloc_vc2017.sln b/jemalloc/msvc/jemalloc_vc2017.sln
58753deleted file mode 100644
58754index c22fcb4..0000000
58755--- a/jemalloc/msvc/jemalloc_vc2017.sln
58756+++ /dev/null
58757@@ -1,63 +0,0 @@
58758-
58759-Microsoft Visual Studio Solution File, Format Version 12.00
58760-# Visual Studio 14
58761-VisualStudioVersion = 14.0.24720.0
58762-MinimumVisualStudioVersion = 10.0.40219.1
58763-Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{70A99006-6DE9-472B-8F83-4CEE6C616DF3}"
58764-	ProjectSection(SolutionItems) = preProject
58765-		ReadMe.txt = ReadMe.txt
58766-	EndProjectSection
58767-EndProject
58768-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jemalloc", "projects\vc2017\jemalloc\jemalloc.vcxproj", "{8D6BB292-9E1C-413D-9F98-4864BDC1514A}"
58769-EndProject
58770-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_threads", "projects\vc2017\test_threads\test_threads.vcxproj", "{09028CFD-4EB7-491D-869C-0708DB97ED44}"
58771-EndProject
58772-Global
58773-	GlobalSection(SolutionConfigurationPlatforms) = preSolution
58774-		Debug|x64 = Debug|x64
58775-		Debug|x86 = Debug|x86
58776-		Debug-static|x64 = Debug-static|x64
58777-		Debug-static|x86 = Debug-static|x86
58778-		Release|x64 = Release|x64
58779-		Release|x86 = Release|x86
58780-		Release-static|x64 = Release-static|x64
58781-		Release-static|x86 = Release-static|x86
58782-	EndGlobalSection
58783-	GlobalSection(ProjectConfigurationPlatforms) = postSolution
58784-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.ActiveCfg = Debug|x64
58785-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.Build.0 = Debug|x64
58786-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.ActiveCfg = Debug|Win32
58787-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.Build.0 = Debug|Win32
58788-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.ActiveCfg = Debug-static|x64
58789-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.Build.0 = Debug-static|x64
58790-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.ActiveCfg = Debug-static|Win32
58791-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.Build.0 = Debug-static|Win32
58792-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.ActiveCfg = Release|x64
58793-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.Build.0 = Release|x64
58794-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.ActiveCfg = Release|Win32
58795-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.Build.0 = Release|Win32
58796-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.ActiveCfg = Release-static|x64
58797-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.Build.0 = Release-static|x64
58798-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.ActiveCfg = Release-static|Win32
58799-		{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.Build.0 = Release-static|Win32
58800-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.ActiveCfg = Debug|x64
58801-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.Build.0 = Debug|x64
58802-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.ActiveCfg = Debug|Win32
58803-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.Build.0 = Debug|Win32
58804-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.ActiveCfg = Debug-static|x64
58805-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.Build.0 = Debug-static|x64
58806-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.ActiveCfg = Debug-static|Win32
58807-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.Build.0 = Debug-static|Win32
58808-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.ActiveCfg = Release|x64
58809-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.Build.0 = Release|x64
58810-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.ActiveCfg = Release|Win32
58811-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.Build.0 = Release|Win32
58812-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.ActiveCfg = Release-static|x64
58813-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.Build.0 = Release-static|x64
58814-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.ActiveCfg = Release-static|Win32
58815-		{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.Build.0 = Release-static|Win32
58816-	EndGlobalSection
58817-	GlobalSection(SolutionProperties) = preSolution
58818-		HideSolutionNode = FALSE
58819-	EndGlobalSection
58820-EndGlobal
58821diff --git a/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj b/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
58822deleted file mode 100644
58823index ec028a1..0000000
58824--- a/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj
58825+++ /dev/null
58826@@ -1,380 +0,0 @@
58827-<?xml version="1.0" encoding="utf-8"?>
58828-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
58829-  <ItemGroup Label="ProjectConfigurations">
58830-    <ProjectConfiguration Include="Debug-static|Win32">
58831-      <Configuration>Debug-static</Configuration>
58832-      <Platform>Win32</Platform>
58833-    </ProjectConfiguration>
58834-    <ProjectConfiguration Include="Debug-static|x64">
58835-      <Configuration>Debug-static</Configuration>
58836-      <Platform>x64</Platform>
58837-    </ProjectConfiguration>
58838-    <ProjectConfiguration Include="Debug|Win32">
58839-      <Configuration>Debug</Configuration>
58840-      <Platform>Win32</Platform>
58841-    </ProjectConfiguration>
58842-    <ProjectConfiguration Include="Release-static|Win32">
58843-      <Configuration>Release-static</Configuration>
58844-      <Platform>Win32</Platform>
58845-    </ProjectConfiguration>
58846-    <ProjectConfiguration Include="Release-static|x64">
58847-      <Configuration>Release-static</Configuration>
58848-      <Platform>x64</Platform>
58849-    </ProjectConfiguration>
58850-    <ProjectConfiguration Include="Release|Win32">
58851-      <Configuration>Release</Configuration>
58852-      <Platform>Win32</Platform>
58853-    </ProjectConfiguration>
58854-    <ProjectConfiguration Include="Debug|x64">
58855-      <Configuration>Debug</Configuration>
58856-      <Platform>x64</Platform>
58857-    </ProjectConfiguration>
58858-    <ProjectConfiguration Include="Release|x64">
58859-      <Configuration>Release</Configuration>
58860-      <Platform>x64</Platform>
58861-    </ProjectConfiguration>
58862-  </ItemGroup>
58863-  <ItemGroup>
58864-    <ClCompile Include="..\..\..\..\src\arena.c" />
58865-    <ClCompile Include="..\..\..\..\src\background_thread.c" />
58866-    <ClCompile Include="..\..\..\..\src\base.c" />
58867-    <ClCompile Include="..\..\..\..\src\bin.c" />
58868-    <ClCompile Include="..\..\..\..\src\bin_info.c" />
58869-    <ClCompile Include="..\..\..\..\src\bitmap.c" />
58870-    <ClCompile Include="..\..\..\..\src\buf_writer.c" />
58871-    <ClCompile Include="..\..\..\..\src\cache_bin.c" />
58872-    <ClCompile Include="..\..\..\..\src\ckh.c" />
58873-    <ClCompile Include="..\..\..\..\src\counter.c" />
58874-    <ClCompile Include="..\..\..\..\src\ctl.c" />
58875-    <ClCompile Include="..\..\..\..\src\decay.c" />
58876-    <ClCompile Include="..\..\..\..\src\div.c" />
58877-    <ClCompile Include="..\..\..\..\src\ecache.c" />
58878-    <ClCompile Include="..\..\..\..\src\edata.c" />
58879-    <ClCompile Include="..\..\..\..\src\edata_cache.c" />
58880-    <ClCompile Include="..\..\..\..\src\ehooks.c" />
58881-    <ClCompile Include="..\..\..\..\src\emap.c" />
58882-    <ClCompile Include="..\..\..\..\src\eset.c" />
58883-    <ClCompile Include="..\..\..\..\src\exp_grow.c" />
58884-    <ClCompile Include="..\..\..\..\src\extent.c" />
58885-    <ClCompile Include="..\..\..\..\src\extent_dss.c" />
58886-    <ClCompile Include="..\..\..\..\src\extent_mmap.c" />
58887-    <ClCompile Include="..\..\..\..\src\fxp.c" />
58888-    <ClCompile Include="..\..\..\..\src\hook.c" />
58889-    <ClCompile Include="..\..\..\..\src\hpa.c" />
58890-    <ClCompile Include="..\..\..\..\src\hpa_hooks.c" />
58891-    <ClCompile Include="..\..\..\..\src\hpdata.c" />
58892-    <ClCompile Include="..\..\..\..\src\inspect.c" />
58893-    <ClCompile Include="..\..\..\..\src\jemalloc.c" />
58894-    <ClCompile Include="..\..\..\..\src\large.c" />
58895-    <ClCompile Include="..\..\..\..\src\log.c" />
58896-    <ClCompile Include="..\..\..\..\src\malloc_io.c" />
58897-    <ClCompile Include="..\..\..\..\src\mutex.c" />
58898-    <ClCompile Include="..\..\..\..\src\nstime.c" />
58899-    <ClCompile Include="..\..\..\..\src\pa.c" />
58900-    <ClCompile Include="..\..\..\..\src\pa_extra.c" />
58901-    <ClCompile Include="..\..\..\..\src\pai.c" />
58902-    <ClCompile Include="..\..\..\..\src\pac.c" />
58903-    <ClCompile Include="..\..\..\..\src\pages.c" />
58904-    <ClCompile Include="..\..\..\..\src\peak_event.c" />
58905-    <ClCompile Include="..\..\..\..\src\prof.c" />
58906-    <ClCompile Include="..\..\..\..\src\prof_data.c" />
58907-    <ClCompile Include="..\..\..\..\src\prof_log.c" />
58908-    <ClCompile Include="..\..\..\..\src\prof_recent.c" />
58909-    <ClCompile Include="..\..\..\..\src\prof_stats.c" />
58910-    <ClCompile Include="..\..\..\..\src\prof_sys.c" />
58911-    <ClCompile Include="..\..\..\..\src\psset.c" />
58912-    <ClCompile Include="..\..\..\..\src\rtree.c" />
58913-    <ClCompile Include="..\..\..\..\src\safety_check.c" />
58914-    <ClCompile Include="..\..\..\..\src\san.c" />
58915-    <ClCompile Include="..\..\..\..\src\san_bump.c" />
58916-    <ClCompile Include="..\..\..\..\src\sc.c" />
58917-    <ClCompile Include="..\..\..\..\src\sec.c" />
58918-    <ClCompile Include="..\..\..\..\src\stats.c" />
58919-    <ClCompile Include="..\..\..\..\src\sz.c" />
58920-    <ClCompile Include="..\..\..\..\src\tcache.c" />
58921-    <ClCompile Include="..\..\..\..\src\test_hooks.c" />
58922-    <ClCompile Include="..\..\..\..\src\thread_event.c" />
58923-    <ClCompile Include="..\..\..\..\src\ticker.c" />
58924-    <ClCompile Include="..\..\..\..\src\tsd.c" />
58925-    <ClCompile Include="..\..\..\..\src\witness.c" />
58926-  </ItemGroup>
58927-  <PropertyGroup Label="Globals">
58928-    <ProjectGuid>{8D6BB292-9E1C-413D-9F98-4864BDC1514A}</ProjectGuid>
58929-    <Keyword>Win32Proj</Keyword>
58930-    <RootNamespace>jemalloc</RootNamespace>
58931-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
58932-  </PropertyGroup>
58933-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
58934-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
58935-    <ConfigurationType>DynamicLibrary</ConfigurationType>
58936-    <UseDebugLibraries>true</UseDebugLibraries>
58937-    <PlatformToolset>v140</PlatformToolset>
58938-    <CharacterSet>MultiByte</CharacterSet>
58939-  </PropertyGroup>
58940-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
58941-    <ConfigurationType>StaticLibrary</ConfigurationType>
58942-    <UseDebugLibraries>true</UseDebugLibraries>
58943-    <PlatformToolset>v140</PlatformToolset>
58944-    <CharacterSet>MultiByte</CharacterSet>
58945-  </PropertyGroup>
58946-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
58947-    <ConfigurationType>DynamicLibrary</ConfigurationType>
58948-    <UseDebugLibraries>false</UseDebugLibraries>
58949-    <PlatformToolset>v140</PlatformToolset>
58950-    <WholeProgramOptimization>true</WholeProgramOptimization>
58951-    <CharacterSet>MultiByte</CharacterSet>
58952-  </PropertyGroup>
58953-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
58954-    <ConfigurationType>StaticLibrary</ConfigurationType>
58955-    <UseDebugLibraries>false</UseDebugLibraries>
58956-    <PlatformToolset>v140</PlatformToolset>
58957-    <WholeProgramOptimization>true</WholeProgramOptimization>
58958-    <CharacterSet>MultiByte</CharacterSet>
58959-  </PropertyGroup>
58960-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
58961-    <ConfigurationType>DynamicLibrary</ConfigurationType>
58962-    <UseDebugLibraries>true</UseDebugLibraries>
58963-    <PlatformToolset>v140</PlatformToolset>
58964-    <CharacterSet>MultiByte</CharacterSet>
58965-  </PropertyGroup>
58966-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
58967-    <ConfigurationType>StaticLibrary</ConfigurationType>
58968-    <UseDebugLibraries>true</UseDebugLibraries>
58969-    <PlatformToolset>v140</PlatformToolset>
58970-    <CharacterSet>MultiByte</CharacterSet>
58971-  </PropertyGroup>
58972-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
58973-    <ConfigurationType>DynamicLibrary</ConfigurationType>
58974-    <UseDebugLibraries>false</UseDebugLibraries>
58975-    <PlatformToolset>v140</PlatformToolset>
58976-    <WholeProgramOptimization>true</WholeProgramOptimization>
58977-    <CharacterSet>MultiByte</CharacterSet>
58978-  </PropertyGroup>
58979-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
58980-    <ConfigurationType>StaticLibrary</ConfigurationType>
58981-    <UseDebugLibraries>false</UseDebugLibraries>
58982-    <PlatformToolset>v140</PlatformToolset>
58983-    <WholeProgramOptimization>true</WholeProgramOptimization>
58984-    <CharacterSet>MultiByte</CharacterSet>
58985-  </PropertyGroup>
58986-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
58987-  <ImportGroup Label="ExtensionSettings">
58988-  </ImportGroup>
58989-  <ImportGroup Label="Shared">
58990-  </ImportGroup>
58991-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
58992-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
58993-  </ImportGroup>
58994-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
58995-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
58996-  </ImportGroup>
58997-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
58998-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
58999-  </ImportGroup>
59000-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
59001-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59002-  </ImportGroup>
59003-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
59004-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59005-  </ImportGroup>
59006-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
59007-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59008-  </ImportGroup>
59009-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
59010-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59011-  </ImportGroup>
59012-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
59013-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59014-  </ImportGroup>
59015-  <PropertyGroup Label="UserMacros" />
59016-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
59017-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59018-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59019-    <TargetName>$(ProjectName)d</TargetName>
59020-  </PropertyGroup>
59021-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
59022-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59023-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59024-    <TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
59025-  </PropertyGroup>
59026-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
59027-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59028-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59029-  </PropertyGroup>
59030-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
59031-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59032-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59033-    <TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
59034-  </PropertyGroup>
59035-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
59036-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59037-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59038-    <TargetName>$(ProjectName)d</TargetName>
59039-  </PropertyGroup>
59040-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
59041-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59042-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59043-    <TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
59044-  </PropertyGroup>
59045-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
59046-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59047-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59048-  </PropertyGroup>
59049-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
59050-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59051-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59052-    <TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
59053-  </PropertyGroup>
59054-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
59055-    <ClCompile>
59056-      <PrecompiledHeader>
59057-      </PrecompiledHeader>
59058-      <WarningLevel>Level3</WarningLevel>
59059-      <Optimization>Disabled</Optimization>
59060-      <PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
59061-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
59062-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
59063-      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
59064-    </ClCompile>
59065-    <Link>
59066-      <SubSystem>Windows</SubSystem>
59067-      <GenerateDebugInformation>true</GenerateDebugInformation>
59068-    </Link>
59069-  </ItemDefinitionGroup>
59070-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
59071-    <ClCompile>
59072-      <PrecompiledHeader>
59073-      </PrecompiledHeader>
59074-      <WarningLevel>Level3</WarningLevel>
59075-      <Optimization>Disabled</Optimization>
59076-      <PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
59077-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
59078-      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
59079-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
59080-      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
59081-    </ClCompile>
59082-    <Link>
59083-      <SubSystem>Windows</SubSystem>
59084-      <GenerateDebugInformation>true</GenerateDebugInformation>
59085-    </Link>
59086-  </ItemDefinitionGroup>
59087-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
59088-    <ClCompile>
59089-      <PrecompiledHeader>
59090-      </PrecompiledHeader>
59091-      <WarningLevel>Level3</WarningLevel>
59092-      <Optimization>Disabled</Optimization>
59093-      <PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
59094-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
59095-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
59096-      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
59097-    </ClCompile>
59098-    <Link>
59099-      <SubSystem>Windows</SubSystem>
59100-      <GenerateDebugInformation>true</GenerateDebugInformation>
59101-    </Link>
59102-  </ItemDefinitionGroup>
59103-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
59104-    <ClCompile>
59105-      <PrecompiledHeader>
59106-      </PrecompiledHeader>
59107-      <WarningLevel>Level3</WarningLevel>
59108-      <Optimization>Disabled</Optimization>
59109-      <PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
59110-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
59111-      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
59112-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
59113-      <DebugInformationFormat>OldStyle</DebugInformationFormat>
59114-      <MinimalRebuild>false</MinimalRebuild>
59115-    </ClCompile>
59116-    <Link>
59117-      <SubSystem>Windows</SubSystem>
59118-      <GenerateDebugInformation>true</GenerateDebugInformation>
59119-    </Link>
59120-  </ItemDefinitionGroup>
59121-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
59122-    <ClCompile>
59123-      <WarningLevel>Level3</WarningLevel>
59124-      <PrecompiledHeader>
59125-      </PrecompiledHeader>
59126-      <Optimization>MaxSpeed</Optimization>
59127-      <FunctionLevelLinking>true</FunctionLevelLinking>
59128-      <IntrinsicFunctions>true</IntrinsicFunctions>
59129-      <PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
59130-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
59131-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
59132-      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
59133-    </ClCompile>
59134-    <Link>
59135-      <SubSystem>Windows</SubSystem>
59136-      <GenerateDebugInformation>true</GenerateDebugInformation>
59137-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
59138-      <OptimizeReferences>true</OptimizeReferences>
59139-    </Link>
59140-  </ItemDefinitionGroup>
59141-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
59142-    <ClCompile>
59143-      <WarningLevel>Level3</WarningLevel>
59144-      <PrecompiledHeader>
59145-      </PrecompiledHeader>
59146-      <Optimization>MaxSpeed</Optimization>
59147-      <FunctionLevelLinking>true</FunctionLevelLinking>
59148-      <IntrinsicFunctions>true</IntrinsicFunctions>
59149-      <PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
59150-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
59151-      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
59152-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
59153-      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
59154-    </ClCompile>
59155-    <Link>
59156-      <SubSystem>Windows</SubSystem>
59157-      <GenerateDebugInformation>true</GenerateDebugInformation>
59158-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
59159-      <OptimizeReferences>true</OptimizeReferences>
59160-    </Link>
59161-  </ItemDefinitionGroup>
59162-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
59163-    <ClCompile>
59164-      <WarningLevel>Level3</WarningLevel>
59165-      <PrecompiledHeader>
59166-      </PrecompiledHeader>
59167-      <Optimization>MaxSpeed</Optimization>
59168-      <FunctionLevelLinking>true</FunctionLevelLinking>
59169-      <IntrinsicFunctions>true</IntrinsicFunctions>
59170-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
59171-      <PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
59172-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
59173-      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
59174-    </ClCompile>
59175-    <Link>
59176-      <SubSystem>Windows</SubSystem>
59177-      <GenerateDebugInformation>true</GenerateDebugInformation>
59178-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
59179-      <OptimizeReferences>true</OptimizeReferences>
59180-    </Link>
59181-  </ItemDefinitionGroup>
59182-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
59183-    <ClCompile>
59184-      <WarningLevel>Level3</WarningLevel>
59185-      <PrecompiledHeader>
59186-      </PrecompiledHeader>
59187-      <Optimization>MaxSpeed</Optimization>
59188-      <FunctionLevelLinking>true</FunctionLevelLinking>
59189-      <IntrinsicFunctions>true</IntrinsicFunctions>
59190-      <PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
59191-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
59192-      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
59193-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
59194-      <DebugInformationFormat>OldStyle</DebugInformationFormat>
59195-    </ClCompile>
59196-    <Link>
59197-      <SubSystem>Windows</SubSystem>
59198-      <GenerateDebugInformation>true</GenerateDebugInformation>
59199-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
59200-      <OptimizeReferences>true</OptimizeReferences>
59201-    </Link>
59202-  </ItemDefinitionGroup>
59203-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
59204-  <ImportGroup Label="ExtensionTargets">
59205-  </ImportGroup>
59206-</Project>
59207\ No newline at end of file
59208diff --git a/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters b/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
59209deleted file mode 100644
59210index 1b43e9f..0000000
59211--- a/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters
59212+++ /dev/null
59213@@ -1,197 +0,0 @@
59214-<?xml version="1.0" encoding="utf-8"?>
59215-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
59216-  <ItemGroup>
59217-    <Filter Include="Source Files">
59218-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
59219-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
59220-    </Filter>
59221-  </ItemGroup>
59222-  <ItemGroup>
59223-    <ClCompile Include="..\..\..\..\src\arena.c">
59224-      <Filter>Source Files</Filter>
59225-    </ClCompile>
59226-    <ClCompile Include="..\..\..\..\src\background_thread.c">
59227-      <Filter>Source Files</Filter>
59228-    </ClCompile>
59229-    <ClCompile Include="..\..\..\..\src\base.c">
59230-      <Filter>Source Files</Filter>
59231-    </ClCompile>
59232-    <ClCompile Include="..\..\..\..\src\bin.c">
59233-      <Filter>Source Files</Filter>
59234-    </ClCompile>
59235-    <ClCompile Include="..\..\..\..\src\bitmap.c">
59236-      <Filter>Source Files</Filter>
59237-    </ClCompile>
59238-    <ClCompile Include="..\..\..\..\src\buf_writer.c">
59239-      <Filter>Source Files</Filter>
59240-    </ClCompile>
59241-    <ClCompile Include="..\..\..\..\src\cache_bin.c">
59242-      <Filter>Source Files</Filter>
59243-    </ClCompile>
59244-    <ClCompile Include="..\..\..\..\src\ckh.c">
59245-      <Filter>Source Files</Filter>
59246-    </ClCompile>
59247-    <ClCompile Include="..\..\..\..\src\counter.c">
59248-      <Filter>Source Files</Filter>
59249-    </ClCompile>
59250-    <ClCompile Include="..\..\..\..\src\ctl.c">
59251-      <Filter>Source Files</Filter>
59252-    </ClCompile>
59253-    <ClCompile Include="..\..\..\..\src\decay.c">
59254-      <Filter>Source Files</Filter>
59255-    </ClCompile>
59256-    <ClCompile Include="..\..\..\..\src\div.c">
59257-      <Filter>Source Files</Filter>
59258-    </ClCompile>
59259-    <ClCompile Include="..\..\..\..\src\emap.c">
59260-      <Filter>Source Files</Filter>
59261-    </ClCompile>
59262-    <ClCompile Include="..\..\..\..\src\exp_grow.c">
59263-      <Filter>Source Files</Filter>
59264-    </ClCompile>
59265-    <ClCompile Include="..\..\..\..\src\extent.c">
59266-      <Filter>Source Files</Filter>
59267-    </ClCompile>
59268-    <ClCompile Include="..\..\..\..\src\extent_dss.c">
59269-      <Filter>Source Files</Filter>
59270-    </ClCompile>
59271-    <ClCompile Include="..\..\..\..\src\extent_mmap.c">
59272-      <Filter>Source Files</Filter>
59273-    </ClCompile>
59274-    <ClCompile Include="..\..\..\..\src\fxp.c">
59275-      <Filter>Source Files</Filter>
59276-    </ClCompile>
59277-    <ClCompile Include="..\..\..\..\src\hook.c">
59278-      <Filter>Source Files</Filter>
59279-    </ClCompile>
59280-    <ClCompile Include="..\..\..\..\src\hpa.c">
59281-      <Filter>Source Files</Filter>
59282-    </ClCompile>
59283-    <ClCompile Include="..\..\..\..\src\hpa_hooks.c">
59284-      <Filter>Source Files</Filter>
59285-    </ClCompile>
59286-    <ClCompile Include="..\..\..\..\src\hpdata.c">
59287-      <Filter>Source Files</Filter>
59288-    </ClCompile>
59289-    <ClCompile Include="..\..\..\..\src\inspect.c">
59290-      <Filter>Source Files</Filter>
59291-    </ClCompile>
59292-    <ClCompile Include="..\..\..\..\src\jemalloc.c">
59293-      <Filter>Source Files</Filter>
59294-    </ClCompile>
59295-    <ClCompile Include="..\..\..\..\src\large.c">
59296-      <Filter>Source Files</Filter>
59297-    </ClCompile>
59298-    <ClCompile Include="..\..\..\..\src\log.c">
59299-      <Filter>Source Files</Filter>
59300-    </ClCompile>
59301-    <ClCompile Include="..\..\..\..\src\malloc_io.c">
59302-      <Filter>Source Files</Filter>
59303-    </ClCompile>
59304-    <ClCompile Include="..\..\..\..\src\mutex.c">
59305-      <Filter>Source Files</Filter>
59306-    </ClCompile>
59307-    <ClCompile Include="..\..\..\..\src\nstime.c">
59308-      <Filter>Source Files</Filter>
59309-    </ClCompile>
59310-    <ClCompile Include="..\..\..\..\src\pa.c">
59311-      <Filter>Source Files</Filter>
59312-    </ClCompile>
59313-    <ClCompile Include="..\..\..\..\src\pa_extra.c">
59314-      <Filter>Source Files</Filter>
59315-    </ClCompile>
59316-    <ClCompile Include="..\..\..\..\src\pai.c">
59317-      <Filter>Source Files</Filter>
59318-    </ClCompile>
59319-    <ClCompile Include="..\..\..\..\src\pac.c">
59320-      <Filter>Source Files</Filter>
59321-    </ClCompile>
59322-    <ClCompile Include="..\..\..\..\src\pages.c">
59323-      <Filter>Source Files</Filter>
59324-    </ClCompile>
59325-    <ClCompile Include="..\..\..\..\src\peak_event.c">
59326-      <Filter>Source Files</Filter>
59327-    </ClCompile>
59328-    <ClCompile Include="..\..\..\..\src\prof.c">
59329-      <Filter>Source Files</Filter>
59330-    </ClCompile>
59331-    <ClCompile Include="..\..\..\..\src\prof_data.c">
59332-      <Filter>Source Files</Filter>
59333-    </ClCompile>
59334-    <ClCompile Include="..\..\..\..\src\prof_log.c">
59335-      <Filter>Source Files</Filter>
59336-    </ClCompile>
59337-    <ClCompile Include="..\..\..\..\src\prof_recent.c">
59338-      <Filter>Source Files</Filter>
59339-    </ClCompile>
59340-    <ClCompile Include="..\..\..\..\src\prof_stats.c">
59341-      <Filter>Source Files</Filter>
59342-    </ClCompile>
59343-    <ClCompile Include="..\..\..\..\src\prof_sys.c">
59344-      <Filter>Source Files</Filter>
59345-    </ClCompile>
59346-    <ClCompile Include="..\..\..\..\src\psset.c">
59347-      <Filter>Source Files</Filter>
59348-    </ClCompile>
59349-    <ClCompile Include="..\..\..\..\src\rtree.c">
59350-      <Filter>Source Files</Filter>
59351-    </ClCompile>
59352-    <ClCompile Include="..\..\..\..\src\safety_check.c">
59353-      <Filter>Source Files</Filter>
59354-    </ClCompile>
59355-    <ClCompile Include="..\..\..\..\src\sc.c">
59356-      <Filter>Source Files</Filter>
59357-    </ClCompile>
59358-    <ClCompile Include="..\..\..\..\src\sec.c">
59359-      <Filter>Source Files</Filter>
59360-    </ClCompile>
59361-    <ClCompile Include="..\..\..\..\src\stats.c">
59362-      <Filter>Source Files</Filter>
59363-    </ClCompile>
59364-    <ClCompile Include="..\..\..\..\src\sz.c">
59365-      <Filter>Source Files</Filter>
59366-    </ClCompile>
59367-    <ClCompile Include="..\..\..\..\src\tcache.c">
59368-      <Filter>Source Files</Filter>
59369-    </ClCompile>
59370-    <ClCompile Include="..\..\..\..\src\test_hooks.c">
59371-      <Filter>Source Files</Filter>
59372-    </ClCompile>
59373-    <ClCompile Include="..\..\..\..\src\thread_event.c">
59374-      <Filter>Source Files</Filter>
59375-    </ClCompile>
59376-    <ClCompile Include="..\..\..\..\src\ticker.c">
59377-      <Filter>Source Files</Filter>
59378-    </ClCompile>
59379-    <ClCompile Include="..\..\..\..\src\tsd.c">
59380-      <Filter>Source Files</Filter>
59381-    </ClCompile>
59382-    <ClCompile Include="..\..\..\..\src\witness.c">
59383-      <Filter>Source Files</Filter>
59384-    </ClCompile>
59385-    <ClCompile Include="..\..\..\..\src\bin_info.c">
59386-      <Filter>Source Files</Filter>
59387-    </ClCompile>
59388-    <ClCompile Include="..\..\..\..\src\ecache.c">
59389-      <Filter>Source Files</Filter>
59390-    </ClCompile>
59391-    <ClCompile Include="..\..\..\..\src\edata.c">
59392-      <Filter>Source Files</Filter>
59393-    </ClCompile>
59394-    <ClCompile Include="..\..\..\..\src\edata_cache.c">
59395-      <Filter>Source Files</Filter>
59396-    </ClCompile>
59397-    <ClCompile Include="..\..\..\..\src\ehooks.c">
59398-      <Filter>Source Files</Filter>
59399-    </ClCompile>
59400-    <ClCompile Include="..\..\..\..\src\eset.c">
59401-      <Filter>Source Files</Filter>
59402-    </ClCompile>
59403-    <ClCompile Include="..\..\..\..\src\san.c">
59404-      <Filter>Source Files</Filter>
59405-    </ClCompile>
59406-    <ClCompile Include="..\..\..\..\src\san_bump.c">
59407-      <Filter>Source Files</Filter>
59408-    </ClCompile>
59409-  </ItemGroup>
59410-</Project>
59411\ No newline at end of file
59412diff --git a/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj b/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj
59413deleted file mode 100644
59414index 325876d..0000000
59415--- a/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj
59416+++ /dev/null
59417@@ -1,327 +0,0 @@
59418-<?xml version="1.0" encoding="utf-8"?>
59419-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
59420-  <ItemGroup Label="ProjectConfigurations">
59421-    <ProjectConfiguration Include="Debug-static|Win32">
59422-      <Configuration>Debug-static</Configuration>
59423-      <Platform>Win32</Platform>
59424-    </ProjectConfiguration>
59425-    <ProjectConfiguration Include="Debug-static|x64">
59426-      <Configuration>Debug-static</Configuration>
59427-      <Platform>x64</Platform>
59428-    </ProjectConfiguration>
59429-    <ProjectConfiguration Include="Debug|Win32">
59430-      <Configuration>Debug</Configuration>
59431-      <Platform>Win32</Platform>
59432-    </ProjectConfiguration>
59433-    <ProjectConfiguration Include="Release-static|Win32">
59434-      <Configuration>Release-static</Configuration>
59435-      <Platform>Win32</Platform>
59436-    </ProjectConfiguration>
59437-    <ProjectConfiguration Include="Release-static|x64">
59438-      <Configuration>Release-static</Configuration>
59439-      <Platform>x64</Platform>
59440-    </ProjectConfiguration>
59441-    <ProjectConfiguration Include="Release|Win32">
59442-      <Configuration>Release</Configuration>
59443-      <Platform>Win32</Platform>
59444-    </ProjectConfiguration>
59445-    <ProjectConfiguration Include="Debug|x64">
59446-      <Configuration>Debug</Configuration>
59447-      <Platform>x64</Platform>
59448-    </ProjectConfiguration>
59449-    <ProjectConfiguration Include="Release|x64">
59450-      <Configuration>Release</Configuration>
59451-      <Platform>x64</Platform>
59452-    </ProjectConfiguration>
59453-  </ItemGroup>
59454-  <PropertyGroup Label="Globals">
59455-    <ProjectGuid>{09028CFD-4EB7-491D-869C-0708DB97ED44}</ProjectGuid>
59456-    <Keyword>Win32Proj</Keyword>
59457-    <RootNamespace>test_threads</RootNamespace>
59458-    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
59459-  </PropertyGroup>
59460-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
59461-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
59462-    <ConfigurationType>Application</ConfigurationType>
59463-    <UseDebugLibraries>true</UseDebugLibraries>
59464-    <PlatformToolset>v140</PlatformToolset>
59465-    <CharacterSet>MultiByte</CharacterSet>
59466-  </PropertyGroup>
59467-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
59468-    <ConfigurationType>Application</ConfigurationType>
59469-    <UseDebugLibraries>true</UseDebugLibraries>
59470-    <PlatformToolset>v140</PlatformToolset>
59471-    <CharacterSet>MultiByte</CharacterSet>
59472-  </PropertyGroup>
59473-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
59474-    <ConfigurationType>Application</ConfigurationType>
59475-    <UseDebugLibraries>false</UseDebugLibraries>
59476-    <PlatformToolset>v140</PlatformToolset>
59477-    <WholeProgramOptimization>true</WholeProgramOptimization>
59478-    <CharacterSet>MultiByte</CharacterSet>
59479-  </PropertyGroup>
59480-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
59481-    <ConfigurationType>Application</ConfigurationType>
59482-    <UseDebugLibraries>false</UseDebugLibraries>
59483-    <PlatformToolset>v140</PlatformToolset>
59484-    <WholeProgramOptimization>true</WholeProgramOptimization>
59485-    <CharacterSet>MultiByte</CharacterSet>
59486-  </PropertyGroup>
59487-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
59488-    <ConfigurationType>Application</ConfigurationType>
59489-    <UseDebugLibraries>true</UseDebugLibraries>
59490-    <PlatformToolset>v140</PlatformToolset>
59491-    <CharacterSet>MultiByte</CharacterSet>
59492-  </PropertyGroup>
59493-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
59494-    <ConfigurationType>Application</ConfigurationType>
59495-    <UseDebugLibraries>true</UseDebugLibraries>
59496-    <PlatformToolset>v140</PlatformToolset>
59497-    <CharacterSet>MultiByte</CharacterSet>
59498-  </PropertyGroup>
59499-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
59500-    <ConfigurationType>Application</ConfigurationType>
59501-    <UseDebugLibraries>false</UseDebugLibraries>
59502-    <PlatformToolset>v140</PlatformToolset>
59503-    <WholeProgramOptimization>true</WholeProgramOptimization>
59504-    <CharacterSet>MultiByte</CharacterSet>
59505-  </PropertyGroup>
59506-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
59507-    <ConfigurationType>Application</ConfigurationType>
59508-    <UseDebugLibraries>false</UseDebugLibraries>
59509-    <PlatformToolset>v140</PlatformToolset>
59510-    <WholeProgramOptimization>true</WholeProgramOptimization>
59511-    <CharacterSet>MultiByte</CharacterSet>
59512-  </PropertyGroup>
59513-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
59514-  <ImportGroup Label="ExtensionSettings">
59515-  </ImportGroup>
59516-  <ImportGroup Label="Shared">
59517-  </ImportGroup>
59518-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
59519-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59520-  </ImportGroup>
59521-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
59522-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59523-  </ImportGroup>
59524-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
59525-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59526-  </ImportGroup>
59527-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
59528-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59529-  </ImportGroup>
59530-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
59531-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59532-  </ImportGroup>
59533-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
59534-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59535-  </ImportGroup>
59536-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
59537-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59538-  </ImportGroup>
59539-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
59540-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59541-  </ImportGroup>
59542-  <PropertyGroup Label="UserMacros" />
59543-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
59544-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59545-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59546-    <LinkIncremental>true</LinkIncremental>
59547-  </PropertyGroup>
59548-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
59549-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59550-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59551-    <LinkIncremental>true</LinkIncremental>
59552-  </PropertyGroup>
59553-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
59554-    <LinkIncremental>true</LinkIncremental>
59555-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59556-  </PropertyGroup>
59557-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
59558-    <LinkIncremental>true</LinkIncremental>
59559-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59560-  </PropertyGroup>
59561-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
59562-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59563-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59564-    <LinkIncremental>false</LinkIncremental>
59565-  </PropertyGroup>
59566-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
59567-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59568-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59569-    <LinkIncremental>false</LinkIncremental>
59570-  </PropertyGroup>
59571-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
59572-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59573-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59574-    <LinkIncremental>false</LinkIncremental>
59575-  </PropertyGroup>
59576-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
59577-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59578-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59579-    <LinkIncremental>false</LinkIncremental>
59580-  </PropertyGroup>
59581-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
59582-    <ClCompile>
59583-      <PrecompiledHeader>
59584-      </PrecompiledHeader>
59585-      <WarningLevel>Level3</WarningLevel>
59586-      <Optimization>Disabled</Optimization>
59587-      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
59588-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
59589-    </ClCompile>
59590-    <Link>
59591-      <SubSystem>Console</SubSystem>
59592-      <GenerateDebugInformation>true</GenerateDebugInformation>
59593-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
59594-      <AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
59595-    </Link>
59596-  </ItemDefinitionGroup>
59597-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
59598-    <ClCompile>
59599-      <PrecompiledHeader>
59600-      </PrecompiledHeader>
59601-      <WarningLevel>Level3</WarningLevel>
59602-      <Optimization>Disabled</Optimization>
59603-      <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
59604-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
59605-      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
59606-    </ClCompile>
59607-    <Link>
59608-      <SubSystem>Console</SubSystem>
59609-      <GenerateDebugInformation>true</GenerateDebugInformation>
59610-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
59611-      <AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
59612-    </Link>
59613-  </ItemDefinitionGroup>
59614-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
59615-    <ClCompile>
59616-      <PrecompiledHeader>
59617-      </PrecompiledHeader>
59618-      <WarningLevel>Level3</WarningLevel>
59619-      <Optimization>Disabled</Optimization>
59620-      <PreprocessorDefinitions>_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
59621-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
59622-    </ClCompile>
59623-    <Link>
59624-      <SubSystem>Console</SubSystem>
59625-      <GenerateDebugInformation>true</GenerateDebugInformation>
59626-      <AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
59627-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
59628-    </Link>
59629-  </ItemDefinitionGroup>
59630-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
59631-    <ClCompile>
59632-      <PrecompiledHeader>
59633-      </PrecompiledHeader>
59634-      <WarningLevel>Level3</WarningLevel>
59635-      <Optimization>Disabled</Optimization>
59636-      <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
59637-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
59638-      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
59639-    </ClCompile>
59640-    <Link>
59641-      <SubSystem>Console</SubSystem>
59642-      <GenerateDebugInformation>true</GenerateDebugInformation>
59643-      <AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
59644-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
59645-    </Link>
59646-  </ItemDefinitionGroup>
59647-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
59648-    <ClCompile>
59649-      <WarningLevel>Level3</WarningLevel>
59650-      <PrecompiledHeader>
59651-      </PrecompiledHeader>
59652-      <Optimization>MaxSpeed</Optimization>
59653-      <FunctionLevelLinking>true</FunctionLevelLinking>
59654-      <IntrinsicFunctions>true</IntrinsicFunctions>
59655-      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
59656-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
59657-    </ClCompile>
59658-    <Link>
59659-      <SubSystem>Console</SubSystem>
59660-      <GenerateDebugInformation>true</GenerateDebugInformation>
59661-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
59662-      <OptimizeReferences>true</OptimizeReferences>
59663-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
59664-      <AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
59665-    </Link>
59666-  </ItemDefinitionGroup>
59667-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
59668-    <ClCompile>
59669-      <WarningLevel>Level3</WarningLevel>
59670-      <PrecompiledHeader>
59671-      </PrecompiledHeader>
59672-      <Optimization>MaxSpeed</Optimization>
59673-      <FunctionLevelLinking>true</FunctionLevelLinking>
59674-      <IntrinsicFunctions>true</IntrinsicFunctions>
59675-      <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
59676-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
59677-      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
59678-    </ClCompile>
59679-    <Link>
59680-      <SubSystem>Console</SubSystem>
59681-      <GenerateDebugInformation>true</GenerateDebugInformation>
59682-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
59683-      <OptimizeReferences>true</OptimizeReferences>
59684-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
59685-      <AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
59686-    </Link>
59687-  </ItemDefinitionGroup>
59688-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
59689-    <ClCompile>
59690-      <WarningLevel>Level3</WarningLevel>
59691-      <PrecompiledHeader>
59692-      </PrecompiledHeader>
59693-      <Optimization>MaxSpeed</Optimization>
59694-      <FunctionLevelLinking>true</FunctionLevelLinking>
59695-      <IntrinsicFunctions>true</IntrinsicFunctions>
59696-      <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
59697-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
59698-    </ClCompile>
59699-    <Link>
59700-      <SubSystem>Console</SubSystem>
59701-      <GenerateDebugInformation>true</GenerateDebugInformation>
59702-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
59703-      <OptimizeReferences>true</OptimizeReferences>
59704-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
59705-      <AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
59706-    </Link>
59707-  </ItemDefinitionGroup>
59708-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
59709-    <ClCompile>
59710-      <WarningLevel>Level3</WarningLevel>
59711-      <PrecompiledHeader>
59712-      </PrecompiledHeader>
59713-      <Optimization>MaxSpeed</Optimization>
59714-      <FunctionLevelLinking>true</FunctionLevelLinking>
59715-      <IntrinsicFunctions>true</IntrinsicFunctions>
59716-      <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
59717-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
59718-      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
59719-    </ClCompile>
59720-    <Link>
59721-      <SubSystem>Console</SubSystem>
59722-      <GenerateDebugInformation>true</GenerateDebugInformation>
59723-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
59724-      <OptimizeReferences>true</OptimizeReferences>
59725-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
59726-      <AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
59727-    </Link>
59728-  </ItemDefinitionGroup>
59729-  <ItemGroup>
59730-    <ClCompile Include="..\..\..\test_threads\test_threads.cpp" />
59731-    <ClCompile Include="..\..\..\test_threads\test_threads_main.cpp" />
59732-  </ItemGroup>
59733-  <ItemGroup>
59734-    <ProjectReference Include="..\jemalloc\jemalloc.vcxproj">
59735-      <Project>{8d6bb292-9e1c-413d-9f98-4864bdc1514a}</Project>
59736-    </ProjectReference>
59737-  </ItemGroup>
59738-  <ItemGroup>
59739-    <ClInclude Include="..\..\..\test_threads\test_threads.h" />
59740-  </ItemGroup>
59741-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
59742-  <ImportGroup Label="ExtensionTargets">
59743-  </ImportGroup>
59744-</Project>
59745\ No newline at end of file
59746diff --git a/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters b/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters
59747deleted file mode 100644
59748index fa4588f..0000000
59749--- a/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters
59750+++ /dev/null
59751@@ -1,26 +0,0 @@
59752-<?xml version="1.0" encoding="utf-8"?>
59753-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
59754-  <ItemGroup>
59755-    <Filter Include="Source Files">
59756-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
59757-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
59758-    </Filter>
59759-    <Filter Include="Header Files">
59760-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
59761-      <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
59762-    </Filter>
59763-  </ItemGroup>
59764-  <ItemGroup>
59765-    <ClCompile Include="..\..\..\test_threads\test_threads.cpp">
59766-      <Filter>Source Files</Filter>
59767-    </ClCompile>
59768-    <ClCompile Include="..\..\..\test_threads\test_threads_main.cpp">
59769-      <Filter>Source Files</Filter>
59770-    </ClCompile>
59771-  </ItemGroup>
59772-  <ItemGroup>
59773-    <ClInclude Include="..\..\..\test_threads\test_threads.h">
59774-      <Filter>Header Files</Filter>
59775-    </ClInclude>
59776-  </ItemGroup>
59777-</Project>
59778\ No newline at end of file
59779diff --git a/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj b/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj
59780deleted file mode 100644
59781index a8004db..0000000
59782--- a/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj
59783+++ /dev/null
59784@@ -1,379 +0,0 @@
59785-<?xml version="1.0" encoding="utf-8"?>
59786-<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
59787-  <ItemGroup Label="ProjectConfigurations">
59788-    <ProjectConfiguration Include="Debug-static|Win32">
59789-      <Configuration>Debug-static</Configuration>
59790-      <Platform>Win32</Platform>
59791-    </ProjectConfiguration>
59792-    <ProjectConfiguration Include="Debug-static|x64">
59793-      <Configuration>Debug-static</Configuration>
59794-      <Platform>x64</Platform>
59795-    </ProjectConfiguration>
59796-    <ProjectConfiguration Include="Debug|Win32">
59797-      <Configuration>Debug</Configuration>
59798-      <Platform>Win32</Platform>
59799-    </ProjectConfiguration>
59800-    <ProjectConfiguration Include="Release-static|Win32">
59801-      <Configuration>Release-static</Configuration>
59802-      <Platform>Win32</Platform>
59803-    </ProjectConfiguration>
59804-    <ProjectConfiguration Include="Release-static|x64">
59805-      <Configuration>Release-static</Configuration>
59806-      <Platform>x64</Platform>
59807-    </ProjectConfiguration>
59808-    <ProjectConfiguration Include="Release|Win32">
59809-      <Configuration>Release</Configuration>
59810-      <Platform>Win32</Platform>
59811-    </ProjectConfiguration>
59812-    <ProjectConfiguration Include="Debug|x64">
59813-      <Configuration>Debug</Configuration>
59814-      <Platform>x64</Platform>
59815-    </ProjectConfiguration>
59816-    <ProjectConfiguration Include="Release|x64">
59817-      <Configuration>Release</Configuration>
59818-      <Platform>x64</Platform>
59819-    </ProjectConfiguration>
59820-  </ItemGroup>
59821-  <ItemGroup>
59822-    <ClCompile Include="..\..\..\..\src\arena.c" />
59823-    <ClCompile Include="..\..\..\..\src\background_thread.c" />
59824-    <ClCompile Include="..\..\..\..\src\base.c" />
59825-    <ClCompile Include="..\..\..\..\src\bin.c" />
59826-    <ClCompile Include="..\..\..\..\src\bin_info.c" />
59827-    <ClCompile Include="..\..\..\..\src\bitmap.c" />
59828-    <ClCompile Include="..\..\..\..\src\buf_writer.c" />
59829-    <ClCompile Include="..\..\..\..\src\cache_bin.c" />
59830-    <ClCompile Include="..\..\..\..\src\ckh.c" />
59831-    <ClCompile Include="..\..\..\..\src\counter.c" />
59832-    <ClCompile Include="..\..\..\..\src\ctl.c" />
59833-    <ClCompile Include="..\..\..\..\src\decay.c" />
59834-    <ClCompile Include="..\..\..\..\src\div.c" />
59835-    <ClCompile Include="..\..\..\..\src\ecache.c" />
59836-    <ClCompile Include="..\..\..\..\src\edata.c" />
59837-    <ClCompile Include="..\..\..\..\src\edata_cache.c" />
59838-    <ClCompile Include="..\..\..\..\src\ehooks.c" />
59839-    <ClCompile Include="..\..\..\..\src\emap.c" />
59840-    <ClCompile Include="..\..\..\..\src\eset.c" />
59841-    <ClCompile Include="..\..\..\..\src\exp_grow.c" />
59842-    <ClCompile Include="..\..\..\..\src\extent.c" />
59843-    <ClCompile Include="..\..\..\..\src\extent_dss.c" />
59844-    <ClCompile Include="..\..\..\..\src\extent_mmap.c" />
59845-    <ClCompile Include="..\..\..\..\src\fxp.c" />
59846-    <ClCompile Include="..\..\..\..\src\hook.c" />
59847-    <ClCompile Include="..\..\..\..\src\hpa.c" />
59848-    <ClCompile Include="..\..\..\..\src\hpa_hooks.c" />
59849-    <ClCompile Include="..\..\..\..\src\hpdata.c" />
59850-    <ClCompile Include="..\..\..\..\src\inspect.c" />
59851-    <ClCompile Include="..\..\..\..\src\jemalloc.c" />
59852-    <ClCompile Include="..\..\..\..\src\large.c" />
59853-    <ClCompile Include="..\..\..\..\src\log.c" />
59854-    <ClCompile Include="..\..\..\..\src\malloc_io.c" />
59855-    <ClCompile Include="..\..\..\..\src\mutex.c" />
59856-    <ClCompile Include="..\..\..\..\src\nstime.c" />
59857-    <ClCompile Include="..\..\..\..\src\pa.c" />
59858-    <ClCompile Include="..\..\..\..\src\pa_extra.c" />
59859-    <ClCompile Include="..\..\..\..\src\pai.c" />
59860-    <ClCompile Include="..\..\..\..\src\pac.c" />
59861-    <ClCompile Include="..\..\..\..\src\pages.c" />
59862-    <ClCompile Include="..\..\..\..\src\peak_event.c" />
59863-    <ClCompile Include="..\..\..\..\src\prof.c" />
59864-    <ClCompile Include="..\..\..\..\src\prof_data.c" />
59865-    <ClCompile Include="..\..\..\..\src\prof_log.c" />
59866-    <ClCompile Include="..\..\..\..\src\prof_recent.c" />
59867-    <ClCompile Include="..\..\..\..\src\prof_stats.c" />
59868-    <ClCompile Include="..\..\..\..\src\prof_sys.c" />
59869-    <ClCompile Include="..\..\..\..\src\psset.c" />
59870-    <ClCompile Include="..\..\..\..\src\rtree.c" />
59871-    <ClCompile Include="..\..\..\..\src\safety_check.c" />
59872-    <ClCompile Include="..\..\..\..\src\san.c" />
59873-    <ClCompile Include="..\..\..\..\src\san_bump.c" />
59874-    <ClCompile Include="..\..\..\..\src\sc.c" />
59875-    <ClCompile Include="..\..\..\..\src\sec.c" />
59876-    <ClCompile Include="..\..\..\..\src\stats.c" />
59877-    <ClCompile Include="..\..\..\..\src\sz.c" />
59878-    <ClCompile Include="..\..\..\..\src\tcache.c" />
59879-    <ClCompile Include="..\..\..\..\src\test_hooks.c" />
59880-    <ClCompile Include="..\..\..\..\src\thread_event.c" />
59881-    <ClCompile Include="..\..\..\..\src\ticker.c" />
59882-    <ClCompile Include="..\..\..\..\src\tsd.c" />
59883-    <ClCompile Include="..\..\..\..\src\witness.c" />
59884-  </ItemGroup>
59885-  <PropertyGroup Label="Globals">
59886-    <ProjectGuid>{8D6BB292-9E1C-413D-9F98-4864BDC1514A}</ProjectGuid>
59887-    <Keyword>Win32Proj</Keyword>
59888-    <RootNamespace>jemalloc</RootNamespace>
59889-  </PropertyGroup>
59890-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
59891-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
59892-    <ConfigurationType>DynamicLibrary</ConfigurationType>
59893-    <UseDebugLibraries>true</UseDebugLibraries>
59894-    <PlatformToolset>v141</PlatformToolset>
59895-    <CharacterSet>MultiByte</CharacterSet>
59896-  </PropertyGroup>
59897-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
59898-    <ConfigurationType>StaticLibrary</ConfigurationType>
59899-    <UseDebugLibraries>true</UseDebugLibraries>
59900-    <PlatformToolset>v141</PlatformToolset>
59901-    <CharacterSet>MultiByte</CharacterSet>
59902-  </PropertyGroup>
59903-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
59904-    <ConfigurationType>DynamicLibrary</ConfigurationType>
59905-    <UseDebugLibraries>false</UseDebugLibraries>
59906-    <PlatformToolset>v141</PlatformToolset>
59907-    <WholeProgramOptimization>true</WholeProgramOptimization>
59908-    <CharacterSet>MultiByte</CharacterSet>
59909-  </PropertyGroup>
59910-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
59911-    <ConfigurationType>StaticLibrary</ConfigurationType>
59912-    <UseDebugLibraries>false</UseDebugLibraries>
59913-    <PlatformToolset>v141</PlatformToolset>
59914-    <WholeProgramOptimization>true</WholeProgramOptimization>
59915-    <CharacterSet>MultiByte</CharacterSet>
59916-  </PropertyGroup>
59917-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
59918-    <ConfigurationType>DynamicLibrary</ConfigurationType>
59919-    <UseDebugLibraries>true</UseDebugLibraries>
59920-    <PlatformToolset>v141</PlatformToolset>
59921-    <CharacterSet>MultiByte</CharacterSet>
59922-  </PropertyGroup>
59923-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
59924-    <ConfigurationType>StaticLibrary</ConfigurationType>
59925-    <UseDebugLibraries>true</UseDebugLibraries>
59926-    <PlatformToolset>v141</PlatformToolset>
59927-    <CharacterSet>MultiByte</CharacterSet>
59928-  </PropertyGroup>
59929-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
59930-    <ConfigurationType>DynamicLibrary</ConfigurationType>
59931-    <UseDebugLibraries>false</UseDebugLibraries>
59932-    <PlatformToolset>v141</PlatformToolset>
59933-    <WholeProgramOptimization>true</WholeProgramOptimization>
59934-    <CharacterSet>MultiByte</CharacterSet>
59935-  </PropertyGroup>
59936-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
59937-    <ConfigurationType>StaticLibrary</ConfigurationType>
59938-    <UseDebugLibraries>false</UseDebugLibraries>
59939-    <PlatformToolset>v141</PlatformToolset>
59940-    <WholeProgramOptimization>true</WholeProgramOptimization>
59941-    <CharacterSet>MultiByte</CharacterSet>
59942-  </PropertyGroup>
59943-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
59944-  <ImportGroup Label="ExtensionSettings">
59945-  </ImportGroup>
59946-  <ImportGroup Label="Shared">
59947-  </ImportGroup>
59948-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
59949-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59950-  </ImportGroup>
59951-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
59952-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59953-  </ImportGroup>
59954-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
59955-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59956-  </ImportGroup>
59957-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
59958-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59959-  </ImportGroup>
59960-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
59961-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59962-  </ImportGroup>
59963-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
59964-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59965-  </ImportGroup>
59966-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
59967-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59968-  </ImportGroup>
59969-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
59970-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
59971-  </ImportGroup>
59972-  <PropertyGroup Label="UserMacros" />
59973-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
59974-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59975-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59976-    <TargetName>$(ProjectName)d</TargetName>
59977-  </PropertyGroup>
59978-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
59979-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59980-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59981-    <TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
59982-  </PropertyGroup>
59983-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
59984-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59985-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59986-  </PropertyGroup>
59987-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
59988-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59989-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59990-    <TargetName>$(ProjectName)-$(PlatformToolset)-$(Configuration)</TargetName>
59991-  </PropertyGroup>
59992-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
59993-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59994-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
59995-    <TargetName>$(ProjectName)d</TargetName>
59996-  </PropertyGroup>
59997-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
59998-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
59999-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
60000-    <TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
60001-  </PropertyGroup>
60002-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
60003-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
60004-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
60005-  </PropertyGroup>
60006-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
60007-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
60008-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
60009-    <TargetName>$(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration)</TargetName>
60010-  </PropertyGroup>
60011-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
60012-    <ClCompile>
60013-      <PrecompiledHeader>
60014-      </PrecompiledHeader>
60015-      <WarningLevel>Level3</WarningLevel>
60016-      <Optimization>Disabled</Optimization>
60017-      <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
60018-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
60019-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
60020-      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
60021-    </ClCompile>
60022-    <Link>
60023-      <SubSystem>Windows</SubSystem>
60024-      <GenerateDebugInformation>true</GenerateDebugInformation>
60025-    </Link>
60026-  </ItemDefinitionGroup>
60027-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
60028-    <ClCompile>
60029-      <PrecompiledHeader>
60030-      </PrecompiledHeader>
60031-      <WarningLevel>Level3</WarningLevel>
60032-      <Optimization>Disabled</Optimization>
60033-      <PreprocessorDefinitions>JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
60034-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
60035-      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
60036-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
60037-      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
60038-    </ClCompile>
60039-    <Link>
60040-      <SubSystem>Windows</SubSystem>
60041-      <GenerateDebugInformation>true</GenerateDebugInformation>
60042-    </Link>
60043-  </ItemDefinitionGroup>
60044-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
60045-    <ClCompile>
60046-      <PrecompiledHeader>
60047-      </PrecompiledHeader>
60048-      <WarningLevel>Level3</WarningLevel>
60049-      <Optimization>Disabled</Optimization>
60050-      <PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
60051-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
60052-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
60053-      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
60054-    </ClCompile>
60055-    <Link>
60056-      <SubSystem>Windows</SubSystem>
60057-      <GenerateDebugInformation>true</GenerateDebugInformation>
60058-    </Link>
60059-  </ItemDefinitionGroup>
60060-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
60061-    <ClCompile>
60062-      <PrecompiledHeader>
60063-      </PrecompiledHeader>
60064-      <WarningLevel>Level3</WarningLevel>
60065-      <Optimization>Disabled</Optimization>
60066-      <PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
60067-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
60068-      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
60069-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
60070-      <DebugInformationFormat>OldStyle</DebugInformationFormat>
60071-      <MinimalRebuild>false</MinimalRebuild>
60072-    </ClCompile>
60073-    <Link>
60074-      <SubSystem>Windows</SubSystem>
60075-      <GenerateDebugInformation>true</GenerateDebugInformation>
60076-    </Link>
60077-  </ItemDefinitionGroup>
60078-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
60079-    <ClCompile>
60080-      <WarningLevel>Level3</WarningLevel>
60081-      <PrecompiledHeader>
60082-      </PrecompiledHeader>
60083-      <Optimization>MaxSpeed</Optimization>
60084-      <FunctionLevelLinking>true</FunctionLevelLinking>
60085-      <IntrinsicFunctions>true</IntrinsicFunctions>
60086-      <PreprocessorDefinitions>_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
60087-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
60088-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
60089-      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
60090-    </ClCompile>
60091-    <Link>
60092-      <SubSystem>Windows</SubSystem>
60093-      <GenerateDebugInformation>true</GenerateDebugInformation>
60094-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
60095-      <OptimizeReferences>true</OptimizeReferences>
60096-    </Link>
60097-  </ItemDefinitionGroup>
60098-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
60099-    <ClCompile>
60100-      <WarningLevel>Level3</WarningLevel>
60101-      <PrecompiledHeader>
60102-      </PrecompiledHeader>
60103-      <Optimization>MaxSpeed</Optimization>
60104-      <FunctionLevelLinking>true</FunctionLevelLinking>
60105-      <IntrinsicFunctions>true</IntrinsicFunctions>
60106-      <PreprocessorDefinitions>_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
60107-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
60108-      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
60109-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
60110-      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
60111-    </ClCompile>
60112-    <Link>
60113-      <SubSystem>Windows</SubSystem>
60114-      <GenerateDebugInformation>true</GenerateDebugInformation>
60115-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
60116-      <OptimizeReferences>true</OptimizeReferences>
60117-    </Link>
60118-  </ItemDefinitionGroup>
60119-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
60120-    <ClCompile>
60121-      <WarningLevel>Level3</WarningLevel>
60122-      <PrecompiledHeader>
60123-      </PrecompiledHeader>
60124-      <Optimization>MaxSpeed</Optimization>
60125-      <FunctionLevelLinking>true</FunctionLevelLinking>
60126-      <IntrinsicFunctions>true</IntrinsicFunctions>
60127-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
60128-      <PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
60129-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
60130-      <ProgramDataBaseFileName>$(OutputPath)$(TargetName).pdb</ProgramDataBaseFileName>
60131-    </ClCompile>
60132-    <Link>
60133-      <SubSystem>Windows</SubSystem>
60134-      <GenerateDebugInformation>true</GenerateDebugInformation>
60135-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
60136-      <OptimizeReferences>true</OptimizeReferences>
60137-    </Link>
60138-  </ItemDefinitionGroup>
60139-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
60140-    <ClCompile>
60141-      <WarningLevel>Level3</WarningLevel>
60142-      <PrecompiledHeader>
60143-      </PrecompiledHeader>
60144-      <Optimization>MaxSpeed</Optimization>
60145-      <FunctionLevelLinking>true</FunctionLevelLinking>
60146-      <IntrinsicFunctions>true</IntrinsicFunctions>
60147-      <PreprocessorDefinitions>JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
60148-      <AdditionalIncludeDirectories>..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
60149-      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
60150-      <DisableSpecificWarnings>4090;4146;4267;4334</DisableSpecificWarnings>
60151-      <DebugInformationFormat>OldStyle</DebugInformationFormat>
60152-    </ClCompile>
60153-    <Link>
60154-      <SubSystem>Windows</SubSystem>
60155-      <GenerateDebugInformation>true</GenerateDebugInformation>
60156-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
60157-      <OptimizeReferences>true</OptimizeReferences>
60158-    </Link>
60159-  </ItemDefinitionGroup>
60160-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
60161-  <ImportGroup Label="ExtensionTargets">
60162-  </ImportGroup>
60163-</Project>
60164\ No newline at end of file
60165diff --git a/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters b/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters
60166deleted file mode 100644
60167index 1b43e9f..0000000
60168--- a/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters
60169+++ /dev/null
60170@@ -1,197 +0,0 @@
60171-<?xml version="1.0" encoding="utf-8"?>
60172-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
60173-  <ItemGroup>
60174-    <Filter Include="Source Files">
60175-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
60176-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
60177-    </Filter>
60178-  </ItemGroup>
60179-  <ItemGroup>
60180-    <ClCompile Include="..\..\..\..\src\arena.c">
60181-      <Filter>Source Files</Filter>
60182-    </ClCompile>
60183-    <ClCompile Include="..\..\..\..\src\background_thread.c">
60184-      <Filter>Source Files</Filter>
60185-    </ClCompile>
60186-    <ClCompile Include="..\..\..\..\src\base.c">
60187-      <Filter>Source Files</Filter>
60188-    </ClCompile>
60189-    <ClCompile Include="..\..\..\..\src\bin.c">
60190-      <Filter>Source Files</Filter>
60191-    </ClCompile>
60192-    <ClCompile Include="..\..\..\..\src\bitmap.c">
60193-      <Filter>Source Files</Filter>
60194-    </ClCompile>
60195-    <ClCompile Include="..\..\..\..\src\buf_writer.c">
60196-      <Filter>Source Files</Filter>
60197-    </ClCompile>
60198-    <ClCompile Include="..\..\..\..\src\cache_bin.c">
60199-      <Filter>Source Files</Filter>
60200-    </ClCompile>
60201-    <ClCompile Include="..\..\..\..\src\ckh.c">
60202-      <Filter>Source Files</Filter>
60203-    </ClCompile>
60204-    <ClCompile Include="..\..\..\..\src\counter.c">
60205-      <Filter>Source Files</Filter>
60206-    </ClCompile>
60207-    <ClCompile Include="..\..\..\..\src\ctl.c">
60208-      <Filter>Source Files</Filter>
60209-    </ClCompile>
60210-    <ClCompile Include="..\..\..\..\src\decay.c">
60211-      <Filter>Source Files</Filter>
60212-    </ClCompile>
60213-    <ClCompile Include="..\..\..\..\src\div.c">
60214-      <Filter>Source Files</Filter>
60215-    </ClCompile>
60216-    <ClCompile Include="..\..\..\..\src\emap.c">
60217-      <Filter>Source Files</Filter>
60218-    </ClCompile>
60219-    <ClCompile Include="..\..\..\..\src\exp_grow.c">
60220-      <Filter>Source Files</Filter>
60221-    </ClCompile>
60222-    <ClCompile Include="..\..\..\..\src\extent.c">
60223-      <Filter>Source Files</Filter>
60224-    </ClCompile>
60225-    <ClCompile Include="..\..\..\..\src\extent_dss.c">
60226-      <Filter>Source Files</Filter>
60227-    </ClCompile>
60228-    <ClCompile Include="..\..\..\..\src\extent_mmap.c">
60229-      <Filter>Source Files</Filter>
60230-    </ClCompile>
60231-    <ClCompile Include="..\..\..\..\src\fxp.c">
60232-      <Filter>Source Files</Filter>
60233-    </ClCompile>
60234-    <ClCompile Include="..\..\..\..\src\hook.c">
60235-      <Filter>Source Files</Filter>
60236-    </ClCompile>
60237-    <ClCompile Include="..\..\..\..\src\hpa.c">
60238-      <Filter>Source Files</Filter>
60239-    </ClCompile>
60240-    <ClCompile Include="..\..\..\..\src\hpa_hooks.c">
60241-      <Filter>Source Files</Filter>
60242-    </ClCompile>
60243-    <ClCompile Include="..\..\..\..\src\hpdata.c">
60244-      <Filter>Source Files</Filter>
60245-    </ClCompile>
60246-    <ClCompile Include="..\..\..\..\src\inspect.c">
60247-      <Filter>Source Files</Filter>
60248-    </ClCompile>
60249-    <ClCompile Include="..\..\..\..\src\jemalloc.c">
60250-      <Filter>Source Files</Filter>
60251-    </ClCompile>
60252-    <ClCompile Include="..\..\..\..\src\large.c">
60253-      <Filter>Source Files</Filter>
60254-    </ClCompile>
60255-    <ClCompile Include="..\..\..\..\src\log.c">
60256-      <Filter>Source Files</Filter>
60257-    </ClCompile>
60258-    <ClCompile Include="..\..\..\..\src\malloc_io.c">
60259-      <Filter>Source Files</Filter>
60260-    </ClCompile>
60261-    <ClCompile Include="..\..\..\..\src\mutex.c">
60262-      <Filter>Source Files</Filter>
60263-    </ClCompile>
60264-    <ClCompile Include="..\..\..\..\src\nstime.c">
60265-      <Filter>Source Files</Filter>
60266-    </ClCompile>
60267-    <ClCompile Include="..\..\..\..\src\pa.c">
60268-      <Filter>Source Files</Filter>
60269-    </ClCompile>
60270-    <ClCompile Include="..\..\..\..\src\pa_extra.c">
60271-      <Filter>Source Files</Filter>
60272-    </ClCompile>
60273-    <ClCompile Include="..\..\..\..\src\pai.c">
60274-      <Filter>Source Files</Filter>
60275-    </ClCompile>
60276-    <ClCompile Include="..\..\..\..\src\pac.c">
60277-      <Filter>Source Files</Filter>
60278-    </ClCompile>
60279-    <ClCompile Include="..\..\..\..\src\pages.c">
60280-      <Filter>Source Files</Filter>
60281-    </ClCompile>
60282-    <ClCompile Include="..\..\..\..\src\peak_event.c">
60283-      <Filter>Source Files</Filter>
60284-    </ClCompile>
60285-    <ClCompile Include="..\..\..\..\src\prof.c">
60286-      <Filter>Source Files</Filter>
60287-    </ClCompile>
60288-    <ClCompile Include="..\..\..\..\src\prof_data.c">
60289-      <Filter>Source Files</Filter>
60290-    </ClCompile>
60291-    <ClCompile Include="..\..\..\..\src\prof_log.c">
60292-      <Filter>Source Files</Filter>
60293-    </ClCompile>
60294-    <ClCompile Include="..\..\..\..\src\prof_recent.c">
60295-      <Filter>Source Files</Filter>
60296-    </ClCompile>
60297-    <ClCompile Include="..\..\..\..\src\prof_stats.c">
60298-      <Filter>Source Files</Filter>
60299-    </ClCompile>
60300-    <ClCompile Include="..\..\..\..\src\prof_sys.c">
60301-      <Filter>Source Files</Filter>
60302-    </ClCompile>
60303-    <ClCompile Include="..\..\..\..\src\psset.c">
60304-      <Filter>Source Files</Filter>
60305-    </ClCompile>
60306-    <ClCompile Include="..\..\..\..\src\rtree.c">
60307-      <Filter>Source Files</Filter>
60308-    </ClCompile>
60309-    <ClCompile Include="..\..\..\..\src\safety_check.c">
60310-      <Filter>Source Files</Filter>
60311-    </ClCompile>
60312-    <ClCompile Include="..\..\..\..\src\sc.c">
60313-      <Filter>Source Files</Filter>
60314-    </ClCompile>
60315-    <ClCompile Include="..\..\..\..\src\sec.c">
60316-      <Filter>Source Files</Filter>
60317-    </ClCompile>
60318-    <ClCompile Include="..\..\..\..\src\stats.c">
60319-      <Filter>Source Files</Filter>
60320-    </ClCompile>
60321-    <ClCompile Include="..\..\..\..\src\sz.c">
60322-      <Filter>Source Files</Filter>
60323-    </ClCompile>
60324-    <ClCompile Include="..\..\..\..\src\tcache.c">
60325-      <Filter>Source Files</Filter>
60326-    </ClCompile>
60327-    <ClCompile Include="..\..\..\..\src\test_hooks.c">
60328-      <Filter>Source Files</Filter>
60329-    </ClCompile>
60330-    <ClCompile Include="..\..\..\..\src\thread_event.c">
60331-      <Filter>Source Files</Filter>
60332-    </ClCompile>
60333-    <ClCompile Include="..\..\..\..\src\ticker.c">
60334-      <Filter>Source Files</Filter>
60335-    </ClCompile>
60336-    <ClCompile Include="..\..\..\..\src\tsd.c">
60337-      <Filter>Source Files</Filter>
60338-    </ClCompile>
60339-    <ClCompile Include="..\..\..\..\src\witness.c">
60340-      <Filter>Source Files</Filter>
60341-    </ClCompile>
60342-    <ClCompile Include="..\..\..\..\src\bin_info.c">
60343-      <Filter>Source Files</Filter>
60344-    </ClCompile>
60345-    <ClCompile Include="..\..\..\..\src\ecache.c">
60346-      <Filter>Source Files</Filter>
60347-    </ClCompile>
60348-    <ClCompile Include="..\..\..\..\src\edata.c">
60349-      <Filter>Source Files</Filter>
60350-    </ClCompile>
60351-    <ClCompile Include="..\..\..\..\src\edata_cache.c">
60352-      <Filter>Source Files</Filter>
60353-    </ClCompile>
60354-    <ClCompile Include="..\..\..\..\src\ehooks.c">
60355-      <Filter>Source Files</Filter>
60356-    </ClCompile>
60357-    <ClCompile Include="..\..\..\..\src\eset.c">
60358-      <Filter>Source Files</Filter>
60359-    </ClCompile>
60360-    <ClCompile Include="..\..\..\..\src\san.c">
60361-      <Filter>Source Files</Filter>
60362-    </ClCompile>
60363-    <ClCompile Include="..\..\..\..\src\san_bump.c">
60364-      <Filter>Source Files</Filter>
60365-    </ClCompile>
60366-  </ItemGroup>
60367-</Project>
60368\ No newline at end of file
60369diff --git a/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj b/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj
60370deleted file mode 100644
60371index c35b0f5..0000000
60372--- a/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj
60373+++ /dev/null
60374@@ -1,326 +0,0 @@
60375-<?xml version="1.0" encoding="utf-8"?>
60376-<Project DefaultTargets="Build" ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
60377-  <ItemGroup Label="ProjectConfigurations">
60378-    <ProjectConfiguration Include="Debug-static|Win32">
60379-      <Configuration>Debug-static</Configuration>
60380-      <Platform>Win32</Platform>
60381-    </ProjectConfiguration>
60382-    <ProjectConfiguration Include="Debug-static|x64">
60383-      <Configuration>Debug-static</Configuration>
60384-      <Platform>x64</Platform>
60385-    </ProjectConfiguration>
60386-    <ProjectConfiguration Include="Debug|Win32">
60387-      <Configuration>Debug</Configuration>
60388-      <Platform>Win32</Platform>
60389-    </ProjectConfiguration>
60390-    <ProjectConfiguration Include="Release-static|Win32">
60391-      <Configuration>Release-static</Configuration>
60392-      <Platform>Win32</Platform>
60393-    </ProjectConfiguration>
60394-    <ProjectConfiguration Include="Release-static|x64">
60395-      <Configuration>Release-static</Configuration>
60396-      <Platform>x64</Platform>
60397-    </ProjectConfiguration>
60398-    <ProjectConfiguration Include="Release|Win32">
60399-      <Configuration>Release</Configuration>
60400-      <Platform>Win32</Platform>
60401-    </ProjectConfiguration>
60402-    <ProjectConfiguration Include="Debug|x64">
60403-      <Configuration>Debug</Configuration>
60404-      <Platform>x64</Platform>
60405-    </ProjectConfiguration>
60406-    <ProjectConfiguration Include="Release|x64">
60407-      <Configuration>Release</Configuration>
60408-      <Platform>x64</Platform>
60409-    </ProjectConfiguration>
60410-  </ItemGroup>
60411-  <PropertyGroup Label="Globals">
60412-    <ProjectGuid>{09028CFD-4EB7-491D-869C-0708DB97ED44}</ProjectGuid>
60413-    <Keyword>Win32Proj</Keyword>
60414-    <RootNamespace>test_threads</RootNamespace>
60415-  </PropertyGroup>
60416-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
60417-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
60418-    <ConfigurationType>Application</ConfigurationType>
60419-    <UseDebugLibraries>true</UseDebugLibraries>
60420-    <PlatformToolset>v141</PlatformToolset>
60421-    <CharacterSet>MultiByte</CharacterSet>
60422-  </PropertyGroup>
60423-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="Configuration">
60424-    <ConfigurationType>Application</ConfigurationType>
60425-    <UseDebugLibraries>true</UseDebugLibraries>
60426-    <PlatformToolset>v141</PlatformToolset>
60427-    <CharacterSet>MultiByte</CharacterSet>
60428-  </PropertyGroup>
60429-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
60430-    <ConfigurationType>Application</ConfigurationType>
60431-    <UseDebugLibraries>false</UseDebugLibraries>
60432-    <PlatformToolset>v141</PlatformToolset>
60433-    <WholeProgramOptimization>true</WholeProgramOptimization>
60434-    <CharacterSet>MultiByte</CharacterSet>
60435-  </PropertyGroup>
60436-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="Configuration">
60437-    <ConfigurationType>Application</ConfigurationType>
60438-    <UseDebugLibraries>false</UseDebugLibraries>
60439-    <PlatformToolset>v141</PlatformToolset>
60440-    <WholeProgramOptimization>true</WholeProgramOptimization>
60441-    <CharacterSet>MultiByte</CharacterSet>
60442-  </PropertyGroup>
60443-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
60444-    <ConfigurationType>Application</ConfigurationType>
60445-    <UseDebugLibraries>true</UseDebugLibraries>
60446-    <PlatformToolset>v141</PlatformToolset>
60447-    <CharacterSet>MultiByte</CharacterSet>
60448-  </PropertyGroup>
60449-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="Configuration">
60450-    <ConfigurationType>Application</ConfigurationType>
60451-    <UseDebugLibraries>true</UseDebugLibraries>
60452-    <PlatformToolset>v141</PlatformToolset>
60453-    <CharacterSet>MultiByte</CharacterSet>
60454-  </PropertyGroup>
60455-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
60456-    <ConfigurationType>Application</ConfigurationType>
60457-    <UseDebugLibraries>false</UseDebugLibraries>
60458-    <PlatformToolset>v141</PlatformToolset>
60459-    <WholeProgramOptimization>true</WholeProgramOptimization>
60460-    <CharacterSet>MultiByte</CharacterSet>
60461-  </PropertyGroup>
60462-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="Configuration">
60463-    <ConfigurationType>Application</ConfigurationType>
60464-    <UseDebugLibraries>false</UseDebugLibraries>
60465-    <PlatformToolset>v141</PlatformToolset>
60466-    <WholeProgramOptimization>true</WholeProgramOptimization>
60467-    <CharacterSet>MultiByte</CharacterSet>
60468-  </PropertyGroup>
60469-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
60470-  <ImportGroup Label="ExtensionSettings">
60471-  </ImportGroup>
60472-  <ImportGroup Label="Shared">
60473-  </ImportGroup>
60474-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
60475-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
60476-  </ImportGroup>
60477-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'" Label="PropertySheets">
60478-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
60479-  </ImportGroup>
60480-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
60481-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
60482-  </ImportGroup>
60483-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'" Label="PropertySheets">
60484-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
60485-  </ImportGroup>
60486-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
60487-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
60488-  </ImportGroup>
60489-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'" Label="PropertySheets">
60490-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
60491-  </ImportGroup>
60492-  <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
60493-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
60494-  </ImportGroup>
60495-  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'" Label="PropertySheets">
60496-    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
60497-  </ImportGroup>
60498-  <PropertyGroup Label="UserMacros" />
60499-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
60500-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
60501-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
60502-    <LinkIncremental>true</LinkIncremental>
60503-  </PropertyGroup>
60504-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
60505-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
60506-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
60507-    <LinkIncremental>true</LinkIncremental>
60508-  </PropertyGroup>
60509-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
60510-    <LinkIncremental>true</LinkIncremental>
60511-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
60512-  </PropertyGroup>
60513-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
60514-    <LinkIncremental>true</LinkIncremental>
60515-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
60516-  </PropertyGroup>
60517-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
60518-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
60519-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
60520-    <LinkIncremental>false</LinkIncremental>
60521-  </PropertyGroup>
60522-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
60523-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
60524-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
60525-    <LinkIncremental>false</LinkIncremental>
60526-  </PropertyGroup>
60527-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
60528-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
60529-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
60530-    <LinkIncremental>false</LinkIncremental>
60531-  </PropertyGroup>
60532-  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
60533-    <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir>
60534-    <IntDir>$(Platform)\$(Configuration)\</IntDir>
60535-    <LinkIncremental>false</LinkIncremental>
60536-  </PropertyGroup>
60537-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
60538-    <ClCompile>
60539-      <PrecompiledHeader>
60540-      </PrecompiledHeader>
60541-      <WarningLevel>Level3</WarningLevel>
60542-      <Optimization>Disabled</Optimization>
60543-      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
60544-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
60545-    </ClCompile>
60546-    <Link>
60547-      <SubSystem>Console</SubSystem>
60548-      <GenerateDebugInformation>true</GenerateDebugInformation>
60549-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
60550-      <AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
60551-    </Link>
60552-  </ItemDefinitionGroup>
60553-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|Win32'">
60554-    <ClCompile>
60555-      <PrecompiledHeader>
60556-      </PrecompiledHeader>
60557-      <WarningLevel>Level3</WarningLevel>
60558-      <Optimization>Disabled</Optimization>
60559-      <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
60560-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
60561-      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
60562-    </ClCompile>
60563-    <Link>
60564-      <SubSystem>Console</SubSystem>
60565-      <GenerateDebugInformation>true</GenerateDebugInformation>
60566-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
60567-      <AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
60568-    </Link>
60569-  </ItemDefinitionGroup>
60570-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
60571-    <ClCompile>
60572-      <PrecompiledHeader>
60573-      </PrecompiledHeader>
60574-      <WarningLevel>Level3</WarningLevel>
60575-      <Optimization>Disabled</Optimization>
60576-      <PreprocessorDefinitions>_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
60577-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
60578-    </ClCompile>
60579-    <Link>
60580-      <SubSystem>Console</SubSystem>
60581-      <GenerateDebugInformation>true</GenerateDebugInformation>
60582-      <AdditionalDependencies>jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
60583-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
60584-    </Link>
60585-  </ItemDefinitionGroup>
60586-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug-static|x64'">
60587-    <ClCompile>
60588-      <PrecompiledHeader>
60589-      </PrecompiledHeader>
60590-      <WarningLevel>Level3</WarningLevel>
60591-      <Optimization>Disabled</Optimization>
60592-      <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions)</PreprocessorDefinitions>
60593-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
60594-      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
60595-    </ClCompile>
60596-    <Link>
60597-      <SubSystem>Console</SubSystem>
60598-      <GenerateDebugInformation>true</GenerateDebugInformation>
60599-      <AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
60600-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
60601-    </Link>
60602-  </ItemDefinitionGroup>
60603-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
60604-    <ClCompile>
60605-      <WarningLevel>Level3</WarningLevel>
60606-      <PrecompiledHeader>
60607-      </PrecompiledHeader>
60608-      <Optimization>MaxSpeed</Optimization>
60609-      <FunctionLevelLinking>true</FunctionLevelLinking>
60610-      <IntrinsicFunctions>true</IntrinsicFunctions>
60611-      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
60612-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
60613-    </ClCompile>
60614-    <Link>
60615-      <SubSystem>Console</SubSystem>
60616-      <GenerateDebugInformation>true</GenerateDebugInformation>
60617-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
60618-      <OptimizeReferences>true</OptimizeReferences>
60619-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
60620-      <AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
60621-    </Link>
60622-  </ItemDefinitionGroup>
60623-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|Win32'">
60624-    <ClCompile>
60625-      <WarningLevel>Level3</WarningLevel>
60626-      <PrecompiledHeader>
60627-      </PrecompiledHeader>
60628-      <Optimization>MaxSpeed</Optimization>
60629-      <FunctionLevelLinking>true</FunctionLevelLinking>
60630-      <IntrinsicFunctions>true</IntrinsicFunctions>
60631-      <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
60632-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
60633-      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
60634-    </ClCompile>
60635-    <Link>
60636-      <SubSystem>Console</SubSystem>
60637-      <GenerateDebugInformation>true</GenerateDebugInformation>
60638-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
60639-      <OptimizeReferences>true</OptimizeReferences>
60640-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
60641-      <AdditionalDependencies>jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
60642-    </Link>
60643-  </ItemDefinitionGroup>
60644-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
60645-    <ClCompile>
60646-      <WarningLevel>Level3</WarningLevel>
60647-      <PrecompiledHeader>
60648-      </PrecompiledHeader>
60649-      <Optimization>MaxSpeed</Optimization>
60650-      <FunctionLevelLinking>true</FunctionLevelLinking>
60651-      <IntrinsicFunctions>true</IntrinsicFunctions>
60652-      <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
60653-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
60654-    </ClCompile>
60655-    <Link>
60656-      <SubSystem>Console</SubSystem>
60657-      <GenerateDebugInformation>true</GenerateDebugInformation>
60658-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
60659-      <OptimizeReferences>true</OptimizeReferences>
60660-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
60661-      <AdditionalDependencies>jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
60662-    </Link>
60663-  </ItemDefinitionGroup>
60664-  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release-static|x64'">
60665-    <ClCompile>
60666-      <WarningLevel>Level3</WarningLevel>
60667-      <PrecompiledHeader>
60668-      </PrecompiledHeader>
60669-      <Optimization>MaxSpeed</Optimization>
60670-      <FunctionLevelLinking>true</FunctionLevelLinking>
60671-      <IntrinsicFunctions>true</IntrinsicFunctions>
60672-      <PreprocessorDefinitions>JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
60673-      <AdditionalIncludeDirectories>..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
60674-      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
60675-    </ClCompile>
60676-    <Link>
60677-      <SubSystem>Console</SubSystem>
60678-      <GenerateDebugInformation>true</GenerateDebugInformation>
60679-      <EnableCOMDATFolding>true</EnableCOMDATFolding>
60680-      <OptimizeReferences>true</OptimizeReferences>
60681-      <AdditionalLibraryDirectories>$(SolutionDir)$(Platform)\$(Configuration)</AdditionalLibraryDirectories>
60682-      <AdditionalDependencies>jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
60683-    </Link>
60684-  </ItemDefinitionGroup>
60685-  <ItemGroup>
60686-    <ClCompile Include="..\..\..\test_threads\test_threads.cpp" />
60687-    <ClCompile Include="..\..\..\test_threads\test_threads_main.cpp" />
60688-  </ItemGroup>
60689-  <ItemGroup>
60690-    <ProjectReference Include="..\jemalloc\jemalloc.vcxproj">
60691-      <Project>{8d6bb292-9e1c-413d-9f98-4864bdc1514a}</Project>
60692-    </ProjectReference>
60693-  </ItemGroup>
60694-  <ItemGroup>
60695-    <ClInclude Include="..\..\..\test_threads\test_threads.h" />
60696-  </ItemGroup>
60697-  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
60698-  <ImportGroup Label="ExtensionTargets">
60699-  </ImportGroup>
60700-</Project>
60701\ No newline at end of file
60702diff --git a/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj.filters b/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj.filters
60703deleted file mode 100644
60704index fa4588f..0000000
60705--- a/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj.filters
60706+++ /dev/null
60707@@ -1,26 +0,0 @@
60708-<?xml version="1.0" encoding="utf-8"?>
60709-<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
60710-  <ItemGroup>
60711-    <Filter Include="Source Files">
60712-      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
60713-      <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
60714-    </Filter>
60715-    <Filter Include="Header Files">
60716-      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
60717-      <Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
60718-    </Filter>
60719-  </ItemGroup>
60720-  <ItemGroup>
60721-    <ClCompile Include="..\..\..\test_threads\test_threads.cpp">
60722-      <Filter>Source Files</Filter>
60723-    </ClCompile>
60724-    <ClCompile Include="..\..\..\test_threads\test_threads_main.cpp">
60725-      <Filter>Source Files</Filter>
60726-    </ClCompile>
60727-  </ItemGroup>
60728-  <ItemGroup>
60729-    <ClInclude Include="..\..\..\test_threads\test_threads.h">
60730-      <Filter>Header Files</Filter>
60731-    </ClInclude>
60732-  </ItemGroup>
60733-</Project>
60734\ No newline at end of file
60735diff --git a/jemalloc/msvc/test_threads/test_threads.cpp b/jemalloc/msvc/test_threads/test_threads.cpp
60736deleted file mode 100644
60737index 6eed028..0000000
60738--- a/jemalloc/msvc/test_threads/test_threads.cpp
60739+++ /dev/null
60740@@ -1,89 +0,0 @@
60741-// jemalloc C++ threaded test
60742-// Author: Rustam Abdullaev
60743-// Public Domain
60744-
60745-#include <atomic>
60746-#include <functional>
60747-#include <future>
60748-#include <random>
60749-#include <thread>
60750-#include <vector>
60751-#include <stdio.h>
60752-#define JEMALLOC_NO_DEMANGLE
60753-#include <jemalloc/jemalloc.h>
60754-
60755-using std::vector;
60756-using std::thread;
60757-using std::uniform_int_distribution;
60758-using std::minstd_rand;
60759-
60760-int test_threads() {
60761-  je_malloc_conf = "narenas:3";
60762-  int narenas = 0;
60763-  size_t sz = sizeof(narenas);
60764-  je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0);
60765-  if (narenas != 3) {
60766-    printf("Error: unexpected number of arenas: %d\n", narenas);
60767-    return 1;
60768-  }
60769-  static const int sizes[] = { 7, 16, 32, 60, 91, 100, 120, 144, 169, 199, 255, 400, 670, 900, 917, 1025, 3333, 5190, 13131, 49192, 99999, 123123, 255265, 2333111 };
60770-  static const int numSizes = (int)(sizeof(sizes) / sizeof(sizes[0]));
60771-  vector<thread> workers;
60772-  static const int numThreads = narenas + 1, numAllocsMax = 25, numIter1 = 50, numIter2 = 50;
60773-  je_malloc_stats_print(NULL, NULL, NULL);
60774-  size_t allocated1;
60775-  size_t sz1 = sizeof(allocated1);
60776-  je_mallctl("stats.active", (void *)&allocated1, &sz1, NULL, 0);
60777-  printf("\nPress Enter to start threads...\n");
60778-  getchar();
60779-  printf("Starting %d threads x %d x %d iterations...\n", numThreads, numIter1, numIter2);
60780-  for (int i = 0; i < numThreads; i++) {
60781-    workers.emplace_back([tid=i]() {
60782-      uniform_int_distribution<int> sizeDist(0, numSizes - 1);
60783-      minstd_rand rnd(tid * 17);
60784-      uint8_t* ptrs[numAllocsMax];
60785-      int ptrsz[numAllocsMax];
60786-      for (int i = 0; i < numIter1; ++i) {
60787-        thread t([&]() {
60788-          for (int i = 0; i < numIter2; ++i) {
60789-            const int numAllocs = numAllocsMax - sizeDist(rnd);
60790-            for (int j = 0; j < numAllocs; j += 64) {
60791-              const int x = sizeDist(rnd);
60792-              const int sz = sizes[x];
60793-              ptrsz[j] = sz;
60794-              ptrs[j] = (uint8_t*)je_malloc(sz);
60795-              if (!ptrs[j]) {
60796-                printf("Unable to allocate %d bytes in thread %d, iter %d, alloc %d. %d\n", sz, tid, i, j, x);
60797-                exit(1);
60798-              }
60799-              for (int k = 0; k < sz; k++)
60800-                ptrs[j][k] = tid + k;
60801-            }
60802-            for (int j = 0; j < numAllocs; j += 64) {
60803-              for (int k = 0, sz = ptrsz[j]; k < sz; k++)
60804-                if (ptrs[j][k] != (uint8_t)(tid + k)) {
60805-                  printf("Memory error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", tid, i, j, k, ptrs[j][k], (uint8_t)(tid + k));
60806-                  exit(1);
60807-                }
60808-              je_free(ptrs[j]);
60809-            }
60810-          }
60811-        });
60812-        t.join();
60813-      }
60814-    });
60815-  }
60816-  for (thread& t : workers) {
60817-    t.join();
60818-  }
60819-  je_malloc_stats_print(NULL, NULL, NULL);
60820-  size_t allocated2;
60821-  je_mallctl("stats.active", (void *)&allocated2, &sz1, NULL, 0);
60822-  size_t leaked = allocated2 - allocated1;
60823-  printf("\nDone. Leaked: %zd bytes\n", leaked);
60824-  bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet)
60825-  printf("\nTest %s!\n", (failed ? "FAILED" : "successful"));
60826-  printf("\nPress Enter to continue...\n");
60827-  getchar();
60828-  return failed ? 1 : 0;
60829-}
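The deleted test above drives jemalloc through its prefixed C API: it seeds je_malloc_conf before the allocator initializes, then reads "opt.narenas" and "stats.active" through je_mallctl. A minimal standalone sketch of that read pattern, assuming a jemalloc build whose header exposes the je_* names (the JEMALLOC_NO_DEMANGLE route the test itself uses), might look like this:

    // Sketch only: mirrors the mallctl reads in the deleted test_threads.cpp above.
    // Assumes <jemalloc/jemalloc.h> is available and the library is linked in;
    // JEMALLOC_NO_DEMANGLE exposes the je_* names regardless of the configured prefix.
    #include <cstdio>
    #define JEMALLOC_NO_DEMANGLE
    #include <jemalloc/jemalloc.h>

    int main() {
      // Honored only if set before jemalloc's first allocation, as in the test.
      je_malloc_conf = "narenas:3";

      unsigned narenas = 0;          // opt.narenas is an unsigned value
      size_t sz = sizeof(narenas);
      if (je_mallctl("opt.narenas", &narenas, &sz, NULL, 0) != 0) {
        std::printf("mallctl(\"opt.narenas\") failed\n");
        return 1;
      }

      size_t active = 0;             // stats.active is a size_t, in bytes
      sz = sizeof(active);
      // Read without refreshing the stats epoch, just as the deleted test does.
      je_mallctl("stats.active", &active, &sz, NULL, 0);

      std::printf("narenas=%u, active=%zu bytes\n", narenas, active);
      return 0;
    }

On Windows this would link against the jemalloc(d).lib produced by the project files above; on other platforms a prefixed jemalloc build plus -ljemalloc serves the same purpose.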
60830diff --git a/jemalloc/msvc/test_threads/test_threads.h b/jemalloc/msvc/test_threads/test_threads.h
60831deleted file mode 100644
60832index 64d0cdb..0000000
60833--- a/jemalloc/msvc/test_threads/test_threads.h
60834+++ /dev/null
60835@@ -1,3 +0,0 @@
60836-#pragma once
60837-
60838-int test_threads();
60839diff --git a/jemalloc/msvc/test_threads/test_threads_main.cpp b/jemalloc/msvc/test_threads/test_threads_main.cpp
60840deleted file mode 100644
60841index 0a022fb..0000000
60842--- a/jemalloc/msvc/test_threads/test_threads_main.cpp
60843+++ /dev/null
60844@@ -1,11 +0,0 @@
60845-#include "test_threads.h"
60846-#include <future>
60847-#include <functional>
60848-#include <chrono>
60849-
60850-using namespace std::chrono_literals;
60851-
60852-int main(int argc, char** argv) {
60853-  int rc = test_threads();
60854-  return rc;
60855-}
60856diff --git a/jemalloc/run_tests.sh b/jemalloc/run_tests.sh
60857deleted file mode 100755
60858index b434f15..0000000
60859--- a/jemalloc/run_tests.sh
60860+++ /dev/null
60861@@ -1 +0,0 @@
60862-$(dirname "$0")/scripts/gen_run_tests.py | bash
60863diff --git a/jemalloc/scripts/check-formatting.sh b/jemalloc/scripts/check-formatting.sh
60864deleted file mode 100755
60865index 68cafd8..0000000
60866--- a/jemalloc/scripts/check-formatting.sh
60867+++ /dev/null
60868@@ -1,28 +0,0 @@
60869-#!/bin/bash
60870-
60871-# The files that need to be properly formatted.  We'll grow this incrementally
60872-# until it includes all the jemalloc source files (as we convert things over),
60873-# and then just replace it with
60874-#    find -name '*.c' -o -name '*.h' -o -name '*.cpp'
60875-FILES=(
60876-)
60877-
60878-if command -v clang-format &> /dev/null; then
60879-  CLANG_FORMAT="clang-format"
60880-elif command -v clang-format-8 &> /dev/null; then
60881-  CLANG_FORMAT="clang-format-8"
60882-else
60883-  echo "Couldn't find clang-format."
60884-fi
60885-
60886-if ! $CLANG_FORMAT -version | grep "version 8\." &> /dev/null; then
60887-  echo "clang-format is the wrong version."
60888-  exit 1
60889-fi
60890-
60891-for file in ${FILES[@]}; do
60892-  if ! cmp --silent $file <($CLANG_FORMAT $file) &> /dev/null; then
60893-    echo "Error: $file is not clang-formatted"
60894-    exit 1
60895-  fi
60896-done
60897diff --git a/jemalloc/scripts/freebsd/before_install.sh b/jemalloc/scripts/freebsd/before_install.sh
60898deleted file mode 100644
60899index f2bee32..0000000
60900--- a/jemalloc/scripts/freebsd/before_install.sh
60901+++ /dev/null
60902@@ -1,3 +0,0 @@
60903-#!/bin/tcsh
60904-
60905-su -m root -c 'pkg install -y git'
60906diff --git a/jemalloc/scripts/freebsd/before_script.sh b/jemalloc/scripts/freebsd/before_script.sh
60907deleted file mode 100644
60908index 29406f6..0000000
60909--- a/jemalloc/scripts/freebsd/before_script.sh
60910+++ /dev/null
60911@@ -1,10 +0,0 @@
60912-#!/bin/tcsh
60913-
60914-autoconf
60915-# We don't perfectly track freebsd stdlib.h definitions.  This is fine when
60916-# we count as a system header, but breaks otherwise, like during these
60917-# tests.
60918-./configure --with-jemalloc-prefix=ci_ ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" CXX="$CXX $COMPILER_FLAGS"} $CONFIGURE_FLAGS
60919-JE_NCPUS=`sysctl -n kern.smp.cpus`
60920-gmake -j${JE_NCPUS}
60921-gmake -j${JE_NCPUS} tests
60922diff --git a/jemalloc/scripts/freebsd/script.sh b/jemalloc/scripts/freebsd/script.sh
60923deleted file mode 100644
60924index d9c53a2..0000000
60925--- a/jemalloc/scripts/freebsd/script.sh
60926+++ /dev/null
60927@@ -1,3 +0,0 @@
60928-#!/bin/tcsh
60929-
60930-gmake check
60931diff --git a/jemalloc/scripts/gen_run_tests.py b/jemalloc/scripts/gen_run_tests.py
60932deleted file mode 100755
60933index 7c3075f..0000000
60934--- a/jemalloc/scripts/gen_run_tests.py
60935+++ /dev/null
60936@@ -1,130 +0,0 @@
60937-#!/usr/bin/env python3
60938-
60939-import sys
60940-from itertools import combinations
60941-from os import uname
60942-from multiprocessing import cpu_count
60943-from subprocess import call
60944-
60945-# Later, we want to test extended vaddr support.  Apparently, the "real" way of
60946-# checking this is flaky on OS X.
60947-bits_64 = sys.maxsize > 2**32
60948-
60949-nparallel = cpu_count() * 2
60950-
60951-uname = uname()[0]
60952-
60953-if call("command -v gmake", shell=True) == 0:
60954-    make_cmd = 'gmake'
60955-else:
60956-    make_cmd = 'make'
60957-
60958-def powerset(items):
60959-    result = []
60960-    for i in range(len(items) + 1):
60961-        result += combinations(items, i)
60962-    return result
60963-
60964-possible_compilers = []
60965-for cc, cxx in (['gcc', 'g++'], ['clang', 'clang++']):
60966-    try:
60967-        cmd_ret = call([cc, "-v"])
60968-        if cmd_ret == 0:
60969-            possible_compilers.append((cc, cxx))
60970-    except:
60971-        pass
60972-possible_compiler_opts = [
60973-    '-m32',
60974-]
60975-possible_config_opts = [
60976-    '--enable-debug',
60977-    '--enable-prof',
60978-    '--disable-stats',
60979-    '--enable-opt-safety-checks',
60980-    '--with-lg-page=16',
60981-]
60982-if bits_64:
60983-    possible_config_opts.append('--with-lg-vaddr=56')
60984-
60985-possible_malloc_conf_opts = [
60986-    'tcache:false',
60987-    'dss:primary',
60988-    'percpu_arena:percpu',
60989-    'background_thread:true',
60990-]
60991-
60992-print('set -e')
60993-print('if [ -f Makefile ] ; then %(make_cmd)s relclean ; fi' % {'make_cmd':
60994-    make_cmd})
60995-print('autoconf')
60996-print('rm -rf run_tests.out')
60997-print('mkdir run_tests.out')
60998-print('cd run_tests.out')
60999-
61000-ind = 0
61001-for cc, cxx in possible_compilers:
61002-    for compiler_opts in powerset(possible_compiler_opts):
61003-        for config_opts in powerset(possible_config_opts):
61004-            for malloc_conf_opts in powerset(possible_malloc_conf_opts):
61005-                if cc == 'clang' \
61006-                  and '-m32' in possible_compiler_opts \
61007-                  and '--enable-prof' in config_opts:
61008-                    continue
61009-                config_line = (
61010-                    'EXTRA_CFLAGS=-Werror EXTRA_CXXFLAGS=-Werror '
61011-                    + 'CC="{} {}" '.format(cc, " ".join(compiler_opts))
61012-                    + 'CXX="{} {}" '.format(cxx, " ".join(compiler_opts))
61013-                    + '../../configure '
61014-                    + " ".join(config_opts) + (' --with-malloc-conf=' +
61015-                    ",".join(malloc_conf_opts) if len(malloc_conf_opts) > 0
61016-                    else '')
61017-                )
61018-
61019-                # We don't want to test large vaddr spaces in 32-bit mode.
61020-                if ('-m32' in compiler_opts and '--with-lg-vaddr=56' in
61021-                    config_opts):
61022-                    continue
61023-
61024-                # Per CPU arenas are only supported on Linux.
61025-                linux_supported = ('percpu_arena:percpu' in malloc_conf_opts \
61026-                  or 'background_thread:true' in malloc_conf_opts)
61027-                # Heap profiling and dss are not supported on OS X.
61028-                darwin_unsupported = ('--enable-prof' in config_opts or \
61029-                  'dss:primary' in malloc_conf_opts)
61030-                if (uname == 'Linux' and linux_supported) \
61031-                  or (not linux_supported and (uname != 'Darwin' or \
61032-                  not darwin_unsupported)):
61033-                    print("""cat <<EOF > run_test_%(ind)d.sh
61034-#!/bin/sh
61035-
61036-set -e
61037-
61038-abort() {
61039-    echo "==> Error" >> run_test.log
61040-    echo "Error; see run_tests.out/run_test_%(ind)d.out/run_test.log"
61041-    exit 255 # Special exit code tells xargs to terminate.
61042-}
61043-
61044-# Environment variables are not supported.
61045-run_cmd() {
61046-    echo "==> \$@" >> run_test.log
61047-    \$@ >> run_test.log 2>&1 || abort
61048-}
61049-
61050-echo "=> run_test_%(ind)d: %(config_line)s"
61051-mkdir run_test_%(ind)d.out
61052-cd run_test_%(ind)d.out
61053-
61054-echo "==> %(config_line)s" >> run_test.log
61055-%(config_line)s >> run_test.log 2>&1 || abort
61056-
61057-run_cmd %(make_cmd)s all tests
61058-run_cmd %(make_cmd)s check
61059-run_cmd %(make_cmd)s distclean
61060-EOF
61061-chmod 755 run_test_%(ind)d.sh""" % {'ind': ind, 'config_line': config_line,
61062-      'make_cmd': make_cmd})
61063-                    ind += 1
61064-
61065-print('for i in `seq 0 %(last_ind)d` ; do echo run_test_${i}.sh ; done | xargs'
61066-    ' -P %(nparallel)d -n 1 sh' % {'last_ind': ind-1, 'nparallel': nparallel})
61067diff --git a/jemalloc/scripts/gen_travis.py b/jemalloc/scripts/gen_travis.py
61068deleted file mode 100755
61069index 4366a06..0000000
61070--- a/jemalloc/scripts/gen_travis.py
61071+++ /dev/null
61072@@ -1,327 +0,0 @@
61073-#!/usr/bin/env python3
61074-
61075-from itertools import combinations, chain
61076-from enum import Enum, auto
61077-
61078-
61079-LINUX = 'linux'
61080-OSX = 'osx'
61081-WINDOWS = 'windows'
61082-FREEBSD = 'freebsd'
61083-
61084-
61085-AMD64 = 'amd64'
61086-ARM64 = 'arm64'
61087-PPC64LE = 'ppc64le'
61088-
61089-
61090-TRAVIS_TEMPLATE = """\
61091-# This config file is generated by ./scripts/gen_travis.py.
61092-# Do not edit by hand.
61093-
61094-# We use 'minimal', because 'generic' makes Windows VMs hang at startup. Also
61095-# the software provided by 'generic' is simply not needed for our tests.
61096-# Differences are explained here:
61097-# https://docs.travis-ci.com/user/languages/minimal-and-generic/
61098-language: minimal
61099-dist: focal
61100-
61101-jobs:
61102-  include:
61103-{jobs}
61104-
61105-before_install:
61106-  - |-
61107-    if test -f "./scripts/$TRAVIS_OS_NAME/before_install.sh"; then
61108-      source ./scripts/$TRAVIS_OS_NAME/before_install.sh
61109-    fi
61110-
61111-before_script:
61112-  - |-
61113-    if test -f "./scripts/$TRAVIS_OS_NAME/before_script.sh"; then
61114-      source ./scripts/$TRAVIS_OS_NAME/before_script.sh
61115-    else
61116-      scripts/gen_travis.py > travis_script && diff .travis.yml travis_script
61117-      autoconf
61118-      # If COMPILER_FLAGS are not empty, add them to CC and CXX
61119-      ./configure ${{COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" \
61120-CXX="$CXX $COMPILER_FLAGS"}} $CONFIGURE_FLAGS
61121-      make -j3
61122-      make -j3 tests
61123-    fi
61124-
61125-script:
61126-  - |-
61127-    if test -f "./scripts/$TRAVIS_OS_NAME/script.sh"; then
61128-      source ./scripts/$TRAVIS_OS_NAME/script.sh
61129-    else
61130-      make check
61131-    fi
61132-"""
61133-
61134-
61135-class Option(object):
61136-    class Type:
61137-        COMPILER = auto()
61138-        COMPILER_FLAG = auto()
61139-        CONFIGURE_FLAG = auto()
61140-        MALLOC_CONF = auto()
61141-        FEATURE = auto()
61142-
61143-    def __init__(self, type, value):
61144-        self.type = type
61145-        self.value = value
61146-
61147-    @staticmethod
61148-    def as_compiler(value):
61149-        return Option(Option.Type.COMPILER, value)
61150-
61151-    @staticmethod
61152-    def as_compiler_flag(value):
61153-        return Option(Option.Type.COMPILER_FLAG, value)
61154-
61155-    @staticmethod
61156-    def as_configure_flag(value):
61157-        return Option(Option.Type.CONFIGURE_FLAG, value)
61158-
61159-    @staticmethod
61160-    def as_malloc_conf(value):
61161-        return Option(Option.Type.MALLOC_CONF, value)
61162-
61163-    @staticmethod
61164-    def as_feature(value):
61165-        return Option(Option.Type.FEATURE, value)
61166-
61167-    def __eq__(self, obj):
61168-        return (isinstance(obj, Option) and obj.type == self.type
61169-                and obj.value == self.value)
61170-
61171-
61172-# The 'default' configuration is gcc, on linux, with no compiler or configure
61173-# flags.  We also test with clang, -m32, --enable-debug, --enable-prof,
61174-# --disable-stats, and --with-malloc-conf=tcache:false.  To avoid abusing
61175-# travis though, we don't test all 2**7 = 128 possible combinations of these;
61176-# instead, we only test combinations of up to 2 'unusual' settings, under the
61177-# hope that bugs involving interactions of such settings are rare.
61178-MAX_UNUSUAL_OPTIONS = 2
61179-
61180-
61181-GCC = Option.as_compiler('CC=gcc CXX=g++')
61182-CLANG = Option.as_compiler('CC=clang CXX=clang++')
61183-CL = Option.as_compiler('CC=cl.exe CXX=cl.exe')
61184-
61185-
61186-compilers_unusual = [CLANG,]
61187-
61188-
61189-CROSS_COMPILE_32BIT = Option.as_feature('CROSS_COMPILE_32BIT')
61190-feature_unusuals = [CROSS_COMPILE_32BIT]
61191-
61192-
61193-configure_flag_unusuals = [Option.as_configure_flag(opt) for opt in (
61194-    '--enable-debug',
61195-    '--enable-prof',
61196-    '--disable-stats',
61197-    '--disable-libdl',
61198-    '--enable-opt-safety-checks',
61199-    '--with-lg-page=16',
61200-)]
61201-
61202-
61203-malloc_conf_unusuals = [Option.as_malloc_conf(opt) for opt in (
61204-    'tcache:false',
61205-    'dss:primary',
61206-    'percpu_arena:percpu',
61207-    'background_thread:true',
61208-)]
61209-
61210-
61211-all_unusuals = (compilers_unusual + feature_unusuals
61212-    + configure_flag_unusuals + malloc_conf_unusuals)
61213-
61214-
61215-def get_extra_cflags(os, compiler):
61216-    if os == FREEBSD:
61217-        return []
61218-
61219-    if os == WINDOWS:
61220-        # For non-CL compilers under Windows (for now it's only MinGW-GCC),
61221-        # -fcommon needs to be specified to correctly handle multiple
61222-        # 'malloc_conf' symbols and such, which are declared weak under Linux.
61223-        # Weak symbols don't work with MinGW-GCC.
61224-        if compiler != CL.value:
61225-            return ['-fcommon']
61226-        else:
61227-            return []
61228-
61229-    # We get some spurious errors when -Warray-bounds is enabled.
61230-    extra_cflags = ['-Werror', '-Wno-array-bounds']
61231-    if compiler == CLANG.value or os == OSX:
61232-        extra_cflags += [
61233-            '-Wno-unknown-warning-option',
61234-            '-Wno-ignored-attributes'
61235-        ]
61236-    if os == OSX:
61237-        extra_cflags += [
61238-            '-Wno-deprecated-declarations',
61239-        ]
61240-    return extra_cflags
61241-
61242-
61243-# Formats a job from a combination of flags
61244-def format_job(os, arch, combination):
61245-    compilers = [x.value for x in combination if x.type == Option.Type.COMPILER]
61246-    assert(len(compilers) <= 1)
61247-    compiler_flags = [x.value for x in combination if x.type == Option.Type.COMPILER_FLAG]
61248-    configure_flags = [x.value for x in combination if x.type == Option.Type.CONFIGURE_FLAG]
61249-    malloc_conf = [x.value for x in combination if x.type == Option.Type.MALLOC_CONF]
61250-    features = [x.value for x in combination if x.type == Option.Type.FEATURE]
61251-
61252-    if len(malloc_conf) > 0:
61253-        configure_flags.append('--with-malloc-conf=' + ','.join(malloc_conf))
61254-
61255-    if not compilers:
61256-        compiler = GCC.value
61257-    else:
61258-        compiler = compilers[0]
61259-
61260-    extra_environment_vars = ''
61261-    cross_compile = CROSS_COMPILE_32BIT.value in features
61262-    if os == LINUX and cross_compile:
61263-        compiler_flags.append('-m32')
61264-
61265-    features_str = ' '.join([' {}=yes'.format(feature) for feature in features])
61266-
61267-    stringify = lambda arr, name: ' {}="{}"'.format(name, ' '.join(arr)) if arr else ''
61268-    env_string = '{}{}{}{}{}{}'.format(
61269-            compiler,
61270-            features_str,
61271-            stringify(compiler_flags, 'COMPILER_FLAGS'),
61272-            stringify(configure_flags, 'CONFIGURE_FLAGS'),
61273-            stringify(get_extra_cflags(os, compiler), 'EXTRA_CFLAGS'),
61274-            extra_environment_vars)
61275-
61276-    job = '    - os: {}\n'.format(os)
61277-    job += '      arch: {}\n'.format(arch)
61278-    job += '      env: {}'.format(env_string)
61279-    return job
61280-
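-# For example, a combination of (CLANG, Option.as_configure_flag('--enable-debug'))
-# on a linux/amd64 job would be formatted roughly as:
-#     - os: linux
-#       arch: amd64
-#       env: CC=clang CXX=clang++ CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds -Wno-unknown-warning-option -Wno-ignored-attributes"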
61281-
61282-def generate_unusual_combinations(unusuals, max_unusual_opts):
61283-    """
61284-    Generates different combinations of non-standard compilers, compiler flags,
61285-    configure flags and malloc_conf settings.
61286-
61287-    @param max_unusual_opts: Limit of unusual options per combination.
61288-    """
61289-    return chain.from_iterable(
61290-            [combinations(unusuals, i) for i in range(max_unusual_opts + 1)])
61291-
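-# For example, with unusuals = (A, B, C) -- any three options -- and
-# max_unusual_opts = 2, this yields (), (A,), (B,), (C,), (A, B), (A, C) and
-# (B, C), i.e. every combination of at most two unusual options.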
61292-
61293-def included(combination, exclude):
61294-    """
61295-    Checks if the combination of options should be included in the Travis
61296-    testing matrix.
61297-
61298-    @param exclude: A list of options to be avoided.
61299-    """
61300-    return not any(excluded in combination for excluded in exclude)
61301-
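-# For example, included((CLANG, CROSS_COMPILE_32BIT), exclude=(CLANG,)) is
-# False: any combination containing an excluded option is dropped.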
61302-
61303-def generate_jobs(os, arch, exclude, max_unusual_opts, unusuals=all_unusuals):
61304-    jobs = []
61305-    for combination in generate_unusual_combinations(unusuals, max_unusual_opts):
61306-        if included(combination, exclude):
61307-            jobs.append(format_job(os, arch, combination))
61308-    return '\n'.join(jobs)
61309-
61310-
61311-def generate_linux(arch):
61312-    os = LINUX
61313-
61314-    # AMD64 gets up to MAX_UNUSUAL_OPTIONS unusual options; other arches only 1, to reduce matrix size
61315-    max_unusual_opts = MAX_UNUSUAL_OPTIONS if arch == AMD64 else 1
61316-
61317-    exclude = []
61318-    if arch == PPC64LE:
61319-        # Avoid 32 bit builds and clang on PowerPC
61320-        exclude = (CROSS_COMPILE_32BIT, CLANG,)
61321-
61322-    return generate_jobs(os, arch, exclude, max_unusual_opts)
61323-
61324-
61325-def generate_macos(arch):
61326-    os = OSX
61327-
61328-    max_unusual_opts = 1
61329-
61330-    exclude = ([Option.as_malloc_conf(opt) for opt in (
61331-            'dss:primary',
61332-            'percpu_arena:percpu',
61333-            'background_thread:true')] +
61334-        [Option.as_configure_flag('--enable-prof')] +
61335-        [CLANG,])
61336-
61337-    return generate_jobs(os, arch, exclude, max_unusual_opts)
61338-
61339-
61340-def generate_windows(arch):
61341-    os = WINDOWS
61342-
61343-    max_unusual_opts = 3
61344-    unusuals = (
61345-        Option.as_configure_flag('--enable-debug'),
61346-        CL,
61347-        CROSS_COMPILE_32BIT,
61348-    )
61349-    return generate_jobs(os, arch, (), max_unusual_opts, unusuals)
61350-
61351-
61352-def generate_freebsd(arch):
61353-    os = FREEBSD
61354-
61355-    max_unusual_opts = 4
61356-    unusuals = (
61357-        Option.as_configure_flag('--enable-debug'),
61358-        Option.as_configure_flag('--enable-prof --enable-prof-libunwind'),
61359-        Option.as_configure_flag('--with-lg-page=16 --with-malloc-conf=tcache:false'),
61360-        CROSS_COMPILE_32BIT,
61361-    )
61362-    return generate_jobs(os, arch, (), max_unusual_opts, unusuals)
61363-
61364-
61365-
61366-def get_manual_jobs():
61367-    return """\
61368-    # Development build
61369-    - os: linux
61370-      env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug \
61371---disable-cache-oblivious --enable-stats --enable-log --enable-prof" \
61372-EXTRA_CFLAGS="-Werror -Wno-array-bounds"
61373-    # --enable-experimental-smallocx:
61374-    - os: linux
61375-      env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug \
61376---enable-experimental-smallocx --enable-stats --enable-prof" \
61377-EXTRA_CFLAGS="-Werror -Wno-array-bounds"
61378-"""
61379-
61380-
61381-def main():
61382-    jobs = '\n'.join((
61383-        generate_windows(AMD64),
61384-
61385-        generate_freebsd(AMD64),
61386-
61387-        generate_linux(AMD64),
61388-        generate_linux(PPC64LE),
61389-
61390-        generate_macos(AMD64),
61391-
61392-        get_manual_jobs(),
61393-    ))
61394-
61395-    print(TRAVIS_TEMPLATE.format(jobs=jobs))
61396-
61397-
61398-if __name__ == '__main__':
61399-    main()
61400diff --git a/jemalloc/scripts/linux/before_install.sh b/jemalloc/scripts/linux/before_install.sh
61401deleted file mode 100644
61402index 6741746..0000000
61403--- a/jemalloc/scripts/linux/before_install.sh
61404+++ /dev/null
61405@@ -1,13 +0,0 @@
61406-#!/bin/bash
61407-
61408-set -ev
61409-
61410-if [[ "$TRAVIS_OS_NAME" != "linux" ]]; then
61411-    echo "Incorrect \$TRAVIS_OS_NAME: expected linux, got $TRAVIS_OS_NAME"
61412-    exit 1
61413-fi
61414-
61415-if [[ "$CROSS_COMPILE_32BIT" == "yes" ]]; then
61416-    sudo apt-get update
61417-    sudo apt-get -y install gcc-multilib g++-multilib
61418-fi
61419diff --git a/jemalloc/scripts/windows/before_install.sh b/jemalloc/scripts/windows/before_install.sh
61420deleted file mode 100644
61421index 2740c45..0000000
61422--- a/jemalloc/scripts/windows/before_install.sh
61423+++ /dev/null
61424@@ -1,83 +0,0 @@
61425-#!/bin/bash
61426-
61427-set -e
61428-
61429-# The purpose of this script is to install build dependencies and to set
61430-# $build_env to a function that sets the appropriate environment variables:
61431-# the (mingw32|mingw64) environment if we want to compile with gcc, or
61432-# (mingw32|mingw64) plus vcvarsall.bat if we want to compile with cl.exe.
61433-
61434-if [[ "$TRAVIS_OS_NAME" != "windows" ]]; then
61435-    echo "Incorrect \$TRAVIS_OS_NAME: expected windows, got $TRAVIS_OS_NAME"
61436-    exit 1
61437-fi
61438-
61439-[[ ! -f C:/tools/msys64/msys2_shell.cmd ]] && rm -rf C:/tools/msys64
61440-choco uninstall -y mingw
61441-choco upgrade --no-progress -y msys2
61442-
61443-msys_shell_cmd="cmd //C RefreshEnv.cmd && set MSYS=winsymlinks:nativestrict && C:\\tools\\msys64\\msys2_shell.cmd"
61444-
61445-msys2() { $msys_shell_cmd -defterm -no-start -msys2 -c "$*"; }
61446-mingw32() { $msys_shell_cmd -defterm -no-start -mingw32 -c "$*"; }
61447-mingw64() { $msys_shell_cmd -defterm -no-start -mingw64 -c "$*"; }
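-# Each wrapper runs its argument inside the corresponding MSYS2 subshell, e.g.
-# `mingw64 gcc --version` would run gcc inside the 64-bit MinGW environment.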
61448-
61449-if [[ "$CROSS_COMPILE_32BIT" == "yes" ]]; then
61450-    mingw=mingw32
61451-    mingw_gcc_package_arch=i686
61452-else
61453-    mingw=mingw64
61454-    mingw_gcc_package_arch=x86_64
61455-fi
61456-
61457-if [[ "$CC" == *"gcc"* ]]; then
61458-    $mingw pacman -S --noconfirm --needed \
61459-        autotools \
61460-        git \
61461-        mingw-w64-${mingw_gcc_package_arch}-make \
61462-	    mingw-w64-${mingw_gcc_package_arch}-gcc \
61463-	    mingw-w64-${mingw_gcc_package_arch}-binutils
61464-    build_env=$mingw
61465-elif [[ "$CC" == *"cl"* ]]; then
61466-    $mingw pacman -S --noconfirm --needed \
61467-        autotools \
61468-	    git \
61469-	    mingw-w64-${mingw_gcc_package_arch}-make \
61470-	    mingw-w64-${mingw_gcc_package_arch}-binutils
61471-
61472-    # In order to use the MSVC compiler (cl.exe), we need to correctly set some environment
61473-    # variables, namely PATH, INCLUDE, LIB and LIBPATH. The correct values of these
61474-    # variables are set by a batch script "vcvarsall.bat". The code below generates
61475-    # a batch script that calls "vcvarsall.bat" and prints the environment variables.
61476-    #
61477-    # Then, those environment variables are transformed from cmd to bash format and put
61478-    # into a script $apply_vsenv. If cl.exe needs to be used from bash, one can
61479-    # 'source $apply_vsenv' and it will apply the environment variables needed for cl.exe
61480-    # to be located and function correctly.
61481-    #
61482-    # Finally, a function "mingw_with_msvc_vars" is generated which forwards user input
61483-    # into the correct mingw (32 or 64) subshell that automatically performs 'source $apply_vsenv',
61484-    # making it possible for autotools to discover and use cl.exe.
61485-    vcvarsall="vcvarsall.tmp.bat"
61486-    echo "@echo off" > $vcvarsall
61487-    echo "call \"c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\\\vcvarsall.bat\" $USE_MSVC" >> $vcvarsall
61488-    echo "set" >> $vcvarsall
61489-
61490-    apply_vsenv="./apply_vsenv.sh"
61491-    cmd //C $vcvarsall | grep -E "^PATH=" | sed -n -e 's/\(.*\)=\(.*\)/export \1=$PATH:"\2"/g' \
61492-        -e 's/\([a-zA-Z]\):[\\\/]/\/\1\//g' \
61493-        -e 's/\\/\//g' \
61494-        -e 's/;\//:\//gp' > $apply_vsenv
61495-    cmd //C $vcvarsall | grep -E "^(INCLUDE|LIB|LIBPATH)=" | sed -n -e 's/\(.*\)=\(.*\)/export \1="\2"/gp' >> $apply_vsenv
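-    # For illustration (paths are hypothetical), $apply_vsenv ends up with
-    # lines shaped like:
-    #   export PATH=$PATH:"/c/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin:..."
-    #   export INCLUDE="C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\include;..."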
61496-
61497-    cat $apply_vsenv
61498-    mingw_with_msvc_vars() { $msys_shell_cmd -defterm -no-start -$mingw -c "source $apply_vsenv && ""$*"; }
61499-    build_env=mingw_with_msvc_vars
61500-
61501-    rm -f $vcvarsall
61502-else
61503-    echo "Unknown C compiler: $CC"
61504-    exit 1
61505-fi
61506-
61507-echo "Build environment function: $build_env"
61508diff --git a/jemalloc/scripts/windows/before_script.sh b/jemalloc/scripts/windows/before_script.sh
61509deleted file mode 100644
61510index 9d30aba..0000000
61511--- a/jemalloc/scripts/windows/before_script.sh
61512+++ /dev/null
61513@@ -1,20 +0,0 @@
61514-#!/bin/bash
61515-
61516-set -e
61517-
61518-if [[ "$TRAVIS_OS_NAME" != "windows" ]]; then
61519-    echo "Incorrect \$TRAVIS_OS_NAME: expected windows, got $TRAVIS_OS_NAME"
61520-    exit 1
61521-fi
61522-
61523-$build_env autoconf
61524-$build_env ./configure $CONFIGURE_FLAGS
61525-# mingw32-make simply means "make"; it is unrelated to mingw32 vs mingw64.
61526-# Disregard the prefix and treat it as "make".
61527-$build_env mingw32-make -j3
61528-# At the moment, it's impossible to make tests in parallel,
61529-# seemingly due to concurrent writes to the '.pdb' file. I don't know why
61530-# that happens, because we explicitly supply '/Fs' to the compiler.
61531-# Until we figure out how to fix it, we should build tests sequentially
61532-# on Windows.
61533-$build_env mingw32-make tests
61534diff --git a/jemalloc/scripts/windows/script.sh b/jemalloc/scripts/windows/script.sh
61535deleted file mode 100644
61536index 3a27f70..0000000
61537--- a/jemalloc/scripts/windows/script.sh
61538+++ /dev/null
61539@@ -1,10 +0,0 @@
61540-#!/bin/bash
61541-
61542-set -e
61543-
61544-if [[ "$TRAVIS_OS_NAME" != "windows" ]]; then
61545-    echo "Incorrect \$TRAVIS_OS_NAME: expected windows, got $TRAVIS_OS_NAME"
61546-    exit 1
61547-fi
61548-
61549-$build_env mingw32-make -k check
61550diff --git a/jemalloc/src/arena.c b/jemalloc/src/arena.c
61551deleted file mode 100644
61552index 857b27c..0000000
61553--- a/jemalloc/src/arena.c
61554+++ /dev/null
61555@@ -1,1891 +0,0 @@
61556-#include "jemalloc/internal/jemalloc_preamble.h"
61557-#include "jemalloc/internal/jemalloc_internal_includes.h"
61558-
61559-#include "jemalloc/internal/assert.h"
61560-#include "jemalloc/internal/decay.h"
61561-#include "jemalloc/internal/ehooks.h"
61562-#include "jemalloc/internal/extent_dss.h"
61563-#include "jemalloc/internal/extent_mmap.h"
61564-#include "jemalloc/internal/san.h"
61565-#include "jemalloc/internal/mutex.h"
61566-#include "jemalloc/internal/rtree.h"
61567-#include "jemalloc/internal/safety_check.h"
61568-#include "jemalloc/internal/util.h"
61569-
61570-JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
61571-
61572-/******************************************************************************/
61573-/* Data. */
61574-
61575-/*
61576- * Define names for both uninitialized and initialized phases, so that
61577- * options and mallctl processing are straightforward.
61578- */
61579-const char *percpu_arena_mode_names[] = {
61580-	"percpu",
61581-	"phycpu",
61582-	"disabled",
61583-	"percpu",
61584-	"phycpu"
61585-};
61586-percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;
61587-
61588-ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
61589-ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
61590-
61591-static atomic_zd_t dirty_decay_ms_default;
61592-static atomic_zd_t muzzy_decay_ms_default;
61593-
61594-emap_t arena_emap_global;
61595-pa_central_t arena_pa_central_global;
61596-
61597-div_info_t arena_binind_div_info[SC_NBINS];
61598-
61599-size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
61600-size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
61601-
61602-uint32_t arena_bin_offsets[SC_NBINS];
61603-static unsigned nbins_total;
61604-
61605-static unsigned huge_arena_ind;
61606-
61607-const arena_config_t arena_config_default = {
61608-	/* .extent_hooks = */ (extent_hooks_t *)&ehooks_default_extent_hooks,
61609-	/* .metadata_use_hooks = */ true,
61610-};
61611-
61612-/******************************************************************************/
61613-/*
61614- * Function prototypes for static functions that are referenced prior to
61615- * definition.
61616- */
61617-
61618-static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
61619-    bool is_background_thread, bool all);
61620-static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
61621-    bin_t *bin);
61622-static void
61623-arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
61624-    size_t npages_new);
61625-
61626-/******************************************************************************/
61627-
61628-void
61629-arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
61630-    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
61631-    size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
61632-	*nthreads += arena_nthreads_get(arena, false);
61633-	*dss = dss_prec_names[arena_dss_prec_get(arena)];
61634-	*dirty_decay_ms = arena_decay_ms_get(arena, extent_state_dirty);
61635-	*muzzy_decay_ms = arena_decay_ms_get(arena, extent_state_muzzy);
61636-	pa_shard_basic_stats_merge(&arena->pa_shard, nactive, ndirty, nmuzzy);
61637-}
61638-
61639-void
61640-arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
61641-    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
61642-    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
61643-    bin_stats_data_t *bstats, arena_stats_large_t *lstats,
61644-    pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats) {
61645-	cassert(config_stats);
61646-
61647-	arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
61648-	    muzzy_decay_ms, nactive, ndirty, nmuzzy);
61649-
61650-	size_t base_allocated, base_resident, base_mapped, metadata_thp;
61651-	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
61652-	    &base_mapped, &metadata_thp);
61653-	size_t pac_mapped_sz = pac_mapped(&arena->pa_shard.pac);
61654-	astats->mapped += base_mapped + pac_mapped_sz;
61655-	astats->resident += base_resident;
61656-
61657-	LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
61658-
61659-	astats->base += base_allocated;
61660-	atomic_load_add_store_zu(&astats->internal, arena_internal_get(arena));
61661-	astats->metadata_thp += metadata_thp;
61662-
61663-	for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
61664-		uint64_t nmalloc = locked_read_u64(tsdn,
61665-		    LOCKEDINT_MTX(arena->stats.mtx),
61666-		    &arena->stats.lstats[i].nmalloc);
61667-		locked_inc_u64_unsynchronized(&lstats[i].nmalloc, nmalloc);
61668-		astats->nmalloc_large += nmalloc;
61669-
61670-		uint64_t ndalloc = locked_read_u64(tsdn,
61671-		    LOCKEDINT_MTX(arena->stats.mtx),
61672-		    &arena->stats.lstats[i].ndalloc);
61673-		locked_inc_u64_unsynchronized(&lstats[i].ndalloc, ndalloc);
61674-		astats->ndalloc_large += ndalloc;
61675-
61676-		uint64_t nrequests = locked_read_u64(tsdn,
61677-		    LOCKEDINT_MTX(arena->stats.mtx),
61678-		    &arena->stats.lstats[i].nrequests);
61679-		locked_inc_u64_unsynchronized(&lstats[i].nrequests,
61680-		    nmalloc + nrequests);
61681-		astats->nrequests_large += nmalloc + nrequests;
61682-
61683-		/* nfill == nmalloc for large currently. */
61684-		locked_inc_u64_unsynchronized(&lstats[i].nfills, nmalloc);
61685-		astats->nfills_large += nmalloc;
61686-
61687-		uint64_t nflush = locked_read_u64(tsdn,
61688-		    LOCKEDINT_MTX(arena->stats.mtx),
61689-		    &arena->stats.lstats[i].nflushes);
61690-		locked_inc_u64_unsynchronized(&lstats[i].nflushes, nflush);
61691-		astats->nflushes_large += nflush;
61692-
61693-		assert(nmalloc >= ndalloc);
61694-		assert(nmalloc - ndalloc <= SIZE_T_MAX);
61695-		size_t curlextents = (size_t)(nmalloc - ndalloc);
61696-		lstats[i].curlextents += curlextents;
61697-		astats->allocated_large +=
61698-		    curlextents * sz_index2size(SC_NBINS + i);
61699-	}
61700-
61701-	pa_shard_stats_merge(tsdn, &arena->pa_shard, &astats->pa_shard_stats,
61702-	    estats, hpastats, secstats, &astats->resident);
61703-
61704-	LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
61705-
61706-	/* Currently cached bytes and sanitizer-stashed bytes in tcache. */
61707-	astats->tcache_bytes = 0;
61708-	astats->tcache_stashed_bytes = 0;
61709-	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
61710-	cache_bin_array_descriptor_t *descriptor;
61711-	ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
61712-		for (szind_t i = 0; i < nhbins; i++) {
61713-			cache_bin_t *cache_bin = &descriptor->bins[i];
61714-			cache_bin_sz_t ncached, nstashed;
61715-			cache_bin_nitems_get_remote(cache_bin,
61716-			    &tcache_bin_info[i], &ncached, &nstashed);
61717-
61718-			astats->tcache_bytes += ncached * sz_index2size(i);
61719-			astats->tcache_stashed_bytes += nstashed *
61720-			    sz_index2size(i);
61721-		}
61722-	}
61723-	malloc_mutex_prof_read(tsdn,
61724-	    &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
61725-	    &arena->tcache_ql_mtx);
61726-	malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
61727-
61728-#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind)				\
61729-    malloc_mutex_lock(tsdn, &arena->mtx);				\
61730-    malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind],		\
61731-        &arena->mtx);							\
61732-    malloc_mutex_unlock(tsdn, &arena->mtx);
61733-
61734-	/* Gather per arena mutex profiling data. */
61735-	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
61736-	READ_ARENA_MUTEX_PROF_DATA(base->mtx,
61737-	    arena_prof_mutex_base);
61738-#undef READ_ARENA_MUTEX_PROF_DATA
61739-	pa_shard_mtx_stats_read(tsdn, &arena->pa_shard,
61740-	    astats->mutex_prof_data);
61741-
61742-	nstime_copy(&astats->uptime, &arena->create_time);
61743-	nstime_update(&astats->uptime);
61744-	nstime_subtract(&astats->uptime, &arena->create_time);
61745-
61746-	for (szind_t i = 0; i < SC_NBINS; i++) {
61747-		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
61748-			bin_stats_merge(tsdn, &bstats[i],
61749-			    arena_get_bin(arena, i, j));
61750-		}
61751-	}
61752-}
61753-
61754-static void
61755-arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
61756-    bool is_background_thread) {
61757-	if (!background_thread_enabled() || is_background_thread) {
61758-		return;
61759-	}
61760-	background_thread_info_t *info =
61761-	    arena_background_thread_info_get(arena);
61762-	if (background_thread_indefinite_sleep(info)) {
61763-		arena_maybe_do_deferred_work(tsdn, arena,
61764-		    &arena->pa_shard.pac.decay_dirty, 0);
61765-	}
61766-}
61767-
61768-/*
61769- * React to deferred work generated by a PAI function.
61770- */
61771-void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena) {
61772-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
61773-	    WITNESS_RANK_CORE, 0);
61774-
61775-	if (decay_immediately(&arena->pa_shard.pac.decay_dirty)) {
61776-		arena_decay_dirty(tsdn, arena, false, true);
61777-	}
61778-	arena_background_thread_inactivity_check(tsdn, arena, false);
61779-}
61780-
61781-static void *
61782-arena_slab_reg_alloc(edata_t *slab, const bin_info_t *bin_info) {
61783-	void *ret;
61784-	slab_data_t *slab_data = edata_slab_data_get(slab);
61785-	size_t regind;
61786-
61787-	assert(edata_nfree_get(slab) > 0);
61788-	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
61789-
61790-	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
61791-	ret = (void *)((uintptr_t)edata_addr_get(slab) +
61792-	    (uintptr_t)(bin_info->reg_size * regind));
61793-	edata_nfree_dec(slab);
61794-	return ret;
61795-}
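-/*
- * For example (hypothetical values): with reg_size = 48 and regind = 3, the
- * returned pointer is edata_addr_get(slab) + 144, i.e. the start of the
- * fourth fixed-size region in the slab.
- */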
61796-
61797-static void
61798-arena_slab_reg_alloc_batch(edata_t *slab, const bin_info_t *bin_info,
61799-			   unsigned cnt, void** ptrs) {
61800-	slab_data_t *slab_data = edata_slab_data_get(slab);
61801-
61802-	assert(edata_nfree_get(slab) >= cnt);
61803-	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
61804-
61805-#if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
61806-	for (unsigned i = 0; i < cnt; i++) {
61807-		size_t regind = bitmap_sfu(slab_data->bitmap,
61808-					   &bin_info->bitmap_info);
61809-		*(ptrs + i) = (void *)((uintptr_t)edata_addr_get(slab) +
61810-		    (uintptr_t)(bin_info->reg_size * regind));
61811-	}
61812-#else
61813-	unsigned group = 0;
61814-	bitmap_t g = slab_data->bitmap[group];
61815-	unsigned i = 0;
61816-	while (i < cnt) {
61817-		while (g == 0) {
61818-			g = slab_data->bitmap[++group];
61819-		}
61820-		size_t shift = group << LG_BITMAP_GROUP_NBITS;
61821-		size_t pop = popcount_lu(g);
61822-		if (pop > (cnt - i)) {
61823-			pop = cnt - i;
61824-		}
61825-
61826-		/*
61827-		 * Load from memory locations only once, outside the
61828-		 * hot loop below.
61829-		 */
61830-		uintptr_t base = (uintptr_t)edata_addr_get(slab);
61831-		uintptr_t regsize = (uintptr_t)bin_info->reg_size;
61832-		while (pop--) {
61833-			size_t bit = cfs_lu(&g);
61834-			size_t regind = shift + bit;
61835-			*(ptrs + i) = (void *)(base + regsize * regind);
61836-
61837-			i++;
61838-		}
61839-		slab_data->bitmap[group] = g;
61840-	}
61841-#endif
61842-	edata_nfree_sub(slab, cnt);
61843-}
61844-
61845-static void
61846-arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
61847-	szind_t index, hindex;
61848-
61849-	cassert(config_stats);
61850-
61851-	if (usize < SC_LARGE_MINCLASS) {
61852-		usize = SC_LARGE_MINCLASS;
61853-	}
61854-	index = sz_size2index(usize);
61855-	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
61856-
61857-	locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
61858-	    &arena->stats.lstats[hindex].nmalloc, 1);
61859-}
61860-
61861-static void
61862-arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
61863-	szind_t index, hindex;
61864-
61865-	cassert(config_stats);
61866-
61867-	if (usize < SC_LARGE_MINCLASS) {
61868-		usize = SC_LARGE_MINCLASS;
61869-	}
61870-	index = sz_size2index(usize);
61871-	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
61872-
61873-	locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
61874-	    &arena->stats.lstats[hindex].ndalloc, 1);
61875-}
61876-
61877-static void
61878-arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
61879-    size_t usize) {
61880-	arena_large_malloc_stats_update(tsdn, arena, usize);
61881-	arena_large_dalloc_stats_update(tsdn, arena, oldusize);
61882-}
61883-
61884-edata_t *
61885-arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
61886-    size_t alignment, bool zero) {
61887-	bool deferred_work_generated = false;
61888-	szind_t szind = sz_size2index(usize);
61889-	size_t esize = usize + sz_large_pad;
61890-
61891-	bool guarded = san_large_extent_decide_guard(tsdn,
61892-	    arena_get_ehooks(arena), esize, alignment);
61893-	edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
61894-	    /* slab */ false, szind, zero, guarded, &deferred_work_generated);
61895-	assert(deferred_work_generated == false);
61896-
61897-	if (edata != NULL) {
61898-		if (config_stats) {
61899-			LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
61900-			arena_large_malloc_stats_update(tsdn, arena, usize);
61901-			LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
61902-		}
61903-	}
61904-
61905-	if (edata != NULL && sz_large_pad != 0) {
61906-		arena_cache_oblivious_randomize(tsdn, arena, edata, alignment);
61907-	}
61908-
61909-	return edata;
61910-}
61911-
61912-void
61913-arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
61914-	if (config_stats) {
61915-		LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
61916-		arena_large_dalloc_stats_update(tsdn, arena,
61917-		    edata_usize_get(edata));
61918-		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
61919-	}
61920-}
61921-
61922-void
61923-arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
61924-    size_t oldusize) {
61925-	size_t usize = edata_usize_get(edata);
61926-
61927-	if (config_stats) {
61928-		LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
61929-		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
61930-		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
61931-	}
61932-}
61933-
61934-void
61935-arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
61936-    size_t oldusize) {
61937-	size_t usize = edata_usize_get(edata);
61938-
61939-	if (config_stats) {
61940-		LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
61941-		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
61942-		LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
61943-	}
61944-}
61945-
61946-/*
61947- * In situations where we're not forcing a decay (i.e. it was not specifically
61948- * requested by the user), should we purge ourselves, or wait for the
61949- * background thread to get to it?
61950- */
61951-static pac_purge_eagerness_t
61952-arena_decide_unforced_purge_eagerness(bool is_background_thread) {
61953-	if (is_background_thread) {
61954-		return PAC_PURGE_ALWAYS;
61955-	} else if (!is_background_thread && background_thread_enabled()) {
61956-		return PAC_PURGE_NEVER;
61957-	} else {
61958-		return PAC_PURGE_ON_EPOCH_ADVANCE;
61959-	}
61960-}
61961-
61962-bool
61963-arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
61964-    ssize_t decay_ms) {
61965-	pac_purge_eagerness_t eagerness = arena_decide_unforced_purge_eagerness(
61966-	    /* is_background_thread */ false);
61967-	return pa_decay_ms_set(tsdn, &arena->pa_shard, state, decay_ms,
61968-	    eagerness);
61969-}
61970-
61971-ssize_t
61972-arena_decay_ms_get(arena_t *arena, extent_state_t state) {
61973-	return pa_decay_ms_get(&arena->pa_shard, state);
61974-}
61975-
61976-static bool
61977-arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
61978-    pac_decay_stats_t *decay_stats, ecache_t *ecache,
61979-    bool is_background_thread, bool all) {
61980-	if (all) {
61981-		malloc_mutex_lock(tsdn, &decay->mtx);
61982-		pac_decay_all(tsdn, &arena->pa_shard.pac, decay, decay_stats,
61983-		    ecache, /* fully_decay */ all);
61984-		malloc_mutex_unlock(tsdn, &decay->mtx);
61985-		return false;
61986-	}
61987-
61988-	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
61989-		/* No need to wait if another thread is in progress. */
61990-		return true;
61991-	}
61992-	pac_purge_eagerness_t eagerness =
61993-	    arena_decide_unforced_purge_eagerness(is_background_thread);
61994-	bool epoch_advanced = pac_maybe_decay_purge(tsdn, &arena->pa_shard.pac,
61995-	    decay, decay_stats, ecache, eagerness);
61996-	size_t npages_new;
61997-	if (epoch_advanced) {
61998-		/* Backlog is updated on epoch advance. */
61999-		npages_new = decay_epoch_npages_delta(decay);
62000-	}
62001-	malloc_mutex_unlock(tsdn, &decay->mtx);
62002-
62003-	if (have_background_thread && background_thread_enabled() &&
62004-	    epoch_advanced && !is_background_thread) {
62005-		arena_maybe_do_deferred_work(tsdn, arena, decay, npages_new);
62006-	}
62007-
62008-	return false;
62009-}
62010-
62011-static bool
62012-arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
62013-    bool all) {
62014-	return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_dirty,
62015-	    &arena->pa_shard.pac.stats->decay_dirty,
62016-	    &arena->pa_shard.pac.ecache_dirty, is_background_thread, all);
62017-}
62018-
62019-static bool
62020-arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
62021-    bool all) {
62022-	if (pa_shard_dont_decay_muzzy(&arena->pa_shard)) {
62023-		return false;
62024-	}
62025-	return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_muzzy,
62026-	    &arena->pa_shard.pac.stats->decay_muzzy,
62027-	    &arena->pa_shard.pac.ecache_muzzy, is_background_thread, all);
62028-}
62029-
62030-void
62031-arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
62032-	if (all) {
62033-		/*
62034-		 * We should take a purge of "all" to mean "save as much memory
62035-		 * as possible", including flushing any caches (for situations
62036-		 * like thread death, or manual purge calls).
62037-		 */
62038-		sec_flush(tsdn, &arena->pa_shard.hpa_sec);
62039-	}
62040-	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
62041-		return;
62042-	}
62043-	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
62044-}
62045-
62046-static bool
62047-arena_should_decay_early(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
62048-    background_thread_info_t *info, nstime_t *remaining_sleep,
62049-    size_t npages_new) {
62050-	malloc_mutex_assert_owner(tsdn, &info->mtx);
62051-
62052-	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
62053-		return false;
62054-	}
62055-
62056-	if (!decay_gradually(decay)) {
62057-		malloc_mutex_unlock(tsdn, &decay->mtx);
62058-		return false;
62059-	}
62060-
62061-	nstime_init(remaining_sleep, background_thread_wakeup_time_get(info));
62062-	if (nstime_compare(remaining_sleep, &decay->epoch) <= 0) {
62063-		malloc_mutex_unlock(tsdn, &decay->mtx);
62064-		return false;
62065-	}
62066-	nstime_subtract(remaining_sleep, &decay->epoch);
62067-	if (npages_new > 0) {
62068-		uint64_t npurge_new = decay_npages_purge_in(decay,
62069-		    remaining_sleep, npages_new);
62070-		info->npages_to_purge_new += npurge_new;
62071-	}
62072-	malloc_mutex_unlock(tsdn, &decay->mtx);
62073-	return info->npages_to_purge_new >
62074-	    ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD;
62075-}
62076-
62077-/*
62078- * Check if deferred work needs to be done sooner than planned.
62079- * For decay we might want to wake up earlier because of an influx of dirty
62080- * pages.  Rather than waiting for the previously estimated time, we
62081- * proactively purge those pages.
62082- * If the background thread sleeps indefinitely, always wake it up, because
62083- * some deferred work has been generated.
62084- */
62085-static void
62086-arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
62087-    size_t npages_new) {
62088-	background_thread_info_t *info = arena_background_thread_info_get(
62089-	    arena);
62090-	if (malloc_mutex_trylock(tsdn, &info->mtx)) {
62091-		/*
62092-		 * The background thread may hold the mutex for a long period of
62093-		 * time.  We'd like to avoid the variance on application
62094-		 * threads.  So keep this non-blocking, and leave the work to a
62095-		 * future epoch.
62096-		 */
62097-		return;
62098-	}
62099-	if (!background_thread_is_started(info)) {
62100-		goto label_done;
62101-	}
62102-
62103-	nstime_t remaining_sleep;
62104-	if (background_thread_indefinite_sleep(info)) {
62105-		background_thread_wakeup_early(info, NULL);
62106-	} else if (arena_should_decay_early(tsdn, arena, decay, info,
62107-	    &remaining_sleep, npages_new)) {
62108-		info->npages_to_purge_new = 0;
62109-		background_thread_wakeup_early(info, &remaining_sleep);
62110-	}
62111-label_done:
62112-	malloc_mutex_unlock(tsdn, &info->mtx);
62113-}
62114-
62115-/* Called from background threads. */
62116-void
62117-arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena) {
62118-	arena_decay(tsdn, arena, true, false);
62119-	pa_shard_do_deferred_work(tsdn, &arena->pa_shard);
62120-}
62121-
62122-void
62123-arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) {
62124-	bool deferred_work_generated = false;
62125-	pa_dalloc(tsdn, &arena->pa_shard, slab, &deferred_work_generated);
62126-	if (deferred_work_generated) {
62127-		arena_handle_deferred_work(tsdn, arena);
62128-	}
62129-}
62130-
62131-static void
62132-arena_bin_slabs_nonfull_insert(bin_t *bin, edata_t *slab) {
62133-	assert(edata_nfree_get(slab) > 0);
62134-	edata_heap_insert(&bin->slabs_nonfull, slab);
62135-	if (config_stats) {
62136-		bin->stats.nonfull_slabs++;
62137-	}
62138-}
62139-
62140-static void
62141-arena_bin_slabs_nonfull_remove(bin_t *bin, edata_t *slab) {
62142-	edata_heap_remove(&bin->slabs_nonfull, slab);
62143-	if (config_stats) {
62144-		bin->stats.nonfull_slabs--;
62145-	}
62146-}
62147-
62148-static edata_t *
62149-arena_bin_slabs_nonfull_tryget(bin_t *bin) {
62150-	edata_t *slab = edata_heap_remove_first(&bin->slabs_nonfull);
62151-	if (slab == NULL) {
62152-		return NULL;
62153-	}
62154-	if (config_stats) {
62155-		bin->stats.reslabs++;
62156-		bin->stats.nonfull_slabs--;
62157-	}
62158-	return slab;
62159-}
62160-
62161-static void
62162-arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, edata_t *slab) {
62163-	assert(edata_nfree_get(slab) == 0);
62164-	/*
62165-	 *  Tracking extents is required by arena_reset, which is not allowed
62166-	 *  for auto arenas.  Bypass this step to avoid touching the edata
62167-	 *  linkage (often results in cache misses) for auto arenas.
62168-	 */
62169-	if (arena_is_auto(arena)) {
62170-		return;
62171-	}
62172-	edata_list_active_append(&bin->slabs_full, slab);
62173-}
62174-
62175-static void
62176-arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, edata_t *slab) {
62177-	if (arena_is_auto(arena)) {
62178-		return;
62179-	}
62180-	edata_list_active_remove(&bin->slabs_full, slab);
62181-}
62182-
62183-static void
62184-arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
62185-	edata_t *slab;
62186-
62187-	malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
62188-	if (bin->slabcur != NULL) {
62189-		slab = bin->slabcur;
62190-		bin->slabcur = NULL;
62191-		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
62192-		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
62193-		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
62194-	}
62195-	while ((slab = edata_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
62196-		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
62197-		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
62198-		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
62199-	}
62200-	for (slab = edata_list_active_first(&bin->slabs_full); slab != NULL;
62201-	     slab = edata_list_active_first(&bin->slabs_full)) {
62202-		arena_bin_slabs_full_remove(arena, bin, slab);
62203-		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
62204-		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
62205-		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
62206-	}
62207-	if (config_stats) {
62208-		bin->stats.curregs = 0;
62209-		bin->stats.curslabs = 0;
62210-	}
62211-	malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
62212-}
62213-
62214-void
62215-arena_reset(tsd_t *tsd, arena_t *arena) {
62216-	/*
62217-	 * Locking in this function is unintuitive.  The caller guarantees that
62218-	 * no concurrent operations are happening in this arena, but there are
62219-	 * still reasons that some locking is necessary:
62220-	 *
62221-	 * - Some of the functions in the transitive closure of calls assume
62222-	 *   appropriate locks are held, and in some cases these locks are
62223-	 *   temporarily dropped to avoid lock order reversal or deadlock due to
62224-	 *   reentry.
62225-	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
62226-	 *   strictly speaking this is a "concurrent operation", disallowing
62227-	 *   stats refreshes would impose an inconvenient burden.
62228-	 */
62229-
62230-	/* Large allocations. */
62231-	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
62232-
62233-	for (edata_t *edata = edata_list_active_first(&arena->large);
62234-	    edata != NULL; edata = edata_list_active_first(&arena->large)) {
62235-		void *ptr = edata_base_get(edata);
62236-		size_t usize;
62237-
62238-		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
62239-		emap_alloc_ctx_t alloc_ctx;
62240-		emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
62241-		    &alloc_ctx);
62242-		assert(alloc_ctx.szind != SC_NSIZES);
62243-
62244-		if (config_stats || (config_prof && opt_prof)) {
62245-			usize = sz_index2size(alloc_ctx.szind);
62246-			assert(usize == isalloc(tsd_tsdn(tsd), ptr));
62247-		}
62248-		/* Remove large allocation from prof sample set. */
62249-		if (config_prof && opt_prof) {
62250-			prof_free(tsd, ptr, usize, &alloc_ctx);
62251-		}
62252-		large_dalloc(tsd_tsdn(tsd), edata);
62253-		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
62254-	}
62255-	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
62256-
62257-	/* Bins. */
62258-	for (unsigned i = 0; i < SC_NBINS; i++) {
62259-		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
62260-			arena_bin_reset(tsd, arena, arena_get_bin(arena, i, j));
62261-		}
62262-	}
62263-	pa_shard_reset(tsd_tsdn(tsd), &arena->pa_shard);
62264-}
62265-
62266-static void
62267-arena_prepare_base_deletion_sync_finish(tsd_t *tsd, malloc_mutex_t **mutexes,
62268-    unsigned n_mtx) {
62269-	for (unsigned i = 0; i < n_mtx; i++) {
62270-		malloc_mutex_lock(tsd_tsdn(tsd), mutexes[i]);
62271-		malloc_mutex_unlock(tsd_tsdn(tsd), mutexes[i]);
62272-	}
62273-}
62274-
62275-#define ARENA_DESTROY_MAX_DELAYED_MTX 32
62276-static void
62277-arena_prepare_base_deletion_sync(tsd_t *tsd, malloc_mutex_t *mtx,
62278-    malloc_mutex_t **delayed_mtx, unsigned *n_delayed) {
62279-	if (!malloc_mutex_trylock(tsd_tsdn(tsd), mtx)) {
62280-		/* No contention. */
62281-		malloc_mutex_unlock(tsd_tsdn(tsd), mtx);
62282-		return;
62283-	}
62284-	unsigned n = *n_delayed;
62285-	assert(n < ARENA_DESTROY_MAX_DELAYED_MTX);
62286-	/* Add another to the batch. */
62287-	delayed_mtx[n++] = mtx;
62288-
62289-	if (n == ARENA_DESTROY_MAX_DELAYED_MTX) {
62290-		arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n);
62291-		n = 0;
62292-	}
62293-	*n_delayed = n;
62294-}
62295-
62296-static void
62297-arena_prepare_base_deletion(tsd_t *tsd, base_t *base_to_destroy) {
62298-	/*
62299-	 * In order to coalesce, emap_try_acquire_edata_neighbor will attempt to
62300-	 * check neighbor edata's state to determine eligibility.  This means
62301-	 * under certain conditions, the metadata from an arena can be accessed
62302-	 * w/o holding any locks from that arena.  In order to guarantee safe
62303-	 * memory access, the metadata and the underlying base allocator needs
62304-	 * memory access, the metadata and the underlying base allocator need
62305-	 *
62306-	 * 1) with opt_retain, the arena boundary implies the is_head state
62307-	 * (tracked in the rtree leaf), and the coalesce flow will stop at the
62308-	 * head state branch.  Therefore no cross-arena metadata access is
62309-	 * possible.
62310-	 *
62311-	 * 2) w/o opt_retain, the arena id needs to be read from the edata_t,
62312-	 * meaning read only cross-arena metadata access is possible.  The
62313-	 * coalesce attempt will stop at the arena_id mismatch, and is always
62314-	 * under one of the ecache locks.  To allow safe passthrough of such
62315-	 * metadata accesses, the loop below will iterate through all manual
62316-	 * arenas' ecache locks.  As all the metadata from this base allocator
62317-	 * have been unlinked from the rtree, after going through all the
62318-	 * relevant ecache locks, it's safe to say that a) pending accesses are
62319-	 * all finished, and b) no new access will be generated.
62320-	 */
62321-	if (opt_retain) {
62322-		return;
62323-	}
62324-	unsigned destroy_ind = base_ind_get(base_to_destroy);
62325-	assert(destroy_ind >= manual_arena_base);
62326-
62327-	tsdn_t *tsdn = tsd_tsdn(tsd);
62328-	malloc_mutex_t *delayed_mtx[ARENA_DESTROY_MAX_DELAYED_MTX];
62329-	unsigned n_delayed = 0, total = narenas_total_get();
62330-	for (unsigned i = 0; i < total; i++) {
62331-		if (i == destroy_ind) {
62332-			continue;
62333-		}
62334-		arena_t *arena = arena_get(tsdn, i, false);
62335-		if (arena == NULL) {
62336-			continue;
62337-		}
62338-		pac_t *pac = &arena->pa_shard.pac;
62339-		arena_prepare_base_deletion_sync(tsd, &pac->ecache_dirty.mtx,
62340-		    delayed_mtx, &n_delayed);
62341-		arena_prepare_base_deletion_sync(tsd, &pac->ecache_muzzy.mtx,
62342-		    delayed_mtx, &n_delayed);
62343-		arena_prepare_base_deletion_sync(tsd, &pac->ecache_retained.mtx,
62344-		    delayed_mtx, &n_delayed);
62345-	}
62346-	arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n_delayed);
62347-}
62348-#undef ARENA_DESTROY_MAX_DELAYED_MTX
62349-
62350-void
62351-arena_destroy(tsd_t *tsd, arena_t *arena) {
62352-	assert(base_ind_get(arena->base) >= narenas_auto);
62353-	assert(arena_nthreads_get(arena, false) == 0);
62354-	assert(arena_nthreads_get(arena, true) == 0);
62355-
62356-	/*
62357-	 * No allocations have occurred since arena_reset() was called.
62358-	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
62359-	 * extents, so only retained extents may remain and it's safe to call
62360-	 * pa_shard_destroy_retained.
62361-	 */
62362-	pa_shard_destroy(tsd_tsdn(tsd), &arena->pa_shard);
62363-
62364-	/*
62365-	 * Remove the arena pointer from the arenas array.  We rely on the fact
62366-	 * that there is no way for the application to get a dirty read from the
62367-	 * arenas array unless there is an inherent race in the application
62368-	 * involving access of an arena being concurrently destroyed.  The
62369-	 * application must synchronize knowledge of the arena's validity, so as
62370-	 * long as we use an atomic write to update the arenas array, the
62371-	 * application will get a clean read any time after it synchronizes
62372-	 * knowledge that the arena is no longer valid.
62373-	 */
62374-	arena_set(base_ind_get(arena->base), NULL);
62375-
62376-	/*
62377-	 * Destroy the base allocator, which manages all metadata ever mapped by
62378-	 * this arena.  The prepare function makes sure there are no pending
62379-	 * accesses to the metadata in this base anymore.
62380-	 */
62381-	arena_prepare_base_deletion(tsd, arena->base);
62382-	base_delete(tsd_tsdn(tsd), arena->base);
62383-}
62384-
62385-static edata_t *
62386-arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
62387-    const bin_info_t *bin_info) {
62388-	bool deferred_work_generated = false;
62389-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
62390-	    WITNESS_RANK_CORE, 0);
62391-
62392-	bool guarded = san_slab_extent_decide_guard(tsdn,
62393-	    arena_get_ehooks(arena));
62394-	edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size,
62395-	    /* alignment */ PAGE, /* slab */ true, /* szind */ binind,
62396-	     /* zero */ false, guarded, &deferred_work_generated);
62397-
62398-	if (deferred_work_generated) {
62399-		arena_handle_deferred_work(tsdn, arena);
62400-	}
62401-
62402-	if (slab == NULL) {
62403-		return NULL;
62404-	}
62405-	assert(edata_slab_get(slab));
62406-
62407-	/* Initialize slab internals. */
62408-	slab_data_t *slab_data = edata_slab_data_get(slab);
62409-	edata_nfree_binshard_set(slab, bin_info->nregs, binshard);
62410-	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
62411-
62412-	return slab;
62413-}
62414-
62415-/*
62416- * Before attempting the _with_fresh_slab approaches below, the _no_fresh_slab
62417- * variants (i.e. through slabcur and nonfull) must be tried first.
62418- */
62419-static void
62420-arena_bin_refill_slabcur_with_fresh_slab(tsdn_t *tsdn, arena_t *arena,
62421-    bin_t *bin, szind_t binind, edata_t *fresh_slab) {
62422-	malloc_mutex_assert_owner(tsdn, &bin->lock);
62423-	/* Only called after slabcur and nonfull both failed. */
62424-	assert(bin->slabcur == NULL);
62425-	assert(edata_heap_first(&bin->slabs_nonfull) == NULL);
62426-	assert(fresh_slab != NULL);
62427-
62428-	/* A new slab from arena_slab_alloc() */
62429-	assert(edata_nfree_get(fresh_slab) == bin_infos[binind].nregs);
62430-	if (config_stats) {
62431-		bin->stats.nslabs++;
62432-		bin->stats.curslabs++;
62433-	}
62434-	bin->slabcur = fresh_slab;
62435-}
62436-
62437-/* Refill slabcur and then alloc using the fresh slab */
62438-static void *
62439-arena_bin_malloc_with_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
62440-    szind_t binind, edata_t *fresh_slab) {
62441-	malloc_mutex_assert_owner(tsdn, &bin->lock);
62442-	arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena, bin, binind,
62443-	    fresh_slab);
62444-
62445-	return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]);
62446-}
62447-
62448-static bool
62449-arena_bin_refill_slabcur_no_fresh_slab(tsdn_t *tsdn, arena_t *arena,
62450-    bin_t *bin) {
62451-	malloc_mutex_assert_owner(tsdn, &bin->lock);
62452-	/* Only called after arena_slab_reg_alloc[_batch] failed. */
62453-	assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0);
62454-
62455-	if (bin->slabcur != NULL) {
62456-		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
62457-	}
62458-
62459-	/* Look for a usable slab. */
62460-	bin->slabcur = arena_bin_slabs_nonfull_tryget(bin);
62461-	assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) > 0);
62462-
62463-	return (bin->slabcur == NULL);
62464-}
62465-
62466-bin_t *
62467-arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
62468-    unsigned *binshard_p) {
62469-	unsigned binshard;
62470-	if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) {
62471-		binshard = 0;
62472-	} else {
62473-		binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
62474-	}
62475-	assert(binshard < bin_infos[binind].n_shards);
62476-	if (binshard_p != NULL) {
62477-		*binshard_p = binshard;
62478-	}
62479-	return arena_get_bin(arena, binind, binshard);
62480-}
62481-
62482-void
62483-arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
62484-    cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
62485-    const unsigned nfill) {
62486-	assert(cache_bin_ncached_get_local(cache_bin, cache_bin_info) == 0);
62487-
62488-	const bin_info_t *bin_info = &bin_infos[binind];
62489-
62490-	CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nfill);
62491-	cache_bin_init_ptr_array_for_fill(cache_bin, cache_bin_info, &ptrs,
62492-	    nfill);
62493-	/*
62494-	 * Bin-local resources are used first: 1) bin->slabcur, and 2) nonfull
62495-	 * slabs.  After both are exhausted, new slabs will be allocated through
62496-	 * arena_slab_alloc().
62497-	 *
62498-	 * Bin lock is only taken / released right before / after the while(...)
62499-	 * refill loop, with new slab allocation (which has its own locking)
62500-	 * kept outside of the loop.  This setup facilitates flat combining, at
62501-	 * the cost of the nested loop (through goto label_refill).
62502-	 *
62503-	 * To optimize for cases with contention and limited resources
62504-	 * (e.g. hugepage-backed or non-overcommit arenas), each fill-iteration
62505-	 * gets one chance of slab_alloc, and a retry of bin local resources
62506-	 * after the slab allocation (regardless of whether slab_alloc failed,
62507-	 * because the bin lock is dropped during the slab allocation).
62508-	 *
62509-	 * In other words, new slab allocation is allowed, as long as there was
62510-	 * progress since the previous slab_alloc.  This is tracked with
62511-	 * made_progress below, initialized to true to jump start the first
62512-	 * iteration.
62513-	 *
62514-	 * In other words (again), the loop will only terminate early (i.e. stop
62515-	 * with filled < nfill) after going through the three steps: a) bin
62516-	 * local exhausted, b) unlock and slab_alloc returns null, c) re-lock
62517-	 * and bin local fails again.
62518-	 */
62519-	bool made_progress = true;
62520-	edata_t *fresh_slab = NULL;
62521-	bool alloc_and_retry = false;
62522-	unsigned filled = 0;
62523-	unsigned binshard;
62524-	bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
62525-
62526-label_refill:
62527-	malloc_mutex_lock(tsdn, &bin->lock);
62528-
62529-	while (filled < nfill) {
62530-		/* Try batch-fill from slabcur first. */
62531-		edata_t *slabcur = bin->slabcur;
62532-		if (slabcur != NULL && edata_nfree_get(slabcur) > 0) {
62533-			unsigned tofill = nfill - filled;
62534-			unsigned nfree = edata_nfree_get(slabcur);
62535-			unsigned cnt = tofill < nfree ? tofill : nfree;
62536-
62537-			arena_slab_reg_alloc_batch(slabcur, bin_info, cnt,
62538-			    &ptrs.ptr[filled]);
62539-			made_progress = true;
62540-			filled += cnt;
62541-			continue;
62542-		}
62543-		/* Next try refilling slabcur from nonfull slabs. */
62544-		if (!arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) {
62545-			assert(bin->slabcur != NULL);
62546-			continue;
62547-		}
62548-
62549-		/* Then see if a new slab was reserved already. */
62550-		if (fresh_slab != NULL) {
62551-			arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena,
62552-			    bin, binind, fresh_slab);
62553-			assert(bin->slabcur != NULL);
62554-			fresh_slab = NULL;
62555-			continue;
62556-		}
62557-
62558-		/* Try slab_alloc if made progress (or never did slab_alloc). */
62559-		if (made_progress) {
62560-			assert(bin->slabcur == NULL);
62561-			assert(fresh_slab == NULL);
62562-			alloc_and_retry = true;
62563-			/* Alloc a new slab then come back. */
62564-			break;
62565-		}
62566-
62567-		/* OOM. */
62568-
62569-		assert(fresh_slab == NULL);
62570-		assert(!alloc_and_retry);
62571-		break;
62572-	} /* while (filled < nfill) loop. */
62573-
62574-	if (config_stats && !alloc_and_retry) {
62575-		bin->stats.nmalloc += filled;
62576-		bin->stats.nrequests += cache_bin->tstats.nrequests;
62577-		bin->stats.curregs += filled;
62578-		bin->stats.nfills++;
62579-		cache_bin->tstats.nrequests = 0;
62580-	}
62581-
62582-	malloc_mutex_unlock(tsdn, &bin->lock);
62583-
62584-	if (alloc_and_retry) {
62585-		assert(fresh_slab == NULL);
62586-		assert(filled < nfill);
62587-		assert(made_progress);
62588-
62589-		fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard,
62590-		    bin_info);
62591-		/* fresh_slab NULL case handled in the while loop. */
62592-
62593-		alloc_and_retry = false;
62594-		made_progress = false;
62595-		goto label_refill;
62596-	}
62597-	assert(filled == nfill || (fresh_slab == NULL && !made_progress));
62598-
62599-	/* Release if allocated but not used. */
62600-	if (fresh_slab != NULL) {
62601-		assert(edata_nfree_get(fresh_slab) == bin_info->nregs);
62602-		arena_slab_dalloc(tsdn, arena, fresh_slab);
62603-		fresh_slab = NULL;
62604-	}
62605-
62606-	cache_bin_finish_fill(cache_bin, cache_bin_info, &ptrs, filled);
62607-	arena_decay_tick(tsdn, arena);
62608-}
62609-
62610-size_t
62611-arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
62612-    void **ptrs, size_t nfill, bool zero) {
62613-	assert(binind < SC_NBINS);
62614-	const bin_info_t *bin_info = &bin_infos[binind];
62615-	const size_t nregs = bin_info->nregs;
62616-	assert(nregs > 0);
62617-	const size_t usize = bin_info->reg_size;
62618-
62619-	const bool manual_arena = !arena_is_auto(arena);
62620-	unsigned binshard;
62621-	bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
62622-
62623-	size_t nslab = 0;
62624-	size_t filled = 0;
62625-	edata_t *slab = NULL;
62626-	edata_list_active_t fulls;
62627-	edata_list_active_init(&fulls);
62628-
62629-	while (filled < nfill && (slab = arena_slab_alloc(tsdn, arena, binind,
62630-	    binshard, bin_info)) != NULL) {
62631-		assert((size_t)edata_nfree_get(slab) == nregs);
62632-		++nslab;
62633-		size_t batch = nfill - filled;
62634-		if (batch > nregs) {
62635-			batch = nregs;
62636-		}
62637-		assert(batch > 0);
62638-		arena_slab_reg_alloc_batch(slab, bin_info, (unsigned)batch,
62639-		    &ptrs[filled]);
62640-		assert(edata_addr_get(slab) == ptrs[filled]);
62641-		if (zero) {
62642-			memset(ptrs[filled], 0, batch * usize);
62643-		}
62644-		filled += batch;
62645-		if (batch == nregs) {
62646-			if (manual_arena) {
62647-				edata_list_active_append(&fulls, slab);
62648-			}
62649-			slab = NULL;
62650-		}
62651-	}
62652-
62653-	malloc_mutex_lock(tsdn, &bin->lock);
62654-	/*
62655-	 * Only the last slab can still have free regions, and it does iff
62656-	 * slab != NULL.
62657-	 */
62658-	if (slab != NULL) {
62659-		arena_bin_lower_slab(tsdn, arena, slab, bin);
62660-	}
62661-	if (manual_arena) {
62662-		edata_list_active_concat(&bin->slabs_full, &fulls);
62663-	}
62664-	assert(edata_list_active_empty(&fulls));
62665-	if (config_stats) {
62666-		bin->stats.nslabs += nslab;
62667-		bin->stats.curslabs += nslab;
62668-		bin->stats.nmalloc += filled;
62669-		bin->stats.nrequests += filled;
62670-		bin->stats.curregs += filled;
62671-	}
62672-	malloc_mutex_unlock(tsdn, &bin->lock);
62673-
62674-	arena_decay_tick(tsdn, arena);
62675-	return filled;
62676-}
62677-
62678-/*
62679- * Without allocating a new slab, try arena_slab_reg_alloc() and re-fill
62680- * bin->slabcur if necessary.
62681- */
62682-static void *
62683-arena_bin_malloc_no_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
62684-    szind_t binind) {
62685-	malloc_mutex_assert_owner(tsdn, &bin->lock);
62686-	if (bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0) {
62687-		if (arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) {
62688-			return NULL;
62689-		}
62690-	}
62691-
62692-	assert(bin->slabcur != NULL && edata_nfree_get(bin->slabcur) > 0);
62693-	return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]);
62694-}
62695-
62696-static void *
62697-arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
62698-	assert(binind < SC_NBINS);
62699-	const bin_info_t *bin_info = &bin_infos[binind];
62700-	size_t usize = sz_index2size(binind);
62701-	unsigned binshard;
62702-	bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
62703-
62704-	malloc_mutex_lock(tsdn, &bin->lock);
62705-	edata_t *fresh_slab = NULL;
62706-	void *ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind);
62707-	if (ret == NULL) {
62708-		malloc_mutex_unlock(tsdn, &bin->lock);
62709-		/******************************/
62710-		fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard,
62711-		    bin_info);
62712-		/********************************/
62713-		malloc_mutex_lock(tsdn, &bin->lock);
62714-		/* Retry since the lock was dropped. */
62715-		ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind);
62716-		if (ret == NULL) {
62717-			if (fresh_slab == NULL) {
62718-				/* OOM */
62719-				malloc_mutex_unlock(tsdn, &bin->lock);
62720-				return NULL;
62721-			}
62722-			ret = arena_bin_malloc_with_fresh_slab(tsdn, arena, bin,
62723-			    binind, fresh_slab);
62724-			fresh_slab = NULL;
62725-		}
62726-	}
62727-	if (config_stats) {
62728-		bin->stats.nmalloc++;
62729-		bin->stats.nrequests++;
62730-		bin->stats.curregs++;
62731-	}
62732-	malloc_mutex_unlock(tsdn, &bin->lock);
62733-
62734-	if (fresh_slab != NULL) {
62735-		arena_slab_dalloc(tsdn, arena, fresh_slab);
62736-	}
62737-	if (zero) {
62738-		memset(ret, 0, usize);
62739-	}
62740-	arena_decay_tick(tsdn, arena);
62741-
62742-	return ret;
62743-}
62744-
62745-void *
62746-arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
62747-    bool zero) {
62748-	assert(!tsdn_null(tsdn) || arena != NULL);
62749-
62750-	if (likely(!tsdn_null(tsdn))) {
62751-		arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, size);
62752-	}
62753-	if (unlikely(arena == NULL)) {
62754-		return NULL;
62755-	}
62756-
62757-	if (likely(size <= SC_SMALL_MAXCLASS)) {
62758-		return arena_malloc_small(tsdn, arena, ind, zero);
62759-	}
62760-	return large_malloc(tsdn, arena, sz_index2size(ind), zero);
62761-}
62762-
62763-void *
62764-arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
62765-    bool zero, tcache_t *tcache) {
62766-	void *ret;
62767-
62768-	if (usize <= SC_SMALL_MAXCLASS) {
62769-		/* Small; alignment doesn't require special slab placement. */
62770-
62771-		/* usize should be a result of sz_sa2u() */
62772-		assert((usize & (alignment - 1)) == 0);
62773-
62774-		/*
62775-		 * Small usize can't come from an alignment larger than a page.
62776-		 */
62777-		assert(alignment <= PAGE);
62778-
62779-		ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
62780-		    zero, tcache, true);
62781-	} else {
62782-		if (likely(alignment <= CACHELINE)) {
62783-			ret = large_malloc(tsdn, arena, usize, zero);
62784-		} else {
62785-			ret = large_palloc(tsdn, arena, usize, alignment, zero);
62786-		}
62787-	}
62788-	return ret;
62789-}
62790-
62791-void
62792-arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
62793-	cassert(config_prof);
62794-	assert(ptr != NULL);
62795-	assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
62796-	assert(usize <= SC_SMALL_MAXCLASS);
62797-
62798-	if (config_opt_safety_checks) {
62799-		safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS);
62800-	}
62801-
62802-	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
62803-
62804-	szind_t szind = sz_size2index(usize);
62805-	edata_szind_set(edata, szind);
62806-	emap_remap(tsdn, &arena_emap_global, edata, szind, /* slab */ false);
62807-
62808-	assert(isalloc(tsdn, ptr) == usize);
62809-}
62810-
62811-static size_t
62812-arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) {
62813-	cassert(config_prof);
62814-	assert(ptr != NULL);
62815-
62816-	edata_szind_set(edata, SC_NBINS);
62817-	emap_remap(tsdn, &arena_emap_global, edata, SC_NBINS, /* slab */ false);
62818-
62819-	assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
62820-
62821-	return SC_LARGE_MINCLASS;
62822-}
62823-
62824-void
62825-arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
62826-    bool slow_path) {
62827-	cassert(config_prof);
62828-	assert(opt_prof);
62829-
62830-	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
62831-	size_t usize = edata_usize_get(edata);
62832-	size_t bumped_usize = arena_prof_demote(tsdn, edata, ptr);
62833-	if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
62834-		/*
62835-		 * Currently, we only do redzoning for small sampled
62836-		 * allocations.
62837-		 */
62838-		assert(bumped_usize == SC_LARGE_MINCLASS);
62839-		safety_check_verify_redzone(ptr, usize, bumped_usize);
62840-	}
62841-	if (bumped_usize <= tcache_maxclass && tcache != NULL) {
62842-		tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
62843-		    sz_size2index(bumped_usize), slow_path);
62844-	} else {
62845-		large_dalloc(tsdn, edata);
62846-	}
62847-}
62848-
62849-static void
62850-arena_dissociate_bin_slab(arena_t *arena, edata_t *slab, bin_t *bin) {
62851-	/* Dissociate slab from bin. */
62852-	if (slab == bin->slabcur) {
62853-		bin->slabcur = NULL;
62854-	} else {
62855-		szind_t binind = edata_szind_get(slab);
62856-		const bin_info_t *bin_info = &bin_infos[binind];
62857-
62858-		/*
62859-		 * The following block's conditional is necessary because if the
62860-		 * slab only contains one region, then it never gets inserted
62861-		 * into the non-full slabs heap.
62862-		 */
62863-		if (bin_info->nregs == 1) {
62864-			arena_bin_slabs_full_remove(arena, bin, slab);
62865-		} else {
62866-			arena_bin_slabs_nonfull_remove(bin, slab);
62867-		}
62868-	}
62869-}
62870-
62871-static void
62872-arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
62873-    bin_t *bin) {
62874-	assert(edata_nfree_get(slab) > 0);
62875-
62876-	/*
62877-	 * Make sure that if bin->slabcur is non-NULL, it refers to the
62878-	 * oldest/lowest non-full slab.  It is okay to NULL slabcur out rather
62879-	 * than proactively keeping it pointing at the oldest/lowest non-full
62880-	 * slab.
62881-	 */
62882-	if (bin->slabcur != NULL && edata_snad_comp(bin->slabcur, slab) > 0) {
62883-		/* Switch slabcur. */
62884-		if (edata_nfree_get(bin->slabcur) > 0) {
62885-			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
62886-		} else {
62887-			arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
62888-		}
62889-		bin->slabcur = slab;
62890-		if (config_stats) {
62891-			bin->stats.reslabs++;
62892-		}
62893-	} else {
62894-		arena_bin_slabs_nonfull_insert(bin, slab);
62895-	}
62896-}
62897-
62898-static void
62899-arena_dalloc_bin_slab_prepare(tsdn_t *tsdn, edata_t *slab, bin_t *bin) {
62900-	malloc_mutex_assert_owner(tsdn, &bin->lock);
62901-
62902-	assert(slab != bin->slabcur);
62903-	if (config_stats) {
62904-		bin->stats.curslabs--;
62905-	}
62906-}
62907-
62908-void
62909-arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena,
62910-    edata_t *slab, bin_t *bin) {
62911-	arena_dissociate_bin_slab(arena, slab, bin);
62912-	arena_dalloc_bin_slab_prepare(tsdn, slab, bin);
62913-}
62914-
62915-void
62916-arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena,
62917-    edata_t *slab, bin_t *bin) {
62918-	arena_bin_slabs_full_remove(arena, bin, slab);
62919-	arena_bin_lower_slab(tsdn, arena, slab, bin);
62920-}
62921-
62922-static void
62923-arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) {
62924-	szind_t binind = edata_szind_get(edata);
62925-	unsigned binshard = edata_binshard_get(edata);
62926-	bin_t *bin = arena_get_bin(arena, binind, binshard);
62927-
62928-	malloc_mutex_lock(tsdn, &bin->lock);
62929-	arena_dalloc_bin_locked_info_t info;
62930-	arena_dalloc_bin_locked_begin(&info, binind);
62931-	bool ret = arena_dalloc_bin_locked_step(tsdn, arena, bin,
62932-	    &info, binind, edata, ptr);
62933-	arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info);
62934-	malloc_mutex_unlock(tsdn, &bin->lock);
62935-
62936-	if (ret) {
62937-		arena_slab_dalloc(tsdn, arena, edata);
62938-	}
62939-}
62940-
62941-void
62942-arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
62943-	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
62944-	arena_t *arena = arena_get_from_edata(edata);
62945-
62946-	arena_dalloc_bin(tsdn, arena, edata, ptr);
62947-	arena_decay_tick(tsdn, arena);
62948-}
62949-
62950-bool
62951-arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
62952-    size_t extra, bool zero, size_t *newsize) {
62953-	bool ret;
62954-	/* Callers passing non-zero extra must have already clamped it. */
62955-	assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
62956-
62957-	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
62958-	if (unlikely(size > SC_LARGE_MAXCLASS)) {
62959-		ret = true;
62960-		goto done;
62961-	}
62962-
62963-	size_t usize_min = sz_s2u(size);
62964-	size_t usize_max = sz_s2u(size + extra);
62965-	if (likely(oldsize <= SC_SMALL_MAXCLASS && usize_min
62966-	    <= SC_SMALL_MAXCLASS)) {
62967-		/*
62968-		 * Avoid moving the allocation if the size class can be left the
62969-		 * same.
62970-		 */
62971-		assert(bin_infos[sz_size2index(oldsize)].reg_size ==
62972-		    oldsize);
62973-		if ((usize_max > SC_SMALL_MAXCLASS
62974-		    || sz_size2index(usize_max) != sz_size2index(oldsize))
62975-		    && (size > oldsize || usize_max < oldsize)) {
62976-			ret = true;
62977-			goto done;
62978-		}
62979-
62980-		arena_t *arena = arena_get_from_edata(edata);
62981-		arena_decay_tick(tsdn, arena);
62982-		ret = false;
62983-	} else if (oldsize >= SC_LARGE_MINCLASS
62984-	    && usize_max >= SC_LARGE_MINCLASS) {
62985-		ret = large_ralloc_no_move(tsdn, edata, usize_min, usize_max,
62986-		    zero);
62987-	} else {
62988-		ret = true;
62989-	}
62990-done:
62991-	assert(edata == emap_edata_lookup(tsdn, &arena_emap_global, ptr));
62992-	*newsize = edata_usize_get(edata);
62993-
62994-	return ret;
62995-}
62996-
62997-static void *
62998-arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
62999-    size_t alignment, bool zero, tcache_t *tcache) {
63000-	if (alignment == 0) {
63001-		return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
63002-		    zero, tcache, true);
63003-	}
63004-	usize = sz_sa2u(usize, alignment);
63005-	if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
63006-		return NULL;
63007-	}
63008-	return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
63009-}
63010-
63011-void *
63012-arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
63013-    size_t size, size_t alignment, bool zero, tcache_t *tcache,
63014-    hook_ralloc_args_t *hook_args) {
63015-	size_t usize = alignment == 0 ? sz_s2u(size) : sz_sa2u(size, alignment);
63016-	if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) {
63017-		return NULL;
63018-	}
63019-
63020-	if (likely(usize <= SC_SMALL_MAXCLASS)) {
63021-		/* Try to avoid moving the allocation. */
63022-		UNUSED size_t newsize;
63023-		if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero,
63024-		    &newsize)) {
63025-			hook_invoke_expand(hook_args->is_realloc
63026-			    ? hook_expand_realloc : hook_expand_rallocx,
63027-			    ptr, oldsize, usize, (uintptr_t)ptr,
63028-			    hook_args->args);
63029-			return ptr;
63030-		}
63031-	}
63032-
63033-	if (oldsize >= SC_LARGE_MINCLASS
63034-	    && usize >= SC_LARGE_MINCLASS) {
63035-		return large_ralloc(tsdn, arena, ptr, usize,
63036-		    alignment, zero, tcache, hook_args);
63037-	}
63038-
63039-	/*
63040-	 * size and oldsize are different enough that we need to move the
63041-	 * object.  In that case, fall back to allocating new space and copying.
63042-	 */
63043-	void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
63044-	    zero, tcache);
63045-	if (ret == NULL) {
63046-		return NULL;
63047-	}
63048-
63049-	hook_invoke_alloc(hook_args->is_realloc
63050-	    ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret,
63051-	    hook_args->args);
63052-	hook_invoke_dalloc(hook_args->is_realloc
63053-	    ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
63054-
63055-	/*
63056-	 * Junk/zero-filling were already done by
63057-	 * ipalloc()/arena_malloc().
63058-	 */
63059-	size_t copysize = (usize < oldsize) ? usize : oldsize;
63060-	memcpy(ret, ptr, copysize);
63061-	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
63062-	return ret;
63063-}
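/*
 * A minimal sketch of the shape of arena_ralloc() above: first try to
 * resize in place, and only fall back to allocate + copy + free when the
 * size class has to change.  try_resize_in_place() is a hypothetical
 * helper, not a jemalloc function.
 */
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static bool
try_resize_in_place(void *ptr, size_t oldsize, size_t newsize) {
	(void)ptr;
	/* Stand-in policy: pretend in-place resize only works when shrinking. */
	return newsize <= oldsize;
}

void *
ralloc(void *ptr, size_t oldsize, size_t newsize) {
	if (try_resize_in_place(ptr, oldsize, newsize)) {
		return ptr;
	}
	void *ret = malloc(newsize);
	if (ret == NULL) {
		return NULL;	/* the original allocation stays intact */
	}
	size_t copysize = (newsize < oldsize) ? newsize : oldsize;
	memcpy(ret, ptr, copysize);
	free(ptr);
	return ret;
}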
63064-
63065-ehooks_t *
63066-arena_get_ehooks(arena_t *arena) {
63067-	return base_ehooks_get(arena->base);
63068-}
63069-
63070-extent_hooks_t *
63071-arena_set_extent_hooks(tsd_t *tsd, arena_t *arena,
63072-    extent_hooks_t *extent_hooks) {
63073-	background_thread_info_t *info;
63074-	if (have_background_thread) {
63075-		info = arena_background_thread_info_get(arena);
63076-		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
63077-	}
63078-	/* Stop using the HPA now that we have custom hooks. */
63079-	pa_shard_disable_hpa(tsd_tsdn(tsd), &arena->pa_shard);
63080-	extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
63081-	if (have_background_thread) {
63082-		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
63083-	}
63084-
63085-	return ret;
63086-}
63087-
63088-dss_prec_t
63089-arena_dss_prec_get(arena_t *arena) {
63090-	return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
63091-}
63092-
63093-bool
63094-arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
63095-	if (!have_dss) {
63096-		return (dss_prec != dss_prec_disabled);
63097-	}
63098-	atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
63099-	return false;
63100-}
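/*
 * A sketch of the acquire/release pairing used by arena_dss_prec_get() and
 * arena_dss_prec_set() above, written with C11 <stdatomic.h> instead of
 * jemalloc's atomic_u wrappers.
 */
#include <stdatomic.h>

static _Atomic unsigned g_prec;

unsigned
prec_get(void) {
	/* Acquire pairs with the release in prec_set(). */
	return atomic_load_explicit(&g_prec, memory_order_acquire);
}

void
prec_set(unsigned prec) {
	atomic_store_explicit(&g_prec, prec, memory_order_release);
}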
63101-
63102-ssize_t
63103-arena_dirty_decay_ms_default_get(void) {
63104-	return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
63105-}
63106-
63107-bool
63108-arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
63109-	if (!decay_ms_valid(decay_ms)) {
63110-		return true;
63111-	}
63112-	atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
63113-	return false;
63114-}
63115-
63116-ssize_t
63117-arena_muzzy_decay_ms_default_get(void) {
63118-	return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
63119-}
63120-
63121-bool
63122-arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
63123-	if (!decay_ms_valid(decay_ms)) {
63124-		return true;
63125-	}
63126-	atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
63127-	return false;
63128-}
63129-
63130-bool
63131-arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
63132-    size_t *new_limit) {
63133-	assert(opt_retain);
63134-	return pac_retain_grow_limit_get_set(tsd_tsdn(tsd),
63135-	    &arena->pa_shard.pac, old_limit, new_limit);
63136-}
63137-
63138-unsigned
63139-arena_nthreads_get(arena_t *arena, bool internal) {
63140-	return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
63141-}
63142-
63143-void
63144-arena_nthreads_inc(arena_t *arena, bool internal) {
63145-	atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
63146-}
63147-
63148-void
63149-arena_nthreads_dec(arena_t *arena, bool internal) {
63150-	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
63151-}
63152-
63153-arena_t *
63154-arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
63155-	arena_t *arena;
63156-	base_t *base;
63157-	unsigned i;
63158-
63159-	if (ind == 0) {
63160-		base = b0get();
63161-	} else {
63162-		base = base_new(tsdn, ind, config->extent_hooks,
63163-		    config->metadata_use_hooks);
63164-		if (base == NULL) {
63165-			return NULL;
63166-		}
63167-	}
63168-
63169-	size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total;
63170-	arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE);
63171-	if (arena == NULL) {
63172-		goto label_error;
63173-	}
63174-
63175-	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
63176-	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
63177-	arena->last_thd = NULL;
63178-
63179-	if (config_stats) {
63180-		if (arena_stats_init(tsdn, &arena->stats)) {
63181-			goto label_error;
63182-		}
63183-
63184-		ql_new(&arena->tcache_ql);
63185-		ql_new(&arena->cache_bin_array_descriptor_ql);
63186-		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
63187-		    WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
63188-			goto label_error;
63189-		}
63190-	}
63191-
63192-	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
63193-	    ATOMIC_RELAXED);
63194-
63195-	edata_list_active_init(&arena->large);
63196-	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
63197-	    WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
63198-		goto label_error;
63199-	}
63200-
63201-	nstime_t cur_time;
63202-	nstime_init_update(&cur_time);
63203-	if (pa_shard_init(tsdn, &arena->pa_shard, &arena_pa_central_global,
63204-	    &arena_emap_global, base, ind, &arena->stats.pa_shard_stats,
63205-	    LOCKEDINT_MTX(arena->stats.mtx), &cur_time, oversize_threshold,
63206-	    arena_dirty_decay_ms_default_get(),
63207-	    arena_muzzy_decay_ms_default_get())) {
63208-		goto label_error;
63209-	}
63210-
63211-	/* Initialize bins. */
63212-	atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE);
63213-	for (i = 0; i < nbins_total; i++) {
63214-		bool err = bin_init(&arena->bins[i]);
63215-		if (err) {
63216-			goto label_error;
63217-		}
63218-	}
63219-
63220-	arena->base = base;
63221-	/* Set arena before creating background threads. */
63222-	arena_set(ind, arena);
63223-	arena->ind = ind;
63224-
63225-	nstime_init_update(&arena->create_time);
63226-
63227-	/*
63228-	 * We turn on the HPA if it is enabled.  There are two exceptions:
63229-	 * - Custom extent hooks (we should only return memory allocated from
63230-	 *   them in that case).
63231-	 * - Arena 0 initialization.  In this case, we're mid-bootstrapping, and
63232-	 *   so arena_hpa_global is not yet initialized.
63233-	 */
63234-	if (opt_hpa && ehooks_are_default(base_ehooks_get(base)) && ind != 0) {
63235-		hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts;
63236-		hpa_shard_opts.deferral_allowed = background_thread_enabled();
63237-		if (pa_shard_enable_hpa(tsdn, &arena->pa_shard,
63238-		    &hpa_shard_opts, &opt_hpa_sec_opts)) {
63239-			goto label_error;
63240-		}
63241-	}
63242-
63243-	/* We don't support reentrancy for arena 0 bootstrapping. */
63244-	if (ind != 0) {
63245-		/*
63246-		 * If we're here, then arena 0 already exists, so bootstrapping
63247-		 * is done enough that we should have tsd.
63248-		 */
63249-		assert(!tsdn_null(tsdn));
63250-		pre_reentrancy(tsdn_tsd(tsdn), arena);
63251-		if (test_hooks_arena_new_hook) {
63252-			test_hooks_arena_new_hook();
63253-		}
63254-		post_reentrancy(tsdn_tsd(tsdn));
63255-	}
63256-
63257-	return arena;
63258-label_error:
63259-	if (ind != 0) {
63260-		base_delete(tsdn, base);
63261-	}
63262-	return NULL;
63263-}
63264-
63265-arena_t *
63266-arena_choose_huge(tsd_t *tsd) {
63267-	/* huge_arena_ind can be 0 during init (will use a0). */
63268-	if (huge_arena_ind == 0) {
63269-		assert(!malloc_initialized());
63270-	}
63271-
63272-	arena_t *huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, false);
63273-	if (huge_arena == NULL) {
63274-		/* Create the huge arena on demand. */
63275-		assert(huge_arena_ind != 0);
63276-		huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, true);
63277-		if (huge_arena == NULL) {
63278-			return NULL;
63279-		}
63280-		/*
63281-		 * Purge eagerly for huge allocations, because: 1) the number of
63282-		 * huge allocations is usually small, so ticker-based decay is
63283-		 * not reliable; and 2) less immediate reuse is
63284-		 * expected for huge allocations.
63285-		 */
63286-		if (arena_dirty_decay_ms_default_get() > 0) {
63287-			arena_decay_ms_set(tsd_tsdn(tsd), huge_arena,
63288-			    extent_state_dirty, 0);
63289-		}
63290-		if (arena_muzzy_decay_ms_default_get() > 0) {
63291-			arena_decay_ms_set(tsd_tsdn(tsd), huge_arena,
63292-			    extent_state_muzzy, 0);
63293-		}
63294-	}
63295-
63296-	return huge_arena;
63297-}
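/*
 * A sketch of requesting the same eager-purge behavior from application
 * code for a manually created arena, assuming jemalloc's public mallctl
 * interface ("arenas.create" and the per-arena decay_ms controls).
 */
#include <stdio.h>
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

int
make_eagerly_purging_arena(unsigned *arena_ind) {
	size_t sz = sizeof(*arena_ind);
	if (mallctl("arenas.create", arena_ind, &sz, NULL, 0) != 0) {
		return -1;
	}
	char name[64];
	ssize_t decay_ms = 0;	/* purge as soon as pages become unused */
	snprintf(name, sizeof(name), "arena.%u.dirty_decay_ms", *arena_ind);
	if (mallctl(name, NULL, NULL, &decay_ms, sizeof(decay_ms)) != 0) {
		return -1;
	}
	snprintf(name, sizeof(name), "arena.%u.muzzy_decay_ms", *arena_ind);
	return mallctl(name, NULL, NULL, &decay_ms, sizeof(decay_ms));
}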
63298-
63299-bool
63300-arena_init_huge(void) {
63301-	bool huge_enabled;
63302-
63303-	/* The threshold should be a large size class. */
63304-	if (opt_oversize_threshold > SC_LARGE_MAXCLASS ||
63305-	    opt_oversize_threshold < SC_LARGE_MINCLASS) {
63306-		opt_oversize_threshold = 0;
63307-		oversize_threshold = SC_LARGE_MAXCLASS + PAGE;
63308-		huge_enabled = false;
63309-	} else {
63310-		/* Reserve the index for the huge arena. */
63311-		huge_arena_ind = narenas_total_get();
63312-		oversize_threshold = opt_oversize_threshold;
63313-		huge_enabled = true;
63314-	}
63315-
63316-	return huge_enabled;
63317-}
63318-
63319-bool
63320-arena_is_huge(unsigned arena_ind) {
63321-	if (huge_arena_ind == 0) {
63322-		return false;
63323-	}
63324-	return (arena_ind == huge_arena_ind);
63325-}
63326-
63327-bool
63328-arena_boot(sc_data_t *sc_data, base_t *base, bool hpa) {
63329-	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
63330-	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
63331-	for (unsigned i = 0; i < SC_NBINS; i++) {
63332-		sc_t *sc = &sc_data->sc[i];
63333-		div_init(&arena_binind_div_info[i],
63334-		    (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
63335-	}
63336-
63337-	uint32_t cur_offset = (uint32_t)offsetof(arena_t, bins);
63338-	for (szind_t i = 0; i < SC_NBINS; i++) {
63339-		arena_bin_offsets[i] = cur_offset;
63340-		nbins_total += bin_infos[i].n_shards;
63341-		cur_offset += (uint32_t)(bin_infos[i].n_shards * sizeof(bin_t));
63342-	}
63343-	return pa_central_init(&arena_pa_central_global, base, hpa,
63344-	    &hpa_hooks_default);
63345-}
63346-
63347-void
63348-arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
63349-	pa_shard_prefork0(tsdn, &arena->pa_shard);
63350-}
63351-
63352-void
63353-arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
63354-	if (config_stats) {
63355-		malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
63356-	}
63357-}
63358-
63359-void
63360-arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
63361-	pa_shard_prefork2(tsdn, &arena->pa_shard);
63362-}
63363-
63364-void
63365-arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
63366-	pa_shard_prefork3(tsdn, &arena->pa_shard);
63367-}
63368-
63369-void
63370-arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
63371-	pa_shard_prefork4(tsdn, &arena->pa_shard);
63372-}
63373-
63374-void
63375-arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
63376-	pa_shard_prefork5(tsdn, &arena->pa_shard);
63377-}
63378-
63379-void
63380-arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
63381-	base_prefork(tsdn, arena->base);
63382-}
63383-
63384-void
63385-arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
63386-	malloc_mutex_prefork(tsdn, &arena->large_mtx);
63387-}
63388-
63389-void
63390-arena_prefork8(tsdn_t *tsdn, arena_t *arena) {
63391-	for (unsigned i = 0; i < nbins_total; i++) {
63392-		bin_prefork(tsdn, &arena->bins[i]);
63393-	}
63394-}
63395-
63396-void
63397-arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
63398-	for (unsigned i = 0; i < nbins_total; i++) {
63399-		bin_postfork_parent(tsdn, &arena->bins[i]);
63400-	}
63401-
63402-	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
63403-	base_postfork_parent(tsdn, arena->base);
63404-	pa_shard_postfork_parent(tsdn, &arena->pa_shard);
63405-	if (config_stats) {
63406-		malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
63407-	}
63408-}
63409-
63410-void
63411-arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
63412-	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
63413-	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
63414-	if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
63415-		arena_nthreads_inc(arena, false);
63416-	}
63417-	if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
63418-		arena_nthreads_inc(arena, true);
63419-	}
63420-	if (config_stats) {
63421-		ql_new(&arena->tcache_ql);
63422-		ql_new(&arena->cache_bin_array_descriptor_ql);
63423-		tcache_slow_t *tcache_slow = tcache_slow_get(tsdn_tsd(tsdn));
63424-		if (tcache_slow != NULL && tcache_slow->arena == arena) {
63425-			tcache_t *tcache = tcache_slow->tcache;
63426-			ql_elm_new(tcache_slow, link);
63427-			ql_tail_insert(&arena->tcache_ql, tcache_slow, link);
63428-			cache_bin_array_descriptor_init(
63429-			    &tcache_slow->cache_bin_array_descriptor,
63430-			    tcache->bins);
63431-			ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
63432-			    &tcache_slow->cache_bin_array_descriptor, link);
63433-		}
63434-	}
63435-
63436-	for (unsigned i = 0; i < nbins_total; i++) {
63437-		bin_postfork_child(tsdn, &arena->bins[i]);
63438-	}
63439-
63440-	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
63441-	base_postfork_child(tsdn, arena->base);
63442-	pa_shard_postfork_child(tsdn, &arena->pa_shard);
63443-	if (config_stats) {
63444-		malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
63445-	}
63446-}
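/*
 * A minimal sketch of the prefork/postfork discipline above, reduced to a
 * single mutex registered with pthread_atfork().  The child-side
 * re-initialization mirrors the postfork_child paths and assumes the child
 * is effectively single-threaded at that point.
 */
#include <pthread.h>

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void) {
	pthread_mutex_lock(&g_lock);	/* quiesce mutations across fork() */
}

static void
postfork_parent(void) {
	pthread_mutex_unlock(&g_lock);
}

static void
postfork_child(void) {
	/* The copied lock state is unreliable; start from a fresh mutex. */
	pthread_mutex_init(&g_lock, NULL);
}

/* Call once at startup. */
int
install_fork_handlers(void) {
	return pthread_atfork(prefork, postfork_parent, postfork_child);
}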
63447diff --git a/jemalloc/src/background_thread.c b/jemalloc/src/background_thread.c
63448deleted file mode 100644
63449index 3bb8d26..0000000
63450--- a/jemalloc/src/background_thread.c
63451+++ /dev/null
63452@@ -1,820 +0,0 @@
63453-#include "jemalloc/internal/jemalloc_preamble.h"
63454-#include "jemalloc/internal/jemalloc_internal_includes.h"
63455-
63456-#include "jemalloc/internal/assert.h"
63457-
63458-JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
63459-
63460-/******************************************************************************/
63461-/* Data. */
63462-
63463-/* This option should be opt-in only. */
63464-#define BACKGROUND_THREAD_DEFAULT false
63465-/* Read-only after initialization. */
63466-bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
63467-size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT + 1;
63468-
63469-/* Used for thread creation, termination and stats. */
63470-malloc_mutex_t background_thread_lock;
63471-/* Indicates global state.  Atomic because decay reads this w/o locking. */
63472-atomic_b_t background_thread_enabled_state;
63473-size_t n_background_threads;
63474-size_t max_background_threads;
63475-/* Thread info per-index. */
63476-background_thread_info_t *background_thread_info;
63477-
63478-/******************************************************************************/
63479-
63480-#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
63481-
63482-static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
63483-    void *(*)(void *), void *__restrict);
63484-
63485-static void
63486-pthread_create_wrapper_init(void) {
63487-#ifdef JEMALLOC_LAZY_LOCK
63488-	if (!isthreaded) {
63489-		isthreaded = true;
63490-	}
63491-#endif
63492-}
63493-
63494-int
63495-pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
63496-    void *(*start_routine)(void *), void *__restrict arg) {
63497-	pthread_create_wrapper_init();
63498-
63499-	return pthread_create_fptr(thread, attr, start_routine, arg);
63500-}
63501-#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */
63502-
63503-#ifndef JEMALLOC_BACKGROUND_THREAD
63504-#define NOT_REACHED { not_reached(); }
63505-bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
63506-bool background_threads_enable(tsd_t *tsd) NOT_REACHED
63507-bool background_threads_disable(tsd_t *tsd) NOT_REACHED
63508-bool background_thread_is_started(background_thread_info_t *info) NOT_REACHED
63509-void background_thread_wakeup_early(background_thread_info_t *info,
63510-    nstime_t *remaining_sleep) NOT_REACHED
63511-void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
63512-void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
63513-void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
63514-void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED
63515-bool background_thread_stats_read(tsdn_t *tsdn,
63516-    background_thread_stats_t *stats) NOT_REACHED
63517-void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED
63518-#undef NOT_REACHED
63519-#else
63520-
63521-static bool background_thread_enabled_at_fork;
63522-
63523-static void
63524-background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
63525-	background_thread_wakeup_time_set(tsdn, info, 0);
63526-	info->npages_to_purge_new = 0;
63527-	if (config_stats) {
63528-		info->tot_n_runs = 0;
63529-		nstime_init_zero(&info->tot_sleep_time);
63530-	}
63531-}
63532-
63533-static inline bool
63534-set_current_thread_affinity(int cpu) {
63535-#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
63536-	cpu_set_t cpuset;
63537-#else
63538-#  ifndef __NetBSD__
63539-	cpuset_t cpuset;
63540-#  else
63541-	cpuset_t *cpuset;
63542-#  endif
63543-#endif
63544-
63545-#ifndef __NetBSD__
63546-	CPU_ZERO(&cpuset);
63547-	CPU_SET(cpu, &cpuset);
63548-#else
63549-	cpuset = cpuset_create();
63550-#endif
63551-
63552-#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
63553-	return (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset) != 0);
63554-#else
63555-#  ifndef __NetBSD__
63556-	int ret = pthread_setaffinity_np(pthread_self(), sizeof(cpuset_t),
63557-	    &cpuset);
63558-#  else
63559-	int ret = pthread_setaffinity_np(pthread_self(), cpuset_size(cpuset),
63560-	    cpuset);
63561-	cpuset_destroy(cpuset);
63562-#  endif
63563-	return ret != 0;
63564-#endif
63565-}
63566-
63567-#define BILLION UINT64_C(1000000000)
63568-/* Minimum sleep interval: 100 ms. */
63569-#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
63570-
63571-static void
63572-background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
63573-    uint64_t interval) {
63574-	if (config_stats) {
63575-		info->tot_n_runs++;
63576-	}
63577-	info->npages_to_purge_new = 0;
63578-
63579-	struct timeval tv;
63580-	/* Specific clock required by timedwait. */
63581-	gettimeofday(&tv, NULL);
63582-	nstime_t before_sleep;
63583-	nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000);
63584-
63585-	int ret;
63586-	if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
63587-		background_thread_wakeup_time_set(tsdn, info,
63588-		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
63589-		ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
63590-		assert(ret == 0);
63591-	} else {
63592-		assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS &&
63593-		    interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
63594-		/* We need the malloc clock (which can differ from tv's clock). */
63595-		nstime_t next_wakeup;
63596-		nstime_init_update(&next_wakeup);
63597-		nstime_iadd(&next_wakeup, interval);
63598-		assert(nstime_ns(&next_wakeup) <
63599-		    BACKGROUND_THREAD_INDEFINITE_SLEEP);
63600-		background_thread_wakeup_time_set(tsdn, info,
63601-		    nstime_ns(&next_wakeup));
63602-
63603-		nstime_t ts_wakeup;
63604-		nstime_copy(&ts_wakeup, &before_sleep);
63605-		nstime_iadd(&ts_wakeup, interval);
63606-		struct timespec ts;
63607-		ts.tv_sec = (size_t)nstime_sec(&ts_wakeup);
63608-		ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup);
63609-
63610-		assert(!background_thread_indefinite_sleep(info));
63611-		ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
63612-		assert(ret == ETIMEDOUT || ret == 0);
63613-	}
63614-	if (config_stats) {
63615-		gettimeofday(&tv, NULL);
63616-		nstime_t after_sleep;
63617-		nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000);
63618-		if (nstime_compare(&after_sleep, &before_sleep) > 0) {
63619-			nstime_subtract(&after_sleep, &before_sleep);
63620-			nstime_add(&info->tot_sleep_time, &after_sleep);
63621-		}
63622-	}
63623-}
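/*
 * A sketch of building the absolute deadline that pthread_cond_timedwait()
 * expects, as background_thread_sleep() above does with gettimeofday()
 * plus the requested interval in nanoseconds.
 */
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <sys/time.h>
#include <time.h>

/* Wait on cond for up to interval_ns; mtx must already be held. */
int
timed_wait_ns(pthread_cond_t *cond, pthread_mutex_t *mtx,
    uint64_t interval_ns) {
	struct timeval tv;
	gettimeofday(&tv, NULL);	/* matches timedwait's default clock */
	uint64_t deadline_ns = (uint64_t)tv.tv_sec * 1000000000ULL +
	    (uint64_t)tv.tv_usec * 1000ULL + interval_ns;
	struct timespec ts;
	ts.tv_sec = (time_t)(deadline_ns / 1000000000ULL);
	ts.tv_nsec = (long)(deadline_ns % 1000000000ULL);
	int ret = pthread_cond_timedwait(cond, mtx, &ts);
	return (ret == 0 || ret == ETIMEDOUT) ? 0 : ret;
}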
63624-
63625-static bool
63626-background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
63627-	if (unlikely(info->state == background_thread_paused)) {
63628-		malloc_mutex_unlock(tsdn, &info->mtx);
63629-		/* Wait on global lock to update status. */
63630-		malloc_mutex_lock(tsdn, &background_thread_lock);
63631-		malloc_mutex_unlock(tsdn, &background_thread_lock);
63632-		malloc_mutex_lock(tsdn, &info->mtx);
63633-		return true;
63634-	}
63635-
63636-	return false;
63637-}
63638-
63639-static inline void
63640-background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,
63641-    unsigned ind) {
63642-	uint64_t ns_until_deferred = BACKGROUND_THREAD_DEFERRED_MAX;
63643-	unsigned narenas = narenas_total_get();
63644-	bool slept_indefinitely = background_thread_indefinite_sleep(info);
63645-
63646-	for (unsigned i = ind; i < narenas; i += max_background_threads) {
63647-		arena_t *arena = arena_get(tsdn, i, false);
63648-		if (!arena) {
63649-			continue;
63650-		}
63651-		/*
63652-		 * If the thread was woken up from an indefinite sleep, don't
63653-		 * do the work immediately; instead, check when the deferred
63654-		 * work that caused the wakeup is scheduled to run.
63655-		 */
63656-		if (!slept_indefinitely) {
63657-			arena_do_deferred_work(tsdn, arena);
63658-		}
63659-		if (ns_until_deferred <= BACKGROUND_THREAD_MIN_INTERVAL_NS) {
63660-			/* Min interval will be used. */
63661-			continue;
63662-		}
63663-		uint64_t ns_arena_deferred = pa_shard_time_until_deferred_work(
63664-		    tsdn, &arena->pa_shard);
63665-		if (ns_arena_deferred < ns_until_deferred) {
63666-			ns_until_deferred = ns_arena_deferred;
63667-		}
63668-	}
63669-
63670-	uint64_t sleep_ns;
63671-	if (ns_until_deferred == BACKGROUND_THREAD_DEFERRED_MAX) {
63672-		sleep_ns = BACKGROUND_THREAD_INDEFINITE_SLEEP;
63673-	} else {
63674-		sleep_ns =
63675-		    (ns_until_deferred < BACKGROUND_THREAD_MIN_INTERVAL_NS)
63676-		    ? BACKGROUND_THREAD_MIN_INTERVAL_NS
63677-		    : ns_until_deferred;
63678-
63679-	}
63680-
63681-	background_thread_sleep(tsdn, info, sleep_ns);
63682-}
63683-
63684-static bool
63685-background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) {
63686-	if (info == &background_thread_info[0]) {
63687-		malloc_mutex_assert_owner(tsd_tsdn(tsd),
63688-		    &background_thread_lock);
63689-	} else {
63690-		malloc_mutex_assert_not_owner(tsd_tsdn(tsd),
63691-		    &background_thread_lock);
63692-	}
63693-
63694-	pre_reentrancy(tsd, NULL);
63695-	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
63696-	bool has_thread;
63697-	assert(info->state != background_thread_paused);
63698-	if (info->state == background_thread_started) {
63699-		has_thread = true;
63700-		info->state = background_thread_stopped;
63701-		pthread_cond_signal(&info->cond);
63702-	} else {
63703-		has_thread = false;
63704-	}
63705-	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
63706-
63707-	if (!has_thread) {
63708-		post_reentrancy(tsd);
63709-		return false;
63710-	}
63711-	void *ret;
63712-	if (pthread_join(info->thread, &ret)) {
63713-		post_reentrancy(tsd);
63714-		return true;
63715-	}
63716-	assert(ret == NULL);
63717-	n_background_threads--;
63718-	post_reentrancy(tsd);
63719-
63720-	return false;
63721-}
63722-
63723-static void *background_thread_entry(void *ind_arg);
63724-
63725-static int
63726-background_thread_create_signals_masked(pthread_t *thread,
63727-    const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) {
63728-	/*
63729-	 * Mask signals during thread creation so that the thread inherits
63730-	 * an empty signal set.
63731-	 */
63732-	sigset_t set;
63733-	sigfillset(&set);
63734-	sigset_t oldset;
63735-	int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset);
63736-	if (mask_err != 0) {
63737-		return mask_err;
63738-	}
63739-	int create_err = pthread_create_wrapper(thread, attr, start_routine,
63740-	    arg);
63741-	/*
63742-	 * Restore the signal mask.  Failure to restore the signal mask here
63743-	 * changes program behavior.
63744-	 */
63745-	int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
63746-	if (restore_err != 0) {
63747-		malloc_printf("<jemalloc>: background thread creation "
63748-		    "failed (%d), and signal mask restoration failed "
63749-		    "(%d)\n", create_err, restore_err);
63750-		if (opt_abort) {
63751-			abort();
63752-		}
63753-	}
63754-	return create_err;
63755-}
63756-
63757-static bool
63758-check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
63759-    bool *created_threads) {
63760-	bool ret = false;
63761-	if (likely(*n_created == n_background_threads)) {
63762-		return ret;
63763-	}
63764-
63765-	tsdn_t *tsdn = tsd_tsdn(tsd);
63766-	malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx);
63767-	for (unsigned i = 1; i < max_background_threads; i++) {
63768-		if (created_threads[i]) {
63769-			continue;
63770-		}
63771-		background_thread_info_t *info = &background_thread_info[i];
63772-		malloc_mutex_lock(tsdn, &info->mtx);
63773-		/*
63774-		 * If the thread is in the paused state (e.g. because of an
63775-		 * arena reset), delay the creation.
63776-		 */
63777-		bool create = (info->state == background_thread_started);
63778-		malloc_mutex_unlock(tsdn, &info->mtx);
63779-		if (!create) {
63780-			continue;
63781-		}
63782-
63783-		pre_reentrancy(tsd, NULL);
63784-		int err = background_thread_create_signals_masked(&info->thread,
63785-		    NULL, background_thread_entry, (void *)(uintptr_t)i);
63786-		post_reentrancy(tsd);
63787-
63788-		if (err == 0) {
63789-			(*n_created)++;
63790-			created_threads[i] = true;
63791-		} else {
63792-			malloc_printf("<jemalloc>: background thread "
63793-			    "creation failed (%d)\n", err);
63794-			if (opt_abort) {
63795-				abort();
63796-			}
63797-		}
63798-		/* Return to restart the loop since we unlocked. */
63799-		ret = true;
63800-		break;
63801-	}
63802-	malloc_mutex_lock(tsdn, &background_thread_info[0].mtx);
63803-
63804-	return ret;
63805-}
63806-
63807-static void
63808-background_thread0_work(tsd_t *tsd) {
63809-	/* Thread0 is also responsible for launching / terminating threads. */
63810-	VARIABLE_ARRAY(bool, created_threads, max_background_threads);
63811-	unsigned i;
63812-	for (i = 1; i < max_background_threads; i++) {
63813-		created_threads[i] = false;
63814-	}
63815-	/* Start working, and create more threads when asked. */
63816-	unsigned n_created = 1;
63817-	while (background_thread_info[0].state != background_thread_stopped) {
63818-		if (background_thread_pause_check(tsd_tsdn(tsd),
63819-		    &background_thread_info[0])) {
63820-			continue;
63821-		}
63822-		if (check_background_thread_creation(tsd, &n_created,
63823-		    (bool *)&created_threads)) {
63824-			continue;
63825-		}
63826-		background_work_sleep_once(tsd_tsdn(tsd),
63827-		    &background_thread_info[0], 0);
63828-	}
63829-
63830-	/*
63831-	 * Shut down other threads at exit.  Note that the ctl thread is holding
63832-	 * Shut down other threads at exit.  Note that the ctl thread holds
63833-	 * the global background_thread mutex and is waiting for us.
63834-	assert(!background_thread_enabled());
63835-	for (i = 1; i < max_background_threads; i++) {
63836-		background_thread_info_t *info = &background_thread_info[i];
63837-		assert(info->state != background_thread_paused);
63838-		if (created_threads[i]) {
63839-			background_threads_disable_single(tsd, info);
63840-		} else {
63841-			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
63842-			if (info->state != background_thread_stopped) {
63843-				/* The thread was not created. */
63844-				assert(info->state ==
63845-				    background_thread_started);
63846-				n_background_threads--;
63847-				info->state = background_thread_stopped;
63848-			}
63849-			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
63850-		}
63851-	}
63852-	background_thread_info[0].state = background_thread_stopped;
63853-	assert(n_background_threads == 1);
63854-}
63855-
63856-static void
63857-background_work(tsd_t *tsd, unsigned ind) {
63858-	background_thread_info_t *info = &background_thread_info[ind];
63859-
63860-	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
63861-	background_thread_wakeup_time_set(tsd_tsdn(tsd), info,
63862-	    BACKGROUND_THREAD_INDEFINITE_SLEEP);
63863-	if (ind == 0) {
63864-		background_thread0_work(tsd);
63865-	} else {
63866-		while (info->state != background_thread_stopped) {
63867-			if (background_thread_pause_check(tsd_tsdn(tsd),
63868-			    info)) {
63869-				continue;
63870-			}
63871-			background_work_sleep_once(tsd_tsdn(tsd), info, ind);
63872-		}
63873-	}
63874-	assert(info->state == background_thread_stopped);
63875-	background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0);
63876-	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
63877-}
63878-
63879-static void *
63880-background_thread_entry(void *ind_arg) {
63881-	unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
63882-	assert(thread_ind < max_background_threads);
63883-#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
63884-	pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
63885-#elif defined(__FreeBSD__) || defined(__DragonFly__)
63886-	pthread_set_name_np(pthread_self(), "jemalloc_bg_thd");
63887-#endif
63888-	if (opt_percpu_arena != percpu_arena_disabled) {
63889-		set_current_thread_affinity((int)thread_ind);
63890-	}
63891-	/*
63892-	 * Start periodic background work.  We use internal tsd which avoids
63893-	 * side effects, for example triggering new arena creation (which in
63894-	 * turn triggers another background thread creation).
63895-	 */
63896-	background_work(tsd_internal_fetch(), thread_ind);
63897-	assert(pthread_equal(pthread_self(),
63898-	    background_thread_info[thread_ind].thread));
63899-
63900-	return NULL;
63901-}
63902-
63903-static void
63904-background_thread_init(tsd_t *tsd, background_thread_info_t *info) {
63905-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
63906-	info->state = background_thread_started;
63907-	background_thread_info_init(tsd_tsdn(tsd), info);
63908-	n_background_threads++;
63909-}
63910-
63911-static bool
63912-background_thread_create_locked(tsd_t *tsd, unsigned arena_ind) {
63913-	assert(have_background_thread);
63914-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
63915-
63916-	/* We create at most NCPUs threads. */
63917-	size_t thread_ind = arena_ind % max_background_threads;
63918-	background_thread_info_t *info = &background_thread_info[thread_ind];
63919-
63920-	bool need_new_thread;
63921-	malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
63922-	need_new_thread = background_thread_enabled() &&
63923-	    (info->state == background_thread_stopped);
63924-	if (need_new_thread) {
63925-		background_thread_init(tsd, info);
63926-	}
63927-	malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
63928-	if (!need_new_thread) {
63929-		return false;
63930-	}
63931-	if (arena_ind != 0) {
63932-		/* Threads are created asynchronously by Thread 0. */
63933-		background_thread_info_t *t0 = &background_thread_info[0];
63934-		malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx);
63935-		assert(t0->state == background_thread_started);
63936-		pthread_cond_signal(&t0->cond);
63937-		malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx);
63938-
63939-		return false;
63940-	}
63941-
63942-	pre_reentrancy(tsd, NULL);
63943-	/*
63944-	 * To avoid complications (besides reentrancy), create internal
63945-	 * background threads with the underlying pthread_create.
63946-	 */
63947-	int err = background_thread_create_signals_masked(&info->thread, NULL,
63948-	    background_thread_entry, (void *)thread_ind);
63949-	post_reentrancy(tsd);
63950-
63951-	if (err != 0) {
63952-		malloc_printf("<jemalloc>: arena 0 background thread creation "
63953-		    "failed (%d)\n", err);
63954-		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
63955-		info->state = background_thread_stopped;
63956-		n_background_threads--;
63957-		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
63958-
63959-		return true;
63960-	}
63961-
63962-	return false;
63963-}
63964-
63965-/* Create a new background thread if needed. */
63966-bool
63967-background_thread_create(tsd_t *tsd, unsigned arena_ind) {
63968-	assert(have_background_thread);
63969-
63970-	bool ret;
63971-	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
63972-	ret = background_thread_create_locked(tsd, arena_ind);
63973-	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
63974-
63975-	return ret;
63976-}
63977-
63978-bool
63979-background_threads_enable(tsd_t *tsd) {
63980-	assert(n_background_threads == 0);
63981-	assert(background_thread_enabled());
63982-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
63983-
63984-	VARIABLE_ARRAY(bool, marked, max_background_threads);
63985-	unsigned nmarked;
63986-	for (unsigned i = 0; i < max_background_threads; i++) {
63987-		marked[i] = false;
63988-	}
63989-	nmarked = 0;
63990-	/* Thread 0 is required and created at the end. */
63991-	marked[0] = true;
63992-	/* Mark the threads we need to create for thread 0. */
63993-	unsigned narenas = narenas_total_get();
63994-	for (unsigned i = 1; i < narenas; i++) {
63995-		if (marked[i % max_background_threads] ||
63996-		    arena_get(tsd_tsdn(tsd), i, false) == NULL) {
63997-			continue;
63998-		}
63999-		background_thread_info_t *info = &background_thread_info[
64000-		    i % max_background_threads];
64001-		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
64002-		assert(info->state == background_thread_stopped);
64003-		background_thread_init(tsd, info);
64004-		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
64005-		marked[i % max_background_threads] = true;
64006-		if (++nmarked == max_background_threads) {
64007-			break;
64008-		}
64009-	}
64010-
64011-	bool err = background_thread_create_locked(tsd, 0);
64012-	if (err) {
64013-		return true;
64014-	}
64015-	for (unsigned i = 0; i < narenas; i++) {
64016-		arena_t *arena = arena_get(tsd_tsdn(tsd), i, false);
64017-		if (arena != NULL) {
64018-			pa_shard_set_deferral_allowed(tsd_tsdn(tsd),
64019-			    &arena->pa_shard, true);
64020-		}
64021-	}
64022-	return false;
64023-}
64024-
64025-bool
64026-background_threads_disable(tsd_t *tsd) {
64027-	assert(!background_thread_enabled());
64028-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
64029-
64030-	/* Thread 0 will be responsible for terminating other threads. */
64031-	if (background_threads_disable_single(tsd,
64032-	    &background_thread_info[0])) {
64033-		return true;
64034-	}
64035-	assert(n_background_threads == 0);
64036-	unsigned narenas = narenas_total_get();
64037-	for (unsigned i = 0; i < narenas; i++) {
64038-		arena_t *arena = arena_get(tsd_tsdn(tsd), i, false);
64039-		if (arena != NULL) {
64040-			pa_shard_set_deferral_allowed(tsd_tsdn(tsd),
64041-			    &arena->pa_shard, false);
64042-		}
64043-	}
64044-
64045-	return false;
64046-}
64047-
64048-bool
64049-background_thread_is_started(background_thread_info_t *info) {
64050-	return info->state == background_thread_started;
64051-}
64052-
64053-void
64054-background_thread_wakeup_early(background_thread_info_t *info,
64055-    nstime_t *remaining_sleep) {
64056-	/*
64057-	 * This is an optimization to increase batching.  At this point we
64058-	 * know that the background thread will wake up soon, so the time the
64059-	 * just-freed memory stays cached is bounded and low.
64060-	 */
64061-	if (remaining_sleep != NULL && nstime_ns(remaining_sleep) <
64062-	    BACKGROUND_THREAD_MIN_INTERVAL_NS) {
64063-		return;
64064-	}
64065-	pthread_cond_signal(&info->cond);
64066-}
64067-
64068-void
64069-background_thread_prefork0(tsdn_t *tsdn) {
64070-	malloc_mutex_prefork(tsdn, &background_thread_lock);
64071-	background_thread_enabled_at_fork = background_thread_enabled();
64072-}
64073-
64074-void
64075-background_thread_prefork1(tsdn_t *tsdn) {
64076-	for (unsigned i = 0; i < max_background_threads; i++) {
64077-		malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
64078-	}
64079-}
64080-
64081-void
64082-background_thread_postfork_parent(tsdn_t *tsdn) {
64083-	for (unsigned i = 0; i < max_background_threads; i++) {
64084-		malloc_mutex_postfork_parent(tsdn,
64085-		    &background_thread_info[i].mtx);
64086-	}
64087-	malloc_mutex_postfork_parent(tsdn, &background_thread_lock);
64088-}
64089-
64090-void
64091-background_thread_postfork_child(tsdn_t *tsdn) {
64092-	for (unsigned i = 0; i < max_background_threads; i++) {
64093-		malloc_mutex_postfork_child(tsdn,
64094-		    &background_thread_info[i].mtx);
64095-	}
64096-	malloc_mutex_postfork_child(tsdn, &background_thread_lock);
64097-	if (!background_thread_enabled_at_fork) {
64098-		return;
64099-	}
64100-
64101-	/* Clear background_thread state (reset to disabled for child). */
64102-	malloc_mutex_lock(tsdn, &background_thread_lock);
64103-	n_background_threads = 0;
64104-	background_thread_enabled_set(tsdn, false);
64105-	for (unsigned i = 0; i < max_background_threads; i++) {
64106-		background_thread_info_t *info = &background_thread_info[i];
64107-		malloc_mutex_lock(tsdn, &info->mtx);
64108-		info->state = background_thread_stopped;
64109-		int ret = pthread_cond_init(&info->cond, NULL);
64110-		assert(ret == 0);
64111-		background_thread_info_init(tsdn, info);
64112-		malloc_mutex_unlock(tsdn, &info->mtx);
64113-	}
64114-	malloc_mutex_unlock(tsdn, &background_thread_lock);
64115-}
64116-
64117-bool
64118-background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
64119-	assert(config_stats);
64120-	malloc_mutex_lock(tsdn, &background_thread_lock);
64121-	if (!background_thread_enabled()) {
64122-		malloc_mutex_unlock(tsdn, &background_thread_lock);
64123-		return true;
64124-	}
64125-
64126-	nstime_init_zero(&stats->run_interval);
64127-	memset(&stats->max_counter_per_bg_thd, 0, sizeof(mutex_prof_data_t));
64128-
64129-	uint64_t num_runs = 0;
64130-	stats->num_threads = n_background_threads;
64131-	for (unsigned i = 0; i < max_background_threads; i++) {
64132-		background_thread_info_t *info = &background_thread_info[i];
64133-		if (malloc_mutex_trylock(tsdn, &info->mtx)) {
64134-			/*
64135-			 * Each background thread run may take a long time;
64136-			 * avoid waiting on the stats if the thread is active.
64137-			 */
64138-			continue;
64139-		}
64140-		if (info->state != background_thread_stopped) {
64141-			num_runs += info->tot_n_runs;
64142-			nstime_add(&stats->run_interval, &info->tot_sleep_time);
64143-			malloc_mutex_prof_max_update(tsdn,
64144-			    &stats->max_counter_per_bg_thd, &info->mtx);
64145-		}
64146-		malloc_mutex_unlock(tsdn, &info->mtx);
64147-	}
64148-	stats->num_runs = num_runs;
64149-	if (num_runs > 0) {
64150-		nstime_idivide(&stats->run_interval, num_runs);
64151-	}
64152-	malloc_mutex_unlock(tsdn, &background_thread_lock);
64153-
64154-	return false;
64155-}
64156-
64157-#undef BACKGROUND_THREAD_NPAGES_THRESHOLD
64158-#undef BILLION
64159-#undef BACKGROUND_THREAD_MIN_INTERVAL_NS
64160-
64161-#ifdef JEMALLOC_HAVE_DLSYM
64162-#include <dlfcn.h>
64163-#endif
64164-
64165-static bool
64166-pthread_create_fptr_init(void) {
64167-	if (pthread_create_fptr != NULL) {
64168-		return false;
64169-	}
64170-	/*
64171-	 * Try the next symbol first, because 1) when lazy_lock is in use we
64172-	 * have a wrapper for pthread_create; and 2) the application may define
64173-	 * its own wrapper as well (and may call malloc within that wrapper).
64174-	 */
64175-#ifdef JEMALLOC_HAVE_DLSYM
64176-	pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
64177-#else
64178-	pthread_create_fptr = NULL;
64179-#endif
64180-	if (pthread_create_fptr == NULL) {
64181-		if (config_lazy_lock) {
64182-			malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
64183-			    "\"pthread_create\")\n");
64184-			abort();
64185-		} else {
64186-			/* Fall back to the default symbol. */
64187-			pthread_create_fptr = pthread_create;
64188-		}
64189-	}
64190-
64191-	return false;
64192-}
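/*
 * A sketch of the dlsym(RTLD_NEXT, ...) lookup that
 * pthread_create_fptr_init() above relies on to find the "real"
 * pthread_create beneath any interposed wrapper.  RTLD_NEXT requires
 * _GNU_SOURCE on glibc; link with -ldl where required.
 */
#define _GNU_SOURCE
#include <dlfcn.h>
#include <pthread.h>
#include <stddef.h>

typedef int (*pthread_create_fn_t)(pthread_t *, const pthread_attr_t *,
    void *(*)(void *), void *);

static pthread_create_fn_t real_pthread_create;

void
resolve_real_pthread_create(void) {
	if (real_pthread_create != NULL) {
		return;
	}
	real_pthread_create =
	    (pthread_create_fn_t)dlsym(RTLD_NEXT, "pthread_create");
	if (real_pthread_create == NULL) {
		/* Fall back to the default symbol, as the code above does. */
		real_pthread_create = pthread_create;
	}
}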
64193-
64194-/*
64195- * When lazy lock is enabled, we need to make sure isthreaded is set before
64196- * taking any background_thread locks.  This is called early in ctl (instead
64197- * of waiting for the pthread_create calls to trigger it) because the mutex is
64198- * required before creating background threads.
64199- */
64200-void
64201-background_thread_ctl_init(tsdn_t *tsdn) {
64202-	malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);
64203-#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
64204-	pthread_create_fptr_init();
64205-	pthread_create_wrapper_init();
64206-#endif
64207-}
64208-
64209-#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */
64210-
64211-bool
64212-background_thread_boot0(void) {
64213-	if (!have_background_thread && opt_background_thread) {
64214-		malloc_printf("<jemalloc>: option background_thread currently "
64215-		    "supports pthread only\n");
64216-		return true;
64217-	}
64218-#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
64219-	if ((config_lazy_lock || opt_background_thread) &&
64220-	    pthread_create_fptr_init()) {
64221-		return true;
64222-	}
64223-#endif
64224-	return false;
64225-}
64226-
64227-bool
64228-background_thread_boot1(tsdn_t *tsdn, base_t *base) {
64229-#ifdef JEMALLOC_BACKGROUND_THREAD
64230-	assert(have_background_thread);
64231-	assert(narenas_total_get() > 0);
64232-
64233-	if (opt_max_background_threads > MAX_BACKGROUND_THREAD_LIMIT) {
64234-		opt_max_background_threads = DEFAULT_NUM_BACKGROUND_THREAD;
64235-	}
64236-	max_background_threads = opt_max_background_threads;
64237-
64238-	background_thread_enabled_set(tsdn, opt_background_thread);
64239-	if (malloc_mutex_init(&background_thread_lock,
64240-	    "background_thread_global",
64241-	    WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
64242-	    malloc_mutex_rank_exclusive)) {
64243-		return true;
64244-	}
64245-
64246-	background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
64247-	    base, opt_max_background_threads *
64248-	    sizeof(background_thread_info_t), CACHELINE);
64249-	if (background_thread_info == NULL) {
64250-		return true;
64251-	}
64252-
64253-	for (unsigned i = 0; i < max_background_threads; i++) {
64254-		background_thread_info_t *info = &background_thread_info[i];
64255-		/* Thread mutex is rank_inclusive because of thread0. */
64256-		if (malloc_mutex_init(&info->mtx, "background_thread",
64257-		    WITNESS_RANK_BACKGROUND_THREAD,
64258-		    malloc_mutex_address_ordered)) {
64259-			return true;
64260-		}
64261-		if (pthread_cond_init(&info->cond, NULL)) {
64262-			return true;
64263-		}
64264-		malloc_mutex_lock(tsdn, &info->mtx);
64265-		info->state = background_thread_stopped;
64266-		background_thread_info_init(tsdn, info);
64267-		malloc_mutex_unlock(tsdn, &info->mtx);
64268-	}
64269-#endif
64270-
64271-	return false;
64272-}
64273diff --git a/jemalloc/src/base.c b/jemalloc/src/base.c
64274deleted file mode 100644
64275index 7f4d675..0000000
64276--- a/jemalloc/src/base.c
64277+++ /dev/null
64278@@ -1,529 +0,0 @@
64279-#include "jemalloc/internal/jemalloc_preamble.h"
64280-#include "jemalloc/internal/jemalloc_internal_includes.h"
64281-
64282-#include "jemalloc/internal/assert.h"
64283-#include "jemalloc/internal/extent_mmap.h"
64284-#include "jemalloc/internal/mutex.h"
64285-#include "jemalloc/internal/sz.h"
64286-
64287-/*
64288- * In auto mode, arenas switch to huge pages for the base allocator on the
64289- * second base block.  a0 switches to thp on the 5th block (after 20 megabytes
64290- * of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
64291- */
64292-
64293-#define BASE_AUTO_THP_THRESHOLD    2
64294-#define BASE_AUTO_THP_THRESHOLD_A0 5
64295-
64296-/******************************************************************************/
64297-/* Data. */
64298-
64299-static base_t *b0;
64300-
64301-metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT;
64302-
64303-const char *metadata_thp_mode_names[] = {
64304-	"disabled",
64305-	"auto",
64306-	"always"
64307-};
64308-
64309-/******************************************************************************/
64310-
64311-static inline bool
64312-metadata_thp_madvise(void) {
64313-	return (metadata_thp_enabled() &&
64314-	    (init_system_thp_mode == thp_mode_default));
64315-}
64316-
64317-static void *
64318-base_map(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, size_t size) {
64319-	void *addr;
64320-	bool zero = true;
64321-	bool commit = true;
64322-
64323-	/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
64324-	assert(size == HUGEPAGE_CEILING(size));
64325-	size_t alignment = HUGEPAGE;
64326-	if (ehooks_are_default(ehooks)) {
64327-		addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
64328-		if (have_madvise_huge && addr) {
64329-			pages_set_thp_state(addr, size);
64330-		}
64331-	} else {
64332-		addr = ehooks_alloc(tsdn, ehooks, NULL, size, alignment, &zero,
64333-		    &commit);
64334-	}
64335-
64336-	return addr;
64337-}
64338-
64339-static void
64340-base_unmap(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, void *addr,
64341-    size_t size) {
64342-	/*
64343-	 * Cascade through dalloc, decommit, purge_forced, and purge_lazy,
64344-	 * stopping at first success.  This cascade is performed for consistency
64345-	 * with the cascade in extent_dalloc_wrapper() because an application's
64346-	 * custom hooks may not support e.g. dalloc.  This function is only ever
64347-	 * called as a side effect of arena destruction, so although it might
64348-	 * seem pointless to do anything besides dalloc here, the application
64349-	 * may in fact want the end state of all associated virtual memory to be
64350-	 * in some consistent-but-allocated state.
64351-	 */
64352-	if (ehooks_are_default(ehooks)) {
64353-		if (!extent_dalloc_mmap(addr, size)) {
64354-			goto label_done;
64355-		}
64356-		if (!pages_decommit(addr, size)) {
64357-			goto label_done;
64358-		}
64359-		if (!pages_purge_forced(addr, size)) {
64360-			goto label_done;
64361-		}
64362-		if (!pages_purge_lazy(addr, size)) {
64363-			goto label_done;
64364-		}
64365-		/* Nothing worked.  This should never happen. */
64366-		not_reached();
64367-	} else {
64368-		if (!ehooks_dalloc(tsdn, ehooks, addr, size, true)) {
64369-			goto label_done;
64370-		}
64371-		if (!ehooks_decommit(tsdn, ehooks, addr, size, 0, size)) {
64372-			goto label_done;
64373-		}
64374-		if (!ehooks_purge_forced(tsdn, ehooks, addr, size, 0, size)) {
64375-			goto label_done;
64376-		}
64377-		if (!ehooks_purge_lazy(tsdn, ehooks, addr, size, 0, size)) {
64378-			goto label_done;
64379-		}
64380-		/* Nothing worked.  That's the application's problem. */
64381-	}
64382-label_done:
64383-	if (metadata_thp_madvise()) {
64384-		/* Set NOHUGEPAGE after unmap to avoid kernel defrag. */
64385-		assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
64386-		    (size & HUGEPAGE_MASK) == 0);
64387-		pages_nohuge(addr, size);
64388-	}
64389-}
64390-
64391-static void
64392-base_edata_init(size_t *extent_sn_next, edata_t *edata, void *addr,
64393-    size_t size) {
64394-	size_t sn;
64395-
64396-	sn = *extent_sn_next;
64397-	(*extent_sn_next)++;
64398-
64399-	edata_binit(edata, addr, size, sn);
64400-}
64401-
64402-static size_t
64403-base_get_num_blocks(base_t *base, bool with_new_block) {
64404-	base_block_t *b = base->blocks;
64405-	assert(b != NULL);
64406-
64407-	size_t n_blocks = with_new_block ? 2 : 1;
64408-	while (b->next != NULL) {
64409-		n_blocks++;
64410-		b = b->next;
64411-	}
64412-
64413-	return n_blocks;
64414-}
64415-
64416-static void
64417-base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
64418-	assert(opt_metadata_thp == metadata_thp_auto);
64419-	malloc_mutex_assert_owner(tsdn, &base->mtx);
64420-	if (base->auto_thp_switched) {
64421-		return;
64422-	}
64423-	/* Called when adding a new block. */
64424-	bool should_switch;
64425-	if (base_ind_get(base) != 0) {
64426-		should_switch = (base_get_num_blocks(base, true) ==
64427-		    BASE_AUTO_THP_THRESHOLD);
64428-	} else {
64429-		should_switch = (base_get_num_blocks(base, true) ==
64430-		    BASE_AUTO_THP_THRESHOLD_A0);
64431-	}
64432-	if (!should_switch) {
64433-		return;
64434-	}
64435-
64436-	base->auto_thp_switched = true;
64437-	assert(!config_stats || base->n_thp == 0);
64438-	/* Make the initial blocks THP lazily. */
64439-	base_block_t *block = base->blocks;
64440-	while (block != NULL) {
64441-		assert((block->size & HUGEPAGE_MASK) == 0);
64442-		pages_huge(block, block->size);
64443-		if (config_stats) {
64444-			base->n_thp += HUGEPAGE_CEILING(block->size -
64445-			    edata_bsize_get(&block->edata)) >> LG_HUGEPAGE;
64446-		}
64447-		block = block->next;
64448-		assert(block == NULL || (base_ind_get(base) == 0));
64449-	}
64450-}
64451-
64452-static void *
64453-base_extent_bump_alloc_helper(edata_t *edata, size_t *gap_size, size_t size,
64454-    size_t alignment) {
64455-	void *ret;
64456-
64457-	assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
64458-	assert(size == ALIGNMENT_CEILING(size, alignment));
64459-
64460-	*gap_size = ALIGNMENT_CEILING((uintptr_t)edata_addr_get(edata),
64461-	    alignment) - (uintptr_t)edata_addr_get(edata);
64462-	ret = (void *)((uintptr_t)edata_addr_get(edata) + *gap_size);
64463-	assert(edata_bsize_get(edata) >= *gap_size + size);
64464-	edata_binit(edata, (void *)((uintptr_t)edata_addr_get(edata) +
64465-	    *gap_size + size), edata_bsize_get(edata) - *gap_size - size,
64466-	    edata_sn_get(edata));
64467-	return ret;
64468-}
64469-
64470-static void
64471-base_extent_bump_alloc_post(base_t *base, edata_t *edata, size_t gap_size,
64472-    void *addr, size_t size) {
64473-	if (edata_bsize_get(edata) > 0) {
64474-		/*
64475-		 * Compute the index for the largest size class that does not
64476-		 * exceed extent's size.
64477-		 */
64478-		szind_t index_floor =
64479-		    sz_size2index(edata_bsize_get(edata) + 1) - 1;
64480-		edata_heap_insert(&base->avail[index_floor], edata);
64481-	}
64482-
64483-	if (config_stats) {
64484-		base->allocated += size;
64485-		/*
64486-		 * Add one PAGE to base_resident for every page boundary that is
64487-		 * crossed by the new allocation. Adjust n_thp similarly when
64488-		 * metadata_thp is enabled.
64489-		 */
64490-		base->resident += PAGE_CEILING((uintptr_t)addr + size) -
64491-		    PAGE_CEILING((uintptr_t)addr - gap_size);
64492-		assert(base->allocated <= base->resident);
64493-		assert(base->resident <= base->mapped);
64494-		if (metadata_thp_madvise() && (opt_metadata_thp ==
64495-		    metadata_thp_always || base->auto_thp_switched)) {
64496-			base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
64497-			    - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
64498-			    LG_HUGEPAGE;
64499-			assert(base->mapped >= base->n_thp << LG_HUGEPAGE);
64500-		}
64501-	}
64502-}
64503-
64504-static void *
64505-base_extent_bump_alloc(base_t *base, edata_t *edata, size_t size,
64506-    size_t alignment) {
64507-	void *ret;
64508-	size_t gap_size;
64509-
64510-	ret = base_extent_bump_alloc_helper(edata, &gap_size, size, alignment);
64511-	base_extent_bump_alloc_post(base, edata, gap_size, ret, size);
64512-	return ret;
64513-}
64514-
64515-/*
64516- * Allocate a block of virtual memory that is large enough to start with a
64517- * base_block_t header, followed by an object of specified size and alignment.
64518- * On success a pointer to the initialized base_block_t header is returned.
64519- */
64520-static base_block_t *
64521-base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
64522-    pszind_t *pind_last, size_t *extent_sn_next, size_t size,
64523-    size_t alignment) {
64524-	alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
64525-	size_t usize = ALIGNMENT_CEILING(size, alignment);
64526-	size_t header_size = sizeof(base_block_t);
64527-	size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
64528-	    header_size;
64529-	/*
64530-	 * Create increasingly larger blocks in order to limit the total number
64531-	 * of disjoint virtual memory ranges.  Choose the next size in the page
64532-	 * size class series (skipping size classes that are not a multiple of
64533-	 * HUGEPAGE), or a size large enough to satisfy the requested size and
64534-	 * alignment, whichever is larger.
64535-	 */
64536-	size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
64537-	    + usize));
64538-	pszind_t pind_next = (*pind_last + 1 < sz_psz2ind(SC_LARGE_MAXCLASS)) ?
64539-	    *pind_last + 1 : *pind_last;
64540-	size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
64541-	size_t block_size = (min_block_size > next_block_size) ? min_block_size
64542-	    : next_block_size;
64543-	base_block_t *block = (base_block_t *)base_map(tsdn, ehooks, ind,
64544-	    block_size);
64545-	if (block == NULL) {
64546-		return NULL;
64547-	}
64548-
64549-	if (metadata_thp_madvise()) {
64550-		void *addr = (void *)block;
64551-		assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
64552-		    (block_size & HUGEPAGE_MASK) == 0);
64553-		if (opt_metadata_thp == metadata_thp_always) {
64554-			pages_huge(addr, block_size);
64555-		} else if (opt_metadata_thp == metadata_thp_auto &&
64556-		    base != NULL) {
64557-			/* base != NULL indicates this is not a new base. */
64558-			malloc_mutex_lock(tsdn, &base->mtx);
64559-			base_auto_thp_switch(tsdn, base);
64560-			if (base->auto_thp_switched) {
64561-				pages_huge(addr, block_size);
64562-			}
64563-			malloc_mutex_unlock(tsdn, &base->mtx);
64564-		}
64565-	}
64566-
64567-	*pind_last = sz_psz2ind(block_size);
64568-	block->size = block_size;
64569-	block->next = NULL;
64570-	assert(block_size >= header_size);
64571-	base_edata_init(extent_sn_next, &block->edata,
64572-	    (void *)((uintptr_t)block + header_size), block_size - header_size);
64573-	return block;
64574-}
64575-
64576-/*
64577- * Allocate an extent that is at least as large as specified size, with
64578- * specified alignment.
64579- */
64580-static edata_t *
64581-base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
64582-	malloc_mutex_assert_owner(tsdn, &base->mtx);
64583-
64584-	ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
64585-	/*
64586-	 * Drop mutex during base_block_alloc(), because an extent hook will be
64587-	 * called.
64588-	 */
64589-	malloc_mutex_unlock(tsdn, &base->mtx);
64590-	base_block_t *block = base_block_alloc(tsdn, base, ehooks,
64591-	    base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
64592-	    alignment);
64593-	malloc_mutex_lock(tsdn, &base->mtx);
64594-	if (block == NULL) {
64595-		return NULL;
64596-	}
64597-	block->next = base->blocks;
64598-	base->blocks = block;
64599-	if (config_stats) {
64600-		base->allocated += sizeof(base_block_t);
64601-		base->resident += PAGE_CEILING(sizeof(base_block_t));
64602-		base->mapped += block->size;
64603-		if (metadata_thp_madvise() &&
64604-		    !(opt_metadata_thp == metadata_thp_auto
64605-		      && !base->auto_thp_switched)) {
64606-			assert(base->n_thp > 0);
64607-			base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >>
64608-			    LG_HUGEPAGE;
64609-		}
64610-		assert(base->allocated <= base->resident);
64611-		assert(base->resident <= base->mapped);
64612-		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
64613-	}
64614-	return &block->edata;
64615-}
64616-
64617-base_t *
64618-b0get(void) {
64619-	return b0;
64620-}
64621-
64622-base_t *
64623-base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks,
64624-    bool metadata_use_hooks) {
64625-	pszind_t pind_last = 0;
64626-	size_t extent_sn_next = 0;
64627-
64628-	/*
64629-	 * The base will contain the ehooks eventually, but it itself is
64630-	 * allocated using them.  So we use some stack ehooks to bootstrap its
64631-	 * memory, and then initialize the ehooks within the base_t.
64632-	 */
64633-	ehooks_t fake_ehooks;
64634-	ehooks_init(&fake_ehooks, metadata_use_hooks ?
64635-	    (extent_hooks_t *)extent_hooks :
64636-	    (extent_hooks_t *)&ehooks_default_extent_hooks, ind);
64637-
64638-	base_block_t *block = base_block_alloc(tsdn, NULL, &fake_ehooks, ind,
64639-	    &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
64640-	if (block == NULL) {
64641-		return NULL;
64642-	}
64643-
64644-	size_t gap_size;
64645-	size_t base_alignment = CACHELINE;
64646-	size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
64647-	base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->edata,
64648-	    &gap_size, base_size, base_alignment);
64649-	ehooks_init(&base->ehooks, (extent_hooks_t *)extent_hooks, ind);
64650-	ehooks_init(&base->ehooks_base, metadata_use_hooks ?
64651-	    (extent_hooks_t *)extent_hooks :
64652-	    (extent_hooks_t *)&ehooks_default_extent_hooks, ind);
64653-	if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
64654-	    malloc_mutex_rank_exclusive)) {
64655-		base_unmap(tsdn, &fake_ehooks, ind, block, block->size);
64656-		return NULL;
64657-	}
64658-	base->pind_last = pind_last;
64659-	base->extent_sn_next = extent_sn_next;
64660-	base->blocks = block;
64661-	base->auto_thp_switched = false;
64662-	for (szind_t i = 0; i < SC_NSIZES; i++) {
64663-		edata_heap_new(&base->avail[i]);
64664-	}
64665-	if (config_stats) {
64666-		base->allocated = sizeof(base_block_t);
64667-		base->resident = PAGE_CEILING(sizeof(base_block_t));
64668-		base->mapped = block->size;
64669-		base->n_thp = (opt_metadata_thp == metadata_thp_always) &&
64670-		    metadata_thp_madvise() ? HUGEPAGE_CEILING(sizeof(base_block_t))
64671-		    >> LG_HUGEPAGE : 0;
64672-		assert(base->allocated <= base->resident);
64673-		assert(base->resident <= base->mapped);
64674-		assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
64675-	}
64676-	base_extent_bump_alloc_post(base, &block->edata, gap_size, base,
64677-	    base_size);
64678-
64679-	return base;
64680-}
64681-
64682-void
64683-base_delete(tsdn_t *tsdn, base_t *base) {
64684-	ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
64685-	base_block_t *next = base->blocks;
64686-	do {
64687-		base_block_t *block = next;
64688-		next = block->next;
64689-		base_unmap(tsdn, ehooks, base_ind_get(base), block,
64690-		    block->size);
64691-	} while (next != NULL);
64692-}
64693-
64694-ehooks_t *
64695-base_ehooks_get(base_t *base) {
64696-	return &base->ehooks;
64697-}
64698-
64699-ehooks_t *
64700-base_ehooks_get_for_metadata(base_t *base) {
64701-	return &base->ehooks_base;
64702-}
64703-
64704-extent_hooks_t *
64705-base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
64706-	extent_hooks_t *old_extent_hooks =
64707-	    ehooks_get_extent_hooks_ptr(&base->ehooks);
64708-	ehooks_init(&base->ehooks, extent_hooks, ehooks_ind_get(&base->ehooks));
64709-	return old_extent_hooks;
64710-}
64711-
64712-static void *
64713-base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
64714-    size_t *esn) {
64715-	alignment = QUANTUM_CEILING(alignment);
64716-	size_t usize = ALIGNMENT_CEILING(size, alignment);
64717-	size_t asize = usize + alignment - QUANTUM;
64718-
64719-	edata_t *edata = NULL;
64720-	malloc_mutex_lock(tsdn, &base->mtx);
64721-	for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
64722-		edata = edata_heap_remove_first(&base->avail[i]);
64723-		if (edata != NULL) {
64724-			/* Use existing space. */
64725-			break;
64726-		}
64727-	}
64728-	if (edata == NULL) {
64729-		/* Try to allocate more space. */
64730-		edata = base_extent_alloc(tsdn, base, usize, alignment);
64731-	}
64732-	void *ret;
64733-	if (edata == NULL) {
64734-		ret = NULL;
64735-		goto label_return;
64736-	}
64737-
64738-	ret = base_extent_bump_alloc(base, edata, usize, alignment);
64739-	if (esn != NULL) {
64740-		*esn = (size_t)edata_sn_get(edata);
64741-	}
64742-label_return:
64743-	malloc_mutex_unlock(tsdn, &base->mtx);
64744-	return ret;
64745-}
64746-
64747-/*
64748- * base_alloc() returns zeroed memory, which is always demand-zeroed for the
64749- * auto arenas, in order to make multi-page sparse data structures such as radix
64750- * tree nodes efficient with respect to physical memory usage.  Upon success a
64751- * pointer to at least size bytes with specified alignment is returned.  Note
64752- * that size is rounded up to the nearest multiple of alignment to avoid false
64753- * sharing.
64754- */
64755-void *
64756-base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
64757-	return base_alloc_impl(tsdn, base, size, alignment, NULL);
64758-}
64759-
64760-edata_t *
64761-base_alloc_edata(tsdn_t *tsdn, base_t *base) {
64762-	size_t esn;
64763-	edata_t *edata = base_alloc_impl(tsdn, base, sizeof(edata_t),
64764-	    EDATA_ALIGNMENT, &esn);
64765-	if (edata == NULL) {
64766-		return NULL;
64767-	}
64768-	edata_esn_set(edata, esn);
64769-	return edata;
64770-}
64771-
64772-void
64773-base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
64774-    size_t *mapped, size_t *n_thp) {
64775-	cassert(config_stats);
64776-
64777-	malloc_mutex_lock(tsdn, &base->mtx);
64778-	assert(base->allocated <= base->resident);
64779-	assert(base->resident <= base->mapped);
64780-	*allocated = base->allocated;
64781-	*resident = base->resident;
64782-	*mapped = base->mapped;
64783-	*n_thp = base->n_thp;
64784-	malloc_mutex_unlock(tsdn, &base->mtx);
64785-}
64786-
64787-void
64788-base_prefork(tsdn_t *tsdn, base_t *base) {
64789-	malloc_mutex_prefork(tsdn, &base->mtx);
64790-}
64791-
64792-void
64793-base_postfork_parent(tsdn_t *tsdn, base_t *base) {
64794-	malloc_mutex_postfork_parent(tsdn, &base->mtx);
64795-}
64796-
64797-void
64798-base_postfork_child(tsdn_t *tsdn, base_t *base) {
64799-	malloc_mutex_postfork_child(tsdn, &base->mtx);
64800-}
64801-
64802-bool
64803-base_boot(tsdn_t *tsdn) {
64804-	b0 = base_new(tsdn, 0, (extent_hooks_t *)&ehooks_default_extent_hooks,
64805-	    /* metadata_use_hooks */ true);
64806-	return (b0 == NULL);
64807-}
64808diff --git a/jemalloc/src/bin.c b/jemalloc/src/bin.c
64809deleted file mode 100644
64810index fa20458..0000000
64811--- a/jemalloc/src/bin.c
64812+++ /dev/null
64813@@ -1,69 +0,0 @@
64814-#include "jemalloc/internal/jemalloc_preamble.h"
64815-#include "jemalloc/internal/jemalloc_internal_includes.h"
64816-
64817-#include "jemalloc/internal/assert.h"
64818-#include "jemalloc/internal/bin.h"
64819-#include "jemalloc/internal/sc.h"
64820-#include "jemalloc/internal/witness.h"
64821-
64822-bool
64823-bin_update_shard_size(unsigned bin_shard_sizes[SC_NBINS], size_t start_size,
64824-    size_t end_size, size_t nshards) {
64825-	if (nshards > BIN_SHARDS_MAX || nshards == 0) {
64826-		return true;
64827-	}
64828-
64829-	if (start_size > SC_SMALL_MAXCLASS) {
64830-		return false;
64831-	}
64832-	if (end_size > SC_SMALL_MAXCLASS) {
64833-		end_size = SC_SMALL_MAXCLASS;
64834-	}
64835-
64836-	/* Compute the index since this may happen before sz init. */
64837-	szind_t ind1 = sz_size2index_compute(start_size);
64838-	szind_t ind2 = sz_size2index_compute(end_size);
64839-	for (unsigned i = ind1; i <= ind2; i++) {
64840-		bin_shard_sizes[i] = (unsigned)nshards;
64841-	}
64842-
64843-	return false;
64844-}
64845-
64846-void
64847-bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]) {
64848-	/* Load the default number of shards. */
64849-	for (unsigned i = 0; i < SC_NBINS; i++) {
64850-		bin_shard_sizes[i] = N_BIN_SHARDS_DEFAULT;
64851-	}
64852-}
64853-
64854-bool
64855-bin_init(bin_t *bin) {
64856-	if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN,
64857-	    malloc_mutex_rank_exclusive)) {
64858-		return true;
64859-	}
64860-	bin->slabcur = NULL;
64861-	edata_heap_new(&bin->slabs_nonfull);
64862-	edata_list_active_init(&bin->slabs_full);
64863-	if (config_stats) {
64864-		memset(&bin->stats, 0, sizeof(bin_stats_t));
64865-	}
64866-	return false;
64867-}
64868-
64869-void
64870-bin_prefork(tsdn_t *tsdn, bin_t *bin) {
64871-	malloc_mutex_prefork(tsdn, &bin->lock);
64872-}
64873-
64874-void
64875-bin_postfork_parent(tsdn_t *tsdn, bin_t *bin) {
64876-	malloc_mutex_postfork_parent(tsdn, &bin->lock);
64877-}
64878-
64879-void
64880-bin_postfork_child(tsdn_t *tsdn, bin_t *bin) {
64881-	malloc_mutex_postfork_child(tsdn, &bin->lock);
64882-}
64883diff --git a/jemalloc/src/bin_info.c b/jemalloc/src/bin_info.c
64884deleted file mode 100644
64885index 8629ef8..0000000
64886--- a/jemalloc/src/bin_info.c
64887+++ /dev/null
64888@@ -1,30 +0,0 @@
64889-#include "jemalloc/internal/jemalloc_preamble.h"
64890-#include "jemalloc/internal/jemalloc_internal_includes.h"
64891-
64892-#include "jemalloc/internal/bin_info.h"
64893-
64894-bin_info_t bin_infos[SC_NBINS];
64895-
64896-static void
64897-bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
64898-    bin_info_t infos[SC_NBINS]) {
64899-	for (unsigned i = 0; i < SC_NBINS; i++) {
64900-		bin_info_t *bin_info = &infos[i];
64901-		sc_t *sc = &sc_data->sc[i];
64902-		bin_info->reg_size = ((size_t)1U << sc->lg_base)
64903-		    + ((size_t)sc->ndelta << sc->lg_delta);
64904-		bin_info->slab_size = (sc->pgs << LG_PAGE);
64905-		bin_info->nregs =
64906-		    (uint32_t)(bin_info->slab_size / bin_info->reg_size);
64907-		bin_info->n_shards = bin_shard_sizes[i];
64908-		bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
64909-		    bin_info->nregs);
64910-		bin_info->bitmap_info = bitmap_info;
64911-	}
64912-}
64913-
64914-void
64915-bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
64916-	assert(sc_data->initialized);
64917-	bin_infos_init(sc_data, bin_shard_sizes, bin_infos);
64918-}
64919diff --git a/jemalloc/src/bitmap.c b/jemalloc/src/bitmap.c
64920deleted file mode 100644
64921index 0ccedc5..0000000
64922--- a/jemalloc/src/bitmap.c
64923+++ /dev/null
64924@@ -1,120 +0,0 @@
64925-#include "jemalloc/internal/jemalloc_preamble.h"
64926-#include "jemalloc/internal/jemalloc_internal_includes.h"
64927-
64928-#include "jemalloc/internal/assert.h"
64929-
64930-/******************************************************************************/
64931-
64932-#ifdef BITMAP_USE_TREE
64933-
64934-void
64935-bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
64936-	unsigned i;
64937-	size_t group_count;
64938-
64939-	assert(nbits > 0);
64940-	assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
64941-
64942-	/*
64943-	 * Compute the number of groups necessary to store nbits bits, and
64944-	 * progressively work upward through the levels until reaching a level
64945-	 * that requires only one group.
64946-	 */
64947-	binfo->levels[0].group_offset = 0;
64948-	group_count = BITMAP_BITS2GROUPS(nbits);
64949-	for (i = 1; group_count > 1; i++) {
64950-		assert(i < BITMAP_MAX_LEVELS);
64951-		binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
64952-		    + group_count;
64953-		group_count = BITMAP_BITS2GROUPS(group_count);
64954-	}
64955-	binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
64956-	    + group_count;
64957-	assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
64958-	binfo->nlevels = i;
64959-	binfo->nbits = nbits;
64960-}
64961-
64962-static size_t
64963-bitmap_info_ngroups(const bitmap_info_t *binfo) {
64964-	return binfo->levels[binfo->nlevels].group_offset;
64965-}
64966-
64967-void
64968-bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
64969-	size_t extra;
64970-	unsigned i;
64971-
64972-	/*
64973-	 * Bits are actually inverted with regard to the external bitmap
64974-	 * interface.
64975-	 */
64976-
64977-	if (fill) {
64978-		/* The "filled" bitmap starts out with all 0 bits. */
64979-		memset(bitmap, 0, bitmap_size(binfo));
64980-		return;
64981-	}
64982-
64983-	/*
64984-	 * The "empty" bitmap starts out with all 1 bits, except for trailing
64985-	 * unused bits (if any).  Note that each group uses bit 0 to correspond
64986-	 * to the first logical bit in the group, so extra bits are the most
64987-	 * significant bits of the last group.
64988-	 */
64989-	memset(bitmap, 0xffU, bitmap_size(binfo));
64990-	extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
64991-	    & BITMAP_GROUP_NBITS_MASK;
64992-	if (extra != 0) {
64993-		bitmap[binfo->levels[1].group_offset - 1] >>= extra;
64994-	}
64995-	for (i = 1; i < binfo->nlevels; i++) {
64996-		size_t group_count = binfo->levels[i].group_offset -
64997-		    binfo->levels[i-1].group_offset;
64998-		extra = (BITMAP_GROUP_NBITS - (group_count &
64999-		    BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
65000-		if (extra != 0) {
65001-			bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
65002-		}
65003-	}
65004-}
65005-
65006-#else /* BITMAP_USE_TREE */
65007-
65008-void
65009-bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
65010-	assert(nbits > 0);
65011-	assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
65012-
65013-	binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
65014-	binfo->nbits = nbits;
65015-}
65016-
65017-static size_t
65018-bitmap_info_ngroups(const bitmap_info_t *binfo) {
65019-	return binfo->ngroups;
65020-}
65021-
65022-void
65023-bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
65024-	size_t extra;
65025-
65026-	if (fill) {
65027-		memset(bitmap, 0, bitmap_size(binfo));
65028-		return;
65029-	}
65030-
65031-	memset(bitmap, 0xffU, bitmap_size(binfo));
65032-	extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
65033-	    & BITMAP_GROUP_NBITS_MASK;
65034-	if (extra != 0) {
65035-		bitmap[binfo->ngroups - 1] >>= extra;
65036-	}
65037-}
65038-
65039-#endif /* BITMAP_USE_TREE */
65040-
65041-size_t
65042-bitmap_size(const bitmap_info_t *binfo) {
65043-	return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
65044-}
65045diff --git a/jemalloc/src/buf_writer.c b/jemalloc/src/buf_writer.c
65046deleted file mode 100644
65047index 7c6f794..0000000
65048--- a/jemalloc/src/buf_writer.c
65049+++ /dev/null
65050@@ -1,144 +0,0 @@
65051-#include "jemalloc/internal/jemalloc_preamble.h"
65052-#include "jemalloc/internal/jemalloc_internal_includes.h"
65053-
65054-#include "jemalloc/internal/buf_writer.h"
65055-#include "jemalloc/internal/malloc_io.h"
65056-
65057-static void *
65058-buf_writer_allocate_internal_buf(tsdn_t *tsdn, size_t buf_len) {
65059-#ifdef JEMALLOC_JET
65060-	if (buf_len > SC_LARGE_MAXCLASS) {
65061-		return NULL;
65062-	}
65063-#else
65064-	assert(buf_len <= SC_LARGE_MAXCLASS);
65065-#endif
65066-	return iallocztm(tsdn, buf_len, sz_size2index(buf_len), false, NULL,
65067-	    true, arena_get(tsdn, 0, false), true);
65068-}
65069-
65070-static void
65071-buf_writer_free_internal_buf(tsdn_t *tsdn, void *buf) {
65072-	if (buf != NULL) {
65073-		idalloctm(tsdn, buf, NULL, NULL, true, true);
65074-	}
65075-}
65076-
65077-static void
65078-buf_writer_assert(buf_writer_t *buf_writer) {
65079-	assert(buf_writer != NULL);
65080-	assert(buf_writer->write_cb != NULL);
65081-	if (buf_writer->buf != NULL) {
65082-		assert(buf_writer->buf_size > 0);
65083-	} else {
65084-		assert(buf_writer->buf_size == 0);
65085-		assert(buf_writer->internal_buf);
65086-	}
65087-	assert(buf_writer->buf_end <= buf_writer->buf_size);
65088-}
65089-
65090-bool
65091-buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer, write_cb_t *write_cb,
65092-    void *cbopaque, char *buf, size_t buf_len) {
65093-	if (write_cb != NULL) {
65094-		buf_writer->write_cb = write_cb;
65095-	} else {
65096-		buf_writer->write_cb = je_malloc_message != NULL ?
65097-		    je_malloc_message : wrtmessage;
65098-	}
65099-	buf_writer->cbopaque = cbopaque;
65100-	assert(buf_len >= 2);
65101-	if (buf != NULL) {
65102-		buf_writer->buf = buf;
65103-		buf_writer->internal_buf = false;
65104-	} else {
65105-		buf_writer->buf = buf_writer_allocate_internal_buf(tsdn,
65106-		    buf_len);
65107-		buf_writer->internal_buf = true;
65108-	}
65109-	if (buf_writer->buf != NULL) {
65110-		buf_writer->buf_size = buf_len - 1; /* Allowing for '\0'. */
65111-	} else {
65112-		buf_writer->buf_size = 0;
65113-	}
65114-	buf_writer->buf_end = 0;
65115-	buf_writer_assert(buf_writer);
65116-	return buf_writer->buf == NULL;
65117-}
65118-
65119-void
65120-buf_writer_flush(buf_writer_t *buf_writer) {
65121-	buf_writer_assert(buf_writer);
65122-	if (buf_writer->buf == NULL) {
65123-		return;
65124-	}
65125-	buf_writer->buf[buf_writer->buf_end] = '\0';
65126-	buf_writer->write_cb(buf_writer->cbopaque, buf_writer->buf);
65127-	buf_writer->buf_end = 0;
65128-	buf_writer_assert(buf_writer);
65129-}
65130-
65131-void
65132-buf_writer_cb(void *buf_writer_arg, const char *s) {
65133-	buf_writer_t *buf_writer = (buf_writer_t *)buf_writer_arg;
65134-	buf_writer_assert(buf_writer);
65135-	if (buf_writer->buf == NULL) {
65136-		buf_writer->write_cb(buf_writer->cbopaque, s);
65137-		return;
65138-	}
65139-	size_t i, slen, n;
65140-	for (i = 0, slen = strlen(s); i < slen; i += n) {
65141-		if (buf_writer->buf_end == buf_writer->buf_size) {
65142-			buf_writer_flush(buf_writer);
65143-		}
65144-		size_t s_remain = slen - i;
65145-		size_t buf_remain = buf_writer->buf_size - buf_writer->buf_end;
65146-		n = s_remain < buf_remain ? s_remain : buf_remain;
65147-		memcpy(buf_writer->buf + buf_writer->buf_end, s + i, n);
65148-		buf_writer->buf_end += n;
65149-		buf_writer_assert(buf_writer);
65150-	}
65151-	assert(i == slen);
65152-}
65153-
65154-void
65155-buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer) {
65156-	buf_writer_assert(buf_writer);
65157-	buf_writer_flush(buf_writer);
65158-	if (buf_writer->internal_buf) {
65159-		buf_writer_free_internal_buf(tsdn, buf_writer->buf);
65160-	}
65161-}
65162-
65163-void
65164-buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb,
65165-    void *read_cbopaque) {
65166-	/*
65167-	 * A tiny local buffer in case the buffered writer failed to allocate
65168-	 * at init.
65169-	 */
65170-	static char backup_buf[16];
65171-	static buf_writer_t backup_buf_writer;
65172-
65173-	buf_writer_assert(buf_writer);
65174-	assert(read_cb != NULL);
65175-	if (buf_writer->buf == NULL) {
65176-		buf_writer_init(TSDN_NULL, &backup_buf_writer,
65177-		    buf_writer->write_cb, buf_writer->cbopaque, backup_buf,
65178-		    sizeof(backup_buf));
65179-		buf_writer = &backup_buf_writer;
65180-	}
65181-	assert(buf_writer->buf != NULL);
65182-	ssize_t nread = 0;
65183-	do {
65184-		buf_writer->buf_end += nread;
65185-		buf_writer_assert(buf_writer);
65186-		if (buf_writer->buf_end == buf_writer->buf_size) {
65187-			buf_writer_flush(buf_writer);
65188-		}
65189-		nread = read_cb(read_cbopaque,
65190-		    buf_writer->buf + buf_writer->buf_end,
65191-		    buf_writer->buf_size - buf_writer->buf_end);
65192-	} while (nread > 0);
65193-	buf_writer_flush(buf_writer);
65194-}
65195diff --git a/jemalloc/src/cache_bin.c b/jemalloc/src/cache_bin.c
65196deleted file mode 100644
65197index 9ae072a..0000000
65198--- a/jemalloc/src/cache_bin.c
65199+++ /dev/null
65200@@ -1,99 +0,0 @@
65201-#include "jemalloc/internal/jemalloc_preamble.h"
65202-#include "jemalloc/internal/jemalloc_internal_includes.h"
65203-
65204-#include "jemalloc/internal/bit_util.h"
65205-#include "jemalloc/internal/cache_bin.h"
65206-#include "jemalloc/internal/safety_check.h"
65207-
65208-void
65209-cache_bin_info_init(cache_bin_info_t *info,
65210-    cache_bin_sz_t ncached_max) {
65211-	assert(ncached_max <= CACHE_BIN_NCACHED_MAX);
65212-	size_t stack_size = (size_t)ncached_max * sizeof(void *);
65213-	assert(stack_size < ((size_t)1 << (sizeof(cache_bin_sz_t) * 8)));
65214-	info->ncached_max = (cache_bin_sz_t)ncached_max;
65215-}
65216-
65217-void
65218-cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
65219-    size_t *size, size_t *alignment) {
65220-	/* For the total bin stack region (per tcache), reserve 2 more slots so
65221-	 * that
65222-	 * 1) the empty position can be safely read on the fast path before
65223-	 *    checking "is_empty"; and
65224-	 * 2) the cur_ptr can go beyond the empty position by 1 step safely on
65225-	 * the fast path (i.e. no overflow).
65226-	 */
65227-	*size = sizeof(void *) * 2;
65228-	for (szind_t i = 0; i < ninfos; i++) {
65229-		assert(infos[i].ncached_max > 0);
65230-		*size += infos[i].ncached_max * sizeof(void *);
65231-	}
65232-
65233-	/*
65234-	 * Align to at least PAGE, to minimize the # of TLBs needed by the
65235-	 * smaller sizes; also helps if the larger sizes don't get used at all.
65236-	 */
65237-	*alignment = PAGE;
65238-}
65239-
65240-void
65241-cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
65242-    size_t *cur_offset) {
65243-	if (config_debug) {
65244-		size_t computed_size;
65245-		size_t computed_alignment;
65246-
65247-		/* Pointer should be as aligned as we asked for. */
65248-		cache_bin_info_compute_alloc(infos, ninfos, &computed_size,
65249-		    &computed_alignment);
65250-		assert(((uintptr_t)alloc & (computed_alignment - 1)) == 0);
65251-	}
65252-
65253-	*(uintptr_t *)((uintptr_t)alloc + *cur_offset) =
65254-	    cache_bin_preceding_junk;
65255-	*cur_offset += sizeof(void *);
65256-}
65257-
65258-void
65259-cache_bin_postincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
65260-    size_t *cur_offset) {
65261-	*(uintptr_t *)((uintptr_t)alloc + *cur_offset) =
65262-	    cache_bin_trailing_junk;
65263-	*cur_offset += sizeof(void *);
65264-}
65265-
65266-void
65267-cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
65268-    size_t *cur_offset) {
65269-	/*
65270-	 * The full_position points to the lowest available space.  Allocations
65271-	 * will access the slots toward higher addresses (for the benefit of
65272-	 * adjacent prefetch).
65273-	 */
65274-	void *stack_cur = (void *)((uintptr_t)alloc + *cur_offset);
65275-	void *full_position = stack_cur;
65276-	uint16_t bin_stack_size = info->ncached_max * sizeof(void *);
65277-
65278-	*cur_offset += bin_stack_size;
65279-	void *empty_position = (void *)((uintptr_t)alloc + *cur_offset);
65280-
65281-	/* Init to the empty position. */
65282-	bin->stack_head = (void **)empty_position;
65283-	bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head;
65284-	bin->low_bits_full = (uint16_t)(uintptr_t)full_position;
65285-	bin->low_bits_empty = (uint16_t)(uintptr_t)empty_position;
65286-	cache_bin_sz_t free_spots = cache_bin_diff(bin,
65287-	    bin->low_bits_full, (uint16_t)(uintptr_t)bin->stack_head,
65288-	    /* racy */ false);
65289-	assert(free_spots == bin_stack_size);
65290-	assert(cache_bin_ncached_get_local(bin, info) == 0);
65291-	assert(cache_bin_empty_position_get(bin) == empty_position);
65292-
65293-	assert(bin_stack_size > 0 || empty_position == full_position);
65294-}
65295-
65296-bool
65297-cache_bin_still_zero_initialized(cache_bin_t *bin) {
65298-	return bin->stack_head == NULL;
65299-}
65300diff --git a/jemalloc/src/ckh.c b/jemalloc/src/ckh.c
65301deleted file mode 100644
65302index 8db4319..0000000
65303--- a/jemalloc/src/ckh.c
65304+++ /dev/null
65305@@ -1,569 +0,0 @@
65306-/*
65307- *******************************************************************************
65308- * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each
65309- * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash
65310- * functions are employed.  The original cuckoo hashing algorithm was described
65311- * in:
65312- *
65313- *   Pagh, R., F.F. Rodler (2004) Cuckoo Hashing.  Journal of Algorithms
65314- *     51(2):122-144.
65315- *
65316- * Generalization of cuckoo hashing was discussed in:
65317- *
65318- *   Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical
65319- *     alternative to traditional hash tables.  In Proceedings of the 7th
65320- *     Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA,
65321- *     January 2006.
65322- *
65323- * This implementation uses precisely two hash functions because that is the
65324- * fewest that can work, and supporting multiple hashes is an implementation
65325- * burden.  Here is a reproduction of Figure 1 from Erlingsson et al. (2006)
65326- * that shows approximate expected maximum load factors for various
65327- * configurations:
65328- *
65329- *           |         #cells/bucket         |
65330- *   #hashes |   1   |   2   |   4   |   8   |
65331- *   --------+-------+-------+-------+-------+
65332- *         1 | 0.006 | 0.006 | 0.03  | 0.12  |
65333- *         2 | 0.49  | 0.86  |>0.93< |>0.96< |
65334- *         3 | 0.91  | 0.97  | 0.98  | 0.999 |
65335- *         4 | 0.97  | 0.99  | 0.999 |       |
65336- *
65337- * The number of cells per bucket is chosen such that a bucket fits in one cache
65338- * line.  So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing,
65339- * respectively.
65340- *
65341- ******************************************************************************/
65342-#include "jemalloc/internal/jemalloc_preamble.h"
65343-
65344-#include "jemalloc/internal/ckh.h"
65345-
65346-#include "jemalloc/internal/jemalloc_internal_includes.h"
65347-
65348-#include "jemalloc/internal/assert.h"
65349-#include "jemalloc/internal/hash.h"
65350-#include "jemalloc/internal/malloc_io.h"
65351-#include "jemalloc/internal/prng.h"
65352-#include "jemalloc/internal/util.h"
65353-
65354-/******************************************************************************/
65355-/* Function prototypes for non-inline static functions. */
65356-
65357-static bool	ckh_grow(tsd_t *tsd, ckh_t *ckh);
65358-static void	ckh_shrink(tsd_t *tsd, ckh_t *ckh);
65359-
65360-/******************************************************************************/
65361-
65362-/*
65363- * Search bucket for key and return the cell number if found; SIZE_T_MAX
65364- * otherwise.
65365- */
65366-static size_t
65367-ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
65368-	ckhc_t *cell;
65369-	unsigned i;
65370-
65371-	for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
65372-		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
65373-		if (cell->key != NULL && ckh->keycomp(key, cell->key)) {
65374-			return (bucket << LG_CKH_BUCKET_CELLS) + i;
65375-		}
65376-	}
65377-
65378-	return SIZE_T_MAX;
65379-}
65380-
65381-/*
65382- * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
65383- */
65384-static size_t
65385-ckh_isearch(ckh_t *ckh, const void *key) {
65386-	size_t hashes[2], bucket, cell;
65387-
65388-	assert(ckh != NULL);
65389-
65390-	ckh->hash(key, hashes);
65391-
65392-	/* Search primary bucket. */
65393-	bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
65394-	cell = ckh_bucket_search(ckh, bucket, key);
65395-	if (cell != SIZE_T_MAX) {
65396-		return cell;
65397-	}
65398-
65399-	/* Search secondary bucket. */
65400-	bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
65401-	cell = ckh_bucket_search(ckh, bucket, key);
65402-	return cell;
65403-}
65404-
65405-static bool
65406-ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
65407-    const void *data) {
65408-	ckhc_t *cell;
65409-	unsigned offset, i;
65410-
65411-	/*
65412-	 * Cycle through the cells in the bucket, starting at a random position.
65413-	 * The randomness avoids worst-case search overhead as buckets fill up.
65414-	 */
65415-	offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
65416-	    LG_CKH_BUCKET_CELLS);
65417-	for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
65418-		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
65419-		    ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
65420-		if (cell->key == NULL) {
65421-			cell->key = key;
65422-			cell->data = data;
65423-			ckh->count++;
65424-			return false;
65425-		}
65426-	}
65427-
65428-	return true;
65429-}
65430-
65431-/*
65432- * No space is available in bucket.  Randomly evict an item, then try to find an
65433- * alternate location for that item.  Iteratively repeat this
65434- * eviction/relocation procedure until either success or detection of an
65435- * eviction/relocation bucket cycle.
65436- */
65437-static bool
65438-ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
65439-    void const **argdata) {
65440-	const void *key, *data, *tkey, *tdata;
65441-	ckhc_t *cell;
65442-	size_t hashes[2], bucket, tbucket;
65443-	unsigned i;
65444-
65445-	bucket = argbucket;
65446-	key = *argkey;
65447-	data = *argdata;
65448-	while (true) {
65449-		/*
65450-		 * Choose a random item within the bucket to evict.  This is
65451-		 * critical to correct function, because without (eventually)
65452-		 * evicting all items within a bucket during iteration, it
65453-		 * would be possible to get stuck in an infinite loop if there
65454-		 * were an item for which both hashes indicated the same
65455-		 * bucket.
65456-		 */
65457-		i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
65458-		    LG_CKH_BUCKET_CELLS);
65459-		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
65460-		assert(cell->key != NULL);
65461-
65462-		/* Swap cell->{key,data} and {key,data} (evict). */
65463-		tkey = cell->key; tdata = cell->data;
65464-		cell->key = key; cell->data = data;
65465-		key = tkey; data = tdata;
65466-
65467-#ifdef CKH_COUNT
65468-		ckh->nrelocs++;
65469-#endif
65470-
65471-		/* Find the alternate bucket for the evicted item. */
65472-		ckh->hash(key, hashes);
65473-		tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
65474-		if (tbucket == bucket) {
65475-			tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
65476-			    - 1);
65477-			/*
65478-			 * It may be that (tbucket == bucket) still, if the
65479-			 * item's hashes both indicate this bucket.  However,
65480-			 * we are guaranteed to eventually escape this bucket
65481-			 * during iteration, assuming pseudo-random item
65482-			 * selection (true randomness would make infinite
65483-			 * looping a remote possibility).  The reason we can
65484-			 * never get trapped forever is that there are two
65485-			 * cases:
65486-			 *
65487-			 * 1) This bucket == argbucket, so we will quickly
65488-			 *    detect an eviction cycle and terminate.
65489-			 * 2) An item was evicted to this bucket from another,
65490-			 *    which means that at least one item in this bucket
65491-			 *    has hashes that indicate distinct buckets.
65492-			 */
65493-		}
65494-		/* Check for a cycle. */
65495-		if (tbucket == argbucket) {
65496-			*argkey = key;
65497-			*argdata = data;
65498-			return true;
65499-		}
65500-
65501-		bucket = tbucket;
65502-		if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
65503-			return false;
65504-		}
65505-	}
65506-}
65507-
65508-static bool
65509-ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
65510-	size_t hashes[2], bucket;
65511-	const void *key = *argkey;
65512-	const void *data = *argdata;
65513-
65514-	ckh->hash(key, hashes);
65515-
65516-	/* Try to insert in primary bucket. */
65517-	bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
65518-	if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
65519-		return false;
65520-	}
65521-
65522-	/* Try to insert in secondary bucket. */
65523-	bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
65524-	if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
65525-		return false;
65526-	}
65527-
65528-	/*
65529-	 * Try to find a place for this item via iterative eviction/relocation.
65530-	 */
65531-	return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata);
65532-}
65533-
65534-/*
65535- * Try to rebuild the hash table from scratch by inserting all items from the
65536- * old table into the new.
65537- */
65538-static bool
65539-ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
65540-	size_t count, i, nins;
65541-	const void *key, *data;
65542-
65543-	count = ckh->count;
65544-	ckh->count = 0;
65545-	for (i = nins = 0; nins < count; i++) {
65546-		if (aTab[i].key != NULL) {
65547-			key = aTab[i].key;
65548-			data = aTab[i].data;
65549-			if (ckh_try_insert(ckh, &key, &data)) {
65550-				ckh->count = count;
65551-				return true;
65552-			}
65553-			nins++;
65554-		}
65555-	}
65556-
65557-	return false;
65558-}
65559-
65560-static bool
65561-ckh_grow(tsd_t *tsd, ckh_t *ckh) {
65562-	bool ret;
65563-	ckhc_t *tab, *ttab;
65564-	unsigned lg_prevbuckets, lg_curcells;
65565-
65566-#ifdef CKH_COUNT
65567-	ckh->ngrows++;
65568-#endif
65569-
65570-	/*
65571-	 * It is possible (though unlikely, given well behaved hashes) that the
65572-	 * table will have to be doubled more than once in order to create a
65573-	 * usable table.
65574-	 */
65575-	lg_prevbuckets = ckh->lg_curbuckets;
65576-	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
65577-	while (true) {
65578-		size_t usize;
65579-
65580-		lg_curcells++;
65581-		usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
65582-		if (unlikely(usize == 0
65583-		    || usize > SC_LARGE_MAXCLASS)) {
65584-			ret = true;
65585-			goto label_return;
65586-		}
65587-		tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
65588-		    true, NULL, true, arena_ichoose(tsd, NULL));
65589-		if (tab == NULL) {
65590-			ret = true;
65591-			goto label_return;
65592-		}
65593-		/* Swap in new table. */
65594-		ttab = ckh->tab;
65595-		ckh->tab = tab;
65596-		tab = ttab;
65597-		ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
65598-
65599-		if (!ckh_rebuild(ckh, tab)) {
65600-			idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
65601-			break;
65602-		}
65603-
65604-		/* Rebuilding failed, so back out partially rebuilt table. */
65605-		idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
65606-		ckh->tab = tab;
65607-		ckh->lg_curbuckets = lg_prevbuckets;
65608-	}
65609-
65610-	ret = false;
65611-label_return:
65612-	return ret;
65613-}
65614-
65615-static void
65616-ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
65617-	ckhc_t *tab, *ttab;
65618-	size_t usize;
65619-	unsigned lg_prevbuckets, lg_curcells;
65620-
65621-	/*
65622-	 * It is possible (though unlikely, given well behaved hashes) that the
65623-	 * table rebuild will fail.
65624-	 */
65625-	lg_prevbuckets = ckh->lg_curbuckets;
65626-	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
65627-	usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
65628-	if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
65629-		return;
65630-	}
65631-	tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
65632-	    true, arena_ichoose(tsd, NULL));
65633-	if (tab == NULL) {
65634-		/*
65635-		 * An OOM error isn't worth propagating, since it doesn't
65636-		 * prevent this or future operations from proceeding.
65637-		 */
65638-		return;
65639-	}
65640-	/* Swap in new table. */
65641-	ttab = ckh->tab;
65642-	ckh->tab = tab;
65643-	tab = ttab;
65644-	ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
65645-
65646-	if (!ckh_rebuild(ckh, tab)) {
65647-		idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
65648-#ifdef CKH_COUNT
65649-		ckh->nshrinks++;
65650-#endif
65651-		return;
65652-	}
65653-
65654-	/* Rebuilding failed, so back out partially rebuilt table. */
65655-	idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
65656-	ckh->tab = tab;
65657-	ckh->lg_curbuckets = lg_prevbuckets;
65658-#ifdef CKH_COUNT
65659-	ckh->nshrinkfails++;
65660-#endif
65661-}
65662-
65663-bool
65664-ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *ckh_hash,
65665-    ckh_keycomp_t *keycomp) {
65666-	bool ret;
65667-	size_t mincells, usize;
65668-	unsigned lg_mincells;
65669-
65670-	assert(minitems > 0);
65671-	assert(ckh_hash != NULL);
65672-	assert(keycomp != NULL);
65673-
65674-#ifdef CKH_COUNT
65675-	ckh->ngrows = 0;
65676-	ckh->nshrinks = 0;
65677-	ckh->nshrinkfails = 0;
65678-	ckh->ninserts = 0;
65679-	ckh->nrelocs = 0;
65680-#endif
65681-	ckh->prng_state = 42; /* Value doesn't really matter. */
65682-	ckh->count = 0;
65683-
65684-	/*
65685-	 * Find the minimum power of 2 that is large enough to fit minitems
65686-	 * entries.  We are using (2+,2) cuckoo hashing, which has an expected
65687-	 * maximum load factor of at least ~0.86, so 0.75 is a conservative load
65688-	 * factor that will typically allow mincells items to fit without ever
65689-	 * growing the table.
65690-	 */
65691-	assert(LG_CKH_BUCKET_CELLS > 0);
65692-	mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
65693-	for (lg_mincells = LG_CKH_BUCKET_CELLS;
65694-	    (ZU(1) << lg_mincells) < mincells;
65695-	    lg_mincells++) {
65696-		/* Do nothing. */
65697-	}
65698-	ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
65699-	ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
65700-	ckh->hash = ckh_hash;
65701-	ckh->keycomp = keycomp;
65702-
65703-	usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
65704-	if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
65705-		ret = true;
65706-		goto label_return;
65707-	}
65708-	ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
65709-	    NULL, true, arena_ichoose(tsd, NULL));
65710-	if (ckh->tab == NULL) {
65711-		ret = true;
65712-		goto label_return;
65713-	}
65714-
65715-	ret = false;
65716-label_return:
65717-	return ret;
65718-}
65719-
65720-void
65721-ckh_delete(tsd_t *tsd, ckh_t *ckh) {
65722-	assert(ckh != NULL);
65723-
65724-#ifdef CKH_VERBOSE
65725-	malloc_printf(
65726-	    "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
65727-	    " nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
65728-	    " nrelocs: %"FMTu64"\n", __func__, ckh,
65729-	    (unsigned long long)ckh->ngrows,
65730-	    (unsigned long long)ckh->nshrinks,
65731-	    (unsigned long long)ckh->nshrinkfails,
65732-	    (unsigned long long)ckh->ninserts,
65733-	    (unsigned long long)ckh->nrelocs);
65734-#endif
65735-
65736-	idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
65737-	if (config_debug) {
65738-		memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
65739-	}
65740-}
65741-
65742-size_t
65743-ckh_count(ckh_t *ckh) {
65744-	assert(ckh != NULL);
65745-
65746-	return ckh->count;
65747-}
65748-
65749-bool
65750-ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) {
65751-	size_t i, ncells;
65752-
65753-	for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
65754-	    LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
65755-		if (ckh->tab[i].key != NULL) {
65756-			if (key != NULL) {
65757-				*key = (void *)ckh->tab[i].key;
65758-			}
65759-			if (data != NULL) {
65760-				*data = (void *)ckh->tab[i].data;
65761-			}
65762-			*tabind = i + 1;
65763-			return false;
65764-		}
65765-	}
65766-
65767-	return true;
65768-}
65769-
65770-bool
65771-ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) {
65772-	bool ret;
65773-
65774-	assert(ckh != NULL);
65775-	assert(ckh_search(ckh, key, NULL, NULL));
65776-
65777-#ifdef CKH_COUNT
65778-	ckh->ninserts++;
65779-#endif
65780-
65781-	while (ckh_try_insert(ckh, &key, &data)) {
65782-		if (ckh_grow(tsd, ckh)) {
65783-			ret = true;
65784-			goto label_return;
65785-		}
65786-	}
65787-
65788-	ret = false;
65789-label_return:
65790-	return ret;
65791-}
65792-
65793-bool
65794-ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
65795-    void **data) {
65796-	size_t cell;
65797-
65798-	assert(ckh != NULL);
65799-
65800-	cell = ckh_isearch(ckh, searchkey);
65801-	if (cell != SIZE_T_MAX) {
65802-		if (key != NULL) {
65803-			*key = (void *)ckh->tab[cell].key;
65804-		}
65805-		if (data != NULL) {
65806-			*data = (void *)ckh->tab[cell].data;
65807-		}
65808-		ckh->tab[cell].key = NULL;
65809-		ckh->tab[cell].data = NULL; /* Not necessary. */
65810-
65811-		ckh->count--;
65812-		/* Try to halve the table if it is less than 1/4 full. */
65813-		if (ckh->count < (ZU(1) << (ckh->lg_curbuckets
65814-		    + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
65815-		    > ckh->lg_minbuckets) {
65816-			/* Ignore error due to OOM. */
65817-			ckh_shrink(tsd, ckh);
65818-		}
65819-
65820-		return false;
65821-	}
65822-
65823-	return true;
65824-}
65825-
65826-bool
65827-ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) {
65828-	size_t cell;
65829-
65830-	assert(ckh != NULL);
65831-
65832-	cell = ckh_isearch(ckh, searchkey);
65833-	if (cell != SIZE_T_MAX) {
65834-		if (key != NULL) {
65835-			*key = (void *)ckh->tab[cell].key;
65836-		}
65837-		if (data != NULL) {
65838-			*data = (void *)ckh->tab[cell].data;
65839-		}
65840-		return false;
65841-	}
65842-
65843-	return true;
65844-}
65845-
65846-void
65847-ckh_string_hash(const void *key, size_t r_hash[2]) {
65848-	hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
65849-}
65850-
65851-bool
65852-ckh_string_keycomp(const void *k1, const void *k2) {
65853-	assert(k1 != NULL);
65854-	assert(k2 != NULL);
65855-
65856-	return !strcmp((char *)k1, (char *)k2);
65857-}
65858-
65859-void
65860-ckh_pointer_hash(const void *key, size_t r_hash[2]) {
65861-	union {
65862-		const void	*v;
65863-		size_t		i;
65864-	} u;
65865-
65866-	assert(sizeof(u.v) == sizeof(u.i));
65867-	u.v = key;
65868-	hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
65869-}
65870-
65871-bool
65872-ckh_pointer_keycomp(const void *k1, const void *k2) {
65873-	return (k1 == k2);
65874-}
65875diff --git a/jemalloc/src/counter.c b/jemalloc/src/counter.c
65876deleted file mode 100644
65877index 8f1ae3a..0000000
65878--- a/jemalloc/src/counter.c
65879+++ /dev/null
65880@@ -1,30 +0,0 @@
65881-#include "jemalloc/internal/jemalloc_preamble.h"
65882-#include "jemalloc/internal/jemalloc_internal_includes.h"
65883-
65884-#include "jemalloc/internal/counter.h"
65885-
65886-bool
65887-counter_accum_init(counter_accum_t *counter, uint64_t interval) {
65888-	if (LOCKEDINT_MTX_INIT(counter->mtx, "counter_accum",
65889-	    WITNESS_RANK_COUNTER_ACCUM, malloc_mutex_rank_exclusive)) {
65890-		return true;
65891-	}
65892-	locked_init_u64_unsynchronized(&counter->accumbytes, 0);
65893-	counter->interval = interval;
65894-	return false;
65895-}
65896-
65897-void
65898-counter_prefork(tsdn_t *tsdn, counter_accum_t *counter) {
65899-	LOCKEDINT_MTX_PREFORK(tsdn, counter->mtx);
65900-}
65901-
65902-void
65903-counter_postfork_parent(tsdn_t *tsdn, counter_accum_t *counter) {
65904-	LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, counter->mtx);
65905-}
65906-
65907-void
65908-counter_postfork_child(tsdn_t *tsdn, counter_accum_t *counter) {
65909-	LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, counter->mtx);
65910-}
65911diff --git a/jemalloc/src/ctl.c b/jemalloc/src/ctl.c
65912deleted file mode 100644
65913index 135271b..0000000
65914--- a/jemalloc/src/ctl.c
65915+++ /dev/null
65916@@ -1,4414 +0,0 @@
65917-#include "jemalloc/internal/jemalloc_preamble.h"
65918-#include "jemalloc/internal/jemalloc_internal_includes.h"
65919-
65920-#include "jemalloc/internal/assert.h"
65921-#include "jemalloc/internal/ctl.h"
65922-#include "jemalloc/internal/extent_dss.h"
65923-#include "jemalloc/internal/extent_mmap.h"
65924-#include "jemalloc/internal/inspect.h"
65925-#include "jemalloc/internal/mutex.h"
65926-#include "jemalloc/internal/nstime.h"
65927-#include "jemalloc/internal/peak_event.h"
65928-#include "jemalloc/internal/prof_data.h"
65929-#include "jemalloc/internal/prof_log.h"
65930-#include "jemalloc/internal/prof_recent.h"
65931-#include "jemalloc/internal/prof_stats.h"
65932-#include "jemalloc/internal/prof_sys.h"
65933-#include "jemalloc/internal/safety_check.h"
65934-#include "jemalloc/internal/sc.h"
65935-#include "jemalloc/internal/util.h"
65936-
65937-/******************************************************************************/
65938-/* Data. */
65939-
65940-/*
65941- * ctl_mtx protects the following:
65942- * - ctl_stats->*
65943- */
65944-static malloc_mutex_t	ctl_mtx;
65945-static bool		ctl_initialized;
65946-static ctl_stats_t	*ctl_stats;
65947-static ctl_arenas_t	*ctl_arenas;
65948-
65949-/******************************************************************************/
65950-/* Helpers for named and indexed nodes. */
65951-
65952-static const ctl_named_node_t *
65953-ctl_named_node(const ctl_node_t *node) {
65954-	return ((node->named) ? (const ctl_named_node_t *)node : NULL);
65955-}
65956-
65957-static const ctl_named_node_t *
65958-ctl_named_children(const ctl_named_node_t *node, size_t index) {
65959-	const ctl_named_node_t *children = ctl_named_node(node->children);
65960-
65961-	return (children ? &children[index] : NULL);
65962-}
65963-
65964-static const ctl_indexed_node_t *
65965-ctl_indexed_node(const ctl_node_t *node) {
65966-	return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
65967-}
65968-
65969-/******************************************************************************/
65970-/* Function prototypes for non-inline static functions. */
65971-
65972-#define CTL_PROTO(n)							\
65973-static int	n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,	\
65974-    void *oldp, size_t *oldlenp, void *newp, size_t newlen);
65975-
65976-#define INDEX_PROTO(n)							\
65977-static const ctl_named_node_t	*n##_index(tsdn_t *tsdn,		\
65978-    const size_t *mib, size_t miblen, size_t i);
65979-
65980-CTL_PROTO(version)
65981-CTL_PROTO(epoch)
65982-CTL_PROTO(background_thread)
65983-CTL_PROTO(max_background_threads)
65984-CTL_PROTO(thread_tcache_enabled)
65985-CTL_PROTO(thread_tcache_flush)
65986-CTL_PROTO(thread_peak_read)
65987-CTL_PROTO(thread_peak_reset)
65988-CTL_PROTO(thread_prof_name)
65989-CTL_PROTO(thread_prof_active)
65990-CTL_PROTO(thread_arena)
65991-CTL_PROTO(thread_allocated)
65992-CTL_PROTO(thread_allocatedp)
65993-CTL_PROTO(thread_deallocated)
65994-CTL_PROTO(thread_deallocatedp)
65995-CTL_PROTO(thread_idle)
65996-CTL_PROTO(config_cache_oblivious)
65997-CTL_PROTO(config_debug)
65998-CTL_PROTO(config_fill)
65999-CTL_PROTO(config_lazy_lock)
66000-CTL_PROTO(config_malloc_conf)
66001-CTL_PROTO(config_opt_safety_checks)
66002-CTL_PROTO(config_prof)
66003-CTL_PROTO(config_prof_libgcc)
66004-CTL_PROTO(config_prof_libunwind)
66005-CTL_PROTO(config_stats)
66006-CTL_PROTO(config_utrace)
66007-CTL_PROTO(config_xmalloc)
66008-CTL_PROTO(opt_abort)
66009-CTL_PROTO(opt_abort_conf)
66010-CTL_PROTO(opt_cache_oblivious)
66011-CTL_PROTO(opt_trust_madvise)
66012-CTL_PROTO(opt_confirm_conf)
66013-CTL_PROTO(opt_hpa)
66014-CTL_PROTO(opt_hpa_slab_max_alloc)
66015-CTL_PROTO(opt_hpa_hugification_threshold)
66016-CTL_PROTO(opt_hpa_hugify_delay_ms)
66017-CTL_PROTO(opt_hpa_min_purge_interval_ms)
66018-CTL_PROTO(opt_hpa_dirty_mult)
66019-CTL_PROTO(opt_hpa_sec_nshards)
66020-CTL_PROTO(opt_hpa_sec_max_alloc)
66021-CTL_PROTO(opt_hpa_sec_max_bytes)
66022-CTL_PROTO(opt_hpa_sec_bytes_after_flush)
66023-CTL_PROTO(opt_hpa_sec_batch_fill_extra)
66024-CTL_PROTO(opt_metadata_thp)
66025-CTL_PROTO(opt_retain)
66026-CTL_PROTO(opt_dss)
66027-CTL_PROTO(opt_narenas)
66028-CTL_PROTO(opt_percpu_arena)
66029-CTL_PROTO(opt_oversize_threshold)
66030-CTL_PROTO(opt_background_thread)
66031-CTL_PROTO(opt_mutex_max_spin)
66032-CTL_PROTO(opt_max_background_threads)
66033-CTL_PROTO(opt_dirty_decay_ms)
66034-CTL_PROTO(opt_muzzy_decay_ms)
66035-CTL_PROTO(opt_stats_print)
66036-CTL_PROTO(opt_stats_print_opts)
66037-CTL_PROTO(opt_stats_interval)
66038-CTL_PROTO(opt_stats_interval_opts)
66039-CTL_PROTO(opt_junk)
66040-CTL_PROTO(opt_zero)
66041-CTL_PROTO(opt_utrace)
66042-CTL_PROTO(opt_xmalloc)
66043-CTL_PROTO(opt_experimental_infallible_new)
66044-CTL_PROTO(opt_tcache)
66045-CTL_PROTO(opt_tcache_max)
66046-CTL_PROTO(opt_tcache_nslots_small_min)
66047-CTL_PROTO(opt_tcache_nslots_small_max)
66048-CTL_PROTO(opt_tcache_nslots_large)
66049-CTL_PROTO(opt_lg_tcache_nslots_mul)
66050-CTL_PROTO(opt_tcache_gc_incr_bytes)
66051-CTL_PROTO(opt_tcache_gc_delay_bytes)
66052-CTL_PROTO(opt_lg_tcache_flush_small_div)
66053-CTL_PROTO(opt_lg_tcache_flush_large_div)
66054-CTL_PROTO(opt_thp)
66055-CTL_PROTO(opt_lg_extent_max_active_fit)
66056-CTL_PROTO(opt_prof)
66057-CTL_PROTO(opt_prof_prefix)
66058-CTL_PROTO(opt_prof_active)
66059-CTL_PROTO(opt_prof_thread_active_init)
66060-CTL_PROTO(opt_lg_prof_sample)
66061-CTL_PROTO(opt_lg_prof_interval)
66062-CTL_PROTO(opt_prof_gdump)
66063-CTL_PROTO(opt_prof_final)
66064-CTL_PROTO(opt_prof_leak)
66065-CTL_PROTO(opt_prof_leak_error)
66066-CTL_PROTO(opt_prof_accum)
66067-CTL_PROTO(opt_prof_recent_alloc_max)
66068-CTL_PROTO(opt_prof_stats)
66069-CTL_PROTO(opt_prof_sys_thread_name)
66070-CTL_PROTO(opt_prof_time_res)
66071-CTL_PROTO(opt_lg_san_uaf_align)
66072-CTL_PROTO(opt_zero_realloc)
66073-CTL_PROTO(tcache_create)
66074-CTL_PROTO(tcache_flush)
66075-CTL_PROTO(tcache_destroy)
66076-CTL_PROTO(arena_i_initialized)
66077-CTL_PROTO(arena_i_decay)
66078-CTL_PROTO(arena_i_purge)
66079-CTL_PROTO(arena_i_reset)
66080-CTL_PROTO(arena_i_destroy)
66081-CTL_PROTO(arena_i_dss)
66082-CTL_PROTO(arena_i_oversize_threshold)
66083-CTL_PROTO(arena_i_dirty_decay_ms)
66084-CTL_PROTO(arena_i_muzzy_decay_ms)
66085-CTL_PROTO(arena_i_extent_hooks)
66086-CTL_PROTO(arena_i_retain_grow_limit)
66087-INDEX_PROTO(arena_i)
66088-CTL_PROTO(arenas_bin_i_size)
66089-CTL_PROTO(arenas_bin_i_nregs)
66090-CTL_PROTO(arenas_bin_i_slab_size)
66091-CTL_PROTO(arenas_bin_i_nshards)
66092-INDEX_PROTO(arenas_bin_i)
66093-CTL_PROTO(arenas_lextent_i_size)
66094-INDEX_PROTO(arenas_lextent_i)
66095-CTL_PROTO(arenas_narenas)
66096-CTL_PROTO(arenas_dirty_decay_ms)
66097-CTL_PROTO(arenas_muzzy_decay_ms)
66098-CTL_PROTO(arenas_quantum)
66099-CTL_PROTO(arenas_page)
66100-CTL_PROTO(arenas_tcache_max)
66101-CTL_PROTO(arenas_nbins)
66102-CTL_PROTO(arenas_nhbins)
66103-CTL_PROTO(arenas_nlextents)
66104-CTL_PROTO(arenas_create)
66105-CTL_PROTO(arenas_lookup)
66106-CTL_PROTO(prof_thread_active_init)
66107-CTL_PROTO(prof_active)
66108-CTL_PROTO(prof_dump)
66109-CTL_PROTO(prof_gdump)
66110-CTL_PROTO(prof_prefix)
66111-CTL_PROTO(prof_reset)
66112-CTL_PROTO(prof_interval)
66113-CTL_PROTO(lg_prof_sample)
66114-CTL_PROTO(prof_log_start)
66115-CTL_PROTO(prof_log_stop)
66116-CTL_PROTO(prof_stats_bins_i_live)
66117-CTL_PROTO(prof_stats_bins_i_accum)
66118-INDEX_PROTO(prof_stats_bins_i)
66119-CTL_PROTO(prof_stats_lextents_i_live)
66120-CTL_PROTO(prof_stats_lextents_i_accum)
66121-INDEX_PROTO(prof_stats_lextents_i)
66122-CTL_PROTO(stats_arenas_i_small_allocated)
66123-CTL_PROTO(stats_arenas_i_small_nmalloc)
66124-CTL_PROTO(stats_arenas_i_small_ndalloc)
66125-CTL_PROTO(stats_arenas_i_small_nrequests)
66126-CTL_PROTO(stats_arenas_i_small_nfills)
66127-CTL_PROTO(stats_arenas_i_small_nflushes)
66128-CTL_PROTO(stats_arenas_i_large_allocated)
66129-CTL_PROTO(stats_arenas_i_large_nmalloc)
66130-CTL_PROTO(stats_arenas_i_large_ndalloc)
66131-CTL_PROTO(stats_arenas_i_large_nrequests)
66132-CTL_PROTO(stats_arenas_i_large_nfills)
66133-CTL_PROTO(stats_arenas_i_large_nflushes)
66134-CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
66135-CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
66136-CTL_PROTO(stats_arenas_i_bins_j_nrequests)
66137-CTL_PROTO(stats_arenas_i_bins_j_curregs)
66138-CTL_PROTO(stats_arenas_i_bins_j_nfills)
66139-CTL_PROTO(stats_arenas_i_bins_j_nflushes)
66140-CTL_PROTO(stats_arenas_i_bins_j_nslabs)
66141-CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
66142-CTL_PROTO(stats_arenas_i_bins_j_curslabs)
66143-CTL_PROTO(stats_arenas_i_bins_j_nonfull_slabs)
66144-INDEX_PROTO(stats_arenas_i_bins_j)
66145-CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
66146-CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
66147-CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
66148-CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
66149-INDEX_PROTO(stats_arenas_i_lextents_j)
66150-CTL_PROTO(stats_arenas_i_extents_j_ndirty)
66151-CTL_PROTO(stats_arenas_i_extents_j_nmuzzy)
66152-CTL_PROTO(stats_arenas_i_extents_j_nretained)
66153-CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes)
66154-CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes)
66155-CTL_PROTO(stats_arenas_i_extents_j_retained_bytes)
66156-INDEX_PROTO(stats_arenas_i_extents_j)
66157-CTL_PROTO(stats_arenas_i_hpa_shard_npurge_passes)
66158-CTL_PROTO(stats_arenas_i_hpa_shard_npurges)
66159-CTL_PROTO(stats_arenas_i_hpa_shard_nhugifies)
66160-CTL_PROTO(stats_arenas_i_hpa_shard_ndehugifies)
66161-
66162-/* We have a set of stats for full slabs. */
66163-CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)
66164-CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)
66165-CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)
66166-CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)
66167-CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)
66168-CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)
66169-
66170-/* A parallel set for the empty slabs. */
66171-CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)
66172-CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)
66173-CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)
66174-CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)
66175-CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)
66176-CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)
66177-
66178-/*
66179- * And one for the slabs that are neither empty nor full, but indexed by how
66180- * full they are.
66181- */
66182-CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)
66183-CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)
66184-CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)
66185-CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)
66186-CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)
66187-CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)
66188-
66189-INDEX_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j)
66190-CTL_PROTO(stats_arenas_i_nthreads)
66191-CTL_PROTO(stats_arenas_i_uptime)
66192-CTL_PROTO(stats_arenas_i_dss)
66193-CTL_PROTO(stats_arenas_i_dirty_decay_ms)
66194-CTL_PROTO(stats_arenas_i_muzzy_decay_ms)
66195-CTL_PROTO(stats_arenas_i_pactive)
66196-CTL_PROTO(stats_arenas_i_pdirty)
66197-CTL_PROTO(stats_arenas_i_pmuzzy)
66198-CTL_PROTO(stats_arenas_i_mapped)
66199-CTL_PROTO(stats_arenas_i_retained)
66200-CTL_PROTO(stats_arenas_i_extent_avail)
66201-CTL_PROTO(stats_arenas_i_dirty_npurge)
66202-CTL_PROTO(stats_arenas_i_dirty_nmadvise)
66203-CTL_PROTO(stats_arenas_i_dirty_purged)
66204-CTL_PROTO(stats_arenas_i_muzzy_npurge)
66205-CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
66206-CTL_PROTO(stats_arenas_i_muzzy_purged)
66207-CTL_PROTO(stats_arenas_i_base)
66208-CTL_PROTO(stats_arenas_i_internal)
66209-CTL_PROTO(stats_arenas_i_metadata_thp)
66210-CTL_PROTO(stats_arenas_i_tcache_bytes)
66211-CTL_PROTO(stats_arenas_i_tcache_stashed_bytes)
66212-CTL_PROTO(stats_arenas_i_resident)
66213-CTL_PROTO(stats_arenas_i_abandoned_vm)
66214-CTL_PROTO(stats_arenas_i_hpa_sec_bytes)
66215-INDEX_PROTO(stats_arenas_i)
66216-CTL_PROTO(stats_allocated)
66217-CTL_PROTO(stats_active)
66218-CTL_PROTO(stats_background_thread_num_threads)
66219-CTL_PROTO(stats_background_thread_num_runs)
66220-CTL_PROTO(stats_background_thread_run_interval)
66221-CTL_PROTO(stats_metadata)
66222-CTL_PROTO(stats_metadata_thp)
66223-CTL_PROTO(stats_resident)
66224-CTL_PROTO(stats_mapped)
66225-CTL_PROTO(stats_retained)
66226-CTL_PROTO(stats_zero_reallocs)
66227-CTL_PROTO(experimental_hooks_install)
66228-CTL_PROTO(experimental_hooks_remove)
66229-CTL_PROTO(experimental_hooks_prof_backtrace)
66230-CTL_PROTO(experimental_hooks_prof_dump)
66231-CTL_PROTO(experimental_hooks_safety_check_abort)
66232-CTL_PROTO(experimental_thread_activity_callback)
66233-CTL_PROTO(experimental_utilization_query)
66234-CTL_PROTO(experimental_utilization_batch_query)
66235-CTL_PROTO(experimental_arenas_i_pactivep)
66236-INDEX_PROTO(experimental_arenas_i)
66237-CTL_PROTO(experimental_prof_recent_alloc_max)
66238-CTL_PROTO(experimental_prof_recent_alloc_dump)
66239-CTL_PROTO(experimental_batch_alloc)
66240-CTL_PROTO(experimental_arenas_create_ext)
66241-
66242-#define MUTEX_STATS_CTL_PROTO_GEN(n)					\
66243-CTL_PROTO(stats_##n##_num_ops)						\
66244-CTL_PROTO(stats_##n##_num_wait)						\
66245-CTL_PROTO(stats_##n##_num_spin_acq)					\
66246-CTL_PROTO(stats_##n##_num_owner_switch)					\
66247-CTL_PROTO(stats_##n##_total_wait_time)					\
66248-CTL_PROTO(stats_##n##_max_wait_time)					\
66249-CTL_PROTO(stats_##n##_max_num_thds)
66250-
66251-/* Global mutexes. */
66252-#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx)
66253-MUTEX_PROF_GLOBAL_MUTEXES
66254-#undef OP
66255-
66256-/* Per arena mutexes. */
66257-#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx)
66258-MUTEX_PROF_ARENA_MUTEXES
66259-#undef OP
66260-
66261-/* Arena bin mutexes. */
66262-MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex)
66263-#undef MUTEX_STATS_CTL_PROTO_GEN
66264-
66265-CTL_PROTO(stats_mutexes_reset)
66266-
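/*
 * A minimal sketch of what the OP()/X-macro pattern above generates,
 * assuming MUTEX_PROF_GLOBAL_MUTEXES (defined in the mutex_prof header,
 * which is not part of this file) contains an OP(ctl) entry.  With
 *     #define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx)
 * that entry expands to MUTEX_STATS_CTL_PROTO_GEN(mutexes_ctl), i.e.:
 *
 *     CTL_PROTO(stats_mutexes_ctl_num_ops)
 *     CTL_PROTO(stats_mutexes_ctl_num_wait)
 *     CTL_PROTO(stats_mutexes_ctl_num_spin_acq)
 *     CTL_PROTO(stats_mutexes_ctl_num_owner_switch)
 *     CTL_PROTO(stats_mutexes_ctl_total_wait_time)
 *     CTL_PROTO(stats_mutexes_ctl_max_wait_time)
 *     CTL_PROTO(stats_mutexes_ctl_max_num_thds)
 *
 * so every profiled mutex gets one *_ctl handler prototype per counter.
 */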
66267-/******************************************************************************/
66268-/* mallctl tree. */
66269-
66270-#define NAME(n)	{true},	n
66271-#define CHILD(t, c)							\
66272-	sizeof(c##_node) / sizeof(ctl_##t##_node_t),			\
66273-	(ctl_node_t *)c##_node,						\
66274-	NULL
66275-#define CTL(c)	0, NULL, c##_ctl
66276-
66277-/*
66278- * Only handles internal indexed nodes, since there are currently no external
66279- * ones.
66280- */
66281-#define INDEX(i)	{false},	i##_index
66282-
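/*
 * A sketch of how the NAME()/CHILD()/CTL()/INDEX() helpers above expand,
 * using entries that appear in the tables below (the field layout follows
 * the ctl_named_node_t/ctl_indexed_node_t definitions in the ctl header,
 * which is not part of this file):
 *
 *     {NAME("enabled"), CTL(thread_tcache_enabled)}
 *         => {{true}, "enabled", 0, NULL, thread_tcache_enabled_ctl}
 *            (leaf: no children, terminal handler set)
 *
 *     {NAME("tcache"), CHILD(named, thread_tcache)}
 *         => {{true}, "tcache",
 *             sizeof(thread_tcache_node) / sizeof(ctl_named_node_t),
 *             (ctl_node_t *)thread_tcache_node, NULL}
 *            (internal node: child count plus child array, no handler)
 *
 *     {INDEX(arena_i)}
 *         => {{false}, arena_i_index}
 *            (indexed node: the index callback resolves the numeric
 *             component at lookup time)
 *
 * The leading {true}/{false} initializes the embedded ctl_node_t
 * discriminator consulted by ctl_named_node()/ctl_indexed_node().
 */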
66283-static const ctl_named_node_t	thread_tcache_node[] = {
66284-	{NAME("enabled"),	CTL(thread_tcache_enabled)},
66285-	{NAME("flush"),		CTL(thread_tcache_flush)}
66286-};
66287-
66288-static const ctl_named_node_t	thread_peak_node[] = {
66289-	{NAME("read"),		CTL(thread_peak_read)},
66290-	{NAME("reset"),		CTL(thread_peak_reset)},
66291-};
66292-
66293-static const ctl_named_node_t	thread_prof_node[] = {
66294-	{NAME("name"),		CTL(thread_prof_name)},
66295-	{NAME("active"),	CTL(thread_prof_active)}
66296-};
66297-
66298-static const ctl_named_node_t	thread_node[] = {
66299-	{NAME("arena"),		CTL(thread_arena)},
66300-	{NAME("allocated"),	CTL(thread_allocated)},
66301-	{NAME("allocatedp"),	CTL(thread_allocatedp)},
66302-	{NAME("deallocated"),	CTL(thread_deallocated)},
66303-	{NAME("deallocatedp"),	CTL(thread_deallocatedp)},
66304-	{NAME("tcache"),	CHILD(named, thread_tcache)},
66305-	{NAME("peak"),		CHILD(named, thread_peak)},
66306-	{NAME("prof"),		CHILD(named, thread_prof)},
66307-	{NAME("idle"),		CTL(thread_idle)}
66308-};
66309-
66310-static const ctl_named_node_t	config_node[] = {
66311-	{NAME("cache_oblivious"), CTL(config_cache_oblivious)},
66312-	{NAME("debug"),		CTL(config_debug)},
66313-	{NAME("fill"),		CTL(config_fill)},
66314-	{NAME("lazy_lock"),	CTL(config_lazy_lock)},
66315-	{NAME("malloc_conf"),	CTL(config_malloc_conf)},
66316-	{NAME("opt_safety_checks"),	CTL(config_opt_safety_checks)},
66317-	{NAME("prof"),		CTL(config_prof)},
66318-	{NAME("prof_libgcc"),	CTL(config_prof_libgcc)},
66319-	{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
66320-	{NAME("stats"),		CTL(config_stats)},
66321-	{NAME("utrace"),	CTL(config_utrace)},
66322-	{NAME("xmalloc"),	CTL(config_xmalloc)}
66323-};
66324-
66325-static const ctl_named_node_t opt_node[] = {
66326-	{NAME("abort"),		CTL(opt_abort)},
66327-	{NAME("abort_conf"),	CTL(opt_abort_conf)},
66328-	{NAME("cache_oblivious"),	CTL(opt_cache_oblivious)},
66329-	{NAME("trust_madvise"),	CTL(opt_trust_madvise)},
66330-	{NAME("confirm_conf"),	CTL(opt_confirm_conf)},
66331-	{NAME("hpa"),		CTL(opt_hpa)},
66332-	{NAME("hpa_slab_max_alloc"),	CTL(opt_hpa_slab_max_alloc)},
66333-	{NAME("hpa_hugification_threshold"),
66334-		CTL(opt_hpa_hugification_threshold)},
66335-	{NAME("hpa_hugify_delay_ms"), CTL(opt_hpa_hugify_delay_ms)},
66336-	{NAME("hpa_min_purge_interval_ms"), CTL(opt_hpa_min_purge_interval_ms)},
66337-	{NAME("hpa_dirty_mult"), CTL(opt_hpa_dirty_mult)},
66338-	{NAME("hpa_sec_nshards"),	CTL(opt_hpa_sec_nshards)},
66339-	{NAME("hpa_sec_max_alloc"),	CTL(opt_hpa_sec_max_alloc)},
66340-	{NAME("hpa_sec_max_bytes"),	CTL(opt_hpa_sec_max_bytes)},
66341-	{NAME("hpa_sec_bytes_after_flush"),
66342-		CTL(opt_hpa_sec_bytes_after_flush)},
66343-	{NAME("hpa_sec_batch_fill_extra"),
66344-		CTL(opt_hpa_sec_batch_fill_extra)},
66345-	{NAME("metadata_thp"),	CTL(opt_metadata_thp)},
66346-	{NAME("retain"),	CTL(opt_retain)},
66347-	{NAME("dss"),		CTL(opt_dss)},
66348-	{NAME("narenas"),	CTL(opt_narenas)},
66349-	{NAME("percpu_arena"),	CTL(opt_percpu_arena)},
66350-	{NAME("oversize_threshold"),	CTL(opt_oversize_threshold)},
66351-	{NAME("mutex_max_spin"),	CTL(opt_mutex_max_spin)},
66352-	{NAME("background_thread"),	CTL(opt_background_thread)},
66353-	{NAME("max_background_threads"),	CTL(opt_max_background_threads)},
66354-	{NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
66355-	{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
66356-	{NAME("stats_print"),	CTL(opt_stats_print)},
66357-	{NAME("stats_print_opts"),	CTL(opt_stats_print_opts)},
66358-	{NAME("stats_interval"),	CTL(opt_stats_interval)},
66359-	{NAME("stats_interval_opts"),	CTL(opt_stats_interval_opts)},
66360-	{NAME("junk"),		CTL(opt_junk)},
66361-	{NAME("zero"),		CTL(opt_zero)},
66362-	{NAME("utrace"),	CTL(opt_utrace)},
66363-	{NAME("xmalloc"),	CTL(opt_xmalloc)},
66364-	{NAME("experimental_infallible_new"),
66365-		CTL(opt_experimental_infallible_new)},
66366-	{NAME("tcache"),	CTL(opt_tcache)},
66367-	{NAME("tcache_max"),	CTL(opt_tcache_max)},
66368-	{NAME("tcache_nslots_small_min"),
66369-		CTL(opt_tcache_nslots_small_min)},
66370-	{NAME("tcache_nslots_small_max"),
66371-		CTL(opt_tcache_nslots_small_max)},
66372-	{NAME("tcache_nslots_large"),	CTL(opt_tcache_nslots_large)},
66373-	{NAME("lg_tcache_nslots_mul"),	CTL(opt_lg_tcache_nslots_mul)},
66374-	{NAME("tcache_gc_incr_bytes"),	CTL(opt_tcache_gc_incr_bytes)},
66375-	{NAME("tcache_gc_delay_bytes"),	CTL(opt_tcache_gc_delay_bytes)},
66376-	{NAME("lg_tcache_flush_small_div"),
66377-		CTL(opt_lg_tcache_flush_small_div)},
66378-	{NAME("lg_tcache_flush_large_div"),
66379-		CTL(opt_lg_tcache_flush_large_div)},
66380-	{NAME("thp"),		CTL(opt_thp)},
66381-	{NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
66382-	{NAME("prof"),		CTL(opt_prof)},
66383-	{NAME("prof_prefix"),	CTL(opt_prof_prefix)},
66384-	{NAME("prof_active"),	CTL(opt_prof_active)},
66385-	{NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
66386-	{NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
66387-	{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
66388-	{NAME("prof_gdump"),	CTL(opt_prof_gdump)},
66389-	{NAME("prof_final"),	CTL(opt_prof_final)},
66390-	{NAME("prof_leak"),	CTL(opt_prof_leak)},
66391-	{NAME("prof_leak_error"),	CTL(opt_prof_leak_error)},
66392-	{NAME("prof_accum"),	CTL(opt_prof_accum)},
66393-	{NAME("prof_recent_alloc_max"),	CTL(opt_prof_recent_alloc_max)},
66394-	{NAME("prof_stats"),	CTL(opt_prof_stats)},
66395-	{NAME("prof_sys_thread_name"),	CTL(opt_prof_sys_thread_name)},
66396-	{NAME("prof_time_resolution"),	CTL(opt_prof_time_res)},
66397-	{NAME("lg_san_uaf_align"),	CTL(opt_lg_san_uaf_align)},
66398-	{NAME("zero_realloc"),	CTL(opt_zero_realloc)}
66399-};
66400-
66401-static const ctl_named_node_t	tcache_node[] = {
66402-	{NAME("create"),	CTL(tcache_create)},
66403-	{NAME("flush"),		CTL(tcache_flush)},
66404-	{NAME("destroy"),	CTL(tcache_destroy)}
66405-};
66406-
66407-static const ctl_named_node_t arena_i_node[] = {
66408-	{NAME("initialized"),	CTL(arena_i_initialized)},
66409-	{NAME("decay"),		CTL(arena_i_decay)},
66410-	{NAME("purge"),		CTL(arena_i_purge)},
66411-	{NAME("reset"),		CTL(arena_i_reset)},
66412-	{NAME("destroy"),	CTL(arena_i_destroy)},
66413-	{NAME("dss"),		CTL(arena_i_dss)},
66414-	/*
66415-	 * Undocumented for now, since we anticipate an arena API in flux after
66416-	 * we cut the last 5-series release.
66417-	 */
66418-	{NAME("oversize_threshold"), CTL(arena_i_oversize_threshold)},
66419-	{NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
66420-	{NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
66421-	{NAME("extent_hooks"),	CTL(arena_i_extent_hooks)},
66422-	{NAME("retain_grow_limit"),	CTL(arena_i_retain_grow_limit)}
66423-};
66424-static const ctl_named_node_t super_arena_i_node[] = {
66425-	{NAME(""),		CHILD(named, arena_i)}
66426-};
66427-
66428-static const ctl_indexed_node_t arena_node[] = {
66429-	{INDEX(arena_i)}
66430-};
66431-
66432-static const ctl_named_node_t arenas_bin_i_node[] = {
66433-	{NAME("size"),		CTL(arenas_bin_i_size)},
66434-	{NAME("nregs"),		CTL(arenas_bin_i_nregs)},
66435-	{NAME("slab_size"),	CTL(arenas_bin_i_slab_size)},
66436-	{NAME("nshards"),	CTL(arenas_bin_i_nshards)}
66437-};
66438-static const ctl_named_node_t super_arenas_bin_i_node[] = {
66439-	{NAME(""),		CHILD(named, arenas_bin_i)}
66440-};
66441-
66442-static const ctl_indexed_node_t arenas_bin_node[] = {
66443-	{INDEX(arenas_bin_i)}
66444-};
66445-
66446-static const ctl_named_node_t arenas_lextent_i_node[] = {
66447-	{NAME("size"),		CTL(arenas_lextent_i_size)}
66448-};
66449-static const ctl_named_node_t super_arenas_lextent_i_node[] = {
66450-	{NAME(""),		CHILD(named, arenas_lextent_i)}
66451-};
66452-
66453-static const ctl_indexed_node_t arenas_lextent_node[] = {
66454-	{INDEX(arenas_lextent_i)}
66455-};
66456-
66457-static const ctl_named_node_t arenas_node[] = {
66458-	{NAME("narenas"),	CTL(arenas_narenas)},
66459-	{NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)},
66460-	{NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)},
66461-	{NAME("quantum"),	CTL(arenas_quantum)},
66462-	{NAME("page"),		CTL(arenas_page)},
66463-	{NAME("tcache_max"),	CTL(arenas_tcache_max)},
66464-	{NAME("nbins"),		CTL(arenas_nbins)},
66465-	{NAME("nhbins"),	CTL(arenas_nhbins)},
66466-	{NAME("bin"),		CHILD(indexed, arenas_bin)},
66467-	{NAME("nlextents"),	CTL(arenas_nlextents)},
66468-	{NAME("lextent"),	CHILD(indexed, arenas_lextent)},
66469-	{NAME("create"),	CTL(arenas_create)},
66470-	{NAME("lookup"),	CTL(arenas_lookup)}
66471-};
66472-
66473-static const ctl_named_node_t prof_stats_bins_i_node[] = {
66474-	{NAME("live"),		CTL(prof_stats_bins_i_live)},
66475-	{NAME("accum"),		CTL(prof_stats_bins_i_accum)}
66476-};
66477-
66478-static const ctl_named_node_t super_prof_stats_bins_i_node[] = {
66479-	{NAME(""),		CHILD(named, prof_stats_bins_i)}
66480-};
66481-
66482-static const ctl_indexed_node_t prof_stats_bins_node[] = {
66483-	{INDEX(prof_stats_bins_i)}
66484-};
66485-
66486-static const ctl_named_node_t prof_stats_lextents_i_node[] = {
66487-	{NAME("live"),		CTL(prof_stats_lextents_i_live)},
66488-	{NAME("accum"),		CTL(prof_stats_lextents_i_accum)}
66489-};
66490-
66491-static const ctl_named_node_t super_prof_stats_lextents_i_node[] = {
66492-	{NAME(""),		CHILD(named, prof_stats_lextents_i)}
66493-};
66494-
66495-static const ctl_indexed_node_t prof_stats_lextents_node[] = {
66496-	{INDEX(prof_stats_lextents_i)}
66497-};
66498-
66499-static const ctl_named_node_t	prof_stats_node[] = {
66500-	{NAME("bins"),		CHILD(indexed, prof_stats_bins)},
66501-	{NAME("lextents"),	CHILD(indexed, prof_stats_lextents)},
66502-};
66503-
66504-static const ctl_named_node_t	prof_node[] = {
66505-	{NAME("thread_active_init"), CTL(prof_thread_active_init)},
66506-	{NAME("active"),	CTL(prof_active)},
66507-	{NAME("dump"),		CTL(prof_dump)},
66508-	{NAME("gdump"),		CTL(prof_gdump)},
66509-	{NAME("prefix"),	CTL(prof_prefix)},
66510-	{NAME("reset"),		CTL(prof_reset)},
66511-	{NAME("interval"),	CTL(prof_interval)},
66512-	{NAME("lg_sample"),	CTL(lg_prof_sample)},
66513-	{NAME("log_start"),	CTL(prof_log_start)},
66514-	{NAME("log_stop"),	CTL(prof_log_stop)},
66515-	{NAME("stats"),		CHILD(named, prof_stats)}
66516-};
66517-
66518-static const ctl_named_node_t stats_arenas_i_small_node[] = {
66519-	{NAME("allocated"),	CTL(stats_arenas_i_small_allocated)},
66520-	{NAME("nmalloc"),	CTL(stats_arenas_i_small_nmalloc)},
66521-	{NAME("ndalloc"),	CTL(stats_arenas_i_small_ndalloc)},
66522-	{NAME("nrequests"),	CTL(stats_arenas_i_small_nrequests)},
66523-	{NAME("nfills"),	CTL(stats_arenas_i_small_nfills)},
66524-	{NAME("nflushes"),	CTL(stats_arenas_i_small_nflushes)}
66525-};
66526-
66527-static const ctl_named_node_t stats_arenas_i_large_node[] = {
66528-	{NAME("allocated"),	CTL(stats_arenas_i_large_allocated)},
66529-	{NAME("nmalloc"),	CTL(stats_arenas_i_large_nmalloc)},
66530-	{NAME("ndalloc"),	CTL(stats_arenas_i_large_ndalloc)},
66531-	{NAME("nrequests"),	CTL(stats_arenas_i_large_nrequests)},
66532-	{NAME("nfills"),	CTL(stats_arenas_i_large_nfills)},
66533-	{NAME("nflushes"),	CTL(stats_arenas_i_large_nflushes)}
66534-};
66535-
66536-#define MUTEX_PROF_DATA_NODE(prefix)					\
66537-static const ctl_named_node_t stats_##prefix##_node[] = {		\
66538-	{NAME("num_ops"),						\
66539-	 CTL(stats_##prefix##_num_ops)},				\
66540-	{NAME("num_wait"),						\
66541-	 CTL(stats_##prefix##_num_wait)},				\
66542-	{NAME("num_spin_acq"),						\
66543-	 CTL(stats_##prefix##_num_spin_acq)},				\
66544-	{NAME("num_owner_switch"),					\
66545-	 CTL(stats_##prefix##_num_owner_switch)},			\
66546-	{NAME("total_wait_time"),					\
66547-	 CTL(stats_##prefix##_total_wait_time)},			\
66548-	{NAME("max_wait_time"),						\
66549-	 CTL(stats_##prefix##_max_wait_time)},				\
66550-	{NAME("max_num_thds"),						\
66551-	 CTL(stats_##prefix##_max_num_thds)}				\
66552-	/* Note: the number of currently waiting threads is not provided. */ \

66553-};
66554-
66555-MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex)
66556-
66557-static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
66558-	{NAME("nmalloc"),	CTL(stats_arenas_i_bins_j_nmalloc)},
66559-	{NAME("ndalloc"),	CTL(stats_arenas_i_bins_j_ndalloc)},
66560-	{NAME("nrequests"),	CTL(stats_arenas_i_bins_j_nrequests)},
66561-	{NAME("curregs"),	CTL(stats_arenas_i_bins_j_curregs)},
66562-	{NAME("nfills"),	CTL(stats_arenas_i_bins_j_nfills)},
66563-	{NAME("nflushes"),	CTL(stats_arenas_i_bins_j_nflushes)},
66564-	{NAME("nslabs"),	CTL(stats_arenas_i_bins_j_nslabs)},
66565-	{NAME("nreslabs"),	CTL(stats_arenas_i_bins_j_nreslabs)},
66566-	{NAME("curslabs"),	CTL(stats_arenas_i_bins_j_curslabs)},
66567-	{NAME("nonfull_slabs"),	CTL(stats_arenas_i_bins_j_nonfull_slabs)},
66568-	{NAME("mutex"),		CHILD(named, stats_arenas_i_bins_j_mutex)}
66569-};
66570-
66571-static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
66572-	{NAME(""),		CHILD(named, stats_arenas_i_bins_j)}
66573-};
66574-
66575-static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
66576-	{INDEX(stats_arenas_i_bins_j)}
66577-};
66578-
66579-static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = {
66580-	{NAME("nmalloc"),	CTL(stats_arenas_i_lextents_j_nmalloc)},
66581-	{NAME("ndalloc"),	CTL(stats_arenas_i_lextents_j_ndalloc)},
66582-	{NAME("nrequests"),	CTL(stats_arenas_i_lextents_j_nrequests)},
66583-	{NAME("curlextents"),	CTL(stats_arenas_i_lextents_j_curlextents)}
66584-};
66585-static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = {
66586-	{NAME(""),		CHILD(named, stats_arenas_i_lextents_j)}
66587-};
66588-
66589-static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
66590-	{INDEX(stats_arenas_i_lextents_j)}
66591-};
66592-
66593-static const ctl_named_node_t stats_arenas_i_extents_j_node[] = {
66594-	{NAME("ndirty"),	CTL(stats_arenas_i_extents_j_ndirty)},
66595-	{NAME("nmuzzy"),	CTL(stats_arenas_i_extents_j_nmuzzy)},
66596-	{NAME("nretained"),	CTL(stats_arenas_i_extents_j_nretained)},
66597-	{NAME("dirty_bytes"),	CTL(stats_arenas_i_extents_j_dirty_bytes)},
66598-	{NAME("muzzy_bytes"),	CTL(stats_arenas_i_extents_j_muzzy_bytes)},
66599-	{NAME("retained_bytes"), CTL(stats_arenas_i_extents_j_retained_bytes)}
66600-};
66601-
66602-static const ctl_named_node_t super_stats_arenas_i_extents_j_node[] = {
66603-	{NAME(""),		CHILD(named, stats_arenas_i_extents_j)}
66604-};
66605-
66606-static const ctl_indexed_node_t stats_arenas_i_extents_node[] = {
66607-	{INDEX(stats_arenas_i_extents_j)}
66608-};
66609-
66610-#define OP(mtx)  MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx)
66611-MUTEX_PROF_ARENA_MUTEXES
66612-#undef OP
66613-
66614-static const ctl_named_node_t stats_arenas_i_mutexes_node[] = {
66615-#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)},
66616-MUTEX_PROF_ARENA_MUTEXES
66617-#undef OP
66618-};
66619-
66620-static const ctl_named_node_t stats_arenas_i_hpa_shard_full_slabs_node[] = {
66621-	{NAME("npageslabs_nonhuge"),
66622-		CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)},
66623-	{NAME("npageslabs_huge"),
66624-		CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)},
66625-	{NAME("nactive_nonhuge"),
66626-		CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)},
66627-	{NAME("nactive_huge"),
66628-		CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)},
66629-	{NAME("ndirty_nonhuge"),
66630-		CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)},
66631-	{NAME("ndirty_huge"),
66632-		CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)}
66633-};
66634-
66635-static const ctl_named_node_t stats_arenas_i_hpa_shard_empty_slabs_node[] = {
66636-	{NAME("npageslabs_nonhuge"),
66637-		CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)},
66638-	{NAME("npageslabs_huge"),
66639-		CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)},
66640-	{NAME("nactive_nonhuge"),
66641-		CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)},
66642-	{NAME("nactive_huge"),
66643-		CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)},
66644-	{NAME("ndirty_nonhuge"),
66645-		CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)},
66646-	{NAME("ndirty_huge"),
66647-		CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)}
66648-};
66649-
66650-static const ctl_named_node_t stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = {
66651-	{NAME("npageslabs_nonhuge"),
66652-		CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)},
66653-	{NAME("npageslabs_huge"),
66654-		CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)},
66655-	{NAME("nactive_nonhuge"),
66656-		CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)},
66657-	{NAME("nactive_huge"),
66658-		CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)},
66659-	{NAME("ndirty_nonhuge"),
66660-		CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)},
66661-	{NAME("ndirty_huge"),
66662-		CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)}
66663-};
66664-
66665-static const ctl_named_node_t super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = {
66666-	{NAME(""),
66667-		CHILD(named, stats_arenas_i_hpa_shard_nonfull_slabs_j)}
66668-};
66669-
66670-static const ctl_indexed_node_t stats_arenas_i_hpa_shard_nonfull_slabs_node[] =
66671-{
66672-	{INDEX(stats_arenas_i_hpa_shard_nonfull_slabs_j)}
66673-};
66674-
66675-static const ctl_named_node_t stats_arenas_i_hpa_shard_node[] = {
66676-	{NAME("full_slabs"),	CHILD(named,
66677-	    stats_arenas_i_hpa_shard_full_slabs)},
66678-	{NAME("empty_slabs"),	CHILD(named,
66679-	    stats_arenas_i_hpa_shard_empty_slabs)},
66680-	{NAME("nonfull_slabs"),	CHILD(indexed,
66681-	    stats_arenas_i_hpa_shard_nonfull_slabs)},
66682-
66683-	{NAME("npurge_passes"),	CTL(stats_arenas_i_hpa_shard_npurge_passes)},
66684-	{NAME("npurges"),	CTL(stats_arenas_i_hpa_shard_npurges)},
66685-	{NAME("nhugifies"),	CTL(stats_arenas_i_hpa_shard_nhugifies)},
66686-	{NAME("ndehugifies"),	CTL(stats_arenas_i_hpa_shard_ndehugifies)}
66687-};
66688-
66689-static const ctl_named_node_t stats_arenas_i_node[] = {
66690-	{NAME("nthreads"),	CTL(stats_arenas_i_nthreads)},
66691-	{NAME("uptime"),	CTL(stats_arenas_i_uptime)},
66692-	{NAME("dss"),		CTL(stats_arenas_i_dss)},
66693-	{NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)},
66694-	{NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)},
66695-	{NAME("pactive"),	CTL(stats_arenas_i_pactive)},
66696-	{NAME("pdirty"),	CTL(stats_arenas_i_pdirty)},
66697-	{NAME("pmuzzy"),	CTL(stats_arenas_i_pmuzzy)},
66698-	{NAME("mapped"),	CTL(stats_arenas_i_mapped)},
66699-	{NAME("retained"),	CTL(stats_arenas_i_retained)},
66700-	{NAME("extent_avail"),	CTL(stats_arenas_i_extent_avail)},
66701-	{NAME("dirty_npurge"),	CTL(stats_arenas_i_dirty_npurge)},
66702-	{NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)},
66703-	{NAME("dirty_purged"),	CTL(stats_arenas_i_dirty_purged)},
66704-	{NAME("muzzy_npurge"),	CTL(stats_arenas_i_muzzy_npurge)},
66705-	{NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)},
66706-	{NAME("muzzy_purged"),	CTL(stats_arenas_i_muzzy_purged)},
66707-	{NAME("base"),		CTL(stats_arenas_i_base)},
66708-	{NAME("internal"),	CTL(stats_arenas_i_internal)},
66709-	{NAME("metadata_thp"),	CTL(stats_arenas_i_metadata_thp)},
66710-	{NAME("tcache_bytes"),	CTL(stats_arenas_i_tcache_bytes)},
66711-	{NAME("tcache_stashed_bytes"),
66712-	    CTL(stats_arenas_i_tcache_stashed_bytes)},
66713-	{NAME("resident"),	CTL(stats_arenas_i_resident)},
66714-	{NAME("abandoned_vm"),	CTL(stats_arenas_i_abandoned_vm)},
66715-	{NAME("hpa_sec_bytes"),	CTL(stats_arenas_i_hpa_sec_bytes)},
66716-	{NAME("small"),		CHILD(named, stats_arenas_i_small)},
66717-	{NAME("large"),		CHILD(named, stats_arenas_i_large)},
66718-	{NAME("bins"),		CHILD(indexed, stats_arenas_i_bins)},
66719-	{NAME("lextents"),	CHILD(indexed, stats_arenas_i_lextents)},
66720-	{NAME("extents"),	CHILD(indexed, stats_arenas_i_extents)},
66721-	{NAME("mutexes"),	CHILD(named, stats_arenas_i_mutexes)},
66722-	{NAME("hpa_shard"),	CHILD(named, stats_arenas_i_hpa_shard)}
66723-};
66724-static const ctl_named_node_t super_stats_arenas_i_node[] = {
66725-	{NAME(""),		CHILD(named, stats_arenas_i)}
66726-};
66727-
66728-static const ctl_indexed_node_t stats_arenas_node[] = {
66729-	{INDEX(stats_arenas_i)}
66730-};
66731-
66732-static const ctl_named_node_t stats_background_thread_node[] = {
66733-	{NAME("num_threads"),	CTL(stats_background_thread_num_threads)},
66734-	{NAME("num_runs"),	CTL(stats_background_thread_num_runs)},
66735-	{NAME("run_interval"),	CTL(stats_background_thread_run_interval)}
66736-};
66737-
66738-#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx)
66739-MUTEX_PROF_GLOBAL_MUTEXES
66740-#undef OP
66741-
66742-static const ctl_named_node_t stats_mutexes_node[] = {
66743-#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)},
66744-MUTEX_PROF_GLOBAL_MUTEXES
66745-#undef OP
66746-	{NAME("reset"),		CTL(stats_mutexes_reset)}
66747-};
66748-#undef MUTEX_PROF_DATA_NODE
66749-
66750-static const ctl_named_node_t stats_node[] = {
66751-	{NAME("allocated"),	CTL(stats_allocated)},
66752-	{NAME("active"),	CTL(stats_active)},
66753-	{NAME("metadata"),	CTL(stats_metadata)},
66754-	{NAME("metadata_thp"),	CTL(stats_metadata_thp)},
66755-	{NAME("resident"),	CTL(stats_resident)},
66756-	{NAME("mapped"),	CTL(stats_mapped)},
66757-	{NAME("retained"),	CTL(stats_retained)},
66758-	{NAME("background_thread"),
66759-	 CHILD(named, stats_background_thread)},
66760-	{NAME("mutexes"),	CHILD(named, stats_mutexes)},
66761-	{NAME("arenas"),	CHILD(indexed, stats_arenas)},
66762-	{NAME("zero_reallocs"),	CTL(stats_zero_reallocs)},
66763-};
66764-
66765-static const ctl_named_node_t experimental_hooks_node[] = {
66766-	{NAME("install"),	CTL(experimental_hooks_install)},
66767-	{NAME("remove"),	CTL(experimental_hooks_remove)},
66768-	{NAME("prof_backtrace"),	CTL(experimental_hooks_prof_backtrace)},
66769-	{NAME("prof_dump"),	CTL(experimental_hooks_prof_dump)},
66770-	{NAME("safety_check_abort"),	CTL(experimental_hooks_safety_check_abort)},
66771-};
66772-
66773-static const ctl_named_node_t experimental_thread_node[] = {
66774-	{NAME("activity_callback"),
66775-		CTL(experimental_thread_activity_callback)}
66776-};
66777-
66778-static const ctl_named_node_t experimental_utilization_node[] = {
66779-	{NAME("query"),		CTL(experimental_utilization_query)},
66780-	{NAME("batch_query"),	CTL(experimental_utilization_batch_query)}
66781-};
66782-
66783-static const ctl_named_node_t experimental_arenas_i_node[] = {
66784-	{NAME("pactivep"),	CTL(experimental_arenas_i_pactivep)}
66785-};
66786-static const ctl_named_node_t super_experimental_arenas_i_node[] = {
66787-	{NAME(""),		CHILD(named, experimental_arenas_i)}
66788-};
66789-
66790-static const ctl_indexed_node_t experimental_arenas_node[] = {
66791-	{INDEX(experimental_arenas_i)}
66792-};
66793-
66794-static const ctl_named_node_t experimental_prof_recent_node[] = {
66795-	{NAME("alloc_max"),	CTL(experimental_prof_recent_alloc_max)},
66796-	{NAME("alloc_dump"),	CTL(experimental_prof_recent_alloc_dump)},
66797-};
66798-
66799-static const ctl_named_node_t experimental_node[] = {
66800-	{NAME("hooks"),		CHILD(named, experimental_hooks)},
66801-	{NAME("utilization"),	CHILD(named, experimental_utilization)},
66802-	{NAME("arenas"),	CHILD(indexed, experimental_arenas)},
66803-	{NAME("arenas_create_ext"),	CTL(experimental_arenas_create_ext)},
66804-	{NAME("prof_recent"),	CHILD(named, experimental_prof_recent)},
66805-	{NAME("batch_alloc"),	CTL(experimental_batch_alloc)},
66806-	{NAME("thread"),	CHILD(named, experimental_thread)}
66807-};
66808-
66809-static const ctl_named_node_t	root_node[] = {
66810-	{NAME("version"),	CTL(version)},
66811-	{NAME("epoch"),		CTL(epoch)},
66812-	{NAME("background_thread"),	CTL(background_thread)},
66813-	{NAME("max_background_threads"),	CTL(max_background_threads)},
66814-	{NAME("thread"),	CHILD(named, thread)},
66815-	{NAME("config"),	CHILD(named, config)},
66816-	{NAME("opt"),		CHILD(named, opt)},
66817-	{NAME("tcache"),	CHILD(named, tcache)},
66818-	{NAME("arena"),		CHILD(indexed, arena)},
66819-	{NAME("arenas"),	CHILD(named, arenas)},
66820-	{NAME("prof"),		CHILD(named, prof)},
66821-	{NAME("stats"),		CHILD(named, stats)},
66822-	{NAME("experimental"),	CHILD(named, experimental)}
66823-};
66824-static const ctl_named_node_t super_root_node[] = {
66825-	{NAME(""),		CHILD(named, root)}
66826-};
66827-
66828-#undef NAME
66829-#undef CHILD
66830-#undef CTL
66831-#undef INDEX
66832-
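/*
 * A small usage sketch for the tree defined above, written against the
 * public mallctl() entry point declared in <jemalloc/jemalloc.h> (je_mallctl
 * in prefixed builds); the example_* helper is illustrative only.  A dotted
 * name walks the tables from super_root_node: "arena" selects the indexed
 * arena_node, the numeric component is consumed by arena_i_index, and
 * "purge" resolves to arena_i_purge_ctl.  Per the mallctl documentation for
 * "arena.<i>.purge", using MALLCTL_ARENAS_ALL as the index addresses every
 * arena.
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
example_purge_all_arenas(void) {
	char name[64];

	/* Build "arena.<i>.purge" with <i> = MALLCTL_ARENAS_ALL. */
	snprintf(name, sizeof(name), "arena.%u.purge",
	    (unsigned)MALLCTL_ARENAS_ALL);
	/* Pure trigger: nothing to read back, nothing to write. */
	mallctl(name, NULL, NULL, NULL, 0);
}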
66833-/******************************************************************************/
66834-
66835-/*
66836- * Adds *src into *dst non-atomically.  This is safe, since everything is
66837- * synchronized by the ctl mutex.
66838- */
66839-static void
66840-ctl_accum_locked_u64(locked_u64_t *dst, locked_u64_t *src) {
66841-	locked_inc_u64_unsynchronized(dst,
66842-	    locked_read_u64_unsynchronized(src));
66843-}
66844-
66845-static void
66846-ctl_accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
66847-	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
66848-	size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
66849-	atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
66850-}
66851-
66852-/******************************************************************************/
66853-
66854-static unsigned
66855-arenas_i2a_impl(size_t i, bool compat, bool validate) {
66856-	unsigned a;
66857-
66858-	switch (i) {
66859-	case MALLCTL_ARENAS_ALL:
66860-		a = 0;
66861-		break;
66862-	case MALLCTL_ARENAS_DESTROYED:
66863-		a = 1;
66864-		break;
66865-	default:
66866-		if (compat && i == ctl_arenas->narenas) {
66867-			/*
66868-			 * Provide deprecated backward compatibility for
66869-			 * accessing the merged stats at index narenas rather
66870-			 * than via MALLCTL_ARENAS_ALL.  This is scheduled for
66871-			 * removal in 6.0.0.
66872-			 */
66873-			a = 0;
66874-		} else if (validate && i >= ctl_arenas->narenas) {
66875-			a = UINT_MAX;
66876-		} else {
66877-			/*
66878-			 * This function should never be called for an index
66879-			 * more than one past the range of indices that have
66880-			 * initialized ctl data.
66881-			 */
66882-			assert(i < ctl_arenas->narenas || (!validate && i ==
66883-			    ctl_arenas->narenas));
66884-			a = (unsigned)i + 2;
66885-		}
66886-		break;
66887-	}
66888-
66889-	return a;
66890-}
66891-
66892-static unsigned
66893-arenas_i2a(size_t i) {
66894-	return arenas_i2a_impl(i, true, false);
66895-}
66896-
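/*
 * The slot mapping implemented by arenas_i2a_impl(), illustrated: slot 0 of
 * ctl_arenas->arenas[] holds the merged (MALLCTL_ARENAS_ALL) summary, slot 1
 * holds the MALLCTL_ARENAS_DESTROYED summary, and ordinary arena i lands at
 * slot i + 2.  The deprecated alias at index narenas maps back to the merged
 * summary when compat is requested:
 *
 *     arenas_i2a(MALLCTL_ARENAS_ALL)       == 0
 *     arenas_i2a(MALLCTL_ARENAS_DESTROYED) == 1
 *     arenas_i2a(0)                        == 2
 *     arenas_i2a(1)                        == 3
 *     arenas_i2a(ctl_arenas->narenas)      == 0   (compat alias)
 *
 * With validate set, any other out-of-range index yields UINT_MAX.
 */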
66897-static ctl_arena_t *
66898-arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) {
66899-	ctl_arena_t *ret;
66900-
66901-	assert(!compat || !init);
66902-
66903-	ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)];
66904-	if (init && ret == NULL) {
66905-		if (config_stats) {
66906-			struct container_s {
66907-				ctl_arena_t		ctl_arena;
66908-				ctl_arena_stats_t	astats;
66909-			};
66910-			struct container_s *cont =
66911-			    (struct container_s *)base_alloc(tsd_tsdn(tsd),
66912-			    b0get(), sizeof(struct container_s), QUANTUM);
66913-			if (cont == NULL) {
66914-				return NULL;
66915-			}
66916-			ret = &cont->ctl_arena;
66917-			ret->astats = &cont->astats;
66918-		} else {
66919-			ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(),
66920-			    sizeof(ctl_arena_t), QUANTUM);
66921-			if (ret == NULL) {
66922-				return NULL;
66923-			}
66924-		}
66925-		ret->arena_ind = (unsigned)i;
66926-		ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret;
66927-	}
66928-
66929-	assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i));
66930-	return ret;
66931-}
66932-
66933-static ctl_arena_t *
66934-arenas_i(size_t i) {
66935-	ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false);
66936-	assert(ret != NULL);
66937-	return ret;
66938-}
66939-
66940-static void
66941-ctl_arena_clear(ctl_arena_t *ctl_arena) {
66942-	ctl_arena->nthreads = 0;
66943-	ctl_arena->dss = dss_prec_names[dss_prec_limit];
66944-	ctl_arena->dirty_decay_ms = -1;
66945-	ctl_arena->muzzy_decay_ms = -1;
66946-	ctl_arena->pactive = 0;
66947-	ctl_arena->pdirty = 0;
66948-	ctl_arena->pmuzzy = 0;
66949-	if (config_stats) {
66950-		memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t));
66951-		ctl_arena->astats->allocated_small = 0;
66952-		ctl_arena->astats->nmalloc_small = 0;
66953-		ctl_arena->astats->ndalloc_small = 0;
66954-		ctl_arena->astats->nrequests_small = 0;
66955-		ctl_arena->astats->nfills_small = 0;
66956-		ctl_arena->astats->nflushes_small = 0;
66957-		memset(ctl_arena->astats->bstats, 0, SC_NBINS *
66958-		    sizeof(bin_stats_data_t));
66959-		memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) *
66960-		    sizeof(arena_stats_large_t));
66961-		memset(ctl_arena->astats->estats, 0, SC_NPSIZES *
66962-		    sizeof(pac_estats_t));
66963-		memset(&ctl_arena->astats->hpastats, 0,
66964-		    sizeof(hpa_shard_stats_t));
66965-		memset(&ctl_arena->astats->secstats, 0,
66966-		    sizeof(sec_stats_t));
66967-	}
66968-}
66969-
66970-static void
66971-ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
66972-	unsigned i;
66973-
66974-	if (config_stats) {
66975-		arena_stats_merge(tsdn, arena, &ctl_arena->nthreads,
66976-		    &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
66977-		    &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
66978-		    &ctl_arena->pdirty, &ctl_arena->pmuzzy,
66979-		    &ctl_arena->astats->astats, ctl_arena->astats->bstats,
66980-		    ctl_arena->astats->lstats, ctl_arena->astats->estats,
66981-		    &ctl_arena->astats->hpastats, &ctl_arena->astats->secstats);
66982-
66983-		for (i = 0; i < SC_NBINS; i++) {
66984-			bin_stats_t *bstats =
66985-			    &ctl_arena->astats->bstats[i].stats_data;
66986-			ctl_arena->astats->allocated_small += bstats->curregs *
66987-			    sz_index2size(i);
66988-			ctl_arena->astats->nmalloc_small += bstats->nmalloc;
66989-			ctl_arena->astats->ndalloc_small += bstats->ndalloc;
66990-			ctl_arena->astats->nrequests_small += bstats->nrequests;
66991-			ctl_arena->astats->nfills_small += bstats->nfills;
66992-			ctl_arena->astats->nflushes_small += bstats->nflushes;
66993-		}
66994-	} else {
66995-		arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
66996-		    &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
66997-		    &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
66998-		    &ctl_arena->pdirty, &ctl_arena->pmuzzy);
66999-	}
67000-}
67001-
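/*
 * Worked example for the small-size accounting in the loop above:
 * allocated_small is the sum over bins of curregs times the bin's region
 * size (via sz_index2size()).  With, say, 3 live regions in a 16-byte bin
 * and 2 live regions in a 32-byte bin, the merge adds 3 * 16 + 2 * 32 = 112
 * bytes to allocated_small, while nmalloc/ndalloc/nrequests/nfills/nflushes
 * are summed directly.
 */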
67002-static void
67003-ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
67004-    bool destroyed) {
67005-	unsigned i;
67006-
67007-	if (!destroyed) {
67008-		ctl_sdarena->nthreads += ctl_arena->nthreads;
67009-		ctl_sdarena->pactive += ctl_arena->pactive;
67010-		ctl_sdarena->pdirty += ctl_arena->pdirty;
67011-		ctl_sdarena->pmuzzy += ctl_arena->pmuzzy;
67012-	} else {
67013-		assert(ctl_arena->nthreads == 0);
67014-		assert(ctl_arena->pactive == 0);
67015-		assert(ctl_arena->pdirty == 0);
67016-		assert(ctl_arena->pmuzzy == 0);
67017-	}
67018-
67019-	if (config_stats) {
67020-		ctl_arena_stats_t *sdstats = ctl_sdarena->astats;
67021-		ctl_arena_stats_t *astats = ctl_arena->astats;
67022-
67023-		if (!destroyed) {
67024-			sdstats->astats.mapped += astats->astats.mapped;
67025-			sdstats->astats.pa_shard_stats.pac_stats.retained
67026-			    += astats->astats.pa_shard_stats.pac_stats.retained;
67027-			sdstats->astats.pa_shard_stats.edata_avail
67028-			    += astats->astats.pa_shard_stats.edata_avail;
67029-		}
67030-
67031-		ctl_accum_locked_u64(
67032-		    &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge,
67033-		    &astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge);
67034-		ctl_accum_locked_u64(
67035-		    &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise,
67036-		    &astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise);
67037-		ctl_accum_locked_u64(
67038-		    &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.purged,
67039-		    &astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged);
67040-
67041-		ctl_accum_locked_u64(
67042-		    &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge,
67043-		    &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge);
67044-		ctl_accum_locked_u64(
67045-		    &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise,
67046-		    &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise);
67047-		ctl_accum_locked_u64(
67048-		    &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged,
67049-		    &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged);
67050-
67051-#define OP(mtx) malloc_mutex_prof_merge(				\
67052-		    &(sdstats->astats.mutex_prof_data[			\
67053-		        arena_prof_mutex_##mtx]),			\
67054-		    &(astats->astats.mutex_prof_data[			\
67055-		        arena_prof_mutex_##mtx]));
67056-MUTEX_PROF_ARENA_MUTEXES
67057-#undef OP
67058-		if (!destroyed) {
67059-			sdstats->astats.base += astats->astats.base;
67060-			sdstats->astats.resident += astats->astats.resident;
67061-			sdstats->astats.metadata_thp += astats->astats.metadata_thp;
67062-			ctl_accum_atomic_zu(&sdstats->astats.internal,
67063-			    &astats->astats.internal);
67064-		} else {
67065-			assert(atomic_load_zu(
67066-			    &astats->astats.internal, ATOMIC_RELAXED) == 0);
67067-		}
67068-
67069-		if (!destroyed) {
67070-			sdstats->allocated_small += astats->allocated_small;
67071-		} else {
67072-			assert(astats->allocated_small == 0);
67073-		}
67074-		sdstats->nmalloc_small += astats->nmalloc_small;
67075-		sdstats->ndalloc_small += astats->ndalloc_small;
67076-		sdstats->nrequests_small += astats->nrequests_small;
67077-		sdstats->nfills_small += astats->nfills_small;
67078-		sdstats->nflushes_small += astats->nflushes_small;
67079-
67080-		if (!destroyed) {
67081-			sdstats->astats.allocated_large +=
67082-			    astats->astats.allocated_large;
67083-		} else {
67084-			assert(astats->astats.allocated_large == 0);
67085-		}
67086-		sdstats->astats.nmalloc_large += astats->astats.nmalloc_large;
67087-		sdstats->astats.ndalloc_large += astats->astats.ndalloc_large;
67088-		sdstats->astats.nrequests_large
67089-		    += astats->astats.nrequests_large;
67090-		sdstats->astats.nflushes_large += astats->astats.nflushes_large;
67091-		ctl_accum_atomic_zu(
67092-		    &sdstats->astats.pa_shard_stats.pac_stats.abandoned_vm,
67093-		    &astats->astats.pa_shard_stats.pac_stats.abandoned_vm);
67094-
67095-		sdstats->astats.tcache_bytes += astats->astats.tcache_bytes;
67096-		sdstats->astats.tcache_stashed_bytes +=
67097-		    astats->astats.tcache_stashed_bytes;
67098-
67099-		if (ctl_arena->arena_ind == 0) {
67100-			sdstats->astats.uptime = astats->astats.uptime;
67101-		}
67102-
67103-		/* Merge bin stats. */
67104-		for (i = 0; i < SC_NBINS; i++) {
67105-			bin_stats_t *bstats = &astats->bstats[i].stats_data;
67106-			bin_stats_t *merged = &sdstats->bstats[i].stats_data;
67107-			merged->nmalloc += bstats->nmalloc;
67108-			merged->ndalloc += bstats->ndalloc;
67109-			merged->nrequests += bstats->nrequests;
67110-			if (!destroyed) {
67111-				merged->curregs += bstats->curregs;
67112-			} else {
67113-				assert(bstats->curregs == 0);
67114-			}
67115-			merged->nfills += bstats->nfills;
67116-			merged->nflushes += bstats->nflushes;
67117-			merged->nslabs += bstats->nslabs;
67118-			merged->reslabs += bstats->reslabs;
67119-			if (!destroyed) {
67120-				merged->curslabs += bstats->curslabs;
67121-				merged->nonfull_slabs += bstats->nonfull_slabs;
67122-			} else {
67123-				assert(bstats->curslabs == 0);
67124-				assert(bstats->nonfull_slabs == 0);
67125-			}
67126-			malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
67127-			    &astats->bstats[i].mutex_data);
67128-		}
67129-
67130-		/* Merge stats for large allocations. */
67131-		for (i = 0; i < SC_NSIZES - SC_NBINS; i++) {
67132-			ctl_accum_locked_u64(&sdstats->lstats[i].nmalloc,
67133-			    &astats->lstats[i].nmalloc);
67134-			ctl_accum_locked_u64(&sdstats->lstats[i].ndalloc,
67135-			    &astats->lstats[i].ndalloc);
67136-			ctl_accum_locked_u64(&sdstats->lstats[i].nrequests,
67137-			    &astats->lstats[i].nrequests);
67138-			if (!destroyed) {
67139-				sdstats->lstats[i].curlextents +=
67140-				    astats->lstats[i].curlextents;
67141-			} else {
67142-				assert(astats->lstats[i].curlextents == 0);
67143-			}
67144-		}
67145-
67146-		/* Merge extents stats. */
67147-		for (i = 0; i < SC_NPSIZES; i++) {
67148-			sdstats->estats[i].ndirty += astats->estats[i].ndirty;
67149-			sdstats->estats[i].nmuzzy += astats->estats[i].nmuzzy;
67150-			sdstats->estats[i].nretained
67151-			    += astats->estats[i].nretained;
67152-			sdstats->estats[i].dirty_bytes
67153-			    += astats->estats[i].dirty_bytes;
67154-			sdstats->estats[i].muzzy_bytes
67155-			    += astats->estats[i].muzzy_bytes;
67156-			sdstats->estats[i].retained_bytes
67157-			    += astats->estats[i].retained_bytes;
67158-		}
67159-
67160-		/* Merge HPA stats. */
67161-		hpa_shard_stats_accum(&sdstats->hpastats, &astats->hpastats);
67162-		sec_stats_accum(&sdstats->secstats, &astats->secstats);
67163-	}
67164-}
67165-
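/*
 * The merge above is what backs the MALLCTL_ARENAS_ALL summary, so a caller
 * can read the merged value by substituting that constant for the arena
 * index component of a "stats.arenas.<i>.*" MIB.  A sketch using the public
 * API (unprefixed symbols assumed; the example_* helper is illustrative
 * only, and mib[2] is the arena-index component for this particular name):
 */
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static size_t
example_merged_small_allocated(void) {
	size_t mib[8];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	size_t allocated;
	size_t sz = sizeof(allocated);

	if (mallctlnametomib("stats.arenas.0.small.allocated", mib,
	    &miblen) != 0) {
		return 0;
	}
	/* Swap the literal arena index for the merged-stats pseudo-index. */
	mib[2] = MALLCTL_ARENAS_ALL;
	if (mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0) != 0) {
		return 0;
	}
	return allocated;
}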
67166-static void
67167-ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
67168-    unsigned i, bool destroyed) {
67169-	ctl_arena_t *ctl_arena = arenas_i(i);
67170-
67171-	ctl_arena_clear(ctl_arena);
67172-	ctl_arena_stats_amerge(tsdn, ctl_arena, arena);
67173-	/* Merge into sum stats as well. */
67174-	ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed);
67175-}
67176-
67177-static unsigned
67178-ctl_arena_init(tsd_t *tsd, const arena_config_t *config) {
67179-	unsigned arena_ind;
67180-	ctl_arena_t *ctl_arena;
67181-
67182-	if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) !=
67183-	    NULL) {
67184-		ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
67185-		arena_ind = ctl_arena->arena_ind;
67186-	} else {
67187-		arena_ind = ctl_arenas->narenas;
67188-	}
67189-
67190-	/* Trigger stats allocation. */
67191-	if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) {
67192-		return UINT_MAX;
67193-	}
67194-
67195-	/* Initialize new arena. */
67196-	if (arena_init(tsd_tsdn(tsd), arena_ind, config) == NULL) {
67197-		return UINT_MAX;
67198-	}
67199-
67200-	if (arena_ind == ctl_arenas->narenas) {
67201-		ctl_arenas->narenas++;
67202-	}
67203-
67204-	return arena_ind;
67205-}
67206-
67207-static void
67208-ctl_background_thread_stats_read(tsdn_t *tsdn) {
67209-	background_thread_stats_t *stats = &ctl_stats->background_thread;
67210-	if (!have_background_thread ||
67211-	    background_thread_stats_read(tsdn, stats)) {
67212-		memset(stats, 0, sizeof(background_thread_stats_t));
67213-		nstime_init_zero(&stats->run_interval);
67214-	}
67215-	malloc_mutex_prof_copy(
67216-	    &ctl_stats->mutex_prof_data[global_prof_mutex_max_per_bg_thd],
67217-	    &stats->max_counter_per_bg_thd);
67218-}
67219-
67220-static void
67221-ctl_refresh(tsdn_t *tsdn) {
67222-	unsigned i;
67223-	ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
67224-	VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);
67225-
67226-	/*
67227-	 * Clear the sum stats, since ctl_arena_refresh() will merge the
67228-	 * per-arena stats into them.
67229-	 */
67230-	ctl_arena_clear(ctl_sarena);
67231-
67232-	for (i = 0; i < ctl_arenas->narenas; i++) {
67233-		tarenas[i] = arena_get(tsdn, i, false);
67234-	}
67235-
67236-	for (i = 0; i < ctl_arenas->narenas; i++) {
67237-		ctl_arena_t *ctl_arena = arenas_i(i);
67238-		bool initialized = (tarenas[i] != NULL);
67239-
67240-		ctl_arena->initialized = initialized;
67241-		if (initialized) {
67242-			ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i,
67243-			    false);
67244-		}
67245-	}
67246-
67247-	if (config_stats) {
67248-		ctl_stats->allocated = ctl_sarena->astats->allocated_small +
67249-		    ctl_sarena->astats->astats.allocated_large;
67250-		ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
67251-		ctl_stats->metadata = ctl_sarena->astats->astats.base +
67252-		    atomic_load_zu(&ctl_sarena->astats->astats.internal,
67253-			ATOMIC_RELAXED);
67254-		ctl_stats->resident = ctl_sarena->astats->astats.resident;
67255-		ctl_stats->metadata_thp =
67256-		    ctl_sarena->astats->astats.metadata_thp;
67257-		ctl_stats->mapped = ctl_sarena->astats->astats.mapped;
67258-		ctl_stats->retained = ctl_sarena->astats->astats
67259-		    .pa_shard_stats.pac_stats.retained;
67260-
67261-		ctl_background_thread_stats_read(tsdn);
67262-
67263-#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx)				\
67264-    malloc_mutex_lock(tsdn, &mtx);					\
67265-    malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx);	\
67266-    malloc_mutex_unlock(tsdn, &mtx);
67267-
67268-		if (config_prof && opt_prof) {
67269-			READ_GLOBAL_MUTEX_PROF_DATA(
67270-			    global_prof_mutex_prof, bt2gctx_mtx);
67271-			READ_GLOBAL_MUTEX_PROF_DATA(
67272-			    global_prof_mutex_prof_thds_data, tdatas_mtx);
67273-			READ_GLOBAL_MUTEX_PROF_DATA(
67274-			    global_prof_mutex_prof_dump, prof_dump_mtx);
67275-			READ_GLOBAL_MUTEX_PROF_DATA(
67276-			    global_prof_mutex_prof_recent_alloc,
67277-			    prof_recent_alloc_mtx);
67278-			READ_GLOBAL_MUTEX_PROF_DATA(
67279-			    global_prof_mutex_prof_recent_dump,
67280-			    prof_recent_dump_mtx);
67281-			READ_GLOBAL_MUTEX_PROF_DATA(
67282-			    global_prof_mutex_prof_stats, prof_stats_mtx);
67283-		}
67284-		if (have_background_thread) {
67285-			READ_GLOBAL_MUTEX_PROF_DATA(
67286-			    global_prof_mutex_background_thread,
67287-			    background_thread_lock);
67288-		} else {
67289-			memset(&ctl_stats->mutex_prof_data[
67290-			    global_prof_mutex_background_thread], 0,
67291-			    sizeof(mutex_prof_data_t));
67292-		}
67293-		/* We own ctl mutex already. */
67294-		malloc_mutex_prof_read(tsdn,
67295-		    &ctl_stats->mutex_prof_data[global_prof_mutex_ctl],
67296-		    &ctl_mtx);
67297-#undef READ_GLOBAL_MUTEX_PROF_DATA
67298-	}
67299-	ctl_arenas->epoch++;
67300-}
67301-
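/*
 * ctl_refresh() repopulates the cached snapshot that the "stats.*" handlers
 * report, and it only runs when the "epoch" control is exercised (the epoch
 * handler is assumed to call ctl_refresh(), as in upstream jemalloc).  Hence
 * the usual read pattern, sketched with the public unprefixed API and an
 * illustrative example_* helper:
 */
#include <stdint.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static size_t
example_fresh_stats_allocated(void) {
	uint64_t epoch = 1;
	size_t epoch_sz = sizeof(epoch);
	size_t allocated;
	size_t sz = sizeof(allocated);

	/* Bump the epoch so the subsequent stats read sees fresh data. */
	mallctl("epoch", &epoch, &epoch_sz, &epoch, sizeof(epoch));

	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0) {
		return 0;
	}
	return allocated;
}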
67302-static bool
67303-ctl_init(tsd_t *tsd) {
67304-	bool ret;
67305-	tsdn_t *tsdn = tsd_tsdn(tsd);
67306-
67307-	malloc_mutex_lock(tsdn, &ctl_mtx);
67308-	if (!ctl_initialized) {
67309-		ctl_arena_t *ctl_sarena, *ctl_darena;
67310-		unsigned i;
67311-
67312-		/*
67313-		 * Allocate demand-zeroed space for pointers to the full
67314-		 * range of supported arena indices.
67315-		 */
67316-		if (ctl_arenas == NULL) {
67317-			ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn,
67318-			    b0get(), sizeof(ctl_arenas_t), QUANTUM);
67319-			if (ctl_arenas == NULL) {
67320-				ret = true;
67321-				goto label_return;
67322-			}
67323-		}
67324-
67325-		if (config_stats && ctl_stats == NULL) {
67326-			ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(),
67327-			    sizeof(ctl_stats_t), QUANTUM);
67328-			if (ctl_stats == NULL) {
67329-				ret = true;
67330-				goto label_return;
67331-			}
67332-		}
67333-
67334-		/*
67335-		 * Allocate space for the current full range of arenas
67336-		 * here rather than doing it lazily elsewhere, in order
67337-		 * to limit when OOM-caused errors can occur.
67338-		 */
67339-		if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false,
67340-		    true)) == NULL) {
67341-			ret = true;
67342-			goto label_return;
67343-		}
67344-		ctl_sarena->initialized = true;
67345-
67346-		if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED,
67347-		    false, true)) == NULL) {
67348-			ret = true;
67349-			goto label_return;
67350-		}
67351-		ctl_arena_clear(ctl_darena);
67352-		/*
67353-		 * Don't toggle ctl_darena to initialized until an arena is
67354-		 * actually destroyed, so that arena.<i>.initialized can be used
67355-		 * to query whether the stats are relevant.
67356-		 */
67357-
67358-		ctl_arenas->narenas = narenas_total_get();
67359-		for (i = 0; i < ctl_arenas->narenas; i++) {
67360-			if (arenas_i_impl(tsd, i, false, true) == NULL) {
67361-				ret = true;
67362-				goto label_return;
67363-			}
67364-		}
67365-
67366-		ql_new(&ctl_arenas->destroyed);
67367-		ctl_refresh(tsdn);
67368-
67369-		ctl_initialized = true;
67370-	}
67371-
67372-	ret = false;
67373-label_return:
67374-	malloc_mutex_unlock(tsdn, &ctl_mtx);
67375-	return ret;
67376-}
67377-
67378-static int
67379-ctl_lookup(tsdn_t *tsdn, const ctl_named_node_t *starting_node,
67380-    const char *name, const ctl_named_node_t **ending_nodep, size_t *mibp,
67381-    size_t *depthp) {
67382-	int ret;
67383-	const char *elm, *tdot, *dot;
67384-	size_t elen, i, j;
67385-	const ctl_named_node_t *node;
67386-
67387-	elm = name;
67388-	/* Equivalent to strchrnul(). */
67389-	dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
67390-	elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
67391-	if (elen == 0) {
67392-		ret = ENOENT;
67393-		goto label_return;
67394-	}
67395-	node = starting_node;
67396-	for (i = 0; i < *depthp; i++) {
67397-		assert(node);
67398-		assert(node->nchildren > 0);
67399-		if (ctl_named_node(node->children) != NULL) {
67400-			const ctl_named_node_t *pnode = node;
67401-
67402-			/* Children are named. */
67403-			for (j = 0; j < node->nchildren; j++) {
67404-				const ctl_named_node_t *child =
67405-				    ctl_named_children(node, j);
67406-				if (strlen(child->name) == elen &&
67407-				    strncmp(elm, child->name, elen) == 0) {
67408-					node = child;
67409-					mibp[i] = j;
67410-					break;
67411-				}
67412-			}
67413-			if (node == pnode) {
67414-				ret = ENOENT;
67415-				goto label_return;
67416-			}
67417-		} else {
67418-			uintmax_t index;
67419-			const ctl_indexed_node_t *inode;
67420-
67421-			/* Children are indexed. */
67422-			index = malloc_strtoumax(elm, NULL, 10);
67423-			if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
67424-				ret = ENOENT;
67425-				goto label_return;
67426-			}
67427-
67428-			inode = ctl_indexed_node(node->children);
67429-			node = inode->index(tsdn, mibp, *depthp, (size_t)index);
67430-			if (node == NULL) {
67431-				ret = ENOENT;
67432-				goto label_return;
67433-			}
67434-
67435-			mibp[i] = (size_t)index;
67436-		}
67437-
67438-		/* Reached the end? */
67439-		if (node->ctl != NULL || *dot == '\0') {
67440-			/* Terminal node. */
67441-			if (*dot != '\0') {
67442-				/*
67443-				 * The name contains more elements than are
67444-				 * in this path through the tree.
67445-				 */
67446-				ret = ENOENT;
67447-				goto label_return;
67448-			}
67449-			/* Complete lookup successful. */
67450-			*depthp = i + 1;
67451-			break;
67452-		}
67453-
67454-		/* Update elm. */
67455-		elm = &dot[1];
67456-		dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
67457-		    strchr(elm, '\0');
67458-		elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
67459-	}
67460-	if (ending_nodep != NULL) {
67461-		*ending_nodep = node;
67462-	}
67463-
67464-	ret = 0;
67465-label_return:
67466-	return ret;
67467-}
67468-
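/*
 * A worked lookup through the loop above, for the name "arenas.bin.4.size"
 * starting at super_root_node (each MIB component is the position within
 * the parent node's child array):
 *
 *   1. "arenas" matches a named child of root_node         -> mib[0] set
 *   2. "bin" matches a named child of arenas_node; its
 *      children are the indexed arenas_bin_node            -> mib[1] set
 *   3. "4" is parsed by malloc_strtoumax() and handed to
 *      arenas_bin_i_index(), which is expected to validate
 *      the bin index and return the node for that bin      -> mib[2] = 4
 *   4. "size" matches a named child with a non-NULL ctl
 *      handler, so the walk terminates                     -> mib[3] set,
 *                                                              *depthp = 4
 *
 * A trailing element after a terminal node, an unknown name component, or
 * an out-of-range index all fall into the ENOENT paths above.
 */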
67469-int
67470-ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
67471-    void *newp, size_t newlen) {
67472-	int ret;
67473-	size_t depth;
67474-	size_t mib[CTL_MAX_DEPTH];
67475-	const ctl_named_node_t *node;
67476-
67477-	if (!ctl_initialized && ctl_init(tsd)) {
67478-		ret = EAGAIN;
67479-		goto label_return;
67480-	}
67481-
67482-	depth = CTL_MAX_DEPTH;
67483-	ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, &node, mib,
67484-	    &depth);
67485-	if (ret != 0) {
67486-		goto label_return;
67487-	}
67488-
67489-	if (node != NULL && node->ctl) {
67490-		ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
67491-	} else {
67492-		/* The name refers to a partial path through the ctl tree. */
67493-		ret = ENOENT;
67494-	}
67495-
67496-label_return:
67497-	return(ret);
67498-}
67499-
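/*
 * ctl_byname() is the backend for by-name control operations; the exported
 * mallctl() is assumed to forward here once initialization has completed
 * (je_mallctl in prefixed builds).  The classic smoke test, sketched with an
 * illustrative example_* helper, reads the "version" leaf declared at the
 * top of this file:
 */
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static const char *
example_read_version(void) {
	const char *version;
	size_t sz = sizeof(version);

	if (mallctl("version", &version, &sz, NULL, 0) != 0) {
		return NULL;
	}
	/* The returned string is owned by jemalloc; do not modify or free. */
	return version;
}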
67500-int
67501-ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
67502-	int ret;
67503-
67504-	if (!ctl_initialized && ctl_init(tsd)) {
67505-		ret = EAGAIN;
67506-		goto label_return;
67507-	}
67508-
67509-	ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, NULL, mibp,
67510-	    miblenp);
67511-label_return:
67512-	return(ret);
67513-}
67514-
67515-static int
67516-ctl_lookupbymib(tsdn_t *tsdn, const ctl_named_node_t **ending_nodep,
67517-    const size_t *mib, size_t miblen) {
67518-	int ret;
67519-
67520-	const ctl_named_node_t *node = super_root_node;
67521-	for (size_t i = 0; i < miblen; i++) {
67522-		assert(node);
67523-		assert(node->nchildren > 0);
67524-		if (ctl_named_node(node->children) != NULL) {
67525-			/* Children are named. */
67526-			if (node->nchildren <= mib[i]) {
67527-				ret = ENOENT;
67528-				goto label_return;
67529-			}
67530-			node = ctl_named_children(node, mib[i]);
67531-		} else {
67532-			const ctl_indexed_node_t *inode;
67533-
67534-			/* Indexed element. */
67535-			inode = ctl_indexed_node(node->children);
67536-			node = inode->index(tsdn, mib, miblen, mib[i]);
67537-			if (node == NULL) {
67538-				ret = ENOENT;
67539-				goto label_return;
67540-			}
67541-		}
67542-	}
67543-	assert(ending_nodep != NULL);
67544-	*ending_nodep = node;
67545-	ret = 0;
67546-
67547-label_return:
67548-	return ret;
67549-}
67550-
67551-int
67552-ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
67553-    size_t *oldlenp, void *newp, size_t newlen) {
67554-	int ret;
67555-	const ctl_named_node_t *node;
67556-
67557-	if (!ctl_initialized && ctl_init(tsd)) {
67558-		ret = EAGAIN;
67559-		goto label_return;
67560-	}
67561-
67562-	ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
67563-	if (ret != 0) {
67564-		goto label_return;
67565-	}
67566-
67567-	/* Call the ctl function. */
67568-	if (node && node->ctl) {
67569-		ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
67570-	} else {
67571-		/* Partial MIB. */
67572-		ret = ENOENT;
67573-	}
67574-
67575-label_return:
67576-	return ret;
67577-}
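/*
 * Usage sketch (illustrative only): the nametomib/bymib split exists so that
 * hot paths translate a name once and then reuse the MIB.  A minimal example
 * against the public mallctlnametomib()/mallctlbymib() wrappers, patching the
 * index component of "arenas.bin.<i>.size":
 */
#include <jemalloc/jemalloc.h>

static size_t
bin_size(size_t bin_ind) {
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	size_t size = 0, sz = sizeof(size);

	/* Translate the name once... */
	if (mallctlnametomib("arenas.bin.0.size", mib, &miblen) != 0) {
		return 0;
	}
	/* ...then reuse the MIB, substituting the index of interest. */
	mib[2] = bin_ind;
	(void)mallctlbymib(mib, miblen, &size, &sz, NULL, 0);
	return size;
}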
67578-
67579-int
67580-ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
67581-    size_t *miblenp) {
67582-	int ret;
67583-	const ctl_named_node_t *node;
67584-
67585-	if (!ctl_initialized && ctl_init(tsd)) {
67586-		ret = EAGAIN;
67587-		goto label_return;
67588-	}
67589-
67590-	ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
67591-	if (ret != 0) {
67592-		goto label_return;
67593-	}
67594-	if (node == NULL || node->ctl != NULL) {
67595-		ret = ENOENT;
67596-		goto label_return;
67597-	}
67598-
67599-	assert(miblenp != NULL);
67600-	assert(*miblenp >= miblen);
67601-	*miblenp -= miblen;
67602-	ret = ctl_lookup(tsd_tsdn(tsd), node, name, NULL, mib + miblen,
67603-	    miblenp);
67604-	*miblenp += miblen;
67605-label_return:
67606-	return ret;
67607-}
67608-
67609-int
67610-ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
67611-    size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
67612-	int ret;
67613-	const ctl_named_node_t *node;
67614-
67615-	if (!ctl_initialized && ctl_init(tsd)) {
67616-		ret = EAGAIN;
67617-		goto label_return;
67618-	}
67619-
67620-	ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
67621-	if (ret != 0) {
67622-		goto label_return;
67623-	}
67624-	if (node == NULL || node->ctl != NULL) {
67625-		ret = ENOENT;
67626-		goto label_return;
67627-	}
67628-
67629-	assert(miblenp != NULL);
67630-	assert(*miblenp >= miblen);
67631-	*miblenp -= miblen;
67632-	/*
67633-	 * The same variable supplies the starting node and receives the ending node.
67634-	 */
67635-	ret = ctl_lookup(tsd_tsdn(tsd), node, name, &node, mib + miblen,
67636-	    miblenp);
67637-	*miblenp += miblen;
67638-	if (ret != 0) {
67639-		goto label_return;
67640-	}
67641-
67642-	if (node != NULL && node->ctl) {
67643-		ret = node->ctl(tsd, mib, *miblenp, oldp, oldlenp, newp,
67644-		    newlen);
67645-	} else {
67646-		/* The name refers to a partial path through the ctl tree. */
67647-		ret = ENOENT;
67648-	}
67649-
67650-label_return:
67651-	return ret;
67652-}
67653-
67654-bool
67655-ctl_boot(void) {
67656-	if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
67657-	    malloc_mutex_rank_exclusive)) {
67658-		return true;
67659-	}
67660-
67661-	ctl_initialized = false;
67662-
67663-	return false;
67664-}
67665-
67666-void
67667-ctl_prefork(tsdn_t *tsdn) {
67668-	malloc_mutex_prefork(tsdn, &ctl_mtx);
67669-}
67670-
67671-void
67672-ctl_postfork_parent(tsdn_t *tsdn) {
67673-	malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
67674-}
67675-
67676-void
67677-ctl_postfork_child(tsdn_t *tsdn) {
67678-	malloc_mutex_postfork_child(tsdn, &ctl_mtx);
67679-}
67680-
67681-void
67682-ctl_mtx_assert_held(tsdn_t *tsdn) {
67683-	malloc_mutex_assert_owner(tsdn, &ctl_mtx);
67684-}
67685-
67686-/******************************************************************************/
67687-/* *_ctl() functions. */
67688-
67689-#define READONLY()	do {						\
67690-	if (newp != NULL || newlen != 0) {				\
67691-		ret = EPERM;						\
67692-		goto label_return;					\
67693-	}								\
67694-} while (0)
67695-
67696-#define WRITEONLY()	do {						\
67697-	if (oldp != NULL || oldlenp != NULL) {				\
67698-		ret = EPERM;						\
67699-		goto label_return;					\
67700-	}								\
67701-} while (0)
67702-
67703-/* Can read or write, but not both. */
67704-#define READ_XOR_WRITE()	do {					\
67705-	if ((oldp != NULL && oldlenp != NULL) && (newp != NULL ||	\
67706-	    newlen != 0)) {						\
67707-		ret = EPERM;						\
67708-		goto label_return;					\
67709-	}								\
67710-} while (0)
67711-
67712-/* Can neither read nor write. */
67713-#define NEITHER_READ_NOR_WRITE()	do {				\
67714-	if (oldp != NULL || oldlenp != NULL || newp != NULL ||		\
67715-	    newlen != 0) {						\
67716-		ret = EPERM;						\
67717-		goto label_return;					\
67718-	}								\
67719-} while (0)
67720-
67721-/* Verify that the space provided is enough. */
67722-#define VERIFY_READ(t)	do {						\
67723-	if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(t)) {	\
67724-		if (oldlenp != NULL) *oldlenp = 0;			\
67725-		ret = EINVAL;						\
67726-		goto label_return;					\
67727-	}								\
67728-} while (0)
67729-
67730-#define READ(v, t)	do {						\
67731-	if (oldp != NULL && oldlenp != NULL) {				\
67732-		if (*oldlenp != sizeof(t)) {				\
67733-			size_t	copylen = (sizeof(t) <= *oldlenp)	\
67734-			    ? sizeof(t) : *oldlenp;			\
67735-			memcpy(oldp, (void *)&(v), copylen);		\
67736-			*oldlenp = copylen;				\
67737-			ret = EINVAL;					\
67738-			goto label_return;				\
67739-		}							\
67740-		*(t *)oldp = (v);					\
67741-	}								\
67742-} while (0)
67743-
67744-#define WRITE(v, t)	do {						\
67745-	if (newp != NULL) {						\
67746-		if (newlen != sizeof(t)) {				\
67747-			ret = EINVAL;					\
67748-			goto label_return;				\
67749-		}							\
67750-		(v) = *(t *)newp;					\
67751-	}								\
67752-} while (0)
67753-
67754-#define ASSURED_WRITE(v, t)	do {					\
67755-	if (newp == NULL || newlen != sizeof(t)) {			\
67756-		ret = EINVAL;						\
67757-		goto label_return;					\
67758-	}								\
67759-	(v) = *(t *)newp;						\
67760-} while (0)
67761-
67762-#define MIB_UNSIGNED(v, i) do {						\
67763-	if (mib[i] > UINT_MAX) {					\
67764-		ret = EFAULT;						\
67765-		goto label_return;					\
67766-	}								\
67767-	v = (unsigned)mib[i];						\
67768-} while (0)
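/*
 * Sketch of how a handler composes these macros (hypothetical control shown
 * purely for illustration; the generated *_ctl() functions below follow the
 * same shape): copy the new value in via WRITE(), copy the old value out via
 * READ(), and funnel every failure through label_return.  example_flag_get()
 * and example_flag_set() are made-up accessors.
 */
static int
example_flag_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	bool oldval, newval;

	oldval = newval = example_flag_get();	/* hypothetical accessor */
	WRITE(newval, bool);		/* EINVAL on newlen mismatch */
	if (newval != oldval) {
		example_flag_set(newval);	/* hypothetical setter */
	}
	READ(oldval, bool);	/* copies out only if oldp/oldlenp given */

	ret = 0;
label_return:
	return ret;
}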
67769-
67770-/*
67771- * There's a lot of code duplication in the following macros due to limitations
67772- * in how nested cpp macros are expanded.
67773- */
67774-#define CTL_RO_CLGEN(c, l, n, v, t)					\
67775-static int								\
67776-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
67777-    size_t *oldlenp, void *newp, size_t newlen) {			\
67778-	int ret;							\
67779-	t oldval;							\
67780-									\
67781-	if (!(c)) {							\
67782-		return ENOENT;						\
67783-	}								\
67784-	if (l) {							\
67785-		malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);		\
67786-	}								\
67787-	READONLY();							\
67788-	oldval = (v);							\
67789-	READ(oldval, t);						\
67790-									\
67791-	ret = 0;							\
67792-label_return:								\
67793-	if (l) {							\
67794-		malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);		\
67795-	}								\
67796-	return ret;							\
67797-}
67798-
67799-#define CTL_RO_CGEN(c, n, v, t)						\
67800-static int								\
67801-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,			\
67802-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {		\
67803-	int ret;							\
67804-	t oldval;							\
67805-									\
67806-	if (!(c)) {							\
67807-		return ENOENT;						\
67808-	}								\
67809-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);			\
67810-	READONLY();							\
67811-	oldval = (v);							\
67812-	READ(oldval, t);						\
67813-									\
67814-	ret = 0;							\
67815-label_return:								\
67816-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);			\
67817-	return ret;							\
67818-}
67819-
67820-#define CTL_RO_GEN(n, v, t)						\
67821-static int								\
67822-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
67823-    size_t *oldlenp, void *newp, size_t newlen) {			\
67824-	int ret;							\
67825-	t oldval;							\
67826-									\
67827-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);			\
67828-	READONLY();							\
67829-	oldval = (v);							\
67830-	READ(oldval, t);						\
67831-									\
67832-	ret = 0;							\
67833-label_return:								\
67834-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);			\
67835-	return ret;							\
67836-}
67837-
67838-/*
67839- * ctl_mtx is not acquired, under the assumption that no pertinent data will
67840- * mutate during the call.
67841- */
67842-#define CTL_RO_NL_CGEN(c, n, v, t)					\
67843-static int								\
67844-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,			\
67845-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {		\
67846-	int ret;							\
67847-	t oldval;							\
67848-									\
67849-	if (!(c)) {							\
67850-		return ENOENT;						\
67851-	}								\
67852-	READONLY();							\
67853-	oldval = (v);							\
67854-	READ(oldval, t);						\
67855-									\
67856-	ret = 0;							\
67857-label_return:								\
67858-	return ret;							\
67859-}
67860-
67861-#define CTL_RO_NL_GEN(n, v, t)						\
67862-static int								\
67863-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,			\
67864-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {		\
67865-	int ret;							\
67866-	t oldval;							\
67867-									\
67868-	READONLY();							\
67869-	oldval = (v);							\
67870-	READ(oldval, t);						\
67871-									\
67872-	ret = 0;							\
67873-label_return:								\
67874-	return ret;							\
67875-}
67876-
67877-#define CTL_RO_CONFIG_GEN(n, t)						\
67878-static int								\
67879-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,			\
67880-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {		\
67881-	int ret;							\
67882-	t oldval;							\
67883-									\
67884-	READONLY();							\
67885-	oldval = n;							\
67886-	READ(oldval, t);						\
67887-									\
67888-	ret = 0;							\
67889-label_return:								\
67890-	return ret;							\
67891-}
67892-
67893-/******************************************************************************/
67894-
67895-CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
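/*
 * For reference, the invocation above expands into a read-only, lock-free
 * handler roughly equivalent to the following (shown only as a sketch of the
 * expansion; the macro already generates it):
 */
static int
version_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	const char *oldval;

	READONLY();			/* EPERM if a new value is supplied. */
	oldval = JEMALLOC_VERSION;
	READ(oldval, const char *);	/* Copy the string pointer out. */

	ret = 0;
label_return:
	return ret;
}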
67896-
67897-static int
67898-epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
67899-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
67900-	int ret;
67901-	UNUSED uint64_t newval;
67902-
67903-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
67904-	WRITE(newval, uint64_t);
67905-	if (newp != NULL) {
67906-		ctl_refresh(tsd_tsdn(tsd));
67907-	}
67908-	READ(ctl_arenas->epoch, uint64_t);
67909-
67910-	ret = 0;
67911-label_return:
67912-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
67913-	return ret;
67914-}
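/*
 * Usage sketch (illustrative only): statistics are cached per epoch, so
 * callers conventionally bump "epoch" before reading "stats.*" values,
 * assuming the public mallctl() wrapper:
 */
#include <jemalloc/jemalloc.h>

static size_t
current_allocated(void) {
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	size_t allocated = 0;

	/* Writing any value refreshes the cached stats snapshot. */
	(void)mallctl("epoch", &epoch, &sz, &epoch, sz);

	sz = sizeof(allocated);
	(void)mallctl("stats.allocated", &allocated, &sz, NULL, 0);
	return allocated;
}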
67915-
67916-static int
67917-background_thread_ctl(tsd_t *tsd, const size_t *mib,
67918-    size_t miblen, void *oldp, size_t *oldlenp,
67919-    void *newp, size_t newlen) {
67920-	int ret;
67921-	bool oldval;
67922-
67923-	if (!have_background_thread) {
67924-		return ENOENT;
67925-	}
67926-	background_thread_ctl_init(tsd_tsdn(tsd));
67927-
67928-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
67929-	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
67930-	if (newp == NULL) {
67931-		oldval = background_thread_enabled();
67932-		READ(oldval, bool);
67933-	} else {
67934-		if (newlen != sizeof(bool)) {
67935-			ret = EINVAL;
67936-			goto label_return;
67937-		}
67938-		oldval = background_thread_enabled();
67939-		READ(oldval, bool);
67940-
67941-		bool newval = *(bool *)newp;
67942-		if (newval == oldval) {
67943-			ret = 0;
67944-			goto label_return;
67945-		}
67946-
67947-		background_thread_enabled_set(tsd_tsdn(tsd), newval);
67948-		if (newval) {
67949-			if (background_threads_enable(tsd)) {
67950-				ret = EFAULT;
67951-				goto label_return;
67952-			}
67953-		} else {
67954-			if (background_threads_disable(tsd)) {
67955-				ret = EFAULT;
67956-				goto label_return;
67957-			}
67958-		}
67959-	}
67960-	ret = 0;
67961-label_return:
67962-	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
67963-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
67964-
67965-	return ret;
67966-}
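/*
 * Usage sketch (illustrative only): the handler above is reached through the
 * boolean "background_thread" control, assuming the public mallctl() wrapper:
 */
#include <stdbool.h>
#include <jemalloc/jemalloc.h>

static int
enable_background_threads(void) {
	bool enable = true;

	/* Returns ENOENT when built without background thread support. */
	return mallctl("background_thread", NULL, NULL, &enable,
	    sizeof(enable));
}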
67967-
67968-static int
67969-max_background_threads_ctl(tsd_t *tsd, const size_t *mib,
67970-    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
67971-    size_t newlen) {
67972-	int ret;
67973-	size_t oldval;
67974-
67975-	if (!have_background_thread) {
67976-		return ENOENT;
67977-	}
67978-	background_thread_ctl_init(tsd_tsdn(tsd));
67979-
67980-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
67981-	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
67982-	if (newp == NULL) {
67983-		oldval = max_background_threads;
67984-		READ(oldval, size_t);
67985-	} else {
67986-		if (newlen != sizeof(size_t)) {
67987-			ret = EINVAL;
67988-			goto label_return;
67989-		}
67990-		oldval = max_background_threads;
67991-		READ(oldval, size_t);
67992-
67993-		size_t newval = *(size_t *)newp;
67994-		if (newval == oldval) {
67995-			ret = 0;
67996-			goto label_return;
67997-		}
67998-		if (newval > opt_max_background_threads) {
67999-			ret = EINVAL;
68000-			goto label_return;
68001-		}
68002-
68003-		if (background_thread_enabled()) {
68004-			background_thread_enabled_set(tsd_tsdn(tsd), false);
68005-			if (background_threads_disable(tsd)) {
68006-				ret = EFAULT;
68007-				goto label_return;
68008-			}
68009-			max_background_threads = newval;
68010-			background_thread_enabled_set(tsd_tsdn(tsd), true);
68011-			if (background_threads_enable(tsd)) {
68012-				ret = EFAULT;
68013-				goto label_return;
68014-			}
68015-		} else {
68016-			max_background_threads = newval;
68017-		}
68018-	}
68019-	ret = 0;
68020-label_return:
68021-	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
68022-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
68023-
68024-	return ret;
68025-}
68026-
68027-/******************************************************************************/
68028-
68029-CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
68030-CTL_RO_CONFIG_GEN(config_debug, bool)
68031-CTL_RO_CONFIG_GEN(config_fill, bool)
68032-CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
68033-CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
68034-CTL_RO_CONFIG_GEN(config_opt_safety_checks, bool)
68035-CTL_RO_CONFIG_GEN(config_prof, bool)
68036-CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
68037-CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
68038-CTL_RO_CONFIG_GEN(config_stats, bool)
68039-CTL_RO_CONFIG_GEN(config_utrace, bool)
68040-CTL_RO_CONFIG_GEN(config_xmalloc, bool)
68041-
68042-/******************************************************************************/
68043-
68044-CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
68045-CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
68046-CTL_RO_NL_GEN(opt_cache_oblivious, opt_cache_oblivious, bool)
68047-CTL_RO_NL_GEN(opt_trust_madvise, opt_trust_madvise, bool)
68048-CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool)
68049-
68050-/* HPA options. */
68051-CTL_RO_NL_GEN(opt_hpa, opt_hpa, bool)
68052-CTL_RO_NL_GEN(opt_hpa_hugification_threshold,
68053-    opt_hpa_opts.hugification_threshold, size_t)
68054-CTL_RO_NL_GEN(opt_hpa_hugify_delay_ms, opt_hpa_opts.hugify_delay_ms, uint64_t)
68055-CTL_RO_NL_GEN(opt_hpa_min_purge_interval_ms, opt_hpa_opts.min_purge_interval_ms,
68056-    uint64_t)
68057-
68058-/*
68059- * This will have to change before we publicly document this option; fxp_t and
68060- * its representation are internal implementation details.
68061- */
68062-CTL_RO_NL_GEN(opt_hpa_dirty_mult, opt_hpa_opts.dirty_mult, fxp_t)
68063-CTL_RO_NL_GEN(opt_hpa_slab_max_alloc, opt_hpa_opts.slab_max_alloc, size_t)
68064-
68065-/* HPA SEC options */
68066-CTL_RO_NL_GEN(opt_hpa_sec_nshards, opt_hpa_sec_opts.nshards, size_t)
68067-CTL_RO_NL_GEN(opt_hpa_sec_max_alloc, opt_hpa_sec_opts.max_alloc, size_t)
68068-CTL_RO_NL_GEN(opt_hpa_sec_max_bytes, opt_hpa_sec_opts.max_bytes, size_t)
68069-CTL_RO_NL_GEN(opt_hpa_sec_bytes_after_flush, opt_hpa_sec_opts.bytes_after_flush,
68070-    size_t)
68071-CTL_RO_NL_GEN(opt_hpa_sec_batch_fill_extra, opt_hpa_sec_opts.batch_fill_extra,
68072-    size_t)
68073-
68074-CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
68075-    const char *)
68076-CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
68077-CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
68078-CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
68079-CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
68080-    const char *)
68081-CTL_RO_NL_GEN(opt_mutex_max_spin, opt_mutex_max_spin, int64_t)
68082-CTL_RO_NL_GEN(opt_oversize_threshold, opt_oversize_threshold, size_t)
68083-CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
68084-CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
68085-CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
68086-CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
68087-CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
68088-CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
68089-CTL_RO_NL_GEN(opt_stats_interval, opt_stats_interval, int64_t)
68090-CTL_RO_NL_GEN(opt_stats_interval_opts, opt_stats_interval_opts, const char *)
68091-CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
68092-CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
68093-CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
68094-CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
68095-CTL_RO_NL_CGEN(config_enable_cxx, opt_experimental_infallible_new,
68096-    opt_experimental_infallible_new, bool)
68097-CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
68098-CTL_RO_NL_GEN(opt_tcache_max, opt_tcache_max, size_t)
68099-CTL_RO_NL_GEN(opt_tcache_nslots_small_min, opt_tcache_nslots_small_min,
68100-    unsigned)
68101-CTL_RO_NL_GEN(opt_tcache_nslots_small_max, opt_tcache_nslots_small_max,
68102-    unsigned)
68103-CTL_RO_NL_GEN(opt_tcache_nslots_large, opt_tcache_nslots_large, unsigned)
68104-CTL_RO_NL_GEN(opt_lg_tcache_nslots_mul, opt_lg_tcache_nslots_mul, ssize_t)
68105-CTL_RO_NL_GEN(opt_tcache_gc_incr_bytes, opt_tcache_gc_incr_bytes, size_t)
68106-CTL_RO_NL_GEN(opt_tcache_gc_delay_bytes, opt_tcache_gc_delay_bytes, size_t)
68107-CTL_RO_NL_GEN(opt_lg_tcache_flush_small_div, opt_lg_tcache_flush_small_div,
68108-    unsigned)
68109-CTL_RO_NL_GEN(opt_lg_tcache_flush_large_div, opt_lg_tcache_flush_large_div,
68110-    unsigned)
68111-CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *)
68112-CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
68113-    size_t)
68114-CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
68115-CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
68116-CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
68117-CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
68118-    opt_prof_thread_active_init, bool)
68119-CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
68120-CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
68121-CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
68122-CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
68123-CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
68124-CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
68125-CTL_RO_NL_CGEN(config_prof, opt_prof_leak_error, opt_prof_leak_error, bool)
68126-CTL_RO_NL_CGEN(config_prof, opt_prof_recent_alloc_max,
68127-    opt_prof_recent_alloc_max, ssize_t)
68128-CTL_RO_NL_CGEN(config_prof, opt_prof_stats, opt_prof_stats, bool)
68129-CTL_RO_NL_CGEN(config_prof, opt_prof_sys_thread_name, opt_prof_sys_thread_name,
68130-    bool)
68131-CTL_RO_NL_CGEN(config_prof, opt_prof_time_res,
68132-    prof_time_res_mode_names[opt_prof_time_res], const char *)
68133-CTL_RO_NL_CGEN(config_uaf_detection, opt_lg_san_uaf_align,
68134-    opt_lg_san_uaf_align, ssize_t)
68135-CTL_RO_NL_GEN(opt_zero_realloc,
68136-    zero_realloc_mode_names[opt_zero_realloc_action], const char *)
68137-
68138-/******************************************************************************/
68139-
68140-static int
68141-thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
68142-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
68143-	int ret;
68144-	arena_t *oldarena;
68145-	unsigned newind, oldind;
68146-
68147-	oldarena = arena_choose(tsd, NULL);
68148-	if (oldarena == NULL) {
68149-		return EAGAIN;
68150-	}
68151-	newind = oldind = arena_ind_get(oldarena);
68152-	WRITE(newind, unsigned);
68153-	READ(oldind, unsigned);
68154-
68155-	if (newind != oldind) {
68156-		arena_t *newarena;
68157-
68158-		if (newind >= narenas_total_get()) {
68159-			/* New arena index is out of range. */
68160-			ret = EFAULT;
68161-			goto label_return;
68162-		}
68163-
68164-		if (have_percpu_arena &&
68165-		    PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
68166-			if (newind < percpu_arena_ind_limit(opt_percpu_arena)) {
68167-				/*
68168-				 * If per-CPU arena is enabled, thread_arena
68169-				 * control is not allowed for the auto arena
68170-				 * range.
68171-				 */
68172-				ret = EPERM;
68173-				goto label_return;
68174-			}
68175-		}
68176-
68177-		/* Initialize arena if necessary. */
68178-		newarena = arena_get(tsd_tsdn(tsd), newind, true);
68179-		if (newarena == NULL) {
68180-			ret = EAGAIN;
68181-			goto label_return;
68182-		}
68183-		/* Set new arena/tcache associations. */
68184-		arena_migrate(tsd, oldarena, newarena);
68185-		if (tcache_available(tsd)) {
68186-			tcache_arena_reassociate(tsd_tsdn(tsd),
68187-			    tsd_tcache_slowp_get(tsd), tsd_tcachep_get(tsd),
68188-			    newarena);
68189-		}
68190-	}
68191-
68192-	ret = 0;
68193-label_return:
68194-	return ret;
68195-}
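/*
 * Usage sketch (illustrative only): "thread.arena" both reports and switches
 * the calling thread's arena binding, so the old and new indices can be
 * exchanged in a single call, assuming the public mallctl() wrapper:
 */
#include <jemalloc/jemalloc.h>

static int
bind_thread_to_arena(unsigned new_ind, unsigned *old_ind) {
	size_t sz = sizeof(*old_ind);

	/* Writes the new index and reads back the previous one. */
	return mallctl("thread.arena", old_ind, &sz, &new_ind,
	    sizeof(new_ind));
}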
68196-
68197-CTL_RO_NL_GEN(thread_allocated, tsd_thread_allocated_get(tsd), uint64_t)
68198-CTL_RO_NL_GEN(thread_allocatedp, tsd_thread_allocatedp_get(tsd), uint64_t *)
68199-CTL_RO_NL_GEN(thread_deallocated, tsd_thread_deallocated_get(tsd), uint64_t)
68200-CTL_RO_NL_GEN(thread_deallocatedp, tsd_thread_deallocatedp_get(tsd), uint64_t *)
68201-
68202-static int
68203-thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib,
68204-    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
68205-    size_t newlen) {
68206-	int ret;
68207-	bool oldval;
68208-
68209-	oldval = tcache_enabled_get(tsd);
68210-	if (newp != NULL) {
68211-		if (newlen != sizeof(bool)) {
68212-			ret = EINVAL;
68213-			goto label_return;
68214-		}
68215-		tcache_enabled_set(tsd, *(bool *)newp);
68216-	}
68217-	READ(oldval, bool);
68218-
68219-	ret = 0;
68220-label_return:
68221-	return ret;
68222-}
68223-
68224-static int
68225-thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib,
68226-    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
68227-    size_t newlen) {
68228-	int ret;
68229-
68230-	if (!tcache_available(tsd)) {
68231-		ret = EFAULT;
68232-		goto label_return;
68233-	}
68234-
68235-	NEITHER_READ_NOR_WRITE();
68236-
68237-	tcache_flush(tsd);
68238-
68239-	ret = 0;
68240-label_return:
68241-	return ret;
68242-}
68243-
68244-static int
68245-thread_peak_read_ctl(tsd_t *tsd, const size_t *mib,
68246-    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
68247-    size_t newlen) {
68248-	int ret;
68249-	if (!config_stats) {
68250-		return ENOENT;
68251-	}
68252-	READONLY();
68253-	peak_event_update(tsd);
68254-	uint64_t result = peak_event_max(tsd);
68255-	READ(result, uint64_t);
68256-	ret = 0;
68257-label_return:
68258-	return ret;
68259-}
68260-
68261-static int
68262-thread_peak_reset_ctl(tsd_t *tsd, const size_t *mib,
68263-    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
68264-    size_t newlen) {
68265-	int ret;
68266-	if (!config_stats) {
68267-		return ENOENT;
68268-	}
68269-	NEITHER_READ_NOR_WRITE();
68270-	peak_event_zero(tsd);
68271-	ret = 0;
68272-label_return:
68273-	return ret;
68274-}
68275-
68276-static int
68277-thread_prof_name_ctl(tsd_t *tsd, const size_t *mib,
68278-    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
68279-    size_t newlen) {
68280-	int ret;
68281-
68282-	if (!config_prof || !opt_prof) {
68283-		return ENOENT;
68284-	}
68285-
68286-	READ_XOR_WRITE();
68287-
68288-	if (newp != NULL) {
68289-		if (newlen != sizeof(const char *)) {
68290-			ret = EINVAL;
68291-			goto label_return;
68292-		}
68293-
68294-		if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
68295-		    0) {
68296-			goto label_return;
68297-		}
68298-	} else {
68299-		const char *oldname = prof_thread_name_get(tsd);
68300-		READ(oldname, const char *);
68301-	}
68302-
68303-	ret = 0;
68304-label_return:
68305-	return ret;
68306-}
68307-
68308-static int
68309-thread_prof_active_ctl(tsd_t *tsd, const size_t *mib,
68310-    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
68311-    size_t newlen) {
68312-	int ret;
68313-	bool oldval;
68314-
68315-	if (!config_prof) {
68316-		return ENOENT;
68317-	}
68318-
68319-	oldval = opt_prof ? prof_thread_active_get(tsd) : false;
68320-	if (newp != NULL) {
68321-		if (!opt_prof) {
68322-			ret = ENOENT;
68323-			goto label_return;
68324-		}
68325-		if (newlen != sizeof(bool)) {
68326-			ret = EINVAL;
68327-			goto label_return;
68328-		}
68329-		if (prof_thread_active_set(tsd, *(bool *)newp)) {
68330-			ret = EAGAIN;
68331-			goto label_return;
68332-		}
68333-	}
68334-	READ(oldval, bool);
68335-
68336-	ret = 0;
68337-label_return:
68338-	return ret;
68339-}
68340-
68341-static int
68342-thread_idle_ctl(tsd_t *tsd, const size_t *mib,
68343-    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
68344-    size_t newlen) {
68345-	int ret;
68346-
68347-	NEITHER_READ_NOR_WRITE();
68348-
68349-	if (tcache_available(tsd)) {
68350-		tcache_flush(tsd);
68351-	}
68352-	/*
68353-	 * This heuristic is perhaps not the most well-considered.  But it
68354-	 * matches the only idling policy we have experience with in the status
68355-	 * quo.  Over time we should investigate more principled approaches.
68356-	 */
68357-	if (opt_narenas > ncpus * 2) {
68358-		arena_t *arena = arena_choose(tsd, NULL);
68359-		if (arena != NULL) {
68360-			arena_decay(tsd_tsdn(tsd), arena, false, true);
68361-		}
68362-		/*
68363-		 * The missing arena case is not actually an error; a thread
68364-		 * might be idle before it associates itself with one.  This is
68365-		 * unusual, but not wrong.
68366-		 */
68367-	}
68368-
68369-	ret = 0;
68370-label_return:
68371-	return ret;
68372-}
68373-
68374-/******************************************************************************/
68375-
68376-static int
68377-tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
68378-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
68379-	int ret;
68380-	unsigned tcache_ind;
68381-
68382-	READONLY();
68383-	VERIFY_READ(unsigned);
68384-	if (tcaches_create(tsd, b0get(), &tcache_ind)) {
68385-		ret = EFAULT;
68386-		goto label_return;
68387-	}
68388-	READ(tcache_ind, unsigned);
68389-
68390-	ret = 0;
68391-label_return:
68392-	return ret;
68393-}
68394-
68395-static int
68396-tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
68397-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
68398-	int ret;
68399-	unsigned tcache_ind;
68400-
68401-	WRITEONLY();
68402-	ASSURED_WRITE(tcache_ind, unsigned);
68403-	tcaches_flush(tsd, tcache_ind);
68404-
68405-	ret = 0;
68406-label_return:
68407-	return ret;
68408-}
68409-
68410-static int
68411-tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
68412-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
68413-	int ret;
68414-	unsigned tcache_ind;
68415-
68416-	WRITEONLY();
68417-	ASSURED_WRITE(tcache_ind, unsigned);
68418-	tcaches_destroy(tsd, tcache_ind);
68419-
68420-	ret = 0;
68421-label_return:
68422-	return ret;
68423-}
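/*
 * Usage sketch (illustrative only) for the explicit tcache lifecycle:
 * "tcache.create" reads an index out, while "tcache.flush" and
 * "tcache.destroy" take that index as the new value.  The public mallctl(),
 * mallocx()/dallocx() and MALLOCX_TCACHE() interfaces are assumed:
 */
#include <jemalloc/jemalloc.h>

static void
tcache_roundtrip(void) {
	unsigned tci;
	size_t sz = sizeof(tci);

	if (mallctl("tcache.create", &tci, &sz, NULL, 0) != 0) {
		return;
	}
	void *p = mallocx(64, MALLOCX_TCACHE(tci));
	if (p != NULL) {
		dallocx(p, MALLOCX_TCACHE(tci));
	}
	(void)mallctl("tcache.flush", NULL, NULL, &tci, sizeof(tci));
	(void)mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
}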
68424-
68425-/******************************************************************************/
68426-
68427-static int
68428-arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
68429-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
68430-	int ret;
68431-	tsdn_t *tsdn = tsd_tsdn(tsd);
68432-	unsigned arena_ind;
68433-	bool initialized;
68434-
68435-	READONLY();
68436-	MIB_UNSIGNED(arena_ind, 1);
68437-
68438-	malloc_mutex_lock(tsdn, &ctl_mtx);
68439-	initialized = arenas_i(arena_ind)->initialized;
68440-	malloc_mutex_unlock(tsdn, &ctl_mtx);
68441-
68442-	READ(initialized, bool);
68443-
68444-	ret = 0;
68445-label_return:
68446-	return ret;
68447-}
68448-
68449-static void
68450-arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) {
68451-	malloc_mutex_lock(tsdn, &ctl_mtx);
68452-	{
68453-		unsigned narenas = ctl_arenas->narenas;
68454-
68455-		/*
68456-		 * Access via index narenas is deprecated, and scheduled for
68457-		 * removal in 6.0.0.
68458-		 */
68459-		if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) {
68460-			unsigned i;
68461-			VARIABLE_ARRAY(arena_t *, tarenas, narenas);
68462-
68463-			for (i = 0; i < narenas; i++) {
68464-				tarenas[i] = arena_get(tsdn, i, false);
68465-			}
68466-
68467-			/*
68468-			 * No further need to hold ctl_mtx, since narenas and
68469-			 * tarenas contain everything needed below.
68470-			 */
68471-			malloc_mutex_unlock(tsdn, &ctl_mtx);
68472-
68473-			for (i = 0; i < narenas; i++) {
68474-				if (tarenas[i] != NULL) {
68475-					arena_decay(tsdn, tarenas[i], false,
68476-					    all);
68477-				}
68478-			}
68479-		} else {
68480-			arena_t *tarena;
68481-
68482-			assert(arena_ind < narenas);
68483-
68484-			tarena = arena_get(tsdn, arena_ind, false);
68485-
68486-			/* No further need to hold ctl_mtx. */
68487-			malloc_mutex_unlock(tsdn, &ctl_mtx);
68488-
68489-			if (tarena != NULL) {
68490-				arena_decay(tsdn, tarena, false, all);
68491-			}
68492-		}
68493-	}
68494-}
68495-
68496-static int
68497-arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
68498-    size_t *oldlenp, void *newp, size_t newlen) {
68499-	int ret;
68500-	unsigned arena_ind;
68501-
68502-	NEITHER_READ_NOR_WRITE();
68503-	MIB_UNSIGNED(arena_ind, 1);
68504-	arena_i_decay(tsd_tsdn(tsd), arena_ind, false);
68505-
68506-	ret = 0;
68507-label_return:
68508-	return ret;
68509-}
68510-
68511-static int
68512-arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
68513-    size_t *oldlenp, void *newp, size_t newlen) {
68514-	int ret;
68515-	unsigned arena_ind;
68516-
68517-	NEITHER_READ_NOR_WRITE();
68518-	MIB_UNSIGNED(arena_ind, 1);
68519-	arena_i_decay(tsd_tsdn(tsd), arena_ind, true);
68520-
68521-	ret = 0;
68522-label_return:
68523-	return ret;
68524-}
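/*
 * Usage sketch (illustrative only): the decay/purge controls neither read
 * nor write a value; they act purely as triggers, with MALLCTL_ARENAS_ALL
 * covering every arena.  The public mallctl() wrapper and the
 * MALLCTL_ARENAS_ALL macro from <jemalloc/jemalloc.h> are assumed:
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
purge_all_arenas(void) {
	char cmd[64];

	snprintf(cmd, sizeof(cmd), "arena.%u.purge",
	    (unsigned)MALLCTL_ARENAS_ALL);
	(void)mallctl(cmd, NULL, NULL, NULL, 0);
}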
68525-
68526-static int
68527-arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
68528-    void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind,
68529-    arena_t **arena) {
68530-	int ret;
68531-
68532-	NEITHER_READ_NOR_WRITE();
68533-	MIB_UNSIGNED(*arena_ind, 1);
68534-
68535-	*arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
68536-	if (*arena == NULL || arena_is_auto(*arena)) {
68537-		ret = EFAULT;
68538-		goto label_return;
68539-	}
68540-
68541-	ret = 0;
68542-label_return:
68543-	return ret;
68544-}
68545-
68546-static void
68547-arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) {
68548-	/* Temporarily disable the background thread during arena reset. */
68549-	if (have_background_thread) {
68550-		malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
68551-		if (background_thread_enabled()) {
68552-			background_thread_info_t *info =
68553-			    background_thread_info_get(arena_ind);
68554-			assert(info->state == background_thread_started);
68555-			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
68556-			info->state = background_thread_paused;
68557-			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
68558-		}
68559-	}
68560-}
68561-
68562-static void
68563-arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) {
68564-	if (have_background_thread) {
68565-		if (background_thread_enabled()) {
68566-			background_thread_info_t *info =
68567-			    background_thread_info_get(arena_ind);
68568-			assert(info->state == background_thread_paused);
68569-			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
68570-			info->state = background_thread_started;
68571-			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
68572-		}
68573-		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
68574-	}
68575-}
68576-
68577-static int
68578-arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
68579-    size_t *oldlenp, void *newp, size_t newlen) {
68580-	int ret;
68581-	unsigned arena_ind;
68582-	arena_t *arena;
68583-
68584-	ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
68585-	    newp, newlen, &arena_ind, &arena);
68586-	if (ret != 0) {
68587-		return ret;
68588-	}
68589-
68590-	arena_reset_prepare_background_thread(tsd, arena_ind);
68591-	arena_reset(tsd, arena);
68592-	arena_reset_finish_background_thread(tsd, arena_ind);
68593-
68594-	return ret;
68595-}
68596-
68597-static int
68598-arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
68599-    size_t *oldlenp, void *newp, size_t newlen) {
68600-	int ret;
68601-	unsigned arena_ind;
68602-	arena_t *arena;
68603-	ctl_arena_t *ctl_darena, *ctl_arena;
68604-
68605-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
68606-
68607-	ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
68608-	    newp, newlen, &arena_ind, &arena);
68609-	if (ret != 0) {
68610-		goto label_return;
68611-	}
68612-
68613-	if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena,
68614-	    true) != 0) {
68615-		ret = EFAULT;
68616-		goto label_return;
68617-	}
68618-
68619-	arena_reset_prepare_background_thread(tsd, arena_ind);
68620-	/* Merge stats after resetting and purging arena. */
68621-	arena_reset(tsd, arena);
68622-	arena_decay(tsd_tsdn(tsd), arena, false, true);
68623-	ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED);
68624-	ctl_darena->initialized = true;
68625-	ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true);
68626-	/* Destroy arena. */
68627-	arena_destroy(tsd, arena);
68628-	ctl_arena = arenas_i(arena_ind);
68629-	ctl_arena->initialized = false;
68630-	/* Record arena index for later recycling via arenas.create. */
68631-	ql_elm_new(ctl_arena, destroyed_link);
68632-	ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
68633-	arena_reset_finish_background_thread(tsd, arena_ind);
68634-
68635-	assert(ret == 0);
68636-label_return:
68637-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
68638-
68639-	return ret;
68640-}
68641-
68642-static int
68643-arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
68644-    size_t *oldlenp, void *newp, size_t newlen) {
68645-	int ret;
68646-	const char *dss = NULL;
68647-	unsigned arena_ind;
68648-	dss_prec_t dss_prec_old = dss_prec_limit;
68649-	dss_prec_t dss_prec = dss_prec_limit;
68650-
68651-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
68652-	WRITE(dss, const char *);
68653-	MIB_UNSIGNED(arena_ind, 1);
68654-	if (dss != NULL) {
68655-		int i;
68656-		bool match = false;
68657-
68658-		for (i = 0; i < dss_prec_limit; i++) {
68659-			if (strcmp(dss_prec_names[i], dss) == 0) {
68660-				dss_prec = i;
68661-				match = true;
68662-				break;
68663-			}
68664-		}
68665-
68666-		if (!match) {
68667-			ret = EINVAL;
68668-			goto label_return;
68669-		}
68670-	}
68671-
68672-	/*
68673-	 * Access via index narenas is deprecated, and scheduled for removal in
68674-	 * 6.0.0.
68675-	 */
68676-	if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind ==
68677-	    ctl_arenas->narenas) {
68678-		if (dss_prec != dss_prec_limit &&
68679-		    extent_dss_prec_set(dss_prec)) {
68680-			ret = EFAULT;
68681-			goto label_return;
68682-		}
68683-		dss_prec_old = extent_dss_prec_get();
68684-	} else {
68685-		arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
68686-		if (arena == NULL || (dss_prec != dss_prec_limit &&
68687-		    arena_dss_prec_set(arena, dss_prec))) {
68688-			ret = EFAULT;
68689-			goto label_return;
68690-		}
68691-		dss_prec_old = arena_dss_prec_get(arena);
68692-	}
68693-
68694-	dss = dss_prec_names[dss_prec_old];
68695-	READ(dss, const char *);
68696-
68697-	ret = 0;
68698-label_return:
68699-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
68700-	return ret;
68701-}
68702-
68703-static int
68704-arena_i_oversize_threshold_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
68705-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
68706-	int ret;
68707-
68708-	unsigned arena_ind;
68709-	MIB_UNSIGNED(arena_ind, 1);
68710-
68711-	arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
68712-	if (arena == NULL) {
68713-		ret = EFAULT;
68714-		goto label_return;
68715-	}
68716-
68717-	if (oldp != NULL && oldlenp != NULL) {
68718-		size_t oldval = atomic_load_zu(
68719-		    &arena->pa_shard.pac.oversize_threshold, ATOMIC_RELAXED);
68720-		READ(oldval, size_t);
68721-	}
68722-	if (newp != NULL) {
68723-		if (newlen != sizeof(size_t)) {
68724-			ret = EINVAL;
68725-			goto label_return;
68726-		}
68727-		atomic_store_zu(&arena->pa_shard.pac.oversize_threshold,
68728-		    *(size_t *)newp, ATOMIC_RELAXED);
68729-	}
68730-	ret = 0;
68731-label_return:
68732-	return ret;
68733-}
68734-
68735-static int
68736-arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
68737-    void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
68738-	int ret;
68739-	unsigned arena_ind;
68740-	arena_t *arena;
68741-
68742-	MIB_UNSIGNED(arena_ind, 1);
68743-	arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
68744-	if (arena == NULL) {
68745-		ret = EFAULT;
68746-		goto label_return;
68747-	}
68748-	extent_state_t state = dirty ? extent_state_dirty : extent_state_muzzy;
68749-
68750-	if (oldp != NULL && oldlenp != NULL) {
68751-		ssize_t oldval = arena_decay_ms_get(arena, state);
68752-		READ(oldval, ssize_t);
68753-	}
68754-	if (newp != NULL) {
68755-		if (newlen != sizeof(ssize_t)) {
68756-			ret = EINVAL;
68757-			goto label_return;
68758-		}
68759-		if (arena_is_huge(arena_ind) && *(ssize_t *)newp > 0) {
68760-			/*
68761-			 * By default the huge arena purges eagerly.  If it is
68762-			 * later set to a non-zero decay time, a background
68763-			 * thread might be needed.
68764-			 */
68765-			if (background_thread_create(tsd, arena_ind)) {
68766-				ret = EFAULT;
68767-				goto label_return;
68768-			}
68769-		}
68770-
68771-		if (arena_decay_ms_set(tsd_tsdn(tsd), arena, state,
68772-		    *(ssize_t *)newp)) {
68773-			ret = EFAULT;
68774-			goto label_return;
68775-		}
68776-	}
68777-
68778-	ret = 0;
68779-label_return:
68780-	return ret;
68781-}
68782-
68783-static int
68784-arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
68785-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
68786-	return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
68787-	    newlen, true);
68788-}
68789-
68790-static int
68791-arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
68792-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
68793-	return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
68794-	    newlen, false);
68795-}
68796-
68797-static int
68798-arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
68799-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
68800-	int ret;
68801-	unsigned arena_ind;
68802-	arena_t *arena;
68803-
68804-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
68805-	MIB_UNSIGNED(arena_ind, 1);
68806-	if (arena_ind < narenas_total_get()) {
68807-		extent_hooks_t *old_extent_hooks;
68808-		arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
68809-		if (arena == NULL) {
68810-			if (arena_ind >= narenas_auto) {
68811-				ret = EFAULT;
68812-				goto label_return;
68813-			}
68814-			old_extent_hooks =
68815-			    (extent_hooks_t *)&ehooks_default_extent_hooks;
68816-			READ(old_extent_hooks, extent_hooks_t *);
68817-			if (newp != NULL) {
68818-				/* Initialize a new arena as a side effect. */
68819-				extent_hooks_t *new_extent_hooks
68820-				    JEMALLOC_CC_SILENCE_INIT(NULL);
68821-				WRITE(new_extent_hooks, extent_hooks_t *);
68822-				arena_config_t config = arena_config_default;
68823-				config.extent_hooks = new_extent_hooks;
68824-
68825-				arena = arena_init(tsd_tsdn(tsd), arena_ind,
68826-				    &config);
68827-				if (arena == NULL) {
68828-					ret = EFAULT;
68829-					goto label_return;
68830-				}
68831-			}
68832-		} else {
68833-			if (newp != NULL) {
68834-				extent_hooks_t *new_extent_hooks
68835-				    JEMALLOC_CC_SILENCE_INIT(NULL);
68836-				WRITE(new_extent_hooks, extent_hooks_t *);
68837-				old_extent_hooks = arena_set_extent_hooks(tsd,
68838-				    arena, new_extent_hooks);
68839-				READ(old_extent_hooks, extent_hooks_t *);
68840-			} else {
68841-				old_extent_hooks =
68842-				    ehooks_get_extent_hooks_ptr(
68843-					arena_get_ehooks(arena));
68844-				READ(old_extent_hooks, extent_hooks_t *);
68845-			}
68846-		}
68847-	} else {
68848-		ret = EFAULT;
68849-		goto label_return;
68850-	}
68851-	ret = 0;
68852-label_return:
68853-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
68854-	return ret;
68855-}
68856-
68857-static int
68858-arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib,
68859-    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
68860-    size_t newlen) {
68861-	int ret;
68862-	unsigned arena_ind;
68863-	arena_t *arena;
68864-
68865-	if (!opt_retain) {
68866-		/* Only relevant when retain is enabled. */
68867-		return ENOENT;
68868-	}
68869-
68870-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
68871-	MIB_UNSIGNED(arena_ind, 1);
68872-	if (arena_ind < narenas_total_get() && (arena =
68873-	    arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
68874-		size_t old_limit, new_limit;
68875-		if (newp != NULL) {
68876-			WRITE(new_limit, size_t);
68877-		}
68878-		bool err = arena_retain_grow_limit_get_set(tsd, arena,
68879-		    &old_limit, newp != NULL ? &new_limit : NULL);
68880-		if (!err) {
68881-			READ(old_limit, size_t);
68882-			ret = 0;
68883-		} else {
68884-			ret = EFAULT;
68885-		}
68886-	} else {
68887-		ret = EFAULT;
68888-	}
68889-label_return:
68890-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
68891-	return ret;
68892-}
68893-
68894-static const ctl_named_node_t *
68895-arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
68896-    size_t i) {
68897-	const ctl_named_node_t *ret;
68898-
68899-	malloc_mutex_lock(tsdn, &ctl_mtx);
68900-	switch (i) {
68901-	case MALLCTL_ARENAS_ALL:
68902-	case MALLCTL_ARENAS_DESTROYED:
68903-		break;
68904-	default:
68905-		if (i > ctl_arenas->narenas) {
68906-			ret = NULL;
68907-			goto label_return;
68908-		}
68909-		break;
68910-	}
68911-
68912-	ret = super_arena_i_node;
68913-label_return:
68914-	malloc_mutex_unlock(tsdn, &ctl_mtx);
68915-	return ret;
68916-}
68917-
68918-/******************************************************************************/
68919-
68920-static int
68921-arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
68922-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
68923-	int ret;
68924-	unsigned narenas;
68925-
68926-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
68927-	READONLY();
68928-	narenas = ctl_arenas->narenas;
68929-	READ(narenas, unsigned);
68930-
68931-	ret = 0;
68932-label_return:
68933-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
68934-	return ret;
68935-}
68936-
68937-static int
68938-arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib,
68939-    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
68940-    size_t newlen, bool dirty) {
68941-	int ret;
68942-
68943-	if (oldp != NULL && oldlenp != NULL) {
68944-		ssize_t oldval = (dirty ? arena_dirty_decay_ms_default_get() :
68945-		    arena_muzzy_decay_ms_default_get());
68946-		READ(oldval, ssize_t);
68947-	}
68948-	if (newp != NULL) {
68949-		if (newlen != sizeof(ssize_t)) {
68950-			ret = EINVAL;
68951-			goto label_return;
68952-		}
68953-		if (dirty ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp)
68954-		    : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) {
68955-			ret = EFAULT;
68956-			goto label_return;
68957-		}
68958-	}
68959-
68960-	ret = 0;
68961-label_return:
68962-	return ret;
68963-}
68964-
68965-static int
68966-arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
68967-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
68968-	return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
68969-	    newlen, true);
68970-}
68971-
68972-static int
68973-arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
68974-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
68975-	return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
68976-	    newlen, false);
68977-}
68978-
68979-CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
68980-CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
68981-CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
68982-CTL_RO_NL_GEN(arenas_nbins, SC_NBINS, unsigned)
68983-CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
68984-CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
68985-CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
68986-CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
68987-CTL_RO_NL_GEN(arenas_bin_i_nshards, bin_infos[mib[2]].n_shards, uint32_t)
68988-static const ctl_named_node_t *
68989-arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib,
68990-    size_t miblen, size_t i) {
68991-	if (i > SC_NBINS) {
68992-		return NULL;
68993-	}
68994-	return super_arenas_bin_i_node;
68995-}
68996-
68997-CTL_RO_NL_GEN(arenas_nlextents, SC_NSIZES - SC_NBINS, unsigned)
68998-CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(SC_NBINS+(szind_t)mib[2]),
68999-    size_t)
69000-static const ctl_named_node_t *
69001-arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib,
69002-    size_t miblen, size_t i) {
69003-	if (i > SC_NSIZES - SC_NBINS) {
69004-		return NULL;
69005-	}
69006-	return super_arenas_lextent_i_node;
69007-}
69008-
69009-static int
69010-arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
69011-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
69012-	int ret;
69013-	unsigned arena_ind;
69014-
69015-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
69016-
69017-	VERIFY_READ(unsigned);
69018-	arena_config_t config = arena_config_default;
69019-	WRITE(config.extent_hooks, extent_hooks_t *);
69020-	if ((arena_ind = ctl_arena_init(tsd, &config)) == UINT_MAX) {
69021-		ret = EAGAIN;
69022-		goto label_return;
69023-	}
69024-	READ(arena_ind, unsigned);
69025-
69026-	ret = 0;
69027-label_return:
69028-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
69029-	return ret;
69030-}
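/*
 * Usage sketch (illustrative only): "arenas.create" reads back the new arena
 * index and optionally takes custom extent hooks as the new value; the index
 * can then be fed to MALLOCX_ARENA().  The public mallctl()/mallocx()
 * wrappers are assumed:
 */
#include <jemalloc/jemalloc.h>

static void *
alloc_in_new_arena(size_t size) {
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);

	/* A NULL newp keeps the default extent hooks. */
	if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0) {
		return NULL;
	}
	return mallocx(size, MALLOCX_ARENA(arena_ind));
}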
69031-
69032-static int
69033-experimental_arenas_create_ext_ctl(tsd_t *tsd,
69034-    const size_t *mib, size_t miblen,
69035-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
69036-	int ret;
69037-	unsigned arena_ind;
69038-
69039-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
69040-
69041-	arena_config_t config = arena_config_default;
69042-	VERIFY_READ(unsigned);
69043-	WRITE(config, arena_config_t);
69044-
69045-	if ((arena_ind = ctl_arena_init(tsd, &config)) == UINT_MAX) {
69046-		ret = EAGAIN;
69047-		goto label_return;
69048-	}
69049-	READ(arena_ind, unsigned);
69050-	ret = 0;
69051-label_return:
69052-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
69053-	return ret;
69054-}
69055-
69056-static int
69057-arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
69058-    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
69059-    size_t newlen) {
69060-	int ret;
69061-	unsigned arena_ind;
69062-	void *ptr;
69063-	edata_t *edata;
69064-	arena_t *arena;
69065-
69066-	ptr = NULL;
69067-	ret = EINVAL;
69068-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
69069-	WRITE(ptr, void *);
69070-	edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr);
69071-	if (edata == NULL) {
69072-		goto label_return;
69073-	}
69074-
69075-	arena = arena_get_from_edata(edata);
69076-	if (arena == NULL) {
69077-		goto label_return;
69078-	}
69079-
69080-	arena_ind = arena_ind_get(arena);
69081-	READ(arena_ind, unsigned);
69082-
69083-	ret = 0;
69084-label_return:
69085-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
69086-	return ret;
69087-}
69088-
69089-/******************************************************************************/
69090-
69091-static int
69092-prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
69093-    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
69094-    size_t newlen) {
69095-	int ret;
69096-	bool oldval;
69097-
69098-	if (!config_prof) {
69099-		return ENOENT;
69100-	}
69101-
69102-	if (newp != NULL) {
69103-		if (!opt_prof) {
69104-			ret = ENOENT;
69105-			goto label_return;
69106-		}
69107-		if (newlen != sizeof(bool)) {
69108-			ret = EINVAL;
69109-			goto label_return;
69110-		}
69111-		oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
69112-		    *(bool *)newp);
69113-	} else {
69114-		oldval = opt_prof ? prof_thread_active_init_get(tsd_tsdn(tsd)) :
69115-		    false;
69116-	}
69117-	READ(oldval, bool);
69118-
69119-	ret = 0;
69120-label_return:
69121-	return ret;
69122-}
69123-
69124-static int
69125-prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
69126-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
69127-	int ret;
69128-	bool oldval;
69129-
69130-	if (!config_prof) {
69131-		ret = ENOENT;
69132-		goto label_return;
69133-	}
69134-
69135-	if (newp != NULL) {
69136-		if (newlen != sizeof(bool)) {
69137-			ret = EINVAL;
69138-			goto label_return;
69139-		}
69140-		bool val = *(bool *)newp;
69141-		if (!opt_prof) {
69142-			if (val) {
69143-				ret = ENOENT;
69144-				goto label_return;
69145-			} else {
69146-				/* No change needed (already off). */
69147-				oldval = false;
69148-			}
69149-		} else {
69150-			oldval = prof_active_set(tsd_tsdn(tsd), val);
69151-		}
69152-	} else {
69153-		oldval = opt_prof ? prof_active_get(tsd_tsdn(tsd)) : false;
69154-	}
69155-	READ(oldval, bool);
69156-
69157-	ret = 0;
69158-label_return:
69159-	return ret;
69160-}
69161-
69162-static int
69163-prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
69164-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
69165-	int ret;
69166-	const char *filename = NULL;
69167-
69168-	if (!config_prof || !opt_prof) {
69169-		return ENOENT;
69170-	}
69171-
69172-	WRITEONLY();
69173-	WRITE(filename, const char *);
69174-
69175-	if (prof_mdump(tsd, filename)) {
69176-		ret = EFAULT;
69177-		goto label_return;
69178-	}
69179-
69180-	ret = 0;
69181-label_return:
69182-	return ret;
69183-}
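/*
 * Usage sketch (illustrative only): "prof.dump" is write-only and takes an
 * optional filename; a NULL pointer value falls back to the automatically
 * generated <prefix>.<pid>... name.  The public mallctl() wrapper is assumed
 * and profiling must be enabled:
 */
#include <jemalloc/jemalloc.h>

static int
dump_heap_profile(const char *filename) {
	/* filename may be NULL to use the default naming scheme. */
	return mallctl("prof.dump", NULL, NULL, (void *)&filename,
	    sizeof(const char *));
}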
69184-
69185-static int
69186-prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
69187-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
69188-	int ret;
69189-	bool oldval;
69190-
69191-	if (!config_prof) {
69192-		return ENOENT;
69193-	}
69194-
69195-	if (newp != NULL) {
69196-		if (!opt_prof) {
69197-			ret = ENOENT;
69198-			goto label_return;
69199-		}
69200-		if (newlen != sizeof(bool)) {
69201-			ret = EINVAL;
69202-			goto label_return;
69203-		}
69204-		oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
69205-	} else {
69206-		oldval = opt_prof ? prof_gdump_get(tsd_tsdn(tsd)) : false;
69207-	}
69208-	READ(oldval, bool);
69209-
69210-	ret = 0;
69211-label_return:
69212-	return ret;
69213-}
69214-
69215-static int
69216-prof_prefix_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
69217-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
69218-	int ret;
69219-	const char *prefix = NULL;
69220-
69221-	if (!config_prof || !opt_prof) {
69222-		return ENOENT;
69223-	}
69224-
69225-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
69226-	WRITEONLY();
69227-	WRITE(prefix, const char *);
69228-
69229-	ret = prof_prefix_set(tsd_tsdn(tsd), prefix) ? EFAULT : 0;
69230-label_return:
69231-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
69232-	return ret;
69233-}
69234-
69235-static int
69236-prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
69237-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
69238-	int ret;
69239-	size_t lg_sample = lg_prof_sample;
69240-
69241-	if (!config_prof || !opt_prof) {
69242-		return ENOENT;
69243-	}
69244-
69245-	WRITEONLY();
69246-	WRITE(lg_sample, size_t);
69247-	if (lg_sample >= (sizeof(uint64_t) << 3)) {
69248-		lg_sample = (sizeof(uint64_t) << 3) - 1;
69249-	}
69250-
69251-	prof_reset(tsd, lg_sample);
69252-
69253-	ret = 0;
69254-label_return:
69255-	return ret;
69256-}
69257-
69258-CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
69259-CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
69260-
69261-static int
69262-prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
69263-    size_t *oldlenp, void *newp, size_t newlen) {
69264-	int ret;
69265-
69266-	const char *filename = NULL;
69267-
69268-	if (!config_prof || !opt_prof) {
69269-		return ENOENT;
69270-	}
69271-
69272-	WRITEONLY();
69273-	WRITE(filename, const char *);
69274-
69275-	if (prof_log_start(tsd_tsdn(tsd), filename)) {
69276-		ret = EFAULT;
69277-		goto label_return;
69278-	}
69279-
69280-	ret = 0;
69281-label_return:
69282-	return ret;
69283-}
69284-
69285-static int
69286-prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
69287-    size_t *oldlenp, void *newp, size_t newlen) {
69288-	if (!config_prof || !opt_prof) {
69289-		return ENOENT;
69290-	}
69291-
69292-	if (prof_log_stop(tsd_tsdn(tsd))) {
69293-		return EFAULT;
69294-	}
69295-
69296-	return 0;
69297-}
69298-
69299-static int
69300-experimental_hooks_prof_backtrace_ctl(tsd_t *tsd, const size_t *mib,
69301-    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
69302-	int ret;
69303-
69304-	if (oldp == NULL && newp == NULL) {
69305-		ret = EINVAL;
69306-		goto label_return;
69307-	}
69308-	if (oldp != NULL) {
69309-		prof_backtrace_hook_t old_hook =
69310-		    prof_backtrace_hook_get();
69311-		READ(old_hook, prof_backtrace_hook_t);
69312-	}
69313-	if (newp != NULL) {
69314-		if (!opt_prof) {
69315-			ret = ENOENT;
69316-			goto label_return;
69317-		}
69318-		prof_backtrace_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL);
69319-		WRITE(new_hook, prof_backtrace_hook_t);
69320-		if (new_hook == NULL) {
69321-			ret = EINVAL;
69322-			goto label_return;
69323-		}
69324-		prof_backtrace_hook_set(new_hook);
69325-	}
69326-	ret = 0;
69327-label_return:
69328-	return ret;
69329-}
69330-
69331-static int
69332-experimental_hooks_prof_dump_ctl(tsd_t *tsd, const size_t *mib,
69333-    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
69334-	int ret;
69335-
69336-	if (oldp == NULL && newp == NULL) {
69337-		ret = EINVAL;
69338-		goto label_return;
69339-	}
69340-	if (oldp != NULL) {
69341-		prof_dump_hook_t old_hook =
69342-		    prof_dump_hook_get();
69343-		READ(old_hook, prof_dump_hook_t);
69344-	}
69345-	if (newp != NULL) {
69346-		if (!opt_prof) {
69347-			ret = ENOENT;
69348-			goto label_return;
69349-		}
69350-		prof_dump_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL);
69351-		WRITE(new_hook, prof_dump_hook_t);
69352-		prof_dump_hook_set(new_hook);
69353-	}
69354-	ret = 0;
69355-label_return:
69356-	return ret;
69357-}
69358-
69359-/* For integration test purpose only.  No plan to move out of experimental. */
69360-static int
69361-experimental_hooks_safety_check_abort_ctl(tsd_t *tsd, const size_t *mib,
69362-    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
69363-	int ret;
69364-
69365-	WRITEONLY();
69366-	if (newp != NULL) {
69367-		if (newlen != sizeof(safety_check_abort_hook_t)) {
69368-			ret = EINVAL;
69369-			goto label_return;
69370-		}
69371-		safety_check_abort_hook_t hook JEMALLOC_CC_SILENCE_INIT(NULL);
69372-		WRITE(hook, safety_check_abort_hook_t);
69373-		safety_check_set_abort(hook);
69374-	}
69375-	ret = 0;
69376-label_return:
69377-	return ret;
69378-}
69379-
69380-/******************************************************************************/
69381-
69382-CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
69383-CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
69384-CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
69385-CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t)
69386-CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
69387-CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
69388-CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)
69389-
69390-CTL_RO_CGEN(config_stats, stats_background_thread_num_threads,
69391-    ctl_stats->background_thread.num_threads, size_t)
69392-CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
69393-    ctl_stats->background_thread.num_runs, uint64_t)
69394-CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
69395-    nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)
69396-
69397-CTL_RO_CGEN(config_stats, stats_zero_reallocs,
69398-    atomic_load_zu(&zero_realloc_count, ATOMIC_RELAXED), size_t)
69399-
69400-CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
69401-CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
69402-    ssize_t)
69403-CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms,
69404-    ssize_t)
69405-CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned)
69406-CTL_RO_GEN(stats_arenas_i_uptime,
69407-    nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t)
69408-CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
69409-CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
69410-CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
69411-CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
69412-    arenas_i(mib[2])->astats->astats.mapped, size_t)
69413-CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
69414-    arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.retained, size_t)
69415-CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
69416-    arenas_i(mib[2])->astats->astats.pa_shard_stats.edata_avail, size_t)
69417-
69418-CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
69419-    locked_read_u64_unsynchronized(
69420-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge),
69421-    uint64_t)
69422-CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
69423-    locked_read_u64_unsynchronized(
69424-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise),
69425-    uint64_t)
69426-CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
69427-    locked_read_u64_unsynchronized(
69428-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged),
69429-    uint64_t)
69430-
69431-CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
69432-    locked_read_u64_unsynchronized(
69433-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge),
69434-    uint64_t)
69435-CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
69436-    locked_read_u64_unsynchronized(
69437-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise),
69438-    uint64_t)
69439-CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
69440-    locked_read_u64_unsynchronized(
69441-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged),
69442-    uint64_t)
69443-
69444-CTL_RO_CGEN(config_stats, stats_arenas_i_base,
69445-    arenas_i(mib[2])->astats->astats.base,
69446-    size_t)
69447-CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
69448-    atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
69449-    size_t)
69450-CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
69451-    arenas_i(mib[2])->astats->astats.metadata_thp, size_t)
69452-CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
69453-    arenas_i(mib[2])->astats->astats.tcache_bytes, size_t)
69454-CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_stashed_bytes,
69455-    arenas_i(mib[2])->astats->astats.tcache_stashed_bytes, size_t)
69456-CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
69457-    arenas_i(mib[2])->astats->astats.resident,
69458-    size_t)
69459-CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm,
69460-    atomic_load_zu(
69461-    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.abandoned_vm,
69462-    ATOMIC_RELAXED), size_t)
69463-
69464-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_sec_bytes,
69465-    arenas_i(mib[2])->astats->secstats.bytes, size_t)
69466-
69467-CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
69468-    arenas_i(mib[2])->astats->allocated_small, size_t)
69469-CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
69470-    arenas_i(mib[2])->astats->nmalloc_small, uint64_t)
69471-CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
69472-    arenas_i(mib[2])->astats->ndalloc_small, uint64_t)
69473-CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
69474-    arenas_i(mib[2])->astats->nrequests_small, uint64_t)
69475-CTL_RO_CGEN(config_stats, stats_arenas_i_small_nfills,
69476-    arenas_i(mib[2])->astats->nfills_small, uint64_t)
69477-CTL_RO_CGEN(config_stats, stats_arenas_i_small_nflushes,
69478-    arenas_i(mib[2])->astats->nflushes_small, uint64_t)
69479-CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
69480-    arenas_i(mib[2])->astats->astats.allocated_large, size_t)
69481-CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
69482-    arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
69483-CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
69484-    arenas_i(mib[2])->astats->astats.ndalloc_large, uint64_t)
69485-CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
69486-    arenas_i(mib[2])->astats->astats.nrequests_large, uint64_t)
69487-/*
69488- * Note: "nmalloc_large" here instead of "nfills" in the read.  This is
69489- * intentional (large has no batch fill).
69490- */
69491-CTL_RO_CGEN(config_stats, stats_arenas_i_large_nfills,
69492-    arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
69493-CTL_RO_CGEN(config_stats, stats_arenas_i_large_nflushes,
69494-    arenas_i(mib[2])->astats->astats.nflushes_large, uint64_t)
69495-
69496-/* Lock profiling related APIs below. */
69497-#define RO_MUTEX_CTL_GEN(n, l)						\
69498-CTL_RO_CGEN(config_stats, stats_##n##_num_ops,				\
69499-    l.n_lock_ops, uint64_t)						\
69500-CTL_RO_CGEN(config_stats, stats_##n##_num_wait,				\
69501-    l.n_wait_times, uint64_t)						\
69502-CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq,			\
69503-    l.n_spin_acquired, uint64_t)					\
69504-CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch,			\
69505-    l.n_owner_switches, uint64_t) 					\
69506-CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time,			\
69507-    nstime_ns(&l.tot_wait_time), uint64_t)				\
69508-CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time,			\
69509-    nstime_ns(&l.max_wait_time), uint64_t)				\
69510-CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds,			\
69511-    l.max_n_thds, uint32_t)
69512-
69513-/* Global mutexes. */
69514-#define OP(mtx)								\
69515-    RO_MUTEX_CTL_GEN(mutexes_##mtx,					\
69516-        ctl_stats->mutex_prof_data[global_prof_mutex_##mtx])
69517-MUTEX_PROF_GLOBAL_MUTEXES
69518-#undef OP
69519-
69520-/* Per arena mutexes */
69521-#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx,		\
69522-    arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx])
69523-MUTEX_PROF_ARENA_MUTEXES
69524-#undef OP
69525-
69526-/* tcache bin mutex */
69527-RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex,
69528-    arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data)
69529-#undef RO_MUTEX_CTL_GEN
69530-
69531-/* Resets all mutex stats, including global, arena and bin mutexes. */
69532-static int
69533-stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
69534-    size_t miblen, void *oldp, size_t *oldlenp,
69535-    void *newp, size_t newlen) {
69536-	if (!config_stats) {
69537-		return ENOENT;
69538-	}
69539-
69540-	tsdn_t *tsdn = tsd_tsdn(tsd);
69541-
69542-#define MUTEX_PROF_RESET(mtx)						\
69543-    malloc_mutex_lock(tsdn, &mtx);					\
69544-    malloc_mutex_prof_data_reset(tsdn, &mtx);				\
69545-    malloc_mutex_unlock(tsdn, &mtx);
69546-
69547-	/* Global mutexes: ctl and prof. */
69548-	MUTEX_PROF_RESET(ctl_mtx);
69549-	if (have_background_thread) {
69550-		MUTEX_PROF_RESET(background_thread_lock);
69551-	}
69552-	if (config_prof && opt_prof) {
69553-		MUTEX_PROF_RESET(bt2gctx_mtx);
69554-		MUTEX_PROF_RESET(tdatas_mtx);
69555-		MUTEX_PROF_RESET(prof_dump_mtx);
69556-		MUTEX_PROF_RESET(prof_recent_alloc_mtx);
69557-		MUTEX_PROF_RESET(prof_recent_dump_mtx);
69558-		MUTEX_PROF_RESET(prof_stats_mtx);
69559-	}
69560-
69561-	/* Per arena mutexes. */
69562-	unsigned n = narenas_total_get();
69563-
69564-	for (unsigned i = 0; i < n; i++) {
69565-		arena_t *arena = arena_get(tsdn, i, false);
69566-		if (!arena) {
69567-			continue;
69568-		}
69569-		MUTEX_PROF_RESET(arena->large_mtx);
69570-		MUTEX_PROF_RESET(arena->pa_shard.edata_cache.mtx);
69571-		MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_dirty.mtx);
69572-		MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_muzzy.mtx);
69573-		MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_retained.mtx);
69574-		MUTEX_PROF_RESET(arena->pa_shard.pac.decay_dirty.mtx);
69575-		MUTEX_PROF_RESET(arena->pa_shard.pac.decay_muzzy.mtx);
69576-		MUTEX_PROF_RESET(arena->tcache_ql_mtx);
69577-		MUTEX_PROF_RESET(arena->base->mtx);
69578-
69579-		for (szind_t j = 0; j < SC_NBINS; j++) {
69580-			for (unsigned k = 0; k < bin_infos[j].n_shards; k++) {
69581-				bin_t *bin = arena_get_bin(arena, j, k);
69582-				MUTEX_PROF_RESET(bin->lock);
69583-			}
69584-		}
69585-	}
69586-#undef MUTEX_PROF_RESET
69587-	return 0;
69588-}
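
A minimal caller sketch for the reset above (illustrative, not part of the deleted source; it assumes the node is reachable under the usual "stats.mutexes.reset" mallctl name, and that no old or new value is exchanged):

#include <jemalloc/jemalloc.h>

static void
reset_mutex_stats(void) {
	/*
	 * Zero all collected mutex profiling data; the call fails with
	 * ENOENT when stats support is compiled out, per the check above.
	 */
	(void)mallctl("stats.mutexes.reset", NULL, NULL, NULL, 0);
}
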
69589-
69590-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
69591-    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nmalloc, uint64_t)
69592-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
69593-    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.ndalloc, uint64_t)
69594-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
69595-    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nrequests, uint64_t)
69596-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
69597-    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.curregs, size_t)
69598-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
69599-    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nfills, uint64_t)
69600-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
69601-    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nflushes, uint64_t)
69602-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
69603-    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nslabs, uint64_t)
69604-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
69605-    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.reslabs, uint64_t)
69606-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
69607-    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.curslabs, size_t)
69608-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs,
69609-    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nonfull_slabs, size_t)
69610-
69611-static const ctl_named_node_t *
69612-stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
69613-    size_t miblen, size_t j) {
69614-	if (j > SC_NBINS) {
69615-		return NULL;
69616-	}
69617-	return super_stats_arenas_i_bins_j_node;
69618-}
69619-
69620-CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
69621-    locked_read_u64_unsynchronized(
69622-    &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
69623-CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
69624-    locked_read_u64_unsynchronized(
69625-    &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
69626-CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
69627-    locked_read_u64_unsynchronized(
69628-    &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
69629-CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
69630-    arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
69631-
69632-static const ctl_named_node_t *
69633-stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib,
69634-    size_t miblen, size_t j) {
69635-	if (j > SC_NSIZES - SC_NBINS) {
69636-		return NULL;
69637-	}
69638-	return super_stats_arenas_i_lextents_j_node;
69639-}
69640-
69641-CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty,
69642-        arenas_i(mib[2])->astats->estats[mib[4]].ndirty, size_t);
69643-CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nmuzzy,
69644-        arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy, size_t);
69645-CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nretained,
69646-        arenas_i(mib[2])->astats->estats[mib[4]].nretained, size_t);
69647-CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_dirty_bytes,
69648-        arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes, size_t);
69649-CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_muzzy_bytes,
69650-        arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes, size_t);
69651-CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes,
69652-        arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes, size_t);
69653-
69654-static const ctl_named_node_t *
69655-stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
69656-    size_t miblen, size_t j) {
69657-	if (j >= SC_NPSIZES) {
69658-		return NULL;
69659-	}
69660-	return super_stats_arenas_i_extents_j_node;
69661-}
69662-
69663-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurge_passes,
69664-    arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurge_passes, uint64_t);
69665-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurges,
69666-    arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurges, uint64_t);
69667-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nhugifies,
69668-    arenas_i(mib[2])->astats->hpastats.nonderived_stats.nhugifies, uint64_t);
69669-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_ndehugifies,
69670-    arenas_i(mib[2])->astats->hpastats.nonderived_stats.ndehugifies, uint64_t);
69671-
69672-/* Full, nonhuge */
69673-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge,
69674-    arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].npageslabs,
69675-    size_t);
69676-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge,
69677-    arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].nactive, size_t);
69678-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge,
69679-    arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].ndirty, size_t);
69680-
69681-/* Full, huge */
69682-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge,
69683-    arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].npageslabs,
69684-    size_t);
69685-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_huge,
69686-    arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].nactive, size_t);
69687-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_huge,
69688-    arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].ndirty, size_t);
69689-
69690-/* Empty, nonhuge */
69691-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge,
69692-    arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].npageslabs,
69693-    size_t);
69694-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge,
69695-    arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].nactive, size_t);
69696-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge,
69697-    arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].ndirty, size_t);
69698-
69699-/* Empty, huge */
69700-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge,
69701-    arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].npageslabs,
69702-    size_t);
69703-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_huge,
69704-    arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].nactive, size_t);
69705-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge,
69706-    arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].ndirty, size_t);
69707-
69708-/* Nonfull, nonhuge */
69709-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge,
69710-    arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].npageslabs,
69711-    size_t);
69712-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge,
69713-    arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].nactive,
69714-    size_t);
69715-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge,
69716-    arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].ndirty,
69717-    size_t);
69718-
69719-/* Nonfull, huge */
69720-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge,
69721-    arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].npageslabs,
69722-    size_t);
69723-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge,
69724-    arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].nactive,
69725-    size_t);
69726-CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge,
69727-    arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].ndirty,
69728-    size_t);
69729-
69730-static const ctl_named_node_t *
69731-stats_arenas_i_hpa_shard_nonfull_slabs_j_index(tsdn_t *tsdn, const size_t *mib,
69732-    size_t miblen, size_t j) {
69733-	if (j >= PSSET_NPSIZES) {
69734-		return NULL;
69735-	}
69736-	return super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node;
69737-}
69738-
69739-static bool
69740-ctl_arenas_i_verify(size_t i) {
69741-	size_t a = arenas_i2a_impl(i, true, true);
69742-	if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
69743-		return true;
69744-	}
69745-
69746-	return false;
69747-}
69748-
69749-static const ctl_named_node_t *
69750-stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib,
69751-    size_t miblen, size_t i) {
69752-	const ctl_named_node_t *ret;
69753-
69754-	malloc_mutex_lock(tsdn, &ctl_mtx);
69755-	if (ctl_arenas_i_verify(i)) {
69756-		ret = NULL;
69757-		goto label_return;
69758-	}
69759-
69760-	ret = super_stats_arenas_i_node;
69761-label_return:
69762-	malloc_mutex_unlock(tsdn, &ctl_mtx);
69763-	return ret;
69764-}
69765-
69766-static int
69767-experimental_hooks_install_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
69768-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
69769-	int ret;
69770-	if (oldp == NULL || oldlenp == NULL || newp == NULL) {
69771-		ret = EINVAL;
69772-		goto label_return;
69773-	}
69774-	/*
69775-	 * Note: this is a *private* struct.  This is an experimental interface;
69776-	 * forcing the user to know the jemalloc internals well enough to
69777-	 * extract the ABI hopefully ensures nobody gets too comfortable with
69778-	 * this API, which can change at a moment's notice.
69779-	 */
69780-	hooks_t hooks;
69781-	WRITE(hooks, hooks_t);
69782-	void *handle = hook_install(tsd_tsdn(tsd), &hooks);
69783-	if (handle == NULL) {
69784-		ret = EAGAIN;
69785-		goto label_return;
69786-	}
69787-	READ(handle, void *);
69788-
69789-	ret = 0;
69790-label_return:
69791-	return ret;
69792-}
69793-
69794-static int
69795-experimental_hooks_remove_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
69796-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
69797-	int ret;
69798-	WRITEONLY();
69799-	void *handle = NULL;
69800-	WRITE(handle, void *);
69801-	if (handle == NULL) {
69802-		ret = EINVAL;
69803-		goto label_return;
69804-	}
69805-	hook_remove(tsd_tsdn(tsd), handle);
69806-	ret = 0;
69807-label_return:
69808-	return ret;
69809-}
69810-
69811-static int
69812-experimental_thread_activity_callback_ctl(tsd_t *tsd, const size_t *mib,
69813-    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
69814-	int ret;
69815-
69816-	if (!config_stats) {
69817-		return ENOENT;
69818-	}
69819-
69820-	activity_callback_thunk_t t_old = tsd_activity_callback_thunk_get(tsd);
69821-	READ(t_old, activity_callback_thunk_t);
69822-
69823-	if (newp != NULL) {
69824-		/*
69825-		 * This initialization is unnecessary.  If it's omitted, though,
69826-		 * clang gets confused and warns on the subsequent use of t_new.
69827-		 */
69828-		activity_callback_thunk_t t_new = {NULL, NULL};
69829-		WRITE(t_new, activity_callback_thunk_t);
69830-		tsd_activity_callback_thunk_set(tsd, t_new);
69831-	}
69832-	ret = 0;
69833-label_return:
69834-	return ret;
69835-}
69836-
69837-/*
69838- * Output six memory utilization entries for an input pointer, the first one of
69839- * type (void *) and the remaining five of type size_t, describing the following
69840- * (in the same order):
69841- *
69842- * (a) memory address of the extent a potential reallocation would go into,
69843- * == the five fields below describe the extent the pointer resides in ==
69844- * (b) number of free regions in the extent,
69845- * (c) number of regions in the extent,
69846- * (d) size of the extent in terms of bytes,
69847- * (e) total number of free regions in the bin the extent belongs to, and
69848- * (f) total number of regions in the bin the extent belongs to.
69849- *
69850- * Note that "(e)" and "(f)" are only available when stats are enabled;
69851- * otherwise their values are undefined.
69852- *
69853- * This API is mainly intended for small class allocations, where extents are
69854- * used as slabs.  Note that if the bin the extent belongs to is completely
69855- * full, "(a)" will be NULL.
69856- *
69857- * In case of large class allocations, "(a)" will be NULL, and "(e)" and "(f)"
69858- * will be zero (if stats are enabled; otherwise undefined).  The other three
69859- * fields will be properly set though the values are trivial: "(b)" will be 0,
69860- * "(c)" will be 1, and "(d)" will be the usable size.
69861- *
69862- * The input pointer and size are respectively passed in by newp and newlen,
69863- * and the output fields and size are respectively oldp and *oldlenp.
69864- *
69865- * It can be beneficial to define the following macros to make it easier to
69866- * access the output:
69867- *
69868- * #define SLABCUR_READ(out) (*(void **)out)
69869- * #define COUNTS(out) ((size_t *)((void **)out + 1))
69870- * #define NFREE_READ(out) COUNTS(out)[0]
69871- * #define NREGS_READ(out) COUNTS(out)[1]
69872- * #define SIZE_READ(out) COUNTS(out)[2]
69873- * #define BIN_NFREE_READ(out) COUNTS(out)[3]
69874- * #define BIN_NREGS_READ(out) COUNTS(out)[4]
69875- *
69876- * and then write e.g. NFREE_READ(oldp) to fetch the output.  See the unit test
69877- * test_query in test/unit/extent_util.c for an example.
69878- *
69879- * For a typical defragmentation workflow making use of this API for
69880- * understanding the fragmentation level, please refer to the comment for
69881- * experimental_utilization_batch_query_ctl.
69882- *
69883- * It's up to the application to determine the significance of
69884- * fragmentation based on the outputs returned.  Possible choices are:
69885- *
69886- * (a) if extent utilization ratio is below a certain threshold,
69887- * (b) if extent memory consumption is above a certain threshold,
69888- * (c) if extent utilization ratio is significantly below bin utilization ratio,
69889- * (d) if input pointer deviates a lot from potential reallocation address, or
69890- * (e) some selection/combination of the above.
69891- *
69892- * The caller needs to make sure that the input/output arguments are valid,
69893- * in particular, that the size of the output is correct, i.e.:
69894- *
69895- *     *oldlenp = sizeof(void *) + sizeof(size_t) * 5
69896- *
69897- * Otherwise, the function immediately returns EINVAL without touching anything.
69898- *
69899- * In the rare case where there's no associated extent found for the input
69900- * pointer, the function zeros out all output fields and returns.  Please refer
69901- * to the comment for experimental_utilization_batch_query_ctl to understand the
69902- * motivation from C++.
69903- */
69904-static int
69905-experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib,
69906-    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
69907-	int ret;
69908-
69909-	assert(sizeof(inspect_extent_util_stats_verbose_t)
69910-	    == sizeof(void *) + sizeof(size_t) * 5);
69911-
69912-	if (oldp == NULL || oldlenp == NULL
69913-	    || *oldlenp != sizeof(inspect_extent_util_stats_verbose_t)
69914-	    || newp == NULL) {
69915-		ret = EINVAL;
69916-		goto label_return;
69917-	}
69918-
69919-	void *ptr = NULL;
69920-	WRITE(ptr, void *);
69921-	inspect_extent_util_stats_verbose_t *util_stats
69922-	    = (inspect_extent_util_stats_verbose_t *)oldp;
69923-	inspect_extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr,
69924-	    &util_stats->nfree, &util_stats->nregs, &util_stats->size,
69925-	    &util_stats->bin_nfree, &util_stats->bin_nregs,
69926-	    &util_stats->slabcur_addr);
69927-	ret = 0;
69928-
69929-label_return:
69930-	return ret;
69931-}
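
To make the documented output layout concrete, here is a minimal caller sketch (not part of the deleted source; it assumes the node is reachable as "experimental.utilization.query" and that print_utilization is a hypothetical helper):

#include <jemalloc/jemalloc.h>
#include <stdio.h>

static void
print_utilization(void *ptr) {
	/* One (void *) followed by five size_t fields, as documented above. */
	char out[sizeof(void *) + sizeof(size_t) * 5];
	size_t out_len = sizeof(out);

	if (mallctl("experimental.utilization.query", out, &out_len,
	    &ptr, sizeof(ptr)) != 0) {
		return;
	}
	void *slabcur = *(void **)out;
	size_t *counts = (size_t *)((void **)out + 1);
	printf("slabcur=%p nfree=%zu nregs=%zu size=%zu bin_nfree=%zu "
	    "bin_nregs=%zu\n", slabcur, counts[0], counts[1], counts[2],
	    counts[3], counts[4]);
}
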
69932-
69933-/*
69934- * Given an input array of pointers, output three memory utilization entries of
69935- * type size_t for each input pointer about the extent it resides in:
69936- *
69937- * (a) number of free regions in the extent,
69938- * (b) number of regions in the extent, and
69939- * (c) size of the extent in terms of bytes.
69940- *
69941- * This API is mainly intended for small class allocations, where extents are
69942- * used as slabs.  In case of large class allocations, the outputs are trivial:
69943- * "(a)" will be 0, "(b)" will be 1, and "(c)" will be the usable size.
69944- *
69945- * Note that multiple input pointers may reside on the same extent, so the output
69946- * fields may contain duplicates.
69947- *
69948- * The format of the input/output looks like:
69949- *
69950- * input[0]:  1st_pointer_to_query	|  output[0]: 1st_extent_n_free_regions
69951- *					|  output[1]: 1st_extent_n_regions
69952- *					|  output[2]: 1st_extent_size
69953- * input[1]:  2nd_pointer_to_query	|  output[3]: 2nd_extent_n_free_regions
69954- *					|  output[4]: 2nd_extent_n_regions
69955- *					|  output[5]: 2nd_extent_size
69956- * ...					|  ...
69957- *
69958- * The input array and size are respectively passed in by newp and newlen, and
69959- * the output array and size are respectively oldp and *oldlenp.
69960- *
69961- * It can be beneficial to define the following macros to make it easier to
69962- * access the output:
69963- *
69964- * #define NFREE_READ(out, i) out[(i) * 3]
69965- * #define NREGS_READ(out, i) out[(i) * 3 + 1]
69966- * #define SIZE_READ(out, i) out[(i) * 3 + 2]
69967- *
69968- * and then write e.g. NFREE_READ(oldp, i) to fetch the output.  See the unit
69969- * test test_batch in test/unit/extent_util.c for a concrete example.
69970- *
69971- * A typical workflow would be composed of the following steps:
69972- *
69973- * (1) flush tcache: mallctl("thread.tcache.flush", ...)
69974- * (2) initialize input array of pointers to query fragmentation
69975- * (3) allocate output array to hold utilization statistics
69976- * (4) query utilization: mallctl("experimental.utilization.batch_query", ...)
69977- * (5) (optional) decide if it's worthwhile to defragment; otherwise stop here
69978- * (6) disable tcache: mallctl("thread.tcache.enabled", ...)
69979- * (7) defragment allocations with significant fragmentation, e.g.:
69980- *         for each allocation {
69981- *             if it's fragmented {
69982- *                 malloc(...);
69983- *                 memcpy(...);
69984- *                 free(...);
69985- *             }
69986- *         }
69987- * (8) enable tcache: mallctl("thread.tcache.enabled", ...)
69988- *
69989- * The application can determine the significance of fragmentation itself,
69990- * based on the statistics returned, both at the overall level, i.e. step "(5)",
69991- * and at the individual allocation level, i.e. within step "(7)".  Possible
69992- * choices are:
69993- *
69994- * (a) whether the memory utilization ratio is below a certain threshold,
69995- * (b) whether memory consumption is above a certain threshold, or
69996- * (c) some combination of the two.
69997- *
69998- * The caller needs to make sure that the input/output arrays are valid and
69999- * their sizes are proper as well as matched, meaning:
70000- *
70001- * (a) newlen = n_pointers * sizeof(const void *)
70002- * (b) *oldlenp = n_pointers * sizeof(size_t) * 3
70003- * (c) n_pointers > 0
70004- *
70005- * Otherwise, the function immediately returns EINVAL without touching anything.
70006- *
70007- * In the rare case where there's no associated extent found for some pointers,
70008- * rather than immediately terminating the computation and raising an error,
70009- * the function simply zeros out the corresponding output fields and continues
70010- * the computation until all input pointers are handled.  The motivations for
70011- * such a design are as follows:
70012- *
70013- * (a) The function always either processes nothing or processes everything, and
70014- * never leaves the output half touched and half untouched.
70015- *
70016- * (b) It facilitates usage needs especially common in C++.  A vast variety of
70017- * C++ objects are instantiated with multiple dynamic memory allocations.  For
70018- * example, std::string and std::vector typically use at least two allocations,
70019- * one for the metadata and one for the actual content.  Other types may use
70020- * even more allocations.  When inquiring about utilization statistics, the
70021- * caller often wants to examine all such allocations, especially internal
70022- * one(s), rather than just the topmost one.  The issue arises when some
70023- * implementations apply optimizations to reduce/aggregate some internal
70024- * allocations, e.g. putting short strings directly into the metadata, and such
70025- * decisions are not known to the caller.  Therefore, we permit pointers to
70026- * memory regions that may not have been returned by previous malloc calls, and
70027- * we provide the caller a convenient way to identify such cases.
70028- */
70029-static int
70030-experimental_utilization_batch_query_ctl(tsd_t *tsd, const size_t *mib,
70031-    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
70032-	int ret;
70033-
70034-	assert(sizeof(inspect_extent_util_stats_t) == sizeof(size_t) * 3);
70035-
70036-	const size_t len = newlen / sizeof(const void *);
70037-	if (oldp == NULL || oldlenp == NULL || newp == NULL || newlen == 0
70038-	    || newlen != len * sizeof(const void *)
70039-	    || *oldlenp != len * sizeof(inspect_extent_util_stats_t)) {
70040-		ret = EINVAL;
70041-		goto label_return;
70042-	}
70043-
70044-	void **ptrs = (void **)newp;
70045-	inspect_extent_util_stats_t *util_stats =
70046-	    (inspect_extent_util_stats_t *)oldp;
70047-	size_t i;
70048-	for (i = 0; i < len; ++i) {
70049-		inspect_extent_util_stats_get(tsd_tsdn(tsd), ptrs[i],
70050-		    &util_stats[i].nfree, &util_stats[i].nregs,
70051-		    &util_stats[i].size);
70052-	}
70053-	ret = 0;
70054-
70055-label_return:
70056-	return ret;
70057-}
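
A minimal sketch of the workflow's query step "(4)" above, using the mallctl name given in the comment and the documented input/output layout (the query_batch helper itself is illustrative, not part of the deleted source):

#include <jemalloc/jemalloc.h>

/* Three size_t outputs (nfree, nregs, size) per input pointer. */
static int
query_batch(const void **ptrs, size_t n_pointers, size_t *out) {
	size_t newlen = n_pointers * sizeof(const void *);
	size_t oldlen = n_pointers * sizeof(size_t) * 3;

	/* e.g. out[i * 3] is the free-region count for ptrs[i]. */
	return mallctl("experimental.utilization.batch_query", out, &oldlen,
	    (void *)ptrs, newlen);
}
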
70058-
70059-static const ctl_named_node_t *
70060-experimental_arenas_i_index(tsdn_t *tsdn, const size_t *mib,
70061-    size_t miblen, size_t i) {
70062-	const ctl_named_node_t *ret;
70063-
70064-	malloc_mutex_lock(tsdn, &ctl_mtx);
70065-	if (ctl_arenas_i_verify(i)) {
70066-		ret = NULL;
70067-		goto label_return;
70068-	}
70069-	ret = super_experimental_arenas_i_node;
70070-label_return:
70071-	malloc_mutex_unlock(tsdn, &ctl_mtx);
70072-	return ret;
70073-}
70074-
70075-static int
70076-experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib,
70077-    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
70078-	if (!config_stats) {
70079-		return ENOENT;
70080-	}
70081-	if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(size_t *)) {
70082-		return EINVAL;
70083-	}
70084-
70085-	unsigned arena_ind;
70086-	arena_t *arena;
70087-	int ret;
70088-	size_t *pactivep;
70089-
70090-	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
70091-	READONLY();
70092-	MIB_UNSIGNED(arena_ind, 2);
70093-	if (arena_ind < narenas_total_get() && (arena =
70094-	    arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
70095-#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) ||				\
70096-    defined(JEMALLOC_GCC_SYNC_ATOMICS) || defined(_MSC_VER)
70097-		/* Expose the underlying counter for fast read. */
70098-		pactivep = (size_t *)&(arena->pa_shard.nactive.repr);
70099-		READ(pactivep, size_t *);
70100-		ret = 0;
70101-#else
70102-		ret = EFAULT;
70103-#endif
70104-	} else {
70105-		ret = EFAULT;
70106-	}
70107-label_return:
70108-	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
70109-	return ret;
70110-}
70111-
70112-static int
70113-experimental_prof_recent_alloc_max_ctl(tsd_t *tsd, const size_t *mib,
70114-    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
70115-	int ret;
70116-
70117-	if (!(config_prof && opt_prof)) {
70118-		ret = ENOENT;
70119-		goto label_return;
70120-	}
70121-
70122-	ssize_t old_max;
70123-	if (newp != NULL) {
70124-		if (newlen != sizeof(ssize_t)) {
70125-			ret = EINVAL;
70126-			goto label_return;
70127-		}
70128-		ssize_t max = *(ssize_t *)newp;
70129-		if (max < -1) {
70130-			ret = EINVAL;
70131-			goto label_return;
70132-		}
70133-		old_max = prof_recent_alloc_max_ctl_write(tsd, max);
70134-	} else {
70135-		old_max = prof_recent_alloc_max_ctl_read();
70136-	}
70137-	READ(old_max, ssize_t);
70138-
70139-	ret = 0;
70140-
70141-label_return:
70142-	return ret;
70143-}
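
A hedged usage sketch of the read/write behavior above (the node name "experimental.prof_recent.alloc_max" and the helper are assumptions, not taken from the deleted source):

#include <jemalloc/jemalloc.h>

static ssize_t
set_recent_alloc_max(ssize_t new_max) {
	ssize_t old_max;
	size_t old_sz = sizeof(old_max);

	/*
	 * Read the previous limit and install the new one in a single call;
	 * values below -1 are rejected with EINVAL, per the check above.
	 */
	if (mallctl("experimental.prof_recent.alloc_max", &old_max, &old_sz,
	    &new_max, sizeof(new_max)) != 0) {
		return -1;
	}
	return old_max;
}
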
70144-
70145-typedef struct write_cb_packet_s write_cb_packet_t;
70146-struct write_cb_packet_s {
70147-	write_cb_t *write_cb;
70148-	void *cbopaque;
70149-};
70150-
70151-static int
70152-experimental_prof_recent_alloc_dump_ctl(tsd_t *tsd, const size_t *mib,
70153-    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
70154-	int ret;
70155-
70156-	if (!(config_prof && opt_prof)) {
70157-		ret = ENOENT;
70158-		goto label_return;
70159-	}
70160-
70161-	assert(sizeof(write_cb_packet_t) == sizeof(void *) * 2);
70162-
70163-	WRITEONLY();
70164-	write_cb_packet_t write_cb_packet;
70165-	ASSURED_WRITE(write_cb_packet, write_cb_packet_t);
70166-
70167-	prof_recent_alloc_dump(tsd, write_cb_packet.write_cb,
70168-	    write_cb_packet.cbopaque);
70169-
70170-	ret = 0;
70171-
70172-label_return:
70173-	return ret;
70174-}
70175-
70176-typedef struct batch_alloc_packet_s batch_alloc_packet_t;
70177-struct batch_alloc_packet_s {
70178-	void **ptrs;
70179-	size_t num;
70180-	size_t size;
70181-	int flags;
70182-};
70183-
70184-static int
70185-experimental_batch_alloc_ctl(tsd_t *tsd, const size_t *mib,
70186-    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
70187-	int ret;
70188-
70189-	VERIFY_READ(size_t);
70190-
70191-	batch_alloc_packet_t batch_alloc_packet;
70192-	ASSURED_WRITE(batch_alloc_packet, batch_alloc_packet_t);
70193-	size_t filled = batch_alloc(batch_alloc_packet.ptrs,
70194-	    batch_alloc_packet.num, batch_alloc_packet.size,
70195-	    batch_alloc_packet.flags);
70196-	READ(filled, size_t);
70197-
70198-	ret = 0;
70199-
70200-label_return:
70201-	return ret;
70202-}
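
Since batch_alloc_packet_t is a private struct, a caller would have to mirror its layout; an illustrative sketch follows (the node name "experimental.batch_alloc", the mirrored typedef, and the batch_alloc_64 helper are assumptions, not taken from the deleted source):

#include <jemalloc/jemalloc.h>
#include <stddef.h>

/* Caller-side mirror of the private batch_alloc_packet_s layout above. */
typedef struct {
	void **ptrs;
	size_t num;
	size_t size;
	int flags;
} my_batch_alloc_packet_t;

static size_t
batch_alloc_64(void **ptrs, size_t num) {
	my_batch_alloc_packet_t packet = {ptrs, num, 64, 0};
	size_t filled = 0;
	size_t filled_sz = sizeof(filled);

	/* On success, ptrs[0..filled) hold the new 64-byte allocations. */
	if (mallctl("experimental.batch_alloc", &filled, &filled_sz,
	    &packet, sizeof(packet)) != 0) {
		return 0;
	}
	return filled;
}
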
70203-
70204-static int
70205-prof_stats_bins_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
70206-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
70207-	int ret;
70208-	unsigned binind;
70209-	prof_stats_t stats;
70210-
70211-	if (!(config_prof && opt_prof && opt_prof_stats)) {
70212-		ret = ENOENT;
70213-		goto label_return;
70214-	}
70215-
70216-	READONLY();
70217-	MIB_UNSIGNED(binind, 3);
70218-	if (binind >= SC_NBINS) {
70219-		ret = EINVAL;
70220-		goto label_return;
70221-	}
70222-	prof_stats_get_live(tsd, (szind_t)binind, &stats);
70223-	READ(stats, prof_stats_t);
70224-
70225-	ret = 0;
70226-label_return:
70227-	return ret;
70228-}
70229-
70230-static int
70231-prof_stats_bins_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
70232-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
70233-	int ret;
70234-	unsigned binind;
70235-	prof_stats_t stats;
70236-
70237-	if (!(config_prof && opt_prof && opt_prof_stats)) {
70238-		ret = ENOENT;
70239-		goto label_return;
70240-	}
70241-
70242-	READONLY();
70243-	MIB_UNSIGNED(binind, 3);
70244-	if (binind >= SC_NBINS) {
70245-		ret = EINVAL;
70246-		goto label_return;
70247-	}
70248-	prof_stats_get_accum(tsd, (szind_t)binind, &stats);
70249-	READ(stats, prof_stats_t);
70250-
70251-	ret = 0;
70252-label_return:
70253-	return ret;
70254-}
70255-
70256-static const ctl_named_node_t *
70257-prof_stats_bins_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
70258-    size_t i) {
70259-	if (!(config_prof && opt_prof && opt_prof_stats)) {
70260-		return NULL;
70261-	}
70262-	if (i >= SC_NBINS) {
70263-		return NULL;
70264-	}
70265-	return super_prof_stats_bins_i_node;
70266-}
70267-
70268-static int
70269-prof_stats_lextents_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
70270-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
70271-	int ret;
70272-	unsigned lextent_ind;
70273-	prof_stats_t stats;
70274-
70275-	if (!(config_prof && opt_prof && opt_prof_stats)) {
70276-		ret = ENOENT;
70277-		goto label_return;
70278-	}
70279-
70280-	READONLY();
70281-	MIB_UNSIGNED(lextent_ind, 3);
70282-	if (lextent_ind >= SC_NSIZES - SC_NBINS) {
70283-		ret = EINVAL;
70284-		goto label_return;
70285-	}
70286-	prof_stats_get_live(tsd, (szind_t)(lextent_ind + SC_NBINS), &stats);
70287-	READ(stats, prof_stats_t);
70288-
70289-	ret = 0;
70290-label_return:
70291-	return ret;
70292-}
70293-
70294-static int
70295-prof_stats_lextents_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
70296-    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
70297-	int ret;
70298-	unsigned lextent_ind;
70299-	prof_stats_t stats;
70300-
70301-	if (!(config_prof && opt_prof && opt_prof_stats)) {
70302-		ret = ENOENT;
70303-		goto label_return;
70304-	}
70305-
70306-	READONLY();
70307-	MIB_UNSIGNED(lextent_ind, 3);
70308-	if (lextent_ind >= SC_NSIZES - SC_NBINS) {
70309-		ret = EINVAL;
70310-		goto label_return;
70311-	}
70312-	prof_stats_get_accum(tsd, (szind_t)(lextent_ind + SC_NBINS), &stats);
70313-	READ(stats, prof_stats_t);
70314-
70315-	ret = 0;
70316-label_return:
70317-	return ret;
70318-}
70319-
70320-static const ctl_named_node_t *
70321-prof_stats_lextents_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
70322-    size_t i) {
70323-	if (!(config_prof && opt_prof && opt_prof_stats)) {
70324-		return NULL;
70325-	}
70326-	if (i >= SC_NSIZES - SC_NBINS) {
70327-		return NULL;
70328-	}
70329-	return super_prof_stats_lextents_i_node;
70330-}
70331diff --git a/jemalloc/src/decay.c b/jemalloc/src/decay.c
70332deleted file mode 100644
70333index d801b2b..0000000
70334--- a/jemalloc/src/decay.c
70335+++ /dev/null
70336@@ -1,295 +0,0 @@
70337-#include "jemalloc/internal/jemalloc_preamble.h"
70338-#include "jemalloc/internal/jemalloc_internal_includes.h"
70339-
70340-#include "jemalloc/internal/decay.h"
70341-
70342-static const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
70343-#define STEP(step, h, x, y)			\
70344-		h,
70345-		SMOOTHSTEP
70346-#undef STEP
70347-};
70348-
70349-/*
70350- * Generate a new deadline that is uniformly random within the next epoch after
70351- * the current one.
70352- */
70353-void
70354-decay_deadline_init(decay_t *decay) {
70355-	nstime_copy(&decay->deadline, &decay->epoch);
70356-	nstime_add(&decay->deadline, &decay->interval);
70357-	if (decay_ms_read(decay) > 0) {
70358-		nstime_t jitter;
70359-
70360-		nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
70361-		    nstime_ns(&decay->interval)));
70362-		nstime_add(&decay->deadline, &jitter);
70363-	}
70364-}
70365-
70366-void
70367-decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms) {
70368-	atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
70369-	if (decay_ms > 0) {
70370-		nstime_init(&decay->interval, (uint64_t)decay_ms *
70371-		    KQU(1000000));
70372-		nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
70373-	}
70374-
70375-	nstime_copy(&decay->epoch, cur_time);
70376-	decay->jitter_state = (uint64_t)(uintptr_t)decay;
70377-	decay_deadline_init(decay);
70378-	decay->nunpurged = 0;
70379-	memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
70380-}
70381-
70382-bool
70383-decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms) {
70384-	if (config_debug) {
70385-		for (size_t i = 0; i < sizeof(decay_t); i++) {
70386-			assert(((char *)decay)[i] == 0);
70387-		}
70388-		decay->ceil_npages = 0;
70389-	}
70390-	if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
70391-	    malloc_mutex_rank_exclusive)) {
70392-		return true;
70393-	}
70394-	decay->purging = false;
70395-	decay_reinit(decay, cur_time, decay_ms);
70396-	return false;
70397-}
70398-
70399-bool
70400-decay_ms_valid(ssize_t decay_ms) {
70401-	if (decay_ms < -1) {
70402-		return false;
70403-	}
70404-	if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
70405-	    KQU(1000)) {
70406-		return true;
70407-	}
70408-	return false;
70409-}
70410-
70411-static void
70412-decay_maybe_update_time(decay_t *decay, nstime_t *new_time) {
70413-	if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch,
70414-	    new_time) > 0)) {
70415-		/*
70416-		 * Time went backwards.  Move the epoch back in time and
70417-		 * generate a new deadline, with the expectation that time
70418-		 * typically flows forward for long enough periods of time that
70419-		 * epochs complete.  Unfortunately, this strategy is susceptible
70420-		 * to clock jitter triggering premature epoch advances, but
70421-		 * clock jitter estimation and compensation isn't feasible here
70422-		 * because calls into this code are event-driven.
70423-		 */
70424-		nstime_copy(&decay->epoch, new_time);
70425-		decay_deadline_init(decay);
70426-	} else {
70427-		/* Verify that time does not go backwards. */
70428-		assert(nstime_compare(&decay->epoch, new_time) <= 0);
70429-	}
70430-}
70431-
70432-static size_t
70433-decay_backlog_npages_limit(const decay_t *decay) {
70434-	/*
70435-	 * For each element of decay_backlog, multiply by the corresponding
70436-	 * fixed-point smoothstep decay factor.  Sum the products, then divide
70437-	 * to round down to the nearest whole number of pages.
70438-	 */
70439-	uint64_t sum = 0;
70440-	for (unsigned i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
70441-		sum += decay->backlog[i] * h_steps[i];
70442-	}
70443-	size_t npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
70444-
70445-	return npages_limit_backlog;
70446-}
70447-
70448-/*
70449- * Update backlog, assuming that 'nadvance_u64' time intervals have passed.
70450- * The oldest 'nadvance_u64' records are dropped, and the delta of
70451- * 'current_npages' over the previously unpurged count becomes the newest record.
70452- */
70453-static void
70454-decay_backlog_update(decay_t *decay, uint64_t nadvance_u64,
70455-    size_t current_npages) {
70456-	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
70457-		memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
70458-		    sizeof(size_t));
70459-	} else {
70460-		size_t nadvance_z = (size_t)nadvance_u64;
70461-
70462-		assert((uint64_t)nadvance_z == nadvance_u64);
70463-
70464-		memmove(decay->backlog, &decay->backlog[nadvance_z],
70465-		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
70466-		if (nadvance_z > 1) {
70467-			memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
70468-			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
70469-		}
70470-	}
70471-
70472-	size_t npages_delta = (current_npages > decay->nunpurged) ?
70473-	    current_npages - decay->nunpurged : 0;
70474-	decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
70475-
70476-	if (config_debug) {
70477-		if (current_npages > decay->ceil_npages) {
70478-			decay->ceil_npages = current_npages;
70479-		}
70480-		size_t npages_limit = decay_backlog_npages_limit(decay);
70481-		assert(decay->ceil_npages >= npages_limit);
70482-		if (decay->ceil_npages > npages_limit) {
70483-			decay->ceil_npages = npages_limit;
70484-		}
70485-	}
70486-}
70487-
70488-static inline bool
70489-decay_deadline_reached(const decay_t *decay, const nstime_t *time) {
70490-	return (nstime_compare(&decay->deadline, time) <= 0);
70491-}
70492-
70493-uint64_t
70494-decay_npages_purge_in(decay_t *decay, nstime_t *time, size_t npages_new) {
70495-	uint64_t decay_interval_ns = decay_epoch_duration_ns(decay);
70496-	size_t n_epoch = (size_t)(nstime_ns(time) / decay_interval_ns);
70497-
70498-	uint64_t npages_purge;
70499-	if (n_epoch >= SMOOTHSTEP_NSTEPS) {
70500-		npages_purge = npages_new;
70501-	} else {
70502-		uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
70503-		assert(h_steps_max >=
70504-		    h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
70505-		npages_purge = npages_new * (h_steps_max -
70506-		    h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
70507-		npages_purge >>= SMOOTHSTEP_BFP;
70508-	}
70509-	return npages_purge;
70510-}
70511-
70512-bool
70513-decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
70514-    size_t npages_current) {
70515-	/* Handle possible non-monotonicity of time. */
70516-	decay_maybe_update_time(decay, new_time);
70517-
70518-	if (!decay_deadline_reached(decay, new_time)) {
70519-		return false;
70520-	}
70521-	nstime_t delta;
70522-	nstime_copy(&delta, new_time);
70523-	nstime_subtract(&delta, &decay->epoch);
70524-
70525-	uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
70526-	assert(nadvance_u64 > 0);
70527-
70528-	/* Add nadvance_u64 decay intervals to epoch. */
70529-	nstime_copy(&delta, &decay->interval);
70530-	nstime_imultiply(&delta, nadvance_u64);
70531-	nstime_add(&decay->epoch, &delta);
70532-
70533-	/* Set a new deadline. */
70534-	decay_deadline_init(decay);
70535-
70536-	/* Update the backlog. */
70537-	decay_backlog_update(decay, nadvance_u64, npages_current);
70538-
70539-	decay->npages_limit = decay_backlog_npages_limit(decay);
70540-	decay->nunpurged = (decay->npages_limit > npages_current) ?
70541-	    decay->npages_limit : npages_current;
70542-
70543-	return true;
70544-}
70545-
70546-/*
70547- * Calculate how many pages should be purged after 'interval'.
70548- *
70549- * First, calculate how many pages should remain at the moment, then subtract
70550- * the number of pages that should remain after 'interval'. The difference is
70551- * how many pages should be purged until then.
70552- *
70553- * The number of pages that should remain at a specific moment is calculated
70554- * like this: pages(now) = sum(backlog[i] * h_steps[i]). After 'interval'
70555- * passes, backlog would shift 'interval' positions to the left and sigmoid
70556- * curve would be applied starting with backlog[interval].
70557- *
70558- * The implementation doesn't directly map to the description, but it's
70559- * essentially the same calculation, optimized to avoid iterating over
70560- * [interval..SMOOTHSTEP_NSTEPS) twice.
70561- */
70562-static inline size_t
70563-decay_npurge_after_interval(decay_t *decay, size_t interval) {
70564-	size_t i;
70565-	uint64_t sum = 0;
70566-	for (i = 0; i < interval; i++) {
70567-		sum += decay->backlog[i] * h_steps[i];
70568-	}
70569-	for (; i < SMOOTHSTEP_NSTEPS; i++) {
70570-		sum += decay->backlog[i] *
70571-		    (h_steps[i] - h_steps[i - interval]);
70572-	}
70573-
70574-	return (size_t)(sum >> SMOOTHSTEP_BFP);
70575-}
70576-
70577-uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current,
70578-    uint64_t npages_threshold) {
70579-	if (!decay_gradually(decay)) {
70580-		return DECAY_UNBOUNDED_TIME_TO_PURGE;
70581-	}
70582-	uint64_t decay_interval_ns = decay_epoch_duration_ns(decay);
70583-	assert(decay_interval_ns > 0);
70584-	if (npages_current == 0) {
70585-		unsigned i;
70586-		for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
70587-			if (decay->backlog[i] > 0) {
70588-				break;
70589-			}
70590-		}
70591-		if (i == SMOOTHSTEP_NSTEPS) {
70592-			/* No dirty pages recorded.  Sleep indefinitely. */
70593-			return DECAY_UNBOUNDED_TIME_TO_PURGE;
70594-		}
70595-	}
70596-	if (npages_current <= npages_threshold) {
70597-		/* Use max interval. */
70598-		return decay_interval_ns * SMOOTHSTEP_NSTEPS;
70599-	}
70600-
70601-	/* Minimal 2 intervals to ensure reaching next epoch deadline. */
70602-	size_t lb = 2;
70603-	size_t ub = SMOOTHSTEP_NSTEPS;
70604-
70605-	size_t npurge_lb, npurge_ub;
70606-	npurge_lb = decay_npurge_after_interval(decay, lb);
70607-	if (npurge_lb > npages_threshold) {
70608-		return decay_interval_ns * lb;
70609-	}
70610-	npurge_ub = decay_npurge_after_interval(decay, ub);
70611-	if (npurge_ub < npages_threshold) {
70612-		return decay_interval_ns * ub;
70613-	}
70614-
70615-	unsigned n_search = 0;
70616-	size_t target, npurge;
70617-	while ((npurge_lb + npages_threshold < npurge_ub) && (lb + 2 < ub)) {
70618-		target = (lb + ub) / 2;
70619-		npurge = decay_npurge_after_interval(decay, target);
70620-		if (npurge > npages_threshold) {
70621-			ub = target;
70622-			npurge_ub = npurge;
70623-		} else {
70624-			lb = target;
70625-			npurge_lb = npurge;
70626-		}
70627-		assert(n_search < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
70628-		++n_search;
70629-	}
70630-	return decay_interval_ns * (ub + lb) / 2;
70631-}
70632diff --git a/jemalloc/src/div.c b/jemalloc/src/div.c
70633deleted file mode 100644
70634index 808892a..0000000
70635--- a/jemalloc/src/div.c
70636+++ /dev/null
70637@@ -1,55 +0,0 @@
70638-#include "jemalloc/internal/jemalloc_preamble.h"
70639-
70640-#include "jemalloc/internal/div.h"
70641-
70642-#include "jemalloc/internal/assert.h"
70643-
70644-/*
70645- * Suppose we have n = q * d, all integers. We know n and d, and want q = n / d.
70646- *
70647- * For any k, we have (here, all division is exact; not C-style rounding):
70648- * floor(ceil(2^k / d) * n / 2^k) = floor((2^k + r) / d * n / 2^k), where
70649- * r = (-2^k) mod d.
70650- *
70651- * Expanding this out:
70652- * ... = floor(2^k / d * n / 2^k + r / d * n / 2^k)
70653- *     = floor(n / d + (r / d) * (n / 2^k)).
70654- *
70655- * The fractional part of n / d is 0 (because of the assumption that d divides n
70656- * exactly), so we have:
70657- * ... = n / d + floor((r / d) * (n / 2^k))
70658- *
70659- * So that our initial expression is equal to the quantity we seek, so long as
70660- * (r / d) * (n / 2^k) < 1.
70661- *
70662- * r is a remainder mod d, so r < d and r / d < 1 always. We can make
70663- * n / 2 ^ k < 1 by setting k = 32. This gets us a value of magic that works.
70664- */
70665-
70666-void
70667-div_init(div_info_t *div_info, size_t d) {
70668-	/* Nonsensical. */
70669-	assert(d != 0);
70670-	/*
70671-	 * This would make the value of magic too high to fit into a uint32_t
70672-	 * (we would want magic = 2^32 exactly). This would mess with code gen
70673-	 * on 32-bit machines.
70674-	 */
70675-	assert(d != 1);
70676-
70677-	uint64_t two_to_k = ((uint64_t)1 << 32);
70678-	uint32_t magic = (uint32_t)(two_to_k / d);
70679-
70680-	/*
70681-	 * We want magic = ceil(2^k / d), but C gives us floor. We have to
70682-	 * increment it unless the result was exact (i.e. unless d is a power of
70683-	 * two).
70684-	 */
70685-	if (two_to_k % d != 0) {
70686-		magic++;
70687-	}
70688-	div_info->magic = magic;
70689-#ifdef JEMALLOC_DEBUG
70690-	div_info->d = d;
70691-#endif
70692-}
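
A small self-check of the derivation above (illustrative only, not part of the deleted source): for a d that exactly divides n, multiplying by magic = ceil(2^32 / d) and shifting right by 32 recovers n / d, as long as n fits in 32 bits.

#include <assert.h>
#include <stdint.h>

int
main(void) {
	uint64_t d = 7;
	/* magic = ceil(2^32 / d); 2^32 is not a multiple of 7, hence the +1. */
	uint64_t magic = (((uint64_t)1 << 32) / d) + 1;
	for (uint64_t q = 0; q < 1000000; q++) {
		uint64_t n = q * d;	/* d divides n exactly. */
		assert(((magic * n) >> 32) == q);
	}
	return 0;
}
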
70693diff --git a/jemalloc/src/ecache.c b/jemalloc/src/ecache.c
70694deleted file mode 100644
70695index a242227..0000000
70696--- a/jemalloc/src/ecache.c
70697+++ /dev/null
70698@@ -1,35 +0,0 @@
70699-#include "jemalloc/internal/jemalloc_preamble.h"
70700-#include "jemalloc/internal/jemalloc_internal_includes.h"
70701-
70702-#include "jemalloc/internal/san.h"
70703-
70704-bool
70705-ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state, unsigned ind,
70706-    bool delay_coalesce) {
70707-	if (malloc_mutex_init(&ecache->mtx, "extents", WITNESS_RANK_EXTENTS,
70708-	    malloc_mutex_rank_exclusive)) {
70709-		return true;
70710-	}
70711-	ecache->state = state;
70712-	ecache->ind = ind;
70713-	ecache->delay_coalesce = delay_coalesce;
70714-	eset_init(&ecache->eset, state);
70715-	eset_init(&ecache->guarded_eset, state);
70716-
70717-	return false;
70718-}
70719-
70720-void
70721-ecache_prefork(tsdn_t *tsdn, ecache_t *ecache) {
70722-	malloc_mutex_prefork(tsdn, &ecache->mtx);
70723-}
70724-
70725-void
70726-ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache) {
70727-	malloc_mutex_postfork_parent(tsdn, &ecache->mtx);
70728-}
70729-
70730-void
70731-ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache) {
70732-	malloc_mutex_postfork_child(tsdn, &ecache->mtx);
70733-}
70734diff --git a/jemalloc/src/edata.c b/jemalloc/src/edata.c
70735deleted file mode 100644
70736index 82b6f56..0000000
70737--- a/jemalloc/src/edata.c
70738+++ /dev/null
70739@@ -1,6 +0,0 @@
70740-#include "jemalloc/internal/jemalloc_preamble.h"
70741-#include "jemalloc/internal/jemalloc_internal_includes.h"
70742-
70743-ph_gen(, edata_avail, edata_t, avail_link,
70744-    edata_esnead_comp)
70745-ph_gen(, edata_heap, edata_t, heap_link, edata_snad_comp)
70746diff --git a/jemalloc/src/edata_cache.c b/jemalloc/src/edata_cache.c
70747deleted file mode 100644
70748index 6bc1848..0000000
70749--- a/jemalloc/src/edata_cache.c
70750+++ /dev/null
70751@@ -1,154 +0,0 @@
70752-#include "jemalloc/internal/jemalloc_preamble.h"
70753-#include "jemalloc/internal/jemalloc_internal_includes.h"
70754-
70755-bool
70756-edata_cache_init(edata_cache_t *edata_cache, base_t *base) {
70757-	edata_avail_new(&edata_cache->avail);
70758-	/*
70759-	 * This is not strictly necessary, since the edata_cache_t is only
70760-	 * created inside an arena, which is zeroed on creation.  But this is
70761-	 * handy as a safety measure.
70762-	 */
70763-	atomic_store_zu(&edata_cache->count, 0, ATOMIC_RELAXED);
70764-	if (malloc_mutex_init(&edata_cache->mtx, "edata_cache",
70765-	    WITNESS_RANK_EDATA_CACHE, malloc_mutex_rank_exclusive)) {
70766-		return true;
70767-	}
70768-	edata_cache->base = base;
70769-	return false;
70770-}
70771-
70772-edata_t *
70773-edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache) {
70774-	malloc_mutex_lock(tsdn, &edata_cache->mtx);
70775-	edata_t *edata = edata_avail_first(&edata_cache->avail);
70776-	if (edata == NULL) {
70777-		malloc_mutex_unlock(tsdn, &edata_cache->mtx);
70778-		return base_alloc_edata(tsdn, edata_cache->base);
70779-	}
70780-	edata_avail_remove(&edata_cache->avail, edata);
70781-	atomic_load_sub_store_zu(&edata_cache->count, 1);
70782-	malloc_mutex_unlock(tsdn, &edata_cache->mtx);
70783-	return edata;
70784-}
70785-
70786-void
70787-edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata) {
70788-	malloc_mutex_lock(tsdn, &edata_cache->mtx);
70789-	edata_avail_insert(&edata_cache->avail, edata);
70790-	atomic_load_add_store_zu(&edata_cache->count, 1);
70791-	malloc_mutex_unlock(tsdn, &edata_cache->mtx);
70792-}
70793-
70794-void
70795-edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache) {
70796-	malloc_mutex_prefork(tsdn, &edata_cache->mtx);
70797-}
70798-
70799-void
70800-edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache) {
70801-	malloc_mutex_postfork_parent(tsdn, &edata_cache->mtx);
70802-}
70803-
70804-void
70805-edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache) {
70806-	malloc_mutex_postfork_child(tsdn, &edata_cache->mtx);
70807-}
70808-
70809-void
70810-edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback) {
70811-	edata_list_inactive_init(&ecs->list);
70812-	ecs->fallback = fallback;
70813-	ecs->disabled = false;
70814-}
70815-
70816-static void
70817-edata_cache_fast_try_fill_from_fallback(tsdn_t *tsdn,
70818-    edata_cache_fast_t *ecs) {
70819-	edata_t *edata;
70820-	malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
70821-	for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) {
70822-		edata = edata_avail_remove_first(&ecs->fallback->avail);
70823-		if (edata == NULL) {
70824-			break;
70825-		}
70826-		edata_list_inactive_append(&ecs->list, edata);
70827-		atomic_load_sub_store_zu(&ecs->fallback->count, 1);
70828-	}
70829-	malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);
70830-}
70831-
70832-edata_t *
70833-edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
70834-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
70835-	    WITNESS_RANK_EDATA_CACHE, 0);
70836-
70837-	if (ecs->disabled) {
70838-		assert(edata_list_inactive_first(&ecs->list) == NULL);
70839-		return edata_cache_get(tsdn, ecs->fallback);
70840-	}
70841-
70842-	edata_t *edata = edata_list_inactive_first(&ecs->list);
70843-	if (edata != NULL) {
70844-		edata_list_inactive_remove(&ecs->list, edata);
70845-		return edata;
70846-	}
70847-	/* Slow path; requires synchronization. */
70848-	edata_cache_fast_try_fill_from_fallback(tsdn, ecs);
70849-	edata = edata_list_inactive_first(&ecs->list);
70850-	if (edata != NULL) {
70851-		edata_list_inactive_remove(&ecs->list, edata);
70852-	} else {
70853-		/*
70854-		 * Slowest path (fallback was also empty); allocate something
70855-		 * new.
70856-		 */
70857-		edata = base_alloc_edata(tsdn, ecs->fallback->base);
70858-	}
70859-	return edata;
70860-}
70861-
70862-static void
70863-edata_cache_fast_flush_all(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
70864-	/*
70865-	 * You could imagine smarter cache management policies (like
70866-	 * only flushing down to some threshold in anticipation of
70867-	 * future get requests).  But just flushing everything provides
70868-	 * a good opportunity to defrag too, and lets us share code between the
70869-	 * flush and disable pathways.
70870-	 */
70871-	edata_t *edata;
70872-	size_t nflushed = 0;
70873-	malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
70874-	while ((edata = edata_list_inactive_first(&ecs->list)) != NULL) {
70875-		edata_list_inactive_remove(&ecs->list, edata);
70876-		edata_avail_insert(&ecs->fallback->avail, edata);
70877-		nflushed++;
70878-	}
70879-	atomic_load_add_store_zu(&ecs->fallback->count, nflushed);
70880-	malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);
70881-}
70882-
70883-void
70884-edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs, edata_t *edata) {
70885-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
70886-	    WITNESS_RANK_EDATA_CACHE, 0);
70887-
70888-	if (ecs->disabled) {
70889-		assert(edata_list_inactive_first(&ecs->list) == NULL);
70890-		edata_cache_put(tsdn, ecs->fallback, edata);
70891-		return;
70892-	}
70893-
70894-	/*
70895-	 * Prepend rather than append, to do LIFO ordering in the hopes of some
70896-	 * cache locality.
70897-	 */
70898-	edata_list_inactive_prepend(&ecs->list, edata);
70899-}
70900-
70901-void
70902-edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
70903-	edata_cache_fast_flush_all(tsdn, ecs);
70904-	ecs->disabled = true;
70905-}
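
The deleted edata_cache.c above layers an unsynchronized, per-consumer edata_cache_fast_t (a LIFO list refilled in batches of EDATA_CACHE_FAST_FILL) on top of the mutex-protected edata_cache_t, which itself falls back to base_alloc_edata() when empty. As a rough, self-contained sketch of that two-level pattern only, using hypothetical node_t/pool_t/fast_cache_t types rather than jemalloc's types or locking discipline:

#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>

#define FAST_FILL 4	/* stands in for EDATA_CACHE_FAST_FILL */

typedef struct node_s node_t;
struct node_s {
	node_t *next;
};

typedef struct {	/* shared pool behind a mutex (cf. edata_cache_t) */
	pthread_mutex_t mtx;
	node_t *avail;
} pool_t;

typedef struct {	/* unsynchronized cache (cf. edata_cache_fast_t) */
	node_t *list;
	pool_t *fallback;
} fast_cache_t;

static node_t *
fast_get(fast_cache_t *fc) {
	node_t *n = fc->list;
	if (n != NULL) {	/* fast path: no synchronization */
		fc->list = n->next;
		return n;
	}
	/* Slow path: refill a small batch from the fallback under its lock. */
	pthread_mutex_lock(&fc->fallback->mtx);
	for (int i = 0; i < FAST_FILL && fc->fallback->avail != NULL; i++) {
		node_t *batch = fc->fallback->avail;
		fc->fallback->avail = batch->next;
		batch->next = fc->list;
		fc->list = batch;
	}
	pthread_mutex_unlock(&fc->fallback->mtx);
	n = fc->list;
	if (n != NULL) {
		fc->list = n->next;
		return n;
	}
	/*
	 * Slowest path: the fallback was empty too; allocate something new
	 * (the role base_alloc_edata() plays above).
	 */
	return malloc(sizeof(node_t));
}

static void
fast_put(fast_cache_t *fc, node_t *n) {
	n->next = fc->list;	/* LIFO, as in edata_cache_fast_put() */
	fc->list = n;
}
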
70906diff --git a/jemalloc/src/ehooks.c b/jemalloc/src/ehooks.c
70907deleted file mode 100644
70908index 383e9de..0000000
70909--- a/jemalloc/src/ehooks.c
70910+++ /dev/null
70911@@ -1,275 +0,0 @@
70912-#include "jemalloc/internal/jemalloc_preamble.h"
70913-#include "jemalloc/internal/jemalloc_internal_includes.h"
70914-
70915-#include "jemalloc/internal/ehooks.h"
70916-#include "jemalloc/internal/extent_mmap.h"
70917-
70918-void
70919-ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind) {
70920-	/* All other hooks are optional; this one is not. */
70921-	assert(extent_hooks->alloc != NULL);
70922-	ehooks->ind = ind;
70923-	ehooks_set_extent_hooks_ptr(ehooks, extent_hooks);
70924-}
70925-
70926-/*
70927- * If the caller specifies (!*zero), it is still possible to receive zeroed
70928- * memory, in which case *zero is toggled to true.  arena_extent_alloc() takes
70929- * advantage of this to avoid demanding zeroed extents, but taking advantage of
70930- * them if they are returned.
70931- */
70932-static void *
70933-extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
70934-    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
70935-	void *ret;
70936-
70937-	assert(size != 0);
70938-	assert(alignment != 0);
70939-
70940-	/* "primary" dss. */
70941-	if (have_dss && dss_prec == dss_prec_primary && (ret =
70942-	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
70943-	    commit)) != NULL) {
70944-		return ret;
70945-	}
70946-	/* mmap. */
70947-	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
70948-	    != NULL) {
70949-		return ret;
70950-	}
70951-	/* "secondary" dss. */
70952-	if (have_dss && dss_prec == dss_prec_secondary && (ret =
70953-	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
70954-	    commit)) != NULL) {
70955-		return ret;
70956-	}
70957-
70958-	/* All strategies for allocation failed. */
70959-	return NULL;
70960-}
70961-
70962-void *
70963-ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
70964-    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
70965-	arena_t *arena = arena_get(tsdn, arena_ind, false);
70966-	/* NULL arena indicates arena_create. */
70967-	assert(arena != NULL || alignment == HUGEPAGE);
70968-	dss_prec_t dss = (arena == NULL) ? dss_prec_disabled :
70969-	    (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_RELAXED);
70970-	void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment,
70971-	    zero, commit, dss);
70972-	if (have_madvise_huge && ret) {
70973-		pages_set_thp_state(ret, size);
70974-	}
70975-	return ret;
70976-}
70977-
70978-static void *
70979-ehooks_default_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
70980-    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
70981-	return ehooks_default_alloc_impl(tsdn_fetch(), new_addr, size,
70982-	    ALIGNMENT_CEILING(alignment, PAGE), zero, commit, arena_ind);
70983-}
70984-
70985-bool
70986-ehooks_default_dalloc_impl(void *addr, size_t size) {
70987-	if (!have_dss || !extent_in_dss(addr)) {
70988-		return extent_dalloc_mmap(addr, size);
70989-	}
70990-	return true;
70991-}
70992-
70993-static bool
70994-ehooks_default_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
70995-    bool committed, unsigned arena_ind) {
70996-	return ehooks_default_dalloc_impl(addr, size);
70997-}
70998-
70999-void
71000-ehooks_default_destroy_impl(void *addr, size_t size) {
71001-	if (!have_dss || !extent_in_dss(addr)) {
71002-		pages_unmap(addr, size);
71003-	}
71004-}
71005-
71006-static void
71007-ehooks_default_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
71008-    bool committed, unsigned arena_ind) {
71009-	ehooks_default_destroy_impl(addr, size);
71010-}
71011-
71012-bool
71013-ehooks_default_commit_impl(void *addr, size_t offset, size_t length) {
71014-	return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
71015-	    length);
71016-}
71017-
71018-static bool
71019-ehooks_default_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
71020-    size_t offset, size_t length, unsigned arena_ind) {
71021-	return ehooks_default_commit_impl(addr, offset, length);
71022-}
71023-
71024-bool
71025-ehooks_default_decommit_impl(void *addr, size_t offset, size_t length) {
71026-	return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
71027-	    length);
71028-}
71029-
71030-static bool
71031-ehooks_default_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
71032-    size_t offset, size_t length, unsigned arena_ind) {
71033-	return ehooks_default_decommit_impl(addr, offset, length);
71034-}
71035-
71036-#ifdef PAGES_CAN_PURGE_LAZY
71037-bool
71038-ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length) {
71039-	return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
71040-	    length);
71041-}
71042-
71043-static bool
71044-ehooks_default_purge_lazy(extent_hooks_t *extent_hooks, void *addr, size_t size,
71045-    size_t offset, size_t length, unsigned arena_ind) {
71046-	assert(addr != NULL);
71047-	assert((offset & PAGE_MASK) == 0);
71048-	assert(length != 0);
71049-	assert((length & PAGE_MASK) == 0);
71050-	return ehooks_default_purge_lazy_impl(addr, offset, length);
71051-}
71052-#endif
71053-
71054-#ifdef PAGES_CAN_PURGE_FORCED
71055-bool
71056-ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length) {
71057-	return pages_purge_forced((void *)((uintptr_t)addr +
71058-	    (uintptr_t)offset), length);
71059-}
71060-
71061-static bool
71062-ehooks_default_purge_forced(extent_hooks_t *extent_hooks, void *addr,
71063-    size_t size, size_t offset, size_t length, unsigned arena_ind) {
71064-	assert(addr != NULL);
71065-	assert((offset & PAGE_MASK) == 0);
71066-	assert(length != 0);
71067-	assert((length & PAGE_MASK) == 0);
71068-	return ehooks_default_purge_forced_impl(addr, offset, length);
71069-}
71070-#endif
71071-
71072-bool
71073-ehooks_default_split_impl() {
71074-	if (!maps_coalesce) {
71075-		/*
71076-		 * Without retain, only whole regions can be purged (required by
71077-		 * MEM_RELEASE on Windows) -- therefore disallow splitting.  See
71078-		 * comments in extent_head_no_merge().
71079-		 */
71080-		return !opt_retain;
71081-	}
71082-
71083-	return false;
71084-}
71085-
71086-static bool
71087-ehooks_default_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
71088-    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
71089-	return ehooks_default_split_impl();
71090-}
71091-
71092-bool
71093-ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b) {
71094-	assert(addr_a < addr_b);
71095-	/*
71096-	 * For non-DSS cases --
71097-	 * a) W/o maps_coalesce, merge is not always allowed (Windows):
71098-	 *   1) w/o retain, never merge (first branch below).
71099-	 *   2) with retain, only merge extents from the same VirtualAlloc
71100-	 *      region (in which case MEM_DECOMMIT is utilized for purging).
71101-	 *
71102-	 * b) With maps_coalesce, it's always possible to merge.
71103-	 *   1) w/o retain, always allow merge (only about dirty / muzzy).
71104-	 *   2) with retain, to preserve the SN / first-fit, merge is still
71105-	 *      disallowed if b is a head extent, i.e. no merging across
71106-	 *      different mmap regions.
71107-	 *
71108-	 * a2) and b2) are implemented in emap_try_acquire_edata_neighbor, and
71109-	 * sanity checked in the second branch below.
71110-	 */
71111-	if (!maps_coalesce && !opt_retain) {
71112-		return true;
71113-	}
71114-	if (config_debug) {
71115-		edata_t *a = emap_edata_lookup(tsdn, &arena_emap_global,
71116-		    addr_a);
71117-		bool head_a = edata_is_head_get(a);
71118-		edata_t *b = emap_edata_lookup(tsdn, &arena_emap_global,
71119-		    addr_b);
71120-		bool head_b = edata_is_head_get(b);
71121-		emap_assert_mapped(tsdn, &arena_emap_global, a);
71122-		emap_assert_mapped(tsdn, &arena_emap_global, b);
71123-		assert(extent_neighbor_head_state_mergeable(head_a, head_b,
71124-		    /* forward */ true));
71125-	}
71126-	if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
71127-		return true;
71128-	}
71129-
71130-	return false;
71131-}
71132-
71133-bool
71134-ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
71135-    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
71136-	tsdn_t *tsdn = tsdn_fetch();
71137-
71138-	return ehooks_default_merge_impl(tsdn, addr_a, addr_b);
71139-}
71140-
71141-void
71142-ehooks_default_zero_impl(void *addr, size_t size) {
71143-	/*
71144-	 * By default, we try to zero out memory using OS-provided demand-zeroed
71145-	 * pages.  If the user has specifically requested hugepages, though, we
71146-	 * don't want to purge in the middle of a hugepage (which would break it
71147-	 * up), so we act conservatively and use memset.
71148-	 */
71149-	bool needs_memset = true;
71150-	if (opt_thp != thp_mode_always) {
71151-		needs_memset = pages_purge_forced(addr, size);
71152-	}
71153-	if (needs_memset) {
71154-		memset(addr, 0, size);
71155-	}
71156-}
71157-
71158-void
71159-ehooks_default_guard_impl(void *guard1, void *guard2) {
71160-	pages_mark_guards(guard1, guard2);
71161-}
71162-
71163-void
71164-ehooks_default_unguard_impl(void *guard1, void *guard2) {
71165-	pages_unmark_guards(guard1, guard2);
71166-}
71167-
71168-const extent_hooks_t ehooks_default_extent_hooks = {
71169-	ehooks_default_alloc,
71170-	ehooks_default_dalloc,
71171-	ehooks_default_destroy,
71172-	ehooks_default_commit,
71173-	ehooks_default_decommit,
71174-#ifdef PAGES_CAN_PURGE_LAZY
71175-	ehooks_default_purge_lazy,
71176-#else
71177-	NULL,
71178-#endif
71179-#ifdef PAGES_CAN_PURGE_FORCED
71180-	ehooks_default_purge_forced,
71181-#else
71182-	NULL,
71183-#endif
71184-	ehooks_default_split,
71185-	ehooks_default_merge
71186-};
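
The deleted ehooks.c supplies jemalloc's default extent_hooks_t table (DSS/mmap allocation, pages_*-based purging, and split/merge decisions gated on maps_coalesce and opt_retain); ehooks_init() notes that only the alloc hook is mandatory. For context, a rough sketch of how an application might layer a custom hook over the installed defaults through jemalloc's public mallctl interface. It assumes an unprefixed build that exposes mallctl and <jemalloc/jemalloc.h> (a prefixed build, which this crate can configure, renames those symbols) and the documented arena.<i>.extent_hooks mallctl; install_logging_hooks and logging_alloc are hypothetical names for the example:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <jemalloc/jemalloc.h>

static extent_hooks_t *orig_hooks;	/* default table read back from jemalloc */
static extent_hooks_t logging_hooks;

/* Same signature as ehooks_default_alloc() in the hunk above. */
static void *
logging_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	void *ret = orig_hooks->alloc(orig_hooks, new_addr, size, alignment,
	    zero, commit, arena_ind);
	fprintf(stderr, "extent alloc: %zu bytes -> %p (arena %u)\n", size, ret,
	    arena_ind);
	return ret;
}

int
install_logging_hooks(void) {
	size_t sz = sizeof(orig_hooks);
	/* Read arena 0's current (default) hook table... */
	if (mallctl("arena.0.extent_hooks", &orig_hooks, &sz, NULL, 0) != 0) {
		return -1;
	}
	/* ...copy it, overriding only the mandatory alloc hook... */
	memcpy(&logging_hooks, orig_hooks, sizeof(logging_hooks));
	logging_hooks.alloc = logging_alloc;
	/* ...and install the new table. */
	extent_hooks_t *new_hooks = &logging_hooks;
	return mallctl("arena.0.extent_hooks", NULL, NULL, &new_hooks,
	    sizeof(new_hooks));
}
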
71187diff --git a/jemalloc/src/emap.c b/jemalloc/src/emap.c
71188deleted file mode 100644
71189index 9cc95a7..0000000
71190--- a/jemalloc/src/emap.c
71191+++ /dev/null
71192@@ -1,386 +0,0 @@
71193-#include "jemalloc/internal/jemalloc_preamble.h"
71194-#include "jemalloc/internal/jemalloc_internal_includes.h"
71195-
71196-#include "jemalloc/internal/emap.h"
71197-
71198-enum emap_lock_result_e {
71199-	emap_lock_result_success,
71200-	emap_lock_result_failure,
71201-	emap_lock_result_no_extent
71202-};
71203-typedef enum emap_lock_result_e emap_lock_result_t;
71204-
71205-bool
71206-emap_init(emap_t *emap, base_t *base, bool zeroed) {
71207-	return rtree_new(&emap->rtree, base, zeroed);
71208-}
71209-
71210-void
71211-emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
71212-    extent_state_t state) {
71213-	witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
71214-	    WITNESS_RANK_CORE);
71215-
71216-	edata_state_set(edata, state);
71217-
71218-	EMAP_DECLARE_RTREE_CTX;
71219-	rtree_leaf_elm_t *elm1 = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
71220-	    rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true,
71221-	    /* init_missing */ false);
71222-	assert(elm1 != NULL);
71223-	rtree_leaf_elm_t *elm2 = edata_size_get(edata) == PAGE ? NULL :
71224-	    rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
71225-	    (uintptr_t)edata_last_get(edata), /* dependent */ true,
71226-	    /* init_missing */ false);
71227-
71228-	rtree_leaf_elm_state_update(tsdn, &emap->rtree, elm1, elm2, state);
71229-
71230-	emap_assert_mapped(tsdn, emap, edata);
71231-}
71232-
71233-static inline edata_t *
71234-emap_try_acquire_edata_neighbor_impl(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
71235-    extent_pai_t pai, extent_state_t expected_state, bool forward,
71236-    bool expanding) {
71237-	witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
71238-	    WITNESS_RANK_CORE);
71239-	assert(!edata_guarded_get(edata));
71240-	assert(!expanding || forward);
71241-	assert(!edata_state_in_transition(expected_state));
71242-	assert(expected_state == extent_state_dirty ||
71243-	       expected_state == extent_state_muzzy ||
71244-	       expected_state == extent_state_retained);
71245-
71246-	void *neighbor_addr = forward ? edata_past_get(edata) :
71247-	    edata_before_get(edata);
71248-	/*
71249-	 * This is subtle; the rtree code asserts that its input pointer is
71250-	 * non-NULL, and this is a useful thing to check.  But it's possible
71251-	 * that edata corresponds to an address of (void *)PAGE (in practice,
71252-	 * this has only been observed on FreeBSD when address-space
71253-	 * randomization is on, but it could in principle happen anywhere).  In
71254-	 * this case, edata_before_get(edata) is NULL, triggering the assert.
71255-	 */
71256-	if (neighbor_addr == NULL) {
71257-		return NULL;
71258-	}
71259-
71260-	EMAP_DECLARE_RTREE_CTX;
71261-	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
71262-	    rtree_ctx, (uintptr_t)neighbor_addr, /* dependent */ false,
71263-	    /* init_missing */ false);
71264-	if (elm == NULL) {
71265-		return NULL;
71266-	}
71267-
71268-	rtree_contents_t neighbor_contents = rtree_leaf_elm_read(tsdn,
71269-	    &emap->rtree, elm, /* dependent */ true);
71270-	if (!extent_can_acquire_neighbor(edata, neighbor_contents, pai,
71271-	    expected_state, forward, expanding)) {
71272-		return NULL;
71273-	}
71274-
71275-	/* From this point, the neighbor edata can be safely acquired. */
71276-	edata_t *neighbor = neighbor_contents.edata;
71277-	assert(edata_state_get(neighbor) == expected_state);
71278-	emap_update_edata_state(tsdn, emap, neighbor, extent_state_merging);
71279-	if (expanding) {
71280-		extent_assert_can_expand(edata, neighbor);
71281-	} else {
71282-		extent_assert_can_coalesce(edata, neighbor);
71283-	}
71284-
71285-	return neighbor;
71286-}
71287-
71288-edata_t *
71289-emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
71290-    extent_pai_t pai, extent_state_t expected_state, bool forward) {
71291-	return emap_try_acquire_edata_neighbor_impl(tsdn, emap, edata, pai,
71292-	    expected_state, forward, /* expand */ false);
71293-}
71294-
71295-edata_t *
71296-emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap,
71297-    edata_t *edata, extent_pai_t pai, extent_state_t expected_state) {
71298-	/* Try expanding forward. */
71299-	return emap_try_acquire_edata_neighbor_impl(tsdn, emap, edata, pai,
71300-	    expected_state, /* forward */ true, /* expand */ true);
71301-}
71302-
71303-void
71304-emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
71305-    extent_state_t new_state) {
71306-	assert(emap_edata_in_transition(tsdn, emap, edata));
71307-	assert(emap_edata_is_acquired(tsdn, emap, edata));
71308-
71309-	emap_update_edata_state(tsdn, emap, edata, new_state);
71310-}
71311-
71312-static bool
71313-emap_rtree_leaf_elms_lookup(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
71314-    const edata_t *edata, bool dependent, bool init_missing,
71315-    rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
71316-	*r_elm_a = rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
71317-	    (uintptr_t)edata_base_get(edata), dependent, init_missing);
71318-	if (!dependent && *r_elm_a == NULL) {
71319-		return true;
71320-	}
71321-	assert(*r_elm_a != NULL);
71322-
71323-	*r_elm_b = rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
71324-	    (uintptr_t)edata_last_get(edata), dependent, init_missing);
71325-	if (!dependent && *r_elm_b == NULL) {
71326-		return true;
71327-	}
71328-	assert(*r_elm_b != NULL);
71329-
71330-	return false;
71331-}
71332-
71333-static void
71334-emap_rtree_write_acquired(tsdn_t *tsdn, emap_t *emap, rtree_leaf_elm_t *elm_a,
71335-    rtree_leaf_elm_t *elm_b, edata_t *edata, szind_t szind, bool slab) {
71336-	rtree_contents_t contents;
71337-	contents.edata = edata;
71338-	contents.metadata.szind = szind;
71339-	contents.metadata.slab = slab;
71340-	contents.metadata.is_head = (edata == NULL) ? false :
71341-	    edata_is_head_get(edata);
71342-	contents.metadata.state = (edata == NULL) ? 0 : edata_state_get(edata);
71343-	rtree_leaf_elm_write(tsdn, &emap->rtree, elm_a, contents);
71344-	if (elm_b != NULL) {
71345-		rtree_leaf_elm_write(tsdn, &emap->rtree, elm_b, contents);
71346-	}
71347-}
71348-
71349-bool
71350-emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
71351-    szind_t szind, bool slab) {
71352-	assert(edata_state_get(edata) == extent_state_active);
71353-	EMAP_DECLARE_RTREE_CTX;
71354-
71355-	rtree_leaf_elm_t *elm_a, *elm_b;
71356-	bool err = emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata,
71357-	    false, true, &elm_a, &elm_b);
71358-	if (err) {
71359-		return true;
71360-	}
71361-	assert(rtree_leaf_elm_read(tsdn, &emap->rtree, elm_a,
71362-	    /* dependent */ false).edata == NULL);
71363-	assert(rtree_leaf_elm_read(tsdn, &emap->rtree, elm_b,
71364-	    /* dependent */ false).edata == NULL);
71365-	emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, edata, szind, slab);
71366-	return false;
71367-}
71368-
71369-/* Invoked *after* emap_register_boundary. */
71370-void
71371-emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
71372-    szind_t szind) {
71373-	EMAP_DECLARE_RTREE_CTX;
71374-
71375-	assert(edata_slab_get(edata));
71376-	assert(edata_state_get(edata) == extent_state_active);
71377-
71378-	if (config_debug) {
71379-		/* Making sure the boundary is registered already. */
71380-		rtree_leaf_elm_t *elm_a, *elm_b;
71381-		bool err = emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx,
71382-		    edata, /* dependent */ true, /* init_missing */ false,
71383-		    &elm_a, &elm_b);
71384-		assert(!err);
71385-		rtree_contents_t contents_a, contents_b;
71386-		contents_a = rtree_leaf_elm_read(tsdn, &emap->rtree, elm_a,
71387-		    /* dependent */ true);
71388-		contents_b = rtree_leaf_elm_read(tsdn, &emap->rtree, elm_b,
71389-		    /* dependent */ true);
71390-		assert(contents_a.edata == edata && contents_b.edata == edata);
71391-		assert(contents_a.metadata.slab && contents_b.metadata.slab);
71392-	}
71393-
71394-	rtree_contents_t contents;
71395-	contents.edata = edata;
71396-	contents.metadata.szind = szind;
71397-	contents.metadata.slab = true;
71398-	contents.metadata.state = extent_state_active;
71399-	contents.metadata.is_head = false; /* Not allowed to access. */
71400-
71401-	assert(edata_size_get(edata) > (2 << LG_PAGE));
71402-	rtree_write_range(tsdn, &emap->rtree, rtree_ctx,
71403-	    (uintptr_t)edata_base_get(edata) + PAGE,
71404-	    (uintptr_t)edata_last_get(edata) - PAGE, contents);
71405-}
71406-
71407-void
71408-emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
71409-	/*
71410-	 * The edata must be either in an acquired state, or protected by state
71411-	 * based locks.
71412-	 */
71413-	if (!emap_edata_is_acquired(tsdn, emap, edata)) {
71414-		witness_assert_positive_depth_to_rank(
71415-		    tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
71416-	}
71417-
71418-	EMAP_DECLARE_RTREE_CTX;
71419-	rtree_leaf_elm_t *elm_a, *elm_b;
71420-
71421-	emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata,
71422-	    true, false, &elm_a, &elm_b);
71423-	emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, NULL, SC_NSIZES,
71424-	    false);
71425-}
71426-
71427-void
71428-emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
71429-	EMAP_DECLARE_RTREE_CTX;
71430-
71431-	assert(edata_slab_get(edata));
71432-	if (edata_size_get(edata) > (2 << LG_PAGE)) {
71433-		rtree_clear_range(tsdn, &emap->rtree, rtree_ctx,
71434-		    (uintptr_t)edata_base_get(edata) + PAGE,
71435-		    (uintptr_t)edata_last_get(edata) - PAGE);
71436-	}
71437-}
71438-
71439-void
71440-emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
71441-    bool slab) {
71442-	EMAP_DECLARE_RTREE_CTX;
71443-
71444-	if (szind != SC_NSIZES) {
71445-		rtree_contents_t contents;
71446-		contents.edata = edata;
71447-		contents.metadata.szind = szind;
71448-		contents.metadata.slab = slab;
71449-		contents.metadata.is_head = edata_is_head_get(edata);
71450-		contents.metadata.state = edata_state_get(edata);
71451-
71452-		rtree_write(tsdn, &emap->rtree, rtree_ctx,
71453-		    (uintptr_t)edata_addr_get(edata), contents);
71454-		/*
71455-		 * Recall that this is called only for active->inactive and
71456-		 * inactive->active transitions (since only active extents have
71457-		 * meaningful values for szind and slab).  Active, non-slab
71458-		 * extents only need to handle lookups at their head (on
71459-		 * deallocation), so we don't bother filling in the end
71460-		 * boundary.
71461-		 *
71462-		 * For slab extents, we do the end-mapping change.  This still
71463-		 * leaves the interior unmodified; an emap_register_interior
71464-		 * call is coming in those cases, though.
71465-		 */
71466-		if (slab && edata_size_get(edata) > PAGE) {
71467-			uintptr_t key = (uintptr_t)edata_past_get(edata)
71468-			    - (uintptr_t)PAGE;
71469-			rtree_write(tsdn, &emap->rtree, rtree_ctx, key,
71470-			    contents);
71471-		}
71472-	}
71473-}
71474-
71475-bool
71476-emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
71477-    edata_t *edata, size_t size_a, edata_t *trail, size_t size_b) {
71478-	EMAP_DECLARE_RTREE_CTX;
71479-
71480-	/*
71481-	 * We use placeholder constants for things like the arena index, the
71482-	 * zero, ranged, and commit state, and the head status.  This is a fake
71483-	 * edata_t, used only to facilitate a lookup.
71484-	 */
71485-	edata_t lead = {0};
71486-	edata_init(&lead, 0U, edata_addr_get(edata), size_a, false, 0, 0,
71487-	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
71488-
71489-	emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, &lead, false, true,
71490-	    &prepare->lead_elm_a, &prepare->lead_elm_b);
71491-	emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, trail, false, true,
71492-	    &prepare->trail_elm_a, &prepare->trail_elm_b);
71493-
71494-	if (prepare->lead_elm_a == NULL || prepare->lead_elm_b == NULL
71495-	    || prepare->trail_elm_a == NULL || prepare->trail_elm_b == NULL) {
71496-		return true;
71497-	}
71498-	return false;
71499-}
71500-
71501-void
71502-emap_split_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
71503-    edata_t *lead, size_t size_a, edata_t *trail, size_t size_b) {
71504-	/*
71505-	 * We should think about not writing to the lead leaf element.  We can
71506-	 * get into situations where a racing realloc-like call can disagree
71507-	 * with a size lookup request.  I think it's fine to declare that these
71508-	 * situations are race bugs, but there's an argument to be made that for
71509-	 * things like xallocx, a size lookup call should return either the old
71510-	 * size or the new size, but not anything else.
71511-	 */
71512-	emap_rtree_write_acquired(tsdn, emap, prepare->lead_elm_a,
71513-	    prepare->lead_elm_b, lead, SC_NSIZES, /* slab */ false);
71514-	emap_rtree_write_acquired(tsdn, emap, prepare->trail_elm_a,
71515-	    prepare->trail_elm_b, trail, SC_NSIZES, /* slab */ false);
71516-}
71517-
71518-void
71519-emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
71520-    edata_t *lead, edata_t *trail) {
71521-	EMAP_DECLARE_RTREE_CTX;
71522-	emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, lead, true, false,
71523-	    &prepare->lead_elm_a, &prepare->lead_elm_b);
71524-	emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, trail, true, false,
71525-	    &prepare->trail_elm_a, &prepare->trail_elm_b);
71526-}
71527-
71528-void
71529-emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
71530-    edata_t *lead, edata_t *trail) {
71531-	rtree_contents_t clear_contents;
71532-	clear_contents.edata = NULL;
71533-	clear_contents.metadata.szind = SC_NSIZES;
71534-	clear_contents.metadata.slab = false;
71535-	clear_contents.metadata.is_head = false;
71536-	clear_contents.metadata.state = (extent_state_t)0;
71537-
71538-	if (prepare->lead_elm_b != NULL) {
71539-		rtree_leaf_elm_write(tsdn, &emap->rtree,
71540-		    prepare->lead_elm_b, clear_contents);
71541-	}
71542-
71543-	rtree_leaf_elm_t *merged_b;
71544-	if (prepare->trail_elm_b != NULL) {
71545-		rtree_leaf_elm_write(tsdn, &emap->rtree,
71546-		    prepare->trail_elm_a, clear_contents);
71547-		merged_b = prepare->trail_elm_b;
71548-	} else {
71549-		merged_b = prepare->trail_elm_a;
71550-	}
71551-
71552-	emap_rtree_write_acquired(tsdn, emap, prepare->lead_elm_a, merged_b,
71553-	    lead, SC_NSIZES, false);
71554-}
71555-
71556-void
71557-emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
71558-	EMAP_DECLARE_RTREE_CTX;
71559-
71560-	rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
71561-	    (uintptr_t)edata_base_get(edata));
71562-	assert(contents.edata == edata);
71563-	assert(contents.metadata.is_head == edata_is_head_get(edata));
71564-	assert(contents.metadata.state == edata_state_get(edata));
71565-}
71566-
71567-void
71568-emap_do_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
71569-	emap_full_alloc_ctx_t context1 = {0};
71570-	emap_full_alloc_ctx_try_lookup(tsdn, emap, edata_base_get(edata),
71571-	    &context1);
71572-	assert(context1.edata == NULL);
71573-
71574-	emap_full_alloc_ctx_t context2 = {0};
71575-	emap_full_alloc_ctx_try_lookup(tsdn, emap, edata_last_get(edata),
71576-	    &context2);
71577-	assert(context2.edata == NULL);
71578-}
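
To make the boundary/interior split in the deleted emap.c concrete: emap_register_boundary() writes rtree entries only for an extent's first and last page keys, while emap_register_interior() (slabs only, and only when the extent spans more than two pages) fills in every key strictly between them. A small self-contained worked example, assuming a hypothetical 4 KiB page (LG_PAGE = 12), a 16 KiB slab, and an arbitrary example base address:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define LG_PAGE 12			/* assumed 4 KiB pages for this example */
#define PAGE ((uintptr_t)1 << LG_PAGE)

int
main(void) {
	uintptr_t base = 0x10000000;		/* hypothetical slab base */
	size_t size = 4 * PAGE;			/* a 16 KiB (4-page) slab */
	uintptr_t last = base + size - PAGE;	/* key of the extent's last page */

	/* emap_register_boundary(): two rtree writes, at base and last. */
	printf("boundary keys: 0x%" PRIxPTR ", 0x%" PRIxPTR "\n", base, last);

	/*
	 * emap_register_interior(): every page key in [base + PAGE,
	 * last - PAGE]; hence the assert above that the slab spans > 2 pages.
	 */
	for (uintptr_t key = base + PAGE; key <= last - PAGE; key += PAGE) {
		printf("interior key: 0x%" PRIxPTR "\n", key);
	}
	return 0;
}
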
71579diff --git a/jemalloc/src/eset.c b/jemalloc/src/eset.c
71580deleted file mode 100644
71581index 6f8f335..0000000
71582--- a/jemalloc/src/eset.c
71583+++ /dev/null
71584@@ -1,282 +0,0 @@
71585-#include "jemalloc/internal/jemalloc_preamble.h"
71586-#include "jemalloc/internal/jemalloc_internal_includes.h"
71587-
71588-#include "jemalloc/internal/eset.h"
71589-
71590-#define ESET_NPSIZES (SC_NPSIZES + 1)
71591-
71592-static void
71593-eset_bin_init(eset_bin_t *bin) {
71594-	edata_heap_new(&bin->heap);
71595-	/*
71596-	 * heap_min doesn't need initialization; it gets filled in when the bin
71597-	 * goes from empty to non-empty.
71598-	 */
71599-}
71600-
71601-static void
71602-eset_bin_stats_init(eset_bin_stats_t *bin_stats) {
71603-	atomic_store_zu(&bin_stats->nextents, 0, ATOMIC_RELAXED);
71604-	atomic_store_zu(&bin_stats->nbytes, 0, ATOMIC_RELAXED);
71605-}
71606-
71607-void
71608-eset_init(eset_t *eset, extent_state_t state) {
71609-	for (unsigned i = 0; i < ESET_NPSIZES; i++) {
71610-		eset_bin_init(&eset->bins[i]);
71611-		eset_bin_stats_init(&eset->bin_stats[i]);
71612-	}
71613-	fb_init(eset->bitmap, ESET_NPSIZES);
71614-	edata_list_inactive_init(&eset->lru);
71615-	eset->state = state;
71616-}
71617-
71618-size_t
71619-eset_npages_get(eset_t *eset) {
71620-	return atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
71621-}
71622-
71623-size_t
71624-eset_nextents_get(eset_t *eset, pszind_t pind) {
71625-	return atomic_load_zu(&eset->bin_stats[pind].nextents, ATOMIC_RELAXED);
71626-}
71627-
71628-size_t
71629-eset_nbytes_get(eset_t *eset, pszind_t pind) {
71630-	return atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
71631-}
71632-
71633-static void
71634-eset_stats_add(eset_t *eset, pszind_t pind, size_t sz) {
71635-	size_t cur = atomic_load_zu(&eset->bin_stats[pind].nextents,
71636-	    ATOMIC_RELAXED);
71637-	atomic_store_zu(&eset->bin_stats[pind].nextents, cur + 1,
71638-	    ATOMIC_RELAXED);
71639-	cur = atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
71640-	atomic_store_zu(&eset->bin_stats[pind].nbytes, cur + sz,
71641-	    ATOMIC_RELAXED);
71642-}
71643-
71644-static void
71645-eset_stats_sub(eset_t *eset, pszind_t pind, size_t sz) {
71646-	size_t cur = atomic_load_zu(&eset->bin_stats[pind].nextents,
71647-	    ATOMIC_RELAXED);
71648-	atomic_store_zu(&eset->bin_stats[pind].nextents, cur - 1,
71649-	    ATOMIC_RELAXED);
71650-	cur = atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
71651-	atomic_store_zu(&eset->bin_stats[pind].nbytes, cur - sz,
71652-	    ATOMIC_RELAXED);
71653-}
71654-
71655-void
71656-eset_insert(eset_t *eset, edata_t *edata) {
71657-	assert(edata_state_get(edata) == eset->state);
71658-
71659-	size_t size = edata_size_get(edata);
71660-	size_t psz = sz_psz_quantize_floor(size);
71661-	pszind_t pind = sz_psz2ind(psz);
71662-
71663-	edata_cmp_summary_t edata_cmp_summary = edata_cmp_summary_get(edata);
71664-	if (edata_heap_empty(&eset->bins[pind].heap)) {
71665-		fb_set(eset->bitmap, ESET_NPSIZES, (size_t)pind);
71666-		/* Only element is automatically the min element. */
71667-		eset->bins[pind].heap_min = edata_cmp_summary;
71668-	} else {
71669-		/*
71670-		 * There's already a min element; update the summary if we're
71671-		 * about to insert a lower one.
71672-		 */
71673-		if (edata_cmp_summary_comp(edata_cmp_summary,
71674-		    eset->bins[pind].heap_min) < 0) {
71675-			eset->bins[pind].heap_min = edata_cmp_summary;
71676-		}
71677-	}
71678-	edata_heap_insert(&eset->bins[pind].heap, edata);
71679-
71680-	if (config_stats) {
71681-		eset_stats_add(eset, pind, size);
71682-	}
71683-
71684-	edata_list_inactive_append(&eset->lru, edata);
71685-	size_t npages = size >> LG_PAGE;
71686-	/*
71687-	 * All modifications to npages hold the mutex (as asserted above), so we
71688-	 * don't need an atomic fetch-add; we can get by with a load followed by
71689-	 * a store.
71690-	 */
71691-	size_t cur_eset_npages =
71692-	    atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
71693-	atomic_store_zu(&eset->npages, cur_eset_npages + npages,
71694-	    ATOMIC_RELAXED);
71695-}
71696-
71697-void
71698-eset_remove(eset_t *eset, edata_t *edata) {
71699-	assert(edata_state_get(edata) == eset->state ||
71700-	    edata_state_in_transition(edata_state_get(edata)));
71701-
71702-	size_t size = edata_size_get(edata);
71703-	size_t psz = sz_psz_quantize_floor(size);
71704-	pszind_t pind = sz_psz2ind(psz);
71705-	if (config_stats) {
71706-		eset_stats_sub(eset, pind, size);
71707-	}
71708-
71709-	edata_cmp_summary_t edata_cmp_summary = edata_cmp_summary_get(edata);
71710-	edata_heap_remove(&eset->bins[pind].heap, edata);
71711-	if (edata_heap_empty(&eset->bins[pind].heap)) {
71712-		fb_unset(eset->bitmap, ESET_NPSIZES, (size_t)pind);
71713-	} else {
71714-		/*
71715-		 * This is a little weird; we compare if the summaries are
71716-		 * equal, rather than if the edata we removed was the heap
71717-		 * minimum.  The reason why is that getting the heap minimum
71718-		 * can cause a pairing heap merge operation.  We can avoid that
71719-		 * by only updating the min when it has actually changed, in
71720-		 * which case the summaries of the removed element and the min
71721-		 * element should compare equal.
71722-		 */
71723-		if (edata_cmp_summary_comp(edata_cmp_summary,
71724-		    eset->bins[pind].heap_min) == 0) {
71725-			eset->bins[pind].heap_min = edata_cmp_summary_get(
71726-			    edata_heap_first(&eset->bins[pind].heap));
71727-		}
71728-	}
71729-	edata_list_inactive_remove(&eset->lru, edata);
71730-	size_t npages = size >> LG_PAGE;
71731-	/*
71732-	 * As in eset_insert, we hold eset->mtx and so don't need atomic
71733-	 * operations for updating eset->npages.
71734-	 */
71735-	size_t cur_extents_npages =
71736-	    atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
71737-	assert(cur_extents_npages >= npages);
71738-	atomic_store_zu(&eset->npages,
71739-	    cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
71740-}
71741-
71742-/*
71743- * Find an extent with size [min_size, max_size) to satisfy the alignment
71744- * requirement.  For each size, try only the first extent in the heap.
71745- */
71746-static edata_t *
71747-eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
71748-    size_t alignment) {
71749-	pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(min_size));
71750-	pszind_t pind_max = sz_psz2ind(sz_psz_quantize_ceil(max_size));
71751-
71752-	for (pszind_t i =
71753-	    (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind);
71754-	    i < pind_max;
71755-	    i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) {
71756-		assert(i < SC_NPSIZES);
71757-		assert(!edata_heap_empty(&eset->bins[i].heap));
71758-		edata_t *edata = edata_heap_first(&eset->bins[i].heap);
71759-		uintptr_t base = (uintptr_t)edata_base_get(edata);
71760-		size_t candidate_size = edata_size_get(edata);
71761-		assert(candidate_size >= min_size);
71762-
71763-		uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
71764-		    PAGE_CEILING(alignment));
71765-		if (base > next_align || base + candidate_size <= next_align) {
71766-			/* Overflow or not crossing the next alignment. */
71767-			continue;
71768-		}
71769-
71770-		size_t leadsize = next_align - base;
71771-		if (candidate_size - leadsize >= min_size) {
71772-			return edata;
71773-		}
71774-	}
71775-
71776-	return NULL;
71777-}
71778-
71779-/*
71780- * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
71781- * large enough.
71782- *
71783- * lg_max_fit is the (log of the) maximum ratio between the requested size and
71784- * the returned size that we'll allow.  This can reduce fragmentation by
71785- * avoiding reusing and splitting large extents for smaller sizes.  In practice,
71786- * it's set to opt_lg_extent_max_active_fit for the dirty eset and SC_PTR_BITS
71787- * for others.
71788- */
71789-static edata_t *
71790-eset_first_fit(eset_t *eset, size_t size, bool exact_only,
71791-    unsigned lg_max_fit) {
71792-	edata_t *ret = NULL;
71793-	edata_cmp_summary_t ret_summ JEMALLOC_CC_SILENCE_INIT({0});
71794-
71795-	pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size));
71796-
71797-	if (exact_only) {
71798-		return edata_heap_empty(&eset->bins[pind].heap) ? NULL :
71799-		    edata_heap_first(&eset->bins[pind].heap);
71800-	}
71801-
71802-	for (pszind_t i =
71803-	    (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind);
71804-	    i < ESET_NPSIZES;
71805-	    i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) {
71806-		assert(!edata_heap_empty(&eset->bins[i].heap));
71807-		if (lg_max_fit == SC_PTR_BITS) {
71808-			/*
71809-			 * We'll shift by this below, and shifting out all the
71810-			 * bits is undefined.  Decreasing is safe, since the
71811-			 * page size is larger than 1 byte.
71812-			 */
71813-			lg_max_fit = SC_PTR_BITS - 1;
71814-		}
71815-		if ((sz_pind2sz(i) >> lg_max_fit) > size) {
71816-			break;
71817-		}
71818-		if (ret == NULL || edata_cmp_summary_comp(
71819-		    eset->bins[i].heap_min, ret_summ) < 0) {
71820-			/*
71821-			 * We grab the edata as early as possible, even though
71822-			 * we might change it later.  Practically, a large
71823-			 * portion of eset_fit calls succeed at the first valid
71824-			 * index, so this doesn't cost much, and we get the
71825-			 * effect of prefetching the edata as early as possible.
71826-			 */
71827-			edata_t *edata = edata_heap_first(&eset->bins[i].heap);
71828-			assert(edata_size_get(edata) >= size);
71829-			assert(ret == NULL || edata_snad_comp(edata, ret) < 0);
71830-			assert(ret == NULL || edata_cmp_summary_comp(
71831-			    eset->bins[i].heap_min,
71832-			    edata_cmp_summary_get(edata)) == 0);
71833-			ret = edata;
71834-			ret_summ = eset->bins[i].heap_min;
71835-		}
71836-		if (i == SC_NPSIZES) {
71837-			break;
71838-		}
71839-		assert(i < SC_NPSIZES);
71840-	}
71841-
71842-	return ret;
71843-}
71844-
71845-edata_t *
71846-eset_fit(eset_t *eset, size_t esize, size_t alignment, bool exact_only,
71847-    unsigned lg_max_fit) {
71848-	size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
71849-	/* Beware size_t wrap-around. */
71850-	if (max_size < esize) {
71851-		return NULL;
71852-	}
71853-
71854-	edata_t *edata = eset_first_fit(eset, max_size, exact_only, lg_max_fit);
71855-
71856-	if (alignment > PAGE && edata == NULL) {
71857-		/*
71858-		 * max_size guarantees the alignment requirement but is rather
71859-		 * pessimistic.  Next we try to satisfy the aligned allocation
71860-		 * with sizes in [esize, max_size).
71861-		 */
71862-		edata = eset_fit_alignment(eset, esize, max_size, alignment);
71863-	}
71864-
71865-	return edata;
71866-}
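
The lg_max_fit cap in eset_first_fit() above bounds how oversized a reused extent may be relative to the request: the scan gives up once a candidate size class, shifted right by lg_max_fit, still exceeds the requested size. A self-contained sketch of just that guard, over a plain ascending array of size classes standing in for the non-empty bins (hypothetical values; the real code walks a bitmap of heaps and also tracks the oldest candidate). lg_max_fit = 6 is used purely as an example value here:

#include <stddef.h>
#include <stdio.h>

/*
 * Return the index of the first size class that the first-fit scan would
 * accept for `size`, or -1 if the lg_max_fit cap stops the scan first.
 */
static int
first_fit_index(const size_t *classes, size_t nclasses, size_t size,
    unsigned lg_max_fit) {
	for (size_t i = 0; i < nclasses; i++) {
		if (classes[i] < size) {
			continue;	/* too small to satisfy the request */
		}
		if ((classes[i] >> lg_max_fit) > size) {
			return -1;	/* too oversized: splitting it would waste space */
		}
		return (int)i;
	}
	return -1;
}

int
main(void) {
	/* hypothetical non-empty bins, in bytes */
	const size_t classes[] = {4096, 16384, 131072, 2097152, 67108864};
	/*
	 * An 8 KiB request with lg_max_fit = 6 accepts anything up to
	 * 8 KiB << 6 = 512 KiB, so it takes the 16 KiB bin...
	 */
	printf("%d\n", first_fit_index(classes, 5, 8192, 6));	/* -> 1 */
	/* ...but with lg_max_fit = 0 only an exact fit is acceptable. */
	printf("%d\n", first_fit_index(classes, 5, 8192, 0));	/* -> -1 */
	return 0;
}
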
71867diff --git a/jemalloc/src/exp_grow.c b/jemalloc/src/exp_grow.c
71868deleted file mode 100644
71869index 386471f..0000000
71870--- a/jemalloc/src/exp_grow.c
71871+++ /dev/null
71872@@ -1,8 +0,0 @@
71873-#include "jemalloc/internal/jemalloc_preamble.h"
71874-#include "jemalloc/internal/jemalloc_internal_includes.h"
71875-
71876-void
71877-exp_grow_init(exp_grow_t *exp_grow) {
71878-	exp_grow->next = sz_psz2ind(HUGEPAGE);
71879-	exp_grow->limit = sz_psz2ind(SC_LARGE_MAXCLASS);
71880-}
71881diff --git a/jemalloc/src/extent.c b/jemalloc/src/extent.c
71882deleted file mode 100644
71883index cf3d1f3..0000000
71884--- a/jemalloc/src/extent.c
71885+++ /dev/null
71886@@ -1,1326 +0,0 @@
71887-#include "jemalloc/internal/jemalloc_preamble.h"
71888-#include "jemalloc/internal/jemalloc_internal_includes.h"
71889-
71890-#include "jemalloc/internal/assert.h"
71891-#include "jemalloc/internal/emap.h"
71892-#include "jemalloc/internal/extent_dss.h"
71893-#include "jemalloc/internal/extent_mmap.h"
71894-#include "jemalloc/internal/ph.h"
71895-#include "jemalloc/internal/mutex.h"
71896-
71897-/******************************************************************************/
71898-/* Data. */
71899-
71900-size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
71901-
71902-static bool extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
71903-    size_t offset, size_t length, bool growing_retained);
71904-static bool extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks,
71905-    edata_t *edata, size_t offset, size_t length, bool growing_retained);
71906-static bool extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks,
71907-    edata_t *edata, size_t offset, size_t length, bool growing_retained);
71908-static edata_t *extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
71909-    edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks);
71910-static bool extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
71911-    edata_t *a, edata_t *b, bool holding_core_locks);
71912-
71913-/* Used exclusively for gdump triggering. */
71914-static atomic_zu_t curpages;
71915-static atomic_zu_t highpages;
71916-
71917-/******************************************************************************/
71918-/*
71919- * Function prototypes for static functions that are referenced prior to
71920- * definition.
71921- */
71922-
71923-static void extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata);
71924-static edata_t *extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
71925-    ecache_t *ecache, edata_t *expand_edata, size_t usize, size_t alignment,
71926-    bool zero, bool *commit, bool growing_retained, bool guarded);
71927-static edata_t *extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
71928-    ecache_t *ecache, edata_t *edata, bool *coalesced);
71929-static edata_t *extent_alloc_retained(tsdn_t *tsdn, pac_t *pac,
71930-    ehooks_t *ehooks, edata_t *expand_edata, size_t size, size_t alignment,
71931-    bool zero, bool *commit, bool guarded);
71932-
71933-/******************************************************************************/
71934-
71935-size_t
71936-extent_sn_next(pac_t *pac) {
71937-	return atomic_fetch_add_zu(&pac->extent_sn_next, 1, ATOMIC_RELAXED);
71938-}
71939-
71940-static inline bool
71941-extent_may_force_decay(pac_t *pac) {
71942-	return !(pac_decay_ms_get(pac, extent_state_dirty) == -1
71943-	    || pac_decay_ms_get(pac, extent_state_muzzy) == -1);
71944-}
71945-
71946-static bool
71947-extent_try_delayed_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
71948-    ecache_t *ecache, edata_t *edata) {
71949-	emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
71950-
71951-	bool coalesced;
71952-	edata = extent_try_coalesce(tsdn, pac, ehooks, ecache,
71953-	    edata, &coalesced);
71954-	emap_update_edata_state(tsdn, pac->emap, edata, ecache->state);
71955-
71956-	if (!coalesced) {
71957-		return true;
71958-	}
71959-	eset_insert(&ecache->eset, edata);
71960-	return false;
71961-}
71962-
71963-edata_t *
71964-ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
71965-    edata_t *expand_edata, size_t size, size_t alignment, bool zero,
71966-    bool guarded) {
71967-	assert(size != 0);
71968-	assert(alignment != 0);
71969-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
71970-	    WITNESS_RANK_CORE, 0);
71971-
71972-	bool commit = true;
71973-	edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, expand_edata,
71974-	    size, alignment, zero, &commit, false, guarded);
71975-	assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
71976-	assert(edata == NULL || edata_guarded_get(edata) == guarded);
71977-	return edata;
71978-}
71979-
71980-edata_t *
71981-ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
71982-    edata_t *expand_edata, size_t size, size_t alignment, bool zero,
71983-    bool guarded) {
71984-	assert(size != 0);
71985-	assert(alignment != 0);
71986-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
71987-	    WITNESS_RANK_CORE, 0);
71988-
71989-	bool commit = true;
71990-	edata_t *edata = extent_alloc_retained(tsdn, pac, ehooks, expand_edata,
71991-	    size, alignment, zero, &commit, guarded);
71992-	if (edata == NULL) {
71993-		if (opt_retain && expand_edata != NULL) {
71994-			/*
71995-			 * When retain is enabled and trying to expand, we do
71996-			 * not attempt extent_alloc_wrapper, which mmaps new
71997-			 * memory that is very unlikely to be contiguous with the
71998-			 * extent being expanded (unless it lands right at its end).
71999-			 */
72000-			return NULL;
72001-		}
72002-		if (guarded) {
72003-			/*
72004-			 * Means no cached guarded extents available (and no
72005-			 * grow_retained was attempted).  The pac_alloc flow
72006-			 * will alloc regular extents to make new guarded ones.
72007-			 */
72008-			return NULL;
72009-		}
72010-		void *new_addr = (expand_edata == NULL) ? NULL :
72011-		    edata_past_get(expand_edata);
72012-		edata = extent_alloc_wrapper(tsdn, pac, ehooks, new_addr,
72013-		    size, alignment, zero, &commit,
72014-		    /* growing_retained */ false);
72015-	}
72016-
72017-	assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
72018-	return edata;
72019-}
72020-
72021-void
72022-ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
72023-    edata_t *edata) {
72024-	assert(edata_base_get(edata) != NULL);
72025-	assert(edata_size_get(edata) != 0);
72026-	assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
72027-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
72028-	    WITNESS_RANK_CORE, 0);
72029-
72030-	edata_addr_set(edata, edata_base_get(edata));
72031-	edata_zeroed_set(edata, false);
72032-
72033-	extent_record(tsdn, pac, ehooks, ecache, edata);
72034-}
72035-
72036-edata_t *
72037-ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
72038-    ecache_t *ecache, size_t npages_min) {
72039-	malloc_mutex_lock(tsdn, &ecache->mtx);
72040-
72041-	/*
72042-	 * Get the LRU coalesced extent, if any.  If coalescing was delayed,
72043-	 * the loop will iterate until the LRU extent is fully coalesced.
72044-	 */
72045-	edata_t *edata;
72046-	while (true) {
72047-		/* Get the LRU extent, if any. */
72048-		eset_t *eset = &ecache->eset;
72049-		edata = edata_list_inactive_first(&eset->lru);
72050-		if (edata == NULL) {
72051-			/*
72052-			 * Next check if there are guarded extents.  They are
72053-			 * more expensive to purge (since they are not
72054-			 * mergeable), so we favor caching them longer.
72055-			 */
72056-			eset = &ecache->guarded_eset;
72057-			edata = edata_list_inactive_first(&eset->lru);
72058-			if (edata == NULL) {
72059-				goto label_return;
72060-			}
72061-		}
72062-		/* Check the eviction limit. */
72063-		size_t extents_npages = ecache_npages_get(ecache);
72064-		if (extents_npages <= npages_min) {
72065-			edata = NULL;
72066-			goto label_return;
72067-		}
72068-		eset_remove(eset, edata);
72069-		if (!ecache->delay_coalesce || edata_guarded_get(edata)) {
72070-			break;
72071-		}
72072-		/* Try to coalesce. */
72073-		if (extent_try_delayed_coalesce(tsdn, pac, ehooks, ecache,
72074-		    edata)) {
72075-			break;
72076-		}
72077-		/*
72078-		 * The LRU extent was just coalesced and the result placed in
72079-		 * the LRU at its neighbor's position.  Start over.
72080-		 */
72081-	}
72082-
72083-	/*
72084-	 * Either mark the extent active or deregister it to protect against
72085-	 * concurrent operations.
72086-	 */
72087-	switch (ecache->state) {
72088-	case extent_state_active:
72089-		not_reached();
72090-	case extent_state_dirty:
72091-	case extent_state_muzzy:
72092-		emap_update_edata_state(tsdn, pac->emap, edata,
72093-		    extent_state_active);
72094-		break;
72095-	case extent_state_retained:
72096-		extent_deregister(tsdn, pac, edata);
72097-		break;
72098-	default:
72099-		not_reached();
72100-	}
72101-
72102-label_return:
72103-	malloc_mutex_unlock(tsdn, &ecache->mtx);
72104-	return edata;
72105-}
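
ecache_evict() above loops because, with delayed coalescing, the LRU extent may first need to be merged with its neighbors; each successful merge re-enters the LRU and the scan restarts. A stripped-down sketch of just that retry structure, with hypothetical lru_pop_oldest and try_coalesce helpers assumed rather than implemented, and without the locking, guarded eset, or state transitions of the real code:

#include <stdbool.h>
#include <stddef.h>

typedef struct extent_s {
	struct extent_s *prev, *next;	/* LRU links */
	size_t npages;
} extent_t;

typedef struct {
	extent_t *lru_head;		/* oldest extent first */
	size_t npages;			/* total cached pages */
} cache_t;

/* Hypothetical helpers, assumed for the sketch rather than implemented. */
extent_t *lru_pop_oldest(cache_t *cache);
bool try_coalesce(cache_t *cache, extent_t *extent);	/* true iff merged */

/*
 * Evict one extent without dropping the cache below npages_min; mirrors the
 * retry loop of ecache_evict() above.
 */
extent_t *
cache_evict(cache_t *cache, size_t npages_min, bool delay_coalesce) {
	for (;;) {
		if (cache->npages <= npages_min) {
			return NULL;	/* eviction limit reached */
		}
		extent_t *extent = lru_pop_oldest(cache);
		if (extent == NULL) {
			return NULL;	/* nothing cached */
		}
		if (!delay_coalesce || !try_coalesce(cache, extent)) {
			return extent;	/* already fully coalesced: evict it */
		}
		/*
		 * The extent was merged with a neighbor and the result went
		 * back into the LRU at the neighbor's position; start over.
		 */
	}
}
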
72106-
72107-/*
72108- * This can only happen when we fail to allocate a new extent struct (which
72109- * indicates OOM), e.g. when trying to split an existing extent.
72110- */
72111-static void
72112-extents_abandon_vm(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
72113-    edata_t *edata, bool growing_retained) {
72114-	size_t sz = edata_size_get(edata);
72115-	if (config_stats) {
72116-		atomic_fetch_add_zu(&pac->stats->abandoned_vm, sz,
72117-		    ATOMIC_RELAXED);
72118-	}
72119-	/*
72120-	 * Leak extent after making sure its pages have already been purged, so
72121-	 * that this is only a virtual memory leak.
72122-	 */
72123-	if (ecache->state == extent_state_dirty) {
72124-		if (extent_purge_lazy_impl(tsdn, ehooks, edata, 0, sz,
72125-		    growing_retained)) {
72126-			extent_purge_forced_impl(tsdn, ehooks, edata, 0,
72127-			    edata_size_get(edata), growing_retained);
72128-		}
72129-	}
72130-	edata_cache_put(tsdn, pac->edata_cache, edata);
72131-}
72132-
72133-static void
72134-extent_deactivate_locked_impl(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
72135-    edata_t *edata) {
72136-	malloc_mutex_assert_owner(tsdn, &ecache->mtx);
72137-	assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
72138-
72139-	emap_update_edata_state(tsdn, pac->emap, edata, ecache->state);
72140-	eset_t *eset = edata_guarded_get(edata) ? &ecache->guarded_eset :
72141-	    &ecache->eset;
72142-	eset_insert(eset, edata);
72143-}
72144-
72145-static void
72146-extent_deactivate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
72147-    edata_t *edata) {
72148-	assert(edata_state_get(edata) == extent_state_active);
72149-	extent_deactivate_locked_impl(tsdn, pac, ecache, edata);
72150-}
72151-
72152-static void
72153-extent_deactivate_check_state_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
72154-    edata_t *edata, extent_state_t expected_state) {
72155-	assert(edata_state_get(edata) == expected_state);
72156-	extent_deactivate_locked_impl(tsdn, pac, ecache, edata);
72157-}
72158-
72159-static void
72160-extent_activate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, eset_t *eset,
72161-    edata_t *edata) {
72162-	assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
72163-	assert(edata_state_get(edata) == ecache->state ||
72164-	    edata_state_get(edata) == extent_state_merging);
72165-
72166-	eset_remove(eset, edata);
72167-	emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
72168-}
72169-
72170-void
72171-extent_gdump_add(tsdn_t *tsdn, const edata_t *edata) {
72172-	cassert(config_prof);
72173-	/* prof_gdump() requirement. */
72174-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
72175-	    WITNESS_RANK_CORE, 0);
72176-
72177-	if (opt_prof && edata_state_get(edata) == extent_state_active) {
72178-		size_t nadd = edata_size_get(edata) >> LG_PAGE;
72179-		size_t cur = atomic_fetch_add_zu(&curpages, nadd,
72180-		    ATOMIC_RELAXED) + nadd;
72181-		size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
72182-		while (cur > high && !atomic_compare_exchange_weak_zu(
72183-		    &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
72184-			/*
72185-			 * Don't refresh cur, because it may have decreased
72186-			 * since this thread lost the highpages update race.
72187-			 * Note that high is updated in case of CAS failure.
72188-			 */
72189-		}
72190-		if (cur > high && prof_gdump_get_unlocked()) {
72191-			prof_gdump(tsdn);
72192-		}
72193-	}
72194-}
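
The comment in extent_gdump_add() above describes a standard lock-free "update the maximum" loop: bump a shared counter, then CAS the high-water mark upward, relying on compare-exchange-weak refreshing the expected value whenever it fails. A self-contained C11 illustration of the same pattern (plain <stdatomic.h>, not jemalloc's atomic wrappers; pages_add is a hypothetical name):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_size_t curpages;		/* running total */
static atomic_size_t highpages;		/* high-water mark */

/*
 * Add `nadd` pages and return true when a new high-water mark was set; the
 * same maximum-update loop as in extent_gdump_add() above.
 */
static bool
pages_add(size_t nadd) {
	size_t cur = atomic_fetch_add_explicit(&curpages, nadd,
	    memory_order_relaxed) + nadd;
	size_t high = atomic_load_explicit(&highpages, memory_order_relaxed);
	while (cur > high && !atomic_compare_exchange_weak_explicit(&highpages,
	    &high, cur, memory_order_relaxed, memory_order_relaxed)) {
		/*
		 * Don't refresh cur: it may already have decreased if another
		 * thread removed pages after our fetch-add.  `high` is
		 * refreshed automatically on CAS failure.
		 */
	}
	return cur > high;
}

int
main(void) {
	printf("%d\n", pages_add(16));	/* 1: new maximum recorded */
	printf("%d\n", pages_add(0));	/* 0: no change */
	return 0;
}
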
72195-
72196-static void
72197-extent_gdump_sub(tsdn_t *tsdn, const edata_t *edata) {
72198-	cassert(config_prof);
72199-
72200-	if (opt_prof && edata_state_get(edata) == extent_state_active) {
72201-		size_t nsub = edata_size_get(edata) >> LG_PAGE;
72202-		assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
72203-		atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
72204-	}
72205-}
72206-
72207-static bool
72208-extent_register_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata, bool gdump_add) {
72209-	assert(edata_state_get(edata) == extent_state_active);
72210-	/*
72211-	 * No locking needed, as the edata must be in active state, which
72212-	 * prevents other threads from accessing the edata.
72213-	 */
72214-	if (emap_register_boundary(tsdn, pac->emap, edata, SC_NSIZES,
72215-	    /* slab */ false)) {
72216-		return true;
72217-	}
72218-
72219-	if (config_prof && gdump_add) {
72220-		extent_gdump_add(tsdn, edata);
72221-	}
72222-
72223-	return false;
72224-}
72225-
72226-static bool
72227-extent_register(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
72228-	return extent_register_impl(tsdn, pac, edata, true);
72229-}
72230-
72231-static bool
72232-extent_register_no_gdump_add(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
72233-	return extent_register_impl(tsdn, pac, edata, false);
72234-}
72235-
72236-static void
72237-extent_reregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
72238-	bool err = extent_register(tsdn, pac, edata);
72239-	assert(!err);
72240-}
72241-
72242-/*
72243- * Removes all pointers to the given extent from the global rtree.
72244- */
72245-static void
72246-extent_deregister_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata,
72247-    bool gdump) {
72248-	emap_deregister_boundary(tsdn, pac->emap, edata);
72249-
72250-	if (config_prof && gdump) {
72251-		extent_gdump_sub(tsdn, edata);
72252-	}
72253-}
72254-
72255-static void
72256-extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
72257-	extent_deregister_impl(tsdn, pac, edata, true);
72258-}
72259-
72260-static void
72261-extent_deregister_no_gdump_sub(tsdn_t *tsdn, pac_t *pac,
72262-    edata_t *edata) {
72263-	extent_deregister_impl(tsdn, pac, edata, false);
72264-}
72265-
72266-/*
72267- * Tries to find and remove an extent from ecache that can be used for the
72268- * given allocation request.
72269- */
72270-static edata_t *
72271-extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
72272-    ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
72273-    bool guarded) {
72274-	malloc_mutex_assert_owner(tsdn, &ecache->mtx);
72275-	assert(alignment > 0);
72276-	if (config_debug && expand_edata != NULL) {
72277-		/*
72278-		 * Non-NULL expand_edata indicates in-place expanding realloc.
72279-		 * new_addr must either refer to a non-existing extent, or to
72280-		 * the base of an extant extent, since only active slabs support
72281-		 * interior lookups (which of course cannot be recycled).
72282-		 */
72283-		void *new_addr = edata_past_get(expand_edata);
72284-		assert(PAGE_ADDR2BASE(new_addr) == new_addr);
72285-		assert(alignment <= PAGE);
72286-	}
72287-
72288-	edata_t *edata;
72289-	eset_t *eset = guarded ? &ecache->guarded_eset : &ecache->eset;
72290-	if (expand_edata != NULL) {
72291-		edata = emap_try_acquire_edata_neighbor_expand(tsdn, pac->emap,
72292-		    expand_edata, EXTENT_PAI_PAC, ecache->state);
72293-		if (edata != NULL) {
72294-			extent_assert_can_expand(expand_edata, edata);
72295-			if (edata_size_get(edata) < size) {
72296-				emap_release_edata(tsdn, pac->emap, edata,
72297-				    ecache->state);
72298-				edata = NULL;
72299-			}
72300-		}
72301-	} else {
72302-		/*
72303-		 * A large extent might be broken up from its original size to
72304-		 * some small size to satisfy a small request.  When that small
72305-		 * request is freed, though, it won't merge back with the larger
72306-		 * extent if delayed coalescing is on.  The large extent can
72307-		 * then no longer satify a request for its original size.  To
72308-		 * then no longer satisfy a request for its original size.  To
72309-		 * put a cap on how big an extent we can split for a request.
72310-		 */
72311-		unsigned lg_max_fit = ecache->delay_coalesce
72312-		    ? (unsigned)opt_lg_extent_max_active_fit : SC_PTR_BITS;
72313-
72314-		/*
72315-		 * If split and merge are not allowed (Windows w/o retain), try
72316-		 * exact fit only.
72317-		 *
72318-		 * For simplicity purposes, splitting guarded extents is not
72319-		 * supported.  Hence, we do only exact fit for guarded
72320-		 * allocations.
72321-		 */
72322-		bool exact_only = (!maps_coalesce && !opt_retain) || guarded;
72323-		edata = eset_fit(eset, size, alignment, exact_only,
72324-		    lg_max_fit);
72325-	}
72326-	if (edata == NULL) {
72327-		return NULL;
72328-	}
72329-	assert(!guarded || edata_guarded_get(edata));
72330-	extent_activate_locked(tsdn, pac, ecache, eset, edata);
72331-
72332-	return edata;
72333-}
72334-
72335-/*
72336- * Given an allocation request and an extent guaranteed to be able to satisfy
72337- * it, this splits off lead and trail extents, leaving edata pointing to an
72338- * extent satisfying the allocation.
72339- * This function doesn't put lead or trail into any ecache; it's the caller's
72340- * job to ensure that they can be reused.
72341- */
72342-typedef enum {
72343-	/*
72344-	 * Split successfully.  lead, edata, and trail, are modified to extents
72345-	 * describing the ranges before, in, and after the given allocation.
72346-	 */
72347-	extent_split_interior_ok,
72348-	/*
72349-	 * The extent can't satisfy the given allocation request.  None of the
72350-	 * input edata_t *s are touched.
72351-	 */
72352-	extent_split_interior_cant_alloc,
72353-	/*
72354-	 * In a potentially invalid state.  Must leak (if *to_leak is non-NULL),
72355-	 * and salvage what's still salvageable (if *to_salvage is non-NULL).
72356-	 * None of lead, edata, or trail are valid.
72357-	 */
72358-	extent_split_interior_error
72359-} extent_split_interior_result_t;
72360-
72361-static extent_split_interior_result_t
72362-extent_split_interior(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
72363-    /* The result of splitting, in case of success. */
72364-    edata_t **edata, edata_t **lead, edata_t **trail,
72365-    /* The mess to clean up, in case of error. */
72366-    edata_t **to_leak, edata_t **to_salvage,
72367-    edata_t *expand_edata, size_t size, size_t alignment) {
72368-	size_t leadsize = ALIGNMENT_CEILING((uintptr_t)edata_base_get(*edata),
72369-	    PAGE_CEILING(alignment)) - (uintptr_t)edata_base_get(*edata);
72370-	assert(expand_edata == NULL || leadsize == 0);
72371-	if (edata_size_get(*edata) < leadsize + size) {
72372-		return extent_split_interior_cant_alloc;
72373-	}
72374-	size_t trailsize = edata_size_get(*edata) - leadsize - size;
72375-
72376-	*lead = NULL;
72377-	*trail = NULL;
72378-	*to_leak = NULL;
72379-	*to_salvage = NULL;
72380-
72381-	/* Split the lead. */
72382-	if (leadsize != 0) {
72383-		assert(!edata_guarded_get(*edata));
72384-		*lead = *edata;
72385-		*edata = extent_split_impl(tsdn, pac, ehooks, *lead, leadsize,
72386-		    size + trailsize, /* holding_core_locks */ true);
72387-		if (*edata == NULL) {
72388-			*to_leak = *lead;
72389-			*lead = NULL;
72390-			return extent_split_interior_error;
72391-		}
72392-	}
72393-
72394-	/* Split the trail. */
72395-	if (trailsize != 0) {
72396-		assert(!edata_guarded_get(*edata));
72397-		*trail = extent_split_impl(tsdn, pac, ehooks, *edata, size,
72398-		    trailsize, /* holding_core_locks */ true);
72399-		if (*trail == NULL) {
72400-			*to_leak = *edata;
72401-			*to_salvage = *lead;
72402-			*lead = NULL;
72403-			*edata = NULL;
72404-			return extent_split_interior_error;
72405-		}
72406-	}
72407-
72408-	return extent_split_interior_ok;
72409-}
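
The split arithmetic above boils down to: round the extent's base up to the requested alignment to get the lead size, check that what remains still covers the request, and call whatever is left over the trail.  The following self-contained sketch reproduces that arithmetic on hypothetical numbers; ALIGN_CEIL and the constants are illustrative stand-ins, not jemalloc's ALIGNMENT_CEILING/PAGE_CEILING macros.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN_CEIL(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int
main(void) {
	uintptr_t base = 0x10001000;	/* Hypothetical extent base. */
	size_t extent_size = 0x8000;	/* 32 KiB extent. */
	size_t request = 0x4000;	/* 16 KiB request... */
	size_t alignment = 0x4000;	/* ...with 16 KiB alignment. */

	size_t leadsize = ALIGN_CEIL(base, alignment) - base;
	if (extent_size < leadsize + request) {
		puts("cant_alloc: extent too small once aligned");
		return 1;
	}
	size_t trailsize = extent_size - leadsize - request;
	printf("lead=0x%zx alloc=0x%zx trail=0x%zx\n", leadsize, request,
	    trailsize);
	return 0;
}

For these numbers it prints lead=0x3000 alloc=0x4000 trail=0x1000: 0x3000 bytes are split off the front to reach a 16 KiB boundary, and 0x1000 bytes remain as the trail.
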
72410-
72411-/*
72412- * This fulfills the indicated allocation request out of the given extent (which
72413- * the caller should have ensured was big enough).  If there's any unused space
72414- * before or after the resulting allocation, that space is given its own extent
72415- * and put back into ecache.
72416- */
72417-static edata_t *
72418-extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
72419-    ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
72420-    edata_t *edata, bool growing_retained) {
72421-	assert(!edata_guarded_get(edata) || size == edata_size_get(edata));
72422-	malloc_mutex_assert_owner(tsdn, &ecache->mtx);
72423-
72424-	edata_t *lead;
72425-	edata_t *trail;
72426-	edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL);
72427-	edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL);
72428-
72429-	extent_split_interior_result_t result = extent_split_interior(
72430-	    tsdn, pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage,
72431-	    expand_edata, size, alignment);
72432-
72433-	if (!maps_coalesce && result != extent_split_interior_ok
72434-	    && !opt_retain) {
72435-		/*
72436-		 * Split isn't supported (implies Windows w/o retain).  Avoid
72437-		 * leaking the extent.
72438-		 */
72439-		assert(to_leak != NULL && lead == NULL && trail == NULL);
72440-		extent_deactivate_locked(tsdn, pac, ecache, to_leak);
72441-		return NULL;
72442-	}
72443-
72444-	if (result == extent_split_interior_ok) {
72445-		if (lead != NULL) {
72446-			extent_deactivate_locked(tsdn, pac, ecache, lead);
72447-		}
72448-		if (trail != NULL) {
72449-			extent_deactivate_locked(tsdn, pac, ecache, trail);
72450-		}
72451-		return edata;
72452-	} else {
72453-		/*
72454-		 * We should have picked an extent that was large enough to
72455-		 * fulfill our allocation request.
72456-		 */
72457-		assert(result == extent_split_interior_error);
72458-		if (to_salvage != NULL) {
72459-			extent_deregister(tsdn, pac, to_salvage);
72460-		}
72461-		if (to_leak != NULL) {
72462-			extent_deregister_no_gdump_sub(tsdn, pac, to_leak);
72463-			/*
72464-			 * May go down the purge path (which assumes no ecache
72465-			 * locks held).  Only happens with OOM-caused split failures.
72466-			 */
72467-			malloc_mutex_unlock(tsdn, &ecache->mtx);
72468-			extents_abandon_vm(tsdn, pac, ehooks, ecache, to_leak,
72469-			    growing_retained);
72470-			malloc_mutex_lock(tsdn, &ecache->mtx);
72471-		}
72472-		return NULL;
72473-	}
72474-	unreachable();
72475-}
72476-
72477-/*
72478- * Tries to satisfy the given allocation request by reusing one of the extents
72479- * in the given ecache_t.
72480- */
72481-static edata_t *
72482-extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
72483-    edata_t *expand_edata, size_t size, size_t alignment, bool zero,
72484-    bool *commit, bool growing_retained, bool guarded) {
72485-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
72486-	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
72487-	assert(!guarded || expand_edata == NULL);
72488-	assert(!guarded || alignment <= PAGE);
72489-
72490-	malloc_mutex_lock(tsdn, &ecache->mtx);
72491-
72492-	edata_t *edata = extent_recycle_extract(tsdn, pac, ehooks, ecache,
72493-	    expand_edata, size, alignment, guarded);
72494-	if (edata == NULL) {
72495-		malloc_mutex_unlock(tsdn, &ecache->mtx);
72496-		return NULL;
72497-	}
72498-
72499-	edata = extent_recycle_split(tsdn, pac, ehooks, ecache, expand_edata,
72500-	    size, alignment, edata, growing_retained);
72501-	malloc_mutex_unlock(tsdn, &ecache->mtx);
72502-	if (edata == NULL) {
72503-		return NULL;
72504-	}
72505-
72506-	assert(edata_state_get(edata) == extent_state_active);
72507-	if (extent_commit_zero(tsdn, ehooks, edata, *commit, zero,
72508-	    growing_retained)) {
72509-		extent_record(tsdn, pac, ehooks, ecache, edata);
72510-		return NULL;
72511-	}
72512-	if (edata_committed_get(edata)) {
72513-		/*
72514-		 * The variable changes roles here: previously treated as an
72515-		 * input parameter, it now becomes an output parameter that
72516-		 * reports whether the edata has actually been committed.
72518-		 */
72519-		*commit = true;
72520-	}
72521-	return edata;
72522-}
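
The comment above describes a parameter whose role flips from input (a commit request) to output (the actual commit state).  A tiny hypothetical sketch of that convention, unrelated to jemalloc's real hooks:

#include <stdbool.h>
#include <stdio.h>

/*
 * On entry, *commit asks for committed memory; on return it reports whether
 * the range really ended up committed, which may exceed the request if the
 * backing memory was already committed.
 */
bool
reserve_range(bool *commit, bool already_committed) {
	bool request = *commit;			/* Input role. */
	/* ... acquire the range here ... */
	*commit = already_committed || request;	/* Output role. */
	return false;				/* No failure path in this sketch. */
}

int
main(void) {
	bool commit = false;	/* Don't require committed memory... */
	reserve_range(&commit, true);
	/* ...but learn that it was committed anyway. */
	printf("committed: %s\n", commit ? "true" : "false");
	return 0;
}
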
72523-
72524-/*
72525- * If virtual memory is retained, create increasingly larger extents from which
72526- * to split requested extents in order to limit the total number of disjoint
72527- * virtual memory ranges retained by each shard.
72528- */
72529-static edata_t *
72530-extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
72531-    size_t size, size_t alignment, bool zero, bool *commit) {
72532-	malloc_mutex_assert_owner(tsdn, &pac->grow_mtx);
72533-
72534-	size_t alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
72535-	/* Beware size_t wrap-around. */
72536-	if (alloc_size_min < size) {
72537-		goto label_err;
72538-	}
72539-	/*
72540-	 * Find the next extent size in the series that would be large enough to
72541-	 * satisfy this request.
72542-	 */
72543-	size_t alloc_size;
72544-	pszind_t exp_grow_skip;
72545-	bool err = exp_grow_size_prepare(&pac->exp_grow, alloc_size_min,
72546-	    &alloc_size, &exp_grow_skip);
72547-	if (err) {
72548-		goto label_err;
72549-	}
72550-
72551-	edata_t *edata = edata_cache_get(tsdn, pac->edata_cache);
72552-	if (edata == NULL) {
72553-		goto label_err;
72554-	}
72555-	bool zeroed = false;
72556-	bool committed = false;
72557-
72558-	void *ptr = ehooks_alloc(tsdn, ehooks, NULL, alloc_size, PAGE, &zeroed,
72559-	    &committed);
72560-
72561-	if (ptr == NULL) {
72562-		edata_cache_put(tsdn, pac->edata_cache, edata);
72563-		goto label_err;
72564-	}
72565-
72566-	edata_init(edata, ecache_ind_get(&pac->ecache_retained), ptr,
72567-	    alloc_size, false, SC_NSIZES, extent_sn_next(pac),
72568-	    extent_state_active, zeroed, committed, EXTENT_PAI_PAC,
72569-	    EXTENT_IS_HEAD);
72570-
72571-	if (extent_register_no_gdump_add(tsdn, pac, edata)) {
72572-		edata_cache_put(tsdn, pac->edata_cache, edata);
72573-		goto label_err;
72574-	}
72575-
72576-	if (edata_committed_get(edata)) {
72577-		*commit = true;
72578-	}
72579-
72580-	edata_t *lead;
72581-	edata_t *trail;
72582-	edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL);
72583-	edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL);
72584-
72585-	extent_split_interior_result_t result = extent_split_interior(tsdn,
72586-	    pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage, NULL,
72587-	    size, alignment);
72588-
72589-	if (result == extent_split_interior_ok) {
72590-		if (lead != NULL) {
72591-			extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
72592-			    lead);
72593-		}
72594-		if (trail != NULL) {
72595-			extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
72596-			    trail);
72597-		}
72598-	} else {
72599-		/*
72600-		 * We should have allocated a sufficiently large extent; the
72601-		 * cant_alloc case should not occur.
72602-		 */
72603-		assert(result == extent_split_interior_error);
72604-		if (to_salvage != NULL) {
72605-			if (config_prof) {
72606-				extent_gdump_add(tsdn, to_salvage);
72607-			}
72608-			extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
72609-			    to_salvage);
72610-		}
72611-		if (to_leak != NULL) {
72612-			extent_deregister_no_gdump_sub(tsdn, pac, to_leak);
72613-			extents_abandon_vm(tsdn, pac, ehooks,
72614-			    &pac->ecache_retained, to_leak, true);
72615-		}
72616-		goto label_err;
72617-	}
72618-
72619-	if (*commit && !edata_committed_get(edata)) {
72620-		if (extent_commit_impl(tsdn, ehooks, edata, 0,
72621-		    edata_size_get(edata), true)) {
72622-			extent_record(tsdn, pac, ehooks,
72623-			    &pac->ecache_retained, edata);
72624-			goto label_err;
72625-		}
72626-		/* A successful commit should return zeroed memory. */
72627-		if (config_debug) {
72628-			void *addr = edata_addr_get(edata);
72629-			size_t *p = (size_t *)(uintptr_t)addr;
72630-			/* Check the first page only. */
72631-			for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
72632-				assert(p[i] == 0);
72633-			}
72634-		}
72635-	}
72636-
72637-	/*
72638-	 * Increment extent_grow_next if doing so wouldn't exceed the allowed
72639-	 * range.
72640-	 */
72641-	/* All opportunities for failure are past. */
72642-	exp_grow_size_commit(&pac->exp_grow, exp_grow_skip);
72643-	malloc_mutex_unlock(tsdn, &pac->grow_mtx);
72644-
72645-	if (config_prof) {
72646-		/* Adjust gdump stats now that extent is final size. */
72647-		extent_gdump_add(tsdn, edata);
72648-	}
72649-	if (zero && !edata_zeroed_get(edata)) {
72650-		ehooks_zero(tsdn, ehooks, edata_base_get(edata),
72651-		    edata_size_get(edata));
72652-	}
72653-	return edata;
72654-label_err:
72655-	malloc_mutex_unlock(tsdn, &pac->grow_mtx);
72656-	return NULL;
72657-}
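
extent_grow_retained() asks an exponential-growth helper for the next mapping size at least as large as alloc_size_min, and only advances the growth cursor once the mapping has actually succeeded.  The sketch below captures that prepare/commit shape with a plain doubling series; the real exp_grow_* helpers work over jemalloc's size classes and enforce an upper bound, which this illustration omits.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define GROW_BASE ((size_t)2 << 20)	/* Start at 2 MiB (illustrative). */

static unsigned grow_next = 0;	/* Persistent cursor, like pac->exp_grow. */

/* Pick the first size in the doubling series that covers alloc_size_min. */
bool
grow_size_prepare(size_t alloc_size_min, size_t *alloc_size, unsigned *skip) {
	size_t sz = GROW_BASE << grow_next;
	unsigned steps = 0;
	while (sz < alloc_size_min) {
		if (sz > SIZE_MAX / 2) {
			return true;	/* Would overflow: report failure. */
		}
		sz <<= 1;
		steps++;
	}
	*alloc_size = sz;
	*skip = steps;
	return false;
}

/* Advance the cursor only after the mapping succeeds (cf. exp_grow_size_commit). */
void
grow_size_commit(unsigned skip) {
	grow_next += skip + 1;
}
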
72658-
72659-static edata_t *
72660-extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
72661-    edata_t *expand_edata, size_t size, size_t alignment, bool zero,
72662-    bool *commit, bool guarded) {
72663-	assert(size != 0);
72664-	assert(alignment != 0);
72665-
72666-	malloc_mutex_lock(tsdn, &pac->grow_mtx);
72667-
72668-	edata_t *edata = extent_recycle(tsdn, pac, ehooks,
72669-	    &pac->ecache_retained, expand_edata, size, alignment, zero, commit,
72670-	    /* growing_retained */ true, guarded);
72671-	if (edata != NULL) {
72672-		malloc_mutex_unlock(tsdn, &pac->grow_mtx);
72673-		if (config_prof) {
72674-			extent_gdump_add(tsdn, edata);
72675-		}
72676-	} else if (opt_retain && expand_edata == NULL && !guarded) {
72677-		edata = extent_grow_retained(tsdn, pac, ehooks, size,
72678-		    alignment, zero, commit);
72679-		/* extent_grow_retained() always releases pac->grow_mtx. */
72680-	} else {
72681-		malloc_mutex_unlock(tsdn, &pac->grow_mtx);
72682-	}
72683-	malloc_mutex_assert_not_owner(tsdn, &pac->grow_mtx);
72684-
72685-	return edata;
72686-}
72687-
72688-static bool
72689-extent_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
72690-    edata_t *inner, edata_t *outer, bool forward) {
72691-	extent_assert_can_coalesce(inner, outer);
72692-	eset_remove(&ecache->eset, outer);
72693-
72694-	bool err = extent_merge_impl(tsdn, pac, ehooks,
72695-	    forward ? inner : outer, forward ? outer : inner,
72696-	    /* holding_core_locks */ true);
72697-	if (err) {
72698-		extent_deactivate_check_state_locked(tsdn, pac, ecache, outer,
72699-		    extent_state_merging);
72700-	}
72701-
72702-	return err;
72703-}
72704-
72705-static edata_t *
72706-extent_try_coalesce_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
72707-    ecache_t *ecache, edata_t *edata, bool *coalesced) {
72708-	assert(!edata_guarded_get(edata));
72709-	/*
72710-	 * We avoid checking / locking inactive neighbors for large size
72711-	 * classes, since they are eagerly coalesced on deallocation, which can
72712-	 * cause lock contention.
72713-	 */
72714-	/*
72715-	 * Continue attempting to coalesce until failure, to protect against
72716-	 * races with other threads that are thwarted by this one.
72717-	 */
72718-	bool again;
72719-	do {
72720-		again = false;
72721-
72722-		/* Try to coalesce forward. */
72723-		edata_t *next = emap_try_acquire_edata_neighbor(tsdn, pac->emap,
72724-		    edata, EXTENT_PAI_PAC, ecache->state, /* forward */ true);
72725-		if (next != NULL) {
72726-			if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata,
72727-			    next, true)) {
72728-				if (ecache->delay_coalesce) {
72729-					/* Do minimal coalescing. */
72730-					*coalesced = true;
72731-					return edata;
72732-				}
72733-				again = true;
72734-			}
72735-		}
72736-
72737-		/* Try to coalesce backward. */
72738-		edata_t *prev = emap_try_acquire_edata_neighbor(tsdn, pac->emap,
72739-		    edata, EXTENT_PAI_PAC, ecache->state, /* forward */ false);
72740-		if (prev != NULL) {
72741-			if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata,
72742-			    prev, false)) {
72743-				edata = prev;
72744-				if (ecache->delay_coalesce) {
72745-					/* Do minimal coalescing. */
72746-					*coalesced = true;
72747-					return edata;
72748-				}
72749-				again = true;
72750-			}
72751-		}
72752-	} while (again);
72753-
72754-	if (ecache->delay_coalesce) {
72755-		*coalesced = false;
72756-	}
72757-	return edata;
72758-}
72759-
72760-static edata_t *
72761-extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
72762-    ecache_t *ecache, edata_t *edata, bool *coalesced) {
72763-	return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata,
72764-	    coalesced);
72765-}
72766-
72767-static edata_t *
72768-extent_try_coalesce_large(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
72769-    ecache_t *ecache, edata_t *edata, bool *coalesced) {
72770-	return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata,
72771-	    coalesced);
72772-}
72773-
72774-/* Purge a single extent to retained / unmapped directly. */
72775-static void
72776-extent_maximally_purge(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
72777-    edata_t *edata) {
72778-	size_t extent_size = edata_size_get(edata);
72779-	extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
72780-	if (config_stats) {
72781-		/* Update stats accordingly. */
72782-		LOCKEDINT_MTX_LOCK(tsdn, *pac->stats_mtx);
72783-		locked_inc_u64(tsdn,
72784-		    LOCKEDINT_MTX(*pac->stats_mtx),
72785-		    &pac->stats->decay_dirty.nmadvise, 1);
72786-		locked_inc_u64(tsdn,
72787-		    LOCKEDINT_MTX(*pac->stats_mtx),
72788-		    &pac->stats->decay_dirty.purged,
72789-		    extent_size >> LG_PAGE);
72790-		LOCKEDINT_MTX_UNLOCK(tsdn, *pac->stats_mtx);
72791-		atomic_fetch_sub_zu(&pac->stats->pac_mapped, extent_size,
72792-		    ATOMIC_RELAXED);
72793-	}
72794-}
72795-
72796-/*
72797- * Handles the metadata-management portion of putting an unused extent into the
72798- * given ecache_t (coalesces and inserts into the eset).
72799- */
72800-void
72801-extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
72802-    edata_t *edata) {
72803-	assert((ecache->state != extent_state_dirty &&
72804-	    ecache->state != extent_state_muzzy) ||
72805-	    !edata_zeroed_get(edata));
72806-
72807-	malloc_mutex_lock(tsdn, &ecache->mtx);
72808-
72809-	emap_assert_mapped(tsdn, pac->emap, edata);
72810-
72811-	if (edata_guarded_get(edata)) {
72812-		goto label_skip_coalesce;
72813-	}
72814-	if (!ecache->delay_coalesce) {
72815-		edata = extent_try_coalesce(tsdn, pac, ehooks, ecache, edata,
72816-		    NULL);
72817-	} else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
72818-		assert(ecache == &pac->ecache_dirty);
72819-		/* Always coalesce large extents eagerly. */
72820-		bool coalesced;
72821-		do {
72822-			assert(edata_state_get(edata) == extent_state_active);
72823-			edata = extent_try_coalesce_large(tsdn, pac, ehooks,
72824-			    ecache, edata, &coalesced);
72825-		} while (coalesced);
72826-		if (edata_size_get(edata) >=
72827-		    atomic_load_zu(&pac->oversize_threshold, ATOMIC_RELAXED)
72828-		    && extent_may_force_decay(pac)) {
72829-			/* Shortcut to purge the oversize extent eagerly. */
72830-			malloc_mutex_unlock(tsdn, &ecache->mtx);
72831-			extent_maximally_purge(tsdn, pac, ehooks, edata);
72832-			return;
72833-		}
72834-	}
72835-label_skip_coalesce:
72836-	extent_deactivate_locked(tsdn, pac, ecache, edata);
72837-
72838-	malloc_mutex_unlock(tsdn, &ecache->mtx);
72839-}
72840-
72841-void
72842-extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
72843-    edata_t *edata) {
72844-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
72845-	    WITNESS_RANK_CORE, 0);
72846-
72847-	if (extent_register(tsdn, pac, edata)) {
72848-		edata_cache_put(tsdn, pac->edata_cache, edata);
72849-		return;
72850-	}
72851-	extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
72852-}
72853-
72854-static bool
72855-extent_dalloc_wrapper_try(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
72856-    edata_t *edata) {
72857-	bool err;
72858-
72859-	assert(edata_base_get(edata) != NULL);
72860-	assert(edata_size_get(edata) != 0);
72861-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
72862-	    WITNESS_RANK_CORE, 0);
72863-
72864-	edata_addr_set(edata, edata_base_get(edata));
72865-
72866-	/* Try to deallocate. */
72867-	err = ehooks_dalloc(tsdn, ehooks, edata_base_get(edata),
72868-	    edata_size_get(edata), edata_committed_get(edata));
72869-
72870-	if (!err) {
72871-		edata_cache_put(tsdn, pac->edata_cache, edata);
72872-	}
72873-
72874-	return err;
72875-}
72876-
72877-edata_t *
72878-extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
72879-    void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
72880-    bool growing_retained) {
72881-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
72882-	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
72883-
72884-	edata_t *edata = edata_cache_get(tsdn, pac->edata_cache);
72885-	if (edata == NULL) {
72886-		return NULL;
72887-	}
72888-	size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
72889-	void *addr = ehooks_alloc(tsdn, ehooks, new_addr, size, palignment,
72890-	    &zero, commit);
72891-	if (addr == NULL) {
72892-		edata_cache_put(tsdn, pac->edata_cache, edata);
72893-		return NULL;
72894-	}
72895-	edata_init(edata, ecache_ind_get(&pac->ecache_dirty), addr,
72896-	    size, /* slab */ false, SC_NSIZES, extent_sn_next(pac),
72897-	    extent_state_active, zero, *commit, EXTENT_PAI_PAC,
72898-	    opt_retain ? EXTENT_IS_HEAD : EXTENT_NOT_HEAD);
72899-	/*
72900-	 * Retained memory is not counted towards gdump.  gdump should only be
72901-	 * updated when an extent is allocated as a separate mapping, i.e. when
72902-	 * growing_retained is false.
72903-	 */
72904-	bool gdump_add = !growing_retained;
72905-	if (extent_register_impl(tsdn, pac, edata, gdump_add)) {
72906-		edata_cache_put(tsdn, pac->edata_cache, edata);
72907-		return NULL;
72908-	}
72909-
72910-	return edata;
72911-}
72912-
72913-void
72914-extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
72915-    edata_t *edata) {
72916-	assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
72917-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
72918-	    WITNESS_RANK_CORE, 0);
72919-
72920-	/* Avoid calling the default extent_dalloc unless we have to. */
72921-	if (!ehooks_dalloc_will_fail(ehooks)) {
72922-		/* Remove guard pages for dalloc / unmap. */
72923-		if (edata_guarded_get(edata)) {
72924-			assert(ehooks_are_default(ehooks));
72925-			san_unguard_pages_two_sided(tsdn, ehooks, edata,
72926-			    pac->emap);
72927-		}
72928-		/*
72929-		 * Deregister first to avoid a race with other allocating
72930-		 * threads, and reregister if deallocation fails.
72931-		 */
72932-		extent_deregister(tsdn, pac, edata);
72933-		if (!extent_dalloc_wrapper_try(tsdn, pac, ehooks, edata)) {
72934-			return;
72935-		}
72936-		extent_reregister(tsdn, pac, edata);
72937-	}
72938-
72939-	/* Try to decommit; purge if that fails. */
72940-	bool zeroed;
72941-	if (!edata_committed_get(edata)) {
72942-		zeroed = true;
72943-	} else if (!extent_decommit_wrapper(tsdn, ehooks, edata, 0,
72944-	    edata_size_get(edata))) {
72945-		zeroed = true;
72946-	} else if (!ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata),
72947-	    edata_size_get(edata), 0, edata_size_get(edata))) {
72948-		zeroed = true;
72949-	} else if (edata_state_get(edata) == extent_state_muzzy ||
72950-	    !ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
72951-	    edata_size_get(edata), 0, edata_size_get(edata))) {
72952-		zeroed = false;
72953-	} else {
72954-		zeroed = false;
72955-	}
72956-	edata_zeroed_set(edata, zeroed);
72957-
72958-	if (config_prof) {
72959-		extent_gdump_sub(tsdn, edata);
72960-	}
72961-
72962-	extent_record(tsdn, pac, ehooks, &pac->ecache_retained, edata);
72963-}
72964-
72965-void
72966-extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
72967-    edata_t *edata) {
72968-	assert(edata_base_get(edata) != NULL);
72969-	assert(edata_size_get(edata) != 0);
72970-	extent_state_t state = edata_state_get(edata);
72971-	assert(state == extent_state_retained || state == extent_state_active);
72972-	assert(emap_edata_is_acquired(tsdn, pac->emap, edata));
72973-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
72974-	    WITNESS_RANK_CORE, 0);
72975-
72976-	if (edata_guarded_get(edata)) {
72977-		assert(opt_retain);
72978-		san_unguard_pages_pre_destroy(tsdn, ehooks, edata, pac->emap);
72979-	}
72980-	edata_addr_set(edata, edata_base_get(edata));
72981-
72982-	/* Try to destroy; silently fail otherwise. */
72983-	ehooks_destroy(tsdn, ehooks, edata_base_get(edata),
72984-	    edata_size_get(edata), edata_committed_get(edata));
72985-
72986-	edata_cache_put(tsdn, pac->edata_cache, edata);
72987-}
72988-
72989-static bool
72990-extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
72991-    size_t offset, size_t length, bool growing_retained) {
72992-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
72993-	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
72994-	bool err = ehooks_commit(tsdn, ehooks, edata_base_get(edata),
72995-	    edata_size_get(edata), offset, length);
72996-	edata_committed_set(edata, edata_committed_get(edata) || !err);
72997-	return err;
72998-}
72999-
73000-bool
73001-extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
73002-    size_t offset, size_t length) {
73003-	return extent_commit_impl(tsdn, ehooks, edata, offset, length,
73004-	    /* growing_retained */ false);
73005-}
73006-
73007-bool
73008-extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
73009-    size_t offset, size_t length) {
73010-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
73011-	    WITNESS_RANK_CORE, 0);
73012-	bool err = ehooks_decommit(tsdn, ehooks, edata_base_get(edata),
73013-	    edata_size_get(edata), offset, length);
73014-	edata_committed_set(edata, edata_committed_get(edata) && err);
73015-	return err;
73016-}
73017-
73018-static bool
73019-extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
73020-    size_t offset, size_t length, bool growing_retained) {
73021-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
73022-	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
73023-	bool err = ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
73024-	    edata_size_get(edata), offset, length);
73025-	return err;
73026-}
73027-
73028-bool
73029-extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
73030-    size_t offset, size_t length) {
73031-	return extent_purge_lazy_impl(tsdn, ehooks, edata, offset,
73032-	    length, false);
73033-}
73034-
73035-static bool
73036-extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
73037-    size_t offset, size_t length, bool growing_retained) {
73038-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
73039-	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
73040-	bool err = ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata),
73041-	    edata_size_get(edata), offset, length);
73042-	return err;
73043-}
73044-
73045-bool
73046-extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
73047-    size_t offset, size_t length) {
73048-	return extent_purge_forced_impl(tsdn, ehooks, edata, offset, length,
73049-	    false);
73050-}
73051-
73052-/*
73053- * Accepts the extent to split, and the characteristics of each side of the
73054- * split.  The 'a' parameters go with the 'lead' of the resulting pair of
73055- * extents (the lower-addressed portion of the split), and the 'b' parameters go
73056- * with the trail (the higher-addressed portion).  This makes 'edata' the lead,
73057- * and returns the trail (except in case of error).
73058- */
73059-static edata_t *
73060-extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
73061-    edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks) {
73062-	assert(edata_size_get(edata) == size_a + size_b);
73063-	/* Only the shrink path may split w/o holding core locks. */
73064-	if (holding_core_locks) {
73065-		witness_assert_positive_depth_to_rank(
73066-		    tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
73067-	} else {
73068-		witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
73069-		    WITNESS_RANK_CORE, 0);
73070-	}
73071-
73072-	if (ehooks_split_will_fail(ehooks)) {
73073-		return NULL;
73074-	}
73075-
73076-	edata_t *trail = edata_cache_get(tsdn, pac->edata_cache);
73077-	if (trail == NULL) {
73078-		goto label_error_a;
73079-	}
73080-
73081-	edata_init(trail, edata_arena_ind_get(edata),
73082-	    (void *)((uintptr_t)edata_base_get(edata) + size_a), size_b,
73083-	    /* slab */ false, SC_NSIZES, edata_sn_get(edata),
73084-	    edata_state_get(edata), edata_zeroed_get(edata),
73085-	    edata_committed_get(edata), EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
73086-	emap_prepare_t prepare;
73087-	bool err = emap_split_prepare(tsdn, pac->emap, &prepare, edata,
73088-	    size_a, trail, size_b);
73089-	if (err) {
73090-		goto label_error_b;
73091-	}
73092-
73093-	/*
73094-	 * No need to acquire trail or edata, because: 1) trail was new (just
73095-	 * allocated); and 2) edata is either an active allocation (the shrink
73096-	 * path), or in an acquired state (extracted from the ecache on the
73097-	 * extent_recycle_split path).
73098-	 */
73099-	assert(emap_edata_is_acquired(tsdn, pac->emap, edata));
73100-	assert(emap_edata_is_acquired(tsdn, pac->emap, trail));
73101-
73102-	err = ehooks_split(tsdn, ehooks, edata_base_get(edata), size_a + size_b,
73103-	    size_a, size_b, edata_committed_get(edata));
73104-
73105-	if (err) {
73106-		goto label_error_b;
73107-	}
73108-
73109-	edata_size_set(edata, size_a);
73110-	emap_split_commit(tsdn, pac->emap, &prepare, edata, size_a, trail,
73111-	    size_b);
73112-
73113-	return trail;
73114-label_error_b:
73115-	edata_cache_put(tsdn, pac->edata_cache, trail);
73116-label_error_a:
73117-	return NULL;
73118-}
73119-
73120-edata_t *
73121-extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata,
73122-    size_t size_a, size_t size_b, bool holding_core_locks) {
73123-	return extent_split_impl(tsdn, pac, ehooks, edata, size_a, size_b,
73124-	    holding_core_locks);
73125-}
73126-
73127-static bool
73128-extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a,
73129-    edata_t *b, bool holding_core_locks) {
73130-	/* Only the expanding path may merge w/o holding ecache locks. */
73131-	if (holding_core_locks) {
73132-		witness_assert_positive_depth_to_rank(
73133-		    tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
73134-	} else {
73135-		witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
73136-		    WITNESS_RANK_CORE, 0);
73137-	}
73138-
73139-	assert(edata_base_get(a) < edata_base_get(b));
73140-	assert(edata_arena_ind_get(a) == edata_arena_ind_get(b));
73141-	assert(edata_arena_ind_get(a) == ehooks_ind_get(ehooks));
73142-	emap_assert_mapped(tsdn, pac->emap, a);
73143-	emap_assert_mapped(tsdn, pac->emap, b);
73144-
73145-	bool err = ehooks_merge(tsdn, ehooks, edata_base_get(a),
73146-	    edata_size_get(a), edata_base_get(b), edata_size_get(b),
73147-	    edata_committed_get(a));
73148-
73149-	if (err) {
73150-		return true;
73151-	}
73152-
73153-	/*
73154-	 * The rtree writes must happen while all the relevant elements are
73155-	 * owned, so the following code uses decomposed helper functions rather
73156-	 * than extent_{,de}register() to do things in the right order.
73157-	 */
73158-	emap_prepare_t prepare;
73159-	emap_merge_prepare(tsdn, pac->emap, &prepare, a, b);
73160-
73161-	assert(edata_state_get(a) == extent_state_active ||
73162-	    edata_state_get(a) == extent_state_merging);
73163-	edata_state_set(a, extent_state_active);
73164-	edata_size_set(a, edata_size_get(a) + edata_size_get(b));
73165-	edata_sn_set(a, (edata_sn_get(a) < edata_sn_get(b)) ?
73166-	    edata_sn_get(a) : edata_sn_get(b));
73167-	edata_zeroed_set(a, edata_zeroed_get(a) && edata_zeroed_get(b));
73168-
73169-	emap_merge_commit(tsdn, pac->emap, &prepare, a, b);
73170-
73171-	edata_cache_put(tsdn, pac->edata_cache, b);
73172-
73173-	return false;
73174-}
73175-
73176-bool
73177-extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
73178-    edata_t *a, edata_t *b) {
73179-	return extent_merge_impl(tsdn, pac, ehooks, a, b,
73180-	    /* holding_core_locks */ false);
73181-}
73182-
73183-bool
73184-extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
73185-    bool commit, bool zero, bool growing_retained) {
73186-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
73187-	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
73188-
73189-	if (commit && !edata_committed_get(edata)) {
73190-		if (extent_commit_impl(tsdn, ehooks, edata, 0,
73191-		    edata_size_get(edata), growing_retained)) {
73192-			return true;
73193-		}
73194-	}
73195-	if (zero && !edata_zeroed_get(edata)) {
73196-		void *addr = edata_base_get(edata);
73197-		size_t size = edata_size_get(edata);
73198-		ehooks_zero(tsdn, ehooks, addr, size);
73199-	}
73200-	return false;
73201-}
73202-
73203-bool
73204-extent_boot(void) {
73205-	assert(sizeof(slab_data_t) >= sizeof(e_prof_info_t));
73206-
73207-	if (have_dss) {
73208-		extent_dss_boot();
73209-	}
73210-
73211-	return false;
73212-}
73213diff --git a/jemalloc/src/extent_dss.c b/jemalloc/src/extent_dss.c
73214deleted file mode 100644
73215index 9a35bac..0000000
73216--- a/jemalloc/src/extent_dss.c
73217+++ /dev/null
73218@@ -1,277 +0,0 @@
73219-#include "jemalloc/internal/jemalloc_preamble.h"
73220-#include "jemalloc/internal/jemalloc_internal_includes.h"
73221-
73222-#include "jemalloc/internal/assert.h"
73223-#include "jemalloc/internal/extent_dss.h"
73224-#include "jemalloc/internal/spin.h"
73225-
73226-/******************************************************************************/
73227-/* Data. */
73228-
73229-const char	*opt_dss = DSS_DEFAULT;
73230-
73231-const char	*dss_prec_names[] = {
73232-	"disabled",
73233-	"primary",
73234-	"secondary",
73235-	"N/A"
73236-};
73237-
73238-/*
73239- * Current dss precedence default, used when creating new arenas.  NB: This is
73240- * stored as unsigned rather than dss_prec_t because in principle there's no
73241- * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
73242- * atomic operations to synchronize the setting.
73243- */
73244-static atomic_u_t	dss_prec_default = ATOMIC_INIT(
73245-    (unsigned)DSS_PREC_DEFAULT);
73246-
73247-/* Base address of the DSS. */
73248-static void		*dss_base;
73249-/* Atomic boolean indicating whether a thread is currently extending DSS. */
73250-static atomic_b_t	dss_extending;
73251-/* Atomic boolean indicating whether the DSS is exhausted. */
73252-static atomic_b_t	dss_exhausted;
73253-/* Atomic current upper limit on DSS addresses. */
73254-static atomic_p_t	dss_max;
73255-
73256-/******************************************************************************/
73257-
73258-static void *
73259-extent_dss_sbrk(intptr_t increment) {
73260-#ifdef JEMALLOC_DSS
73261-	return sbrk(increment);
73262-#else
73263-	not_implemented();
73264-	return NULL;
73265-#endif
73266-}
73267-
73268-dss_prec_t
73269-extent_dss_prec_get(void) {
73270-	dss_prec_t ret;
73271-
73272-	if (!have_dss) {
73273-		return dss_prec_disabled;
73274-	}
73275-	ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE);
73276-	return ret;
73277-}
73278-
73279-bool
73280-extent_dss_prec_set(dss_prec_t dss_prec) {
73281-	if (!have_dss) {
73282-		return (dss_prec != dss_prec_disabled);
73283-	}
73284-	atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE);
73285-	return false;
73286-}
73287-
73288-static void
73289-extent_dss_extending_start(void) {
73290-	spin_t spinner = SPIN_INITIALIZER;
73291-	while (true) {
73292-		bool expected = false;
73293-		if (atomic_compare_exchange_weak_b(&dss_extending, &expected,
73294-		    true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
73295-			break;
73296-		}
73297-		spin_adaptive(&spinner);
73298-	}
73299-}
73300-
73301-static void
73302-extent_dss_extending_finish(void) {
73303-	assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED));
73304-
73305-	atomic_store_b(&dss_extending, false, ATOMIC_RELEASE);
73306-}
73307-
73308-static void *
73309-extent_dss_max_update(void *new_addr) {
73310-	/*
73311-	 * Get the current end of the DSS as max_cur and ensure that dss_max is
73312-	 * up to date.
73313-	 */
73314-	void *max_cur = extent_dss_sbrk(0);
73315-	if (max_cur == (void *)-1) {
73316-		return NULL;
73317-	}
73318-	atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE);
73319-	/* Fixed new_addr can only be supported if it is at the edge of DSS. */
73320-	if (new_addr != NULL && max_cur != new_addr) {
73321-		return NULL;
73322-	}
73323-	return max_cur;
73324-}
73325-
73326-void *
73327-extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
73328-    size_t alignment, bool *zero, bool *commit) {
73329-	edata_t *gap;
73330-
73331-	cassert(have_dss);
73332-	assert(size > 0);
73333-	assert(alignment == ALIGNMENT_CEILING(alignment, PAGE));
73334-
73335-	/*
73336-	 * sbrk() uses a signed increment argument, so take care not to
73337-	 * interpret a large allocation request as a negative increment.
73338-	 */
73339-	if ((intptr_t)size < 0) {
73340-		return NULL;
73341-	}
73342-
73343-	gap = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);
73344-	if (gap == NULL) {
73345-		return NULL;
73346-	}
73347-
73348-	extent_dss_extending_start();
73349-	if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) {
73350-		/*
73351-		 * The loop is necessary to recover from races with other
73352-		 * threads that are using the DSS for something other than
73353-		 * malloc.
73354-		 */
73355-		while (true) {
73356-			void *max_cur = extent_dss_max_update(new_addr);
73357-			if (max_cur == NULL) {
73358-				goto label_oom;
73359-			}
73360-
73361-			extent_head_state_t head_state = opt_retain ?
73362-			    EXTENT_IS_HEAD : EXTENT_NOT_HEAD;
73363-			/*
73364-			 * Compute how much page-aligned gap space (if any) is
73365-			 * necessary to satisfy alignment.  This space can be
73366-			 * recycled for later use.
73367-			 */
73368-			void *gap_addr_page = (void *)(PAGE_CEILING(
73369-			    (uintptr_t)max_cur));
73370-			void *ret = (void *)ALIGNMENT_CEILING(
73371-			    (uintptr_t)gap_addr_page, alignment);
73372-			size_t gap_size_page = (uintptr_t)ret -
73373-			    (uintptr_t)gap_addr_page;
73374-			if (gap_size_page != 0) {
73375-				edata_init(gap, arena_ind_get(arena),
73376-				    gap_addr_page, gap_size_page, false,
73377-				    SC_NSIZES, extent_sn_next(
73378-					&arena->pa_shard.pac),
73379-				    extent_state_active, false, true,
73380-				    EXTENT_PAI_PAC, head_state);
73381-			}
73382-			/*
73383-			 * Compute the address just past the end of the desired
73384-			 * allocation space.
73385-			 */
73386-			void *dss_next = (void *)((uintptr_t)ret + size);
73387-			if ((uintptr_t)ret < (uintptr_t)max_cur ||
73388-			    (uintptr_t)dss_next < (uintptr_t)max_cur) {
73389-				goto label_oom; /* Wrap-around. */
73390-			}
73391-			/* Compute the increment, including subpage bytes. */
73392-			void *gap_addr_subpage = max_cur;
73393-			size_t gap_size_subpage = (uintptr_t)ret -
73394-			    (uintptr_t)gap_addr_subpage;
73395-			intptr_t incr = gap_size_subpage + size;
73396-
73397-			assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
73398-			    size);
73399-
73400-			/* Try to allocate. */
73401-			void *dss_prev = extent_dss_sbrk(incr);
73402-			if (dss_prev == max_cur) {
73403-				/* Success. */
73404-				atomic_store_p(&dss_max, dss_next,
73405-				    ATOMIC_RELEASE);
73406-				extent_dss_extending_finish();
73407-
73408-				if (gap_size_page != 0) {
73409-					ehooks_t *ehooks = arena_get_ehooks(
73410-					    arena);
73411-					extent_dalloc_gap(tsdn,
73412-					    &arena->pa_shard.pac, ehooks, gap);
73413-				} else {
73414-					edata_cache_put(tsdn,
73415-					    &arena->pa_shard.edata_cache, gap);
73416-				}
73417-				if (!*commit) {
73418-					*commit = pages_decommit(ret, size);
73419-				}
73420-				if (*zero && *commit) {
73421-					edata_t edata = {0};
73422-					ehooks_t *ehooks = arena_get_ehooks(
73423-					    arena);
73424-
73425-					edata_init(&edata,
73426-					    arena_ind_get(arena), ret, size,
73427-					    size, false, SC_NSIZES,
73428-					    extent_state_active, false, true,
73429-					    EXTENT_PAI_PAC, head_state);
73430-					if (extent_purge_forced_wrapper(tsdn,
73431-					    ehooks, &edata, 0, size)) {
73432-						memset(ret, 0, size);
73433-					}
73434-				}
73435-				return ret;
73436-			}
73437-			/*
73438-			 * Failure, whether due to OOM or a race with a raw
73439-			 * sbrk() call from outside the allocator.
73440-			 */
73441-			if (dss_prev == (void *)-1) {
73442-				/* OOM. */
73443-				atomic_store_b(&dss_exhausted, true,
73444-				    ATOMIC_RELEASE);
73445-				goto label_oom;
73446-			}
73447-		}
73448-	}
73449-label_oom:
73450-	extent_dss_extending_finish();
73451-	edata_cache_put(tsdn, &arena->pa_shard.edata_cache, gap);
73452-	return NULL;
73453-}
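
The alignment bookkeeping in extent_alloc_dss() produces two quantities: a page-aligned gap that can be recycled as an extent, and a possibly larger sub-page slack that must be folded into the sbrk increment.  A standalone sketch with hypothetical numbers (PAGE_SZ, CEIL, and the constants are illustrative only):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ ((uintptr_t)4096)
#define CEIL(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

int
main(void) {
	uintptr_t max_cur = 0x602345;	/* Hypothetical current break. */
	size_t size = 1 << 16;		/* 64 KiB request... */
	uintptr_t alignment = 1 << 14;	/* ...with 16 KiB alignment. */

	uintptr_t gap_addr_page = CEIL(max_cur, PAGE_SZ);
	uintptr_t ret = CEIL(gap_addr_page, alignment);
	size_t gap_size_page = ret - gap_addr_page;	/* Recyclable gap. */
	size_t gap_size_subpage = ret - max_cur;	/* Includes sub-page slack. */
	intptr_t incr = (intptr_t)(gap_size_subpage + size);

	printf("ret=0x%lx page_gap=%lu incr=%ld\n", (unsigned long)ret,
	    (unsigned long)gap_size_page, (long)incr);
	/* Same invariant the original asserts: break + incr == ret + size. */
	return (max_cur + (uintptr_t)incr == ret + size) ? 0 : 1;
}

The final check mirrors the assertion in the original code: the old break plus the computed increment lands exactly at ret + size.
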
73454-
73455-static bool
73456-extent_in_dss_helper(void *addr, void *max) {
73457-	return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr <
73458-	    (uintptr_t)max);
73459-}
73460-
73461-bool
73462-extent_in_dss(void *addr) {
73463-	cassert(have_dss);
73464-
73465-	return extent_in_dss_helper(addr, atomic_load_p(&dss_max,
73466-	    ATOMIC_ACQUIRE));
73467-}
73468-
73469-bool
73470-extent_dss_mergeable(void *addr_a, void *addr_b) {
73471-	void *max;
73472-
73473-	cassert(have_dss);
73474-
73475-	if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
73476-	    (uintptr_t)dss_base) {
73477-		return true;
73478-	}
73479-
73480-	max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE);
73481-	return (extent_in_dss_helper(addr_a, max) ==
73482-	    extent_in_dss_helper(addr_b, max));
73483-}
73484-
73485-void
73486-extent_dss_boot(void) {
73487-	cassert(have_dss);
73488-
73489-	dss_base = extent_dss_sbrk(0);
73490-	atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);
73491-	atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED);
73492-	atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);
73493-}
73494-
73495-/******************************************************************************/
73496diff --git a/jemalloc/src/extent_mmap.c b/jemalloc/src/extent_mmap.c
73497deleted file mode 100644
73498index 5f0ee2d..0000000
73499--- a/jemalloc/src/extent_mmap.c
73500+++ /dev/null
73501@@ -1,41 +0,0 @@
73502-#include "jemalloc/internal/jemalloc_preamble.h"
73503-#include "jemalloc/internal/jemalloc_internal_includes.h"
73504-
73505-#include "jemalloc/internal/assert.h"
73506-#include "jemalloc/internal/extent_mmap.h"
73507-
73508-/******************************************************************************/
73509-/* Data. */
73510-
73511-bool	opt_retain =
73512-#ifdef JEMALLOC_RETAIN
73513-    true
73514-#else
73515-    false
73516-#endif
73517-    ;
73518-
73519-/******************************************************************************/
73520-
73521-void *
73522-extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
73523-    bool *commit) {
73524-	assert(alignment == ALIGNMENT_CEILING(alignment, PAGE));
73525-	void *ret = pages_map(new_addr, size, alignment, commit);
73526-	if (ret == NULL) {
73527-		return NULL;
73528-	}
73529-	assert(ret != NULL);
73530-	if (*commit) {
73531-		*zero = true;
73532-	}
73533-	return ret;
73534-}
73535-
73536-bool
73537-extent_dalloc_mmap(void *addr, size_t size) {
73538-	if (!opt_retain) {
73539-		pages_unmap(addr, size);
73540-	}
73541-	return opt_retain;
73542-}
73543diff --git a/jemalloc/src/fxp.c b/jemalloc/src/fxp.c
73544deleted file mode 100644
73545index 96585f0..0000000
73546--- a/jemalloc/src/fxp.c
73547+++ /dev/null
73548@@ -1,124 +0,0 @@
73549-#include "jemalloc/internal/jemalloc_preamble.h"
73550-#include "jemalloc/internal/jemalloc_internal_includes.h"
73551-
73552-#include "jemalloc/internal/fxp.h"
73553-
73554-static bool
73555-fxp_isdigit(char c) {
73556-	return '0' <= c && c <= '9';
73557-}
73558-
73559-bool
73560-fxp_parse(fxp_t *result, const char *str, char **end) {
73561-	/*
73562-	 * Using malloc_strtoumax in this method isn't as handy as you might
73563-	 * expect (I tried). In the fractional part, significant leading zeros
73564-	 * mean that you still need to do your own parsing, now with trickier
73565-	 * math.  In the integer part, the casting (uintmax_t to uint32_t)
73566-	 * forces more reasoning about bounds than just checking for overflow as
73567-	 * we parse.
73568-	 */
73569-	uint32_t integer_part = 0;
73570-
73571-	const char *cur = str;
73572-
73573-	/* The string must start with a digit or a decimal point. */
73574-	if (*cur != '.' && !fxp_isdigit(*cur)) {
73575-		return true;
73576-	}
73577-
73578-	while ('0' <= *cur && *cur <= '9') {
73579-		integer_part *= 10;
73580-		integer_part += *cur - '0';
73581-		if (integer_part >= (1U << 16)) {
73582-			return true;
73583-		}
73584-		cur++;
73585-	}
73586-
73587-	/*
73588-	 * We've parsed all digits at the beginning of the string, without
73589-	 * overflow.  Either we're done, or there's a fractional part.
73590-	 */
73591-	if (*cur != '.') {
73592-		*result = (integer_part << 16);
73593-		if (end != NULL) {
73594-			*end = (char *)cur;
73595-		}
73596-		return false;
73597-	}
73598-
73599-	/* There's a fractional part. */
73600-	cur++;
73601-	if (!fxp_isdigit(*cur)) {
73602-		/* Shouldn't end on the decimal point. */
73603-		return true;
73604-	}
73605-
73606-	/*
73607-	 * We use a lot of precision for the fractional part, even though we'll
73608-	 * discard most of it; this lets us get exact values for the important
73609-	 * special case where the denominator is a small power of 2 (for
73610-	 * instance, 1/512 == 0.001953125 is exactly representable even with
73611-	 * only 16 bits of fractional precision).  We need to left-shift by 16
73612-	 * before dividing, so the accumulated digits must fit in 48 bits; we
73613-	 * therefore parse at most floor(log10(2**48)) = 14 digits.
73614-	 */
73615-	uint64_t fractional_part = 0;
73616-	uint64_t frac_div = 1;
73617-	for (int i = 0; i < FXP_FRACTIONAL_PART_DIGITS; i++) {
73618-		fractional_part *= 10;
73619-		frac_div *= 10;
73620-		if (fxp_isdigit(*cur)) {
73621-			fractional_part += *cur - '0';
73622-			cur++;
73623-		}
73624-	}
73625-	/*
73626-	 * We only parse the first FXP_FRACTIONAL_PART_DIGITS digits; any
73627-	 * further digits are simply skipped.
73628-	 */
73629-	while (fxp_isdigit(*cur)) {
73630-		cur++;
73631-	}
73632-
73633-	assert(fractional_part < frac_div);
73634-	uint32_t fractional_repr = (uint32_t)(
73635-	    (fractional_part << 16) / frac_div);
73636-
73637-	/* Success! */
73638-	*result = (integer_part << 16) + fractional_repr;
73639-	if (end != NULL) {
73640-		*end = (char *)cur;
73641-	}
73642-	return false;
73643-}
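
The fractional conversion above is just (decimal_fraction << 16) / 10^digits.  For the 1/512 example mentioned in the comment, 1953125 * 2^16 / 10^9 = 128, i.e. 0x0080 in 16.16 fixed point, which is exact.  A standalone sketch of that step (not the fxp_t API):

#include <stdint.h>
#include <stdio.h>

int
main(void) {
	const char *frac = "001953125";	/* Digits after the decimal point. */
	uint64_t fractional_part = 0;
	uint64_t frac_div = 1;
	for (const char *c = frac; *c != '\0'; c++) {
		fractional_part = fractional_part * 10 + (uint64_t)(*c - '0');
		frac_div *= 10;
	}
	/* Map the decimal fraction onto 16 binary fraction bits. */
	uint32_t fixed = (uint32_t)((fractional_part << 16) / frac_div);
	printf("0.%s -> 0x%04x (128/65536 == 1/512, exact)\n", frac,
	    (unsigned)fixed);
	return fixed == 0x80 ? 0 : 1;
}
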
73644-
73645-void
73646-fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]) {
73647-	uint32_t integer_part = fxp_round_down(a);
73648-	uint32_t fractional_part = (a & ((1U << 16) - 1));
73649-
73650-	int leading_fraction_zeros = 0;
73651-	uint64_t fraction_digits = fractional_part;
73652-	for (int i = 0; i < FXP_FRACTIONAL_PART_DIGITS; i++) {
73653-		if (fraction_digits < (1U << 16)
73654-		    && fraction_digits * 10 >= (1U << 16)) {
73655-			leading_fraction_zeros = i;
73656-		}
73657-		fraction_digits *= 10;
73658-	}
73659-	fraction_digits >>= 16;
73660-	while (fraction_digits > 0 && fraction_digits % 10 == 0) {
73661-		fraction_digits /= 10;
73662-	}
73663-
73664-	size_t printed = malloc_snprintf(buf, FXP_BUF_SIZE, "%"FMTu32".",
73665-	    integer_part);
73666-	for (int i = 0; i < leading_fraction_zeros; i++) {
73667-		buf[printed] = '0';
73668-		printed++;
73669-	}
73670-	malloc_snprintf(&buf[printed], FXP_BUF_SIZE - printed, "%"FMTu64,
73671-	    fraction_digits);
73672-}
73673diff --git a/jemalloc/src/hook.c b/jemalloc/src/hook.c
73674deleted file mode 100644
73675index 493edbb..0000000
73676--- a/jemalloc/src/hook.c
73677+++ /dev/null
73678@@ -1,195 +0,0 @@
73679-#include "jemalloc/internal/jemalloc_preamble.h"
73680-
73681-#include "jemalloc/internal/hook.h"
73682-
73683-#include "jemalloc/internal/atomic.h"
73684-#include "jemalloc/internal/mutex.h"
73685-#include "jemalloc/internal/seq.h"
73686-
73687-typedef struct hooks_internal_s hooks_internal_t;
73688-struct hooks_internal_s {
73689-	hooks_t hooks;
73690-	bool in_use;
73691-};
73692-
73693-seq_define(hooks_internal_t, hooks)
73694-
73695-static atomic_u_t nhooks = ATOMIC_INIT(0);
73696-static seq_hooks_t hooks[HOOK_MAX];
73697-static malloc_mutex_t hooks_mu;
73698-
73699-bool
73700-hook_boot() {
73701-	return malloc_mutex_init(&hooks_mu, "hooks", WITNESS_RANK_HOOK,
73702-	    malloc_mutex_rank_exclusive);
73703-}
73704-
73705-static void *
73706-hook_install_locked(hooks_t *to_install) {
73707-	hooks_internal_t hooks_internal;
73708-	for (int i = 0; i < HOOK_MAX; i++) {
73709-		bool success = seq_try_load_hooks(&hooks_internal, &hooks[i]);
73710-		/* We hold mu; no concurrent access. */
73711-		assert(success);
73712-		if (!hooks_internal.in_use) {
73713-			hooks_internal.hooks = *to_install;
73714-			hooks_internal.in_use = true;
73715-			seq_store_hooks(&hooks[i], &hooks_internal);
73716-			atomic_store_u(&nhooks,
73717-			    atomic_load_u(&nhooks, ATOMIC_RELAXED) + 1,
73718-			    ATOMIC_RELAXED);
73719-			return &hooks[i];
73720-		}
73721-	}
73722-	return NULL;
73723-}
73724-
73725-void *
73726-hook_install(tsdn_t *tsdn, hooks_t *to_install) {
73727-	malloc_mutex_lock(tsdn, &hooks_mu);
73728-	void *ret = hook_install_locked(to_install);
73729-	if (ret != NULL) {
73730-		tsd_global_slow_inc(tsdn);
73731-	}
73732-	malloc_mutex_unlock(tsdn, &hooks_mu);
73733-	return ret;
73734-}
73735-
73736-static void
73737-hook_remove_locked(seq_hooks_t *to_remove) {
73738-	hooks_internal_t hooks_internal;
73739-	bool success = seq_try_load_hooks(&hooks_internal, to_remove);
73740-	/* We hold mu; no concurrent access. */
73741-	assert(success);
73742-	/* Should only remove hooks that were added. */
73743-	assert(hooks_internal.in_use);
73744-	hooks_internal.in_use = false;
73745-	seq_store_hooks(to_remove, &hooks_internal);
73746-	atomic_store_u(&nhooks, atomic_load_u(&nhooks, ATOMIC_RELAXED) - 1,
73747-	    ATOMIC_RELAXED);
73748-}
73749-
73750-void
73751-hook_remove(tsdn_t *tsdn, void *opaque) {
73752-	if (config_debug) {
73753-		char *hooks_begin = (char *)&hooks[0];
73754-		char *hooks_end = (char *)&hooks[HOOK_MAX];
73755-		char *hook = (char *)opaque;
73756-		assert(hooks_begin <= hook && hook < hooks_end
73757-		    && (hook - hooks_begin) % sizeof(seq_hooks_t) == 0);
73758-	}
73759-	malloc_mutex_lock(tsdn, &hooks_mu);
73760-	hook_remove_locked((seq_hooks_t *)opaque);
73761-	tsd_global_slow_dec(tsdn);
73762-	malloc_mutex_unlock(tsdn, &hooks_mu);
73763-}
73764-
73765-#define FOR_EACH_HOOK_BEGIN(hooks_internal_ptr)				\
73766-for (int for_each_hook_counter = 0;					\
73767-    for_each_hook_counter < HOOK_MAX;					\
73768-    for_each_hook_counter++) {						\
73769-	bool for_each_hook_success = seq_try_load_hooks(		\
73770-	    (hooks_internal_ptr), &hooks[for_each_hook_counter]);	\
73771-	if (!for_each_hook_success) {					\
73772-		continue;						\
73773-	}								\
73774-	if (!(hooks_internal_ptr)->in_use) {				\
73775-		continue;						\
73776-	}
73777-#define FOR_EACH_HOOK_END						\
73778-}
73779-
73780-static bool *
73781-hook_reentrantp() {
73782-	/*
73783-	 * We prevent user reentrancy within hooks.  This is basically just a
73784-	 * thread-local bool that triggers an early-exit.
73785-	 *
73786-	 * We don't fold in_hook into reentrancy.  There are two reasons for
73787-	 * this:
73788-	 * - Right now, we turn on reentrancy during things like extent hook
73789-	 *   execution.  Allocating during extent hooks is not officially
73790-	 *   supported, but we don't want to break it for the time being.  These
73791-	 *   sorts of allocations should probably still be hooked, though.
73792-	 * - If a hook allocates, we may want it to be relatively fast (after
73793-	 *   all, it executes on every allocator operation).  Turning on
73794-	 *   reentrancy is a fairly heavyweight mode (disabling tcache,
73795-	 *   redirecting to arena 0, etc.).  It's possible we may one day want
73796-	 *   to turn on reentrant mode here, if it proves too difficult to keep
73797-	 *   this working.  But that's fairly easy for us to see; OTOH, people
73798-	 *   not using hooks because they're too slow is easy for us to miss.
73799-	 *
73800-	 * The tricky part is that this code might get invoked even if we
73801-	 * don't have access to tsd.
73802-	 * This function mimics getting a pointer to thread-local data, except
73803-	 * that it might secretly return a pointer to some global data if we
73804-	 * know that the caller will take the early-exit path.
73805-	 * If we return a bool that indicates that we are reentrant, then the
73806-	 * caller will go down the early exit path, leaving the global
73807-	 * untouched.
73808-	 */
73809-	static bool in_hook_global = true;
73810-	tsdn_t *tsdn = tsdn_fetch();
73811-	bool *in_hook = tsdn_in_hookp_get(tsdn);
73812-	if (in_hook != NULL) {
73813-		return in_hook;
73814-	}
73815-	return &in_hook_global;
73816-}
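
The comment above describes a fallback trick: when thread-local state isn't available, return a pointer to a static flag that is permanently "true", so callers take the early-exit path and never write to shared state.  A minimal sketch of that pattern, with illustrative names and a plain C11 _Thread_local standing in for tsd:

#include <stdbool.h>

static _Thread_local bool tls_in_hook;	/* Stand-in for tsd's in_hook slot. */
static bool tls_available = true;	/* Stand-in for "tsd is initialized". */

static bool *
reentrancy_flag(void) {
	static bool in_hook_global = true;	/* Always set: forces early exit. */
	if (tls_available) {
		return &tls_in_hook;
	}
	return &in_hook_global;
}

static void
invoke_hooks(void (*hook)(void)) {
	bool *in_hook = reentrancy_flag();
	if (*in_hook) {
		return;		/* Already inside a hook; don't recurse. */
	}
	*in_hook = true;
	hook();
	*in_hook = false;
}

static void
example_hook(void) {
	/* A real hook body might allocate, re-entering the allocator. */
}

int
main(void) {
	invoke_hooks(example_hook);
	return 0;
}
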
73817-
73818-#define HOOK_PROLOGUE							\
73819-	if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) {	\
73820-		return;							\
73821-	}								\
73822-	bool *in_hook = hook_reentrantp();				\
73823-	if (*in_hook) {							\
73824-		return;							\
73825-	}								\
73826-	*in_hook = true;
73827-
73828-#define HOOK_EPILOGUE							\
73829-	*in_hook = false;
73830-
73831-void
73832-hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
73833-    uintptr_t args_raw[3]) {
73834-	HOOK_PROLOGUE
73835-
73836-	hooks_internal_t hook;
73837-	FOR_EACH_HOOK_BEGIN(&hook)
73838-		hook_alloc h = hook.hooks.alloc_hook;
73839-		if (h != NULL) {
73840-			h(hook.hooks.extra, type, result, result_raw, args_raw);
73841-		}
73842-	FOR_EACH_HOOK_END
73843-
73844-	HOOK_EPILOGUE
73845-}
73846-
73847-void
73848-hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {
73849-	HOOK_PROLOGUE
73850-	hooks_internal_t hook;
73851-	FOR_EACH_HOOK_BEGIN(&hook)
73852-		hook_dalloc h = hook.hooks.dalloc_hook;
73853-		if (h != NULL) {
73854-			h(hook.hooks.extra, type, address, args_raw);
73855-		}
73856-	FOR_EACH_HOOK_END
73857-	HOOK_EPILOGUE
73858-}
73859-
73860-void
73861-hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
73862-    size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) {
73863-	HOOK_PROLOGUE
73864-	hooks_internal_t hook;
73865-	FOR_EACH_HOOK_BEGIN(&hook)
73866-		hook_expand h = hook.hooks.expand_hook;
73867-		if (h != NULL) {
73868-			h(hook.hooks.extra, type, address, old_usize, new_usize,
73869-			    result_raw, args_raw);
73870-		}
73871-	FOR_EACH_HOOK_END
73872-	HOOK_EPILOGUE
73873-}
73874diff --git a/jemalloc/src/hpa.c b/jemalloc/src/hpa.c
73875deleted file mode 100644
73876index 7e2aeba..0000000
73877--- a/jemalloc/src/hpa.c
73878+++ /dev/null
73879@@ -1,1044 +0,0 @@
73880-#include "jemalloc/internal/jemalloc_preamble.h"
73881-#include "jemalloc/internal/jemalloc_internal_includes.h"
73882-
73883-#include "jemalloc/internal/hpa.h"
73884-
73885-#include "jemalloc/internal/fb.h"
73886-#include "jemalloc/internal/witness.h"
73887-
73888-#define HPA_EDEN_SIZE (128 * HUGEPAGE)
73889-
73890-static edata_t *hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
73891-    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
73892-    bool *deferred_work_generated);
73893-static size_t hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
73894-    size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated);
73895-static bool hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
73896-    size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
73897-static bool hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
73898-    size_t old_size, size_t new_size, bool *deferred_work_generated);
73899-static void hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
73900-    bool *deferred_work_generated);
73901-static void hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self,
73902-    edata_list_active_t *list, bool *deferred_work_generated);
73903-static uint64_t hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self);
73904-
73905-bool
73906-hpa_supported() {
73907-#ifdef _WIN32
73908-	/*
73909-	 * At least until the API and implementation is somewhat settled, we
73910-	 * don't want to try to debug the VM subsystem on the hardest-to-test
73911-	 * platform.
73912-	 */
73913-	return false;
73914-#endif
73915-	if (!pages_can_hugify) {
73916-		return false;
73917-	}
73918-	/*
73919-	 * We fundamentally rely on an address-space-hungry growth strategy for
73920-	 * hugepages.
73921-	 */
73922-	if (LG_SIZEOF_PTR != 3) {
73923-		return false;
73924-	}
73925-	/*
73926-	 * If we couldn't detect the value of HUGEPAGE, HUGEPAGE_PAGES becomes
73927-	 * this sentinel value -- see the comment in pages.h.
73928-	 */
73929-	if (HUGEPAGE_PAGES == 1) {
73930-		return false;
73931-	}
73932-	return true;
73933-}
73934-
73935-static void
73936-hpa_do_consistency_checks(hpa_shard_t *shard) {
73937-	assert(shard->base != NULL);
73938-}
73939-
73940-bool
73941-hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks) {
73942-	/* malloc_conf processing should have filtered out these cases. */
73943-	assert(hpa_supported());
73944-	bool err;
73945-	err = malloc_mutex_init(&central->grow_mtx, "hpa_central_grow",
73946-	    WITNESS_RANK_HPA_CENTRAL_GROW, malloc_mutex_rank_exclusive);
73947-	if (err) {
73948-		return true;
73949-	}
73950-	err = malloc_mutex_init(&central->mtx, "hpa_central",
73951-	    WITNESS_RANK_HPA_CENTRAL, malloc_mutex_rank_exclusive);
73952-	if (err) {
73953-		return true;
73954-	}
73955-	central->base = base;
73956-	central->eden = NULL;
73957-	central->eden_len = 0;
73958-	central->age_counter = 0;
73959-	central->hooks = *hooks;
73960-	return false;
73961-}
73962-
73963-static hpdata_t *
73964-hpa_alloc_ps(tsdn_t *tsdn, hpa_central_t *central) {
73965-	return (hpdata_t *)base_alloc(tsdn, central->base, sizeof(hpdata_t),
73966-	    CACHELINE);
73967-}
73968-
73969-hpdata_t *
73970-hpa_central_extract(tsdn_t *tsdn, hpa_central_t *central, size_t size,
73971-    bool *oom) {
73972-	/* Don't yet support big allocations; these should get filtered out. */
73973-	assert(size <= HUGEPAGE);
73974-	/*
73975-	 * Should only try to extract from the central allocator if the local
73976-	 * shard is exhausted.  We should hold the grow_mtx on that shard.
73977-	 */
73978-	witness_assert_positive_depth_to_rank(
73979-	    tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_HPA_SHARD_GROW);
73980-
73981-	malloc_mutex_lock(tsdn, &central->grow_mtx);
73982-	*oom = false;
73983-
73984-	hpdata_t *ps = NULL;
73985-
73986-	/* Is eden a perfect fit? */
73987-	if (central->eden != NULL && central->eden_len == HUGEPAGE) {
73988-		ps = hpa_alloc_ps(tsdn, central);
73989-		if (ps == NULL) {
73990-			*oom = true;
73991-			malloc_mutex_unlock(tsdn, &central->grow_mtx);
73992-			return NULL;
73993-		}
73994-		hpdata_init(ps, central->eden, central->age_counter++);
73995-		central->eden = NULL;
73996-		central->eden_len = 0;
73997-		malloc_mutex_unlock(tsdn, &central->grow_mtx);
73998-		return ps;
73999-	}
74000-
74001-	/*
74002-	 * We're about to try to allocate from eden by splitting.  If eden is
74003-	 * NULL, we have to allocate it too.  Otherwise, we just have to
74004-	 * allocate an hpdata_t for the new pageslab.
74005-	 */
74006-	if (central->eden == NULL) {
74007-		/*
74008-		 * During development, we're primarily concerned with systems
74009-		 * with overcommit.  Eventually, we should be more careful here.
74010-		 */
74011-		bool commit = true;
74012-		/* Allocate address space, bailing if we fail. */
74013-		void *new_eden = pages_map(NULL, HPA_EDEN_SIZE, HUGEPAGE,
74014-		    &commit);
74015-		if (new_eden == NULL) {
74016-			*oom = true;
74017-			malloc_mutex_unlock(tsdn, &central->grow_mtx);
74018-			return NULL;
74019-		}
74020-		ps = hpa_alloc_ps(tsdn, central);
74021-		if (ps == NULL) {
74022-			pages_unmap(new_eden, HPA_EDEN_SIZE);
74023-			*oom = true;
74024-			malloc_mutex_unlock(tsdn, &central->grow_mtx);
74025-			return NULL;
74026-		}
74027-		central->eden = new_eden;
74028-		central->eden_len = HPA_EDEN_SIZE;
74029-	} else {
74030-		/* Eden is already nonempty; only need an hpdata_t for ps. */
74031-		ps = hpa_alloc_ps(tsdn, central);
74032-		if (ps == NULL) {
74033-			*oom = true;
74034-			malloc_mutex_unlock(tsdn, &central->grow_mtx);
74035-			return NULL;
74036-		}
74037-	}
74038-	assert(ps != NULL);
74039-	assert(central->eden != NULL);
74040-	assert(central->eden_len > HUGEPAGE);
74041-	assert(central->eden_len % HUGEPAGE == 0);
74042-	assert(HUGEPAGE_ADDR2BASE(central->eden) == central->eden);
74043-
74044-	hpdata_init(ps, central->eden, central->age_counter++);
74045-
74046-	char *eden_char = (char *)central->eden;
74047-	eden_char += HUGEPAGE;
74048-	central->eden = (void *)eden_char;
74049-	central->eden_len -= HUGEPAGE;
74050-
74051-	malloc_mutex_unlock(tsdn, &central->grow_mtx);
74052-
74053-	return ps;
74054-}
74055-
74056-bool
74057-hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
74058-    base_t *base, edata_cache_t *edata_cache, unsigned ind,
74059-    const hpa_shard_opts_t *opts) {
74060-	/* malloc_conf processing should have filtered out these cases. */
74061-	assert(hpa_supported());
74062-	bool err;
74063-	err = malloc_mutex_init(&shard->grow_mtx, "hpa_shard_grow",
74064-	    WITNESS_RANK_HPA_SHARD_GROW, malloc_mutex_rank_exclusive);
74065-	if (err) {
74066-		return true;
74067-	}
74068-	err = malloc_mutex_init(&shard->mtx, "hpa_shard",
74069-	    WITNESS_RANK_HPA_SHARD, malloc_mutex_rank_exclusive);
74070-	if (err) {
74071-		return true;
74072-	}
74073-
74074-	assert(edata_cache != NULL);
74075-	shard->central = central;
74076-	shard->base = base;
74077-	edata_cache_fast_init(&shard->ecf, edata_cache);
74078-	psset_init(&shard->psset);
74079-	shard->age_counter = 0;
74080-	shard->ind = ind;
74081-	shard->emap = emap;
74082-
74083-	shard->opts = *opts;
74084-
74085-	shard->npending_purge = 0;
74086-	nstime_init_zero(&shard->last_purge);
74087-
74088-	shard->stats.npurge_passes = 0;
74089-	shard->stats.npurges = 0;
74090-	shard->stats.nhugifies = 0;
74091-	shard->stats.ndehugifies = 0;
74092-
74093-	/*
74094-	 * Fill these in last, so that if an hpa_shard gets used despite
74095-	 * initialization failing, we'll at least crash instead of just
74096-	 * operating on corrupted data.
74097-	 */
74098-	shard->pai.alloc = &hpa_alloc;
74099-	shard->pai.alloc_batch = &hpa_alloc_batch;
74100-	shard->pai.expand = &hpa_expand;
74101-	shard->pai.shrink = &hpa_shrink;
74102-	shard->pai.dalloc = &hpa_dalloc;
74103-	shard->pai.dalloc_batch = &hpa_dalloc_batch;
74104-	shard->pai.time_until_deferred_work = &hpa_time_until_deferred_work;
74105-
74106-	hpa_do_consistency_checks(shard);
74107-
74108-	return false;
74109-}
74110-
74111-/*
74112- * Note that the stats functions here follow the usual stats naming conventions;
74113- * "merge" obtains the stats from some live object or instance, while "accum"
74114- * only combines the stats from one stats object to another.  Hence the lack of
74115- * locking here.
74116- */
74117-static void
74118-hpa_shard_nonderived_stats_accum(hpa_shard_nonderived_stats_t *dst,
74119-    hpa_shard_nonderived_stats_t *src) {
74120-	dst->npurge_passes += src->npurge_passes;
74121-	dst->npurges += src->npurges;
74122-	dst->nhugifies += src->nhugifies;
74123-	dst->ndehugifies += src->ndehugifies;
74124-}
74125-
74126-void
74127-hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src) {
74128-	psset_stats_accum(&dst->psset_stats, &src->psset_stats);
74129-	hpa_shard_nonderived_stats_accum(&dst->nonderived_stats,
74130-	    &src->nonderived_stats);
74131-}
74132-
74133-void
74134-hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
74135-    hpa_shard_stats_t *dst) {
74136-	hpa_do_consistency_checks(shard);
74137-
74138-	malloc_mutex_lock(tsdn, &shard->grow_mtx);
74139-	malloc_mutex_lock(tsdn, &shard->mtx);
74140-	psset_stats_accum(&dst->psset_stats, &shard->psset.stats);
74141-	hpa_shard_nonderived_stats_accum(&dst->nonderived_stats, &shard->stats);
74142-	malloc_mutex_unlock(tsdn, &shard->mtx);
74143-	malloc_mutex_unlock(tsdn, &shard->grow_mtx);
74144-}
74145-
74146-static bool
74147-hpa_good_hugification_candidate(hpa_shard_t *shard, hpdata_t *ps) {
74148-	/*
74149-	 * Note that this needs to be >= rather than just >, because of the
74150-	 * important special case in which the hugification threshold is exactly
74151-	 * HUGEPAGE.
74152-	 */
74153-	return hpdata_nactive_get(ps) * PAGE
74154-	    >= shard->opts.hugification_threshold;
74155-}
74156-
74157-static size_t
74158-hpa_adjusted_ndirty(tsdn_t *tsdn, hpa_shard_t *shard) {
74159-	malloc_mutex_assert_owner(tsdn, &shard->mtx);
74160-	return psset_ndirty(&shard->psset) - shard->npending_purge;
74161-}
74162-
74163-static size_t
74164-hpa_ndirty_max(tsdn_t *tsdn, hpa_shard_t *shard) {
74165-	malloc_mutex_assert_owner(tsdn, &shard->mtx);
74166-	if (shard->opts.dirty_mult == (fxp_t)-1) {
74167-		return (size_t)-1;
74168-	}
74169-	return fxp_mul_frac(psset_nactive(&shard->psset),
74170-	    shard->opts.dirty_mult);
74171-}
74172-
74173-static bool
74174-hpa_hugify_blocked_by_ndirty(tsdn_t *tsdn, hpa_shard_t *shard) {
74175-	malloc_mutex_assert_owner(tsdn, &shard->mtx);
74176-	hpdata_t *to_hugify = psset_pick_hugify(&shard->psset);
74177-	if (to_hugify == NULL) {
74178-		return false;
74179-	}
74180-	return hpa_adjusted_ndirty(tsdn, shard)
74181-	    + hpdata_nretained_get(to_hugify) > hpa_ndirty_max(tsdn, shard);
74182-}
74183-
74184-static bool
74185-hpa_should_purge(tsdn_t *tsdn, hpa_shard_t *shard) {
74186-	malloc_mutex_assert_owner(tsdn, &shard->mtx);
74187-	if (hpa_adjusted_ndirty(tsdn, shard) > hpa_ndirty_max(tsdn, shard)) {
74188-		return true;
74189-	}
74190-	if (hpa_hugify_blocked_by_ndirty(tsdn, shard)) {
74191-		return true;
74192-	}
74193-	return false;
74194-}
74195-
74196-static void
74197-hpa_update_purge_hugify_eligibility(tsdn_t *tsdn, hpa_shard_t *shard,
74198-    hpdata_t *ps) {
74199-	malloc_mutex_assert_owner(tsdn, &shard->mtx);
74200-	if (hpdata_changing_state_get(ps)) {
74201-		hpdata_purge_allowed_set(ps, false);
74202-		hpdata_disallow_hugify(ps);
74203-		return;
74204-	}
74205-	/*
74206-	 * Hugepages are distinctly costly to purge, so try to avoid it unless
74207-	 * they're *particularly* full of dirty pages.  Eventually, we should
74208-	 * use a smarter / more dynamic heuristic for situations where we have
74209-	 * to manually hugify.
74210-	 *
74211-	 * In situations where we don't manually hugify, this problem is
74212-	 * reduced.  The "bad" situation we're trying to avoid is one that's
74213-	 * common in some Linux configurations (where both enabled and defrag
74214-	 * are set to madvise) that can lead to long latency spikes on the first
74215-	 * access after a hugification.  The ideal policy in such configurations
74216-	 * is probably time-based for both purging and hugifying; only hugify a
74217-	 * hugepage if it's met the criteria for some extended period of time,
74218-	 * and only dehugify it if it's failed to meet the criteria for an
74219-	 * extended period of time.  When background threads are on, we should
74220-	 * try to take this hit on one of them, as well.
74221-	 *
74222-	 * I think the ideal setting is THP always enabled, and defrag set to
74223-	 * deferred; in that case we don't need any explicit calls on the
74224-	 * allocator's end at all; we just try to pack allocations in a
74225-	 * hugepage-friendly manner and let the OS hugify in the background.
74226-	 */
74227-	hpdata_purge_allowed_set(ps, hpdata_ndirty_get(ps) > 0);
74228-	if (hpa_good_hugification_candidate(shard, ps)
74229-	    && !hpdata_huge_get(ps)) {
74230-		nstime_t now;
74231-		shard->central->hooks.curtime(&now, /* first_reading */ true);
74232-		hpdata_allow_hugify(ps, now);
74233-	}
74234-	/*
74235-	 * Once a hugepage has become eligible for hugification, we don't mark
74236-	 * it as ineligible just because it stops meeting the criteria (this
74237-	 * could lead to situations where a hugepage that spends most of its
74238-	 * time meeting the criteria never quite gets hugified if there are
74239-	 * intervening deallocations).  The idea is that the hugification delay
74240-	 * will allow them to get purged, resetting their "hugify-allowed" bit.
74241-	 * If they don't get purged, then the hugification isn't hurting and
74242-	 * might help.  As an exception, we don't hugify hugepages that are now
74243-	 * empty; it definitely doesn't help there until the hugepage gets
74244-	 * reused, which is likely not for a while.
74245-	 */
74246-	if (hpdata_nactive_get(ps) == 0) {
74247-		hpdata_disallow_hugify(ps);
74248-	}
74249-}
74250-
74251-static bool
74252-hpa_shard_has_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard) {
74253-	malloc_mutex_assert_owner(tsdn, &shard->mtx);
74254-	hpdata_t *to_hugify = psset_pick_hugify(&shard->psset);
74255-	return to_hugify != NULL || hpa_should_purge(tsdn, shard);
74256-}
74257-
74258-/* Returns whether or not we purged anything. */
74259-static bool
74260-hpa_try_purge(tsdn_t *tsdn, hpa_shard_t *shard) {
74261-	malloc_mutex_assert_owner(tsdn, &shard->mtx);
74262-
74263-	hpdata_t *to_purge = psset_pick_purge(&shard->psset);
74264-	if (to_purge == NULL) {
74265-		return false;
74266-	}
74267-	assert(hpdata_purge_allowed_get(to_purge));
74268-	assert(!hpdata_changing_state_get(to_purge));
74269-
74270-	/*
74271-	 * Don't let anyone else purge or hugify this page while
74272-	 * we're purging it (allocations and deallocations are
74273-	 * OK).
74274-	 */
74275-	psset_update_begin(&shard->psset, to_purge);
74276-	assert(hpdata_alloc_allowed_get(to_purge));
74277-	hpdata_mid_purge_set(to_purge, true);
74278-	hpdata_purge_allowed_set(to_purge, false);
74279-	hpdata_disallow_hugify(to_purge);
74280-	/*
74281-	 * Unlike with hugification (where concurrent
74282-	 * allocations are allowed), concurrent allocation out
74283-	 * of a hugepage being purged is unsafe; we might hand
74284-	 * out an extent for an allocation and then purge it
74285-	 * (clearing out user data).
74286-	 */
74287-	hpdata_alloc_allowed_set(to_purge, false);
74288-	psset_update_end(&shard->psset, to_purge);
74289-
74290-	/* Gather all the metadata we'll need during the purge. */
74291-	bool dehugify = hpdata_huge_get(to_purge);
74292-	hpdata_purge_state_t purge_state;
74293-	size_t num_to_purge = hpdata_purge_begin(to_purge, &purge_state);
74294-
74295-	shard->npending_purge += num_to_purge;
74296-
74297-	malloc_mutex_unlock(tsdn, &shard->mtx);
74298-
74299-	/* Actually do the purging, now that the lock is dropped. */
74300-	if (dehugify) {
74301-		shard->central->hooks.dehugify(hpdata_addr_get(to_purge),
74302-		    HUGEPAGE);
74303-	}
74304-	size_t total_purged = 0;
74305-	uint64_t purges_this_pass = 0;
74306-	void *purge_addr;
74307-	size_t purge_size;
74308-	while (hpdata_purge_next(to_purge, &purge_state, &purge_addr,
74309-	    &purge_size)) {
74310-		total_purged += purge_size;
74311-		assert(total_purged <= HUGEPAGE);
74312-		purges_this_pass++;
74313-		shard->central->hooks.purge(purge_addr, purge_size);
74314-	}
74315-
74316-	malloc_mutex_lock(tsdn, &shard->mtx);
74317-	/* The shard updates. */
74318-	shard->npending_purge -= num_to_purge;
74319-	shard->stats.npurge_passes++;
74320-	shard->stats.npurges += purges_this_pass;
74321-	shard->central->hooks.curtime(&shard->last_purge,
74322-	    /* first_reading */ false);
74323-	if (dehugify) {
74324-		shard->stats.ndehugifies++;
74325-	}
74326-
74327-	/* The hpdata updates. */
74328-	psset_update_begin(&shard->psset, to_purge);
74329-	if (dehugify) {
74330-		hpdata_dehugify(to_purge);
74331-	}
74332-	hpdata_purge_end(to_purge, &purge_state);
74333-	hpdata_mid_purge_set(to_purge, false);
74334-
74335-	hpdata_alloc_allowed_set(to_purge, true);
74336-	hpa_update_purge_hugify_eligibility(tsdn, shard, to_purge);
74337-
74338-	psset_update_end(&shard->psset, to_purge);
74339-
74340-	return true;
74341-}
74342-
74343-/* Returns whether or not we hugified anything. */
74344-static bool
74345-hpa_try_hugify(tsdn_t *tsdn, hpa_shard_t *shard) {
74346-	malloc_mutex_assert_owner(tsdn, &shard->mtx);
74347-
74348-	if (hpa_hugify_blocked_by_ndirty(tsdn, shard)) {
74349-		return false;
74350-	}
74351-
74352-	hpdata_t *to_hugify = psset_pick_hugify(&shard->psset);
74353-	if (to_hugify == NULL) {
74354-		return false;
74355-	}
74356-	assert(hpdata_hugify_allowed_get(to_hugify));
74357-	assert(!hpdata_changing_state_get(to_hugify));
74358-
74359-	/* Make sure that it's been hugifiable for long enough. */
74360-	nstime_t time_hugify_allowed = hpdata_time_hugify_allowed(to_hugify);
74361-	uint64_t millis = shard->central->hooks.ms_since(&time_hugify_allowed);
74362-	if (millis < shard->opts.hugify_delay_ms) {
74363-		return false;
74364-	}
74365-
74366-	/*
74367-	 * Don't let anyone else purge or hugify this page while
74368-	 * we're hugifying it (allocations and deallocations are
74369-	 * OK).
74370-	 */
74371-	psset_update_begin(&shard->psset, to_hugify);
74372-	hpdata_mid_hugify_set(to_hugify, true);
74373-	hpdata_purge_allowed_set(to_hugify, false);
74374-	hpdata_disallow_hugify(to_hugify);
74375-	assert(hpdata_alloc_allowed_get(to_hugify));
74376-	psset_update_end(&shard->psset, to_hugify);
74377-
74378-	malloc_mutex_unlock(tsdn, &shard->mtx);
74379-
74380-	shard->central->hooks.hugify(hpdata_addr_get(to_hugify), HUGEPAGE);
74381-
74382-	malloc_mutex_lock(tsdn, &shard->mtx);
74383-	shard->stats.nhugifies++;
74384-
74385-	psset_update_begin(&shard->psset, to_hugify);
74386-	hpdata_hugify(to_hugify);
74387-	hpdata_mid_hugify_set(to_hugify, false);
74388-	hpa_update_purge_hugify_eligibility(tsdn, shard, to_hugify);
74389-	psset_update_end(&shard->psset, to_hugify);
74390-
74391-	return true;
74392-}
74393-
74394-/*
74395- * Execution of deferred work is forced if it's triggered by an explicit
74396- * hpa_shard_do_deferred_work() call.
74397- */
74398-static void
74399-hpa_shard_maybe_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard,
74400-    bool forced) {
74401-	malloc_mutex_assert_owner(tsdn, &shard->mtx);
74402-	if (!forced && shard->opts.deferral_allowed) {
74403-		return;
74404-	}
74405-	/*
74406-	 * If we're on a background thread, do work so long as there's work to
74407-	 * be done.  Otherwise, bound latency to not be *too* bad by doing at
74408-	 * most a small fixed number of operations.
74409-	 */
74410-	bool hugified = false;
74411-	bool purged = false;
74412-	size_t max_ops = (forced ? (size_t)-1 : 16);
74413-	size_t nops = 0;
74414-	do {
74415-		/*
74416-		 * Always purge before hugifying, to make sure we get some
74417-		 * ability to hit our quiescence targets.
74418-		 */
74419-		purged = false;
74420-		while (hpa_should_purge(tsdn, shard) && nops < max_ops) {
74421-			purged = hpa_try_purge(tsdn, shard);
74422-			if (purged) {
74423-				nops++;
74424-			}
74425-		}
74426-		hugified = hpa_try_hugify(tsdn, shard);
74427-		if (hugified) {
74428-			nops++;
74429-		}
74430-		malloc_mutex_assert_owner(tsdn, &shard->mtx);
74432-	} while ((hugified || purged) && nops < max_ops);
74433-}
74434-
74435-static edata_t *
74436-hpa_try_alloc_one_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
74437-    bool *oom) {
74438-	bool err;
74439-	edata_t *edata = edata_cache_fast_get(tsdn, &shard->ecf);
74440-	if (edata == NULL) {
74441-		*oom = true;
74442-		return NULL;
74443-	}
74444-
74445-	hpdata_t *ps = psset_pick_alloc(&shard->psset, size);
74446-	if (ps == NULL) {
74447-		edata_cache_fast_put(tsdn, &shard->ecf, edata);
74448-		return NULL;
74449-	}
74450-
74451-	psset_update_begin(&shard->psset, ps);
74452-
74453-	if (hpdata_empty(ps)) {
74454-		/*
74455-		 * If the pageslab used to be empty, treat it as though it's
74456-		 * brand new for fragmentation-avoidance purposes; what we're
74457-		 * trying to approximate is the age of the allocations *in* that
74458-		 * pageslab, and the allocations in the new pageslab are
74459-		 * definitionally the youngest in this hpa shard.
74460-		 */
74461-		hpdata_age_set(ps, shard->age_counter++);
74462-	}
74463-
74464-	void *addr = hpdata_reserve_alloc(ps, size);
74465-	edata_init(edata, shard->ind, addr, size, /* slab */ false,
74466-	    SC_NSIZES, /* sn */ hpdata_age_get(ps), extent_state_active,
74467-	    /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
74468-	    EXTENT_NOT_HEAD);
74469-	edata_ps_set(edata, ps);
74470-
74471-	/*
74472-	 * This could theoretically be moved outside of the critical section,
74473-	 * but that introduces the potential for a race.  Without the lock, the
74474-	 * (initially nonempty, since this is the reuse pathway) pageslab we
74475-	 * allocated out of could become otherwise empty while the lock is
74476-	 * dropped.  This would force us to deal with a pageslab eviction down
74477-	 * the error pathway, which is a pain.
74478-	 */
74479-	err = emap_register_boundary(tsdn, shard->emap, edata,
74480-	    SC_NSIZES, /* slab */ false);
74481-	if (err) {
74482-		hpdata_unreserve(ps, edata_addr_get(edata),
74483-		    edata_size_get(edata));
74484-		/*
74485-		 * We should arguably reset dirty state here, but this would
74486-		 * require some sort of prepare + commit functionality that's a
74487-		 * little much to deal with for now.
74488-		 *
74489-		 * We don't have a do_deferred_work down this pathway, on the
74490-		 * principle that we didn't *really* affect shard state (we
74491-		 * tweaked the stats, but our tweaks weren't really accurate).
74492-		 */
74493-		psset_update_end(&shard->psset, ps);
74494-		edata_cache_fast_put(tsdn, &shard->ecf, edata);
74495-		*oom = true;
74496-		return NULL;
74497-	}
74498-
74499-	hpa_update_purge_hugify_eligibility(tsdn, shard, ps);
74500-	psset_update_end(&shard->psset, ps);
74501-	return edata;
74502-}
74503-
74504-static size_t
74505-hpa_try_alloc_batch_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
74506-    bool *oom, size_t nallocs, edata_list_active_t *results,
74507-    bool *deferred_work_generated) {
74508-	malloc_mutex_lock(tsdn, &shard->mtx);
74509-	size_t nsuccess = 0;
74510-	for (; nsuccess < nallocs; nsuccess++) {
74511-		edata_t *edata = hpa_try_alloc_one_no_grow(tsdn, shard, size,
74512-		    oom);
74513-		if (edata == NULL) {
74514-			break;
74515-		}
74516-		edata_list_active_append(results, edata);
74517-	}
74518-
74519-	hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ false);
74520-	*deferred_work_generated = hpa_shard_has_deferred_work(tsdn, shard);
74521-	malloc_mutex_unlock(tsdn, &shard->mtx);
74522-	return nsuccess;
74523-}
74524-
74525-static size_t
74526-hpa_alloc_batch_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
74527-    size_t nallocs, edata_list_active_t *results,
74528-    bool *deferred_work_generated) {
74529-	assert(size <= shard->opts.slab_max_alloc);
74530-	bool oom = false;
74531-
74532-	size_t nsuccess = hpa_try_alloc_batch_no_grow(tsdn, shard, size, &oom,
74533-	    nallocs, results, deferred_work_generated);
74534-
74535-	if (nsuccess == nallocs || oom) {
74536-		return nsuccess;
74537-	}
74538-
74539-	/*
74540-	 * We didn't OOM, but weren't able to fill everything requested of us;
74541-	 * try to grow.
74542-	 */
74543-	malloc_mutex_lock(tsdn, &shard->grow_mtx);
74544-	/*
74545-	 * Check for grow races; maybe some earlier thread expanded the psset
74546-	 * in between when we dropped the main mutex and grabbed the grow mutex.
74547-	 */
74548-	nsuccess += hpa_try_alloc_batch_no_grow(tsdn, shard, size, &oom,
74549-	    nallocs - nsuccess, results, deferred_work_generated);
74550-	if (nsuccess == nallocs || oom) {
74551-		malloc_mutex_unlock(tsdn, &shard->grow_mtx);
74552-		return nsuccess;
74553-	}
74554-
74555-	/*
74556-	 * Note that we don't hold shard->mtx here (while growing);
74557-	 * deallocations (and allocations of smaller sizes) may still succeed
74558-	 * while we're doing this potentially expensive system call.
74559-	 */
74560-	hpdata_t *ps = hpa_central_extract(tsdn, shard->central, size, &oom);
74561-	if (ps == NULL) {
74562-		malloc_mutex_unlock(tsdn, &shard->grow_mtx);
74563-		return nsuccess;
74564-	}
74565-
74566-	/*
74567-	 * We got the pageslab; allocate from it.  This does an unlock followed
74568-	 * by a lock on the same mutex, and holds the grow mutex while doing
74569-	 * deferred work, but this is an uncommon path; the simplicity is worth
74570-	 * it.
74571-	 */
74572-	malloc_mutex_lock(tsdn, &shard->mtx);
74573-	psset_insert(&shard->psset, ps);
74574-	malloc_mutex_unlock(tsdn, &shard->mtx);
74575-
74576-	nsuccess += hpa_try_alloc_batch_no_grow(tsdn, shard, size, &oom,
74577-	    nallocs - nsuccess, results, deferred_work_generated);
74578-	/*
74579-	 * Drop grow_mtx before doing deferred work; other threads blocked on it
74580-	 * should be allowed to proceed while we're working.
74581-	 */
74582-	malloc_mutex_unlock(tsdn, &shard->grow_mtx);
74583-
74584-	return nsuccess;
74585-}
74586-
74587-static hpa_shard_t *
74588-hpa_from_pai(pai_t *self) {
74589-	assert(self->alloc == &hpa_alloc);
74590-	assert(self->expand == &hpa_expand);
74591-	assert(self->shrink == &hpa_shrink);
74592-	assert(self->dalloc == &hpa_dalloc);
74593-	return (hpa_shard_t *)self;
74594-}
74595-
74596-static size_t
74597-hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
74598-    edata_list_active_t *results, bool *deferred_work_generated) {
74599-	assert(nallocs > 0);
74600-	assert((size & PAGE_MASK) == 0);
74601-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
74602-	    WITNESS_RANK_CORE, 0);
74603-	hpa_shard_t *shard = hpa_from_pai(self);
74604-
74605-	if (size > shard->opts.slab_max_alloc) {
74606-		return 0;
74607-	}
74608-
74609-	size_t nsuccess = hpa_alloc_batch_psset(tsdn, shard, size, nallocs,
74610-	    results, deferred_work_generated);
74611-
74612-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
74613-	    WITNESS_RANK_CORE, 0);
74614-
74615-	/*
74616-	 * Guard the sanity checks with config_debug because the loop cannot be
74617-	 * proven non-circular by the compiler, even if everything within the
74618-	 * loop is optimized away.
74619-	 */
74620-	if (config_debug) {
74621-		edata_t *edata;
74622-		ql_foreach(edata, &results->head, ql_link_active) {
74623-			emap_assert_mapped(tsdn, shard->emap, edata);
74624-			assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
74625-			assert(edata_state_get(edata) == extent_state_active);
74626-			assert(edata_arena_ind_get(edata) == shard->ind);
74627-			assert(edata_szind_get_maybe_invalid(edata) ==
74628-			    SC_NSIZES);
74629-			assert(!edata_slab_get(edata));
74630-			assert(edata_committed_get(edata));
74631-			assert(edata_base_get(edata) == edata_addr_get(edata));
74632-			assert(edata_base_get(edata) != NULL);
74633-		}
74634-	}
74635-	return nsuccess;
74636-}
74637-
74638-static edata_t *
74639-hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
74640-    bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
74641-	assert((size & PAGE_MASK) == 0);
74642-	assert(!guarded);
74643-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
74644-	    WITNESS_RANK_CORE, 0);
74645-
74646-	/* We don't handle alignment or zeroing for now. */
74647-	if (alignment > PAGE || zero) {
74648-		return NULL;
74649-	}
74650-	/*
74651-	 * An alloc with alignment == PAGE and zero == false is equivalent to a
74652-	 * batch alloc of 1.  Just do that, so we can share code.
74653-	 */
74654-	edata_list_active_t results;
74655-	edata_list_active_init(&results);
74656-	size_t nallocs = hpa_alloc_batch(tsdn, self, size, /* nallocs */ 1,
74657-	    &results, deferred_work_generated);
74658-	assert(nallocs == 0 || nallocs == 1);
74659-	edata_t *edata = edata_list_active_first(&results);
74660-	return edata;
74661-}
74662-
74663-static bool
74664-hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
74665-    size_t new_size, bool zero, bool *deferred_work_generated) {
74666-	/* Expand not yet supported. */
74667-	return true;
74668-}
74669-
74670-static bool
74671-hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
74672-    size_t old_size, size_t new_size, bool *deferred_work_generated) {
74673-	/* Shrink not yet supported. */
74674-	return true;
74675-}
74676-
74677-static void
74678-hpa_dalloc_prepare_unlocked(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata) {
74679-	malloc_mutex_assert_not_owner(tsdn, &shard->mtx);
74680-
74681-	assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
74682-	assert(edata_state_get(edata) == extent_state_active);
74683-	assert(edata_arena_ind_get(edata) == shard->ind);
74684-	assert(edata_szind_get_maybe_invalid(edata) == SC_NSIZES);
74685-	assert(edata_committed_get(edata));
74686-	assert(edata_base_get(edata) != NULL);
74687-
74688-	/*
74689-	 * Another thread shouldn't be trying to touch the metadata of an
74690-	 * allocation being freed.  The one exception is a merge attempt from a
74691-	 * lower-addressed PAC extent; in this case we have a nominal race on
74692-	 * the edata metadata bits, but in practice the fact that the PAI bits
74693-	 * are different will prevent any further access.  The race is bad, but
74694-	 * benign in practice, and the long term plan is to track enough state
74695-	 * in the rtree to prevent these merge attempts in the first place.
74696-	 */
74697-	edata_addr_set(edata, edata_base_get(edata));
74698-	edata_zeroed_set(edata, false);
74699-	emap_deregister_boundary(tsdn, shard->emap, edata);
74700-}
74701-
74702-static void
74703-hpa_dalloc_locked(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata) {
74704-	malloc_mutex_assert_owner(tsdn, &shard->mtx);
74705-
74706-	/*
74707-	 * Release the metadata early, to avoid having to remember to do it
74708-	 * while we're also doing tricky purging logic.  First, we need to grab
74709-	 * a few bits of metadata from it.
74710-	 *
74711-	 * Note that the shard mutex protects ps's metadata too; it wouldn't be
74712-	 * correct to try to read most information out of it without the lock.
74713-	 */
74714-	hpdata_t *ps = edata_ps_get(edata);
74715-	/* Currently, all edatas come from pageslabs. */
74716-	assert(ps != NULL);
74717-	void *unreserve_addr = edata_addr_get(edata);
74718-	size_t unreserve_size = edata_size_get(edata);
74719-	edata_cache_fast_put(tsdn, &shard->ecf, edata);
74720-
74721-	psset_update_begin(&shard->psset, ps);
74722-	hpdata_unreserve(ps, unreserve_addr, unreserve_size);
74723-	hpa_update_purge_hugify_eligibility(tsdn, shard, ps);
74724-	psset_update_end(&shard->psset, ps);
74725-}
74726-
74727-static void
74728-hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
74729-    bool *deferred_work_generated) {
74730-	hpa_shard_t *shard = hpa_from_pai(self);
74731-
74732-	edata_t *edata;
74733-	ql_foreach(edata, &list->head, ql_link_active) {
74734-		hpa_dalloc_prepare_unlocked(tsdn, shard, edata);
74735-	}
74736-
74737-	malloc_mutex_lock(tsdn, &shard->mtx);
74738-	/* Now, remove from the list. */
74739-	while ((edata = edata_list_active_first(list)) != NULL) {
74740-		edata_list_active_remove(list, edata);
74741-		hpa_dalloc_locked(tsdn, shard, edata);
74742-	}
74743-	hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ false);
74744-	*deferred_work_generated =
74745-	    hpa_shard_has_deferred_work(tsdn, shard);
74746-
74747-	malloc_mutex_unlock(tsdn, &shard->mtx);
74748-}
74749-
74750-static void
74751-hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
74752-    bool *deferred_work_generated) {
74753-	assert(!edata_guarded_get(edata));
74754-	/* Just a dalloc_batch of size 1; this lets us share logic. */
74755-	edata_list_active_t dalloc_list;
74756-	edata_list_active_init(&dalloc_list);
74757-	edata_list_active_append(&dalloc_list, edata);
74758-	hpa_dalloc_batch(tsdn, self, &dalloc_list, deferred_work_generated);
74759-}
74760-
74761-/*
74762- * Calculate time until either purging or hugification ought to happen.
74763- * Called by background threads.
74764- */
74765-static uint64_t
74766-hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
74767-	hpa_shard_t *shard = hpa_from_pai(self);
74768-	uint64_t time_ns = BACKGROUND_THREAD_DEFERRED_MAX;
74769-
74770-	malloc_mutex_lock(tsdn, &shard->mtx);
74771-
74772-	hpdata_t *to_hugify = psset_pick_hugify(&shard->psset);
74773-	if (to_hugify != NULL) {
74774-		nstime_t time_hugify_allowed =
74775-		    hpdata_time_hugify_allowed(to_hugify);
74776-		uint64_t since_hugify_allowed_ms =
74777-		    shard->central->hooks.ms_since(&time_hugify_allowed);
74778-		/*
74779-		 * If not enough time has passed since hugification was allowed,
74780-		 * sleep for the rest.
74781-		 */
74782-		if (since_hugify_allowed_ms < shard->opts.hugify_delay_ms) {
74783-			time_ns = shard->opts.hugify_delay_ms -
74784-			    since_hugify_allowed_ms;
74785-			time_ns *= 1000 * 1000;
74786-		} else {
74787-			malloc_mutex_unlock(tsdn, &shard->mtx);
74788-			return BACKGROUND_THREAD_DEFERRED_MIN;
74789-		}
74790-	}
74791-
74792-	if (hpa_should_purge(tsdn, shard)) {
74793-		/*
74794-		 * If we haven't purged before, no need to check interval
74795-		 * between purges. Simply purge as soon as possible.
74796-		 */
74797-		if (shard->stats.npurge_passes == 0) {
74798-			malloc_mutex_unlock(tsdn, &shard->mtx);
74799-			return BACKGROUND_THREAD_DEFERRED_MIN;
74800-		}
74801-		uint64_t since_last_purge_ms = shard->central->hooks.ms_since(
74802-		    &shard->last_purge);
74803-
74804-		if (since_last_purge_ms < shard->opts.min_purge_interval_ms) {
74805-			uint64_t until_purge_ns;
74806-			until_purge_ns = shard->opts.min_purge_interval_ms -
74807-			    since_last_purge_ms;
74808-			until_purge_ns *= 1000 * 1000;
74809-
74810-			if (until_purge_ns < time_ns) {
74811-				time_ns = until_purge_ns;
74812-			}
74813-		} else {
74814-			time_ns = BACKGROUND_THREAD_DEFERRED_MIN;
74815-		}
74816-	}
74817-	malloc_mutex_unlock(tsdn, &shard->mtx);
74818-	return time_ns;
74819-}
74820-
74821-void
74822-hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard) {
74823-	hpa_do_consistency_checks(shard);
74824-
74825-	malloc_mutex_lock(tsdn, &shard->mtx);
74826-	edata_cache_fast_disable(tsdn, &shard->ecf);
74827-	malloc_mutex_unlock(tsdn, &shard->mtx);
74828-}
74829-
74830-static void
74831-hpa_shard_assert_stats_empty(psset_bin_stats_t *bin_stats) {
74832-	assert(bin_stats->npageslabs == 0);
74833-	assert(bin_stats->nactive == 0);
74834-}
74835-
74836-static void
74837-hpa_assert_empty(tsdn_t *tsdn, hpa_shard_t *shard, psset_t *psset) {
74838-	malloc_mutex_assert_owner(tsdn, &shard->mtx);
74839-	for (int huge = 0; huge <= 1; huge++) {
74840-		hpa_shard_assert_stats_empty(&psset->stats.full_slabs[huge]);
74841-		for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
74842-			hpa_shard_assert_stats_empty(
74843-			    &psset->stats.nonfull_slabs[i][huge]);
74844-		}
74845-	}
74846-}
74847-
74848-void
74849-hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard) {
74850-	hpa_do_consistency_checks(shard);
74851-	/*
74852-	 * By the time we're here, the arena code should have dalloc'd all the
74853-	 * active extents, which means we should have eventually evicted
74854-	 * everything from the psset, so it shouldn't be able to serve even a
74855-	 * 1-page allocation.
74856-	 */
74857-	if (config_debug) {
74858-		malloc_mutex_lock(tsdn, &shard->mtx);
74859-		hpa_assert_empty(tsdn, shard, &shard->psset);
74860-		malloc_mutex_unlock(tsdn, &shard->mtx);
74861-	}
74862-	hpdata_t *ps;
74863-	while ((ps = psset_pick_alloc(&shard->psset, PAGE)) != NULL) {
74864-		/* There should be no allocations anywhere. */
74865-		assert(hpdata_empty(ps));
74866-		psset_remove(&shard->psset, ps);
74867-		shard->central->hooks.unmap(hpdata_addr_get(ps), HUGEPAGE);
74868-	}
74869-}
74870-
74871-void
74872-hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard,
74873-    bool deferral_allowed) {
74874-	hpa_do_consistency_checks(shard);
74875-
74876-	malloc_mutex_lock(tsdn, &shard->mtx);
74877-	bool deferral_previously_allowed = shard->opts.deferral_allowed;
74878-	shard->opts.deferral_allowed = deferral_allowed;
74879-	if (deferral_previously_allowed && !deferral_allowed) {
74880-		hpa_shard_maybe_do_deferred_work(tsdn, shard,
74881-		    /* forced */ true);
74882-	}
74883-	malloc_mutex_unlock(tsdn, &shard->mtx);
74884-}
74885-
74886-void
74887-hpa_shard_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard) {
74888-	hpa_do_consistency_checks(shard);
74889-
74890-	malloc_mutex_lock(tsdn, &shard->mtx);
74891-	hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ true);
74892-	malloc_mutex_unlock(tsdn, &shard->mtx);
74893-}
74894-
74895-void
74896-hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard) {
74897-	hpa_do_consistency_checks(shard);
74898-
74899-	malloc_mutex_prefork(tsdn, &shard->grow_mtx);
74900-}
74901-
74902-void
74903-hpa_shard_prefork4(tsdn_t *tsdn, hpa_shard_t *shard) {
74904-	hpa_do_consistency_checks(shard);
74905-
74906-	malloc_mutex_prefork(tsdn, &shard->mtx);
74907-}
74908-
74909-void
74910-hpa_shard_postfork_parent(tsdn_t *tsdn, hpa_shard_t *shard) {
74911-	hpa_do_consistency_checks(shard);
74912-
74913-	malloc_mutex_postfork_parent(tsdn, &shard->grow_mtx);
74914-	malloc_mutex_postfork_parent(tsdn, &shard->mtx);
74915-}
74916-
74917-void
74918-hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard) {
74919-	hpa_do_consistency_checks(shard);
74920-
74921-	malloc_mutex_postfork_child(tsdn, &shard->grow_mtx);
74922-	malloc_mutex_postfork_child(tsdn, &shard->mtx);
74923-}
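
The dirty-page cap computed by hpa_ndirty_max() above is simply "active pages times a fixed-point fraction, or no limit when dirty_mult is (fxp_t)-1".  The following is a minimal, self-contained C sketch of that idea, assuming a 16.16 fixed-point representation; the names (fxp16_t, mul_frac16, ndirty_max) and the numbers in main() are invented for illustration and are not jemalloc's fxp API.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

typedef uint32_t fxp16_t;                 /* 16.16 fixed point (assumed). */
#define FXP16_ONE ((fxp16_t)1 << 16)

static size_t
mul_frac16(size_t n, fxp16_t frac) {
	/* n * (frac / 2^16), done in 64 bits to avoid overflow. */
	return (size_t)(((uint64_t)n * frac) >> 16);
}

static size_t
ndirty_max(size_t nactive, fxp16_t dirty_mult) {
	if (dirty_mult == (fxp16_t)-1) {  /* Sentinel: no dirty-page limit. */
		return (size_t)-1;
	}
	return mul_frac16(nactive, dirty_mult);
}

int
main(void) {
	/* 25% of 1000 active pages => keep at most 250 dirty pages. */
	printf("%zu\n", ndirty_max(1000, FXP16_ONE / 4));
	return 0;
}
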
74924diff --git a/jemalloc/src/hpa_hooks.c b/jemalloc/src/hpa_hooks.c
74925deleted file mode 100644
74926index ade581e..0000000
74927--- a/jemalloc/src/hpa_hooks.c
74928+++ /dev/null
74929@@ -1,63 +0,0 @@
74930-#include "jemalloc/internal/jemalloc_preamble.h"
74931-#include "jemalloc/internal/jemalloc_internal_includes.h"
74932-
74933-#include "jemalloc/internal/hpa_hooks.h"
74934-
74935-static void *hpa_hooks_map(size_t size);
74936-static void hpa_hooks_unmap(void *ptr, size_t size);
74937-static void hpa_hooks_purge(void *ptr, size_t size);
74938-static void hpa_hooks_hugify(void *ptr, size_t size);
74939-static void hpa_hooks_dehugify(void *ptr, size_t size);
74940-static void hpa_hooks_curtime(nstime_t *r_nstime, bool first_reading);
74941-static uint64_t hpa_hooks_ms_since(nstime_t *past_nstime);
74942-
74943-hpa_hooks_t hpa_hooks_default = {
74944-	&hpa_hooks_map,
74945-	&hpa_hooks_unmap,
74946-	&hpa_hooks_purge,
74947-	&hpa_hooks_hugify,
74948-	&hpa_hooks_dehugify,
74949-	&hpa_hooks_curtime,
74950-	&hpa_hooks_ms_since
74951-};
74952-
74953-static void *
74954-hpa_hooks_map(size_t size) {
74955-	bool commit = true;
74956-	return pages_map(NULL, size, HUGEPAGE, &commit);
74957-}
74958-
74959-static void
74960-hpa_hooks_unmap(void *ptr, size_t size) {
74961-	pages_unmap(ptr, size);
74962-}
74963-
74964-static void
74965-hpa_hooks_purge(void *ptr, size_t size) {
74966-	pages_purge_forced(ptr, size);
74967-}
74968-
74969-static void
74970-hpa_hooks_hugify(void *ptr, size_t size) {
74971-	bool err = pages_huge(ptr, size);
74972-	(void)err;
74973-}
74974-
74975-static void
74976-hpa_hooks_dehugify(void *ptr, size_t size) {
74977-	bool err = pages_nohuge(ptr, size);
74978-	(void)err;
74979-}
74980-
74981-static void
74982-hpa_hooks_curtime(nstime_t *r_nstime, bool first_reading) {
74983-	if (first_reading) {
74984-		nstime_init_zero(r_nstime);
74985-	}
74986-	nstime_update(r_nstime);
74987-}
74988-
74989-static uint64_t
74990-hpa_hooks_ms_since(nstime_t *past_nstime) {
74991-	return nstime_ns_since(past_nstime) / 1000 / 1000;
74992-}
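
hpa_hooks_default above is a table of function pointers, so tests can swap in fake mapping, purging, and clock functions without touching the HPA logic itself.  Below is a minimal, self-contained sketch of that pattern in standard C; every name here (demo_hooks_t, demo_purge, demo_now_ms) is invented for illustration and is not jemalloc's API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef struct {
	void (*purge)(void *ptr, size_t size);
	uint64_t (*now_ms)(void);
} demo_hooks_t;

static void
demo_purge(void *ptr, size_t size) {
	/* A real hook would madvise(); this stand-in just logs. */
	printf("purge %p (%zu bytes)\n", ptr, size);
}

static uint64_t
demo_now_ms(void) {
	return (uint64_t)time(NULL) * 1000;
}

static const demo_hooks_t demo_hooks_default = {
	&demo_purge,
	&demo_now_ms,
};

int
main(void) {
	const demo_hooks_t *hooks = &demo_hooks_default;
	char buf[4096];
	hooks->purge(buf, sizeof(buf));
	printf("now: %llu ms\n", (unsigned long long)hooks->now_ms());
	return 0;
}
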
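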
74993diff --git a/jemalloc/src/hpdata.c b/jemalloc/src/hpdata.c
74994deleted file mode 100644
74995index e7d7294..0000000
74996--- a/jemalloc/src/hpdata.c
74997+++ /dev/null
74998@@ -1,325 +0,0 @@
74999-#include "jemalloc/internal/jemalloc_preamble.h"
75000-#include "jemalloc/internal/jemalloc_internal_includes.h"
75001-
75002-#include "jemalloc/internal/hpdata.h"
75003-
75004-static int
75005-hpdata_age_comp(const hpdata_t *a, const hpdata_t *b) {
75006-	uint64_t a_age = hpdata_age_get(a);
75007-	uint64_t b_age = hpdata_age_get(b);
75008-	/*
75009-	 * hpdata ages are operation counts in the psset; no two should be the
75010-	 * same.
75011-	 */
75012-	assert(a_age != b_age);
75013-	return (a_age > b_age) - (a_age < b_age);
75014-}
75015-
75016-ph_gen(, hpdata_age_heap, hpdata_t, age_link, hpdata_age_comp)
75017-
75018-void
75019-hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age) {
75020-	hpdata_addr_set(hpdata, addr);
75021-	hpdata_age_set(hpdata, age);
75022-	hpdata->h_huge = false;
75023-	hpdata->h_alloc_allowed = true;
75024-	hpdata->h_in_psset_alloc_container = false;
75025-	hpdata->h_purge_allowed = false;
75026-	hpdata->h_hugify_allowed = false;
75027-	hpdata->h_in_psset_hugify_container = false;
75028-	hpdata->h_mid_purge = false;
75029-	hpdata->h_mid_hugify = false;
75030-	hpdata->h_updating = false;
75031-	hpdata->h_in_psset = false;
75032-	hpdata_longest_free_range_set(hpdata, HUGEPAGE_PAGES);
75033-	hpdata->h_nactive = 0;
75034-	fb_init(hpdata->active_pages, HUGEPAGE_PAGES);
75035-	hpdata->h_ntouched = 0;
75036-	fb_init(hpdata->touched_pages, HUGEPAGE_PAGES);
75037-
75038-	hpdata_assert_consistent(hpdata);
75039-}
75040-
75041-void *
75042-hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz) {
75043-	hpdata_assert_consistent(hpdata);
75044-	/*
75045-	 * This is a metadata change; the hpdata should therefore either not be
75046-	 * in the psset, or should have explicitly marked itself as being
75047-	 * mid-update.
75048-	 */
75049-	assert(!hpdata->h_in_psset || hpdata->h_updating);
75050-	assert(hpdata->h_alloc_allowed);
75051-	assert((sz & PAGE_MASK) == 0);
75052-	size_t npages = sz >> LG_PAGE;
75053-	assert(npages <= hpdata_longest_free_range_get(hpdata));
75054-
75055-	size_t result;
75056-
75057-	size_t start = 0;
75058-	/*
75059-	 * These are dead stores, but without them the compiler would warn about
75060-	 * uninitialized use; it can't tell statically that found is always true below.
75061-	 */
75062-	size_t begin = 0;
75063-	size_t len = 0;
75064-
75065-	size_t largest_unchosen_range = 0;
75066-	while (true) {
75067-		bool found = fb_urange_iter(hpdata->active_pages,
75068-		    HUGEPAGE_PAGES, start, &begin, &len);
75069-		/*
75070-		 * A precondition to this function is that hpdata must be able
75071-		 * to serve the allocation.
75072-		 */
75073-		assert(found);
75074-		assert(len <= hpdata_longest_free_range_get(hpdata));
75075-		if (len >= npages) {
75076-			/*
75077-			 * We use first-fit within the page slabs; this gives
75078-			 * bounded worst-case fragmentation within a slab.  It's
75079-			 * not necessarily right; we could experiment with
75080-			 * various other options.
75081-			 */
75082-			break;
75083-		}
75084-		if (len > largest_unchosen_range) {
75085-			largest_unchosen_range = len;
75086-		}
75087-		start = begin + len;
75088-	}
75089-	/* We found a range; remember it. */
75090-	result = begin;
75091-	fb_set_range(hpdata->active_pages, HUGEPAGE_PAGES, begin, npages);
75092-	hpdata->h_nactive += npages;
75093-
75094-	/*
75095-	 * We might be about to dirty some memory for the first time; update our
75096-	 * count if so.
75097-	 */
75098-	size_t new_dirty = fb_ucount(hpdata->touched_pages,  HUGEPAGE_PAGES,
75099-	    result, npages);
75100-	fb_set_range(hpdata->touched_pages, HUGEPAGE_PAGES, result, npages);
75101-	hpdata->h_ntouched += new_dirty;
75102-
75103-	/*
75104-	 * If we allocated out of a range that was the longest in the hpdata, it
75105-	 * might be the only one of that size and we'll have to adjust the
75106-	 * metadata.
75107-	 */
75108-	if (len == hpdata_longest_free_range_get(hpdata)) {
75109-		start = begin + npages;
75110-		while (start < HUGEPAGE_PAGES) {
75111-			bool found = fb_urange_iter(hpdata->active_pages,
75112-			    HUGEPAGE_PAGES, start, &begin, &len);
75113-			if (!found) {
75114-				break;
75115-			}
75116-			assert(len <= hpdata_longest_free_range_get(hpdata));
75117-			if (len == hpdata_longest_free_range_get(hpdata)) {
75118-				largest_unchosen_range = len;
75119-				break;
75120-			}
75121-			if (len > largest_unchosen_range) {
75122-				largest_unchosen_range = len;
75123-			}
75124-			start = begin + len;
75125-		}
75126-		hpdata_longest_free_range_set(hpdata, largest_unchosen_range);
75127-	}
75128-
75129-	hpdata_assert_consistent(hpdata);
75130-	return (void *)(
75131-	    (uintptr_t)hpdata_addr_get(hpdata) + (result << LG_PAGE));
75132-}
75133-
75134-void
75135-hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz) {
75136-	hpdata_assert_consistent(hpdata);
75137-	/* See the comment in reserve. */
75138-	assert(!hpdata->h_in_psset || hpdata->h_updating);
75139-	assert(((uintptr_t)addr & PAGE_MASK) == 0);
75140-	assert((sz & PAGE_MASK) == 0);
75141-	size_t begin = ((uintptr_t)addr - (uintptr_t)hpdata_addr_get(hpdata))
75142-	    >> LG_PAGE;
75143-	assert(begin < HUGEPAGE_PAGES);
75144-	size_t npages = sz >> LG_PAGE;
75145-	size_t old_longest_range = hpdata_longest_free_range_get(hpdata);
75146-
75147-	fb_unset_range(hpdata->active_pages, HUGEPAGE_PAGES, begin, npages);
75148-	/* We might have just created a new, larger range. */
75149-	size_t new_begin = (fb_fls(hpdata->active_pages, HUGEPAGE_PAGES,
75150-	    begin) + 1);
75151-	size_t new_end = fb_ffs(hpdata->active_pages, HUGEPAGE_PAGES,
75152-	    begin + npages - 1);
75153-	size_t new_range_len = new_end - new_begin;
75154-
75155-	if (new_range_len > old_longest_range) {
75156-		hpdata_longest_free_range_set(hpdata, new_range_len);
75157-	}
75158-
75159-	hpdata->h_nactive -= npages;
75160-
75161-	hpdata_assert_consistent(hpdata);
75162-}
75163-
75164-size_t
75165-hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state) {
75166-	hpdata_assert_consistent(hpdata);
75167-	/*
75168-	 * See the comment below; we might purge any inactive extent, so it's
75169-	 * unsafe for any other thread to turn any inactive extent active while
75170-	 * we're operating on it.
75171-	 */
75172-	assert(!hpdata_alloc_allowed_get(hpdata));
75173-
75174-	purge_state->npurged = 0;
75175-	purge_state->next_purge_search_begin = 0;
75176-
75177-	/*
75178-	 * Initialize to_purge.
75179-	 *
75180-	 * It's possible to end up in situations where two dirty extents are
75181-	 * separated by a retained extent:
75182-	 * - 1 page allocated.
75183-	 * - 1 page allocated.
75184-	 * - 1 page allocated.
75185-	 *
75186-	 * If the middle page is freed and purged, and then the first and third
75187-	 * pages are freed, and then another purge pass happens, the hpdata
75188-	 * looks like this:
75189-	 * - 1 page dirty.
75190-	 * - 1 page retained.
75191-	 * - 1 page dirty.
75192-	 *
75193-	 * But it's safe to do a single 3-page purge.
75194-	 *
75195-	 * We do this by first computing the dirty pages, and then filling in
75196-	 * any gaps by extending each range in the dirty bitmap until
75197-	 * the next active page.  This purges more pages, but the expensive part
75198-	 * of purging is the TLB shootdowns, rather than the kernel state
75199-	 * tracking; doing a little bit more of the latter is fine if it saves
75200-	 * us from doing some of the former.
75201-	 */
75202-
75203-	/*
75204-	 * The dirty pages are those that are touched but not active.  Note that
75205-	 * in a normal-ish case, HUGEPAGE_PAGES is something like 512 and the
75206-	 * fb_group_t is 64 bits, so this is 64 bytes, spread across 8
75207-	 * fb_group_ts.
75208-	 */
75209-	fb_group_t dirty_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
75210-	fb_init(dirty_pages, HUGEPAGE_PAGES);
75211-	fb_bit_not(dirty_pages, hpdata->active_pages, HUGEPAGE_PAGES);
75212-	fb_bit_and(dirty_pages, dirty_pages, hpdata->touched_pages,
75213-	    HUGEPAGE_PAGES);
75214-
75215-	fb_init(purge_state->to_purge, HUGEPAGE_PAGES);
75216-	size_t next_bit = 0;
75217-	while (next_bit < HUGEPAGE_PAGES) {
75218-		size_t next_dirty = fb_ffs(dirty_pages, HUGEPAGE_PAGES,
75219-		    next_bit);
75220-		/* Recall that fb_ffs returns nbits if no set bit is found. */
75221-		if (next_dirty == HUGEPAGE_PAGES) {
75222-			break;
75223-		}
75224-		size_t next_active = fb_ffs(hpdata->active_pages,
75225-		    HUGEPAGE_PAGES, next_dirty);
75226-		/*
75227-		 * Don't purge past the end of the dirty extent, into retained
75228-		 * pages.  This helps the kernel a tiny bit, but honestly it's
75229-		 * mostly helpful for testing (where we tend to write test cases
75230-		 * that think in terms of the dirty ranges).
75231-		 */
75232-		ssize_t last_dirty = fb_fls(dirty_pages, HUGEPAGE_PAGES,
75233-		    next_active - 1);
75234-		assert(last_dirty >= 0);
75235-		assert((size_t)last_dirty >= next_dirty);
75236-		assert((size_t)last_dirty - next_dirty + 1 <= HUGEPAGE_PAGES);
75237-
75238-		fb_set_range(purge_state->to_purge, HUGEPAGE_PAGES, next_dirty,
75239-		    last_dirty - next_dirty + 1);
75240-		next_bit = next_active + 1;
75241-	}
75242-
75243-	/* We should purge, at least, everything dirty. */
75244-	size_t ndirty = hpdata->h_ntouched - hpdata->h_nactive;
75245-	purge_state->ndirty_to_purge = ndirty;
75246-	assert(ndirty <= fb_scount(
75247-	    purge_state->to_purge, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES));
75248-	assert(ndirty == fb_scount(dirty_pages, HUGEPAGE_PAGES, 0,
75249-	    HUGEPAGE_PAGES));
75250-
75251-	hpdata_assert_consistent(hpdata);
75252-
75253-	return ndirty;
75254-}
75255-
75256-bool
75257-hpdata_purge_next(hpdata_t *hpdata, hpdata_purge_state_t *purge_state,
75258-    void **r_purge_addr, size_t *r_purge_size) {
75259-	/*
75260-	 * Note that we don't have a consistency check here; we're accessing
75261-	 * hpdata without synchronization, and therefore have no right to expect
75262-	 * a consistent state.
75263-	 */
75264-	assert(!hpdata_alloc_allowed_get(hpdata));
75265-
75266-	if (purge_state->next_purge_search_begin == HUGEPAGE_PAGES) {
75267-		return false;
75268-	}
75269-	size_t purge_begin;
75270-	size_t purge_len;
75271-	bool found_range = fb_srange_iter(purge_state->to_purge, HUGEPAGE_PAGES,
75272-	    purge_state->next_purge_search_begin, &purge_begin, &purge_len);
75273-	if (!found_range) {
75274-		return false;
75275-	}
75276-
75277-	*r_purge_addr = (void *)(
75278-	    (uintptr_t)hpdata_addr_get(hpdata) + purge_begin * PAGE);
75279-	*r_purge_size = purge_len * PAGE;
75280-
75281-	purge_state->next_purge_search_begin = purge_begin + purge_len;
75282-	purge_state->npurged += purge_len;
75283-	assert(purge_state->npurged <= HUGEPAGE_PAGES);
75284-
75285-	return true;
75286-}
75287-
75288-void
75289-hpdata_purge_end(hpdata_t *hpdata, hpdata_purge_state_t *purge_state) {
75290-	assert(!hpdata_alloc_allowed_get(hpdata));
75291-	hpdata_assert_consistent(hpdata);
75292-	/* See the comment in reserve. */
75293-	assert(!hpdata->h_in_psset || hpdata->h_updating);
75294-
75295-	assert(purge_state->npurged == fb_scount(purge_state->to_purge,
75296-	    HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES));
75297-	assert(purge_state->npurged >= purge_state->ndirty_to_purge);
75298-
75299-	fb_bit_not(purge_state->to_purge, purge_state->to_purge,
75300-	    HUGEPAGE_PAGES);
75301-	fb_bit_and(hpdata->touched_pages, hpdata->touched_pages,
75302-	    purge_state->to_purge, HUGEPAGE_PAGES);
75303-	assert(hpdata->h_ntouched >= purge_state->ndirty_to_purge);
75304-	hpdata->h_ntouched -= purge_state->ndirty_to_purge;
75305-
75306-	hpdata_assert_consistent(hpdata);
75307-}
75308-
75309-void
75310-hpdata_hugify(hpdata_t *hpdata) {
75311-	hpdata_assert_consistent(hpdata);
75312-	hpdata->h_huge = true;
75313-	fb_set_range(hpdata->touched_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES);
75314-	hpdata->h_ntouched = HUGEPAGE_PAGES;
75315-	hpdata_assert_consistent(hpdata);
75316-}
75317-
75318-void
75319-hpdata_dehugify(hpdata_t *hpdata) {
75320-	hpdata_assert_consistent(hpdata);
75321-	hpdata->h_huge = false;
75322-	hpdata_assert_consistent(hpdata);
75323-}
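
hpdata_reserve_alloc() above does a first-fit scan over the slab's active-pages bitmap: take the first free run long enough for the request and mark it active.  The sketch below shows the same scan in standard C over a plain bool array instead of jemalloc's fb bitmap; SLAB_PAGES, first_fit_reserve, and the example layout in main() are invented for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define SLAB_PAGES 16

/* Return the start index of the first free run of >= npages pages, marking
 * it active, or (size_t)-1 if no run is long enough. */
static size_t
first_fit_reserve(bool active[SLAB_PAGES], size_t npages) {
	size_t run_begin = 0;
	size_t run_len = 0;
	for (size_t i = 0; i < SLAB_PAGES; i++) {
		if (active[i]) {
			run_begin = i + 1;  /* Run broken; restart after i. */
			run_len = 0;
			continue;
		}
		run_len++;
		if (run_len == npages) {
			for (size_t j = run_begin; j < run_begin + npages; j++) {
				active[j] = true;  /* Reserve the run. */
			}
			return run_begin;
		}
	}
	return (size_t)-1;
}

int
main(void) {
	bool active[SLAB_PAGES] = { true, true, false, false, true, false,
	    false, false };
	/* First free run of length 3 starts at index 5. */
	printf("%zu\n", first_fit_reserve(active, 3));
	return 0;
}
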
75324diff --git a/jemalloc/src/inspect.c b/jemalloc/src/inspect.c
75325deleted file mode 100644
75326index 911b5d5..0000000
75327--- a/jemalloc/src/inspect.c
75328+++ /dev/null
75329@@ -1,77 +0,0 @@
75330-#include "jemalloc/internal/jemalloc_preamble.h"
75331-#include "jemalloc/internal/jemalloc_internal_includes.h"
75332-
75333-void
75334-inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr, size_t *nfree,
75335-    size_t *nregs, size_t *size) {
75336-	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
75337-
75338-	const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
75339-	if (unlikely(edata == NULL)) {
75340-		*nfree = *nregs = *size = 0;
75341-		return;
75342-	}
75343-
75344-	*size = edata_size_get(edata);
75345-	if (!edata_slab_get(edata)) {
75346-		*nfree = 0;
75347-		*nregs = 1;
75348-	} else {
75349-		*nfree = edata_nfree_get(edata);
75350-		*nregs = bin_infos[edata_szind_get(edata)].nregs;
75351-		assert(*nfree <= *nregs);
75352-		assert(*nfree * edata_usize_get(edata) <= *size);
75353-	}
75354-}
75355-
75356-void
75357-inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
75358-    size_t *nfree, size_t *nregs, size_t *size, size_t *bin_nfree,
75359-    size_t *bin_nregs, void **slabcur_addr) {
75360-	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
75361-	    && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);
75362-
75363-	const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
75364-	if (unlikely(edata == NULL)) {
75365-		*nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
75366-		*slabcur_addr = NULL;
75367-		return;
75368-	}
75369-
75370-	*size = edata_size_get(edata);
75371-	if (!edata_slab_get(edata)) {
75372-		*nfree = *bin_nfree = *bin_nregs = 0;
75373-		*nregs = 1;
75374-		*slabcur_addr = NULL;
75375-		return;
75376-	}
75377-
75378-	*nfree = edata_nfree_get(edata);
75379-	const szind_t szind = edata_szind_get(edata);
75380-	*nregs = bin_infos[szind].nregs;
75381-	assert(*nfree <= *nregs);
75382-	assert(*nfree * edata_usize_get(edata) <= *size);
75383-
75384-	arena_t *arena = (arena_t *)atomic_load_p(
75385-	    &arenas[edata_arena_ind_get(edata)], ATOMIC_RELAXED);
75386-	assert(arena != NULL);
75387-	const unsigned binshard = edata_binshard_get(edata);
75388-	bin_t *bin = arena_get_bin(arena, szind, binshard);
75389-
75390-	malloc_mutex_lock(tsdn, &bin->lock);
75391-	if (config_stats) {
75392-		*bin_nregs = *nregs * bin->stats.curslabs;
75393-		assert(*bin_nregs >= bin->stats.curregs);
75394-		*bin_nfree = *bin_nregs - bin->stats.curregs;
75395-	} else {
75396-		*bin_nfree = *bin_nregs = 0;
75397-	}
75398-	edata_t *slab;
75399-	if (bin->slabcur != NULL) {
75400-		slab = bin->slabcur;
75401-	} else {
75402-		slab = edata_heap_first(&bin->slabs_nonfull);
75403-	}
75404-	*slabcur_addr = slab != NULL ? edata_addr_get(slab) : NULL;
75405-	malloc_mutex_unlock(tsdn, &bin->lock);
75406-}
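
The verbose inspect path above derives bin-wide utilization from three counters: regions per slab, the current slab count, and the regions currently allocated in the bin.  A minimal worked example of that arithmetic, with hypothetical numbers and invented names (not jemalloc's bin stats), follows.

#include <stddef.h>
#include <stdio.h>

int
main(void) {
	/* Hypothetical example values. */
	size_t nregs_per_slab = 512;  /* regions per slab for the size class */
	size_t curslabs = 3;          /* slabs currently backing the bin */
	size_t curregs = 1000;        /* regions currently allocated */

	size_t bin_nregs = nregs_per_slab * curslabs;  /* 1536 total regions */
	size_t bin_nfree = bin_nregs - curregs;        /* 536 free regions */
	printf("bin_nregs=%zu bin_nfree=%zu\n", bin_nregs, bin_nfree);
	return 0;
}
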
75407diff --git a/jemalloc/src/jemalloc.c b/jemalloc/src/jemalloc.c
75408deleted file mode 100644
75409index 7655de4..0000000
75410--- a/jemalloc/src/jemalloc.c
75411+++ /dev/null
75412@@ -1,4476 +0,0 @@
75413-#define JEMALLOC_C_
75414-#include "jemalloc/internal/jemalloc_preamble.h"
75415-#include "jemalloc/internal/jemalloc_internal_includes.h"
75416-
75417-#include "jemalloc/internal/assert.h"
75418-#include "jemalloc/internal/atomic.h"
75419-#include "jemalloc/internal/buf_writer.h"
75420-#include "jemalloc/internal/ctl.h"
75421-#include "jemalloc/internal/emap.h"
75422-#include "jemalloc/internal/extent_dss.h"
75423-#include "jemalloc/internal/extent_mmap.h"
75424-#include "jemalloc/internal/fxp.h"
75425-#include "jemalloc/internal/san.h"
75426-#include "jemalloc/internal/hook.h"
75427-#include "jemalloc/internal/jemalloc_internal_types.h"
75428-#include "jemalloc/internal/log.h"
75429-#include "jemalloc/internal/malloc_io.h"
75430-#include "jemalloc/internal/mutex.h"
75431-#include "jemalloc/internal/nstime.h"
75432-#include "jemalloc/internal/rtree.h"
75433-#include "jemalloc/internal/safety_check.h"
75434-#include "jemalloc/internal/sc.h"
75435-#include "jemalloc/internal/spin.h"
75436-#include "jemalloc/internal/sz.h"
75437-#include "jemalloc/internal/ticker.h"
75438-#include "jemalloc/internal/thread_event.h"
75439-#include "jemalloc/internal/util.h"
75440-
75441-/******************************************************************************/
75442-/* Data. */
75443-
75444-/* Runtime configuration options. */
75445-const char	*je_malloc_conf
75446-#ifndef _WIN32
75447-    JEMALLOC_ATTR(weak)
75448-#endif
75449-    ;
75450-/*
75451- * The usual rule is that the closer to runtime you are, the higher priority
75452- * your configuration settings are (so the jemalloc config options get lower
75453- * priority than the per-binary setting, which gets lower priority than the /etc
75454- * setting, which gets lower priority than the environment settings).
75455- *
75456- * But it's a fairly common use case in some testing environments for a user to
75457- * be able to control the binary, but nothing else (e.g. a performance canary
75458- * uses the production OS and environment variables, but can run any binary in
75459- * those circumstances).  For these use cases, it's handy to have an in-binary
75460- * mechanism for overriding environment variable settings, with the idea that if
75461- * the results are positive they get promoted to the official settings, and
75462- * moved from the binary to the environment variable.
75463- *
75464- * We don't actually want this to be widespread, so we'll give it a silly name
75465- * and not mention it in headers or documentation.
75466- */
75467-const char	*je_malloc_conf_2_conf_harder
75468-#ifndef _WIN32
75469-    JEMALLOC_ATTR(weak)
75470-#endif
75471-    ;
75472-
75473-bool	opt_abort =
75474-#ifdef JEMALLOC_DEBUG
75475-    true
75476-#else
75477-    false
75478-#endif
75479-    ;
75480-bool	opt_abort_conf =
75481-#ifdef JEMALLOC_DEBUG
75482-    true
75483-#else
75484-    false
75485-#endif
75486-    ;
75487-/* Intentionally default off, even with debug builds. */
75488-bool	opt_confirm_conf = false;
75489-const char	*opt_junk =
75490-#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
75491-    "true"
75492-#else
75493-    "false"
75494-#endif
75495-    ;
75496-bool	opt_junk_alloc =
75497-#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
75498-    true
75499-#else
75500-    false
75501-#endif
75502-    ;
75503-bool	opt_junk_free =
75504-#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
75505-    true
75506-#else
75507-    false
75508-#endif
75509-    ;
75510-bool	opt_trust_madvise =
75511-#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
75512-    false
75513-#else
75514-    true
75515-#endif
75516-    ;
75517-
75518-bool opt_cache_oblivious =
75519-#ifdef JEMALLOC_CACHE_OBLIVIOUS
75520-    true
75521-#else
75522-    false
75523-#endif
75524-    ;
75525-
75526-zero_realloc_action_t opt_zero_realloc_action =
75527-#ifdef JEMALLOC_ZERO_REALLOC_DEFAULT_FREE
75528-    zero_realloc_action_free
75529-#else
75530-    zero_realloc_action_alloc
75531-#endif
75532-    ;
75533-
75534-atomic_zu_t zero_realloc_count = ATOMIC_INIT(0);
75535-
75536-const char *zero_realloc_mode_names[] = {
75537-	"alloc",
75538-	"free",
75539-	"abort",
75540-};
75541-
75542-/*
75543- * These are the documented values for junk fill debugging facilities -- see the
75544- * man page.
75545- */
75546-static const uint8_t junk_alloc_byte = 0xa5;
75547-static const uint8_t junk_free_byte = 0x5a;
75548-
75549-static void default_junk_alloc(void *ptr, size_t usize) {
75550-	memset(ptr, junk_alloc_byte, usize);
75551-}
75552-
75553-static void default_junk_free(void *ptr, size_t usize) {
75554-	memset(ptr, junk_free_byte, usize);
75555-}
75556-
75557-void (*junk_alloc_callback)(void *ptr, size_t size) = &default_junk_alloc;
75558-void (*junk_free_callback)(void *ptr, size_t size) = &default_junk_free;
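
A minimal sketch of how these hooks can be repointed by code with access to the internals (illustrative only; the defaults simply memset() with the documented bytes above):

    /* Log each allocation-side junk fill, then apply the usual 0xa5 pattern. */
    static void logging_junk_alloc(void *ptr, size_t usize) {
    	malloc_printf("<jemalloc>: junking %zu bytes at %p\n", usize, ptr);
    	memset(ptr, junk_alloc_byte, usize);
    }
    /* During setup: junk_alloc_callback = &logging_junk_alloc; */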
75559-
75560-bool	opt_utrace = false;
75561-bool	opt_xmalloc = false;
75562-bool	opt_experimental_infallible_new = false;
75563-bool	opt_zero = false;
75564-unsigned	opt_narenas = 0;
75565-fxp_t		opt_narenas_ratio = FXP_INIT_INT(4);
75566-
75567-unsigned	ncpus;
75568-
75569-/* Protects arenas initialization. */
75570-malloc_mutex_t arenas_lock;
75571-
75572-/* The global hpa, and whether it's on. */
75573-bool opt_hpa = false;
75574-hpa_shard_opts_t opt_hpa_opts = HPA_SHARD_OPTS_DEFAULT;
75575-sec_opts_t opt_hpa_sec_opts = SEC_OPTS_DEFAULT;
75576-
75577-/*
75578- * Arenas that are used to service external requests.  Not all elements of the
75579- * arenas array are necessarily used; arenas are created lazily as needed.
75580- *
75581- * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
75582- * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
75583- * takes some action to create them and allocate from them.
75584- *
75585- * Each element points to an arena_t.
75586- */
75587-JEMALLOC_ALIGNED(CACHELINE)
75588-atomic_p_t		arenas[MALLOCX_ARENA_LIMIT];
75589-static atomic_u_t	narenas_total; /* Use narenas_total_*(). */
75590-/* Below three are read-only after initialization. */
75591-static arena_t		*a0; /* arenas[0]. */
75592-unsigned		narenas_auto;
75593-unsigned		manual_arena_base;
75594-
75595-malloc_init_t malloc_init_state = malloc_init_uninitialized;
75596-
75597-/* False should be the common case.  Set to true to trigger initialization. */
75598-bool			malloc_slow = true;
75599-
75600-/* When malloc_slow is true, set the corresponding bits for sanity check. */
75601-enum {
75602-	flag_opt_junk_alloc	= (1U),
75603-	flag_opt_junk_free	= (1U << 1),
75604-	flag_opt_zero		= (1U << 2),
75605-	flag_opt_utrace		= (1U << 3),
75606-	flag_opt_xmalloc	= (1U << 4)
75607-};
75608-static uint8_t	malloc_slow_flags;
75609-
75610-#ifdef JEMALLOC_THREADED_INIT
75611-/* Used to let the initializing thread recursively allocate. */
75612-#  define NO_INITIALIZER	((unsigned long)0)
75613-#  define INITIALIZER		pthread_self()
75614-#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
75615-static pthread_t		malloc_initializer = NO_INITIALIZER;
75616-#else
75617-#  define NO_INITIALIZER	false
75618-#  define INITIALIZER		true
75619-#  define IS_INITIALIZER	malloc_initializer
75620-static bool			malloc_initializer = NO_INITIALIZER;
75621-#endif
75622-
75623-/* Used to avoid initialization races. */
75624-#ifdef _WIN32
75625-#if _WIN32_WINNT >= 0x0600
75626-static malloc_mutex_t	init_lock = SRWLOCK_INIT;
75627-#else
75628-static malloc_mutex_t	init_lock;
75629-static bool init_lock_initialized = false;
75630-
75631-JEMALLOC_ATTR(constructor)
75632-static void WINAPI
75633-_init_init_lock(void) {
75634-	/*
75635-	 * If another constructor in the same binary is using mallctl to e.g.
75636-	 * set up extent hooks, it may end up running before this one, and
75637-	 * malloc_init_hard will crash trying to lock the uninitialized lock. So
75638-	 * we force an initialization of the lock in malloc_init_hard as well.
75639-	 * We don't worry about atomicity of accesses to the
75640-	 * init_lock_initialized boolean, since it really only matters early in
75641-	 * the process creation, before any separate thread normally starts
75642-	 * doing anything.
75643-	 */
75644-	if (!init_lock_initialized) {
75645-		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
75646-		    malloc_mutex_rank_exclusive);
75647-	}
75648-	init_lock_initialized = true;
75649-}
75650-
75651-#ifdef _MSC_VER
75652-#  pragma section(".CRT$XCU", read)
75653-JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
75654-static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
75655-#endif
75656-#endif
75657-#else
75658-static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
75659-#endif
75660-
75661-typedef struct {
75662-	void	*p;	/* Input pointer (as in realloc(p, s)). */
75663-	size_t	s;	/* Request size. */
75664-	void	*r;	/* Result pointer. */
75665-} malloc_utrace_t;
75666-
75667-#ifdef JEMALLOC_UTRACE
75668-#  define UTRACE(a, b, c) do {						\
75669-	if (unlikely(opt_utrace)) {					\
75670-		int utrace_serrno = errno;				\
75671-		malloc_utrace_t ut;					\
75672-		ut.p = (a);						\
75673-		ut.s = (b);						\
75674-		ut.r = (c);						\
75675-		UTRACE_CALL(&ut, sizeof(ut));				\
75676-		errno = utrace_serrno;					\
75677-	}								\
75678-} while (0)
75679-#else
75680-#  define UTRACE(a, b, c)
75681-#endif
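
A hedged illustration of how call sites use this macro (mirroring the malloc/realloc/free paths); the errno save/restore inside the macro keeps tracing from perturbing the caller:

    UTRACE(NULL, size, ret);	/* malloc-style: no input pointer. */
    UTRACE(ptr, size, ret);	/* realloc-style: old ptr, new size, result. */
    UTRACE(ptr, 0, NULL);	/* free-style: no size, no result. */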
75682-
75683-/* Whether any invalid config options were encountered. */
75684-static bool had_conf_error = false;
75685-
75686-/******************************************************************************/
75687-/*
75688- * Function prototypes for static functions that are referenced prior to
75689- * definition.
75690- */
75691-
75692-static bool	malloc_init_hard_a0(void);
75693-static bool	malloc_init_hard(void);
75694-
75695-/******************************************************************************/
75696-/*
75697- * Begin miscellaneous support functions.
75698- */
75699-
75700-JEMALLOC_ALWAYS_INLINE bool
75701-malloc_init_a0(void) {
75702-	if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
75703-		return malloc_init_hard_a0();
75704-	}
75705-	return false;
75706-}
75707-
75708-JEMALLOC_ALWAYS_INLINE bool
75709-malloc_init(void) {
75710-	if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
75711-		return true;
75712-	}
75713-	return false;
75714-}
75715-
75716-/*
75717- * The a0*() functions are used instead of i{d,}alloc() in situations that
75718- * cannot tolerate TLS variable access.
75719- */
75720-
75721-static void *
75722-a0ialloc(size_t size, bool zero, bool is_internal) {
75723-	if (unlikely(malloc_init_a0())) {
75724-		return NULL;
75725-	}
75726-
75727-	return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
75728-	    is_internal, arena_get(TSDN_NULL, 0, true), true);
75729-}
75730-
75731-static void
75732-a0idalloc(void *ptr, bool is_internal) {
75733-	idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
75734-}
75735-
75736-void *
75737-a0malloc(size_t size) {
75738-	return a0ialloc(size, false, true);
75739-}
75740-
75741-void
75742-a0dalloc(void *ptr) {
75743-	a0idalloc(ptr, true);
75744-}
75745-
75746-/*
75747- * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
75748- * situations that cannot tolerate TLS variable access (TLS allocation and very
75749- * early internal data structure initialization).
75750- */
75751-
75752-void *
75753-bootstrap_malloc(size_t size) {
75754-	if (unlikely(size == 0)) {
75755-		size = 1;
75756-	}
75757-
75758-	return a0ialloc(size, false, false);
75759-}
75760-
75761-void *
75762-bootstrap_calloc(size_t num, size_t size) {
75763-	size_t num_size;
75764-
75765-	num_size = num * size;
75766-	if (unlikely(num_size == 0)) {
75767-		assert(num == 0 || size == 0);
75768-		num_size = 1;
75769-	}
75770-
75771-	return a0ialloc(num_size, true, false);
75772-}
75773-
75774-void
75775-bootstrap_free(void *ptr) {
75776-	if (unlikely(ptr == NULL)) {
75777-		return;
75778-	}
75779-
75780-	a0idalloc(ptr, false);
75781-}
75782-
75783-void
75784-arena_set(unsigned ind, arena_t *arena) {
75785-	atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
75786-}
75787-
75788-static void
75789-narenas_total_set(unsigned narenas) {
75790-	atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
75791-}
75792-
75793-static void
75794-narenas_total_inc(void) {
75795-	atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
75796-}
75797-
75798-unsigned
75799-narenas_total_get(void) {
75800-	return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
75801-}
75802-
75803-/* Create a new arena and insert it into the arenas array at index ind. */
75804-static arena_t *
75805-arena_init_locked(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
75806-	arena_t *arena;
75807-
75808-	assert(ind <= narenas_total_get());
75809-	if (ind >= MALLOCX_ARENA_LIMIT) {
75810-		return NULL;
75811-	}
75812-	if (ind == narenas_total_get()) {
75813-		narenas_total_inc();
75814-	}
75815-
75816-	/*
75817-	 * Another thread may have already initialized arenas[ind] if it's an
75818-	 * auto arena.
75819-	 */
75820-	arena = arena_get(tsdn, ind, false);
75821-	if (arena != NULL) {
75822-		assert(arena_is_auto(arena));
75823-		return arena;
75824-	}
75825-
75826-	/* Actually initialize the arena. */
75827-	arena = arena_new(tsdn, ind, config);
75828-
75829-	return arena;
75830-}
75831-
75832-static void
75833-arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
75834-	if (ind == 0) {
75835-		return;
75836-	}
75837-	/*
75838-	 * Avoid creating a new background thread just for the huge arena, which
75839-	 * purges eagerly by default.
75840-	 */
75841-	if (have_background_thread && !arena_is_huge(ind)) {
75842-		if (background_thread_create(tsdn_tsd(tsdn), ind)) {
75843-			malloc_printf("<jemalloc>: error in background thread "
75844-				      "creation for arena %u. Abort.\n", ind);
75845-			abort();
75846-		}
75847-	}
75848-}
75849-
75850-arena_t *
75851-arena_init(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
75852-	arena_t *arena;
75853-
75854-	malloc_mutex_lock(tsdn, &arenas_lock);
75855-	arena = arena_init_locked(tsdn, ind, config);
75856-	malloc_mutex_unlock(tsdn, &arenas_lock);
75857-
75858-	arena_new_create_background_thread(tsdn, ind);
75859-
75860-	return arena;
75861-}
75862-
75863-static void
75864-arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
75865-	arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
75866-	arena_nthreads_inc(arena, internal);
75867-
75868-	if (internal) {
75869-		tsd_iarena_set(tsd, arena);
75870-	} else {
75871-		tsd_arena_set(tsd, arena);
75872-		unsigned shard = atomic_fetch_add_u(&arena->binshard_next, 1,
75873-		    ATOMIC_RELAXED);
75874-		tsd_binshards_t *bins = tsd_binshardsp_get(tsd);
75875-		for (unsigned i = 0; i < SC_NBINS; i++) {
75876-			assert(bin_infos[i].n_shards > 0 &&
75877-			    bin_infos[i].n_shards <= BIN_SHARDS_MAX);
75878-			bins->binshard[i] = shard % bin_infos[i].n_shards;
75879-		}
75880-	}
75881-}
75882-
75883-void
75884-arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena) {
75885-	assert(oldarena != NULL);
75886-	assert(newarena != NULL);
75887-
75888-	arena_nthreads_dec(oldarena, false);
75889-	arena_nthreads_inc(newarena, false);
75890-	tsd_arena_set(tsd, newarena);
75891-
75892-	if (arena_nthreads_get(oldarena, false) == 0) {
75893-		/* Purge if the old arena has no associated threads anymore. */
75894-		arena_decay(tsd_tsdn(tsd), oldarena,
75895-		    /* is_background_thread */ false, /* all */ true);
75896-	}
75897-}
75898-
75899-static void
75900-arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
75901-	arena_t *arena;
75902-
75903-	arena = arena_get(tsd_tsdn(tsd), ind, false);
75904-	arena_nthreads_dec(arena, internal);
75905-
75906-	if (internal) {
75907-		tsd_iarena_set(tsd, NULL);
75908-	} else {
75909-		tsd_arena_set(tsd, NULL);
75910-	}
75911-}
75912-
75913-/* Slow path, called only by arena_choose(). */
75914-arena_t *
75915-arena_choose_hard(tsd_t *tsd, bool internal) {
75916-	arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
75917-
75918-	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
75919-		unsigned choose = percpu_arena_choose();
75920-		ret = arena_get(tsd_tsdn(tsd), choose, true);
75921-		assert(ret != NULL);
75922-		arena_bind(tsd, arena_ind_get(ret), false);
75923-		arena_bind(tsd, arena_ind_get(ret), true);
75924-
75925-		return ret;
75926-	}
75927-
75928-	if (narenas_auto > 1) {
75929-		unsigned i, j, choose[2], first_null;
75930-		bool is_new_arena[2];
75931-
75932-		/*
75933-		 * Determine binding for both non-internal and internal
75934-		 * allocation.
75935-		 *
75936-		 *   choose[0]: For application allocation.
75937-		 *   choose[1]: For internal metadata allocation.
75938-		 */
75939-
75940-		for (j = 0; j < 2; j++) {
75941-			choose[j] = 0;
75942-			is_new_arena[j] = false;
75943-		}
75944-
75945-		first_null = narenas_auto;
75946-		malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
75947-		assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
75948-		for (i = 1; i < narenas_auto; i++) {
75949-			if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
75950-				/*
75951-				 * Choose the first arena that has the lowest
75952-				 * number of threads assigned to it.
75953-				 */
75954-				for (j = 0; j < 2; j++) {
75955-					if (arena_nthreads_get(arena_get(
75956-					    tsd_tsdn(tsd), i, false), !!j) <
75957-					    arena_nthreads_get(arena_get(
75958-					    tsd_tsdn(tsd), choose[j], false),
75959-					    !!j)) {
75960-						choose[j] = i;
75961-					}
75962-				}
75963-			} else if (first_null == narenas_auto) {
75964-				/*
75965-				 * Record the index of the first uninitialized
75966-				 * arena, in case all extant arenas are in use.
75967-				 *
75968-				 * NB: It is possible for there to be
75969-				 * discontinuities in terms of initialized
75970-				 * versus uninitialized arenas, due to the
75971-				 * "thread.arena" mallctl.
75972-				 */
75973-				first_null = i;
75974-			}
75975-		}
75976-
75977-		for (j = 0; j < 2; j++) {
75978-			if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
75979-			    choose[j], false), !!j) == 0 || first_null ==
75980-			    narenas_auto) {
75981-				/*
75982-				 * Use an unloaded arena, or the least loaded
75983-				 * arena if all arenas are already initialized.
75984-				 */
75985-				if (!!j == internal) {
75986-					ret = arena_get(tsd_tsdn(tsd),
75987-					    choose[j], false);
75988-				}
75989-			} else {
75990-				arena_t *arena;
75991-
75992-				/* Initialize a new arena. */
75993-				choose[j] = first_null;
75994-				arena = arena_init_locked(tsd_tsdn(tsd),
75995-				    choose[j], &arena_config_default);
75996-				if (arena == NULL) {
75997-					malloc_mutex_unlock(tsd_tsdn(tsd),
75998-					    &arenas_lock);
75999-					return NULL;
76000-				}
76001-				is_new_arena[j] = true;
76002-				if (!!j == internal) {
76003-					ret = arena;
76004-				}
76005-			}
76006-			arena_bind(tsd, choose[j], !!j);
76007-		}
76008-		malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
76009-
76010-		for (j = 0; j < 2; j++) {
76011-			if (is_new_arena[j]) {
76012-				assert(choose[j] > 0);
76013-				arena_new_create_background_thread(
76014-				    tsd_tsdn(tsd), choose[j]);
76015-			}
76016-		}
76017-
76018-	} else {
76019-		ret = arena_get(tsd_tsdn(tsd), 0, false);
76020-		arena_bind(tsd, 0, false);
76021-		arena_bind(tsd, 0, true);
76022-	}
76023-
76024-	return ret;
76025-}
76026-
76027-void
76028-iarena_cleanup(tsd_t *tsd) {
76029-	arena_t *iarena;
76030-
76031-	iarena = tsd_iarena_get(tsd);
76032-	if (iarena != NULL) {
76033-		arena_unbind(tsd, arena_ind_get(iarena), true);
76034-	}
76035-}
76036-
76037-void
76038-arena_cleanup(tsd_t *tsd) {
76039-	arena_t *arena;
76040-
76041-	arena = tsd_arena_get(tsd);
76042-	if (arena != NULL) {
76043-		arena_unbind(tsd, arena_ind_get(arena), false);
76044-	}
76045-}
76046-
76047-static void
76048-stats_print_atexit(void) {
76049-	if (config_stats) {
76050-		tsdn_t *tsdn;
76051-		unsigned narenas, i;
76052-
76053-		tsdn = tsdn_fetch();
76054-
76055-		/*
76056-		 * Merge stats from extant threads.  This is racy, since
76057-		 * individual threads do not lock when recording tcache stats
76058-		 * events.  As a consequence, the final stats may be slightly
76059-		 * out of date by the time they are reported, if other threads
76060-		 * continue to allocate.
76061-		 */
76062-		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
76063-			arena_t *arena = arena_get(tsdn, i, false);
76064-			if (arena != NULL) {
76065-				tcache_slow_t *tcache_slow;
76066-
76067-				malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
76068-				ql_foreach(tcache_slow, &arena->tcache_ql,
76069-				    link) {
76070-					tcache_stats_merge(tsdn,
76071-					    tcache_slow->tcache, arena);
76072-				}
76073-				malloc_mutex_unlock(tsdn,
76074-				    &arena->tcache_ql_mtx);
76075-			}
76076-		}
76077-	}
76078-	je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
76079-}
76080-
76081-/*
76082- * Ensure that we don't hold any locks upon entry to or exit from allocator
76083- * code (in a "broad" sense that doesn't count a reentrant allocation as an
76084- * entrance or exit).
76085- */
76086-JEMALLOC_ALWAYS_INLINE void
76087-check_entry_exit_locking(tsdn_t *tsdn) {
76088-	if (!config_debug) {
76089-		return;
76090-	}
76091-	if (tsdn_null(tsdn)) {
76092-		return;
76093-	}
76094-	tsd_t *tsd = tsdn_tsd(tsdn);
76095-	/*
76096-	 * It's possible we hold locks at entry/exit if we're in a nested
76097-	 * allocation.
76098-	 */
76099-	int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
76100-	if (reentrancy_level != 0) {
76101-		return;
76102-	}
76103-	witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
76104-}
76105-
76106-/*
76107- * End miscellaneous support functions.
76108- */
76109-/******************************************************************************/
76110-/*
76111- * Begin initialization functions.
76112- */
76113-
76114-static char *
76115-jemalloc_secure_getenv(const char *name) {
76116-#ifdef JEMALLOC_HAVE_SECURE_GETENV
76117-	return secure_getenv(name);
76118-#else
76119-#  ifdef JEMALLOC_HAVE_ISSETUGID
76120-	if (issetugid() != 0) {
76121-		return NULL;
76122-	}
76123-#  endif
76124-	return getenv(name);
76125-#endif
76126-}
76127-
76128-static unsigned
76129-malloc_ncpus(void) {
76130-	long result;
76131-
76132-#ifdef _WIN32
76133-	SYSTEM_INFO si;
76134-	GetSystemInfo(&si);
76135-	result = si.dwNumberOfProcessors;
76136-#elif defined(CPU_COUNT)
76137-	/*
76138-	 * glibc >= 2.6 has the CPU_COUNT macro.
76139-	 *
76140-	 * glibc's sysconf() uses isspace().  glibc allocates for the first time
76141-	 * *before* setting up the isspace tables.  Therefore we need a
76142-	 * different method to get the number of CPUs.
76143-	 *
76144-	 * The getaffinity approach is also preferred when only a subset of CPUs
76145-	 * is available, to avoid using more arenas than necessary.
76146-	 */
76147-	{
76148-#  if defined(__FreeBSD__) || defined(__DragonFly__)
76149-		cpuset_t set;
76150-#  else
76151-		cpu_set_t set;
76152-#  endif
76153-#  if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
76154-		sched_getaffinity(0, sizeof(set), &set);
76155-#  else
76156-		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
76157-#  endif
76158-		result = CPU_COUNT(&set);
76159-	}
76160-#else
76161-	result = sysconf(_SC_NPROCESSORS_ONLN);
76162-#endif
76163-	return ((result == -1) ? 1 : (unsigned)result);
76164-}
76165-
76166-/*
76167- * Ensure that the number of CPUs is deterministic, i.e. it is the same based on:
76168- * - sched_getaffinity()
76169- * - _SC_NPROCESSORS_ONLN
76170- * - _SC_NPROCESSORS_CONF
76171- * Otherwise, tricky things are possible with percpu arenas in use.
76172- */
76173-static bool
76174-malloc_cpu_count_is_deterministic()
76175-{
76176-#ifdef _WIN32
76177-	return true;
76178-#else
76179-	long cpu_onln = sysconf(_SC_NPROCESSORS_ONLN);
76180-	long cpu_conf = sysconf(_SC_NPROCESSORS_CONF);
76181-	if (cpu_onln != cpu_conf) {
76182-		return false;
76183-	}
76184-#  if defined(CPU_COUNT)
76185-#    if defined(__FreeBSD__) || defined(__DragonFly__)
76186-	cpuset_t set;
76187-#    else
76188-	cpu_set_t set;
76189-#    endif /* __FreeBSD__ */
76190-#    if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
76191-	sched_getaffinity(0, sizeof(set), &set);
76192-#    else /* !JEMALLOC_HAVE_SCHED_SETAFFINITY */
76193-	pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
76194-#    endif /* JEMALLOC_HAVE_SCHED_SETAFFINITY */
76195-	long cpu_affinity = CPU_COUNT(&set);
76196-	if (cpu_affinity != cpu_conf) {
76197-		return false;
76198-	}
76199-#  endif /* CPU_COUNT */
76200-	return true;
76201-#endif
76202-}
76203-
76204-static void
76205-init_opt_stats_opts(const char *v, size_t vlen, char *dest) {
76206-	size_t opts_len = strlen(dest);
76207-	assert(opts_len <= stats_print_tot_num_options);
76208-
76209-	for (size_t i = 0; i < vlen; i++) {
76210-		switch (v[i]) {
76211-#define OPTION(o, v, d, s) case o: break;
76212-			STATS_PRINT_OPTIONS
76213-#undef OPTION
76214-		default: continue;
76215-		}
76216-
76217-		if (strchr(dest, v[i]) != NULL) {
76218-			/* Ignore repeated option characters. */
76219-			continue;
76220-		}
76221-
76222-		dest[opts_len++] = v[i];
76223-		dest[opts_len] = '\0';
76224-		assert(opts_len <= stats_print_tot_num_options);
76225-	}
76226-	assert(opts_len == strlen(dest));
76227-}
76228-
76229-/* Reads the next size pair in a multi-sized option. */
76230-static bool
76231-malloc_conf_multi_sizes_next(const char **slab_size_segment_cur,
76232-    size_t *vlen_left, size_t *slab_start, size_t *slab_end, size_t *new_size) {
76233-	const char *cur = *slab_size_segment_cur;
76234-	char *end;
76235-	uintmax_t um;
76236-
76237-	set_errno(0);
76238-
76239-	/* First number, then '-' */
76240-	um = malloc_strtoumax(cur, &end, 0);
76241-	if (get_errno() != 0 || *end != '-') {
76242-		return true;
76243-	}
76244-	*slab_start = (size_t)um;
76245-	cur = end + 1;
76246-
76247-	/* Second number, then ':' */
76248-	um = malloc_strtoumax(cur, &end, 0);
76249-	if (get_errno() != 0 || *end != ':') {
76250-		return true;
76251-	}
76252-	*slab_end = (size_t)um;
76253-	cur = end + 1;
76254-
76255-	/* Last number */
76256-	um = malloc_strtoumax(cur, &end, 0);
76257-	if (get_errno() != 0) {
76258-		return true;
76259-	}
76260-	*new_size = (size_t)um;
76261-
76262-	/* Consume the separator if there is one. */
76263-	if (*end == '|') {
76264-		end++;
76265-	}
76266-
76267-	*vlen_left -= end - *slab_size_segment_cur;
76268-	*slab_size_segment_cur = end;
76269-
76270-	return false;
76271-}
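
An illustrative input for this parser (the format consumed by options such as slab_sizes and bin_shards below):

    /*
     *     "128-512:2|1024-4096:8"
     *
     * The first call yields (*slab_start=128, *slab_end=512, *new_size=2) and
     * consumes the '|' separator; the second yields (1024, 4096, 8) and drives
     * *vlen_left to 0, which ends the caller's loop.
     */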
76272-
76273-static bool
76274-malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
76275-    char const **v_p, size_t *vlen_p) {
76276-	bool accept;
76277-	const char *opts = *opts_p;
76278-
76279-	*k_p = opts;
76280-
76281-	for (accept = false; !accept;) {
76282-		switch (*opts) {
76283-		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
76284-		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
76285-		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
76286-		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
76287-		case 'Y': case 'Z':
76288-		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
76289-		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
76290-		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
76291-		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
76292-		case 'y': case 'z':
76293-		case '0': case '1': case '2': case '3': case '4': case '5':
76294-		case '6': case '7': case '8': case '9':
76295-		case '_':
76296-			opts++;
76297-			break;
76298-		case ':':
76299-			opts++;
76300-			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
76301-			*v_p = opts;
76302-			accept = true;
76303-			break;
76304-		case '\0':
76305-			if (opts != *opts_p) {
76306-				malloc_write("<jemalloc>: Conf string ends "
76307-				    "with key\n");
76308-				had_conf_error = true;
76309-			}
76310-			return true;
76311-		default:
76312-			malloc_write("<jemalloc>: Malformed conf string\n");
76313-			had_conf_error = true;
76314-			return true;
76315-		}
76316-	}
76317-
76318-	for (accept = false; !accept;) {
76319-		switch (*opts) {
76320-		case ',':
76321-			opts++;
76322-			/*
76323-			 * Look ahead one character here, because the next time
76324-			 * this function is called, it will assume that end of
76325-			 * input has been cleanly reached if no input remains,
76326-			 * but we have optimistically already consumed the
76327-			 * comma if one exists.
76328-			 */
76329-			if (*opts == '\0') {
76330-				malloc_write("<jemalloc>: Conf string ends "
76331-				    "with comma\n");
76332-				had_conf_error = true;
76333-			}
76334-			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
76335-			accept = true;
76336-			break;
76337-		case '\0':
76338-			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
76339-			accept = true;
76340-			break;
76341-		default:
76342-			opts++;
76343-			break;
76344-		}
76345-	}
76346-
76347-	*opts_p = opts;
76348-	return false;
76349-}
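
An illustrative options string as this tokenizer sees it (hedged example; the individual option names are handled further below):

    /*
     *     "abort_conf:true,narenas:8,dirty_decay_ms:5000"
     *
     * Successive calls return ("abort_conf","true"), ("narenas","8"), and
     * ("dirty_decay_ms","5000").  A key terminated by end-of-string or an
     * unexpected character stops parsing with a warning; a trailing ',' is
     * warned about but tolerated.
     */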
76350-
76351-static void
76352-malloc_abort_invalid_conf(void) {
76353-	assert(opt_abort_conf);
76354-	malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
76355-	    "value (see above).\n");
76356-	abort();
76357-}
76358-
76359-static void
76360-malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
76361-    size_t vlen) {
76362-	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
76363-	    (int)vlen, v);
76364-	/* If abort_conf is set, error out after processing all options. */
76365-	const char *experimental = "experimental_";
76366-	if (strncmp(k, experimental, strlen(experimental)) == 0) {
76367-		/* However, tolerate experimental features. */
76368-		return;
76369-	}
76370-	had_conf_error = true;
76371-}
76372-
76373-static void
76374-malloc_slow_flag_init(void) {
76375-	/*
76376-	 * Combine the runtime options into malloc_slow for fast path.  Called
76377-	 * after processing all the options.
76378-	 */
76379-	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
76380-	    | (opt_junk_free ? flag_opt_junk_free : 0)
76381-	    | (opt_zero ? flag_opt_zero : 0)
76382-	    | (opt_utrace ? flag_opt_utrace : 0)
76383-	    | (opt_xmalloc ? flag_opt_xmalloc : 0);
76384-
76385-	malloc_slow = (malloc_slow_flags != 0);
76386-}
76387-
76388-/* Number of sources for initializing malloc_conf */
76389-#define MALLOC_CONF_NSOURCES 5
76390-
76391-static const char *
76392-obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) {
76393-	if (config_debug) {
76394-		static unsigned read_source = 0;
76395-		/*
76396-		 * Each source should only be read once, to minimize # of
76397-		 * syscalls on init.
76398-		 */
76399-		assert(read_source++ == which_source);
76400-	}
76401-	assert(which_source < MALLOC_CONF_NSOURCES);
76402-
76403-	const char *ret;
76404-	switch (which_source) {
76405-	case 0:
76406-		ret = config_malloc_conf;
76407-		break;
76408-	case 1:
76409-		if (je_malloc_conf != NULL) {
76410-			/* Use options that were compiled into the program. */
76411-			ret = je_malloc_conf;
76412-		} else {
76413-			/* No configuration specified. */
76414-			ret = NULL;
76415-		}
76416-		break;
76417-	case 2: {
76418-		ssize_t linklen = 0;
76419-#ifndef _WIN32
76420-		int saved_errno = errno;
76421-		const char *linkname =
76422-#  ifdef JEMALLOC_PREFIX
76423-		    "/etc/"JEMALLOC_PREFIX"malloc.conf"
76424-#  else
76425-		    "/etc/malloc.conf"
76426-#  endif
76427-		    ;
76428-
76429-		/*
76430-		 * Try to use the contents of the "/etc/malloc.conf" symbolic
76431-		 * link's name.
76432-		 */
76433-#ifndef JEMALLOC_READLINKAT
76434-		linklen = readlink(linkname, buf, PATH_MAX);
76435-#else
76436-		linklen = readlinkat(AT_FDCWD, linkname, buf, PATH_MAX);
76437-#endif
76438-		if (linklen == -1) {
76439-			/* No configuration specified. */
76440-			linklen = 0;
76441-			/* Restore errno. */
76442-			set_errno(saved_errno);
76443-		}
76444-#endif
76445-		buf[linklen] = '\0';
76446-		ret = buf;
76447-		break;
76448-	} case 3: {
76449-		const char *envname =
76450-#ifdef JEMALLOC_PREFIX
76451-		    JEMALLOC_CPREFIX"MALLOC_CONF"
76452-#else
76453-		    "MALLOC_CONF"
76454-#endif
76455-		    ;
76456-
76457-		if ((ret = jemalloc_secure_getenv(envname)) != NULL) {
76458-			/*
76459-			 * Do nothing; ret already holds the value of the
76460-			 * MALLOC_CONF environment variable.
76461-			 */
76462-		} else {
76463-			/* No configuration specified. */
76464-			ret = NULL;
76465-		}
76466-		break;
76467-	} case 4: {
76468-		ret = je_malloc_conf_2_conf_harder;
76469-		break;
76470-	} default:
76471-		not_reached();
76472-		ret = NULL;
76473-	}
76474-	return ret;
76475-}
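
To make the five sources concrete (illustrative examples only; exact names depend on the configured prefix), they are read in this order, with later sources overriding earlier ones when the same option repeats:

    /*
     *   0: ./configure --with-malloc-conf="background_thread:true"
     *   1: const char *malloc_conf = "narenas:4";            (in the binary)
     *   2: ln -s 'dirty_decay_ms:1000' /etc/malloc.conf      (symlink target)
     *   3: MALLOC_CONF="abort_conf:true" ./app               (environment)
     *   4: const char *malloc_conf_2_conf_harder = "...";    (testing override)
     */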
76476-
76477-static void
76478-malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
76479-    bool initial_call, const char *opts_cache[MALLOC_CONF_NSOURCES],
76480-    char buf[PATH_MAX + 1]) {
76481-	static const char *opts_explain[MALLOC_CONF_NSOURCES] = {
76482-		"string specified via --with-malloc-conf",
76483-		"string pointed to by the global variable malloc_conf",
76484-		"\"name\" of the file referenced by the symbolic link named "
76485-		    "/etc/malloc.conf",
76486-		"value of the environment variable MALLOC_CONF",
76487-		"string pointed to by the global variable "
76488-		    "malloc_conf_2_conf_harder",
76489-	};
76490-	unsigned i;
76491-	const char *opts, *k, *v;
76492-	size_t klen, vlen;
76493-
76494-	for (i = 0; i < MALLOC_CONF_NSOURCES; i++) {
76495-		/* Get runtime configuration. */
76496-		if (initial_call) {
76497-			opts_cache[i] = obtain_malloc_conf(i, buf);
76498-		}
76499-		opts = opts_cache[i];
76500-		if (!initial_call && opt_confirm_conf) {
76501-			malloc_printf(
76502-			    "<jemalloc>: malloc_conf #%u (%s): \"%s\"\n",
76503-			    i + 1, opts_explain[i], opts != NULL ? opts : "");
76504-		}
76505-		if (opts == NULL) {
76506-			continue;
76507-		}
76508-
76509-		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
76510-		    &vlen)) {
76511-
76512-#define CONF_ERROR(msg, k, klen, v, vlen)				\
76513-			if (!initial_call) {				\
76514-				malloc_conf_error(			\
76515-				    msg, k, klen, v, vlen);		\
76516-				cur_opt_valid = false;			\
76517-			}
76518-#define CONF_CONTINUE	{						\
76519-				if (!initial_call && opt_confirm_conf	\
76520-				    && cur_opt_valid) {			\
76521-					malloc_printf("<jemalloc>: -- "	\
76522-					    "Set conf value: %.*s:%.*s"	\
76523-					    "\n", (int)klen, k,		\
76524-					    (int)vlen, v);		\
76525-				}					\
76526-				continue;				\
76527-			}
76528-#define CONF_MATCH(n)							\
76529-	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
76530-#define CONF_MATCH_VALUE(n)						\
76531-	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
76532-#define CONF_HANDLE_BOOL(o, n)						\
76533-			if (CONF_MATCH(n)) {				\
76534-				if (CONF_MATCH_VALUE("true")) {		\
76535-					o = true;			\
76536-				} else if (CONF_MATCH_VALUE("false")) {	\
76537-					o = false;			\
76538-				} else {				\
76539-					CONF_ERROR("Invalid conf value",\
76540-					    k, klen, v, vlen);		\
76541-				}					\
76542-				CONF_CONTINUE;				\
76543-			}
76544-      /*
76545-       * One of the CONF_MIN macros below expands, in one of the use points,
76546-       * to "unsigned integer < 0", which is always false, triggering the
76547-       * GCC -Wtype-limits warning, which we disable here and re-enable below.
76548-       */
76549-      JEMALLOC_DIAGNOSTIC_PUSH
76550-      JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
76551-
76552-#define CONF_DONT_CHECK_MIN(um, min)	false
76553-#define CONF_CHECK_MIN(um, min)	((um) < (min))
76554-#define CONF_DONT_CHECK_MAX(um, max)	false
76555-#define CONF_CHECK_MAX(um, max)	((um) > (max))
76556-
76557-#define CONF_VALUE_READ(max_t, result)					\
76558-	      char *end;						\
76559-	      set_errno(0);						\
76560-	      result = (max_t)malloc_strtoumax(v, &end, 0);
76561-#define CONF_VALUE_READ_FAIL()						\
76562-	      (get_errno() != 0 || (uintptr_t)end - (uintptr_t)v != vlen)
76563-
76564-#define CONF_HANDLE_T(t, max_t, o, n, min, max, check_min, check_max, clip) \
76565-			if (CONF_MATCH(n)) {				\
76566-				max_t mv;				\
76567-				CONF_VALUE_READ(max_t, mv)		\
76568-				if (CONF_VALUE_READ_FAIL()) {		\
76569-					CONF_ERROR("Invalid conf value",\
76570-					    k, klen, v, vlen);		\
76571-				} else if (clip) {			\
76572-					if (check_min(mv, (t)(min))) {	\
76573-						o = (t)(min);		\
76574-					} else if (			\
76575-					    check_max(mv, (t)(max))) {	\
76576-						o = (t)(max);		\
76577-					} else {			\
76578-						o = (t)mv;		\
76579-					}				\
76580-				} else {				\
76581-					if (check_min(mv, (t)(min)) ||	\
76582-					    check_max(mv, (t)(max))) {	\
76583-						CONF_ERROR(		\
76584-						    "Out-of-range "	\
76585-						    "conf value",	\
76586-						    k, klen, v, vlen);	\
76587-					} else {			\
76588-						o = (t)mv;		\
76589-					}				\
76590-				}					\
76591-				CONF_CONTINUE;				\
76592-			}
76593-#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip)	\
76594-	      CONF_HANDLE_T(t, uintmax_t, o, n, min, max, check_min,	\
76595-			    check_max, clip)
76596-#define CONF_HANDLE_T_SIGNED(t, o, n, min, max, check_min, check_max, clip)\
76597-	      CONF_HANDLE_T(t, intmax_t, o, n, min, max, check_min,	\
76598-			    check_max, clip)
76599-
76600-#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max,	\
76601-    clip)								\
76602-			CONF_HANDLE_T_U(unsigned, o, n, min, max,	\
76603-			    check_min, check_max, clip)
76604-#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip)	\
76605-			CONF_HANDLE_T_U(size_t, o, n, min, max,		\
76606-			    check_min, check_max, clip)
76607-#define CONF_HANDLE_INT64_T(o, n, min, max, check_min, check_max, clip)	\
76608-			CONF_HANDLE_T_SIGNED(int64_t, o, n, min, max,	\
76609-			    check_min, check_max, clip)
76610-#define CONF_HANDLE_UINT64_T(o, n, min, max, check_min, check_max, clip)\
76611-			CONF_HANDLE_T_U(uint64_t, o, n, min, max,	\
76612-			    check_min, check_max, clip)
76613-#define CONF_HANDLE_SSIZE_T(o, n, min, max)				\
76614-			CONF_HANDLE_T_SIGNED(ssize_t, o, n, min, max,	\
76615-			    CONF_CHECK_MIN, CONF_CHECK_MAX, false)
76616-#define CONF_HANDLE_CHAR_P(o, n, d)					\
76617-			if (CONF_MATCH(n)) {				\
76618-				size_t cpylen = (vlen <=		\
76619-				    sizeof(o)-1) ? vlen :		\
76620-				    sizeof(o)-1;			\
76621-				strncpy(o, v, cpylen);			\
76622-				o[cpylen] = '\0';			\
76623-				CONF_CONTINUE;				\
76624-			}
76625-
76626-			bool cur_opt_valid = true;
76627-
76628-			CONF_HANDLE_BOOL(opt_confirm_conf, "confirm_conf")
76629-			if (initial_call) {
76630-				continue;
76631-			}
76632-
76633-			CONF_HANDLE_BOOL(opt_abort, "abort")
76634-			CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
76635-			CONF_HANDLE_BOOL(opt_trust_madvise, "trust_madvise")
76636-			if (strncmp("metadata_thp", k, klen) == 0) {
76637-				int m;
76638-				bool match = false;
76639-				for (m = 0; m < metadata_thp_mode_limit; m++) {
76640-					if (strncmp(metadata_thp_mode_names[m],
76641-					    v, vlen) == 0) {
76642-						opt_metadata_thp = m;
76643-						match = true;
76644-						break;
76645-					}
76646-				}
76647-				if (!match) {
76648-					CONF_ERROR("Invalid conf value",
76649-					    k, klen, v, vlen);
76650-				}
76651-				CONF_CONTINUE;
76652-			}
76653-			CONF_HANDLE_BOOL(opt_retain, "retain")
76654-			if (strncmp("dss", k, klen) == 0) {
76655-				int m;
76656-				bool match = false;
76657-				for (m = 0; m < dss_prec_limit; m++) {
76658-					if (strncmp(dss_prec_names[m], v, vlen)
76659-					    == 0) {
76660-						if (extent_dss_prec_set(m)) {
76661-							CONF_ERROR(
76662-							    "Error setting dss",
76663-							    k, klen, v, vlen);
76664-						} else {
76665-							opt_dss =
76666-							    dss_prec_names[m];
76667-							match = true;
76668-							break;
76669-						}
76670-					}
76671-				}
76672-				if (!match) {
76673-					CONF_ERROR("Invalid conf value",
76674-					    k, klen, v, vlen);
76675-				}
76676-				CONF_CONTINUE;
76677-			}
76678-			if (CONF_MATCH("narenas")) {
76679-				if (CONF_MATCH_VALUE("default")) {
76680-					opt_narenas = 0;
76681-					CONF_CONTINUE;
76682-				} else {
76683-					CONF_HANDLE_UNSIGNED(opt_narenas,
76684-					    "narenas", 1, UINT_MAX,
76685-					    CONF_CHECK_MIN, CONF_DONT_CHECK_MAX,
76686-					    /* clip */ false)
76687-				}
76688-			}
76689-			if (CONF_MATCH("narenas_ratio")) {
76690-				char *end;
76691-				bool err = fxp_parse(&opt_narenas_ratio, v,
76692-				    &end);
76693-				if (err || (size_t)(end - v) != vlen) {
76694-					CONF_ERROR("Invalid conf value",
76695-					    k, klen, v, vlen);
76696-				}
76697-				CONF_CONTINUE;
76698-			}
76699-			if (CONF_MATCH("bin_shards")) {
76700-				const char *bin_shards_segment_cur = v;
76701-				size_t vlen_left = vlen;
76702-				do {
76703-					size_t size_start;
76704-					size_t size_end;
76705-					size_t nshards;
76706-					bool err = malloc_conf_multi_sizes_next(
76707-					    &bin_shards_segment_cur, &vlen_left,
76708-					    &size_start, &size_end, &nshards);
76709-					if (err || bin_update_shard_size(
76710-					    bin_shard_sizes, size_start,
76711-					    size_end, nshards)) {
76712-						CONF_ERROR(
76713-						    "Invalid settings for "
76714-						    "bin_shards", k, klen, v,
76715-						    vlen);
76716-						break;
76717-					}
76718-				} while (vlen_left > 0);
76719-				CONF_CONTINUE;
76720-			}
76721-			CONF_HANDLE_INT64_T(opt_mutex_max_spin,
76722-			    "mutex_max_spin", -1, INT64_MAX, CONF_CHECK_MIN,
76723-			    CONF_DONT_CHECK_MAX, false);
76724-			CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
76725-			    "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
76726-			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
76727-			    SSIZE_MAX);
76728-			CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
76729-			    "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
76730-			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
76731-			    SSIZE_MAX);
76732-			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
76733-			if (CONF_MATCH("stats_print_opts")) {
76734-				init_opt_stats_opts(v, vlen,
76735-				    opt_stats_print_opts);
76736-				CONF_CONTINUE;
76737-			}
76738-			CONF_HANDLE_INT64_T(opt_stats_interval,
76739-			    "stats_interval", -1, INT64_MAX,
76740-			    CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
76741-			if (CONF_MATCH("stats_interval_opts")) {
76742-				init_opt_stats_opts(v, vlen,
76743-				    opt_stats_interval_opts);
76744-				CONF_CONTINUE;
76745-			}
76746-			if (config_fill) {
76747-				if (CONF_MATCH("junk")) {
76748-					if (CONF_MATCH_VALUE("true")) {
76749-						opt_junk = "true";
76750-						opt_junk_alloc = opt_junk_free =
76751-						    true;
76752-					} else if (CONF_MATCH_VALUE("false")) {
76753-						opt_junk = "false";
76754-						opt_junk_alloc = opt_junk_free =
76755-						    false;
76756-					} else if (CONF_MATCH_VALUE("alloc")) {
76757-						opt_junk = "alloc";
76758-						opt_junk_alloc = true;
76759-						opt_junk_free = false;
76760-					} else if (CONF_MATCH_VALUE("free")) {
76761-						opt_junk = "free";
76762-						opt_junk_alloc = false;
76763-						opt_junk_free = true;
76764-					} else {
76765-						CONF_ERROR(
76766-						    "Invalid conf value",
76767-						    k, klen, v, vlen);
76768-					}
76769-					CONF_CONTINUE;
76770-				}
76771-				CONF_HANDLE_BOOL(opt_zero, "zero")
76772-			}
76773-			if (config_utrace) {
76774-				CONF_HANDLE_BOOL(opt_utrace, "utrace")
76775-			}
76776-			if (config_xmalloc) {
76777-				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
76778-			}
76779-			if (config_enable_cxx) {
76780-				CONF_HANDLE_BOOL(
76781-				    opt_experimental_infallible_new,
76782-				    "experimental_infallible_new")
76783-			}
76784-
76785-			CONF_HANDLE_BOOL(opt_tcache, "tcache")
76786-			CONF_HANDLE_SIZE_T(opt_tcache_max, "tcache_max",
76787-			    0, TCACHE_MAXCLASS_LIMIT, CONF_DONT_CHECK_MIN,
76788-			    CONF_CHECK_MAX, /* clip */ true)
76789-			if (CONF_MATCH("lg_tcache_max")) {
76790-				size_t m;
76791-				CONF_VALUE_READ(size_t, m)
76792-				if (CONF_VALUE_READ_FAIL()) {
76793-					CONF_ERROR("Invalid conf value",
76794-					    k, klen, v, vlen);
76795-				} else {
76796-					/* clip if necessary */
76797-					if (m > TCACHE_LG_MAXCLASS_LIMIT) {
76798-						m = TCACHE_LG_MAXCLASS_LIMIT;
76799-					}
76800-					opt_tcache_max = (size_t)1 << m;
76801-				}
76802-				CONF_CONTINUE;
76803-			}
76804-			/*
76805-			 * Anyone trying to set a value outside -16 to 16 is
76806-			 * deeply confused.
76807-			 */
76808-			CONF_HANDLE_SSIZE_T(opt_lg_tcache_nslots_mul,
76809-			    "lg_tcache_nslots_mul", -16, 16)
76810-			/* Ditto with values past 2048. */
76811-			CONF_HANDLE_UNSIGNED(opt_tcache_nslots_small_min,
76812-			    "tcache_nslots_small_min", 1, 2048,
76813-			    CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
76814-			CONF_HANDLE_UNSIGNED(opt_tcache_nslots_small_max,
76815-			    "tcache_nslots_small_max", 1, 2048,
76816-			    CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
76817-			CONF_HANDLE_UNSIGNED(opt_tcache_nslots_large,
76818-			    "tcache_nslots_large", 1, 2048,
76819-			    CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
76820-			CONF_HANDLE_SIZE_T(opt_tcache_gc_incr_bytes,
76821-			    "tcache_gc_incr_bytes", 1024, SIZE_T_MAX,
76822-			    CONF_CHECK_MIN, CONF_DONT_CHECK_MAX,
76823-			    /* clip */ true)
76824-			CONF_HANDLE_SIZE_T(opt_tcache_gc_delay_bytes,
76825-			    "tcache_gc_delay_bytes", 0, SIZE_T_MAX,
76826-			    CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX,
76827-			    /* clip */ false)
76828-			CONF_HANDLE_UNSIGNED(opt_lg_tcache_flush_small_div,
76829-			    "lg_tcache_flush_small_div", 1, 16,
76830-			    CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
76831-			CONF_HANDLE_UNSIGNED(opt_lg_tcache_flush_large_div,
76832-			    "lg_tcache_flush_large_div", 1, 16,
76833-			    CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
76834-
76835-			/*
76836-			 * The runtime option of oversize_threshold remains
76837-			 * undocumented.  It may be tweaked in the next major
76838-			 * release (6.0).  The default value 8M is rather
76839-			 * conservative / safe.  Tuning it further down may
76840-			 * improve fragmentation a bit more, but may also cause
76841-			 * contention on the huge arena.
76842-			 */
76843-			CONF_HANDLE_SIZE_T(opt_oversize_threshold,
76844-			    "oversize_threshold", 0, SC_LARGE_MAXCLASS,
76845-			    CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, false)
76846-			CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
76847-			    "lg_extent_max_active_fit", 0,
76848-			    (sizeof(size_t) << 3), CONF_DONT_CHECK_MIN,
76849-			    CONF_CHECK_MAX, false)
76850-
76851-			if (strncmp("percpu_arena", k, klen) == 0) {
76852-				bool match = false;
76853-				for (int m = percpu_arena_mode_names_base; m <
76854-				    percpu_arena_mode_names_limit; m++) {
76855-					if (strncmp(percpu_arena_mode_names[m],
76856-					    v, vlen) == 0) {
76857-						if (!have_percpu_arena) {
76858-							CONF_ERROR(
76859-							    "No getcpu support",
76860-							    k, klen, v, vlen);
76861-						}
76862-						opt_percpu_arena = m;
76863-						match = true;
76864-						break;
76865-					}
76866-				}
76867-				if (!match) {
76868-					CONF_ERROR("Invalid conf value",
76869-					    k, klen, v, vlen);
76870-				}
76871-				CONF_CONTINUE;
76872-			}
76873-			CONF_HANDLE_BOOL(opt_background_thread,
76874-			    "background_thread");
76875-			CONF_HANDLE_SIZE_T(opt_max_background_threads,
76876-					   "max_background_threads", 1,
76877-					   opt_max_background_threads,
76878-					   CONF_CHECK_MIN, CONF_CHECK_MAX,
76879-					   true);
76880-			CONF_HANDLE_BOOL(opt_hpa, "hpa")
76881-			CONF_HANDLE_SIZE_T(opt_hpa_opts.slab_max_alloc,
76882-			    "hpa_slab_max_alloc", PAGE, HUGEPAGE,
76883-			    CONF_CHECK_MIN, CONF_CHECK_MAX, true);
76884-
76885-			/*
76886-			 * Accept either a ratio-based or an exact hugification
76887-			 * threshold.
76888-			 */
76889-			CONF_HANDLE_SIZE_T(opt_hpa_opts.hugification_threshold,
76890-			    "hpa_hugification_threshold", PAGE, HUGEPAGE,
76891-			    CONF_CHECK_MIN, CONF_CHECK_MAX, true);
76892-			if (CONF_MATCH("hpa_hugification_threshold_ratio")) {
76893-				fxp_t ratio;
76894-				char *end;
76895-				bool err = fxp_parse(&ratio, v,
76896-				    &end);
76897-				if (err || (size_t)(end - v) != vlen
76898-				    || ratio > FXP_INIT_INT(1)) {
76899-					CONF_ERROR("Invalid conf value",
76900-					    k, klen, v, vlen);
76901-				} else {
76902-					opt_hpa_opts.hugification_threshold =
76903-					    fxp_mul_frac(HUGEPAGE, ratio);
76904-				}
76905-				CONF_CONTINUE;
76906-			}
76907-
76908-			CONF_HANDLE_UINT64_T(
76909-			    opt_hpa_opts.hugify_delay_ms, "hpa_hugify_delay_ms",
76910-			    0, 0, CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX,
76911-			    false);
76912-
76913-			CONF_HANDLE_UINT64_T(
76914-			    opt_hpa_opts.min_purge_interval_ms,
76915-			    "hpa_min_purge_interval_ms", 0, 0,
76916-			    CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false);
76917-
76918-			if (CONF_MATCH("hpa_dirty_mult")) {
76919-				if (CONF_MATCH_VALUE("-1")) {
76920-					opt_hpa_opts.dirty_mult = (fxp_t)-1;
76921-					CONF_CONTINUE;
76922-				}
76923-				fxp_t ratio;
76924-				char *end;
76925-				bool err = fxp_parse(&ratio, v,
76926-				    &end);
76927-				if (err || (size_t)(end - v) != vlen) {
76928-					CONF_ERROR("Invalid conf value",
76929-					    k, klen, v, vlen);
76930-				} else {
76931-					opt_hpa_opts.dirty_mult = ratio;
76932-				}
76933-				CONF_CONTINUE;
76934-			}
76935-
76936-			CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.nshards,
76937-			    "hpa_sec_nshards", 0, 0, CONF_CHECK_MIN,
76938-			    CONF_DONT_CHECK_MAX, true);
76939-			CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.max_alloc,
76940-			    "hpa_sec_max_alloc", PAGE, 0, CONF_CHECK_MIN,
76941-			    CONF_DONT_CHECK_MAX, true);
76942-			CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.max_bytes,
76943-			    "hpa_sec_max_bytes", PAGE, 0, CONF_CHECK_MIN,
76944-			    CONF_DONT_CHECK_MAX, true);
76945-			CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.bytes_after_flush,
76946-			    "hpa_sec_bytes_after_flush", PAGE, 0,
76947-			    CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, true);
76948-			CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.batch_fill_extra,
76949-			    "hpa_sec_batch_fill_extra", 0, HUGEPAGE_PAGES,
76950-			    CONF_CHECK_MIN, CONF_CHECK_MAX, true);
76951-
76952-			if (CONF_MATCH("slab_sizes")) {
76953-				if (CONF_MATCH_VALUE("default")) {
76954-					sc_data_init(sc_data);
76955-					CONF_CONTINUE;
76956-				}
76957-				bool err;
76958-				const char *slab_size_segment_cur = v;
76959-				size_t vlen_left = vlen;
76960-				do {
76961-					size_t slab_start;
76962-					size_t slab_end;
76963-					size_t pgs;
76964-					err = malloc_conf_multi_sizes_next(
76965-					    &slab_size_segment_cur,
76966-					    &vlen_left, &slab_start, &slab_end,
76967-					    &pgs);
76968-					if (!err) {
76969-						sc_data_update_slab_size(
76970-						    sc_data, slab_start,
76971-						    slab_end, (int)pgs);
76972-					} else {
76973-						CONF_ERROR("Invalid settings "
76974-						    "for slab_sizes",
76975-						    k, klen, v, vlen);
76976-					}
76977-				} while (!err && vlen_left > 0);
76978-				CONF_CONTINUE;
76979-			}
76980-			if (config_prof) {
76981-				CONF_HANDLE_BOOL(opt_prof, "prof")
76982-				CONF_HANDLE_CHAR_P(opt_prof_prefix,
76983-				    "prof_prefix", "jeprof")
76984-				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
76985-				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
76986-				    "prof_thread_active_init")
76987-				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
76988-				    "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
76989-				    - 1, CONF_DONT_CHECK_MIN, CONF_CHECK_MAX,
76990-				    true)
76991-				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
76992-				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
76993-				    "lg_prof_interval", -1,
76994-				    (sizeof(uint64_t) << 3) - 1)
76995-				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
76996-				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
76997-				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
76998-				CONF_HANDLE_BOOL(opt_prof_leak_error,
76999-				    "prof_leak_error")
77000-				CONF_HANDLE_BOOL(opt_prof_log, "prof_log")
77001-				CONF_HANDLE_SSIZE_T(opt_prof_recent_alloc_max,
77002-				    "prof_recent_alloc_max", -1, SSIZE_MAX)
77003-				CONF_HANDLE_BOOL(opt_prof_stats, "prof_stats")
77004-				CONF_HANDLE_BOOL(opt_prof_sys_thread_name,
77005-				    "prof_sys_thread_name")
77006-				if (CONF_MATCH("prof_time_resolution")) {
77007-					if (CONF_MATCH_VALUE("default")) {
77008-						opt_prof_time_res =
77009-						    prof_time_res_default;
77010-					} else if (CONF_MATCH_VALUE("high")) {
77011-						if (!config_high_res_timer) {
77012-							CONF_ERROR(
77013-							    "No high resolution"
77014-							    " timer support",
77015-							    k, klen, v, vlen);
77016-						} else {
77017-							opt_prof_time_res =
77018-							    prof_time_res_high;
77019-						}
77020-					} else {
77021-						CONF_ERROR("Invalid conf value",
77022-						    k, klen, v, vlen);
77023-					}
77024-					CONF_CONTINUE;
77025-				}
77026-				/*
77027-				 * Undocumented.  When set to false, don't
77028-				 * correct for an unbiasing bug in jeprof
77029-				 * attribution.  This can be handy if you want
77030-				 * to get consistent numbers from your binary
77031-				 * across different jemalloc versions, even if
77032-				 * those numbers are incorrect.  The default is
77033-				 * true.
77034-				 */
77035-				CONF_HANDLE_BOOL(opt_prof_unbias, "prof_unbias")
77036-			}
77037-			if (config_log) {
77038-				if (CONF_MATCH("log")) {
77039-					size_t cpylen = (
77040-					    vlen <= sizeof(log_var_names) ?
77041-					    vlen : sizeof(log_var_names) - 1);
77042-					strncpy(log_var_names, v, cpylen);
77043-					log_var_names[cpylen] = '\0';
77044-					CONF_CONTINUE;
77045-				}
77046-			}
77047-			if (CONF_MATCH("thp")) {
77048-				bool match = false;
77049-				for (int m = 0; m < thp_mode_names_limit; m++) {
77050-					if (strncmp(thp_mode_names[m],v, vlen)
77051-					    == 0) {
77052-						if (!have_madvise_huge && !have_memcntl) {
77053-							CONF_ERROR(
77054-							    "No THP support",
77055-							    k, klen, v, vlen);
77056-						}
77057-						opt_thp = m;
77058-						match = true;
77059-						break;
77060-					}
77061-				}
77062-				if (!match) {
77063-					CONF_ERROR("Invalid conf value",
77064-					    k, klen, v, vlen);
77065-				}
77066-				CONF_CONTINUE;
77067-			}
77068-			if (CONF_MATCH("zero_realloc")) {
77069-				if (CONF_MATCH_VALUE("alloc")) {
77070-					opt_zero_realloc_action
77071-					    = zero_realloc_action_alloc;
77072-				} else if (CONF_MATCH_VALUE("free")) {
77073-					opt_zero_realloc_action
77074-					    = zero_realloc_action_free;
77075-				} else if (CONF_MATCH_VALUE("abort")) {
77076-					opt_zero_realloc_action
77077-					    = zero_realloc_action_abort;
77078-				} else {
77079-					CONF_ERROR("Invalid conf value",
77080-					    k, klen, v, vlen);
77081-				}
77082-				CONF_CONTINUE;
77083-			}
77084-			if (config_uaf_detection &&
77085-			    CONF_MATCH("lg_san_uaf_align")) {
77086-				ssize_t a;
77087-				CONF_VALUE_READ(ssize_t, a)
77088-				if (CONF_VALUE_READ_FAIL() || a < -1) {
77089-					CONF_ERROR("Invalid conf value",
77090-					    k, klen, v, vlen);
77091-				}
77092-				if (a == -1) {
77093-					opt_lg_san_uaf_align = -1;
77094-					CONF_CONTINUE;
77095-				}
77096-
77097-				/* clip if necessary */
77098-				ssize_t max_allowed = (sizeof(size_t) << 3) - 1;
77099-				ssize_t min_allowed = LG_PAGE;
77100-				if (a > max_allowed) {
77101-					a = max_allowed;
77102-				} else if (a < min_allowed) {
77103-					a = min_allowed;
77104-				}
77105-
77106-				opt_lg_san_uaf_align = a;
77107-				CONF_CONTINUE;
77108-			}
77109-
77110-			CONF_HANDLE_SIZE_T(opt_san_guard_small,
77111-			    "san_guard_small", 0, SIZE_T_MAX,
77112-			    CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
77113-			CONF_HANDLE_SIZE_T(opt_san_guard_large,
77114-			    "san_guard_large", 0, SIZE_T_MAX,
77115-			    CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
77116-
77117-			CONF_ERROR("Invalid conf pair", k, klen, v, vlen);
77118-#undef CONF_ERROR
77119-#undef CONF_CONTINUE
77120-#undef CONF_MATCH
77121-#undef CONF_MATCH_VALUE
77122-#undef CONF_HANDLE_BOOL
77123-#undef CONF_DONT_CHECK_MIN
77124-#undef CONF_CHECK_MIN
77125-#undef CONF_DONT_CHECK_MAX
77126-#undef CONF_CHECK_MAX
77127-#undef CONF_HANDLE_T
77128-#undef CONF_HANDLE_T_U
77129-#undef CONF_HANDLE_T_SIGNED
77130-#undef CONF_HANDLE_UNSIGNED
77131-#undef CONF_HANDLE_SIZE_T
77132-#undef CONF_HANDLE_SSIZE_T
77133-#undef CONF_HANDLE_CHAR_P
77134-    /* Re-enable diagnostic "-Wtype-limits" */
77135-    JEMALLOC_DIAGNOSTIC_POP
77136-		}
77137-		if (opt_abort_conf && had_conf_error) {
77138-			malloc_abort_invalid_conf();
77139-		}
77140-	}
77141-	atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
77142-}
77143-
77144-static bool
77145-malloc_conf_init_check_deps(void) {
77146-	if (opt_prof_leak_error && !opt_prof_final) {
77147-		malloc_printf("<jemalloc>: prof_leak_error is set w/o "
77148-		    "prof_final.\n");
77149-		return true;
77150-	}
77151-
77152-	return false;
77153-}
77154-
77155-static void
77156-malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
77157-	const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL,
77158-		NULL};
77159-	char buf[PATH_MAX + 1];
77160-
77161-	/* The first call only sets the confirm_conf option and opts_cache. */
77162-	malloc_conf_init_helper(NULL, NULL, true, opts_cache, buf);
77163-	malloc_conf_init_helper(sc_data, bin_shard_sizes, false, opts_cache,
77164-	    NULL);
77165-	if (malloc_conf_init_check_deps()) {
77166-		/* check_deps only emits a warning; abort below if needed. */
77167-		if (opt_abort_conf) {
77168-			malloc_abort_invalid_conf();
77169-		}
77170-	}
77171-}
77172-
77173-#undef MALLOC_CONF_NSOURCES
77174-
77175-static bool
77176-malloc_init_hard_needed(void) {
77177-	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
77178-	    malloc_init_recursible)) {
77179-		/*
77180-		 * Another thread initialized the allocator before this one
77181-		 * acquired init_lock, or this thread is the initializing
77182-		 * thread, and it is recursively allocating.
77183-		 */
77184-		return false;
77185-	}
77186-#ifdef JEMALLOC_THREADED_INIT
77187-	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
77188-		/* Busy-wait until the initializing thread completes. */
77189-		spin_t spinner = SPIN_INITIALIZER;
77190-		do {
77191-			malloc_mutex_unlock(TSDN_NULL, &init_lock);
77192-			spin_adaptive(&spinner);
77193-			malloc_mutex_lock(TSDN_NULL, &init_lock);
77194-		} while (!malloc_initialized());
77195-		return false;
77196-	}
77197-#endif
77198-	return true;
77199-}
77200-
77201-static bool
77202-malloc_init_hard_a0_locked() {
77203-	malloc_initializer = INITIALIZER;
77204-
77205-	JEMALLOC_DIAGNOSTIC_PUSH
77206-	JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
77207-	sc_data_t sc_data = {0};
77208-	JEMALLOC_DIAGNOSTIC_POP
77209-
77210-	/*
77211-	 * Ordering here is somewhat tricky; we need sc_boot() first, since that
77212-	 * determines what the size classes will be, and then
77213-	 * malloc_conf_init(), since any slab size tweaking will need to be done
77214-	 * before sz_boot and bin_info_boot, which assume that the values they
77215-	 * read out of sc_data_global are final.
77216-	 */
77217-	sc_boot(&sc_data);
77218-	unsigned bin_shard_sizes[SC_NBINS];
77219-	bin_shard_sizes_boot(bin_shard_sizes);
77220-	/*
77221-	 * prof_boot0 only initializes opt_prof_prefix.  We need to do it before
77222-	 * we parse malloc_conf options, in case malloc_conf parsing overwrites
77223-	 * it.
77224-	 */
77225-	if (config_prof) {
77226-		prof_boot0();
77227-	}
77228-	malloc_conf_init(&sc_data, bin_shard_sizes);
77229-	san_init(opt_lg_san_uaf_align);
77230-	sz_boot(&sc_data, opt_cache_oblivious);
77231-	bin_info_boot(&sc_data, bin_shard_sizes);
77232-
77233-	if (opt_stats_print) {
77234-		/* Print statistics at exit. */
77235-		if (atexit(stats_print_atexit) != 0) {
77236-			malloc_write("<jemalloc>: Error in atexit()\n");
77237-			if (opt_abort) {
77238-				abort();
77239-			}
77240-		}
77241-	}
77242-
77243-	if (stats_boot()) {
77244-		return true;
77245-	}
77246-	if (pages_boot()) {
77247-		return true;
77248-	}
77249-	if (base_boot(TSDN_NULL)) {
77250-		return true;
77251-	}
77252-	/* emap_global is static, hence zeroed. */
77253-	if (emap_init(&arena_emap_global, b0get(), /* zeroed */ true)) {
77254-		return true;
77255-	}
77256-	if (extent_boot()) {
77257-		return true;
77258-	}
77259-	if (ctl_boot()) {
77260-		return true;
77261-	}
77262-	if (config_prof) {
77263-		prof_boot1();
77264-	}
77265-	if (opt_hpa && !hpa_supported()) {
77266-		malloc_printf("<jemalloc>: HPA not supported in the current "
77267-		    "configuration; %s.",
77268-		    opt_abort_conf ? "aborting" : "disabling");
77269-		if (opt_abort_conf) {
77270-			malloc_abort_invalid_conf();
77271-		} else {
77272-			opt_hpa = false;
77273-		}
77274-	}
77275-	if (arena_boot(&sc_data, b0get(), opt_hpa)) {
77276-		return true;
77277-	}
77278-	if (tcache_boot(TSDN_NULL, b0get())) {
77279-		return true;
77280-	}
77281-	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
77282-	    malloc_mutex_rank_exclusive)) {
77283-		return true;
77284-	}
77285-	hook_boot();
77286-	/*
77287-	 * Create enough scaffolding to allow recursive allocation in
77288-	 * malloc_ncpus().
77289-	 */
77290-	narenas_auto = 1;
77291-	manual_arena_base = narenas_auto + 1;
77292-	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
77293-	/*
77294-	 * Initialize one arena here.  The rest are lazily created in
77295-	 * arena_choose_hard().
77296-	 */
77297-	if (arena_init(TSDN_NULL, 0, &arena_config_default) == NULL) {
77298-		return true;
77299-	}
77300-	a0 = arena_get(TSDN_NULL, 0, false);
77301-
77302-	if (opt_hpa && !hpa_supported()) {
77303-		malloc_printf("<jemalloc>: HPA not supported in the current "
77304-		    "configuration; %s.",
77305-		    opt_abort_conf ? "aborting" : "disabling");
77306-		if (opt_abort_conf) {
77307-			malloc_abort_invalid_conf();
77308-		} else {
77309-			opt_hpa = false;
77310-		}
77311-	} else if (opt_hpa) {
77312-		hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts;
77313-		hpa_shard_opts.deferral_allowed = background_thread_enabled();
77314-		if (pa_shard_enable_hpa(TSDN_NULL, &a0->pa_shard,
77315-		    &hpa_shard_opts, &opt_hpa_sec_opts)) {
77316-			return true;
77317-		}
77318-	}
77319-
77320-	malloc_init_state = malloc_init_a0_initialized;
77321-
77322-	return false;
77323-}
77324-
77325-static bool
77326-malloc_init_hard_a0(void) {
77327-	bool ret;
77328-
77329-	malloc_mutex_lock(TSDN_NULL, &init_lock);
77330-	ret = malloc_init_hard_a0_locked();
77331-	malloc_mutex_unlock(TSDN_NULL, &init_lock);
77332-	return ret;
77333-}
77334-
77335-/* Initialize data structures which may trigger recursive allocation. */
77336-static bool
77337-malloc_init_hard_recursible(void) {
77338-	malloc_init_state = malloc_init_recursible;
77339-
77340-	ncpus = malloc_ncpus();
77341-	if (opt_percpu_arena != percpu_arena_disabled) {
77342-		bool cpu_count_is_deterministic =
77343-		    malloc_cpu_count_is_deterministic();
77344-		if (!cpu_count_is_deterministic) {
77345-			/*
77346-			 * If the number of CPUs is not deterministic and
77347-			 * narenas is not specified, disable the per-CPU arena,
77348-			 * since CPU IDs may not be detected properly.
77349-			 */
77350-			if (opt_narenas == 0) {
77351-				opt_percpu_arena = percpu_arena_disabled;
77352-				malloc_write("<jemalloc>: Number of CPUs "
77353-				    "detected is not deterministic. Per-CPU "
77354-				    "arena disabled.\n");
77355-				if (opt_abort_conf) {
77356-					malloc_abort_invalid_conf();
77357-				}
77358-				if (opt_abort) {
77359-					abort();
77360-				}
77361-			}
77362-		}
77363-	}
77364-
77365-#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
77366-    && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
77367-    !defined(__native_client__))
77368-	/* LinuxThreads' pthread_atfork() allocates. */
77369-	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
77370-	    jemalloc_postfork_child) != 0) {
77371-		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
77372-		if (opt_abort) {
77373-			abort();
77374-		}
77375-		return true;
77376-	}
77377-#endif
77378-
77379-	if (background_thread_boot0()) {
77380-		return true;
77381-	}
77382-
77383-	return false;
77384-}
77385-
77386-static unsigned
77387-malloc_narenas_default(void) {
77388-	assert(ncpus > 0);
77389-	/*
77390-	 * For SMP systems, create more than one arena per CPU by
77391-	 * default.
77392-	 */
77393-	if (ncpus > 1) {
77394-		fxp_t fxp_ncpus = FXP_INIT_INT(ncpus);
77395-		fxp_t goal = fxp_mul(fxp_ncpus, opt_narenas_ratio);
77396-		uint32_t int_goal = fxp_round_nearest(goal);
77397-		if (int_goal == 0) {
77398-			return 1;
77399-		}
77400-		return int_goal;
77401-	} else {
77402-		return 1;
77403-	}
77404-}
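The fixed-point math above amounts to scaling the CPU count by opt_narenas_ratio and rounding to nearest, with a floor of one arena. A sketch of the same arithmetic using a plain double (the helper name is invented; the ratio of 4 used in the example matches jemalloc's documented default of roughly four arenas per CPU):

static unsigned
narenas_default_sketch(unsigned ncpus, double narenas_ratio) {
	if (ncpus <= 1) {
		return 1;
	}
	/* Round to nearest, but never return 0. */
	unsigned goal = (unsigned)(ncpus * narenas_ratio + 0.5);
	return goal == 0 ? 1 : goal;
}
/* e.g. narenas_default_sketch(8, 4.0) == 32 */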
77405-
77406-static percpu_arena_mode_t
77407-percpu_arena_as_initialized(percpu_arena_mode_t mode) {
77408-	assert(!malloc_initialized());
77409-	assert(mode <= percpu_arena_disabled);
77410-
77411-	if (mode != percpu_arena_disabled) {
77412-		mode += percpu_arena_mode_enabled_base;
77413-	}
77414-
77415-	return mode;
77416-}
77417-
77418-static bool
77419-malloc_init_narenas(void) {
77420-	assert(ncpus > 0);
77421-
77422-	if (opt_percpu_arena != percpu_arena_disabled) {
77423-		if (!have_percpu_arena || malloc_getcpu() < 0) {
77424-			opt_percpu_arena = percpu_arena_disabled;
77425-			malloc_printf("<jemalloc>: perCPU arena getcpu() not "
77426-			    "available. Setting narenas to %u.\n", opt_narenas ?
77427-			    opt_narenas : malloc_narenas_default());
77428-			if (opt_abort) {
77429-				abort();
77430-			}
77431-		} else {
77432-			if (ncpus >= MALLOCX_ARENA_LIMIT) {
77433-				malloc_printf("<jemalloc>: narenas w/ percpu "
77434-				    "arena beyond limit (%d)\n", ncpus);
77435-				if (opt_abort) {
77436-					abort();
77437-				}
77438-				return true;
77439-			}
77440-			/* NB: opt_percpu_arena isn't fully initialized yet. */
77441-			if (percpu_arena_as_initialized(opt_percpu_arena) ==
77442-			    per_phycpu_arena && ncpus % 2 != 0) {
77443-				malloc_printf("<jemalloc>: invalid "
77444-				    "configuration -- per physical CPU arena "
77445-				    "with odd number (%u) of CPUs (no hyper "
77446-				    "threading?).\n", ncpus);
77447-				if (opt_abort)
77448-					abort();
77449-			}
77450-			unsigned n = percpu_arena_ind_limit(
77451-			    percpu_arena_as_initialized(opt_percpu_arena));
77452-			if (opt_narenas < n) {
77453-				/*
77454-				 * If narenas is specified with percpu_arena
77455-				 * enabled, the actual narenas is set to the
77456-				 * greater of the two. percpu_arena_choose will
77457-				 * be free to use any of the arenas based on CPU
77458-				 * ID. This is conservative (at a small cost)
77459-				 * but ensures correctness.
77460-				 *
77461-				 * If for some reason the ncpus determined at
77462-				 * boot is not the actual number (e.g. because
77463-				 * of affinity setting from numactl), reserving
77464-				 * narenas this way provides a workaround for
77465-				 * percpu_arena.
77466-				 */
77467-				opt_narenas = n;
77468-			}
77469-		}
77470-	}
77471-	if (opt_narenas == 0) {
77472-		opt_narenas = malloc_narenas_default();
77473-	}
77474-	assert(opt_narenas > 0);
77475-
77476-	narenas_auto = opt_narenas;
77477-	/*
77478-	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
77479-	 */
77480-	if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
77481-		narenas_auto = MALLOCX_ARENA_LIMIT - 1;
77482-		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
77483-		    narenas_auto);
77484-	}
77485-	narenas_total_set(narenas_auto);
77486-	if (arena_init_huge()) {
77487-		narenas_total_inc();
77488-	}
77489-	manual_arena_base = narenas_total_get();
77490-
77491-	return false;
77492-}
77493-
77494-static void
77495-malloc_init_percpu(void) {
77496-	opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
77497-}
77498-
77499-static bool
77500-malloc_init_hard_finish(void) {
77501-	if (malloc_mutex_boot()) {
77502-		return true;
77503-	}
77504-
77505-	malloc_init_state = malloc_init_initialized;
77506-	malloc_slow_flag_init();
77507-
77508-	return false;
77509-}
77510-
77511-static void
77512-malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
77513-	malloc_mutex_assert_owner(tsdn, &init_lock);
77514-	malloc_mutex_unlock(tsdn, &init_lock);
77515-	if (reentrancy_set) {
77516-		assert(!tsdn_null(tsdn));
77517-		tsd_t *tsd = tsdn_tsd(tsdn);
77518-		assert(tsd_reentrancy_level_get(tsd) > 0);
77519-		post_reentrancy(tsd);
77520-	}
77521-}
77522-
77523-static bool
77524-malloc_init_hard(void) {
77525-	tsd_t *tsd;
77526-
77527-#if defined(_WIN32) && _WIN32_WINNT < 0x0600
77528-	_init_init_lock();
77529-#endif
77530-	malloc_mutex_lock(TSDN_NULL, &init_lock);
77531-
77532-#define UNLOCK_RETURN(tsdn, ret, reentrancy)		\
77533-	malloc_init_hard_cleanup(tsdn, reentrancy);	\
77534-	return ret;
77535-
77536-	if (!malloc_init_hard_needed()) {
77537-		UNLOCK_RETURN(TSDN_NULL, false, false)
77538-	}
77539-
77540-	if (malloc_init_state != malloc_init_a0_initialized &&
77541-	    malloc_init_hard_a0_locked()) {
77542-		UNLOCK_RETURN(TSDN_NULL, true, false)
77543-	}
77544-
77545-	malloc_mutex_unlock(TSDN_NULL, &init_lock);
77546-	/* Recursive allocation relies on functional tsd. */
77547-	tsd = malloc_tsd_boot0();
77548-	if (tsd == NULL) {
77549-		return true;
77550-	}
77551-	if (malloc_init_hard_recursible()) {
77552-		return true;
77553-	}
77554-
77555-	malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
77556-	/* Set reentrancy level to 1 during init. */
77557-	pre_reentrancy(tsd, NULL);
77558-	/* Initialize narenas before prof_boot2 (for allocation). */
77559-	if (malloc_init_narenas()
77560-	    || background_thread_boot1(tsd_tsdn(tsd), b0get())) {
77561-		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
77562-	}
77563-	if (config_prof && prof_boot2(tsd, b0get())) {
77564-		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
77565-	}
77566-
77567-	malloc_init_percpu();
77568-
77569-	if (malloc_init_hard_finish()) {
77570-		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
77571-	}
77572-	post_reentrancy(tsd);
77573-	malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
77574-
77575-	witness_assert_lockless(witness_tsd_tsdn(
77576-	    tsd_witness_tsdp_get_unsafe(tsd)));
77577-	malloc_tsd_boot1();
77578-	/* Update TSD after tsd_boot1. */
77579-	tsd = tsd_fetch();
77580-	if (opt_background_thread) {
77581-		assert(have_background_thread);
77582-		/*
77583-		 * Need to finish init & unlock first before creating background
77584-		 * threads (pthread_create depends on malloc).  ctl_init (which
77585-		 * sets isthreaded) needs to be called without holding any lock.
77586-		 */
77587-		background_thread_ctl_init(tsd_tsdn(tsd));
77588-		if (background_thread_create(tsd, 0)) {
77589-			return true;
77590-		}
77591-	}
77592-#undef UNLOCK_RETURN
77593-	return false;
77594-}
77595-
77596-/*
77597- * End initialization functions.
77598- */
77599-/******************************************************************************/
77600-/*
77601- * Begin allocation-path internal functions and data structures.
77602- */
77603-
77604-/*
77605- * Settings determined by the documented behavior of the allocation functions.
77606- */
77607-typedef struct static_opts_s static_opts_t;
77608-struct static_opts_s {
77609-	/* Whether or not allocation size may overflow. */
77610-	bool may_overflow;
77611-
77612-	/*
77613-	 * Whether or not allocations (with alignment) of size 0 should be
77614-	 * treated as size 1.
77615-	 */
77616-	bool bump_empty_aligned_alloc;
77617-	/*
77618-	 * Whether to assert that allocations are not of size 0 (after any
77619-	 * bumping).
77620-	 */
77621-	bool assert_nonempty_alloc;
77622-
77623-	/*
77624-	 * Whether or not to modify the 'result' argument to malloc in case of
77625-	 * error.
77626-	 */
77627-	bool null_out_result_on_error;
77628-	/* Whether to set errno when we encounter an error condition. */
77629-	bool set_errno_on_error;
77630-
77631-	/*
77632-	 * The minimum valid alignment for functions requesting aligned storage.
77633-	 */
77634-	size_t min_alignment;
77635-
77636-	/* The error string to use if we oom. */
77637-	const char *oom_string;
77638-	/* The error string to use if the passed-in alignment is invalid. */
77639-	const char *invalid_alignment_string;
77640-
77641-	/*
77642-	 * False if we're configured to skip some time-consuming operations.
77643-	 *
77644-	 * This isn't really a malloc "behavior", but it acts as a useful
77645-	 * summary of several other static (or at least, static after program
77646-	 * initialization) options.
77647-	 */
77648-	bool slow;
77649-	/*
77650-	 * Whether to report the usable size (usize) back to the caller.
77651-	 */
77652-	bool usize;
77653-};
77654-
77655-JEMALLOC_ALWAYS_INLINE void
77656-static_opts_init(static_opts_t *static_opts) {
77657-	static_opts->may_overflow = false;
77658-	static_opts->bump_empty_aligned_alloc = false;
77659-	static_opts->assert_nonempty_alloc = false;
77660-	static_opts->null_out_result_on_error = false;
77661-	static_opts->set_errno_on_error = false;
77662-	static_opts->min_alignment = 0;
77663-	static_opts->oom_string = "";
77664-	static_opts->invalid_alignment_string = "";
77665-	static_opts->slow = false;
77666-	static_opts->usize = false;
77667-}
77668-
77669-/*
77670- * These correspond to the macros in jemalloc/jemalloc_macros.h.  Broadly, we
77671- * should have one constant here per magic value there.  Note however that the
77672- * representations need not be related.
77673- */
77674-#define TCACHE_IND_NONE ((unsigned)-1)
77675-#define TCACHE_IND_AUTOMATIC ((unsigned)-2)
77676-#define ARENA_IND_AUTOMATIC ((unsigned)-1)
77677-
77678-typedef struct dynamic_opts_s dynamic_opts_t;
77679-struct dynamic_opts_s {
77680-	void **result;
77681-	size_t usize;
77682-	size_t num_items;
77683-	size_t item_size;
77684-	size_t alignment;
77685-	bool zero;
77686-	unsigned tcache_ind;
77687-	unsigned arena_ind;
77688-};
77689-
77690-JEMALLOC_ALWAYS_INLINE void
77691-dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
77692-	dynamic_opts->result = NULL;
77693-	dynamic_opts->usize = 0;
77694-	dynamic_opts->num_items = 0;
77695-	dynamic_opts->item_size = 0;
77696-	dynamic_opts->alignment = 0;
77697-	dynamic_opts->zero = false;
77698-	dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
77699-	dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
77700-}
77701-
77702-/*
77703- * The ind parameter is optional and is only checked and filled if
77704- * alignment == 0; returns true if the resulting usize is out of range.
77705- */
77706-JEMALLOC_ALWAYS_INLINE bool
77707-aligned_usize_get(size_t size, size_t alignment, size_t *usize, szind_t *ind,
77708-    bool bump_empty_aligned_alloc) {
77709-	assert(usize != NULL);
77710-	if (alignment == 0) {
77711-		if (ind != NULL) {
77712-			*ind = sz_size2index(size);
77713-			if (unlikely(*ind >= SC_NSIZES)) {
77714-				return true;
77715-			}
77716-			*usize = sz_index2size(*ind);
77717-			assert(*usize > 0 && *usize <= SC_LARGE_MAXCLASS);
77718-			return false;
77719-		}
77720-		*usize = sz_s2u(size);
77721-	} else {
77722-		if (bump_empty_aligned_alloc && unlikely(size == 0)) {
77723-			size = 1;
77724-		}
77725-		*usize = sz_sa2u(size, alignment);
77726-	}
77727-	if (unlikely(*usize == 0 || *usize > SC_LARGE_MAXCLASS)) {
77728-		return true;
77729-	}
77730-	return false;
77731-}
77732-
77733-JEMALLOC_ALWAYS_INLINE bool
77734-zero_get(bool guarantee, bool slow) {
77735-	if (config_fill && slow && unlikely(opt_zero)) {
77736-		return true;
77737-	} else {
77738-		return guarantee;
77739-	}
77740-}
77741-
77742-JEMALLOC_ALWAYS_INLINE tcache_t *
77743-tcache_get_from_ind(tsd_t *tsd, unsigned tcache_ind, bool slow, bool is_alloc) {
77744-	tcache_t *tcache;
77745-	if (tcache_ind == TCACHE_IND_AUTOMATIC) {
77746-		if (likely(!slow)) {
77747-			/* Getting tcache ptr unconditionally. */
77748-			tcache = tsd_tcachep_get(tsd);
77749-			assert(tcache == tcache_get(tsd));
77750-		} else if (is_alloc ||
77751-		    likely(tsd_reentrancy_level_get(tsd) == 0)) {
77752-			tcache = tcache_get(tsd);
77753-		} else {
77754-			tcache = NULL;
77755-		}
77756-	} else {
77757-		/*
77758-		 * Should not specify tcache on deallocation path when being
77759-		 * reentrant.
77760-		 */
77761-		assert(is_alloc || tsd_reentrancy_level_get(tsd) == 0 ||
77762-		    tsd_state_nocleanup(tsd));
77763-		if (tcache_ind == TCACHE_IND_NONE) {
77764-			tcache = NULL;
77765-		} else {
77766-			tcache = tcaches_get(tsd, tcache_ind);
77767-		}
77768-	}
77769-	return tcache;
77770-}
77771-
77772-/* Return true if a manual arena is specified and arena_get() OOMs. */
77773-JEMALLOC_ALWAYS_INLINE bool
77774-arena_get_from_ind(tsd_t *tsd, unsigned arena_ind, arena_t **arena_p) {
77775-	if (arena_ind == ARENA_IND_AUTOMATIC) {
77776-		/*
77777-		 * In case of automatic arena management, we defer arena
77778-		 * computation until as late as we can, hoping to fill the
77779-		 * allocation out of the tcache.
77780-		 */
77781-		*arena_p = NULL;
77782-	} else {
77783-		*arena_p = arena_get(tsd_tsdn(tsd), arena_ind, true);
77784-		if (unlikely(*arena_p == NULL) && arena_ind >= narenas_auto) {
77785-			return true;
77786-		}
77787-	}
77788-	return false;
77789-}
77790-
77791-/* ind is ignored if dopts->alignment > 0. */
77792-JEMALLOC_ALWAYS_INLINE void *
77793-imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
77794-    size_t size, size_t usize, szind_t ind) {
77795-	/* Fill in the tcache. */
77796-	tcache_t *tcache = tcache_get_from_ind(tsd, dopts->tcache_ind,
77797-	    sopts->slow, /* is_alloc */ true);
77798-
77799-	/* Fill in the arena. */
77800-	arena_t *arena;
77801-	if (arena_get_from_ind(tsd, dopts->arena_ind, &arena)) {
77802-		return NULL;
77803-	}
77804-
77805-	if (unlikely(dopts->alignment != 0)) {
77806-		return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
77807-		    dopts->zero, tcache, arena);
77808-	}
77809-
77810-	return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
77811-	    arena, sopts->slow);
77812-}
77813-
77814-JEMALLOC_ALWAYS_INLINE void *
77815-imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
77816-    size_t usize, szind_t ind) {
77817-	void *ret;
77818-
77819-	/*
77820-	 * For small allocations, sampling bumps the usize.  If so, we allocate
77821-	 * from the ind_large bucket.
77822-	 */
77823-	szind_t ind_large;
77824-	size_t bumped_usize = usize;
77825-
77826-	dopts->alignment = prof_sample_align(dopts->alignment);
77827-	if (usize <= SC_SMALL_MAXCLASS) {
77828-		assert(((dopts->alignment == 0) ?
77829-		    sz_s2u(SC_LARGE_MINCLASS) :
77830-		    sz_sa2u(SC_LARGE_MINCLASS, dopts->alignment))
77831-			== SC_LARGE_MINCLASS);
77832-		ind_large = sz_size2index(SC_LARGE_MINCLASS);
77833-		bumped_usize = sz_s2u(SC_LARGE_MINCLASS);
77834-		ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
77835-		    bumped_usize, ind_large);
77836-		if (unlikely(ret == NULL)) {
77837-			return NULL;
77838-		}
77839-		arena_prof_promote(tsd_tsdn(tsd), ret, usize);
77840-	} else {
77841-		ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
77842-	}
77843-	assert(prof_sample_aligned(ret));
77844-
77845-	return ret;
77846-}
77847-
77848-/*
77849- * Returns true if the allocation will overflow, and false otherwise.  Sets
77850- * *size to the product either way.
77851- */
77852-JEMALLOC_ALWAYS_INLINE bool
77853-compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
77854-    size_t *size) {
77855-	/*
77856-	 * This function is just num_items * item_size, except that we may have
77857-	 * to check for overflow.
77858-	 */
77859-
77860-	if (!may_overflow) {
77861-		assert(dopts->num_items == 1);
77862-		*size = dopts->item_size;
77863-		return false;
77864-	}
77865-
77866-	/* A size_t with its high-half bits all set to 1. */
77867-	static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
77868-
77869-	*size = dopts->item_size * dopts->num_items;
77870-
77871-	if (unlikely(*size == 0)) {
77872-		return (dopts->num_items != 0 && dopts->item_size != 0);
77873-	}
77874-
77875-	/*
77876-	 * We got a non-zero size, but we don't know if we overflowed to get
77877-	 * there.  To avoid having to do a divide, we'll be clever and note that
77878-	 * if both A and B can be represented in N/2 bits, then their product
77879-	 * can be represented in N bits (without the possibility of overflow).
77880-	 */
77881-	if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
77882-		return false;
77883-	}
77884-	if (likely(*size / dopts->item_size == dopts->num_items)) {
77885-		return false;
77886-	}
77887-	return true;
77888-}
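A standalone restatement of the same half-word overflow trick, with invented names, for readers who want to reuse it outside this file:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Returns true if num * size overflows size_t; *prod gets the raw product. */
static bool
mul_overflows(size_t num, size_t size, size_t *prod) {
	/* A size_t with its high-half bits all set to 1, as above. */
	const size_t high_bits = SIZE_MAX << (sizeof(size_t) * 8 / 2);

	*prod = num * size;
	if ((high_bits & (num | size)) == 0) {
		/* Both operands fit in half a word: no overflow possible. */
		return false;
	}
	/* Fall back to a divide; size == 0 cannot overflow. */
	return size != 0 && *prod / size != num;
}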
77889-
77890-JEMALLOC_ALWAYS_INLINE int
77891-imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
77892-	/* Where the actual allocated memory will live. */
77893-	void *allocation = NULL;
77894-	/* Filled in by compute_size_with_overflow below. */
77895-	size_t size = 0;
77896-	/*
77897-	 * The zero initialization for ind is actually a dead store, in that its
77898-	 * value is reset before any branch on its value is taken.  Sometimes,
77899-	 * though, it's convenient to pass it as an argument before this point.
77900-	 * To avoid undefined behavior, we initialize it with a dummy store.
77901-	 */
77902-	szind_t ind = 0;
77903-	/* usize will always be properly initialized. */
77904-	size_t usize;
77905-
77906-	/* Reentrancy is only checked on slow path. */
77907-	int8_t reentrancy_level;
77908-
77909-	/* Compute the amount of memory the user wants. */
77910-	if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
77911-	    &size))) {
77912-		goto label_oom;
77913-	}
77914-
77915-	if (unlikely(dopts->alignment < sopts->min_alignment
77916-	    || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
77917-		goto label_invalid_alignment;
77918-	}
77919-
77920-	/* This is the beginning of the "core" algorithm. */
77921-	dopts->zero = zero_get(dopts->zero, sopts->slow);
77922-	if (aligned_usize_get(size, dopts->alignment, &usize, &ind,
77923-	    sopts->bump_empty_aligned_alloc)) {
77924-		goto label_oom;
77925-	}
77926-	dopts->usize = usize;
77927-	/* Validate the user input. */
77928-	if (sopts->assert_nonempty_alloc) {
77929-		assert(size != 0);
77930-	}
77931-
77932-	check_entry_exit_locking(tsd_tsdn(tsd));
77933-
77934-	/*
77935-	 * If we need to handle reentrancy, we can do it out of a
77936-	 * known-initialized arena (i.e. arena 0).
77937-	 */
77938-	reentrancy_level = tsd_reentrancy_level_get(tsd);
77939-	if (sopts->slow && unlikely(reentrancy_level > 0)) {
77940-		/*
77941-		 * We should never specify particular arenas or tcaches from
77942-		 * within our internal allocations.
77943-		 */
77944-		assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
77945-		    dopts->tcache_ind == TCACHE_IND_NONE);
77946-		assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
77947-		dopts->tcache_ind = TCACHE_IND_NONE;
77948-		/* We know that arena 0 has already been initialized. */
77949-		dopts->arena_ind = 0;
77950-	}
77951-
77952-	/*
77953-	 * If dopts->alignment > 0, then ind is still 0, but usize was computed
77954-	 * in the previous if statement.  Down the positive alignment path,
77955-	 * imalloc_no_sample and imalloc_sample will ignore ind.
77956-	 */
77957-
77958-	/* If profiling is on, get our profiling context. */
77959-	if (config_prof && opt_prof) {
77960-		bool prof_active = prof_active_get_unlocked();
77961-		bool sample_event = te_prof_sample_event_lookahead(tsd, usize);
77962-		prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active,
77963-		    sample_event);
77964-
77965-		emap_alloc_ctx_t alloc_ctx;
77966-		if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
77967-			alloc_ctx.slab = (usize <= SC_SMALL_MAXCLASS);
77968-			allocation = imalloc_no_sample(
77969-			    sopts, dopts, tsd, usize, usize, ind);
77970-		} else if ((uintptr_t)tctx > (uintptr_t)1U) {
77971-			allocation = imalloc_sample(
77972-			    sopts, dopts, tsd, usize, ind);
77973-			alloc_ctx.slab = false;
77974-		} else {
77975-			allocation = NULL;
77976-		}
77977-
77978-		if (unlikely(allocation == NULL)) {
77979-			prof_alloc_rollback(tsd, tctx);
77980-			goto label_oom;
77981-		}
77982-		prof_malloc(tsd, allocation, size, usize, &alloc_ctx, tctx);
77983-	} else {
77984-		assert(!opt_prof);
77985-		allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
77986-		    ind);
77987-		if (unlikely(allocation == NULL)) {
77988-			goto label_oom;
77989-		}
77990-	}
77991-
77992-	/*
77993-	 * Allocation has been done at this point.  We still have some
77994-	 * post-allocation work to do though.
77995-	 */
77996-
77997-	thread_alloc_event(tsd, usize);
77998-
77999-	assert(dopts->alignment == 0
78000-	    || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
78001-
78002-	assert(usize == isalloc(tsd_tsdn(tsd), allocation));
78003-
78004-	if (config_fill && sopts->slow && !dopts->zero
78005-	    && unlikely(opt_junk_alloc)) {
78006-		junk_alloc_callback(allocation, usize);
78007-	}
78008-
78009-	if (sopts->slow) {
78010-		UTRACE(0, size, allocation);
78011-	}
78012-
78013-	/* Success! */
78014-	check_entry_exit_locking(tsd_tsdn(tsd));
78015-	*dopts->result = allocation;
78016-	return 0;
78017-
78018-label_oom:
78019-	if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
78020-		malloc_write(sopts->oom_string);
78021-		abort();
78022-	}
78023-
78024-	if (sopts->slow) {
78025-		UTRACE(NULL, size, NULL);
78026-	}
78027-
78028-	check_entry_exit_locking(tsd_tsdn(tsd));
78029-
78030-	if (sopts->set_errno_on_error) {
78031-		set_errno(ENOMEM);
78032-	}
78033-
78034-	if (sopts->null_out_result_on_error) {
78035-		*dopts->result = NULL;
78036-	}
78037-
78038-	return ENOMEM;
78039-
78040-	/*
78041-	 * This label is only jumped to by one goto; we move it out of line
78042-	 * anyway to avoid obscuring the non-error paths, and for symmetry with
78043-	 * the oom case.
78044-	 */
78045-label_invalid_alignment:
78046-	if (config_xmalloc && unlikely(opt_xmalloc)) {
78047-		malloc_write(sopts->invalid_alignment_string);
78048-		abort();
78049-	}
78050-
78051-	if (sopts->set_errno_on_error) {
78052-		set_errno(EINVAL);
78053-	}
78054-
78055-	if (sopts->slow) {
78056-		UTRACE(NULL, size, NULL);
78057-	}
78058-
78059-	check_entry_exit_locking(tsd_tsdn(tsd));
78060-
78061-	if (sopts->null_out_result_on_error) {
78062-		*dopts->result = NULL;
78063-	}
78064-
78065-	return EINVAL;
78066-}
78067-
78068-JEMALLOC_ALWAYS_INLINE bool
78069-imalloc_init_check(static_opts_t *sopts, dynamic_opts_t *dopts) {
78070-	if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
78071-		if (config_xmalloc && unlikely(opt_xmalloc)) {
78072-			malloc_write(sopts->oom_string);
78073-			abort();
78074-		}
78075-		UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
78076-		set_errno(ENOMEM);
78077-		*dopts->result = NULL;
78078-
78079-		return false;
78080-	}
78081-
78082-	return true;
78083-}
78084-
78085-/* Returns the errno-style error code of the allocation. */
78086-JEMALLOC_ALWAYS_INLINE int
78087-imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
78088-	if (tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) {
78089-		return ENOMEM;
78090-	}
78091-
78092-	/* We always need the tsd.  Let's grab it right away. */
78093-	tsd_t *tsd = tsd_fetch();
78094-	assert(tsd);
78095-	if (likely(tsd_fast(tsd))) {
78096-		/* Fast and common path. */
78097-		tsd_assert_fast(tsd);
78098-		sopts->slow = false;
78099-		return imalloc_body(sopts, dopts, tsd);
78100-	} else {
78101-		if (!tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) {
78102-			return ENOMEM;
78103-		}
78104-
78105-		sopts->slow = true;
78106-		return imalloc_body(sopts, dopts, tsd);
78107-	}
78108-}
78109-
78110-JEMALLOC_NOINLINE
78111-void *
78112-malloc_default(size_t size) {
78113-	void *ret;
78114-	static_opts_t sopts;
78115-	dynamic_opts_t dopts;
78116-
78117-	/*
78118-	 * This variant has a logging hook on exit but not on entry.  It's
78119-	 * called only by je_malloc, below, which emits the entry one for us
78120-	 * (and, if it calls us, does so only via tail call).
78121-	 */
78122-
78123-	static_opts_init(&sopts);
78124-	dynamic_opts_init(&dopts);
78125-
78126-	sopts.null_out_result_on_error = true;
78127-	sopts.set_errno_on_error = true;
78128-	sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
78129-
78130-	dopts.result = &ret;
78131-	dopts.num_items = 1;
78132-	dopts.item_size = size;
78133-
78134-	imalloc(&sopts, &dopts);
78135-	/*
78136-	 * Note that this branch gets optimized away -- it immediately follows
78137-	 * the check on tsd_fast that sets sopts.slow.
78138-	 */
78139-	if (sopts.slow) {
78140-		uintptr_t args[3] = {size};
78141-		hook_invoke_alloc(hook_alloc_malloc, ret, (uintptr_t)ret, args);
78142-	}
78143-
78144-	LOG("core.malloc.exit", "result: %p", ret);
78145-
78146-	return ret;
78147-}
78148-
78149-/******************************************************************************/
78150-/*
78151- * Begin malloc(3)-compatible functions.
78152- */
78153-
78154-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
78155-void JEMALLOC_NOTHROW *
78156-JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
78157-je_malloc(size_t size) {
78158-	return imalloc_fastpath(size, &malloc_default);
78159-}
78160-
78161-JEMALLOC_EXPORT int JEMALLOC_NOTHROW
78162-JEMALLOC_ATTR(nonnull(1))
78163-je_posix_memalign(void **memptr, size_t alignment, size_t size) {
78164-	int ret;
78165-	static_opts_t sopts;
78166-	dynamic_opts_t dopts;
78167-
78168-	LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
78169-	    "size: %zu", memptr, alignment, size);
78170-
78171-	static_opts_init(&sopts);
78172-	dynamic_opts_init(&dopts);
78173-
78174-	sopts.bump_empty_aligned_alloc = true;
78175-	sopts.min_alignment = sizeof(void *);
78176-	sopts.oom_string =
78177-	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
78178-	sopts.invalid_alignment_string =
78179-	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
78180-
78181-	dopts.result = memptr;
78182-	dopts.num_items = 1;
78183-	dopts.item_size = size;
78184-	dopts.alignment = alignment;
78185-
78186-	ret = imalloc(&sopts, &dopts);
78187-	if (sopts.slow) {
78188-		uintptr_t args[3] = {(uintptr_t)memptr, (uintptr_t)alignment,
78189-			(uintptr_t)size};
78190-		hook_invoke_alloc(hook_alloc_posix_memalign, *memptr,
78191-		    (uintptr_t)ret, args);
78192-	}
78193-
78194-	LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
78195-	    *memptr);
78196-
78197-	return ret;
78198-}
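A caller-side sketch of the entry point above; the power-of-two requirement and the sizeof(void *) minimum are exactly what sopts.min_alignment and the alignment check in imalloc_body enforce (the helper name is invented):

#include <stdlib.h>

static void *
aligned_buffer(size_t size) {
	void *p = NULL;
	/* 64 is a power of two and >= sizeof(void *), so it is accepted. */
	if (posix_memalign(&p, 64, size) != 0) {
		return NULL;	/* EINVAL or ENOMEM; errno is not set */
	}
	return p;
}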
78199-
78200-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
78201-void JEMALLOC_NOTHROW *
78202-JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
78203-je_aligned_alloc(size_t alignment, size_t size) {
78204-	void *ret;
78205-
78206-	static_opts_t sopts;
78207-	dynamic_opts_t dopts;
78208-
78209-	LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
78210-	    alignment, size);
78211-
78212-	static_opts_init(&sopts);
78213-	dynamic_opts_init(&dopts);
78214-
78215-	sopts.bump_empty_aligned_alloc = true;
78216-	sopts.null_out_result_on_error = true;
78217-	sopts.set_errno_on_error = true;
78218-	sopts.min_alignment = 1;
78219-	sopts.oom_string =
78220-	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
78221-	sopts.invalid_alignment_string =
78222-	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
78223-
78224-	dopts.result = &ret;
78225-	dopts.num_items = 1;
78226-	dopts.item_size = size;
78227-	dopts.alignment = alignment;
78228-
78229-	imalloc(&sopts, &dopts);
78230-	if (sopts.slow) {
78231-		uintptr_t args[3] = {(uintptr_t)alignment, (uintptr_t)size};
78232-		hook_invoke_alloc(hook_alloc_aligned_alloc, ret,
78233-		    (uintptr_t)ret, args);
78234-	}
78235-
78236-	LOG("core.aligned_alloc.exit", "result: %p", ret);
78237-
78238-	return ret;
78239-}
78240-
78241-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
78242-void JEMALLOC_NOTHROW *
78243-JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
78244-je_calloc(size_t num, size_t size) {
78245-	void *ret;
78246-	static_opts_t sopts;
78247-	dynamic_opts_t dopts;
78248-
78249-	LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);
78250-
78251-	static_opts_init(&sopts);
78252-	dynamic_opts_init(&dopts);
78253-
78254-	sopts.may_overflow = true;
78255-	sopts.null_out_result_on_error = true;
78256-	sopts.set_errno_on_error = true;
78257-	sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
78258-
78259-	dopts.result = &ret;
78260-	dopts.num_items = num;
78261-	dopts.item_size = size;
78262-	dopts.zero = true;
78263-
78264-	imalloc(&sopts, &dopts);
78265-	if (sopts.slow) {
78266-		uintptr_t args[3] = {(uintptr_t)num, (uintptr_t)size};
78267-		hook_invoke_alloc(hook_alloc_calloc, ret, (uintptr_t)ret, args);
78268-	}
78269-
78270-	LOG("core.calloc.exit", "result: %p", ret);
78271-
78272-	return ret;
78273-}
78274-
78275-JEMALLOC_ALWAYS_INLINE void
78276-ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
78277-	if (!slow_path) {
78278-		tsd_assert_fast(tsd);
78279-	}
78280-	check_entry_exit_locking(tsd_tsdn(tsd));
78281-	if (tsd_reentrancy_level_get(tsd) != 0) {
78282-		assert(slow_path);
78283-	}
78284-
78285-	assert(ptr != NULL);
78286-	assert(malloc_initialized() || IS_INITIALIZER);
78287-
78288-	emap_alloc_ctx_t alloc_ctx;
78289-	emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
78290-	    &alloc_ctx);
78291-	assert(alloc_ctx.szind != SC_NSIZES);
78292-
78293-	size_t usize = sz_index2size(alloc_ctx.szind);
78294-	if (config_prof && opt_prof) {
78295-		prof_free(tsd, ptr, usize, &alloc_ctx);
78296-	}
78297-
78298-	if (likely(!slow_path)) {
78299-		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
78300-		    false);
78301-	} else {
78302-		if (config_fill && slow_path && opt_junk_free) {
78303-			junk_free_callback(ptr, usize);
78304-		}
78305-		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
78306-		    true);
78307-	}
78308-	thread_dalloc_event(tsd, usize);
78309-}
78310-
78311-JEMALLOC_ALWAYS_INLINE bool
78312-maybe_check_alloc_ctx(tsd_t *tsd, void *ptr, emap_alloc_ctx_t *alloc_ctx) {
78313-	if (config_opt_size_checks) {
78314-		emap_alloc_ctx_t dbg_ctx;
78315-		emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
78316-		    &dbg_ctx);
78317-		if (alloc_ctx->szind != dbg_ctx.szind) {
78318-			safety_check_fail_sized_dealloc(
78319-			    /* current_dealloc */ true, ptr,
78320-			    /* true_size */ sz_index2size(dbg_ctx.szind),
78321-			    /* input_size */ sz_index2size(alloc_ctx->szind));
78322-			return true;
78323-		}
78324-		if (alloc_ctx->slab != dbg_ctx.slab) {
78325-			safety_check_fail(
78326-			    "Internal heap corruption detected: "
78327-			    "mismatch in slab bit");
78328-			return true;
78329-		}
78330-	}
78331-	return false;
78332-}
78333-
78334-JEMALLOC_ALWAYS_INLINE void
78335-isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
78336-	if (!slow_path) {
78337-		tsd_assert_fast(tsd);
78338-	}
78339-	check_entry_exit_locking(tsd_tsdn(tsd));
78340-	if (tsd_reentrancy_level_get(tsd) != 0) {
78341-		assert(slow_path);
78342-	}
78343-
78344-	assert(ptr != NULL);
78345-	assert(malloc_initialized() || IS_INITIALIZER);
78346-
78347-	emap_alloc_ctx_t alloc_ctx;
78348-	if (!config_prof) {
78349-		alloc_ctx.szind = sz_size2index(usize);
78350-		alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
78351-	} else {
78352-		if (likely(!prof_sample_aligned(ptr))) {
78353-			/*
78354-			 * When the ptr is not page aligned, it was not sampled.
78355-			 * usize can be trusted to determine szind and slab.
78356-			 */
78357-			alloc_ctx.szind = sz_size2index(usize);
78358-			alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
78359-		} else if (opt_prof) {
78360-			emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global,
78361-			    ptr, &alloc_ctx);
78362-
78363-			if (config_opt_safety_checks) {
78364-				/* Small alloc may have !slab (sampled). */
78365-				if (unlikely(alloc_ctx.szind !=
78366-				    sz_size2index(usize))) {
78367-					safety_check_fail_sized_dealloc(
78368-					    /* current_dealloc */ true, ptr,
78369-					    /* true_size */ sz_index2size(
78370-					    alloc_ctx.szind),
78371-					    /* input_size */ usize);
78372-				}
78373-			}
78374-		} else {
78375-			alloc_ctx.szind = sz_size2index(usize);
78376-			alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
78377-		}
78378-	}
78379-	bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx);
78380-	if (fail) {
78381-		/*
78382-		 * This is a heap corruption bug.  In real life we'll crash; for
78383-		 * the unit test we just want to avoid breaking anything too
78384-		 * badly to get a test result out.  Let's leak instead of trying
78385-		 * to free.
78386-		 */
78387-		return;
78388-	}
78389-
78390-	if (config_prof && opt_prof) {
78391-		prof_free(tsd, ptr, usize, &alloc_ctx);
78392-	}
78393-	if (likely(!slow_path)) {
78394-		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx,
78395-		    false);
78396-	} else {
78397-		if (config_fill && slow_path && opt_junk_free) {
78398-			junk_free_callback(ptr, usize);
78399-		}
78400-		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx,
78401-		    true);
78402-	}
78403-	thread_dalloc_event(tsd, usize);
78404-}
78405-
78406-JEMALLOC_NOINLINE
78407-void
78408-free_default(void *ptr) {
78409-	UTRACE(ptr, 0, 0);
78410-	if (likely(ptr != NULL)) {
78411-		/*
78412-		 * We avoid setting up tsd fully (e.g. tcache, arena binding)
78413-		 * based on only free() calls -- other activities trigger the
78414-		 * minimal to full transition.  This is because free() may
78415-		 * happen during thread shutdown after tls deallocation: if a
78416-		 * thread never had any malloc activities until then, a
78417-		 * fully-setup tsd won't be destructed properly.
78418-		 */
78419-		tsd_t *tsd = tsd_fetch_min();
78420-		check_entry_exit_locking(tsd_tsdn(tsd));
78421-
78422-		if (likely(tsd_fast(tsd))) {
78423-			tcache_t *tcache = tcache_get_from_ind(tsd,
78424-			    TCACHE_IND_AUTOMATIC, /* slow */ false,
78425-			    /* is_alloc */ false);
78426-			ifree(tsd, ptr, tcache, /* slow */ false);
78427-		} else {
78428-			tcache_t *tcache = tcache_get_from_ind(tsd,
78429-			    TCACHE_IND_AUTOMATIC, /* slow */ true,
78430-			    /* is_alloc */ false);
78431-			uintptr_t args_raw[3] = {(uintptr_t)ptr};
78432-			hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw);
78433-			ifree(tsd, ptr, tcache, /* slow */ true);
78434-		}
78435-
78436-		check_entry_exit_locking(tsd_tsdn(tsd));
78437-	}
78438-}
78439-
78440-JEMALLOC_ALWAYS_INLINE bool
78441-free_fastpath_nonfast_aligned(void *ptr, bool check_prof) {
78442-	/*
78443-	 * free_fastpath does not handle two uncommon cases: 1) sampled (profiled)
78444-	 * objects and 2) sampled junk & stash for use-after-free detection.
78445-	 * Both have special alignments which are used to escape the fastpath.
78446-	 *
78447-	 * prof_sample is page-aligned, which covers the UAF check when both
78448-	 * are enabled (the assertion below).  We avoid redundant checks since
78449-	 * this is on the fastpath -- at most one runtime branch results from this.
78450-	 */
78451-	if (config_debug && cache_bin_nonfast_aligned(ptr)) {
78452-		assert(prof_sample_aligned(ptr));
78453-	}
78454-
78455-	if (config_prof && check_prof) {
78456-		/* When prof is enabled, the prof_sample alignment is enough. */
78457-		if (prof_sample_aligned(ptr)) {
78458-			return true;
78459-		} else {
78460-			return false;
78461-		}
78462-	}
78463-
78464-	if (config_uaf_detection) {
78465-		if (cache_bin_nonfast_aligned(ptr)) {
78466-			return true;
78467-		} else {
78468-			return false;
78469-		}
78470-	}
78471-
78472-	return false;
78473-}
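Both escapes boil down to a power-of-two alignment test on the pointer. A sketch with an invented helper (the real code wraps this in prof_sample_aligned and cache_bin_nonfast_aligned, using PAGE and the cache-bin non-fast alignment as the respective constants):

#include <stdbool.h>
#include <stdint.h>

/* Power-of-two alignment test; align is assumed to be a power of two. */
static bool
aligned_to(const void *ptr, uintptr_t align) {
	return ((uintptr_t)ptr & (align - 1)) == 0;
}
/* e.g. a prof-sampled object satisfies aligned_to(ptr, page_size). */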
78474-
78475-/* Returns whether or not the free attempt was successful. */
78476-JEMALLOC_ALWAYS_INLINE
78477-bool free_fastpath(void *ptr, size_t size, bool size_hint) {
78478-	tsd_t *tsd = tsd_get(false);
78479-	/* The branch gets optimized away unless tsd_get_allocates(). */
78480-	if (unlikely(tsd == NULL)) {
78481-		return false;
78482-	}
78483-	/*
78484-	 *  The tsd_fast() / initialized checks are folded into the branch
78485-	 *  testing (deallocated_after >= threshold) later in this function.
78486-	 *  The threshold will be set to 0 when !tsd_fast.
78487-	 */
78488-	assert(tsd_fast(tsd) ||
78489-	    *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd) == 0);
78490-
78491-	emap_alloc_ctx_t alloc_ctx;
78492-	if (!size_hint) {
78493-		bool err = emap_alloc_ctx_try_lookup_fast(tsd,
78494-		    &arena_emap_global, ptr, &alloc_ctx);
78495-
78496-		/* Note: sampled (profiled) objects are !slab; the check below catches them. */
78497-		if (unlikely(err || !alloc_ctx.slab ||
78498-		    free_fastpath_nonfast_aligned(ptr,
78499-		    /* check_prof */ false))) {
78500-			return false;
78501-		}
78502-		assert(alloc_ctx.szind != SC_NSIZES);
78503-	} else {
78504-		/*
78505-		 * Check for both sizes that are too large, and for sampled /
78506-		 * special aligned objects.  The alignment check will also check
78507-		 * for null ptr.
78508-		 */
78509-		if (unlikely(size > SC_LOOKUP_MAXCLASS ||
78510-		    free_fastpath_nonfast_aligned(ptr,
78511-		    /* check_prof */ true))) {
78512-			return false;
78513-		}
78514-		alloc_ctx.szind = sz_size2index_lookup(size);
78515-		/* Max lookup class must be small. */
78516-		assert(alloc_ctx.szind < SC_NBINS);
78517-		/* This is a dead store, except when opt size checking is on. */
78518-		alloc_ctx.slab = true;
78519-	}
78520-	/*
78521-	 * Currently the fastpath only handles small sizes.  The branch on
78522-	 * SC_LOOKUP_MAXCLASS makes sure of it.  This lets us avoid checking
78523-	 * tcache szind upper limit (i.e. tcache_maxclass) as well.
78524-	 */
78525-	assert(alloc_ctx.slab);
78526-
78527-	uint64_t deallocated, threshold;
78528-	te_free_fastpath_ctx(tsd, &deallocated, &threshold);
78529-
78530-	size_t usize = sz_index2size(alloc_ctx.szind);
78531-	uint64_t deallocated_after = deallocated + usize;
78532-	/*
78533-	 * Check for events and tsd non-nominal (fast_threshold will be set to
78534-	 * 0) in a single branch.  Note that this handles the uninitialized case
78535-	 * as well (TSD init will be triggered on the non-fastpath).  Therefore,
78536-	 * anything that depends on a functional TSD (e.g. the alloc_ctx sanity
78537-	 * check below) needs to come after this branch.
78538-	 */
78539-	if (unlikely(deallocated_after >= threshold)) {
78540-		return false;
78541-	}
78542-	assert(tsd_fast(tsd));
78543-	bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx);
78544-	if (fail) {
78545-		/* See the comment in isfree. */
78546-		return true;
78547-	}
78548-
78549-	tcache_t *tcache = tcache_get_from_ind(tsd, TCACHE_IND_AUTOMATIC,
78550-	    /* slow */ false, /* is_alloc */ false);
78551-	cache_bin_t *bin = &tcache->bins[alloc_ctx.szind];
78552-
78553-	/*
78554-	 * If junking were enabled, this is where we would do it.  It's not
78555-	 * though, since we ensured above that we're on the fast path.  Assert
78556-	 * that to double-check.
78557-	 */
78558-	assert(!opt_junk_free);
78559-
78560-	if (!cache_bin_dalloc_easy(bin, ptr)) {
78561-		return false;
78562-	}
78563-
78564-	*tsd_thread_deallocatedp_get(tsd) = deallocated_after;
78565-
78566-	return true;
78567-}
78568-
78569-JEMALLOC_EXPORT void JEMALLOC_NOTHROW
78570-je_free(void *ptr) {
78571-	LOG("core.free.entry", "ptr: %p", ptr);
78572-
78573-	if (!free_fastpath(ptr, 0, false)) {
78574-		free_default(ptr);
78575-	}
78576-
78577-	LOG("core.free.exit", "");
78578-}
78579-
78580-/*
78581- * End malloc(3)-compatible functions.
78582- */
78583-/******************************************************************************/
78584-/*
78585- * Begin non-standard override functions.
78586- */
78587-
78588-#ifdef JEMALLOC_OVERRIDE_MEMALIGN
78589-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
78590-void JEMALLOC_NOTHROW *
78591-JEMALLOC_ATTR(malloc)
78592-je_memalign(size_t alignment, size_t size) {
78593-	void *ret;
78594-	static_opts_t sopts;
78595-	dynamic_opts_t dopts;
78596-
78597-	LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment,
78598-	    size);
78599-
78600-	static_opts_init(&sopts);
78601-	dynamic_opts_init(&dopts);
78602-
78603-	sopts.min_alignment = 1;
78604-	sopts.oom_string =
78605-	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
78606-	sopts.invalid_alignment_string =
78607-	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
78608-	sopts.null_out_result_on_error = true;
78609-
78610-	dopts.result = &ret;
78611-	dopts.num_items = 1;
78612-	dopts.item_size = size;
78613-	dopts.alignment = alignment;
78614-
78615-	imalloc(&sopts, &dopts);
78616-	if (sopts.slow) {
78617-		uintptr_t args[3] = {alignment, size};
78618-		hook_invoke_alloc(hook_alloc_memalign, ret, (uintptr_t)ret,
78619-		    args);
78620-	}
78621-
78622-	LOG("core.memalign.exit", "result: %p", ret);
78623-	return ret;
78624-}
78625-#endif
78626-
78627-#ifdef JEMALLOC_OVERRIDE_VALLOC
78628-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
78629-void JEMALLOC_NOTHROW *
78630-JEMALLOC_ATTR(malloc)
78631-je_valloc(size_t size) {
78632-	void *ret;
78633-
78634-	static_opts_t sopts;
78635-	dynamic_opts_t dopts;
78636-
78637-	LOG("core.valloc.entry", "size: %zu\n", size);
78638-
78639-	static_opts_init(&sopts);
78640-	dynamic_opts_init(&dopts);
78641-
78642-	sopts.null_out_result_on_error = true;
78643-	sopts.min_alignment = PAGE;
78644-	sopts.oom_string =
78645-	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
78646-	sopts.invalid_alignment_string =
78647-	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
78648-
78649-	dopts.result = &ret;
78650-	dopts.num_items = 1;
78651-	dopts.item_size = size;
78652-	dopts.alignment = PAGE;
78653-
78654-	imalloc(&sopts, &dopts);
78655-	if (sopts.slow) {
78656-		uintptr_t args[3] = {size};
78657-		hook_invoke_alloc(hook_alloc_valloc, ret, (uintptr_t)ret, args);
78658-	}
78659-
78660-	LOG("core.valloc.exit", "result: %p\n", ret);
78661-	return ret;
78662-}
78663-#endif
78664-
78665-#if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
78666-/*
78667- * glibc provides the RTLD_DEEPBIND flag for dlopen, which can make it
78668- * possible to inconsistently reference libc's malloc(3)-compatible functions
78669- * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
78670- *
78671- * These definitions interpose hooks in glibc.  The functions are actually
78672- * passed an extra argument for the caller return address, which will be
78673- * ignored.
78674- */
78675-#include <features.h> // defines __GLIBC__ if we are compiling against glibc
78676-
78677-JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
78678-JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
78679-JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
78680-#  ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
78681-JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
78682-    je_memalign;
78683-#  endif
78684-
78685-#  ifdef __GLIBC__
78686-/*
78687- * To enable static linking with glibc, the libc-specific malloc interface
78688- * must also be implemented, so that none of glibc's malloc.o functions are
78689- * added to the link.
78690- */
78691-#    define ALIAS(je_fn)	__attribute__((alias (#je_fn), used))
78692-/* To force macro expansion of je_ prefix before stringification. */
78693-#    define PREALIAS(je_fn)	ALIAS(je_fn)
78694-#    ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
78695-void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
78696-#    endif
78697-#    ifdef JEMALLOC_OVERRIDE___LIBC_FREE
78698-void __libc_free(void* ptr) PREALIAS(je_free);
78699-#    endif
78700-#    ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
78701-void *__libc_malloc(size_t size) PREALIAS(je_malloc);
78702-#    endif
78703-#    ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
78704-void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
78705-#    endif
78706-#    ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
78707-void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
78708-#    endif
78709-#    ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
78710-void *__libc_valloc(size_t size) PREALIAS(je_valloc);
78711-#    endif
78712-#    ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
78713-int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
78714-#    endif
78715-#    undef PREALIAS
78716-#    undef ALIAS
78717-#  endif
78718-#endif
78719-
78720-/*
78721- * End non-standard override functions.
78722- */
78723-/******************************************************************************/
78724-/*
78725- * Begin non-standard functions.
78726- */
78727-
78728-JEMALLOC_ALWAYS_INLINE unsigned
78729-mallocx_tcache_get(int flags) {
78730-	if (likely((flags & MALLOCX_TCACHE_MASK) == 0)) {
78731-		return TCACHE_IND_AUTOMATIC;
78732-	} else if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
78733-		return TCACHE_IND_NONE;
78734-	} else {
78735-		return MALLOCX_TCACHE_GET(flags);
78736-	}
78737-}
78738-
78739-JEMALLOC_ALWAYS_INLINE unsigned
78740-mallocx_arena_get(int flags) {
78741-	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
78742-		return MALLOCX_ARENA_GET(flags);
78743-	} else {
78744-		return ARENA_IND_AUTOMATIC;
78745-	}
78746-}
78747-
78748-#ifdef JEMALLOC_EXPERIMENTAL_SMALLOCX_API
78749-
78750-#define JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) x ## y
78751-#define JEMALLOC_SMALLOCX_CONCAT_HELPER2(x, y)  \
78752-  JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y)
78753-
78754-typedef struct {
78755-	void *ptr;
78756-	size_t size;
78757-} smallocx_return_t;
78758-
78759-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
78760-smallocx_return_t JEMALLOC_NOTHROW
78761-/*
78762- * The attribute JEMALLOC_ATTR(malloc) cannot be used due to:
78763- *  - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86488
78764- */
78765-JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT)
78766-  (size_t size, int flags) {
78767-	/*
78768-	 * Note: the attribute JEMALLOC_ALLOC_SIZE(1) cannot be
78769-	 * used here because it makes writing beyond the `size`
78770-	 * of the `ptr` undefined behavior, but the objective
78771-	 * of this function is to allow writing beyond `size`
78772-	 * up to `smallocx_return_t::size`.
78773-	 */
78774-	smallocx_return_t ret;
78775-	static_opts_t sopts;
78776-	dynamic_opts_t dopts;
78777-
78778-	LOG("core.smallocx.entry", "size: %zu, flags: %d", size, flags);
78779-
78780-	static_opts_init(&sopts);
78781-	dynamic_opts_init(&dopts);
78782-
78783-	sopts.assert_nonempty_alloc = true;
78784-	sopts.null_out_result_on_error = true;
78785-	sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
78786-	sopts.usize = true;
78787-
78788-	dopts.result = &ret.ptr;
78789-	dopts.num_items = 1;
78790-	dopts.item_size = size;
78791-	if (unlikely(flags != 0)) {
78792-		dopts.alignment = MALLOCX_ALIGN_GET(flags);
78793-		dopts.zero = MALLOCX_ZERO_GET(flags);
78794-		dopts.tcache_ind = mallocx_tcache_get(flags);
78795-		dopts.arena_ind = mallocx_arena_get(flags);
78796-	}
78797-
78798-	imalloc(&sopts, &dopts);
78799-	assert(dopts.usize == je_nallocx(size, flags));
78800-	ret.size = dopts.usize;
78801-
78802-	LOG("core.smallocx.exit", "result: %p, size: %zu", ret.ptr, ret.size);
78803-	return ret;
78804-}
78805-#undef JEMALLOC_SMALLOCX_CONCAT_HELPER
78806-#undef JEMALLOC_SMALLOCX_CONCAT_HELPER2
78807-#endif
78808-
78809-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
78810-void JEMALLOC_NOTHROW *
78811-JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
78812-je_mallocx(size_t size, int flags) {
78813-	void *ret;
78814-	static_opts_t sopts;
78815-	dynamic_opts_t dopts;
78816-
78817-	LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);
78818-
78819-	static_opts_init(&sopts);
78820-	dynamic_opts_init(&dopts);
78821-
78822-	sopts.assert_nonempty_alloc = true;
78823-	sopts.null_out_result_on_error = true;
78824-	sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
78825-
78826-	dopts.result = &ret;
78827-	dopts.num_items = 1;
78828-	dopts.item_size = size;
78829-	if (unlikely(flags != 0)) {
78830-		dopts.alignment = MALLOCX_ALIGN_GET(flags);
78831-		dopts.zero = MALLOCX_ZERO_GET(flags);
78832-		dopts.tcache_ind = mallocx_tcache_get(flags);
78833-		dopts.arena_ind = mallocx_arena_get(flags);
78834-	}
78835-
78836-	imalloc(&sopts, &dopts);
78837-	if (sopts.slow) {
78838-		uintptr_t args[3] = {size, flags};
78839-		hook_invoke_alloc(hook_alloc_mallocx, ret, (uintptr_t)ret,
78840-		    args);
78841-	}
78842-
78843-	LOG("core.mallocx.exit", "result: %p", ret);
78844-	return ret;
78845-}
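A caller-side sketch of the flag encodings that mallocx_tcache_get and mallocx_arena_get decode above, using the public MALLOCX_* macros (assuming an unprefixed public API; with a je_/custom prefix the call is spelled accordingly):

#include <jemalloc/jemalloc.h>

static void *
example_mallocx(void) {
	/* 64-byte-aligned, zero-filled, bypassing the thread cache. */
	return mallocx(1024,
	    MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE);
}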
78846-
78847-static void *
78848-irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
78849-    size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
78850-    prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) {
78851-	void *p;
78852-
78853-	if (tctx == NULL) {
78854-		return NULL;
78855-	}
78856-
78857-	alignment = prof_sample_align(alignment);
78858-	if (usize <= SC_SMALL_MAXCLASS) {
78859-		p = iralloct(tsdn, old_ptr, old_usize,
78860-		    SC_LARGE_MINCLASS, alignment, zero, tcache,
78861-		    arena, hook_args);
78862-		if (p == NULL) {
78863-			return NULL;
78864-		}
78865-		arena_prof_promote(tsdn, p, usize);
78866-	} else {
78867-		p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
78868-		    tcache, arena, hook_args);
78869-	}
78870-	assert(prof_sample_aligned(p));
78871-
78872-	return p;
78873-}
78874-
78875-JEMALLOC_ALWAYS_INLINE void *
78876-irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
78877-    size_t alignment, size_t usize, bool zero, tcache_t *tcache,
78878-    arena_t *arena, emap_alloc_ctx_t *alloc_ctx,
78879-    hook_ralloc_args_t *hook_args) {
78880-	prof_info_t old_prof_info;
78881-	prof_info_get_and_reset_recent(tsd, old_ptr, alloc_ctx, &old_prof_info);
78882-	bool prof_active = prof_active_get_unlocked();
78883-	bool sample_event = te_prof_sample_event_lookahead(tsd, usize);
78884-	prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active, sample_event);
78885-	void *p;
78886-	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
78887-		p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
78888-		    usize, alignment, zero, tcache, arena, tctx, hook_args);
78889-	} else {
78890-		p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
78891-		    zero, tcache, arena, hook_args);
78892-	}
78893-	if (unlikely(p == NULL)) {
78894-		prof_alloc_rollback(tsd, tctx);
78895-		return NULL;
78896-	}
78897-	assert(usize == isalloc(tsd_tsdn(tsd), p));
78898-	prof_realloc(tsd, p, size, usize, tctx, prof_active, old_ptr,
78899-	    old_usize, &old_prof_info, sample_event);
78900-
78901-	return p;
78902-}
78903-
78904-static void *
78905-do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) {
78906-	void *p;
78907-	tsd_t *tsd;
78908-	size_t usize;
78909-	size_t old_usize;
78910-	size_t alignment = MALLOCX_ALIGN_GET(flags);
78911-	arena_t *arena;
78912-
78913-	assert(ptr != NULL);
78914-	assert(size != 0);
78915-	assert(malloc_initialized() || IS_INITIALIZER);
78916-	tsd = tsd_fetch();
78917-	check_entry_exit_locking(tsd_tsdn(tsd));
78918-
78919-	bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true);
78920-
78921-	unsigned arena_ind = mallocx_arena_get(flags);
78922-	if (arena_get_from_ind(tsd, arena_ind, &arena)) {
78923-		goto label_oom;
78924-	}
78925-
78926-	unsigned tcache_ind = mallocx_tcache_get(flags);
78927-	tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind,
78928-	    /* slow */ true, /* is_alloc */ true);
78929-
78930-	emap_alloc_ctx_t alloc_ctx;
78931-	emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
78932-	    &alloc_ctx);
78933-	assert(alloc_ctx.szind != SC_NSIZES);
78934-	old_usize = sz_index2size(alloc_ctx.szind);
78935-	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
78936-	if (aligned_usize_get(size, alignment, &usize, NULL, false)) {
78937-		goto label_oom;
78938-	}
78939-
78940-	hook_ralloc_args_t hook_args = {is_realloc, {(uintptr_t)ptr, size,
78941-		flags, 0}};
78942-	if (config_prof && opt_prof) {
78943-		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, usize,
78944-		    zero, tcache, arena, &alloc_ctx, &hook_args);
78945-		if (unlikely(p == NULL)) {
78946-			goto label_oom;
78947-		}
78948-	} else {
78949-		p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
78950-		    zero, tcache, arena, &hook_args);
78951-		if (unlikely(p == NULL)) {
78952-			goto label_oom;
78953-		}
78954-		assert(usize == isalloc(tsd_tsdn(tsd), p));
78955-	}
78956-	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
78957-	thread_alloc_event(tsd, usize);
78958-	thread_dalloc_event(tsd, old_usize);
78959-
78960-	UTRACE(ptr, size, p);
78961-	check_entry_exit_locking(tsd_tsdn(tsd));
78962-
78963-	if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize
78964-	    && !zero) {
78965-		size_t excess_len = usize - old_usize;
78966-		void *excess_start = (void *)((uintptr_t)p + old_usize);
78967-		junk_alloc_callback(excess_start, excess_len);
78968-	}
78969-
78970-	return p;
78971-label_oom:
78972-	if (config_xmalloc && unlikely(opt_xmalloc)) {
78973-		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
78974-		abort();
78975-	}
78976-	UTRACE(ptr, size, 0);
78977-	check_entry_exit_locking(tsd_tsdn(tsd));
78978-
78979-	return NULL;
78980-}
78981-
78982-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
78983-void JEMALLOC_NOTHROW *
78984-JEMALLOC_ALLOC_SIZE(2)
78985-je_rallocx(void *ptr, size_t size, int flags) {
78986-	LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
78987-	    size, flags);
78988-	void *ret = do_rallocx(ptr, size, flags, false);
78989-	LOG("core.rallocx.exit", "result: %p", ret);
78990-	return ret;
78991-}
78992-
78993-static void *
78994-do_realloc_nonnull_zero(void *ptr) {
78995-	if (config_stats) {
78996-		atomic_fetch_add_zu(&zero_realloc_count, 1, ATOMIC_RELAXED);
78997-	}
78998-	if (opt_zero_realloc_action == zero_realloc_action_alloc) {
78999-		/*
79000-		 * The user might have gotten an alloc setting while expecting a
79001-		 * free setting.  If that's the case, we at least try to
79002-		 * reduce the harm, and turn off the tcache while allocating, so
79003-		 * that we'll get a true first fit.
79004-		 */
79005-		return do_rallocx(ptr, 1, MALLOCX_TCACHE_NONE, true);
79006-	} else if (opt_zero_realloc_action == zero_realloc_action_free) {
79007-		UTRACE(ptr, 0, 0);
79008-		tsd_t *tsd = tsd_fetch();
79009-		check_entry_exit_locking(tsd_tsdn(tsd));
79010-
79011-		tcache_t *tcache = tcache_get_from_ind(tsd,
79012-		    TCACHE_IND_AUTOMATIC, /* slow */ true,
79013-		    /* is_alloc */ false);
79014-		uintptr_t args[3] = {(uintptr_t)ptr, 0};
79015-		hook_invoke_dalloc(hook_dalloc_realloc, ptr, args);
79016-		ifree(tsd, ptr, tcache, true);
79017-
79018-		check_entry_exit_locking(tsd_tsdn(tsd));
79019-		return NULL;
79020-	} else {
79021-		safety_check_fail("Called realloc(non-null-ptr, 0) with "
79022-		    "zero_realloc:abort set\n");
79023-		/*
79024-		 * In real code, this never runs; the safety check failure
79025-		 * calls abort.  In the unit test, we just want to bail out
79026-		 * without corrupting internal state the test needs to finish.
79027-		 */
79028-		return NULL;
79029-	}
79030-}
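
/*
 * In short (a descriptive summary of the dispatch above, not a normative
 * statement of the option): zero_realloc:alloc serves realloc(ptr, 0) as a
 * tcache-bypassing reallocation to the smallest size class,
 * zero_realloc:free behaves like free(ptr) and returns NULL, and
 * zero_realloc:abort trips the safety check.
 */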
79031-
79032-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
79033-void JEMALLOC_NOTHROW *
79034-JEMALLOC_ALLOC_SIZE(2)
79035-je_realloc(void *ptr, size_t size) {
79036-	LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
79037-
79038-	if (likely(ptr != NULL && size != 0)) {
79039-		void *ret = do_rallocx(ptr, size, 0, true);
79040-		LOG("core.realloc.exit", "result: %p", ret);
79041-		return ret;
79042-	} else if (ptr != NULL && size == 0) {
79043-		void *ret = do_realloc_nonnull_zero(ptr);
79044-		LOG("core.realloc.exit", "result: %p", ret);
79045-		return ret;
79046-	} else {
79047-		/* realloc(NULL, size) is equivalent to malloc(size). */
79048-		void *ret;
79049-
79050-		static_opts_t sopts;
79051-		dynamic_opts_t dopts;
79052-
79053-		static_opts_init(&sopts);
79054-		dynamic_opts_init(&dopts);
79055-
79056-		sopts.null_out_result_on_error = true;
79057-		sopts.set_errno_on_error = true;
79058-		sopts.oom_string =
79059-		    "<jemalloc>: Error in realloc(): out of memory\n";
79060-
79061-		dopts.result = &ret;
79062-		dopts.num_items = 1;
79063-		dopts.item_size = size;
79064-
79065-		imalloc(&sopts, &dopts);
79066-		if (sopts.slow) {
79067-			uintptr_t args[3] = {(uintptr_t)ptr, size};
79068-			hook_invoke_alloc(hook_alloc_realloc, ret,
79069-			    (uintptr_t)ret, args);
79070-		}
79071-		LOG("core.realloc.exit", "result: %p", ret);
79072-		return ret;
79073-	}
79074-}
79075-
79076-JEMALLOC_ALWAYS_INLINE size_t
79077-ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
79078-    size_t extra, size_t alignment, bool zero) {
79079-	size_t newsize;
79080-
79081-	if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero,
79082-	    &newsize)) {
79083-		return old_usize;
79084-	}
79085-
79086-	return newsize;
79087-}
79088-
79089-static size_t
79090-ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
79091-    size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
79092-	/* Sampled allocation needs to be page aligned. */
79093-	if (tctx == NULL || !prof_sample_aligned(ptr)) {
79094-		return old_usize;
79095-	}
79096-
79097-	return ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
79098-	    zero);
79099-}
79100-
79101-JEMALLOC_ALWAYS_INLINE size_t
79102-ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
79103-    size_t extra, size_t alignment, bool zero, emap_alloc_ctx_t *alloc_ctx) {
79104-	/*
79105-	 * old_prof_info is only used for asserting that the profiling info
79106-	 * isn't changed by the ixalloc() call.
79107-	 */
79108-	prof_info_t old_prof_info;
79109-	prof_info_get(tsd, ptr, alloc_ctx, &old_prof_info);
79110-
79111-	/*
79112-	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
79113-	 * Therefore, compute its maximum possible value and use that in
79114-	 * prof_alloc_prep() to decide whether to capture a backtrace.
79115-	 * prof_realloc() will use the actual usize to decide whether to sample.
79116-	 */
79117-	size_t usize_max;
79118-	if (aligned_usize_get(size + extra, alignment, &usize_max, NULL,
79119-	    false)) {
79120-		/*
79121-		 * usize_max is out of range, and chances are that allocation
79122-		 * will fail, but use the maximum possible value and carry on
79123-		 * with prof_alloc_prep(), just in case allocation succeeds.
79124-		 */
79125-		usize_max = SC_LARGE_MAXCLASS;
79126-	}
79127-	bool prof_active = prof_active_get_unlocked();
79128-	bool sample_event = te_prof_sample_event_lookahead(tsd, usize_max);
79129-	prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active, sample_event);
79130-
79131-	size_t usize;
79132-	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
79133-		usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
79134-		    size, extra, alignment, zero, tctx);
79135-	} else {
79136-		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
79137-		    extra, alignment, zero);
79138-	}
79139-
79140-	/*
79141-	 * At this point we can still safely get the original profiling
79142-	 * information associated with the ptr, because (a) the edata_t object
79143-	 * associated with the ptr still lives and (b) the profiling info
79144-	 * fields are not touched.  "(a)" is asserted in the outer je_xallocx()
79145-	 * function, and "(b)" is indirectly verified below by checking that
79146-	 * the alloc_tctx field is unchanged.
79147-	 */
79148-	prof_info_t prof_info;
79149-	if (usize == old_usize) {
79150-		prof_info_get(tsd, ptr, alloc_ctx, &prof_info);
79151-		prof_alloc_rollback(tsd, tctx);
79152-	} else {
79153-		prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info);
79154-		assert(usize <= usize_max);
79155-		sample_event = te_prof_sample_event_lookahead(tsd, usize);
79156-		prof_realloc(tsd, ptr, size, usize, tctx, prof_active, ptr,
79157-		    old_usize, &prof_info, sample_event);
79158-	}
79159-
79160-	assert(old_prof_info.alloc_tctx == prof_info.alloc_tctx);
79161-	return usize;
79162-}
79163-
79164-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
79165-je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
79166-	tsd_t *tsd;
79167-	size_t usize, old_usize;
79168-	size_t alignment = MALLOCX_ALIGN_GET(flags);
79169-	bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true);
79170-
79171-	LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
79172-	    "flags: %d", ptr, size, extra, flags);
79173-
79174-	assert(ptr != NULL);
79175-	assert(size != 0);
79176-	assert(SIZE_T_MAX - size >= extra);
79177-	assert(malloc_initialized() || IS_INITIALIZER);
79178-	tsd = tsd_fetch();
79179-	check_entry_exit_locking(tsd_tsdn(tsd));
79180-
79181-	/*
79182-	 * old_edata is only for verifying that xallocx() keeps the edata_t
79183-	 * object associated with the ptr (though the content of the edata_t
79184-	 * object can be changed).
79185-	 */
79186-	edata_t *old_edata = emap_edata_lookup(tsd_tsdn(tsd),
79187-	    &arena_emap_global, ptr);
79188-
79189-	emap_alloc_ctx_t alloc_ctx;
79190-	emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
79191-	    &alloc_ctx);
79192-	assert(alloc_ctx.szind != SC_NSIZES);
79193-	old_usize = sz_index2size(alloc_ctx.szind);
79194-	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
79195-	/*
79196-	 * The API explicitly absolves itself of protecting against (size +
79197-	 * extra) numerical overflow, but we may need to clamp extra to avoid
79198-	 * exceeding SC_LARGE_MAXCLASS.
79199-	 *
79200-	 * Ordinarily, size limit checking is handled deeper down, but here we
79201-	 * have to check as part of (size + extra) clamping, since we need the
79202-	 * clamped value in the above helper functions.
79203-	 */
79204-	if (unlikely(size > SC_LARGE_MAXCLASS)) {
79205-		usize = old_usize;
79206-		goto label_not_resized;
79207-	}
79208-	if (unlikely(SC_LARGE_MAXCLASS - size < extra)) {
79209-		extra = SC_LARGE_MAXCLASS - size;
79210-	}
79211-
79212-	if (config_prof && opt_prof) {
79213-		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
79214-		    alignment, zero, &alloc_ctx);
79215-	} else {
79216-		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
79217-		    extra, alignment, zero);
79218-	}
79219-
79220-	/*
79221-	 * xallocx() should keep using the same edata_t object (though its
79222-	 * content can be changed).
79223-	 */
79224-	assert(emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr)
79225-	    == old_edata);
79226-
79227-	if (unlikely(usize == old_usize)) {
79228-		goto label_not_resized;
79229-	}
79230-	thread_alloc_event(tsd, usize);
79231-	thread_dalloc_event(tsd, old_usize);
79232-
79233-	if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize &&
79234-	    !zero) {
79235-		size_t excess_len = usize - old_usize;
79236-		void *excess_start = (void *)((uintptr_t)ptr + old_usize);
79237-		junk_alloc_callback(excess_start, excess_len);
79238-	}
79239-label_not_resized:
79240-	if (unlikely(!tsd_fast(tsd))) {
79241-		uintptr_t args[4] = {(uintptr_t)ptr, size, extra, flags};
79242-		hook_invoke_expand(hook_expand_xallocx, ptr, old_usize,
79243-		    usize, (uintptr_t)usize, args);
79244-	}
79245-
79246-	UTRACE(ptr, size, ptr);
79247-	check_entry_exit_locking(tsd_tsdn(tsd));
79248-
79249-	LOG("core.xallocx.exit", "result: %zu", usize);
79250-	return usize;
79251-}
79252-
79253-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
79254-JEMALLOC_ATTR(pure)
79255-je_sallocx(const void *ptr, int flags) {
79256-	size_t usize;
79257-	tsdn_t *tsdn;
79258-
79259-	LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);
79260-
79261-	assert(malloc_initialized() || IS_INITIALIZER);
79262-	assert(ptr != NULL);
79263-
79264-	tsdn = tsdn_fetch();
79265-	check_entry_exit_locking(tsdn);
79266-
79267-	if (config_debug || force_ivsalloc) {
79268-		usize = ivsalloc(tsdn, ptr);
79269-		assert(force_ivsalloc || usize != 0);
79270-	} else {
79271-		usize = isalloc(tsdn, ptr);
79272-	}
79273-
79274-	check_entry_exit_locking(tsdn);
79275-
79276-	LOG("core.sallocx.exit", "result: %zu", usize);
79277-	return usize;
79278-}
79279-
79280-JEMALLOC_EXPORT void JEMALLOC_NOTHROW
79281-je_dallocx(void *ptr, int flags) {
79282-	LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);
79283-
79284-	assert(ptr != NULL);
79285-	assert(malloc_initialized() || IS_INITIALIZER);
79286-
79287-	tsd_t *tsd = tsd_fetch_min();
79288-	bool fast = tsd_fast(tsd);
79289-	check_entry_exit_locking(tsd_tsdn(tsd));
79290-
79291-	unsigned tcache_ind = mallocx_tcache_get(flags);
79292-	tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, !fast,
79293-	    /* is_alloc */ false);
79294-
79295-	UTRACE(ptr, 0, 0);
79296-	if (likely(fast)) {
79297-		tsd_assert_fast(tsd);
79298-		ifree(tsd, ptr, tcache, false);
79299-	} else {
79300-		uintptr_t args_raw[3] = {(uintptr_t)ptr, flags};
79301-		hook_invoke_dalloc(hook_dalloc_dallocx, ptr, args_raw);
79302-		ifree(tsd, ptr, tcache, true);
79303-	}
79304-	check_entry_exit_locking(tsd_tsdn(tsd));
79305-
79306-	LOG("core.dallocx.exit", "");
79307-}
79308-
79309-JEMALLOC_ALWAYS_INLINE size_t
79310-inallocx(tsdn_t *tsdn, size_t size, int flags) {
79311-	check_entry_exit_locking(tsdn);
79312-	size_t usize;
79313-	/* If size is out of range, let the user see it rather than failing. */
79314-	aligned_usize_get(size, MALLOCX_ALIGN_GET(flags), &usize, NULL, false);
79315-	check_entry_exit_locking(tsdn);
79316-	return usize;
79317-}
79318-
79319-JEMALLOC_NOINLINE void
79320-sdallocx_default(void *ptr, size_t size, int flags) {
79321-	assert(ptr != NULL);
79322-	assert(malloc_initialized() || IS_INITIALIZER);
79323-
79324-	tsd_t *tsd = tsd_fetch_min();
79325-	bool fast = tsd_fast(tsd);
79326-	size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
79327-	check_entry_exit_locking(tsd_tsdn(tsd));
79328-
79329-	unsigned tcache_ind = mallocx_tcache_get(flags);
79330-	tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, !fast,
79331-	    /* is_alloc */ false);
79332-
79333-	UTRACE(ptr, 0, 0);
79334-	if (likely(fast)) {
79335-		tsd_assert_fast(tsd);
79336-		isfree(tsd, ptr, usize, tcache, false);
79337-	} else {
79338-		uintptr_t args_raw[3] = {(uintptr_t)ptr, size, flags};
79339-		hook_invoke_dalloc(hook_dalloc_sdallocx, ptr, args_raw);
79340-		isfree(tsd, ptr, usize, tcache, true);
79341-	}
79342-	check_entry_exit_locking(tsd_tsdn(tsd));
79343-}
79344-
79345-JEMALLOC_EXPORT void JEMALLOC_NOTHROW
79346-je_sdallocx(void *ptr, size_t size, int flags) {
79347-	LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
79348-		size, flags);
79349-
79350-	if (flags != 0 || !free_fastpath(ptr, size, true)) {
79351-		sdallocx_default(ptr, size, flags);
79352-	}
79353-
79354-	LOG("core.sdallocx.exit", "");
79355-}
79356-
79357-void JEMALLOC_NOTHROW
79358-je_sdallocx_noflags(void *ptr, size_t size) {
79359-	LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: 0", ptr,
79360-		size);
79361-
79362-	if (!free_fastpath(ptr, size, true)) {
79363-		sdallocx_default(ptr, size, 0);
79364-	}
79365-
79366-	LOG("core.sdallocx.exit", "");
79367-}
79368-
79369-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
79370-JEMALLOC_ATTR(pure)
79371-je_nallocx(size_t size, int flags) {
79372-	size_t usize;
79373-	tsdn_t *tsdn;
79374-
79375-	assert(size != 0);
79376-
79377-	if (unlikely(malloc_init())) {
79378-		LOG("core.nallocx.exit", "result: %zu", ZU(0));
79379-		return 0;
79380-	}
79381-
79382-	tsdn = tsdn_fetch();
79383-	check_entry_exit_locking(tsdn);
79384-
79385-	usize = inallocx(tsdn, size, flags);
79386-	if (unlikely(usize > SC_LARGE_MAXCLASS)) {
79387-		LOG("core.nallocx.exit", "result: %zu", ZU(0));
79388-		return 0;
79389-	}
79390-
79391-	check_entry_exit_locking(tsdn);
79392-	LOG("core.nallocx.exit", "result: %zu", usize);
79393-	return usize;
79394-}
79395-
79396-JEMALLOC_EXPORT int JEMALLOC_NOTHROW
79397-je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
79398-    size_t newlen) {
79399-	int ret;
79400-	tsd_t *tsd;
79401-
79402-	LOG("core.mallctl.entry", "name: %s", name);
79403-
79404-	if (unlikely(malloc_init())) {
79405-		LOG("core.mallctl.exit", "result: %d", EAGAIN);
79406-		return EAGAIN;
79407-	}
79408-
79409-	tsd = tsd_fetch();
79410-	check_entry_exit_locking(tsd_tsdn(tsd));
79411-	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
79412-	check_entry_exit_locking(tsd_tsdn(tsd));
79413-
79414-	LOG("core.mallctl.exit", "result: %d", ret);
79415-	return ret;
79416-}
79417-
79418-JEMALLOC_EXPORT int JEMALLOC_NOTHROW
79419-je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
79420-	int ret;
79421-
79422-	LOG("core.mallctlnametomib.entry", "name: %s", name);
79423-
79424-	if (unlikely(malloc_init())) {
79425-		LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
79426-		return EAGAIN;
79427-	}
79428-
79429-	tsd_t *tsd = tsd_fetch();
79430-	check_entry_exit_locking(tsd_tsdn(tsd));
79431-	ret = ctl_nametomib(tsd, name, mibp, miblenp);
79432-	check_entry_exit_locking(tsd_tsdn(tsd));
79433-
79434-	LOG("core.mallctlnametomib.exit", "result: %d", ret);
79435-	return ret;
79436-}
79437-
79438-JEMALLOC_EXPORT int JEMALLOC_NOTHROW
79439-je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
79440-  void *newp, size_t newlen) {
79441-	int ret;
79442-	tsd_t *tsd;
79443-
79444-	LOG("core.mallctlbymib.entry", "");
79445-
79446-	if (unlikely(malloc_init())) {
79447-		LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
79448-		return EAGAIN;
79449-	}
79450-
79451-	tsd = tsd_fetch();
79452-	check_entry_exit_locking(tsd_tsdn(tsd));
79453-	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
79454-	check_entry_exit_locking(tsd_tsdn(tsd));
79455-	LOG("core.mallctlbymib.exit", "result: %d", ret);
79456-	return ret;
79457-}
79458-
79459-#define STATS_PRINT_BUFSIZE 65536
79460-JEMALLOC_EXPORT void JEMALLOC_NOTHROW
79461-je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
79462-    const char *opts) {
79463-	tsdn_t *tsdn;
79464-
79465-	LOG("core.malloc_stats_print.entry", "");
79466-
79467-	tsdn = tsdn_fetch();
79468-	check_entry_exit_locking(tsdn);
79469-
79470-	if (config_debug) {
79471-		stats_print(write_cb, cbopaque, opts);
79472-	} else {
79473-		buf_writer_t buf_writer;
79474-		buf_writer_init(tsdn, &buf_writer, write_cb, cbopaque, NULL,
79475-		    STATS_PRINT_BUFSIZE);
79476-		stats_print(buf_writer_cb, &buf_writer, opts);
79477-		buf_writer_terminate(tsdn, &buf_writer);
79478-	}
79479-
79480-	check_entry_exit_locking(tsdn);
79481-	LOG("core.malloc_stats_print.exit", "");
79482-}
79483-#undef STATS_PRINT_BUFSIZE
79484-
79485-JEMALLOC_ALWAYS_INLINE size_t
79486-je_malloc_usable_size_impl(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
79487-	assert(malloc_initialized() || IS_INITIALIZER);
79488-
79489-	tsdn_t *tsdn = tsdn_fetch();
79490-	check_entry_exit_locking(tsdn);
79491-
79492-	size_t ret;
79493-	if (unlikely(ptr == NULL)) {
79494-		ret = 0;
79495-	} else {
79496-		if (config_debug || force_ivsalloc) {
79497-			ret = ivsalloc(tsdn, ptr);
79498-			assert(force_ivsalloc || ret != 0);
79499-		} else {
79500-			ret = isalloc(tsdn, ptr);
79501-		}
79502-	}
79503-	check_entry_exit_locking(tsdn);
79504-
79505-	return ret;
79506-}
79507-
79508-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
79509-je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
79510-	LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
79511-
79512-	size_t ret = je_malloc_usable_size_impl(ptr);
79513-
79514-	LOG("core.malloc_usable_size.exit", "result: %zu", ret);
79515-	return ret;
79516-}
79517-
79518-#ifdef JEMALLOC_HAVE_MALLOC_SIZE
79519-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
79520-je_malloc_size(const void *ptr) {
79521-	LOG("core.malloc_size.entry", "ptr: %p", ptr);
79522-
79523-	size_t ret = je_malloc_usable_size_impl(ptr);
79524-
79525-	LOG("core.malloc_size.exit", "result: %zu", ret);
79526-	return ret;
79527-}
79528-#endif
79529-
79530-static void
79531-batch_alloc_prof_sample_assert(tsd_t *tsd, size_t batch, size_t usize) {
79532-	assert(config_prof && opt_prof);
79533-	bool prof_sample_event = te_prof_sample_event_lookahead(tsd,
79534-	    batch * usize);
79535-	assert(!prof_sample_event);
79536-	size_t surplus;
79537-	prof_sample_event = te_prof_sample_event_lookahead_surplus(tsd,
79538-	    (batch + 1) * usize, &surplus);
79539-	assert(prof_sample_event);
79540-	assert(surplus < usize);
79541-}
79542-
79543-size_t
79544-batch_alloc(void **ptrs, size_t num, size_t size, int flags) {
79545-	LOG("core.batch_alloc.entry",
79546-	    "ptrs: %p, num: %zu, size: %zu, flags: %d", ptrs, num, size, flags);
79547-
79548-	tsd_t *tsd = tsd_fetch();
79549-	check_entry_exit_locking(tsd_tsdn(tsd));
79550-
79551-	size_t filled = 0;
79552-
79553-	if (unlikely(tsd == NULL || tsd_reentrancy_level_get(tsd) > 0)) {
79554-		goto label_done;
79555-	}
79556-
79557-	size_t alignment = MALLOCX_ALIGN_GET(flags);
79558-	size_t usize;
79559-	if (aligned_usize_get(size, alignment, &usize, NULL, false)) {
79560-		goto label_done;
79561-	}
79562-	szind_t ind = sz_size2index(usize);
79563-	bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true);
79564-
79565-	/*
79566-	 * The cache bin and arena will be lazily initialized; it's hard to
79567-	 * know in advance whether each of them needs to be initialized.
79568-	 */
79569-	cache_bin_t *bin = NULL;
79570-	arena_t *arena = NULL;
79571-
79572-	size_t nregs = 0;
79573-	if (likely(ind < SC_NBINS)) {
79574-		nregs = bin_infos[ind].nregs;
79575-		assert(nregs > 0);
79576-	}
79577-
79578-	while (filled < num) {
79579-		size_t batch = num - filled;
79580-		size_t surplus = SIZE_MAX; /* Dead store. */
79581-		bool prof_sample_event = config_prof && opt_prof
79582-		    && prof_active_get_unlocked()
79583-		    && te_prof_sample_event_lookahead_surplus(tsd,
79584-		    batch * usize, &surplus);
79585-
79586-		if (prof_sample_event) {
79587-			/*
79588-			 * Adjust so that the batch does not trigger prof
79589-			 * sampling.
79590-			 */
79591-			batch -= surplus / usize + 1;
79592-			batch_alloc_prof_sample_assert(tsd, batch, usize);
79593-		}
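
		/*
		 * A rough worked example of the adjustment above, assuming
		 * surplus is the amount by which batch * usize overshoots
		 * the next sampling threshold: with usize == 4096 and
		 * batch == 8, the lookahead asks about 32768 bytes; if the
		 * threshold sits 22768 bytes away, surplus is 10000 and the
		 * batch shrinks by 10000 / 4096 + 1 == 3 down to 5.  Then
		 * 5 * 4096 == 20480 stays below the threshold, while one
		 * more allocation would cross it with a remainder smaller
		 * than usize, matching batch_alloc_prof_sample_assert().
		 */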
79594-
79595-		size_t progress = 0;
79596-
79597-		if (likely(ind < SC_NBINS) && batch >= nregs) {
79598-			if (arena == NULL) {
79599-				unsigned arena_ind = mallocx_arena_get(flags);
79600-				if (arena_get_from_ind(tsd, arena_ind,
79601-				    &arena)) {
79602-					goto label_done;
79603-				}
79604-				if (arena == NULL) {
79605-					arena = arena_choose(tsd, NULL);
79606-				}
79607-				if (unlikely(arena == NULL)) {
79608-					goto label_done;
79609-				}
79610-			}
79611-			size_t arena_batch = batch - batch % nregs;
79612-			size_t n = arena_fill_small_fresh(tsd_tsdn(tsd), arena,
79613-			    ind, ptrs + filled, arena_batch, zero);
79614-			progress += n;
79615-			filled += n;
79616-		}
79617-
79618-		if (likely(ind < nhbins) && progress < batch) {
79619-			if (bin == NULL) {
79620-				unsigned tcache_ind = mallocx_tcache_get(flags);
79621-				tcache_t *tcache = tcache_get_from_ind(tsd,
79622-				    tcache_ind, /* slow */ true,
79623-				    /* is_alloc */ true);
79624-				if (tcache != NULL) {
79625-					bin = &tcache->bins[ind];
79626-				}
79627-			}
79628-			/*
79629-			 * If we don't have a tcache bin, we don't want to
79630-			 * immediately give up, because there's the possibility
79631-			 * that the user explicitly requested to bypass the
79632-			 * tcache, or that the user explicitly turned off the
79633-			 * tcache; in such cases, we go through the slow path,
79634-			 * i.e. the mallocx() call at the end of the while loop.
79635-			 */
79636-			if (bin != NULL) {
79637-				size_t bin_batch = batch - progress;
79638-				/*
79639-				 * n can be less than bin_batch, meaning that
79640-				 * the cache bin does not have enough memory.
79641-				 * In such cases, we rely on the slow path,
79642-				 * i.e. the mallocx() call at the end of the
79643-				 * while loop, to refill the cache, and in the
79644-				 * next iteration of the while loop, the tcache
79645-				 * will contain plenty of cached memory, which
79646-				 * we can harvest here.  Compared to the
79647-				 * alternative approach where we directly go to
79648-				 * the arena bins here, the overhead of our
79649-				 * current approach should usually be minimal,
79650-				 * since we never try to fetch more memory than
79651-				 * what a slab contains via the tcache.  An
79652-				 * additional benefit is that the tcache will
79653-				 * not be empty for the next allocation request.
79654-				 */
79655-				size_t n = cache_bin_alloc_batch(bin, bin_batch,
79656-				    ptrs + filled);
79657-				if (config_stats) {
79658-					bin->tstats.nrequests += n;
79659-				}
79660-				if (zero) {
79661-					for (size_t i = 0; i < n; ++i) {
79662-						memset(ptrs[filled + i], 0,
79663-						    usize);
79664-					}
79665-				}
79666-				if (config_prof && opt_prof
79667-				    && unlikely(ind >= SC_NBINS)) {
79668-					for (size_t i = 0; i < n; ++i) {
79669-						prof_tctx_reset_sampled(tsd,
79670-						    ptrs[filled + i]);
79671-					}
79672-				}
79673-				progress += n;
79674-				filled += n;
79675-			}
79676-		}
79677-
79678-		/*
79679-		 * For thread events other than prof sampling, trigger them as
79680-		 * if there's a single allocation of size (n * usize).  This is
79681-		 * fine because:
79682-		 * (a) these events do not alter the allocation itself, and
79683-		 * (b) it's possible that some event would have been triggered
79684-		 *     multiple times, instead of only once, if the allocations
79685-		 *     were handled individually, but it would do no harm (or
79686-		 *     even be beneficial) to coalesce the triggerings.
79687-		 */
79688-		thread_alloc_event(tsd, progress * usize);
79689-
79690-		if (progress < batch || prof_sample_event) {
79691-			void *p = je_mallocx(size, flags);
79692-			if (p == NULL) { /* OOM */
79693-				break;
79694-			}
79695-			if (progress == batch) {
79696-				assert(prof_sampled(tsd, p));
79697-			}
79698-			ptrs[filled++] = p;
79699-		}
79700-	}
79701-
79702-label_done:
79703-	check_entry_exit_locking(tsd_tsdn(tsd));
79704-	LOG("core.batch_alloc.exit", "result: %zu", filled);
79705-	return filled;
79706-}
79707-
79708-/*
79709- * End non-standard functions.
79710- */
79711-/******************************************************************************/
79712-/*
79713- * The following functions are used by threading libraries for protection of
79714- * malloc during fork().
79715- */
79716-
79717-/*
79718- * If an application creates a thread before doing any allocation in the main
79719- * thread, then calls fork(2) in the main thread followed by memory allocation
79720- * in the child process, a race can occur that results in deadlock within the
79721- * child: the main thread may have forked while the created thread had
79722- * partially initialized the allocator.  Ordinarily jemalloc prevents
79723- * fork/malloc races via the following functions it registers during
79724- * initialization using pthread_atfork(), but of course that does no good if
79725- * the allocator isn't fully initialized at fork time.  The following library
79726- * constructor is a partial solution to this problem.  It may still be possible
79727- * to trigger the deadlock described above, but doing so would involve forking
79728- * via a library constructor that runs before jemalloc's constructor runs.
79729- */
79730-#ifndef JEMALLOC_JET
79731-JEMALLOC_ATTR(constructor)
79732-static void
79733-jemalloc_constructor(void) {
79734-	malloc_init();
79735-}
79736-#endif
79737-
79738-#ifndef JEMALLOC_MUTEX_INIT_CB
79739-void
79740-jemalloc_prefork(void)
79741-#else
79742-JEMALLOC_EXPORT void
79743-_malloc_prefork(void)
79744-#endif
79745-{
79746-	tsd_t *tsd;
79747-	unsigned i, j, narenas;
79748-	arena_t *arena;
79749-
79750-#ifdef JEMALLOC_MUTEX_INIT_CB
79751-	if (!malloc_initialized()) {
79752-		return;
79753-	}
79754-#endif
79755-	assert(malloc_initialized());
79756-
79757-	tsd = tsd_fetch();
79758-
79759-	narenas = narenas_total_get();
79760-
79761-	witness_prefork(tsd_witness_tsdp_get(tsd));
79762-	/* Acquire all mutexes in a safe order. */
79763-	ctl_prefork(tsd_tsdn(tsd));
79764-	tcache_prefork(tsd_tsdn(tsd));
79765-	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
79766-	if (have_background_thread) {
79767-		background_thread_prefork0(tsd_tsdn(tsd));
79768-	}
79769-	prof_prefork0(tsd_tsdn(tsd));
79770-	if (have_background_thread) {
79771-		background_thread_prefork1(tsd_tsdn(tsd));
79772-	}
79773-	/* Break arena prefork into stages to preserve lock order. */
79774-	for (i = 0; i < 9; i++) {
79775-		for (j = 0; j < narenas; j++) {
79776-			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
79777-			    NULL) {
79778-				switch (i) {
79779-				case 0:
79780-					arena_prefork0(tsd_tsdn(tsd), arena);
79781-					break;
79782-				case 1:
79783-					arena_prefork1(tsd_tsdn(tsd), arena);
79784-					break;
79785-				case 2:
79786-					arena_prefork2(tsd_tsdn(tsd), arena);
79787-					break;
79788-				case 3:
79789-					arena_prefork3(tsd_tsdn(tsd), arena);
79790-					break;
79791-				case 4:
79792-					arena_prefork4(tsd_tsdn(tsd), arena);
79793-					break;
79794-				case 5:
79795-					arena_prefork5(tsd_tsdn(tsd), arena);
79796-					break;
79797-				case 6:
79798-					arena_prefork6(tsd_tsdn(tsd), arena);
79799-					break;
79800-				case 7:
79801-					arena_prefork7(tsd_tsdn(tsd), arena);
79802-					break;
79803-				case 8:
79804-					arena_prefork8(tsd_tsdn(tsd), arena);
79805-					break;
79806-				default: not_reached();
79807-				}
79808-			}
79809-		}
79810-
79811-	}
79812-	prof_prefork1(tsd_tsdn(tsd));
79813-	stats_prefork(tsd_tsdn(tsd));
79814-	tsd_prefork(tsd);
79815-}
79816-
79817-#ifndef JEMALLOC_MUTEX_INIT_CB
79818-void
79819-jemalloc_postfork_parent(void)
79820-#else
79821-JEMALLOC_EXPORT void
79822-_malloc_postfork(void)
79823-#endif
79824-{
79825-	tsd_t *tsd;
79826-	unsigned i, narenas;
79827-
79828-#ifdef JEMALLOC_MUTEX_INIT_CB
79829-	if (!malloc_initialized()) {
79830-		return;
79831-	}
79832-#endif
79833-	assert(malloc_initialized());
79834-
79835-	tsd = tsd_fetch();
79836-
79837-	tsd_postfork_parent(tsd);
79838-
79839-	witness_postfork_parent(tsd_witness_tsdp_get(tsd));
79840-	/* Release all mutexes, now that fork() has completed. */
79841-	stats_postfork_parent(tsd_tsdn(tsd));
79842-	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
79843-		arena_t *arena;
79844-
79845-		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
79846-			arena_postfork_parent(tsd_tsdn(tsd), arena);
79847-		}
79848-	}
79849-	prof_postfork_parent(tsd_tsdn(tsd));
79850-	if (have_background_thread) {
79851-		background_thread_postfork_parent(tsd_tsdn(tsd));
79852-	}
79853-	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
79854-	tcache_postfork_parent(tsd_tsdn(tsd));
79855-	ctl_postfork_parent(tsd_tsdn(tsd));
79856-}
79857-
79858-void
79859-jemalloc_postfork_child(void) {
79860-	tsd_t *tsd;
79861-	unsigned i, narenas;
79862-
79863-	assert(malloc_initialized());
79864-
79865-	tsd = tsd_fetch();
79866-
79867-	tsd_postfork_child(tsd);
79868-
79869-	witness_postfork_child(tsd_witness_tsdp_get(tsd));
79870-	/* Release all mutexes, now that fork() has completed. */
79871-	stats_postfork_child(tsd_tsdn(tsd));
79872-	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
79873-		arena_t *arena;
79874-
79875-		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
79876-			arena_postfork_child(tsd_tsdn(tsd), arena);
79877-		}
79878-	}
79879-	prof_postfork_child(tsd_tsdn(tsd));
79880-	if (have_background_thread) {
79881-		background_thread_postfork_child(tsd_tsdn(tsd));
79882-	}
79883-	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
79884-	tcache_postfork_child(tsd_tsdn(tsd));
79885-	ctl_postfork_child(tsd_tsdn(tsd));
79886-}
79887-
79888-/******************************************************************************/
79889diff --git a/jemalloc/src/jemalloc_cpp.cpp b/jemalloc/src/jemalloc_cpp.cpp
79890deleted file mode 100644
79891index 451655f..0000000
79892--- a/jemalloc/src/jemalloc_cpp.cpp
79893+++ /dev/null
79894@@ -1,254 +0,0 @@
79895-#include <mutex>
79896-#include <new>
79897-
79898-#define JEMALLOC_CPP_CPP_
79899-#ifdef __cplusplus
79900-extern "C" {
79901-#endif
79902-
79903-#include "jemalloc/internal/jemalloc_preamble.h"
79904-#include "jemalloc/internal/jemalloc_internal_includes.h"
79905-
79906-#ifdef __cplusplus
79907-}
79908-#endif
79909-
79910-// All operators in this file are exported.
79911-
79912-// Possibly alias hidden versions of malloc and sdallocx to avoid an extra plt
79913-// thunk?
79914-//
79915-// extern __typeof (sdallocx) sdallocx_int
79916-//  __attribute ((alias ("sdallocx"),
79917-//		visibility ("hidden")));
79918-//
79919-// ... but it needs to work with jemalloc namespaces.
79920-
79921-void	*operator new(std::size_t size);
79922-void	*operator new[](std::size_t size);
79923-void	*operator new(std::size_t size, const std::nothrow_t &) noexcept;
79924-void	*operator new[](std::size_t size, const std::nothrow_t &) noexcept;
79925-void	operator delete(void *ptr) noexcept;
79926-void	operator delete[](void *ptr) noexcept;
79927-void	operator delete(void *ptr, const std::nothrow_t &) noexcept;
79928-void	operator delete[](void *ptr, const std::nothrow_t &) noexcept;
79929-
79930-#if __cpp_sized_deallocation >= 201309
79931-/* C++14's sized-delete operators. */
79932-void	operator delete(void *ptr, std::size_t size) noexcept;
79933-void	operator delete[](void *ptr, std::size_t size) noexcept;
79934-#endif
79935-
79936-#if __cpp_aligned_new >= 201606
79937-/* C++17's over-aligned operators. */
79938-void	*operator new(std::size_t size, std::align_val_t);
79939-void	*operator new(std::size_t size, std::align_val_t, const std::nothrow_t &) noexcept;
79940-void	*operator new[](std::size_t size, std::align_val_t);
79941-void	*operator new[](std::size_t size, std::align_val_t, const std::nothrow_t &) noexcept;
79942-void	operator delete(void* ptr, std::align_val_t) noexcept;
79943-void	operator delete(void* ptr, std::align_val_t, const std::nothrow_t &) noexcept;
79944-void	operator delete(void* ptr, std::size_t size, std::align_val_t al) noexcept;
79945-void	operator delete[](void* ptr, std::align_val_t) noexcept;
79946-void	operator delete[](void* ptr, std::align_val_t, const std::nothrow_t &) noexcept;
79947-void	operator delete[](void* ptr, std::size_t size, std::align_val_t al) noexcept;
79948-#endif
79949-
79950-JEMALLOC_NOINLINE
79951-static void *
79952-handleOOM(std::size_t size, bool nothrow) {
79953-	if (opt_experimental_infallible_new) {
79954-		safety_check_fail("<jemalloc>: Allocation failed and "
79955-		    "opt.experimental_infallible_new is true. Aborting.\n");
79956-		return nullptr;
79957-	}
79958-
79959-	void *ptr = nullptr;
79960-
79961-	while (ptr == nullptr) {
79962-		std::new_handler handler;
79963-		// GCC-4.8 and clang 4.0 do not have std::get_new_handler.
79964-		{
79965-			static std::mutex mtx;
79966-			std::lock_guard<std::mutex> lock(mtx);
79967-
79968-			handler = std::set_new_handler(nullptr);
79969-			std::set_new_handler(handler);
79970-		}
79971-		if (handler == nullptr)
79972-			break;
79973-
79974-		try {
79975-			handler();
79976-		} catch (const std::bad_alloc &) {
79977-			break;
79978-		}
79979-
79980-		ptr = je_malloc(size);
79981-	}
79982-
79983-	if (ptr == nullptr && !nothrow)
79984-		std::__throw_bad_alloc();
79985-	return ptr;
79986-}
79987-
79988-template <bool IsNoExcept>
79989-JEMALLOC_NOINLINE
79990-static void *
79991-fallback_impl(std::size_t size) noexcept(IsNoExcept) {
79992-	void *ptr = malloc_default(size);
79993-	if (likely(ptr != nullptr)) {
79994-		return ptr;
79995-	}
79996-	return handleOOM(size, IsNoExcept);
79997-}
79998-
79999-template <bool IsNoExcept>
80000-JEMALLOC_ALWAYS_INLINE
80001-void *
80002-newImpl(std::size_t size) noexcept(IsNoExcept) {
80003-	return imalloc_fastpath(size, &fallback_impl<IsNoExcept>);
80004-}
80005-
80006-void *
80007-operator new(std::size_t size) {
80008-	return newImpl<false>(size);
80009-}
80010-
80011-void *
80012-operator new[](std::size_t size) {
80013-	return newImpl<false>(size);
80014-}
80015-
80016-void *
80017-operator new(std::size_t size, const std::nothrow_t &) noexcept {
80018-	return newImpl<true>(size);
80019-}
80020-
80021-void *
80022-operator new[](std::size_t size, const std::nothrow_t &) noexcept {
80023-	return newImpl<true>(size);
80024-}
80025-
80026-#if __cpp_aligned_new >= 201606
80027-
80028-template <bool IsNoExcept>
80029-JEMALLOC_ALWAYS_INLINE
80030-void *
80031-alignedNewImpl(std::size_t size, std::align_val_t alignment) noexcept(IsNoExcept) {
80032-	void *ptr = je_aligned_alloc(static_cast<std::size_t>(alignment), size);
80033-	if (likely(ptr != nullptr)) {
80034-		return ptr;
80035-	}
80036-
80037-	return handleOOM(size, IsNoExcept);
80038-}
80039-
80040-void *
80041-operator new(std::size_t size, std::align_val_t alignment) {
80042-	return alignedNewImpl<false>(size, alignment);
80043-}
80044-
80045-void *
80046-operator new[](std::size_t size, std::align_val_t alignment) {
80047-	return alignedNewImpl<false>(size, alignment);
80048-}
80049-
80050-void *
80051-operator new(std::size_t size, std::align_val_t alignment, const std::nothrow_t &) noexcept {
80052-	return alignedNewImpl<true>(size, alignment);
80053-}
80054-
80055-void *
80056-operator new[](std::size_t size, std::align_val_t alignment, const std::nothrow_t &) noexcept {
80057-	return alignedNewImpl<true>(size, alignment);
80058-}
80059-
80060-#endif  // __cpp_aligned_new
80061-
80062-void
80063-operator delete(void *ptr) noexcept {
80064-	je_free(ptr);
80065-}
80066-
80067-void
80068-operator delete[](void *ptr) noexcept {
80069-	je_free(ptr);
80070-}
80071-
80072-void
80073-operator delete(void *ptr, const std::nothrow_t &) noexcept {
80074-	je_free(ptr);
80075-}
80076-
80077-void operator delete[](void *ptr, const std::nothrow_t &) noexcept {
80078-	je_free(ptr);
80079-}
80080-
80081-#if __cpp_sized_deallocation >= 201309
80082-
80083-JEMALLOC_ALWAYS_INLINE
80084-void
80085-sizedDeleteImpl(void* ptr, std::size_t size) noexcept {
80086-	if (unlikely(ptr == nullptr)) {
80087-		return;
80088-	}
80089-	je_sdallocx_noflags(ptr, size);
80090-}
80091-
80092-void
80093-operator delete(void *ptr, std::size_t size) noexcept {
80094-	sizedDeleteImpl(ptr, size);
80095-}
80096-
80097-void
80098-operator delete[](void *ptr, std::size_t size) noexcept {
80099-	sizedDeleteImpl(ptr, size);
80100-}
80101-
80102-#endif  // __cpp_sized_deallocation
80103-
80104-#if __cpp_aligned_new >= 201606
80105-
80106-JEMALLOC_ALWAYS_INLINE
80107-void
80108-alignedSizedDeleteImpl(void* ptr, std::size_t size, std::align_val_t alignment) noexcept {
80109-	if (config_debug) {
80110-		assert(((size_t)alignment & ((size_t)alignment - 1)) == 0);
80111-	}
80112-	if (unlikely(ptr == nullptr)) {
80113-		return;
80114-	}
80115-	je_sdallocx(ptr, size, MALLOCX_ALIGN(alignment));
80116-}
80117-
80118-void
80119-operator delete(void* ptr, std::align_val_t) noexcept {
80120-	je_free(ptr);
80121-}
80122-
80123-void
80124-operator delete[](void* ptr, std::align_val_t) noexcept {
80125-	je_free(ptr);
80126-}
80127-
80128-void
80129-operator delete(void* ptr, std::align_val_t, const std::nothrow_t&) noexcept {
80130-	je_free(ptr);
80131-}
80132-
80133-void
80134-operator delete[](void* ptr, std::align_val_t, const std::nothrow_t&) noexcept {
80135-	je_free(ptr);
80136-}
80137-
80138-void
80139-operator delete(void* ptr, std::size_t size, std::align_val_t alignment) noexcept {
80140-	alignedSizedDeleteImpl(ptr, size, alignment);
80141-}
80142-
80143-void
80144-operator delete[](void* ptr, std::size_t size, std::align_val_t alignment) noexcept {
80145-	alignedSizedDeleteImpl(ptr, size, alignment);
80146-}
80147-
80148-#endif  // __cpp_aligned_new
80149diff --git a/jemalloc/src/large.c b/jemalloc/src/large.c
80150deleted file mode 100644
80151index 5fc4bf5..0000000
80152--- a/jemalloc/src/large.c
80153+++ /dev/null
80154@@ -1,322 +0,0 @@
80155-#include "jemalloc/internal/jemalloc_preamble.h"
80156-#include "jemalloc/internal/jemalloc_internal_includes.h"
80157-
80158-#include "jemalloc/internal/assert.h"
80159-#include "jemalloc/internal/emap.h"
80160-#include "jemalloc/internal/extent_mmap.h"
80161-#include "jemalloc/internal/mutex.h"
80162-#include "jemalloc/internal/prof_recent.h"
80163-#include "jemalloc/internal/util.h"
80164-
80165-/******************************************************************************/
80166-
80167-void *
80168-large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
80169-	assert(usize == sz_s2u(usize));
80170-
80171-	return large_palloc(tsdn, arena, usize, CACHELINE, zero);
80172-}
80173-
80174-void *
80175-large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
80176-    bool zero) {
80177-	size_t ausize;
80178-	edata_t *edata;
80179-	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
80180-
80181-	assert(!tsdn_null(tsdn) || arena != NULL);
80182-
80183-	ausize = sz_sa2u(usize, alignment);
80184-	if (unlikely(ausize == 0 || ausize > SC_LARGE_MAXCLASS)) {
80185-		return NULL;
80186-	}
80187-
80188-	if (likely(!tsdn_null(tsdn))) {
80189-		arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
80190-	}
80191-	if (unlikely(arena == NULL) || (edata = arena_extent_alloc_large(tsdn,
80192-	    arena, usize, alignment, zero)) == NULL) {
80193-		return NULL;
80194-	}
80195-
80196-	/* See comments in arena_bin_slabs_full_insert(). */
80197-	if (!arena_is_auto(arena)) {
80198-		/* Insert edata into large. */
80199-		malloc_mutex_lock(tsdn, &arena->large_mtx);
80200-		edata_list_active_append(&arena->large, edata);
80201-		malloc_mutex_unlock(tsdn, &arena->large_mtx);
80202-	}
80203-
80204-	arena_decay_tick(tsdn, arena);
80205-	return edata_addr_get(edata);
80206-}
80207-
80208-static bool
80209-large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
80210-	arena_t *arena = arena_get_from_edata(edata);
80211-	ehooks_t *ehooks = arena_get_ehooks(arena);
80212-	size_t old_size = edata_size_get(edata);
80213-	size_t old_usize = edata_usize_get(edata);
80214-
80215-	assert(old_usize > usize);
80216-
80217-	if (ehooks_split_will_fail(ehooks)) {
80218-		return true;
80219-	}
80220-
80221-	bool deferred_work_generated = false;
80222-	bool err = pa_shrink(tsdn, &arena->pa_shard, edata, old_size,
80223-	    usize + sz_large_pad, sz_size2index(usize),
80224-	    &deferred_work_generated);
80225-	if (err) {
80226-		return true;
80227-	}
80228-	if (deferred_work_generated) {
80229-		arena_handle_deferred_work(tsdn, arena);
80230-	}
80231-	arena_extent_ralloc_large_shrink(tsdn, arena, edata, old_usize);
80232-
80233-	return false;
80234-}
80235-
80236-static bool
80237-large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
80238-    bool zero) {
80239-	arena_t *arena = arena_get_from_edata(edata);
80240-
80241-	size_t old_size = edata_size_get(edata);
80242-	size_t old_usize = edata_usize_get(edata);
80243-	size_t new_size = usize + sz_large_pad;
80244-
80245-	szind_t szind = sz_size2index(usize);
80246-
80247-	bool deferred_work_generated = false;
80248-	bool err = pa_expand(tsdn, &arena->pa_shard, edata, old_size, new_size,
80249-	    szind, zero, &deferred_work_generated);
80250-
80251-	if (deferred_work_generated) {
80252-		arena_handle_deferred_work(tsdn, arena);
80253-	}
80254-
80255-	if (err) {
80256-		return true;
80257-	}
80258-
80259-	if (zero) {
80260-		if (opt_cache_oblivious) {
80261-			assert(sz_large_pad == PAGE);
80262-			/*
80263-			 * Zero the trailing bytes of the original allocation's
80264-			 * last page, since they are in an indeterminate state.
80265-			 * There will always be trailing bytes, because ptr's
80266-			 * offset from the beginning of the extent is a multiple
80267-			 * of CACHELINE in [0 .. PAGE).
80268-			 */
80269-			void *zbase = (void *)
80270-			    ((uintptr_t)edata_addr_get(edata) + old_usize);
80271-			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
80272-			    PAGE));
80273-			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
80274-			assert(nzero > 0);
80275-			memset(zbase, 0, nzero);
80276-		}
80277-	}
80278-	arena_extent_ralloc_large_expand(tsdn, arena, edata, old_usize);
80279-
80280-	return false;
80281-}
80282-
80283-bool
80284-large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
80285-    size_t usize_max, bool zero) {
80286-	size_t oldusize = edata_usize_get(edata);
80287-
80288-	/* The following should have been caught by callers. */
80289-	assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS);
80290-	/* Both allocation sizes must be large to avoid a move. */
80291-	assert(oldusize >= SC_LARGE_MINCLASS
80292-	    && usize_max >= SC_LARGE_MINCLASS);
80293-
80294-	if (usize_max > oldusize) {
80295-		/* Attempt to expand the allocation in-place. */
80296-		if (!large_ralloc_no_move_expand(tsdn, edata, usize_max,
80297-		    zero)) {
80298-			arena_decay_tick(tsdn, arena_get_from_edata(edata));
80299-			return false;
80300-		}
80301-		/* Try again, this time with usize_min. */
80302-		if (usize_min < usize_max && usize_min > oldusize &&
80303-		    !large_ralloc_no_move_expand(tsdn, edata, usize_min, zero)) {
80304-			arena_decay_tick(tsdn, arena_get_from_edata(edata));
80305-			return false;
80306-		}
80307-	}
80308-
80309-	/*
80310-	 * Avoid moving the allocation if the existing extent size accommodates
80311-	 * the new size.
80312-	 */
80313-	if (oldusize >= usize_min && oldusize <= usize_max) {
80314-		arena_decay_tick(tsdn, arena_get_from_edata(edata));
80315-		return false;
80316-	}
80317-
80318-	/* Attempt to shrink the allocation in-place. */
80319-	if (oldusize > usize_max) {
80320-		if (!large_ralloc_no_move_shrink(tsdn, edata, usize_max)) {
80321-			arena_decay_tick(tsdn, arena_get_from_edata(edata));
80322-			return false;
80323-		}
80324-	}
80325-	return true;
80326-}
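
/*
 * A descriptive summary of the in-place paths above: first try to grow to
 * usize_max, then to usize_min, then keep the allocation if its current
 * usable size already falls in [usize_min, usize_max], and finally try to
 * shrink to usize_max; only when all of these fail does the function return
 * true to signal that the caller must move the allocation.
 */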
80327-
80328-static void *
80329-large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
80330-    size_t alignment, bool zero) {
80331-	if (alignment <= CACHELINE) {
80332-		return large_malloc(tsdn, arena, usize, zero);
80333-	}
80334-	return large_palloc(tsdn, arena, usize, alignment, zero);
80335-}
80336-
80337-void *
80338-large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
80339-    size_t alignment, bool zero, tcache_t *tcache,
80340-    hook_ralloc_args_t *hook_args) {
80341-	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
80342-
80343-	size_t oldusize = edata_usize_get(edata);
80344-	/* The following should have been caught by callers. */
80345-	assert(usize > 0 && usize <= SC_LARGE_MAXCLASS);
80346-	/* Both allocation sizes must be large to avoid a move. */
80347-	assert(oldusize >= SC_LARGE_MINCLASS
80348-	    && usize >= SC_LARGE_MINCLASS);
80349-
80350-	/* Try to avoid moving the allocation. */
80351-	if (!large_ralloc_no_move(tsdn, edata, usize, usize, zero)) {
80352-		hook_invoke_expand(hook_args->is_realloc
80353-		    ? hook_expand_realloc : hook_expand_rallocx, ptr, oldusize,
80354-		    usize, (uintptr_t)ptr, hook_args->args);
80355-		return edata_addr_get(edata);
80356-	}
80357-
80358-	/*
80359-	 * usize and old size are different enough that we need to use a
80360-	 * different size class.  In that case, fall back to allocating new
80361-	 * space and copying.
80362-	 */
80363-	void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment,
80364-	    zero);
80365-	if (ret == NULL) {
80366-		return NULL;
80367-	}
80368-
80369-	hook_invoke_alloc(hook_args->is_realloc
80370-	    ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret,
80371-	    hook_args->args);
80372-	hook_invoke_dalloc(hook_args->is_realloc
80373-	    ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
80374-
80375-	size_t copysize = (usize < oldusize) ? usize : oldusize;
80376-	memcpy(ret, edata_addr_get(edata), copysize);
80377-	isdalloct(tsdn, edata_addr_get(edata), oldusize, tcache, NULL, true);
80378-	return ret;
80379-}
80380-
80381-/*
80382- * locked indicates whether the arena's large_mtx is currently held.
80383- */
80384-static void
80385-large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
80386-    bool locked) {
80387-	if (!locked) {
80388-		/* See comments in arena_bin_slabs_full_insert(). */
80389-		if (!arena_is_auto(arena)) {
80390-			malloc_mutex_lock(tsdn, &arena->large_mtx);
80391-			edata_list_active_remove(&arena->large, edata);
80392-			malloc_mutex_unlock(tsdn, &arena->large_mtx);
80393-		}
80394-	} else {
80395-		/* Only hold the large_mtx if necessary. */
80396-		if (!arena_is_auto(arena)) {
80397-			malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
80398-			edata_list_active_remove(&arena->large, edata);
80399-		}
80400-	}
80401-	arena_extent_dalloc_large_prep(tsdn, arena, edata);
80402-}
80403-
80404-static void
80405-large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
80406-	bool deferred_work_generated = false;
80407-	pa_dalloc(tsdn, &arena->pa_shard, edata, &deferred_work_generated);
80408-	if (deferred_work_generated) {
80409-		arena_handle_deferred_work(tsdn, arena);
80410-	}
80411-}
80412-
80413-void
80414-large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata) {
80415-	large_dalloc_prep_impl(tsdn, arena_get_from_edata(edata), edata, true);
80416-}
80417-
80418-void
80419-large_dalloc_finish(tsdn_t *tsdn, edata_t *edata) {
80420-	large_dalloc_finish_impl(tsdn, arena_get_from_edata(edata), edata);
80421-}
80422-
80423-void
80424-large_dalloc(tsdn_t *tsdn, edata_t *edata) {
80425-	arena_t *arena = arena_get_from_edata(edata);
80426-	large_dalloc_prep_impl(tsdn, arena, edata, false);
80427-	large_dalloc_finish_impl(tsdn, arena, edata);
80428-	arena_decay_tick(tsdn, arena);
80429-}
80430-
80431-size_t
80432-large_salloc(tsdn_t *tsdn, const edata_t *edata) {
80433-	return edata_usize_get(edata);
80434-}
80435-
80436-void
80437-large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info,
80438-    bool reset_recent) {
80439-	assert(prof_info != NULL);
80440-
80441-	prof_tctx_t *alloc_tctx = edata_prof_tctx_get(edata);
80442-	prof_info->alloc_tctx = alloc_tctx;
80443-
80444-	if ((uintptr_t)alloc_tctx > (uintptr_t)1U) {
80445-		nstime_copy(&prof_info->alloc_time,
80446-		    edata_prof_alloc_time_get(edata));
80447-		prof_info->alloc_size = edata_prof_alloc_size_get(edata);
80448-		if (reset_recent) {
80449-			/*
80450-			 * Reset the pointer on the recent allocation record,
80451-			 * so that this allocation is recorded as released.
80452-			 */
80453-			prof_recent_alloc_reset(tsd, edata);
80454-		}
80455-	}
80456-}
80457-
80458-static void
80459-large_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
80460-	edata_prof_tctx_set(edata, tctx);
80461-}
80462-
80463-void
80464-large_prof_tctx_reset(edata_t *edata) {
80465-	large_prof_tctx_set(edata, (prof_tctx_t *)(uintptr_t)1U);
80466-}
80467-
80468-void
80469-large_prof_info_set(edata_t *edata, prof_tctx_t *tctx, size_t size) {
80470-	nstime_t t;
80471-	nstime_prof_init_update(&t);
80472-	edata_prof_alloc_time_set(edata, &t);
80473-	edata_prof_alloc_size_set(edata, size);
80474-	edata_prof_recent_alloc_init(edata);
80475-	large_prof_tctx_set(edata, tctx);
80476-}
80477diff --git a/jemalloc/src/log.c b/jemalloc/src/log.c
80478deleted file mode 100644
80479index 778902f..0000000
80480--- a/jemalloc/src/log.c
80481+++ /dev/null
80482@@ -1,78 +0,0 @@
80483-#include "jemalloc/internal/jemalloc_preamble.h"
80484-#include "jemalloc/internal/jemalloc_internal_includes.h"
80485-
80486-#include "jemalloc/internal/log.h"
80487-
80488-char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE];
80489-atomic_b_t log_init_done = ATOMIC_INIT(false);
80490-
80491-/*
80492- * Returns a pointer to the first character after the end of the segment,
80493- * i.e. the '|' delimiter that follows it or the terminating '\0'.
80494- */
80495-static const char *
80496-log_var_extract_segment(const char *segment_begin) {
80497-	const char *end;
80498-	for (end = segment_begin; *end != '\0' && *end != '|'; end++) {
80499-	}
80500-	return end;
80501-}
80502-
80503-static bool
80504-log_var_matches_segment(const char *segment_begin, const char *segment_end,
80505-    const char *log_var_begin, const char *log_var_end) {
80506-	assert(segment_begin <= segment_end);
80507-	assert(log_var_begin < log_var_end);
80508-
80509-	ptrdiff_t segment_len = segment_end - segment_begin;
80510-	ptrdiff_t log_var_len = log_var_end - log_var_begin;
80511-	/* The special '.' segment matches everything. */
80512-	if (segment_len == 1 && *segment_begin == '.') {
80513-		return true;
80514-	}
80515-	if (segment_len == log_var_len) {
80516-		return strncmp(segment_begin, log_var_begin, segment_len) == 0;
80517-	} else if (segment_len < log_var_len) {
80518-		return strncmp(segment_begin, log_var_begin, segment_len) == 0
80519-		    && log_var_begin[segment_len] == '.';
80520-	} else {
80521-		return false;
80522-	}
80523-}
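
/*
 * For example, per the checks above: the segment "core.rallocx" matches the
 * log variables "core.rallocx.entry" and "core.rallocx.exit" (the prefix
 * ends on a '.' boundary), but not "core.realloc.entry", while the special
 * segment "." matches every log variable.
 */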
80524-
80525-unsigned
80526-log_var_update_state(log_var_t *log_var) {
80527-	const char *log_var_begin = log_var->name;
80528-	const char *log_var_end = log_var->name + strlen(log_var->name);
80529-
80530-	/* Pointer to the beginning of the current segment. */
80531-	const char *segment_begin = log_var_names;
80532-
80533-	/*
80534-	 * If log_init_done is false, we haven't parsed the malloc conf yet.  To
80535-	 * avoid log-spew, we default to not displaying anything.
80536-	 */
80537-	if (!atomic_load_b(&log_init_done, ATOMIC_ACQUIRE)) {
80538-		return LOG_INITIALIZED_NOT_ENABLED;
80539-	}
80540-
80541-	while (true) {
80542-		const char *segment_end = log_var_extract_segment(
80543-		    segment_begin);
80544-		assert(segment_end < log_var_names + JEMALLOC_LOG_VAR_BUFSIZE);
80545-		if (log_var_matches_segment(segment_begin, segment_end,
80546-		    log_var_begin, log_var_end)) {
80547-			atomic_store_u(&log_var->state, LOG_ENABLED,
80548-			    ATOMIC_RELAXED);
80549-			return LOG_ENABLED;
80550-		}
80551-		if (*segment_end == '\0') {
80552-			/* Hit the end of the segment string with no match. */
80553-			atomic_store_u(&log_var->state,
80554-			    LOG_INITIALIZED_NOT_ENABLED, ATOMIC_RELAXED);
80555-			return LOG_INITIALIZED_NOT_ENABLED;
80556-		}
80557-		/* Otherwise, skip the delimiter and continue. */
80558-		segment_begin = segment_end + 1;
80559-	}
80560-}
80561diff --git a/jemalloc/src/malloc_io.c b/jemalloc/src/malloc_io.c
80562deleted file mode 100644
80563index b76885c..0000000
80564--- a/jemalloc/src/malloc_io.c
80565+++ /dev/null
80566@@ -1,697 +0,0 @@
80567-#include "jemalloc/internal/jemalloc_preamble.h"
80568-#include "jemalloc/internal/jemalloc_internal_includes.h"
80569-
80570-#include "jemalloc/internal/malloc_io.h"
80571-#include "jemalloc/internal/util.h"
80572-
80573-#ifdef assert
80574-#  undef assert
80575-#endif
80576-#ifdef not_reached
80577-#  undef not_reached
80578-#endif
80579-#ifdef not_implemented
80580-#  undef not_implemented
80581-#endif
80582-#ifdef assert_not_implemented
80583-#  undef assert_not_implemented
80584-#endif
80585-
80586-/*
80587- * Define simple versions of assertion macros that won't recurse in case
80588- * of assertion failures in malloc_*printf().
80589- */
80590-#define assert(e) do {							\
80591-	if (config_debug && !(e)) {					\
80592-		malloc_write("<jemalloc>: Failed assertion\n");		\
80593-		abort();						\
80594-	}								\
80595-} while (0)
80596-
80597-#define not_reached() do {						\
80598-	if (config_debug) {						\
80599-		malloc_write("<jemalloc>: Unreachable code reached\n");	\
80600-		abort();						\
80601-	}								\
80602-	unreachable();							\
80603-} while (0)
80604-
80605-#define not_implemented() do {						\
80606-	if (config_debug) {						\
80607-		malloc_write("<jemalloc>: Not implemented\n");		\
80608-		abort();						\
80609-	}								\
80610-} while (0)
80611-
80612-#define assert_not_implemented(e) do {					\
80613-	if (unlikely(config_debug && !(e))) {				\
80614-		not_implemented();					\
80615-	}								\
80616-} while (0)
80617-
80618-/******************************************************************************/
80619-/* Function prototypes for non-inline static functions. */
80620-
80621-#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
80622-static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
80623-    size_t *slen_p);
80624-#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
80625-static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
80626-#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
80627-static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
80628-#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
80629-static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
80630-    size_t *slen_p);
80631-
80632-/******************************************************************************/
80633-
80634-/* malloc_message() setup. */
80635-void
80636-wrtmessage(void *cbopaque, const char *s) {
80637-	malloc_write_fd(STDERR_FILENO, s, strlen(s));
80638-}
80639-
80640-JEMALLOC_EXPORT void	(*je_malloc_message)(void *, const char *s);
80641-
80642-/*
80643- * Wrapper around malloc_message() that avoids the need for
80644- * je_malloc_message(...) throughout the code.
80645- */
80646-void
80647-malloc_write(const char *s) {
80648-	if (je_malloc_message != NULL) {
80649-		je_malloc_message(NULL, s);
80650-	} else {
80651-		wrtmessage(NULL, s);
80652-	}
80653-}
80654-
80655-/*
80656- * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
80657- * provide a wrapper.
80658- */
80659-int
80660-buferror(int err, char *buf, size_t buflen) {
80661-#ifdef _WIN32
80662-	FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
80663-	    (LPSTR)buf, (DWORD)buflen, NULL);
80664-	return 0;
80665-#elif defined(JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE) && defined(_GNU_SOURCE)
80666-	char *b = strerror_r(err, buf, buflen);
80667-	if (b != buf) {
80668-		strncpy(buf, b, buflen);
80669-		buf[buflen-1] = '\0';
80670-	}
80671-	return 0;
80672-#else
80673-	return strerror_r(err, buf, buflen);
80674-#endif
80675-}
80676-
80677-uintmax_t
80678-malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) {
80679-	uintmax_t ret, digit;
80680-	unsigned b;
80681-	bool neg;
80682-	const char *p, *ns;
80683-
80684-	p = nptr;
80685-	if (base < 0 || base == 1 || base > 36) {
80686-		ns = p;
80687-		set_errno(EINVAL);
80688-		ret = UINTMAX_MAX;
80689-		goto label_return;
80690-	}
80691-	b = base;
80692-
80693-	/* Swallow leading whitespace and get sign, if any. */
80694-	neg = false;
80695-	while (true) {
80696-		switch (*p) {
80697-		case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
80698-			p++;
80699-			break;
80700-		case '-':
80701-			neg = true;
80702-			JEMALLOC_FALLTHROUGH;
80703-		case '+':
80704-			p++;
80705-			JEMALLOC_FALLTHROUGH;
80706-		default:
80707-			goto label_prefix;
80708-		}
80709-	}
80710-
80711-	/* Get prefix, if any. */
80712-	label_prefix:
80713-	/*
80714-	 * Note where the first non-whitespace/sign character is so that it is
80715-	 * possible to tell whether any digits are consumed (e.g., "  0" vs.
80716-	 * "  -x").
80717-	 */
80718-	ns = p;
80719-	if (*p == '0') {
80720-		switch (p[1]) {
80721-		case '0': case '1': case '2': case '3': case '4': case '5':
80722-		case '6': case '7':
80723-			if (b == 0) {
80724-				b = 8;
80725-			}
80726-			if (b == 8) {
80727-				p++;
80728-			}
80729-			break;
80730-		case 'X': case 'x':
80731-			switch (p[2]) {
80732-			case '0': case '1': case '2': case '3': case '4':
80733-			case '5': case '6': case '7': case '8': case '9':
80734-			case 'A': case 'B': case 'C': case 'D': case 'E':
80735-			case 'F':
80736-			case 'a': case 'b': case 'c': case 'd': case 'e':
80737-			case 'f':
80738-				if (b == 0) {
80739-					b = 16;
80740-				}
80741-				if (b == 16) {
80742-					p += 2;
80743-				}
80744-				break;
80745-			default:
80746-				break;
80747-			}
80748-			break;
80749-		default:
80750-			p++;
80751-			ret = 0;
80752-			goto label_return;
80753-		}
80754-	}
80755-	if (b == 0) {
80756-		b = 10;
80757-	}
80758-
80759-	/* Convert. */
80760-	ret = 0;
80761-	while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
80762-	    || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
80763-	    || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
80764-		uintmax_t pret = ret;
80765-		ret *= b;
80766-		ret += digit;
80767-		if (ret < pret) {
80768-			/* Overflow. */
80769-			set_errno(ERANGE);
80770-			ret = UINTMAX_MAX;
80771-			goto label_return;
80772-		}
80773-		p++;
80774-	}
80775-	if (neg) {
80776-		ret = (uintmax_t)(-((intmax_t)ret));
80777-	}
80778-
80779-	if (p == ns) {
80780-		/* No conversion performed. */
80781-		set_errno(EINVAL);
80782-		ret = UINTMAX_MAX;
80783-		goto label_return;
80784-	}
80785-
80786-label_return:
80787-	if (endptr != NULL) {
80788-		if (p == ns) {
80789-			/* No characters were converted. */
80790-			*endptr = (char *)nptr;
80791-		} else {
80792-			*endptr = (char *)p;
80793-		}
80794-	}
80795-	return ret;
80796-}
80797-
80798-static char *
80799-u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) {
80800-	unsigned i;
80801-
80802-	i = U2S_BUFSIZE - 1;
80803-	s[i] = '\0';
80804-	switch (base) {
80805-	case 10:
80806-		do {
80807-			i--;
80808-			s[i] = "0123456789"[x % (uint64_t)10];
80809-			x /= (uint64_t)10;
80810-		} while (x > 0);
80811-		break;
80812-	case 16: {
80813-		const char *digits = (uppercase)
80814-		    ? "0123456789ABCDEF"
80815-		    : "0123456789abcdef";
80816-
80817-		do {
80818-			i--;
80819-			s[i] = digits[x & 0xf];
80820-			x >>= 4;
80821-		} while (x > 0);
80822-		break;
80823-	} default: {
80824-		const char *digits = (uppercase)
80825-		    ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
80826-		    : "0123456789abcdefghijklmnopqrstuvwxyz";
80827-
80828-		assert(base >= 2 && base <= 36);
80829-		do {
80830-			i--;
80831-			s[i] = digits[x % (uint64_t)base];
80832-			x /= (uint64_t)base;
80833-		} while (x > 0);
80834-	}}
80835-
80836-	*slen_p = U2S_BUFSIZE - 1 - i;
80837-	return &s[i];
80838-}
80839-
80840-static char *
80841-d2s(intmax_t x, char sign, char *s, size_t *slen_p) {
80842-	bool neg;
80843-
80844-	if ((neg = (x < 0))) {
80845-		x = -x;
80846-	}
80847-	s = u2s(x, 10, false, s, slen_p);
80848-	if (neg) {
80849-		sign = '-';
80850-	}
80851-	switch (sign) {
80852-	case '-':
80853-		if (!neg) {
80854-			break;
80855-		}
80856-		JEMALLOC_FALLTHROUGH;
80857-	case ' ':
80858-	case '+':
80859-		s--;
80860-		(*slen_p)++;
80861-		*s = sign;
80862-		break;
80863-	default: not_reached();
80864-	}
80865-	return s;
80866-}
80867-
80868-static char *
80869-o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) {
80870-	s = u2s(x, 8, false, s, slen_p);
80871-	if (alt_form && *s != '0') {
80872-		s--;
80873-		(*slen_p)++;
80874-		*s = '0';
80875-	}
80876-	return s;
80877-}
80878-
80879-static char *
80880-x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) {
80881-	s = u2s(x, 16, uppercase, s, slen_p);
80882-	if (alt_form) {
80883-		s -= 2;
80884-		(*slen_p) += 2;
80885-		memcpy(s, uppercase ? "0X" : "0x", 2);
80886-	}
80887-	return s;
80888-}
80889-
80890-JEMALLOC_COLD
80891-size_t
80892-malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
80893-	size_t i;
80894-	const char *f;
80895-
80896-#define APPEND_C(c) do {						\
80897-	if (i < size) {							\
80898-		str[i] = (c);						\
80899-	}								\
80900-	i++;								\
80901-} while (0)
80902-#define APPEND_S(s, slen) do {						\
80903-	if (i < size) {							\
80904-		size_t cpylen = (slen <= size - i) ? slen : size - i;	\
80905-		memcpy(&str[i], s, cpylen);				\
80906-	}								\
80907-	i += slen;							\
80908-} while (0)
80909-#define APPEND_PADDED_S(s, slen, width, left_justify) do {		\
80910-	/* Left padding. */						\
80911-	size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ?	\
80912-	    (size_t)width - slen : 0);					\
80913-	if (!left_justify && pad_len != 0) {				\
80914-		size_t j;						\
80915-		for (j = 0; j < pad_len; j++) {				\
80916-			if (pad_zero) {					\
80917-				APPEND_C('0');				\
80918-			} else {					\
80919-				APPEND_C(' ');				\
80920-			}						\
80921-		}							\
80922-	}								\
80923-	/* Value. */							\
80924-	APPEND_S(s, slen);						\
80925-	/* Right padding. */						\
80926-	if (left_justify && pad_len != 0) {				\
80927-		size_t j;						\
80928-		for (j = 0; j < pad_len; j++) {				\
80929-			APPEND_C(' ');					\
80930-		}							\
80931-	}								\
80932-} while (0)
80933-#define GET_ARG_NUMERIC(val, len) do {					\
80934-	switch ((unsigned char)len) {					\
80935-	case '?':							\
80936-		val = va_arg(ap, int);					\
80937-		break;							\
80938-	case '?' | 0x80:						\
80939-		val = va_arg(ap, unsigned int);				\
80940-		break;							\
80941-	case 'l':							\
80942-		val = va_arg(ap, long);					\
80943-		break;							\
80944-	case 'l' | 0x80:						\
80945-		val = va_arg(ap, unsigned long);			\
80946-		break;							\
80947-	case 'q':							\
80948-		val = va_arg(ap, long long);				\
80949-		break;							\
80950-	case 'q' | 0x80:						\
80951-		val = va_arg(ap, unsigned long long);			\
80952-		break;							\
80953-	case 'j':							\
80954-		val = va_arg(ap, intmax_t);				\
80955-		break;							\
80956-	case 'j' | 0x80:						\
80957-		val = va_arg(ap, uintmax_t);				\
80958-		break;							\
80959-	case 't':							\
80960-		val = va_arg(ap, ptrdiff_t);				\
80961-		break;							\
80962-	case 'z':							\
80963-		val = va_arg(ap, ssize_t);				\
80964-		break;							\
80965-	case 'z' | 0x80:						\
80966-		val = va_arg(ap, size_t);				\
80967-		break;							\
80968-	case 'p': /* Synthetic; used for %p. */				\
80969-		val = va_arg(ap, uintptr_t);				\
80970-		break;							\
80971-	default:							\
80972-		not_reached();						\
80973-		val = 0;						\
80974-	}								\
80975-} while (0)
80976-
80977-	i = 0;
80978-	f = format;
80979-	while (true) {
80980-		switch (*f) {
80981-		case '\0': goto label_out;
80982-		case '%': {
80983-			bool alt_form = false;
80984-			bool left_justify = false;
80985-			bool plus_space = false;
80986-			bool plus_plus = false;
80987-			int prec = -1;
80988-			int width = -1;
80989-			unsigned char len = '?';
80990-			char *s;
80991-			size_t slen;
80992-			bool first_width_digit = true;
80993-			bool pad_zero = false;
80994-
80995-			f++;
80996-			/* Flags. */
80997-			while (true) {
80998-				switch (*f) {
80999-				case '#':
81000-					assert(!alt_form);
81001-					alt_form = true;
81002-					break;
81003-				case '-':
81004-					assert(!left_justify);
81005-					left_justify = true;
81006-					break;
81007-				case ' ':
81008-					assert(!plus_space);
81009-					plus_space = true;
81010-					break;
81011-				case '+':
81012-					assert(!plus_plus);
81013-					plus_plus = true;
81014-					break;
81015-				default: goto label_width;
81016-				}
81017-				f++;
81018-			}
81019-			/* Width. */
81020-			label_width:
81021-			switch (*f) {
81022-			case '*':
81023-				width = va_arg(ap, int);
81024-				f++;
81025-				if (width < 0) {
81026-					left_justify = true;
81027-					width = -width;
81028-				}
81029-				break;
81030-			case '0':
81031-				if (first_width_digit) {
81032-					pad_zero = true;
81033-				}
81034-				JEMALLOC_FALLTHROUGH;
81035-			case '1': case '2': case '3': case '4':
81036-			case '5': case '6': case '7': case '8': case '9': {
81037-				uintmax_t uwidth;
81038-				set_errno(0);
81039-				uwidth = malloc_strtoumax(f, (char **)&f, 10);
81040-				assert(uwidth != UINTMAX_MAX || get_errno() !=
81041-				    ERANGE);
81042-				width = (int)uwidth;
81043-				first_width_digit = false;
81044-				break;
81045-			} default:
81046-				break;
81047-			}
81048-			/* Width/precision separator. */
81049-			if (*f == '.') {
81050-				f++;
81051-			} else {
81052-				goto label_length;
81053-			}
81054-			/* Precision. */
81055-			switch (*f) {
81056-			case '*':
81057-				prec = va_arg(ap, int);
81058-				f++;
81059-				break;
81060-			case '0': case '1': case '2': case '3': case '4':
81061-			case '5': case '6': case '7': case '8': case '9': {
81062-				uintmax_t uprec;
81063-				set_errno(0);
81064-				uprec = malloc_strtoumax(f, (char **)&f, 10);
81065-				assert(uprec != UINTMAX_MAX || get_errno() !=
81066-				    ERANGE);
81067-				prec = (int)uprec;
81068-				break;
81069-			}
81070-			default: break;
81071-			}
81072-			/* Length. */
81073-			label_length:
81074-			switch (*f) {
81075-			case 'l':
81076-				f++;
81077-				if (*f == 'l') {
81078-					len = 'q';
81079-					f++;
81080-				} else {
81081-					len = 'l';
81082-				}
81083-				break;
81084-			case 'q': case 'j': case 't': case 'z':
81085-				len = *f;
81086-				f++;
81087-				break;
81088-			default: break;
81089-			}
81090-			/* Conversion specifier. */
81091-			switch (*f) {
81092-			case '%':
81093-				/* %% */
81094-				APPEND_C(*f);
81095-				f++;
81096-				break;
81097-			case 'd': case 'i': {
81098-				intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
81099-				char buf[D2S_BUFSIZE];
81100-
81101-				/*
81102-				 * Outputting negative, zero-padded numbers
81103-				 * would require a nontrivial rework of the
81104-				 * interaction between the width and padding
81105-				 * (since 0 padding goes between the '-' and the
81106-				 * number, while ' ' padding goes either before
81107-				 * the '-' or after the number).  Since we
81108-				 * currently don't ever need 0-padded negative
81109-				 * numbers, just don't bother supporting it.
81110-				 */
81111-				assert(!pad_zero);
81112-
81113-				GET_ARG_NUMERIC(val, len);
81114-				s = d2s(val, (plus_plus ? '+' : (plus_space ?
81115-				    ' ' : '-')), buf, &slen);
81116-				APPEND_PADDED_S(s, slen, width, left_justify);
81117-				f++;
81118-				break;
81119-			} case 'o': {
81120-				uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
81121-				char buf[O2S_BUFSIZE];
81122-
81123-				GET_ARG_NUMERIC(val, len | 0x80);
81124-				s = o2s(val, alt_form, buf, &slen);
81125-				APPEND_PADDED_S(s, slen, width, left_justify);
81126-				f++;
81127-				break;
81128-			} case 'u': {
81129-				uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
81130-				char buf[U2S_BUFSIZE];
81131-
81132-				GET_ARG_NUMERIC(val, len | 0x80);
81133-				s = u2s(val, 10, false, buf, &slen);
81134-				APPEND_PADDED_S(s, slen, width, left_justify);
81135-				f++;
81136-				break;
81137-			} case 'x': case 'X': {
81138-				uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
81139-				char buf[X2S_BUFSIZE];
81140-
81141-				GET_ARG_NUMERIC(val, len | 0x80);
81142-				s = x2s(val, alt_form, *f == 'X', buf, &slen);
81143-				APPEND_PADDED_S(s, slen, width, left_justify);
81144-				f++;
81145-				break;
81146-			} case 'c': {
81147-				unsigned char val;
81148-				char buf[2];
81149-
81150-				assert(len == '?' || len == 'l');
81151-				assert_not_implemented(len != 'l');
81152-				val = va_arg(ap, int);
81153-				buf[0] = val;
81154-				buf[1] = '\0';
81155-				APPEND_PADDED_S(buf, 1, width, left_justify);
81156-				f++;
81157-				break;
81158-			} case 's':
81159-				assert(len == '?' || len == 'l');
81160-				assert_not_implemented(len != 'l');
81161-				s = va_arg(ap, char *);
81162-				slen = (prec < 0) ? strlen(s) : (size_t)prec;
81163-				APPEND_PADDED_S(s, slen, width, left_justify);
81164-				f++;
81165-				break;
81166-			case 'p': {
81167-				uintmax_t val;
81168-				char buf[X2S_BUFSIZE];
81169-
81170-				GET_ARG_NUMERIC(val, 'p');
81171-				s = x2s(val, true, false, buf, &slen);
81172-				APPEND_PADDED_S(s, slen, width, left_justify);
81173-				f++;
81174-				break;
81175-			} default: not_reached();
81176-			}
81177-			break;
81178-		} default: {
81179-			APPEND_C(*f);
81180-			f++;
81181-			break;
81182-		}}
81183-	}
81184-	label_out:
81185-	if (i < size) {
81186-		str[i] = '\0';
81187-	} else {
81188-		str[size - 1] = '\0';
81189-	}
81190-
81191-#undef APPEND_C
81192-#undef APPEND_S
81193-#undef APPEND_PADDED_S
81194-#undef GET_ARG_NUMERIC
81195-	return i;
81196-}
81197-
81198-JEMALLOC_FORMAT_PRINTF(3, 4)
81199-size_t
81200-malloc_snprintf(char *str, size_t size, const char *format, ...) {
81201-	size_t ret;
81202-	va_list ap;
81203-
81204-	va_start(ap, format);
81205-	ret = malloc_vsnprintf(str, size, format, ap);
81206-	va_end(ap);
81207-
81208-	return ret;
81209-}
81210-
81211-void
81212-malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
81213-    va_list ap) {
81214-	char buf[MALLOC_PRINTF_BUFSIZE];
81215-
81216-	if (write_cb == NULL) {
81217-		/*
81218-		 * The caller did not provide an alternate write_cb callback
81219-		 * function, so use the default one.  malloc_write() is an
81220-		 * inline function, so use malloc_message() directly here.
81221-		 */
81222-		write_cb = (je_malloc_message != NULL) ? je_malloc_message :
81223-		    wrtmessage;
81224-	}
81225-
81226-	malloc_vsnprintf(buf, sizeof(buf), format, ap);
81227-	write_cb(cbopaque, buf);
81228-}
81229-
81230-/*
81231- * Print to a callback function in such a way as to (hopefully) avoid memory
81232- * allocation.
81233- */
81234-JEMALLOC_FORMAT_PRINTF(3, 4)
81235-void
81236-malloc_cprintf(write_cb_t *write_cb, void *cbopaque, const char *format, ...) {
81237-	va_list ap;
81238-
81239-	va_start(ap, format);
81240-	malloc_vcprintf(write_cb, cbopaque, format, ap);
81241-	va_end(ap);
81242-}
81243-
81244-/* Print to stderr in such a way as to avoid memory allocation. */
81245-JEMALLOC_FORMAT_PRINTF(1, 2)
81246-void
81247-malloc_printf(const char *format, ...) {
81248-	va_list ap;
81249-
81250-	va_start(ap, format);
81251-	malloc_vcprintf(NULL, NULL, format, ap);
81252-	va_end(ap);
81253-}
81254-
81255-/*
81256- * Restore normal assertion macros, in order to make it possible to compile all
81257- * C files as a single concatenation.
81258- */
81259-#undef assert
81260-#undef not_reached
81261-#undef not_implemented
81262-#undef assert_not_implemented
81263-#include "jemalloc/internal/assert.h"
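The conversion loop in the deleted malloc_strtoumax() above guards its digit accumulation against overflow by checking whether the value wrapped after the multiply-and-add. A minimal standalone sketch of the same idea, assuming nothing beyond standard C and using an explicit pre-multiplication bound check; the helper name accumulate_decimal is hypothetical, not a jemalloc symbol:

#include <errno.h>
#include <inttypes.h>
#include <stdio.h>

/* Decimal-only digit accumulation with an overflow guard, illustrating the
 * pattern used by the deleted malloc_strtoumax(); not part of jemalloc. */
static uintmax_t
accumulate_decimal(const char *s) {
	uintmax_t val = 0;
	for (; *s >= '0' && *s <= '9'; s++) {
		uintmax_t digit = (uintmax_t)(*s - '0');
		/* Reject the digit if val * 10 + digit would exceed UINTMAX_MAX. */
		if (val > (UINTMAX_MAX - digit) / 10) {
			errno = ERANGE;
			return UINTMAX_MAX;
		}
		val = val * 10 + digit;
	}
	return val;
}

int
main(void) {
	printf("%ju\n", accumulate_decimal("18446744073709551615"));
	return 0;
}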
81264diff --git a/jemalloc/src/mutex.c b/jemalloc/src/mutex.c
81265deleted file mode 100644
81266index 0b3547a..0000000
81267--- a/jemalloc/src/mutex.c
81268+++ /dev/null
81269@@ -1,228 +0,0 @@
81270-#include "jemalloc/internal/jemalloc_preamble.h"
81271-#include "jemalloc/internal/jemalloc_internal_includes.h"
81272-
81273-#include "jemalloc/internal/assert.h"
81274-#include "jemalloc/internal/malloc_io.h"
81275-#include "jemalloc/internal/spin.h"
81276-
81277-#ifndef _CRT_SPINCOUNT
81278-#define _CRT_SPINCOUNT 4000
81279-#endif
81280-
81281-/*
81282- * Based on benchmark results, a fixed spin with this number of retries works
81283- * well for our critical sections.
81284- */
81285-int64_t opt_mutex_max_spin = 600;
81286-
81287-/******************************************************************************/
81288-/* Data. */
81289-
81290-#ifdef JEMALLOC_LAZY_LOCK
81291-bool isthreaded = false;
81292-#endif
81293-#ifdef JEMALLOC_MUTEX_INIT_CB
81294-static bool		postpone_init = true;
81295-static malloc_mutex_t	*postponed_mutexes = NULL;
81296-#endif
81297-
81298-/******************************************************************************/
81299-/*
81300- * We intercept pthread_create() calls in order to toggle isthreaded if the
81301- * process goes multi-threaded.
81302- */
81303-
81304-#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
81305-JEMALLOC_EXPORT int
81306-pthread_create(pthread_t *__restrict thread,
81307-    const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
81308-    void *__restrict arg) {
81309-	return pthread_create_wrapper(thread, attr, start_routine, arg);
81310-}
81311-#endif
81312-
81313-/******************************************************************************/
81314-
81315-#ifdef JEMALLOC_MUTEX_INIT_CB
81316-JEMALLOC_EXPORT int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
81317-    void *(calloc_cb)(size_t, size_t));
81318-#endif
81319-
81320-void
81321-malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
81322-	mutex_prof_data_t *data = &mutex->prof_data;
81323-	nstime_t before;
81324-
81325-	if (ncpus == 1) {
81326-		goto label_spin_done;
81327-	}
81328-
81329-	int cnt = 0;
81330-	do {
81331-		spin_cpu_spinwait();
81332-		if (!atomic_load_b(&mutex->locked, ATOMIC_RELAXED)
81333-                    && !malloc_mutex_trylock_final(mutex)) {
81334-			data->n_spin_acquired++;
81335-			return;
81336-		}
81337-	} while (cnt++ < opt_mutex_max_spin || opt_mutex_max_spin == -1);
81338-
81339-	if (!config_stats) {
81340-		/* Only spinning is useful when stats are off. */
81341-		malloc_mutex_lock_final(mutex);
81342-		return;
81343-	}
81344-label_spin_done:
81345-	nstime_init_update(&before);
81346-	/* Copy before to after to avoid clock skews. */
81347-	nstime_t after;
81348-	nstime_copy(&after, &before);
81349-	uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
81350-	    ATOMIC_RELAXED) + 1;
81351-	/* One last try, as the above two calls may take quite a few cycles. */
81352-	if (!malloc_mutex_trylock_final(mutex)) {
81353-		atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
81354-		data->n_spin_acquired++;
81355-		return;
81356-	}
81357-
81358-	/* True slow path. */
81359-	malloc_mutex_lock_final(mutex);
81360-	/* Update more slow-path only counters. */
81361-	atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
81362-	nstime_update(&after);
81363-
81364-	nstime_t delta;
81365-	nstime_copy(&delta, &after);
81366-	nstime_subtract(&delta, &before);
81367-
81368-	data->n_wait_times++;
81369-	nstime_add(&data->tot_wait_time, &delta);
81370-	if (nstime_compare(&data->max_wait_time, &delta) < 0) {
81371-		nstime_copy(&data->max_wait_time, &delta);
81372-	}
81373-	if (n_thds > data->max_n_thds) {
81374-		data->max_n_thds = n_thds;
81375-	}
81376-}
81377-
81378-static void
81379-mutex_prof_data_init(mutex_prof_data_t *data) {
81380-	memset(data, 0, sizeof(mutex_prof_data_t));
81381-	nstime_init_zero(&data->max_wait_time);
81382-	nstime_init_zero(&data->tot_wait_time);
81383-	data->prev_owner = NULL;
81384-}
81385-
81386-void
81387-malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
81388-	malloc_mutex_assert_owner(tsdn, mutex);
81389-	mutex_prof_data_init(&mutex->prof_data);
81390-}
81391-
81392-static int
81393-mutex_addr_comp(const witness_t *witness1, void *mutex1,
81394-    const witness_t *witness2, void *mutex2) {
81395-	assert(mutex1 != NULL);
81396-	assert(mutex2 != NULL);
81397-	uintptr_t mu1int = (uintptr_t)mutex1;
81398-	uintptr_t mu2int = (uintptr_t)mutex2;
81399-	if (mu1int < mu2int) {
81400-		return -1;
81401-	} else if (mu1int == mu2int) {
81402-		return 0;
81403-	} else {
81404-		return 1;
81405-	}
81406-}
81407-
81408-bool
81409-malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
81410-    witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
81411-	mutex_prof_data_init(&mutex->prof_data);
81412-#ifdef _WIN32
81413-#  if _WIN32_WINNT >= 0x0600
81414-	InitializeSRWLock(&mutex->lock);
81415-#  else
81416-	if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
81417-	    _CRT_SPINCOUNT)) {
81418-		return true;
81419-	}
81420-#  endif
81421-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
81422-       mutex->lock = OS_UNFAIR_LOCK_INIT;
81423-#elif (defined(JEMALLOC_MUTEX_INIT_CB))
81424-	if (postpone_init) {
81425-		mutex->postponed_next = postponed_mutexes;
81426-		postponed_mutexes = mutex;
81427-	} else {
81428-		if (_pthread_mutex_init_calloc_cb(&mutex->lock,
81429-		    bootstrap_calloc) != 0) {
81430-			return true;
81431-		}
81432-	}
81433-#else
81434-	pthread_mutexattr_t attr;
81435-
81436-	if (pthread_mutexattr_init(&attr) != 0) {
81437-		return true;
81438-	}
81439-	pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
81440-	if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
81441-		pthread_mutexattr_destroy(&attr);
81442-		return true;
81443-	}
81444-	pthread_mutexattr_destroy(&attr);
81445-#endif
81446-	if (config_debug) {
81447-		mutex->lock_order = lock_order;
81448-		if (lock_order == malloc_mutex_address_ordered) {
81449-			witness_init(&mutex->witness, name, rank,
81450-			    mutex_addr_comp, mutex);
81451-		} else {
81452-			witness_init(&mutex->witness, name, rank, NULL, NULL);
81453-		}
81454-	}
81455-	return false;
81456-}
81457-
81458-void
81459-malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
81460-	malloc_mutex_lock(tsdn, mutex);
81461-}
81462-
81463-void
81464-malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
81465-	malloc_mutex_unlock(tsdn, mutex);
81466-}
81467-
81468-void
81469-malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
81470-#ifdef JEMALLOC_MUTEX_INIT_CB
81471-	malloc_mutex_unlock(tsdn, mutex);
81472-#else
81473-	if (malloc_mutex_init(mutex, mutex->witness.name,
81474-	    mutex->witness.rank, mutex->lock_order)) {
81475-		malloc_printf("<jemalloc>: Error re-initializing mutex in "
81476-		    "child\n");
81477-		if (opt_abort) {
81478-			abort();
81479-		}
81480-	}
81481-#endif
81482-}
81483-
81484-bool
81485-malloc_mutex_boot(void) {
81486-#ifdef JEMALLOC_MUTEX_INIT_CB
81487-	postpone_init = false;
81488-	while (postponed_mutexes != NULL) {
81489-		if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
81490-		    bootstrap_calloc) != 0) {
81491-			return true;
81492-		}
81493-		postponed_mutexes = postponed_mutexes->postponed_next;
81494-	}
81495-#endif
81496-	return false;
81497-}
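The deleted malloc_mutex_lock_slow() above follows a bounded spin-then-block pattern: spin with cheap relaxed loads for up to opt_mutex_max_spin iterations, then fall back to the real blocking path. A hedged sketch of that pattern using C11 atomics; the spin_lock_t type and the sched_yield() fallback are stand-ins for jemalloc's mutex and its OS-level blocking lock:

#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

#define MAX_SPIN 600  /* stand-in for opt_mutex_max_spin */

typedef struct { atomic_bool locked; } spin_lock_t;

static bool
try_lock(spin_lock_t *l) {
	/* Returns true if the lock was free and we just took it. */
	return !atomic_exchange_explicit(&l->locked, true, memory_order_acquire);
}

static void
lock_slow(spin_lock_t *l) {
	for (int cnt = 0; cnt < MAX_SPIN; cnt++) {
		/* Relaxed load keeps the cache line shared while spinning. */
		if (!atomic_load_explicit(&l->locked, memory_order_relaxed)
		    && try_lock(l)) {
			return;  /* acquired during the spin phase */
		}
	}
	/* True slow path: stop burning CPU (jemalloc blocks on the OS lock). */
	while (!try_lock(l)) {
		sched_yield();
	}
}

static void
unlock(spin_lock_t *l) {
	atomic_store_explicit(&l->locked, false, memory_order_release);
}

int
main(void) {
	spin_lock_t l;
	atomic_init(&l.locked, false);
	lock_slow(&l);
	unlock(&l);
	return 0;
}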
81498diff --git a/jemalloc/src/nstime.c b/jemalloc/src/nstime.c
81499deleted file mode 100644
81500index a1a5377..0000000
81501--- a/jemalloc/src/nstime.c
81502+++ /dev/null
81503@@ -1,289 +0,0 @@
81504-#include "jemalloc/internal/jemalloc_preamble.h"
81505-#include "jemalloc/internal/jemalloc_internal_includes.h"
81506-
81507-#include "jemalloc/internal/nstime.h"
81508-
81509-#include "jemalloc/internal/assert.h"
81510-
81511-#define BILLION	UINT64_C(1000000000)
81512-#define MILLION	UINT64_C(1000000)
81513-
81514-static void
81515-nstime_set_initialized(nstime_t *time) {
81516-#ifdef JEMALLOC_DEBUG
81517-	time->magic = NSTIME_MAGIC;
81518-#endif
81519-}
81520-
81521-static void
81522-nstime_assert_initialized(const nstime_t *time) {
81523-#ifdef JEMALLOC_DEBUG
81524-	/*
81525-	 * Some parts (e.g. stats) rely on memset to zero initialize.  Treat
81526-	 * these as valid initialization.
81527-	 */
81528-	assert(time->magic == NSTIME_MAGIC ||
81529-	    (time->magic == 0 && time->ns == 0));
81530-#endif
81531-}
81532-
81533-static void
81534-nstime_pair_assert_initialized(const nstime_t *t1, const nstime_t *t2) {
81535-	nstime_assert_initialized(t1);
81536-	nstime_assert_initialized(t2);
81537-}
81538-
81539-static void
81540-nstime_initialize_operand(nstime_t *time) {
81541-	/*
81542-	 * Operations like nstime_add may be given an initial operand that is
81543-	 * zero-initialized (covered by the assert below).  Fully initialize it
81544-	 * before changing it to a non-zero value.
81545-	 */
81546-	nstime_assert_initialized(time);
81547-	nstime_set_initialized(time);
81548-}
81549-
81550-void
81551-nstime_init(nstime_t *time, uint64_t ns) {
81552-	nstime_set_initialized(time);
81553-	time->ns = ns;
81554-}
81555-
81556-void
81557-nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) {
81558-	nstime_set_initialized(time);
81559-	time->ns = sec * BILLION + nsec;
81560-}
81561-
81562-uint64_t
81563-nstime_ns(const nstime_t *time) {
81564-	nstime_assert_initialized(time);
81565-	return time->ns;
81566-}
81567-
81568-uint64_t
81569-nstime_msec(const nstime_t *time) {
81570-	nstime_assert_initialized(time);
81571-	return time->ns / MILLION;
81572-}
81573-
81574-uint64_t
81575-nstime_sec(const nstime_t *time) {
81576-	nstime_assert_initialized(time);
81577-	return time->ns / BILLION;
81578-}
81579-
81580-uint64_t
81581-nstime_nsec(const nstime_t *time) {
81582-	nstime_assert_initialized(time);
81583-	return time->ns % BILLION;
81584-}
81585-
81586-void
81587-nstime_copy(nstime_t *time, const nstime_t *source) {
81588-	/* Source is required to be initialized. */
81589-	nstime_assert_initialized(source);
81590-	*time = *source;
81591-	nstime_assert_initialized(time);
81592-}
81593-
81594-int
81595-nstime_compare(const nstime_t *a, const nstime_t *b) {
81596-	nstime_pair_assert_initialized(a, b);
81597-	return (a->ns > b->ns) - (a->ns < b->ns);
81598-}
81599-
81600-void
81601-nstime_add(nstime_t *time, const nstime_t *addend) {
81602-	nstime_pair_assert_initialized(time, addend);
81603-	assert(UINT64_MAX - time->ns >= addend->ns);
81604-
81605-	nstime_initialize_operand(time);
81606-	time->ns += addend->ns;
81607-}
81608-
81609-void
81610-nstime_iadd(nstime_t *time, uint64_t addend) {
81611-	nstime_assert_initialized(time);
81612-	assert(UINT64_MAX - time->ns >= addend);
81613-
81614-	nstime_initialize_operand(time);
81615-	time->ns += addend;
81616-}
81617-
81618-void
81619-nstime_subtract(nstime_t *time, const nstime_t *subtrahend) {
81620-	nstime_pair_assert_initialized(time, subtrahend);
81621-	assert(nstime_compare(time, subtrahend) >= 0);
81622-
81623-	/* No initialize operand -- subtraction must be initialized. */
81624-	time->ns -= subtrahend->ns;
81625-}
81626-
81627-void
81628-nstime_isubtract(nstime_t *time, uint64_t subtrahend) {
81629-	nstime_assert_initialized(time);
81630-	assert(time->ns >= subtrahend);
81631-
81632-	/* No initialize operand -- subtraction must be initialized. */
81633-	time->ns -= subtrahend;
81634-}
81635-
81636-void
81637-nstime_imultiply(nstime_t *time, uint64_t multiplier) {
81638-	nstime_assert_initialized(time);
81639-	assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
81640-	    2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
81641-
81642-	nstime_initialize_operand(time);
81643-	time->ns *= multiplier;
81644-}
81645-
81646-void
81647-nstime_idivide(nstime_t *time, uint64_t divisor) {
81648-	nstime_assert_initialized(time);
81649-	assert(divisor != 0);
81650-
81651-	nstime_initialize_operand(time);
81652-	time->ns /= divisor;
81653-}
81654-
81655-uint64_t
81656-nstime_divide(const nstime_t *time, const nstime_t *divisor) {
81657-	nstime_pair_assert_initialized(time, divisor);
81658-	assert(divisor->ns != 0);
81659-
81660-	/* No initialize operand -- *time itself remains unchanged. */
81661-	return time->ns / divisor->ns;
81662-}
81663-
81664-/* Returns time since *past, w/o updating *past. */
81665-uint64_t
81666-nstime_ns_since(const nstime_t *past) {
81667-	nstime_assert_initialized(past);
81668-
81669-	nstime_t now;
81670-	nstime_copy(&now, past);
81671-	nstime_update(&now);
81672-
81673-	assert(nstime_compare(&now, past) >= 0);
81674-	return now.ns - past->ns;
81675-}
81676-
81677-#ifdef _WIN32
81678-#  define NSTIME_MONOTONIC true
81679-static void
81680-nstime_get(nstime_t *time) {
81681-	FILETIME ft;
81682-	uint64_t ticks_100ns;
81683-
81684-	GetSystemTimeAsFileTime(&ft);
81685-	ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
81686-
81687-	nstime_init(time, ticks_100ns * 100);
81688-}
81689-#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE)
81690-#  define NSTIME_MONOTONIC true
81691-static void
81692-nstime_get(nstime_t *time) {
81693-	struct timespec ts;
81694-
81695-	clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
81696-	nstime_init2(time, ts.tv_sec, ts.tv_nsec);
81697-}
81698-#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC)
81699-#  define NSTIME_MONOTONIC true
81700-static void
81701-nstime_get(nstime_t *time) {
81702-	struct timespec ts;
81703-
81704-	clock_gettime(CLOCK_MONOTONIC, &ts);
81705-	nstime_init2(time, ts.tv_sec, ts.tv_nsec);
81706-}
81707-#elif defined(JEMALLOC_HAVE_MACH_ABSOLUTE_TIME)
81708-#  define NSTIME_MONOTONIC true
81709-static void
81710-nstime_get(nstime_t *time) {
81711-	nstime_init(time, mach_absolute_time());
81712-}
81713-#else
81714-#  define NSTIME_MONOTONIC false
81715-static void
81716-nstime_get(nstime_t *time) {
81717-	struct timeval tv;
81718-
81719-	gettimeofday(&tv, NULL);
81720-	nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000);
81721-}
81722-#endif
81723-
81724-static bool
81725-nstime_monotonic_impl(void) {
81726-	return NSTIME_MONOTONIC;
81727-#undef NSTIME_MONOTONIC
81728-}
81729-nstime_monotonic_t *JET_MUTABLE nstime_monotonic = nstime_monotonic_impl;
81730-
81731-prof_time_res_t opt_prof_time_res =
81732-	prof_time_res_default;
81733-
81734-const char *prof_time_res_mode_names[] = {
81735-	"default",
81736-	"high",
81737-};
81738-
81739-
81740-static void
81741-nstime_get_realtime(nstime_t *time) {
81742-#if defined(JEMALLOC_HAVE_CLOCK_REALTIME) && !defined(_WIN32)
81743-	struct timespec ts;
81744-
81745-	clock_gettime(CLOCK_REALTIME, &ts);
81746-	nstime_init2(time, ts.tv_sec, ts.tv_nsec);
81747-#else
81748-	unreachable();
81749-#endif
81750-}
81751-
81752-static void
81753-nstime_prof_update_impl(nstime_t *time) {
81754-	nstime_t old_time;
81755-
81756-	nstime_copy(&old_time, time);
81757-
81758-	if (opt_prof_time_res == prof_time_res_high) {
81759-		nstime_get_realtime(time);
81760-	} else {
81761-		nstime_get(time);
81762-	}
81763-}
81764-nstime_prof_update_t *JET_MUTABLE nstime_prof_update = nstime_prof_update_impl;
81765-
81766-static void
81767-nstime_update_impl(nstime_t *time) {
81768-	nstime_t old_time;
81769-
81770-	nstime_copy(&old_time, time);
81771-	nstime_get(time);
81772-
81773-	/* Handle non-monotonic clocks. */
81774-	if (unlikely(nstime_compare(&old_time, time) > 0)) {
81775-		nstime_copy(time, &old_time);
81776-	}
81777-}
81778-nstime_update_t *JET_MUTABLE nstime_update = nstime_update_impl;
81779-
81780-void
81781-nstime_init_update(nstime_t *time) {
81782-	nstime_init_zero(time);
81783-	nstime_update(time);
81784-}
81785-
81786-void
81787-nstime_prof_init_update(nstime_t *time) {
81788-	nstime_init_zero(time);
81789-	nstime_prof_update(time);
81790-}
81791-
81792-
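nstime_update_impl() in the deleted file above clamps the stored timestamp so it never moves backwards when the underlying clock is not monotonic. A small sketch of that clamp, assuming a POSIX clock_gettime() source that may jump backwards (CLOCK_REALTIME here); time_update and read_clock_ns are illustrative names, not jemalloc APIs:

#include <stdint.h>
#include <time.h>

/* Read a wall clock in nanoseconds; assumes POSIX clock_gettime(). */
static uint64_t
read_clock_ns(void) {
	struct timespec ts;
	clock_gettime(CLOCK_REALTIME, &ts);
	return (uint64_t)ts.tv_sec * 1000000000u + (uint64_t)ts.tv_nsec;
}

/* Update *time_ns, never letting it move backwards -- the same clamp the
 * deleted nstime_update_impl() applies for non-monotonic clocks. */
static void
time_update(uint64_t *time_ns) {
	uint64_t old = *time_ns;
	uint64_t now = read_clock_ns();
	*time_ns = (now < old) ? old : now;
}

int
main(void) {
	uint64_t t = 0;
	time_update(&t);
	time_update(&t);
	return 0;
}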
81793diff --git a/jemalloc/src/pa.c b/jemalloc/src/pa.c
81794deleted file mode 100644
81795index eb7e462..0000000
81796--- a/jemalloc/src/pa.c
81797+++ /dev/null
81798@@ -1,277 +0,0 @@
81799-#include "jemalloc/internal/jemalloc_preamble.h"
81800-#include "jemalloc/internal/jemalloc_internal_includes.h"
81801-
81802-#include "jemalloc/internal/san.h"
81803-#include "jemalloc/internal/hpa.h"
81804-
81805-static void
81806-pa_nactive_add(pa_shard_t *shard, size_t add_pages) {
81807-	atomic_fetch_add_zu(&shard->nactive, add_pages, ATOMIC_RELAXED);
81808-}
81809-
81810-static void
81811-pa_nactive_sub(pa_shard_t *shard, size_t sub_pages) {
81812-	assert(atomic_load_zu(&shard->nactive, ATOMIC_RELAXED) >= sub_pages);
81813-	atomic_fetch_sub_zu(&shard->nactive, sub_pages, ATOMIC_RELAXED);
81814-}
81815-
81816-bool
81817-pa_central_init(pa_central_t *central, base_t *base, bool hpa,
81818-    hpa_hooks_t *hpa_hooks) {
81819-	bool err;
81820-	if (hpa) {
81821-		err = hpa_central_init(&central->hpa, base, hpa_hooks);
81822-		if (err) {
81823-			return true;
81824-		}
81825-	}
81826-	return false;
81827-}
81828-
81829-bool
81830-pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
81831-    emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats,
81832-    malloc_mutex_t *stats_mtx, nstime_t *cur_time,
81833-    size_t pac_oversize_threshold, ssize_t dirty_decay_ms,
81834-    ssize_t muzzy_decay_ms) {
81835-	/* This will change eventually, but for now it should hold. */
81836-	assert(base_ind_get(base) == ind);
81837-	if (edata_cache_init(&shard->edata_cache, base)) {
81838-		return true;
81839-	}
81840-
81841-	if (pac_init(tsdn, &shard->pac, base, emap, &shard->edata_cache,
81842-	    cur_time, pac_oversize_threshold, dirty_decay_ms, muzzy_decay_ms,
81843-	    &stats->pac_stats, stats_mtx)) {
81844-		return true;
81845-	}
81846-
81847-	shard->ind = ind;
81848-
81849-	shard->ever_used_hpa = false;
81850-	atomic_store_b(&shard->use_hpa, false, ATOMIC_RELAXED);
81851-
81852-	atomic_store_zu(&shard->nactive, 0, ATOMIC_RELAXED);
81853-
81854-	shard->stats_mtx = stats_mtx;
81855-	shard->stats = stats;
81856-	memset(shard->stats, 0, sizeof(*shard->stats));
81857-
81858-	shard->central = central;
81859-	shard->emap = emap;
81860-	shard->base = base;
81861-
81862-	return false;
81863-}
81864-
81865-bool
81866-pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
81867-    const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts) {
81868-	if (hpa_shard_init(&shard->hpa_shard, &shard->central->hpa, shard->emap,
81869-	    shard->base, &shard->edata_cache, shard->ind, hpa_opts)) {
81870-		return true;
81871-	}
81872-	if (sec_init(tsdn, &shard->hpa_sec, shard->base, &shard->hpa_shard.pai,
81873-	    hpa_sec_opts)) {
81874-		return true;
81875-	}
81876-	shard->ever_used_hpa = true;
81877-	atomic_store_b(&shard->use_hpa, true, ATOMIC_RELAXED);
81878-
81879-	return false;
81880-}
81881-
81882-void
81883-pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard) {
81884-	atomic_store_b(&shard->use_hpa, false, ATOMIC_RELAXED);
81885-	if (shard->ever_used_hpa) {
81886-		sec_disable(tsdn, &shard->hpa_sec);
81887-		hpa_shard_disable(tsdn, &shard->hpa_shard);
81888-	}
81889-}
81890-
81891-void
81892-pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard) {
81893-	atomic_store_zu(&shard->nactive, 0, ATOMIC_RELAXED);
81894-	if (shard->ever_used_hpa) {
81895-		sec_flush(tsdn, &shard->hpa_sec);
81896-	}
81897-}
81898-
81899-static bool
81900-pa_shard_uses_hpa(pa_shard_t *shard) {
81901-	return atomic_load_b(&shard->use_hpa, ATOMIC_RELAXED);
81902-}
81903-
81904-void
81905-pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard) {
81906-	pac_destroy(tsdn, &shard->pac);
81907-	if (shard->ever_used_hpa) {
81908-		sec_flush(tsdn, &shard->hpa_sec);
81909-		hpa_shard_disable(tsdn, &shard->hpa_shard);
81910-	}
81911-}
81912-
81913-static pai_t *
81914-pa_get_pai(pa_shard_t *shard, edata_t *edata) {
81915-	return (edata_pai_get(edata) == EXTENT_PAI_PAC
81916-	    ? &shard->pac.pai : &shard->hpa_sec.pai);
81917-}
81918-
81919-edata_t *
81920-pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
81921-    bool slab, szind_t szind, bool zero, bool guarded,
81922-    bool *deferred_work_generated) {
81923-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
81924-	    WITNESS_RANK_CORE, 0);
81925-	assert(!guarded || alignment <= PAGE);
81926-
81927-	edata_t *edata = NULL;
81928-	if (!guarded && pa_shard_uses_hpa(shard)) {
81929-		edata = pai_alloc(tsdn, &shard->hpa_sec.pai, size, alignment,
81930-		    zero, /* guarded */ false, slab, deferred_work_generated);
81931-	}
81932-	/*
81933-	 * Fall back to the PAC if the HPA is off or couldn't serve the given
81934-	 * allocation request.
81935-	 */
81936-	if (edata == NULL) {
81937-		edata = pai_alloc(tsdn, &shard->pac.pai, size, alignment, zero,
81938-		    guarded, slab, deferred_work_generated);
81939-	}
81940-	if (edata != NULL) {
81941-		assert(edata_size_get(edata) == size);
81942-		pa_nactive_add(shard, size >> LG_PAGE);
81943-		emap_remap(tsdn, shard->emap, edata, szind, slab);
81944-		edata_szind_set(edata, szind);
81945-		edata_slab_set(edata, slab);
81946-		if (slab && (size > 2 * PAGE)) {
81947-			emap_register_interior(tsdn, shard->emap, edata, szind);
81948-		}
81949-		assert(edata_arena_ind_get(edata) == shard->ind);
81950-	}
81951-	return edata;
81952-}
81953-
81954-bool
81955-pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
81956-    size_t new_size, szind_t szind, bool zero, bool *deferred_work_generated) {
81957-	assert(new_size > old_size);
81958-	assert(edata_size_get(edata) == old_size);
81959-	assert((new_size & PAGE_MASK) == 0);
81960-	if (edata_guarded_get(edata)) {
81961-		return true;
81962-	}
81963-	size_t expand_amount = new_size - old_size;
81964-
81965-	pai_t *pai = pa_get_pai(shard, edata);
81966-
81967-	bool error = pai_expand(tsdn, pai, edata, old_size, new_size, zero,
81968-	    deferred_work_generated);
81969-	if (error) {
81970-		return true;
81971-	}
81972-
81973-	pa_nactive_add(shard, expand_amount >> LG_PAGE);
81974-	edata_szind_set(edata, szind);
81975-	emap_remap(tsdn, shard->emap, edata, szind, /* slab */ false);
81976-	return false;
81977-}
81978-
81979-bool
81980-pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
81981-    size_t new_size, szind_t szind, bool *deferred_work_generated) {
81982-	assert(new_size < old_size);
81983-	assert(edata_size_get(edata) == old_size);
81984-	assert((new_size & PAGE_MASK) == 0);
81985-	if (edata_guarded_get(edata)) {
81986-		return true;
81987-	}
81988-	size_t shrink_amount = old_size - new_size;
81989-
81990-	pai_t *pai = pa_get_pai(shard, edata);
81991-	bool error = pai_shrink(tsdn, pai, edata, old_size, new_size,
81992-	    deferred_work_generated);
81993-	if (error) {
81994-		return true;
81995-	}
81996-	pa_nactive_sub(shard, shrink_amount >> LG_PAGE);
81997-
81998-	edata_szind_set(edata, szind);
81999-	emap_remap(tsdn, shard->emap, edata, szind, /* slab */ false);
82000-	return false;
82001-}
82002-
82003-void
82004-pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
82005-    bool *deferred_work_generated) {
82006-	emap_remap(tsdn, shard->emap, edata, SC_NSIZES, /* slab */ false);
82007-	if (edata_slab_get(edata)) {
82008-		emap_deregister_interior(tsdn, shard->emap, edata);
82009-		/*
82010-		 * The slab state of the extent isn't cleared.  It may be used
82011-		 * by the pai implementation, e.g. to make caching decisions.
82012-		 */
82013-	}
82014-	edata_addr_set(edata, edata_base_get(edata));
82015-	edata_szind_set(edata, SC_NSIZES);
82016-	pa_nactive_sub(shard, edata_size_get(edata) >> LG_PAGE);
82017-	pai_t *pai = pa_get_pai(shard, edata);
82018-	pai_dalloc(tsdn, pai, edata, deferred_work_generated);
82019-}
82020-
82021-bool
82022-pa_shard_retain_grow_limit_get_set(tsdn_t *tsdn, pa_shard_t *shard,
82023-    size_t *old_limit, size_t *new_limit) {
82024-	return pac_retain_grow_limit_get_set(tsdn, &shard->pac, old_limit,
82025-	    new_limit);
82026-}
82027-
82028-bool
82029-pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state,
82030-    ssize_t decay_ms, pac_purge_eagerness_t eagerness) {
82031-	return pac_decay_ms_set(tsdn, &shard->pac, state, decay_ms, eagerness);
82032-}
82033-
82034-ssize_t
82035-pa_decay_ms_get(pa_shard_t *shard, extent_state_t state) {
82036-	return pac_decay_ms_get(&shard->pac, state);
82037-}
82038-
82039-void
82040-pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard,
82041-    bool deferral_allowed) {
82042-	if (pa_shard_uses_hpa(shard)) {
82043-		hpa_shard_set_deferral_allowed(tsdn, &shard->hpa_shard,
82044-		    deferral_allowed);
82045-	}
82046-}
82047-
82048-void
82049-pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) {
82050-	if (pa_shard_uses_hpa(shard)) {
82051-		hpa_shard_do_deferred_work(tsdn, &shard->hpa_shard);
82052-	}
82053-}
82054-
82055-/*
82056- * Get the time until the next piece of deferred work ought to happen.  If
82057- * multiple things have been deferred, this function returns the time until
82058- * the soonest of them.
82059- */
82060-uint64_t
82061-pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) {
82062-	uint64_t time = pai_time_until_deferred_work(tsdn, &shard->pac.pai);
82063-	if (time == BACKGROUND_THREAD_DEFERRED_MIN) {
82064-		return time;
82065-	}
82066-
82067-	if (pa_shard_uses_hpa(shard)) {
82068-		uint64_t hpa =
82069-		    pai_time_until_deferred_work(tsdn, &shard->hpa_shard.pai);
82070-		if (hpa < time) {
82071-			time = hpa;
82072-		}
82073-	}
82074-	return time;
82075-}
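pa_shard_time_until_deferred_work() above combines the wait times reported by the PAC and (when in use) the HPA by taking the soonest one, with an early return once the minimum possible wait is reached. A compact sketch of that combining logic; DEFERRED_MIN stands in for BACKGROUND_THREAD_DEFERRED_MIN and the function name is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEFERRED_MIN 0  /* stand-in for BACKGROUND_THREAD_DEFERRED_MIN */

/* Return the time until the soonest piece of deferred work. */
static uint64_t
time_until_deferred_work(uint64_t pac_wait, bool uses_hpa, uint64_t hpa_wait) {
	uint64_t wait = pac_wait;
	if (wait == DEFERRED_MIN) {
		return wait;  /* cannot get any sooner */
	}
	if (uses_hpa && hpa_wait < wait) {
		wait = hpa_wait;
	}
	return wait;
}

int
main(void) {
	printf("%llu\n",
	    (unsigned long long)time_until_deferred_work(500, true, 200));
	return 0;
}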
82076diff --git a/jemalloc/src/pa_extra.c b/jemalloc/src/pa_extra.c
82077deleted file mode 100644
82078index 0f488be..0000000
82079--- a/jemalloc/src/pa_extra.c
82080+++ /dev/null
82081@@ -1,191 +0,0 @@
82082-#include "jemalloc/internal/jemalloc_preamble.h"
82083-#include "jemalloc/internal/jemalloc_internal_includes.h"
82084-
82085-/*
82086- * This file is logically part of the PA module.  While pa.c contains the core
82087- * allocator functionality, this file contains boring integration functionality;
82088- * things like the pre- and post- fork handlers, and stats merging for CTL
82089- * refreshes.
82090- */
82091-
82092-void
82093-pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard) {
82094-	malloc_mutex_prefork(tsdn, &shard->pac.decay_dirty.mtx);
82095-	malloc_mutex_prefork(tsdn, &shard->pac.decay_muzzy.mtx);
82096-}
82097-
82098-void
82099-pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard) {
82100-	if (shard->ever_used_hpa) {
82101-		sec_prefork2(tsdn, &shard->hpa_sec);
82102-	}
82103-}
82104-
82105-void
82106-pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard) {
82107-	malloc_mutex_prefork(tsdn, &shard->pac.grow_mtx);
82108-	if (shard->ever_used_hpa) {
82109-		hpa_shard_prefork3(tsdn, &shard->hpa_shard);
82110-	}
82111-}
82112-
82113-void
82114-pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard) {
82115-	ecache_prefork(tsdn, &shard->pac.ecache_dirty);
82116-	ecache_prefork(tsdn, &shard->pac.ecache_muzzy);
82117-	ecache_prefork(tsdn, &shard->pac.ecache_retained);
82118-	if (shard->ever_used_hpa) {
82119-		hpa_shard_prefork4(tsdn, &shard->hpa_shard);
82120-	}
82121-}
82122-
82123-void
82124-pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard) {
82125-	edata_cache_prefork(tsdn, &shard->edata_cache);
82126-}
82127-
82128-void
82129-pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard) {
82130-	edata_cache_postfork_parent(tsdn, &shard->edata_cache);
82131-	ecache_postfork_parent(tsdn, &shard->pac.ecache_dirty);
82132-	ecache_postfork_parent(tsdn, &shard->pac.ecache_muzzy);
82133-	ecache_postfork_parent(tsdn, &shard->pac.ecache_retained);
82134-	malloc_mutex_postfork_parent(tsdn, &shard->pac.grow_mtx);
82135-	malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_dirty.mtx);
82136-	malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_muzzy.mtx);
82137-	if (shard->ever_used_hpa) {
82138-		sec_postfork_parent(tsdn, &shard->hpa_sec);
82139-		hpa_shard_postfork_parent(tsdn, &shard->hpa_shard);
82140-	}
82141-}
82142-
82143-void
82144-pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard) {
82145-	edata_cache_postfork_child(tsdn, &shard->edata_cache);
82146-	ecache_postfork_child(tsdn, &shard->pac.ecache_dirty);
82147-	ecache_postfork_child(tsdn, &shard->pac.ecache_muzzy);
82148-	ecache_postfork_child(tsdn, &shard->pac.ecache_retained);
82149-	malloc_mutex_postfork_child(tsdn, &shard->pac.grow_mtx);
82150-	malloc_mutex_postfork_child(tsdn, &shard->pac.decay_dirty.mtx);
82151-	malloc_mutex_postfork_child(tsdn, &shard->pac.decay_muzzy.mtx);
82152-	if (shard->ever_used_hpa) {
82153-		sec_postfork_child(tsdn, &shard->hpa_sec);
82154-		hpa_shard_postfork_child(tsdn, &shard->hpa_shard);
82155-	}
82156-}
82157-
82158-void
82159-pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive, size_t *ndirty,
82160-    size_t *nmuzzy) {
82161-	*nactive += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED);
82162-	*ndirty += ecache_npages_get(&shard->pac.ecache_dirty);
82163-	*nmuzzy += ecache_npages_get(&shard->pac.ecache_muzzy);
82164-}
82165-
82166-void
82167-pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
82168-    pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out,
82169-    hpa_shard_stats_t *hpa_stats_out, sec_stats_t *sec_stats_out,
82170-    size_t *resident) {
82171-	cassert(config_stats);
82172-
82173-	pa_shard_stats_out->pac_stats.retained +=
82174-	    ecache_npages_get(&shard->pac.ecache_retained) << LG_PAGE;
82175-	pa_shard_stats_out->edata_avail += atomic_load_zu(
82176-	    &shard->edata_cache.count, ATOMIC_RELAXED);
82177-
82178-	size_t resident_pgs = 0;
82179-	resident_pgs += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED);
82180-	resident_pgs += ecache_npages_get(&shard->pac.ecache_dirty);
82181-	*resident += (resident_pgs << LG_PAGE);
82182-
82183-	/* Dirty decay stats */
82184-	locked_inc_u64_unsynchronized(
82185-	    &pa_shard_stats_out->pac_stats.decay_dirty.npurge,
82186-	    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
82187-	    &shard->pac.stats->decay_dirty.npurge));
82188-	locked_inc_u64_unsynchronized(
82189-	    &pa_shard_stats_out->pac_stats.decay_dirty.nmadvise,
82190-	    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
82191-	    &shard->pac.stats->decay_dirty.nmadvise));
82192-	locked_inc_u64_unsynchronized(
82193-	    &pa_shard_stats_out->pac_stats.decay_dirty.purged,
82194-	    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
82195-	    &shard->pac.stats->decay_dirty.purged));
82196-
82197-	/* Muzzy decay stats */
82198-	locked_inc_u64_unsynchronized(
82199-	    &pa_shard_stats_out->pac_stats.decay_muzzy.npurge,
82200-	    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
82201-	    &shard->pac.stats->decay_muzzy.npurge));
82202-	locked_inc_u64_unsynchronized(
82203-	    &pa_shard_stats_out->pac_stats.decay_muzzy.nmadvise,
82204-	    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
82205-	    &shard->pac.stats->decay_muzzy.nmadvise));
82206-	locked_inc_u64_unsynchronized(
82207-	    &pa_shard_stats_out->pac_stats.decay_muzzy.purged,
82208-	    locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
82209-	    &shard->pac.stats->decay_muzzy.purged));
82210-
82211-	atomic_load_add_store_zu(&pa_shard_stats_out->pac_stats.abandoned_vm,
82212-	    atomic_load_zu(&shard->pac.stats->abandoned_vm, ATOMIC_RELAXED));
82213-
82214-	for (pszind_t i = 0; i < SC_NPSIZES; i++) {
82215-		size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
82216-		    retained_bytes;
82217-		dirty = ecache_nextents_get(&shard->pac.ecache_dirty, i);
82218-		muzzy = ecache_nextents_get(&shard->pac.ecache_muzzy, i);
82219-		retained = ecache_nextents_get(&shard->pac.ecache_retained, i);
82220-		dirty_bytes = ecache_nbytes_get(&shard->pac.ecache_dirty, i);
82221-		muzzy_bytes = ecache_nbytes_get(&shard->pac.ecache_muzzy, i);
82222-		retained_bytes = ecache_nbytes_get(&shard->pac.ecache_retained,
82223-		    i);
82224-
82225-		estats_out[i].ndirty = dirty;
82226-		estats_out[i].nmuzzy = muzzy;
82227-		estats_out[i].nretained = retained;
82228-		estats_out[i].dirty_bytes = dirty_bytes;
82229-		estats_out[i].muzzy_bytes = muzzy_bytes;
82230-		estats_out[i].retained_bytes = retained_bytes;
82231-	}
82232-
82233-	if (shard->ever_used_hpa) {
82234-		hpa_shard_stats_merge(tsdn, &shard->hpa_shard, hpa_stats_out);
82235-		sec_stats_merge(tsdn, &shard->hpa_sec, sec_stats_out);
82236-	}
82237-}
82238-
82239-static void
82240-pa_shard_mtx_stats_read_single(tsdn_t *tsdn, mutex_prof_data_t *mutex_prof_data,
82241-    malloc_mutex_t *mtx, int ind) {
82242-	malloc_mutex_lock(tsdn, mtx);
82243-	malloc_mutex_prof_read(tsdn, &mutex_prof_data[ind], mtx);
82244-	malloc_mutex_unlock(tsdn, mtx);
82245-}
82246-
82247-void
82248-pa_shard_mtx_stats_read(tsdn_t *tsdn, pa_shard_t *shard,
82249-    mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]) {
82250-	pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
82251-	    &shard->edata_cache.mtx, arena_prof_mutex_extent_avail);
82252-	pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
82253-	    &shard->pac.ecache_dirty.mtx, arena_prof_mutex_extents_dirty);
82254-	pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
82255-	    &shard->pac.ecache_muzzy.mtx, arena_prof_mutex_extents_muzzy);
82256-	pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
82257-	    &shard->pac.ecache_retained.mtx, arena_prof_mutex_extents_retained);
82258-	pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
82259-	    &shard->pac.decay_dirty.mtx, arena_prof_mutex_decay_dirty);
82260-	pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
82261-	    &shard->pac.decay_muzzy.mtx, arena_prof_mutex_decay_muzzy);
82262-
82263-	if (shard->ever_used_hpa) {
82264-		pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
82265-		    &shard->hpa_shard.mtx, arena_prof_mutex_hpa_shard);
82266-		pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
82267-		    &shard->hpa_shard.grow_mtx,
82268-		    arena_prof_mutex_hpa_shard_grow);
82269-		sec_mutex_stats_read(tsdn, &shard->hpa_sec,
82270-		    &mutex_prof_data[arena_prof_mutex_hpa_sec]);
82271-	}
82272-}
82273diff --git a/jemalloc/src/pac.c b/jemalloc/src/pac.c
82274deleted file mode 100644
82275index 53e3d82..0000000
82276--- a/jemalloc/src/pac.c
82277+++ /dev/null
82278@@ -1,587 +0,0 @@
82279-#include "jemalloc/internal/jemalloc_preamble.h"
82280-#include "jemalloc/internal/jemalloc_internal_includes.h"
82281-
82282-#include "jemalloc/internal/pac.h"
82283-#include "jemalloc/internal/san.h"
82284-
82285-static edata_t *pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size,
82286-    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
82287-    bool *deferred_work_generated);
82288-static bool pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
82289-    size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
82290-static bool pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
82291-    size_t old_size, size_t new_size, bool *deferred_work_generated);
82292-static void pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
82293-    bool *deferred_work_generated);
82294-static uint64_t pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self);
82295-
82296-static inline void
82297-pac_decay_data_get(pac_t *pac, extent_state_t state,
82298-    decay_t **r_decay, pac_decay_stats_t **r_decay_stats, ecache_t **r_ecache) {
82299-	switch(state) {
82300-	case extent_state_dirty:
82301-		*r_decay = &pac->decay_dirty;
82302-		*r_decay_stats = &pac->stats->decay_dirty;
82303-		*r_ecache = &pac->ecache_dirty;
82304-		return;
82305-	case extent_state_muzzy:
82306-		*r_decay = &pac->decay_muzzy;
82307-		*r_decay_stats = &pac->stats->decay_muzzy;
82308-		*r_ecache = &pac->ecache_muzzy;
82309-		return;
82310-	default:
82311-		unreachable();
82312-	}
82313-}
82314-
82315-bool
82316-pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
82317-    edata_cache_t *edata_cache, nstime_t *cur_time,
82318-    size_t pac_oversize_threshold, ssize_t dirty_decay_ms,
82319-    ssize_t muzzy_decay_ms, pac_stats_t *pac_stats, malloc_mutex_t *stats_mtx) {
82320-	unsigned ind = base_ind_get(base);
82321-	/*
82322-	 * Delay coalescing for dirty extents despite the disruptive effect on
82323-	 * memory layout for best-fit extent allocation, since cached extents
82324-	 * are likely to be reused soon after deallocation, and the cost of
82325-	 * merging/splitting extents is non-trivial.
82326-	 */
82327-	if (ecache_init(tsdn, &pac->ecache_dirty, extent_state_dirty, ind,
82328-	    /* delay_coalesce */ true)) {
82329-		return true;
82330-	}
82331-	/*
82332-	 * Coalesce muzzy extents immediately, because operations on them are in
82333-	 * the critical path much less often than for dirty extents.
82334-	 */
82335-	if (ecache_init(tsdn, &pac->ecache_muzzy, extent_state_muzzy, ind,
82336-	    /* delay_coalesce */ false)) {
82337-		return true;
82338-	}
82339-	/*
82340-	 * Coalesce retained extents immediately, in part because they will
82341-	 * never be evicted (and therefore there's no opportunity for delayed
82342-	 * coalescing), but also because operations on retained extents are not
82343-	 * in the critical path.
82344-	 */
82345-	if (ecache_init(tsdn, &pac->ecache_retained, extent_state_retained,
82346-	    ind, /* delay_coalesce */ false)) {
82347-		return true;
82348-	}
82349-	exp_grow_init(&pac->exp_grow);
82350-	if (malloc_mutex_init(&pac->grow_mtx, "extent_grow",
82351-	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
82352-		return true;
82353-	}
82354-	atomic_store_zu(&pac->oversize_threshold, pac_oversize_threshold,
82355-	    ATOMIC_RELAXED);
82356-	if (decay_init(&pac->decay_dirty, cur_time, dirty_decay_ms)) {
82357-		return true;
82358-	}
82359-	if (decay_init(&pac->decay_muzzy, cur_time, muzzy_decay_ms)) {
82360-		return true;
82361-	}
82362-	if (san_bump_alloc_init(&pac->sba)) {
82363-		return true;
82364-	}
82365-
82366-	pac->base = base;
82367-	pac->emap = emap;
82368-	pac->edata_cache = edata_cache;
82369-	pac->stats = pac_stats;
82370-	pac->stats_mtx = stats_mtx;
82371-	atomic_store_zu(&pac->extent_sn_next, 0, ATOMIC_RELAXED);
82372-
82373-	pac->pai.alloc = &pac_alloc_impl;
82374-	pac->pai.alloc_batch = &pai_alloc_batch_default;
82375-	pac->pai.expand = &pac_expand_impl;
82376-	pac->pai.shrink = &pac_shrink_impl;
82377-	pac->pai.dalloc = &pac_dalloc_impl;
82378-	pac->pai.dalloc_batch = &pai_dalloc_batch_default;
82379-	pac->pai.time_until_deferred_work = &pac_time_until_deferred_work;
82380-
82381-	return false;
82382-}
82383-
82384-static inline bool
82385-pac_may_have_muzzy(pac_t *pac) {
82386-	return pac_decay_ms_get(pac, extent_state_muzzy) != 0;
82387-}
82388-
82389-static edata_t *
82390-pac_alloc_real(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
82391-    size_t alignment, bool zero, bool guarded) {
82392-	assert(!guarded || alignment <= PAGE);
82393-
82394-	edata_t *edata = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_dirty,
82395-	    NULL, size, alignment, zero, guarded);
82396-
82397-	if (edata == NULL && pac_may_have_muzzy(pac)) {
82398-		edata = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_muzzy,
82399-		    NULL, size, alignment, zero, guarded);
82400-	}
82401-	if (edata == NULL) {
82402-		edata = ecache_alloc_grow(tsdn, pac, ehooks,
82403-		    &pac->ecache_retained, NULL, size, alignment, zero,
82404-		    guarded);
82405-		if (config_stats && edata != NULL) {
82406-			atomic_fetch_add_zu(&pac->stats->pac_mapped, size,
82407-			    ATOMIC_RELAXED);
82408-		}
82409-	}
82410-
82411-	return edata;
82412-}
82413-
82414-static edata_t *
82415-pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
82416-    size_t alignment, bool zero, bool frequent_reuse) {
82417-	assert(alignment <= PAGE);
82418-
82419-	edata_t *edata;
82420-	if (san_bump_enabled() && frequent_reuse) {
82421-		edata = san_bump_alloc(tsdn, &pac->sba, pac, ehooks, size,
82422-		    zero);
82423-	} else {
82424-		size_t size_with_guards = san_two_side_guarded_sz(size);
82425-		/* Alloc a non-guarded extent first. */
82426-		edata = pac_alloc_real(tsdn, pac, ehooks, size_with_guards,
82427-		    /* alignment */ PAGE, zero, /* guarded */ false);
82428-		if (edata != NULL) {
82429-			/* Add guards around it. */
82430-			assert(edata_size_get(edata) == size_with_guards);
82431-			san_guard_pages_two_sided(tsdn, ehooks, edata,
82432-			    pac->emap, true);
82433-		}
82434-	}
82435-	assert(edata == NULL || (edata_guarded_get(edata) &&
82436-	    edata_size_get(edata) == size));
82437-
82438-	return edata;
82439-}
82440-
82441-static edata_t *
82442-pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
82443-    bool zero, bool guarded, bool frequent_reuse,
82444-    bool *deferred_work_generated) {
82445-	pac_t *pac = (pac_t *)self;
82446-	ehooks_t *ehooks = pac_ehooks_get(pac);
82447-
82448-	edata_t *edata = NULL;
82449-	/*
82450-	 * The condition is an optimization: guarded allocations that are not
82451-	 * frequently reused are never put in the ecache, and pac_alloc_real
82452-	 * does not grow retained for guarded allocations, so pac_alloc_real
82453-	 * would always return NULL for them.
82454-	 */
82455-	if (!guarded || frequent_reuse) {
82456-		edata = pac_alloc_real(tsdn, pac, ehooks, size, alignment,
82457-		    zero, guarded);
82458-	}
82459-	if (edata == NULL && guarded) {
82460-		/* No cached guarded extents; creating a new one. */
82461-		edata = pac_alloc_new_guarded(tsdn, pac, ehooks, size,
82462-		    alignment, zero, frequent_reuse);
82463-	}
82464-
82465-	return edata;
82466-}
82467-
82468-static bool
82469-pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
82470-    size_t new_size, bool zero, bool *deferred_work_generated) {
82471-	pac_t *pac = (pac_t *)self;
82472-	ehooks_t *ehooks = pac_ehooks_get(pac);
82473-
82474-	size_t mapped_add = 0;
82475-	size_t expand_amount = new_size - old_size;
82476-
82477-	if (ehooks_merge_will_fail(ehooks)) {
82478-		return true;
82479-	}
82480-	edata_t *trail = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_dirty,
82481-	    edata, expand_amount, PAGE, zero, /* guarded */ false);
82482-	if (trail == NULL) {
82483-		trail = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_muzzy,
82484-		    edata, expand_amount, PAGE, zero, /* guarded */ false);
82485-	}
82486-	if (trail == NULL) {
82487-		trail = ecache_alloc_grow(tsdn, pac, ehooks,
82488-		    &pac->ecache_retained, edata, expand_amount, PAGE, zero,
82489-		    /* guarded */ false);
82490-		mapped_add = expand_amount;
82491-	}
82492-	if (trail == NULL) {
82493-		return true;
82494-	}
82495-	if (extent_merge_wrapper(tsdn, pac, ehooks, edata, trail)) {
82496-		extent_dalloc_wrapper(tsdn, pac, ehooks, trail);
82497-		return true;
82498-	}
82499-	if (config_stats && mapped_add > 0) {
82500-		atomic_fetch_add_zu(&pac->stats->pac_mapped, mapped_add,
82501-		    ATOMIC_RELAXED);
82502-	}
82503-	return false;
82504-}
82505-
82506-static bool
82507-pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
82508-    size_t new_size, bool *deferred_work_generated) {
82509-	pac_t *pac = (pac_t *)self;
82510-	ehooks_t *ehooks = pac_ehooks_get(pac);
82511-
82512-	size_t shrink_amount = old_size - new_size;
82513-
82514-	if (ehooks_split_will_fail(ehooks)) {
82515-		return true;
82516-	}
82517-
82518-	edata_t *trail = extent_split_wrapper(tsdn, pac, ehooks, edata,
82519-	    new_size, shrink_amount, /* holding_core_locks */ false);
82520-	if (trail == NULL) {
82521-		return true;
82522-	}
82523-	ecache_dalloc(tsdn, pac, ehooks, &pac->ecache_dirty, trail);
82524-	*deferred_work_generated = true;
82525-	return false;
82526-}
82527-
82528-static void
82529-pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
82530-    bool *deferred_work_generated) {
82531-	pac_t *pac = (pac_t *)self;
82532-	ehooks_t *ehooks = pac_ehooks_get(pac);
82533-
82534-	if (edata_guarded_get(edata)) {
82535-		/*
82536-		 * Because cached guarded extents do exact fit only, large
82537-		 * guarded extents are restored on dalloc eagerly (otherwise
82538-		 * they will not be reused efficiently).  Slab sizes have a
82539-		 * limited number of size classes, and tend to cycle faster.
82540-		 *
82541-		 * In the case where coalescing is restricted (VirtualFree on
82542-		 * Windows), guarded extents are also not cached -- otherwise
82543-		 * during arena destroy / reset, the retained extents would not
82544-		 * be whole regions (i.e. they are split between regular and
82545-		 * guarded).
82546-		 */
82547-		if (!edata_slab_get(edata) || !maps_coalesce) {
82548-			assert(edata_size_get(edata) >= SC_LARGE_MINCLASS ||
82549-			    !maps_coalesce);
82550-			san_unguard_pages_two_sided(tsdn, ehooks, edata,
82551-			    pac->emap);
82552-		}
82553-	}
82554-
82555-	ecache_dalloc(tsdn, pac, ehooks, &pac->ecache_dirty, edata);
82556-	/* Purging of deallocated pages is deferred */
82557-	*deferred_work_generated = true;
82558-}
82559-
82560-static inline uint64_t
82561-pac_ns_until_purge(tsdn_t *tsdn, decay_t *decay, size_t npages) {
82562-	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
82563-		/* Use minimal interval if decay is contended. */
82564-		return BACKGROUND_THREAD_DEFERRED_MIN;
82565-	}
82566-	uint64_t result = decay_ns_until_purge(decay, npages,
82567-	    ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD);
82568-
82569-	malloc_mutex_unlock(tsdn, &decay->mtx);
82570-	return result;
82571-}
82572-
82573-static uint64_t
82574-pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
82575-	uint64_t time;
82576-	pac_t *pac = (pac_t *)self;
82577-
82578-	time = pac_ns_until_purge(tsdn,
82579-	    &pac->decay_dirty,
82580-	    ecache_npages_get(&pac->ecache_dirty));
82581-	if (time == BACKGROUND_THREAD_DEFERRED_MIN) {
82582-		return time;
82583-	}
82584-
82585-	uint64_t muzzy = pac_ns_until_purge(tsdn,
82586-	    &pac->decay_muzzy,
82587-	    ecache_npages_get(&pac->ecache_muzzy));
82588-	if (muzzy < time) {
82589-		time = muzzy;
82590-	}
82591-	return time;
82592-}
82593-
82594-bool
82595-pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
82596-    size_t *new_limit) {
82597-	pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
82598-	if (new_limit != NULL) {
82599-		size_t limit = *new_limit;
82600-		/* Grow no more than the new limit. */
82601-		if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
82602-			return true;
82603-		}
82604-	}
82605-
82606-	malloc_mutex_lock(tsdn, &pac->grow_mtx);
82607-	if (old_limit != NULL) {
82608-		*old_limit = sz_pind2sz(pac->exp_grow.limit);
82609-	}
82610-	if (new_limit != NULL) {
82611-		pac->exp_grow.limit = new_ind;
82612-	}
82613-	malloc_mutex_unlock(tsdn, &pac->grow_mtx);
82614-
82615-	return false;
82616-}
82617-
82618-static size_t
82619-pac_stash_decayed(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
82620-    size_t npages_limit, size_t npages_decay_max,
82621-    edata_list_inactive_t *result) {
82622-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
82623-	    WITNESS_RANK_CORE, 0);
82624-	ehooks_t *ehooks = pac_ehooks_get(pac);
82625-
82626-	/* Stash extents according to npages_limit. */
82627-	size_t nstashed = 0;
82628-	while (nstashed < npages_decay_max) {
82629-		edata_t *edata = ecache_evict(tsdn, pac, ehooks, ecache,
82630-		    npages_limit);
82631-		if (edata == NULL) {
82632-			break;
82633-		}
82634-		edata_list_inactive_append(result, edata);
82635-		nstashed += edata_size_get(edata) >> LG_PAGE;
82636-	}
82637-	return nstashed;
82638-}
82639-
82640-static size_t
82641-pac_decay_stashed(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
82642-    pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
82643-    edata_list_inactive_t *decay_extents) {
82644-	bool err;
82645-
82646-	size_t nmadvise = 0;
82647-	size_t nunmapped = 0;
82648-	size_t npurged = 0;
82649-
82650-	ehooks_t *ehooks = pac_ehooks_get(pac);
82651-
82652-	bool try_muzzy = !fully_decay
82653-	    && pac_decay_ms_get(pac, extent_state_muzzy) != 0;
82654-
82655-	for (edata_t *edata = edata_list_inactive_first(decay_extents); edata !=
82656-	    NULL; edata = edata_list_inactive_first(decay_extents)) {
82657-		edata_list_inactive_remove(decay_extents, edata);
82658-
82659-		size_t size = edata_size_get(edata);
82660-		size_t npages = size >> LG_PAGE;
82661-
82662-		nmadvise++;
82663-		npurged += npages;
82664-
82665-		switch (ecache->state) {
82666-		case extent_state_active:
82667-			not_reached();
82668-		case extent_state_dirty:
82669-			if (try_muzzy) {
82670-				err = extent_purge_lazy_wrapper(tsdn, ehooks,
82671-				    edata, /* offset */ 0, size);
82672-				if (!err) {
82673-					ecache_dalloc(tsdn, pac, ehooks,
82674-					    &pac->ecache_muzzy, edata);
82675-					break;
82676-				}
82677-			}
82678-			JEMALLOC_FALLTHROUGH;
82679-		case extent_state_muzzy:
82680-			extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
82681-			nunmapped += npages;
82682-			break;
82683-		case extent_state_retained:
82684-		default:
82685-			not_reached();
82686-		}
82687-	}
82688-
82689-	if (config_stats) {
82690-		LOCKEDINT_MTX_LOCK(tsdn, *pac->stats_mtx);
82691-		locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx),
82692-		    &decay_stats->npurge, 1);
82693-		locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx),
82694-		    &decay_stats->nmadvise, nmadvise);
82695-		locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx),
82696-		    &decay_stats->purged, npurged);
82697-		LOCKEDINT_MTX_UNLOCK(tsdn, *pac->stats_mtx);
82698-		atomic_fetch_sub_zu(&pac->stats->pac_mapped,
82699-		    nunmapped << LG_PAGE, ATOMIC_RELAXED);
82700-	}
82701-
82702-	return npurged;
82703-}
82704-
82705-/*
82706- * Decay at most npages_decay_max pages without violating the invariant
82707- * (ecache_npages_get(ecache) >= npages_limit).  The cap on the number of
82708- * pages to decay prevents unbounded growth of the stashed list: otherwise,
82709- * new pages could keep being added to extents during the current decay run,
82710- * and the purging thread would never finish.
82711- */
82712-static void
82713-pac_decay_to_limit(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
82714-    pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
82715-    size_t npages_limit, size_t npages_decay_max) {
82716-	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
82717-	    WITNESS_RANK_CORE, 1);
82718-
82719-	if (decay->purging || npages_decay_max == 0) {
82720-		return;
82721-	}
82722-	decay->purging = true;
82723-	malloc_mutex_unlock(tsdn, &decay->mtx);
82724-
82725-	edata_list_inactive_t decay_extents;
82726-	edata_list_inactive_init(&decay_extents);
82727-	size_t npurge = pac_stash_decayed(tsdn, pac, ecache, npages_limit,
82728-	    npages_decay_max, &decay_extents);
82729-	if (npurge != 0) {
82730-		size_t npurged = pac_decay_stashed(tsdn, pac, decay,
82731-		    decay_stats, ecache, fully_decay, &decay_extents);
82732-		assert(npurged == npurge);
82733-	}
82734-
82735-	malloc_mutex_lock(tsdn, &decay->mtx);
82736-	decay->purging = false;
82737-}
82738-
82739-void
82740-pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
82741-    pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay) {
82742-	malloc_mutex_assert_owner(tsdn, &decay->mtx);
82743-	pac_decay_to_limit(tsdn, pac, decay, decay_stats, ecache, fully_decay,
82744-	    /* npages_limit */ 0, ecache_npages_get(ecache));
82745-}
82746-
82747-static void
82748-pac_decay_try_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
82749-    pac_decay_stats_t *decay_stats, ecache_t *ecache,
82750-    size_t current_npages, size_t npages_limit) {
82751-	if (current_npages > npages_limit) {
82752-		pac_decay_to_limit(tsdn, pac, decay, decay_stats, ecache,
82753-		    /* fully_decay */ false, npages_limit,
82754-		    current_npages - npages_limit);
82755-	}
82756-}
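As a worked example of the arithmetic above: with 1,000 pages in the ecache and a decay-curve limit of 600, a purge pass may stash and purge at most 400 pages, so the invariant ecache_npages_get(ecache) >= npages_limit holds throughout. A toy helper (not a jemalloc function) that mirrors the npages_decay_max computation:

#include <stddef.h>

static size_t
toy_npages_to_purge(size_t npages_current, size_t npages_limit) {
	return npages_current > npages_limit
	    ? npages_current - npages_limit : 0;
}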
82757-
82758-bool
82759-pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
82760-    pac_decay_stats_t *decay_stats, ecache_t *ecache,
82761-    pac_purge_eagerness_t eagerness) {
82762-	malloc_mutex_assert_owner(tsdn, &decay->mtx);
82763-
82764-	/* Purge all or nothing if the option is disabled. */
82765-	ssize_t decay_ms = decay_ms_read(decay);
82766-	if (decay_ms <= 0) {
82767-		if (decay_ms == 0) {
82768-			pac_decay_to_limit(tsdn, pac, decay, decay_stats,
82769-			    ecache, /* fully_decay */ false,
82770-			    /* npages_limit */ 0, ecache_npages_get(ecache));
82771-		}
82772-		return false;
82773-	}
82774-
82775-	/*
82776-	 * If the deadline has been reached, advance to the current epoch and
82777-	 * purge to the new limit if necessary.  Note that dirty pages created
82778-	 * during the current epoch are not subject to purge until a future
82779-	 * epoch; as a result, purging only happens on epoch advances, or when
82780-	 * triggered by background threads (as a scheduled event).
82781-	 */
82782-	nstime_t time;
82783-	nstime_init_update(&time);
82784-	size_t npages_current = ecache_npages_get(ecache);
82785-	bool epoch_advanced = decay_maybe_advance_epoch(decay, &time,
82786-	    npages_current);
82787-	if (eagerness == PAC_PURGE_ALWAYS
82788-	    || (epoch_advanced && eagerness == PAC_PURGE_ON_EPOCH_ADVANCE)) {
82789-		size_t npages_limit = decay_npages_limit_get(decay);
82790-		pac_decay_try_purge(tsdn, pac, decay, decay_stats, ecache,
82791-		    npages_current, npages_limit);
82792-	}
82793-
82794-	return epoch_advanced;
82795-}
82796-
82797-bool
82798-pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
82799-    ssize_t decay_ms, pac_purge_eagerness_t eagerness) {
82800-	decay_t *decay;
82801-	pac_decay_stats_t *decay_stats;
82802-	ecache_t *ecache;
82803-	pac_decay_data_get(pac, state, &decay, &decay_stats, &ecache);
82804-
82805-	if (!decay_ms_valid(decay_ms)) {
82806-		return true;
82807-	}
82808-
82809-	malloc_mutex_lock(tsdn, &decay->mtx);
82810-	/*
82811-	 * Restart decay backlog from scratch, which may cause many dirty pages
82812-	 * to be immediately purged.  It would conceptually be possible to map
82813-	 * the old backlog onto the new backlog, but there is no justification
82814-	 * for such complexity since decay_ms changes are intended to be
82815-	 * infrequent, either between the {-1, 0, >0} states, or a one-time
82816-	 * arbitrary change during initial arena configuration.
82817-	 */
82818-	nstime_t cur_time;
82819-	nstime_init_update(&cur_time);
82820-	decay_reinit(decay, &cur_time, decay_ms);
82821-	pac_maybe_decay_purge(tsdn, pac, decay, decay_stats, ecache, eagerness);
82822-	malloc_mutex_unlock(tsdn, &decay->mtx);
82823-
82824-	return false;
82825-}
82826-
82827-ssize_t
82828-pac_decay_ms_get(pac_t *pac, extent_state_t state) {
82829-	decay_t *decay;
82830-	pac_decay_stats_t *decay_stats;
82831-	ecache_t *ecache;
82832-	pac_decay_data_get(pac, state, &decay, &decay_stats, &ecache);
82833-	return decay_ms_read(decay);
82834-}
82835-
82836-void
82837-pac_reset(tsdn_t *tsdn, pac_t *pac) {
82838-	/*
82839-	 * No-op for now; purging is still done at the arena level.  It should
82840-	 * be moved in here, though.
82841-	 */
82842-	(void)tsdn;
82843-	(void)pac;
82844-}
82845-
82846-void
82847-pac_destroy(tsdn_t *tsdn, pac_t *pac) {
82848-	assert(ecache_npages_get(&pac->ecache_dirty) == 0);
82849-	assert(ecache_npages_get(&pac->ecache_muzzy) == 0);
82850-	/*
82851-	 * Iterate over the retained extents and destroy them.  This gives the
82852-	 * extent allocator underlying the extent hooks an opportunity to unmap
82853-	 * all retained memory without having to keep its own metadata
82854-	 * structures.  In practice, virtual memory for dss-allocated extents is
82855-	 * leaked here, so best practice is to avoid dss for arenas to be
82856-	 * destroyed, or provide custom extent hooks that track retained
82857-	 * dss-based extents for later reuse.
82858-	 */
82859-	ehooks_t *ehooks = pac_ehooks_get(pac);
82860-	edata_t *edata;
82861-	while ((edata = ecache_evict(tsdn, pac, ehooks,
82862-	    &pac->ecache_retained, 0)) != NULL) {
82863-		extent_destroy_wrapper(tsdn, pac, ehooks, edata);
82864-	}
82865-}
82866diff --git a/jemalloc/src/pages.c b/jemalloc/src/pages.c
82867deleted file mode 100644
82868index 8c83a7d..0000000
82869--- a/jemalloc/src/pages.c
82870+++ /dev/null
82871@@ -1,824 +0,0 @@
82872-#include "jemalloc/internal/jemalloc_preamble.h"
82873-
82874-#include "jemalloc/internal/pages.h"
82875-
82876-#include "jemalloc/internal/jemalloc_internal_includes.h"
82877-
82878-#include "jemalloc/internal/assert.h"
82879-#include "jemalloc/internal/malloc_io.h"
82880-
82881-#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
82882-#include <sys/sysctl.h>
82883-#ifdef __FreeBSD__
82884-#include <vm/vm_param.h>
82885-#endif
82886-#endif
82887-#ifdef __NetBSD__
82888-#include <sys/bitops.h>	/* ilog2 */
82889-#endif
82890-#ifdef JEMALLOC_HAVE_VM_MAKE_TAG
82891-#define PAGES_FD_TAG VM_MAKE_TAG(101U)
82892-#else
82893-#define PAGES_FD_TAG -1
82894-#endif
82895-
82896-/******************************************************************************/
82897-/* Data. */
82898-
82899-/* Actual operating system page size, detected during bootstrap, <= PAGE. */
82900-static size_t	os_page;
82901-
82902-#ifndef _WIN32
82903-#  define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
82904-#  define PAGES_PROT_DECOMMIT (PROT_NONE)
82905-static int	mmap_flags;
82906-#endif
82907-static bool	os_overcommits;
82908-
82909-const char *thp_mode_names[] = {
82910-	"default",
82911-	"always",
82912-	"never",
82913-	"not supported"
82914-};
82915-thp_mode_t opt_thp = THP_MODE_DEFAULT;
82916-thp_mode_t init_system_thp_mode;
82917-
82918-/* Runtime support for lazy purge. Irrelevant when !pages_can_purge_lazy. */
82919-static bool pages_can_purge_lazy_runtime = true;
82920-
82921-#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
82922-static int madvise_dont_need_zeros_is_faulty = -1;
82923-/**
82924- * Check that MADV_DONTNEED will actually zero pages on subsequent access.
82925- *
82926- * qemu does not support this yet [1], and you can hit a very tricky
82927- * assertion failure if you run a program that uses jemalloc under qemu:
82928- *
82929- *     <jemalloc>: ../contrib/jemalloc/src/extent.c:1195: Failed assertion: "p[i] == 0"
82930- *
82931- *   [1]: https://patchwork.kernel.org/patch/10576637/
82932- */
82933-static int madvise_MADV_DONTNEED_zeroes_pages(void)
82934-{
82935-	int works = -1;
82936-	size_t size = PAGE;
82937-
82938-	void * addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
82939-	    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
82940-
82941-	if (addr == MAP_FAILED) {
82942-		malloc_write("<jemalloc>: Cannot allocate memory for "
82943-		    "MADV_DONTNEED check\n");
82944-		if (opt_abort) {
82945-			abort();
82946-		}
82947-	}
82948-
82949-	memset(addr, 'A', size);
82950-	if (madvise(addr, size, MADV_DONTNEED) == 0) {
82951-		works = memchr(addr, 'A', size) == NULL;
82952-	} else {
82953-		/*
82954-		 * If madvise() does not support MADV_DONTNEED, then we can
82955-		 * call it anyway, and use its return code.
82956-		 */
82957-		works = 1;
82958-	}
82959-
82960-	if (munmap(addr, size) != 0) {
82961-		malloc_write("<jemalloc>: Cannot deallocate memory for "
82962-		    "MADV_DONTNEED check\n");
82963-		if (opt_abort) {
82964-			abort();
82965-		}
82966-	}
82967-
82968-	return works;
82969-}
82970-#endif
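The same probe can be reproduced outside jemalloc. A minimal standalone sketch, assuming Linux and only POSIX calls, that mirrors the check above (including treating a failed madvise() as "works", since callers would see the same failure at runtime):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void) {
	size_t size = (size_t)sysconf(_SC_PAGESIZE);
	void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		return 1;
	}
	memset(addr, 'A', size);
	int zeroes = 1;
	if (madvise(addr, size, MADV_DONTNEED) == 0) {
		/* On a conforming kernel the page now reads back as zeros. */
		zeroes = memchr(addr, 'A', size) == NULL;
	}
	printf("MADV_DONTNEED zeroes pages: %s\n", zeroes ? "yes" : "no");
	munmap(addr, size);
	return 0;
}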
82971-
82972-/******************************************************************************/
82973-/*
82974- * Function prototypes for static functions that are referenced prior to
82975- * definition.
82976- */
82977-
82978-static void os_pages_unmap(void *addr, size_t size);
82979-
82980-/******************************************************************************/
82981-
82982-static void *
82983-os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
82984-	assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
82985-	assert(ALIGNMENT_CEILING(size, os_page) == size);
82986-	assert(size != 0);
82987-
82988-	if (os_overcommits) {
82989-		*commit = true;
82990-	}
82991-
82992-	void *ret;
82993-#ifdef _WIN32
82994-	/*
82995-	 * If VirtualAlloc can't allocate at the given address when one is
82996-	 * given, it fails and returns NULL.
82997-	 */
82998-	ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
82999-	    PAGE_READWRITE);
83000-#else
83001-	/*
83002-	 * We don't use MAP_FIXED here, because it can cause the *replacement*
83003-	 * of existing mappings, and we only want to create new mappings.
83004-	 */
83005-	{
83006-#ifdef __NetBSD__
83007-		/*
83008-		 * On NetBSD, PAGE for a platform is defined as the
83009-		 * maximum page size across all machine architectures
83010-		 * on that platform, so that the same binaries can be
83011-		 * used across all of them.
83012-		 */
83013-		if (alignment > os_page || PAGE > os_page) {
83014-			unsigned int a = ilog2(MAX(alignment, PAGE));
83015-			mmap_flags |= MAP_ALIGNED(a);
83016-		}
83017-#endif
83018-		int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
83019-
83020-		ret = mmap(addr, size, prot, mmap_flags, PAGES_FD_TAG, 0);
83021-	}
83022-	assert(ret != NULL);
83023-
83024-	if (ret == MAP_FAILED) {
83025-		ret = NULL;
83026-	} else if (addr != NULL && ret != addr) {
83027-		/*
83028-		 * We succeeded in mapping memory, but not in the right place.
83029-		 */
83030-		os_pages_unmap(ret, size);
83031-		ret = NULL;
83032-	}
83033-#endif
83034-	assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL &&
83035-	    ret == addr));
83036-	return ret;
83037-}
83038-
83039-static void *
83040-os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
83041-    bool *commit) {
83042-	void *ret = (void *)((uintptr_t)addr + leadsize);
83043-
83044-	assert(alloc_size >= leadsize + size);
83045-#ifdef _WIN32
83046-	os_pages_unmap(addr, alloc_size);
83047-	void *new_addr = os_pages_map(ret, size, PAGE, commit);
83048-	if (new_addr == ret) {
83049-		return ret;
83050-	}
83051-	if (new_addr != NULL) {
83052-		os_pages_unmap(new_addr, size);
83053-	}
83054-	return NULL;
83055-#else
83056-	size_t trailsize = alloc_size - leadsize - size;
83057-
83058-	if (leadsize != 0) {
83059-		os_pages_unmap(addr, leadsize);
83060-	}
83061-	if (trailsize != 0) {
83062-		os_pages_unmap((void *)((uintptr_t)ret + size), trailsize);
83063-	}
83064-	return ret;
83065-#endif
83066-}
83067-
83068-static void
83069-os_pages_unmap(void *addr, size_t size) {
83070-	assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
83071-	assert(ALIGNMENT_CEILING(size, os_page) == size);
83072-
83073-#ifdef _WIN32
83074-	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
83075-#else
83076-	if (munmap(addr, size) == -1)
83077-#endif
83078-	{
83079-		char buf[BUFERROR_BUF];
83080-
83081-		buferror(get_errno(), buf, sizeof(buf));
83082-		malloc_printf("<jemalloc>: Error in "
83083-#ifdef _WIN32
83084-		    "VirtualFree"
83085-#else
83086-		    "munmap"
83087-#endif
83088-		    "(): %s\n", buf);
83089-		if (opt_abort) {
83090-			abort();
83091-		}
83092-	}
83093-}
83094-
83095-static void *
83096-pages_map_slow(size_t size, size_t alignment, bool *commit) {
83097-	size_t alloc_size = size + alignment - os_page;
83098-	/* Beware size_t wrap-around. */
83099-	if (alloc_size < size) {
83100-		return NULL;
83101-	}
83102-
83103-	void *ret;
83104-	do {
83105-		void *pages = os_pages_map(NULL, alloc_size, alignment, commit);
83106-		if (pages == NULL) {
83107-			return NULL;
83108-		}
83109-		size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment)
83110-		    - (uintptr_t)pages;
83111-		ret = os_pages_trim(pages, alloc_size, leadsize, size, commit);
83112-	} while (ret == NULL);
83113-
83114-	assert(ret != NULL);
83115-	assert(PAGE_ADDR2BASE(ret) == ret);
83116-	return ret;
83117-}
83118-
83119-void *
83120-pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
83121-	assert(alignment >= PAGE);
83122-	assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr);
83123-
83124-#if defined(__FreeBSD__) && defined(MAP_EXCL)
83125-	/*
83126-	 * FreeBSD has mechanisms both to mmap at a specific address without
83127-	 * touching existing mappings, and to mmap with a specific alignment.
83128-	 */
83129-	{
83130-		if (os_overcommits) {
83131-			*commit = true;
83132-		}
83133-
83134-		int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
83135-		int flags = mmap_flags;
83136-
83137-		if (addr != NULL) {
83138-			flags |= MAP_FIXED | MAP_EXCL;
83139-		} else {
83140-			unsigned alignment_bits = ffs_zu(alignment);
83141-			assert(alignment_bits > 0);
83142-			flags |= MAP_ALIGNED(alignment_bits);
83143-		}
83144-
83145-		void *ret = mmap(addr, size, prot, flags, -1, 0);
83146-		if (ret == MAP_FAILED) {
83147-			ret = NULL;
83148-		}
83149-
83150-		return ret;
83151-	}
83152-#endif
83153-	/*
83154-	 * Ideally, there would be a way to specify alignment to mmap() (like
83155-	 * NetBSD has), but in the absence of such a feature, we have to work
83156-	 * hard to efficiently create aligned mappings.  The reliable, but
83157-	 * slow method is to create a mapping that is over-sized, then trim the
83158-	 * excess.  However, that always results in one or two calls to
83159-	 * os_pages_unmap(), and it can leave holes in the process's virtual
83160-	 * memory map if memory grows downward.
83161-	 *
83162-	 * Optimistically try mapping precisely the right amount before falling
83163-	 * back to the slow method, with the expectation that the optimistic
83164-	 * approach works most of the time.
83165-	 */
83166-
83167-	void *ret = os_pages_map(addr, size, os_page, commit);
83168-	if (ret == NULL || ret == addr) {
83169-		return ret;
83170-	}
83171-	assert(addr == NULL);
83172-	if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) {
83173-		os_pages_unmap(ret, size);
83174-		return pages_map_slow(size, alignment, commit);
83175-	}
83176-
83177-	assert(PAGE_ADDR2BASE(ret) == ret);
83178-	return ret;
83179-}
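A standalone sketch of the slow path described in the comment above, assuming POSIX mmap()/munmap() and an alignment that is a power of two and a multiple of the page size; the optimistic fast path and the size_t wrap-around checks are omitted, and map_aligned is a name invented here, not a jemalloc function:

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

static void *
map_aligned(size_t size, size_t alignment) {
	/* Over-allocate so that an aligned block of `size` bytes must fit. */
	size_t alloc_size = size + alignment;
	char *base = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED) {
		return NULL;
	}
	uintptr_t aligned = ((uintptr_t)base + alignment - 1) &
	    ~((uintptr_t)alignment - 1);
	size_t lead = (size_t)(aligned - (uintptr_t)base);
	size_t trail = alloc_size - lead - size;
	/* Trim the excess on both sides, keeping only the aligned middle. */
	if (lead != 0) {
		munmap(base, lead);
	}
	if (trail != 0) {
		munmap((char *)aligned + size, trail);
	}
	return (void *)aligned;
}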
83180-
83181-void
83182-pages_unmap(void *addr, size_t size) {
83183-	assert(PAGE_ADDR2BASE(addr) == addr);
83184-	assert(PAGE_CEILING(size) == size);
83185-
83186-	os_pages_unmap(addr, size);
83187-}
83188-
83189-static bool
83190-os_pages_commit(void *addr, size_t size, bool commit) {
83191-	assert(PAGE_ADDR2BASE(addr) == addr);
83192-	assert(PAGE_CEILING(size) == size);
83193-
83194-#ifdef _WIN32
83195-	return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
83196-	    PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
83197-#else
83198-	{
83199-		int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
83200-		void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
83201-		    PAGES_FD_TAG, 0);
83202-		if (result == MAP_FAILED) {
83203-			return true;
83204-		}
83205-		if (result != addr) {
83206-			/*
83207-			 * We succeeded in mapping memory, but not in the right
83208-			 * place.
83209-			 */
83210-			os_pages_unmap(result, size);
83211-			return true;
83212-		}
83213-		return false;
83214-	}
83215-#endif
83216-}
83217-
83218-static bool
83219-pages_commit_impl(void *addr, size_t size, bool commit) {
83220-	if (os_overcommits) {
83221-		return true;
83222-	}
83223-
83224-	return os_pages_commit(addr, size, commit);
83225-}
83226-
83227-bool
83228-pages_commit(void *addr, size_t size) {
83229-	return pages_commit_impl(addr, size, true);
83230-}
83231-
83232-bool
83233-pages_decommit(void *addr, size_t size) {
83234-	return pages_commit_impl(addr, size, false);
83235-}
83236-
83237-void
83238-pages_mark_guards(void *head, void *tail) {
83239-	assert(head != NULL || tail != NULL);
83240-	assert(head == NULL || tail == NULL ||
83241-	    (uintptr_t)head < (uintptr_t)tail);
83242-#ifdef JEMALLOC_HAVE_MPROTECT
83243-	if (head != NULL) {
83244-		mprotect(head, PAGE, PROT_NONE);
83245-	}
83246-	if (tail != NULL) {
83247-		mprotect(tail, PAGE, PROT_NONE);
83248-	}
83249-#else
83250-	/* Decommit sets to PROT_NONE / MEM_DECOMMIT. */
83251-	if (head != NULL) {
83252-		os_pages_commit(head, PAGE, false);
83253-	}
83254-	if (tail != NULL) {
83255-		os_pages_commit(tail, PAGE, false);
83256-	}
83257-#endif
83258-}
83259-
83260-void
83261-pages_unmark_guards(void *head, void *tail) {
83262-	assert(head != NULL || tail != NULL);
83263-	assert(head == NULL || tail == NULL ||
83264-	    (uintptr_t)head < (uintptr_t)tail);
83265-#ifdef JEMALLOC_HAVE_MPROTECT
83266-	bool head_and_tail = (head != NULL) && (tail != NULL);
83267-	size_t range = head_and_tail ?
83268-	    (uintptr_t)tail - (uintptr_t)head + PAGE :
83269-	    SIZE_T_MAX;
83270-	/*
83271-	 * The amount of work that the kernel does in mprotect depends on the
83272-	 * range argument.  SC_LARGE_MINCLASS is an arbitrary threshold chosen
83273-	 * to prevent the kernel from doing so much work that it would outweigh the
83274-	 * savings of performing one less system call.
83275-	 */
83276-	bool ranged_mprotect = head_and_tail && range <= SC_LARGE_MINCLASS;
83277-	if (ranged_mprotect) {
83278-		mprotect(head, range, PROT_READ | PROT_WRITE);
83279-	} else {
83280-		if (head != NULL) {
83281-			mprotect(head, PAGE, PROT_READ | PROT_WRITE);
83282-		}
83283-		if (tail != NULL) {
83284-			mprotect(tail, PAGE, PROT_READ | PROT_WRITE);
83285-		}
83286-	}
83287-#else
83288-	if (head != NULL) {
83289-		os_pages_commit(head, PAGE, true);
83290-	}
83291-	if (tail != NULL) {
83292-		os_pages_commit(tail, PAGE, true);
83293-	}
83294-#endif
83295-}
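These mark/unmark helpers are what the san_* guard layer seen earlier in pac.c uses to wrap extents. A standalone illustration of the two-sided guard idea, using POSIX calls and a helper name invented for this sketch:

#include <stddef.h>
#include <sys/mman.h>

/* Map payload_pages usable pages with a PROT_NONE page on each side, so any
 * overflow or underflow faults immediately instead of silently corrupting a
 * neighbor. */
static void *
alloc_with_guards(size_t payload_pages, size_t page_size) {
	size_t total = (payload_pages + 2) * page_size;
	char *base = mmap(NULL, total, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED) {
		return NULL;
	}
	mprotect(base, page_size, PROT_NONE);	/* head guard */
	mprotect(base + (payload_pages + 1) * page_size, page_size,
	    PROT_NONE);	/* tail guard */
	return base + page_size;
}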
83296-
83297-bool
83298-pages_purge_lazy(void *addr, size_t size) {
83299-	assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
83300-	assert(PAGE_CEILING(size) == size);
83301-
83302-	if (!pages_can_purge_lazy) {
83303-		return true;
83304-	}
83305-	if (!pages_can_purge_lazy_runtime) {
83306-		/*
83307-		 * Built with lazy purge enabled, but detected it was not
83308-		 * supported on the current system.
83309-		 */
83310-		return true;
83311-	}
83312-
83313-#ifdef _WIN32
83314-	VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
83315-	return false;
83316-#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
83317-	return (madvise(addr, size,
83318-#  ifdef MADV_FREE
83319-	    MADV_FREE
83320-#  else
83321-	    JEMALLOC_MADV_FREE
83322-#  endif
83323-	    ) != 0);
83324-#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
83325-    !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
83326-	return (madvise(addr, size, MADV_DONTNEED) != 0);
83327-#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) && \
83328-    !defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS)
83329-	return (posix_madvise(addr, size, POSIX_MADV_DONTNEED) != 0);
83330-#else
83331-	not_reached();
83332-#endif
83333-}
83334-
83335-bool
83336-pages_purge_forced(void *addr, size_t size) {
83337-	assert(PAGE_ADDR2BASE(addr) == addr);
83338-	assert(PAGE_CEILING(size) == size);
83339-
83340-	if (!pages_can_purge_forced) {
83341-		return true;
83342-	}
83343-
83344-#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
83345-    defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
83346-	return (unlikely(madvise_dont_need_zeros_is_faulty) ||
83347-	    madvise(addr, size, MADV_DONTNEED) != 0);
83348-#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) && \
83349-    defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS)
83350-	return (unlikely(madvise_dont_need_zeros_is_faulty) ||
83351-	    posix_madvise(addr, size, POSIX_MADV_DONTNEED) != 0);
83352-#elif defined(JEMALLOC_MAPS_COALESCE)
83353-	/* Try to overlay a new demand-zeroed mapping. */
83354-	return pages_commit(addr, size);
83355-#else
83356-	not_reached();
83357-#endif
83358-}
83359-
83360-static bool
83361-pages_huge_impl(void *addr, size_t size, bool aligned) {
83362-	if (aligned) {
83363-		assert(HUGEPAGE_ADDR2BASE(addr) == addr);
83364-		assert(HUGEPAGE_CEILING(size) == size);
83365-	}
83366-#if defined(JEMALLOC_HAVE_MADVISE_HUGE)
83367-	return (madvise(addr, size, MADV_HUGEPAGE) != 0);
83368-#elif defined(JEMALLOC_HAVE_MEMCNTL)
83369-	struct memcntl_mha m = {0};
83370-	m.mha_cmd = MHA_MAPSIZE_VA;
83371-	m.mha_pagesize = HUGEPAGE;
83372-	return (memcntl(addr, size, MC_HAT_ADVISE, (caddr_t)&m, 0, 0) == 0);
83373-#else
83374-	return true;
83375-#endif
83376-}
83377-
83378-bool
83379-pages_huge(void *addr, size_t size) {
83380-	return pages_huge_impl(addr, size, true);
83381-}
83382-
83383-static bool
83384-pages_huge_unaligned(void *addr, size_t size) {
83385-	return pages_huge_impl(addr, size, false);
83386-}
83387-
83388-static bool
83389-pages_nohuge_impl(void *addr, size_t size, bool aligned) {
83390-	if (aligned) {
83391-		assert(HUGEPAGE_ADDR2BASE(addr) == addr);
83392-		assert(HUGEPAGE_CEILING(size) == size);
83393-	}
83394-
83395-#ifdef JEMALLOC_HAVE_MADVISE_HUGE
83396-	return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
83397-#else
83398-	return false;
83399-#endif
83400-}
83401-
83402-bool
83403-pages_nohuge(void *addr, size_t size) {
83404-	return pages_nohuge_impl(addr, size, true);
83405-}
83406-
83407-static bool
83408-pages_nohuge_unaligned(void *addr, size_t size) {
83409-	return pages_nohuge_impl(addr, size, false);
83410-}
83411-
83412-bool
83413-pages_dontdump(void *addr, size_t size) {
83414-	assert(PAGE_ADDR2BASE(addr) == addr);
83415-	assert(PAGE_CEILING(size) == size);
83416-#if defined(JEMALLOC_MADVISE_DONTDUMP)
83417-	return madvise(addr, size, MADV_DONTDUMP) != 0;
83418-#elif defined(JEMALLOC_MADVISE_NOCORE)
83419-	return madvise(addr, size, MADV_NOCORE) != 0;
83420-#else
83421-	return false;
83422-#endif
83423-}
83424-
83425-bool
83426-pages_dodump(void *addr, size_t size) {
83427-	assert(PAGE_ADDR2BASE(addr) == addr);
83428-	assert(PAGE_CEILING(size) == size);
83429-#if defined(JEMALLOC_MADVISE_DONTDUMP)
83430-	return madvise(addr, size, MADV_DODUMP) != 0;
83431-#elif defined(JEMALLOC_MADVISE_NOCORE)
83432-	return madvise(addr, size, MADV_CORE) != 0;
83433-#else
83434-	return false;
83435-#endif
83436-}
83437-
83438-
83439-static size_t
83440-os_page_detect(void) {
83441-#ifdef _WIN32
83442-	SYSTEM_INFO si;
83443-	GetSystemInfo(&si);
83444-	return si.dwPageSize;
83445-#elif defined(__FreeBSD__)
83446-	/*
83447-	 * This returns the value obtained from
83448-	 * the auxv vector, avoiding a syscall.
83449-	 */
83450-	return getpagesize();
83451-#else
83452-	long result = sysconf(_SC_PAGESIZE);
83453-	if (result == -1) {
83454-		return PAGE; /* Fall back to the compile-time page size. */
83455-	}
83456-	return (size_t)result;
83457-#endif
83458-}
83459-
83460-#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
83461-static bool
83462-os_overcommits_sysctl(void) {
83463-	int vm_overcommit;
83464-	size_t sz;
83465-
83466-	sz = sizeof(vm_overcommit);
83467-#if defined(__FreeBSD__) && defined(VM_OVERCOMMIT)
83468-	int mib[2];
83469-
83470-	mib[0] = CTL_VM;
83471-	mib[1] = VM_OVERCOMMIT;
83472-	if (sysctl(mib, 2, &vm_overcommit, &sz, NULL, 0) != 0) {
83473-		return false; /* Error. */
83474-	}
83475-#else
83476-	if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) {
83477-		return false; /* Error. */
83478-	}
83479-#endif
83480-
83481-	return ((vm_overcommit & 0x3) == 0);
83482-}
83483-#endif
83484-
83485-#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
83486-/*
83487- * Use syscall(2) rather than {open,read,close}(2) when possible to avoid
83488- * reentry during bootstrapping if another library has interposed system call
83489- * wrappers.
83490- */
83491-static bool
83492-os_overcommits_proc(void) {
83493-	int fd;
83494-	char buf[1];
83495-
83496-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
83497-	#if defined(O_CLOEXEC)
83498-		fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY |
83499-			O_CLOEXEC);
83500-	#else
83501-		fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
83502-		if (fd != -1) {
83503-			fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
83504-		}
83505-	#endif
83506-#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
83507-	#if defined(O_CLOEXEC)
83508-		fd = (int)syscall(SYS_openat,
83509-			AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
83510-	#else
83511-		fd = (int)syscall(SYS_openat,
83512-			AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY);
83513-		if (fd != -1) {
83514-			fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
83515-		}
83516-	#endif
83517-#else
83518-	#if defined(O_CLOEXEC)
83519-		fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC);
83520-	#else
83521-		fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
83522-		if (fd != -1) {
83523-			fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
83524-		}
83525-	#endif
83526-#endif
83527-
83528-	if (fd == -1) {
83529-		return false; /* Error. */
83530-	}
83531-
83532-	ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf));
83533-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
83534-	syscall(SYS_close, fd);
83535-#else
83536-	close(fd);
83537-#endif
83538-
83539-	if (nread < 1) {
83540-		return false; /* Error. */
83541-	}
83542-	/*
83543-	 * /proc/sys/vm/overcommit_memory meanings:
83544-	 * 0: Heuristic overcommit.
83545-	 * 1: Always overcommit.
83546-	 * 2: Never overcommit.
83547-	 */
83548-	return (buf[0] == '0' || buf[0] == '1');
83549-}
83550-#endif
83551-
83552-void
83553-pages_set_thp_state(void *ptr, size_t size) {
83554-	if (opt_thp == thp_mode_default || opt_thp == init_system_thp_mode) {
83555-		return;
83556-	}
83557-	assert(opt_thp != thp_mode_not_supported &&
83558-	    init_system_thp_mode != thp_mode_not_supported);
83559-
83560-	if (opt_thp == thp_mode_always
83561-	    && init_system_thp_mode != thp_mode_never) {
83562-		assert(init_system_thp_mode == thp_mode_default);
83563-		pages_huge_unaligned(ptr, size);
83564-	} else if (opt_thp == thp_mode_never) {
83565-		assert(init_system_thp_mode == thp_mode_default ||
83566-		    init_system_thp_mode == thp_mode_always);
83567-		pages_nohuge_unaligned(ptr, size);
83568-	}
83569-}
83570-
83571-static void
83572-init_thp_state(void) {
83573-	if (!have_madvise_huge && !have_memcntl) {
83574-		if (metadata_thp_enabled() && opt_abort) {
83575-			malloc_write("<jemalloc>: no MADV_HUGEPAGE support\n");
83576-			abort();
83577-		}
83578-		goto label_error;
83579-	}
83580-#if defined(JEMALLOC_HAVE_MADVISE_HUGE)
83581-	static const char sys_state_madvise[] = "always [madvise] never\n";
83582-	static const char sys_state_always[] = "[always] madvise never\n";
83583-	static const char sys_state_never[] = "always madvise [never]\n";
83584-	char buf[sizeof(sys_state_madvise)];
83585-
83586-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
83587-	int fd = (int)syscall(SYS_open,
83588-	    "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
83589-#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
83590-	int fd = (int)syscall(SYS_openat,
83591-		    AT_FDCWD, "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
83592-#else
83593-	int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
83594-#endif
83595-	if (fd == -1) {
83596-		goto label_error;
83597-	}
83598-
83599-	ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf));
83600-#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
83601-	syscall(SYS_close, fd);
83602-#else
83603-	close(fd);
83604-#endif
83605-
83606-	if (nread < 0) {
83607-		goto label_error;
83608-	}
83609-
83610-	if (strncmp(buf, sys_state_madvise, (size_t)nread) == 0) {
83611-		init_system_thp_mode = thp_mode_default;
83612-	} else if (strncmp(buf, sys_state_always, (size_t)nread) == 0) {
83613-		init_system_thp_mode = thp_mode_always;
83614-	} else if (strncmp(buf, sys_state_never, (size_t)nread) == 0) {
83615-		init_system_thp_mode = thp_mode_never;
83616-	} else {
83617-		goto label_error;
83618-	}
83619-	return;
83620-#elif defined(JEMALLOC_HAVE_MEMCNTL)
83621-	init_system_thp_mode = thp_mode_default;
83622-	return;
83623-#endif
83624-label_error:
83625-	opt_thp = init_system_thp_mode = thp_mode_not_supported;
83626-}
83627-
83628-bool
83629-pages_boot(void) {
83630-	os_page = os_page_detect();
83631-	if (os_page > PAGE) {
83632-		malloc_write("<jemalloc>: Unsupported system page size\n");
83633-		if (opt_abort) {
83634-			abort();
83635-		}
83636-		return true;
83637-	}
83638-
83639-#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
83640-	if (!opt_trust_madvise) {
83641-		madvise_dont_need_zeros_is_faulty = !madvise_MADV_DONTNEED_zeroes_pages();
83642-		if (madvise_dont_need_zeros_is_faulty) {
83643-			malloc_write("<jemalloc>: MADV_DONTNEED does not work (memset will be used instead)\n");
83644-			malloc_write("<jemalloc>: (This is the expected behaviour if you are running under QEMU)\n");
83645-		}
83646-	} else {
83647-		/* When opt_trust_madvise is enabled, trust madvise and
83648-		 * skip the runtime check. */
83649-		madvise_dont_need_zeros_is_faulty = 0;
83650-	}
83651-#endif
83652-
83653-#ifndef _WIN32
83654-	mmap_flags = MAP_PRIVATE | MAP_ANON;
83655-#endif
83656-
83657-#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
83658-	os_overcommits = os_overcommits_sysctl();
83659-#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
83660-	os_overcommits = os_overcommits_proc();
83661-#  ifdef MAP_NORESERVE
83662-	if (os_overcommits) {
83663-		mmap_flags |= MAP_NORESERVE;
83664-	}
83665-#  endif
83666-#elif defined(__NetBSD__)
83667-	os_overcommits = true;
83668-#else
83669-	os_overcommits = false;
83670-#endif
83671-
83672-	init_thp_state();
83673-
83674-#ifdef __FreeBSD__
83675-	/*
83676-	 * FreeBSD doesn't need the check; madvise(2) is known to work.
83677-	 */
83678-#else
83679-	/* Detect lazy purge runtime support. */
83680-	if (pages_can_purge_lazy) {
83681-		bool committed = false;
83682-		void *madv_free_page = os_pages_map(NULL, PAGE, PAGE, &committed);
83683-		if (madv_free_page == NULL) {
83684-			return true;
83685-		}
83686-		assert(pages_can_purge_lazy_runtime);
83687-		if (pages_purge_lazy(madv_free_page, PAGE)) {
83688-			pages_can_purge_lazy_runtime = false;
83689-		}
83690-		os_pages_unmap(madv_free_page, PAGE);
83691-	}
83692-#endif
83693-
83694-	return false;
83695-}
83696diff --git a/jemalloc/src/pai.c b/jemalloc/src/pai.c
83697deleted file mode 100644
83698index 45c8772..0000000
83699--- a/jemalloc/src/pai.c
83700+++ /dev/null
83701@@ -1,31 +0,0 @@
83702-#include "jemalloc/internal/jemalloc_preamble.h"
83703-#include "jemalloc/internal/jemalloc_internal_includes.h"
83704-
83705-size_t
83706-pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
83707-    edata_list_active_t *results, bool *deferred_work_generated) {
83708-	for (size_t i = 0; i < nallocs; i++) {
83709-		bool deferred_by_alloc = false;
83710-		edata_t *edata = pai_alloc(tsdn, self, size, PAGE,
83711-		    /* zero */ false, /* guarded */ false,
83712-		    /* frequent_reuse */ false, &deferred_by_alloc);
83713-		*deferred_work_generated |= deferred_by_alloc;
83714-		if (edata == NULL) {
83715-			return i;
83716-		}
83717-		edata_list_active_append(results, edata);
83718-	}
83719-	return nallocs;
83720-}
83721-
83722-void
83723-pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self,
83724-    edata_list_active_t *list, bool *deferred_work_generated) {
83725-	edata_t *edata;
83726-	while ((edata = edata_list_active_first(list)) != NULL) {
83727-		bool deferred_by_dalloc = false;
83728-		edata_list_active_remove(list, edata);
83729-		pai_dalloc(tsdn, self, edata, &deferred_by_dalloc);
83730-		*deferred_work_generated |= deferred_by_dalloc;
83731-	}
83732-}
83733diff --git a/jemalloc/src/peak_event.c b/jemalloc/src/peak_event.c
83734deleted file mode 100644
83735index 4093fbc..0000000
83736--- a/jemalloc/src/peak_event.c
83737+++ /dev/null
83738@@ -1,82 +0,0 @@
83739-#include "jemalloc/internal/jemalloc_preamble.h"
83740-#include "jemalloc/internal/jemalloc_internal_includes.h"
83741-
83742-#include "jemalloc/internal/peak_event.h"
83743-
83744-#include "jemalloc/internal/activity_callback.h"
83745-#include "jemalloc/internal/peak.h"
83746-
83747-/*
83748- * Update every 64K by default.  We're not exposing this as a configuration
83749- * option for now; we don't want to bind ourselves too tightly to any particular
83750- * performance requirements for small values, or guarantee that we'll even be
83751- * able to provide fine-grained accuracy.
83752- */
83753-#define PEAK_EVENT_WAIT (64 * 1024)
83754-
83755-/* Update the peak with current tsd state. */
83756-void
83757-peak_event_update(tsd_t *tsd) {
83758-	uint64_t alloc = tsd_thread_allocated_get(tsd);
83759-	uint64_t dalloc = tsd_thread_deallocated_get(tsd);
83760-	peak_t *peak = tsd_peakp_get(tsd);
83761-	peak_update(peak, alloc, dalloc);
83762-}
83763-
83764-static void
83765-peak_event_activity_callback(tsd_t *tsd) {
83766-	activity_callback_thunk_t *thunk = tsd_activity_callback_thunkp_get(
83767-	    tsd);
83768-	uint64_t alloc = tsd_thread_allocated_get(tsd);
83769-	uint64_t dalloc = tsd_thread_deallocated_get(tsd);
83770-	if (thunk->callback != NULL) {
83771-		thunk->callback(thunk->uctx, alloc, dalloc);
83772-	}
83773-}
83774-
83775-/* Set current state to zero. */
83776-void
83777-peak_event_zero(tsd_t *tsd) {
83778-	uint64_t alloc = tsd_thread_allocated_get(tsd);
83779-	uint64_t dalloc = tsd_thread_deallocated_get(tsd);
83780-	peak_t *peak = tsd_peakp_get(tsd);
83781-	peak_set_zero(peak, alloc, dalloc);
83782-}
83783-
83784-uint64_t
83785-peak_event_max(tsd_t *tsd) {
83786-	peak_t *peak = tsd_peakp_get(tsd);
83787-	return peak_max(peak);
83788-}
83789-
83790-uint64_t
83791-peak_alloc_new_event_wait(tsd_t *tsd) {
83792-	return PEAK_EVENT_WAIT;
83793-}
83794-
83795-uint64_t
83796-peak_alloc_postponed_event_wait(tsd_t *tsd) {
83797-	return TE_MIN_START_WAIT;
83798-}
83799-
83800-void
83801-peak_alloc_event_handler(tsd_t *tsd, uint64_t elapsed) {
83802-	peak_event_update(tsd);
83803-	peak_event_activity_callback(tsd);
83804-}
83805-
83806-uint64_t
83807-peak_dalloc_new_event_wait(tsd_t *tsd) {
83808-	return PEAK_EVENT_WAIT;
83809-}
83810-
83811-uint64_t
83812-peak_dalloc_postponed_event_wait(tsd_t *tsd) {
83813-	return TE_MIN_START_WAIT;
83814-}
83815-
83816-void
83817-peak_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed) {
83818-	peak_event_update(tsd);
83819-	peak_event_activity_callback(tsd);
83820-}
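The peak machinery driven by these handlers roughly amounts to tracking the high-water mark of a thread's net allocated bytes (allocated minus deallocated). A toy sketch of that idea, with invented names and fields rather than jemalloc's peak_t:

#include <stdint.h>

typedef struct {
	uint64_t max_net;
} toy_peak_t;

/* alloc/dalloc are the thread's cumulative allocated/deallocated byte
 * counters, as read from tsd above. */
static void
toy_peak_update(toy_peak_t *peak, uint64_t alloc, uint64_t dalloc) {
	uint64_t net = alloc >= dalloc ? alloc - dalloc : 0;
	if (net > peak->max_net) {
		peak->max_net = net;
	}
}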
83821diff --git a/jemalloc/src/prof.c b/jemalloc/src/prof.c
83822deleted file mode 100644
83823index 7a6d5d5..0000000
83824--- a/jemalloc/src/prof.c
83825+++ /dev/null
83826@@ -1,789 +0,0 @@
83827-#include "jemalloc/internal/jemalloc_preamble.h"
83828-#include "jemalloc/internal/jemalloc_internal_includes.h"
83829-
83830-#include "jemalloc/internal/ctl.h"
83831-#include "jemalloc/internal/assert.h"
83832-#include "jemalloc/internal/mutex.h"
83833-#include "jemalloc/internal/counter.h"
83834-#include "jemalloc/internal/prof_data.h"
83835-#include "jemalloc/internal/prof_log.h"
83836-#include "jemalloc/internal/prof_recent.h"
83837-#include "jemalloc/internal/prof_stats.h"
83838-#include "jemalloc/internal/prof_sys.h"
83839-#include "jemalloc/internal/prof_hook.h"
83840-#include "jemalloc/internal/thread_event.h"
83841-
83842-/*
83843- * This file implements the profiling "APIs" needed by other parts of jemalloc,
83844- * and also manages the relevant "operational" data, mainly options and mutexes;
83845- * the core profiling data structures are encapsulated in prof_data.c.
83846- */
83847-
83848-/******************************************************************************/
83849-
83850-/* Data. */
83851-
83852-bool opt_prof = false;
83853-bool opt_prof_active = true;
83854-bool opt_prof_thread_active_init = true;
83855-size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
83856-ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
83857-bool opt_prof_gdump = false;
83858-bool opt_prof_final = false;
83859-bool opt_prof_leak = false;
83860-bool opt_prof_leak_error = false;
83861-bool opt_prof_accum = false;
83862-char opt_prof_prefix[PROF_DUMP_FILENAME_LEN];
83863-bool opt_prof_sys_thread_name = false;
83864-bool opt_prof_unbias = true;
83865-
83866-/* Accessed via prof_sample_event_handler(). */
83867-static counter_accum_t prof_idump_accumulated;
83868-
83869-/*
83870- * Initialized as opt_prof_active, and accessed via
83871- * prof_active_[gs]et{_unlocked,}().
83872- */
83873-bool prof_active_state;
83874-static malloc_mutex_t prof_active_mtx;
83875-
83876-/*
83877- * Initialized as opt_prof_thread_active_init, and accessed via
83878- * prof_thread_active_init_[gs]et().
83879- */
83880-static bool prof_thread_active_init;
83881-static malloc_mutex_t prof_thread_active_init_mtx;
83882-
83883-/*
83884- * Initialized as opt_prof_gdump, and accessed via
83885- * prof_gdump_[gs]et{_unlocked,}().
83886- */
83887-bool prof_gdump_val;
83888-static malloc_mutex_t prof_gdump_mtx;
83889-
83890-uint64_t prof_interval = 0;
83891-
83892-size_t lg_prof_sample;
83893-
83894-static uint64_t next_thr_uid;
83895-static malloc_mutex_t next_thr_uid_mtx;
83896-
83897-/* Do not dump any profiles until bootstrapping is complete. */
83898-bool prof_booted = false;
83899-
83900-/* Logically a prof_backtrace_hook_t. */
83901-atomic_p_t prof_backtrace_hook;
83902-
83903-/* Logically a prof_dump_hook_t. */
83904-atomic_p_t prof_dump_hook;
83905-
83906-/******************************************************************************/
83907-
83908-void
83909-prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx) {
83910-	cassert(config_prof);
83911-
83912-	if (tsd_reentrancy_level_get(tsd) > 0) {
83913-		assert((uintptr_t)tctx == (uintptr_t)1U);
83914-		return;
83915-	}
83916-
83917-	if ((uintptr_t)tctx > (uintptr_t)1U) {
83918-		malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
83919-		tctx->prepared = false;
83920-		prof_tctx_try_destroy(tsd, tctx);
83921-	}
83922-}
83923-
83924-void
83925-prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size,
83926-    size_t usize, prof_tctx_t *tctx) {
83927-	cassert(config_prof);
83928-
83929-	if (opt_prof_sys_thread_name) {
83930-		prof_sys_thread_name_fetch(tsd);
83931-	}
83932-
83933-	edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
83934-	    ptr);
83935-	prof_info_set(tsd, edata, tctx, size);
83936-
83937-	szind_t szind = sz_size2index(usize);
83938-
83939-	malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
83940-	/*
83941-	 * We need to do these map lookups while holding the lock, to avoid the
83942-	 * possibility of races with prof_reset calls, which update the map and
83943-	 * then acquire the lock.  This actually still leaves a data race on the
83944-	 * contents of the unbias map, but we have not yet gone through and
83945-	 * atomic-ified the prof module, and compilers are not yet causing us
83946-	 * issues.  The key thing is to make sure that, if we read garbage data,
83947-	 * the prof_reset call is about to mark our tctx as expired before any
83948-	 * dumping of our corrupted output is attempted.
83949-	 */
83950-	size_t shifted_unbiased_cnt = prof_shifted_unbiased_cnt[szind];
83951-	size_t unbiased_bytes = prof_unbiased_sz[szind];
83952-	tctx->cnts.curobjs++;
83953-	tctx->cnts.curobjs_shifted_unbiased += shifted_unbiased_cnt;
83954-	tctx->cnts.curbytes += usize;
83955-	tctx->cnts.curbytes_unbiased += unbiased_bytes;
83956-	if (opt_prof_accum) {
83957-		tctx->cnts.accumobjs++;
83958-		tctx->cnts.accumobjs_shifted_unbiased += shifted_unbiased_cnt;
83959-		tctx->cnts.accumbytes += usize;
83960-		tctx->cnts.accumbytes_unbiased += unbiased_bytes;
83961-	}
83962-	bool record_recent = prof_recent_alloc_prepare(tsd, tctx);
83963-	tctx->prepared = false;
83964-	malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
83965-	if (record_recent) {
83966-		assert(tctx == edata_prof_tctx_get(edata));
83967-		prof_recent_alloc(tsd, edata, size, usize);
83968-	}
83969-
83970-	if (opt_prof_stats) {
83971-		prof_stats_inc(tsd, szind, size);
83972-	}
83973-}
83974-
83975-void
83976-prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_info_t *prof_info) {
83977-	cassert(config_prof);
83978-
83979-	assert(prof_info != NULL);
83980-	prof_tctx_t *tctx = prof_info->alloc_tctx;
83981-	assert((uintptr_t)tctx > (uintptr_t)1U);
83982-
83983-	szind_t szind = sz_size2index(usize);
83984-	malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
83985-
83986-	assert(tctx->cnts.curobjs > 0);
83987-	assert(tctx->cnts.curbytes >= usize);
83988-	/*
83989-	 * It's not correct to do equivalent asserts for unbiased bytes, because
83990-	 * of the potential for races with prof.reset calls.  The map contents
83991-	 * should really be atomic, but we have not atomic-ified the prof module
83992-	 * yet.
83993-	 */
83994-	tctx->cnts.curobjs--;
83995-	tctx->cnts.curobjs_shifted_unbiased -= prof_shifted_unbiased_cnt[szind];
83996-	tctx->cnts.curbytes -= usize;
83997-	tctx->cnts.curbytes_unbiased -= prof_unbiased_sz[szind];
83998-
83999-	prof_try_log(tsd, usize, prof_info);
84000-
84001-	prof_tctx_try_destroy(tsd, tctx);
84002-
84003-	if (opt_prof_stats) {
84004-		prof_stats_dec(tsd, szind, prof_info->alloc_size);
84005-	}
84006-}
84007-
84008-prof_tctx_t *
84009-prof_tctx_create(tsd_t *tsd) {
84010-	if (!tsd_nominal(tsd) || tsd_reentrancy_level_get(tsd) > 0) {
84011-		return NULL;
84012-	}
84013-
84014-	prof_tdata_t *tdata = prof_tdata_get(tsd, true);
84015-	if (tdata == NULL) {
84016-		return NULL;
84017-	}
84018-
84019-	prof_bt_t bt;
84020-	bt_init(&bt, tdata->vec);
84021-	prof_backtrace(tsd, &bt);
84022-	return prof_lookup(tsd, &bt);
84023-}
84024-
84025-/*
84026- * The bodies of this function and prof_leakcheck() are compiled out unless heap
84027- * profiling is enabled, so that it is possible to compile jemalloc with
84028- * floating point support completely disabled.  Avoiding floating point code is
84029- * important on memory-constrained systems, but it also enables a workaround for
84030- * versions of glibc that don't properly save/restore floating point registers
84031- * during dynamic lazy symbol loading (which internally calls into whatever
84032- * malloc implementation happens to be integrated into the application).  Note
84033- * that some compilers (e.g.  gcc 4.8) may use floating point registers for fast
84034- * memory moves, so jemalloc must be compiled with such optimizations disabled
84035- * (e.g. -mno-sse) in order for the workaround to be
84036- * complete.
84037- */
84038-uint64_t
84039-prof_sample_new_event_wait(tsd_t *tsd) {
84040-#ifdef JEMALLOC_PROF
84041-	if (lg_prof_sample == 0) {
84042-		return TE_MIN_START_WAIT;
84043-	}
84044-
84045-	/*
84046-	 * Compute sample interval as a geometrically distributed random
84047-	 * variable with mean (2^lg_prof_sample).
84048-	 *
84049-	 *                      __        __
84050-	 *                      |  log(u)  |                     1
84051-	 * bytes_until_sample = | -------- |, where p = ---------------
84052-	 *                      | log(1-p) |             lg_prof_sample
84053-	 *                                              2
84054-	 *
84055-	 * For more information on the math, see:
84056-	 *
84057-	 *   Non-Uniform Random Variate Generation
84058-	 *   Luc Devroye
84059-	 *   Springer-Verlag, New York, 1986
84060-	 *   pp 500
84061-	 *   (http://luc.devroye.org/rnbookindex.html)
84062-	 *
84063-	 * In the actual computation, there's a non-zero probability that our
84064-	 * pseudo random number generator generates an exact 0, and to avoid
84065-	 * log(0), we set u to 1.0 in case r is 0.  Therefore u effectively is
84066-	 * uniformly distributed in (0, 1] instead of [0, 1).  Further, rather
84067-	 * than taking the ceiling, we take the floor and then add 1, since
84068-	 * otherwise bytes_until_sample would be 0 if u is exactly 1.0.
84069-	 */
84070-	uint64_t r = prng_lg_range_u64(tsd_prng_statep_get(tsd), 53);
84071-	double u = (r == 0U) ? 1.0 : (double)r * (1.0/9007199254740992.0L);
84072-	return (uint64_t)(log(u) /
84073-	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
84074-	    + (uint64_t)1U;
84075-#else
84076-	not_reached();
84077-	return TE_MAX_START_WAIT;
84078-#endif
84079-}
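A standalone sketch of the same draw, with rand() standing in for jemalloc's PRNG; for example, lg_sample = 19 gives a mean interval of 2^19 = 512 KiB of allocated bytes between samples (compile with -lm):

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint64_t
sample_interval(unsigned lg_sample) {
	if (lg_sample == 0) {
		return 1; /* Sample every allocation. */
	}
	/* u is uniform in (0, 1]; avoiding u == 0 keeps log(u) finite. */
	double u = ((double)rand() + 1.0) / ((double)RAND_MAX + 1.0);
	double p = 1.0 / (double)((uint64_t)1 << lg_sample);
	return (uint64_t)(log(u) / log(1.0 - p)) + 1;
}

int
main(void) {
	for (int i = 0; i < 5; i++) {
		printf("%llu\n", (unsigned long long)sample_interval(19));
	}
	return 0;
}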
84080-
84081-uint64_t
84082-prof_sample_postponed_event_wait(tsd_t *tsd) {
84083-	/*
84084-	 * The postponed wait time for prof sample event is computed as if we
84085-	 * want a new wait time (i.e. as if the event were triggered).  If we
84086-	 * instead postponed to the immediate next allocation, as is done for
84087-	 * the other events, we could introduce sampling bias if, e.g.,
84088-	 * the allocation immediately following a reentrancy always comes from
84089-	 * the same stack trace.
84090-	 */
84091-	return prof_sample_new_event_wait(tsd);
84092-}
84093-
84094-void
84095-prof_sample_event_handler(tsd_t *tsd, uint64_t elapsed) {
84096-	cassert(config_prof);
84097-	assert(elapsed > 0 && elapsed != TE_INVALID_ELAPSED);
84098-	if (prof_interval == 0 || !prof_active_get_unlocked()) {
84099-		return;
84100-	}
84101-	if (counter_accum(tsd_tsdn(tsd), &prof_idump_accumulated, elapsed)) {
84102-		prof_idump(tsd_tsdn(tsd));
84103-	}
84104-}
84105-
84106-static void
84107-prof_fdump(void) {
84108-	tsd_t *tsd;
84109-
84110-	cassert(config_prof);
84111-	assert(opt_prof_final);
84112-
84113-	if (!prof_booted) {
84114-		return;
84115-	}
84116-	tsd = tsd_fetch();
84117-	assert(tsd_reentrancy_level_get(tsd) == 0);
84118-
84119-	prof_fdump_impl(tsd);
84120-}
84121-
84122-static bool
84123-prof_idump_accum_init(void) {
84124-	cassert(config_prof);
84125-
84126-	return counter_accum_init(&prof_idump_accumulated, prof_interval);
84127-}
84128-
84129-void
84130-prof_idump(tsdn_t *tsdn) {
84131-	tsd_t *tsd;
84132-	prof_tdata_t *tdata;
84133-
84134-	cassert(config_prof);
84135-
84136-	if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
84137-		return;
84138-	}
84139-	tsd = tsdn_tsd(tsdn);
84140-	if (tsd_reentrancy_level_get(tsd) > 0) {
84141-		return;
84142-	}
84143-
84144-	tdata = prof_tdata_get(tsd, true);
84145-	if (tdata == NULL) {
84146-		return;
84147-	}
84148-	if (tdata->enq) {
84149-		tdata->enq_idump = true;
84150-		return;
84151-	}
84152-
84153-	prof_idump_impl(tsd);
84154-}
84155-
84156-bool
84157-prof_mdump(tsd_t *tsd, const char *filename) {
84158-	cassert(config_prof);
84159-	assert(tsd_reentrancy_level_get(tsd) == 0);
84160-
84161-	if (!opt_prof || !prof_booted) {
84162-		return true;
84163-	}
84164-
84165-	return prof_mdump_impl(tsd, filename);
84166-}
84167-
84168-void
84169-prof_gdump(tsdn_t *tsdn) {
84170-	tsd_t *tsd;
84171-	prof_tdata_t *tdata;
84172-
84173-	cassert(config_prof);
84174-
84175-	if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
84176-		return;
84177-	}
84178-	tsd = tsdn_tsd(tsdn);
84179-	if (tsd_reentrancy_level_get(tsd) > 0) {
84180-		return;
84181-	}
84182-
84183-	tdata = prof_tdata_get(tsd, false);
84184-	if (tdata == NULL) {
84185-		return;
84186-	}
84187-	if (tdata->enq) {
84188-		tdata->enq_gdump = true;
84189-		return;
84190-	}
84191-
84192-	prof_gdump_impl(tsd);
84193-}
84194-
84195-static uint64_t
84196-prof_thr_uid_alloc(tsdn_t *tsdn) {
84197-	uint64_t thr_uid;
84198-
84199-	malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
84200-	thr_uid = next_thr_uid;
84201-	next_thr_uid++;
84202-	malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
84203-
84204-	return thr_uid;
84205-}
84206-
84207-prof_tdata_t *
84208-prof_tdata_init(tsd_t *tsd) {
84209-	return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
84210-	    NULL, prof_thread_active_init_get(tsd_tsdn(tsd)));
84211-}
84212-
84213-prof_tdata_t *
84214-prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
84215-	uint64_t thr_uid = tdata->thr_uid;
84216-	uint64_t thr_discrim = tdata->thr_discrim + 1;
84217-	char *thread_name = (tdata->thread_name != NULL) ?
84218-	    prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
84219-	bool active = tdata->active;
84220-
84221-	prof_tdata_detach(tsd, tdata);
84222-	return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
84223-	    active);
84224-}
84225-
84226-void
84227-prof_tdata_cleanup(tsd_t *tsd) {
84228-	prof_tdata_t *tdata;
84229-
84230-	if (!config_prof) {
84231-		return;
84232-	}
84233-
84234-	tdata = tsd_prof_tdata_get(tsd);
84235-	if (tdata != NULL) {
84236-		prof_tdata_detach(tsd, tdata);
84237-	}
84238-}
84239-
84240-bool
84241-prof_active_get(tsdn_t *tsdn) {
84242-	bool prof_active_current;
84243-
84244-	prof_active_assert();
84245-	malloc_mutex_lock(tsdn, &prof_active_mtx);
84246-	prof_active_current = prof_active_state;
84247-	malloc_mutex_unlock(tsdn, &prof_active_mtx);
84248-	return prof_active_current;
84249-}
84250-
84251-bool
84252-prof_active_set(tsdn_t *tsdn, bool active) {
84253-	bool prof_active_old;
84254-
84255-	prof_active_assert();
84256-	malloc_mutex_lock(tsdn, &prof_active_mtx);
84257-	prof_active_old = prof_active_state;
84258-	prof_active_state = active;
84259-	malloc_mutex_unlock(tsdn, &prof_active_mtx);
84260-	prof_active_assert();
84261-	return prof_active_old;
84262-}
84263-
84264-const char *
84265-prof_thread_name_get(tsd_t *tsd) {
84266-	assert(tsd_reentrancy_level_get(tsd) == 0);
84267-
84268-	prof_tdata_t *tdata;
84269-
84270-	tdata = prof_tdata_get(tsd, true);
84271-	if (tdata == NULL) {
84272-		return "";
84273-	}
84274-	return (tdata->thread_name != NULL ? tdata->thread_name : "");
84275-}
84276-
84277-int
84278-prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
84279-	if (opt_prof_sys_thread_name) {
84280-		return ENOENT;
84281-	} else {
84282-		return prof_thread_name_set_impl(tsd, thread_name);
84283-	}
84284-}
84285-
84286-bool
84287-prof_thread_active_get(tsd_t *tsd) {
84288-	assert(tsd_reentrancy_level_get(tsd) == 0);
84289-
84290-	prof_tdata_t *tdata;
84291-
84292-	tdata = prof_tdata_get(tsd, true);
84293-	if (tdata == NULL) {
84294-		return false;
84295-	}
84296-	return tdata->active;
84297-}
84298-
84299-bool
84300-prof_thread_active_set(tsd_t *tsd, bool active) {
84301-	assert(tsd_reentrancy_level_get(tsd) == 0);
84302-
84303-	prof_tdata_t *tdata;
84304-
84305-	tdata = prof_tdata_get(tsd, true);
84306-	if (tdata == NULL) {
84307-		return true;
84308-	}
84309-	tdata->active = active;
84310-	return false;
84311-}
84312-
84313-bool
84314-prof_thread_active_init_get(tsdn_t *tsdn) {
84315-	bool active_init;
84316-
84317-	malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
84318-	active_init = prof_thread_active_init;
84319-	malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
84320-	return active_init;
84321-}
84322-
84323-bool
84324-prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) {
84325-	bool active_init_old;
84326-
84327-	malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
84328-	active_init_old = prof_thread_active_init;
84329-	prof_thread_active_init = active_init;
84330-	malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
84331-	return active_init_old;
84332-}
84333-
84334-bool
84335-prof_gdump_get(tsdn_t *tsdn) {
84336-	bool prof_gdump_current;
84337-
84338-	malloc_mutex_lock(tsdn, &prof_gdump_mtx);
84339-	prof_gdump_current = prof_gdump_val;
84340-	malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
84341-	return prof_gdump_current;
84342-}
84343-
84344-bool
84345-prof_gdump_set(tsdn_t *tsdn, bool gdump) {
84346-	bool prof_gdump_old;
84347-
84348-	malloc_mutex_lock(tsdn, &prof_gdump_mtx);
84349-	prof_gdump_old = prof_gdump_val;
84350-	prof_gdump_val = gdump;
84351-	malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
84352-	return prof_gdump_old;
84353-}
84354-
84355-void
84356-prof_backtrace_hook_set(prof_backtrace_hook_t hook) {
84357-	atomic_store_p(&prof_backtrace_hook, hook, ATOMIC_RELEASE);
84358-}
84359-
84360-prof_backtrace_hook_t
84361-prof_backtrace_hook_get() {
84362-	return (prof_backtrace_hook_t)atomic_load_p(&prof_backtrace_hook,
84363-	    ATOMIC_ACQUIRE);
84364-}
84365-
84366-void
84367-prof_dump_hook_set(prof_dump_hook_t hook) {
84368-	atomic_store_p(&prof_dump_hook, hook, ATOMIC_RELEASE);
84369-}
84370-
84371-prof_dump_hook_t
84372-prof_dump_hook_get() {
84373-	return (prof_dump_hook_t)atomic_load_p(&prof_dump_hook,
84374-	    ATOMIC_ACQUIRE);
84375-}
84376-
84377-void
84378-prof_boot0(void) {
84379-	cassert(config_prof);
84380-
84381-	memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
84382-	    sizeof(PROF_PREFIX_DEFAULT));
84383-}
84384-
84385-void
84386-prof_boot1(void) {
84387-	cassert(config_prof);
84388-
84389-	/*
84390-	 * opt_prof must be in its final state before any arenas are
84391-	 * initialized, so this function must be executed early.
84392-	 */
84393-	if (opt_prof_leak_error && !opt_prof_leak) {
84394-		opt_prof_leak = true;
84395-	}
84396-
84397-	if (opt_prof_leak && !opt_prof) {
84398-		/*
84399-		 * Enable opt_prof, but in such a way that profiles are never
84400-		 * automatically dumped.
84401-		 */
84402-		opt_prof = true;
84403-		opt_prof_gdump = false;
84404-	} else if (opt_prof) {
84405-		if (opt_lg_prof_interval >= 0) {
84406-			prof_interval = (((uint64_t)1U) <<
84407-			    opt_lg_prof_interval);
84408-		}
84409-	}
84410-}
84411-
84412-bool
84413-prof_boot2(tsd_t *tsd, base_t *base) {
84414-	cassert(config_prof);
84415-
84416-	/*
84417-	 * Initialize the global mutexes unconditionally to maintain correct
84418-	 * stats when opt_prof is false.
84419-	 */
84420-	if (malloc_mutex_init(&prof_active_mtx, "prof_active",
84421-	    WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
84422-		return true;
84423-	}
84424-	if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
84425-	    WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
84426-		return true;
84427-	}
84428-	if (malloc_mutex_init(&prof_thread_active_init_mtx,
84429-	    "prof_thread_active_init", WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
84430-	    malloc_mutex_rank_exclusive)) {
84431-		return true;
84432-	}
84433-	if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
84434-	    WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
84435-		return true;
84436-	}
84437-	if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
84438-	    WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
84439-		return true;
84440-	}
84441-	if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
84442-	    WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) {
84443-		return true;
84444-	}
84445-	if (malloc_mutex_init(&prof_stats_mtx, "prof_stats",
84446-	    WITNESS_RANK_PROF_STATS, malloc_mutex_rank_exclusive)) {
84447-		return true;
84448-	}
84449-	if (malloc_mutex_init(&prof_dump_filename_mtx,
84450-	    "prof_dump_filename", WITNESS_RANK_PROF_DUMP_FILENAME,
84451-	    malloc_mutex_rank_exclusive)) {
84452-		return true;
84453-	}
84454-	if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
84455-	    WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
84456-		return true;
84457-	}
84458-
84459-	if (opt_prof) {
84460-		lg_prof_sample = opt_lg_prof_sample;
84461-		prof_unbias_map_init();
84462-		prof_active_state = opt_prof_active;
84463-		prof_gdump_val = opt_prof_gdump;
84464-		prof_thread_active_init = opt_prof_thread_active_init;
84465-
84466-		if (prof_data_init(tsd)) {
84467-			return true;
84468-		}
84469-
84470-		next_thr_uid = 0;
84471-		if (prof_idump_accum_init()) {
84472-			return true;
84473-		}
84474-
84475-		if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
84476-		    atexit(prof_fdump) != 0) {
84477-			malloc_write("<jemalloc>: Error in atexit()\n");
84478-			if (opt_abort) {
84479-				abort();
84480-			}
84481-		}
84482-
84483-		if (prof_log_init(tsd)) {
84484-			return true;
84485-		}
84486-
84487-		if (prof_recent_init()) {
84488-			return true;
84489-		}
84490-
84491-		prof_base = base;
84492-
84493-		gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), base,
84494-		    PROF_NCTX_LOCKS * sizeof(malloc_mutex_t), CACHELINE);
84495-		if (gctx_locks == NULL) {
84496-			return true;
84497-		}
84498-		for (unsigned i = 0; i < PROF_NCTX_LOCKS; i++) {
84499-			if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
84500-			    WITNESS_RANK_PROF_GCTX,
84501-			    malloc_mutex_rank_exclusive)) {
84502-				return true;
84503-			}
84504-		}
84505-
84506-		tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), base,
84507-		    PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t), CACHELINE);
84508-		if (tdata_locks == NULL) {
84509-			return true;
84510-		}
84511-		for (unsigned i = 0; i < PROF_NTDATA_LOCKS; i++) {
84512-			if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
84513-			    WITNESS_RANK_PROF_TDATA,
84514-			    malloc_mutex_rank_exclusive)) {
84515-				return true;
84516-			}
84517-		}
84518-
84519-		prof_unwind_init();
84520-		prof_hooks_init();
84521-	}
84522-	prof_booted = true;
84523-
84524-	return false;
84525-}
84526-
84527-void
84528-prof_prefork0(tsdn_t *tsdn) {
84529-	if (config_prof && opt_prof) {
84530-		unsigned i;
84531-
84532-		malloc_mutex_prefork(tsdn, &prof_dump_mtx);
84533-		malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
84534-		malloc_mutex_prefork(tsdn, &tdatas_mtx);
84535-		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
84536-			malloc_mutex_prefork(tsdn, &tdata_locks[i]);
84537-		}
84538-		malloc_mutex_prefork(tsdn, &log_mtx);
84539-		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
84540-			malloc_mutex_prefork(tsdn, &gctx_locks[i]);
84541-		}
84542-		malloc_mutex_prefork(tsdn, &prof_recent_dump_mtx);
84543-	}
84544-}
84545-
84546-void
84547-prof_prefork1(tsdn_t *tsdn) {
84548-	if (config_prof && opt_prof) {
84549-		counter_prefork(tsdn, &prof_idump_accumulated);
84550-		malloc_mutex_prefork(tsdn, &prof_active_mtx);
84551-		malloc_mutex_prefork(tsdn, &prof_dump_filename_mtx);
84552-		malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
84553-		malloc_mutex_prefork(tsdn, &prof_recent_alloc_mtx);
84554-		malloc_mutex_prefork(tsdn, &prof_stats_mtx);
84555-		malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
84556-		malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
84557-	}
84558-}
84559-
84560-void
84561-prof_postfork_parent(tsdn_t *tsdn) {
84562-	if (config_prof && opt_prof) {
84563-		unsigned i;
84564-
84565-		malloc_mutex_postfork_parent(tsdn,
84566-		    &prof_thread_active_init_mtx);
84567-		malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
84568-		malloc_mutex_postfork_parent(tsdn, &prof_stats_mtx);
84569-		malloc_mutex_postfork_parent(tsdn, &prof_recent_alloc_mtx);
84570-		malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
84571-		malloc_mutex_postfork_parent(tsdn, &prof_dump_filename_mtx);
84572-		malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
84573-		counter_postfork_parent(tsdn, &prof_idump_accumulated);
84574-		malloc_mutex_postfork_parent(tsdn, &prof_recent_dump_mtx);
84575-		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
84576-			malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
84577-		}
84578-		malloc_mutex_postfork_parent(tsdn, &log_mtx);
84579-		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
84580-			malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
84581-		}
84582-		malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
84583-		malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
84584-		malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
84585-	}
84586-}
84587-
84588-void
84589-prof_postfork_child(tsdn_t *tsdn) {
84590-	if (config_prof && opt_prof) {
84591-		unsigned i;
84592-
84593-		malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
84594-		malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
84595-		malloc_mutex_postfork_child(tsdn, &prof_stats_mtx);
84596-		malloc_mutex_postfork_child(tsdn, &prof_recent_alloc_mtx);
84597-		malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
84598-		malloc_mutex_postfork_child(tsdn, &prof_dump_filename_mtx);
84599-		malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
84600-		counter_postfork_child(tsdn, &prof_idump_accumulated);
84601-		malloc_mutex_postfork_child(tsdn, &prof_recent_dump_mtx);
84602-		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
84603-			malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
84604-		}
84605-		malloc_mutex_postfork_child(tsdn, &log_mtx);
84606-		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
84607-			malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
84608-		}
84609-		malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
84610-		malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
84611-		malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
84612-	}
84613-}
84614-
84615-/******************************************************************************/
84616diff --git a/jemalloc/src/prof_data.c b/jemalloc/src/prof_data.c
84617deleted file mode 100644
84618index bfa55be..0000000
84619--- a/jemalloc/src/prof_data.c
84620+++ /dev/null
84621@@ -1,1447 +0,0 @@
84622-#include "jemalloc/internal/jemalloc_preamble.h"
84623-#include "jemalloc/internal/jemalloc_internal_includes.h"
84624-
84625-#include "jemalloc/internal/assert.h"
84626-#include "jemalloc/internal/ckh.h"
84627-#include "jemalloc/internal/hash.h"
84628-#include "jemalloc/internal/malloc_io.h"
84629-#include "jemalloc/internal/prof_data.h"
84630-
84631-/*
84632- * This file defines and manages the core profiling data structures.
84633- *
84634- * Conceptually, profiling data can be imagined as a table with three columns:
84635- * thread, stack trace, and current allocation size.  (When prof_accum is on,
84636- * there's one additional column which is the cumulative allocation size.)
84637- *
84638- * Implementation wise, each thread maintains a hash recording the stack trace
84639- * to allocation size correspondences, which are basically the individual rows
84640- * in the table.  In addition, two global "indices" are built to make data
84641- * aggregation efficient (for dumping): bt2gctx and tdatas, which are basically
84642- * the "grouped by stack trace" and "grouped by thread" views of the same table,
84643- * respectively.  Note that the allocation size is only aggregated to the two
84644- * indices at dumping time, so as to optimize for performance.
84645- */
84646-
84647-/******************************************************************************/
84648-
84649-malloc_mutex_t bt2gctx_mtx;
84650-malloc_mutex_t tdatas_mtx;
84651-malloc_mutex_t prof_dump_mtx;
84652-
84653-/*
84654- * Table of mutexes that are shared among gctx's.  These are leaf locks, so
84655- * there is no problem with using them for more than one gctx at the same time.
84656- * The primary motivation for this sharing though is that gctx's are ephemeral,
84657- * and destroying mutexes causes complications for systems that allocate when
84658- * creating/destroying mutexes.
84659- */
84660-malloc_mutex_t *gctx_locks;
84661-static atomic_u_t cum_gctxs; /* Atomic counter. */
84662-
84663-/*
84664- * Table of mutexes that are shared among tdata's.  No operations require
84665- * holding multiple tdata locks, so there is no problem with using them for more
84666- * than one tdata at the same time, even though a gctx lock may be acquired
84667- * while holding a tdata lock.
84668- */
84669-malloc_mutex_t *tdata_locks;
84670-
84671-/*
84672- * Global hash of (prof_bt_t *)-->(prof_gctx_t *).  This is the master data
84673- * structure that knows about all backtraces currently captured.
84674- */
84675-static ckh_t bt2gctx;
84676-
84677-/*
84678- * Tree of all extant prof_tdata_t structures, regardless of state,
84679- * {attached,detached,expired}.
84680- */
84681-static prof_tdata_tree_t tdatas;
84682-
84683-size_t prof_unbiased_sz[PROF_SC_NSIZES];
84684-size_t prof_shifted_unbiased_cnt[PROF_SC_NSIZES];
84685-
84686-/******************************************************************************/
84687-/* Red-black trees. */
84688-
84689-static int
84690-prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
84691-	uint64_t a_thr_uid = a->thr_uid;
84692-	uint64_t b_thr_uid = b->thr_uid;
84693-	int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
84694-	if (ret == 0) {
84695-		uint64_t a_thr_discrim = a->thr_discrim;
84696-		uint64_t b_thr_discrim = b->thr_discrim;
84697-		ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
84698-		    b_thr_discrim);
84699-		if (ret == 0) {
84700-			uint64_t a_tctx_uid = a->tctx_uid;
84701-			uint64_t b_tctx_uid = b->tctx_uid;
84702-			ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
84703-			    b_tctx_uid);
84704-		}
84705-	}
84706-	return ret;
84707-}
84708-
84709-rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
84710-    tctx_link, prof_tctx_comp)
84711-
84712-static int
84713-prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
84714-	unsigned a_len = a->bt.len;
84715-	unsigned b_len = b->bt.len;
84716-	unsigned comp_len = (a_len < b_len) ? a_len : b_len;
84717-	int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
84718-	if (ret == 0) {
84719-		ret = (a_len > b_len) - (a_len < b_len);
84720-	}
84721-	return ret;
84722-}
84723-
84724-rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
84725-    prof_gctx_comp)
84726-
84727-static int
84728-prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
84729-	int ret;
84730-	uint64_t a_uid = a->thr_uid;
84731-	uint64_t b_uid = b->thr_uid;
84732-
84733-	ret = ((a_uid > b_uid) - (a_uid < b_uid));
84734-	if (ret == 0) {
84735-		uint64_t a_discrim = a->thr_discrim;
84736-		uint64_t b_discrim = b->thr_discrim;
84737-
84738-		ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
84739-	}
84740-	return ret;
84741-}
84742-
84743-rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
84744-    prof_tdata_comp)
84745-
84746-/******************************************************************************/
84747-
84748-static malloc_mutex_t *
84749-prof_gctx_mutex_choose(void) {
84750-	unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED);
84751-
84752-	return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS];
84753-}
84754-
84755-static malloc_mutex_t *
84756-prof_tdata_mutex_choose(uint64_t thr_uid) {
84757-	return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS];
84758-}
84759-
84760-bool
84761-prof_data_init(tsd_t *tsd) {
84762-	tdata_tree_new(&tdatas);
84763-	return ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS,
84764-	    prof_bt_hash, prof_bt_keycomp);
84765-}
84766-
84767-static void
84768-prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
84769-	cassert(config_prof);
84770-	assert(tdata == prof_tdata_get(tsd, false));
84771-
84772-	if (tdata != NULL) {
84773-		assert(!tdata->enq);
84774-		tdata->enq = true;
84775-	}
84776-
84777-	malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
84778-}
84779-
84780-static void
84781-prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
84782-	cassert(config_prof);
84783-	assert(tdata == prof_tdata_get(tsd, false));
84784-
84785-	malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
84786-
84787-	if (tdata != NULL) {
84788-		bool idump, gdump;
84789-
84790-		assert(tdata->enq);
84791-		tdata->enq = false;
84792-		idump = tdata->enq_idump;
84793-		tdata->enq_idump = false;
84794-		gdump = tdata->enq_gdump;
84795-		tdata->enq_gdump = false;
84796-
84797-		if (idump) {
84798-			prof_idump(tsd_tsdn(tsd));
84799-		}
84800-		if (gdump) {
84801-			prof_gdump(tsd_tsdn(tsd));
84802-		}
84803-	}
84804-}
84805-
84806-static prof_gctx_t *
84807-prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
84808-	/*
84809-	 * Create a single allocation that has space for vec of length bt->len.
84810-	 */
84811-	size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
84812-	prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
84813-	    sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
84814-	    true);
84815-	if (gctx == NULL) {
84816-		return NULL;
84817-	}
84818-	gctx->lock = prof_gctx_mutex_choose();
84819-	/*
84820-	 * Set nlimbo to 1, in order to avoid a race condition with
84821-	 * prof_tctx_destroy()/prof_gctx_try_destroy().
84822-	 */
84823-	gctx->nlimbo = 1;
84824-	tctx_tree_new(&gctx->tctxs);
84825-	/* Duplicate bt. */
84826-	memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
84827-	gctx->bt.vec = gctx->vec;
84828-	gctx->bt.len = bt->len;
84829-	return gctx;
84830-}
84831-
84832-static void
84833-prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self,
84834-    prof_gctx_t *gctx) {
84835-	cassert(config_prof);
84836-
84837-	/*
84838-	 * Check that gctx is still unused by any thread cache before destroying
84839-	 * it.  prof_lookup() increments gctx->nlimbo in order to avoid a race
84840-	 * condition with this function, as does prof_tctx_destroy() in order to
84841-	 * avoid a race between the main body of prof_tctx_destroy() and entry
84842-	 * into this function.
84843-	 */
84844-	prof_enter(tsd, tdata_self);
84845-	malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
84846-	assert(gctx->nlimbo != 0);
84847-	if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
84848-		/* Remove gctx from bt2gctx. */
84849-		if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) {
84850-			not_reached();
84851-		}
84852-		prof_leave(tsd, tdata_self);
84853-		/* Destroy gctx. */
84854-		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
84855-		idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true);
84856-	} else {
84857-		/*
84858-		 * Compensate for increment in prof_tctx_destroy() or
84859-		 * prof_lookup().
84860-		 */
84861-		gctx->nlimbo--;
84862-		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
84863-		prof_leave(tsd, tdata_self);
84864-	}
84865-}
84866-
84867-static bool
84868-prof_gctx_should_destroy(prof_gctx_t *gctx) {
84869-	if (opt_prof_accum) {
84870-		return false;
84871-	}
84872-	if (!tctx_tree_empty(&gctx->tctxs)) {
84873-		return false;
84874-	}
84875-	if (gctx->nlimbo != 0) {
84876-		return false;
84877-	}
84878-	return true;
84879-}
84880-
84881-static bool
84882-prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
84883-    void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) {
84884-	union {
84885-		prof_gctx_t	*p;
84886-		void		*v;
84887-	} gctx, tgctx;
84888-	union {
84889-		prof_bt_t	*p;
84890-		void		*v;
84891-	} btkey;
84892-	bool new_gctx;
84893-
84894-	prof_enter(tsd, tdata);
84895-	if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
84896-		/* bt has never been seen before.  Insert it. */
84897-		prof_leave(tsd, tdata);
84898-		tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
84899-		if (tgctx.v == NULL) {
84900-			return true;
84901-		}
84902-		prof_enter(tsd, tdata);
84903-		if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
84904-			gctx.p = tgctx.p;
84905-			btkey.p = &gctx.p->bt;
84906-			if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
84907-				/* OOM. */
84908-				prof_leave(tsd, tdata);
84909-				idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL,
84910-				    true, true);
84911-				return true;
84912-			}
84913-			new_gctx = true;
84914-		} else {
84915-			new_gctx = false;
84916-		}
84917-	} else {
84918-		tgctx.v = NULL;
84919-		new_gctx = false;
84920-	}
84921-
84922-	if (!new_gctx) {
84923-		/*
84924-		 * Increment nlimbo, in order to avoid a race condition with
84925-		 * prof_tctx_destroy()/prof_gctx_try_destroy().
84926-		 */
84927-		malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
84928-		gctx.p->nlimbo++;
84929-		malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
84930-		new_gctx = false;
84931-
84932-		if (tgctx.v != NULL) {
84933-			/* Lost race to insert. */
84934-			idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true,
84935-			    true);
84936-		}
84937-	}
84938-	prof_leave(tsd, tdata);
84939-
84940-	*p_btkey = btkey.v;
84941-	*p_gctx = gctx.p;
84942-	*p_new_gctx = new_gctx;
84943-	return false;
84944-}
84945-
84946-prof_tctx_t *
84947-prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
84948-	union {
84949-		prof_tctx_t	*p;
84950-		void		*v;
84951-	} ret;
84952-	prof_tdata_t *tdata;
84953-	bool not_found;
84954-
84955-	cassert(config_prof);
84956-
84957-	tdata = prof_tdata_get(tsd, false);
84958-	assert(tdata != NULL);
84959-
84960-	malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
84961-	not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
84962-	if (!not_found) { /* Note double negative! */
84963-		ret.p->prepared = true;
84964-	}
84965-	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
84966-	if (not_found) {
84967-		void *btkey;
84968-		prof_gctx_t *gctx;
84969-		bool new_gctx, error;
84970-
84971-		/*
84972-		 * This thread's cache lacks bt.  Look for it in the global
84973-		 * cache.
84974-		 */
84975-		if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
84976-		    &new_gctx)) {
84977-			return NULL;
84978-		}
84979-
84980-		/* Link a prof_tctx_t into gctx for this thread. */
84981-		ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
84982-		    sz_size2index(sizeof(prof_tctx_t)), false, NULL, true,
84983-		    arena_ichoose(tsd, NULL), true);
84984-		if (ret.p == NULL) {
84985-			if (new_gctx) {
84986-				prof_gctx_try_destroy(tsd, tdata, gctx);
84987-			}
84988-			return NULL;
84989-		}
84990-		ret.p->tdata = tdata;
84991-		ret.p->thr_uid = tdata->thr_uid;
84992-		ret.p->thr_discrim = tdata->thr_discrim;
84993-		ret.p->recent_count = 0;
84994-		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
84995-		ret.p->gctx = gctx;
84996-		ret.p->tctx_uid = tdata->tctx_uid_next++;
84997-		ret.p->prepared = true;
84998-		ret.p->state = prof_tctx_state_initializing;
84999-		malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
85000-		error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
85001-		malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
85002-		if (error) {
85003-			if (new_gctx) {
85004-				prof_gctx_try_destroy(tsd, tdata, gctx);
85005-			}
85006-			idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true);
85007-			return NULL;
85008-		}
85009-		malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
85010-		ret.p->state = prof_tctx_state_nominal;
85011-		tctx_tree_insert(&gctx->tctxs, ret.p);
85012-		gctx->nlimbo--;
85013-		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
85014-	}
85015-
85016-	return ret.p;
85017-}
85018-
85019-/* Used in unit tests. */
85020-static prof_tdata_t *
85021-prof_tdata_count_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
85022-    void *arg) {
85023-	size_t *tdata_count = (size_t *)arg;
85024-
85025-	(*tdata_count)++;
85026-
85027-	return NULL;
85028-}
85029-
85030-/* Used in unit tests. */
85031-size_t
85032-prof_tdata_count(void) {
85033-	size_t tdata_count = 0;
85034-	tsdn_t *tsdn;
85035-
85036-	tsdn = tsdn_fetch();
85037-	malloc_mutex_lock(tsdn, &tdatas_mtx);
85038-	tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
85039-	    (void *)&tdata_count);
85040-	malloc_mutex_unlock(tsdn, &tdatas_mtx);
85041-
85042-	return tdata_count;
85043-}
85044-
85045-/* Used in unit tests. */
85046-size_t
85047-prof_bt_count(void) {
85048-	size_t bt_count;
85049-	tsd_t *tsd;
85050-	prof_tdata_t *tdata;
85051-
85052-	tsd = tsd_fetch();
85053-	tdata = prof_tdata_get(tsd, false);
85054-	if (tdata == NULL) {
85055-		return 0;
85056-	}
85057-
85058-	malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
85059-	bt_count = ckh_count(&bt2gctx);
85060-	malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
85061-
85062-	return bt_count;
85063-}
85064-
85065-char *
85066-prof_thread_name_alloc(tsd_t *tsd, const char *thread_name) {
85067-	char *ret;
85068-	size_t size;
85069-
85070-	if (thread_name == NULL) {
85071-		return NULL;
85072-	}
85073-
85074-	size = strlen(thread_name) + 1;
85075-	if (size == 1) {
85076-		return "";
85077-	}
85078-
85079-	ret = iallocztm(tsd_tsdn(tsd), size, sz_size2index(size), false, NULL,
85080-	    true, arena_get(TSDN_NULL, 0, true), true);
85081-	if (ret == NULL) {
85082-		return NULL;
85083-	}
85084-	memcpy(ret, thread_name, size);
85085-	return ret;
85086-}
85087-
85088-int
85089-prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name) {
85090-	assert(tsd_reentrancy_level_get(tsd) == 0);
85091-
85092-	prof_tdata_t *tdata;
85093-	unsigned i;
85094-	char *s;
85095-
85096-	tdata = prof_tdata_get(tsd, true);
85097-	if (tdata == NULL) {
85098-		return EAGAIN;
85099-	}
85100-
85101-	/* Validate input. */
85102-	if (thread_name == NULL) {
85103-		return EFAULT;
85104-	}
85105-	for (i = 0; thread_name[i] != '\0'; i++) {
85106-		char c = thread_name[i];
85107-		if (!isgraph(c) && !isblank(c)) {
85108-			return EFAULT;
85109-		}
85110-	}
85111-
85112-	s = prof_thread_name_alloc(tsd, thread_name);
85113-	if (s == NULL) {
85114-		return EAGAIN;
85115-	}
85116-
85117-	if (tdata->thread_name != NULL) {
85118-		idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
85119-		    true);
85120-		tdata->thread_name = NULL;
85121-	}
85122-	if (strlen(s) > 0) {
85123-		tdata->thread_name = s;
85124-	}
85125-	return 0;
85126-}
85127-
85128-JEMALLOC_FORMAT_PRINTF(3, 4)
85129-static void
85130-prof_dump_printf(write_cb_t *prof_dump_write, void *cbopaque,
85131-    const char *format, ...) {
85132-	va_list ap;
85133-	char buf[PROF_PRINTF_BUFSIZE];
85134-
85135-	va_start(ap, format);
85136-	malloc_vsnprintf(buf, sizeof(buf), format, ap);
85137-	va_end(ap);
85138-	prof_dump_write(cbopaque, buf);
85139-}
85140-
85141-/*
85142- * Casting a double to a uint64_t may not necessarily be in range; this can be
85143- * UB.  I don't think this is practically possible with the cur counters, but
85144- * plausibly could be with the accum counters.
85145- */
85146-#ifdef JEMALLOC_PROF
85147-static uint64_t
85148-prof_double_uint64_cast(double d) {
85149-	/*
85150-	 * Note: UINT64_MAX + 1 is exactly representable as a double on all
85151-	 * reasonable platforms (certainly those we'll support).  Writing this
85152-	 * as !(a < b) instead of (a >= b) means that we're NaN-safe.
85153-	 */
85154-	double rounded = round(d);
85155-	if (!(rounded < (double)UINT64_MAX)) {
85156-		return UINT64_MAX;
85157-	}
85158-	return (uint64_t)rounded;
85159-}
85160-#endif
85161-
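Since the undefined-behavior concern above is easy to gloss over, here is a hypothetical standalone demonstration of the same saturating-cast idea on a few arbitrary inputs; it re-states the helper locally so it compiles outside jemalloc and is not part of the deleted file.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Local copy of the saturating-cast idea: clamp out-of-range and NaN inputs
 * to UINT64_MAX instead of performing an undefined double-to-uint64 cast. */
static uint64_t
saturating_cast(double d) {
	double rounded = round(d);
	if (!(rounded < (double)UINT64_MAX)) {  /* !(a < b) is also NaN-safe */
		return UINT64_MAX;
	}
	return (uint64_t)rounded;
}

int
main(void) {
	printf("%llu\n", (unsigned long long)saturating_cast(41.6)); /* 42 */
	printf("%llu\n", (unsigned long long)saturating_cast(1e30)); /* UINT64_MAX */
	printf("%llu\n", (unsigned long long)saturating_cast(NAN));  /* UINT64_MAX */
	return 0;
}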
85162-void prof_unbias_map_init() {
85163-	/* See the comment in prof_sample_new_event_wait */
85164-#ifdef JEMALLOC_PROF
85165-	for (szind_t i = 0; i < SC_NSIZES; i++) {
85166-		double sz = (double)sz_index2size(i);
85167-		double rate = (double)(ZU(1) << lg_prof_sample);
85168-		double div_val = 1.0 - exp(-sz / rate);
85169-		double unbiased_sz = sz / div_val;
85170-		/*
85171-		 * The "true" right value for the unbiased count is
85172-		 * 1.0/(1 - exp(-sz/rate)).  The problem is, we keep the counts
85173-		 * as integers (for a variety of reasons -- rounding errors
85174-		 * could trigger asserts, and not all libcs can properly handle
85175-		 * floating point arithmetic during malloc calls inside libc).
85176-		 * Rounding to an integer, though, can lead to rounding errors
85177-		 * of over 30% for sizes close to the sampling rate.  So
85178-		 * instead, we multiply by a constant, dividing the maximum
85179-		 * possible roundoff error by that constant.  To avoid overflow
85180-		 * in summing up size_t values, the largest safe constant we can
85181-		 * pick is the size of the smallest allocation.
85182-		 */
85183-		double cnt_shift = (double)(ZU(1) << SC_LG_TINY_MIN);
85184-		double shifted_unbiased_cnt = cnt_shift / div_val;
85185-		prof_unbiased_sz[i] = (size_t)round(unbiased_sz);
85186-		prof_shifted_unbiased_cnt[i] = (size_t)round(
85187-		    shifted_unbiased_cnt);
85188-	}
85189-#else
85190-	unreachable();
85191-#endif
85192-}
85193-
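A quick numeric check of the roundoff argument above, as a hypothetical standalone calculation; the sampling rate (2^19), the allocation size (equal to the rate), and the tiny-class shift (2^3) are illustrative stand-ins, not values read out of jemalloc.

#include <math.h>
#include <stdio.h>

int
main(void) {
	double rate = (double)(1UL << 19);  /* assumed sampling rate */
	double sz = rate;                   /* size near the rate: worst case */
	double true_cnt = 1.0 / (1.0 - exp(-sz / rate));   /* ~1.582 */
	double naive = round(true_cnt);                    /* 2: ~26% off */
	double shift = (double)(1UL << 3);                 /* assumed smallest size class */
	double shifted = round(shift * true_cnt) / shift;  /* 13/8 = 1.625: ~2.7% off */
	printf("true %.3f  naive %.0f  shifted %.3f\n", true_cnt, naive, shifted);
	return 0;
}

The shifted value is what prof_shifted_unbiased_cnt[] stores; the division by the shift only happens later, at unbiasing time, so the hot path stays in integer arithmetic while the rounding error stays bounded.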
85194-/*
85195- * The unbiasing story is long.  The jeprof unbiasing logic was copied from
85196- * pprof.  Both shared an issue: they unbiased using the average size of the
85197- * allocations at a particular stack trace.  This can work out OK if allocations
85198- * are mostly of the same size given some stack, but not otherwise.  We now
85199- * internally track what the unbiased results ought to be.  We can't just report
85200- * them as they are though; they'll still go through the jeprof unbiasing
85201- * process.  Instead, we figure out what values we can feed *into* jeprof's
85202- * unbiasing mechanism that will lead to getting the right values out.
85203- *
85204- * It'll unbias count and aggregate size as:
85205- *
85206- *   c_out = c_in * 1/(1-exp(-s_in/c_in/R))
85207- *   s_out = s_in * 1/(1-exp(-s_in/c_in/R))
85208- *
85209- * We want to solve for the values of c_in and s_in that will
85210- * give the c_out and s_out that we've computed internally.
85211- *
85212- * Let's do a change of variables (both to make the math easier and to make it
85213- * easier to write):
85214- *   x = s_in / c_in
85215- *   y = s_in
85216- *   k = 1/R.
85217- *
85218- * Then
85219- *   c_out = y/x * 1/(1-exp(-k*x))
85220- *   s_out = y * 1/(1-exp(-k*x))
85221- *
85222- * The first equation gives:
85223- *   y = x * c_out * (1-exp(-k*x))
85224- * The second gives:
85225- *   y = s_out * (1-exp(-k*x))
85226- * So we have
85227- *   x = s_out / c_out.
85228- * And all the other values fall out from that.
85229- *
85230- * This is all a fair bit of work.  The thing we get out of it is that we don't
85231- * break backwards compatibility with jeprof (and the various tools that have
85232- * copied its unbiasing logic).  Eventually, we anticipate a v3 heap profile
85233- * dump format based on JSON, at which point I think much of this logic can get
85234- * cleaned up (since we'll be taking a compatibility break there anyways).
85235- */
85236-static void
85237-prof_do_unbias(uint64_t c_out_shifted_i, uint64_t s_out_i, uint64_t *r_c_in,
85238-    uint64_t *r_s_in) {
85239-#ifdef JEMALLOC_PROF
85240-	if (c_out_shifted_i == 0 || s_out_i == 0) {
85241-		*r_c_in = 0;
85242-		*r_s_in = 0;
85243-		return;
85244-	}
85245-	/*
85246-	 * See the note in prof_unbias_map_init() to see why we take c_out in a
85247-	 * shifted form.
85248-	 */
85249-	double c_out = (double)c_out_shifted_i
85250-	    / (double)(ZU(1) << SC_LG_TINY_MIN);
85251-	double s_out = (double)s_out_i;
85252-	double R = (double)(ZU(1) << lg_prof_sample);
85253-
85254-	double x = s_out / c_out;
85255-	double y = s_out * (1.0 - exp(-x / R));
85256-
85257-	double c_in = y / x;
85258-	double s_in = y;
85259-
85260-	*r_c_in = prof_double_uint64_cast(c_in);
85261-	*r_s_in = prof_double_uint64_cast(s_in);
85262-#else
85263-	unreachable();
85264-#endif
85265-}
85266-
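Because the change-of-variables argument above is easy to get backwards, here is a hypothetical round-trip check with made-up numbers (R = 2^19, c_out = 100, s_out = 4000000): solve for (c_in, s_in) the same way prof_do_unbias() does, then push them through jeprof's unbiasing formula and confirm the targets come back out. This is a standalone sketch, not part of the deleted file.

#include <math.h>
#include <stdio.h>

int
main(void) {
	double R = (double)(1UL << 19);       /* assumed sampling rate */
	double c_out = 100.0, s_out = 4.0e6;  /* the internally-tracked targets */

	/* Solve as in the comment: x = s_out/c_out, y = s_out*(1 - exp(-k*x)). */
	double x = s_out / c_out;
	double y = s_out * (1.0 - exp(-x / R));
	double c_in = y / x, s_in = y;

	/* jeprof's unbiasing multiplies both by 1/(1 - exp(-s_in/c_in/R)). */
	double scale = 1.0 / (1.0 - exp(-(s_in / c_in) / R));
	printf("c: %.1f (want %.1f)  s: %.1f (want %.1f)\n",
	    c_in * scale, c_out, s_in * scale, s_out);
	return 0;
}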
85267-static void
85268-prof_dump_print_cnts(write_cb_t *prof_dump_write, void *cbopaque,
85269-    const prof_cnt_t *cnts) {
85270-	uint64_t curobjs;
85271-	uint64_t curbytes;
85272-	uint64_t accumobjs;
85273-	uint64_t accumbytes;
85274-	if (opt_prof_unbias) {
85275-		prof_do_unbias(cnts->curobjs_shifted_unbiased,
85276-		    cnts->curbytes_unbiased, &curobjs, &curbytes);
85277-		prof_do_unbias(cnts->accumobjs_shifted_unbiased,
85278-		    cnts->accumbytes_unbiased, &accumobjs, &accumbytes);
85279-	} else {
85280-		curobjs = cnts->curobjs;
85281-		curbytes = cnts->curbytes;
85282-		accumobjs = cnts->accumobjs;
85283-		accumbytes = cnts->accumbytes;
85284-	}
85285-	prof_dump_printf(prof_dump_write, cbopaque,
85286-	    "%"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]",
85287-	    curobjs, curbytes, accumobjs, accumbytes);
85288-}
85289-
85290-static void
85291-prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) {
85292-	malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
85293-
85294-	malloc_mutex_lock(tsdn, tctx->gctx->lock);
85295-
85296-	switch (tctx->state) {
85297-	case prof_tctx_state_initializing:
85298-		malloc_mutex_unlock(tsdn, tctx->gctx->lock);
85299-		return;
85300-	case prof_tctx_state_nominal:
85301-		tctx->state = prof_tctx_state_dumping;
85302-		malloc_mutex_unlock(tsdn, tctx->gctx->lock);
85303-
85304-		memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
85305-
85306-		tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
85307-		tdata->cnt_summed.curobjs_shifted_unbiased
85308-		    += tctx->dump_cnts.curobjs_shifted_unbiased;
85309-		tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
85310-		tdata->cnt_summed.curbytes_unbiased
85311-		    += tctx->dump_cnts.curbytes_unbiased;
85312-		if (opt_prof_accum) {
85313-			tdata->cnt_summed.accumobjs +=
85314-			    tctx->dump_cnts.accumobjs;
85315-			tdata->cnt_summed.accumobjs_shifted_unbiased +=
85316-			    tctx->dump_cnts.accumobjs_shifted_unbiased;
85317-			tdata->cnt_summed.accumbytes +=
85318-			    tctx->dump_cnts.accumbytes;
85319-			tdata->cnt_summed.accumbytes_unbiased +=
85320-			    tctx->dump_cnts.accumbytes_unbiased;
85321-		}
85322-		break;
85323-	case prof_tctx_state_dumping:
85324-	case prof_tctx_state_purgatory:
85325-		not_reached();
85326-	}
85327-}
85328-
85329-static void
85330-prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) {
85331-	malloc_mutex_assert_owner(tsdn, gctx->lock);
85332-
85333-	gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
85334-	gctx->cnt_summed.curobjs_shifted_unbiased
85335-	    += tctx->dump_cnts.curobjs_shifted_unbiased;
85336-	gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
85337-	gctx->cnt_summed.curbytes_unbiased += tctx->dump_cnts.curbytes_unbiased;
85338-	if (opt_prof_accum) {
85339-		gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
85340-		gctx->cnt_summed.accumobjs_shifted_unbiased
85341-		    += tctx->dump_cnts.accumobjs_shifted_unbiased;
85342-		gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
85343-		gctx->cnt_summed.accumbytes_unbiased
85344-		    += tctx->dump_cnts.accumbytes_unbiased;
85345-	}
85346-}
85347-
85348-static prof_tctx_t *
85349-prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
85350-	tsdn_t *tsdn = (tsdn_t *)arg;
85351-
85352-	malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
85353-
85354-	switch (tctx->state) {
85355-	case prof_tctx_state_nominal:
85356-		/* New since dumping started; ignore. */
85357-		break;
85358-	case prof_tctx_state_dumping:
85359-	case prof_tctx_state_purgatory:
85360-		prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
85361-		break;
85362-	default:
85363-		not_reached();
85364-	}
85365-
85366-	return NULL;
85367-}
85368-
85369-typedef struct prof_dump_iter_arg_s prof_dump_iter_arg_t;
85370-struct prof_dump_iter_arg_s {
85371-	tsdn_t *tsdn;
85372-	write_cb_t *prof_dump_write;
85373-	void *cbopaque;
85374-};
85375-
85376-static prof_tctx_t *
85377-prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
85378-	prof_dump_iter_arg_t *arg = (prof_dump_iter_arg_t *)opaque;
85379-	malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
85380-
85381-	switch (tctx->state) {
85382-	case prof_tctx_state_initializing:
85383-	case prof_tctx_state_nominal:
85384-		/* Not captured by this dump. */
85385-		break;
85386-	case prof_tctx_state_dumping:
85387-	case prof_tctx_state_purgatory:
85388-		prof_dump_printf(arg->prof_dump_write, arg->cbopaque,
85389-		    "  t%"FMTu64": ", tctx->thr_uid);
85390-		prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque,
85391-		    &tctx->dump_cnts);
85392-		arg->prof_dump_write(arg->cbopaque, "\n");
85393-		break;
85394-	default:
85395-		not_reached();
85396-	}
85397-	return NULL;
85398-}
85399-
85400-static prof_tctx_t *
85401-prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
85402-	tsdn_t *tsdn = (tsdn_t *)arg;
85403-	prof_tctx_t *ret;
85404-
85405-	malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
85406-
85407-	switch (tctx->state) {
85408-	case prof_tctx_state_nominal:
85409-		/* New since dumping started; ignore. */
85410-		break;
85411-	case prof_tctx_state_dumping:
85412-		tctx->state = prof_tctx_state_nominal;
85413-		break;
85414-	case prof_tctx_state_purgatory:
85415-		ret = tctx;
85416-		goto label_return;
85417-	default:
85418-		not_reached();
85419-	}
85420-
85421-	ret = NULL;
85422-label_return:
85423-	return ret;
85424-}
85425-
85426-static void
85427-prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) {
85428-	cassert(config_prof);
85429-
85430-	malloc_mutex_lock(tsdn, gctx->lock);
85431-
85432-	/*
85433-	 * Increment nlimbo so that gctx won't go away before dump.
85434-	 * Additionally, link gctx into the dump list so that it is included in
85435-	 * prof_dump()'s second pass.
85436-	 */
85437-	gctx->nlimbo++;
85438-	gctx_tree_insert(gctxs, gctx);
85439-
85440-	memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
85441-
85442-	malloc_mutex_unlock(tsdn, gctx->lock);
85443-}
85444-
85445-typedef struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg_t;
85446-struct prof_gctx_merge_iter_arg_s {
85447-	tsdn_t *tsdn;
85448-	size_t *leak_ngctx;
85449-};
85450-
85451-static prof_gctx_t *
85452-prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
85453-	prof_gctx_merge_iter_arg_t *arg = (prof_gctx_merge_iter_arg_t *)opaque;
85454-
85455-	malloc_mutex_lock(arg->tsdn, gctx->lock);
85456-	tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
85457-	    (void *)arg->tsdn);
85458-	if (gctx->cnt_summed.curobjs != 0) {
85459-		(*arg->leak_ngctx)++;
85460-	}
85461-	malloc_mutex_unlock(arg->tsdn, gctx->lock);
85462-
85463-	return NULL;
85464-}
85465-
85466-static void
85467-prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
85468-	prof_tdata_t *tdata = prof_tdata_get(tsd, false);
85469-	prof_gctx_t *gctx;
85470-
85471-	/*
85472-	 * Standard tree iteration won't work here, because as soon as we
85473-	 * decrement gctx->nlimbo and unlock gctx, another thread can
85474-	 * concurrently destroy it, which will corrupt the tree.  Therefore,
85475-	 * tear down the tree one node at a time during iteration.
85476-	 */
85477-	while ((gctx = gctx_tree_first(gctxs)) != NULL) {
85478-		gctx_tree_remove(gctxs, gctx);
85479-		malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
85480-		{
85481-			prof_tctx_t *next;
85482-
85483-			next = NULL;
85484-			do {
85485-				prof_tctx_t *to_destroy =
85486-				    tctx_tree_iter(&gctx->tctxs, next,
85487-				    prof_tctx_finish_iter,
85488-				    (void *)tsd_tsdn(tsd));
85489-				if (to_destroy != NULL) {
85490-					next = tctx_tree_next(&gctx->tctxs,
85491-					    to_destroy);
85492-					tctx_tree_remove(&gctx->tctxs,
85493-					    to_destroy);
85494-					idalloctm(tsd_tsdn(tsd), to_destroy,
85495-					    NULL, NULL, true, true);
85496-				} else {
85497-					next = NULL;
85498-				}
85499-			} while (next != NULL);
85500-		}
85501-		gctx->nlimbo--;
85502-		if (prof_gctx_should_destroy(gctx)) {
85503-			gctx->nlimbo++;
85504-			malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
85505-			prof_gctx_try_destroy(tsd, tdata, gctx);
85506-		} else {
85507-			malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
85508-		}
85509-	}
85510-}
85511-
85512-typedef struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg_t;
85513-struct prof_tdata_merge_iter_arg_s {
85514-	tsdn_t *tsdn;
85515-	prof_cnt_t *cnt_all;
85516-};
85517-
85518-static prof_tdata_t *
85519-prof_tdata_merge_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
85520-    void *opaque) {
85521-	prof_tdata_merge_iter_arg_t *arg =
85522-	    (prof_tdata_merge_iter_arg_t *)opaque;
85523-
85524-	malloc_mutex_lock(arg->tsdn, tdata->lock);
85525-	if (!tdata->expired) {
85526-		size_t tabind;
85527-		union {
85528-			prof_tctx_t	*p;
85529-			void		*v;
85530-		} tctx;
85531-
85532-		tdata->dumping = true;
85533-		memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
85534-		for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
85535-		    &tctx.v);) {
85536-			prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
85537-		}
85538-
85539-		arg->cnt_all->curobjs += tdata->cnt_summed.curobjs;
85540-		arg->cnt_all->curobjs_shifted_unbiased
85541-		    += tdata->cnt_summed.curobjs_shifted_unbiased;
85542-		arg->cnt_all->curbytes += tdata->cnt_summed.curbytes;
85543-		arg->cnt_all->curbytes_unbiased
85544-		    += tdata->cnt_summed.curbytes_unbiased;
85545-		if (opt_prof_accum) {
85546-			arg->cnt_all->accumobjs += tdata->cnt_summed.accumobjs;
85547-			arg->cnt_all->accumobjs_shifted_unbiased
85548-			    += tdata->cnt_summed.accumobjs_shifted_unbiased;
85549-			arg->cnt_all->accumbytes +=
85550-			    tdata->cnt_summed.accumbytes;
85551-			arg->cnt_all->accumbytes_unbiased +=
85552-			    tdata->cnt_summed.accumbytes_unbiased;
85553-		}
85554-	} else {
85555-		tdata->dumping = false;
85556-	}
85557-	malloc_mutex_unlock(arg->tsdn, tdata->lock);
85558-
85559-	return NULL;
85560-}
85561-
85562-static prof_tdata_t *
85563-prof_tdata_dump_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
85564-    void *opaque) {
85565-	if (!tdata->dumping) {
85566-		return NULL;
85567-	}
85568-
85569-	prof_dump_iter_arg_t *arg = (prof_dump_iter_arg_t *)opaque;
85570-	prof_dump_printf(arg->prof_dump_write, arg->cbopaque, "  t%"FMTu64": ",
85571-	    tdata->thr_uid);
85572-	prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque,
85573-	    &tdata->cnt_summed);
85574-	if (tdata->thread_name != NULL) {
85575-		arg->prof_dump_write(arg->cbopaque, " ");
85576-		arg->prof_dump_write(arg->cbopaque, tdata->thread_name);
85577-	}
85578-	arg->prof_dump_write(arg->cbopaque, "\n");
85579-	return NULL;
85580-}
85581-
85582-static void
85583-prof_dump_header(prof_dump_iter_arg_t *arg, const prof_cnt_t *cnt_all) {
85584-	prof_dump_printf(arg->prof_dump_write, arg->cbopaque,
85585-	    "heap_v2/%"FMTu64"\n  t*: ", ((uint64_t)1U << lg_prof_sample));
85586-	prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque, cnt_all);
85587-	arg->prof_dump_write(arg->cbopaque, "\n");
85588-
85589-	malloc_mutex_lock(arg->tsdn, &tdatas_mtx);
85590-	tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, arg);
85591-	malloc_mutex_unlock(arg->tsdn, &tdatas_mtx);
85592-}
85593-
85594-static void
85595-prof_dump_gctx(prof_dump_iter_arg_t *arg, prof_gctx_t *gctx,
85596-    const prof_bt_t *bt, prof_gctx_tree_t *gctxs) {
85597-	cassert(config_prof);
85598-	malloc_mutex_assert_owner(arg->tsdn, gctx->lock);
85599-
85600-	/* Avoid dumping gctx's that have no useful data. */
85601-	if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
85602-	    (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
85603-		assert(gctx->cnt_summed.curobjs == 0);
85604-		assert(gctx->cnt_summed.curbytes == 0);
85605-		/*
85606-		 * These asserts would not be correct -- see the comment on races
85607-		 * in prof.c
85608-		 * assert(gctx->cnt_summed.curobjs_unbiased == 0);
85609-		 * assert(gctx->cnt_summed.curbytes_unbiased == 0);
85610-		*/
85611-		assert(gctx->cnt_summed.accumobjs == 0);
85612-		assert(gctx->cnt_summed.accumobjs_shifted_unbiased == 0);
85613-		assert(gctx->cnt_summed.accumbytes == 0);
85614-		assert(gctx->cnt_summed.accumbytes_unbiased == 0);
85615-		return;
85616-	}
85617-
85618-	arg->prof_dump_write(arg->cbopaque, "@");
85619-	for (unsigned i = 0; i < bt->len; i++) {
85620-		prof_dump_printf(arg->prof_dump_write, arg->cbopaque,
85621-		    " %#"FMTxPTR, (uintptr_t)bt->vec[i]);
85622-	}
85623-
85624-	arg->prof_dump_write(arg->cbopaque, "\n  t*: ");
85625-	prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque,
85626-	    &gctx->cnt_summed);
85627-	arg->prof_dump_write(arg->cbopaque, "\n");
85628-
85629-	tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, arg);
85630-}
85631-
85632-/*
85633- * See prof_sample_new_event_wait() comment for why the body of this function
85634- * is conditionally compiled.
85635- */
85636-static void
85637-prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx) {
85638-#ifdef JEMALLOC_PROF
85639-	/*
85640-	 * Scaling is equivalent to AdjustSamples() in jeprof, but the result may
85641-	 * differ slightly from what jeprof reports, because here we scale the
85642-	 * summary values, whereas jeprof scales each context individually and
85643-	 * reports the sums of the scaled values.
85644-	 */
85645-	if (cnt_all->curbytes != 0) {
85646-		double sample_period = (double)((uint64_t)1 << lg_prof_sample);
85647-		double ratio = (((double)cnt_all->curbytes) /
85648-		    (double)cnt_all->curobjs) / sample_period;
85649-		double scale_factor = 1.0 / (1.0 - exp(-ratio));
85650-		uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
85651-		    * scale_factor);
85652-		uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
85653-		    scale_factor);
85654-
85655-		malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
85656-		    " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
85657-		    curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
85658-		    1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
85659-		malloc_printf(
85660-		    "<jemalloc>: Run jeprof on dump output for leak detail\n");
85661-		if (opt_prof_leak_error) {
85662-			malloc_printf(
85663-			    "<jemalloc>: Exiting with error code because memory"
85664-			    " leaks were detected\n");
85665-			/*
85666-			 * Use _exit() with underscore to avoid calling atexit()
85667-			 * and entering endless cycle.
85668-			 * and entering an endless cycle.
85669-			_exit(1);
85670-		}
85671-	}
85672-#endif
85673-}
85674-
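As a worked example of the scaling above (illustrative numbers only, not taken from any real dump): with lg_prof_sample = 19 the sample period is 524288 bytes, so summed counters of curobjs = 100 and curbytes = 50000000 give ratio = (50000000 / 100) / 524288 ≈ 0.954 and scale_factor = 1 / (1 - e^-0.954) ≈ 1.63; the printed approximation would then be roughly 81 MB across about 163 objects. Per the comment, jeprof's per-context scaling would land close to, but not exactly on, the same totals.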
85675-static prof_gctx_t *
85676-prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
85677-	prof_dump_iter_arg_t *arg = (prof_dump_iter_arg_t *)opaque;
85678-	malloc_mutex_lock(arg->tsdn, gctx->lock);
85679-	prof_dump_gctx(arg, gctx, &gctx->bt, gctxs);
85680-	malloc_mutex_unlock(arg->tsdn, gctx->lock);
85681-	return NULL;
85682-}
85683-
85684-static void
85685-prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, prof_cnt_t *cnt_all,
85686-    size_t *leak_ngctx, prof_gctx_tree_t *gctxs) {
85687-	size_t tabind;
85688-	union {
85689-		prof_gctx_t	*p;
85690-		void		*v;
85691-	} gctx;
85692-
85693-	prof_enter(tsd, tdata);
85694-
85695-	/*
85696-	 * Put gctx's in limbo and clear their counters in preparation for
85697-	 * summing.
85698-	 */
85699-	gctx_tree_new(gctxs);
85700-	for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) {
85701-		prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs);
85702-	}
85703-
85704-	/*
85705-	 * Iterate over tdatas, and for the non-expired ones snapshot their tctx
85706-	 * stats and merge them into the associated gctx's.
85707-	 */
85708-	memset(cnt_all, 0, sizeof(prof_cnt_t));
85709-	prof_tdata_merge_iter_arg_t prof_tdata_merge_iter_arg = {tsd_tsdn(tsd),
85710-	    cnt_all};
85711-	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
85712-	tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
85713-	    &prof_tdata_merge_iter_arg);
85714-	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
85715-
85716-	/* Merge tctx stats into gctx's. */
85717-	*leak_ngctx = 0;
85718-	prof_gctx_merge_iter_arg_t prof_gctx_merge_iter_arg = {tsd_tsdn(tsd),
85719-	    leak_ngctx};
85720-	gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter,
85721-	    &prof_gctx_merge_iter_arg);
85722-
85723-	prof_leave(tsd, tdata);
85724-}
85725-
85726-void
85727-prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque,
85728-    prof_tdata_t *tdata, bool leakcheck) {
85729-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_dump_mtx);
85730-	prof_cnt_t cnt_all;
85731-	size_t leak_ngctx;
85732-	prof_gctx_tree_t gctxs;
85733-	prof_dump_prep(tsd, tdata, &cnt_all, &leak_ngctx, &gctxs);
85734-	prof_dump_iter_arg_t prof_dump_iter_arg = {tsd_tsdn(tsd),
85735-	    prof_dump_write, cbopaque};
85736-	prof_dump_header(&prof_dump_iter_arg, &cnt_all);
85737-	gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter, &prof_dump_iter_arg);
85738-	prof_gctx_finish(tsd, &gctxs);
85739-	if (leakcheck) {
85740-		prof_leakcheck(&cnt_all, leak_ngctx);
85741-	}
85742-}
85743-
85744-/* Used in unit tests. */
85745-void
85746-prof_cnt_all(prof_cnt_t *cnt_all) {
85747-	tsd_t *tsd = tsd_fetch();
85748-	prof_tdata_t *tdata = prof_tdata_get(tsd, false);
85749-	if (tdata == NULL) {
85750-		memset(cnt_all, 0, sizeof(prof_cnt_t));
85751-	} else {
85752-		size_t leak_ngctx;
85753-		prof_gctx_tree_t gctxs;
85754-		prof_dump_prep(tsd, tdata, cnt_all, &leak_ngctx, &gctxs);
85755-		prof_gctx_finish(tsd, &gctxs);
85756-	}
85757-}
85758-
85759-void
85760-prof_bt_hash(const void *key, size_t r_hash[2]) {
85761-	prof_bt_t *bt = (prof_bt_t *)key;
85762-
85763-	cassert(config_prof);
85764-
85765-	hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
85766-}
85767-
85768-bool
85769-prof_bt_keycomp(const void *k1, const void *k2) {
85770-	const prof_bt_t *bt1 = (prof_bt_t *)k1;
85771-	const prof_bt_t *bt2 = (prof_bt_t *)k2;
85772-
85773-	cassert(config_prof);
85774-
85775-	if (bt1->len != bt2->len) {
85776-		return false;
85777-	}
85778-	return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
85779-}
85780-
85781-prof_tdata_t *
85782-prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
85783-    char *thread_name, bool active) {
85784-	assert(tsd_reentrancy_level_get(tsd) == 0);
85785-
85786-	prof_tdata_t *tdata;
85787-
85788-	cassert(config_prof);
85789-
85790-	/* Initialize an empty cache for this thread. */
85791-	tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
85792-	    sz_size2index(sizeof(prof_tdata_t)), false, NULL, true,
85793-	    arena_get(TSDN_NULL, 0, true), true);
85794-	if (tdata == NULL) {
85795-		return NULL;
85796-	}
85797-
85798-	tdata->lock = prof_tdata_mutex_choose(thr_uid);
85799-	tdata->thr_uid = thr_uid;
85800-	tdata->thr_discrim = thr_discrim;
85801-	tdata->thread_name = thread_name;
85802-	tdata->attached = true;
85803-	tdata->expired = false;
85804-	tdata->tctx_uid_next = 0;
85805-
85806-	if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
85807-	    prof_bt_keycomp)) {
85808-		idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
85809-		return NULL;
85810-	}
85811-
85812-	tdata->enq = false;
85813-	tdata->enq_idump = false;
85814-	tdata->enq_gdump = false;
85815-
85816-	tdata->dumping = false;
85817-	tdata->active = active;
85818-
85819-	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
85820-	tdata_tree_insert(&tdatas, tdata);
85821-	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
85822-
85823-	return tdata;
85824-}
85825-
85826-static bool
85827-prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) {
85828-	if (tdata->attached && !even_if_attached) {
85829-		return false;
85830-	}
85831-	if (ckh_count(&tdata->bt2tctx) != 0) {
85832-		return false;
85833-	}
85834-	return true;
85835-}
85836-
85837-static bool
85838-prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
85839-    bool even_if_attached) {
85840-	malloc_mutex_assert_owner(tsdn, tdata->lock);
85841-
85842-	return prof_tdata_should_destroy_unlocked(tdata, even_if_attached);
85843-}
85844-
85845-static void
85846-prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
85847-    bool even_if_attached) {
85848-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
85849-	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tdata->lock);
85850-
85851-	tdata_tree_remove(&tdatas, tdata);
85852-
85853-	assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
85854-
85855-	if (tdata->thread_name != NULL) {
85856-		idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
85857-		    true);
85858-	}
85859-	ckh_delete(tsd, &tdata->bt2tctx);
85860-	idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
85861-}
85862-
85863-static void
85864-prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) {
85865-	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
85866-	prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
85867-	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
85868-}
85869-
85870-void
85871-prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) {
85872-	bool destroy_tdata;
85873-
85874-	malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
85875-	if (tdata->attached) {
85876-		destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
85877-		    true);
85878-		/*
85879-		 * Only detach if !destroy_tdata, because detaching would allow
85880-		 * another thread to win the race to destroy tdata.
85881-		 */
85882-		if (!destroy_tdata) {
85883-			tdata->attached = false;
85884-		}
85885-		tsd_prof_tdata_set(tsd, NULL);
85886-	} else {
85887-		destroy_tdata = false;
85888-	}
85889-	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
85890-	if (destroy_tdata) {
85891-		prof_tdata_destroy(tsd, tdata, true);
85892-	}
85893-}
85894-
85895-static bool
85896-prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
85897-	bool destroy_tdata;
85898-
85899-	malloc_mutex_lock(tsdn, tdata->lock);
85900-	if (!tdata->expired) {
85901-		tdata->expired = true;
85902-		destroy_tdata = prof_tdata_should_destroy(tsdn, tdata, false);
85903-	} else {
85904-		destroy_tdata = false;
85905-	}
85906-	malloc_mutex_unlock(tsdn, tdata->lock);
85907-
85908-	return destroy_tdata;
85909-}
85910-
85911-static prof_tdata_t *
85912-prof_tdata_reset_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
85913-    void *arg) {
85914-	tsdn_t *tsdn = (tsdn_t *)arg;
85915-
85916-	return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
85917-}
85918-
85919-void
85920-prof_reset(tsd_t *tsd, size_t lg_sample) {
85921-	prof_tdata_t *next;
85922-
85923-	assert(lg_sample < (sizeof(uint64_t) << 3));
85924-
85925-	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
85926-	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
85927-
85928-	lg_prof_sample = lg_sample;
85929-	prof_unbias_map_init();
85930-
85931-	next = NULL;
85932-	do {
85933-		prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
85934-		    prof_tdata_reset_iter, (void *)tsd);
85935-		if (to_destroy != NULL) {
85936-			next = tdata_tree_next(&tdatas, to_destroy);
85937-			prof_tdata_destroy_locked(tsd, to_destroy, false);
85938-		} else {
85939-			next = NULL;
85940-		}
85941-	} while (next != NULL);
85942-
85943-	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
85944-	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
85945-}
85946-
85947-static bool
85948-prof_tctx_should_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
85949-	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
85950-
85951-	if (opt_prof_accum) {
85952-		return false;
85953-	}
85954-	if (tctx->cnts.curobjs != 0) {
85955-		return false;
85956-	}
85957-	if (tctx->prepared) {
85958-		return false;
85959-	}
85960-	if (tctx->recent_count != 0) {
85961-		return false;
85962-	}
85963-	return true;
85964-}
85965-
85966-static void
85967-prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
85968-	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
85969-
85970-	assert(tctx->cnts.curobjs == 0);
85971-	assert(tctx->cnts.curbytes == 0);
85972-	/*
85973-	 * These asserts are not correct -- see the comment about races in
85974-	 * prof.c
85975-	 *
85976-	 * assert(tctx->cnts.curobjs_shifted_unbiased == 0);
85977-	 * assert(tctx->cnts.curbytes_unbiased == 0);
85978-	 */
85979-	assert(!opt_prof_accum);
85980-	assert(tctx->cnts.accumobjs == 0);
85981-	assert(tctx->cnts.accumbytes == 0);
85982-	/*
85983-	 * These ones are correct, since the accum counts never go down.  Either
85984-	 * prof_accum is off (in which case these should never have changed from
85985-	 * their initial value of zero), or it's on (in which case we shouldn't
85986-	 * be destroying this tctx).
85987-	 */
85988-	assert(tctx->cnts.accumobjs_shifted_unbiased == 0);
85989-	assert(tctx->cnts.accumbytes_unbiased == 0);
85990-
85991-	prof_gctx_t *gctx = tctx->gctx;
85992-
85993-	{
85994-		prof_tdata_t *tdata = tctx->tdata;
85995-		tctx->tdata = NULL;
85996-		ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
85997-		bool destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd),
85998-		    tdata, false);
85999-		malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
86000-		if (destroy_tdata) {
86001-			prof_tdata_destroy(tsd, tdata, false);
86002-		}
86003-	}
86004-
86005-	bool destroy_tctx, destroy_gctx;
86006-
86007-	malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
86008-	switch (tctx->state) {
86009-	case prof_tctx_state_nominal:
86010-		tctx_tree_remove(&gctx->tctxs, tctx);
86011-		destroy_tctx = true;
86012-		if (prof_gctx_should_destroy(gctx)) {
86013-			/*
86014-			 * Increment gctx->nlimbo in order to keep another
86015-			 * thread from winning the race to destroy gctx while
86016-			 * this one has gctx->lock dropped.  Without this, it
86017-			 * would be possible for another thread to:
86018-			 *
86019-			 * 1) Sample an allocation associated with gctx.
86020-			 * 2) Deallocate the sampled object.
86021-			 * 3) Successfully prof_gctx_try_destroy(gctx).
86022-			 *
86023-			 * The result would be that gctx no longer exists by the
86024-			 * time this thread accesses it in
86025-			 * prof_gctx_try_destroy().
86026-			 */
86027-			gctx->nlimbo++;
86028-			destroy_gctx = true;
86029-		} else {
86030-			destroy_gctx = false;
86031-		}
86032-		break;
86033-	case prof_tctx_state_dumping:
86034-		/*
86035-		 * A dumping thread needs tctx to remain valid until dumping
86036-		 * has finished.  Change state such that the dumping thread will
86037-		 * complete destruction during a late dump iteration phase.
86038-		 */
86039-		tctx->state = prof_tctx_state_purgatory;
86040-		destroy_tctx = false;
86041-		destroy_gctx = false;
86042-		break;
86043-	default:
86044-		not_reached();
86045-		destroy_tctx = false;
86046-		destroy_gctx = false;
86047-	}
86048-	malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
86049-	if (destroy_gctx) {
86050-		prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx);
86051-	}
86052-	if (destroy_tctx) {
86053-		idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true);
86054-	}
86055-}
86056-
86057-void
86058-prof_tctx_try_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
86059-	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
86060-	if (prof_tctx_should_destroy(tsd, tctx)) {
86061-		/* tctx->tdata->lock will be released in prof_tctx_destroy(). */
86062-		prof_tctx_destroy(tsd, tctx);
86063-	} else {
86064-		malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
86065-	}
86066-}
86067-
86068-/******************************************************************************/
86069diff --git a/jemalloc/src/prof_log.c b/jemalloc/src/prof_log.c
86070deleted file mode 100644
86071index 0632c3b..0000000
86072--- a/jemalloc/src/prof_log.c
86073+++ /dev/null
86074@@ -1,717 +0,0 @@
86075-#include "jemalloc/internal/jemalloc_preamble.h"
86076-#include "jemalloc/internal/jemalloc_internal_includes.h"
86077-
86078-#include "jemalloc/internal/assert.h"
86079-#include "jemalloc/internal/buf_writer.h"
86080-#include "jemalloc/internal/ckh.h"
86081-#include "jemalloc/internal/emitter.h"
86082-#include "jemalloc/internal/hash.h"
86083-#include "jemalloc/internal/malloc_io.h"
86084-#include "jemalloc/internal/mutex.h"
86085-#include "jemalloc/internal/prof_data.h"
86086-#include "jemalloc/internal/prof_log.h"
86087-#include "jemalloc/internal/prof_sys.h"
86088-
86089-bool opt_prof_log = false;
86090-typedef enum prof_logging_state_e prof_logging_state_t;
86091-enum prof_logging_state_e {
86092-	prof_logging_state_stopped,
86093-	prof_logging_state_started,
86094-	prof_logging_state_dumping
86095-};
86096-
86097-/*
86098- * - stopped: log_start never called, or previous log_stop has completed.
86099- * - started: log_start called, log_stop not called yet. Allocations are logged.
86100- * - dumping: log_stop called but not finished; samples are not logged anymore.
86101- */
86102-prof_logging_state_t prof_logging_state = prof_logging_state_stopped;
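
The three states above form a small state machine guarded by log_mtx: starting is only legal from the stopped state, and stopping passes through the dumping state so the heavy serialization can run without holding the lock. A minimal, self-contained sketch of that discipline (plain pthreads, hypothetical names, not the deleted jemalloc code itself):

	#include <pthread.h>
	#include <stdbool.h>

	typedef enum { LOG_STOPPED, LOG_STARTED, LOG_DUMPING } log_state_t;

	static log_state_t state = LOG_STOPPED;
	static pthread_mutex_t state_mtx = PTHREAD_MUTEX_INITIALIZER;

	/* Returns true on error, mirroring the convention used in this file. */
	static bool
	log_start(void) {
		pthread_mutex_lock(&state_mtx);
		bool err = (state != LOG_STOPPED);  /* Already started or mid-dump. */
		if (!err) {
			state = LOG_STARTED;
		}
		pthread_mutex_unlock(&state_mtx);
		return err;
	}

	static bool
	log_stop(void) {
		pthread_mutex_lock(&state_mtx);
		if (state != LOG_STARTED) {
			pthread_mutex_unlock(&state_mtx);
			return true;
		}
		state = LOG_DUMPING;  /* Blocks concurrent start/stop/log attempts. */
		pthread_mutex_unlock(&state_mtx);

		/* ... write the log to disk without holding state_mtx ... */

		pthread_mutex_lock(&state_mtx);
		state = LOG_STOPPED;
		pthread_mutex_unlock(&state_mtx);
		return false;
	}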
86103-
86104-/* Used in unit tests. */
86105-static bool prof_log_dummy = false;
86106-
86107-/* Incremented for every log file that is output. */
86108-static uint64_t log_seq = 0;
86109-static char log_filename[
86110-    /* Minimize memory bloat for non-prof builds. */
86111-#ifdef JEMALLOC_PROF
86112-    PATH_MAX +
86113-#endif
86114-    1];
86115-
86116-/* Timestamp for most recent call to log_start(). */
86117-static nstime_t log_start_timestamp;
86118-
86119-/* Increment these when adding to the log_bt and log_thr linked lists. */
86120-static size_t log_bt_index = 0;
86121-static size_t log_thr_index = 0;
86122-
86123-/* Linked list node definitions. These are only used in this file. */
86124-typedef struct prof_bt_node_s prof_bt_node_t;
86125-
86126-struct prof_bt_node_s {
86127-	prof_bt_node_t *next;
86128-	size_t index;
86129-	prof_bt_t bt;
86130-	/* Variable size backtrace vector pointed to by bt. */
86131-	void *vec[1];
86132-};
86133-
86134-typedef struct prof_thr_node_s prof_thr_node_t;
86135-
86136-struct prof_thr_node_s {
86137-	prof_thr_node_t *next;
86138-	size_t index;
86139-	uint64_t thr_uid;
86140-	/* Variable size based on thr_name_sz. */
86141-	char name[1];
86142-};
86143-
86144-typedef struct prof_alloc_node_s prof_alloc_node_t;
86145-
86146-/* This is output when logging sampled allocations. */
86147-struct prof_alloc_node_s {
86148-	prof_alloc_node_t *next;
86149-	/* Indices into an array of thread data. */
86150-	size_t alloc_thr_ind;
86151-	size_t free_thr_ind;
86152-
86153-	/* Indices into an array of backtraces. */
86154-	size_t alloc_bt_ind;
86155-	size_t free_bt_ind;
86156-
86157-	uint64_t alloc_time_ns;
86158-	uint64_t free_time_ns;
86159-
86160-	size_t usize;
86161-};
86162-
86163-/*
86164- * Created on the first call to prof_try_log and deleted on prof_log_stop.
86165- * These are the backtraces and threads that have already been logged by an
86166- * allocation.
86167- */
86168-static bool log_tables_initialized = false;
86169-static ckh_t log_bt_node_set;
86170-static ckh_t log_thr_node_set;
86171-
86172-/* Store linked lists for logged data. */
86173-static prof_bt_node_t *log_bt_first = NULL;
86174-static prof_bt_node_t *log_bt_last = NULL;
86175-static prof_thr_node_t *log_thr_first = NULL;
86176-static prof_thr_node_t *log_thr_last = NULL;
86177-static prof_alloc_node_t *log_alloc_first = NULL;
86178-static prof_alloc_node_t *log_alloc_last = NULL;
86179-
86180-/* Protects the prof_logging_state and any log_{...} variable. */
86181-malloc_mutex_t log_mtx;
86182-
86183-/******************************************************************************/
86184-/*
86185- * Function prototypes for static functions that are referenced prior to
86186- * definition.
86187- */
86188-
86189-/* Hashtable functions for log_bt_node_set and log_thr_node_set. */
86190-static void prof_thr_node_hash(const void *key, size_t r_hash[2]);
86191-static bool prof_thr_node_keycomp(const void *k1, const void *k2);
86192-static void prof_bt_node_hash(const void *key, size_t r_hash[2]);
86193-static bool prof_bt_node_keycomp(const void *k1, const void *k2);
86194-
86195-/******************************************************************************/
86196-
86197-static size_t
86198-prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) {
86199-	assert(prof_logging_state == prof_logging_state_started);
86200-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
86201-
86202-	prof_bt_node_t dummy_node;
86203-	dummy_node.bt = *bt;
86204-	prof_bt_node_t *node;
86205-
86206-	/* See if this backtrace is already cached in the table. */
86207-	if (ckh_search(&log_bt_node_set, (void *)(&dummy_node),
86208-	    (void **)(&node), NULL)) {
86209-		size_t sz = offsetof(prof_bt_node_t, vec) +
86210-			        (bt->len * sizeof(void *));
86211-		prof_bt_node_t *new_node = (prof_bt_node_t *)
86212-		    iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
86213-		    true, arena_get(TSDN_NULL, 0, true), true);
86214-		if (log_bt_first == NULL) {
86215-			log_bt_first = new_node;
86216-			log_bt_last = new_node;
86217-		} else {
86218-			log_bt_last->next = new_node;
86219-			log_bt_last = new_node;
86220-		}
86221-
86222-		new_node->next = NULL;
86223-		new_node->index = log_bt_index;
86224-		/*
86225-		 * Copy the backtrace: bt is inside a tdata or gctx, which
86226-		 * might die before prof_log_stop is called.
86227-		 */
86228-		new_node->bt.len = bt->len;
86229-		memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *));
86230-		new_node->bt.vec = new_node->vec;
86231-
86232-		log_bt_index++;
86233-		ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL);
86234-		return new_node->index;
86235-	} else {
86236-		return node->index;
86237-	}
86238-}
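
prof_log_bt_index() above is an interning pattern: probe a hash set keyed by content, and only on a miss allocate a node, append it to a list that preserves first-seen order, and hand out a dense index. A stripped-down sketch of the same idea using C strings, with a linear scan standing in for the lookup (the deleted code uses jemalloc's cuckoo hash, ckh, so its lookups stay fast); error handling is elided for brevity:

	#include <stddef.h>
	#include <stdlib.h>
	#include <string.h>

	typedef struct node_s {
		struct node_s *next;
		size_t index;
		char *key;
	} node_t;

	static node_t *first = NULL, *last = NULL;
	static size_t next_index = 0;

	/* Return the dense index for key, creating a node on first sight. */
	static size_t
	intern(const char *key) {
		for (node_t *n = first; n != NULL; n = n->next) {
			if (strcmp(n->key, key) == 0) {
				return n->index;        /* Cache hit. */
			}
		}
		node_t *n = malloc(sizeof(*n));
		n->next = NULL;
		n->index = next_index++;
		n->key = strdup(key);   /* Copy: the caller's key may not outlive us. */
		if (first == NULL) {
			first = last = n;
		} else {
			last->next = n;
			last = n;
		}
		return n->index;
	}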
86239-
86240-static size_t
86241-prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) {
86242-	assert(prof_logging_state == prof_logging_state_started);
86243-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
86244-
86245-	prof_thr_node_t dummy_node;
86246-	dummy_node.thr_uid = thr_uid;
86247-	prof_thr_node_t *node;
86248-
86249-	/* See if this thread is already cached in the table. */
86250-	if (ckh_search(&log_thr_node_set, (void *)(&dummy_node),
86251-	    (void **)(&node), NULL)) {
86252-		size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1;
86253-		prof_thr_node_t *new_node = (prof_thr_node_t *)
86254-		    iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
86255-		    true, arena_get(TSDN_NULL, 0, true), true);
86256-		if (log_thr_first == NULL) {
86257-			log_thr_first = new_node;
86258-			log_thr_last = new_node;
86259-		} else {
86260-			log_thr_last->next = new_node;
86261-			log_thr_last = new_node;
86262-		}
86263-
86264-		new_node->next = NULL;
86265-		new_node->index = log_thr_index;
86266-		new_node->thr_uid = thr_uid;
86267-		strcpy(new_node->name, name);
86268-
86269-		log_thr_index++;
86270-		ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL);
86271-		return new_node->index;
86272-	} else {
86273-		return node->index;
86274-	}
86275-}
86276-
86277-JEMALLOC_COLD
86278-void
86279-prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info) {
86280-	cassert(config_prof);
86281-	prof_tctx_t *tctx = prof_info->alloc_tctx;
86282-	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
86283-
86284-	prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false);
86285-	if (cons_tdata == NULL) {
86286-		/*
86287-		 * We decide not to log these allocations. cons_tdata will be
86288-		 * NULL only when the current thread is in a weird state (e.g.
86289-		 * it's being destroyed).
86290-		 */
86291-		return;
86292-	}
86293-
86294-	malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx);
86295-
86296-	if (prof_logging_state != prof_logging_state_started) {
86297-		goto label_done;
86298-	}
86299-
86300-	if (!log_tables_initialized) {
86301-		bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS,
86302-				prof_bt_node_hash, prof_bt_node_keycomp);
86303-		bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS,
86304-				prof_thr_node_hash, prof_thr_node_keycomp);
86305-		if (err1 || err2) {
86306-			goto label_done;
86307-		}
86308-		log_tables_initialized = true;
86309-	}
86310-
86311-	nstime_t alloc_time = prof_info->alloc_time;
86312-	nstime_t free_time;
86313-	nstime_prof_init_update(&free_time);
86314-
86315-	size_t sz = sizeof(prof_alloc_node_t);
86316-	prof_alloc_node_t *new_node = (prof_alloc_node_t *)
86317-	    iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true,
86318-	    arena_get(TSDN_NULL, 0, true), true);
86319-
86320-	const char *prod_thr_name = (tctx->tdata->thread_name == NULL) ?
86321-				        "" : tctx->tdata->thread_name;
86322-	const char *cons_thr_name = prof_thread_name_get(tsd);
86323-
86324-	prof_bt_t bt;
86325-	/* Initialize the backtrace, using the buffer in tdata to store it. */
86326-	bt_init(&bt, cons_tdata->vec);
86327-	prof_backtrace(tsd, &bt);
86328-	prof_bt_t *cons_bt = &bt;
86329-
86330-	/* We haven't destroyed tctx yet, so gctx should be good to read. */
86331-	prof_bt_t *prod_bt = &tctx->gctx->bt;
86332-
86333-	new_node->next = NULL;
86334-	new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid,
86335-				      prod_thr_name);
86336-	new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid,
86337-				     cons_thr_name);
86338-	new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt);
86339-	new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt);
86340-	new_node->alloc_time_ns = nstime_ns(&alloc_time);
86341-	new_node->free_time_ns = nstime_ns(&free_time);
86342-	new_node->usize = usize;
86343-
86344-	if (log_alloc_first == NULL) {
86345-		log_alloc_first = new_node;
86346-		log_alloc_last = new_node;
86347-	} else {
86348-		log_alloc_last->next = new_node;
86349-		log_alloc_last = new_node;
86350-	}
86351-
86352-label_done:
86353-	malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx);
86354-}
86355-
86356-static void
86357-prof_bt_node_hash(const void *key, size_t r_hash[2]) {
86358-	const prof_bt_node_t *bt_node = (prof_bt_node_t *)key;
86359-	prof_bt_hash((void *)(&bt_node->bt), r_hash);
86360-}
86361-
86362-static bool
86363-prof_bt_node_keycomp(const void *k1, const void *k2) {
86364-	const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1;
86365-	const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2;
86366-	return prof_bt_keycomp((void *)(&bt_node1->bt),
86367-	    (void *)(&bt_node2->bt));
86368-}
86369-
86370-static void
86371-prof_thr_node_hash(const void *key, size_t r_hash[2]) {
86372-	const prof_thr_node_t *thr_node = (prof_thr_node_t *)key;
86373-	hash(&thr_node->thr_uid, sizeof(uint64_t), 0x94122f35U, r_hash);
86374-}
86375-
86376-static bool
86377-prof_thr_node_keycomp(const void *k1, const void *k2) {
86378-	const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1;
86379-	const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2;
86380-	return thr_node1->thr_uid == thr_node2->thr_uid;
86381-}
86382-
86383-/* Used in unit tests. */
86384-size_t
86385-prof_log_bt_count(void) {
86386-	cassert(config_prof);
86387-	size_t cnt = 0;
86388-	prof_bt_node_t *node = log_bt_first;
86389-	while (node != NULL) {
86390-		cnt++;
86391-		node = node->next;
86392-	}
86393-	return cnt;
86394-}
86395-
86396-/* Used in unit tests. */
86397-size_t
86398-prof_log_alloc_count(void) {
86399-	cassert(config_prof);
86400-	size_t cnt = 0;
86401-	prof_alloc_node_t *node = log_alloc_first;
86402-	while (node != NULL) {
86403-		cnt++;
86404-		node = node->next;
86405-	}
86406-	return cnt;
86407-}
86408-
86409-/* Used in unit tests. */
86410-size_t
86411-prof_log_thr_count(void) {
86412-	cassert(config_prof);
86413-	size_t cnt = 0;
86414-	prof_thr_node_t *node = log_thr_first;
86415-	while (node != NULL) {
86416-		cnt++;
86417-		node = node->next;
86418-	}
86419-	return cnt;
86420-}
86421-
86422-/* Used in unit tests. */
86423-bool
86424-prof_log_is_logging(void) {
86425-	cassert(config_prof);
86426-	return prof_logging_state == prof_logging_state_started;
86427-}
86428-
86429-/* Used in unit tests. */
86430-bool
86431-prof_log_rep_check(void) {
86432-	cassert(config_prof);
86433-	if (prof_logging_state == prof_logging_state_stopped
86434-	    && log_tables_initialized) {
86435-		return true;
86436-	}
86437-
86438-	if (log_bt_last != NULL && log_bt_last->next != NULL) {
86439-		return true;
86440-	}
86441-	if (log_thr_last != NULL && log_thr_last->next != NULL) {
86442-		return true;
86443-	}
86444-	if (log_alloc_last != NULL && log_alloc_last->next != NULL) {
86445-		return true;
86446-	}
86447-
86448-	size_t bt_count = prof_log_bt_count();
86449-	size_t thr_count = prof_log_thr_count();
86450-	size_t alloc_count = prof_log_alloc_count();
86451-
86452-
86453-	if (prof_logging_state == prof_logging_state_stopped) {
86454-		if (bt_count != 0 || thr_count != 0 || alloc_count != 0) {
86455-			return true;
86456-		}
86457-	}
86458-
86459-	prof_alloc_node_t *node = log_alloc_first;
86460-	while (node != NULL) {
86461-		if (node->alloc_bt_ind >= bt_count) {
86462-			return true;
86463-		}
86464-		if (node->free_bt_ind >= bt_count) {
86465-			return true;
86466-		}
86467-		if (node->alloc_thr_ind >= thr_count) {
86468-			return true;
86469-		}
86470-		if (node->free_thr_ind >= thr_count) {
86471-			return true;
86472-		}
86473-		if (node->alloc_time_ns > node->free_time_ns) {
86474-			return true;
86475-		}
86476-		node = node->next;
86477-	}
86478-
86479-	return false;
86480-}
86481-
86482-/* Used in unit tests. */
86483-void
86484-prof_log_dummy_set(bool new_value) {
86485-	cassert(config_prof);
86486-	prof_log_dummy = new_value;
86487-}
86488-
86489-/* Used as an atexit function to stop logging on exit. */
86490-static void
86491-prof_log_stop_final(void) {
86492-	tsd_t *tsd = tsd_fetch();
86493-	prof_log_stop(tsd_tsdn(tsd));
86494-}
86495-
86496-JEMALLOC_COLD
86497-bool
86498-prof_log_start(tsdn_t *tsdn, const char *filename) {
86499-	cassert(config_prof);
86500-
86501-	if (!opt_prof) {
86502-		return true;
86503-	}
86504-
86505-	bool ret = false;
86506-
86507-	malloc_mutex_lock(tsdn, &log_mtx);
86508-
86509-	static bool prof_log_atexit_called = false;
86510-	if (!prof_log_atexit_called) {
86511-		prof_log_atexit_called = true;
86512-		if (atexit(prof_log_stop_final) != 0) {
86513-			malloc_write("<jemalloc>: Error in atexit() "
86514-			    "for logging\n");
86515-			if (opt_abort) {
86516-				abort();
86517-			}
86518-			ret = true;
86519-			goto label_done;
86520-		}
86521-	}
86522-
86523-	if (prof_logging_state != prof_logging_state_stopped) {
86524-		ret = true;
86525-	} else if (filename == NULL) {
86526-		/* Make default name. */
86527-		prof_get_default_filename(tsdn, log_filename, log_seq);
86528-		log_seq++;
86529-		prof_logging_state = prof_logging_state_started;
86530-	} else if (strlen(filename) >= PROF_DUMP_FILENAME_LEN) {
86531-		ret = true;
86532-	} else {
86533-		strcpy(log_filename, filename);
86534-		prof_logging_state = prof_logging_state_started;
86535-	}
86536-
86537-	if (!ret) {
86538-		nstime_prof_init_update(&log_start_timestamp);
86539-	}
86540-label_done:
86541-	malloc_mutex_unlock(tsdn, &log_mtx);
86542-
86543-	return ret;
86544-}
86545-
86546-struct prof_emitter_cb_arg_s {
86547-	int fd;
86548-	ssize_t ret;
86549-};
86550-
86551-static void
86552-prof_emitter_write_cb(void *opaque, const char *to_write) {
86553-	struct prof_emitter_cb_arg_s *arg =
86554-	    (struct prof_emitter_cb_arg_s *)opaque;
86555-	size_t bytes = strlen(to_write);
86556-	if (prof_log_dummy) {
86557-		return;
86558-	}
86559-	arg->ret = malloc_write_fd(arg->fd, to_write, bytes);
86560-}
86561-
86562-/*
86563- * prof_log_emit_{...} goes through the appropriate linked list, emitting each
86564- * node to the json and deallocating it.
86565- */
86566-static void
86567-prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) {
86568-	emitter_json_array_kv_begin(emitter, "threads");
86569-	prof_thr_node_t *thr_node = log_thr_first;
86570-	prof_thr_node_t *thr_old_node;
86571-	while (thr_node != NULL) {
86572-		emitter_json_object_begin(emitter);
86573-
86574-		emitter_json_kv(emitter, "thr_uid", emitter_type_uint64,
86575-		    &thr_node->thr_uid);
86576-
86577-		char *thr_name = thr_node->name;
86578-
86579-		emitter_json_kv(emitter, "thr_name", emitter_type_string,
86580-		    &thr_name);
86581-
86582-		emitter_json_object_end(emitter);
86583-		thr_old_node = thr_node;
86584-		thr_node = thr_node->next;
86585-		idalloctm(tsd_tsdn(tsd), thr_old_node, NULL, NULL, true, true);
86586-	}
86587-	emitter_json_array_end(emitter);
86588-}
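
Each prof_log_emit_* function walks its list exactly once, emitting a node and then freeing it, with the successor pointer captured before the node is released. The same save-next-then-free shape in isolation, continuing the interning sketch above (the per-node work is a stub; reading n->next after free(n) would be a use-after-free):

	static void
	emit_node(node_t *n) {
		(void)n;        /* e.g. write one JSON object for this node */
	}

	static void
	drain(node_t *head) {
		node_t *n = head;
		while (n != NULL) {
			node_t *next = n->next;
			emit_node(n);
			free(n->key);
			free(n);
			n = next;
		}
	}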
86589-
86590-static void
86591-prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) {
86592-	emitter_json_array_kv_begin(emitter, "stack_traces");
86593-	prof_bt_node_t *bt_node = log_bt_first;
86594-	prof_bt_node_t *bt_old_node;
86595-	/*
86596-	 * Size the buffer for "%p" output: two hex digits per pointer byte,
86597-	 * plus two for the "0x" prefix and one for the terminating '\0'.
86598-	 */
86599-	char buf[2 * sizeof(intptr_t) + 3];
86600-	size_t buf_sz = sizeof(buf);
86601-	while (bt_node != NULL) {
86602-		emitter_json_array_begin(emitter);
86603-		size_t i;
86604-		for (i = 0; i < bt_node->bt.len; i++) {
86605-			malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]);
86606-			char *trace_str = buf;
86607-			emitter_json_value(emitter, emitter_type_string,
86608-			    &trace_str);
86609-		}
86610-		emitter_json_array_end(emitter);
86611-
86612-		bt_old_node = bt_node;
86613-		bt_node = bt_node->next;
86614-		idalloctm(tsd_tsdn(tsd), bt_old_node, NULL, NULL, true, true);
86615-	}
86616-	emitter_json_array_end(emitter);
86617-}
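
The buf sizing above is worth spelling out: on a typical 64-bit target sizeof(intptr_t) is 8, so a pointer needs at most 16 hex digits, plus 2 for the "0x" prefix and 1 for the terminating '\0', i.e. 19 bytes. A standalone check using the standard snprintf rather than jemalloc's malloc_snprintf (the exact "%p" format is implementation-defined, so this is illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void) {
		char buf[2 * sizeof(intptr_t) + 3];     /* 19 bytes on LP64. */
		void *p = (void *)&buf;
		int n = snprintf(buf, sizeof(buf), "%p", p);
		/* n is the length that would have been written; it must fit. */
		printf("formatted \"%s\" using %d of %zu bytes\n",
		    buf, n, sizeof(buf));
		return 0;
	}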
86618-
86619-static void
86620-prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) {
86621-	emitter_json_array_kv_begin(emitter, "allocations");
86622-	prof_alloc_node_t *alloc_node = log_alloc_first;
86623-	prof_alloc_node_t *alloc_old_node;
86624-	while (alloc_node != NULL) {
86625-		emitter_json_object_begin(emitter);
86626-
86627-		emitter_json_kv(emitter, "alloc_thread", emitter_type_size,
86628-		    &alloc_node->alloc_thr_ind);
86629-
86630-		emitter_json_kv(emitter, "free_thread", emitter_type_size,
86631-		    &alloc_node->free_thr_ind);
86632-
86633-		emitter_json_kv(emitter, "alloc_trace", emitter_type_size,
86634-		    &alloc_node->alloc_bt_ind);
86635-
86636-		emitter_json_kv(emitter, "free_trace", emitter_type_size,
86637-		    &alloc_node->free_bt_ind);
86638-
86639-		emitter_json_kv(emitter, "alloc_timestamp",
86640-		    emitter_type_uint64, &alloc_node->alloc_time_ns);
86641-
86642-		emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64,
86643-		    &alloc_node->free_time_ns);
86644-
86645-		emitter_json_kv(emitter, "usize", emitter_type_uint64,
86646-		    &alloc_node->usize);
86647-
86648-		emitter_json_object_end(emitter);
86649-
86650-		alloc_old_node = alloc_node;
86651-		alloc_node = alloc_node->next;
86652-		idalloctm(tsd_tsdn(tsd), alloc_old_node, NULL, NULL, true,
86653-		    true);
86654-	}
86655-	emitter_json_array_end(emitter);
86656-}
86657-
86658-static void
86659-prof_log_emit_metadata(emitter_t *emitter) {
86660-	emitter_json_object_kv_begin(emitter, "info");
86661-
86662-	nstime_t now;
86663-
86664-	nstime_prof_init_update(&now);
86665-	uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp);
86666-	emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns);
86667-
86668-	char *vers = JEMALLOC_VERSION;
86669-	emitter_json_kv(emitter, "version",
86670-	    emitter_type_string, &vers);
86671-
86672-	emitter_json_kv(emitter, "lg_sample_rate",
86673-	    emitter_type_int, &lg_prof_sample);
86674-
86675-	const char *res_type = prof_time_res_mode_names[opt_prof_time_res];
86676-	emitter_json_kv(emitter, "prof_time_resolution", emitter_type_string,
86677-	    &res_type);
86678-
86679-	int pid = prof_getpid();
86680-	emitter_json_kv(emitter, "pid", emitter_type_int, &pid);
86681-
86682-	emitter_json_object_end(emitter);
86683-}
86684-
86685-#define PROF_LOG_STOP_BUFSIZE PROF_DUMP_BUFSIZE
86686-JEMALLOC_COLD
86687-bool
86688-prof_log_stop(tsdn_t *tsdn) {
86689-	cassert(config_prof);
86690-	if (!opt_prof || !prof_booted) {
86691-		return true;
86692-	}
86693-
86694-	tsd_t *tsd = tsdn_tsd(tsdn);
86695-	malloc_mutex_lock(tsdn, &log_mtx);
86696-
86697-	if (prof_logging_state != prof_logging_state_started) {
86698-		malloc_mutex_unlock(tsdn, &log_mtx);
86699-		return true;
86700-	}
86701-
86702-	/*
86703-	 * Set the state to dumping. We'll set it to stopped when we're done.
86704-	 * Since other threads won't be able to start/stop/log when the state is
86705-	 * dumping, we don't have to hold the lock during the whole method.
86706-	 */
86707-	prof_logging_state = prof_logging_state_dumping;
86708-	malloc_mutex_unlock(tsdn, &log_mtx);
86709-
86710-
86711-	emitter_t emitter;
86712-
86713-	/* Create a file. */
86714-
86715-	int fd;
86716-	if (prof_log_dummy) {
86717-		fd = 0;
86718-	} else {
86719-		fd = creat(log_filename, 0644);
86720-	}
86721-
86722-	if (fd == -1) {
86723-		malloc_printf("<jemalloc>: creat() for log file \"%s\" "
86724-			      "failed with %d\n", log_filename, errno);
86725-		if (opt_abort) {
86726-			abort();
86727-		}
86728-		return true;
86729-	}
86730-
86731-	struct prof_emitter_cb_arg_s arg;
86732-	arg.fd = fd;
86733-
86734-	buf_writer_t buf_writer;
86735-	buf_writer_init(tsdn, &buf_writer, prof_emitter_write_cb, &arg, NULL,
86736-	    PROF_LOG_STOP_BUFSIZE);
86737-	emitter_init(&emitter, emitter_output_json_compact, buf_writer_cb,
86738-	    &buf_writer);
86739-
86740-	emitter_begin(&emitter);
86741-	prof_log_emit_metadata(&emitter);
86742-	prof_log_emit_threads(tsd, &emitter);
86743-	prof_log_emit_traces(tsd, &emitter);
86744-	prof_log_emit_allocs(tsd, &emitter);
86745-	emitter_end(&emitter);
86746-
86747-	buf_writer_terminate(tsdn, &buf_writer);
86748-
86749-	/* Reset global state. */
86750-	if (log_tables_initialized) {
86751-		ckh_delete(tsd, &log_bt_node_set);
86752-		ckh_delete(tsd, &log_thr_node_set);
86753-	}
86754-	log_tables_initialized = false;
86755-	log_bt_index = 0;
86756-	log_thr_index = 0;
86757-	log_bt_first = NULL;
86758-	log_bt_last = NULL;
86759-	log_thr_first = NULL;
86760-	log_thr_last = NULL;
86761-	log_alloc_first = NULL;
86762-	log_alloc_last = NULL;
86763-
86764-	malloc_mutex_lock(tsdn, &log_mtx);
86765-	prof_logging_state = prof_logging_state_stopped;
86766-	malloc_mutex_unlock(tsdn, &log_mtx);
86767-
86768-	if (prof_log_dummy) {
86769-		return false;
86770-	}
86771-	return close(fd) || arg.ret == -1;
86772-}
86773-#undef PROF_LOG_STOP_BUFSIZE
86774-
86775-JEMALLOC_COLD
86776-bool
86777-prof_log_init(tsd_t *tsd) {
86778-	cassert(config_prof);
86779-	if (malloc_mutex_init(&log_mtx, "prof_log",
86780-	    WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) {
86781-		return true;
86782-	}
86783-
86784-	if (opt_prof_log) {
86785-		prof_log_start(tsd_tsdn(tsd), NULL);
86786-	}
86787-
86788-	return false;
86789-}
86790-
86791-/******************************************************************************/
86792diff --git a/jemalloc/src/prof_recent.c b/jemalloc/src/prof_recent.c
86793deleted file mode 100644
86794index 834a944..0000000
86795--- a/jemalloc/src/prof_recent.c
86796+++ /dev/null
86797@@ -1,600 +0,0 @@
86798-#include "jemalloc/internal/jemalloc_preamble.h"
86799-#include "jemalloc/internal/jemalloc_internal_includes.h"
86800-
86801-#include "jemalloc/internal/assert.h"
86802-#include "jemalloc/internal/buf_writer.h"
86803-#include "jemalloc/internal/emitter.h"
86804-#include "jemalloc/internal/prof_data.h"
86805-#include "jemalloc/internal/prof_recent.h"
86806-
86807-ssize_t opt_prof_recent_alloc_max = PROF_RECENT_ALLOC_MAX_DEFAULT;
86808-malloc_mutex_t prof_recent_alloc_mtx; /* Protects the fields below */
86809-static atomic_zd_t prof_recent_alloc_max;
86810-static ssize_t prof_recent_alloc_count = 0;
86811-prof_recent_list_t prof_recent_alloc_list;
86812-
86813-malloc_mutex_t prof_recent_dump_mtx; /* Protects dumping. */
86814-
86815-static void
86816-prof_recent_alloc_max_init() {
86817-	atomic_store_zd(&prof_recent_alloc_max, opt_prof_recent_alloc_max,
86818-	    ATOMIC_RELAXED);
86819-}
86820-
86821-static inline ssize_t
86822-prof_recent_alloc_max_get_no_lock() {
86823-	return atomic_load_zd(&prof_recent_alloc_max, ATOMIC_RELAXED);
86824-}
86825-
86826-static inline ssize_t
86827-prof_recent_alloc_max_get(tsd_t *tsd) {
86828-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
86829-	return prof_recent_alloc_max_get_no_lock();
86830-}
86831-
86832-static inline ssize_t
86833-prof_recent_alloc_max_update(tsd_t *tsd, ssize_t max) {
86834-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
86835-	ssize_t old_max = prof_recent_alloc_max_get(tsd);
86836-	atomic_store_zd(&prof_recent_alloc_max, max, ATOMIC_RELAXED);
86837-	return old_max;
86838-}
86839-
86840-static prof_recent_t *
86841-prof_recent_allocate_node(tsdn_t *tsdn) {
86842-	return (prof_recent_t *)iallocztm(tsdn, sizeof(prof_recent_t),
86843-	    sz_size2index(sizeof(prof_recent_t)), false, NULL, true,
86844-	    arena_get(tsdn, 0, false), true);
86845-}
86846-
86847-static void
86848-prof_recent_free_node(tsdn_t *tsdn, prof_recent_t *node) {
86849-	assert(node != NULL);
86850-	assert(isalloc(tsdn, node) == sz_s2u(sizeof(prof_recent_t)));
86851-	idalloctm(tsdn, node, NULL, NULL, true, true);
86852-}
86853-
86854-static inline void
86855-increment_recent_count(tsd_t *tsd, prof_tctx_t *tctx) {
86856-	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
86857-	++tctx->recent_count;
86858-	assert(tctx->recent_count > 0);
86859-}
86860-
86861-bool
86862-prof_recent_alloc_prepare(tsd_t *tsd, prof_tctx_t *tctx) {
86863-	cassert(config_prof);
86864-	assert(opt_prof && prof_booted);
86865-	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
86866-	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
86867-
86868-	/*
86869-	 * Check whether last-N mode is turned on without trying to acquire the
86870-	 * lock, so as to optimize for the following two scenarios:
86871-	 * (1) Last-N mode is switched off;
86872-	 * (2) Dumping, during which last-N mode is temporarily turned off so
86873-	 *     as not to block sampled allocations.
86874-	 */
86875-	if (prof_recent_alloc_max_get_no_lock() == 0) {
86876-		return false;
86877-	}
86878-
86879-	/*
86880-	 * Increment recent_count to hold the tctx so that it won't be gone
86881-	 * even after tctx->tdata->lock is released.  This acts as a
86882-	 * "placeholder"; the real recording of the allocation requires a lock
86883-	 * on prof_recent_alloc_mtx and is done in prof_recent_alloc (when
86884-	 * tctx->tdata->lock has been released).
86885-	 */
86886-	increment_recent_count(tsd, tctx);
86887-	return true;
86888-}
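
The prepare/record split above is a two-phase handoff: under tdata->lock the tctx is pinned by bumping recent_count, and the actual list manipulation happens later under prof_recent_alloc_mtx, with the count dropped again if recording turns out to be unnecessary. The shape of that handoff, reduced to a plain refcount (illustrative sketch with hypothetical names, not the jemalloc types):

	#include <pthread.h>
	#include <stdbool.h>

	typedef struct {
		pthread_mutex_t lock;
		int refs;               /* Keeps the object alive while > 0. */
		bool enabled_hint;      /* Read without the lock, like max != 0. */
	} pinned_t;

	/* Phase 1: called with obj->lock already held by the sampling path. */
	static bool
	record_prepare(pinned_t *obj) {
		if (!obj->enabled_hint) {
			return false;   /* Fast path: feature off, take no pin. */
		}
		obj->refs++;            /* Pin so the object survives unlock. */
		return true;
	}

	/* Phase 2: runs later, after obj->lock was released and the record
	 * has (or has not) been added under the separate list lock. */
	static void
	record_finish(pinned_t *obj, bool recorded) {
		pthread_mutex_lock(&obj->lock);
		if (!recorded) {
			obj->refs--;    /* Roll back the pin from phase 1. */
		}
		pthread_mutex_unlock(&obj->lock);
		/* A full implementation would destroy obj once refs hits 0. */
	}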
86889-
86890-static void
86891-decrement_recent_count(tsd_t *tsd, prof_tctx_t *tctx) {
86892-	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
86893-	assert(tctx != NULL);
86894-	malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
86895-	assert(tctx->recent_count > 0);
86896-	--tctx->recent_count;
86897-	prof_tctx_try_destroy(tsd, tctx);
86898-}
86899-
86900-static inline edata_t *
86901-prof_recent_alloc_edata_get_no_lock(const prof_recent_t *n) {
86902-	return (edata_t *)atomic_load_p(&n->alloc_edata, ATOMIC_ACQUIRE);
86903-}
86904-
86905-edata_t *
86906-prof_recent_alloc_edata_get_no_lock_test(const prof_recent_t *n) {
86907-	cassert(config_prof);
86908-	return prof_recent_alloc_edata_get_no_lock(n);
86909-}
86910-
86911-static inline edata_t *
86912-prof_recent_alloc_edata_get(tsd_t *tsd, const prof_recent_t *n) {
86913-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
86914-	return prof_recent_alloc_edata_get_no_lock(n);
86915-}
86916-
86917-static void
86918-prof_recent_alloc_edata_set(tsd_t *tsd, prof_recent_t *n, edata_t *edata) {
86919-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
86920-	atomic_store_p(&n->alloc_edata, edata, ATOMIC_RELEASE);
86921-}
86922-
86923-void
86924-edata_prof_recent_alloc_init(edata_t *edata) {
86925-	cassert(config_prof);
86926-	edata_prof_recent_alloc_set_dont_call_directly(edata, NULL);
86927-}
86928-
86929-static inline prof_recent_t *
86930-edata_prof_recent_alloc_get_no_lock(const edata_t *edata) {
86931-	cassert(config_prof);
86932-	return edata_prof_recent_alloc_get_dont_call_directly(edata);
86933-}
86934-
86935-prof_recent_t *
86936-edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata) {
86937-	cassert(config_prof);
86938-	return edata_prof_recent_alloc_get_no_lock(edata);
86939-}
86940-
86941-static inline prof_recent_t *
86942-edata_prof_recent_alloc_get(tsd_t *tsd, const edata_t *edata) {
86943-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
86944-	prof_recent_t *recent_alloc =
86945-	    edata_prof_recent_alloc_get_no_lock(edata);
86946-	assert(recent_alloc == NULL ||
86947-	    prof_recent_alloc_edata_get(tsd, recent_alloc) == edata);
86948-	return recent_alloc;
86949-}
86950-
86951-static prof_recent_t *
86952-edata_prof_recent_alloc_update_internal(tsd_t *tsd, edata_t *edata,
86953-    prof_recent_t *recent_alloc) {
86954-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
86955-	prof_recent_t *old_recent_alloc =
86956-	    edata_prof_recent_alloc_get(tsd, edata);
86957-	edata_prof_recent_alloc_set_dont_call_directly(edata, recent_alloc);
86958-	return old_recent_alloc;
86959-}
86960-
86961-static void
86962-edata_prof_recent_alloc_set(tsd_t *tsd, edata_t *edata,
86963-    prof_recent_t *recent_alloc) {
86964-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
86965-	assert(recent_alloc != NULL);
86966-	prof_recent_t *old_recent_alloc =
86967-	    edata_prof_recent_alloc_update_internal(tsd, edata, recent_alloc);
86968-	assert(old_recent_alloc == NULL);
86969-	prof_recent_alloc_edata_set(tsd, recent_alloc, edata);
86970-}
86971-
86972-static void
86973-edata_prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata,
86974-    prof_recent_t *recent_alloc) {
86975-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
86976-	assert(recent_alloc != NULL);
86977-	prof_recent_t *old_recent_alloc =
86978-	    edata_prof_recent_alloc_update_internal(tsd, edata, NULL);
86979-	assert(old_recent_alloc == recent_alloc);
86980-	assert(edata == prof_recent_alloc_edata_get(tsd, recent_alloc));
86981-	prof_recent_alloc_edata_set(tsd, recent_alloc, NULL);
86982-}
86983-
86984-/*
86985- * This function should be called right before an allocation is released, so
86986- * that the associated recent allocation record can contain the following
86987- * information:
86988- * (1) The allocation is released;
86989- * (2) The time of the deallocation; and
86990- * (3) The prof_tctx associated with the deallocation.
86991- */
86992-void
86993-prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata) {
86994-	cassert(config_prof);
86995-	/*
86996-	 * Check whether the recent allocation record still exists without
86997-	 * trying to acquire the lock.
86998-	 */
86999-	if (edata_prof_recent_alloc_get_no_lock(edata) == NULL) {
87000-		return;
87001-	}
87002-
87003-	prof_tctx_t *dalloc_tctx = prof_tctx_create(tsd);
87004-	/*
87005-	 * In case dalloc_tctx is NULL, e.g. due to OOM, we will not record the
87006-	 * deallocation time / tctx, which is handled later, after we check
87007-	 * again when holding the lock.
87008-	 */
87009-
87010-	if (dalloc_tctx != NULL) {
87011-		malloc_mutex_lock(tsd_tsdn(tsd), dalloc_tctx->tdata->lock);
87012-		increment_recent_count(tsd, dalloc_tctx);
87013-		dalloc_tctx->prepared = false;
87014-		malloc_mutex_unlock(tsd_tsdn(tsd), dalloc_tctx->tdata->lock);
87015-	}
87016-
87017-	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
87018-	/* Check again after acquiring the lock.  */
87019-	prof_recent_t *recent = edata_prof_recent_alloc_get(tsd, edata);
87020-	if (recent != NULL) {
87021-		assert(nstime_equals_zero(&recent->dalloc_time));
87022-		assert(recent->dalloc_tctx == NULL);
87023-		if (dalloc_tctx != NULL) {
87024-			nstime_prof_update(&recent->dalloc_time);
87025-			recent->dalloc_tctx = dalloc_tctx;
87026-			dalloc_tctx = NULL;
87027-		}
87028-		edata_prof_recent_alloc_reset(tsd, edata, recent);
87029-	}
87030-	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
87031-
87032-	if (dalloc_tctx != NULL) {
87033-		/* We lost the race - the allocation record was just gone. */
87034-		decrement_recent_count(tsd, dalloc_tctx);
87035-	}
87036-}
87037-
87038-static void
87039-prof_recent_alloc_evict_edata(tsd_t *tsd, prof_recent_t *recent_alloc) {
87040-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
87041-	edata_t *edata = prof_recent_alloc_edata_get(tsd, recent_alloc);
87042-	if (edata != NULL) {
87043-		edata_prof_recent_alloc_reset(tsd, edata, recent_alloc);
87044-	}
87045-}
87046-
87047-static bool
87048-prof_recent_alloc_is_empty(tsd_t *tsd) {
87049-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
87050-	if (ql_empty(&prof_recent_alloc_list)) {
87051-		assert(prof_recent_alloc_count == 0);
87052-		return true;
87053-	} else {
87054-		assert(prof_recent_alloc_count > 0);
87055-		return false;
87056-	}
87057-}
87058-
87059-static void
87060-prof_recent_alloc_assert_count(tsd_t *tsd) {
87061-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
87062-	if (!config_debug) {
87063-		return;
87064-	}
87065-	ssize_t count = 0;
87066-	prof_recent_t *n;
87067-	ql_foreach(n, &prof_recent_alloc_list, link) {
87068-		++count;
87069-	}
87070-	assert(count == prof_recent_alloc_count);
87071-	assert(prof_recent_alloc_max_get(tsd) == -1 ||
87072-	    count <= prof_recent_alloc_max_get(tsd));
87073-}
87074-
87075-void
87076-prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize) {
87077-	cassert(config_prof);
87078-	assert(edata != NULL);
87079-	prof_tctx_t *tctx = edata_prof_tctx_get(edata);
87080-
87081-	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
87082-	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
87083-	prof_recent_alloc_assert_count(tsd);
87084-
87085-	/*
87086-	 * Reserve a new prof_recent_t node if needed.  If needed, we release
87087-	 * the prof_recent_alloc_mtx lock and allocate.  Then, rather than
87088-	 * immediately checking for OOM, we regain the lock and try to make use
87089-	 * of the reserve node if needed.  There are six scenarios:
87090-	 *
87091-	 *          \ now | no need | need but OOMed | need and allocated
87092-	 *     later \    |         |                |
87093-	 *    ------------------------------------------------------------
87094-	 *     no need    |   (1)   |      (2)       |         (3)
87095-	 *    ------------------------------------------------------------
87096-	 *     need       |   (4)   |      (5)       |         (6)
87097-	 *
87098-	 * First, "(4)" never happens, because we don't release the lock in the
87099-	 * middle if there's no need for a new node; in such cases "(1)" always
87100-	 * takes place, which is trivial.
87101-	 *
87102-	 * Out of the remaining four scenarios, "(6)" is the common case and is
87103-	 * trivial.  "(5)" is also trivial, in which case we'll rollback the
87104-	 * effect of prof_recent_alloc_prepare() as expected.
87105-	 *
87106-	 * "(2)" / "(3)" occurs when the need for a new node is gone after we
87107-	 * regain the lock.  If the new node is successfully allocated, i.e. in
87108-	 * the case of "(3)", we'll release it in the end; otherwise, i.e. in
87109-	 * the case of "(2)", we do nothing - we're lucky that the OOM ends up
87110-	 * doing no harm at all.
87111-	 *
87112-	 * Therefore, the only performance cost of the "release lock" ->
87113-	 * "allocate" -> "regain lock" design is the "(3)" case, but it happens
87114-	 * very rarely, so the cost is relatively small compared to the gain of
87115-	 * not having to have the lock order of prof_recent_alloc_mtx above all
87116-	 * the allocation locks.
87117-	 */
87118-	prof_recent_t *reserve = NULL;
87119-	if (prof_recent_alloc_max_get(tsd) == -1 ||
87120-	    prof_recent_alloc_count < prof_recent_alloc_max_get(tsd)) {
87121-		assert(prof_recent_alloc_max_get(tsd) != 0);
87122-		malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
87123-		reserve = prof_recent_allocate_node(tsd_tsdn(tsd));
87124-		malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
87125-		prof_recent_alloc_assert_count(tsd);
87126-	}
87127-
87128-	if (prof_recent_alloc_max_get(tsd) == 0) {
87129-		assert(prof_recent_alloc_is_empty(tsd));
87130-		goto label_rollback;
87131-	}
87132-
87133-	prof_tctx_t *old_alloc_tctx, *old_dalloc_tctx;
87134-	if (prof_recent_alloc_count == prof_recent_alloc_max_get(tsd)) {
87135-		/* If upper limit is reached, rotate the head. */
87136-		assert(prof_recent_alloc_max_get(tsd) != -1);
87137-		assert(!prof_recent_alloc_is_empty(tsd));
87138-		prof_recent_t *head = ql_first(&prof_recent_alloc_list);
87139-		old_alloc_tctx = head->alloc_tctx;
87140-		assert(old_alloc_tctx != NULL);
87141-		old_dalloc_tctx = head->dalloc_tctx;
87142-		prof_recent_alloc_evict_edata(tsd, head);
87143-		ql_rotate(&prof_recent_alloc_list, link);
87144-	} else {
87145-		/* Otherwise make use of the new node. */
87146-		assert(prof_recent_alloc_max_get(tsd) == -1 ||
87147-		    prof_recent_alloc_count < prof_recent_alloc_max_get(tsd));
87148-		if (reserve == NULL) {
87149-			goto label_rollback;
87150-		}
87151-		ql_elm_new(reserve, link);
87152-		ql_tail_insert(&prof_recent_alloc_list, reserve, link);
87153-		reserve = NULL;
87154-		old_alloc_tctx = NULL;
87155-		old_dalloc_tctx = NULL;
87156-		++prof_recent_alloc_count;
87157-	}
87158-
87159-	/* Fill content into the tail node. */
87160-	prof_recent_t *tail = ql_last(&prof_recent_alloc_list, link);
87161-	assert(tail != NULL);
87162-	tail->size = size;
87163-	tail->usize = usize;
87164-	nstime_copy(&tail->alloc_time, edata_prof_alloc_time_get(edata));
87165-	tail->alloc_tctx = tctx;
87166-	nstime_init_zero(&tail->dalloc_time);
87167-	tail->dalloc_tctx = NULL;
87168-	edata_prof_recent_alloc_set(tsd, edata, tail);
87169-
87170-	assert(!prof_recent_alloc_is_empty(tsd));
87171-	prof_recent_alloc_assert_count(tsd);
87172-	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
87173-
87174-	if (reserve != NULL) {
87175-		prof_recent_free_node(tsd_tsdn(tsd), reserve);
87176-	}
87177-
87178-	/*
87179-	 * Asynchronously handle the tctx of the old node, so that there's no
87180-	 * simultaneous holding of prof_recent_alloc_mtx and tdata->lock.
87181-	 * In the worst case this may delay the tctx release but it's better
87182-	 * than holding prof_recent_alloc_mtx for longer.
87183-	 */
87184-	if (old_alloc_tctx != NULL) {
87185-		decrement_recent_count(tsd, old_alloc_tctx);
87186-	}
87187-	if (old_dalloc_tctx != NULL) {
87188-		decrement_recent_count(tsd, old_dalloc_tctx);
87189-	}
87190-	return;
87191-
87192-label_rollback:
87193-	assert(edata_prof_recent_alloc_get(tsd, edata) == NULL);
87194-	prof_recent_alloc_assert_count(tsd);
87195-	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
87196-	if (reserve != NULL) {
87197-		prof_recent_free_node(tsd_tsdn(tsd), reserve);
87198-	}
87199-	decrement_recent_count(tsd, tctx);
87200-}
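
The six-scenario comment inside prof_recent_alloc() describes a lock-ordering trick: rather than allocating while holding prof_recent_alloc_mtx (which would force that mutex to rank above every allocator lock), the function drops the mutex, allocates a spare node, re-takes the mutex, and then either links the spare in or throws it away if the world changed. A minimal sketch of the same reserve-node dance over a toy list (pthreads, hypothetical names):

	#include <pthread.h>
	#include <stddef.h>
	#include <stdlib.h>

	typedef struct item_s {
		struct item_s *next;
		int value;
	} item_t;

	static pthread_mutex_t list_mtx = PTHREAD_MUTEX_INITIALIZER;
	static item_t *list_head = NULL, *list_tail = NULL;
	static size_t list_len = 0, list_max = 16;

	static void
	record(int value) {
		item_t *reserve = NULL;

		pthread_mutex_lock(&list_mtx);
		if (list_len < list_max) {
			/* Might need a new node: allocate without the list lock
			 * so it never has to rank above the allocator's locks. */
			pthread_mutex_unlock(&list_mtx);
			reserve = malloc(sizeof(*reserve));
			pthread_mutex_lock(&list_mtx);
		}
		if (list_len < list_max && reserve != NULL) {
			/* Still needed and allocation succeeded: append it. */
			reserve->value = value;
			reserve->next = NULL;
			if (list_tail == NULL) {
				list_head = reserve;
			} else {
				list_tail->next = reserve;
			}
			list_tail = reserve;
			list_len++;
			reserve = NULL;
		}
		/* Otherwise the full version rotates and reuses the oldest node,
		 * or simply drops the record on OOM. */
		pthread_mutex_unlock(&list_mtx);

		free(reserve);  /* Unused reserve ("scenario 3"); free(NULL) is a no-op. */
	}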
87201-
87202-ssize_t
87203-prof_recent_alloc_max_ctl_read() {
87204-	cassert(config_prof);
87205-	/* Don't bother to acquire the lock. */
87206-	return prof_recent_alloc_max_get_no_lock();
87207-}
87208-
87209-static void
87210-prof_recent_alloc_restore_locked(tsd_t *tsd, prof_recent_list_t *to_delete) {
87211-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
87212-	ssize_t max = prof_recent_alloc_max_get(tsd);
87213-	if (max == -1 || prof_recent_alloc_count <= max) {
87214-		/* Easy case - no need to alter the list. */
87215-		ql_new(to_delete);
87216-		prof_recent_alloc_assert_count(tsd);
87217-		return;
87218-	}
87219-
87220-	prof_recent_t *node;
87221-	ql_foreach(node, &prof_recent_alloc_list, link) {
87222-		if (prof_recent_alloc_count == max) {
87223-			break;
87224-		}
87225-		prof_recent_alloc_evict_edata(tsd, node);
87226-		--prof_recent_alloc_count;
87227-	}
87228-	assert(prof_recent_alloc_count == max);
87229-
87230-	ql_move(to_delete, &prof_recent_alloc_list);
87231-	if (max == 0) {
87232-		assert(node == NULL);
87233-	} else {
87234-		assert(node != NULL);
87235-		ql_split(to_delete, node, &prof_recent_alloc_list, link);
87236-	}
87237-	assert(!ql_empty(to_delete));
87238-	prof_recent_alloc_assert_count(tsd);
87239-}
87240-
87241-static void
87242-prof_recent_alloc_async_cleanup(tsd_t *tsd, prof_recent_list_t *to_delete) {
87243-	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_dump_mtx);
87244-	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
87245-	while (!ql_empty(to_delete)) {
87246-		prof_recent_t *node = ql_first(to_delete);
87247-		ql_remove(to_delete, node, link);
87248-		decrement_recent_count(tsd, node->alloc_tctx);
87249-		if (node->dalloc_tctx != NULL) {
87250-			decrement_recent_count(tsd, node->dalloc_tctx);
87251-		}
87252-		prof_recent_free_node(tsd_tsdn(tsd), node);
87253-	}
87254-}
87255-
87256-ssize_t
87257-prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max) {
87258-	cassert(config_prof);
87259-	assert(max >= -1);
87260-	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
87261-	prof_recent_alloc_assert_count(tsd);
87262-	const ssize_t old_max = prof_recent_alloc_max_update(tsd, max);
87263-	prof_recent_list_t to_delete;
87264-	prof_recent_alloc_restore_locked(tsd, &to_delete);
87265-	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
87266-	prof_recent_alloc_async_cleanup(tsd, &to_delete);
87267-	return old_max;
87268-}
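
Lowering the limit follows the same lock hygiene: surplus nodes are unlinked into a private list while prof_recent_alloc_mtx is held, but the refcount drops and frees happen only after the mutex is released, because those steps take tdata->lock. Continuing the toy list sketch above:

	static void
	set_max(size_t new_max) {
		item_t *victims = NULL;

		pthread_mutex_lock(&list_mtx);
		list_max = new_max;
		while (list_len > list_max) {
			/* Unlink the oldest node into a private list; no frees yet. */
			item_t *n = list_head;
			list_head = n->next;
			if (list_head == NULL) {
				list_tail = NULL;
			}
			list_len--;
			n->next = victims;
			victims = n;
		}
		pthread_mutex_unlock(&list_mtx);

		/* The expensive part (refcount drops, frees) runs unlocked. */
		while (victims != NULL) {
			item_t *next = victims->next;
			free(victims);
			victims = next;
		}
	}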
87269-
87270-static void
87271-prof_recent_alloc_dump_bt(emitter_t *emitter, prof_tctx_t *tctx) {
87272-	char bt_buf[2 * sizeof(intptr_t) + 3];
87273-	char *s = bt_buf;
87274-	assert(tctx != NULL);
87275-	prof_bt_t *bt = &tctx->gctx->bt;
87276-	for (size_t i = 0; i < bt->len; ++i) {
87277-		malloc_snprintf(bt_buf, sizeof(bt_buf), "%p", bt->vec[i]);
87278-		emitter_json_value(emitter, emitter_type_string, &s);
87279-	}
87280-}
87281-
87282-static void
87283-prof_recent_alloc_dump_node(emitter_t *emitter, prof_recent_t *node) {
87284-	emitter_json_object_begin(emitter);
87285-
87286-	emitter_json_kv(emitter, "size", emitter_type_size, &node->size);
87287-	emitter_json_kv(emitter, "usize", emitter_type_size, &node->usize);
87288-	bool released = prof_recent_alloc_edata_get_no_lock(node) == NULL;
87289-	emitter_json_kv(emitter, "released", emitter_type_bool, &released);
87290-
87291-	emitter_json_kv(emitter, "alloc_thread_uid", emitter_type_uint64,
87292-	    &node->alloc_tctx->thr_uid);
87293-	prof_tdata_t *alloc_tdata = node->alloc_tctx->tdata;
87294-	assert(alloc_tdata != NULL);
87295-	if (alloc_tdata->thread_name != NULL) {
87296-		emitter_json_kv(emitter, "alloc_thread_name",
87297-		    emitter_type_string, &alloc_tdata->thread_name);
87298-	}
87299-	uint64_t alloc_time_ns = nstime_ns(&node->alloc_time);
87300-	emitter_json_kv(emitter, "alloc_time", emitter_type_uint64,
87301-	    &alloc_time_ns);
87302-	emitter_json_array_kv_begin(emitter, "alloc_trace");
87303-	prof_recent_alloc_dump_bt(emitter, node->alloc_tctx);
87304-	emitter_json_array_end(emitter);
87305-
87306-	if (released && node->dalloc_tctx != NULL) {
87307-		emitter_json_kv(emitter, "dalloc_thread_uid",
87308-		    emitter_type_uint64, &node->dalloc_tctx->thr_uid);
87309-		prof_tdata_t *dalloc_tdata = node->dalloc_tctx->tdata;
87310-		assert(dalloc_tdata != NULL);
87311-		if (dalloc_tdata->thread_name != NULL) {
87312-			emitter_json_kv(emitter, "dalloc_thread_name",
87313-			    emitter_type_string, &dalloc_tdata->thread_name);
87314-		}
87315-		assert(!nstime_equals_zero(&node->dalloc_time));
87316-		uint64_t dalloc_time_ns = nstime_ns(&node->dalloc_time);
87317-		emitter_json_kv(emitter, "dalloc_time", emitter_type_uint64,
87318-		    &dalloc_time_ns);
87319-		emitter_json_array_kv_begin(emitter, "dalloc_trace");
87320-		prof_recent_alloc_dump_bt(emitter, node->dalloc_tctx);
87321-		emitter_json_array_end(emitter);
87322-	}
87323-
87324-	emitter_json_object_end(emitter);
87325-}
87326-
87327-#define PROF_RECENT_PRINT_BUFSIZE 65536
87328-JEMALLOC_COLD
87329-void
87330-prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque) {
87331-	cassert(config_prof);
87332-	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_dump_mtx);
87333-	buf_writer_t buf_writer;
87334-	buf_writer_init(tsd_tsdn(tsd), &buf_writer, write_cb, cbopaque, NULL,
87335-	    PROF_RECENT_PRINT_BUFSIZE);
87336-	emitter_t emitter;
87337-	emitter_init(&emitter, emitter_output_json_compact, buf_writer_cb,
87338-	    &buf_writer);
87339-	prof_recent_list_t temp_list;
87340-
87341-	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
87342-	prof_recent_alloc_assert_count(tsd);
87343-	ssize_t dump_max = prof_recent_alloc_max_get(tsd);
87344-	ql_move(&temp_list, &prof_recent_alloc_list);
87345-	ssize_t dump_count = prof_recent_alloc_count;
87346-	prof_recent_alloc_count = 0;
87347-	prof_recent_alloc_assert_count(tsd);
87348-	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
87349-
87350-	emitter_begin(&emitter);
87351-	uint64_t sample_interval = (uint64_t)1U << lg_prof_sample;
87352-	emitter_json_kv(&emitter, "sample_interval", emitter_type_uint64,
87353-	    &sample_interval);
87354-	emitter_json_kv(&emitter, "recent_alloc_max", emitter_type_ssize,
87355-	    &dump_max);
87356-	emitter_json_array_kv_begin(&emitter, "recent_alloc");
87357-	prof_recent_t *node;
87358-	ql_foreach(node, &temp_list, link) {
87359-		prof_recent_alloc_dump_node(&emitter, node);
87360-	}
87361-	emitter_json_array_end(&emitter);
87362-	emitter_end(&emitter);
87363-
87364-	malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
87365-	prof_recent_alloc_assert_count(tsd);
87366-	ql_concat(&temp_list, &prof_recent_alloc_list, link);
87367-	ql_move(&prof_recent_alloc_list, &temp_list);
87368-	prof_recent_alloc_count += dump_count;
87369-	prof_recent_alloc_restore_locked(tsd, &temp_list);
87370-	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
87371-
87372-	buf_writer_terminate(tsd_tsdn(tsd), &buf_writer);
87373-	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_dump_mtx);
87374-
87375-	prof_recent_alloc_async_cleanup(tsd, &temp_list);
87376-}
87377-#undef PROF_RECENT_PRINT_BUFSIZE
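
prof_recent_alloc_dump() avoids holding prof_recent_alloc_mtx while it serializes: it moves the whole list into a local under the lock, drops the lock to emit JSON, then re-acquires the lock and splices the dumped records back together with anything recorded in the meantime before trimming to the (possibly changed) maximum. A compact sketch of the move-out/merge-back step, again continuing the toy list above:

	static void
	dump_all(void (*emit)(const item_t *)) {
		/* Steal the whole list so producers are blocked only briefly. */
		pthread_mutex_lock(&list_mtx);
		item_t *snap_head = list_head, *snap_tail = list_tail;
		size_t snap_len = list_len;
		list_head = list_tail = NULL;
		list_len = 0;
		pthread_mutex_unlock(&list_mtx);

		for (const item_t *n = snap_head; n != NULL; n = n->next) {
			emit(n);        /* Slow serialization runs without the lock. */
		}

		/* Splice the dumped records back in front of anything recorded
		 * while the lock was dropped, and restore the count.  The real
		 * code then re-trims to the current maximum, since the limit may
		 * have been lowered during the dump. */
		pthread_mutex_lock(&list_mtx);
		if (snap_tail != NULL) {
			snap_tail->next = list_head;
			list_head = snap_head;
			if (list_tail == NULL) {
				list_tail = snap_tail;
			}
			list_len += snap_len;
		}
		pthread_mutex_unlock(&list_mtx);
	}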
87378-
87379-bool
87380-prof_recent_init() {
87381-	cassert(config_prof);
87382-	prof_recent_alloc_max_init();
87383-
87384-	if (malloc_mutex_init(&prof_recent_alloc_mtx, "prof_recent_alloc",
87385-	    WITNESS_RANK_PROF_RECENT_ALLOC, malloc_mutex_rank_exclusive)) {
87386-		return true;
87387-	}
87388-
87389-	if (malloc_mutex_init(&prof_recent_dump_mtx, "prof_recent_dump",
87390-	    WITNESS_RANK_PROF_RECENT_DUMP, malloc_mutex_rank_exclusive)) {
87391-		return true;
87392-	}
87393-
87394-	ql_new(&prof_recent_alloc_list);
87395-
87396-	return false;
87397-}
87398diff --git a/jemalloc/src/prof_stats.c b/jemalloc/src/prof_stats.c
87399deleted file mode 100644
87400index 5d1a506..0000000
87401--- a/jemalloc/src/prof_stats.c
87402+++ /dev/null
87403@@ -1,57 +0,0 @@
87404-#include "jemalloc/internal/jemalloc_preamble.h"
87405-#include "jemalloc/internal/jemalloc_internal_includes.h"
87406-
87407-#include "jemalloc/internal/prof_stats.h"
87408-
87409-bool opt_prof_stats = false;
87410-malloc_mutex_t prof_stats_mtx;
87411-static prof_stats_t prof_stats_live[PROF_SC_NSIZES];
87412-static prof_stats_t prof_stats_accum[PROF_SC_NSIZES];
87413-
87414-static void
87415-prof_stats_enter(tsd_t *tsd, szind_t ind) {
87416-	assert(opt_prof && opt_prof_stats);
87417-	assert(ind < SC_NSIZES);
87418-	malloc_mutex_lock(tsd_tsdn(tsd), &prof_stats_mtx);
87419-}
87420-
87421-static void
87422-prof_stats_leave(tsd_t *tsd) {
87423-	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_stats_mtx);
87424-}
87425-
87426-void
87427-prof_stats_inc(tsd_t *tsd, szind_t ind, size_t size) {
87428-	cassert(config_prof);
87429-	prof_stats_enter(tsd, ind);
87430-	prof_stats_live[ind].req_sum += size;
87431-	prof_stats_live[ind].count++;
87432-	prof_stats_accum[ind].req_sum += size;
87433-	prof_stats_accum[ind].count++;
87434-	prof_stats_leave(tsd);
87435-}
87436-
87437-void
87438-prof_stats_dec(tsd_t *tsd, szind_t ind, size_t size) {
87439-	cassert(config_prof);
87440-	prof_stats_enter(tsd, ind);
87441-	prof_stats_live[ind].req_sum -= size;
87442-	prof_stats_live[ind].count--;
87443-	prof_stats_leave(tsd);
87444-}
87445-
87446-void
87447-prof_stats_get_live(tsd_t *tsd, szind_t ind, prof_stats_t *stats) {
87448-	cassert(config_prof);
87449-	prof_stats_enter(tsd, ind);
87450-	memcpy(stats, &prof_stats_live[ind], sizeof(prof_stats_t));
87451-	prof_stats_leave(tsd);
87452-}
87453-
87454-void
87455-prof_stats_get_accum(tsd_t *tsd, szind_t ind, prof_stats_t *stats) {
87456-	cassert(config_prof);
87457-	prof_stats_enter(tsd, ind);
87458-	memcpy(stats, &prof_stats_accum[ind], sizeof(prof_stats_t));
87459-	prof_stats_leave(tsd);
87460-}
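
prof_stats.c keeps two parallel tables: a live view that is incremented on allocation and decremented on free, and a cumulative view that only ever grows. The distinction in miniature (plain C with a pthread mutex, no jemalloc types):

	#include <pthread.h>
	#include <stddef.h>

	typedef struct {
		size_t req_sum;         /* Sum of requested sizes. */
		size_t count;           /* Number of objects. */
	} stats_t;

	static stats_t live;            /* Currently-allocated objects only. */
	static stats_t accum;           /* Everything ever allocated; never shrinks. */
	static pthread_mutex_t stats_mtx = PTHREAD_MUTEX_INITIALIZER;

	static void
	stats_alloc(size_t size) {
		pthread_mutex_lock(&stats_mtx);
		live.req_sum += size;
		live.count++;
		accum.req_sum += size;
		accum.count++;
		pthread_mutex_unlock(&stats_mtx);
	}

	static void
	stats_free(size_t size) {
		pthread_mutex_lock(&stats_mtx);
		live.req_sum -= size;
		live.count--;           /* accum is deliberately left untouched. */
		pthread_mutex_unlock(&stats_mtx);
	}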
87461diff --git a/jemalloc/src/prof_sys.c b/jemalloc/src/prof_sys.c
87462deleted file mode 100644
87463index b5f1f5b..0000000
87464--- a/jemalloc/src/prof_sys.c
87465+++ /dev/null
87466@@ -1,669 +0,0 @@
87467-#define JEMALLOC_PROF_SYS_C_
87468-#include "jemalloc/internal/jemalloc_preamble.h"
87469-#include "jemalloc/internal/jemalloc_internal_includes.h"
87470-
87471-#include "jemalloc/internal/buf_writer.h"
87472-#include "jemalloc/internal/ctl.h"
87473-#include "jemalloc/internal/prof_data.h"
87474-#include "jemalloc/internal/prof_sys.h"
87475-
87476-#ifdef JEMALLOC_PROF_LIBUNWIND
87477-#define UNW_LOCAL_ONLY
87478-#include <libunwind.h>
87479-#endif
87480-
87481-#ifdef JEMALLOC_PROF_LIBGCC
87482-/*
87483- * We have a circular dependency -- jemalloc_internal.h tells us if we should
87484- * use libgcc's unwinding functionality, but after we've included that, we've
87485- * already hooked _Unwind_Backtrace.  We'll temporarily disable hooking.
87486- */
87487-#undef _Unwind_Backtrace
87488-#include <unwind.h>
87489-#define _Unwind_Backtrace JEMALLOC_TEST_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
87490-#endif
87491-
87492-/******************************************************************************/
87493-
87494-malloc_mutex_t prof_dump_filename_mtx;
87495-
87496-bool prof_do_mock = false;
87497-
87498-static uint64_t prof_dump_seq;
87499-static uint64_t prof_dump_iseq;
87500-static uint64_t prof_dump_mseq;
87501-static uint64_t prof_dump_useq;
87502-
87503-static char *prof_prefix = NULL;
87504-
87505-/* The fallback allocator profiling functionality will use. */
87506-base_t *prof_base;
87507-
87508-void
87509-bt_init(prof_bt_t *bt, void **vec) {
87510-	cassert(config_prof);
87511-
87512-	bt->vec = vec;
87513-	bt->len = 0;
87514-}
87515-
87516-#ifdef JEMALLOC_PROF_LIBUNWIND
87517-static void
87518-prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
87519-	int nframes;
87520-
87521-	cassert(config_prof);
87522-	assert(*len == 0);
87523-	assert(vec != NULL);
87524-	assert(max_len == PROF_BT_MAX);
87525-
87526-	nframes = unw_backtrace(vec, PROF_BT_MAX);
87527-	if (nframes <= 0) {
87528-		return;
87529-	}
87530-	*len = nframes;
87531-}
87532-#elif (defined(JEMALLOC_PROF_LIBGCC))
87533-static _Unwind_Reason_Code
87534-prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
87535-	cassert(config_prof);
87536-
87537-	return _URC_NO_REASON;
87538-}
87539-
87540-static _Unwind_Reason_Code
87541-prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
87542-	prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
87543-	void *ip;
87544-
87545-	cassert(config_prof);
87546-
87547-	ip = (void *)_Unwind_GetIP(context);
87548-	if (ip == NULL) {
87549-		return _URC_END_OF_STACK;
87550-	}
87551-	data->vec[*data->len] = ip;
87552-	(*data->len)++;
87553-	if (*data->len == data->max) {
87554-		return _URC_END_OF_STACK;
87555-	}
87556-
87557-	return _URC_NO_REASON;
87558-}
87559-
87560-static void
87561-prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
87562-	prof_unwind_data_t data = {vec, len, max_len};
87563-
87564-	cassert(config_prof);
87565-	assert(vec != NULL);
87566-	assert(max_len == PROF_BT_MAX);
87567-
87568-	_Unwind_Backtrace(prof_unwind_callback, &data);
87569-}
87570-#elif (defined(JEMALLOC_PROF_GCC))
87571-static void
87572-prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
87573-#define BT_FRAME(i)							\
87574-	if ((i) < max_len) {						\
87575-		void *p;						\
87576-		if (__builtin_frame_address(i) == 0) {			\
87577-			return;						\
87578-		}							\
87579-		p = __builtin_return_address(i);			\
87580-		if (p == NULL) {					\
87581-			return;						\
87582-		}							\
87583-		vec[(i)] = p;						\
87584-		*len = (i) + 1;						\
87585-	} else {							\
87586-		return;							\
87587-	}
87588-
87589-	cassert(config_prof);
87590-	assert(vec != NULL);
87591-	assert(max_len == PROF_BT_MAX);
87592-
87593-	BT_FRAME(0)
87594-	BT_FRAME(1)
87595-	BT_FRAME(2)
87596-	BT_FRAME(3)
87597-	BT_FRAME(4)
87598-	BT_FRAME(5)
87599-	BT_FRAME(6)
87600-	BT_FRAME(7)
87601-	BT_FRAME(8)
87602-	BT_FRAME(9)
87603-
87604-	BT_FRAME(10)
87605-	BT_FRAME(11)
87606-	BT_FRAME(12)
87607-	BT_FRAME(13)
87608-	BT_FRAME(14)
87609-	BT_FRAME(15)
87610-	BT_FRAME(16)
87611-	BT_FRAME(17)
87612-	BT_FRAME(18)
87613-	BT_FRAME(19)
87614-
87615-	BT_FRAME(20)
87616-	BT_FRAME(21)
87617-	BT_FRAME(22)
87618-	BT_FRAME(23)
87619-	BT_FRAME(24)
87620-	BT_FRAME(25)
87621-	BT_FRAME(26)
87622-	BT_FRAME(27)
87623-	BT_FRAME(28)
87624-	BT_FRAME(29)
87625-
87626-	BT_FRAME(30)
87627-	BT_FRAME(31)
87628-	BT_FRAME(32)
87629-	BT_FRAME(33)
87630-	BT_FRAME(34)
87631-	BT_FRAME(35)
87632-	BT_FRAME(36)
87633-	BT_FRAME(37)
87634-	BT_FRAME(38)
87635-	BT_FRAME(39)
87636-
87637-	BT_FRAME(40)
87638-	BT_FRAME(41)
87639-	BT_FRAME(42)
87640-	BT_FRAME(43)
87641-	BT_FRAME(44)
87642-	BT_FRAME(45)
87643-	BT_FRAME(46)
87644-	BT_FRAME(47)
87645-	BT_FRAME(48)
87646-	BT_FRAME(49)
87647-
87648-	BT_FRAME(50)
87649-	BT_FRAME(51)
87650-	BT_FRAME(52)
87651-	BT_FRAME(53)
87652-	BT_FRAME(54)
87653-	BT_FRAME(55)
87654-	BT_FRAME(56)
87655-	BT_FRAME(57)
87656-	BT_FRAME(58)
87657-	BT_FRAME(59)
87658-
87659-	BT_FRAME(60)
87660-	BT_FRAME(61)
87661-	BT_FRAME(62)
87662-	BT_FRAME(63)
87663-	BT_FRAME(64)
87664-	BT_FRAME(65)
87665-	BT_FRAME(66)
87666-	BT_FRAME(67)
87667-	BT_FRAME(68)
87668-	BT_FRAME(69)
87669-
87670-	BT_FRAME(70)
87671-	BT_FRAME(71)
87672-	BT_FRAME(72)
87673-	BT_FRAME(73)
87674-	BT_FRAME(74)
87675-	BT_FRAME(75)
87676-	BT_FRAME(76)
87677-	BT_FRAME(77)
87678-	BT_FRAME(78)
87679-	BT_FRAME(79)
87680-
87681-	BT_FRAME(80)
87682-	BT_FRAME(81)
87683-	BT_FRAME(82)
87684-	BT_FRAME(83)
87685-	BT_FRAME(84)
87686-	BT_FRAME(85)
87687-	BT_FRAME(86)
87688-	BT_FRAME(87)
87689-	BT_FRAME(88)
87690-	BT_FRAME(89)
87691-
87692-	BT_FRAME(90)
87693-	BT_FRAME(91)
87694-	BT_FRAME(92)
87695-	BT_FRAME(93)
87696-	BT_FRAME(94)
87697-	BT_FRAME(95)
87698-	BT_FRAME(96)
87699-	BT_FRAME(97)
87700-	BT_FRAME(98)
87701-	BT_FRAME(99)
87702-
87703-	BT_FRAME(100)
87704-	BT_FRAME(101)
87705-	BT_FRAME(102)
87706-	BT_FRAME(103)
87707-	BT_FRAME(104)
87708-	BT_FRAME(105)
87709-	BT_FRAME(106)
87710-	BT_FRAME(107)
87711-	BT_FRAME(108)
87712-	BT_FRAME(109)
87713-
87714-	BT_FRAME(110)
87715-	BT_FRAME(111)
87716-	BT_FRAME(112)
87717-	BT_FRAME(113)
87718-	BT_FRAME(114)
87719-	BT_FRAME(115)
87720-	BT_FRAME(116)
87721-	BT_FRAME(117)
87722-	BT_FRAME(118)
87723-	BT_FRAME(119)
87724-
87725-	BT_FRAME(120)
87726-	BT_FRAME(121)
87727-	BT_FRAME(122)
87728-	BT_FRAME(123)
87729-	BT_FRAME(124)
87730-	BT_FRAME(125)
87731-	BT_FRAME(126)
87732-	BT_FRAME(127)
87733-#undef BT_FRAME
87734-}
87735-#else
87736-static void
87737-prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
87738-	cassert(config_prof);
87739-	not_reached();
87740-}
87741-#endif
87742-
87743-void
87744-prof_backtrace(tsd_t *tsd, prof_bt_t *bt) {
87745-	cassert(config_prof);
87746-	prof_backtrace_hook_t prof_backtrace_hook = prof_backtrace_hook_get();
87747-	assert(prof_backtrace_hook != NULL);
87748-
87749-	pre_reentrancy(tsd, NULL);
87750-	prof_backtrace_hook(bt->vec, &bt->len, PROF_BT_MAX);
87751-	post_reentrancy(tsd);
87752-}
87753-
87754-void
87755-prof_hooks_init() {
87756-	prof_backtrace_hook_set(&prof_backtrace_impl);
87757-	prof_dump_hook_set(NULL);
87758-}
87759-
87760-void
87761-prof_unwind_init() {
87762-#ifdef JEMALLOC_PROF_LIBGCC
87763-	/*
87764-	 * Cause the backtracing machinery to allocate its internal
87765-	 * state before enabling profiling.
87766-	 */
87767-	_Unwind_Backtrace(prof_unwind_init_callback, NULL);
87768-#endif
87769-}
87770-
87771-static int
87772-prof_sys_thread_name_read_impl(char *buf, size_t limit) {
87773-#if defined(JEMALLOC_HAVE_PTHREAD_GETNAME_NP)
87774-	return pthread_getname_np(pthread_self(), buf, limit);
87775-#elif defined(JEMALLOC_HAVE_PTHREAD_GET_NAME_NP)
87776-	pthread_get_name_np(pthread_self(), buf, limit);
87777-	return 0;
87778-#else
87779-	return ENOSYS;
87780-#endif
87781-}
87782-prof_sys_thread_name_read_t *JET_MUTABLE prof_sys_thread_name_read =
87783-    prof_sys_thread_name_read_impl;
87784-
87785-void
87786-prof_sys_thread_name_fetch(tsd_t *tsd) {
87787-#define THREAD_NAME_MAX_LEN 16
87788-	char buf[THREAD_NAME_MAX_LEN];
87789-	if (!prof_sys_thread_name_read(buf, THREAD_NAME_MAX_LEN)) {
87790-		prof_thread_name_set_impl(tsd, buf);
87791-	}
87792-#undef THREAD_NAME_MAX_LEN
87793-}
87794-
87795-int
87796-prof_getpid(void) {
87797-#ifdef _WIN32
87798-	return GetCurrentProcessId();
87799-#else
87800-	return getpid();
87801-#endif
87802-}
87803-
87804-/*
87805- * This buffer is rather large for stack allocation, so use a single buffer for
87806- * all profile dumps; protected by prof_dump_mtx.
87807- */
87808-static char prof_dump_buf[PROF_DUMP_BUFSIZE];
87809-
87810-typedef struct prof_dump_arg_s prof_dump_arg_t;
87811-struct prof_dump_arg_s {
87812-	/*
87813-	 * Whether errors should be handled locally: if true, we print an
87814-	 * error message and abort (if opt_abort is true) when an error
87815-	 * occurs, in addition to reporting the error back to the caller at
87816-	 * the end; if false, we only report the error back to the caller
87817-	 * at the end.
87818-	 */
87819-	const bool handle_error_locally;
87820-	/*
87821-	 * Whether there has been an error in the dumping process, which could
87822-	 * have happened either in file opening or in file writing.  When an
87823-	 * error has already occurred, we will stop further writing to the file.
87824-	 */
87825-	bool error;
87826-	/* File descriptor of the dump file. */
87827-	int prof_dump_fd;
87828-};
87829-
87830-static void
87831-prof_dump_check_possible_error(prof_dump_arg_t *arg, bool err_cond,
87832-    const char *format, ...) {
87833-	assert(!arg->error);
87834-	if (!err_cond) {
87835-		return;
87836-	}
87837-
87838-	arg->error = true;
87839-	if (!arg->handle_error_locally) {
87840-		return;
87841-	}
87842-
87843-	va_list ap;
87844-	char buf[PROF_PRINTF_BUFSIZE];
87845-	va_start(ap, format);
87846-	malloc_vsnprintf(buf, sizeof(buf), format, ap);
87847-	va_end(ap);
87848-	malloc_write(buf);
87849-
87850-	if (opt_abort) {
87851-		abort();
87852-	}
87853-}
87854-
87855-static int
87856-prof_dump_open_file_impl(const char *filename, int mode) {
87857-	return creat(filename, mode);
87858-}
87859-prof_dump_open_file_t *JET_MUTABLE prof_dump_open_file =
87860-    prof_dump_open_file_impl;
87861-
87862-static void
87863-prof_dump_open(prof_dump_arg_t *arg, const char *filename) {
87864-	arg->prof_dump_fd = prof_dump_open_file(filename, 0644);
87865-	prof_dump_check_possible_error(arg, arg->prof_dump_fd == -1,
87866-	    "<jemalloc>: failed to open \"%s\"\n", filename);
87867-}
87868-
87869-prof_dump_write_file_t *JET_MUTABLE prof_dump_write_file = malloc_write_fd;
87870-
87871-static void
87872-prof_dump_flush(void *opaque, const char *s) {
87873-	cassert(config_prof);
87874-	prof_dump_arg_t *arg = (prof_dump_arg_t *)opaque;
87875-	if (!arg->error) {
87876-		ssize_t err = prof_dump_write_file(arg->prof_dump_fd, s,
87877-		    strlen(s));
87878-		prof_dump_check_possible_error(arg, err == -1,
87879-		    "<jemalloc>: failed to write during heap profile flush\n");
87880-	}
87881-}
87882-
87883-static void
87884-prof_dump_close(prof_dump_arg_t *arg) {
87885-	if (arg->prof_dump_fd != -1) {
87886-		close(arg->prof_dump_fd);
87887-	}
87888-}
87889-
87890-#ifndef _WIN32
87891-JEMALLOC_FORMAT_PRINTF(1, 2)
87892-static int
87893-prof_open_maps_internal(const char *format, ...) {
87894-	int mfd;
87895-	va_list ap;
87896-	char filename[PATH_MAX + 1];
87897-
87898-	va_start(ap, format);
87899-	malloc_vsnprintf(filename, sizeof(filename), format, ap);
87900-	va_end(ap);
87901-
87902-#if defined(O_CLOEXEC)
87903-	mfd = open(filename, O_RDONLY | O_CLOEXEC);
87904-#else
87905-	mfd = open(filename, O_RDONLY);
87906-	if (mfd != -1) {
87907-		fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC);
87908-	}
87909-#endif
87910-
87911-	return mfd;
87912-}
87913-#endif
87914-
87915-static int
87916-prof_dump_open_maps_impl() {
87917-	int mfd;
87918-
87919-	cassert(config_prof);
87920-#if defined(__FreeBSD__) || defined(__DragonFly__)
87921-	mfd = prof_open_maps_internal("/proc/curproc/map");
87922-#elif defined(_WIN32)
87923-	mfd = -1; /* Not implemented. */
87924-#else
87925-	int pid = prof_getpid();
87926-
87927-	mfd = prof_open_maps_internal("/proc/%d/task/%d/maps", pid, pid);
87928-	if (mfd == -1) {
87929-		mfd = prof_open_maps_internal("/proc/%d/maps", pid);
87930-	}
87931-#endif
87932-	return mfd;
87933-}
87934-prof_dump_open_maps_t *JET_MUTABLE prof_dump_open_maps =
87935-    prof_dump_open_maps_impl;
87936-
87937-static ssize_t
87938-prof_dump_read_maps_cb(void *read_cbopaque, void *buf, size_t limit) {
87939-	int mfd = *(int *)read_cbopaque;
87940-	assert(mfd != -1);
87941-	return malloc_read_fd(mfd, buf, limit);
87942-}
87943-
87944-static void
87945-prof_dump_maps(buf_writer_t *buf_writer) {
87946-	int mfd = prof_dump_open_maps();
87947-	if (mfd == -1) {
87948-		return;
87949-	}
87950-
87951-	buf_writer_cb(buf_writer, "\nMAPPED_LIBRARIES:\n");
87952-	buf_writer_pipe(buf_writer, prof_dump_read_maps_cb, &mfd);
87953-	close(mfd);
87954-}
87955-
87956-static bool
87957-prof_dump(tsd_t *tsd, bool propagate_err, const char *filename,
87958-    bool leakcheck) {
87959-	cassert(config_prof);
87960-	assert(tsd_reentrancy_level_get(tsd) == 0);
87961-
87962-	prof_tdata_t *tdata = prof_tdata_get(tsd, true);
87963-	if (tdata == NULL) {
87964-		return true;
87965-	}
87966-
87967-	prof_dump_arg_t arg = {/* handle_error_locally */ !propagate_err,
87968-	    /* error */ false, /* prof_dump_fd */ -1};
87969-
87970-	pre_reentrancy(tsd, NULL);
87971-	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
87972-
87973-	prof_dump_open(&arg, filename);
87974-	buf_writer_t buf_writer;
87975-	bool err = buf_writer_init(tsd_tsdn(tsd), &buf_writer, prof_dump_flush,
87976-	    &arg, prof_dump_buf, PROF_DUMP_BUFSIZE);
87977-	assert(!err);
87978-	prof_dump_impl(tsd, buf_writer_cb, &buf_writer, tdata, leakcheck);
87979-	prof_dump_maps(&buf_writer);
87980-	buf_writer_terminate(tsd_tsdn(tsd), &buf_writer);
87981-	prof_dump_close(&arg);
87982-
87983-	prof_dump_hook_t dump_hook = prof_dump_hook_get();
87984-	if (dump_hook != NULL) {
87985-		dump_hook(filename);
87986-	}
87987-	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
87988-	post_reentrancy(tsd);
87989-
87990-	return arg.error;
87991-}
87992-
87993-/*
87994- * If profiling is off, then PROF_DUMP_FILENAME_LEN is 1, so we'll end up
87995- * calling strncpy with a size of 0, which triggers a -Wstringop-truncation
87996- * warning (strncpy can never actually be called in this case, since we bail out
87997- * much earlier when config_prof is false).  This function works around the
87998-	 * much earlier when config_prof is false).  This function works around
87999-	 * the warning so that we can leave it enabled.
88000-static inline void
88001-prof_strncpy(char *UNUSED dest, const char *UNUSED src, size_t UNUSED size) {
88002-	cassert(config_prof);
88003-#ifdef JEMALLOC_PROF
88004-	strncpy(dest, src, size);
88005-#endif
88006-}
88007-
88008-static const char *
88009-prof_prefix_get(tsdn_t *tsdn) {
88010-	malloc_mutex_assert_owner(tsdn, &prof_dump_filename_mtx);
88011-
88012-	return prof_prefix == NULL ? opt_prof_prefix : prof_prefix;
88013-}
88014-
88015-static bool
88016-prof_prefix_is_empty(tsdn_t *tsdn) {
88017-	malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
88018-	bool ret = (prof_prefix_get(tsdn)[0] == '\0');
88019-	malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
88020-	return ret;
88021-}
88022-
88023-#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
88024-#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
88025-static void
88026-prof_dump_filename(tsd_t *tsd, char *filename, char v, uint64_t vseq) {
88027-	cassert(config_prof);
88028-
88029-	assert(tsd_reentrancy_level_get(tsd) == 0);
88030-	const char *prefix = prof_prefix_get(tsd_tsdn(tsd));
88031-
88032-	if (vseq != VSEQ_INVALID) {
88033-	        /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
88034-		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
88035-		    "%s.%d.%"FMTu64".%c%"FMTu64".heap", prefix, prof_getpid(),
88036-		    prof_dump_seq, v, vseq);
88037-	} else {
88038-	        /* "<prefix>.<pid>.<seq>.<v>.heap" */
88039-		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
88040-		    "%s.%d.%"FMTu64".%c.heap", prefix, prof_getpid(),
88041-		    prof_dump_seq, v);
88042-	}
88043-	prof_dump_seq++;
88044-}
88045-
88046-void
88047-prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind) {
88048-	malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
88049-	malloc_snprintf(filename, PROF_DUMP_FILENAME_LEN,
88050-	    "%s.%d.%"FMTu64".json", prof_prefix_get(tsdn), prof_getpid(), ind);
88051-	malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
88052-}
88053-
88054-void
88055-prof_fdump_impl(tsd_t *tsd) {
88056-	char filename[DUMP_FILENAME_BUFSIZE];
88057-
88058-	assert(!prof_prefix_is_empty(tsd_tsdn(tsd)));
88059-	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
88060-	prof_dump_filename(tsd, filename, 'f', VSEQ_INVALID);
88061-	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
88062-	prof_dump(tsd, false, filename, opt_prof_leak);
88063-}
88064-
88065-bool
88066-prof_prefix_set(tsdn_t *tsdn, const char *prefix) {
88067-	cassert(config_prof);
88068-	ctl_mtx_assert_held(tsdn);
88069-	malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
88070-	if (prof_prefix == NULL) {
88071-		malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
88072-		/* Everything is still guarded by ctl_mtx. */
88073-		char *buffer = base_alloc(tsdn, prof_base,
88074-		    PROF_DUMP_FILENAME_LEN, QUANTUM);
88075-		if (buffer == NULL) {
88076-			return true;
88077-		}
88078-		malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
88079-		prof_prefix = buffer;
88080-	}
88081-	assert(prof_prefix != NULL);
88082-
88083-	prof_strncpy(prof_prefix, prefix, PROF_DUMP_FILENAME_LEN - 1);
88084-	prof_prefix[PROF_DUMP_FILENAME_LEN - 1] = '\0';
88085-	malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
88086-
88087-	return false;
88088-}
88089-
88090-void
88091-prof_idump_impl(tsd_t *tsd) {
88092-	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
88093-	if (prof_prefix_get(tsd_tsdn(tsd))[0] == '\0') {
88094-		malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
88095-		return;
88096-	}
88097-	char filename[PATH_MAX + 1];
88098-	prof_dump_filename(tsd, filename, 'i', prof_dump_iseq);
88099-	prof_dump_iseq++;
88100-	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
88101-	prof_dump(tsd, false, filename, false);
88102-}
88103-
88104-bool
88105-prof_mdump_impl(tsd_t *tsd, const char *filename) {
88106-	char filename_buf[DUMP_FILENAME_BUFSIZE];
88107-	if (filename == NULL) {
88108-		/* No filename specified, so automatically generate one. */
88109-		malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
88110-		if (prof_prefix_get(tsd_tsdn(tsd))[0] == '\0') {
88111-			malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
88112-			return true;
88113-		}
88114-		prof_dump_filename(tsd, filename_buf, 'm', prof_dump_mseq);
88115-		prof_dump_mseq++;
88116-		malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
88117-		filename = filename_buf;
88118-	}
88119-	return prof_dump(tsd, true, filename, false);
88120-}
88121-
88122-void
88123-prof_gdump_impl(tsd_t *tsd) {
88124-	tsdn_t *tsdn = tsd_tsdn(tsd);
88125-	malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
88126-	if (prof_prefix_get(tsdn)[0] == '\0') {
88127-		malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
88128-		return;
88129-	}
88130-	char filename[DUMP_FILENAME_BUFSIZE];
88131-	prof_dump_filename(tsd, filename, 'u', prof_dump_useq);
88132-	prof_dump_useq++;
88133-	malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
88134-	prof_dump(tsd, false, filename, false);
88135-}
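The dump entry points removed above (fdump/idump/mdump/gdump) all build filenames of the form "<prefix>.<pid>.<seq>.<type><vseq>.heap" (the final 'f' dump omits <vseq>). A standalone sketch of that naming scheme using plain snprintf; jemalloc itself uses malloc_snprintf and generates the name under prof_dump_filename_mtx:

#include <stdio.h>
#include <inttypes.h>

/*
 * Illustrative only: mirrors the "<prefix>.<pid>.<seq>.<type><vseq>.heap"
 * pattern with standard snprintf instead of malloc_snprintf.
 */
static void
dump_filename(char *buf, size_t len, const char *prefix, int pid,
    uint64_t seq, char kind, uint64_t vseq, int have_vseq) {
	if (have_vseq) {
		snprintf(buf, len, "%s.%d.%" PRIu64 ".%c%" PRIu64 ".heap",
		    prefix, pid, seq, kind, vseq);
	} else {
		snprintf(buf, len, "%s.%d.%" PRIu64 ".%c.heap",
		    prefix, pid, seq, kind);
	}
}

int
main(void) {
	char buf[256];
	dump_filename(buf, sizeof(buf), "jeprof", 1234, 0, 'i', 7, 1);
	puts(buf);	/* jeprof.1234.0.i7.heap */
	dump_filename(buf, sizeof(buf), "jeprof", 1234, 1, 'f', 0, 0);
	puts(buf);	/* jeprof.1234.1.f.heap */
	return 0;
}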
88136diff --git a/jemalloc/src/psset.c b/jemalloc/src/psset.c
88137deleted file mode 100644
88138index 9a8f054..0000000
88139--- a/jemalloc/src/psset.c
88140+++ /dev/null
88141@@ -1,385 +0,0 @@
88142-#include "jemalloc/internal/jemalloc_preamble.h"
88143-#include "jemalloc/internal/jemalloc_internal_includes.h"
88144-
88145-#include "jemalloc/internal/psset.h"
88146-
88147-#include "jemalloc/internal/fb.h"
88148-
88149-void
88150-psset_init(psset_t *psset) {
88151-	for (unsigned i = 0; i < PSSET_NPSIZES; i++) {
88152-		hpdata_age_heap_new(&psset->pageslabs[i]);
88153-	}
88154-	fb_init(psset->pageslab_bitmap, PSSET_NPSIZES);
88155-	memset(&psset->merged_stats, 0, sizeof(psset->merged_stats));
88156-	memset(&psset->stats, 0, sizeof(psset->stats));
88157-	hpdata_empty_list_init(&psset->empty);
88158-	for (int i = 0; i < PSSET_NPURGE_LISTS; i++) {
88159-		hpdata_purge_list_init(&psset->to_purge[i]);
88160-	}
88161-	fb_init(psset->purge_bitmap, PSSET_NPURGE_LISTS);
88162-	hpdata_hugify_list_init(&psset->to_hugify);
88163-}
88164-
88165-static void
88166-psset_bin_stats_accum(psset_bin_stats_t *dst, psset_bin_stats_t *src) {
88167-	dst->npageslabs += src->npageslabs;
88168-	dst->nactive += src->nactive;
88169-	dst->ndirty += src->ndirty;
88170-}
88171-
88172-void
88173-psset_stats_accum(psset_stats_t *dst, psset_stats_t *src) {
88174-	psset_bin_stats_accum(&dst->full_slabs[0], &src->full_slabs[0]);
88175-	psset_bin_stats_accum(&dst->full_slabs[1], &src->full_slabs[1]);
88176-	psset_bin_stats_accum(&dst->empty_slabs[0], &src->empty_slabs[0]);
88177-	psset_bin_stats_accum(&dst->empty_slabs[1], &src->empty_slabs[1]);
88178-	for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
88179-		psset_bin_stats_accum(&dst->nonfull_slabs[i][0],
88180-		    &src->nonfull_slabs[i][0]);
88181-		psset_bin_stats_accum(&dst->nonfull_slabs[i][1],
88182-		    &src->nonfull_slabs[i][1]);
88183-	}
88184-}
88185-
88186-/*
88187- * The stats maintenance strategy is to remove a pageslab's contribution to the
88188- * stats when we call psset_update_begin, and re-add it (to a potentially new
88189- * bin) when we call psset_update_end.
88190- */
88191-JEMALLOC_ALWAYS_INLINE void
88192-psset_bin_stats_insert_remove(psset_t *psset, psset_bin_stats_t *binstats,
88193-    hpdata_t *ps, bool insert) {
88194-	size_t mul = insert ? (size_t)1 : (size_t)-1;
88195-	size_t huge_idx = (size_t)hpdata_huge_get(ps);
88196-
88197-	binstats[huge_idx].npageslabs += mul * 1;
88198-	binstats[huge_idx].nactive += mul * hpdata_nactive_get(ps);
88199-	binstats[huge_idx].ndirty += mul * hpdata_ndirty_get(ps);
88200-
88201-	psset->merged_stats.npageslabs += mul * 1;
88202-	psset->merged_stats.nactive += mul * hpdata_nactive_get(ps);
88203-	psset->merged_stats.ndirty += mul * hpdata_ndirty_get(ps);
88204-
88205-	if (config_debug) {
88206-		psset_bin_stats_t check_stats = {0};
88207-		for (size_t huge = 0; huge <= 1; huge++) {
88208-			psset_bin_stats_accum(&check_stats,
88209-			    &psset->stats.full_slabs[huge]);
88210-			psset_bin_stats_accum(&check_stats,
88211-			    &psset->stats.empty_slabs[huge]);
88212-			for (pszind_t pind = 0; pind < PSSET_NPSIZES; pind++) {
88213-				psset_bin_stats_accum(&check_stats,
88214-				    &psset->stats.nonfull_slabs[pind][huge]);
88215-			}
88216-		}
88217-		assert(psset->merged_stats.npageslabs
88218-		    == check_stats.npageslabs);
88219-		assert(psset->merged_stats.nactive == check_stats.nactive);
88220-		assert(psset->merged_stats.ndirty == check_stats.ndirty);
88221-	}
88222-}
88223-
88224-static void
88225-psset_bin_stats_insert(psset_t *psset, psset_bin_stats_t *binstats,
88226-    hpdata_t *ps) {
88227-	psset_bin_stats_insert_remove(psset, binstats, ps, true);
88228-}
88229-
88230-static void
88231-psset_bin_stats_remove(psset_t *psset, psset_bin_stats_t *binstats,
88232-    hpdata_t *ps) {
88233-	psset_bin_stats_insert_remove(psset, binstats, ps, false);
88234-}
88235-
88236-static void
88237-psset_hpdata_heap_remove(psset_t *psset, pszind_t pind, hpdata_t *ps) {
88238-	hpdata_age_heap_remove(&psset->pageslabs[pind], ps);
88239-	if (hpdata_age_heap_empty(&psset->pageslabs[pind])) {
88240-		fb_unset(psset->pageslab_bitmap, PSSET_NPSIZES, (size_t)pind);
88241-	}
88242-}
88243-
88244-static void
88245-psset_hpdata_heap_insert(psset_t *psset, pszind_t pind, hpdata_t *ps) {
88246-	if (hpdata_age_heap_empty(&psset->pageslabs[pind])) {
88247-		fb_set(psset->pageslab_bitmap, PSSET_NPSIZES, (size_t)pind);
88248-	}
88249-	hpdata_age_heap_insert(&psset->pageslabs[pind], ps);
88250-}
88251-
88252-static void
88253-psset_stats_insert(psset_t *psset, hpdata_t *ps) {
88254-	if (hpdata_empty(ps)) {
88255-		psset_bin_stats_insert(psset, psset->stats.empty_slabs, ps);
88256-	} else if (hpdata_full(ps)) {
88257-		psset_bin_stats_insert(psset, psset->stats.full_slabs, ps);
88258-	} else {
88259-		size_t longest_free_range = hpdata_longest_free_range_get(ps);
88260-
88261-		pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
88262-		    longest_free_range << LG_PAGE));
88263-		assert(pind < PSSET_NPSIZES);
88264-
88265-		psset_bin_stats_insert(psset, psset->stats.nonfull_slabs[pind],
88266-		    ps);
88267-	}
88268-}
88269-
88270-static void
88271-psset_stats_remove(psset_t *psset, hpdata_t *ps) {
88272-	if (hpdata_empty(ps)) {
88273-		psset_bin_stats_remove(psset, psset->stats.empty_slabs, ps);
88274-	} else if (hpdata_full(ps)) {
88275-		psset_bin_stats_remove(psset, psset->stats.full_slabs, ps);
88276-	} else {
88277-		size_t longest_free_range = hpdata_longest_free_range_get(ps);
88278-
88279-		pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
88280-		    longest_free_range << LG_PAGE));
88281-		assert(pind < PSSET_NPSIZES);
88282-
88283-		psset_bin_stats_remove(psset, psset->stats.nonfull_slabs[pind],
88284-		    ps);
88285-	}
88286-}
88287-
88288-/*
88289- * Put ps into some container so that it can be found during future allocation
88290- * requests.
88291- */
88292-static void
88293-psset_alloc_container_insert(psset_t *psset, hpdata_t *ps) {
88294-	assert(!hpdata_in_psset_alloc_container_get(ps));
88295-	hpdata_in_psset_alloc_container_set(ps, true);
88296-	if (hpdata_empty(ps)) {
88297-		/*
88298-		 * This prepend, paired with popping the head in psset_fit,
88299-		 * means we implement LIFO ordering for the empty slabs set,
88300-		 * which seems reasonable.
88301-		 */
88302-		hpdata_empty_list_prepend(&psset->empty, ps);
88303-	} else if (hpdata_full(ps)) {
88304-		/*
88305-		 * We don't need to keep track of the full slabs; we're never
88306-		 * going to return them from a psset_pick_alloc call.
88307-		 */
88308-	} else {
88309-		size_t longest_free_range = hpdata_longest_free_range_get(ps);
88310-
88311-		pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
88312-		    longest_free_range << LG_PAGE));
88313-		assert(pind < PSSET_NPSIZES);
88314-
88315-		psset_hpdata_heap_insert(psset, pind, ps);
88316-	}
88317-}
88318-
88319-/* Remove ps from those collections. */
88320-static void
88321-psset_alloc_container_remove(psset_t *psset, hpdata_t *ps) {
88322-	assert(hpdata_in_psset_alloc_container_get(ps));
88323-	hpdata_in_psset_alloc_container_set(ps, false);
88324-
88325-	if (hpdata_empty(ps)) {
88326-		hpdata_empty_list_remove(&psset->empty, ps);
88327-	} else if (hpdata_full(ps)) {
88328-		/* Same as above -- do nothing in this case. */
88329-	} else {
88330-		size_t longest_free_range = hpdata_longest_free_range_get(ps);
88331-
88332-		pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
88333-		    longest_free_range << LG_PAGE));
88334-		assert(pind < PSSET_NPSIZES);
88335-
88336-		psset_hpdata_heap_remove(psset, pind, ps);
88337-	}
88338-}
88339-
88340-static size_t
88341-psset_purge_list_ind(hpdata_t *ps) {
88342-	size_t ndirty = hpdata_ndirty_get(ps);
88343-	/* Shouldn't have something with no dirty pages purgeable. */
88344-	assert(ndirty > 0);
88345-	/*
88346-	 * Higher indices correspond to lists we'd like to purge earlier; make
88347-	 * the two highest indices correspond to empty lists, which we attempt
88348-	 * to purge before purging any non-empty list.  This has two advantages:
88349-	 * - Empty page slabs are the least likely to get reused (we'll only
88350-	 *   pick them for an allocation if we have no other choice).
88351-	 * - Empty page slabs can purge every dirty page they contain in a
88352-	 *   single call, which is not usually the case.
88353-	 *
88354-	 * We purge hugified empty slabs before non-hugified ones, on the basis
88355-	 * that they are fully dirty, while non-hugified slabs might not be, so
88356-	 * we free up more pages more easily.
88357-	 */
88358-	if (hpdata_nactive_get(ps) == 0) {
88359-		if (hpdata_huge_get(ps)) {
88360-			return PSSET_NPURGE_LISTS - 1;
88361-		} else {
88362-			return PSSET_NPURGE_LISTS - 2;
88363-		}
88364-	}
88365-
88366-	pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(ndirty << LG_PAGE));
88367-	/*
88368-	 * Non-empty slabs may get reused, so among slabs of similar dirtiness
88369-	 * prefer purging non-hugified ones before hugified ones; we still get
88370-	 * some benefit from the hugification.
88371-	 */
88372-	return (size_t)pind * 2 + (hpdata_huge_get(ps) ? 0 : 1);
88373-}
88374-
88375-static void
88376-psset_maybe_remove_purge_list(psset_t *psset, hpdata_t *ps) {
88377-	/*
88378-	 * Remove the hpdata from its purge list (if it's in one).  Even if it's
88379-	 * going to stay in the same one, by appending it during
88380-	 * psset_update_end, we move it to the end of its queue, so that we
88381-	 * purge LRU within a given dirtiness bucket.
88382-	 */
88383-	if (hpdata_purge_allowed_get(ps)) {
88384-		size_t ind = psset_purge_list_ind(ps);
88385-		hpdata_purge_list_t *purge_list = &psset->to_purge[ind];
88386-		hpdata_purge_list_remove(purge_list, ps);
88387-		if (hpdata_purge_list_empty(purge_list)) {
88388-			fb_unset(psset->purge_bitmap, PSSET_NPURGE_LISTS, ind);
88389-		}
88390-	}
88391-}
88392-
88393-static void
88394-psset_maybe_insert_purge_list(psset_t *psset, hpdata_t *ps) {
88395-	if (hpdata_purge_allowed_get(ps)) {
88396-		size_t ind = psset_purge_list_ind(ps);
88397-		hpdata_purge_list_t *purge_list = &psset->to_purge[ind];
88398-		if (hpdata_purge_list_empty(purge_list)) {
88399-			fb_set(psset->purge_bitmap, PSSET_NPURGE_LISTS, ind);
88400-		}
88401-		hpdata_purge_list_append(purge_list, ps);
88402-	}
88403-
88404-}
88405-
88406-void
88407-psset_update_begin(psset_t *psset, hpdata_t *ps) {
88408-	hpdata_assert_consistent(ps);
88409-	assert(hpdata_in_psset_get(ps));
88410-	hpdata_updating_set(ps, true);
88411-	psset_stats_remove(psset, ps);
88412-	if (hpdata_in_psset_alloc_container_get(ps)) {
88413-		/*
88414-		 * Some metadata updates can break alloc container invariants
88415-		 * (e.g. the longest free range determines the hpdata_heap_t the
88416-		 * pageslab lives in).
88417-		 */
88418-		assert(hpdata_alloc_allowed_get(ps));
88419-		psset_alloc_container_remove(psset, ps);
88420-	}
88421-	psset_maybe_remove_purge_list(psset, ps);
88422-	/*
88423-	 * We don't update presence in the hugify list; we try to keep it FIFO,
88424-	 * even in the presence of other metadata updates.  We'll update
88425-	 * presence at the end of the metadata update if necessary.
88426-	 */
88427-}
88428-
88429-void
88430-psset_update_end(psset_t *psset, hpdata_t *ps) {
88431-	assert(hpdata_in_psset_get(ps));
88432-	hpdata_updating_set(ps, false);
88433-	psset_stats_insert(psset, ps);
88434-
88435-	/*
88436-	 * The update begin should have removed ps from whatever alloc container
88437-	 * it was in.
88438-	 */
88439-	assert(!hpdata_in_psset_alloc_container_get(ps));
88440-	if (hpdata_alloc_allowed_get(ps)) {
88441-		psset_alloc_container_insert(psset, ps);
88442-	}
88443-	psset_maybe_insert_purge_list(psset, ps);
88444-
88445-	if (hpdata_hugify_allowed_get(ps)
88446-	    && !hpdata_in_psset_hugify_container_get(ps)) {
88447-		hpdata_in_psset_hugify_container_set(ps, true);
88448-		hpdata_hugify_list_append(&psset->to_hugify, ps);
88449-	} else if (!hpdata_hugify_allowed_get(ps)
88450-	    && hpdata_in_psset_hugify_container_get(ps)) {
88451-		hpdata_in_psset_hugify_container_set(ps, false);
88452-		hpdata_hugify_list_remove(&psset->to_hugify, ps);
88453-	}
88454-	hpdata_assert_consistent(ps);
88455-}
88456-
88457-hpdata_t *
88458-psset_pick_alloc(psset_t *psset, size_t size) {
88459-	assert((size & PAGE_MASK) == 0);
88460-	assert(size <= HUGEPAGE);
88461-
88462-	pszind_t min_pind = sz_psz2ind(sz_psz_quantize_ceil(size));
88463-	pszind_t pind = (pszind_t)fb_ffs(psset->pageslab_bitmap, PSSET_NPSIZES,
88464-	    (size_t)min_pind);
88465-	if (pind == PSSET_NPSIZES) {
88466-		return hpdata_empty_list_first(&psset->empty);
88467-	}
88468-	hpdata_t *ps = hpdata_age_heap_first(&psset->pageslabs[pind]);
88469-	if (ps == NULL) {
88470-		return NULL;
88471-	}
88472-
88473-	hpdata_assert_consistent(ps);
88474-
88475-	return ps;
88476-}
88477-
88478-hpdata_t *
88479-psset_pick_purge(psset_t *psset) {
88480-	ssize_t ind_ssz = fb_fls(psset->purge_bitmap, PSSET_NPURGE_LISTS,
88481-	    PSSET_NPURGE_LISTS - 1);
88482-	if (ind_ssz < 0) {
88483-		return NULL;
88484-	}
88485-	pszind_t ind = (pszind_t)ind_ssz;
88486-	assert(ind < PSSET_NPURGE_LISTS);
88487-	hpdata_t *ps = hpdata_purge_list_first(&psset->to_purge[ind]);
88488-	assert(ps != NULL);
88489-	return ps;
88490-}
88491-
88492-hpdata_t *
88493-psset_pick_hugify(psset_t *psset) {
88494-	return hpdata_hugify_list_first(&psset->to_hugify);
88495-}
88496-
88497-void
88498-psset_insert(psset_t *psset, hpdata_t *ps) {
88499-	hpdata_in_psset_set(ps, true);
88500-
88501-	psset_stats_insert(psset, ps);
88502-	if (hpdata_alloc_allowed_get(ps)) {
88503-		psset_alloc_container_insert(psset, ps);
88504-	}
88505-	psset_maybe_insert_purge_list(psset, ps);
88506-
88507-	if (hpdata_hugify_allowed_get(ps)) {
88508-		hpdata_in_psset_hugify_container_set(ps, true);
88509-		hpdata_hugify_list_append(&psset->to_hugify, ps);
88510-	}
88511-}
88512-
88513-void
88514-psset_remove(psset_t *psset, hpdata_t *ps) {
88515-	hpdata_in_psset_set(ps, false);
88516-
88517-	psset_stats_remove(psset, ps);
88518-	if (hpdata_in_psset_alloc_container_get(ps)) {
88519-		psset_alloc_container_remove(psset, ps);
88520-	}
88521-	psset_maybe_remove_purge_list(psset, ps);
88522-	if (hpdata_in_psset_hugify_container_get(ps)) {
88523-		hpdata_in_psset_hugify_container_set(ps, false);
88524-		hpdata_hugify_list_remove(&psset->to_hugify, ps);
88525-	}
88526-}
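The purge ordering encoded by psset_purge_list_ind() in the psset.c removal above is easier to see with concrete numbers: empty slabs take the two highest (earliest-purged) indices, hugified above non-hugified, while non-empty slabs are bucketed by dirty-page count with non-hugified slabs preferred within a bucket. A simplified standalone sketch; the log2 bucketing and the list count are invented stand-ins for sz_psz2ind() and PSSET_NPURGE_LISTS, so only the relative ordering is meaningful:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NPURGE_LISTS 16	/* invented; not PSSET_NPURGE_LISTS */

static size_t
dirty_bucket(size_t ndirty) {
	size_t b = 0;
	while (ndirty >>= 1) {
		b++;
	}
	return b;
}

static size_t
purge_list_ind(size_t nactive, size_t ndirty, bool huge) {
	if (nactive == 0) {
		/* Empty slabs get the two highest (earliest-purged) indices. */
		return huge ? NPURGE_LISTS - 1 : NPURGE_LISTS - 2;
	}
	/* Dirtier buckets first; non-hugified preferred within a bucket. */
	return dirty_bucket(ndirty) * 2 + (huge ? 0 : 1);
}

int
main(void) {
	printf("empty, hugified        -> %zu\n", purge_list_ind(0, 512, true));
	printf("empty, non-hugified    -> %zu\n", purge_list_ind(0, 512, false));
	printf("64 dirty, non-hugified -> %zu\n", purge_list_ind(10, 64, false));
	printf("64 dirty, hugified     -> %zu\n", purge_list_ind(10, 64, true));
	return 0;
}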
88527diff --git a/jemalloc/src/rtree.c b/jemalloc/src/rtree.c
88528deleted file mode 100644
88529index 6496b5a..0000000
88530--- a/jemalloc/src/rtree.c
88531+++ /dev/null
88532@@ -1,261 +0,0 @@
88533-#include "jemalloc/internal/jemalloc_preamble.h"
88534-#include "jemalloc/internal/jemalloc_internal_includes.h"
88535-
88536-#include "jemalloc/internal/assert.h"
88537-#include "jemalloc/internal/mutex.h"
88538-
88539-/*
88540- * Only the most significant bits of keys passed to rtree_{read,write}() are
88541- * used.
88542- */
88543-bool
88544-rtree_new(rtree_t *rtree, base_t *base, bool zeroed) {
88545-#ifdef JEMALLOC_JET
88546-	if (!zeroed) {
88547-		memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */
88548-	}
88549-#else
88550-	assert(zeroed);
88551-#endif
88552-	rtree->base = base;
88553-
88554-	if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE,
88555-	    malloc_mutex_rank_exclusive)) {
88556-		return true;
88557-	}
88558-
88559-	return false;
88560-}
88561-
88562-static rtree_node_elm_t *
88563-rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
88564-	return (rtree_node_elm_t *)base_alloc(tsdn, rtree->base,
88565-	    nelms * sizeof(rtree_node_elm_t), CACHELINE);
88566-}
88567-
88568-static rtree_leaf_elm_t *
88569-rtree_leaf_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
88570-	return (rtree_leaf_elm_t *)base_alloc(tsdn, rtree->base,
88571-	    nelms * sizeof(rtree_leaf_elm_t), CACHELINE);
88572-}
88573-
88574-static rtree_node_elm_t *
88575-rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
88576-    atomic_p_t *elmp) {
88577-	malloc_mutex_lock(tsdn, &rtree->init_lock);
88578-	/*
88579-	 * If *elmp is non-null, then it was initialized with the init lock
88580-	 * held, so we can get by with 'relaxed' here.
88581-	 */
88582-	rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED);
88583-	if (node == NULL) {
88584-		node = rtree_node_alloc(tsdn, rtree, ZU(1) <<
88585-		    rtree_levels[level].bits);
88586-		if (node == NULL) {
88587-			malloc_mutex_unlock(tsdn, &rtree->init_lock);
88588-			return NULL;
88589-		}
88590-		/*
88591-		 * Even though we hold the lock, a later reader might not; we
88592-		 * need release semantics.
88593-		 */
88594-		atomic_store_p(elmp, node, ATOMIC_RELEASE);
88595-	}
88596-	malloc_mutex_unlock(tsdn, &rtree->init_lock);
88597-
88598-	return node;
88599-}
88600-
88601-static rtree_leaf_elm_t *
88602-rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) {
88603-	malloc_mutex_lock(tsdn, &rtree->init_lock);
88604-	/*
88605-	 * If *elmp is non-null, then it was initialized with the init lock
88606-	 * held, so we can get by with 'relaxed' here.
88607-	 */
88608-	rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED);
88609-	if (leaf == NULL) {
88610-		leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) <<
88611-		    rtree_levels[RTREE_HEIGHT-1].bits);
88612-		if (leaf == NULL) {
88613-			malloc_mutex_unlock(tsdn, &rtree->init_lock);
88614-			return NULL;
88615-		}
88616-		/*
88617-		 * Even though we hold the lock, a later reader might not; we
88618-		 * need release semantics.
88619-		 */
88620-		atomic_store_p(elmp, leaf, ATOMIC_RELEASE);
88621-	}
88622-	malloc_mutex_unlock(tsdn, &rtree->init_lock);
88623-
88624-	return leaf;
88625-}
88626-
88627-static bool
88628-rtree_node_valid(rtree_node_elm_t *node) {
88629-	return ((uintptr_t)node != (uintptr_t)0);
88630-}
88631-
88632-static bool
88633-rtree_leaf_valid(rtree_leaf_elm_t *leaf) {
88634-	return ((uintptr_t)leaf != (uintptr_t)0);
88635-}
88636-
88637-static rtree_node_elm_t *
88638-rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) {
88639-	rtree_node_elm_t *node;
88640-
88641-	if (dependent) {
88642-		node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
88643-		    ATOMIC_RELAXED);
88644-	} else {
88645-		node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
88646-		    ATOMIC_ACQUIRE);
88647-	}
88648-
88649-	assert(!dependent || node != NULL);
88650-	return node;
88651-}
88652-
88653-static rtree_node_elm_t *
88654-rtree_child_node_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
88655-    unsigned level, bool dependent) {
88656-	rtree_node_elm_t *node;
88657-
88658-	node = rtree_child_node_tryread(elm, dependent);
88659-	if (!dependent && unlikely(!rtree_node_valid(node))) {
88660-		node = rtree_node_init(tsdn, rtree, level + 1, &elm->child);
88661-	}
88662-	assert(!dependent || node != NULL);
88663-	return node;
88664-}
88665-
88666-static rtree_leaf_elm_t *
88667-rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) {
88668-	rtree_leaf_elm_t *leaf;
88669-
88670-	if (dependent) {
88671-		leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
88672-		    ATOMIC_RELAXED);
88673-	} else {
88674-		leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
88675-		    ATOMIC_ACQUIRE);
88676-	}
88677-
88678-	assert(!dependent || leaf != NULL);
88679-	return leaf;
88680-}
88681-
88682-static rtree_leaf_elm_t *
88683-rtree_child_leaf_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
88684-    unsigned level, bool dependent) {
88685-	rtree_leaf_elm_t *leaf;
88686-
88687-	leaf = rtree_child_leaf_tryread(elm, dependent);
88688-	if (!dependent && unlikely(!rtree_leaf_valid(leaf))) {
88689-		leaf = rtree_leaf_init(tsdn, rtree, &elm->child);
88690-	}
88691-	assert(!dependent || leaf != NULL);
88692-	return leaf;
88693-}
88694-
88695-rtree_leaf_elm_t *
88696-rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
88697-    uintptr_t key, bool dependent, bool init_missing) {
88698-	rtree_node_elm_t *node;
88699-	rtree_leaf_elm_t *leaf;
88700-#if RTREE_HEIGHT > 1
88701-	node = rtree->root;
88702-#else
88703-	leaf = rtree->root;
88704-#endif
88705-
88706-	if (config_debug) {
88707-		uintptr_t leafkey = rtree_leafkey(key);
88708-		for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
88709-			assert(rtree_ctx->cache[i].leafkey != leafkey);
88710-		}
88711-		for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
88712-			assert(rtree_ctx->l2_cache[i].leafkey != leafkey);
88713-		}
88714-	}
88715-
88716-#define RTREE_GET_CHILD(level) {					\
88717-		assert(level < RTREE_HEIGHT-1);				\
88718-		if (level != 0 && !dependent &&				\
88719-		    unlikely(!rtree_node_valid(node))) {		\
88720-			return NULL;					\
88721-		}							\
88722-		uintptr_t subkey = rtree_subkey(key, level);		\
88723-		if (level + 2 < RTREE_HEIGHT) {				\
88724-			node = init_missing ?				\
88725-			    rtree_child_node_read(tsdn, rtree,		\
88726-			    &node[subkey], level, dependent) :		\
88727-			    rtree_child_node_tryread(&node[subkey],	\
88728-			    dependent);					\
88729-		} else {						\
88730-			leaf = init_missing ?				\
88731-			    rtree_child_leaf_read(tsdn, rtree,		\
88732-			    &node[subkey], level, dependent) :		\
88733-			    rtree_child_leaf_tryread(&node[subkey],	\
88734-			    dependent);					\
88735-		}							\
88736-	}
88737-	/*
88738-	 * Cache replacement upon hard lookup (i.e., L1 & L2 rtree cache miss):
88739-	 * (1) evict last entry in L2 cache; (2) move the collision slot from L1
88740-	 * cache down to L2; and (3) fill L1.
88741-	 */
88742-#define RTREE_GET_LEAF(level) {						\
88743-		assert(level == RTREE_HEIGHT-1);			\
88744-		if (!dependent && unlikely(!rtree_leaf_valid(leaf))) {	\
88745-			return NULL;					\
88746-		}							\
88747-		if (RTREE_CTX_NCACHE_L2 > 1) {				\
88748-			memmove(&rtree_ctx->l2_cache[1],		\
88749-			    &rtree_ctx->l2_cache[0],			\
88750-			    sizeof(rtree_ctx_cache_elm_t) *		\
88751-			    (RTREE_CTX_NCACHE_L2 - 1));			\
88752-		}							\
88753-		size_t slot = rtree_cache_direct_map(key);		\
88754-		rtree_ctx->l2_cache[0].leafkey =			\
88755-		    rtree_ctx->cache[slot].leafkey;			\
88756-		rtree_ctx->l2_cache[0].leaf =				\
88757-		    rtree_ctx->cache[slot].leaf;			\
88758-		uintptr_t leafkey = rtree_leafkey(key);			\
88759-		rtree_ctx->cache[slot].leafkey = leafkey;		\
88760-		rtree_ctx->cache[slot].leaf = leaf;			\
88761-		uintptr_t subkey = rtree_subkey(key, level);		\
88762-		return &leaf[subkey];					\
88763-	}
88764-	if (RTREE_HEIGHT > 1) {
88765-		RTREE_GET_CHILD(0)
88766-	}
88767-	if (RTREE_HEIGHT > 2) {
88768-		RTREE_GET_CHILD(1)
88769-	}
88770-	if (RTREE_HEIGHT > 3) {
88771-		for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) {
88772-			RTREE_GET_CHILD(i)
88773-		}
88774-	}
88775-	RTREE_GET_LEAF(RTREE_HEIGHT-1)
88776-#undef RTREE_GET_CHILD
88777-#undef RTREE_GET_LEAF
88778-	not_reached();
88779-}
88780-
88781-void
88782-rtree_ctx_data_init(rtree_ctx_t *ctx) {
88783-	for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
88784-		rtree_ctx_cache_elm_t *cache = &ctx->cache[i];
88785-		cache->leafkey = RTREE_LEAFKEY_INVALID;
88786-		cache->leaf = NULL;
88787-	}
88788-	for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
88789-		rtree_ctx_cache_elm_t *cache = &ctx->l2_cache[i];
88790-		cache->leafkey = RTREE_LEAFKEY_INVALID;
88791-		cache->leaf = NULL;
88792-	}
88793-}
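The RTREE_GET_LEAF refill in the rtree.c removal above is a small two-level cache shuffle: on an L1+L2 miss, drop the oldest L2 entry, demote the colliding direct-mapped L1 slot to the head of L2, and fill L1 with the freshly resolved leaf. A standalone sketch of just that shuffle; the cache sizes and the slot hash are invented, and the leaf is an opaque pointer rather than an rtree_leaf_elm_t array:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NCACHE		16	/* direct-mapped L1 (invented size) */
#define NCACHE_L2	8	/* LRU-ish L2 (invented size) */

typedef struct { uintptr_t leafkey; void *leaf; } cache_elm_t;
static cache_elm_t l1[NCACHE], l2[NCACHE_L2];

static void
cache_fill(uintptr_t leafkey, void *leaf) {
	size_t slot = (leafkey >> 12) & (NCACHE - 1);	/* invented hash */
	/* (1) Evict the last L2 entry by shifting everything down one. */
	memmove(&l2[1], &l2[0], sizeof(cache_elm_t) * (NCACHE_L2 - 1));
	/* (2) Demote whatever collided in the L1 slot to the L2 head. */
	l2[0] = l1[slot];
	/* (3) Fill L1 with the entry we just looked up the hard way. */
	l1[slot].leafkey = leafkey;
	l1[slot].leaf = leaf;
}

int
main(void) {
	int a, b;
	uintptr_t k1 = 0x1000;
	uintptr_t k2 = k1 + ((uintptr_t)NCACHE << 12);	/* same L1 slot */
	cache_fill(k1, &a);
	cache_fill(k2, &b);
	size_t slot = (k1 >> 12) & (NCACHE - 1);
	printf("L1 slot: %p (b), L2 head: %p (a)\n", l1[slot].leaf, l2[0].leaf);
	return 0;
}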
88794diff --git a/jemalloc/src/safety_check.c b/jemalloc/src/safety_check.c
88795deleted file mode 100644
88796index 209fdda..0000000
88797--- a/jemalloc/src/safety_check.c
88798+++ /dev/null
88799@@ -1,36 +0,0 @@
88800-#include "jemalloc/internal/jemalloc_preamble.h"
88801-#include "jemalloc/internal/jemalloc_internal_includes.h"
88802-
88803-static safety_check_abort_hook_t safety_check_abort;
88804-
88805-void safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr,
88806-    size_t true_size, size_t input_size) {
88807-	char *src = current_dealloc ? "the current pointer being freed" :
88808-	    "in thread cache, possibly from previous deallocations";
88809-
88810-	safety_check_fail("<jemalloc>: size mismatch detected (true size %zu "
88811-	    "vs input size %zu), likely caused by application sized "
88812-	    "deallocation bugs (source address: %p, %s). Suggest building with "
88813-	    "--enable-debug or address sanitizer for debugging. Abort.\n",
88814-	    true_size, input_size, ptr, src);
88815-}
88816-
88817-void safety_check_set_abort(safety_check_abort_hook_t abort_fn) {
88818-	safety_check_abort = abort_fn;
88819-}
88820-
88821-void safety_check_fail(const char *format, ...) {
88822-	char buf[MALLOC_PRINTF_BUFSIZE];
88823-
88824-	va_list ap;
88825-	va_start(ap, format);
88826-	malloc_vsnprintf(buf, MALLOC_PRINTF_BUFSIZE, format, ap);
88827-	va_end(ap);
88828-
88829-	if (safety_check_abort == NULL) {
88830-		malloc_write(buf);
88831-		abort();
88832-	} else {
88833-		safety_check_abort(buf);
88834-	}
88835-}
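The hook indirection in the safety_check.c removal above exists so tests (or an embedder) can observe failure messages without the process aborting. A standalone miniature of the same pattern, not wired to jemalloc; names and the buffer size are made up:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

typedef void (abort_hook_t)(const char *msg);
static abort_hook_t *abort_hook = NULL;	/* default: write and abort */

static void
check_fail(const char *format, ...) {
	char buf[256];
	va_list ap;

	va_start(ap, format);
	vsnprintf(buf, sizeof(buf), format, ap);
	va_end(ap);
	if (abort_hook == NULL) {
		fputs(buf, stderr);
		abort();
	}
	abort_hook(buf);	/* registered hook observes the message */
}

static void
capture(const char *msg) {
	printf("captured: %s", msg);
}

int
main(void) {
	abort_hook = capture;	/* as a test harness would do via a setter */
	check_fail("size mismatch: true %zu vs input %zu\n", (size_t)32,
	    (size_t)16);
	return 0;
}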
88836diff --git a/jemalloc/src/san.c b/jemalloc/src/san.c
88837deleted file mode 100644
88838index 6e51291..0000000
88839--- a/jemalloc/src/san.c
88840+++ /dev/null
88841@@ -1,208 +0,0 @@
88842-#include "jemalloc/internal/jemalloc_preamble.h"
88843-#include "jemalloc/internal/jemalloc_internal_includes.h"
88844-
88845-#include "jemalloc/internal/assert.h"
88846-#include "jemalloc/internal/ehooks.h"
88847-#include "jemalloc/internal/san.h"
88848-#include "jemalloc/internal/tsd.h"
88849-
88850-/* The sanitizer options. */
88851-size_t opt_san_guard_large = SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT;
88852-size_t opt_san_guard_small = SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT;
88853-
88854-/* Aligned (-1 is off) ptrs will be junked & stashed on dealloc. */
88855-ssize_t opt_lg_san_uaf_align = SAN_LG_UAF_ALIGN_DEFAULT;
88856-
88857-/*
88858- *  Initialized in san_init().  When disabled, the mask is set to (uintptr_t)-1
88859- *  to always fail the nonfast_align check.
88860- */
88861-uintptr_t san_cache_bin_nonfast_mask = SAN_CACHE_BIN_NONFAST_MASK_DEFAULT;
88862-
88863-static inline void
88864-san_find_guarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2,
88865-    uintptr_t *addr, size_t size, bool left, bool right) {
88866-	assert(!edata_guarded_get(edata));
88867-	assert(size % PAGE == 0);
88868-	*addr = (uintptr_t)edata_base_get(edata);
88869-	if (left) {
88870-		*guard1 = *addr;
88871-		*addr += SAN_PAGE_GUARD;
88872-	} else {
88873-		*guard1 = 0;
88874-	}
88875-
88876-	if (right) {
88877-		*guard2 = *addr + size;
88878-	} else {
88879-		*guard2 = 0;
88880-	}
88881-}
88882-
88883-static inline void
88884-san_find_unguarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2,
88885-    uintptr_t *addr, size_t size, bool left, bool right) {
88886-	assert(edata_guarded_get(edata));
88887-	assert(size % PAGE == 0);
88888-	*addr = (uintptr_t)edata_base_get(edata);
88889-	if (right) {
88890-		*guard2 = *addr + size;
88891-	} else {
88892-		*guard2 = 0;
88893-	}
88894-
88895-	if (left) {
88896-		*guard1 = *addr - SAN_PAGE_GUARD;
88897-		assert(*guard1 != 0);
88898-		*addr = *guard1;
88899-	} else {
88900-		*guard1 = 0;
88901-	}
88902-}
88903-
88904-void
88905-san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap,
88906-    bool left, bool right, bool remap) {
88907-	assert(left || right);
88908-	if (remap) {
88909-		emap_deregister_boundary(tsdn, emap, edata);
88910-	}
88911-
88912-	size_t size_with_guards = edata_size_get(edata);
88913-	size_t usize = (left && right)
88914-	    ? san_two_side_unguarded_sz(size_with_guards)
88915-	    : san_one_side_unguarded_sz(size_with_guards);
88916-
88917-	uintptr_t guard1, guard2, addr;
88918-	san_find_guarded_addr(edata, &guard1, &guard2, &addr, usize, left,
88919-	    right);
88920-
88921-	assert(edata_state_get(edata) == extent_state_active);
88922-	ehooks_guard(tsdn, ehooks, (void *)guard1, (void *)guard2);
88923-
88924-	/* Update the guarded addr and usable size of the edata. */
88925-	edata_size_set(edata, usize);
88926-	edata_addr_set(edata, (void *)addr);
88927-	edata_guarded_set(edata, true);
88928-
88929-	if (remap) {
88930-		emap_register_boundary(tsdn, emap, edata, SC_NSIZES,
88931-		    /* slab */ false);
88932-	}
88933-}
88934-
88935-static void
88936-san_unguard_pages_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
88937-    emap_t *emap, bool left, bool right, bool remap) {
88938-	assert(left || right);
88939-	/* Remove the inner boundary which no longer exists. */
88940-	if (remap) {
88941-		assert(edata_state_get(edata) == extent_state_active);
88942-		emap_deregister_boundary(tsdn, emap, edata);
88943-	} else {
88944-		assert(edata_state_get(edata) == extent_state_retained);
88945-	}
88946-
88947-	size_t size = edata_size_get(edata);
88948-	size_t size_with_guards = (left && right)
88949-	    ? san_two_side_guarded_sz(size)
88950-	    : san_one_side_guarded_sz(size);
88951-
88952-	uintptr_t guard1, guard2, addr;
88953-	san_find_unguarded_addr(edata, &guard1, &guard2, &addr, size, left,
88954-	    right);
88955-
88956-	ehooks_unguard(tsdn, ehooks, (void *)guard1, (void *)guard2);
88957-
88958-	/* Update the true addr and usable size of the edata. */
88959-	edata_size_set(edata, size_with_guards);
88960-	edata_addr_set(edata, (void *)addr);
88961-	edata_guarded_set(edata, false);
88962-
88963-	/*
88964-	 * Then re-register the outer boundary including the guards, if
88965-	 * requested.
88966-	 */
88967-	if (remap) {
88968-		emap_register_boundary(tsdn, emap, edata, SC_NSIZES,
88969-		    /* slab */ false);
88970-	}
88971-}
88972-
88973-void
88974-san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
88975-    emap_t *emap, bool left, bool right) {
88976-	san_unguard_pages_impl(tsdn, ehooks, edata, emap, left, right,
88977-	    /* remap */ true);
88978-}
88979-
88980-void
88981-san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
88982-    emap_t *emap) {
88983-	emap_assert_not_mapped(tsdn, emap, edata);
88984-	/*
88985-	 * We don't want to touch the emap of about-to-be-destroyed extents, as
88986-	 * they have been unmapped upon eviction from the retained ecache. Also,
88987-	 * we unguard the extents to the right, because retained extents only
88988-	 * own their right guard page per san_bump_alloc's logic.
88989-	 */
88990-	san_unguard_pages_impl(tsdn, ehooks, edata, emap, /* left */ false,
88991-	    /* right */ true, /* remap */ false);
88992-}
88993-
88994-static bool
88995-san_stashed_corrupted(void *ptr, size_t size) {
88996-	if (san_junk_ptr_should_slow()) {
88997-		for (size_t i = 0; i < size; i++) {
88998-			if (((char *)ptr)[i] != (char)uaf_detect_junk) {
88999-				return true;
89000-			}
89001-		}
89002-		return false;
89003-	}
89004-
89005-	void *first, *mid, *last;
89006-	san_junk_ptr_locations(ptr, size, &first, &mid, &last);
89007-	if (*(uintptr_t *)first != uaf_detect_junk ||
89008-	    *(uintptr_t *)mid != uaf_detect_junk ||
89009-	    *(uintptr_t *)last != uaf_detect_junk) {
89010-		return true;
89011-	}
89012-
89013-	return false;
89014-}
89015-
89016-void
89017-san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize) {
89018-	/*
89019-	 * Verify that the junk-filled & stashed pointers remain unchanged, to
89020-	 * detect write-after-free.
89021-	 */
89022-	for (size_t n = 0; n < nstashed; n++) {
89023-		void *stashed = ptrs[n];
89024-		assert(stashed != NULL);
89025-		assert(cache_bin_nonfast_aligned(stashed));
89026-		if (unlikely(san_stashed_corrupted(stashed, usize))) {
89027-			safety_check_fail("<jemalloc>: Write-after-free "
89028-			    "detected on deallocated pointer %p (size %zu).\n",
89029-			    stashed, usize);
89030-		}
89031-	}
89032-}
89033-
89034-void
89035-tsd_san_init(tsd_t *tsd) {
89036-	*tsd_san_extents_until_guard_smallp_get(tsd) = opt_san_guard_small;
89037-	*tsd_san_extents_until_guard_largep_get(tsd) = opt_san_guard_large;
89038-}
89039-
89040-void
89041-san_init(ssize_t lg_san_uaf_align) {
89042-	assert(lg_san_uaf_align == -1 || lg_san_uaf_align >= LG_PAGE);
89043-	if (lg_san_uaf_align == -1) {
89044-		san_cache_bin_nonfast_mask = (uintptr_t)-1;
89045-		return;
89046-	}
89047-
89048-	san_cache_bin_nonfast_mask = ((uintptr_t)1 << lg_san_uaf_align) - 1;
89049-}
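The stash-corruption probe in the san.c removal above has a fast path that samples only a few word-sized locations of the junk-filled region. A standalone sketch of that idea; the probe points (first, middle, last word) and the junk pattern stand in for san_junk_ptr_locations() and uaf_detect_junk, and the byte-wise slow path is omitted:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define JUNK ((uintptr_t)0x5a5a5a5a5a5a5a5aULL)	/* invented pattern */

static bool
stashed_corrupted(const void *ptr, size_t size) {
	/* Probe three aligned words instead of scanning every byte. */
	const uintptr_t *first = (const uintptr_t *)ptr;
	const uintptr_t *mid = first + (size / sizeof(uintptr_t)) / 2;
	const uintptr_t *last = first + size / sizeof(uintptr_t) - 1;

	return *first != JUNK || *mid != JUNK || *last != JUNK;
}

int
main(void) {
	uintptr_t stash[8];

	for (size_t i = 0; i < 8; i++) {
		stash[i] = JUNK;	/* freed and junk-filled */
	}
	printf("clean:   %d\n", stashed_corrupted(stash, sizeof(stash)));
	stash[4] = 0;			/* simulated write-after-free */
	printf("corrupt: %d\n", stashed_corrupted(stash, sizeof(stash)));
	return 0;
}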
89050diff --git a/jemalloc/src/san_bump.c b/jemalloc/src/san_bump.c
89051deleted file mode 100644
89052index 8889745..0000000
89053--- a/jemalloc/src/san_bump.c
89054+++ /dev/null
89055@@ -1,104 +0,0 @@
89056-#include "jemalloc/internal/jemalloc_preamble.h"
89057-#include "jemalloc/internal/jemalloc_internal_includes.h"
89058-
89059-#include "jemalloc/internal/san_bump.h"
89060-#include "jemalloc/internal/pac.h"
89061-#include "jemalloc/internal/san.h"
89062-#include "jemalloc/internal/ehooks.h"
89063-#include "jemalloc/internal/edata_cache.h"
89064-
89065-static bool
89066-san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
89067-    ehooks_t *ehooks, size_t size);
89068-
89069-edata_t *
89070-san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
89071-    ehooks_t *ehooks, size_t size, bool zero) {
89072-	assert(san_bump_enabled());
89073-
89074-	edata_t *to_destroy;
89075-	size_t guarded_size = san_one_side_guarded_sz(size);
89076-
89077-	malloc_mutex_lock(tsdn, &sba->mtx);
89078-
89079-	if (sba->curr_reg == NULL ||
89080-	    edata_size_get(sba->curr_reg) < guarded_size) {
89081-		/*
89082-		 * If the current region can't accommodate the allocation,
89083-		 * try replacing it with a larger one, destroying the current
89084-		 * region if the replacement succeeds.
89085-		 */
89086-		to_destroy = sba->curr_reg;
89087-		bool err = san_bump_grow_locked(tsdn, sba, pac, ehooks,
89088-		    guarded_size);
89089-		if (err) {
89090-			goto label_err;
89091-		}
89092-	} else {
89093-		to_destroy = NULL;
89094-	}
89095-	assert(guarded_size <= edata_size_get(sba->curr_reg));
89096-	size_t trail_size = edata_size_get(sba->curr_reg) - guarded_size;
89097-
89098-	edata_t *edata;
89099-	if (trail_size != 0) {
89100-		edata_t *curr_reg_trail = extent_split_wrapper(tsdn, pac,
89101-		    ehooks, sba->curr_reg, guarded_size, trail_size,
89102-		    /* holding_core_locks */ true);
89103-		if (curr_reg_trail == NULL) {
89104-			goto label_err;
89105-		}
89106-		edata = sba->curr_reg;
89107-		sba->curr_reg = curr_reg_trail;
89108-	} else {
89109-		edata = sba->curr_reg;
89110-		sba->curr_reg = NULL;
89111-	}
89112-
89113-	malloc_mutex_unlock(tsdn, &sba->mtx);
89114-
89115-	assert(!edata_guarded_get(edata));
89116-	assert(sba->curr_reg == NULL || !edata_guarded_get(sba->curr_reg));
89117-	assert(to_destroy == NULL || !edata_guarded_get(to_destroy));
89118-
89119-	if (to_destroy != NULL) {
89120-		extent_destroy_wrapper(tsdn, pac, ehooks, to_destroy);
89121-	}
89122-
89123-	san_guard_pages(tsdn, ehooks, edata, pac->emap, /* left */ false,
89124-	    /* right */ true, /* remap */ true);
89125-
89126-	if (extent_commit_zero(tsdn, ehooks, edata, /* commit */ true, zero,
89127-	    /* growing_retained */ false)) {
89128-		extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
89129-		    edata);
89130-		return NULL;
89131-	}
89132-
89133-	if (config_prof) {
89134-		extent_gdump_add(tsdn, edata);
89135-	}
89136-
89137-	return edata;
89138-label_err:
89139-	malloc_mutex_unlock(tsdn, &sba->mtx);
89140-	return NULL;
89141-}
89142-
89143-static bool
89144-san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
89145-    ehooks_t *ehooks, size_t size) {
89146-	malloc_mutex_assert_owner(tsdn, &sba->mtx);
89147-
89148-	bool committed = false, zeroed = false;
89149-	size_t alloc_size = size > SBA_RETAINED_ALLOC_SIZE ? size :
89150-	    SBA_RETAINED_ALLOC_SIZE;
89151-	assert((alloc_size & PAGE_MASK) == 0);
89152-	sba->curr_reg = extent_alloc_wrapper(tsdn, pac, ehooks, NULL,
89153-	    alloc_size, PAGE, zeroed, &committed,
89154-	    /* growing_retained */ true);
89155-	if (sba->curr_reg == NULL) {
89156-		return true;
89157-	}
89158-	return false;
89159-}
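The san_bump.c removal above is a bump-and-split allocator: keep one retained region, carve the (guard-inclusive) request off its front, keep the trail as the new current region, and grow to a fresh, larger region when the current one is too small. An offset-only standalone sketch; grow() stands in for extent_alloc_wrapper(), the minimum region size is invented, and guard-page and extent bookkeeping are elided:

#include <stddef.h>
#include <stdio.h>

#define MIN_REGION ((size_t)4 << 20)	/* invented minimum region size */

typedef struct { size_t base, size; } region_t;
static region_t curr = {0, 0};
static size_t next_base = 0;

static region_t
grow(size_t size) {
	size_t alloc = size > MIN_REGION ? size : MIN_REGION;
	region_t r = {next_base, alloc};

	next_base += alloc;
	return r;
}

static region_t
bump_alloc(size_t guarded_size) {
	region_t out;

	if (curr.size < guarded_size) {
		/* Too small: replace with a fresh, larger region. */
		curr = grow(guarded_size);
	}
	out.base = curr.base;
	out.size = guarded_size;
	curr.base += guarded_size;	/* keep the trail for the next call */
	curr.size -= guarded_size;
	return out;
}

int
main(void) {
	region_t a = bump_alloc(64 << 10);
	region_t b = bump_alloc(64 << 10);

	printf("a=[%zu,+%zu) b=[%zu,+%zu) trail=%zu\n", a.base, a.size, b.base,
	    b.size, curr.size);
	return 0;
}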
89160diff --git a/jemalloc/src/sc.c b/jemalloc/src/sc.c
89161deleted file mode 100644
89162index e4a94d8..0000000
89163--- a/jemalloc/src/sc.c
89164+++ /dev/null
89165@@ -1,306 +0,0 @@
89166-#include "jemalloc/internal/jemalloc_preamble.h"
89167-
89168-#include "jemalloc/internal/assert.h"
89169-#include "jemalloc/internal/bit_util.h"
89170-#include "jemalloc/internal/bitmap.h"
89171-#include "jemalloc/internal/pages.h"
89172-#include "jemalloc/internal/sc.h"
89173-
89174-/*
89175- * This module computes the size classes used to satisfy allocations.  The logic
89176- * here was ported more or less line-by-line from a shell script, and because of
89177- * that is not the most idiomatic C.  Eventually we should fix this, but for now
89178- * at least the damage is compartmentalized to this file.
89179- */
89180-
89181-size_t
89182-reg_size_compute(int lg_base, int lg_delta, int ndelta) {
89183-	return (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta);
89184-}
89185-
89186-/* Returns the number of pages in the slab. */
89187-static int
89188-slab_size(int lg_page, int lg_base, int lg_delta, int ndelta) {
89189-	size_t page = (ZU(1) << lg_page);
89190-	size_t reg_size = reg_size_compute(lg_base, lg_delta, ndelta);
89191-
89192-	size_t try_slab_size = page;
89193-	size_t try_nregs = try_slab_size / reg_size;
89194-	size_t perfect_slab_size = 0;
89195-	bool perfect = false;
89196-	/*
89197-	 * This loop continues until we find the least common multiple of the
89198-	 * page size and size class size.  Size classes are all of the form
89199-	 * base + ndelta * delta == (ndelta + base/delta) * delta, which is
89200-	 * (ndelta + ngroup) * delta.  The way we choose slabbing strategies
89201-	 * means that delta is at most the page size and ndelta < ngroup.  So
89202-	 * the loop executes for at most 2 * ngroup - 1 iterations, which is
89203-	 * also the bound on the number of pages in a slab chosen by default.
89204-	 * With the current default settings, this is at most 7.
89205-	 */
89206-	while (!perfect) {
89207-		perfect_slab_size = try_slab_size;
89208-		size_t perfect_nregs = try_nregs;
89209-		try_slab_size += page;
89210-		try_nregs = try_slab_size / reg_size;
89211-		if (perfect_slab_size == perfect_nregs * reg_size) {
89212-			perfect = true;
89213-		}
89214-	}
89215-	return (int)(perfect_slab_size / page);
89216-}
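
The loop in slab_size effectively searches for the least common multiple of the page size and the region size, expressed in pages. A simplified standalone sketch of the same search, assuming only that reg_size is nonzero (example_perfect_slab_size is a made-up name, not jemalloc's):

#include <assert.h>
#include <stddef.h>

/* Smallest multiple of page that is also a multiple of reg_size. */
static size_t
example_perfect_slab_size(size_t page, size_t reg_size) {
	size_t slab = page;
	while (slab % reg_size != 0) {
		slab += page;
	}
	return slab;
}

int
main(void) {
	/*
	 * 2560-byte regions on 4096-byte pages: the least common multiple is
	 * 20480 bytes, i.e. a 5-page slab holding exactly 8 regions.
	 */
	assert(example_perfect_slab_size(4096, 2560) == 20480);
	return 0;
}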
89217-
89218-static void
89219-size_class(
89220-    /* Output. */
89221-    sc_t *sc,
89222-    /* Configuration decisions. */
89223-    int lg_max_lookup, int lg_page, int lg_ngroup,
89224-    /* Inputs specific to the size class. */
89225-    int index, int lg_base, int lg_delta, int ndelta) {
89226-	sc->index = index;
89227-	sc->lg_base = lg_base;
89228-	sc->lg_delta = lg_delta;
89229-	sc->ndelta = ndelta;
89230-	size_t size = reg_size_compute(lg_base, lg_delta, ndelta);
89231-	sc->psz = (size % (ZU(1) << lg_page) == 0);
89232-	if (index == 0) {
89233-		assert(!sc->psz);
89234-	}
89235-	if (size < (ZU(1) << (lg_page + lg_ngroup))) {
89236-		sc->bin = true;
89237-		sc->pgs = slab_size(lg_page, lg_base, lg_delta, ndelta);
89238-	} else {
89239-		sc->bin = false;
89240-		sc->pgs = 0;
89241-	}
89242-	if (size <= (ZU(1) << lg_max_lookup)) {
89243-		sc->lg_delta_lookup = lg_delta;
89244-	} else {
89245-		sc->lg_delta_lookup = 0;
89246-	}
89247-}
89248-
89249-static void
89250-size_classes(
89251-    /* Output. */
89252-    sc_data_t *sc_data,
89253-    /* Determined by the system. */
89254-    size_t lg_ptr_size, int lg_quantum,
89255-    /* Configuration decisions. */
89256-    int lg_tiny_min, int lg_max_lookup, int lg_page, int lg_ngroup) {
89257-	int ptr_bits = (1 << lg_ptr_size) * 8;
89258-	int ngroup = (1 << lg_ngroup);
89259-	int ntiny = 0;
89260-	int nlbins = 0;
89261-	int lg_tiny_maxclass = (unsigned)-1;
89262-	int nbins = 0;
89263-	int npsizes = 0;
89264-
89265-	int index = 0;
89266-
89267-	int ndelta = 0;
89268-	int lg_base = lg_tiny_min;
89269-	int lg_delta = lg_base;
89270-
89271-	/* Outputs that we update as we go. */
89272-	size_t lookup_maxclass = 0;
89273-	size_t small_maxclass = 0;
89274-	int lg_large_minclass = 0;
89275-	size_t large_maxclass = 0;
89276-
89277-	/* Tiny size classes. */
89278-	while (lg_base < lg_quantum) {
89279-		sc_t *sc = &sc_data->sc[index];
89280-		size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
89281-		    lg_base, lg_delta, ndelta);
89282-		if (sc->lg_delta_lookup != 0) {
89283-			nlbins = index + 1;
89284-		}
89285-		if (sc->psz) {
89286-			npsizes++;
89287-		}
89288-		if (sc->bin) {
89289-			nbins++;
89290-		}
89291-		ntiny++;
89292-		/* Final written value is correct. */
89293-		lg_tiny_maxclass = lg_base;
89294-		index++;
89295-		lg_delta = lg_base;
89296-		lg_base++;
89297-	}
89298-
89299-	/* First non-tiny (pseudo) group. */
89300-	if (ntiny != 0) {
89301-		sc_t *sc = &sc_data->sc[index];
89302-		/*
89303-		 * See the note in sc.h; the first non-tiny size class has an
89304-		 * unusual encoding.
89305-		 */
89306-		lg_base--;
89307-		ndelta = 1;
89308-		size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
89309-		    lg_base, lg_delta, ndelta);
89310-		index++;
89311-		lg_base++;
89312-		lg_delta++;
89313-		if (sc->psz) {
89314-			npsizes++;
89315-		}
89316-		if (sc->bin) {
89317-			nbins++;
89318-		}
89319-	}
89320-	while (ndelta < ngroup) {
89321-		sc_t *sc = &sc_data->sc[index];
89322-		size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
89323-		    lg_base, lg_delta, ndelta);
89324-		index++;
89325-		ndelta++;
89326-		if (sc->psz) {
89327-			npsizes++;
89328-		}
89329-		if (sc->bin) {
89330-			nbins++;
89331-		}
89332-	}
89333-
89334-	/* All remaining groups. */
89335-	lg_base = lg_base + lg_ngroup;
89336-	while (lg_base < ptr_bits - 1) {
89337-		ndelta = 1;
89338-		int ndelta_limit;
89339-		if (lg_base == ptr_bits - 2) {
89340-			ndelta_limit = ngroup - 1;
89341-		} else {
89342-			ndelta_limit = ngroup;
89343-		}
89344-		while (ndelta <= ndelta_limit) {
89345-			sc_t *sc = &sc_data->sc[index];
89346-			size_class(sc, lg_max_lookup, lg_page, lg_ngroup, index,
89347-			    lg_base, lg_delta, ndelta);
89348-			if (sc->lg_delta_lookup != 0) {
89349-				nlbins = index + 1;
89350-				/* Final written value is correct. */
89351-				lookup_maxclass = (ZU(1) << lg_base)
89352-				    + (ZU(ndelta) << lg_delta);
89353-			}
89354-			if (sc->psz) {
89355-				npsizes++;
89356-			}
89357-			if (sc->bin) {
89358-				nbins++;
89359-				/* Final written value is correct. */
89360-				small_maxclass = (ZU(1) << lg_base)
89361-				    + (ZU(ndelta) << lg_delta);
89362-				if (lg_ngroup > 0) {
89363-					lg_large_minclass = lg_base + 1;
89364-				} else {
89365-					lg_large_minclass = lg_base + 2;
89366-				}
89367-			}
89368-			large_maxclass = (ZU(1) << lg_base)
89369-			    + (ZU(ndelta) << lg_delta);
89370-			index++;
89371-			ndelta++;
89372-		}
89373-		lg_base++;
89374-		lg_delta++;
89375-	}
89376-	/* Additional outputs. */
89377-	int nsizes = index;
89378-	unsigned lg_ceil_nsizes = lg_ceil(nsizes);
89379-
89380-	/* Fill in the output data. */
89381-	sc_data->ntiny = ntiny;
89382-	sc_data->nlbins = nlbins;
89383-	sc_data->nbins = nbins;
89384-	sc_data->nsizes = nsizes;
89385-	sc_data->lg_ceil_nsizes = lg_ceil_nsizes;
89386-	sc_data->npsizes = npsizes;
89387-	sc_data->lg_tiny_maxclass = lg_tiny_maxclass;
89388-	sc_data->lookup_maxclass = lookup_maxclass;
89389-	sc_data->small_maxclass = small_maxclass;
89390-	sc_data->lg_large_minclass = lg_large_minclass;
89391-	sc_data->large_minclass = (ZU(1) << lg_large_minclass);
89392-	sc_data->large_maxclass = large_maxclass;
89393-
89394-	/*
89395-	 * We compute these values in two ways:
89396-	 *   - Incrementally, as above.
89397-	 *   - In macros, in sc.h.
89398-	 * The computation is easier when done incrementally, but putting it in
89399-	 * a constant makes it available to the fast paths without having to
89400-	 * touch the extra global cacheline.  We assert, however, that the two
89401-	 * computations are equivalent.
89402-	 */
89403-	assert(sc_data->npsizes == SC_NPSIZES);
89404-	assert(sc_data->lg_tiny_maxclass == SC_LG_TINY_MAXCLASS);
89405-	assert(sc_data->small_maxclass == SC_SMALL_MAXCLASS);
89406-	assert(sc_data->large_minclass == SC_LARGE_MINCLASS);
89407-	assert(sc_data->lg_large_minclass == SC_LG_LARGE_MINCLASS);
89408-	assert(sc_data->large_maxclass == SC_LARGE_MAXCLASS);
89409-
89410-	/*
89411-	 * In the allocation fastpath, we want to assume that we can
89412-	 * unconditionally subtract the requested allocation size from
89413-	 * a ssize_t, and detect passing through 0 correctly.  This
89414-	 * results in optimal generated code.  For this to work, the
89415-	 * maximum allocation size must be less than SSIZE_MAX.
89416-	 */
89417-	assert(SC_LARGE_MAXCLASS < SSIZE_MAX);
89418-}
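
The final assertion guards the fastpath pattern described in the comment above: because every class is below SSIZE_MAX, an allocation size can be subtracted from a signed running value without overflow, and one sign check detects crossing zero. A hedged sketch of that pattern (example_fits is illustrative; POSIX ssize_t is assumed):

#include <assert.h>
#include <stddef.h>
#include <sys/types.h>	/* ssize_t (POSIX) */

/*
 * remaining is assumed non-negative and usize < SSIZE_MAX (which the
 * assertion on SC_LARGE_MAXCLASS guarantees for any size class), so the
 * subtraction cannot overflow and a single signed comparison suffices.
 */
static int
example_fits(ssize_t remaining, size_t usize) {
	return remaining - (ssize_t)usize >= 0;
}

int
main(void) {
	assert(example_fits(4096, 4096));
	assert(!example_fits(4096, 4097));
	return 0;
}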
89419-
89420-void
89421-sc_data_init(sc_data_t *sc_data) {
89422-	size_classes(sc_data, LG_SIZEOF_PTR, LG_QUANTUM, SC_LG_TINY_MIN,
89423-	    SC_LG_MAX_LOOKUP, LG_PAGE, SC_LG_NGROUP);
89424-
89425-	sc_data->initialized = true;
89426-}
89427-
89428-static void
89429-sc_data_update_sc_slab_size(sc_t *sc, size_t reg_size, size_t pgs_guess) {
89430-	size_t min_pgs = reg_size / PAGE;
89431-	if (reg_size % PAGE != 0) {
89432-		min_pgs++;
89433-	}
89434-	/*
89435-	 * BITMAP_MAXBITS is actually determined by putting the smallest
89436-	 * possible size-class on one page, so this can never be 0.
89437-	 */
89438-	size_t max_pgs = BITMAP_MAXBITS * reg_size / PAGE;
89439-
89440-	assert(min_pgs <= max_pgs);
89441-	assert(min_pgs > 0);
89442-	assert(max_pgs >= 1);
89443-	if (pgs_guess < min_pgs) {
89444-		sc->pgs = (int)min_pgs;
89445-	} else if (pgs_guess > max_pgs) {
89446-		sc->pgs = (int)max_pgs;
89447-	} else {
89448-		sc->pgs = (int)pgs_guess;
89449-	}
89450-}
89451-
89452-void
89453-sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end, int pgs) {
89454-	assert(data->initialized);
89455-	for (int i = 0; i < data->nsizes; i++) {
89456-		sc_t *sc = &data->sc[i];
89457-		if (!sc->bin) {
89458-			break;
89459-		}
89460-		size_t reg_size = reg_size_compute(sc->lg_base, sc->lg_delta,
89461-		    sc->ndelta);
89462-		if (begin <= reg_size && reg_size <= end) {
89463-			sc_data_update_sc_slab_size(sc, reg_size, pgs);
89464-		}
89465-	}
89466-}
89467-
89468-void
89469-sc_boot(sc_data_t *data) {
89470-	sc_data_init(data);
89471-}
89472diff --git a/jemalloc/src/sec.c b/jemalloc/src/sec.c
89473deleted file mode 100644
89474index df67559..0000000
89475--- a/jemalloc/src/sec.c
89476+++ /dev/null
89477@@ -1,422 +0,0 @@
89478-#include "jemalloc/internal/jemalloc_preamble.h"
89479-#include "jemalloc/internal/jemalloc_internal_includes.h"
89480-
89481-#include "jemalloc/internal/sec.h"
89482-
89483-static edata_t *sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
89484-    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
89485-    bool *deferred_work_generated);
89486-static bool sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
89487-    size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
89488-static bool sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
89489-    size_t old_size, size_t new_size, bool *deferred_work_generated);
89490-static void sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
89491-    bool *deferred_work_generated);
89492-
89493-static void
89494-sec_bin_init(sec_bin_t *bin) {
89495-	bin->being_batch_filled = false;
89496-	bin->bytes_cur = 0;
89497-	edata_list_active_init(&bin->freelist);
89498-}
89499-
89500-bool
89501-sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback,
89502-    const sec_opts_t *opts) {
89503-	assert(opts->max_alloc >= PAGE);
89504-
89505-	size_t max_alloc = PAGE_FLOOR(opts->max_alloc);
89506-	pszind_t npsizes = sz_psz2ind(max_alloc) + 1;
89507-
89508-	size_t sz_shards = opts->nshards * sizeof(sec_shard_t);
89509-	size_t sz_bins = opts->nshards * (size_t)npsizes * sizeof(sec_bin_t);
89510-	size_t sz_alloc = sz_shards + sz_bins;
89511-	void *dynalloc = base_alloc(tsdn, base, sz_alloc, CACHELINE);
89512-	if (dynalloc == NULL) {
89513-		return true;
89514-	}
89515-	sec_shard_t *shard_cur = (sec_shard_t *)dynalloc;
89516-	sec->shards = shard_cur;
89517-	sec_bin_t *bin_cur = (sec_bin_t *)&shard_cur[opts->nshards];
89518-	/* Just for asserts, below. */
89519-	sec_bin_t *bin_start = bin_cur;
89520-
89521-	for (size_t i = 0; i < opts->nshards; i++) {
89522-		sec_shard_t *shard = shard_cur;
89523-		shard_cur++;
89524-		bool err = malloc_mutex_init(&shard->mtx, "sec_shard",
89525-		    WITNESS_RANK_SEC_SHARD, malloc_mutex_rank_exclusive);
89526-		if (err) {
89527-			return true;
89528-		}
89529-		shard->enabled = true;
89530-		shard->bins = bin_cur;
89531-		for (pszind_t j = 0; j < npsizes; j++) {
89532-			sec_bin_init(&shard->bins[j]);
89533-			bin_cur++;
89534-		}
89535-		shard->bytes_cur = 0;
89536-		shard->to_flush_next = 0;
89537-	}
89538-	/*
89539-	 * bin_start should exactly match the first unused byte after the
89540-	 * shards.
89541-	 */
89542-	assert((void *)shard_cur == (void *)bin_start);
89543-	/* And the last bin to use up the last bytes of the allocation. */
89544-	assert((char *)bin_cur == ((char *)dynalloc + sz_alloc));
89545-	sec->fallback = fallback;
89546-
89548-	sec->opts = *opts;
89549-	sec->npsizes = npsizes;
89550-
89551-	/*
89552-	 * Initialize these last so that an improper use of an SEC whose
89553-	 * initialization failed will segfault in an easy-to-spot way.
89554-	 */
89555-	sec->pai.alloc = &sec_alloc;
89556-	sec->pai.alloc_batch = &pai_alloc_batch_default;
89557-	sec->pai.expand = &sec_expand;
89558-	sec->pai.shrink = &sec_shrink;
89559-	sec->pai.dalloc = &sec_dalloc;
89560-	sec->pai.dalloc_batch = &pai_dalloc_batch_default;
89561-
89562-	return false;
89563-}
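
sec_init carves a single base allocation into the shard array followed by the per-shard bin arrays, which is what the two asserts above verify. A standalone sketch of that layout using plain malloc and made-up stand-in types (example_shard_t, example_bin_t):

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

typedef struct { void *bins; } example_shard_t;
typedef struct { size_t bytes_cur; } example_bin_t;

int
main(void) {
	size_t nshards = 4, npsizes = 8;
	size_t sz = nshards * sizeof(example_shard_t)
	    + nshards * npsizes * sizeof(example_bin_t);
	char *block = malloc(sz);
	assert(block != NULL);
	/* Shards first, then all bins, out of the one allocation. */
	example_shard_t *shards = (example_shard_t *)block;
	example_bin_t *bins = (example_bin_t *)&shards[nshards];
	for (size_t i = 0; i < nshards; i++) {
		shards[i].bins = &bins[i * npsizes];
	}
	/* The last bin ends exactly at the end of the single allocation. */
	assert((char *)&bins[nshards * npsizes] == block + sz);
	free(block);
	return 0;
}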
89564-
89565-static sec_shard_t *
89566-sec_shard_pick(tsdn_t *tsdn, sec_t *sec) {
89567-	/*
89568-	 * Eventually, we should implement affinity, tracking source shard using
89569-	 * the edata_t's newly freed up fields.  For now, just randomly
89570-	 * distribute across all shards.
89571-	 */
89572-	if (tsdn_null(tsdn)) {
89573-		return &sec->shards[0];
89574-	}
89575-	tsd_t *tsd = tsdn_tsd(tsdn);
89576-	uint8_t *idxp = tsd_sec_shardp_get(tsd);
89577-	if (*idxp == (uint8_t)-1) {
89578-		/*
89579-		 * First use; initialize using the trick from Daniel Lemire's
89580-		 * "A fast alternative to the modulo reduction".  Store the
89581-		 * 32-bit random value in a 64-bit integer, since the multiply
89582-		 * by the number of shards deliberately overflows 32 bits.
89583-		 */
89584-		uint64_t rand32 = prng_lg_range_u64(tsd_prng_statep_get(tsd), 32);
89585-		uint32_t idx =
89586-		    (uint32_t)((rand32 * (uint64_t)sec->opts.nshards) >> 32);
89587-		assert(idx < (uint32_t)sec->opts.nshards);
89588-		*idxp = (uint8_t)idx;
89589-	}
89590-	return &sec->shards[*idxp];
89591-}
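
The multiply-shift in sec_shard_pick is Lemire's alternative to a modulo: multiplying a 32-bit random value by nshards in 64-bit arithmetic and keeping the high 32 bits yields an index in [0, nshards) with no division. A small self-contained sketch (example_reduce is an illustrative name):

#include <assert.h>
#include <stdint.h>

/* Map a uniform 32-bit value into [0, nshards) via multiply-and-shift. */
static uint32_t
example_reduce(uint32_t rand32, uint32_t nshards) {
	return (uint32_t)(((uint64_t)rand32 * (uint64_t)nshards) >> 32);
}

int
main(void) {
	for (uint32_t n = 1; n < 64; n++) {
		/* Result always lands strictly below n; 0 maps to 0. */
		assert(example_reduce(UINT32_MAX, n) < n);
		assert(example_reduce(0, n) == 0);
	}
	return 0;
}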
89592-
89593-/*
89594- * Perhaps surprisingly, this can be called on the alloc pathways; if we hit an
89595- * empty cache, we'll try to fill it, which can push the shard over its limit.
89596- */
89597-static void
89598-sec_flush_some_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) {
89599-	malloc_mutex_assert_owner(tsdn, &shard->mtx);
89600-	edata_list_active_t to_flush;
89601-	edata_list_active_init(&to_flush);
89602-	while (shard->bytes_cur > sec->opts.bytes_after_flush) {
89603-		/* Pick a victim. */
89604-		sec_bin_t *bin = &shard->bins[shard->to_flush_next];
89605-
89606-		/* Update our victim-picking state. */
89607-		shard->to_flush_next++;
89608-		if (shard->to_flush_next == sec->npsizes) {
89609-			shard->to_flush_next = 0;
89610-		}
89611-
89612-		assert(shard->bytes_cur >= bin->bytes_cur);
89613-		if (bin->bytes_cur != 0) {
89614-			shard->bytes_cur -= bin->bytes_cur;
89615-			bin->bytes_cur = 0;
89616-			edata_list_active_concat(&to_flush, &bin->freelist);
89617-		}
89618-		/*
89619-		 * Either bin->bytes_cur was 0, in which case we didn't touch
89620-		 * the bin list but it should be empty anyway (or else we missed
89621-		 * a bytes_cur update on a list modification), or it *wasn't* 0
89622-		 * and we emptied it ourselves.  Either way, it should
89623-		 * be empty now.
89624-		 */
89625-		assert(edata_list_active_empty(&bin->freelist));
89626-	}
89627-
89628-	malloc_mutex_unlock(tsdn, &shard->mtx);
89629-	bool deferred_work_generated = false;
89630-	pai_dalloc_batch(tsdn, sec->fallback, &to_flush,
89631-	    &deferred_work_generated);
89632-}
89633-
89634-static edata_t *
89635-sec_shard_alloc_locked(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
89636-    sec_bin_t *bin) {
89637-	malloc_mutex_assert_owner(tsdn, &shard->mtx);
89638-	if (!shard->enabled) {
89639-		return NULL;
89640-	}
89641-	edata_t *edata = edata_list_active_first(&bin->freelist);
89642-	if (edata != NULL) {
89643-		edata_list_active_remove(&bin->freelist, edata);
89644-		assert(edata_size_get(edata) <= bin->bytes_cur);
89645-		bin->bytes_cur -= edata_size_get(edata);
89646-		assert(edata_size_get(edata) <= shard->bytes_cur);
89647-		shard->bytes_cur -= edata_size_get(edata);
89648-	}
89649-	return edata;
89650-}
89651-
89652-static edata_t *
89653-sec_batch_fill_and_alloc(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
89654-    sec_bin_t *bin, size_t size) {
89655-	malloc_mutex_assert_not_owner(tsdn, &shard->mtx);
89656-
89657-	edata_list_active_t result;
89658-	edata_list_active_init(&result);
89659-	bool deferred_work_generated = false;
89660-	size_t nalloc = pai_alloc_batch(tsdn, sec->fallback, size,
89661-	    1 + sec->opts.batch_fill_extra, &result, &deferred_work_generated);
89662-
89663-	edata_t *ret = edata_list_active_first(&result);
89664-	if (ret != NULL) {
89665-		edata_list_active_remove(&result, ret);
89666-	}
89667-
89668-	malloc_mutex_lock(tsdn, &shard->mtx);
89669-	bin->being_batch_filled = false;
89670-	/*
89671-	 * Handle the easy case first: nothing to cache.  Note that this can
89672-	 * only happen in case of OOM, since sec_alloc checks the expected
89673-	 * number of allocs, and doesn't bother going down the batch_fill
89674-	 * pathway if there won't be anything left to cache.  So to be in this
89675-	 * code path, we must have asked for > 1 alloc but got at most 1 back.
89676-	 */
89677-	if (nalloc <= 1) {
89678-		malloc_mutex_unlock(tsdn, &shard->mtx);
89679-		return ret;
89680-	}
89681-
89682-	size_t new_cached_bytes = (nalloc - 1) * size;
89683-
89684-	edata_list_active_concat(&bin->freelist, &result);
89685-	bin->bytes_cur += new_cached_bytes;
89686-	shard->bytes_cur += new_cached_bytes;
89687-
89688-	if (shard->bytes_cur > sec->opts.max_bytes) {
89689-		sec_flush_some_and_unlock(tsdn, sec, shard);
89690-	} else {
89691-		malloc_mutex_unlock(tsdn, &shard->mtx);
89692-	}
89693-
89694-	return ret;
89695-}
89696-
89697-static edata_t *
89698-sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
89699-    bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
89700-	assert((size & PAGE_MASK) == 0);
89701-	assert(!guarded);
89702-
89703-	sec_t *sec = (sec_t *)self;
89704-
89705-	if (zero || alignment > PAGE || sec->opts.nshards == 0
89706-	    || size > sec->opts.max_alloc) {
89707-		return pai_alloc(tsdn, sec->fallback, size, alignment, zero,
89708-		    /* guarded */ false, frequent_reuse,
89709-		    deferred_work_generated);
89710-	}
89711-	pszind_t pszind = sz_psz2ind(size);
89712-	assert(pszind < sec->npsizes);
89713-
89714-	sec_shard_t *shard = sec_shard_pick(tsdn, sec);
89715-	sec_bin_t *bin = &shard->bins[pszind];
89716-	bool do_batch_fill = false;
89717-
89718-	malloc_mutex_lock(tsdn, &shard->mtx);
89719-	edata_t *edata = sec_shard_alloc_locked(tsdn, sec, shard, bin);
89720-	if (edata == NULL) {
89721-		if (!bin->being_batch_filled
89722-		    && sec->opts.batch_fill_extra > 0) {
89723-			bin->being_batch_filled = true;
89724-			do_batch_fill = true;
89725-		}
89726-	}
89727-	malloc_mutex_unlock(tsdn, &shard->mtx);
89728-	if (edata == NULL) {
89729-		if (do_batch_fill) {
89730-			edata = sec_batch_fill_and_alloc(tsdn, sec, shard, bin,
89731-			    size);
89732-		} else {
89733-			edata = pai_alloc(tsdn, sec->fallback, size, alignment,
89734-			    zero, /* guarded */ false, frequent_reuse,
89735-			    deferred_work_generated);
89736-		}
89737-	}
89738-	return edata;
89739-}
89740-
89741-static bool
89742-sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
89743-    size_t new_size, bool zero, bool *deferred_work_generated) {
89744-	sec_t *sec = (sec_t *)self;
89745-	return pai_expand(tsdn, sec->fallback, edata, old_size, new_size, zero,
89746-	    deferred_work_generated);
89747-}
89748-
89749-static bool
89750-sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
89751-    size_t new_size, bool *deferred_work_generated) {
89752-	sec_t *sec = (sec_t *)self;
89753-	return pai_shrink(tsdn, sec->fallback, edata, old_size, new_size,
89754-	    deferred_work_generated);
89755-}
89756-
89757-static void
89758-sec_flush_all_locked(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) {
89759-	malloc_mutex_assert_owner(tsdn, &shard->mtx);
89760-	shard->bytes_cur = 0;
89761-	edata_list_active_t to_flush;
89762-	edata_list_active_init(&to_flush);
89763-	for (pszind_t i = 0; i < sec->npsizes; i++) {
89764-		sec_bin_t *bin = &shard->bins[i];
89765-		bin->bytes_cur = 0;
89766-		edata_list_active_concat(&to_flush, &bin->freelist);
89767-	}
89768-
89769-	/*
89770-	 * Ordinarily we would try to avoid doing the batch deallocation while
89771-	 * holding the shard mutex, but the flush_all pathways only happen when
89772-	 * we're disabling the HPA or resetting the arena, both of which are
89773-	 * rare pathways.
89774-	 */
89775-	bool deferred_work_generated = false;
89776-	pai_dalloc_batch(tsdn, sec->fallback, &to_flush,
89777-	    &deferred_work_generated);
89778-}
89779-
89780-static void
89781-sec_shard_dalloc_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
89782-    edata_t *edata) {
89783-	malloc_mutex_assert_owner(tsdn, &shard->mtx);
89784-	assert(shard->bytes_cur <= sec->opts.max_bytes);
89785-	size_t size = edata_size_get(edata);
89786-	pszind_t pszind = sz_psz2ind(size);
89787-	assert(pszind < sec->npsizes);
89788-	/*
89789-	 * Prepending here results in LIFO allocation per bin, which seems
89790-	 * reasonable.
89791-	 */
89792-	sec_bin_t *bin = &shard->bins[pszind];
89793-	edata_list_active_prepend(&bin->freelist, edata);
89794-	bin->bytes_cur += size;
89795-	shard->bytes_cur += size;
89796-	if (shard->bytes_cur > sec->opts.max_bytes) {
89797-		/*
89798-		 * We've exceeded the shard limit.  We make two nods in the
89799-		 * direction of fragmentation avoidance: we flush everything in
89800-		 * the shard, rather than one particular bin, and we hold the
89801-		 * lock while flushing (in case one of the extents we flush is
89802-		 * highly preferred from a fragmentation-avoidance perspective
89803-		 * in the backing allocator).  This has the extra advantage of
89804-		 * not requiring advanced cache balancing strategies.
89805-		 */
89806-		sec_flush_some_and_unlock(tsdn, sec, shard);
89807-		malloc_mutex_assert_not_owner(tsdn, &shard->mtx);
89808-	} else {
89809-		malloc_mutex_unlock(tsdn, &shard->mtx);
89810-	}
89811-}
89812-
89813-static void
89814-sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
89815-    bool *deferred_work_generated) {
89816-	sec_t *sec = (sec_t *)self;
89817-	if (sec->opts.nshards == 0
89818-	    || edata_size_get(edata) > sec->opts.max_alloc) {
89819-		pai_dalloc(tsdn, sec->fallback, edata,
89820-		    deferred_work_generated);
89821-		return;
89822-	}
89823-	sec_shard_t *shard = sec_shard_pick(tsdn, sec);
89824-	malloc_mutex_lock(tsdn, &shard->mtx);
89825-	if (shard->enabled) {
89826-		sec_shard_dalloc_and_unlock(tsdn, sec, shard, edata);
89827-	} else {
89828-		malloc_mutex_unlock(tsdn, &shard->mtx);
89829-		pai_dalloc(tsdn, sec->fallback, edata,
89830-		    deferred_work_generated);
89831-	}
89832-}
89833-
89834-void
89835-sec_flush(tsdn_t *tsdn, sec_t *sec) {
89836-	for (size_t i = 0; i < sec->opts.nshards; i++) {
89837-		malloc_mutex_lock(tsdn, &sec->shards[i].mtx);
89838-		sec_flush_all_locked(tsdn, sec, &sec->shards[i]);
89839-		malloc_mutex_unlock(tsdn, &sec->shards[i].mtx);
89840-	}
89841-}
89842-
89843-void
89844-sec_disable(tsdn_t *tsdn, sec_t *sec) {
89845-	for (size_t i = 0; i < sec->opts.nshards; i++) {
89846-		malloc_mutex_lock(tsdn, &sec->shards[i].mtx);
89847-		sec->shards[i].enabled = false;
89848-		sec_flush_all_locked(tsdn, sec, &sec->shards[i]);
89849-		malloc_mutex_unlock(tsdn, &sec->shards[i].mtx);
89850-	}
89851-}
89852-
89853-void
89854-sec_stats_merge(tsdn_t *tsdn, sec_t *sec, sec_stats_t *stats) {
89855-	size_t sum = 0;
89856-	for (size_t i = 0; i < sec->opts.nshards; i++) {
89857-		/*
89858-		 * We could save these lock acquisitions by making bytes_cur
89859-		 * atomic, but stats collection is rare anyways and we expect
89860-		 * the number and type of stats to get more interesting.
89861-		 */
89862-		malloc_mutex_lock(tsdn, &sec->shards[i].mtx);
89863-		sum += sec->shards[i].bytes_cur;
89864-		malloc_mutex_unlock(tsdn, &sec->shards[i].mtx);
89865-	}
89866-	stats->bytes += sum;
89867-}
89868-
89869-void
89870-sec_mutex_stats_read(tsdn_t *tsdn, sec_t *sec,
89871-    mutex_prof_data_t *mutex_prof_data) {
89872-	for (size_t i = 0; i < sec->opts.nshards; i++) {
89873-		malloc_mutex_lock(tsdn, &sec->shards[i].mtx);
89874-		malloc_mutex_prof_accum(tsdn, mutex_prof_data,
89875-		    &sec->shards[i].mtx);
89876-		malloc_mutex_unlock(tsdn, &sec->shards[i].mtx);
89877-	}
89878-}
89879-
89880-void
89881-sec_prefork2(tsdn_t *tsdn, sec_t *sec) {
89882-	for (size_t i = 0; i < sec->opts.nshards; i++) {
89883-		malloc_mutex_prefork(tsdn, &sec->shards[i].mtx);
89884-	}
89885-}
89886-
89887-void
89888-sec_postfork_parent(tsdn_t *tsdn, sec_t *sec) {
89889-	for (size_t i = 0; i < sec->opts.nshards; i++) {
89890-		malloc_mutex_postfork_parent(tsdn, &sec->shards[i].mtx);
89891-	}
89892-}
89893-
89894-void
89895-sec_postfork_child(tsdn_t *tsdn, sec_t *sec) {
89896-	for (size_t i = 0; i < sec->opts.nshards; i++) {
89897-		malloc_mutex_postfork_child(tsdn, &sec->shards[i].mtx);
89898-	}
89899-}
89900diff --git a/jemalloc/src/stats.c b/jemalloc/src/stats.c
89901deleted file mode 100644
89902index efc70fd..0000000
89903--- a/jemalloc/src/stats.c
89904+++ /dev/null
89905@@ -1,1973 +0,0 @@
89906-#include "jemalloc/internal/jemalloc_preamble.h"
89907-#include "jemalloc/internal/jemalloc_internal_includes.h"
89908-
89909-#include "jemalloc/internal/assert.h"
89910-#include "jemalloc/internal/ctl.h"
89911-#include "jemalloc/internal/emitter.h"
89912-#include "jemalloc/internal/fxp.h"
89913-#include "jemalloc/internal/mutex.h"
89914-#include "jemalloc/internal/mutex_prof.h"
89915-#include "jemalloc/internal/prof_stats.h"
89916-
89917-const char *global_mutex_names[mutex_prof_num_global_mutexes] = {
89918-#define OP(mtx) #mtx,
89919-	MUTEX_PROF_GLOBAL_MUTEXES
89920-#undef OP
89921-};
89922-
89923-const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = {
89924-#define OP(mtx) #mtx,
89925-	MUTEX_PROF_ARENA_MUTEXES
89926-#undef OP
89927-};
89928-
89929-#define CTL_GET(n, v, t) do {						\
89930-	size_t sz = sizeof(t);						\
89931-	xmallctl(n, (void *)v, &sz, NULL, 0);				\
89932-} while (0)
89933-
89934-#define CTL_LEAF_PREPARE(mib, miblen, name) do {			\
89935-	assert(miblen < CTL_MAX_DEPTH);					\
89936-	size_t miblen_new = CTL_MAX_DEPTH;				\
89937-	xmallctlmibnametomib(mib, miblen, name, &miblen_new);		\
89938-	assert(miblen_new > miblen);					\
89939-} while (0)
89940-
89941-#define CTL_LEAF(mib, miblen, leaf, v, t) do {			\
89942-	assert(miblen < CTL_MAX_DEPTH);					\
89943-	size_t miblen_new = CTL_MAX_DEPTH;				\
89944-	size_t sz = sizeof(t);						\
89945-	xmallctlbymibname(mib, miblen, leaf, &miblen_new, (void *)v,	\
89946-	    &sz, NULL, 0);						\
89947-	assert(miblen_new == miblen + 1);				\
89948-} while (0)
89949-
89950-#define CTL_M2_GET(n, i, v, t) do {					\
89951-	size_t mib[CTL_MAX_DEPTH];					\
89952-	size_t miblen = sizeof(mib) / sizeof(size_t);			\
89953-	size_t sz = sizeof(t);						\
89954-	xmallctlnametomib(n, mib, &miblen);				\
89955-	mib[2] = (i);							\
89956-	xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0);		\
89957-} while (0)
89958-
89959-/******************************************************************************/
89960-/* Data. */
89961-
89962-bool opt_stats_print = false;
89963-char opt_stats_print_opts[stats_print_tot_num_options+1] = "";
89964-
89965-int64_t opt_stats_interval = STATS_INTERVAL_DEFAULT;
89966-char opt_stats_interval_opts[stats_print_tot_num_options+1] = "";
89967-
89968-static counter_accum_t stats_interval_accumulated;
89969-/* Per-thread batch accumulation size for stats_interval. */
89970-static uint64_t stats_interval_accum_batch;
89971-
89972-/******************************************************************************/
89973-
89974-static uint64_t
89975-rate_per_second(uint64_t value, uint64_t uptime_ns) {
89976-	uint64_t billion = 1000000000;
89977-	if (uptime_ns == 0 || value == 0) {
89978-		return 0;
89979-	}
89980-	if (uptime_ns < billion) {
89981-		return value;
89982-	} else {
89983-		uint64_t uptime_s = uptime_ns / billion;
89984-		return value / uptime_s;
89985-	}
89986-}
89987-
89988-/* Calculate x.yyy and output a string (takes a fixed-size char array). */
89989-static bool
89990-get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) {
89991-	if (divisor == 0 || dividend > divisor) {
89992-		/* The rate is not supposed to be greater than 1. */
89993-		return true;
89994-	}
89995-	if (dividend > 0) {
89996-		assert(UINT64_MAX / dividend >= 1000);
89997-	}
89998-
89999-	unsigned n = (unsigned)((dividend * 1000) / divisor);
90000-	if (n < 10) {
90001-		malloc_snprintf(str, 6, "0.00%u", n);
90002-	} else if (n < 100) {
90003-		malloc_snprintf(str, 6, "0.0%u", n);
90004-	} else if (n < 1000) {
90005-		malloc_snprintf(str, 6, "0.%u", n);
90006-	} else {
90007-		malloc_snprintf(str, 6, "1");
90008-	}
90009-
90010-	return false;
90011-}
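
get_rate_str formats a ratio no greater than one as a decimal with three fractional digits, capping at "1". A standalone sketch of the same formatting rule using the standard snprintf rather than malloc_snprintf (example_rate_str is a made-up name; it assumes 0 < divisor, dividend <= divisor, and dividend small enough that dividend * 1000 does not overflow):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Scale the ratio to thousandths and print with leading zeros. */
static void
example_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) {
	unsigned n = (unsigned)((dividend * 1000) / divisor);
	if (n < 10) {
		snprintf(str, 6, "0.00%u", n);
	} else if (n < 100) {
		snprintf(str, 6, "0.0%u", n);
	} else if (n < 1000) {
		snprintf(str, 6, "0.%u", n);
	} else {
		snprintf(str, 6, "1");
	}
}

int
main(void) {
	char buf[6];
	example_rate_str(437, 1000, buf);	/* 437/1000 -> "0.437" */
	assert(strcmp(buf, "0.437") == 0);
	return 0;
}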
90012-
90013-static void
90014-mutex_stats_init_cols(emitter_row_t *row, const char *table_name,
90015-    emitter_col_t *name,
90016-    emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
90017-    emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) {
90018-	mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0;
90019-	mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0;
90020-
90021-	emitter_col_t *col;
90022-
90023-	if (name != NULL) {
90024-		emitter_col_init(name, row);
90025-		name->justify = emitter_justify_left;
90026-		name->width = 21;
90027-		name->type = emitter_type_title;
90028-		name->str_val = table_name;
90029-	}
90030-
90031-#define WIDTH_uint32_t 12
90032-#define WIDTH_uint64_t 16
90033-#define OP(counter, counter_type, human, derived, base_counter)		\
90034-	col = &col_##counter_type[k_##counter_type];			\
90035-	++k_##counter_type;						\
90036-	emitter_col_init(col, row);					\
90037-	col->justify = emitter_justify_right;				\
90038-	col->width = derived ? 8 : WIDTH_##counter_type;		\
90039-	col->type = emitter_type_title;					\
90040-	col->str_val = human;
90041-	MUTEX_PROF_COUNTERS
90042-#undef OP
90043-#undef WIDTH_uint32_t
90044-#undef WIDTH_uint64_t
90045-	col_uint64_t[mutex_counter_total_wait_time_ps].width = 10;
90046-}
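
The OP/MUTEX_PROF_COUNTERS machinery here is the classic X-macro pattern: one list macro is re-expanded under different OP definitions to generate parallel columns, readers, and emitters. A tiny self-contained illustration of the pattern (EXAMPLE_COUNTERS and its entries are invented for this sketch and unrelated to jemalloc's counters):

#include <stdio.h>

#define EXAMPLE_COUNTERS	\
	OP(num_ops)		\
	OP(num_wait)		\
	OP(max_wait_ns)

int
main(void) {
	/* First expansion: emit each counter name. */
#define OP(counter) printf("%s\n", #counter);
	EXAMPLE_COUNTERS
#undef OP
	/* Second expansion of the same list: count the counters. */
	int n = 0;
#define OP(counter) n++;
	EXAMPLE_COUNTERS
#undef OP
	printf("%d counters\n", n);
	return 0;
}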
90047-
90048-static void
90049-mutex_stats_read_global(size_t mib[], size_t miblen, const char *name,
90050-    emitter_col_t *col_name,
90051-    emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
90052-    emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
90053-    uint64_t uptime) {
90054-	CTL_LEAF_PREPARE(mib, miblen, name);
90055-	size_t miblen_name = miblen + 1;
90056-
90057-	col_name->str_val = name;
90058-
90059-	emitter_col_t *dst;
90060-#define EMITTER_TYPE_uint32_t emitter_type_uint32
90061-#define EMITTER_TYPE_uint64_t emitter_type_uint64
90062-#define OP(counter, counter_type, human, derived, base_counter)		\
90063-	dst = &col_##counter_type[mutex_counter_##counter];		\
90064-	dst->type = EMITTER_TYPE_##counter_type;			\
90065-	if (!derived) {							\
90066-		CTL_LEAF(mib, miblen_name, #counter,			\
90067-		    (counter_type *)&dst->bool_val, counter_type);	\
90068-	} else {							\
90069-		emitter_col_t *base =					\
90070-		    &col_##counter_type[mutex_counter_##base_counter];	\
90071-		dst->counter_type##_val =				\
90072-		    (counter_type)rate_per_second(			\
90073-		    base->counter_type##_val, uptime);			\
90074-	}
90075-	MUTEX_PROF_COUNTERS
90076-#undef OP
90077-#undef EMITTER_TYPE_uint32_t
90078-#undef EMITTER_TYPE_uint64_t
90079-}
90080-
90081-static void
90082-mutex_stats_read_arena(size_t mib[], size_t miblen, const char *name,
90083-    emitter_col_t *col_name,
90084-    emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
90085-    emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
90086-    uint64_t uptime) {
90087-	CTL_LEAF_PREPARE(mib, miblen, name);
90088-	size_t miblen_name = miblen + 1;
90089-
90090-	col_name->str_val = name;
90091-
90092-	emitter_col_t *dst;
90093-#define EMITTER_TYPE_uint32_t emitter_type_uint32
90094-#define EMITTER_TYPE_uint64_t emitter_type_uint64
90095-#define OP(counter, counter_type, human, derived, base_counter)		\
90096-	dst = &col_##counter_type[mutex_counter_##counter];		\
90097-	dst->type = EMITTER_TYPE_##counter_type;			\
90098-	if (!derived) {							\
90099-		CTL_LEAF(mib, miblen_name, #counter,			\
90100-		    (counter_type *)&dst->bool_val, counter_type);	\
90101-	} else {							\
90102-		emitter_col_t *base =					\
90103-		    &col_##counter_type[mutex_counter_##base_counter];	\
90104-		dst->counter_type##_val =				\
90105-		    (counter_type)rate_per_second(			\
90106-		    base->counter_type##_val, uptime);			\
90107-	}
90108-	MUTEX_PROF_COUNTERS
90109-#undef OP
90110-#undef EMITTER_TYPE_uint32_t
90111-#undef EMITTER_TYPE_uint64_t
90112-}
90113-
90114-static void
90115-mutex_stats_read_arena_bin(size_t mib[], size_t miblen,
90116-    emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
90117-    emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
90118-    uint64_t uptime) {
90119-	CTL_LEAF_PREPARE(mib, miblen, "mutex");
90120-	size_t miblen_mutex = miblen + 1;
90121-
90122-	emitter_col_t *dst;
90123-
90124-#define EMITTER_TYPE_uint32_t emitter_type_uint32
90125-#define EMITTER_TYPE_uint64_t emitter_type_uint64
90126-#define OP(counter, counter_type, human, derived, base_counter)		\
90127-	dst = &col_##counter_type[mutex_counter_##counter];		\
90128-	dst->type = EMITTER_TYPE_##counter_type;			\
90129-	if (!derived) {							\
90130-		CTL_LEAF(mib, miblen_mutex, #counter,			\
90131-		    (counter_type *)&dst->bool_val, counter_type);	\
90132-	} else {							\
90133-		emitter_col_t *base =					\
90134-		    &col_##counter_type[mutex_counter_##base_counter];	\
90135-		dst->counter_type##_val =				\
90136-		    (counter_type)rate_per_second(			\
90137-		    base->counter_type##_val, uptime);			\
90138-	}
90139-	MUTEX_PROF_COUNTERS
90140-#undef OP
90141-#undef EMITTER_TYPE_uint32_t
90142-#undef EMITTER_TYPE_uint64_t
90143-}
90144-
90145-/* "row" can be NULL to avoid emitting in table mode. */
90146-static void
90147-mutex_stats_emit(emitter_t *emitter, emitter_row_t *row,
90148-    emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
90149-    emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) {
90150-	if (row != NULL) {
90151-		emitter_table_row(emitter, row);
90152-	}
90153-
90154-	mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0;
90155-	mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0;
90156-
90157-	emitter_col_t *col;
90158-
90159-#define EMITTER_TYPE_uint32_t emitter_type_uint32
90160-#define EMITTER_TYPE_uint64_t emitter_type_uint64
90161-#define OP(counter, type, human, derived, base_counter)		\
90162-	if (!derived) {							\
90163-		col = &col_##type[k_##type];				\
90164-		++k_##type;						\
90165-		emitter_json_kv(emitter, #counter, EMITTER_TYPE_##type,	\
90166-		    (const void *)&col->bool_val);			\
90167-	}
90168-	MUTEX_PROF_COUNTERS;
90169-#undef OP
90170-#undef EMITTER_TYPE_uint32_t
90171-#undef EMITTER_TYPE_uint64_t
90172-}
90173-
90174-#define COL_DECLARE(column_name)					\
90175-	emitter_col_t col_##column_name;
90176-
90177-#define COL_INIT(row_name, column_name, left_or_right, col_width, etype)\
90178-	emitter_col_init(&col_##column_name, &row_name);		\
90179-	col_##column_name.justify = emitter_justify_##left_or_right;	\
90180-	col_##column_name.width = col_width;				\
90181-	col_##column_name.type = emitter_type_##etype;
90182-
90183-#define COL(row_name, column_name, left_or_right, col_width, etype)	\
90184-	COL_DECLARE(column_name);					\
90185-	COL_INIT(row_name, column_name, left_or_right, col_width, etype)
90186-
90187-#define COL_HDR_DECLARE(column_name)					\
90188-	COL_DECLARE(column_name);					\
90189-	emitter_col_t header_##column_name;
90190-
90191-#define COL_HDR_INIT(row_name, column_name, human, left_or_right,	\
90192-	col_width, etype)						\
90193-	COL_INIT(row_name, column_name, left_or_right, col_width, etype)\
90194-	emitter_col_init(&header_##column_name, &header_##row_name);	\
90195-	header_##column_name.justify = emitter_justify_##left_or_right;	\
90196-	header_##column_name.width = col_width;				\
90197-	header_##column_name.type = emitter_type_title;			\
90198-	header_##column_name.str_val = human ? human : #column_name;
90199-
90200-#define COL_HDR(row_name, column_name, human, left_or_right, col_width,	\
90201-    etype)								\
90202-	COL_HDR_DECLARE(column_name)					\
90203-	COL_HDR_INIT(row_name, column_name, human, left_or_right,	\
90204-	    col_width, etype)
90205-
90206-JEMALLOC_COLD
90207-static void
90208-stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i,
90209-    uint64_t uptime) {
90210-	size_t page;
90211-	bool in_gap, in_gap_prev;
90212-	unsigned nbins, j;
90213-
90214-	CTL_GET("arenas.page", &page, size_t);
90215-
90216-	CTL_GET("arenas.nbins", &nbins, unsigned);
90217-
90218-	emitter_row_t header_row;
90219-	emitter_row_init(&header_row);
90220-
90221-	emitter_row_t row;
90222-	emitter_row_init(&row);
90223-
90224-	bool prof_stats_on = config_prof && opt_prof && opt_prof_stats
90225-	    && i == MALLCTL_ARENAS_ALL;
90226-
90227-	COL_HDR(row, size, NULL, right, 20, size)
90228-	COL_HDR(row, ind, NULL, right, 4, unsigned)
90229-	COL_HDR(row, allocated, NULL, right, 13, uint64)
90230-	COL_HDR(row, nmalloc, NULL, right, 13, uint64)
90231-	COL_HDR(row, nmalloc_ps, "(#/sec)", right, 8, uint64)
90232-	COL_HDR(row, ndalloc, NULL, right, 13, uint64)
90233-	COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64)
90234-	COL_HDR(row, nrequests, NULL, right, 13, uint64)
90235-	COL_HDR(row, nrequests_ps, "(#/sec)", right, 10, uint64)
90236-	COL_HDR_DECLARE(prof_live_requested);
90237-	COL_HDR_DECLARE(prof_live_count);
90238-	COL_HDR_DECLARE(prof_accum_requested);
90239-	COL_HDR_DECLARE(prof_accum_count);
90240-	if (prof_stats_on) {
90241-		COL_HDR_INIT(row, prof_live_requested, NULL, right, 21, uint64)
90242-		COL_HDR_INIT(row, prof_live_count, NULL, right, 17, uint64)
90243-		COL_HDR_INIT(row, prof_accum_requested, NULL, right, 21, uint64)
90244-		COL_HDR_INIT(row, prof_accum_count, NULL, right, 17, uint64)
90245-	}
90246-	COL_HDR(row, nshards, NULL, right, 9, unsigned)
90247-	COL_HDR(row, curregs, NULL, right, 13, size)
90248-	COL_HDR(row, curslabs, NULL, right, 13, size)
90249-	COL_HDR(row, nonfull_slabs, NULL, right, 15, size)
90250-	COL_HDR(row, regs, NULL, right, 5, unsigned)
90251-	COL_HDR(row, pgs, NULL, right, 4, size)
90252-	/* Spacer between a right- and a left-justified column. */
90253-	COL_HDR(row, justify_spacer, NULL, right, 1, title)
90254-	COL_HDR(row, util, NULL, right, 6, title)
90255-	COL_HDR(row, nfills, NULL, right, 13, uint64)
90256-	COL_HDR(row, nfills_ps, "(#/sec)", right, 8, uint64)
90257-	COL_HDR(row, nflushes, NULL, right, 13, uint64)
90258-	COL_HDR(row, nflushes_ps, "(#/sec)", right, 8, uint64)
90259-	COL_HDR(row, nslabs, NULL, right, 13, uint64)
90260-	COL_HDR(row, nreslabs, NULL, right, 13, uint64)
90261-	COL_HDR(row, nreslabs_ps, "(#/sec)", right, 8, uint64)
90262-
90263-	/* Don't want to actually print the name. */
90264-	header_justify_spacer.str_val = " ";
90265-	col_justify_spacer.str_val = " ";
90266-
90267-	emitter_col_t col_mutex64[mutex_prof_num_uint64_t_counters];
90268-	emitter_col_t col_mutex32[mutex_prof_num_uint32_t_counters];
90269-
90270-	emitter_col_t header_mutex64[mutex_prof_num_uint64_t_counters];
90271-	emitter_col_t header_mutex32[mutex_prof_num_uint32_t_counters];
90272-
90273-	if (mutex) {
90274-		mutex_stats_init_cols(&row, NULL, NULL, col_mutex64,
90275-		    col_mutex32);
90276-		mutex_stats_init_cols(&header_row, NULL, NULL, header_mutex64,
90277-		    header_mutex32);
90278-	}
90279-
90280-	/*
90281-	 * We print a "bins:" header as part of the table row; we need to adjust
90282-	 * the header size column to compensate.
90283-	 */
90284-	header_size.width -= 5;
90285-	emitter_table_printf(emitter, "bins:");
90286-	emitter_table_row(emitter, &header_row);
90287-	emitter_json_array_kv_begin(emitter, "bins");
90288-
90289-	size_t stats_arenas_mib[CTL_MAX_DEPTH];
90290-	CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
90291-	stats_arenas_mib[2] = i;
90292-	CTL_LEAF_PREPARE(stats_arenas_mib, 3, "bins");
90293-
90294-	size_t arenas_bin_mib[CTL_MAX_DEPTH];
90295-	CTL_LEAF_PREPARE(arenas_bin_mib, 0, "arenas.bin");
90296-
90297-	size_t prof_stats_mib[CTL_MAX_DEPTH];
90298-	if (prof_stats_on) {
90299-		CTL_LEAF_PREPARE(prof_stats_mib, 0, "prof.stats.bins");
90300-	}
90301-
90302-	for (j = 0, in_gap = false; j < nbins; j++) {
90303-		uint64_t nslabs;
90304-		size_t reg_size, slab_size, curregs;
90305-		size_t curslabs;
90306-		size_t nonfull_slabs;
90307-		uint32_t nregs, nshards;
90308-		uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
90309-		uint64_t nreslabs;
90310-		prof_stats_t prof_live;
90311-		prof_stats_t prof_accum;
90312-
90313-		stats_arenas_mib[4] = j;
90314-		arenas_bin_mib[2] = j;
90315-
90316-		CTL_LEAF(stats_arenas_mib, 5, "nslabs", &nslabs, uint64_t);
90317-
90318-		if (prof_stats_on) {
90319-			prof_stats_mib[3] = j;
90320-			CTL_LEAF(prof_stats_mib, 4, "live", &prof_live,
90321-			    prof_stats_t);
90322-			CTL_LEAF(prof_stats_mib, 4, "accum", &prof_accum,
90323-			    prof_stats_t);
90324-		}
90325-
90326-		in_gap_prev = in_gap;
90327-		if (prof_stats_on) {
90328-			in_gap = (nslabs == 0 && prof_accum.count == 0);
90329-		} else {
90330-			in_gap = (nslabs == 0);
90331-		}
90332-
90333-		if (in_gap_prev && !in_gap) {
90334-			emitter_table_printf(emitter,
90335-			    "                     ---\n");
90336-		}
90337-
90338-		if (in_gap && !emitter_outputs_json(emitter)) {
90339-			continue;
90340-		}
90341-
90342-		CTL_LEAF(arenas_bin_mib, 3, "size", &reg_size, size_t);
90343-		CTL_LEAF(arenas_bin_mib, 3, "nregs", &nregs, uint32_t);
90344-		CTL_LEAF(arenas_bin_mib, 3, "slab_size", &slab_size, size_t);
90345-		CTL_LEAF(arenas_bin_mib, 3, "nshards", &nshards, uint32_t);
90346-		CTL_LEAF(stats_arenas_mib, 5, "nmalloc", &nmalloc, uint64_t);
90347-		CTL_LEAF(stats_arenas_mib, 5, "ndalloc", &ndalloc, uint64_t);
90348-		CTL_LEAF(stats_arenas_mib, 5, "curregs", &curregs, size_t);
90349-		CTL_LEAF(stats_arenas_mib, 5, "nrequests", &nrequests,
90350-		    uint64_t);
90351-		CTL_LEAF(stats_arenas_mib, 5, "nfills", &nfills, uint64_t);
90352-		CTL_LEAF(stats_arenas_mib, 5, "nflushes", &nflushes, uint64_t);
90353-		CTL_LEAF(stats_arenas_mib, 5, "nreslabs", &nreslabs, uint64_t);
90354-		CTL_LEAF(stats_arenas_mib, 5, "curslabs", &curslabs, size_t);
90355-		CTL_LEAF(stats_arenas_mib, 5, "nonfull_slabs", &nonfull_slabs,
90356-		    size_t);
90357-
90358-		if (mutex) {
90359-			mutex_stats_read_arena_bin(stats_arenas_mib, 5,
90360-			    col_mutex64, col_mutex32, uptime);
90361-		}
90362-
90363-		emitter_json_object_begin(emitter);
90364-		emitter_json_kv(emitter, "nmalloc", emitter_type_uint64,
90365-		    &nmalloc);
90366-		emitter_json_kv(emitter, "ndalloc", emitter_type_uint64,
90367-		    &ndalloc);
90368-		emitter_json_kv(emitter, "curregs", emitter_type_size,
90369-		    &curregs);
90370-		emitter_json_kv(emitter, "nrequests", emitter_type_uint64,
90371-		    &nrequests);
90372-		if (prof_stats_on) {
90373-			emitter_json_kv(emitter, "prof_live_requested",
90374-			    emitter_type_uint64, &prof_live.req_sum);
90375-			emitter_json_kv(emitter, "prof_live_count",
90376-			    emitter_type_uint64, &prof_live.count);
90377-			emitter_json_kv(emitter, "prof_accum_requested",
90378-			    emitter_type_uint64, &prof_accum.req_sum);
90379-			emitter_json_kv(emitter, "prof_accum_count",
90380-			    emitter_type_uint64, &prof_accum.count);
90381-		}
90382-		emitter_json_kv(emitter, "nfills", emitter_type_uint64,
90383-		    &nfills);
90384-		emitter_json_kv(emitter, "nflushes", emitter_type_uint64,
90385-		    &nflushes);
90386-		emitter_json_kv(emitter, "nreslabs", emitter_type_uint64,
90387-		    &nreslabs);
90388-		emitter_json_kv(emitter, "curslabs", emitter_type_size,
90389-		    &curslabs);
90390-		emitter_json_kv(emitter, "nonfull_slabs", emitter_type_size,
90391-		    &nonfull_slabs);
90392-		if (mutex) {
90393-			emitter_json_object_kv_begin(emitter, "mutex");
90394-			mutex_stats_emit(emitter, NULL, col_mutex64,
90395-			    col_mutex32);
90396-			emitter_json_object_end(emitter);
90397-		}
90398-		emitter_json_object_end(emitter);
90399-
90400-		size_t availregs = nregs * curslabs;
90401-		char util[6];
90402-		if (get_rate_str((uint64_t)curregs, (uint64_t)availregs, util))
90403-		{
90404-			if (availregs == 0) {
90405-				malloc_snprintf(util, sizeof(util), "1");
90406-			} else if (curregs > availregs) {
90407-				/*
90408-				 * Race detected: the counters were read in
90409-				 * separate mallctl calls and concurrent
90410-				 * operations happened in between.  In this case
90411-				 * no meaningful utilization can be computed.
90412-				 */
90413-				malloc_snprintf(util, sizeof(util), " race");
90414-			} else {
90415-				not_reached();
90416-			}
90417-		}
90418-
90419-		col_size.size_val = reg_size;
90420-		col_ind.unsigned_val = j;
90421-		col_allocated.size_val = curregs * reg_size;
90422-		col_nmalloc.uint64_val = nmalloc;
90423-		col_nmalloc_ps.uint64_val = rate_per_second(nmalloc, uptime);
90424-		col_ndalloc.uint64_val = ndalloc;
90425-		col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime);
90426-		col_nrequests.uint64_val = nrequests;
90427-		col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime);
90428-		if (prof_stats_on) {
90429-			col_prof_live_requested.uint64_val = prof_live.req_sum;
90430-			col_prof_live_count.uint64_val = prof_live.count;
90431-			col_prof_accum_requested.uint64_val =
90432-			    prof_accum.req_sum;
90433-			col_prof_accum_count.uint64_val = prof_accum.count;
90434-		}
90435-		col_nshards.unsigned_val = nshards;
90436-		col_curregs.size_val = curregs;
90437-		col_curslabs.size_val = curslabs;
90438-		col_nonfull_slabs.size_val = nonfull_slabs;
90439-		col_regs.unsigned_val = nregs;
90440-		col_pgs.size_val = slab_size / page;
90441-		col_util.str_val = util;
90442-		col_nfills.uint64_val = nfills;
90443-		col_nfills_ps.uint64_val = rate_per_second(nfills, uptime);
90444-		col_nflushes.uint64_val = nflushes;
90445-		col_nflushes_ps.uint64_val = rate_per_second(nflushes, uptime);
90446-		col_nslabs.uint64_val = nslabs;
90447-		col_nreslabs.uint64_val = nreslabs;
90448-		col_nreslabs_ps.uint64_val = rate_per_second(nreslabs, uptime);
90449-
90450-		/*
90451-		 * Note that mutex columns were initialized above, if mutex ==
90452-		 * true.
90453-		 */
90454-
90455-		emitter_table_row(emitter, &row);
90456-	}
90457-	emitter_json_array_end(emitter); /* Close "bins". */
90458-
90459-	if (in_gap) {
90460-		emitter_table_printf(emitter, "                     ---\n");
90461-	}
90462-}
90463-
90464-JEMALLOC_COLD
90465-static void
90466-stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
90467-	unsigned nbins, nlextents, j;
90468-	bool in_gap, in_gap_prev;
90469-
90470-	CTL_GET("arenas.nbins", &nbins, unsigned);
90471-	CTL_GET("arenas.nlextents", &nlextents, unsigned);
90472-
90473-	emitter_row_t header_row;
90474-	emitter_row_init(&header_row);
90475-	emitter_row_t row;
90476-	emitter_row_init(&row);
90477-
90478-	bool prof_stats_on = config_prof && opt_prof && opt_prof_stats
90479-	    && i == MALLCTL_ARENAS_ALL;
90480-
90481-	COL_HDR(row, size, NULL, right, 20, size)
90482-	COL_HDR(row, ind, NULL, right, 4, unsigned)
90483-	COL_HDR(row, allocated, NULL, right, 13, size)
90484-	COL_HDR(row, nmalloc, NULL, right, 13, uint64)
90485-	COL_HDR(row, nmalloc_ps, "(#/sec)", right, 8, uint64)
90486-	COL_HDR(row, ndalloc, NULL, right, 13, uint64)
90487-	COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64)
90488-	COL_HDR(row, nrequests, NULL, right, 13, uint64)
90489-	COL_HDR(row, nrequests_ps, "(#/sec)", right, 8, uint64)
90490-	COL_HDR_DECLARE(prof_live_requested)
90491-	COL_HDR_DECLARE(prof_live_count)
90492-	COL_HDR_DECLARE(prof_accum_requested)
90493-	COL_HDR_DECLARE(prof_accum_count)
90494-	if (prof_stats_on) {
90495-		COL_HDR_INIT(row, prof_live_requested, NULL, right, 21, uint64)
90496-		COL_HDR_INIT(row, prof_live_count, NULL, right, 17, uint64)
90497-		COL_HDR_INIT(row, prof_accum_requested, NULL, right, 21, uint64)
90498-		COL_HDR_INIT(row, prof_accum_count, NULL, right, 17, uint64)
90499-	}
90500-	COL_HDR(row, curlextents, NULL, right, 13, size)
90501-
90502-	/* As with bins, we label the large extents table. */
90503-	header_size.width -= 6;
90504-	emitter_table_printf(emitter, "large:");
90505-	emitter_table_row(emitter, &header_row);
90506-	emitter_json_array_kv_begin(emitter, "lextents");
90507-
90508-	size_t stats_arenas_mib[CTL_MAX_DEPTH];
90509-	CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
90510-	stats_arenas_mib[2] = i;
90511-	CTL_LEAF_PREPARE(stats_arenas_mib, 3, "lextents");
90512-
90513-	size_t arenas_lextent_mib[CTL_MAX_DEPTH];
90514-	CTL_LEAF_PREPARE(arenas_lextent_mib, 0, "arenas.lextent");
90515-
90516-	size_t prof_stats_mib[CTL_MAX_DEPTH];
90517-	if (prof_stats_on) {
90518-		CTL_LEAF_PREPARE(prof_stats_mib, 0, "prof.stats.lextents");
90519-	}
90520-
90521-	for (j = 0, in_gap = false; j < nlextents; j++) {
90522-		uint64_t nmalloc, ndalloc, nrequests;
90523-		size_t lextent_size, curlextents;
90524-		prof_stats_t prof_live;
90525-		prof_stats_t prof_accum;
90526-
90527-		stats_arenas_mib[4] = j;
90528-		arenas_lextent_mib[2] = j;
90529-
90530-		CTL_LEAF(stats_arenas_mib, 5, "nmalloc", &nmalloc, uint64_t);
90531-		CTL_LEAF(stats_arenas_mib, 5, "ndalloc", &ndalloc, uint64_t);
90532-		CTL_LEAF(stats_arenas_mib, 5, "nrequests", &nrequests,
90533-		    uint64_t);
90534-
90535-		in_gap_prev = in_gap;
90536-		in_gap = (nrequests == 0);
90537-
90538-		if (in_gap_prev && !in_gap) {
90539-			emitter_table_printf(emitter,
90540-			    "                     ---\n");
90541-		}
90542-
90543-		CTL_LEAF(arenas_lextent_mib, 3, "size", &lextent_size, size_t);
90544-		CTL_LEAF(stats_arenas_mib, 5, "curlextents", &curlextents,
90545-		    size_t);
90546-
90547-		if (prof_stats_on) {
90548-			prof_stats_mib[3] = j;
90549-			CTL_LEAF(prof_stats_mib, 4, "live", &prof_live,
90550-			    prof_stats_t);
90551-			CTL_LEAF(prof_stats_mib, 4, "accum", &prof_accum,
90552-			    prof_stats_t);
90553-		}
90554-
90555-		emitter_json_object_begin(emitter);
90556-		if (prof_stats_on) {
90557-			emitter_json_kv(emitter, "prof_live_requested",
90558-			    emitter_type_uint64, &prof_live.req_sum);
90559-			emitter_json_kv(emitter, "prof_live_count",
90560-			    emitter_type_uint64, &prof_live.count);
90561-			emitter_json_kv(emitter, "prof_accum_requested",
90562-			    emitter_type_uint64, &prof_accum.req_sum);
90563-			emitter_json_kv(emitter, "prof_accum_count",
90564-			    emitter_type_uint64, &prof_accum.count);
90565-		}
90566-		emitter_json_kv(emitter, "curlextents", emitter_type_size,
90567-		    &curlextents);
90568-		emitter_json_object_end(emitter);
90569-
90570-		col_size.size_val = lextent_size;
90571-		col_ind.unsigned_val = nbins + j;
90572-		col_allocated.size_val = curlextents * lextent_size;
90573-		col_nmalloc.uint64_val = nmalloc;
90574-		col_nmalloc_ps.uint64_val = rate_per_second(nmalloc, uptime);
90575-		col_ndalloc.uint64_val = ndalloc;
90576-		col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime);
90577-		col_nrequests.uint64_val = nrequests;
90578-		col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime);
90579-		if (prof_stats_on) {
90580-			col_prof_live_requested.uint64_val = prof_live.req_sum;
90581-			col_prof_live_count.uint64_val = prof_live.count;
90582-			col_prof_accum_requested.uint64_val =
90583-			    prof_accum.req_sum;
90584-			col_prof_accum_count.uint64_val = prof_accum.count;
90585-		}
90586-		col_curlextents.size_val = curlextents;
90587-
90588-		if (!in_gap) {
90589-			emitter_table_row(emitter, &row);
90590-		}
90591-	}
90592-	emitter_json_array_end(emitter); /* Close "lextents". */
90593-	if (in_gap) {
90594-		emitter_table_printf(emitter, "                     ---\n");
90595-	}
90596-}
90597-
90598-JEMALLOC_COLD
90599-static void
90600-stats_arena_extents_print(emitter_t *emitter, unsigned i) {
90601-	unsigned j;
90602-	bool in_gap, in_gap_prev;
90603-	emitter_row_t header_row;
90604-	emitter_row_init(&header_row);
90605-	emitter_row_t row;
90606-	emitter_row_init(&row);
90607-
90608-	COL_HDR(row, size, NULL, right, 20, size)
90609-	COL_HDR(row, ind, NULL, right, 4, unsigned)
90610-	COL_HDR(row, ndirty, NULL, right, 13, size)
90611-	COL_HDR(row, dirty, NULL, right, 13, size)
90612-	COL_HDR(row, nmuzzy, NULL, right, 13, size)
90613-	COL_HDR(row, muzzy, NULL, right, 13, size)
90614-	COL_HDR(row, nretained, NULL, right, 13, size)
90615-	COL_HDR(row, retained, NULL, right, 13, size)
90616-	COL_HDR(row, ntotal, NULL, right, 13, size)
90617-	COL_HDR(row, total, NULL, right, 13, size)
90618-
90619-	/* Label this section. */
90620-	header_size.width -= 8;
90621-	emitter_table_printf(emitter, "extents:");
90622-	emitter_table_row(emitter, &header_row);
90623-	emitter_json_array_kv_begin(emitter, "extents");
90624-
90625-	size_t stats_arenas_mib[CTL_MAX_DEPTH];
90626-	CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
90627-	stats_arenas_mib[2] = i;
90628-	CTL_LEAF_PREPARE(stats_arenas_mib, 3, "extents");
90629-
90630-	in_gap = false;
90631-	for (j = 0; j < SC_NPSIZES; j++) {
90632-		size_t ndirty, nmuzzy, nretained, total, dirty_bytes,
90633-		    muzzy_bytes, retained_bytes, total_bytes;
90634-		stats_arenas_mib[4] = j;
90635-
90636-		CTL_LEAF(stats_arenas_mib, 5, "ndirty", &ndirty, size_t);
90637-		CTL_LEAF(stats_arenas_mib, 5, "nmuzzy", &nmuzzy, size_t);
90638-		CTL_LEAF(stats_arenas_mib, 5, "nretained", &nretained, size_t);
90639-		CTL_LEAF(stats_arenas_mib, 5, "dirty_bytes", &dirty_bytes,
90640-		    size_t);
90641-		CTL_LEAF(stats_arenas_mib, 5, "muzzy_bytes", &muzzy_bytes,
90642-		    size_t);
90643-		CTL_LEAF(stats_arenas_mib, 5, "retained_bytes",
90644-		    &retained_bytes, size_t);
90645-
90646-		total = ndirty + nmuzzy + nretained;
90647-		total_bytes = dirty_bytes + muzzy_bytes + retained_bytes;
90648-
90649-		in_gap_prev = in_gap;
90650-		in_gap = (total == 0);
90651-
90652-		if (in_gap_prev && !in_gap) {
90653-			emitter_table_printf(emitter,
90654-			    "                     ---\n");
90655-		}
90656-
90657-		emitter_json_object_begin(emitter);
90658-		emitter_json_kv(emitter, "ndirty", emitter_type_size, &ndirty);
90659-		emitter_json_kv(emitter, "nmuzzy", emitter_type_size, &nmuzzy);
90660-		emitter_json_kv(emitter, "nretained", emitter_type_size,
90661-		    &nretained);
90662-
90663-		emitter_json_kv(emitter, "dirty_bytes", emitter_type_size,
90664-		    &dirty_bytes);
90665-		emitter_json_kv(emitter, "muzzy_bytes", emitter_type_size,
90666-		    &muzzy_bytes);
90667-		emitter_json_kv(emitter, "retained_bytes", emitter_type_size,
90668-		    &retained_bytes);
90669-		emitter_json_object_end(emitter);
90670-
90671-		col_size.size_val = sz_pind2sz(j);
90672-		col_ind.size_val = j;
90673-		col_ndirty.size_val = ndirty;
90674-		col_dirty.size_val = dirty_bytes;
90675-		col_nmuzzy.size_val = nmuzzy;
90676-		col_muzzy.size_val = muzzy_bytes;
90677-		col_nretained.size_val = nretained;
90678-		col_retained.size_val = retained_bytes;
90679-		col_ntotal.size_val = total;
90680-		col_total.size_val = total_bytes;
90681-
90682-		if (!in_gap) {
90683-			emitter_table_row(emitter, &row);
90684-		}
90685-	}
90686-	emitter_json_array_end(emitter); /* Close "extents". */
90687-	if (in_gap) {
90688-		emitter_table_printf(emitter, "                     ---\n");
90689-	}
90690-}
90691-
90692-static void
90693-stats_arena_hpa_shard_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
90694-	emitter_row_t header_row;
90695-	emitter_row_init(&header_row);
90696-	emitter_row_t row;
90697-	emitter_row_init(&row);
90698-
90699-	uint64_t npurge_passes;
90700-	uint64_t npurges;
90701-	uint64_t nhugifies;
90702-	uint64_t ndehugifies;
90703-
90704-	CTL_M2_GET("stats.arenas.0.hpa_shard.npurge_passes",
90705-	    i, &npurge_passes, uint64_t);
90706-	CTL_M2_GET("stats.arenas.0.hpa_shard.npurges",
90707-	    i, &npurges, uint64_t);
90708-	CTL_M2_GET("stats.arenas.0.hpa_shard.nhugifies",
90709-	    i, &nhugifies, uint64_t);
90710-	CTL_M2_GET("stats.arenas.0.hpa_shard.ndehugifies",
90711-	    i, &ndehugifies, uint64_t);
90712-
90713-	size_t npageslabs_huge;
90714-	size_t nactive_huge;
90715-	size_t ndirty_huge;
90716-
90717-	size_t npageslabs_nonhuge;
90718-	size_t nactive_nonhuge;
90719-	size_t ndirty_nonhuge;
90720-	size_t nretained_nonhuge;
90721-
90722-	size_t sec_bytes;
90723-	CTL_M2_GET("stats.arenas.0.hpa_sec_bytes", i, &sec_bytes, size_t);
90724-	emitter_kv(emitter, "sec_bytes", "Bytes in small extent cache",
90725-	    emitter_type_size, &sec_bytes);
90726-
90727-	/* First, global stats. */
90728-	emitter_table_printf(emitter,
90729-	    "HPA shard stats:\n"
90730-	    "  Purge passes: %" FMTu64 " (%" FMTu64 " / sec)\n"
90731-	    "  Purges: %" FMTu64 " (%" FMTu64 " / sec)\n"
90732-	    "  Hugeifies: %" FMTu64 " (%" FMTu64 " / sec)\n"
90733-	    "  Dehugifies: %" FMTu64 " (%" FMTu64 " / sec)\n"
90734-	    "\n",
90735-	    npurge_passes, rate_per_second(npurge_passes, uptime),
90736-	    npurges, rate_per_second(npurges, uptime),
90737-	    nhugifies, rate_per_second(nhugifies, uptime),
90738-	    ndehugifies, rate_per_second(ndehugifies, uptime));
90739-
90740-	emitter_json_object_kv_begin(emitter, "hpa_shard");
90741-	emitter_json_kv(emitter, "npurge_passes", emitter_type_uint64,
90742-	    &npurge_passes);
90743-	emitter_json_kv(emitter, "npurges", emitter_type_uint64,
90744-	    &npurges);
90745-	emitter_json_kv(emitter, "nhugifies", emitter_type_uint64,
90746-	    &nhugifies);
90747-	emitter_json_kv(emitter, "ndehugifies", emitter_type_uint64,
90748-	    &ndehugifies);
90749-
90750-	/* Next, full slab stats. */
90751-	CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.npageslabs_huge",
90752-	    i, &npageslabs_huge, size_t);
90753-	CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.nactive_huge",
90754-	    i, &nactive_huge, size_t);
90755-	CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.ndirty_huge",
90756-	    i, &ndirty_huge, size_t);
90757-
90758-	CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.npageslabs_nonhuge",
90759-	    i, &npageslabs_nonhuge, size_t);
90760-	CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.nactive_nonhuge",
90761-	    i, &nactive_nonhuge, size_t);
90762-	CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.ndirty_nonhuge",
90763-	    i, &ndirty_nonhuge, size_t);
90764-	nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES
90765-	    - nactive_nonhuge - ndirty_nonhuge;
90766-
90767-	emitter_table_printf(emitter,
90768-	    "  In full slabs:\n"
90769-	    "      npageslabs: %zu huge, %zu nonhuge\n"
90770-	    "      nactive: %zu huge, %zu nonhuge \n"
90771-	    "      ndirty: %zu huge, %zu nonhuge \n"
90772-	    "      nretained: 0 huge, %zu nonhuge \n",
90773-	    npageslabs_huge, npageslabs_nonhuge,
90774-	    nactive_huge, nactive_nonhuge,
90775-	    ndirty_huge, ndirty_nonhuge,
90776-	    nretained_nonhuge);
90777-
90778-	emitter_json_object_kv_begin(emitter, "full_slabs");
90779-	emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size,
90780-	    &npageslabs_huge);
90781-	emitter_json_kv(emitter, "nactive_huge", emitter_type_size,
90782-	    &nactive_huge);
90783-	emitter_json_kv(emitter, "ndirty_huge", emitter_type_size,
90784-	    &ndirty_huge);
90785-	emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size,
90786-	    &npageslabs_nonhuge);
90787-	emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size,
90788-	    &nactive_nonhuge);
90789-	emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size,
90790-	    &ndirty_nonhuge);
90791-	emitter_json_object_end(emitter); /* End "full_slabs" */
90792-
90793-	/* Next, empty slab stats. */
90794-	CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.npageslabs_huge",
90795-	    i, &npageslabs_huge, size_t);
90796-	CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.nactive_huge",
90797-	    i, &nactive_huge, size_t);
90798-	CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.ndirty_huge",
90799-	    i, &ndirty_huge, size_t);
90800-
90801-	CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.npageslabs_nonhuge",
90802-	    i, &npageslabs_nonhuge, size_t);
90803-	CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.nactive_nonhuge",
90804-	    i, &nactive_nonhuge, size_t);
90805-	CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.ndirty_nonhuge",
90806-	    i, &ndirty_nonhuge, size_t);
90807-	nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES
90808-	    - nactive_nonhuge - ndirty_nonhuge;
90809-
90810-	emitter_table_printf(emitter,
90811-	    "  In empty slabs:\n"
90812-	    "      npageslabs: %zu huge, %zu nonhuge\n"
90813-	    "      nactive: %zu huge, %zu nonhuge \n"
90814-	    "      ndirty: %zu huge, %zu nonhuge \n"
90815-	    "      nretained: 0 huge, %zu nonhuge \n"
90816-	    "\n",
90817-	    npageslabs_huge, npageslabs_nonhuge,
90818-	    nactive_huge, nactive_nonhuge,
90819-	    ndirty_huge, ndirty_nonhuge,
90820-	    nretained_nonhuge);
90821-
90822-	emitter_json_object_kv_begin(emitter, "empty_slabs");
90823-	emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size,
90824-	    &npageslabs_huge);
90825-	emitter_json_kv(emitter, "nactive_huge", emitter_type_size,
90826-	    &nactive_huge);
90827-	emitter_json_kv(emitter, "ndirty_huge", emitter_type_size,
90828-	    &ndirty_huge);
90829-	emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size,
90830-	    &npageslabs_nonhuge);
90831-	emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size,
90832-	    &nactive_nonhuge);
90833-	emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size,
90834-	    &ndirty_nonhuge);
90835-	emitter_json_object_end(emitter); /* End "empty_slabs" */
90836-
90837-	COL_HDR(row, size, NULL, right, 20, size)
90838-	COL_HDR(row, ind, NULL, right, 4, unsigned)
90839-	COL_HDR(row, npageslabs_huge, NULL, right, 16, size)
90840-	COL_HDR(row, nactive_huge, NULL, right, 16, size)
90841-	COL_HDR(row, ndirty_huge, NULL, right, 16, size)
90842-	COL_HDR(row, npageslabs_nonhuge, NULL, right, 20, size)
90843-	COL_HDR(row, nactive_nonhuge, NULL, right, 20, size)
90844-	COL_HDR(row, ndirty_nonhuge, NULL, right, 20, size)
90845-	COL_HDR(row, nretained_nonhuge, NULL, right, 20, size)
90846-
90847-	size_t stats_arenas_mib[CTL_MAX_DEPTH];
90848-	CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
90849-	stats_arenas_mib[2] = i;
90850-	CTL_LEAF_PREPARE(stats_arenas_mib, 3, "hpa_shard.nonfull_slabs");
90851-
90852-	emitter_table_row(emitter, &header_row);
90853-	emitter_json_array_kv_begin(emitter, "nonfull_slabs");
90854-	bool in_gap = false;
90855-	for (pszind_t j = 0; j < PSSET_NPSIZES; j++) {
90856-		stats_arenas_mib[5] = j;
90857-
90858-		CTL_LEAF(stats_arenas_mib, 6, "npageslabs_huge",
90859-		    &npageslabs_huge, size_t);
90860-		CTL_LEAF(stats_arenas_mib, 6, "nactive_huge",
90861-		    &nactive_huge, size_t);
90862-		CTL_LEAF(stats_arenas_mib, 6, "ndirty_huge",
90863-		    &ndirty_huge, size_t);
90864-
90865-		CTL_LEAF(stats_arenas_mib, 6, "npageslabs_nonhuge",
90866-		    &npageslabs_nonhuge, size_t);
90867-		CTL_LEAF(stats_arenas_mib, 6, "nactive_nonhuge",
90868-		    &nactive_nonhuge, size_t);
90869-		CTL_LEAF(stats_arenas_mib, 6, "ndirty_nonhuge",
90870-		    &ndirty_nonhuge, size_t);
90871-		nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES
90872-		    - nactive_nonhuge - ndirty_nonhuge;
90873-
90874-		bool in_gap_prev = in_gap;
90875-		in_gap = (npageslabs_huge == 0 && npageslabs_nonhuge == 0);
90876-		if (in_gap_prev && !in_gap) {
90877-			emitter_table_printf(emitter,
90878-			    "                     ---\n");
90879-		}
90880-
90881-		col_size.size_val = sz_pind2sz(j);
90882-		col_ind.size_val = j;
90883-		col_npageslabs_huge.size_val = npageslabs_huge;
90884-		col_nactive_huge.size_val = nactive_huge;
90885-		col_ndirty_huge.size_val = ndirty_huge;
90886-		col_npageslabs_nonhuge.size_val = npageslabs_nonhuge;
90887-		col_nactive_nonhuge.size_val = nactive_nonhuge;
90888-		col_ndirty_nonhuge.size_val = ndirty_nonhuge;
90889-		col_nretained_nonhuge.size_val = nretained_nonhuge;
90890-		if (!in_gap) {
90891-			emitter_table_row(emitter, &row);
90892-		}
90893-
90894-		emitter_json_object_begin(emitter);
90895-		emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size,
90896-		    &npageslabs_huge);
90897-		emitter_json_kv(emitter, "nactive_huge", emitter_type_size,
90898-		    &nactive_huge);
90899-		emitter_json_kv(emitter, "ndirty_huge", emitter_type_size,
90900-		    &ndirty_huge);
90901-		emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size,
90902-		    &npageslabs_nonhuge);
90903-		emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size,
90904-		    &nactive_nonhuge);
90905-		emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size,
90906-		    &ndirty_nonhuge);
90907-		emitter_json_object_end(emitter);
90908-	}
90909-	emitter_json_array_end(emitter); /* End "nonfull_slabs" */
90910-	emitter_json_object_end(emitter); /* End "hpa_shard" */
90911-	if (in_gap) {
90912-		emitter_table_printf(emitter, "                     ---\n");
90913-	}
90914-}
90915-
90916-static void
90917-stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind, uint64_t uptime) {
90918-	emitter_row_t row;
90919-	emitter_col_t col_name;
90920-	emitter_col_t col64[mutex_prof_num_uint64_t_counters];
90921-	emitter_col_t col32[mutex_prof_num_uint32_t_counters];
90922-
90923-	emitter_row_init(&row);
90924-	mutex_stats_init_cols(&row, "", &col_name, col64, col32);
90925-
90926-	emitter_json_object_kv_begin(emitter, "mutexes");
90927-	emitter_table_row(emitter, &row);
90928-
90929-	size_t stats_arenas_mib[CTL_MAX_DEPTH];
90930-	CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
90931-	stats_arenas_mib[2] = arena_ind;
90932-	CTL_LEAF_PREPARE(stats_arenas_mib, 3, "mutexes");
90933-
90934-	for (mutex_prof_arena_ind_t i = 0; i < mutex_prof_num_arena_mutexes;
90935-	    i++) {
90936-		const char *name = arena_mutex_names[i];
90937-		emitter_json_object_kv_begin(emitter, name);
90938-		mutex_stats_read_arena(stats_arenas_mib, 4, name, &col_name,
90939-		    col64, col32, uptime);
90940-		mutex_stats_emit(emitter, &row, col64, col32);
90941-		emitter_json_object_end(emitter); /* Close the mutex dict. */
90942-	}
90943-	emitter_json_object_end(emitter); /* End "mutexes". */
90944-}
90945-
90946-JEMALLOC_COLD
90947-static void
90948-stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
90949-    bool mutex, bool extents, bool hpa) {
90950-	unsigned nthreads;
90951-	const char *dss;
90952-	ssize_t dirty_decay_ms, muzzy_decay_ms;
90953-	size_t page, pactive, pdirty, pmuzzy, mapped, retained;
90954-	size_t base, internal, resident, metadata_thp, extent_avail;
90955-	uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
90956-	uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
90957-	size_t small_allocated;
90958-	uint64_t small_nmalloc, small_ndalloc, small_nrequests, small_nfills,
90959-	    small_nflushes;
90960-	size_t large_allocated;
90961-	uint64_t large_nmalloc, large_ndalloc, large_nrequests, large_nfills,
90962-	    large_nflushes;
90963-	size_t tcache_bytes, tcache_stashed_bytes, abandoned_vm;
90964-	uint64_t uptime;
90965-
90966-	CTL_GET("arenas.page", &page, size_t);
90967-
90968-	CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
90969-	emitter_kv(emitter, "nthreads", "assigned threads",
90970-	    emitter_type_unsigned, &nthreads);
90971-
90972-	CTL_M2_GET("stats.arenas.0.uptime", i, &uptime, uint64_t);
90973-	emitter_kv(emitter, "uptime_ns", "uptime", emitter_type_uint64,
90974-	    &uptime);
90975-
90976-	CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
90977-	emitter_kv(emitter, "dss", "dss allocation precedence",
90978-	    emitter_type_string, &dss);
90979-
90980-	CTL_M2_GET("stats.arenas.0.dirty_decay_ms", i, &dirty_decay_ms,
90981-	    ssize_t);
90982-	CTL_M2_GET("stats.arenas.0.muzzy_decay_ms", i, &muzzy_decay_ms,
90983-	    ssize_t);
90984-	CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
90985-	CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
90986-	CTL_M2_GET("stats.arenas.0.pmuzzy", i, &pmuzzy, size_t);
90987-	CTL_M2_GET("stats.arenas.0.dirty_npurge", i, &dirty_npurge, uint64_t);
90988-	CTL_M2_GET("stats.arenas.0.dirty_nmadvise", i, &dirty_nmadvise,
90989-	    uint64_t);
90990-	CTL_M2_GET("stats.arenas.0.dirty_purged", i, &dirty_purged, uint64_t);
90991-	CTL_M2_GET("stats.arenas.0.muzzy_npurge", i, &muzzy_npurge, uint64_t);
90992-	CTL_M2_GET("stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise,
90993-	    uint64_t);
90994-	CTL_M2_GET("stats.arenas.0.muzzy_purged", i, &muzzy_purged, uint64_t);
90995-
90996-	emitter_row_t decay_row;
90997-	emitter_row_init(&decay_row);
90998-
90999-	/* JSON-style emission. */
91000-	emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize,
91001-	    &dirty_decay_ms);
91002-	emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize,
91003-	    &muzzy_decay_ms);
91004-
91005-	emitter_json_kv(emitter, "pactive", emitter_type_size, &pactive);
91006-	emitter_json_kv(emitter, "pdirty", emitter_type_size, &pdirty);
91007-	emitter_json_kv(emitter, "pmuzzy", emitter_type_size, &pmuzzy);
91008-
91009-	emitter_json_kv(emitter, "dirty_npurge", emitter_type_uint64,
91010-	    &dirty_npurge);
91011-	emitter_json_kv(emitter, "dirty_nmadvise", emitter_type_uint64,
91012-	    &dirty_nmadvise);
91013-	emitter_json_kv(emitter, "dirty_purged", emitter_type_uint64,
91014-	    &dirty_purged);
91015-
91016-	emitter_json_kv(emitter, "muzzy_npurge", emitter_type_uint64,
91017-	    &muzzy_npurge);
91018-	emitter_json_kv(emitter, "muzzy_nmadvise", emitter_type_uint64,
91019-	    &muzzy_nmadvise);
91020-	emitter_json_kv(emitter, "muzzy_purged", emitter_type_uint64,
91021-	    &muzzy_purged);
91022-
91023-	/* Table-style emission. */
91024-	COL(decay_row, decay_type, right, 9, title);
91025-	col_decay_type.str_val = "decaying:";
91026-
91027-	COL(decay_row, decay_time, right, 6, title);
91028-	col_decay_time.str_val = "time";
91029-
91030-	COL(decay_row, decay_npages, right, 13, title);
91031-	col_decay_npages.str_val = "npages";
91032-
91033-	COL(decay_row, decay_sweeps, right, 13, title);
91034-	col_decay_sweeps.str_val = "sweeps";
91035-
91036-	COL(decay_row, decay_madvises, right, 13, title);
91037-	col_decay_madvises.str_val = "madvises";
91038-
91039-	COL(decay_row, decay_purged, right, 13, title);
91040-	col_decay_purged.str_val = "purged";
91041-
91042-	/* Title row. */
91043-	emitter_table_row(emitter, &decay_row);
91044-
91045-	/* Dirty row. */
91046-	col_decay_type.str_val = "dirty:";
91047-
91048-	if (dirty_decay_ms >= 0) {
91049-		col_decay_time.type = emitter_type_ssize;
91050-		col_decay_time.ssize_val = dirty_decay_ms;
91051-	} else {
91052-		col_decay_time.type = emitter_type_title;
91053-		col_decay_time.str_val = "N/A";
91054-	}
91055-
91056-	col_decay_npages.type = emitter_type_size;
91057-	col_decay_npages.size_val = pdirty;
91058-
91059-	col_decay_sweeps.type = emitter_type_uint64;
91060-	col_decay_sweeps.uint64_val = dirty_npurge;
91061-
91062-	col_decay_madvises.type = emitter_type_uint64;
91063-	col_decay_madvises.uint64_val = dirty_nmadvise;
91064-
91065-	col_decay_purged.type = emitter_type_uint64;
91066-	col_decay_purged.uint64_val = dirty_purged;
91067-
91068-	emitter_table_row(emitter, &decay_row);
91069-
91070-	/* Muzzy row. */
91071-	col_decay_type.str_val = "muzzy:";
91072-
91073-	if (muzzy_decay_ms >= 0) {
91074-		col_decay_time.type = emitter_type_ssize;
91075-		col_decay_time.ssize_val = muzzy_decay_ms;
91076-	} else {
91077-		col_decay_time.type = emitter_type_title;
91078-		col_decay_time.str_val = "N/A";
91079-	}
91080-
91081-	col_decay_npages.type = emitter_type_size;
91082-	col_decay_npages.size_val = pmuzzy;
91083-
91084-	col_decay_sweeps.type = emitter_type_uint64;
91085-	col_decay_sweeps.uint64_val = muzzy_npurge;
91086-
91087-	col_decay_madvises.type = emitter_type_uint64;
91088-	col_decay_madvises.uint64_val = muzzy_nmadvise;
91089-
91090-	col_decay_purged.type = emitter_type_uint64;
91091-	col_decay_purged.uint64_val = muzzy_purged;
91092-
91093-	emitter_table_row(emitter, &decay_row);
91094-
91095-	/* Small / large / total allocation counts. */
91096-	emitter_row_t alloc_count_row;
91097-	emitter_row_init(&alloc_count_row);
91098-
91099-	COL(alloc_count_row, count_title, left, 21, title);
91100-	col_count_title.str_val = "";
91101-
91102-	COL(alloc_count_row, count_allocated, right, 16, title);
91103-	col_count_allocated.str_val = "allocated";
91104-
91105-	COL(alloc_count_row, count_nmalloc, right, 16, title);
91106-	col_count_nmalloc.str_val = "nmalloc";
91107-	COL(alloc_count_row, count_nmalloc_ps, right, 10, title);
91108-	col_count_nmalloc_ps.str_val = "(#/sec)";
91109-
91110-	COL(alloc_count_row, count_ndalloc, right, 16, title);
91111-	col_count_ndalloc.str_val = "ndalloc";
91112-	COL(alloc_count_row, count_ndalloc_ps, right, 10, title);
91113-	col_count_ndalloc_ps.str_val = "(#/sec)";
91114-
91115-	COL(alloc_count_row, count_nrequests, right, 16, title);
91116-	col_count_nrequests.str_val = "nrequests";
91117-	COL(alloc_count_row, count_nrequests_ps, right, 10, title);
91118-	col_count_nrequests_ps.str_val = "(#/sec)";
91119-
91120-	COL(alloc_count_row, count_nfills, right, 16, title);
91121-	col_count_nfills.str_val = "nfill";
91122-	COL(alloc_count_row, count_nfills_ps, right, 10, title);
91123-	col_count_nfills_ps.str_val = "(#/sec)";
91124-
91125-	COL(alloc_count_row, count_nflushes, right, 16, title);
91126-	col_count_nflushes.str_val = "nflush";
91127-	COL(alloc_count_row, count_nflushes_ps, right, 10, title);
91128-	col_count_nflushes_ps.str_val = "(#/sec)";
91129-
91130-	emitter_table_row(emitter, &alloc_count_row);
91131-
91132-	col_count_nmalloc_ps.type = emitter_type_uint64;
91133-	col_count_ndalloc_ps.type = emitter_type_uint64;
91134-	col_count_nrequests_ps.type = emitter_type_uint64;
91135-	col_count_nfills_ps.type = emitter_type_uint64;
91136-	col_count_nflushes_ps.type = emitter_type_uint64;
91137-
91138-#define GET_AND_EMIT_ALLOC_STAT(small_or_large, name, valtype)		\
91139-	CTL_M2_GET("stats.arenas.0." #small_or_large "." #name, i,	\
91140-	    &small_or_large##_##name, valtype##_t);			\
91141-	emitter_json_kv(emitter, #name, emitter_type_##valtype,		\
91142-	    &small_or_large##_##name);					\
91143-	col_count_##name.type = emitter_type_##valtype;		\
91144-	col_count_##name.valtype##_val = small_or_large##_##name;
91145-
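To make the token pasting above concrete, a single invocation such as GET_AND_EMIT_ALLOC_STAT(small, nmalloc, uint64) expands, roughly, to the following; this is an illustrative expansion derived from the macro body above, not a line from the deleted source:

	/* Approximate expansion of GET_AND_EMIT_ALLOC_STAT(small, nmalloc, uint64): */
	CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t);
	emitter_json_kv(emitter, "nmalloc", emitter_type_uint64, &small_nmalloc);
	col_count_nmalloc.type = emitter_type_uint64;
	col_count_nmalloc.uint64_val = small_nmalloc;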
91146-	emitter_json_object_kv_begin(emitter, "small");
91147-	col_count_title.str_val = "small:";
91148-
91149-	GET_AND_EMIT_ALLOC_STAT(small, allocated, size)
91150-	GET_AND_EMIT_ALLOC_STAT(small, nmalloc, uint64)
91151-	col_count_nmalloc_ps.uint64_val =
91152-	    rate_per_second(col_count_nmalloc.uint64_val, uptime);
91153-	GET_AND_EMIT_ALLOC_STAT(small, ndalloc, uint64)
91154-	col_count_ndalloc_ps.uint64_val =
91155-	    rate_per_second(col_count_ndalloc.uint64_val, uptime);
91156-	GET_AND_EMIT_ALLOC_STAT(small, nrequests, uint64)
91157-	col_count_nrequests_ps.uint64_val =
91158-	    rate_per_second(col_count_nrequests.uint64_val, uptime);
91159-	GET_AND_EMIT_ALLOC_STAT(small, nfills, uint64)
91160-	col_count_nfills_ps.uint64_val =
91161-	    rate_per_second(col_count_nfills.uint64_val, uptime);
91162-	GET_AND_EMIT_ALLOC_STAT(small, nflushes, uint64)
91163-	col_count_nflushes_ps.uint64_val =
91164-	    rate_per_second(col_count_nflushes.uint64_val, uptime);
91165-
91166-	emitter_table_row(emitter, &alloc_count_row);
91167-	emitter_json_object_end(emitter); /* Close "small". */
91168-
91169-	emitter_json_object_kv_begin(emitter, "large");
91170-	col_count_title.str_val = "large:";
91171-
91172-	GET_AND_EMIT_ALLOC_STAT(large, allocated, size)
91173-	GET_AND_EMIT_ALLOC_STAT(large, nmalloc, uint64)
91174-	col_count_nmalloc_ps.uint64_val =
91175-	    rate_per_second(col_count_nmalloc.uint64_val, uptime);
91176-	GET_AND_EMIT_ALLOC_STAT(large, ndalloc, uint64)
91177-	col_count_ndalloc_ps.uint64_val =
91178-	    rate_per_second(col_count_ndalloc.uint64_val, uptime);
91179-	GET_AND_EMIT_ALLOC_STAT(large, nrequests, uint64)
91180-	col_count_nrequests_ps.uint64_val =
91181-	    rate_per_second(col_count_nrequests.uint64_val, uptime);
91182-	GET_AND_EMIT_ALLOC_STAT(large, nfills, uint64)
91183-	col_count_nfills_ps.uint64_val =
91184-	    rate_per_second(col_count_nfills.uint64_val, uptime);
91185-	GET_AND_EMIT_ALLOC_STAT(large, nflushes, uint64)
91186-	col_count_nflushes_ps.uint64_val =
91187-	    rate_per_second(col_count_nflushes.uint64_val, uptime);
91188-
91189-	emitter_table_row(emitter, &alloc_count_row);
91190-	emitter_json_object_end(emitter); /* Close "large". */
91191-
91192-#undef GET_AND_EMIT_ALLOC_STAT
91193-
91194-	/* Aggregated small + large stats are emitted only in table mode. */
91195-	col_count_title.str_val = "total:";
91196-	col_count_allocated.size_val = small_allocated + large_allocated;
91197-	col_count_nmalloc.uint64_val = small_nmalloc + large_nmalloc;
91198-	col_count_ndalloc.uint64_val = small_ndalloc + large_ndalloc;
91199-	col_count_nrequests.uint64_val = small_nrequests + large_nrequests;
91200-	col_count_nfills.uint64_val = small_nfills + large_nfills;
91201-	col_count_nflushes.uint64_val = small_nflushes + large_nflushes;
91202-	col_count_nmalloc_ps.uint64_val =
91203-	    rate_per_second(col_count_nmalloc.uint64_val, uptime);
91204-	col_count_ndalloc_ps.uint64_val =
91205-	    rate_per_second(col_count_ndalloc.uint64_val, uptime);
91206-	col_count_nrequests_ps.uint64_val =
91207-	    rate_per_second(col_count_nrequests.uint64_val, uptime);
91208-	col_count_nfills_ps.uint64_val =
91209-	    rate_per_second(col_count_nfills.uint64_val, uptime);
91210-	col_count_nflushes_ps.uint64_val =
91211-	    rate_per_second(col_count_nflushes.uint64_val, uptime);
91212-	emitter_table_row(emitter, &alloc_count_row);
91213-
91214-	emitter_row_t mem_count_row;
91215-	emitter_row_init(&mem_count_row);
91216-
91217-	emitter_col_t mem_count_title;
91218-	emitter_col_init(&mem_count_title, &mem_count_row);
91219-	mem_count_title.justify = emitter_justify_left;
91220-	mem_count_title.width = 21;
91221-	mem_count_title.type = emitter_type_title;
91222-	mem_count_title.str_val = "";
91223-
91224-	emitter_col_t mem_count_val;
91225-	emitter_col_init(&mem_count_val, &mem_count_row);
91226-	mem_count_val.justify = emitter_justify_right;
91227-	mem_count_val.width = 16;
91228-	mem_count_val.type = emitter_type_title;
91229-	mem_count_val.str_val = "";
91230-
91231-	emitter_table_row(emitter, &mem_count_row);
91232-	mem_count_val.type = emitter_type_size;
91233-
91234-	/* Active count in bytes is emitted only in table mode. */
91235-	mem_count_title.str_val = "active:";
91236-	mem_count_val.size_val = pactive * page;
91237-	emitter_table_row(emitter, &mem_count_row);
91238-
91239-#define GET_AND_EMIT_MEM_STAT(stat)					\
91240-	CTL_M2_GET("stats.arenas.0."#stat, i, &stat, size_t);		\
91241-	emitter_json_kv(emitter, #stat, emitter_type_size, &stat);	\
91242-	mem_count_title.str_val = #stat":";				\
91243-	mem_count_val.size_val = stat;					\
91244-	emitter_table_row(emitter, &mem_count_row);
91245-
91246-	GET_AND_EMIT_MEM_STAT(mapped)
91247-	GET_AND_EMIT_MEM_STAT(retained)
91248-	GET_AND_EMIT_MEM_STAT(base)
91249-	GET_AND_EMIT_MEM_STAT(internal)
91250-	GET_AND_EMIT_MEM_STAT(metadata_thp)
91251-	GET_AND_EMIT_MEM_STAT(tcache_bytes)
91252-	GET_AND_EMIT_MEM_STAT(tcache_stashed_bytes)
91253-	GET_AND_EMIT_MEM_STAT(resident)
91254-	GET_AND_EMIT_MEM_STAT(abandoned_vm)
91255-	GET_AND_EMIT_MEM_STAT(extent_avail)
91256-#undef GET_AND_EMIT_MEM_STAT
91257-
91258-	if (mutex) {
91259-		stats_arena_mutexes_print(emitter, i, uptime);
91260-	}
91261-	if (bins) {
91262-		stats_arena_bins_print(emitter, mutex, i, uptime);
91263-	}
91264-	if (large) {
91265-		stats_arena_lextents_print(emitter, i, uptime);
91266-	}
91267-	if (extents) {
91268-		stats_arena_extents_print(emitter, i);
91269-	}
91270-	if (hpa) {
91271-		stats_arena_hpa_shard_print(emitter, i, uptime);
91272-	}
91273-}
91274-
91275-JEMALLOC_COLD
91276-static void
91277-stats_general_print(emitter_t *emitter) {
91278-	const char *cpv;
91279-	bool bv, bv2;
91280-	unsigned uv;
91281-	uint32_t u32v;
91282-	uint64_t u64v;
91283-	int64_t i64v;
91284-	ssize_t ssv, ssv2;
91285-	size_t sv, bsz, usz, u32sz, u64sz, i64sz, ssz, sssz, cpsz;
91286-
91287-	bsz = sizeof(bool);
91288-	usz = sizeof(unsigned);
91289-	ssz = sizeof(size_t);
91290-	sssz = sizeof(ssize_t);
91291-	cpsz = sizeof(const char *);
91292-	u32sz = sizeof(uint32_t);
91293-	i64sz = sizeof(int64_t);
91294-	u64sz = sizeof(uint64_t);
91295-
91296-	CTL_GET("version", &cpv, const char *);
91297-	emitter_kv(emitter, "version", "Version", emitter_type_string, &cpv);
91298-
91299-	/* config. */
91300-	emitter_dict_begin(emitter, "config", "Build-time option settings");
91301-#define CONFIG_WRITE_BOOL(name)						\
91302-	do {								\
91303-		CTL_GET("config."#name, &bv, bool);			\
91304-		emitter_kv(emitter, #name, "config."#name,		\
91305-		    emitter_type_bool, &bv);				\
91306-	} while (0)
91307-
91308-	CONFIG_WRITE_BOOL(cache_oblivious);
91309-	CONFIG_WRITE_BOOL(debug);
91310-	CONFIG_WRITE_BOOL(fill);
91311-	CONFIG_WRITE_BOOL(lazy_lock);
91312-	emitter_kv(emitter, "malloc_conf", "config.malloc_conf",
91313-	    emitter_type_string, &config_malloc_conf);
91314-
91315-	CONFIG_WRITE_BOOL(opt_safety_checks);
91316-	CONFIG_WRITE_BOOL(prof);
91317-	CONFIG_WRITE_BOOL(prof_libgcc);
91318-	CONFIG_WRITE_BOOL(prof_libunwind);
91319-	CONFIG_WRITE_BOOL(stats);
91320-	CONFIG_WRITE_BOOL(utrace);
91321-	CONFIG_WRITE_BOOL(xmalloc);
91322-#undef CONFIG_WRITE_BOOL
91323-	emitter_dict_end(emitter); /* Close "config" dict. */
91324-
91325-	/* opt. */
91326-#define OPT_WRITE(name, var, size, emitter_type)			\
91327-	if (je_mallctl("opt."name, (void *)&var, &size, NULL, 0) ==	\
91328-	    0) {							\
91329-		emitter_kv(emitter, name, "opt."name, emitter_type,	\
91330-		    &var);						\
91331-	}
91332-
91333-#define OPT_WRITE_MUTABLE(name, var1, var2, size, emitter_type,		\
91334-    altname)								\
91335-	if (je_mallctl("opt."name, (void *)&var1, &size, NULL, 0) ==	\
91336-	    0 && je_mallctl(altname, (void *)&var2, &size, NULL, 0)	\
91337-	    == 0) {							\
91338-		emitter_kv_note(emitter, name, "opt."name,		\
91339-		    emitter_type, &var1, altname, emitter_type,		\
91340-		    &var2);						\
91341-	}
91342-
91343-#define OPT_WRITE_BOOL(name) OPT_WRITE(name, bv, bsz, emitter_type_bool)
91344-#define OPT_WRITE_BOOL_MUTABLE(name, altname)				\
91345-	OPT_WRITE_MUTABLE(name, bv, bv2, bsz, emitter_type_bool, altname)
91346-
91347-#define OPT_WRITE_UNSIGNED(name)					\
91348-	OPT_WRITE(name, uv, usz, emitter_type_unsigned)
91349-
91350-#define OPT_WRITE_INT64(name)						\
91351-	OPT_WRITE(name, i64v, i64sz, emitter_type_int64)
91352-#define OPT_WRITE_UINT64(name)						\
91353-	OPT_WRITE(name, u64v, u64sz, emitter_type_uint64)
91354-
91355-#define OPT_WRITE_SIZE_T(name)						\
91356-	OPT_WRITE(name, sv, ssz, emitter_type_size)
91357-#define OPT_WRITE_SSIZE_T(name)						\
91358-	OPT_WRITE(name, ssv, sssz, emitter_type_ssize)
91359-#define OPT_WRITE_SSIZE_T_MUTABLE(name, altname)			\
91360-	OPT_WRITE_MUTABLE(name, ssv, ssv2, sssz, emitter_type_ssize,	\
91361-	    altname)
91362-
91363-#define OPT_WRITE_CHAR_P(name)						\
91364-	OPT_WRITE(name, cpv, cpsz, emitter_type_string)
91365-
91366-	emitter_dict_begin(emitter, "opt", "Run-time option settings");
91367-
91368-	OPT_WRITE_BOOL("abort")
91369-	OPT_WRITE_BOOL("abort_conf")
91370-	OPT_WRITE_BOOL("cache_oblivious")
91371-	OPT_WRITE_BOOL("confirm_conf")
91372-	OPT_WRITE_BOOL("retain")
91373-	OPT_WRITE_CHAR_P("dss")
91374-	OPT_WRITE_UNSIGNED("narenas")
91375-	OPT_WRITE_CHAR_P("percpu_arena")
91376-	OPT_WRITE_SIZE_T("oversize_threshold")
91377-	OPT_WRITE_BOOL("hpa")
91378-	OPT_WRITE_SIZE_T("hpa_slab_max_alloc")
91379-	OPT_WRITE_SIZE_T("hpa_hugification_threshold")
91380-	OPT_WRITE_UINT64("hpa_hugify_delay_ms")
91381-	OPT_WRITE_UINT64("hpa_min_purge_interval_ms")
91382-	if (je_mallctl("opt.hpa_dirty_mult", (void *)&u32v, &u32sz, NULL, 0)
91383-	    == 0) {
91384-		/*
91385-		 * We cheat a little and "know" the secret meaning of this
91386-		 * representation.
91387-		 */
91388-		if (u32v == (uint32_t)-1) {
91389-			const char *neg1 = "-1";
91390-			emitter_kv(emitter, "hpa_dirty_mult",
91391-			    "opt.hpa_dirty_mult", emitter_type_string, &neg1);
91392-		} else {
91393-			char buf[FXP_BUF_SIZE];
91394-			fxp_print(u32v, buf);
91395-			const char *bufp = buf;
91396-			emitter_kv(emitter, "hpa_dirty_mult",
91397-			    "opt.hpa_dirty_mult", emitter_type_string, &bufp);
91398-		}
91399-	}
91400-	OPT_WRITE_SIZE_T("hpa_sec_nshards")
91401-	OPT_WRITE_SIZE_T("hpa_sec_max_alloc")
91402-	OPT_WRITE_SIZE_T("hpa_sec_max_bytes")
91403-	OPT_WRITE_SIZE_T("hpa_sec_bytes_after_flush")
91404-	OPT_WRITE_SIZE_T("hpa_sec_batch_fill_extra")
91405-	OPT_WRITE_CHAR_P("metadata_thp")
91406-	OPT_WRITE_INT64("mutex_max_spin")
91407-	OPT_WRITE_BOOL_MUTABLE("background_thread", "background_thread")
91408-	OPT_WRITE_SSIZE_T_MUTABLE("dirty_decay_ms", "arenas.dirty_decay_ms")
91409-	OPT_WRITE_SSIZE_T_MUTABLE("muzzy_decay_ms", "arenas.muzzy_decay_ms")
91410-	OPT_WRITE_SIZE_T("lg_extent_max_active_fit")
91411-	OPT_WRITE_CHAR_P("junk")
91412-	OPT_WRITE_BOOL("zero")
91413-	OPT_WRITE_BOOL("utrace")
91414-	OPT_WRITE_BOOL("xmalloc")
91415-	OPT_WRITE_BOOL("experimental_infallible_new")
91416-	OPT_WRITE_BOOL("tcache")
91417-	OPT_WRITE_SIZE_T("tcache_max")
91418-	OPT_WRITE_UNSIGNED("tcache_nslots_small_min")
91419-	OPT_WRITE_UNSIGNED("tcache_nslots_small_max")
91420-	OPT_WRITE_UNSIGNED("tcache_nslots_large")
91421-	OPT_WRITE_SSIZE_T("lg_tcache_nslots_mul")
91422-	OPT_WRITE_SIZE_T("tcache_gc_incr_bytes")
91423-	OPT_WRITE_SIZE_T("tcache_gc_delay_bytes")
91424-	OPT_WRITE_UNSIGNED("lg_tcache_flush_small_div")
91425-	OPT_WRITE_UNSIGNED("lg_tcache_flush_large_div")
91426-	OPT_WRITE_CHAR_P("thp")
91427-	OPT_WRITE_BOOL("prof")
91428-	OPT_WRITE_CHAR_P("prof_prefix")
91429-	OPT_WRITE_BOOL_MUTABLE("prof_active", "prof.active")
91430-	OPT_WRITE_BOOL_MUTABLE("prof_thread_active_init",
91431-	    "prof.thread_active_init")
91432-	OPT_WRITE_SSIZE_T_MUTABLE("lg_prof_sample", "prof.lg_sample")
91433-	OPT_WRITE_BOOL("prof_accum")
91434-	OPT_WRITE_SSIZE_T("lg_prof_interval")
91435-	OPT_WRITE_BOOL("prof_gdump")
91436-	OPT_WRITE_BOOL("prof_final")
91437-	OPT_WRITE_BOOL("prof_leak")
91438-	OPT_WRITE_BOOL("prof_leak_error")
91439-	OPT_WRITE_BOOL("stats_print")
91440-	OPT_WRITE_CHAR_P("stats_print_opts")
91443-	OPT_WRITE_INT64("stats_interval")
91444-	OPT_WRITE_CHAR_P("stats_interval_opts")
91445-	OPT_WRITE_CHAR_P("zero_realloc")
91446-
91447-	emitter_dict_end(emitter);
91448-
91449-#undef OPT_WRITE
91450-#undef OPT_WRITE_MUTABLE
91451-#undef OPT_WRITE_BOOL
91452-#undef OPT_WRITE_BOOL_MUTABLE
91453-#undef OPT_WRITE_UNSIGNED
91454-#undef OPT_WRITE_SSIZE_T
91455-#undef OPT_WRITE_SSIZE_T_MUTABLE
91456-#undef OPT_WRITE_CHAR_P
91457-
91458-	/* prof. */
91459-	if (config_prof) {
91460-		emitter_dict_begin(emitter, "prof", "Profiling settings");
91461-
91462-		CTL_GET("prof.thread_active_init", &bv, bool);
91463-		emitter_kv(emitter, "thread_active_init",
91464-		    "prof.thread_active_init", emitter_type_bool, &bv);
91465-
91466-		CTL_GET("prof.active", &bv, bool);
91467-		emitter_kv(emitter, "active", "prof.active", emitter_type_bool,
91468-		    &bv);
91469-
91470-		CTL_GET("prof.gdump", &bv, bool);
91471-		emitter_kv(emitter, "gdump", "prof.gdump", emitter_type_bool,
91472-		    &bv);
91473-
91474-		CTL_GET("prof.interval", &u64v, uint64_t);
91475-		emitter_kv(emitter, "interval", "prof.interval",
91476-		    emitter_type_uint64, &u64v);
91477-
91478-		CTL_GET("prof.lg_sample", &ssv, ssize_t);
91479-		emitter_kv(emitter, "lg_sample", "prof.lg_sample",
91480-		    emitter_type_ssize, &ssv);
91481-
91482-		emitter_dict_end(emitter); /* Close "prof". */
91483-	}
91484-
91485-	/* arenas. */
91486-	/*
91487-	 * The json output sticks arena info into an "arenas" dict; the table
91488-	 * output puts them at the top-level.
91489-	 */
91490-	emitter_json_object_kv_begin(emitter, "arenas");
91491-
91492-	CTL_GET("arenas.narenas", &uv, unsigned);
91493-	emitter_kv(emitter, "narenas", "Arenas", emitter_type_unsigned, &uv);
91494-
91495-	/*
91496-	 * Decay settings are emitted only in json mode; in table mode, they're
91497-	 * emitted as notes with the opt output, above.
91498-	 */
91499-	CTL_GET("arenas.dirty_decay_ms", &ssv, ssize_t);
91500-	emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize, &ssv);
91501-
91502-	CTL_GET("arenas.muzzy_decay_ms", &ssv, ssize_t);
91503-	emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize, &ssv);
91504-
91505-	CTL_GET("arenas.quantum", &sv, size_t);
91506-	emitter_kv(emitter, "quantum", "Quantum size", emitter_type_size, &sv);
91507-
91508-	CTL_GET("arenas.page", &sv, size_t);
91509-	emitter_kv(emitter, "page", "Page size", emitter_type_size, &sv);
91510-
91511-	if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) {
91512-		emitter_kv(emitter, "tcache_max",
91513-		    "Maximum thread-cached size class", emitter_type_size, &sv);
91514-	}
91515-
91516-	unsigned arenas_nbins;
91517-	CTL_GET("arenas.nbins", &arenas_nbins, unsigned);
91518-	emitter_kv(emitter, "nbins", "Number of bin size classes",
91519-	    emitter_type_unsigned, &arenas_nbins);
91520-
91521-	unsigned arenas_nhbins;
91522-	CTL_GET("arenas.nhbins", &arenas_nhbins, unsigned);
91523-	emitter_kv(emitter, "nhbins", "Number of thread-cache bin size classes",
91524-	    emitter_type_unsigned, &arenas_nhbins);
91525-
91526-	/*
91527-	 * We do enough mallctls in a loop that we actually want to omit them
91528-	 * (not just omit the printing).
91529-	 */
91530-	if (emitter_outputs_json(emitter)) {
91531-		emitter_json_array_kv_begin(emitter, "bin");
91532-		size_t arenas_bin_mib[CTL_MAX_DEPTH];
91533-		CTL_LEAF_PREPARE(arenas_bin_mib, 0, "arenas.bin");
91534-		for (unsigned i = 0; i < arenas_nbins; i++) {
91535-			arenas_bin_mib[2] = i;
91536-			emitter_json_object_begin(emitter);
91537-
91538-			CTL_LEAF(arenas_bin_mib, 3, "size", &sv, size_t);
91539-			emitter_json_kv(emitter, "size", emitter_type_size,
91540-			    &sv);
91541-
91542-			CTL_LEAF(arenas_bin_mib, 3, "nregs", &u32v, uint32_t);
91543-			emitter_json_kv(emitter, "nregs", emitter_type_uint32,
91544-			    &u32v);
91545-
91546-			CTL_LEAF(arenas_bin_mib, 3, "slab_size", &sv, size_t);
91547-			emitter_json_kv(emitter, "slab_size", emitter_type_size,
91548-			    &sv);
91549-
91550-			CTL_LEAF(arenas_bin_mib, 3, "nshards", &u32v, uint32_t);
91551-			emitter_json_kv(emitter, "nshards", emitter_type_uint32,
91552-			    &u32v);
91553-
91554-			emitter_json_object_end(emitter);
91555-		}
91556-		emitter_json_array_end(emitter); /* Close "bin". */
91557-	}
91558-
91559-	unsigned nlextents;
91560-	CTL_GET("arenas.nlextents", &nlextents, unsigned);
91561-	emitter_kv(emitter, "nlextents", "Number of large size classes",
91562-	    emitter_type_unsigned, &nlextents);
91563-
91564-	if (emitter_outputs_json(emitter)) {
91565-		emitter_json_array_kv_begin(emitter, "lextent");
91566-		size_t arenas_lextent_mib[CTL_MAX_DEPTH];
91567-		CTL_LEAF_PREPARE(arenas_lextent_mib, 0, "arenas.lextent");
91568-		for (unsigned i = 0; i < nlextents; i++) {
91569-			arenas_lextent_mib[2] = i;
91570-			emitter_json_object_begin(emitter);
91571-
91572-			CTL_LEAF(arenas_lextent_mib, 3, "size", &sv, size_t);
91573-			emitter_json_kv(emitter, "size", emitter_type_size,
91574-			    &sv);
91575-
91576-			emitter_json_object_end(emitter);
91577-		}
91578-		emitter_json_array_end(emitter); /* Close "lextent". */
91579-	}
91580-
91581-	emitter_json_object_end(emitter); /* Close "arenas" */
91582-}
91583-
91584-JEMALLOC_COLD
91585-static void
91586-stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
91587-    bool unmerged, bool bins, bool large, bool mutex, bool extents, bool hpa) {
91588-	/*
91589-	 * These should be deleted.  We keep them around for a while, to aid in
91590-	 * the transition to the emitter code.
91591-	 */
91592-	size_t allocated, active, metadata, metadata_thp, resident, mapped,
91593-	    retained;
91594-	size_t num_background_threads;
91595-	size_t zero_reallocs;
91596-	uint64_t background_thread_num_runs, background_thread_run_interval;
91597-
91598-	CTL_GET("stats.allocated", &allocated, size_t);
91599-	CTL_GET("stats.active", &active, size_t);
91600-	CTL_GET("stats.metadata", &metadata, size_t);
91601-	CTL_GET("stats.metadata_thp", &metadata_thp, size_t);
91602-	CTL_GET("stats.resident", &resident, size_t);
91603-	CTL_GET("stats.mapped", &mapped, size_t);
91604-	CTL_GET("stats.retained", &retained, size_t);
91605-
91606-	CTL_GET("stats.zero_reallocs", &zero_reallocs, size_t);
91607-
91608-	if (have_background_thread) {
91609-		CTL_GET("stats.background_thread.num_threads",
91610-		    &num_background_threads, size_t);
91611-		CTL_GET("stats.background_thread.num_runs",
91612-		    &background_thread_num_runs, uint64_t);
91613-		CTL_GET("stats.background_thread.run_interval",
91614-		    &background_thread_run_interval, uint64_t);
91615-	} else {
91616-		num_background_threads = 0;
91617-		background_thread_num_runs = 0;
91618-		background_thread_run_interval = 0;
91619-	}
91620-
91621-	/* Generic global stats. */
91622-	emitter_json_object_kv_begin(emitter, "stats");
91623-	emitter_json_kv(emitter, "allocated", emitter_type_size, &allocated);
91624-	emitter_json_kv(emitter, "active", emitter_type_size, &active);
91625-	emitter_json_kv(emitter, "metadata", emitter_type_size, &metadata);
91626-	emitter_json_kv(emitter, "metadata_thp", emitter_type_size,
91627-	    &metadata_thp);
91628-	emitter_json_kv(emitter, "resident", emitter_type_size, &resident);
91629-	emitter_json_kv(emitter, "mapped", emitter_type_size, &mapped);
91630-	emitter_json_kv(emitter, "retained", emitter_type_size, &retained);
91631-	emitter_json_kv(emitter, "zero_reallocs", emitter_type_size,
91632-	    &zero_reallocs);
91633-
91634-	emitter_table_printf(emitter, "Allocated: %zu, active: %zu, "
91635-	    "metadata: %zu (n_thp %zu), resident: %zu, mapped: %zu, "
91636-	    "retained: %zu\n", allocated, active, metadata, metadata_thp,
91637-	    resident, mapped, retained);
91638-
91639-	/* Strange behaviors */
91640-	emitter_table_printf(emitter,
91641-	    "Count of realloc(non-null-ptr, 0) calls: %zu\n", zero_reallocs);
91642-
91643-	/* Background thread stats. */
91644-	emitter_json_object_kv_begin(emitter, "background_thread");
91645-	emitter_json_kv(emitter, "num_threads", emitter_type_size,
91646-	    &num_background_threads);
91647-	emitter_json_kv(emitter, "num_runs", emitter_type_uint64,
91648-	    &background_thread_num_runs);
91649-	emitter_json_kv(emitter, "run_interval", emitter_type_uint64,
91650-	    &background_thread_run_interval);
91651-	emitter_json_object_end(emitter); /* Close "background_thread". */
91652-
91653-	emitter_table_printf(emitter, "Background threads: %zu, "
91654-	    "num_runs: %"FMTu64", run_interval: %"FMTu64" ns\n",
91655-	    num_background_threads, background_thread_num_runs,
91656-	    background_thread_run_interval);
91657-
91658-	if (mutex) {
91659-		emitter_row_t row;
91660-		emitter_col_t name;
91661-		emitter_col_t col64[mutex_prof_num_uint64_t_counters];
91662-		emitter_col_t col32[mutex_prof_num_uint32_t_counters];
91663-		uint64_t uptime;
91664-
91665-		emitter_row_init(&row);
91666-		mutex_stats_init_cols(&row, "", &name, col64, col32);
91667-
91668-		emitter_table_row(emitter, &row);
91669-		emitter_json_object_kv_begin(emitter, "mutexes");
91670-
91671-		CTL_M2_GET("stats.arenas.0.uptime", 0, &uptime, uint64_t);
91672-
91673-		size_t stats_mutexes_mib[CTL_MAX_DEPTH];
91674-		CTL_LEAF_PREPARE(stats_mutexes_mib, 0, "stats.mutexes");
91675-		for (int i = 0; i < mutex_prof_num_global_mutexes; i++) {
91676-			mutex_stats_read_global(stats_mutexes_mib, 2,
91677-			    global_mutex_names[i], &name, col64, col32, uptime);
91678-			emitter_json_object_kv_begin(emitter, global_mutex_names[i]);
91679-			mutex_stats_emit(emitter, &row, col64, col32);
91680-			emitter_json_object_end(emitter);
91681-		}
91682-
91683-		emitter_json_object_end(emitter); /* Close "mutexes". */
91684-	}
91685-
91686-	emitter_json_object_end(emitter); /* Close "stats". */
91687-
91688-	if (merged || destroyed || unmerged) {
91689-		unsigned narenas;
91690-
91691-		emitter_json_object_kv_begin(emitter, "stats.arenas");
91692-
91693-		CTL_GET("arenas.narenas", &narenas, unsigned);
91694-		size_t mib[3];
91695-		size_t miblen = sizeof(mib) / sizeof(size_t);
91696-		size_t sz;
91697-		VARIABLE_ARRAY(bool, initialized, narenas);
91698-		bool destroyed_initialized;
91699-		unsigned i, j, ninitialized;
91700-
91701-		xmallctlnametomib("arena.0.initialized", mib, &miblen);
91702-		for (i = ninitialized = 0; i < narenas; i++) {
91703-			mib[1] = i;
91704-			sz = sizeof(bool);
91705-			xmallctlbymib(mib, miblen, &initialized[i], &sz,
91706-			    NULL, 0);
91707-			if (initialized[i]) {
91708-				ninitialized++;
91709-			}
91710-		}
91711-		mib[1] = MALLCTL_ARENAS_DESTROYED;
91712-		sz = sizeof(bool);
91713-		xmallctlbymib(mib, miblen, &destroyed_initialized, &sz,
91714-		    NULL, 0);
91715-
91716-		/* Merged stats. */
91717-		if (merged && (ninitialized > 1 || !unmerged)) {
91718-			/* Print merged arena stats. */
91719-			emitter_table_printf(emitter, "Merged arenas stats:\n");
91720-			emitter_json_object_kv_begin(emitter, "merged");
91721-			stats_arena_print(emitter, MALLCTL_ARENAS_ALL, bins,
91722-			    large, mutex, extents, hpa);
91723-			emitter_json_object_end(emitter); /* Close "merged". */
91724-		}
91725-
91726-		/* Destroyed stats. */
91727-		if (destroyed_initialized && destroyed) {
91728-			/* Print destroyed arena stats. */
91729-			emitter_table_printf(emitter,
91730-			    "Destroyed arenas stats:\n");
91731-			emitter_json_object_kv_begin(emitter, "destroyed");
91732-			stats_arena_print(emitter, MALLCTL_ARENAS_DESTROYED,
91733-			    bins, large, mutex, extents, hpa);
91734-			emitter_json_object_end(emitter); /* Close "destroyed". */
91735-		}
91736-
91737-		/* Unmerged stats. */
91738-		if (unmerged) {
91739-			for (i = j = 0; i < narenas; i++) {
91740-				if (initialized[i]) {
91741-					char arena_ind_str[20];
91742-					malloc_snprintf(arena_ind_str,
91743-					    sizeof(arena_ind_str), "%u", i);
91744-					emitter_json_object_kv_begin(emitter,
91745-					    arena_ind_str);
91746-					emitter_table_printf(emitter,
91747-					    "arenas[%s]:\n", arena_ind_str);
91748-					stats_arena_print(emitter, i, bins,
91749-					    large, mutex, extents, hpa);
91750-					/* Close "<arena-ind>". */
91751-					emitter_json_object_end(emitter);
91752-				}
91753-			}
91754-		}
91755-		emitter_json_object_end(emitter); /* Close "stats.arenas". */
91756-	}
91757-}
91758-
91759-void
91760-stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts) {
91761-	int err;
91762-	uint64_t epoch;
91763-	size_t u64sz;
91764-#define OPTION(o, v, d, s) bool v = d;
91765-	STATS_PRINT_OPTIONS
91766-#undef OPTION
91767-
91768-	/*
91769-	 * Refresh stats, in case mallctl() was called by the application.
91770-	 *
91771-	 * Check for OOM here, since refreshing the ctl cache can trigger
91772-	 * allocation.  In practice, none of the subsequent mallctl()-related
91773-	 * calls in this function will cause OOM if this one succeeds.
91774-	 */
91775-	epoch = 1;
91776-	u64sz = sizeof(uint64_t);
91777-	err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch,
91778-	    sizeof(uint64_t));
91779-	if (err != 0) {
91780-		if (err == EAGAIN) {
91781-			malloc_write("<jemalloc>: Memory allocation failure in "
91782-			    "mallctl(\"epoch\", ...)\n");
91783-			return;
91784-		}
91785-		malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
91786-		    "...)\n");
91787-		abort();
91788-	}
91789-
91790-	if (opts != NULL) {
91791-		for (unsigned i = 0; opts[i] != '\0'; i++) {
91792-			switch (opts[i]) {
91793-#define OPTION(o, v, d, s) case o: v = s; break;
91794-				STATS_PRINT_OPTIONS
91795-#undef OPTION
91796-			default:;
91797-			}
91798-		}
91799-	}
91800-
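The OPTION define/undef pairs above are an X-macro: STATS_PRINT_OPTIONS (defined in stats.h, not shown in this diff) is a list of OPTION(char, var, default, value_when_set) entries expanded once to declare the flag variables and once more to generate the switch cases. A minimal, self-contained sketch of the pattern follows; the two example entries ('J' for json, 'g' for general) are hypothetical stand-ins for the real list in stats.h:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical option list standing in for STATS_PRINT_OPTIONS. */
#define EXAMPLE_STATS_PRINT_OPTIONS		\
	OPTION('J', json, false, true)		\
	OPTION('g', general, true, false)

static void
example_parse_opts(const char *opts) {
	/* First expansion: declare each flag with its default value. */
#define OPTION(o, v, d, s) bool v = d;
	EXAMPLE_STATS_PRINT_OPTIONS
#undef OPTION

	/* Second expansion: one case per option character flips its flag. */
	for (unsigned i = 0; opts != NULL && opts[i] != '\0'; i++) {
		switch (opts[i]) {
#define OPTION(o, v, d, s) case o: v = s; break;
		EXAMPLE_STATS_PRINT_OPTIONS
#undef OPTION
		default:;
		}
	}
	printf("json=%d general=%d\n", json, general);
}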
91801-	emitter_t emitter;
91802-	emitter_init(&emitter,
91803-	    json ? emitter_output_json_compact : emitter_output_table,
91804-	    write_cb, cbopaque);
91805-	emitter_begin(&emitter);
91806-	emitter_table_printf(&emitter, "___ Begin jemalloc statistics ___\n");
91807-	emitter_json_object_kv_begin(&emitter, "jemalloc");
91808-
91809-	if (general) {
91810-		stats_general_print(&emitter);
91811-	}
91812-	if (config_stats) {
91813-		stats_print_helper(&emitter, merged, destroyed, unmerged,
91814-		    bins, large, mutex, extents, hpa);
91815-	}
91816-
91817-	emitter_json_object_end(&emitter); /* Closes the "jemalloc" dict. */
91818-	emitter_table_printf(&emitter, "--- End jemalloc statistics ---\n");
91819-	emitter_end(&emitter);
91820-}
91821-
91822-uint64_t
91823-stats_interval_new_event_wait(tsd_t *tsd) {
91824-	return stats_interval_accum_batch;
91825-}
91826-
91827-uint64_t
91828-stats_interval_postponed_event_wait(tsd_t *tsd) {
91829-	return TE_MIN_START_WAIT;
91830-}
91831-
91832-void
91833-stats_interval_event_handler(tsd_t *tsd, uint64_t elapsed) {
91834-	assert(elapsed > 0 && elapsed != TE_INVALID_ELAPSED);
91835-	if (counter_accum(tsd_tsdn(tsd), &stats_interval_accumulated,
91836-	    elapsed)) {
91837-		je_malloc_stats_print(NULL, NULL, opt_stats_interval_opts);
91838-	}
91839-}
91840-
91841-bool
91842-stats_boot(void) {
91843-	uint64_t stats_interval;
91844-	if (opt_stats_interval < 0) {
91845-		assert(opt_stats_interval == -1);
91846-		stats_interval = 0;
91847-		stats_interval_accum_batch = 0;
91848-	} else {
91849-		/* See comments in stats.h */
91850-		stats_interval = (opt_stats_interval > 0) ?
91851-		    opt_stats_interval : 1;
91852-		uint64_t batch = stats_interval >>
91853-		    STATS_INTERVAL_ACCUM_LG_BATCH_SIZE;
91854-		if (batch > STATS_INTERVAL_ACCUM_BATCH_MAX) {
91855-			batch = STATS_INTERVAL_ACCUM_BATCH_MAX;
91856-		} else if (batch == 0) {
91857-			batch = 1;
91858-		}
91859-		stats_interval_accum_batch = batch;
91860-	}
91861-
91862-	return counter_accum_init(&stats_interval_accumulated, stats_interval);
91863-}
91864-
91865-void
91866-stats_prefork(tsdn_t *tsdn) {
91867-	counter_prefork(tsdn, &stats_interval_accumulated);
91868-}
91869-
91870-void
91871-stats_postfork_parent(tsdn_t *tsdn) {
91872-	counter_postfork_parent(tsdn, &stats_interval_accumulated);
91873-}
91874-
91875-void
91876-stats_postfork_child(tsdn_t *tsdn) {
91877-	counter_postfork_child(tsdn, &stats_interval_accumulated);
91878-}
91879diff --git a/jemalloc/src/sz.c b/jemalloc/src/sz.c
91880deleted file mode 100644
91881index d3115dd..0000000
91882--- a/jemalloc/src/sz.c
91883+++ /dev/null
91884@@ -1,114 +0,0 @@
91885-#include "jemalloc/internal/jemalloc_preamble.h"
91886-#include "jemalloc/internal/jemalloc_internal_includes.h"
91887-#include "jemalloc/internal/sz.h"
91888-
91889-JEMALLOC_ALIGNED(CACHELINE)
91890-size_t sz_pind2sz_tab[SC_NPSIZES+1];
91891-size_t sz_large_pad;
91892-
91893-size_t
91894-sz_psz_quantize_floor(size_t size) {
91895-	size_t ret;
91896-	pszind_t pind;
91897-
91898-	assert(size > 0);
91899-	assert((size & PAGE_MASK) == 0);
91900-
91901-	pind = sz_psz2ind(size - sz_large_pad + 1);
91902-	if (pind == 0) {
91903-		/*
91904-		 * Avoid underflow.  This short-circuit would also do the right
91905-		 * thing for all sizes in the range for which there are
91906-		 * PAGE-spaced size classes, but it's simplest to just handle
91907-		 * the one case that would cause erroneous results.
91908-		 */
91909-		return size;
91910-	}
91911-	ret = sz_pind2sz(pind - 1) + sz_large_pad;
91912-	assert(ret <= size);
91913-	return ret;
91914-}
91915-
91916-size_t
91917-sz_psz_quantize_ceil(size_t size) {
91918-	size_t ret;
91919-
91920-	assert(size > 0);
91921-	assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
91922-	assert((size & PAGE_MASK) == 0);
91923-
91924-	ret = sz_psz_quantize_floor(size);
91925-	if (ret < size) {
91926-		/*
91927-		 * Skip a quantization that may have an adequately large extent,
91928-		 * because under-sized extents may be mixed in.  This only
91929-		 * happens when an unusual size is requested, i.e. for aligned
91930-		 * allocation, and is just one of several places where linear
91931-		 * search would potentially find sufficiently aligned available
91932-		 * memory somewhere lower.
91933-		 */
91934-		ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
91935-		    sz_large_pad;
91936-	}
91937-	return ret;
91938-}
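Taken together, the two helpers should bracket a page-aligned request from below and above. A small illustrative property check, assuming sz_boot() has run and the size satisfies the asserts the helpers already make:

/* Illustrative property check for the quantization helpers above. */
static void
sz_psz_quantize_check(size_t size) {
	size_t qfloor = sz_psz_quantize_floor(size);
	size_t qceil = sz_psz_quantize_ceil(size);

	/* Both results remain page-aligned and bracket the input. */
	assert((qfloor & PAGE_MASK) == 0 && (qceil & PAGE_MASK) == 0);
	assert(qfloor <= size && size <= qceil);
}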
91939-
91940-static void
91941-sz_boot_pind2sz_tab(const sc_data_t *sc_data) {
91942-	int pind = 0;
91943-	for (unsigned i = 0; i < SC_NSIZES; i++) {
91944-		const sc_t *sc = &sc_data->sc[i];
91945-		if (sc->psz) {
91946-			sz_pind2sz_tab[pind] = (ZU(1) << sc->lg_base)
91947-			    + (ZU(sc->ndelta) << sc->lg_delta);
91948-			pind++;
91949-		}
91950-	}
91951-	for (int i = pind; i <= (int)SC_NPSIZES; i++) {
91952-		sz_pind2sz_tab[i] = sc_data->large_maxclass + PAGE;
91953-	}
91954-}
91955-
91956-JEMALLOC_ALIGNED(CACHELINE)
91957-size_t sz_index2size_tab[SC_NSIZES];
91958-
91959-static void
91960-sz_boot_index2size_tab(const sc_data_t *sc_data) {
91961-	for (unsigned i = 0; i < SC_NSIZES; i++) {
91962-		const sc_t *sc = &sc_data->sc[i];
91963-		sz_index2size_tab[i] = (ZU(1) << sc->lg_base)
91964-		    + (ZU(sc->ndelta) << (sc->lg_delta));
91965-	}
91966-}
91967-
91968-/*
91969- * To keep this table small, we divide sizes by the tiny min size, which gives
91970- * the smallest interval for which the result can change.
91971- */
91972-JEMALLOC_ALIGNED(CACHELINE)
91973-uint8_t sz_size2index_tab[(SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1];
91974-
91975-static void
91976-sz_boot_size2index_tab(const sc_data_t *sc_data) {
91977-	size_t dst_max = (SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN) + 1;
91978-	size_t dst_ind = 0;
91979-	for (unsigned sc_ind = 0; sc_ind < SC_NSIZES && dst_ind < dst_max;
91980-	    sc_ind++) {
91981-		const sc_t *sc = &sc_data->sc[sc_ind];
91982-		size_t sz = (ZU(1) << sc->lg_base)
91983-		    + (ZU(sc->ndelta) << sc->lg_delta);
91984-		size_t max_ind = ((sz + (ZU(1) << SC_LG_TINY_MIN) - 1)
91985-				   >> SC_LG_TINY_MIN);
91986-		for (; dst_ind <= max_ind && dst_ind < dst_max; dst_ind++) {
91987-			sz_size2index_tab[dst_ind] = sc_ind;
91988-		}
91989-	}
91990-}
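The table's consumer is an inline helper in sz.h and is not part of this diff; a sketch consistent with the max_ind rounding above is the following, where rounding the request up to the next multiple of the tiny minimum makes every size in (previous class, sz] resolve to the class of size sz:

/*
 * Sketch of the table consumer; the real inline helper is declared in
 * sz.h.  The rounding matches the max_ind computation above.
 */
static inline szind_t
sz_size2index_lookup_sketch(size_t size) {
	assert(size <= SC_LOOKUP_MAXCLASS);
	return (szind_t)sz_size2index_tab[
	    (size + (ZU(1) << SC_LG_TINY_MIN) - 1) >> SC_LG_TINY_MIN];
}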
91991-
91992-void
91993-sz_boot(const sc_data_t *sc_data, bool cache_oblivious) {
91994-	sz_large_pad = cache_oblivious ? PAGE : 0;
91995-	sz_boot_pind2sz_tab(sc_data);
91996-	sz_boot_index2size_tab(sc_data);
91997-	sz_boot_size2index_tab(sc_data);
91998-}
91999diff --git a/jemalloc/src/tcache.c b/jemalloc/src/tcache.c
92000deleted file mode 100644
92001index fa16732..0000000
92002--- a/jemalloc/src/tcache.c
92003+++ /dev/null
92004@@ -1,1101 +0,0 @@
92005-#include "jemalloc/internal/jemalloc_preamble.h"
92006-#include "jemalloc/internal/jemalloc_internal_includes.h"
92007-
92008-#include "jemalloc/internal/assert.h"
92009-#include "jemalloc/internal/mutex.h"
92010-#include "jemalloc/internal/safety_check.h"
92011-#include "jemalloc/internal/san.h"
92012-#include "jemalloc/internal/sc.h"
92013-
92014-/******************************************************************************/
92015-/* Data. */
92016-
92017-bool opt_tcache = true;
92018-
92019-/* tcache_maxclass is set to 32KB by default.  */
92020-size_t opt_tcache_max = ((size_t)1) << 15;
92021-
92022-/* Reasonable defaults for min and max values. */
92023-unsigned opt_tcache_nslots_small_min = 20;
92024-unsigned opt_tcache_nslots_small_max = 200;
92025-unsigned opt_tcache_nslots_large = 20;
92026-
92027-/*
92028- * We attempt to make the number of slots in a tcache bin for a given size class
92029- * equal to the number of objects in a slab times some multiplier.  By default,
92030- * the multiplier is 2 (i.e. we set the maximum number of objects in the tcache
92031- * to twice the number of objects in a slab).
92032- * This is bounded by some other constraints as well, like the fact that it
92033- * must be even, must be less than opt_tcache_nslots_small_max, etc..
92034- */
92035-ssize_t	opt_lg_tcache_nslots_mul = 1;
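A sketch of the sizing rule the comment above describes; the actual computation lives elsewhere in this file and in the bin bootstrap code, so the helper name and the exact clamping below are illustrative assumptions rather than the shipped logic:

/* Illustrative only: target slot count for a small bin, per the comment. */
static unsigned
tcache_nslots_small_sketch(unsigned nregs_per_slab) {
	/* Scale the per-slab object count by 2^opt_lg_tcache_nslots_mul. */
	size_t nslots = opt_lg_tcache_nslots_mul >= 0
	    ? ((size_t)nregs_per_slab << opt_lg_tcache_nslots_mul)
	    : ((size_t)nregs_per_slab >> -opt_lg_tcache_nslots_mul);
	/* Clamp to the configured min/max, then force the result even. */
	if (nslots < opt_tcache_nslots_small_min) {
		nslots = opt_tcache_nslots_small_min;
	} else if (nslots > opt_tcache_nslots_small_max) {
		nslots = opt_tcache_nslots_small_max;
	}
	if (nslots % 2 != 0) {
		nslots--;
	}
	return (unsigned)nslots;
}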
92036-
92037-/*
92038- * Number of allocation bytes between tcache incremental GCs.  Again, this
92039- * default just seems to work well; more tuning is possible.
92040- */
92041-size_t opt_tcache_gc_incr_bytes = 65536;
92042-
92043-/*
92044- * With default settings, we may end up flushing small bins frequently with
92045- * small flush amounts.  To limit this tendency, we can set a number of bytes to
92046- * "delay" by.  If we try to flush N M-byte items, we decrease that size-class's
92047- * delay by N * M.  So, if delay is 1024 and we're looking at the 64-byte size
92048- * class, we won't do any flushing until we've been asked to flush 1024/64 == 16
92049- * items.  This can happen in any configuration (i.e. being asked to flush 16
92050- * items once, or 4 items 4 times).
92051- *
92052- * Practically, this is stored as a count of items in a uint8_t, so the
92053- * effective maximum value for a size class is 255 * sz.
92054- */
92055-size_t opt_tcache_gc_delay_bytes = 0;
92056-
92057-/*
92058- * When a cache bin is flushed because it's full, how much of it do we flush?
92059- * By default, we flush half the maximum number of items.
92060- */
92061-unsigned opt_lg_tcache_flush_small_div = 1;
92062-unsigned opt_lg_tcache_flush_large_div = 1;
92063-
92064-cache_bin_info_t	*tcache_bin_info;
92065-
92066-/* Total stack size required (per tcache).  Include the padding above. */
92067-static size_t tcache_bin_alloc_size;
92068-static size_t tcache_bin_alloc_alignment;
92069-
92070-/* Number of cache bins enabled, including both large and small. */
92071-unsigned		nhbins;
92072-/* Max size class to be cached (can be small or large). */
92073-size_t			tcache_maxclass;
92074-
92075-tcaches_t		*tcaches;
92076-
92077-/* Index of first element within tcaches that has never been used. */
92078-static unsigned		tcaches_past;
92079-
92080-/* Head of singly linked list tracking available tcaches elements. */
92081-static tcaches_t	*tcaches_avail;
92082-
92083-/* Protects tcaches{,_past,_avail}. */
92084-static malloc_mutex_t	tcaches_mtx;
92085-
92086-/******************************************************************************/
92087-
92088-size_t
92089-tcache_salloc(tsdn_t *tsdn, const void *ptr) {
92090-	return arena_salloc(tsdn, ptr);
92091-}
92092-
92093-uint64_t
92094-tcache_gc_new_event_wait(tsd_t *tsd) {
92095-	return opt_tcache_gc_incr_bytes;
92096-}
92097-
92098-uint64_t
92099-tcache_gc_postponed_event_wait(tsd_t *tsd) {
92100-	return TE_MIN_START_WAIT;
92101-}
92102-
92103-uint64_t
92104-tcache_gc_dalloc_new_event_wait(tsd_t *tsd) {
92105-	return opt_tcache_gc_incr_bytes;
92106-}
92107-
92108-uint64_t
92109-tcache_gc_dalloc_postponed_event_wait(tsd_t *tsd) {
92110-	return TE_MIN_START_WAIT;
92111-}
92112-
92113-static uint8_t
92114-tcache_gc_item_delay_compute(szind_t szind) {
92115-	assert(szind < SC_NBINS);
92116-	size_t sz = sz_index2size(szind);
92117-	size_t item_delay = opt_tcache_gc_delay_bytes / sz;
92118-	size_t delay_max = ZU(1)
92119-	    << (sizeof(((tcache_slow_t *)NULL)->bin_flush_delay_items[0]) * 8);
92120-	if (item_delay >= delay_max) {
92121-		item_delay = delay_max - 1;
92122-	}
92123-	return (uint8_t)item_delay;
92124-}
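Plugging the numbers from the file-level comment into the computation above: a 1024-byte delay with the 64-byte size class yields an item delay of 16, well below the uint8_t cap of 255, so the clamp only matters for tiny size classes combined with large delay settings. A standalone check of that arithmetic:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Standalone check of the delay arithmetic, using the values from the
 * file-level comment: 1024-byte delay, 64-byte size class. */
int
main(void) {
	size_t gc_delay_bytes = 1024;	/* example opt_tcache_gc_delay_bytes */
	size_t sz = 64;			/* example size class */
	size_t item_delay = gc_delay_bytes / sz;
	size_t delay_max = (size_t)1 << (sizeof(uint8_t) * 8);	/* 256 */

	if (item_delay >= delay_max) {
		item_delay = delay_max - 1;
	}
	assert(item_delay == 16);
	return 0;
}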
92125-
92126-static void
92127-tcache_gc_small(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
92128-    szind_t szind) {
92129-	/* Aim to flush 3/4 of items below low-water. */
92130-	assert(szind < SC_NBINS);
92131-
92132-	cache_bin_t *cache_bin = &tcache->bins[szind];
92133-	cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
92134-	    &tcache_bin_info[szind]);
92135-	cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
92136-	    &tcache_bin_info[szind]);
92137-	assert(!tcache_slow->bin_refilled[szind]);
92138-
92139-	size_t nflush = low_water - (low_water >> 2);
92140-	if (nflush < tcache_slow->bin_flush_delay_items[szind]) {
92141-		/* Workaround for a conversion warning. */
92142-		uint8_t nflush_uint8 = (uint8_t)nflush;
92143-		assert(sizeof(tcache_slow->bin_flush_delay_items[0]) ==
92144-		    sizeof(nflush_uint8));
92145-		tcache_slow->bin_flush_delay_items[szind] -= nflush_uint8;
92146-		return;
92147-	} else {
92148-		tcache_slow->bin_flush_delay_items[szind]
92149-		    = tcache_gc_item_delay_compute(szind);
92150-	}
92151-
92152-	tcache_bin_flush_small(tsd, tcache, cache_bin, szind,
92153-	    (unsigned)(ncached - nflush));
92154-
92155-	/*
92156-	 * Reduce fill count by 2X.  Limit lg_fill_div such that
92157-	 * the fill count is always at least 1.
92158-	 */
92159-	if ((cache_bin_info_ncached_max(&tcache_bin_info[szind])
92160-	    >> (tcache_slow->lg_fill_div[szind] + 1)) >= 1) {
92161-		tcache_slow->lg_fill_div[szind]++;
92162-	}
92163-}
92164-
92165-static void
92166-tcache_gc_large(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
92167-    szind_t szind) {
92168-	/* Like the small GC; flush 3/4 of untouched items. */
92169-	assert(szind >= SC_NBINS);
92170-	cache_bin_t *cache_bin = &tcache->bins[szind];
92171-	cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
92172-	    &tcache_bin_info[szind]);
92173-	cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
92174-	    &tcache_bin_info[szind]);
92175-	tcache_bin_flush_large(tsd, tcache, cache_bin, szind,
92176-	    (unsigned)(ncached - low_water + (low_water >> 2)));
92177-}
92178-
92179-static void
92180-tcache_event(tsd_t *tsd) {
92181-	tcache_t *tcache = tcache_get(tsd);
92182-	if (tcache == NULL) {
92183-		return;
92184-	}
92185-
92186-	tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
92187-	szind_t szind = tcache_slow->next_gc_bin;
92188-	bool is_small = (szind < SC_NBINS);
92189-	cache_bin_t *cache_bin = &tcache->bins[szind];
92190-
92191-	tcache_bin_flush_stashed(tsd, tcache, cache_bin, szind, is_small);
92192-
92193-	cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
92194-	    &tcache_bin_info[szind]);
92195-	if (low_water > 0) {
92196-		if (is_small) {
92197-			tcache_gc_small(tsd, tcache_slow, tcache, szind);
92198-		} else {
92199-			tcache_gc_large(tsd, tcache_slow, tcache, szind);
92200-		}
92201-	} else if (is_small && tcache_slow->bin_refilled[szind]) {
92202-		assert(low_water == 0);
92203-		/*
92204-		 * Increase fill count by 2X for small bins.  Make sure
92205-		 * lg_fill_div stays greater than 0.
92206-		 */
92207-		if (tcache_slow->lg_fill_div[szind] > 1) {
92208-			tcache_slow->lg_fill_div[szind]--;
92209-		}
92210-		tcache_slow->bin_refilled[szind] = false;
92211-	}
92212-	cache_bin_low_water_set(cache_bin);
92213-
92214-	tcache_slow->next_gc_bin++;
92215-	if (tcache_slow->next_gc_bin == nhbins) {
92216-		tcache_slow->next_gc_bin = 0;
92217-	}
92218-}
92219-
92220-void
92221-tcache_gc_event_handler(tsd_t *tsd, uint64_t elapsed) {
92222-	assert(elapsed == TE_INVALID_ELAPSED);
92223-	tcache_event(tsd);
92224-}
92225-
92226-void
92227-tcache_gc_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed) {
92228-	assert(elapsed == TE_INVALID_ELAPSED);
92229-	tcache_event(tsd);
92230-}
92231-
92232-void *
92233-tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena,
92234-    tcache_t *tcache, cache_bin_t *cache_bin, szind_t binind,
92235-    bool *tcache_success) {
92236-	tcache_slow_t *tcache_slow = tcache->tcache_slow;
92237-	void *ret;
92238-
92239-	assert(tcache_slow->arena != NULL);
92240-	unsigned nfill = cache_bin_info_ncached_max(&tcache_bin_info[binind])
92241-	    >> tcache_slow->lg_fill_div[binind];
92242-	arena_cache_bin_fill_small(tsdn, arena, cache_bin,
92243-	    &tcache_bin_info[binind], binind, nfill);
92244-	tcache_slow->bin_refilled[binind] = true;
92245-	ret = cache_bin_alloc(cache_bin, tcache_success);
92246-
92247-	return ret;
92248-}
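To see how lg_fill_div shapes the refill size above, take a hypothetical ncached_max of 64: a fresh bin (lg_fill_div == 1) is refilled with 64 >> 1 == 32 items; each GC pass that finds untouched items (low_water > 0) bumps lg_fill_div, halving the next refill to 16, then 8, and so on, while a GC pass that sees a refilled bin drained to empty walks lg_fill_div back toward 1, doubling the refill again.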
92249-
92250-static const void *
92251-tcache_bin_flush_ptr_getter(void *arr_ctx, size_t ind) {
92252-	cache_bin_ptr_array_t *arr = (cache_bin_ptr_array_t *)arr_ctx;
92253-	return arr->ptr[ind];
92254-}
92255-
92256-static void
92257-tcache_bin_flush_metadata_visitor(void *szind_sum_ctx,
92258-    emap_full_alloc_ctx_t *alloc_ctx) {
92259-	size_t *szind_sum = (size_t *)szind_sum_ctx;
92260-	*szind_sum -= alloc_ctx->szind;
92261-	util_prefetch_write_range(alloc_ctx->edata, sizeof(edata_t));
92262-}
92263-
92264-JEMALLOC_NOINLINE static void
92265-tcache_bin_flush_size_check_fail(cache_bin_ptr_array_t *arr, szind_t szind,
92266-    size_t nptrs, emap_batch_lookup_result_t *edatas) {
92267-	bool found_mismatch = false;
92268-	for (size_t i = 0; i < nptrs; i++) {
92269-		szind_t true_szind = edata_szind_get(edatas[i].edata);
92270-		if (true_szind != szind) {
92271-			found_mismatch = true;
92272-			safety_check_fail_sized_dealloc(
92273-			    /* current_dealloc */ false,
92274-			    /* ptr */ tcache_bin_flush_ptr_getter(arr, i),
92275-			    /* true_size */ sz_index2size(true_szind),
92276-			    /* input_size */ sz_index2size(szind));
92277-		}
92278-	}
92279-	assert(found_mismatch);
92280-}
92281-
92282-static void
92283-tcache_bin_flush_edatas_lookup(tsd_t *tsd, cache_bin_ptr_array_t *arr,
92284-    szind_t binind, size_t nflush, emap_batch_lookup_result_t *edatas) {
92285-
92286-	/*
92287-	 * This gets compiled away when config_opt_safety_checks is false.
92288-	 * Checks for sized deallocation bugs, failing early rather than
92289-	 * corrupting metadata.
92290-	 */
92291-	size_t szind_sum = binind * nflush;
92292-	emap_edata_lookup_batch(tsd, &arena_emap_global, nflush,
92293-	    &tcache_bin_flush_ptr_getter, (void *)arr,
92294-	    &tcache_bin_flush_metadata_visitor, (void *)&szind_sum,
92295-	    edatas);
92296-	if (config_opt_safety_checks && unlikely(szind_sum != 0)) {
92297-		tcache_bin_flush_size_check_fail(arr, binind, nflush, edatas);
92298-	}
92299-}
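The szind_sum check above is an aggregate equality test: start from binind * nflush and subtract each pointer's true size index, so the result is zero exactly when the indices add up to the expected total. A toy version (hypothetical helper; a plain array stands in for the batched emap lookup) follows, with the caveat that offsetting errors could cancel, which is the price of keeping the check to one subtraction per pointer:

#include <stdbool.h>
#include <stddef.h>

static bool
sized_dealloc_mismatch_sketch(size_t binind, const size_t *true_szinds,
    size_t n) {
	size_t szind_sum = binind * n;
	for (size_t i = 0; i < n; i++) {
		szind_sum -= true_szinds[i];
	}
	/* Nonzero means at least one size index differed from binind. */
	return szind_sum != 0;
}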
92300-
92301-JEMALLOC_ALWAYS_INLINE bool
92302-tcache_bin_flush_match(edata_t *edata, unsigned cur_arena_ind,
92303-    unsigned cur_binshard, bool small) {
92304-	if (small) {
92305-		return edata_arena_ind_get(edata) == cur_arena_ind
92306-		    && edata_binshard_get(edata) == cur_binshard;
92307-	} else {
92308-		return edata_arena_ind_get(edata) == cur_arena_ind;
92309-	}
92310-}
92311-
92312-JEMALLOC_ALWAYS_INLINE void
92313-tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
92314-    szind_t binind, cache_bin_ptr_array_t *ptrs, unsigned nflush, bool small) {
92315-	tcache_slow_t *tcache_slow = tcache->tcache_slow;
92316-	/*
92317-	 * A couple lookup calls take tsdn; declare it once for convenience
92318-	 * instead of calling tsd_tsdn(tsd) all the time.
92319-	 */
92320-	tsdn_t *tsdn = tsd_tsdn(tsd);
92321-
92322-	if (small) {
92323-		assert(binind < SC_NBINS);
92324-	} else {
92325-		assert(binind < nhbins);
92326-	}
92327-	arena_t *tcache_arena = tcache_slow->arena;
92328-	assert(tcache_arena != NULL);
92329-
92330-	/*
92331-	 * Variable length array must have > 0 length; the last element is never
92332-	 * touched (it's just included to satisfy the no-zero-length rule).
92333-	 */
92334-	VARIABLE_ARRAY(emap_batch_lookup_result_t, item_edata, nflush + 1);
92335-	tcache_bin_flush_edatas_lookup(tsd, ptrs, binind, nflush, item_edata);
92336-
92337-	/*
92338-	 * The slabs where we freed the last remaining object in the slab (and
92339-	 * so need to free the slab itself).
92340-	 * Used only if small == true.
92341-	 */
92342-	unsigned dalloc_count = 0;
92343-	VARIABLE_ARRAY(edata_t *, dalloc_slabs, nflush + 1);
92344-
92345-	/*
92346-	 * We're about to grab a bunch of locks.  If one of them happens to be
92347-	 * the one guarding the arena-level stats counters that we flush our
92348-	 * thread-local ones into, flush them within that same critical section.
92349-	 */
92350-	bool merged_stats = false;
92351-	while (nflush > 0) {
92352-		/* Lock the arena, or bin, associated with the first object. */
92353-		edata_t *edata = item_edata[0].edata;
92354-		unsigned cur_arena_ind = edata_arena_ind_get(edata);
92355-		arena_t *cur_arena = arena_get(tsdn, cur_arena_ind, false);
92356-
92357-		/*
92358-		 * These assignments are always overwritten when small is true,
92359-		 * and their values are always ignored when small is false, but
92360-		 * to avoid the technical UB when we pass them as parameters, we
92361-		 * need to initialize them.
92362-		 */
92363-		unsigned cur_binshard = 0;
92364-		bin_t *cur_bin = NULL;
92365-		if (small) {
92366-			cur_binshard = edata_binshard_get(edata);
92367-			cur_bin = arena_get_bin(cur_arena, binind,
92368-			    cur_binshard);
92369-			assert(cur_binshard < bin_infos[binind].n_shards);
92370-			/*
92371-			 * If you're looking at profiles, you might think this
92372-			 * is a good place to prefetch the bin stats, which are
92373-			 * often a cache miss.  This turns out not to be
92374-			 * helpful on the workloads we've looked at, with moving
92375-			 * the bin stats next to the lock seeming to do better.
92376-			 */
92377-		}
92378-
92379-		if (small) {
92380-			malloc_mutex_lock(tsdn, &cur_bin->lock);
92381-		}
92382-		if (!small && !arena_is_auto(cur_arena)) {
92383-			malloc_mutex_lock(tsdn, &cur_arena->large_mtx);
92384-		}
92385-
92386-		/*
92387-		 * If we acquired the right lock and have some stats to flush,
92388-		 * flush them.
92389-		 */
92390-		if (config_stats && tcache_arena == cur_arena
92391-		    && !merged_stats) {
92392-			merged_stats = true;
92393-			if (small) {
92394-				cur_bin->stats.nflushes++;
92395-				cur_bin->stats.nrequests +=
92396-				    cache_bin->tstats.nrequests;
92397-				cache_bin->tstats.nrequests = 0;
92398-			} else {
92399-				arena_stats_large_flush_nrequests_add(tsdn,
92400-				    &tcache_arena->stats, binind,
92401-				    cache_bin->tstats.nrequests);
92402-				cache_bin->tstats.nrequests = 0;
92403-			}
92404-		}
92405-
92406-		/*
92407-		 * Large allocations need special prep done.  Afterwards, we can
92408-		 * drop the large lock.
92409-		 */
92410-		if (!small) {
92411-			for (unsigned i = 0; i < nflush; i++) {
92412-				void *ptr = ptrs->ptr[i];
92413-				edata = item_edata[i].edata;
92414-				assert(ptr != NULL && edata != NULL);
92415-
92416-				if (tcache_bin_flush_match(edata, cur_arena_ind,
92417-				    cur_binshard, small)) {
92418-					large_dalloc_prep_locked(tsdn,
92419-					    edata);
92420-				}
92421-			}
92422-		}
92423-		if (!small && !arena_is_auto(cur_arena)) {
92424-			malloc_mutex_unlock(tsdn, &cur_arena->large_mtx);
92425-		}
92426-
92427-		/* Deallocate whatever we can. */
92428-		unsigned ndeferred = 0;
92429-		/* Init only to avoid used-uninitialized warning. */
92430-		arena_dalloc_bin_locked_info_t dalloc_bin_info = {0};
92431-		if (small) {
92432-			arena_dalloc_bin_locked_begin(&dalloc_bin_info, binind);
92433-		}
92434-		for (unsigned i = 0; i < nflush; i++) {
92435-			void *ptr = ptrs->ptr[i];
92436-			edata = item_edata[i].edata;
92437-			assert(ptr != NULL && edata != NULL);
92438-			if (!tcache_bin_flush_match(edata, cur_arena_ind,
92439-			    cur_binshard, small)) {
92440-				/*
92441-				 * The object was allocated either via a
92442-				 * different arena, or a different bin in this
92443-				 * arena.  Either way, stash the object so that
92444-				 * it can be handled in a future pass.
92445-				 */
92446-				ptrs->ptr[ndeferred] = ptr;
92447-				item_edata[ndeferred].edata = edata;
92448-				ndeferred++;
92449-				continue;
92450-			}
92451-			if (small) {
92452-				if (arena_dalloc_bin_locked_step(tsdn,
92453-				    cur_arena, cur_bin, &dalloc_bin_info,
92454-				    binind, edata, ptr)) {
92455-					dalloc_slabs[dalloc_count] = edata;
92456-					dalloc_count++;
92457-				}
92458-			} else {
92459-				if (large_dalloc_safety_checks(edata, ptr,
92460-				    binind)) {
92461-					/* See the comment in isfree. */
92462-					continue;
92463-				}
92464-				large_dalloc_finish(tsdn, edata);
92465-			}
92466-		}
92467-
92468-		if (small) {
92469-			arena_dalloc_bin_locked_finish(tsdn, cur_arena, cur_bin,
92470-			    &dalloc_bin_info);
92471-			malloc_mutex_unlock(tsdn, &cur_bin->lock);
92472-		}
92473-		arena_decay_ticks(tsdn, cur_arena, nflush - ndeferred);
92474-		nflush = ndeferred;
92475-	}
92476-
92477-	/* Handle all deferred slab dalloc. */
92478-	assert(small || dalloc_count == 0);
92479-	for (unsigned i = 0; i < dalloc_count; i++) {
92480-		edata_t *slab = dalloc_slabs[i];
92481-		arena_slab_dalloc(tsdn, arena_get_from_edata(slab), slab);
92482-
92483-	}
92484-
92485-	if (config_stats && !merged_stats) {
92486-		if (small) {
92487-			/*
92488-			 * The flush loop didn't happen to flush to this
92489-			 * thread's arena, so the stats didn't get merged.
92490-			 * Manually do so now.
92491-			 */
92492-			bin_t *bin = arena_bin_choose(tsdn, tcache_arena,
92493-			    binind, NULL);
92494-			malloc_mutex_lock(tsdn, &bin->lock);
92495-			bin->stats.nflushes++;
92496-			bin->stats.nrequests += cache_bin->tstats.nrequests;
92497-			cache_bin->tstats.nrequests = 0;
92498-			malloc_mutex_unlock(tsdn, &bin->lock);
92499-		} else {
92500-			arena_stats_large_flush_nrequests_add(tsdn,
92501-			    &tcache_arena->stats, binind,
92502-			    cache_bin->tstats.nrequests);
92503-			cache_bin->tstats.nrequests = 0;
92504-		}
92505-	}
92506-
92507-}
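Stripped of the stats, safety checks, and slab bookkeeping, the loop above is a multi-pass partition by lock owner: take the lock for the first remaining item's owner, process every item with that owner, and compact the rest to the front of the array for the next pass. A schematic version (hypothetical owner_of/process callbacks in place of the arena/bin lookups and dalloc calls):

static void
flush_by_owner_sketch(void **items, unsigned n,
    unsigned (*owner_of)(void *), void (*process)(void *)) {
	while (n > 0) {
		unsigned cur_owner = owner_of(items[0]);
		/* The real code takes the bin or arena lock here... */
		unsigned ndeferred = 0;
		for (unsigned i = 0; i < n; i++) {
			if (owner_of(items[i]) != cur_owner) {
				/* Different owner; defer to a later pass. */
				items[ndeferred++] = items[i];
				continue;
			}
			process(items[i]);
		}
		/* ...and releases it here. */
		n = ndeferred;
	}
}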
92508-
92509-JEMALLOC_ALWAYS_INLINE void
92510-tcache_bin_flush_bottom(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
92511-    szind_t binind, unsigned rem, bool small) {
92512-	tcache_bin_flush_stashed(tsd, tcache, cache_bin, binind, small);
92513-
92514-	cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
92515-	    &tcache_bin_info[binind]);
92516-	assert((cache_bin_sz_t)rem <= ncached);
92517-	unsigned nflush = ncached - rem;
92518-
92519-	CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nflush);
92520-	cache_bin_init_ptr_array_for_flush(cache_bin, &tcache_bin_info[binind],
92521-	    &ptrs, nflush);
92522-
92523-	tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nflush,
92524-	    small);
92525-
92526-	cache_bin_finish_flush(cache_bin, &tcache_bin_info[binind], &ptrs,
92527-	    ncached - rem);
92528-}
92529-
92530-void
92531-tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
92532-    szind_t binind, unsigned rem) {
92533-	tcache_bin_flush_bottom(tsd, tcache, cache_bin, binind, rem, true);
92534-}
92535-
92536-void
92537-tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
92538-    szind_t binind, unsigned rem) {
92539-	tcache_bin_flush_bottom(tsd, tcache, cache_bin, binind, rem, false);
92540-}
92541-
92542-/*
92543- * Flushing stashed happens when 1) tcache fill, 2) tcache flush, or 3) tcache
92544- * GC event.  This makes sure that the stashed items do not hold memory for too
92545- * long, and new buffers can only be allocated when nothing is stashed.
92546- *
92547- * The downside is, the time between stash and flush may be relatively short,
92548- * especially when the request rate is high.  It lowers the chance of detecting
92549- * write-after-free -- however that is a delayed detection anyway, and is less
92550- * of a focus than the memory overhead.
92551- */
92552-void
92553-tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
92554-    szind_t binind, bool is_small) {
92555-	cache_bin_info_t *info = &tcache_bin_info[binind];
92556-	/*
92557-	 * The two below are for assertions only.  The content of the original
92558-	 * cached items remains unchanged -- the stashed items reside on the
92559-	 * other end of the stack.  We check the stack head and ncached to verify.
92560-	 */
92561-	void *head_content = *cache_bin->stack_head;
92562-	cache_bin_sz_t orig_cached = cache_bin_ncached_get_local(cache_bin,
92563-	    info);
92564-
92565-	cache_bin_sz_t nstashed = cache_bin_nstashed_get_local(cache_bin, info);
92566-	assert(orig_cached + nstashed <= cache_bin_info_ncached_max(info));
92567-	if (nstashed == 0) {
92568-		return;
92569-	}
92570-
92571-	CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nstashed);
92572-	cache_bin_init_ptr_array_for_stashed(cache_bin, binind, info, &ptrs,
92573-	    nstashed);
92574-	san_check_stashed_ptrs(ptrs.ptr, nstashed, sz_index2size(binind));
92575-	tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nstashed,
92576-	    is_small);
92577-	cache_bin_finish_flush_stashed(cache_bin, info);
92578-
92579-	assert(cache_bin_nstashed_get_local(cache_bin, info) == 0);
92580-	assert(cache_bin_ncached_get_local(cache_bin, info) == orig_cached);
92581-	assert(head_content == *cache_bin->stack_head);
92582-}
92583-
92584-void
92585-tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
92586-    tcache_t *tcache, arena_t *arena) {
92587-	assert(tcache_slow->arena == NULL);
92588-	tcache_slow->arena = arena;
92589-
92590-	if (config_stats) {
92591-		/* Link into list of extant tcaches. */
92592-		malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
92593-
92594-		ql_elm_new(tcache_slow, link);
92595-		ql_tail_insert(&arena->tcache_ql, tcache_slow, link);
92596-		cache_bin_array_descriptor_init(
92597-		    &tcache_slow->cache_bin_array_descriptor, tcache->bins);
92598-		ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
92599-		    &tcache_slow->cache_bin_array_descriptor, link);
92600-
92601-		malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
92602-	}
92603-}
92604-
92605-static void
92606-tcache_arena_dissociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
92607-    tcache_t *tcache) {
92608-	arena_t *arena = tcache_slow->arena;
92609-	assert(arena != NULL);
92610-	if (config_stats) {
92611-		/* Unlink from list of extant tcaches. */
92612-		malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
92613-		if (config_debug) {
92614-			bool in_ql = false;
92615-			tcache_slow_t *iter;
92616-			ql_foreach(iter, &arena->tcache_ql, link) {
92617-				if (iter == tcache_slow) {
92618-					in_ql = true;
92619-					break;
92620-				}
92621-			}
92622-			assert(in_ql);
92623-		}
92624-		ql_remove(&arena->tcache_ql, tcache_slow, link);
92625-		ql_remove(&arena->cache_bin_array_descriptor_ql,
92626-		    &tcache_slow->cache_bin_array_descriptor, link);
92627-		tcache_stats_merge(tsdn, tcache_slow->tcache, arena);
92628-		malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
92629-	}
92630-	tcache_slow->arena = NULL;
92631-}
92632-
92633-void
92634-tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
92635-    tcache_t *tcache, arena_t *arena) {
92636-	tcache_arena_dissociate(tsdn, tcache_slow, tcache);
92637-	tcache_arena_associate(tsdn, tcache_slow, tcache, arena);
92638-}
92639-
92640-bool
92641-tsd_tcache_enabled_data_init(tsd_t *tsd) {
92642-	/* Called upon tsd initialization. */
92643-	tsd_tcache_enabled_set(tsd, opt_tcache);
92644-	tsd_slow_update(tsd);
92645-
92646-	if (opt_tcache) {
92647-		/* Trigger tcache init. */
92648-		tsd_tcache_data_init(tsd);
92649-	}
92650-
92651-	return false;
92652-}
92653-
92654-static void
92655-tcache_init(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
92656-    void *mem) {
92657-	tcache->tcache_slow = tcache_slow;
92658-	tcache_slow->tcache = tcache;
92659-
92660-	memset(&tcache_slow->link, 0, sizeof(ql_elm(tcache_t)));
92661-	tcache_slow->next_gc_bin = 0;
92662-	tcache_slow->arena = NULL;
92663-	tcache_slow->dyn_alloc = mem;
92664-
92665-	/*
92666-	 * We reserve cache bins for all small size classes, even if some may
92667-	 * not get used (i.e. bins higher than nhbins).  This allows the fast
92668-	 * and common paths to access cache bin metadata safely w/o worrying
92669-	 * about which ones are disabled.
92670-	 */
92671-	unsigned n_reserved_bins = nhbins < SC_NBINS ? SC_NBINS : nhbins;
92672-	memset(tcache->bins, 0, sizeof(cache_bin_t) * n_reserved_bins);
92673-
92674-	size_t cur_offset = 0;
92675-	cache_bin_preincrement(tcache_bin_info, nhbins, mem,
92676-	    &cur_offset);
92677-	for (unsigned i = 0; i < nhbins; i++) {
92678-		if (i < SC_NBINS) {
92679-			tcache_slow->lg_fill_div[i] = 1;
92680-			tcache_slow->bin_refilled[i] = false;
92681-			tcache_slow->bin_flush_delay_items[i]
92682-			    = tcache_gc_item_delay_compute(i);
92683-		}
92684-		cache_bin_t *cache_bin = &tcache->bins[i];
92685-		cache_bin_init(cache_bin, &tcache_bin_info[i], mem,
92686-		    &cur_offset);
92687-	}
92688-	/*
92689-	 * For small size classes beyond tcache_maxclass (i.e. nhbins < SC_NBINS),
92690-	 * their cache bins are initialized to a state that safely and efficiently
92691-	 * fails all fastpath alloc / free, so that no additional check around
92692-	 * nhbins is needed on the fastpath.
92693-	 */
92694-	for (unsigned i = nhbins; i < SC_NBINS; i++) {
92695-		/* Disabled small bins. */
92696-		cache_bin_t *cache_bin = &tcache->bins[i];
92697-		void *fake_stack = mem;
92698-		size_t fake_offset = 0;
92699-
92700-		cache_bin_init(cache_bin, &tcache_bin_info[i], fake_stack,
92701-		    &fake_offset);
92702-		assert(tcache_small_bin_disabled(i, cache_bin));
92703-	}
92704-
92705-	cache_bin_postincrement(tcache_bin_info, nhbins, mem,
92706-	    &cur_offset);
92707-	/* Sanity check that the whole stack is used. */
92708-	assert(cur_offset == tcache_bin_alloc_size);
92709-}
92710-
92711-/* Initialize auto tcache (embedded in TSD). */
92712-bool
92713-tsd_tcache_data_init(tsd_t *tsd) {
92714-	tcache_slow_t *tcache_slow = tsd_tcache_slowp_get_unsafe(tsd);
92715-	tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
92716-
92717-	assert(cache_bin_still_zero_initialized(&tcache->bins[0]));
92718-	size_t alignment = tcache_bin_alloc_alignment;
92719-	size_t size = sz_sa2u(tcache_bin_alloc_size, alignment);
92720-
92721-	void *mem = ipallocztm(tsd_tsdn(tsd), size, alignment, true, NULL,
92722-	    true, arena_get(TSDN_NULL, 0, true));
92723-	if (mem == NULL) {
92724-		return true;
92725-	}
92726-
92727-	tcache_init(tsd, tcache_slow, tcache, mem);
92728-	/*
92729-	 * Initialization is a bit tricky here.  After malloc init is done, all
92730-	 * threads can rely on arena_choose and associate tcache accordingly.
92731-	 * However, the thread that does actual malloc bootstrapping relies on
92732-	 * functional tsd, and it can only rely on a0.  In that case, we
92733-	 * associate its tcache to a0 temporarily, and later on
92734-	 * arena_choose_hard() will re-associate properly.
92735-	 */
92736-	tcache_slow->arena = NULL;
92737-	arena_t *arena;
92738-	if (!malloc_initialized()) {
92739-		/* If in initialization, assign to a0. */
92740-		arena = arena_get(tsd_tsdn(tsd), 0, false);
92741-		tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, tcache,
92742-		    arena);
92743-	} else {
92744-		arena = arena_choose(tsd, NULL);
92745-		/* This may happen if thread.tcache.enabled is used. */
92746-		if (tcache_slow->arena == NULL) {
92747-			tcache_arena_associate(tsd_tsdn(tsd), tcache_slow,
92748-			    tcache, arena);
92749-		}
92750-	}
92751-	assert(arena == tcache_slow->arena);
92752-
92753-	return false;
92754-}
92755-
92756-/* Create a manual tcache for the tcache.create mallctl. */
92757-tcache_t *
92758-tcache_create_explicit(tsd_t *tsd) {
92759-	/*
92760-	 * We place the cache bin stacks, then the tcache_t, then a pointer to
92761-	 * the beginning of the whole allocation (for freeing).  This makes sure
92762-	 * the cache bins have the requested alignment.
92763-	 */
92764-	size_t size = tcache_bin_alloc_size + sizeof(tcache_t)
92765-	    + sizeof(tcache_slow_t);
92766-	/* Naturally align the pointer stacks. */
92767-	size = PTR_CEILING(size);
92768-	size = sz_sa2u(size, tcache_bin_alloc_alignment);
92769-
92770-	void *mem = ipallocztm(tsd_tsdn(tsd), size, tcache_bin_alloc_alignment,
92771-	    true, NULL, true, arena_get(TSDN_NULL, 0, true));
92772-	if (mem == NULL) {
92773-		return NULL;
92774-	}
92775-	tcache_t *tcache = (void *)((uintptr_t)mem + tcache_bin_alloc_size);
92776-	tcache_slow_t *tcache_slow =
92777-	    (void *)((uintptr_t)mem + tcache_bin_alloc_size + sizeof(tcache_t));
92778-	tcache_init(tsd, tcache_slow, tcache, mem);
92779-
92780-	tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, tcache,
92781-	    arena_ichoose(tsd, NULL));
92782-
92783-	return tcache;
92784-}
92785-
92786-static void
92787-tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
92788-	tcache_slow_t *tcache_slow = tcache->tcache_slow;
92789-	assert(tcache_slow->arena != NULL);
92790-
92791-	for (unsigned i = 0; i < nhbins; i++) {
92792-		cache_bin_t *cache_bin = &tcache->bins[i];
92793-		if (i < SC_NBINS) {
92794-			tcache_bin_flush_small(tsd, tcache, cache_bin, i, 0);
92795-		} else {
92796-			tcache_bin_flush_large(tsd, tcache, cache_bin, i, 0);
92797-		}
92798-		if (config_stats) {
92799-			assert(cache_bin->tstats.nrequests == 0);
92800-		}
92801-	}
92802-}
92803-
92804-void
92805-tcache_flush(tsd_t *tsd) {
92806-	assert(tcache_available(tsd));
92807-	tcache_flush_cache(tsd, tsd_tcachep_get(tsd));
92808-}
92809-
92810-static void
92811-tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
92812-	tcache_slow_t *tcache_slow = tcache->tcache_slow;
92813-	tcache_flush_cache(tsd, tcache);
92814-	arena_t *arena = tcache_slow->arena;
92815-	tcache_arena_dissociate(tsd_tsdn(tsd), tcache_slow, tcache);
92816-
92817-	if (tsd_tcache) {
92818-		cache_bin_t *cache_bin = &tcache->bins[0];
92819-		cache_bin_assert_empty(cache_bin, &tcache_bin_info[0]);
92820-	}
92821-	idalloctm(tsd_tsdn(tsd), tcache_slow->dyn_alloc, NULL, NULL, true,
92822-	    true);
92823-
92824-	/*
92825-	 * The deallocation and tcache flush above may not trigger decay since
92826-	 * we are on the tcache shutdown path (potentially with non-nominal
92827-	 * tsd).  Manually trigger decay to avoid pathological cases.  Also
92828-	 * include arena 0 because the tcache array is allocated from it.
92829-	 */
92830-	arena_decay(tsd_tsdn(tsd), arena_get(tsd_tsdn(tsd), 0, false),
92831-	    false, false);
92832-
92833-	if (arena_nthreads_get(arena, false) == 0 &&
92834-	    !background_thread_enabled()) {
92835-		/* Force purging when no threads are assigned to the arena anymore. */
92836-		arena_decay(tsd_tsdn(tsd), arena,
92837-		    /* is_background_thread */ false, /* all */ true);
92838-	} else {
92839-		arena_decay(tsd_tsdn(tsd), arena,
92840-		    /* is_background_thread */ false, /* all */ false);
92841-	}
92842-}
92843-
92844-/* For auto tcache (embedded in TSD) only. */
92845-void
92846-tcache_cleanup(tsd_t *tsd) {
92847-	tcache_t *tcache = tsd_tcachep_get(tsd);
92848-	if (!tcache_available(tsd)) {
92849-		assert(tsd_tcache_enabled_get(tsd) == false);
92850-		assert(cache_bin_still_zero_initialized(&tcache->bins[0]));
92851-		return;
92852-	}
92853-	assert(tsd_tcache_enabled_get(tsd));
92854-	assert(!cache_bin_still_zero_initialized(&tcache->bins[0]));
92855-
92856-	tcache_destroy(tsd, tcache, true);
92857-	if (config_debug) {
92858-		/*
92859-		 * For debug testing only, we want to pretend we're still in the
92860-		 * zero-initialized state.
92861-		 */
92862-		memset(tcache->bins, 0, sizeof(cache_bin_t) * nhbins);
92863-	}
92864-}
92865-
92866-void
92867-tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
92868-	cassert(config_stats);
92869-
92870-	/* Merge and reset tcache stats. */
92871-	for (unsigned i = 0; i < nhbins; i++) {
92872-		cache_bin_t *cache_bin = &tcache->bins[i];
92873-		if (i < SC_NBINS) {
92874-			bin_t *bin = arena_bin_choose(tsdn, arena, i, NULL);
92875-			malloc_mutex_lock(tsdn, &bin->lock);
92876-			bin->stats.nrequests += cache_bin->tstats.nrequests;
92877-			malloc_mutex_unlock(tsdn, &bin->lock);
92878-		} else {
92879-			arena_stats_large_flush_nrequests_add(tsdn,
92880-			    &arena->stats, i, cache_bin->tstats.nrequests);
92881-		}
92882-		cache_bin->tstats.nrequests = 0;
92883-	}
92884-}
92885-
92886-static bool
92887-tcaches_create_prep(tsd_t *tsd, base_t *base) {
92888-	bool err;
92889-
92890-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx);
92891-
92892-	if (tcaches == NULL) {
92893-		tcaches = base_alloc(tsd_tsdn(tsd), base,
92894-		    sizeof(tcache_t *) * (MALLOCX_TCACHE_MAX+1), CACHELINE);
92895-		if (tcaches == NULL) {
92896-			err = true;
92897-			goto label_return;
92898-		}
92899-	}
92900-
92901-	if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) {
92902-		err = true;
92903-		goto label_return;
92904-	}
92905-
92906-	err = false;
92907-label_return:
92908-	return err;
92909-}
92910-
92911-bool
92912-tcaches_create(tsd_t *tsd, base_t *base, unsigned *r_ind) {
92913-	witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
92914-
92915-	bool err;
92916-
92917-	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
92918-
92919-	if (tcaches_create_prep(tsd, base)) {
92920-		err = true;
92921-		goto label_return;
92922-	}
92923-
92924-	tcache_t *tcache = tcache_create_explicit(tsd);
92925-	if (tcache == NULL) {
92926-		err = true;
92927-		goto label_return;
92928-	}
92929-
92930-	tcaches_t *elm;
92931-	if (tcaches_avail != NULL) {
92932-		elm = tcaches_avail;
92933-		tcaches_avail = tcaches_avail->next;
92934-		elm->tcache = tcache;
92935-		*r_ind = (unsigned)(elm - tcaches);
92936-	} else {
92937-		elm = &tcaches[tcaches_past];
92938-		elm->tcache = tcache;
92939-		*r_ind = tcaches_past;
92940-		tcaches_past++;
92941-	}
92942-
92943-	err = false;
92944-label_return:
92945-	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
92946-	witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
92947-	return err;
92948-}
92949-
92950-static tcache_t *
92951-tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm, bool allow_reinit) {
92952-	malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx);
92953-
92954-	if (elm->tcache == NULL) {
92955-		return NULL;
92956-	}
92957-	tcache_t *tcache = elm->tcache;
92958-	if (allow_reinit) {
92959-		elm->tcache = TCACHES_ELM_NEED_REINIT;
92960-	} else {
92961-		elm->tcache = NULL;
92962-	}
92963-
92964-	if (tcache == TCACHES_ELM_NEED_REINIT) {
92965-		return NULL;
92966-	}
92967-	return tcache;
92968-}
92969-
92970-void
92971-tcaches_flush(tsd_t *tsd, unsigned ind) {
92972-	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
92973-	tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind], true);
92974-	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
92975-	if (tcache != NULL) {
92976-		/* Destroy the tcache; recreate in tcaches_get() if needed. */
92977-		tcache_destroy(tsd, tcache, false);
92978-	}
92979-}
92980-
92981-void
92982-tcaches_destroy(tsd_t *tsd, unsigned ind) {
92983-	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
92984-	tcaches_t *elm = &tcaches[ind];
92985-	tcache_t *tcache = tcaches_elm_remove(tsd, elm, false);
92986-	elm->next = tcaches_avail;
92987-	tcaches_avail = elm;
92988-	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
92989-	if (tcache != NULL) {
92990-		tcache_destroy(tsd, tcache, false);
92991-	}
92992-}
92993-
92994-static unsigned
92995-tcache_ncached_max_compute(szind_t szind) {
92996-	if (szind >= SC_NBINS) {
92997-		assert(szind < nhbins);
92998-		return opt_tcache_nslots_large;
92999-	}
93000-	unsigned slab_nregs = bin_infos[szind].nregs;
93001-
93002-	/* We may modify these values; start with the opt versions. */
93003-	unsigned nslots_small_min = opt_tcache_nslots_small_min;
93004-	unsigned nslots_small_max = opt_tcache_nslots_small_max;
93005-
93006-	/*
93007-	 * Clamp values to meet our constraints -- even, nonzero, min < max, and
93008-	 * suitable for a cache bin size.
93009-	 */
93010-	if (opt_tcache_nslots_small_max > CACHE_BIN_NCACHED_MAX) {
93011-		nslots_small_max = CACHE_BIN_NCACHED_MAX;
93012-	}
93013-	if (nslots_small_min % 2 != 0) {
93014-		nslots_small_min++;
93015-	}
93016-	if (nslots_small_max % 2 != 0) {
93017-		nslots_small_max--;
93018-	}
93019-	if (nslots_small_min < 2) {
93020-		nslots_small_min = 2;
93021-	}
93022-	if (nslots_small_max < 2) {
93023-		nslots_small_max = 2;
93024-	}
93025-	if (nslots_small_min > nslots_small_max) {
93026-		nslots_small_min = nslots_small_max;
93027-	}
93028-
93029-	unsigned candidate;
93030-	if (opt_lg_tcache_nslots_mul < 0) {
93031-		candidate = slab_nregs >> (-opt_lg_tcache_nslots_mul);
93032-	} else {
93033-		candidate = slab_nregs << opt_lg_tcache_nslots_mul;
93034-	}
93035-	if (candidate % 2 != 0) {
93036-		/*
93037-		 * We need the candidate size to be even -- we assume that we
93038-		 * can divide by two and get a positive number (e.g. when
93039-		 * flushing).
93040-		 */
93041-		++candidate;
93042-	}
93043-	if (candidate <= nslots_small_min) {
93044-		return nslots_small_min;
93045-	} else if (candidate <= nslots_small_max) {
93046-		return candidate;
93047-	} else {
93048-		return nslots_small_max;
93049-	}
93050-}
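A worked example of the clamping above, with hypothetical option values nslots_small_min = 20, nslots_small_max = 200 (assumed to be within CACHE_BIN_NCACHED_MAX), and opt_lg_tcache_nslots_mul = -1: both bounds are already even, at least 2, and ordered, so they pass the clamps unchanged. A bin with slab_nregs = 512 gives candidate = 512 >> 1 = 256, which exceeds the max and is clamped to 200; a bin with slab_nregs = 64 gives candidate = 32, which falls between the bounds and is returned as-is.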
93051-
93052-bool
93053-tcache_boot(tsdn_t *tsdn, base_t *base) {
93054-	tcache_maxclass = sz_s2u(opt_tcache_max);
93055-	assert(tcache_maxclass <= TCACHE_MAXCLASS_LIMIT);
93056-	nhbins = sz_size2index(tcache_maxclass) + 1;
93057-
93058-	if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
93059-	    malloc_mutex_rank_exclusive)) {
93060-		return true;
93061-	}
93062-
93063-	/* Initialize tcache_bin_info.  See comments in tcache_init(). */
93064-	unsigned n_reserved_bins = nhbins < SC_NBINS ? SC_NBINS : nhbins;
93065-	size_t size = n_reserved_bins * sizeof(cache_bin_info_t);
93066-	tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, base, size,
93067-	    CACHELINE);
93068-	if (tcache_bin_info == NULL) {
93069-		return true;
93070-	}
93071-
93072-	for (szind_t i = 0; i < nhbins; i++) {
93073-		unsigned ncached_max = tcache_ncached_max_compute(i);
93074-		cache_bin_info_init(&tcache_bin_info[i], ncached_max);
93075-	}
93076-	for (szind_t i = nhbins; i < SC_NBINS; i++) {
93077-		/* Disabled small bins. */
93078-		cache_bin_info_init(&tcache_bin_info[i], 0);
93079-		assert(tcache_small_bin_disabled(i, NULL));
93080-	}
93081-
93082-	cache_bin_info_compute_alloc(tcache_bin_info, nhbins,
93083-	    &tcache_bin_alloc_size, &tcache_bin_alloc_alignment);
93084-
93085-	return false;
93086-}
93087-
93088-void
93089-tcache_prefork(tsdn_t *tsdn) {
93090-	malloc_mutex_prefork(tsdn, &tcaches_mtx);
93091-}
93092-
93093-void
93094-tcache_postfork_parent(tsdn_t *tsdn) {
93095-	malloc_mutex_postfork_parent(tsdn, &tcaches_mtx);
93096-}
93097-
93098-void
93099-tcache_postfork_child(tsdn_t *tsdn) {
93100-	malloc_mutex_postfork_child(tsdn, &tcaches_mtx);
93101-}
93102-
93103-void tcache_assert_initialized(tcache_t *tcache) {
93104-	assert(!cache_bin_still_zero_initialized(&tcache->bins[0]));
93105-}
93106diff --git a/jemalloc/src/test_hooks.c b/jemalloc/src/test_hooks.c
93107deleted file mode 100644
93108index ace00d9..0000000
93109--- a/jemalloc/src/test_hooks.c
93110+++ /dev/null
93111@@ -1,12 +0,0 @@
93112-#include "jemalloc/internal/jemalloc_preamble.h"
93113-
93114-/*
93115- * The hooks are a little bit screwy -- they're not genuinely exported in the
93116- * sense that we want them available to end-users, but we do want them visible
93117- * from outside the generated library, so that we can use them in test code.
93118- */
93119-JEMALLOC_EXPORT
93120-void (*test_hooks_arena_new_hook)() = NULL;
93121-
93122-JEMALLOC_EXPORT
93123-void (*test_hooks_libc_hook)() = NULL;
93124diff --git a/jemalloc/src/thread_event.c b/jemalloc/src/thread_event.c
93125deleted file mode 100644
93126index 37eb582..0000000
93127--- a/jemalloc/src/thread_event.c
93128+++ /dev/null
93129@@ -1,343 +0,0 @@
93130-#include "jemalloc/internal/jemalloc_preamble.h"
93131-#include "jemalloc/internal/jemalloc_internal_includes.h"
93132-
93133-#include "jemalloc/internal/thread_event.h"
93134-
93135-/*
93136- * Signatures for event specific functions.  These functions should be defined
93137- * by the modules owning each event.  The signatures here verify that the
93138- * definitions follow the right format.
93139- *
93140- * The first two are functions computing new / postponed event wait time.  New
93141- * event wait time is the time till the next event if an event is currently
93142- * being triggered; postponed event wait time is the time till the next event
93143- * if an event should be triggered but needs to be postponed, e.g. when the TSD
93144- * is not nominal or during reentrancy.
93145- *
93146- * The third is the event handler function, which is called whenever an event
93147- * is triggered.  The parameter is the elapsed time since the last time an
93148- * event of the same type was triggered.
93149- */
93150-#define E(event, condition_unused, is_alloc_event_unused)		\
93151-uint64_t event##_new_event_wait(tsd_t *tsd);				\
93152-uint64_t event##_postponed_event_wait(tsd_t *tsd);			\
93153-void event##_event_handler(tsd_t *tsd, uint64_t elapsed);
93154-
93155-ITERATE_OVER_ALL_EVENTS
93156-#undef E
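For one concrete event, the block above reduces to three prototypes. Assuming tcache_gc is among the entries of ITERATE_OVER_ALL_EVENTS (its handlers are defined in tcache.c), the expansion for that event is approximately:

uint64_t tcache_gc_new_event_wait(tsd_t *tsd);
uint64_t tcache_gc_postponed_event_wait(tsd_t *tsd);
void tcache_gc_event_handler(tsd_t *tsd, uint64_t elapsed);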
93157-
93158-/* Signatures for internal functions fetching elapsed time. */
93159-#define E(event, condition_unused, is_alloc_event_unused)		\
93160-static uint64_t event##_fetch_elapsed(tsd_t *tsd);
93161-
93162-ITERATE_OVER_ALL_EVENTS
93163-#undef E
93164-
93165-static uint64_t
93166-tcache_gc_fetch_elapsed(tsd_t *tsd) {
93167-	return TE_INVALID_ELAPSED;
93168-}
93169-
93170-static uint64_t
93171-tcache_gc_dalloc_fetch_elapsed(tsd_t *tsd) {
93172-	return TE_INVALID_ELAPSED;
93173-}
93174-
93175-static uint64_t
93176-prof_sample_fetch_elapsed(tsd_t *tsd) {
93177-	uint64_t last_event = thread_allocated_last_event_get(tsd);
93178-	uint64_t last_sample_event = prof_sample_last_event_get(tsd);
93179-	prof_sample_last_event_set(tsd, last_event);
93180-	return last_event - last_sample_event;
93181-}
93182-
93183-static uint64_t
93184-stats_interval_fetch_elapsed(tsd_t *tsd) {
93185-	uint64_t last_event = thread_allocated_last_event_get(tsd);
93186-	uint64_t last_stats_event = stats_interval_last_event_get(tsd);
93187-	stats_interval_last_event_set(tsd, last_event);
93188-	return last_event - last_stats_event;
93189-}
93190-
93191-static uint64_t
93192-peak_alloc_fetch_elapsed(tsd_t *tsd) {
93193-	return TE_INVALID_ELAPSED;
93194-}
93195-
93196-static uint64_t
93197-peak_dalloc_fetch_elapsed(tsd_t *tsd) {
93198-	return TE_INVALID_ELAPSED;
93199-}
93200-
93201-/* Per event facilities done. */
93202-
93203-static bool
93204-te_ctx_has_active_events(te_ctx_t *ctx) {
93205-	assert(config_debug);
93206-#define E(event, condition, alloc_event)			       \
93207-	if (condition && alloc_event == ctx->is_alloc) {	       \
93208-		return true;					       \
93209-	}
93210-	ITERATE_OVER_ALL_EVENTS
93211-#undef E
93212-	return false;
93213-}
93214-
93215-static uint64_t
93216-te_next_event_compute(tsd_t *tsd, bool is_alloc) {
93217-	uint64_t wait = TE_MAX_START_WAIT;
93218-#define E(event, condition, alloc_event)				\
93219-	if (is_alloc == alloc_event && condition) {			\
93220-		uint64_t event_wait =					\
93221-		    event##_event_wait_get(tsd);			\
93222-		assert(event_wait <= TE_MAX_START_WAIT);		\
93223-		if (event_wait > 0U && event_wait < wait) {		\
93224-			wait = event_wait;				\
93225-		}							\
93226-	}
93227-
93228-	ITERATE_OVER_ALL_EVENTS
93229-#undef E
93230-	assert(wait <= TE_MAX_START_WAIT);
93231-	return wait;
93232-}
93233-
93234-static void
93235-te_assert_invariants_impl(tsd_t *tsd, te_ctx_t *ctx) {
93236-	uint64_t current_bytes = te_ctx_current_bytes_get(ctx);
93237-	uint64_t last_event = te_ctx_last_event_get(ctx);
93238-	uint64_t next_event = te_ctx_next_event_get(ctx);
93239-	uint64_t next_event_fast = te_ctx_next_event_fast_get(ctx);
93240-
93241-	assert(last_event != next_event);
93242-	if (next_event > TE_NEXT_EVENT_FAST_MAX || !tsd_fast(tsd)) {
93243-		assert(next_event_fast == 0U);
93244-	} else {
93245-		assert(next_event_fast == next_event);
93246-	}
93247-
93248-	/* The subtraction is intentionally susceptible to underflow. */
93249-	uint64_t interval = next_event - last_event;
93250-
93251-	/* The subtraction is intentionally susceptible to underflow. */
93252-	assert(current_bytes - last_event < interval);
93253-	uint64_t min_wait = te_next_event_compute(tsd, te_ctx_is_alloc(ctx));
93254-	/*
93255-	 * next_event should have been pushed up, except when no event is
93256-	 * on and the TSD is just initialized.  The last_event == 0U guard
93257-	 * below is stronger than needed, but having an exactly accurate guard
93258-	 * is more complicated to implement.
93259-	 */
93260-	assert((!te_ctx_has_active_events(ctx) && last_event == 0U) ||
93261-	    interval == min_wait ||
93262-	    (interval < min_wait && interval == TE_MAX_INTERVAL));
93263-}
93264-
93265-void
93266-te_assert_invariants_debug(tsd_t *tsd) {
93267-	te_ctx_t ctx;
93268-	te_ctx_get(tsd, &ctx, true);
93269-	te_assert_invariants_impl(tsd, &ctx);
93270-
93271-	te_ctx_get(tsd, &ctx, false);
93272-	te_assert_invariants_impl(tsd, &ctx);
93273-}
93274-
93275-/*
93276- * Synchronization around the fast threshold in tsd --
93277- * There are two threads to consider in the synchronization here:
93278- * - The owner of the tsd being updated by a slow path change
93279- * - The remote thread, doing that slow path change.
93280- *
93281- * As a design constraint, we want to ensure that a slow-path transition cannot
93282- * be ignored for arbitrarily long, and that if the remote thread causes a
93283- * slow-path transition and then communicates with the owner thread that it has
93284- * occurred, then the owner will go down the slow path on the next allocator
93285- * operation (i.e. we don't want to just wait until the owner hits its slow
93286- * path reset condition on its own).
93287- *
93288- * Here's our strategy to do that:
93289- *
93290- * The remote thread will update the slow-path stores to TSD variables, issue a
93291- * SEQ_CST fence, and then update the TSD next_event_fast counter. The owner
93292- * thread will update next_event_fast, issue an SEQ_CST fence, and then check
93293- * its TSD to see if it's on the slow path.
93294- *
93295- * This is fairly straightforward when 64-bit atomics are supported. Assume that
93296- * the remote fence is sandwiched between two owner fences in the reset pathway.
93297- * The case where there is no preceding or trailing owner fence (i.e. because
93298- * the owner thread is near the beginning or end of its life) can be analyzed
93299- * similarly. The owner store to next_event_fast preceding the earlier owner
93300- * fence will be earlier in coherence order than the remote store to it, so that
93301- * the owner thread will go down the slow path once the store becomes visible to
93302- * it, which is no later than the time of the second fence.
93303- *
93304- * The case where we don't support 64-bit atomics is trickier, since word
93305- * tearing is possible. We'll repeat the same analysis, and look at the two
93306- * owner fences sandwiching the remote fence. The next_event_fast stores done
93307- * alongside the earlier owner fence cannot overwrite any of the remote stores
93308- * (since they precede the earlier owner fence in sb, which precedes the remote
93309- * fence in sc, which precedes the remote stores in sb). After the second owner
93310- * fence there will be a re-check of the slow-path variables anyways, so the
93311- * "owner will notice that it's on the slow path eventually" guarantee is
93312- * satisfied. To make sure that the out-of-band-messaging constraint is as well,
93313- * note that either the message passing is sequenced before the second owner
93314- * fence (in which case the remote stores happen before the second set of owner
93315- * stores, so malloc sees a value of zero for next_event_fast and goes down the
93316- * slow path), or it is not (in which case the owner sees the tsd slow-path
93317- * writes on its previous update). This leaves open the possibility that the
93318- * remote thread will (at some arbitrary point in the future) zero out one half
93319- * of the owner thread's next_event_fast, but that's always safe (it just sends
93320- * it down the slow path earlier).
93321- */
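The protocol described above can be condensed into a few lines of C11 atomics. This is a schematic only, with a hypothetical toy_tsd_t; the real code goes through jemalloc's tsd accessors and atomic wrappers, and next_event_fast is not necessarily a native 64-bit atomic on every platform:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
	atomic_bool slow_path;			/* written by the remote thread */
	_Atomic uint64_t next_event_fast;	/* 0 forces the slow path */
} toy_tsd_t;

/* Remote thread: publish the slow-path state, then knock down the counter. */
static void
remote_force_slow(toy_tsd_t *t) {
	atomic_store_explicit(&t->slow_path, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	atomic_store_explicit(&t->next_event_fast, 0, memory_order_relaxed);
}

/* Owner thread: reset the counter, then re-check whether it must go slow. */
static bool
owner_reset_and_check_slow(toy_tsd_t *t, uint64_t wait) {
	atomic_store_explicit(&t->next_event_fast, wait, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&t->slow_path, memory_order_relaxed);
}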
93322-static void
93323-te_ctx_next_event_fast_update(te_ctx_t *ctx) {
93324-	uint64_t next_event = te_ctx_next_event_get(ctx);
93325-	uint64_t next_event_fast = (next_event <= TE_NEXT_EVENT_FAST_MAX) ?
93326-	    next_event : 0U;
93327-	te_ctx_next_event_fast_set(ctx, next_event_fast);
93328-}
93329-
93330-void
93331-te_recompute_fast_threshold(tsd_t *tsd) {
93332-	if (tsd_state_get(tsd) != tsd_state_nominal) {
93333-		/* Check first because this is also called on purgatory. */
93334-		te_next_event_fast_set_non_nominal(tsd);
93335-		return;
93336-	}
93337-
93338-	te_ctx_t ctx;
93339-	te_ctx_get(tsd, &ctx, true);
93340-	te_ctx_next_event_fast_update(&ctx);
93341-	te_ctx_get(tsd, &ctx, false);
93342-	te_ctx_next_event_fast_update(&ctx);
93343-
93344-	atomic_fence(ATOMIC_SEQ_CST);
93345-	if (tsd_state_get(tsd) != tsd_state_nominal) {
93346-		te_next_event_fast_set_non_nominal(tsd);
93347-	}
93348-}
93349-
93350-static void
93351-te_adjust_thresholds_helper(tsd_t *tsd, te_ctx_t *ctx,
93352-    uint64_t wait) {
93353-	/*
93354-	 * The next threshold based on future events can only be adjusted after
93355-	 * progressing the last_event counter (which is set to current).
93356-	 */
93357-	assert(te_ctx_current_bytes_get(ctx) == te_ctx_last_event_get(ctx));
93358-	assert(wait <= TE_MAX_START_WAIT);
93359-
93360-	uint64_t next_event = te_ctx_last_event_get(ctx) + (wait <=
93361-	    TE_MAX_INTERVAL ? wait : TE_MAX_INTERVAL);
93362-	te_ctx_next_event_set(tsd, ctx, next_event);
93363-}
93364-
93365-static uint64_t
93366-te_clip_event_wait(uint64_t event_wait) {
93367-	assert(event_wait > 0U);
93368-	if (TE_MIN_START_WAIT > 1U &&
93369-	    unlikely(event_wait < TE_MIN_START_WAIT)) {
93370-		event_wait = TE_MIN_START_WAIT;
93371-	}
93372-	if (TE_MAX_START_WAIT < UINT64_MAX &&
93373-	    unlikely(event_wait > TE_MAX_START_WAIT)) {
93374-		event_wait = TE_MAX_START_WAIT;
93375-	}
93376-	return event_wait;
93377-}
93378-
93379-void
93380-te_event_trigger(tsd_t *tsd, te_ctx_t *ctx) {
93381-	/* usize has already been added to thread_allocated. */
93382-	uint64_t bytes_after = te_ctx_current_bytes_get(ctx);
93383-	/* The subtraction is intentionally susceptible to underflow. */
93384-	uint64_t accumbytes = bytes_after - te_ctx_last_event_get(ctx);
93385-
93386-	te_ctx_last_event_set(ctx, bytes_after);
93387-
93388-	bool allow_event_trigger = tsd_nominal(tsd) &&
93389-	    tsd_reentrancy_level_get(tsd) == 0;
93390-	bool is_alloc = ctx->is_alloc;
93391-	uint64_t wait = TE_MAX_START_WAIT;
93392-
93393-#define E(event, condition, alloc_event)				\
93394-	bool is_##event##_triggered = false;				\
93395-	if (is_alloc == alloc_event && condition) {			\
93396-		uint64_t event_wait = event##_event_wait_get(tsd);	\
93397-		assert(event_wait <= TE_MAX_START_WAIT);		\
93398-		if (event_wait > accumbytes) {				\
93399-			event_wait -= accumbytes;			\
93400-		} else if (!allow_event_trigger) {			\
93401-			event_wait = event##_postponed_event_wait(tsd);	\
93402-		} else {						\
93403-			is_##event##_triggered = true;			\
93404-			event_wait = event##_new_event_wait(tsd);	\
93405-		}							\
93406-		event_wait = te_clip_event_wait(event_wait);		\
93407-		event##_event_wait_set(tsd, event_wait);		\
93408-		if (event_wait < wait) {				\
93409-			wait = event_wait;				\
93410-		}							\
93411-	}
93412-
93413-	ITERATE_OVER_ALL_EVENTS
93414-#undef E
93415-
93416-	assert(wait <= TE_MAX_START_WAIT);
93417-	te_adjust_thresholds_helper(tsd, ctx, wait);
93418-	te_assert_invariants(tsd);
93419-
93420-#define E(event, condition, alloc_event)				\
93421-	if (is_alloc == alloc_event && condition &&			\
93422-	    is_##event##_triggered) {					\
93423-		assert(allow_event_trigger);				\
93424-		uint64_t elapsed = event##_fetch_elapsed(tsd);		\
93425-		event##_event_handler(tsd, elapsed);			\
93426-	}
93427-
93428-	ITERATE_OVER_ALL_EVENTS
93429-#undef E
93430-
93431-	te_assert_invariants(tsd);
93432-}
93433-
93434-static void
93435-te_init(tsd_t *tsd, bool is_alloc) {
93436-	te_ctx_t ctx;
93437-	te_ctx_get(tsd, &ctx, is_alloc);
93438-	/*
93439-	 * Reset the last event to current, which starts the events from a clean
93440-	 * state.  This is necessary when re-initializing the tsd event counters.
93441-	 *
93442-	 * The event counters maintain a relationship with the current bytes:
93443-	 * last_event <= current < next_event.  When a reinit happens (e.g.
93444-	 * reincarnated tsd), the last event needs progressing because all
93445-	 * events start fresh from the current bytes.
93446-	 */
93447-	te_ctx_last_event_set(&ctx, te_ctx_current_bytes_get(&ctx));
93448-
93449-	uint64_t wait = TE_MAX_START_WAIT;
93450-#define E(event, condition, alloc_event)				\
93451-	if (is_alloc == alloc_event && condition) {			\
93452-		uint64_t event_wait = event##_new_event_wait(tsd);	\
93453-		event_wait = te_clip_event_wait(event_wait);		\
93454-		event##_event_wait_set(tsd, event_wait);		\
93455-		if (event_wait < wait) {				\
93456-			wait = event_wait;				\
93457-		}							\
93458-	}
93459-
93460-	ITERATE_OVER_ALL_EVENTS
93461-#undef E
93462-	te_adjust_thresholds_helper(tsd, &ctx, wait);
93463-}
93464-
93465-void
93466-tsd_te_init(tsd_t *tsd) {
93467-	/* Make sure no overflow for the bytes accumulated on event_trigger. */
93468-	assert(TE_MAX_INTERVAL <= UINT64_MAX - SC_LARGE_MAXCLASS + 1);
93469-	te_init(tsd, true);
93470-	te_init(tsd, false);
93471-	te_assert_invariants(tsd);
93472-}
93473diff --git a/jemalloc/src/ticker.c b/jemalloc/src/ticker.c
93474deleted file mode 100644
93475index 790b5c2..0000000
93476--- a/jemalloc/src/ticker.c
93477+++ /dev/null
93478@@ -1,32 +0,0 @@
93479-#include "jemalloc/internal/jemalloc_preamble.h"
93480-#include "jemalloc/internal/jemalloc_internal_includes.h"
93481-
93482-/*
93483- * To avoid using floating point math down core paths (still necessary because
93484- * versions of the glibc dynamic loader that did not preserve xmm registers are
93485- * still somewhat common, requiring us to be compilable with -mno-sse), and also
93486- * to avoid generally expensive library calls, we use a precomputed table of
93487- * values.  We want to sample U uniformly on [0, 1], and then compute
93488- * ceil(log(u)/log(1-1/nticks)).  We're mostly interested in the case where
93489- * nticks is reasonably big, so 1/log(1-1/nticks) is well-approximated by
93490- * -nticks.
93491- *
93492- * To compute log(u), we sample an integer in [1, 64] and divide, then just look
93493- * up results in a table.  As a space-compression mechanism, we store these as
93494- * uint8_t by dividing the range (255) by the highest-magnitude value the log
93495- * can take on, and using that as a multiplier.  We then have to divide by that
93496- * multiplier at the end of the computation.
93497- *
93498- * The values here are computed in src/ticker.py
93499- */
93500-
93501-const uint8_t ticker_geom_table[1 << TICKER_GEOM_NBITS] = {
93502-	254, 211, 187, 169, 156, 144, 135, 127,
93503-	120, 113, 107, 102, 97, 93, 89, 85,
93504-	81, 77, 74, 71, 68, 65, 62, 60,
93505-	57, 55, 53, 50, 48, 46, 44, 42,
93506-	40, 39, 37, 35, 33, 32, 30, 29,
93507-	27, 26, 24, 23, 21, 20, 19, 18,
93508-	16, 15, 14, 13, 12, 10, 9, 8,
93509-	7, 6, 5, 4, 3, 2, 1, 0
93510-};
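A minimal sketch of how a geometric tick count can be drawn from this table. It mirrors the math in the comment rather than the in-tree ticker code; the divisor 61 is the mul value that src/ticker.py prints for this table:

#include <stdint.h>

#define GEOM_TABLE_NBITS 6	/* the table has 1 << 6 == 64 entries */

extern const uint8_t ticker_geom_table[1 << GEOM_TABLE_NBITS];

/* ticks ~= -nticks * log(u), with u = (idx + 1) / 64 drawn via the index. */
static uint32_t
geom_ticks_sketch(uint32_t nticks, uint64_t random_bits) {
	uint64_t idx = random_bits & ((1 << GEOM_TABLE_NBITS) - 1);
	return (uint32_t)(((uint64_t)nticks * ticker_geom_table[idx]) / 61);
}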
93511diff --git a/jemalloc/src/ticker.py b/jemalloc/src/ticker.py
93512deleted file mode 100755
93513index 3807740..0000000
93514--- a/jemalloc/src/ticker.py
93515+++ /dev/null
93516@@ -1,15 +0,0 @@
93517-#!/usr/bin/env python3
93518-
93519-import math
93520-
93521-# Must match TICKER_GEOM_NBITS
93522-lg_table_size = 6
93523-table_size = 2**lg_table_size
93524-byte_max = 255
93525-mul = math.floor(-byte_max/math.log(1 / table_size))
93526-values = [round(-mul * math.log(i / table_size))
93527-	for i in range(1, table_size+1)]
93528-print("mul =", mul)
93529-print("values:")
93530-for i in range(table_size // 8):
93531-	print(", ".join((str(x) for x in values[i*8 : i*8 + 8])))
93532diff --git a/jemalloc/src/tsd.c b/jemalloc/src/tsd.c
93533deleted file mode 100644
93534index e8e4f3a..0000000
93535--- a/jemalloc/src/tsd.c
93536+++ /dev/null
93537@@ -1,549 +0,0 @@
93538-#include "jemalloc/internal/jemalloc_preamble.h"
93539-#include "jemalloc/internal/jemalloc_internal_includes.h"
93540-
93541-#include "jemalloc/internal/assert.h"
93542-#include "jemalloc/internal/san.h"
93543-#include "jemalloc/internal/mutex.h"
93544-#include "jemalloc/internal/rtree.h"
93545-
93546-/******************************************************************************/
93547-/* Data. */
93548-
93549-/* TSD_INITIALIZER triggers "-Wmissing-field-initializers" */
93550-JEMALLOC_DIAGNOSTIC_PUSH
93551-JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
93552-
93553-#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
93554-JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls = TSD_INITIALIZER;
93555-JEMALLOC_TSD_TYPE_ATTR(bool) JEMALLOC_TLS_MODEL tsd_initialized = false;
93556-bool tsd_booted = false;
93557-#elif (defined(JEMALLOC_TLS))
93558-JEMALLOC_TSD_TYPE_ATTR(tsd_t) tsd_tls = TSD_INITIALIZER;
93559-pthread_key_t tsd_tsd;
93560-bool tsd_booted = false;
93561-#elif (defined(_WIN32))
93562-DWORD tsd_tsd;
93563-tsd_wrapper_t tsd_boot_wrapper = {false, TSD_INITIALIZER};
93564-bool tsd_booted = false;
93565-#else
93566-
93567-/*
93568- * This contains a mutex, but it's pretty convenient to allow the mutex code to
93569- * have a dependency on tsd.  So we define the struct here, and only refer to it
93570- * by pointer in the header.
93571- */
93572-struct tsd_init_head_s {
93573-	ql_head(tsd_init_block_t) blocks;
93574-	malloc_mutex_t lock;
93575-};
93576-
93577-pthread_key_t tsd_tsd;
93578-tsd_init_head_t	tsd_init_head = {
93579-	ql_head_initializer(blocks),
93580-	MALLOC_MUTEX_INITIALIZER
93581-};
93582-
93583-tsd_wrapper_t tsd_boot_wrapper = {
93584-	false,
93585-	TSD_INITIALIZER
93586-};
93587-bool tsd_booted = false;
93588-#endif
93589-
93590-JEMALLOC_DIAGNOSTIC_POP
93591-
93592-/******************************************************************************/
93593-
93594-/* A list of all the tsds in the nominal state. */
93595-typedef ql_head(tsd_t) tsd_list_t;
93596-static tsd_list_t tsd_nominal_tsds = ql_head_initializer(tsd_nominal_tsds);
93597-static malloc_mutex_t tsd_nominal_tsds_lock;
93598-
93599-/* How many slow-path-enabling features are turned on. */
93600-static atomic_u32_t tsd_global_slow_count = ATOMIC_INIT(0);
93601-
93602-static bool
93603-tsd_in_nominal_list(tsd_t *tsd) {
93604-	tsd_t *tsd_list;
93605-	bool found = false;
93606-	/*
93607-	 * We don't know that tsd is nominal; it might not be safe to get data
93608-	 * out of it here.
93609-	 */
93610-	malloc_mutex_lock(TSDN_NULL, &tsd_nominal_tsds_lock);
93611-	ql_foreach(tsd_list, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) {
93612-		if (tsd == tsd_list) {
93613-			found = true;
93614-			break;
93615-		}
93616-	}
93617-	malloc_mutex_unlock(TSDN_NULL, &tsd_nominal_tsds_lock);
93618-	return found;
93619-}
93620-
93621-static void
93622-tsd_add_nominal(tsd_t *tsd) {
93623-	assert(!tsd_in_nominal_list(tsd));
93624-	assert(tsd_state_get(tsd) <= tsd_state_nominal_max);
93625-	ql_elm_new(tsd, TSD_MANGLE(tsd_link));
93626-	malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
93627-	ql_tail_insert(&tsd_nominal_tsds, tsd, TSD_MANGLE(tsd_link));
93628-	malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
93629-}
93630-
93631-static void
93632-tsd_remove_nominal(tsd_t *tsd) {
93633-	assert(tsd_in_nominal_list(tsd));
93634-	assert(tsd_state_get(tsd) <= tsd_state_nominal_max);
93635-	malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
93636-	ql_remove(&tsd_nominal_tsds, tsd, TSD_MANGLE(tsd_link));
93637-	malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
93638-}
93639-
93640-static void
93641-tsd_force_recompute(tsdn_t *tsdn) {
93642-	/*
93643-	 * The stores to tsd->state here need to synchronize with the exchange
93644-	 * in tsd_slow_update.
93645-	 */
93646-	atomic_fence(ATOMIC_RELEASE);
93647-	malloc_mutex_lock(tsdn, &tsd_nominal_tsds_lock);
93648-	tsd_t *remote_tsd;
93649-	ql_foreach(remote_tsd, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) {
93650-		assert(tsd_atomic_load(&remote_tsd->state, ATOMIC_RELAXED)
93651-		    <= tsd_state_nominal_max);
93652-		tsd_atomic_store(&remote_tsd->state,
93653-		    tsd_state_nominal_recompute, ATOMIC_RELAXED);
93654-		/* See comments in te_recompute_fast_threshold(). */
93655-		atomic_fence(ATOMIC_SEQ_CST);
93656-		te_next_event_fast_set_non_nominal(remote_tsd);
93657-	}
93658-	malloc_mutex_unlock(tsdn, &tsd_nominal_tsds_lock);
93659-}
93660-
93661-void
93662-tsd_global_slow_inc(tsdn_t *tsdn) {
93663-	atomic_fetch_add_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED);
93664-	/*
93665-	 * We unconditionally force a recompute, even if the global slow count
93666-	 * was already positive.  If we didn't, then it would be possible for us
93667-	 * to return to the user, have the user synchronize externally with some
93668-	 * other thread, and then have that other thread not have picked up the
93669-	 * update yet (since the original incrementing thread might still be
93670-	 * making its way through the tsd list).
93671-	 */
93672-	tsd_force_recompute(tsdn);
93673-}
93674-
93675-void tsd_global_slow_dec(tsdn_t *tsdn) {
93676-	atomic_fetch_sub_u32(&tsd_global_slow_count, 1, ATOMIC_RELAXED);
93677-	/* See the note in ..._inc(). */
93678-	tsd_force_recompute(tsdn);
93679-}
93680-
93681-static bool
93682-tsd_local_slow(tsd_t *tsd) {
93683-	return !tsd_tcache_enabled_get(tsd)
93684-	    || tsd_reentrancy_level_get(tsd) > 0;
93685-}
93686-
93687-bool
93688-tsd_global_slow() {
93689-	return atomic_load_u32(&tsd_global_slow_count, ATOMIC_RELAXED) > 0;
93690-}
93691-
93692-/******************************************************************************/
93693-
93694-static uint8_t
93695-tsd_state_compute(tsd_t *tsd) {
93696-	if (!tsd_nominal(tsd)) {
93697-		return tsd_state_get(tsd);
93698-	}
93699-	/* We're in *a* nominal state; but which one? */
93700-	if (malloc_slow || tsd_local_slow(tsd) || tsd_global_slow()) {
93701-		return tsd_state_nominal_slow;
93702-	} else {
93703-		return tsd_state_nominal;
93704-	}
93705-}
93706-
93707-void
93708-tsd_slow_update(tsd_t *tsd) {
93709-	uint8_t old_state;
93710-	do {
93711-		uint8_t new_state = tsd_state_compute(tsd);
93712-		old_state = tsd_atomic_exchange(&tsd->state, new_state,
93713-		    ATOMIC_ACQUIRE);
93714-	} while (old_state == tsd_state_nominal_recompute);
93715-
93716-	te_recompute_fast_threshold(tsd);
93717-}
93718-
93719-void
93720-tsd_state_set(tsd_t *tsd, uint8_t new_state) {
93721-	/* Only the tsd module can change the state *to* recompute. */
93722-	assert(new_state != tsd_state_nominal_recompute);
93723-	uint8_t old_state = tsd_atomic_load(&tsd->state, ATOMIC_RELAXED);
93724-	if (old_state > tsd_state_nominal_max) {
93725-		/*
93726-		 * Not currently in the nominal list, but it might need to be
93727-		 * inserted there.
93728-		 */
93729-		assert(!tsd_in_nominal_list(tsd));
93730-		tsd_atomic_store(&tsd->state, new_state, ATOMIC_RELAXED);
93731-		if (new_state <= tsd_state_nominal_max) {
93732-			tsd_add_nominal(tsd);
93733-		}
93734-	} else {
93735-		/*
93736-		 * We're currently nominal.  If the new state is non-nominal,
93737-		 * great; we take ourselves off the list and just enter the new
93738-		 * state.
93739-		 */
93740-		assert(tsd_in_nominal_list(tsd));
93741-		if (new_state > tsd_state_nominal_max) {
93742-			tsd_remove_nominal(tsd);
93743-			tsd_atomic_store(&tsd->state, new_state,
93744-			    ATOMIC_RELAXED);
93745-		} else {
93746-			/*
93747-			 * This is the tricky case.  We're transitioning from
93748-			 * one nominal state to another.  The caller can't know
93749-			 * about any races that are occurring at the same time,
93750-			 * so we always have to recompute no matter what.
93751-			 */
93752-			tsd_slow_update(tsd);
93753-		}
93754-	}
93755-	te_recompute_fast_threshold(tsd);
93756-}
93757-
93758-static void
93759-tsd_prng_state_init(tsd_t *tsd) {
93760-	/*
93761-	 * A nondeterministic seed based on the address of tsd reduces
93762-	 * the likelihood of lockstep non-uniform cache index
93763-	 * utilization among identical concurrent processes, but at the
93764-	 * cost of test repeatability.  For debug builds, instead use a
93765-	 * deterministic seed.
93766-	 */
93767-	*tsd_prng_statep_get(tsd) = config_debug ? 0 :
93768-	    (uint64_t)(uintptr_t)tsd;
93769-}
93770-
93771-static bool
93772-tsd_data_init(tsd_t *tsd) {
93773-	/*
93774-	 * We initialize the rtree context first (before the tcache), since the
93775-	 * tcache initialization depends on it.
93776-	 */
93777-	rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
93778-	tsd_prng_state_init(tsd);
93779-	tsd_te_init(tsd); /* event_init may use the prng state above. */
93780-	tsd_san_init(tsd);
93781-	return tsd_tcache_enabled_data_init(tsd);
93782-}
93783-
93784-static void
93785-assert_tsd_data_cleanup_done(tsd_t *tsd) {
93786-	assert(!tsd_nominal(tsd));
93787-	assert(!tsd_in_nominal_list(tsd));
93788-	assert(*tsd_arenap_get_unsafe(tsd) == NULL);
93789-	assert(*tsd_iarenap_get_unsafe(tsd) == NULL);
93790-	assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false);
93791-	assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL);
93792-}
93793-
93794-static bool
93795-tsd_data_init_nocleanup(tsd_t *tsd) {
93796-	assert(tsd_state_get(tsd) == tsd_state_reincarnated ||
93797-	    tsd_state_get(tsd) == tsd_state_minimal_initialized);
93798-	/*
93799-	 * During reincarnation, there is no guarantee that the cleanup function
93800-	 * will be called (deallocation may happen after all tsd destructors).
93801-	 * We set up tsd so that no cleanup is needed.
93802-	 */
93803-	rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
93804-	*tsd_tcache_enabledp_get_unsafe(tsd) = false;
93805-	*tsd_reentrancy_levelp_get(tsd) = 1;
93806-	tsd_prng_state_init(tsd);
93807-	tsd_te_init(tsd); /* event_init may use the prng state above. */
93808-	tsd_san_init(tsd);
93809-	assert_tsd_data_cleanup_done(tsd);
93810-
93811-	return false;
93812-}
93813-
93814-tsd_t *
93815-tsd_fetch_slow(tsd_t *tsd, bool minimal) {
93816-	assert(!tsd_fast(tsd));
93817-
93818-	if (tsd_state_get(tsd) == tsd_state_nominal_slow) {
93819-		/*
93820-		 * On slow path but no work needed.  Note that we can't
93821-		 * necessarily *assert* that we're slow, because we might be
93822-		 * slow because of an asynchronous modification to global state,
93823-		 * which might be asynchronously modified *back*.
93824-		 */
93825-	} else if (tsd_state_get(tsd) == tsd_state_nominal_recompute) {
93826-		tsd_slow_update(tsd);
93827-	} else if (tsd_state_get(tsd) == tsd_state_uninitialized) {
93828-		if (!minimal) {
93829-			if (tsd_booted) {
93830-				tsd_state_set(tsd, tsd_state_nominal);
93831-				tsd_slow_update(tsd);
93832-				/* Trigger cleanup handler registration. */
93833-				tsd_set(tsd);
93834-				tsd_data_init(tsd);
93835-			}
93836-		} else {
93837-			tsd_state_set(tsd, tsd_state_minimal_initialized);
93838-			tsd_set(tsd);
93839-			tsd_data_init_nocleanup(tsd);
93840-		}
93841-	} else if (tsd_state_get(tsd) == tsd_state_minimal_initialized) {
93842-		if (!minimal) {
93843-			/* Switch to fully initialized. */
93844-			tsd_state_set(tsd, tsd_state_nominal);
93845-			assert(*tsd_reentrancy_levelp_get(tsd) >= 1);
93846-			(*tsd_reentrancy_levelp_get(tsd))--;
93847-			tsd_slow_update(tsd);
93848-			tsd_data_init(tsd);
93849-		} else {
93850-			assert_tsd_data_cleanup_done(tsd);
93851-		}
93852-	} else if (tsd_state_get(tsd) == tsd_state_purgatory) {
93853-		tsd_state_set(tsd, tsd_state_reincarnated);
93854-		tsd_set(tsd);
93855-		tsd_data_init_nocleanup(tsd);
93856-	} else {
93857-		assert(tsd_state_get(tsd) == tsd_state_reincarnated);
93858-	}
93859-
93860-	return tsd;
93861-}
93862-
93863-void *
93864-malloc_tsd_malloc(size_t size) {
93865-	return a0malloc(CACHELINE_CEILING(size));
93866-}
93867-
93868-void
93869-malloc_tsd_dalloc(void *wrapper) {
93870-	a0dalloc(wrapper);
93871-}
93872-
93873-#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
93874-static unsigned ncleanups;
93875-static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
93876-
93877-#ifndef _WIN32
93878-JEMALLOC_EXPORT
93879-#endif
93880-void
93881-_malloc_thread_cleanup(void) {
93882-	bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
93883-	unsigned i;
93884-
93885-	for (i = 0; i < ncleanups; i++) {
93886-		pending[i] = true;
93887-	}
93888-
93889-	do {
93890-		again = false;
93891-		for (i = 0; i < ncleanups; i++) {
93892-			if (pending[i]) {
93893-				pending[i] = cleanups[i]();
93894-				if (pending[i]) {
93895-					again = true;
93896-				}
93897-			}
93898-		}
93899-	} while (again);
93900-}
93901-
93902-#ifndef _WIN32
93903-JEMALLOC_EXPORT
93904-#endif
93905-void
93906-_malloc_tsd_cleanup_register(bool (*f)(void)) {
93907-	assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
93908-	cleanups[ncleanups] = f;
93909-	ncleanups++;
93910-}
93911-
93912-#endif
93913-
93914-static void
93915-tsd_do_data_cleanup(tsd_t *tsd) {
93916-	prof_tdata_cleanup(tsd);
93917-	iarena_cleanup(tsd);
93918-	arena_cleanup(tsd);
93919-	tcache_cleanup(tsd);
93920-	witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd));
93921-	*tsd_reentrancy_levelp_get(tsd) = 1;
93922-}
93923-
93924-void
93925-tsd_cleanup(void *arg) {
93926-	tsd_t *tsd = (tsd_t *)arg;
93927-
93928-	switch (tsd_state_get(tsd)) {
93929-	case tsd_state_uninitialized:
93930-		/* Do nothing. */
93931-		break;
93932-	case tsd_state_minimal_initialized:
93933-		/* This implies the thread only did free() in its lifetime. */
93934-		/* Fall through. */
93935-	case tsd_state_reincarnated:
93936-		/*
93937-		 * Reincarnated means another destructor deallocated memory
93938-		 * after the destructor was called.  Cleanup isn't required but
93939-		 * is still called for testing and completeness.
93940-		 */
93941-		assert_tsd_data_cleanup_done(tsd);
93942-		JEMALLOC_FALLTHROUGH;
93943-	case tsd_state_nominal:
93944-	case tsd_state_nominal_slow:
93945-		tsd_do_data_cleanup(tsd);
93946-		tsd_state_set(tsd, tsd_state_purgatory);
93947-		tsd_set(tsd);
93948-		break;
93949-	case tsd_state_purgatory:
93950-		/*
93951-		 * The previous time this destructor was called, we set the
93952-		 * state to tsd_state_purgatory so that other destructors
93953-		 * wouldn't cause re-creation of the tsd.  This time, do
93954-		 * nothing, and do not request another callback.
93955-		 */
93956-		break;
93957-	default:
93958-		not_reached();
93959-	}
93960-#ifdef JEMALLOC_JET
93961-	test_callback_t test_callback = *tsd_test_callbackp_get_unsafe(tsd);
93962-	int *data = tsd_test_datap_get_unsafe(tsd);
93963-	if (test_callback != NULL) {
93964-		test_callback(data);
93965-	}
93966-#endif
93967-}
93968-
93969-tsd_t *
93970-malloc_tsd_boot0(void) {
93971-	tsd_t *tsd;
93972-
93973-#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
93974-	ncleanups = 0;
93975-#endif
93976-	if (malloc_mutex_init(&tsd_nominal_tsds_lock, "tsd_nominal_tsds_lock",
93977-	    WITNESS_RANK_OMIT, malloc_mutex_rank_exclusive)) {
93978-		return NULL;
93979-	}
93980-	if (tsd_boot0()) {
93981-		return NULL;
93982-	}
93983-	tsd = tsd_fetch();
93984-	return tsd;
93985-}
93986-
93987-void
93988-malloc_tsd_boot1(void) {
93989-	tsd_boot1();
93990-	tsd_t *tsd = tsd_fetch();
93991-	/* malloc_slow has been set properly.  Update tsd_slow. */
93992-	tsd_slow_update(tsd);
93993-}
93994-
93995-#ifdef _WIN32
93996-static BOOL WINAPI
93997-_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) {
93998-	switch (fdwReason) {
93999-#ifdef JEMALLOC_LAZY_LOCK
94000-	case DLL_THREAD_ATTACH:
94001-		isthreaded = true;
94002-		break;
94003-#endif
94004-	case DLL_THREAD_DETACH:
94005-		_malloc_thread_cleanup();
94006-		break;
94007-	default:
94008-		break;
94009-	}
94010-	return true;
94011-}
94012-
94013-/*
94014- * We need to be able to say "read" here (in the "pragma section"), but have
94015- * hooked "read". We won't read for the rest of the file, so we can get away
94016- * with unhooking.
94017- */
94018-#ifdef read
94019-#  undef read
94020-#endif
94021-
94022-#ifdef _MSC_VER
94023-#  ifdef _M_IX86
94024-#    pragma comment(linker, "/INCLUDE:__tls_used")
94025-#    pragma comment(linker, "/INCLUDE:_tls_callback")
94026-#  else
94027-#    pragma comment(linker, "/INCLUDE:_tls_used")
94028-#    pragma comment(linker, "/INCLUDE:" STRINGIFY(tls_callback) )
94029-#  endif
94030-#  pragma section(".CRT$XLY",long,read)
94031-#endif
94032-JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
94033-BOOL	(WINAPI *const tls_callback)(HINSTANCE hinstDLL,
94034-    DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
94035-#endif
94036-
94037-#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
94038-    !defined(_WIN32))
94039-void *
94040-tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) {
94041-	pthread_t self = pthread_self();
94042-	tsd_init_block_t *iter;
94043-
94044-	/* Check whether this thread has already inserted a block into the list. */
94045-	malloc_mutex_lock(TSDN_NULL, &head->lock);
94046-	ql_foreach(iter, &head->blocks, link) {
94047-		if (iter->thread == self) {
94048-			malloc_mutex_unlock(TSDN_NULL, &head->lock);
94049-			return iter->data;
94050-		}
94051-	}
94052-	/* Insert block into list. */
94053-	ql_elm_new(block, link);
94054-	block->thread = self;
94055-	ql_tail_insert(&head->blocks, block, link);
94056-	malloc_mutex_unlock(TSDN_NULL, &head->lock);
94057-	return NULL;
94058-}
94059-
94060-void
94061-tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) {
94062-	malloc_mutex_lock(TSDN_NULL, &head->lock);
94063-	ql_remove(&head->blocks, block, link);
94064-	malloc_mutex_unlock(TSDN_NULL, &head->lock);
94065-}
94066-#endif
94067-
94068-void
94069-tsd_prefork(tsd_t *tsd) {
94070-	malloc_mutex_prefork(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
94071-}
94072-
94073-void
94074-tsd_postfork_parent(tsd_t *tsd) {
94075-	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
94076-}
94077-
94078-void
94079-tsd_postfork_child(tsd_t *tsd) {
94080-	malloc_mutex_postfork_child(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
94081-	ql_new(&tsd_nominal_tsds);
94082-
94083-	if (tsd_state_get(tsd) <= tsd_state_nominal_max) {
94084-		tsd_add_nominal(tsd);
94085-	}
94086-}
94087diff --git a/jemalloc/src/witness.c b/jemalloc/src/witness.c
94088deleted file mode 100644
94089index 4474af0..0000000
94090--- a/jemalloc/src/witness.c
94091+++ /dev/null
94092@@ -1,122 +0,0 @@
94093-#include "jemalloc/internal/jemalloc_preamble.h"
94094-#include "jemalloc/internal/jemalloc_internal_includes.h"
94095-
94096-#include "jemalloc/internal/assert.h"
94097-#include "jemalloc/internal/malloc_io.h"
94098-
94099-void
94100-witness_init(witness_t *witness, const char *name, witness_rank_t rank,
94101-    witness_comp_t *comp, void *opaque) {
94102-	witness->name = name;
94103-	witness->rank = rank;
94104-	witness->comp = comp;
94105-	witness->opaque = opaque;
94106-}
94107-
94108-static void
94109-witness_print_witness(witness_t *w, unsigned n) {
94110-	assert(n > 0);
94111-	if (n == 1) {
94112-		malloc_printf(" %s(%u)", w->name, w->rank);
94113-	} else {
94114-		malloc_printf(" %s(%u)X%u", w->name, w->rank, n);
94115-	}
94116-}
94117-
94118-static void
94119-witness_print_witnesses(const witness_list_t *witnesses) {
94120-	witness_t *w, *last = NULL;
94121-	unsigned n = 0;
94122-	ql_foreach(w, witnesses, link) {
94123-		if (last != NULL && w->rank > last->rank) {
94124-			assert(w->name != last->name);
94125-			witness_print_witness(last, n);
94126-			n = 0;
94127-		} else if (last != NULL) {
94128-			assert(w->rank == last->rank);
94129-			assert(w->name == last->name);
94130-		}
94131-		last = w;
94132-		++n;
94133-	}
94134-	if (last != NULL) {
94135-		witness_print_witness(last, n);
94136-	}
94137-}
94138-
94139-static void
94140-witness_lock_error_impl(const witness_list_t *witnesses,
94141-    const witness_t *witness) {
94142-	malloc_printf("<jemalloc>: Lock rank order reversal:");
94143-	witness_print_witnesses(witnesses);
94144-	malloc_printf(" %s(%u)\n", witness->name, witness->rank);
94145-	abort();
94146-}
94147-witness_lock_error_t *JET_MUTABLE witness_lock_error = witness_lock_error_impl;
94148-
94149-static void
94150-witness_owner_error_impl(const witness_t *witness) {
94151-	malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
94152-	    witness->rank);
94153-	abort();
94154-}
94155-witness_owner_error_t *JET_MUTABLE witness_owner_error =
94156-    witness_owner_error_impl;
94157-
94158-static void
94159-witness_not_owner_error_impl(const witness_t *witness) {
94160-	malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
94161-	    witness->rank);
94162-	abort();
94163-}
94164-witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error =
94165-    witness_not_owner_error_impl;
94166-
94167-static void
94168-witness_depth_error_impl(const witness_list_t *witnesses,
94169-    witness_rank_t rank_inclusive, unsigned depth) {
94170-	malloc_printf("<jemalloc>: Should own %u lock%s of rank >= %u:", depth,
94171-	    (depth != 1) ?  "s" : "", rank_inclusive);
94172-	witness_print_witnesses(witnesses);
94173-	malloc_printf("\n");
94174-	abort();
94175-}
94176-witness_depth_error_t *JET_MUTABLE witness_depth_error =
94177-    witness_depth_error_impl;
94178-
94179-void
94180-witnesses_cleanup(witness_tsd_t *witness_tsd) {
94181-	witness_assert_lockless(witness_tsd_tsdn(witness_tsd));
94182-
94183-	/* Do nothing. */
94184-}
94185-
94186-void
94187-witness_prefork(witness_tsd_t *witness_tsd) {
94188-	if (!config_debug) {
94189-		return;
94190-	}
94191-	witness_tsd->forking = true;
94192-}
94193-
94194-void
94195-witness_postfork_parent(witness_tsd_t *witness_tsd) {
94196-	if (!config_debug) {
94197-		return;
94198-	}
94199-	witness_tsd->forking = false;
94200-}
94201-
94202-void
94203-witness_postfork_child(witness_tsd_t *witness_tsd) {
94204-	if (!config_debug) {
94205-		return;
94206-	}
94207-#ifndef JEMALLOC_MUTEX_INIT_CB
94208-	witness_list_t *witnesses;
94209-
94210-	witnesses = &witness_tsd->witnesses;
94211-	ql_new(witnesses);
94212-#endif
94213-	witness_tsd->forking = false;
94214-}
94215diff --git a/jemalloc/src/zone.c b/jemalloc/src/zone.c
94216deleted file mode 100644
94217index 23dfdd0..0000000
94218--- a/jemalloc/src/zone.c
94219+++ /dev/null
94220@@ -1,469 +0,0 @@
94221-#include "jemalloc/internal/jemalloc_preamble.h"
94222-#include "jemalloc/internal/jemalloc_internal_includes.h"
94223-
94224-#include "jemalloc/internal/assert.h"
94225-
94226-#ifndef JEMALLOC_ZONE
94227-#  error "This source file is for zones on Darwin (OS X)."
94228-#endif
94229-
94230-/* Definitions of the following structs in malloc/malloc.h might be too old
94231- * for the built binary to run on newer versions of OSX. So use the newest
94232- * possible version of those structs.
94233- */
94234-typedef struct _malloc_zone_t {
94235-	void *reserved1;
94236-	void *reserved2;
94237-	size_t (*size)(struct _malloc_zone_t *, const void *);
94238-	void *(*malloc)(struct _malloc_zone_t *, size_t);
94239-	void *(*calloc)(struct _malloc_zone_t *, size_t, size_t);
94240-	void *(*valloc)(struct _malloc_zone_t *, size_t);
94241-	void (*free)(struct _malloc_zone_t *, void *);
94242-	void *(*realloc)(struct _malloc_zone_t *, void *, size_t);
94243-	void (*destroy)(struct _malloc_zone_t *);
94244-	const char *zone_name;
94245-	unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned);
94246-	void (*batch_free)(struct _malloc_zone_t *, void **, unsigned);
94247-	struct malloc_introspection_t *introspect;
94248-	unsigned version;
94249-	void *(*memalign)(struct _malloc_zone_t *, size_t, size_t);
94250-	void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t);
94251-	size_t (*pressure_relief)(struct _malloc_zone_t *, size_t);
94252-} malloc_zone_t;
94253-
94254-typedef struct {
94255-	vm_address_t address;
94256-	vm_size_t size;
94257-} vm_range_t;
94258-
94259-typedef struct malloc_statistics_t {
94260-	unsigned blocks_in_use;
94261-	size_t size_in_use;
94262-	size_t max_size_in_use;
94263-	size_t size_allocated;
94264-} malloc_statistics_t;
94265-
94266-typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **);
94267-
94268-typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned);
94269-
94270-typedef struct malloc_introspection_t {
94271-	kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t);
94272-	size_t (*good_size)(malloc_zone_t *, size_t);
94273-	boolean_t (*check)(malloc_zone_t *);
94274-	void (*print)(malloc_zone_t *, boolean_t);
94275-	void (*log)(malloc_zone_t *, void *);
94276-	void (*force_lock)(malloc_zone_t *);
94277-	void (*force_unlock)(malloc_zone_t *);
94278-	void (*statistics)(malloc_zone_t *, malloc_statistics_t *);
94279-	boolean_t (*zone_locked)(malloc_zone_t *);
94280-	boolean_t (*enable_discharge_checking)(malloc_zone_t *);
94281-	boolean_t (*disable_discharge_checking)(malloc_zone_t *);
94282-	void (*discharge)(malloc_zone_t *, void *);
94283-#ifdef __BLOCKS__
94284-	void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *));
94285-#else
94286-	void *enumerate_unavailable_without_blocks;
94287-#endif
94288-	void (*reinit_lock)(malloc_zone_t *);
94289-} malloc_introspection_t;
94290-
94291-extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *);
94292-
94293-extern malloc_zone_t *malloc_default_zone(void);
94294-
94295-extern void malloc_zone_register(malloc_zone_t *zone);
94296-
94297-extern void malloc_zone_unregister(malloc_zone_t *zone);
94298-
94299-/*
94300- * The malloc_default_purgeable_zone() function is only available on >= 10.6.
94301- * We need to check whether it is present at runtime, thus the weak_import.
94302- */
94303-extern malloc_zone_t *malloc_default_purgeable_zone(void)
94304-JEMALLOC_ATTR(weak_import);
94305-
94306-/******************************************************************************/
94307-/* Data. */
94308-
94309-static malloc_zone_t *default_zone, *purgeable_zone;
94310-static malloc_zone_t jemalloc_zone;
94311-static struct malloc_introspection_t jemalloc_zone_introspect;
94312-static pid_t zone_force_lock_pid = -1;
94313-
94314-/******************************************************************************/
94315-/* Function prototypes for non-inline static functions. */
94316-
94317-static size_t	zone_size(malloc_zone_t *zone, const void *ptr);
94318-static void	*zone_malloc(malloc_zone_t *zone, size_t size);
94319-static void	*zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
94320-static void	*zone_valloc(malloc_zone_t *zone, size_t size);
94321-static void	zone_free(malloc_zone_t *zone, void *ptr);
94322-static void	*zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
94323-static void	*zone_memalign(malloc_zone_t *zone, size_t alignment,
94324-    size_t size);
94325-static void	zone_free_definite_size(malloc_zone_t *zone, void *ptr,
94326-    size_t size);
94327-static void	zone_destroy(malloc_zone_t *zone);
94328-static unsigned	zone_batch_malloc(struct _malloc_zone_t *zone, size_t size,
94329-    void **results, unsigned num_requested);
94330-static void	zone_batch_free(struct _malloc_zone_t *zone,
94331-    void **to_be_freed, unsigned num_to_be_freed);
94332-static size_t	zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal);
94333-static size_t	zone_good_size(malloc_zone_t *zone, size_t size);
94334-static kern_return_t	zone_enumerator(task_t task, void *data, unsigned type_mask,
94335-    vm_address_t zone_address, memory_reader_t reader,
94336-    vm_range_recorder_t recorder);
94337-static boolean_t	zone_check(malloc_zone_t *zone);
94338-static void	zone_print(malloc_zone_t *zone, boolean_t verbose);
94339-static void	zone_log(malloc_zone_t *zone, void *address);
94340-static void	zone_force_lock(malloc_zone_t *zone);
94341-static void	zone_force_unlock(malloc_zone_t *zone);
94342-static void	zone_statistics(malloc_zone_t *zone,
94343-    malloc_statistics_t *stats);
94344-static boolean_t	zone_locked(malloc_zone_t *zone);
94345-static void	zone_reinit_lock(malloc_zone_t *zone);
94346-
94347-/******************************************************************************/
94348-/*
94349- * Functions.
94350- */
94351-
94352-static size_t
94353-zone_size(malloc_zone_t *zone, const void *ptr) {
94354-	/*
94355-	 * There appear to be places within Darwin (such as setenv(3)) that
94356-	 * cause calls to this function with pointers that *no* zone owns.  If
94357-	 * we knew that all pointers were owned by *some* zone, we could split
94358-	 * our zone into two parts, and use one as the default allocator and
94359-	 * the other as the default deallocator/reallocator.  Since that will
94360-	 * not work in practice, we must check all pointers to assure that they
94361-	 * reside within a mapped extent before determining size.
94362-	 */
94363-	return ivsalloc(tsdn_fetch(), ptr);
94364-}
94365-
94366-static void *
94367-zone_malloc(malloc_zone_t *zone, size_t size) {
94368-	return je_malloc(size);
94369-}
94370-
94371-static void *
94372-zone_calloc(malloc_zone_t *zone, size_t num, size_t size) {
94373-	return je_calloc(num, size);
94374-}
94375-
94376-static void *
94377-zone_valloc(malloc_zone_t *zone, size_t size) {
94378-	void *ret = NULL; /* Assignment avoids useless compiler warning. */
94379-
94380-	je_posix_memalign(&ret, PAGE, size);
94381-
94382-	return ret;
94383-}
94384-
94385-static void
94386-zone_free(malloc_zone_t *zone, void *ptr) {
94387-	if (ivsalloc(tsdn_fetch(), ptr) != 0) {
94388-		je_free(ptr);
94389-		return;
94390-	}
94391-
94392-	free(ptr);
94393-}
94394-
94395-static void *
94396-zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
94397-	if (ivsalloc(tsdn_fetch(), ptr) != 0) {
94398-		return je_realloc(ptr, size);
94399-	}
94400-
94401-	return realloc(ptr, size);
94402-}
94403-
94404-static void *
94405-zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
94406-	void *ret = NULL; /* Assignment avoids useless compiler warning. */
94407-
94408-	je_posix_memalign(&ret, alignment, size);
94409-
94410-	return ret;
94411-}
94412-
94413-static void
94414-zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
94415-	size_t alloc_size;
94416-
94417-	alloc_size = ivsalloc(tsdn_fetch(), ptr);
94418-	if (alloc_size != 0) {
94419-		assert(alloc_size == size);
94420-		je_free(ptr);
94421-		return;
94422-	}
94423-
94424-	free(ptr);
94425-}
94426-
94427-static void
94428-zone_destroy(malloc_zone_t *zone) {
94429-	/* This function should never be called. */
94430-	not_reached();
94431-}
94432-
94433-static unsigned
94434-zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results,
94435-    unsigned num_requested) {
94436-	unsigned i;
94437-
94438-	for (i = 0; i < num_requested; i++) {
94439-		results[i] = je_malloc(size);
94440-		if (!results[i])
94441-			break;
94442-	}
94443-
94444-	return i;
94445-}
94446-
94447-static void
94448-zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed,
94449-    unsigned num_to_be_freed) {
94450-	unsigned i;
94451-
94452-	for (i = 0; i < num_to_be_freed; i++) {
94453-		zone_free(zone, to_be_freed[i]);
94454-		to_be_freed[i] = NULL;
94455-	}
94456-}
94457-
94458-static size_t
94459-zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) {
94460-	return 0;
94461-}
94462-
94463-static size_t
94464-zone_good_size(malloc_zone_t *zone, size_t size) {
94465-	if (size == 0) {
94466-		size = 1;
94467-	}
94468-	return sz_s2u(size);
94469-}
94470-
94471-static kern_return_t
94472-zone_enumerator(task_t task, void *data, unsigned type_mask,
94473-    vm_address_t zone_address, memory_reader_t reader,
94474-    vm_range_recorder_t recorder) {
94475-	return KERN_SUCCESS;
94476-}
94477-
94478-static boolean_t
94479-zone_check(malloc_zone_t *zone) {
94480-	return true;
94481-}
94482-
94483-static void
94484-zone_print(malloc_zone_t *zone, boolean_t verbose) {
94485-}
94486-
94487-static void
94488-zone_log(malloc_zone_t *zone, void *address) {
94489-}
94490-
94491-static void
94492-zone_force_lock(malloc_zone_t *zone) {
94493-	if (isthreaded) {
94494-		/*
94495-		 * See the note in zone_force_unlock, below, for why we need
94496-		 * this.
94497-		 */
94498-		assert(zone_force_lock_pid == -1);
94499-		zone_force_lock_pid = getpid();
94500-		jemalloc_prefork();
94501-	}
94502-}
94503-
94504-static void
94505-zone_force_unlock(malloc_zone_t *zone) {
94506-	/*
94507-	 * zone_force_lock and zone_force_unlock are the entry points to the
94508-	 * forking machinery on OS X.  The tricky thing is, the child is not
94509-	 * allowed to unlock mutexes locked in the parent, even if owned by the
94510-	 * forking thread (and the mutex type we use in OS X will fail an assert
94511-	 * if we try).  In the child, we can get away with reinitializing all
94512-	 * the mutexes, which has the effect of unlocking them.  In the parent,
94513-	 * doing this would mean we wouldn't wake any waiters blocked on the
94514-	 * mutexes we unlock.  So, we record the pid of the current thread in
94515-	 * zone_force_lock, and use that to detect if we're in the parent or
94516-	 * child here, to decide which unlock logic we need.
94517-	 */
94518-	if (isthreaded) {
94519-		assert(zone_force_lock_pid != -1);
94520-		if (getpid() == zone_force_lock_pid) {
94521-			jemalloc_postfork_parent();
94522-		} else {
94523-			jemalloc_postfork_child();
94524-		}
94525-		zone_force_lock_pid = -1;
94526-	}
94527-}
94528-
94529-static void
94530-zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
94531-	/* We make no effort to actually fill the values */
94532-	stats->blocks_in_use = 0;
94533-	stats->size_in_use = 0;
94534-	stats->max_size_in_use = 0;
94535-	stats->size_allocated = 0;
94536-}
94537-
94538-static boolean_t
94539-zone_locked(malloc_zone_t *zone) {
94540-	/* Pretend no lock is being held */
94541-	return false;
94542-}
94543-
94544-static void
94545-zone_reinit_lock(malloc_zone_t *zone) {
94546-	/* As of OSX 10.12, this function is only used when force_unlock would
94547-	 * be used if the zone version were < 9. So just use force_unlock. */
94548-	zone_force_unlock(zone);
94549-}
94550-
94551-static void
94552-zone_init(void) {
94553-	jemalloc_zone.size = zone_size;
94554-	jemalloc_zone.malloc = zone_malloc;
94555-	jemalloc_zone.calloc = zone_calloc;
94556-	jemalloc_zone.valloc = zone_valloc;
94557-	jemalloc_zone.free = zone_free;
94558-	jemalloc_zone.realloc = zone_realloc;
94559-	jemalloc_zone.destroy = zone_destroy;
94560-	jemalloc_zone.zone_name = "jemalloc_zone";
94561-	jemalloc_zone.batch_malloc = zone_batch_malloc;
94562-	jemalloc_zone.batch_free = zone_batch_free;
94563-	jemalloc_zone.introspect = &jemalloc_zone_introspect;
94564-	jemalloc_zone.version = 9;
94565-	jemalloc_zone.memalign = zone_memalign;
94566-	jemalloc_zone.free_definite_size = zone_free_definite_size;
94567-	jemalloc_zone.pressure_relief = zone_pressure_relief;
94568-
94569-	jemalloc_zone_introspect.enumerator = zone_enumerator;
94570-	jemalloc_zone_introspect.good_size = zone_good_size;
94571-	jemalloc_zone_introspect.check = zone_check;
94572-	jemalloc_zone_introspect.print = zone_print;
94573-	jemalloc_zone_introspect.log = zone_log;
94574-	jemalloc_zone_introspect.force_lock = zone_force_lock;
94575-	jemalloc_zone_introspect.force_unlock = zone_force_unlock;
94576-	jemalloc_zone_introspect.statistics = zone_statistics;
94577-	jemalloc_zone_introspect.zone_locked = zone_locked;
94578-	jemalloc_zone_introspect.enable_discharge_checking = NULL;
94579-	jemalloc_zone_introspect.disable_discharge_checking = NULL;
94580-	jemalloc_zone_introspect.discharge = NULL;
94581-#ifdef __BLOCKS__
94582-	jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
94583-#else
94584-	jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
94585-#endif
94586-	jemalloc_zone_introspect.reinit_lock = zone_reinit_lock;
94587-}
94588-
94589-static malloc_zone_t *
94590-zone_default_get(void) {
94591-	malloc_zone_t **zones = NULL;
94592-	unsigned int num_zones = 0;
94593-
94594-	/*
94595-	 * On OSX 10.12, malloc_default_zone returns a special zone that is not
94596-	 * present in the list of registered zones. That zone uses a "lite zone"
94597-	 * if one is present (apparently enabled when malloc stack logging is
94598-	 * enabled), or the first registered zone otherwise. In practice this
94599-	 * means unless malloc stack logging is enabled, the first registered
94600-	 * zone is the default.  So get the list of zones to get the first one,
94601-	 * instead of relying on malloc_default_zone.
94602-	 */
94603-	if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
94604-	    (vm_address_t**)&zones, &num_zones)) {
94605-		/*
94606-		 * Reset the value in case the failure happened after it was
94607-		 * set.
94608-		 */
94609-		num_zones = 0;
94610-	}
94611-
94612-	if (num_zones) {
94613-		return zones[0];
94614-	}
94615-
94616-	return malloc_default_zone();
94617-}
94618-
94619-/* As written, this function can only promote jemalloc_zone. */
94620-static void
94621-zone_promote(void) {
94622-	malloc_zone_t *zone;
94623-
94624-	do {
94625-		/*
94626-		 * Unregister and reregister the default zone.  On OSX >= 10.6,
94627-		 * unregistering takes the last registered zone and places it
94628-		 * at the location of the specified zone.  Unregistering the
94629-		 * default zone thus makes the last registered one the default.
94630-		 * On OSX < 10.6, unregistering shifts all registered zones.
94631-		 * The first registered zone then becomes the default.
94632-		 */
94633-		malloc_zone_unregister(default_zone);
94634-		malloc_zone_register(default_zone);
94635-
94636-		/*
94637-		 * On OSX 10.6, having the default purgeable zone appear before
94638-		 * the default zone makes some things crash because it thinks it
94639-		 * owns the default zone allocated pointers.  We thus
94640-		 * unregister/re-register it in order to ensure it's always
94641-		 * after the default zone.  On OSX < 10.6, there is no purgeable
94642-		 * zone, so this does nothing.  On OSX >= 10.6, unregistering
94643-		 * replaces the purgeable zone with the last registered zone
94644-		 * above, i.e. the default zone.  Registering it again then puts
94645-		 * it at the end, obviously after the default zone.
94646-		 */
94647-		if (purgeable_zone != NULL) {
94648-			malloc_zone_unregister(purgeable_zone);
94649-			malloc_zone_register(purgeable_zone);
94650-		}
94651-
94652-		zone = zone_default_get();
94653-	} while (zone != &jemalloc_zone);
94654-}
94655-
94656-JEMALLOC_ATTR(constructor)
94657-void
94658-zone_register(void) {
94659-	/*
94660-	 * If something else replaced the system default zone allocator, don't
94661-	 * register jemalloc's.
94662-	 */
94663-	default_zone = zone_default_get();
94664-	if (!default_zone->zone_name || strcmp(default_zone->zone_name,
94665-	    "DefaultMallocZone") != 0) {
94666-		return;
94667-	}
94668-
94669-	/*
94670-	 * The default purgeable zone is created lazily by OSX's libc.  It uses
94671-	 * the default zone when it is created for "small" allocations
94672-	 * (< 15 KiB), but assumes the default zone is a scalable_zone.  This
94673-	 * obviously fails when the default zone is the jemalloc zone, so
94674-	 * malloc_default_purgeable_zone() is called beforehand so that the
94675-	 * default purgeable zone is created when the default zone is still
94676-	 * a scalable_zone.  As purgeable zones only exist on >= 10.6, we need
94677-	 * to check for the existence of malloc_default_purgeable_zone() at
94678-	 * run time.
94679-	 */
94680-	purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
94681-	    malloc_default_purgeable_zone();
94682-
94683-	/* Register the custom zone.  At this point it won't be the default. */
94684-	zone_init();
94685-	malloc_zone_register(&jemalloc_zone);
94686-
94687-	/* Promote the custom zone to be default. */
94688-	zone_promote();
94689-}
94690diff --git a/jemalloc/test/analyze/prof_bias.c b/jemalloc/test/analyze/prof_bias.c
94691deleted file mode 100644
94692index a96ca94..0000000
94693--- a/jemalloc/test/analyze/prof_bias.c
94694+++ /dev/null
94695@@ -1,60 +0,0 @@
94696-#include "test/jemalloc_test.h"
94697-
94698-/*
94699- * This is a helper utility, only meant to be run manually (and, for example,
94700- * doesn't check for failures, try to skip execution in non-prof modes, etc.).
94701- * It runs, allocates objects of two different sizes from the same stack trace,
94702- * and exits.
94703- *
94704- * The idea is that some human operator will run it like:
94705- *     MALLOC_CONF="prof:true,prof_final:true" test/analyze/prof_bias
94706- * and manually inspect the results.
94707- *
94708- * The results should be:
94709- * jeprof --text test/analyze/prof_bias --inuse_space jeprof.<pid>.0.f.heap:
94710- * 	around 1024 MB
94711- * jeprof --text test/analyze/prof_bias --inuse_objects jeprof.<pid>.0.f.heap:
94712- * 	around 33554448 = 16 + 32 * 1024 * 1024
94713- *
94714- * And, if prof_accum is on:
94715- * jeprof --text test/analyze/prof_bias --alloc_space jeprof.<pid>.0.f.heap:
94716- *     around 2048 MB
94717- * jeprof --text test/analyze/prof_bias --alloc_objects jeprof.<pid>.0.f.heap:
94718- * 	around 67108896 = 2 * (16 + 32 * 1024 * 1024)
94719- */
94720-
94721-static void
94722-mock_backtrace(void **vec, unsigned *len, unsigned max_len) {
94723-	*len = 4;
94724-	vec[0] = (void *)0x111;
94725-	vec[1] = (void *)0x222;
94726-	vec[2] = (void *)0x333;
94727-	vec[3] = (void *)0x444;
94728-}
94729-
94730-static void
94731-do_allocs(size_t sz, size_t cnt, bool do_frees) {
94732-	for (size_t i = 0; i < cnt; i++) {
94733-		void *ptr = mallocx(sz, 0);
94734-		assert_ptr_not_null(ptr, "Unexpected mallocx failure");
94735-		if (do_frees) {
94736-			dallocx(ptr, 0);
94737-		}
94738-	}
94739-}
94740-
94741-int
94742-main(void) {
94743-	size_t lg_prof_sample_local = 19;
94744-	int err = mallctl("prof.reset", NULL, NULL,
94745-	    (void *)&lg_prof_sample_local, sizeof(lg_prof_sample_local));
94746-	assert(err == 0);
94747-
94748-	prof_backtrace_hook_set(mock_backtrace);
94749-	do_allocs(16, 32 * 1024 * 1024, /* do_frees */ true);
94750-	do_allocs(32 * 1024* 1024, 16, /* do_frees */ true);
94751-	do_allocs(16, 32 * 1024 * 1024, /* do_frees */ false);
94752-	do_allocs(32 * 1024* 1024, 16, /* do_frees */ false);
94753-
94754-	return 0;
94755-}
94756diff --git a/jemalloc/test/analyze/rand.c b/jemalloc/test/analyze/rand.c
94757deleted file mode 100644
94758index bb20b06..0000000
94759--- a/jemalloc/test/analyze/rand.c
94760+++ /dev/null
94761@@ -1,276 +0,0 @@
94762-#include "test/jemalloc_test.h"
94763-
94764-/******************************************************************************/
94765-
94766-/*
94767- * General purpose tool for examining random number distributions.
94768- *
94769- * Input -
94770- * (a) a random number generator, and
94771- * (b) the buckets:
94772- *     (1) number of buckets,
94773- *     (2) width of each bucket, in log scale,
94774- *     (3) expected mean and stddev of the count of random numbers in each
94775- *         bucket, and
94776- * (c) number of iterations to invoke the generator.
94777- *
94778- * The program generates the specified number of random numbers and assesses
94779- * how well they conform to the expectations: for each bucket, output -
94780- * (a) the (given) expected mean and stddev,
94781- * (b) the actual count and any interesting level of deviation:
94782- *     (1) ~68% of buckets should show no interesting deviation, meaning a
94783- *         deviation less than stddev from the expectation;
94784- *     (2) ~27% of buckets should show '+' / '-', meaning a deviation in the
94785- *         range of [stddev, 2 * stddev) from the expectation;
94786- *     (3) ~4% of buckets should show '++' / '--', meaning a deviation in the
94787- *         range of [2 * stddev, 3 * stddev) from the expectation; and
94788- *     (4) less than 0.3% of buckets should show more than two '+'s / '-'s.
94789- *
94790- * Technical remarks:
94791- * (a) The generator is expected to output uint64_t numbers, so you might need
94792- *     to define a wrapper.
94793- * (b) The buckets must be of equal width and the lowest bucket starts at
94794- *     [0, 2^lg_bucket_width - 1).
94795- * (c) Any generated number >= n_bucket * 2^lg_bucket_width will be counted
94796- *     towards the last bucket; the expected mean and stddev provided should
94797- *     also reflect that.
94798- * (d) The number of iterations should be chosen so that the bucket with the
94799- *     minimal expected proportion gets a sufficient count.
94800- */
94801-
94802-static void
94803-fill(size_t a[], const size_t n, const size_t k) {
94804-	for (size_t i = 0; i < n; ++i) {
94805-		a[i] = k;
94806-	}
94807-}
94808-
94809-static void
94810-collect_buckets(uint64_t (*gen)(void *), void *opaque, size_t buckets[],
94811-    const size_t n_bucket, const size_t lg_bucket_width, const size_t n_iter) {
94812-	for (size_t i = 0; i < n_iter; ++i) {
94813-		uint64_t num = gen(opaque);
94814-		uint64_t bucket_id = num >> lg_bucket_width;
94815-		if (bucket_id >= n_bucket) {
94816-			bucket_id = n_bucket - 1;
94817-		}
94818-		++buckets[bucket_id];
94819-	}
94820-}
94821-
94822-static void
94823-print_buckets(const size_t buckets[], const size_t means[],
94824-    const size_t stddevs[], const size_t n_bucket) {
94825-	for (size_t i = 0; i < n_bucket; ++i) {
94826-		malloc_printf("%zu:\tmean = %zu,\tstddev = %zu,\tbucket = %zu",
94827-		    i, means[i], stddevs[i], buckets[i]);
94828-
94829-		/* Make sure there's no overflow. */
94830-		assert(buckets[i] + stddevs[i] >= stddevs[i]);
94831-		assert(means[i] + stddevs[i] >= stddevs[i]);
94832-
94833-		if (buckets[i] + stddevs[i] <= means[i]) {
94834-			malloc_write(" ");
94835-			for (size_t t = means[i] - buckets[i]; t >= stddevs[i];
94836-			    t -= stddevs[i]) {
94837-				malloc_write("-");
94838-			}
94839-		} else if (buckets[i] >= means[i] + stddevs[i]) {
94840-			malloc_write(" ");
94841-			for (size_t t = buckets[i] - means[i]; t >= stddevs[i];
94842-			    t -= stddevs[i]) {
94843-				malloc_write("+");
94844-			}
94845-		}
94846-		malloc_write("\n");
94847-	}
94848-}
94849-
94850-static void
94851-bucket_analysis(uint64_t (*gen)(void *), void *opaque, size_t buckets[],
94852-    const size_t means[], const size_t stddevs[], const size_t n_bucket,
94853-    const size_t lg_bucket_width, const size_t n_iter) {
94854-	for (size_t i = 1; i <= 3; ++i) {
94855-		malloc_printf("round %zu\n", i);
94856-		fill(buckets, n_bucket, 0);
94857-		collect_buckets(gen, opaque, buckets, n_bucket,
94858-		    lg_bucket_width, n_iter);
94859-		print_buckets(buckets, means, stddevs, n_bucket);
94860-	}
94861-}
94862-
94863-/* (Recommended) minimal bucket mean. */
94864-#define MIN_BUCKET_MEAN 10000
94865-
94866-/******************************************************************************/
94867-
94868-/* Uniform random number generator. */
94869-
94870-typedef struct uniform_gen_arg_s uniform_gen_arg_t;
94871-struct uniform_gen_arg_s {
94872-	uint64_t state;
94873-	const unsigned lg_range;
94874-};
94875-
94876-static uint64_t
94877-uniform_gen(void *opaque) {
94878-	uniform_gen_arg_t *arg = (uniform_gen_arg_t *)opaque;
94879-	return prng_lg_range_u64(&arg->state, arg->lg_range);
94880-}
94881-
94882-TEST_BEGIN(test_uniform) {
94883-#define LG_N_BUCKET 5
94884-#define N_BUCKET (1 << LG_N_BUCKET)
94885-
94886-#define QUOTIENT_CEIL(n, d) (((n) - 1) / (d) + 1)
94887-
94888-	const unsigned lg_range_test = 25;
94889-
94890-	/*
94891-	 * Mathematical tricks to guarantee that both mean and stddev are
94892-	 * integers, and that the minimal bucket mean is at least
94893-	 * MIN_BUCKET_MEAN.
94894-	 */
94895-	const size_t q = 1 << QUOTIENT_CEIL(LG_CEIL(QUOTIENT_CEIL(
94896-	    MIN_BUCKET_MEAN, N_BUCKET * (N_BUCKET - 1))), 2);
94897-	const size_t stddev = (N_BUCKET - 1) * q;
94898-	const size_t mean = N_BUCKET * stddev * q;
94899-	const size_t n_iter = N_BUCKET * mean;
94900-
94901-	size_t means[N_BUCKET];
94902-	fill(means, N_BUCKET, mean);
94903-	size_t stddevs[N_BUCKET];
94904-	fill(stddevs, N_BUCKET, stddev);
94905-
94906-	uniform_gen_arg_t arg = {(uint64_t)(uintptr_t)&lg_range_test,
94907-	    lg_range_test};
94908-	size_t buckets[N_BUCKET];
94909-	assert_zu_ge(lg_range_test, LG_N_BUCKET, "");
94910-	const size_t lg_bucket_width = lg_range_test - LG_N_BUCKET;
94911-
94912-	bucket_analysis(uniform_gen, &arg, buckets, means, stddevs,
94913-	    N_BUCKET, lg_bucket_width, n_iter);
94914-
94915-#undef LG_N_BUCKET
94916-#undef N_BUCKET
94917-#undef QUOTIENT_CEIL
94918-}
94919-TEST_END
94920-
94921-/******************************************************************************/
94922-
94923-/* Geometric random number generator; compiled only when prof is on. */
94924-
94925-#ifdef JEMALLOC_PROF
94926-
94927-/*
94928- * Fills geometric proportions and returns the minimal proportion.  See
94929- * the comments in test_prof_sample for an explanation of n_divide.
94930- */
94931-static double
94932-fill_geometric_proportions(double proportions[], const size_t n_bucket,
94933-    const size_t n_divide) {
94934-	assert(n_bucket > 0);
94935-	assert(n_divide > 0);
94936-	double x = 1.;
94937-	for (size_t i = 0; i < n_bucket; ++i) {
94938-		if (i == n_bucket - 1) {
94939-			proportions[i] = x;
94940-		} else {
94941-			double y = x * exp(-1. / n_divide);
94942-			proportions[i] = x - y;
94943-			x = y;
94944-		}
94945-	}
94946-	/*
94947-	 * The minimal proportion is the smaller one of the last two
94948-	 * proportions for geometric distribution.
94949-	 */
94950-	double min_proportion = proportions[n_bucket - 1];
94951-	if (n_bucket >= 2 && proportions[n_bucket - 2] < min_proportion) {
94952-		min_proportion = proportions[n_bucket - 2];
94953-	}
94954-	return min_proportion;
94955-}
94956-
94957-static size_t
94958-round_to_nearest(const double x) {
94959-	return (size_t)(x + .5);
94960-}
94961-
94962-static void
94963-fill_references(size_t means[], size_t stddevs[], const double proportions[],
94964-    const size_t n_bucket, const size_t n_iter) {
94965-	for (size_t i = 0; i < n_bucket; ++i) {
94966-		double x = n_iter * proportions[i];
94967-		means[i] = round_to_nearest(x);
94968-		stddevs[i] = round_to_nearest(sqrt(x * (1. - proportions[i])));
94969-	}
94970-}
94971-
94972-static uint64_t
94973-prof_sample_gen(void *opaque) {
94974-	return prof_sample_new_event_wait((tsd_t *)opaque) - 1;
94975-}
94976-
94977-#endif /* JEMALLOC_PROF */
94978-
94979-TEST_BEGIN(test_prof_sample) {
94980-	test_skip_if(!config_prof);
94981-#ifdef JEMALLOC_PROF
94982-
94983-/* Number of divisions within [0, mean). */
94984-#define LG_N_DIVIDE 3
94985-#define N_DIVIDE (1 << LG_N_DIVIDE)
94986-
94987-/* Coverage of buckets in terms of multiples of mean. */
94988-#define LG_N_MULTIPLY 2
94989-#define N_GEO_BUCKET (N_DIVIDE << LG_N_MULTIPLY)
94990-
94991-	test_skip_if(!opt_prof);
94992-
94993-	size_t lg_prof_sample_test = 25;
94994-
94995-	size_t lg_prof_sample_orig = lg_prof_sample;
94996-	assert_d_eq(mallctl("prof.reset", NULL, NULL, &lg_prof_sample_test,
94997-	    sizeof(size_t)), 0, "");
94998-	malloc_printf("lg_prof_sample = %zu\n", lg_prof_sample_test);
94999-
95000-	double proportions[N_GEO_BUCKET + 1];
95001-	const double min_proportion = fill_geometric_proportions(proportions,
95002-	    N_GEO_BUCKET + 1, N_DIVIDE);
95003-	const size_t n_iter = round_to_nearest(MIN_BUCKET_MEAN /
95004-	    min_proportion);
95005-	size_t means[N_GEO_BUCKET + 1];
95006-	size_t stddevs[N_GEO_BUCKET + 1];
95007-	fill_references(means, stddevs, proportions, N_GEO_BUCKET + 1, n_iter);
95008-
95009-	tsd_t *tsd = tsd_fetch();
95010-	assert_ptr_not_null(tsd, "");
95011-	size_t buckets[N_GEO_BUCKET + 1];
95012-	assert_zu_ge(lg_prof_sample, LG_N_DIVIDE, "");
95013-	const size_t lg_bucket_width = lg_prof_sample - LG_N_DIVIDE;
95014-
95015-	bucket_analysis(prof_sample_gen, tsd, buckets, means, stddevs,
95016-	    N_GEO_BUCKET + 1, lg_bucket_width, n_iter);
95017-
95018-	assert_d_eq(mallctl("prof.reset", NULL, NULL, &lg_prof_sample_orig,
95019-	    sizeof(size_t)), 0, "");
95020-
95021-#undef LG_N_DIVIDE
95022-#undef N_DIVIDE
95023-#undef LG_N_MULTIPLY
95024-#undef N_GEO_BUCKET
95025-
95026-#endif /* JEMALLOC_PROF */
95027-}
95028-TEST_END
95029-
95030-/******************************************************************************/
95031-
95032-int
95033-main(void) {
95034-	return test_no_reentrancy(
95035-	    test_uniform,
95036-	    test_prof_sample);
95037-}
95038diff --git a/jemalloc/test/analyze/sizes.c b/jemalloc/test/analyze/sizes.c
95039deleted file mode 100644
95040index 44c9de5..0000000
95041--- a/jemalloc/test/analyze/sizes.c
95042+++ /dev/null
95043@@ -1,53 +0,0 @@
95044-#include "test/jemalloc_test.h"
95045-
95046-#include <stdio.h>
95047-
95048-/*
95049- * Print the sizes of various important core data structures.  OK, I guess this
95050- * isn't really a "stress" test, but it does give useful information about
95051- * low-level performance characteristics, as the other things in this directory
95052- * do.
95053- */
95054-
95055-static void
95056-do_print(const char *name, size_t sz_bytes) {
95057-	const char *sizes[] = {"bytes", "KB", "MB", "GB", "TB", "PB", "EB",
95058-		"ZB"};
95059-	size_t sizes_max = sizeof(sizes)/sizeof(sizes[0]);
95060-
95061-	size_t ind = 0;
95062-	double sz = sz_bytes;
95063-	while (sz >= 1024 && ind < sizes_max - 1) {
95064-		sz /= 1024;
95065-		ind++;
95066-	}
95067-	if (ind == 0) {
95068-		printf("%-20s: %zu bytes\n", name, sz_bytes);
95069-	} else {
95070-		printf("%-20s: %f %s\n", name, sz, sizes[ind]);
95071-	}
95072-}
95073-
95074-int
95075-main() {
95076-#define P(type)								\
95077-	do_print(#type, sizeof(type))
95078-	P(arena_t);
95079-	P(arena_stats_t);
95080-	P(base_t);
95081-	P(decay_t);
95082-	P(edata_t);
95083-	P(ecache_t);
95084-	P(eset_t);
95085-	P(malloc_mutex_t);
95086-	P(prof_tctx_t);
95087-	P(prof_gctx_t);
95088-	P(prof_tdata_t);
95089-	P(rtree_t);
95090-	P(rtree_leaf_elm_t);
95091-	P(slab_data_t);
95092-	P(tcache_t);
95093-	P(tcache_slow_t);
95094-	P(tsd_t);
95095-#undef P
95096-}
95097diff --git a/jemalloc/test/include/test/SFMT-alti.h b/jemalloc/test/include/test/SFMT-alti.h
95098deleted file mode 100644
95099index a1885db..0000000
95100--- a/jemalloc/test/include/test/SFMT-alti.h
95101+++ /dev/null
95102@@ -1,186 +0,0 @@
95103-/*
95104- * This file derives from SFMT 1.3.3
95105- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
95106- * released under the terms of the following license:
95107- *
95108- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
95109- *   University. All rights reserved.
95110- *
95111- *   Redistribution and use in source and binary forms, with or without
95112- *   modification, are permitted provided that the following conditions are
95113- *   met:
95114- *
95115- *       * Redistributions of source code must retain the above copyright
95116- *         notice, this list of conditions and the following disclaimer.
95117- *       * Redistributions in binary form must reproduce the above
95118- *         copyright notice, this list of conditions and the following
95119- *         disclaimer in the documentation and/or other materials provided
95120- *         with the distribution.
95121- *       * Neither the name of the Hiroshima University nor the names of
95122- *         its contributors may be used to endorse or promote products
95123- *         derived from this software without specific prior written
95124- *         permission.
95125- *
95126- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
95127- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
95128- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
95129- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
95130- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
95131- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
95132- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
95133- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
95134- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
95135- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
95136- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95137- */
95138-/**
95139- * @file SFMT-alti.h
95140- *
95141- * @brief SIMD oriented Fast Mersenne Twister(SFMT)
95142- * pseudorandom number generator
95143- *
95144- * @author Mutsuo Saito (Hiroshima University)
95145- * @author Makoto Matsumoto (Hiroshima University)
95146- *
95147- * Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
95148- * University. All rights reserved.
95149- *
95150- * The new BSD License is applied to this software.
95151- * see LICENSE.txt
95152- */
95153-
95154-#ifndef SFMT_ALTI_H
95155-#define SFMT_ALTI_H
95156-
95157-/**
95158- * This function represents the recursion formula in AltiVec and BIG ENDIAN.
95159- * @param a a 128-bit part of the internal state array
95160- * @param b a 128-bit part of the internal state array
95161- * @param c a 128-bit part of the internal state array
95162- * @param d a 128-bit part of the internal state array
95163- * @return output
95164- */
95165-JEMALLOC_ALWAYS_INLINE
95166-vector unsigned int vec_recursion(vector unsigned int a,
95167-						vector unsigned int b,
95168-						vector unsigned int c,
95169-						vector unsigned int d) {
95170-
95171-    const vector unsigned int sl1 = ALTI_SL1;
95172-    const vector unsigned int sr1 = ALTI_SR1;
95173-#ifdef ONLY64
95174-    const vector unsigned int mask = ALTI_MSK64;
95175-    const vector unsigned char perm_sl = ALTI_SL2_PERM64;
95176-    const vector unsigned char perm_sr = ALTI_SR2_PERM64;
95177-#else
95178-    const vector unsigned int mask = ALTI_MSK;
95179-    const vector unsigned char perm_sl = ALTI_SL2_PERM;
95180-    const vector unsigned char perm_sr = ALTI_SR2_PERM;
95181-#endif
95182-    vector unsigned int v, w, x, y, z;
95183-    x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl);
95184-    v = a;
95185-    y = vec_sr(b, sr1);
95186-    z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr);
95187-    w = vec_sl(d, sl1);
95188-    z = vec_xor(z, w);
95189-    y = vec_and(y, mask);
95190-    v = vec_xor(v, x);
95191-    z = vec_xor(z, y);
95192-    z = vec_xor(z, v);
95193-    return z;
95194-}
95195-
95196-/**
95197- * This function fills the internal state array with pseudorandom
95198- * integers.
95199- */
95200-static inline void gen_rand_all(sfmt_t *ctx) {
95201-    int i;
95202-    vector unsigned int r, r1, r2;
95203-
95204-    r1 = ctx->sfmt[N - 2].s;
95205-    r2 = ctx->sfmt[N - 1].s;
95206-    for (i = 0; i < N - POS1; i++) {
95207-	r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
95208-	ctx->sfmt[i].s = r;
95209-	r1 = r2;
95210-	r2 = r;
95211-    }
95212-    for (; i < N; i++) {
95213-	r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2);
95214-	ctx->sfmt[i].s = r;
95215-	r1 = r2;
95216-	r2 = r;
95217-    }
95218-}
95219-
95220-/**
95221- * This function fills the user-specified array with pseudorandom
95222- * integers.
95223- *
95224- * @param array a 128-bit array to be filled by pseudorandom numbers.
95225- * @param size number of 128-bit pseudorandom numbers to be generated.
95226- */
95227-static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
95228-    int i, j;
95229-    vector unsigned int r, r1, r2;
95230-
95231-    r1 = ctx->sfmt[N - 2].s;
95232-    r2 = ctx->sfmt[N - 1].s;
95233-    for (i = 0; i < N - POS1; i++) {
95234-	r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
95235-	array[i].s = r;
95236-	r1 = r2;
95237-	r2 = r;
95238-    }
95239-    for (; i < N; i++) {
95240-	r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2);
95241-	array[i].s = r;
95242-	r1 = r2;
95243-	r2 = r;
95244-    }
95245-    /* main loop */
95246-    for (; i < size - N; i++) {
95247-	r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
95248-	array[i].s = r;
95249-	r1 = r2;
95250-	r2 = r;
95251-    }
95252-    for (j = 0; j < 2 * N - size; j++) {
95253-	ctx->sfmt[j].s = array[j + size - N].s;
95254-    }
95255-    for (; i < size; i++) {
95256-	r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
95257-	array[i].s = r;
95258-	ctx->sfmt[j++].s = r;
95259-	r1 = r2;
95260-	r2 = r;
95261-    }
95262-}
95263-
95264-#ifndef ONLY64
95265-#if defined(__APPLE__)
95266-#define ALTI_SWAP (vector unsigned char) \
95267-	(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11)
95268-#else
95269-#define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}
95270-#endif
95271-/**
95272- * This function swaps the high and low 32 bits of 64-bit integers in the
95273- * user-specified array.
95274- *
95275- * @param array a 128-bit array to be swapped.
95276- * @param size size of 128-bit array.
95277- */
95278-static inline void swap(w128_t *array, int size) {
95279-    int i;
95280-    const vector unsigned char perm = ALTI_SWAP;
95281-
95282-    for (i = 0; i < size; i++) {
95283-	array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm);
95284-    }
95285-}
95286-#endif
95287-
95288-#endif
95289diff --git a/jemalloc/test/include/test/SFMT-params.h b/jemalloc/test/include/test/SFMT-params.h
95290deleted file mode 100644
95291index ade6622..0000000
95292--- a/jemalloc/test/include/test/SFMT-params.h
95293+++ /dev/null
95294@@ -1,132 +0,0 @@
95295-/*
95296- * This file derives from SFMT 1.3.3
95297- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
95298- * released under the terms of the following license:
95299- *
95300- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
95301- *   University. All rights reserved.
95302- *
95303- *   Redistribution and use in source and binary forms, with or without
95304- *   modification, are permitted provided that the following conditions are
95305- *   met:
95306- *
95307- *       * Redistributions of source code must retain the above copyright
95308- *         notice, this list of conditions and the following disclaimer.
95309- *       * Redistributions in binary form must reproduce the above
95310- *         copyright notice, this list of conditions and the following
95311- *         disclaimer in the documentation and/or other materials provided
95312- *         with the distribution.
95313- *       * Neither the name of the Hiroshima University nor the names of
95314- *         its contributors may be used to endorse or promote products
95315- *         derived from this software without specific prior written
95316- *         permission.
95317- *
95318- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
95319- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
95320- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
95321- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
95322- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
95323- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
95324- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
95325- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
95326- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
95327- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
95328- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95329- */
95330-#ifndef SFMT_PARAMS_H
95331-#define SFMT_PARAMS_H
95332-
95333-#if !defined(MEXP)
95334-#ifdef __GNUC__
95335-  #warning "MEXP is not defined. I assume MEXP is 19937."
95336-#endif
95337-  #define MEXP 19937
95338-#endif
95339-/*-----------------
95340-  BASIC DEFINITIONS
95341-  -----------------*/
95342-/** Mersenne Exponent. The period of the sequence
95343- *  is a multiple of 2^MEXP-1.
95344- * #define MEXP 19937 */
95345-/** SFMT generator has an internal state array of 128-bit integers,
95346- * and N is its size. */
95347-#define N (MEXP / 128 + 1)
95348-/** N32 is the size of internal state array when regarded as an array
95349- * of 32-bit integers.*/
95350-#define N32 (N * 4)
95351-/** N64 is the size of internal state array when regarded as an array
95352- * of 64-bit integers.*/
95353-#define N64 (N * 2)
95354-
95355-/*----------------------
95356-  the parameters of SFMT
95357-  following definitions are in paramsXXXX.h file.
95358-  ----------------------*/
95359-/** the pick up position of the array.
95360-#define POS1 122
95361-*/
95362-
95363-/** the parameter of shift left as four 32-bit registers.
95364-#define SL1 18
95365- */
95366-
95367-/** the parameter of shift left as one 128-bit register.
95368- * The 128-bit integer is shifted by (SL2 * 8) bits.
95369-#define SL2 1
95370-*/
95371-
95372-/** the parameter of shift right as four 32-bit registers.
95373-#define SR1 11
95374-*/
95375-
95376-/** the parameter of shift right as one 128-bit register.
95377- * The 128-bit integer is shifted by (SL2 * 8) bits.
95378-#define SR2 1
95379-*/
95380-
95381-/** A bitmask, used in the recursion.  These parameters are introduced
95382- * to break symmetry of SIMD.
95383-#define MSK1 0xdfffffefU
95384-#define MSK2 0xddfecb7fU
95385-#define MSK3 0xbffaffffU
95386-#define MSK4 0xbffffff6U
95387-*/
95388-
95389-/** These definitions are part of a 128-bit period certification vector.
95390-#define PARITY1	0x00000001U
95391-#define PARITY2	0x00000000U
95392-#define PARITY3	0x00000000U
95393-#define PARITY4	0xc98e126aU
95394-*/
95395-
95396-#if MEXP == 607
95397-  #include "test/SFMT-params607.h"
95398-#elif MEXP == 1279
95399-  #include "test/SFMT-params1279.h"
95400-#elif MEXP == 2281
95401-  #include "test/SFMT-params2281.h"
95402-#elif MEXP == 4253
95403-  #include "test/SFMT-params4253.h"
95404-#elif MEXP == 11213
95405-  #include "test/SFMT-params11213.h"
95406-#elif MEXP == 19937
95407-  #include "test/SFMT-params19937.h"
95408-#elif MEXP == 44497
95409-  #include "test/SFMT-params44497.h"
95410-#elif MEXP == 86243
95411-  #include "test/SFMT-params86243.h"
95412-#elif MEXP == 132049
95413-  #include "test/SFMT-params132049.h"
95414-#elif MEXP == 216091
95415-  #include "test/SFMT-params216091.h"
95416-#else
95417-#ifdef __GNUC__
95418-  #error "MEXP is not valid."
95419-  #undef MEXP
95420-#else
95421-  #undef MEXP
95422-#endif
95423-
95424-#endif
95425-
95426-#endif /* SFMT_PARAMS_H */
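/*
 * Editor's note -- illustrative arithmetic only: for the default MEXP of
 * 19937 chosen above, the size macros work out to (integer division)
 *
 *   N   = 19937 / 128 + 1 = 155 + 1 = 156   128-bit state words
 *   N32 = 156 * 4         = 624             32-bit words
 *   N64 = 156 * 2         = 312             64-bit words
 *
 * A tiny compile-time check of that arithmetic, assuming a C11 compiler:
 */
_Static_assert(19937 / 128 + 1 == 156, "N for MEXP 19937");
_Static_assert((19937 / 128 + 1) * 4 == 624, "N32 for MEXP 19937");
_Static_assert((19937 / 128 + 1) * 2 == 312, "N64 for MEXP 19937");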
95427diff --git a/jemalloc/test/include/test/SFMT-params11213.h b/jemalloc/test/include/test/SFMT-params11213.h
95428deleted file mode 100644
95429index 2994bd2..0000000
95430--- a/jemalloc/test/include/test/SFMT-params11213.h
95431+++ /dev/null
95432@@ -1,81 +0,0 @@
95433-/*
95434- * This file derives from SFMT 1.3.3
95435- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
95436- * released under the terms of the following license:
95437- *
95438- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
95439- *   University. All rights reserved.
95440- *
95441- *   Redistribution and use in source and binary forms, with or without
95442- *   modification, are permitted provided that the following conditions are
95443- *   met:
95444- *
95445- *       * Redistributions of source code must retain the above copyright
95446- *         notice, this list of conditions and the following disclaimer.
95447- *       * Redistributions in binary form must reproduce the above
95448- *         copyright notice, this list of conditions and the following
95449- *         disclaimer in the documentation and/or other materials provided
95450- *         with the distribution.
95451- *       * Neither the name of the Hiroshima University nor the names of
95452- *         its contributors may be used to endorse or promote products
95453- *         derived from this software without specific prior written
95454- *         permission.
95455- *
95456- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
95457- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
95458- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
95459- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
95460- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
95461- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
95462- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
95463- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
95464- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
95465- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
95466- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95467- */
95468-#ifndef SFMT_PARAMS11213_H
95469-#define SFMT_PARAMS11213_H
95470-
95471-#define POS1	68
95472-#define SL1	14
95473-#define SL2	3
95474-#define SR1	7
95475-#define SR2	3
95476-#define MSK1	0xeffff7fbU
95477-#define MSK2	0xffffffefU
95478-#define MSK3	0xdfdfbfffU
95479-#define MSK4	0x7fffdbfdU
95480-#define PARITY1	0x00000001U
95481-#define PARITY2	0x00000000U
95482-#define PARITY3	0xe8148000U
95483-#define PARITY4	0xd0c7afa3U
95484-
95485-
95486-/* PARAMETERS FOR ALTIVEC */
95487-#if defined(__APPLE__)	/* For OSX */
95488-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
95489-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
95490-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
95491-    #define ALTI_MSK64 \
95492-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
95493-    #define ALTI_SL2_PERM \
95494-	(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
95495-    #define ALTI_SL2_PERM64 \
95496-	(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
95497-    #define ALTI_SR2_PERM \
95498-	(vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
95499-    #define ALTI_SR2_PERM64 \
95500-	(vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
95501-#else	/* For OTHER OSs(Linux?) */
95502-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
95503-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
95504-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
95505-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
95506-    #define ALTI_SL2_PERM	{3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
95507-    #define ALTI_SL2_PERM64	{3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
95508-    #define ALTI_SR2_PERM	{5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
95509-    #define ALTI_SR2_PERM64	{13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
95510-#endif	/* For OSX */
95511-#define IDSTR	"SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd"
95512-
95513-#endif /* SFMT_PARAMS11213_H */
95514diff --git a/jemalloc/test/include/test/SFMT-params1279.h b/jemalloc/test/include/test/SFMT-params1279.h
95515deleted file mode 100644
95516index d7959f9..0000000
95517--- a/jemalloc/test/include/test/SFMT-params1279.h
95518+++ /dev/null
95519@@ -1,81 +0,0 @@
95520-/*
95521- * This file derives from SFMT 1.3.3
95522- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
95523- * released under the terms of the following license:
95524- *
95525- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
95526- *   University. All rights reserved.
95527- *
95528- *   Redistribution and use in source and binary forms, with or without
95529- *   modification, are permitted provided that the following conditions are
95530- *   met:
95531- *
95532- *       * Redistributions of source code must retain the above copyright
95533- *         notice, this list of conditions and the following disclaimer.
95534- *       * Redistributions in binary form must reproduce the above
95535- *         copyright notice, this list of conditions and the following
95536- *         disclaimer in the documentation and/or other materials provided
95537- *         with the distribution.
95538- *       * Neither the name of the Hiroshima University nor the names of
95539- *         its contributors may be used to endorse or promote products
95540- *         derived from this software without specific prior written
95541- *         permission.
95542- *
95543- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
95544- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
95545- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
95546- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
95547- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
95548- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
95549- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
95550- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
95551- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
95552- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
95553- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95554- */
95555-#ifndef SFMT_PARAMS1279_H
95556-#define SFMT_PARAMS1279_H
95557-
95558-#define POS1	7
95559-#define SL1	14
95560-#define SL2	3
95561-#define SR1	5
95562-#define SR2	1
95563-#define MSK1	0xf7fefffdU
95564-#define MSK2	0x7fefcfffU
95565-#define MSK3	0xaff3ef3fU
95566-#define MSK4	0xb5ffff7fU
95567-#define PARITY1	0x00000001U
95568-#define PARITY2	0x00000000U
95569-#define PARITY3	0x00000000U
95570-#define PARITY4	0x20000000U
95571-
95572-
95573-/* PARAMETERS FOR ALTIVEC */
95574-#if defined(__APPLE__)	/* For OSX */
95575-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
95576-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
95577-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
95578-    #define ALTI_MSK64 \
95579-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
95580-    #define ALTI_SL2_PERM \
95581-	(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
95582-    #define ALTI_SL2_PERM64 \
95583-	(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
95584-    #define ALTI_SR2_PERM \
95585-	(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
95586-    #define ALTI_SR2_PERM64 \
95587-	(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
95588-#else	/* For OTHER OSs(Linux?) */
95589-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
95590-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
95591-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
95592-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
95593-    #define ALTI_SL2_PERM	{3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
95594-    #define ALTI_SL2_PERM64	{3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
95595-    #define ALTI_SR2_PERM	{7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
95596-    #define ALTI_SR2_PERM64	{15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
95597-#endif	/* For OSX */
95598-#define IDSTR	"SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f"
95599-
95600-#endif /* SFMT_PARAMS1279_H */
95601diff --git a/jemalloc/test/include/test/SFMT-params132049.h b/jemalloc/test/include/test/SFMT-params132049.h
95602deleted file mode 100644
95603index a1dcec3..0000000
95604--- a/jemalloc/test/include/test/SFMT-params132049.h
95605+++ /dev/null
95606@@ -1,81 +0,0 @@
95607-/*
95608- * This file derives from SFMT 1.3.3
95609- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
95610- * released under the terms of the following license:
95611- *
95612- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
95613- *   University. All rights reserved.
95614- *
95615- *   Redistribution and use in source and binary forms, with or without
95616- *   modification, are permitted provided that the following conditions are
95617- *   met:
95618- *
95619- *       * Redistributions of source code must retain the above copyright
95620- *         notice, this list of conditions and the following disclaimer.
95621- *       * Redistributions in binary form must reproduce the above
95622- *         copyright notice, this list of conditions and the following
95623- *         disclaimer in the documentation and/or other materials provided
95624- *         with the distribution.
95625- *       * Neither the name of the Hiroshima University nor the names of
95626- *         its contributors may be used to endorse or promote products
95627- *         derived from this software without specific prior written
95628- *         permission.
95629- *
95630- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
95631- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
95632- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
95633- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
95634- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
95635- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
95636- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
95637- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
95638- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
95639- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
95640- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95641- */
95642-#ifndef SFMT_PARAMS132049_H
95643-#define SFMT_PARAMS132049_H
95644-
95645-#define POS1	110
95646-#define SL1	19
95647-#define SL2	1
95648-#define SR1	21
95649-#define SR2	1
95650-#define MSK1	0xffffbb5fU
95651-#define MSK2	0xfb6ebf95U
95652-#define MSK3	0xfffefffaU
95653-#define MSK4	0xcff77fffU
95654-#define PARITY1	0x00000001U
95655-#define PARITY2	0x00000000U
95656-#define PARITY3	0xcb520000U
95657-#define PARITY4	0xc7e91c7dU
95658-
95659-
95660-/* PARAMETERS FOR ALTIVEC */
95661-#if defined(__APPLE__)	/* For OSX */
95662-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
95663-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
95664-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
95665-    #define ALTI_MSK64 \
95666-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
95667-    #define ALTI_SL2_PERM \
95668-	(vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
95669-    #define ALTI_SL2_PERM64 \
95670-	(vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
95671-    #define ALTI_SR2_PERM \
95672-	(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
95673-    #define ALTI_SR2_PERM64 \
95674-	(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
95675-#else	/* For OTHER OSs(Linux?) */
95676-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
95677-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
95678-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
95679-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
95680-    #define ALTI_SL2_PERM	{1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
95681-    #define ALTI_SL2_PERM64	{1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
95682-    #define ALTI_SR2_PERM	{7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
95683-    #define ALTI_SR2_PERM64	{15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
95684-#endif	/* For OSX */
95685-#define IDSTR	"SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff"
95686-
95687-#endif /* SFMT_PARAMS132049_H */
95688diff --git a/jemalloc/test/include/test/SFMT-params19937.h b/jemalloc/test/include/test/SFMT-params19937.h
95689deleted file mode 100644
95690index fb92b4c..0000000
95691--- a/jemalloc/test/include/test/SFMT-params19937.h
95692+++ /dev/null
95693@@ -1,81 +0,0 @@
95694-/*
95695- * This file derives from SFMT 1.3.3
95696- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
95697- * released under the terms of the following license:
95698- *
95699- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
95700- *   University. All rights reserved.
95701- *
95702- *   Redistribution and use in source and binary forms, with or without
95703- *   modification, are permitted provided that the following conditions are
95704- *   met:
95705- *
95706- *       * Redistributions of source code must retain the above copyright
95707- *         notice, this list of conditions and the following disclaimer.
95708- *       * Redistributions in binary form must reproduce the above
95709- *         copyright notice, this list of conditions and the following
95710- *         disclaimer in the documentation and/or other materials provided
95711- *         with the distribution.
95712- *       * Neither the name of the Hiroshima University nor the names of
95713- *         its contributors may be used to endorse or promote products
95714- *         derived from this software without specific prior written
95715- *         permission.
95716- *
95717- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
95718- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
95719- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
95720- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
95721- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
95722- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
95723- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
95724- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
95725- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
95726- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
95727- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95728- */
95729-#ifndef SFMT_PARAMS19937_H
95730-#define SFMT_PARAMS19937_H
95731-
95732-#define POS1	122
95733-#define SL1	18
95734-#define SL2	1
95735-#define SR1	11
95736-#define SR2	1
95737-#define MSK1	0xdfffffefU
95738-#define MSK2	0xddfecb7fU
95739-#define MSK3	0xbffaffffU
95740-#define MSK4	0xbffffff6U
95741-#define PARITY1	0x00000001U
95742-#define PARITY2	0x00000000U
95743-#define PARITY3	0x00000000U
95744-#define PARITY4	0x13c9e684U
95745-
95746-
95747-/* PARAMETERS FOR ALTIVEC */
95748-#if defined(__APPLE__)	/* For OSX */
95749-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
95750-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
95751-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
95752-    #define ALTI_MSK64 \
95753-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
95754-    #define ALTI_SL2_PERM \
95755-	(vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
95756-    #define ALTI_SL2_PERM64 \
95757-	(vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
95758-    #define ALTI_SR2_PERM \
95759-	(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
95760-    #define ALTI_SR2_PERM64 \
95761-	(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
95762-#else	/* For OTHER OSs(Linux?) */
95763-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
95764-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
95765-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
95766-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
95767-    #define ALTI_SL2_PERM	{1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
95768-    #define ALTI_SL2_PERM64	{1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
95769-    #define ALTI_SR2_PERM	{7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
95770-    #define ALTI_SR2_PERM64	{15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
95771-#endif	/* For OSX */
95772-#define IDSTR	"SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6"
95773-
95774-#endif /* SFMT_PARAMS19937_H */
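/*
 * Editor's note -- illustrative only: the IDSTR strings in these parameter
 * headers appear to encode MEXP:POS1-SL1-SL2-SR1-SR2:MSK1-MSK2-MSK3-MSK4.
 * A quick runtime check of that layout against the macros above
 * (check_idstr_layout is a hypothetical helper; it assumes MEXP is defined,
 * as SFMT-params.h does before including this file):
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

static void check_idstr_layout(void) {
    char buf[128];
    snprintf(buf, sizeof(buf), "SFMT-%d:%d-%d-%d-%d-%d:%08x-%08x-%08x-%08x",
        MEXP, POS1, SL1, SL2, SR1, SR2, MSK1, MSK2, MSK3, MSK4);
    assert(strcmp(buf, IDSTR) == 0);    /* holds for the 19937 parameter set */
}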
95775diff --git a/jemalloc/test/include/test/SFMT-params216091.h b/jemalloc/test/include/test/SFMT-params216091.h
95776deleted file mode 100644
95777index 125ce28..0000000
95778--- a/jemalloc/test/include/test/SFMT-params216091.h
95779+++ /dev/null
95780@@ -1,81 +0,0 @@
95781-/*
95782- * This file derives from SFMT 1.3.3
95783- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
95784- * released under the terms of the following license:
95785- *
95786- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
95787- *   University. All rights reserved.
95788- *
95789- *   Redistribution and use in source and binary forms, with or without
95790- *   modification, are permitted provided that the following conditions are
95791- *   met:
95792- *
95793- *       * Redistributions of source code must retain the above copyright
95794- *         notice, this list of conditions and the following disclaimer.
95795- *       * Redistributions in binary form must reproduce the above
95796- *         copyright notice, this list of conditions and the following
95797- *         disclaimer in the documentation and/or other materials provided
95798- *         with the distribution.
95799- *       * Neither the name of the Hiroshima University nor the names of
95800- *         its contributors may be used to endorse or promote products
95801- *         derived from this software without specific prior written
95802- *         permission.
95803- *
95804- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
95805- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
95806- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
95807- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
95808- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
95809- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
95810- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
95811- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
95812- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
95813- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
95814- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95815- */
95816-#ifndef SFMT_PARAMS216091_H
95817-#define SFMT_PARAMS216091_H
95818-
95819-#define POS1	627
95820-#define SL1	11
95821-#define SL2	3
95822-#define SR1	10
95823-#define SR2	1
95824-#define MSK1	0xbff7bff7U
95825-#define MSK2	0xbfffffffU
95826-#define MSK3	0xbffffa7fU
95827-#define MSK4	0xffddfbfbU
95828-#define PARITY1	0xf8000001U
95829-#define PARITY2	0x89e80709U
95830-#define PARITY3	0x3bd2b64bU
95831-#define PARITY4	0x0c64b1e4U
95832-
95833-
95834-/* PARAMETERS FOR ALTIVEC */
95835-#if defined(__APPLE__)	/* For OSX */
95836-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
95837-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
95838-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
95839-    #define ALTI_MSK64 \
95840-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
95841-    #define ALTI_SL2_PERM \
95842-	(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
95843-    #define ALTI_SL2_PERM64 \
95844-	(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
95845-    #define ALTI_SR2_PERM \
95846-	(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
95847-    #define ALTI_SR2_PERM64 \
95848-	(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
95849-#else	/* For OTHER OSs(Linux?) */
95850-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
95851-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
95852-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
95853-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
95854-    #define ALTI_SL2_PERM	{3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
95855-    #define ALTI_SL2_PERM64	{3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
95856-    #define ALTI_SR2_PERM	{7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
95857-    #define ALTI_SR2_PERM64	{15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
95858-#endif	/* For OSX */
95859-#define IDSTR	"SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb"
95860-
95861-#endif /* SFMT_PARAMS216091_H */
95862diff --git a/jemalloc/test/include/test/SFMT-params2281.h b/jemalloc/test/include/test/SFMT-params2281.h
95863deleted file mode 100644
95864index 0ef85c4..0000000
95865--- a/jemalloc/test/include/test/SFMT-params2281.h
95866+++ /dev/null
95867@@ -1,81 +0,0 @@
95868-/*
95869- * This file derives from SFMT 1.3.3
95870- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
95871- * released under the terms of the following license:
95872- *
95873- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
95874- *   University. All rights reserved.
95875- *
95876- *   Redistribution and use in source and binary forms, with or without
95877- *   modification, are permitted provided that the following conditions are
95878- *   met:
95879- *
95880- *       * Redistributions of source code must retain the above copyright
95881- *         notice, this list of conditions and the following disclaimer.
95882- *       * Redistributions in binary form must reproduce the above
95883- *         copyright notice, this list of conditions and the following
95884- *         disclaimer in the documentation and/or other materials provided
95885- *         with the distribution.
95886- *       * Neither the name of the Hiroshima University nor the names of
95887- *         its contributors may be used to endorse or promote products
95888- *         derived from this software without specific prior written
95889- *         permission.
95890- *
95891- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
95892- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
95893- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
95894- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
95895- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
95896- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
95897- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
95898- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
95899- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
95900- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
95901- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95902- */
95903-#ifndef SFMT_PARAMS2281_H
95904-#define SFMT_PARAMS2281_H
95905-
95906-#define POS1	12
95907-#define SL1	19
95908-#define SL2	1
95909-#define SR1	5
95910-#define SR2	1
95911-#define MSK1	0xbff7ffbfU
95912-#define MSK2	0xfdfffffeU
95913-#define MSK3	0xf7ffef7fU
95914-#define MSK4	0xf2f7cbbfU
95915-#define PARITY1	0x00000001U
95916-#define PARITY2	0x00000000U
95917-#define PARITY3	0x00000000U
95918-#define PARITY4	0x41dfa600U
95919-
95920-
95921-/* PARAMETERS FOR ALTIVEC */
95922-#if defined(__APPLE__)	/* For OSX */
95923-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
95924-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
95925-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
95926-    #define ALTI_MSK64 \
95927-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
95928-    #define ALTI_SL2_PERM \
95929-	(vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
95930-    #define ALTI_SL2_PERM64 \
95931-	(vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
95932-    #define ALTI_SR2_PERM \
95933-	(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
95934-    #define ALTI_SR2_PERM64 \
95935-	(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
95936-#else	/* For OTHER OSs(Linux?) */
95937-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
95938-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
95939-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
95940-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
95941-    #define ALTI_SL2_PERM	{1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
95942-    #define ALTI_SL2_PERM64	{1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
95943-    #define ALTI_SR2_PERM	{7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
95944-    #define ALTI_SR2_PERM64	{15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
95945-#endif	/* For OSX */
95946-#define IDSTR	"SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf"
95947-
95948-#endif /* SFMT_PARAMS2281_H */
95949diff --git a/jemalloc/test/include/test/SFMT-params4253.h b/jemalloc/test/include/test/SFMT-params4253.h
95950deleted file mode 100644
95951index 9f07bc6..0000000
95952--- a/jemalloc/test/include/test/SFMT-params4253.h
95953+++ /dev/null
95954@@ -1,81 +0,0 @@
95955-/*
95956- * This file derives from SFMT 1.3.3
95957- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
95958- * released under the terms of the following license:
95959- *
95960- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
95961- *   University. All rights reserved.
95962- *
95963- *   Redistribution and use in source and binary forms, with or without
95964- *   modification, are permitted provided that the following conditions are
95965- *   met:
95966- *
95967- *       * Redistributions of source code must retain the above copyright
95968- *         notice, this list of conditions and the following disclaimer.
95969- *       * Redistributions in binary form must reproduce the above
95970- *         copyright notice, this list of conditions and the following
95971- *         disclaimer in the documentation and/or other materials provided
95972- *         with the distribution.
95973- *       * Neither the name of the Hiroshima University nor the names of
95974- *         its contributors may be used to endorse or promote products
95975- *         derived from this software without specific prior written
95976- *         permission.
95977- *
95978- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
95979- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
95980- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
95981- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
95982- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
95983- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
95984- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
95985- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
95986- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
95987- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
95988- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95989- */
95990-#ifndef SFMT_PARAMS4253_H
95991-#define SFMT_PARAMS4253_H
95992-
95993-#define POS1	17
95994-#define SL1	20
95995-#define SL2	1
95996-#define SR1	7
95997-#define SR2	1
95998-#define MSK1	0x9f7bffffU
95999-#define MSK2	0x9fffff5fU
96000-#define MSK3	0x3efffffbU
96001-#define MSK4	0xfffff7bbU
96002-#define PARITY1	0xa8000001U
96003-#define PARITY2	0xaf5390a3U
96004-#define PARITY3	0xb740b3f8U
96005-#define PARITY4	0x6c11486dU
96006-
96007-
96008-/* PARAMETERS FOR ALTIVEC */
96009-#if defined(__APPLE__)	/* For OSX */
96010-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
96011-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
96012-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
96013-    #define ALTI_MSK64 \
96014-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
96015-    #define ALTI_SL2_PERM \
96016-	(vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
96017-    #define ALTI_SL2_PERM64 \
96018-	(vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
96019-    #define ALTI_SR2_PERM \
96020-	(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
96021-    #define ALTI_SR2_PERM64 \
96022-	(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
96023-#else	/* For OTHER OSs(Linux?) */
96024-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
96025-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
96026-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
96027-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
96028-    #define ALTI_SL2_PERM	{1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
96029-    #define ALTI_SL2_PERM64	{1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
96030-    #define ALTI_SR2_PERM	{7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
96031-    #define ALTI_SR2_PERM64	{15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
96032-#endif	/* For OSX */
96033-#define IDSTR	"SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb"
96034-
96035-#endif /* SFMT_PARAMS4253_H */
96036diff --git a/jemalloc/test/include/test/SFMT-params44497.h b/jemalloc/test/include/test/SFMT-params44497.h
96037deleted file mode 100644
96038index 85598fe..0000000
96039--- a/jemalloc/test/include/test/SFMT-params44497.h
96040+++ /dev/null
96041@@ -1,81 +0,0 @@
96042-/*
96043- * This file derives from SFMT 1.3.3
96044- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
96045- * released under the terms of the following license:
96046- *
96047- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
96048- *   University. All rights reserved.
96049- *
96050- *   Redistribution and use in source and binary forms, with or without
96051- *   modification, are permitted provided that the following conditions are
96052- *   met:
96053- *
96054- *       * Redistributions of source code must retain the above copyright
96055- *         notice, this list of conditions and the following disclaimer.
96056- *       * Redistributions in binary form must reproduce the above
96057- *         copyright notice, this list of conditions and the following
96058- *         disclaimer in the documentation and/or other materials provided
96059- *         with the distribution.
96060- *       * Neither the name of the Hiroshima University nor the names of
96061- *         its contributors may be used to endorse or promote products
96062- *         derived from this software without specific prior written
96063- *         permission.
96064- *
96065- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
96066- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
96067- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
96068- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
96069- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
96070- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
96071- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
96072- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
96073- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
96074- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
96075- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
96076- */
96077-#ifndef SFMT_PARAMS44497_H
96078-#define SFMT_PARAMS44497_H
96079-
96080-#define POS1	330
96081-#define SL1	5
96082-#define SL2	3
96083-#define SR1	9
96084-#define SR2	3
96085-#define MSK1	0xeffffffbU
96086-#define MSK2	0xdfbebfffU
96087-#define MSK3	0xbfbf7befU
96088-#define MSK4	0x9ffd7bffU
96089-#define PARITY1	0x00000001U
96090-#define PARITY2	0x00000000U
96091-#define PARITY3	0xa3ac4000U
96092-#define PARITY4	0xecc1327aU
96093-
96094-
96095-/* PARAMETERS FOR ALTIVEC */
96096-#if defined(__APPLE__)	/* For OSX */
96097-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
96098-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
96099-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
96100-    #define ALTI_MSK64 \
96101-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
96102-    #define ALTI_SL2_PERM \
96103-	(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
96104-    #define ALTI_SL2_PERM64 \
96105-	(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
96106-    #define ALTI_SR2_PERM \
96107-	(vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
96108-    #define ALTI_SR2_PERM64 \
96109-	(vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
96110-#else	/* For OTHER OSs(Linux?) */
96111-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
96112-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
96113-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
96114-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
96115-    #define ALTI_SL2_PERM	{3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
96116-    #define ALTI_SL2_PERM64	{3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
96117-    #define ALTI_SR2_PERM	{5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
96118-    #define ALTI_SR2_PERM64	{13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
96119-#endif	/* For OSX */
96120-#define IDSTR	"SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff"
96121-
96122-#endif /* SFMT_PARAMS44497_H */
96123diff --git a/jemalloc/test/include/test/SFMT-params607.h b/jemalloc/test/include/test/SFMT-params607.h
96124deleted file mode 100644
96125index bc76485..0000000
96126--- a/jemalloc/test/include/test/SFMT-params607.h
96127+++ /dev/null
96128@@ -1,81 +0,0 @@
96129-/*
96130- * This file derives from SFMT 1.3.3
96131- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
96132- * released under the terms of the following license:
96133- *
96134- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
96135- *   University. All rights reserved.
96136- *
96137- *   Redistribution and use in source and binary forms, with or without
96138- *   modification, are permitted provided that the following conditions are
96139- *   met:
96140- *
96141- *       * Redistributions of source code must retain the above copyright
96142- *         notice, this list of conditions and the following disclaimer.
96143- *       * Redistributions in binary form must reproduce the above
96144- *         copyright notice, this list of conditions and the following
96145- *         disclaimer in the documentation and/or other materials provided
96146- *         with the distribution.
96147- *       * Neither the name of the Hiroshima University nor the names of
96148- *         its contributors may be used to endorse or promote products
96149- *         derived from this software without specific prior written
96150- *         permission.
96151- *
96152- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
96153- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
96154- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
96155- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
96156- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
96157- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
96158- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
96159- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
96160- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
96161- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
96162- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
96163- */
96164-#ifndef SFMT_PARAMS607_H
96165-#define SFMT_PARAMS607_H
96166-
96167-#define POS1	2
96168-#define SL1	15
96169-#define SL2	3
96170-#define SR1	13
96171-#define SR2	3
96172-#define MSK1	0xfdff37ffU
96173-#define MSK2	0xef7f3f7dU
96174-#define MSK3	0xff777b7dU
96175-#define MSK4	0x7ff7fb2fU
96176-#define PARITY1	0x00000001U
96177-#define PARITY2	0x00000000U
96178-#define PARITY3	0x00000000U
96179-#define PARITY4	0x5986f054U
96180-
96181-
96182-/* PARAMETERS FOR ALTIVEC */
96183-#if defined(__APPLE__)	/* For OSX */
96184-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
96185-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
96186-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
96187-    #define ALTI_MSK64 \
96188-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
96189-    #define ALTI_SL2_PERM \
96190-	(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
96191-    #define ALTI_SL2_PERM64 \
96192-	(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
96193-    #define ALTI_SR2_PERM \
96194-	(vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
96195-    #define ALTI_SR2_PERM64 \
96196-	(vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
96197-#else	/* For OTHER OSs(Linux?) */
96198-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
96199-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
96200-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
96201-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
96202-    #define ALTI_SL2_PERM	{3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
96203-    #define ALTI_SL2_PERM64	{3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
96204-    #define ALTI_SR2_PERM	{5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
96205-    #define ALTI_SR2_PERM64	{13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
96206-#endif	/* For OSX */
96207-#define IDSTR	"SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f"
96208-
96209-#endif /* SFMT_PARAMS607_H */
96210diff --git a/jemalloc/test/include/test/SFMT-params86243.h b/jemalloc/test/include/test/SFMT-params86243.h
96211deleted file mode 100644
96212index 5e4d783..0000000
96213--- a/jemalloc/test/include/test/SFMT-params86243.h
96214+++ /dev/null
96215@@ -1,81 +0,0 @@
96216-/*
96217- * This file derives from SFMT 1.3.3
96218- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
96219- * released under the terms of the following license:
96220- *
96221- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
96222- *   University. All rights reserved.
96223- *
96224- *   Redistribution and use in source and binary forms, with or without
96225- *   modification, are permitted provided that the following conditions are
96226- *   met:
96227- *
96228- *       * Redistributions of source code must retain the above copyright
96229- *         notice, this list of conditions and the following disclaimer.
96230- *       * Redistributions in binary form must reproduce the above
96231- *         copyright notice, this list of conditions and the following
96232- *         disclaimer in the documentation and/or other materials provided
96233- *         with the distribution.
96234- *       * Neither the name of the Hiroshima University nor the names of
96235- *         its contributors may be used to endorse or promote products
96236- *         derived from this software without specific prior written
96237- *         permission.
96238- *
96239- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
96240- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
96241- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
96242- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
96243- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
96244- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
96245- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
96246- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
96247- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
96248- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
96249- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
96250- */
96251-#ifndef SFMT_PARAMS86243_H
96252-#define SFMT_PARAMS86243_H
96253-
96254-#define POS1	366
96255-#define SL1	6
96256-#define SL2	7
96257-#define SR1	19
96258-#define SR2	1
96259-#define MSK1	0xfdbffbffU
96260-#define MSK2	0xbff7ff3fU
96261-#define MSK3	0xfd77efffU
96262-#define MSK4	0xbf9ff3ffU
96263-#define PARITY1	0x00000001U
96264-#define PARITY2	0x00000000U
96265-#define PARITY3	0x00000000U
96266-#define PARITY4	0xe9528d85U
96267-
96268-
96269-/* PARAMETERS FOR ALTIVEC */
96270-#if defined(__APPLE__)	/* For OSX */
96271-    #define ALTI_SL1	(vector unsigned int)(SL1, SL1, SL1, SL1)
96272-    #define ALTI_SR1	(vector unsigned int)(SR1, SR1, SR1, SR1)
96273-    #define ALTI_MSK	(vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
96274-    #define ALTI_MSK64 \
96275-	(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
96276-    #define ALTI_SL2_PERM \
96277-	(vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6)
96278-    #define ALTI_SL2_PERM64 \
96279-	(vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6)
96280-    #define ALTI_SR2_PERM \
96281-	(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
96282-    #define ALTI_SR2_PERM64 \
96283-	(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
96284-#else	/* For OTHER OSs(Linux?) */
96285-    #define ALTI_SL1	{SL1, SL1, SL1, SL1}
96286-    #define ALTI_SR1	{SR1, SR1, SR1, SR1}
96287-    #define ALTI_MSK	{MSK1, MSK2, MSK3, MSK4}
96288-    #define ALTI_MSK64	{MSK2, MSK1, MSK4, MSK3}
96289-    #define ALTI_SL2_PERM	{25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6}
96290-    #define ALTI_SL2_PERM64	{7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6}
96291-    #define ALTI_SR2_PERM	{7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
96292-    #define ALTI_SR2_PERM64	{15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
96293-#endif	/* For OSX */
96294-#define IDSTR	"SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff"
96295-
96296-#endif /* SFMT_PARAMS86243_H */
96297diff --git a/jemalloc/test/include/test/SFMT-sse2.h b/jemalloc/test/include/test/SFMT-sse2.h
96298deleted file mode 100644
96299index 169ad55..0000000
96300--- a/jemalloc/test/include/test/SFMT-sse2.h
96301+++ /dev/null
96302@@ -1,157 +0,0 @@
96303-/*
96304- * This file derives from SFMT 1.3.3
96305- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
96306- * released under the terms of the following license:
96307- *
96308- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
96309- *   University. All rights reserved.
96310- *
96311- *   Redistribution and use in source and binary forms, with or without
96312- *   modification, are permitted provided that the following conditions are
96313- *   met:
96314- *
96315- *       * Redistributions of source code must retain the above copyright
96316- *         notice, this list of conditions and the following disclaimer.
96317- *       * Redistributions in binary form must reproduce the above
96318- *         copyright notice, this list of conditions and the following
96319- *         disclaimer in the documentation and/or other materials provided
96320- *         with the distribution.
96321- *       * Neither the name of the Hiroshima University nor the names of
96322- *         its contributors may be used to endorse or promote products
96323- *         derived from this software without specific prior written
96324- *         permission.
96325- *
96326- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
96327- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
96328- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
96329- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
96330- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
96331- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
96332- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
96333- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
96334- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
96335- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
96336- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
96337- */
96338-/**
96339- * @file  SFMT-sse2.h
96340- * @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2
96341- *
96342- * @author Mutsuo Saito (Hiroshima University)
96343- * @author Makoto Matsumoto (Hiroshima University)
96344- *
96345- * @note We assume LITTLE ENDIAN in this file
96346- *
96347- * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
96348- * University. All rights reserved.
96349- *
96350- * The new BSD License is applied to this software, see LICENSE.txt
96351- */
96352-
96353-#ifndef SFMT_SSE2_H
96354-#define SFMT_SSE2_H
96355-
96356-/**
96357- * This function represents the recursion formula.
96358- * @param a a 128-bit part of the internal state array
96359- * @param b a 128-bit part of the internal state array
96360- * @param c a 128-bit part of the internal state array
96361- * @param d a 128-bit part of the internal state array
96362- * @param mask 128-bit mask
96363- * @return output
96364- */
96365-JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b,
96366-				   __m128i c, __m128i d, __m128i mask) {
96367-    __m128i v, x, y, z;
96368-
96369-    x = _mm_load_si128(a);
96370-    y = _mm_srli_epi32(*b, SR1);
96371-    z = _mm_srli_si128(c, SR2);
96372-    v = _mm_slli_epi32(d, SL1);
96373-    z = _mm_xor_si128(z, x);
96374-    z = _mm_xor_si128(z, v);
96375-    x = _mm_slli_si128(x, SL2);
96376-    y = _mm_and_si128(y, mask);
96377-    z = _mm_xor_si128(z, x);
96378-    z = _mm_xor_si128(z, y);
96379-    return z;
96380-}
96381-
96382-/**
96383- * This function fills the internal state array with pseudorandom
96384- * integers.
96385- */
96386-static inline void gen_rand_all(sfmt_t *ctx) {
96387-    int i;
96388-    __m128i r, r1, r2, mask;
96389-    mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
96390-
96391-    r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);
96392-    r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);
96393-    for (i = 0; i < N - POS1; i++) {
96394-	r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2,
96395-	  mask);
96396-	_mm_store_si128(&ctx->sfmt[i].si, r);
96397-	r1 = r2;
96398-	r2 = r;
96399-    }
96400-    for (; i < N; i++) {
96401-	r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2,
96402-	  mask);
96403-	_mm_store_si128(&ctx->sfmt[i].si, r);
96404-	r1 = r2;
96405-	r2 = r;
96406-    }
96407-}
96408-
96409-/**
96410- * This function fills the user-specified array with pseudorandom
96411- * integers.
96412- *
96413- * @param array a 128-bit array to be filled with pseudorandom numbers.
96414- * @param size number of 128-bit pseudorandom numbers to be generated.
96415- */
96416-static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
96417-    int i, j;
96418-    __m128i r, r1, r2, mask;
96419-    mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
96420-
96421-    r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);
96422-    r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);
96423-    for (i = 0; i < N - POS1; i++) {
96424-	r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2,
96425-	  mask);
96426-	_mm_store_si128(&array[i].si, r);
96427-	r1 = r2;
96428-	r2 = r;
96429-    }
96430-    for (; i < N; i++) {
96431-	r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2,
96432-	  mask);
96433-	_mm_store_si128(&array[i].si, r);
96434-	r1 = r2;
96435-	r2 = r;
96436-    }
96437-    /* main loop */
96438-    for (; i < size - N; i++) {
96439-	r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2,
96440-			 mask);
96441-	_mm_store_si128(&array[i].si, r);
96442-	r1 = r2;
96443-	r2 = r;
96444-    }
96445-    for (j = 0; j < 2 * N - size; j++) {
96446-	r = _mm_load_si128(&array[j + size - N].si);
96447-	_mm_store_si128(&ctx->sfmt[j].si, r);
96448-    }
96449-    for (; i < size; i++) {
96450-	r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2,
96451-			 mask);
96452-	_mm_store_si128(&array[i].si, r);
96453-	_mm_store_si128(&ctx->sfmt[j++].si, r);
96454-	r1 = r2;
96455-	r2 = r;
96456-    }
96457-}
96458-
96459-#endif
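/*
 * Editor's note -- a hedged portable sketch, not part of the deleted header:
 * mm_recursion above computes, for each 128-bit state word,
 *
 *   r = a ^ (a <<128 (SL2*8)) ^ ((b >>32 SR1) & MSK) ^ (c >>128 (SR2*8))
 *         ^ (d <<32 SL1)
 *
 * where <<128 / >>128 shift the whole 128-bit register by bytes and
 * <<32 / >>32 act on each 32-bit lane independently.  A scalar rendering of
 * the same formula, in the spirit of the portable (non-SIMD) SFMT reference
 * code and using a hypothetical four-lane w128_sketch_t, looks roughly like:
 */
#include <stdint.h>

typedef union { uint32_t u[4]; } w128_sketch_t;     /* stand-in for w128_t */

/* shift a 128-bit value left by (shift * 8) bits, with 0 < shift < 8 */
static inline void lshift128_sketch(w128_sketch_t *out,
    const w128_sketch_t *in, int shift) {
    uint64_t th = ((uint64_t)in->u[3] << 32) | in->u[2];
    uint64_t tl = ((uint64_t)in->u[1] << 32) | in->u[0];
    uint64_t oh = (th << (shift * 8)) | (tl >> (64 - shift * 8));
    uint64_t ol = tl << (shift * 8);
    out->u[0] = (uint32_t)ol;  out->u[1] = (uint32_t)(ol >> 32);
    out->u[2] = (uint32_t)oh;  out->u[3] = (uint32_t)(oh >> 32);
}

/* shift a 128-bit value right by (shift * 8) bits, with 0 < shift < 8 */
static inline void rshift128_sketch(w128_sketch_t *out,
    const w128_sketch_t *in, int shift) {
    uint64_t th = ((uint64_t)in->u[3] << 32) | in->u[2];
    uint64_t tl = ((uint64_t)in->u[1] << 32) | in->u[0];
    uint64_t ol = (tl >> (shift * 8)) | (th << (64 - shift * 8));
    uint64_t oh = th >> (shift * 8);
    out->u[0] = (uint32_t)ol;  out->u[1] = (uint32_t)(ol >> 32);
    out->u[2] = (uint32_t)oh;  out->u[3] = (uint32_t)(oh >> 32);
}

static inline void recursion_sketch(w128_sketch_t *r, const w128_sketch_t *a,
    const w128_sketch_t *b, const w128_sketch_t *c, const w128_sketch_t *d) {
    const uint32_t msk[4] = { MSK1, MSK2, MSK3, MSK4 };
    w128_sketch_t x, y;
    lshift128_sketch(&x, a, SL2);       /* a shifted left by SL2 bytes */
    rshift128_sketch(&y, c, SR2);       /* c shifted right by SR2 bytes */
    for (int k = 0; k < 4; k++) {
        r->u[k] = a->u[k] ^ x.u[k] ^ ((b->u[k] >> SR1) & msk[k]) ^ y.u[k]
            ^ (d->u[k] << SL1);
    }
}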
96460diff --git a/jemalloc/test/include/test/SFMT.h b/jemalloc/test/include/test/SFMT.h
96461deleted file mode 100644
96462index 863fc55..0000000
96463--- a/jemalloc/test/include/test/SFMT.h
96464+++ /dev/null
96465@@ -1,146 +0,0 @@
96466-/*
96467- * This file derives from SFMT 1.3.3
96468- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
96469- * released under the terms of the following license:
96470- *
96471- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
96472- *   University. All rights reserved.
96473- *
96474- *   Redistribution and use in source and binary forms, with or without
96475- *   modification, are permitted provided that the following conditions are
96476- *   met:
96477- *
96478- *       * Redistributions of source code must retain the above copyright
96479- *         notice, this list of conditions and the following disclaimer.
96480- *       * Redistributions in binary form must reproduce the above
96481- *         copyright notice, this list of conditions and the following
96482- *         disclaimer in the documentation and/or other materials provided
96483- *         with the distribution.
96484- *       * Neither the name of the Hiroshima University nor the names of
96485- *         its contributors may be used to endorse or promote products
96486- *         derived from this software without specific prior written
96487- *         permission.
96488- *
96489- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
96490- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
96491- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
96492- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
96493- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
96494- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
96495- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
96496- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
96497- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
96498- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
96499- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
96500- */
96501-/**
96502- * @file SFMT.h
96503- *
96504- * @brief SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom
96505- * number generator
96506- *
96507- * @author Mutsuo Saito (Hiroshima University)
96508- * @author Makoto Matsumoto (Hiroshima University)
96509- *
96510- * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
96511- * University. All rights reserved.
96512- *
96513- * The new BSD License is applied to this software.
96514- * see LICENSE.txt
96515- *
96516- * @note We assume that your system has inttypes.h.  If your system
96517- * doesn't have inttypes.h, you have to typedef uint32_t and uint64_t,
96518- * and you have to define PRIu64 and PRIx64 in this file as follows:
96519- * @verbatim
96520- typedef unsigned int uint32_t
96521- typedef unsigned long long uint64_t
96522- #define PRIu64 "llu"
96523- #define PRIx64 "llx"
96524-@endverbatim
96525- * uint32_t must be exactly 32-bit unsigned integer type (no more, no
96526- * less), and uint64_t must be exactly 64-bit unsigned integer type.
96527- * PRIu64 and PRIx64 are used with the printf family of functions to print
96528- * a 64-bit unsigned integer in decimal and hexadecimal format, respectively.
96529- */
96530-
96531-#ifndef SFMT_H
96532-#define SFMT_H
96533-
96534-typedef struct sfmt_s sfmt_t;
96535-
96536-uint32_t gen_rand32(sfmt_t *ctx);
96537-uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit);
96538-uint64_t gen_rand64(sfmt_t *ctx);
96539-uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit);
96540-void fill_array32(sfmt_t *ctx, uint32_t *array, int size);
96541-void fill_array64(sfmt_t *ctx, uint64_t *array, int size);
96542-sfmt_t *init_gen_rand(uint32_t seed);
96543-sfmt_t *init_by_array(uint32_t *init_key, int key_length);
96544-void fini_gen_rand(sfmt_t *ctx);
96545-const char *get_idstring(void);
96546-int get_min_array_size32(void);
96547-int get_min_array_size64(void);
96548-
96549-/* These real versions are due to Isaku Wada */
96550-/** generates a random number on [0,1]-real-interval */
96551-static inline double to_real1(uint32_t v) {
96552-    return v * (1.0/4294967295.0);
96553-    /* divided by 2^32-1 */
96554-}
96555-
96556-/** generates a random number on [0,1]-real-interval */
96557-static inline double genrand_real1(sfmt_t *ctx) {
96558-    return to_real1(gen_rand32(ctx));
96559-}
96560-
96561-/** generates a random number on [0,1)-real-interval */
96562-static inline double to_real2(uint32_t v) {
96563-    return v * (1.0/4294967296.0);
96564-    /* divided by 2^32 */
96565-}
96566-
96567-/** generates a random number on [0,1)-real-interval */
96568-static inline double genrand_real2(sfmt_t *ctx) {
96569-    return to_real2(gen_rand32(ctx));
96570-}
96571-
96572-/** generates a random number on (0,1)-real-interval */
96573-static inline double to_real3(uint32_t v) {
96574-    return (((double)v) + 0.5)*(1.0/4294967296.0);
96575-    /* divided by 2^32 */
96576-}
96577-
96578-/** generates a random number on (0,1)-real-interval */
96579-static inline double genrand_real3(sfmt_t *ctx) {
96580-    return to_real3(gen_rand32(ctx));
96581-}
96582-/** These real versions are due to Isaku Wada */
96583-
96584-/** generates a random number on [0,1) with 53-bit resolution*/
96585-static inline double to_res53(uint64_t v) {
96586-    return v * (1.0/18446744073709551616.0L);
96587-}
96588-
96589-/** generates a random number on [0,1) with 53-bit resolution from two
96590- * 32 bit integers */
96591-static inline double to_res53_mix(uint32_t x, uint32_t y) {
96592-    return to_res53(x | ((uint64_t)y << 32));
96593-}
96594-
96595-/** generates a random number on [0,1) with 53-bit resolution
96596- */
96597-static inline double genrand_res53(sfmt_t *ctx) {
96598-    return to_res53(gen_rand64(ctx));
96599-}
96600-
96601-/** generates a random number on [0,1) with 53-bit resolution
96602-    using 32-bit integers.
96603- */
96604-static inline double genrand_res53_mix(sfmt_t *ctx) {
96605-    uint32_t x, y;
96606-
96607-    x = gen_rand32(ctx);
96608-    y = gen_rand32(ctx);
96609-    return to_res53_mix(x, y);
96610-}
96611-#endif
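A minimal usage sketch of the SFMT API declared above, assuming the SFMT implementation is linked into the test binary and that MEXP is defined before the header is included (the jemalloc test harness uses MEXP 19937); the include path, seed value, and output formatting are illustrative assumptions only.

#include <stdio.h>
#include <stdint.h>
#define MEXP 19937
#include "test/SFMT.h"

int
main(void) {
	sfmt_t *ctx = init_gen_rand(42);	/* arbitrary seed */
	uint32_t r = gen_rand32(ctx);		/* raw 32-bit draw */
	double u = genrand_real2(ctx);		/* uniform double in [0,1) */
	printf("%u %.17g\n", (unsigned)r, u);
	fini_gen_rand(ctx);
	return 0;
}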
96612diff --git a/jemalloc/test/include/test/arena_util.h b/jemalloc/test/include/test/arena_util.h
96613deleted file mode 100644
96614index 9a41dac..0000000
96615--- a/jemalloc/test/include/test/arena_util.h
96616+++ /dev/null
96617@@ -1,155 +0,0 @@
96618-static inline unsigned
96619-do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
96620-	unsigned arena_ind;
96621-	size_t sz = sizeof(unsigned);
96622-	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
96623-	    0, "Unexpected mallctl() failure");
96624-	size_t mib[3];
96625-	size_t miblen = sizeof(mib)/sizeof(size_t);
96626-
96627-	expect_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen),
96628-	    0, "Unexpected mallctlnametomib() failure");
96629-	mib[1] = (size_t)arena_ind;
96630-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
96631-	    (void *)&dirty_decay_ms, sizeof(dirty_decay_ms)), 0,
96632-	    "Unexpected mallctlbymib() failure");
96633-
96634-	expect_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen),
96635-	    0, "Unexpected mallctlnametomib() failure");
96636-	mib[1] = (size_t)arena_ind;
96637-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
96638-	    (void *)&muzzy_decay_ms, sizeof(muzzy_decay_ms)), 0,
96639-	    "Unexpected mallctlbymib() failure");
96640-
96641-	return arena_ind;
96642-}
96643-
96644-static inline void
96645-do_arena_destroy(unsigned arena_ind) {
96646-	/*
96647-	 * For convenience, flush tcache in case there are cached items.
96648-	 * However, do not assert success, since the tcache may be disabled.
96649-	 */
96650-	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
96651-
96652-	size_t mib[3];
96653-	size_t miblen = sizeof(mib)/sizeof(size_t);
96654-	expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
96655-	    "Unexpected mallctlnametomib() failure");
96656-	mib[1] = (size_t)arena_ind;
96657-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
96658-	    "Unexpected mallctlbymib() failure");
96659-}
96660-
96661-static inline void
96662-do_epoch(void) {
96663-	uint64_t epoch = 1;
96664-	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
96665-	    0, "Unexpected mallctl() failure");
96666-}
96667-
96668-static inline void
96669-do_purge(unsigned arena_ind) {
96670-	size_t mib[3];
96671-	size_t miblen = sizeof(mib)/sizeof(size_t);
96672-	expect_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
96673-	    "Unexpected mallctlnametomib() failure");
96674-	mib[1] = (size_t)arena_ind;
96675-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
96676-	    "Unexpected mallctlbymib() failure");
96677-}
96678-
96679-static inline void
96680-do_decay(unsigned arena_ind) {
96681-	size_t mib[3];
96682-	size_t miblen = sizeof(mib)/sizeof(size_t);
96683-	expect_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
96684-	    "Unexpected mallctlnametomib() failure");
96685-	mib[1] = (size_t)arena_ind;
96686-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
96687-	    "Unexpected mallctlbymib() failure");
96688-}
96689-
96690-static inline uint64_t
96691-get_arena_npurge_impl(const char *mibname, unsigned arena_ind) {
96692-	size_t mib[4];
96693-	size_t miblen = sizeof(mib)/sizeof(size_t);
96694-	expect_d_eq(mallctlnametomib(mibname, mib, &miblen), 0,
96695-	    "Unexpected mallctlnametomib() failure");
96696-	mib[2] = (size_t)arena_ind;
96697-	uint64_t npurge = 0;
96698-	size_t sz = sizeof(npurge);
96699-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&npurge, &sz, NULL, 0),
96700-	    config_stats ? 0 : ENOENT, "Unexpected mallctlbymib() failure");
96701-	return npurge;
96702-}
96703-
96704-static inline uint64_t
96705-get_arena_dirty_npurge(unsigned arena_ind) {
96706-	do_epoch();
96707-	return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind);
96708-}
96709-
96710-static inline uint64_t
96711-get_arena_dirty_purged(unsigned arena_ind) {
96712-	do_epoch();
96713-	return get_arena_npurge_impl("stats.arenas.0.dirty_purged", arena_ind);
96714-}
96715-
96716-static inline uint64_t
96717-get_arena_muzzy_npurge(unsigned arena_ind) {
96718-	do_epoch();
96719-	return get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
96720-}
96721-
96722-static inline uint64_t
96723-get_arena_npurge(unsigned arena_ind) {
96724-	do_epoch();
96725-	return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) +
96726-	    get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
96727-}
96728-
96729-static inline size_t
96730-get_arena_pdirty(unsigned arena_ind) {
96731-	do_epoch();
96732-	size_t mib[4];
96733-	size_t miblen = sizeof(mib)/sizeof(size_t);
96734-	expect_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0,
96735-	    "Unexpected mallctlnametomib() failure");
96736-	mib[2] = (size_t)arena_ind;
96737-	size_t pdirty;
96738-	size_t sz = sizeof(pdirty);
96739-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&pdirty, &sz, NULL, 0), 0,
96740-	    "Unexpected mallctlbymib() failure");
96741-	return pdirty;
96742-}
96743-
96744-static inline size_t
96745-get_arena_pmuzzy(unsigned arena_ind) {
96746-	do_epoch();
96747-	size_t mib[4];
96748-	size_t miblen = sizeof(mib)/sizeof(size_t);
96749-	expect_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0,
96750-	    "Unexpected mallctlnametomib() failure");
96751-	mib[2] = (size_t)arena_ind;
96752-	size_t pmuzzy;
96753-	size_t sz = sizeof(pmuzzy);
96754-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&pmuzzy, &sz, NULL, 0), 0,
96755-	    "Unexpected mallctlbymib() failure");
96756-	return pmuzzy;
96757-}
96758-
96759-static inline void *
96760-do_mallocx(size_t size, int flags) {
96761-	void *p = mallocx(size, flags);
96762-	expect_ptr_not_null(p, "Unexpected mallocx() failure");
96763-	return p;
96764-}
96765-
96766-static inline void
96767-generate_dirty(unsigned arena_ind, size_t size) {
96768-	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
96769-	void *p = do_mallocx(size, flags);
96770-	dallocx(p, flags);
96771-}
96772-
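The helpers above all follow the same translate-once pattern: resolve a mallctl name to a MIB with mallctlnametomib(), patch the arena index into the MIB, then issue the call with mallctlbymib(). A standalone sketch of that pattern, reading one per-arena setting outside the test harness; read_dirty_decay_ms is a hypothetical helper, and the public <jemalloc/jemalloc.h> header is assumed to be installed.

#include <stdbool.h>
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

/* Sketch: read "arena.<ind>.dirty_decay_ms" for an arbitrary arena index.
 * Returns false on success and stores the value in *decay_ms. */
static bool
read_dirty_decay_ms(unsigned arena_ind, ssize_t *decay_ms) {
	size_t mib[3];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	size_t sz = sizeof(*decay_ms);

	/* Translate the name once; the "0" component is a placeholder. */
	if (mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen) != 0) {
		return true;
	}
	mib[1] = (size_t)arena_ind;	/* patch in the real arena index */
	return mallctlbymib(mib, miblen, (void *)decay_ms, &sz, NULL, 0) != 0;
}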
96773diff --git a/jemalloc/test/include/test/bench.h b/jemalloc/test/include/test/bench.h
96774deleted file mode 100644
96775index 0397c94..0000000
96776--- a/jemalloc/test/include/test/bench.h
96777+++ /dev/null
96778@@ -1,60 +0,0 @@
96779-static inline void
96780-time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter,
96781-    void (*func)(void)) {
96782-	uint64_t i;
96783-
96784-	for (i = 0; i < nwarmup; i++) {
96785-		func();
96786-	}
96787-	timer_start(timer);
96788-	for (i = 0; i < niter; i++) {
96789-		func();
96790-	}
96791-	timer_stop(timer);
96792-}
96793-
96794-#define FMT_NSECS_BUF_SIZE 100
96795-/* Print nanoseconds / iter into the buffer "buf". */
96796-static inline void
96797-fmt_nsecs(uint64_t usec, uint64_t iters, char *buf) {
96798-	uint64_t nsec = usec * 1000;
96799-	/* We'll display 3 digits after the decimal point. */
96800-	uint64_t nsec1000 = nsec * 1000;
96801-	uint64_t nsecs_per_iter1000 = nsec1000 / iters;
96802-	uint64_t intpart = nsecs_per_iter1000 / 1000;
96803-	uint64_t fracpart = nsecs_per_iter1000 % 1000;
96804-	malloc_snprintf(buf, FMT_NSECS_BUF_SIZE, "%"FMTu64".%03"FMTu64, intpart,
96805-	    fracpart);
96806-}
96807-
96808-static inline void
96809-compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a,
96810-    void (*func_a), const char *name_b, void (*func_b)) {
96811-	timedelta_t timer_a, timer_b;
96812-	char ratio_buf[6];
96813-	void *p;
96814-
96815-	p = mallocx(1, 0);
96816-	if (p == NULL) {
96817-		test_fail("Unexpected mallocx() failure");
96818-		return;
96819-	}
96820-
96821-	time_func(&timer_a, nwarmup, niter, func_a);
96822-	time_func(&timer_b, nwarmup, niter, func_b);
96823-
96824-	uint64_t usec_a = timer_usec(&timer_a);
96825-	char buf_a[FMT_NSECS_BUF_SIZE];
96826-	fmt_nsecs(usec_a, niter, buf_a);
96827-
96828-	uint64_t usec_b = timer_usec(&timer_b);
96829-	char buf_b[FMT_NSECS_BUF_SIZE];
96830-	fmt_nsecs(usec_b, niter, buf_b);
96831-
96832-	timer_ratio(&timer_a, &timer_b, ratio_buf, sizeof(ratio_buf));
96833-	malloc_printf("%"FMTu64" iterations, %s=%"FMTu64"us (%s ns/iter), "
96834-	    "%s=%"FMTu64"us (%s ns/iter), ratio=1:%s\n",
96835-	    niter, name_a, usec_a, buf_a, name_b, usec_b, buf_b, ratio_buf);
96836-
96837-	dallocx(p, 0);
96838-}
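A sketch of how a benchmark might drive compare_funcs(), assuming the rest of the harness (timedelta_t, timer_usec(), malloc_printf()) is available; malloc_free_small and malloc_free_large are hypothetical workloads defined only for illustration, and the iteration counts are arbitrary.

/* Hypothetical workloads: allocate and immediately free two different sizes. */
static void
malloc_free_small(void) {
	void *p = mallocx(16, 0);
	if (p != NULL) {
		dallocx(p, 0);
	}
}

static void
malloc_free_large(void) {
	void *p = mallocx(64 * 1024, 0);
	if (p != NULL) {
		dallocx(p, 0);
	}
}

static void
bench_small_vs_large(void) {
	/* 1000 warmup iterations, 100000 timed iterations per function. */
	compare_funcs(1000, 100000, "malloc_free_small", malloc_free_small,
	    "malloc_free_large", malloc_free_large);
}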
96839diff --git a/jemalloc/test/include/test/bgthd.h b/jemalloc/test/include/test/bgthd.h
96840deleted file mode 100644
96841index 4fa2395..0000000
96842--- a/jemalloc/test/include/test/bgthd.h
96843+++ /dev/null
96844@@ -1,17 +0,0 @@
96845-/*
96846- * Shared utility for checking if background_thread is enabled, which affects
96847- * the purging behavior and assumptions in some tests.
96848- */
96849-
96850-static inline bool
96851-is_background_thread_enabled(void) {
96852-	bool enabled;
96853-	size_t sz = sizeof(bool);
96854-	int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL,0);
96855-	if (ret == ENOENT) {
96856-		return false;
96857-	}
96858-	assert_d_eq(ret, 0, "Unexpected mallctl error");
96859-
96860-	return enabled;
96861-}
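A test whose assertions only hold under synchronous purging would typically consult this helper up front. A sketch, assuming TEST_BEGIN/TEST_END and test_skip_if() from the harness's test.h; the test name and the elided assertions are placeholders.

TEST_BEGIN(test_deterministic_purging) {
	/* Purge counters are only deterministic without background threads. */
	test_skip_if(is_background_thread_enabled());
	/* ... assertions against get_arena_dirty_npurge() et al. ... */
}
TEST_END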
96862diff --git a/jemalloc/test/include/test/btalloc.h b/jemalloc/test/include/test/btalloc.h
96863deleted file mode 100644
96864index 8f34599..0000000
96865--- a/jemalloc/test/include/test/btalloc.h
96866+++ /dev/null
96867@@ -1,30 +0,0 @@
96868-/* btalloc() provides a mechanism for allocating via permuted backtraces. */
96869-void	*btalloc(size_t size, unsigned bits);
96870-
96871-#define btalloc_n_proto(n)						\
96872-void	*btalloc_##n(size_t size, unsigned bits);
96873-btalloc_n_proto(0)
96874-btalloc_n_proto(1)
96875-
96876-#define btalloc_n_gen(n)						\
96877-void *									\
96878-btalloc_##n(size_t size, unsigned bits) {				\
96879-	void *p;							\
96880-									\
96881-	if (bits == 0) {						\
96882-		p = mallocx(size, 0);					\
96883-	} else {							\
96884-		switch (bits & 0x1U) {					\
96885-		case 0:							\
96886-			p = (btalloc_0(size, bits >> 1));		\
96887-			break;						\
96888-		case 1:							\
96889-			p = (btalloc_1(size, bits >> 1));		\
96890-			break;						\
96891-		default: not_reached();					\
96892-		}							\
96893-	}								\
96894-	/* Intentionally sabotage tail call optimization. */		\
96895-	expect_ptr_not_null(p, "Unexpected mallocx() failure");		\
96896-	return p;							\
96897-}
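btalloc_n_proto()/btalloc_n_gen() form an X-macro pair: the recursion alternates between two generated functions so that each bit of `bits` contributes a distinct call site, and therefore a distinct backtrace. A sketch of how a translation unit might instantiate them, under the assumption that the prototypes above are in scope:

/* Instantiate the two mutually recursive allocators. */
btalloc_n_gen(0)
btalloc_n_gen(1)

/* Entry point: the low bit of `bits` selects the first call site. */
void *
btalloc(size_t size, unsigned bits) {
	return btalloc_0(size, bits);
}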
96898diff --git a/jemalloc/test/include/test/extent_hooks.h b/jemalloc/test/include/test/extent_hooks.h
96899deleted file mode 100644
96900index aad0a46..0000000
96901--- a/jemalloc/test/include/test/extent_hooks.h
96902+++ /dev/null
96903@@ -1,289 +0,0 @@
96904-/*
96905- * Boilerplate code used for testing extent hooks via interception and
96906- * passthrough.
96907- */
96908-
96909-static void	*extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr,
96910-    size_t size, size_t alignment, bool *zero, bool *commit,
96911-    unsigned arena_ind);
96912-static bool	extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr,
96913-    size_t size, bool committed, unsigned arena_ind);
96914-static void	extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr,
96915-    size_t size, bool committed, unsigned arena_ind);
96916-static bool	extent_commit_hook(extent_hooks_t *extent_hooks, void *addr,
96917-    size_t size, size_t offset, size_t length, unsigned arena_ind);
96918-static bool	extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr,
96919-    size_t size, size_t offset, size_t length, unsigned arena_ind);
96920-static bool	extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr,
96921-    size_t size, size_t offset, size_t length, unsigned arena_ind);
96922-static bool	extent_purge_forced_hook(extent_hooks_t *extent_hooks,
96923-    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
96924-static bool	extent_split_hook(extent_hooks_t *extent_hooks, void *addr,
96925-    size_t size, size_t size_a, size_t size_b, bool committed,
96926-    unsigned arena_ind);
96927-static bool	extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a,
96928-    size_t size_a, void *addr_b, size_t size_b, bool committed,
96929-    unsigned arena_ind);
96930-
96931-static extent_hooks_t *default_hooks;
96932-static extent_hooks_t hooks = {
96933-	extent_alloc_hook,
96934-	extent_dalloc_hook,
96935-	extent_destroy_hook,
96936-	extent_commit_hook,
96937-	extent_decommit_hook,
96938-	extent_purge_lazy_hook,
96939-	extent_purge_forced_hook,
96940-	extent_split_hook,
96941-	extent_merge_hook
96942-};
96943-
96944-/* Control whether hook functions pass calls through to default hooks. */
96945-static bool try_alloc = true;
96946-static bool try_dalloc = true;
96947-static bool try_destroy = true;
96948-static bool try_commit = true;
96949-static bool try_decommit = true;
96950-static bool try_purge_lazy = true;
96951-static bool try_purge_forced = true;
96952-static bool try_split = true;
96953-static bool try_merge = true;
96954-
96955-/* Set to false prior to operations, then introspect after operations. */
96956-static bool called_alloc;
96957-static bool called_dalloc;
96958-static bool called_destroy;
96959-static bool called_commit;
96960-static bool called_decommit;
96961-static bool called_purge_lazy;
96962-static bool called_purge_forced;
96963-static bool called_split;
96964-static bool called_merge;
96965-
96966-/* Set to false prior to operations, then introspect after operations. */
96967-static bool did_alloc;
96968-static bool did_dalloc;
96969-static bool did_destroy;
96970-static bool did_commit;
96971-static bool did_decommit;
96972-static bool did_purge_lazy;
96973-static bool did_purge_forced;
96974-static bool did_split;
96975-static bool did_merge;
96976-
96977-#if 0
96978-#  define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__)
96979-#else
96980-#  define TRACE_HOOK(fmt, ...)
96981-#endif
96982-
96983-static void *
96984-extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
96985-    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
96986-	void *ret;
96987-
96988-	TRACE_HOOK("%s(extent_hooks=%p, new_addr=%p, size=%zu, alignment=%zu, "
96989-	    "*zero=%s, *commit=%s, arena_ind=%u)\n", __func__, extent_hooks,
96990-	    new_addr, size, alignment, *zero ?  "true" : "false", *commit ?
96991-	    "true" : "false", arena_ind);
96992-	expect_ptr_eq(extent_hooks, &hooks,
96993-	    "extent_hooks should be same as pointer used to set hooks");
96994-	expect_ptr_eq(extent_hooks->alloc, extent_alloc_hook,
96995-	    "Wrong hook function");
96996-	called_alloc = true;
96997-	if (!try_alloc) {
96998-		return NULL;
96999-	}
97000-	ret = default_hooks->alloc(default_hooks, new_addr, size, alignment,
97001-	    zero, commit, 0);
97002-	did_alloc = (ret != NULL);
97003-	return ret;
97004-}
97005-
97006-static bool
97007-extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
97008-    bool committed, unsigned arena_ind) {
97009-	bool err;
97010-
97011-	TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
97012-	    "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
97013-	    "true" : "false", arena_ind);
97014-	expect_ptr_eq(extent_hooks, &hooks,
97015-	    "extent_hooks should be same as pointer used to set hooks");
97016-	expect_ptr_eq(extent_hooks->dalloc, extent_dalloc_hook,
97017-	    "Wrong hook function");
97018-	called_dalloc = true;
97019-	if (!try_dalloc) {
97020-		return true;
97021-	}
97022-	err = default_hooks->dalloc(default_hooks, addr, size, committed, 0);
97023-	did_dalloc = !err;
97024-	return err;
97025-}
97026-
97027-static void
97028-extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
97029-    bool committed, unsigned arena_ind) {
97030-	TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
97031-	    "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
97032-	    "true" : "false", arena_ind);
97033-	expect_ptr_eq(extent_hooks, &hooks,
97034-	    "extent_hooks should be same as pointer used to set hooks");
97035-	expect_ptr_eq(extent_hooks->destroy, extent_destroy_hook,
97036-	    "Wrong hook function");
97037-	called_destroy = true;
97038-	if (!try_destroy) {
97039-		return;
97040-	}
97041-	default_hooks->destroy(default_hooks, addr, size, committed, 0);
97042-	did_destroy = true;
97043-}
97044-
97045-static bool
97046-extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
97047-    size_t offset, size_t length, unsigned arena_ind) {
97048-	bool err;
97049-
97050-	TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
97051-	    "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size,
97052-	    offset, length, arena_ind);
97053-	expect_ptr_eq(extent_hooks, &hooks,
97054-	    "extent_hooks should be same as pointer used to set hooks");
97055-	expect_ptr_eq(extent_hooks->commit, extent_commit_hook,
97056-	    "Wrong hook function");
97057-	called_commit = true;
97058-	if (!try_commit) {
97059-		return true;
97060-	}
97061-	err = default_hooks->commit(default_hooks, addr, size, offset, length,
97062-	    0);
97063-	did_commit = !err;
97064-	return err;
97065-}
97066-
97067-static bool
97068-extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
97069-    size_t offset, size_t length, unsigned arena_ind) {
97070-	bool err;
97071-
97072-	TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
97073-	    "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size,
97074-	    offset, length, arena_ind);
97075-	expect_ptr_eq(extent_hooks, &hooks,
97076-	    "extent_hooks should be same as pointer used to set hooks");
97077-	expect_ptr_eq(extent_hooks->decommit, extent_decommit_hook,
97078-	    "Wrong hook function");
97079-	called_decommit = true;
97080-	if (!try_decommit) {
97081-		return true;
97082-	}
97083-	err = default_hooks->decommit(default_hooks, addr, size, offset, length,
97084-	    0);
97085-	did_decommit = !err;
97086-	return err;
97087-}
97088-
97089-static bool
97090-extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
97091-    size_t offset, size_t length, unsigned arena_ind) {
97092-	bool err;
97093-
97094-	TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
97095-	    "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size,
97096-	    offset, length, arena_ind);
97097-	expect_ptr_eq(extent_hooks, &hooks,
97098-	    "extent_hooks should be same as pointer used to set hooks");
97099-	expect_ptr_eq(extent_hooks->purge_lazy, extent_purge_lazy_hook,
97100-	    "Wrong hook function");
97101-	called_purge_lazy = true;
97102-	if (!try_purge_lazy) {
97103-		return true;
97104-	}
97105-	err = default_hooks->purge_lazy == NULL ||
97106-	    default_hooks->purge_lazy(default_hooks, addr, size, offset, length,
97107-	    0);
97108-	did_purge_lazy = !err;
97109-	return err;
97110-}
97111-
97112-static bool
97113-extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
97114-    size_t offset, size_t length, unsigned arena_ind) {
97115-	bool err;
97116-
97117-	TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, "
97118-	    "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size,
97119-	    offset, length, arena_ind);
97120-	expect_ptr_eq(extent_hooks, &hooks,
97121-	    "extent_hooks should be same as pointer used to set hooks");
97122-	expect_ptr_eq(extent_hooks->purge_forced, extent_purge_forced_hook,
97123-	    "Wrong hook function");
97124-	called_purge_forced = true;
97125-	if (!try_purge_forced) {
97126-		return true;
97127-	}
97128-	err = default_hooks->purge_forced == NULL ||
97129-	    default_hooks->purge_forced(default_hooks, addr, size, offset,
97130-	    length, 0);
97131-	did_purge_forced = !err;
97132-	return err;
97133-}
97134-
97135-static bool
97136-extent_split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
97137-    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
97138-	bool err;
97139-
97140-	TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, size_a=%zu, "
97141-	    "size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks,
97142-	    addr, size, size_a, size_b, committed ? "true" : "false",
97143-	    arena_ind);
97144-	expect_ptr_eq(extent_hooks, &hooks,
97145-	    "extent_hooks should be same as pointer used to set hooks");
97146-	expect_ptr_eq(extent_hooks->split, extent_split_hook,
97147-	    "Wrong hook function");
97148-	called_split = true;
97149-	if (!try_split) {
97150-		return true;
97151-	}
97152-	err = (default_hooks->split == NULL ||
97153-	    default_hooks->split(default_hooks, addr, size, size_a, size_b,
97154-	    committed, 0));
97155-	did_split = !err;
97156-	return err;
97157-}
97158-
97159-static bool
97160-extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
97161-    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
97162-	bool err;
97163-
97164-	TRACE_HOOK("%s(extent_hooks=%p, addr_a=%p, size_a=%zu, addr_b=%p "
97165-	    "size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks,
97166-	    addr_a, size_a, addr_b, size_b, committed ? "true" : "false",
97167-	    arena_ind);
97168-	expect_ptr_eq(extent_hooks, &hooks,
97169-	    "extent_hooks should be same as pointer used to set hooks");
97170-	expect_ptr_eq(extent_hooks->merge, extent_merge_hook,
97171-	    "Wrong hook function");
97172-	expect_ptr_eq((void *)((uintptr_t)addr_a + size_a), addr_b,
97173-	    "Extents not mergeable");
97174-	called_merge = true;
97175-	if (!try_merge) {
97176-		return true;
97177-	}
97178-	err = (default_hooks->merge == NULL ||
97179-	    default_hooks->merge(default_hooks, addr_a, size_a, addr_b, size_b,
97180-	    committed, 0));
97181-	did_merge = !err;
97182-	return err;
97183-}
97184-
97185-static void
97186-extent_hooks_prep(void) {
97187-	size_t sz;
97188-
97189-	sz = sizeof(default_hooks);
97190-	expect_d_eq(mallctl("arena.0.extent_hooks", (void *)&default_hooks, &sz,
97191-	    NULL, 0), 0, "Unexpected mallctl() error");
97192-}
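A test typically captures the default hooks with extent_hooks_prep(), installs the intercepting hooks on a target arena via the arena.<i>.extent_hooks mallctl, resets the called_*/did_* flags, performs an allocation, and then inspects the flags. A hedged sketch; install_hooks_and_check is a hypothetical helper, expect_true is assumed from the harness's test.h, and arena creation (do_arena_create()-style) is elided.

static void
install_hooks_and_check(unsigned arena_ind) {
	extent_hooks_t *new_hooks = &hooks;
	size_t mib[3];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;

	extent_hooks_prep();	/* capture default_hooks for passthrough */

	expect_d_eq(mallctlnametomib("arena.0.extent_hooks", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&new_hooks,
	    sizeof(extent_hooks_t *)), 0, "Unexpected mallctlbymib() failure");

	called_alloc = false;
	void *p = mallocx(1, flags);
	expect_ptr_not_null(p, "Unexpected mallocx() failure");
	expect_true(called_alloc, "Expected the alloc hook to be invoked");
	dallocx(p, flags);
}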
97193diff --git a/jemalloc/test/include/test/jemalloc_test.h.in b/jemalloc/test/include/test/jemalloc_test.h.in
97194deleted file mode 100644
97195index 3f8c0da..0000000
97196--- a/jemalloc/test/include/test/jemalloc_test.h.in
97197+++ /dev/null
97198@@ -1,180 +0,0 @@
97199-#ifdef __cplusplus
97200-extern "C" {
97201-#endif
97202-
97203-#include <limits.h>
97204-#ifndef SIZE_T_MAX
97205-#  define SIZE_T_MAX	SIZE_MAX
97206-#endif
97207-#include <stdlib.h>
97208-#include <stdarg.h>
97209-#include <stdbool.h>
97210-#include <errno.h>
97211-#include <math.h>
97212-#include <string.h>
97213-#ifdef _WIN32
97214-#  include "msvc_compat/strings.h"
97215-#endif
97216-
97217-#ifdef _WIN32
97218-#  include <windows.h>
97219-#  include "msvc_compat/windows_extra.h"
97220-#else
97221-#  include <pthread.h>
97222-#endif
97223-
97224-#include "test/jemalloc_test_defs.h"
97225-
97226-#if defined(JEMALLOC_OSATOMIC)
97227-#  include <libkern/OSAtomic.h>
97228-#endif
97229-
97230-#if defined(HAVE_ALTIVEC) && !defined(__APPLE__)
97231-#  include <altivec.h>
97232-#endif
97233-#ifdef HAVE_SSE2
97234-#  include <emmintrin.h>
97235-#endif
97236-
97237-/******************************************************************************/
97238-/*
97239- * For unit tests and analytics tests, expose all public and private interfaces.
97240- */
97241-#if defined(JEMALLOC_UNIT_TEST) || defined (JEMALLOC_ANALYZE_TEST)
97242-#  define JEMALLOC_JET
97243-#  define JEMALLOC_MANGLE
97244-#  include "jemalloc/internal/jemalloc_preamble.h"
97245-#  include "jemalloc/internal/jemalloc_internal_includes.h"
97246-
97247-/******************************************************************************/
97248-/*
97249- * For integration tests, expose the public jemalloc interfaces, but only
97250- * expose the minimum necessary internal utility code (to avoid re-implementing
97251- * essentially identical code within the test infrastructure).
97252- */
97253-#elif defined(JEMALLOC_INTEGRATION_TEST) || \
97254-    defined(JEMALLOC_INTEGRATION_CPP_TEST)
97255-#  define JEMALLOC_MANGLE
97256-#  include "jemalloc/jemalloc@[email protected]"
97257-#  include "jemalloc/internal/jemalloc_internal_defs.h"
97258-#  include "jemalloc/internal/jemalloc_internal_macros.h"
97259-
97260-static const bool config_debug =
97261-#ifdef JEMALLOC_DEBUG
97262-    true
97263-#else
97264-    false
97265-#endif
97266-    ;
97267-
97268-#  define JEMALLOC_N(n) @private_namespace@##n
97269-#  include "jemalloc/internal/private_namespace.h"
97270-#  include "jemalloc/internal/test_hooks.h"
97271-
97272-/* Hermetic headers. */
97273-#  include "jemalloc/internal/assert.h"
97274-#  include "jemalloc/internal/malloc_io.h"
97275-#  include "jemalloc/internal/nstime.h"
97276-#  include "jemalloc/internal/util.h"
97277-
97278-/* Non-hermetic headers. */
97279-#  include "jemalloc/internal/qr.h"
97280-#  include "jemalloc/internal/ql.h"
97281-
97282-/******************************************************************************/
97283-/*
97284- * For stress tests, expose the public jemalloc interfaces with name mangling
97285- * so that they can be tested as e.g. malloc() and free().  Also expose the
97286- * public jemalloc interfaces with jet_ prefixes, so that stress tests can use
97287- * a separate allocator for their internal data structures.
97288- */
97289-#elif defined(JEMALLOC_STRESS_TEST)
97290-#  include "jemalloc/jemalloc@[email protected]"
97291-
97292-#  include "jemalloc/jemalloc_protos_jet.h"
97293-
97294-#  define JEMALLOC_JET
97295-#  include "jemalloc/internal/jemalloc_preamble.h"
97296-#  include "jemalloc/internal/jemalloc_internal_includes.h"
97297-#  include "jemalloc/internal/public_unnamespace.h"
97298-#  undef JEMALLOC_JET
97299-
97300-#  include "jemalloc/jemalloc_rename.h"
97301-#  define JEMALLOC_MANGLE
97302-#  ifdef JEMALLOC_STRESS_TESTLIB
97303-#    include "jemalloc/jemalloc_mangle_jet.h"
97304-#  else
97305-#    include "jemalloc/jemalloc_mangle.h"
97306-#  endif
97307-
97308-/******************************************************************************/
97309-/*
97310- * This header does dangerous things, the effects of which only test code
97311- * should be subject to.
97312- */
97313-#else
97314-#  error "This header cannot be included outside a testing context"
97315-#endif
97316-
97317-/******************************************************************************/
97318-/*
97319- * Common test utilities.
97320- */
97321-#include "test/btalloc.h"
97322-#include "test/math.h"
97323-#include "test/mtx.h"
97324-#include "test/mq.h"
97325-#include "test/sleep.h"
97326-#include "test/test.h"
97327-#include "test/timer.h"
97328-#include "test/thd.h"
97329-#include "test/bgthd.h"
97330-#define MEXP 19937
97331-#include "test/SFMT.h"
97332-
97333-#ifndef JEMALLOC_HAVE_MALLOC_SIZE
97334-#define TEST_MALLOC_SIZE malloc_usable_size
97335-#else
97336-#define TEST_MALLOC_SIZE malloc_size
97337-#endif
97338-/******************************************************************************/
97339-/*
97340- * Define always-enabled assertion macros, so that test assertions execute even
97341- * if assertions are disabled in the library code.
97342- */
97343-#undef assert
97344-#undef not_reached
97345-#undef not_implemented
97346-#undef expect_not_implemented
97347-
97348-#define assert(e) do {							\
97349-	if (!(e)) {							\
97350-		malloc_printf(						\
97351-		    "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n",	\
97352-		    __FILE__, __LINE__, #e);				\
97353-		abort();						\
97354-	}								\
97355-} while (0)
97356-
97357-#define not_reached() do {						\
97358-	malloc_printf(							\
97359-	    "<jemalloc>: %s:%d: Unreachable code reached\n",		\
97360-	    __FILE__, __LINE__);					\
97361-	abort();							\
97362-} while (0)
97363-
97364-#define not_implemented() do {						\
97365-	malloc_printf("<jemalloc>: %s:%d: Not implemented\n",		\
97366-	    __FILE__, __LINE__);					\
97367-	abort();							\
97368-} while (0)
97369-
97370-#define expect_not_implemented(e) do {					\
97371-	if (!(e)) {							\
97372-		not_implemented();					\
97373-	}								\
97374-} while (0)
97375-
97376-#ifdef __cplusplus
97377-}
97378-#endif
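A sketch of a unit-test translation unit built against this header. The build system normally supplies -DJEMALLOC_UNIT_TEST; the inline #define is shown only for illustration, and TEST_BEGIN/TEST_END/test() are assumed to come from the harness's test.h.

#define JEMALLOC_UNIT_TEST
#include "test/jemalloc_test.h"

TEST_BEGIN(test_smoke) {
	void *p = mallocx(8, 0);
	expect_ptr_not_null(p, "Unexpected mallocx() failure");
	expect_zu_ge(TEST_MALLOC_SIZE(p), 8, "Unexpected usable size");
	dallocx(p, 0);
}
TEST_END

int
main(void) {
	return test(test_smoke);
}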
97379diff --git a/jemalloc/test/include/test/jemalloc_test_defs.h.in b/jemalloc/test/include/test/jemalloc_test_defs.h.in
97380deleted file mode 100644
97381index 5cc8532..0000000
97382--- a/jemalloc/test/include/test/jemalloc_test_defs.h.in
97383+++ /dev/null
97384@@ -1,9 +0,0 @@
97385-#include "jemalloc/internal/jemalloc_internal_defs.h"
97386-#include "jemalloc/internal/jemalloc_internal_decls.h"
97387-
97388-/*
97389- * For use by SFMT.  configure.ac doesn't actually define HAVE_SSE2 because its
97390- * dependencies are notoriously unportable in practice.
97391- */
97392-#undef HAVE_SSE2
97393-#undef HAVE_ALTIVEC
97394diff --git a/jemalloc/test/include/test/math.h b/jemalloc/test/include/test/math.h
97395deleted file mode 100644
97396index efba086..0000000
97397--- a/jemalloc/test/include/test/math.h
97398+++ /dev/null
97399@@ -1,306 +0,0 @@
97400-/*
97401- * Compute the natural log of Gamma(x), accurate to 10 decimal places.
97402- *
97403- * This implementation is based on:
97404- *
97405- *   Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function
97406- *   [S14].  Communications of the ACM 9(9):684.
97407- */
97408-static inline double
97409-ln_gamma(double x) {
97410-	double f, z;
97411-
97412-	assert(x > 0.0);
97413-
97414-	if (x < 7.0) {
97415-		f = 1.0;
97416-		z = x;
97417-		while (z < 7.0) {
97418-			f *= z;
97419-			z += 1.0;
97420-		}
97421-		x = z;
97422-		f = -log(f);
97423-	} else {
97424-		f = 0.0;
97425-	}
97426-
97427-	z = 1.0 / (x * x);
97428-
97429-	return f + (x-0.5) * log(x) - x + 0.918938533204673 +
97430-	    (((-0.000595238095238 * z + 0.000793650793651) * z -
97431-	    0.002777777777778) * z + 0.083333333333333) / x;
97432-}
97433-
97434-/*
97435- * Compute the incomplete Gamma ratio for [0..x], where p is the shape
97436- * parameter, and ln_gamma_p is ln_gamma(p).
97437- *
97438- * This implementation is based on:
97439- *
97440- *   Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral.
97441- *   Applied Statistics 19:285-287.
97442- */
97443-static inline double
97444-i_gamma(double x, double p, double ln_gamma_p) {
97445-	double acu, factor, oflo, gin, term, rn, a, b, an, dif;
97446-	double pn[6];
97447-	unsigned i;
97448-
97449-	assert(p > 0.0);
97450-	assert(x >= 0.0);
97451-
97452-	if (x == 0.0) {
97453-		return 0.0;
97454-	}
97455-
97456-	acu = 1.0e-10;
97457-	oflo = 1.0e30;
97458-	gin = 0.0;
97459-	factor = exp(p * log(x) - x - ln_gamma_p);
97460-
97461-	if (x <= 1.0 || x < p) {
97462-		/* Calculation by series expansion. */
97463-		gin = 1.0;
97464-		term = 1.0;
97465-		rn = p;
97466-
97467-		while (true) {
97468-			rn += 1.0;
97469-			term *= x / rn;
97470-			gin += term;
97471-			if (term <= acu) {
97472-				gin *= factor / p;
97473-				return gin;
97474-			}
97475-		}
97476-	} else {
97477-		/* Calculation by continued fraction. */
97478-		a = 1.0 - p;
97479-		b = a + x + 1.0;
97480-		term = 0.0;
97481-		pn[0] = 1.0;
97482-		pn[1] = x;
97483-		pn[2] = x + 1.0;
97484-		pn[3] = x * b;
97485-		gin = pn[2] / pn[3];
97486-
97487-		while (true) {
97488-			a += 1.0;
97489-			b += 2.0;
97490-			term += 1.0;
97491-			an = a * term;
97492-			for (i = 0; i < 2; i++) {
97493-				pn[i+4] = b * pn[i+2] - an * pn[i];
97494-			}
97495-			if (pn[5] != 0.0) {
97496-				rn = pn[4] / pn[5];
97497-				dif = fabs(gin - rn);
97498-				if (dif <= acu && dif <= acu * rn) {
97499-					gin = 1.0 - factor * gin;
97500-					return gin;
97501-				}
97502-				gin = rn;
97503-			}
97504-			for (i = 0; i < 4; i++) {
97505-				pn[i] = pn[i+2];
97506-			}
97507-
97508-			if (fabs(pn[4]) >= oflo) {
97509-				for (i = 0; i < 4; i++) {
97510-					pn[i] /= oflo;
97511-				}
97512-			}
97513-		}
97514-	}
97515-}
97516-
97517-/*
97518- * Given a value p in [0..1] of the lower tail area of the normal distribution,
97519- * compute the limit on the definite integral from [-inf..z] that satisfies p,
97520- * accurate to 16 decimal places.
97521- *
97522- * This implementation is based on:
97523- *
97524- *   Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal
97525- *   distribution.  Applied Statistics 37(3):477-484.
97526- */
97527-static inline double
97528-pt_norm(double p) {
97529-	double q, r, ret;
97530-
97531-	assert(p > 0.0 && p < 1.0);
97532-
97533-	q = p - 0.5;
97534-	if (fabs(q) <= 0.425) {
97535-		/* p close to 1/2. */
97536-		r = 0.180625 - q * q;
97537-		return q * (((((((2.5090809287301226727e3 * r +
97538-		    3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r
97539-		    + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) *
97540-		    r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2)
97541-		    * r + 3.3871328727963666080e0) /
97542-		    (((((((5.2264952788528545610e3 * r +
97543-		    2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r
97544-		    + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) *
97545-		    r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1)
97546-		    * r + 1.0);
97547-	} else {
97548-		if (q < 0.0) {
97549-			r = p;
97550-		} else {
97551-			r = 1.0 - p;
97552-		}
97553-		assert(r > 0.0);
97554-
97555-		r = sqrt(-log(r));
97556-		if (r <= 5.0) {
97557-			/* p neither close to 1/2 nor 0 or 1. */
97558-			r -= 1.6;
97559-			ret = ((((((((7.74545014278341407640e-4 * r +
97560-			    2.27238449892691845833e-2) * r +
97561-			    2.41780725177450611770e-1) * r +
97562-			    1.27045825245236838258e0) * r +
97563-			    3.64784832476320460504e0) * r +
97564-			    5.76949722146069140550e0) * r +
97565-			    4.63033784615654529590e0) * r +
97566-			    1.42343711074968357734e0) /
97567-			    (((((((1.05075007164441684324e-9 * r +
97568-			    5.47593808499534494600e-4) * r +
97569-			    1.51986665636164571966e-2)
97570-			    * r + 1.48103976427480074590e-1) * r +
97571-			    6.89767334985100004550e-1) * r +
97572-			    1.67638483018380384940e0) * r +
97573-			    2.05319162663775882187e0) * r + 1.0));
97574-		} else {
97575-			/* p near 0 or 1. */
97576-			r -= 5.0;
97577-			ret = ((((((((2.01033439929228813265e-7 * r +
97578-			    2.71155556874348757815e-5) * r +
97579-			    1.24266094738807843860e-3) * r +
97580-			    2.65321895265761230930e-2) * r +
97581-			    2.96560571828504891230e-1) * r +
97582-			    1.78482653991729133580e0) * r +
97583-			    5.46378491116411436990e0) * r +
97584-			    6.65790464350110377720e0) /
97585-			    (((((((2.04426310338993978564e-15 * r +
97586-			    1.42151175831644588870e-7) * r +
97587-			    1.84631831751005468180e-5) * r +
97588-			    7.86869131145613259100e-4) * r +
97589-			    1.48753612908506148525e-2) * r +
97590-			    1.36929880922735805310e-1) * r +
97591-			    5.99832206555887937690e-1)
97592-			    * r + 1.0));
97593-		}
97594-		if (q < 0.0) {
97595-			ret = -ret;
97596-		}
97597-		return ret;
97598-	}
97599-}
97600-
97601-/*
97602- * Given a value p in [0..1] of the lower tail area of the Chi^2 distribution
97603- * with df degrees of freedom, where ln_gamma_df_2 is ln_gamma(df/2.0), compute
97604- * the upper limit on the definite integral from [0..z] that satisfies p,
97605- * accurate to 12 decimal places.
97606- *
97607- * This implementation is based on:
97608- *
97609- *   Best, D.J., D.E. Roberts (1975) Algorithm AS 91: The percentage points of
97610- *   the Chi^2 distribution.  Applied Statistics 24(3):385-388.
97611- *
97612- *   Shea, B.L. (1991) Algorithm AS R85: A remark on AS 91: The percentage
97613- *   points of the Chi^2 distribution.  Applied Statistics 40(1):233-235.
97614- */
97615-static inline double
97616-pt_chi2(double p, double df, double ln_gamma_df_2) {
97617-	double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6;
97618-	unsigned i;
97619-
97620-	assert(p >= 0.0 && p < 1.0);
97621-	assert(df > 0.0);
97622-
97623-	e = 5.0e-7;
97624-	aa = 0.6931471805;
97625-
97626-	xx = 0.5 * df;
97627-	c = xx - 1.0;
97628-
97629-	if (df < -1.24 * log(p)) {
97630-		/* Starting approximation for small Chi^2. */
97631-		ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx);
97632-		if (ch - e < 0.0) {
97633-			return ch;
97634-		}
97635-	} else {
97636-		if (df > 0.32) {
97637-			x = pt_norm(p);
97638-			/*
97639-			 * Starting approximation using Wilson and Hilferty
97640-			 * estimate.
97641-			 */
97642-			p1 = 0.222222 / df;
97643-			ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0);
97644-			/* Starting approximation for p tending to 1. */
97645-			if (ch > 2.2 * df + 6.0) {
97646-				ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) +
97647-				    ln_gamma_df_2);
97648-			}
97649-		} else {
97650-			ch = 0.4;
97651-			a = log(1.0 - p);
97652-			while (true) {
97653-				q = ch;
97654-				p1 = 1.0 + ch * (4.67 + ch);
97655-				p2 = ch * (6.73 + ch * (6.66 + ch));
97656-				t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch
97657-				    * (13.32 + 3.0 * ch)) / p2;
97658-				ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch +
97659-				    c * aa) * p2 / p1) / t;
97660-				if (fabs(q / ch - 1.0) - 0.01 <= 0.0) {
97661-					break;
97662-				}
97663-			}
97664-		}
97665-	}
97666-
97667-	for (i = 0; i < 20; i++) {
97668-		/* Calculation of seven-term Taylor series. */
97669-		q = ch;
97670-		p1 = 0.5 * ch;
97671-		if (p1 < 0.0) {
97672-			return -1.0;
97673-		}
97674-		p2 = p - i_gamma(p1, xx, ln_gamma_df_2);
97675-		t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch));
97676-		b = t / ch;
97677-		a = 0.5 * t - b * c;
97678-		s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 +
97679-		    60.0 * a))))) / 420.0;
97680-		s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 *
97681-		    a)))) / 2520.0;
97682-		s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0;
97683-		s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a *
97684-		    (889.0 + 1740.0 * a))) / 5040.0;
97685-		s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0;
97686-		s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0;
97687-		ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3
97688-		    - b * (s4 - b * (s5 - b * s6))))));
97689-		if (fabs(q / ch - 1.0) <= e) {
97690-			break;
97691-		}
97692-	}
97693-
97694-	return ch;
97695-}
97696-
97697-/*
97698- * Given a value p in [0..1] and Gamma distribution shape and scale parameters,
97699- * compute the upper limit on the definite integral from [0..z] that satisfies
97700- * p.
97701- */
97702-static inline double
97703-pt_gamma(double p, double shape, double scale, double ln_gamma_shape) {
97704-	return pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale;
97705-}
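These routines chain together: ln_gamma() feeds i_gamma() and pt_chi2(), and pt_gamma() is a thin wrapper over pt_chi2(). A sketch of computing a Chi^2 critical value, as a stress test's goodness-of-fit check might; chi2_critical_value is a hypothetical helper and the percentile/degrees-of-freedom values are arbitrary.

#include <math.h>

/* Sketch: critical value x such that P(Chi^2_df <= x) = p. */
static double
chi2_critical_value(double p, double df) {
	return pt_chi2(p, df, ln_gamma(df / 2.0));
}

/* Example: 99th percentile of Chi^2 with 255 degrees of freedom. */
/* double crit = chi2_critical_value(0.99, 255.0); */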
97706diff --git a/jemalloc/test/include/test/mq.h b/jemalloc/test/include/test/mq.h
97707deleted file mode 100644
97708index 5dc6486..0000000
97709--- a/jemalloc/test/include/test/mq.h
97710+++ /dev/null
97711@@ -1,107 +0,0 @@
97712-#include "test/sleep.h"
97713-
97714-/*
97715- * Simple templated message queue implementation that relies on only mutexes for
97716- * synchronization (which reduces portability issues).  Given the following
97717- * setup:
97718- *
97719- *   typedef struct mq_msg_s mq_msg_t;
97720- *   struct mq_msg_s {
97721- *           mq_msg(mq_msg_t) link;
97722- *           [message data]
97723- *   };
97724- *   mq_gen(, mq_, mq_t, mq_msg_t, link)
97725- *
97726- * The API is as follows:
97727- *
97728- *   bool mq_init(mq_t *mq);
97729- *   void mq_fini(mq_t *mq);
97730- *   unsigned mq_count(mq_t *mq);
97731- *   mq_msg_t *mq_tryget(mq_t *mq);
97732- *   mq_msg_t *mq_get(mq_t *mq);
97733- *   void mq_put(mq_t *mq, mq_msg_t *msg);
97734- *
97735- * The message queue linkage embedded in each message is to be treated as
97736- * externally opaque (no need to initialize or clean up externally).  mq_fini()
97737- * does not perform any cleanup of messages, since it knows nothing of their
97738- * payloads.
97739- */
97740-#define mq_msg(a_mq_msg_type)	ql_elm(a_mq_msg_type)
97741-
97742-#define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field)	\
97743-typedef struct {							\
97744-	mtx_t			lock;					\
97745-	ql_head(a_mq_msg_type)	msgs;					\
97746-	unsigned		count;					\
97747-} a_mq_type;								\
97748-a_attr bool								\
97749-a_prefix##init(a_mq_type *mq) {						\
97750-									\
97751-	if (mtx_init(&mq->lock)) {					\
97752-		return true;						\
97753-	}								\
97754-	ql_new(&mq->msgs);						\
97755-	mq->count = 0;							\
97756-	return false;							\
97757-}									\
97758-a_attr void								\
97759-a_prefix##fini(a_mq_type *mq) {						\
97760-	mtx_fini(&mq->lock);						\
97761-}									\
97762-a_attr unsigned								\
97763-a_prefix##count(a_mq_type *mq) {					\
97764-	unsigned count;							\
97765-									\
97766-	mtx_lock(&mq->lock);						\
97767-	count = mq->count;						\
97768-	mtx_unlock(&mq->lock);						\
97769-	return count;							\
97770-}									\
97771-a_attr a_mq_msg_type *							\
97772-a_prefix##tryget(a_mq_type *mq) {					\
97773-	a_mq_msg_type *msg;						\
97774-									\
97775-	mtx_lock(&mq->lock);						\
97776-	msg = ql_first(&mq->msgs);					\
97777-	if (msg != NULL) {						\
97778-		ql_head_remove(&mq->msgs, a_mq_msg_type, a_field);	\
97779-		mq->count--;						\
97780-	}								\
97781-	mtx_unlock(&mq->lock);						\
97782-	return msg;							\
97783-}									\
97784-a_attr a_mq_msg_type *							\
97785-a_prefix##get(a_mq_type *mq) {						\
97786-	a_mq_msg_type *msg;						\
97787-	unsigned ns;							\
97788-									\
97789-	msg = a_prefix##tryget(mq);					\
97790-	if (msg != NULL) {						\
97791-		return msg;						\
97792-	}								\
97793-									\
97794-	ns = 1;								\
97795-	while (true) {							\
97796-		sleep_ns(ns);						\
97797-		msg = a_prefix##tryget(mq);				\
97798-		if (msg != NULL) {					\
97799-			return msg;					\
97800-		}							\
97801-		if (ns < 1000*1000*1000) {				\
97802-			/* Double sleep time, up to max 1 second. */	\
97803-			ns <<= 1;					\
97804-			if (ns > 1000*1000*1000) {			\
97805-				ns = 1000*1000*1000;			\
97806-			}						\
97807-		}							\
97808-	}								\
97809-}									\
97810-a_attr void								\
97811-a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) {			\
97812-									\
97813-	mtx_lock(&mq->lock);						\
97814-	ql_elm_new(msg, a_field);					\
97815-	ql_tail_insert(&mq->msgs, msg, a_field);			\
97816-	mq->count++;							\
97817-	mtx_unlock(&mq->lock);						\
97818-}
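Expanding the setup described in the header comment into a concrete, hypothetical queue: the message type embeds the ql linkage, mq_gen() emits the typed API, and a producer/consumer pair then uses mq_put()/mq_get(). This assumes the harness headers (mtx.h, ql.h, sleep.h, test.h) are already in scope, as they are when included via jemalloc_test.h.

/* Hypothetical message carrying a single integer payload. */
typedef struct mq_msg_s mq_msg_t;
struct mq_msg_s {
	mq_msg(mq_msg_t)	link;
	int			value;
};
mq_gen(static, mq_, mq_t, mq_msg_t, link)

static void
mq_example(void) {
	mq_t mq;
	mq_msg_t msg, *recvd;

	expect_false(mq_init(&mq), "Unexpected mq_init() failure");
	msg.value = 42;
	mq_put(&mq, &msg);	/* producer side */
	recvd = mq_get(&mq);	/* blocks (with backoff) until a message arrives */
	expect_d_eq(recvd->value, 42, "Unexpected message payload");
	mq_fini(&mq);
}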
97819diff --git a/jemalloc/test/include/test/mtx.h b/jemalloc/test/include/test/mtx.h
97820deleted file mode 100644
97821index 066a213..0000000
97822--- a/jemalloc/test/include/test/mtx.h
97823+++ /dev/null
97824@@ -1,21 +0,0 @@
97825-/*
97826- * mtx is a slightly simplified version of malloc_mutex.  This code duplication
97827- * is unfortunate, but there are allocator bootstrapping considerations that
97828- * would leak into the test infrastructure if malloc_mutex were used directly
97829- * in tests.
97830- */
97831-
97832-typedef struct {
97833-#ifdef _WIN32
97834-	CRITICAL_SECTION	lock;
97835-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
97836-	os_unfair_lock		lock;
97837-#else
97838-	pthread_mutex_t		lock;
97839-#endif
97840-} mtx_t;
97841-
97842-bool	mtx_init(mtx_t *mtx);
97843-void	mtx_fini(mtx_t *mtx);
97844-void	mtx_lock(mtx_t *mtx);
97845-void	mtx_unlock(mtx_t *mtx);
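A small sketch of the mtx API guarding a counter shared between test threads; counter_incr and the surrounding setup are hypothetical, and mtx_init() follows the same returns-true-on-failure convention as mq_init().

/* Sketch: protect a shared counter updated from multiple test threads. */
static mtx_t counter_mtx;
static unsigned counter;

static void
counter_incr(void) {
	mtx_lock(&counter_mtx);
	counter++;
	mtx_unlock(&counter_mtx);
}

/* if (mtx_init(&counter_mtx)) { ... handle failure ... }	*/
/* ... spawn threads that call counter_incr() ...		*/
/* mtx_fini(&counter_mtx);					*/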
97846diff --git a/jemalloc/test/include/test/nbits.h b/jemalloc/test/include/test/nbits.h
97847deleted file mode 100644
97848index c06cf1b..0000000
97849--- a/jemalloc/test/include/test/nbits.h
97850+++ /dev/null
97851@@ -1,111 +0,0 @@
97852-#ifndef TEST_NBITS_H
97853-#define TEST_NBITS_H
97854-
97855-/* Interesting bitmap counts to test. */
97856-
97857-#define NBITS_TAB \
97858-    NB( 1) \
97859-    NB( 2) \
97860-    NB( 3) \
97861-    NB( 4) \
97862-    NB( 5) \
97863-    NB( 6) \
97864-    NB( 7) \
97865-    NB( 8) \
97866-    NB( 9) \
97867-    NB(10) \
97868-    NB(11) \
97869-    NB(12) \
97870-    NB(13) \
97871-    NB(14) \
97872-    NB(15) \
97873-    NB(16) \
97874-    NB(17) \
97875-    NB(18) \
97876-    NB(19) \
97877-    NB(20) \
97878-    NB(21) \
97879-    NB(22) \
97880-    NB(23) \
97881-    NB(24) \
97882-    NB(25) \
97883-    NB(26) \
97884-    NB(27) \
97885-    NB(28) \
97886-    NB(29) \
97887-    NB(30) \
97888-    NB(31) \
97889-    NB(32) \
97890-    \
97891-    NB(33) \
97892-    NB(34) \
97893-    NB(35) \
97894-    NB(36) \
97895-    NB(37) \
97896-    NB(38) \
97897-    NB(39) \
97898-    NB(40) \
97899-    NB(41) \
97900-    NB(42) \
97901-    NB(43) \
97902-    NB(44) \
97903-    NB(45) \
97904-    NB(46) \
97905-    NB(47) \
97906-    NB(48) \
97907-    NB(49) \
97908-    NB(50) \
97909-    NB(51) \
97910-    NB(52) \
97911-    NB(53) \
97912-    NB(54) \
97913-    NB(55) \
97914-    NB(56) \
97915-    NB(57) \
97916-    NB(58) \
97917-    NB(59) \
97918-    NB(60) \
97919-    NB(61) \
97920-    NB(62) \
97921-    NB(63) \
97922-    NB(64) \
97923-    NB(65) \
97924-    NB(66) \
97925-    NB(67) \
97926-    \
97927-    NB(126) \
97928-    NB(127) \
97929-    NB(128) \
97930-    NB(129) \
97931-    NB(130) \
97932-    \
97933-    NB(254) \
97934-    NB(255) \
97935-    NB(256) \
97936-    NB(257) \
97937-    NB(258) \
97938-    \
97939-    NB(510) \
97940-    NB(511) \
97941-    NB(512) \
97942-    NB(513) \
97943-    NB(514) \
97944-    \
97945-    NB(1022) \
97946-    NB(1023) \
97947-    NB(1024) \
97948-    NB(1025) \
97949-    NB(1026) \
97950-    \
97951-    NB(2048) \
97952-    \
97953-    NB(4094) \
97954-    NB(4095) \
97955-    NB(4096) \
97956-    NB(4097) \
97957-    NB(4098) \
97958-    \
97959-    NB(8192) \
97960-    NB(16384)
97961-
97962-#endif /* TEST_NBITS_H */
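NBITS_TAB is an X-macro table: a consumer defines NB(nbits) to emit whatever per-size code it needs, expands NBITS_TAB, then undefines NB. A sketch under that assumption; test_bitmap_size_body and run_all_bitmap_sizes are hypothetical names used only for illustration.

#include "test/nbits.h"

/* Hypothetical per-size check; real tests typically expand NB() into a call
 * to a body function parameterized by the bit count. */
static void
test_bitmap_size_body(size_t nbits) {
	expect_zu_gt(nbits, 0, "Bitmap must contain at least one bit");
}

static void
run_all_bitmap_sizes(void) {
#define NB(nbits) test_bitmap_size_body(nbits);
	NBITS_TAB
#undef NB
}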
97963diff --git a/jemalloc/test/include/test/san.h b/jemalloc/test/include/test/san.h
97964deleted file mode 100644
97965index da07865..0000000
97966--- a/jemalloc/test/include/test/san.h
97967+++ /dev/null
97968@@ -1,14 +0,0 @@
97969-#if defined(JEMALLOC_UAF_DETECTION) || defined(JEMALLOC_DEBUG)
97970-#  define TEST_SAN_UAF_ALIGN_ENABLE "lg_san_uaf_align:12"
97971-#  define TEST_SAN_UAF_ALIGN_DISABLE "lg_san_uaf_align:-1"
97972-#else
97973-#  define TEST_SAN_UAF_ALIGN_ENABLE ""
97974-#  define TEST_SAN_UAF_ALIGN_DISABLE ""
97975-#endif
97976-
97977-static inline bool
97978-extent_is_guarded(tsdn_t *tsdn, void *ptr) {
97979-	edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
97980-	return edata_guarded_get(edata);
97981-}
97982-
97983diff --git a/jemalloc/test/include/test/sleep.h b/jemalloc/test/include/test/sleep.h
97984deleted file mode 100644
97985index c232f63..0000000
97986--- a/jemalloc/test/include/test/sleep.h
97987+++ /dev/null
97988@@ -1 +0,0 @@
97989-void sleep_ns(unsigned ns);
97990diff --git a/jemalloc/test/include/test/test.h b/jemalloc/test/include/test/test.h
97991deleted file mode 100644
97992index d4b6591..0000000
97993--- a/jemalloc/test/include/test/test.h
97994+++ /dev/null
97995@@ -1,583 +0,0 @@
97996-#define ASSERT_BUFSIZE	256
97997-
97998-#define verify_cmp(may_abort, t, a, b, cmp, neg_cmp, pri, ...) do {	\
97999-	const t a_ = (a);						\
98000-	const t b_ = (b);						\
98001-	if (!(a_ cmp b_)) {						\
98002-		char prefix[ASSERT_BUFSIZE];				\
98003-		char message[ASSERT_BUFSIZE];				\
98004-		malloc_snprintf(prefix, sizeof(prefix),			\
98005-		    "%s:%s:%d: Failed assertion: "			\
98006-		    "(%s) " #cmp " (%s) --> "				\
98007-		    "%" pri " " #neg_cmp " %" pri ": ",			\
98008-		    __func__, __FILE__, __LINE__,			\
98009-		    #a, #b, a_, b_);					\
98010-		malloc_snprintf(message, sizeof(message), __VA_ARGS__);	\
98011-		if (may_abort) {					\
98012-			abort();					\
98013-		} else {						\
98014-			p_test_fail(prefix, message);			\
98015-		}							\
98016-	}								\
98017-} while (0)
98018-
98019-#define expect_cmp(t, a, b, cmp, neg_cmp, pri, ...) verify_cmp(false,	\
98020-    t, a, b, cmp, neg_cmp, pri, __VA_ARGS__)
98021-
98022-#define expect_ptr_eq(a, b, ...)	expect_cmp(void *, a, b, ==,	\
98023-    !=, "p", __VA_ARGS__)
98024-#define expect_ptr_ne(a, b, ...)	expect_cmp(void *, a, b, !=,	\
98025-    ==, "p", __VA_ARGS__)
98026-#define expect_ptr_null(a, ...)		expect_cmp(void *, a, NULL, ==,	\
98027-    !=, "p", __VA_ARGS__)
98028-#define expect_ptr_not_null(a, ...)	expect_cmp(void *, a, NULL, !=,	\
98029-    ==, "p", __VA_ARGS__)
98030-
98031-#define expect_c_eq(a, b, ...)	expect_cmp(char, a, b, ==, !=, "c", __VA_ARGS__)
98032-#define expect_c_ne(a, b, ...)	expect_cmp(char, a, b, !=, ==, "c", __VA_ARGS__)
98033-#define expect_c_lt(a, b, ...)	expect_cmp(char, a, b, <, >=, "c", __VA_ARGS__)
98034-#define expect_c_le(a, b, ...)	expect_cmp(char, a, b, <=, >, "c", __VA_ARGS__)
98035-#define expect_c_ge(a, b, ...)	expect_cmp(char, a, b, >=, <, "c", __VA_ARGS__)
98036-#define expect_c_gt(a, b, ...)	expect_cmp(char, a, b, >, <=, "c", __VA_ARGS__)
98037-
98038-#define expect_x_eq(a, b, ...)	expect_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__)
98039-#define expect_x_ne(a, b, ...)	expect_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__)
98040-#define expect_x_lt(a, b, ...)	expect_cmp(int, a, b, <, >=, "#x", __VA_ARGS__)
98041-#define expect_x_le(a, b, ...)	expect_cmp(int, a, b, <=, >, "#x", __VA_ARGS__)
98042-#define expect_x_ge(a, b, ...)	expect_cmp(int, a, b, >=, <, "#x", __VA_ARGS__)
98043-#define expect_x_gt(a, b, ...)	expect_cmp(int, a, b, >, <=, "#x", __VA_ARGS__)
98044-
98045-#define expect_d_eq(a, b, ...)	expect_cmp(int, a, b, ==, !=, "d", __VA_ARGS__)
98046-#define expect_d_ne(a, b, ...)	expect_cmp(int, a, b, !=, ==, "d", __VA_ARGS__)
98047-#define expect_d_lt(a, b, ...)	expect_cmp(int, a, b, <, >=, "d", __VA_ARGS__)
98048-#define expect_d_le(a, b, ...)	expect_cmp(int, a, b, <=, >, "d", __VA_ARGS__)
98049-#define expect_d_ge(a, b, ...)	expect_cmp(int, a, b, >=, <, "d", __VA_ARGS__)
98050-#define expect_d_gt(a, b, ...)	expect_cmp(int, a, b, >, <=, "d", __VA_ARGS__)
98051-
98052-#define expect_u_eq(a, b, ...)	expect_cmp(int, a, b, ==, !=, "u", __VA_ARGS__)
98053-#define expect_u_ne(a, b, ...)	expect_cmp(int, a, b, !=, ==, "u", __VA_ARGS__)
98054-#define expect_u_lt(a, b, ...)	expect_cmp(int, a, b, <, >=, "u", __VA_ARGS__)
98055-#define expect_u_le(a, b, ...)	expect_cmp(int, a, b, <=, >, "u", __VA_ARGS__)
98056-#define expect_u_ge(a, b, ...)	expect_cmp(int, a, b, >=, <, "u", __VA_ARGS__)
98057-#define expect_u_gt(a, b, ...)	expect_cmp(int, a, b, >, <=, "u", __VA_ARGS__)
98058-
98059-#define expect_ld_eq(a, b, ...)	expect_cmp(long, a, b, ==,	\
98060-    !=, "ld", __VA_ARGS__)
98061-#define expect_ld_ne(a, b, ...)	expect_cmp(long, a, b, !=,	\
98062-    ==, "ld", __VA_ARGS__)
98063-#define expect_ld_lt(a, b, ...)	expect_cmp(long, a, b, <,	\
98064-    >=, "ld", __VA_ARGS__)
98065-#define expect_ld_le(a, b, ...)	expect_cmp(long, a, b, <=,	\
98066-    >, "ld", __VA_ARGS__)
98067-#define expect_ld_ge(a, b, ...)	expect_cmp(long, a, b, >=,	\
98068-    <, "ld", __VA_ARGS__)
98069-#define expect_ld_gt(a, b, ...)	expect_cmp(long, a, b, >,	\
98070-    <=, "ld", __VA_ARGS__)
98071-
98072-#define expect_lu_eq(a, b, ...)	expect_cmp(unsigned long,	\
98073-    a, b, ==, !=, "lu", __VA_ARGS__)
98074-#define expect_lu_ne(a, b, ...)	expect_cmp(unsigned long,	\
98075-    a, b, !=, ==, "lu", __VA_ARGS__)
98076-#define expect_lu_lt(a, b, ...)	expect_cmp(unsigned long,	\
98077-    a, b, <, >=, "lu", __VA_ARGS__)
98078-#define expect_lu_le(a, b, ...)	expect_cmp(unsigned long,	\
98079-    a, b, <=, >, "lu", __VA_ARGS__)
98080-#define expect_lu_ge(a, b, ...)	expect_cmp(unsigned long,	\
98081-    a, b, >=, <, "lu", __VA_ARGS__)
98082-#define expect_lu_gt(a, b, ...)	expect_cmp(unsigned long,	\
98083-    a, b, >, <=, "lu", __VA_ARGS__)
98084-
98085-#define expect_qd_eq(a, b, ...)	expect_cmp(long long, a, b, ==,	\
98086-    !=, "qd", __VA_ARGS__)
98087-#define expect_qd_ne(a, b, ...)	expect_cmp(long long, a, b, !=,	\
98088-    ==, "qd", __VA_ARGS__)
98089-#define expect_qd_lt(a, b, ...)	expect_cmp(long long, a, b, <,	\
98090-    >=, "qd", __VA_ARGS__)
98091-#define expect_qd_le(a, b, ...)	expect_cmp(long long, a, b, <=,	\
98092-    >, "qd", __VA_ARGS__)
98093-#define expect_qd_ge(a, b, ...)	expect_cmp(long long, a, b, >=,	\
98094-    <, "qd", __VA_ARGS__)
98095-#define expect_qd_gt(a, b, ...)	expect_cmp(long long, a, b, >,	\
98096-    <=, "qd", __VA_ARGS__)
98097-
98098-#define expect_qu_eq(a, b, ...)	expect_cmp(unsigned long long,	\
98099-    a, b, ==, !=, "qu", __VA_ARGS__)
98100-#define expect_qu_ne(a, b, ...)	expect_cmp(unsigned long long,	\
98101-    a, b, !=, ==, "qu", __VA_ARGS__)
98102-#define expect_qu_lt(a, b, ...)	expect_cmp(unsigned long long,	\
98103-    a, b, <, >=, "qu", __VA_ARGS__)
98104-#define expect_qu_le(a, b, ...)	expect_cmp(unsigned long long,	\
98105-    a, b, <=, >, "qu", __VA_ARGS__)
98106-#define expect_qu_ge(a, b, ...)	expect_cmp(unsigned long long,	\
98107-    a, b, >=, <, "qu", __VA_ARGS__)
98108-#define expect_qu_gt(a, b, ...)	expect_cmp(unsigned long long,	\
98109-    a, b, >, <=, "qu", __VA_ARGS__)
98110-
98111-#define expect_jd_eq(a, b, ...)	expect_cmp(intmax_t, a, b, ==,	\
98112-    !=, "jd", __VA_ARGS__)
98113-#define expect_jd_ne(a, b, ...)	expect_cmp(intmax_t, a, b, !=,	\
98114-    ==, "jd", __VA_ARGS__)
98115-#define expect_jd_lt(a, b, ...)	expect_cmp(intmax_t, a, b, <,	\
98116-    >=, "jd", __VA_ARGS__)
98117-#define expect_jd_le(a, b, ...)	expect_cmp(intmax_t, a, b, <=,	\
98118-    >, "jd", __VA_ARGS__)
98119-#define expect_jd_ge(a, b, ...)	expect_cmp(intmax_t, a, b, >=,	\
98120-    <, "jd", __VA_ARGS__)
98121-#define expect_jd_gt(a, b, ...)	expect_cmp(intmax_t, a, b, >,	\
98122-    <=, "jd", __VA_ARGS__)
98123-
98124-#define expect_ju_eq(a, b, ...)	expect_cmp(uintmax_t, a, b, ==,	\
98125-    !=, "ju", __VA_ARGS__)
98126-#define expect_ju_ne(a, b, ...)	expect_cmp(uintmax_t, a, b, !=,	\
98127-    ==, "ju", __VA_ARGS__)
98128-#define expect_ju_lt(a, b, ...)	expect_cmp(uintmax_t, a, b, <,	\
98129-    >=, "ju", __VA_ARGS__)
98130-#define expect_ju_le(a, b, ...)	expect_cmp(uintmax_t, a, b, <=,	\
98131-    >, "ju", __VA_ARGS__)
98132-#define expect_ju_ge(a, b, ...)	expect_cmp(uintmax_t, a, b, >=,	\
98133-    <, "ju", __VA_ARGS__)
98134-#define expect_ju_gt(a, b, ...)	expect_cmp(uintmax_t, a, b, >,	\
98135-    <=, "ju", __VA_ARGS__)
98136-
98137-#define expect_zd_eq(a, b, ...)	expect_cmp(ssize_t, a, b, ==,	\
98138-    !=, "zd", __VA_ARGS__)
98139-#define expect_zd_ne(a, b, ...)	expect_cmp(ssize_t, a, b, !=,	\
98140-    ==, "zd", __VA_ARGS__)
98141-#define expect_zd_lt(a, b, ...)	expect_cmp(ssize_t, a, b, <,	\
98142-    >=, "zd", __VA_ARGS__)
98143-#define expect_zd_le(a, b, ...)	expect_cmp(ssize_t, a, b, <=,	\
98144-    >, "zd", __VA_ARGS__)
98145-#define expect_zd_ge(a, b, ...)	expect_cmp(ssize_t, a, b, >=,	\
98146-    <, "zd", __VA_ARGS__)
98147-#define expect_zd_gt(a, b, ...)	expect_cmp(ssize_t, a, b, >,	\
98148-    <=, "zd", __VA_ARGS__)
98149-
98150-#define expect_zu_eq(a, b, ...)	expect_cmp(size_t, a, b, ==,	\
98151-    !=, "zu", __VA_ARGS__)
98152-#define expect_zu_ne(a, b, ...)	expect_cmp(size_t, a, b, !=,	\
98153-    ==, "zu", __VA_ARGS__)
98154-#define expect_zu_lt(a, b, ...)	expect_cmp(size_t, a, b, <,	\
98155-    >=, "zu", __VA_ARGS__)
98156-#define expect_zu_le(a, b, ...)	expect_cmp(size_t, a, b, <=,	\
98157-    >, "zu", __VA_ARGS__)
98158-#define expect_zu_ge(a, b, ...)	expect_cmp(size_t, a, b, >=,	\
98159-    <, "zu", __VA_ARGS__)
98160-#define expect_zu_gt(a, b, ...)	expect_cmp(size_t, a, b, >,	\
98161-    <=, "zu", __VA_ARGS__)
98162-
98163-#define expect_d32_eq(a, b, ...)	expect_cmp(int32_t, a, b, ==,	\
98164-    !=, FMTd32, __VA_ARGS__)
98165-#define expect_d32_ne(a, b, ...)	expect_cmp(int32_t, a, b, !=,	\
98166-    ==, FMTd32, __VA_ARGS__)
98167-#define expect_d32_lt(a, b, ...)	expect_cmp(int32_t, a, b, <,	\
98168-    >=, FMTd32, __VA_ARGS__)
98169-#define expect_d32_le(a, b, ...)	expect_cmp(int32_t, a, b, <=,	\
98170-    >, FMTd32, __VA_ARGS__)
98171-#define expect_d32_ge(a, b, ...)	expect_cmp(int32_t, a, b, >=,	\
98172-    <, FMTd32, __VA_ARGS__)
98173-#define expect_d32_gt(a, b, ...)	expect_cmp(int32_t, a, b, >,	\
98174-    <=, FMTd32, __VA_ARGS__)
98175-
98176-#define expect_u32_eq(a, b, ...)	expect_cmp(uint32_t, a, b, ==,	\
98177-    !=, FMTu32, __VA_ARGS__)
98178-#define expect_u32_ne(a, b, ...)	expect_cmp(uint32_t, a, b, !=,	\
98179-    ==, FMTu32, __VA_ARGS__)
98180-#define expect_u32_lt(a, b, ...)	expect_cmp(uint32_t, a, b, <,	\
98181-    >=, FMTu32, __VA_ARGS__)
98182-#define expect_u32_le(a, b, ...)	expect_cmp(uint32_t, a, b, <=,	\
98183-    >, FMTu32, __VA_ARGS__)
98184-#define expect_u32_ge(a, b, ...)	expect_cmp(uint32_t, a, b, >=,	\
98185-    <, FMTu32, __VA_ARGS__)
98186-#define expect_u32_gt(a, b, ...)	expect_cmp(uint32_t, a, b, >,	\
98187-    <=, FMTu32, __VA_ARGS__)
98188-
98189-#define expect_d64_eq(a, b, ...)	expect_cmp(int64_t, a, b, ==,	\
98190-    !=, FMTd64, __VA_ARGS__)
98191-#define expect_d64_ne(a, b, ...)	expect_cmp(int64_t, a, b, !=,	\
98192-    ==, FMTd64, __VA_ARGS__)
98193-#define expect_d64_lt(a, b, ...)	expect_cmp(int64_t, a, b, <,	\
98194-    >=, FMTd64, __VA_ARGS__)
98195-#define expect_d64_le(a, b, ...)	expect_cmp(int64_t, a, b, <=,	\
98196-    >, FMTd64, __VA_ARGS__)
98197-#define expect_d64_ge(a, b, ...)	expect_cmp(int64_t, a, b, >=,	\
98198-    <, FMTd64, __VA_ARGS__)
98199-#define expect_d64_gt(a, b, ...)	expect_cmp(int64_t, a, b, >,	\
98200-    <=, FMTd64, __VA_ARGS__)
98201-
98202-#define expect_u64_eq(a, b, ...)	expect_cmp(uint64_t, a, b, ==,	\
98203-    !=, FMTu64, __VA_ARGS__)
98204-#define expect_u64_ne(a, b, ...)	expect_cmp(uint64_t, a, b, !=,	\
98205-    ==, FMTu64, __VA_ARGS__)
98206-#define expect_u64_lt(a, b, ...)	expect_cmp(uint64_t, a, b, <,	\
98207-    >=, FMTu64, __VA_ARGS__)
98208-#define expect_u64_le(a, b, ...)	expect_cmp(uint64_t, a, b, <=,	\
98209-    >, FMTu64, __VA_ARGS__)
98210-#define expect_u64_ge(a, b, ...)	expect_cmp(uint64_t, a, b, >=,	\
98211-    <, FMTu64, __VA_ARGS__)
98212-#define expect_u64_gt(a, b, ...)	expect_cmp(uint64_t, a, b, >,	\
98213-    <=, FMTu64, __VA_ARGS__)
98214-
98215-#define verify_b_eq(may_abort, a, b, ...) do {				\
98216-	bool a_ = (a);							\
98217-	bool b_ = (b);							\
98218-	if (!(a_ == b_)) {						\
98219-		char prefix[ASSERT_BUFSIZE];				\
98220-		char message[ASSERT_BUFSIZE];				\
98221-		malloc_snprintf(prefix, sizeof(prefix),			\
98222-		    "%s:%s:%d: Failed assertion: "			\
98223-		    "(%s) == (%s) --> %s != %s: ",			\
98224-		    __func__, __FILE__, __LINE__,			\
98225-		    #a, #b, a_ ? "true" : "false",			\
98226-		    b_ ? "true" : "false");				\
98227-		malloc_snprintf(message, sizeof(message), __VA_ARGS__);	\
98228-		if (may_abort) {					\
98229-			abort();					\
98230-		} else {						\
98231-			p_test_fail(prefix, message);			\
98232-		}							\
98233-	}								\
98234-} while (0)
98235-
98236-#define verify_b_ne(may_abort, a, b, ...) do {				\
98237-	bool a_ = (a);							\
98238-	bool b_ = (b);							\
98239-	if (!(a_ != b_)) {						\
98240-		char prefix[ASSERT_BUFSIZE];				\
98241-		char message[ASSERT_BUFSIZE];				\
98242-		malloc_snprintf(prefix, sizeof(prefix),			\
98243-		    "%s:%s:%d: Failed assertion: "			\
98244-		    "(%s) != (%s) --> %s == %s: ",			\
98245-		    __func__, __FILE__, __LINE__,			\
98246-		    #a, #b, a_ ? "true" : "false",			\
98247-		    b_ ? "true" : "false");				\
98248-		malloc_snprintf(message, sizeof(message), __VA_ARGS__);	\
98249-		if (may_abort) {					\
98250-			abort();					\
98251-		} else {						\
98252-			p_test_fail(prefix, message);			\
98253-		}							\
98254-	}								\
98255-} while (0)
98256-
98257-#define expect_b_eq(a, b, ...)	verify_b_eq(false, a, b, __VA_ARGS__)
98258-#define expect_b_ne(a, b, ...)	verify_b_ne(false, a, b, __VA_ARGS__)
98259-
98260-#define expect_true(a, ...)	expect_b_eq(a, true, __VA_ARGS__)
98261-#define expect_false(a, ...)	expect_b_eq(a, false, __VA_ARGS__)
98262-
98263-#define verify_str_eq(may_abort, a, b, ...) do {			\
98264-	if (strcmp((a), (b))) {						\
98265-		char prefix[ASSERT_BUFSIZE];				\
98266-		char message[ASSERT_BUFSIZE];				\
98267-		malloc_snprintf(prefix, sizeof(prefix),			\
98268-		    "%s:%s:%d: Failed assertion: "			\
98269-		    "(%s) same as (%s) --> "				\
98270-		    "\"%s\" differs from \"%s\": ",			\
98271-		    __func__, __FILE__, __LINE__, #a, #b, a, b);	\
98272-		malloc_snprintf(message, sizeof(message), __VA_ARGS__);	\
98273-		if (may_abort) {					\
98274-			abort();					\
98275-		} else {						\
98276-			p_test_fail(prefix, message);			\
98277-		}							\
98278-	}								\
98279-} while (0)
98280-
98281-#define verify_str_ne(may_abort, a, b, ...) do {			\
98282-	if (!strcmp((a), (b))) {					\
98283-		char prefix[ASSERT_BUFSIZE];				\
98284-		char message[ASSERT_BUFSIZE];				\
98285-		malloc_snprintf(prefix, sizeof(prefix),			\
98286-		    "%s:%s:%d: Failed assertion: "			\
98287-		    "(%s) differs from (%s) --> "			\
98288-		    "\"%s\" same as \"%s\": ",				\
98289-		    __func__, __FILE__, __LINE__, #a, #b, a, b);	\
98290-		malloc_snprintf(message, sizeof(message), __VA_ARGS__);	\
98291-		if (may_abort) {					\
98292-			abort();					\
98293-		} else {						\
98294-			p_test_fail(prefix, message);			\
98295-		}							\
98296-	}								\
98297-} while (0)
98298-
98299-#define expect_str_eq(a, b, ...) verify_str_eq(false, a, b, __VA_ARGS__)
98300-#define expect_str_ne(a, b, ...) verify_str_ne(false, a, b, __VA_ARGS__)
98301-
98302-#define verify_not_reached(may_abort, ...) do {				\
98303-	char prefix[ASSERT_BUFSIZE];					\
98304-	char message[ASSERT_BUFSIZE];					\
98305-	malloc_snprintf(prefix, sizeof(prefix),				\
98306-	    "%s:%s:%d: Unreachable code reached: ",			\
98307-	    __func__, __FILE__, __LINE__);				\
98308-	malloc_snprintf(message, sizeof(message), __VA_ARGS__);		\
98309-	if (may_abort) {						\
98310-		abort();						\
98311-	} else {							\
98312-		p_test_fail(prefix, message);				\
98313-	}								\
98314-} while (0)
98315-
98316-#define expect_not_reached(...) verify_not_reached(false, __VA_ARGS__)
98317-
98318-#define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) verify_cmp(true,	\
98319-    t, a, b, cmp, neg_cmp, pri, __VA_ARGS__)
98320-
98321-#define assert_ptr_eq(a, b, ...)	assert_cmp(void *, a, b, ==,	\
98322-    !=, "p", __VA_ARGS__)
98323-#define assert_ptr_ne(a, b, ...)	assert_cmp(void *, a, b, !=,	\
98324-    ==, "p", __VA_ARGS__)
98325-#define assert_ptr_null(a, ...)		assert_cmp(void *, a, NULL, ==,	\
98326-    !=, "p", __VA_ARGS__)
98327-#define assert_ptr_not_null(a, ...)	assert_cmp(void *, a, NULL, !=,	\
98328-    ==, "p", __VA_ARGS__)
98329-
98330-#define assert_c_eq(a, b, ...)	assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__)
98331-#define assert_c_ne(a, b, ...)	assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__)
98332-#define assert_c_lt(a, b, ...)	assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__)
98333-#define assert_c_le(a, b, ...)	assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__)
98334-#define assert_c_ge(a, b, ...)	assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__)
98335-#define assert_c_gt(a, b, ...)	assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__)
98336-
98337-#define assert_x_eq(a, b, ...)	assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__)
98338-#define assert_x_ne(a, b, ...)	assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__)
98339-#define assert_x_lt(a, b, ...)	assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__)
98340-#define assert_x_le(a, b, ...)	assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__)
98341-#define assert_x_ge(a, b, ...)	assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__)
98342-#define assert_x_gt(a, b, ...)	assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__)
98343-
98344-#define assert_d_eq(a, b, ...)	assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__)
98345-#define assert_d_ne(a, b, ...)	assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__)
98346-#define assert_d_lt(a, b, ...)	assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__)
98347-#define assert_d_le(a, b, ...)	assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__)
98348-#define assert_d_ge(a, b, ...)	assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__)
98349-#define assert_d_gt(a, b, ...)	assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__)
98350-
98351-#define assert_u_eq(a, b, ...)	assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__)
98352-#define assert_u_ne(a, b, ...)	assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__)
98353-#define assert_u_lt(a, b, ...)	assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__)
98354-#define assert_u_le(a, b, ...)	assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__)
98355-#define assert_u_ge(a, b, ...)	assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__)
98356-#define assert_u_gt(a, b, ...)	assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__)
98357-
98358-#define assert_ld_eq(a, b, ...)	assert_cmp(long, a, b, ==,	\
98359-    !=, "ld", __VA_ARGS__)
98360-#define assert_ld_ne(a, b, ...)	assert_cmp(long, a, b, !=,	\
98361-    ==, "ld", __VA_ARGS__)
98362-#define assert_ld_lt(a, b, ...)	assert_cmp(long, a, b, <,	\
98363-    >=, "ld", __VA_ARGS__)
98364-#define assert_ld_le(a, b, ...)	assert_cmp(long, a, b, <=,	\
98365-    >, "ld", __VA_ARGS__)
98366-#define assert_ld_ge(a, b, ...)	assert_cmp(long, a, b, >=,	\
98367-    <, "ld", __VA_ARGS__)
98368-#define assert_ld_gt(a, b, ...)	assert_cmp(long, a, b, >,	\
98369-    <=, "ld", __VA_ARGS__)
98370-
98371-#define assert_lu_eq(a, b, ...)	assert_cmp(unsigned long,	\
98372-    a, b, ==, !=, "lu", __VA_ARGS__)
98373-#define assert_lu_ne(a, b, ...)	assert_cmp(unsigned long,	\
98374-    a, b, !=, ==, "lu", __VA_ARGS__)
98375-#define assert_lu_lt(a, b, ...)	assert_cmp(unsigned long,	\
98376-    a, b, <, >=, "lu", __VA_ARGS__)
98377-#define assert_lu_le(a, b, ...)	assert_cmp(unsigned long,	\
98378-    a, b, <=, >, "lu", __VA_ARGS__)
98379-#define assert_lu_ge(a, b, ...)	assert_cmp(unsigned long,	\
98380-    a, b, >=, <, "lu", __VA_ARGS__)
98381-#define assert_lu_gt(a, b, ...)	assert_cmp(unsigned long,	\
98382-    a, b, >, <=, "lu", __VA_ARGS__)
98383-
98384-#define assert_qd_eq(a, b, ...)	assert_cmp(long long, a, b, ==,	\
98385-    !=, "qd", __VA_ARGS__)
98386-#define assert_qd_ne(a, b, ...)	assert_cmp(long long, a, b, !=,	\
98387-    ==, "qd", __VA_ARGS__)
98388-#define assert_qd_lt(a, b, ...)	assert_cmp(long long, a, b, <,	\
98389-    >=, "qd", __VA_ARGS__)
98390-#define assert_qd_le(a, b, ...)	assert_cmp(long long, a, b, <=,	\
98391-    >, "qd", __VA_ARGS__)
98392-#define assert_qd_ge(a, b, ...)	assert_cmp(long long, a, b, >=,	\
98393-    <, "qd", __VA_ARGS__)
98394-#define assert_qd_gt(a, b, ...)	assert_cmp(long long, a, b, >,	\
98395-    <=, "qd", __VA_ARGS__)
98396-
98397-#define assert_qu_eq(a, b, ...)	assert_cmp(unsigned long long,	\
98398-    a, b, ==, !=, "qu", __VA_ARGS__)
98399-#define assert_qu_ne(a, b, ...)	assert_cmp(unsigned long long,	\
98400-    a, b, !=, ==, "qu", __VA_ARGS__)
98401-#define assert_qu_lt(a, b, ...)	assert_cmp(unsigned long long,	\
98402-    a, b, <, >=, "qu", __VA_ARGS__)
98403-#define assert_qu_le(a, b, ...)	assert_cmp(unsigned long long,	\
98404-    a, b, <=, >, "qu", __VA_ARGS__)
98405-#define assert_qu_ge(a, b, ...)	assert_cmp(unsigned long long,	\
98406-    a, b, >=, <, "qu", __VA_ARGS__)
98407-#define assert_qu_gt(a, b, ...)	assert_cmp(unsigned long long,	\
98408-    a, b, >, <=, "qu", __VA_ARGS__)
98409-
98410-#define assert_jd_eq(a, b, ...)	assert_cmp(intmax_t, a, b, ==,	\
98411-    !=, "jd", __VA_ARGS__)
98412-#define assert_jd_ne(a, b, ...)	assert_cmp(intmax_t, a, b, !=,	\
98413-    ==, "jd", __VA_ARGS__)
98414-#define assert_jd_lt(a, b, ...)	assert_cmp(intmax_t, a, b, <,	\
98415-    >=, "jd", __VA_ARGS__)
98416-#define assert_jd_le(a, b, ...)	assert_cmp(intmax_t, a, b, <=,	\
98417-    >, "jd", __VA_ARGS__)
98418-#define assert_jd_ge(a, b, ...)	assert_cmp(intmax_t, a, b, >=,	\
98419-    <, "jd", __VA_ARGS__)
98420-#define assert_jd_gt(a, b, ...)	assert_cmp(intmax_t, a, b, >,	\
98421-    <=, "jd", __VA_ARGS__)
98422-
98423-#define assert_ju_eq(a, b, ...)	assert_cmp(uintmax_t, a, b, ==,	\
98424-    !=, "ju", __VA_ARGS__)
98425-#define assert_ju_ne(a, b, ...)	assert_cmp(uintmax_t, a, b, !=,	\
98426-    ==, "ju", __VA_ARGS__)
98427-#define assert_ju_lt(a, b, ...)	assert_cmp(uintmax_t, a, b, <,	\
98428-    >=, "ju", __VA_ARGS__)
98429-#define assert_ju_le(a, b, ...)	assert_cmp(uintmax_t, a, b, <=,	\
98430-    >, "ju", __VA_ARGS__)
98431-#define assert_ju_ge(a, b, ...)	assert_cmp(uintmax_t, a, b, >=,	\
98432-    <, "ju", __VA_ARGS__)
98433-#define assert_ju_gt(a, b, ...)	assert_cmp(uintmax_t, a, b, >,	\
98434-    <=, "ju", __VA_ARGS__)
98435-
98436-#define assert_zd_eq(a, b, ...)	assert_cmp(ssize_t, a, b, ==,	\
98437-    !=, "zd", __VA_ARGS__)
98438-#define assert_zd_ne(a, b, ...)	assert_cmp(ssize_t, a, b, !=,	\
98439-    ==, "zd", __VA_ARGS__)
98440-#define assert_zd_lt(a, b, ...)	assert_cmp(ssize_t, a, b, <,	\
98441-    >=, "zd", __VA_ARGS__)
98442-#define assert_zd_le(a, b, ...)	assert_cmp(ssize_t, a, b, <=,	\
98443-    >, "zd", __VA_ARGS__)
98444-#define assert_zd_ge(a, b, ...)	assert_cmp(ssize_t, a, b, >=,	\
98445-    <, "zd", __VA_ARGS__)
98446-#define assert_zd_gt(a, b, ...)	assert_cmp(ssize_t, a, b, >,	\
98447-    <=, "zd", __VA_ARGS__)
98448-
98449-#define assert_zu_eq(a, b, ...)	assert_cmp(size_t, a, b, ==,	\
98450-    !=, "zu", __VA_ARGS__)
98451-#define assert_zu_ne(a, b, ...)	assert_cmp(size_t, a, b, !=,	\
98452-    ==, "zu", __VA_ARGS__)
98453-#define assert_zu_lt(a, b, ...)	assert_cmp(size_t, a, b, <,	\
98454-    >=, "zu", __VA_ARGS__)
98455-#define assert_zu_le(a, b, ...)	assert_cmp(size_t, a, b, <=,	\
98456-    >, "zu", __VA_ARGS__)
98457-#define assert_zu_ge(a, b, ...)	assert_cmp(size_t, a, b, >=,	\
98458-    <, "zu", __VA_ARGS__)
98459-#define assert_zu_gt(a, b, ...)	assert_cmp(size_t, a, b, >,	\
98460-    <=, "zu", __VA_ARGS__)
98461-
98462-#define assert_d32_eq(a, b, ...)	assert_cmp(int32_t, a, b, ==,	\
98463-    !=, FMTd32, __VA_ARGS__)
98464-#define assert_d32_ne(a, b, ...)	assert_cmp(int32_t, a, b, !=,	\
98465-    ==, FMTd32, __VA_ARGS__)
98466-#define assert_d32_lt(a, b, ...)	assert_cmp(int32_t, a, b, <,	\
98467-    >=, FMTd32, __VA_ARGS__)
98468-#define assert_d32_le(a, b, ...)	assert_cmp(int32_t, a, b, <=,	\
98469-    >, FMTd32, __VA_ARGS__)
98470-#define assert_d32_ge(a, b, ...)	assert_cmp(int32_t, a, b, >=,	\
98471-    <, FMTd32, __VA_ARGS__)
98472-#define assert_d32_gt(a, b, ...)	assert_cmp(int32_t, a, b, >,	\
98473-    <=, FMTd32, __VA_ARGS__)
98474-
98475-#define assert_u32_eq(a, b, ...)	assert_cmp(uint32_t, a, b, ==,	\
98476-    !=, FMTu32, __VA_ARGS__)
98477-#define assert_u32_ne(a, b, ...)	assert_cmp(uint32_t, a, b, !=,	\
98478-    ==, FMTu32, __VA_ARGS__)
98479-#define assert_u32_lt(a, b, ...)	assert_cmp(uint32_t, a, b, <,	\
98480-    >=, FMTu32, __VA_ARGS__)
98481-#define assert_u32_le(a, b, ...)	assert_cmp(uint32_t, a, b, <=,	\
98482-    >, FMTu32, __VA_ARGS__)
98483-#define assert_u32_ge(a, b, ...)	assert_cmp(uint32_t, a, b, >=,	\
98484-    <, FMTu32, __VA_ARGS__)
98485-#define assert_u32_gt(a, b, ...)	assert_cmp(uint32_t, a, b, >,	\
98486-    <=, FMTu32, __VA_ARGS__)
98487-
98488-#define assert_d64_eq(a, b, ...)	assert_cmp(int64_t, a, b, ==,	\
98489-    !=, FMTd64, __VA_ARGS__)
98490-#define assert_d64_ne(a, b, ...)	assert_cmp(int64_t, a, b, !=,	\
98491-    ==, FMTd64, __VA_ARGS__)
98492-#define assert_d64_lt(a, b, ...)	assert_cmp(int64_t, a, b, <,	\
98493-    >=, FMTd64, __VA_ARGS__)
98494-#define assert_d64_le(a, b, ...)	assert_cmp(int64_t, a, b, <=,	\
98495-    >, FMTd64, __VA_ARGS__)
98496-#define assert_d64_ge(a, b, ...)	assert_cmp(int64_t, a, b, >=,	\
98497-    <, FMTd64, __VA_ARGS__)
98498-#define assert_d64_gt(a, b, ...)	assert_cmp(int64_t, a, b, >,	\
98499-    <=, FMTd64, __VA_ARGS__)
98500-
98501-#define assert_u64_eq(a, b, ...)	assert_cmp(uint64_t, a, b, ==,	\
98502-    !=, FMTu64, __VA_ARGS__)
98503-#define assert_u64_ne(a, b, ...)	assert_cmp(uint64_t, a, b, !=,	\
98504-    ==, FMTu64, __VA_ARGS__)
98505-#define assert_u64_lt(a, b, ...)	assert_cmp(uint64_t, a, b, <,	\
98506-    >=, FMTu64, __VA_ARGS__)
98507-#define assert_u64_le(a, b, ...)	assert_cmp(uint64_t, a, b, <=,	\
98508-    >, FMTu64, __VA_ARGS__)
98509-#define assert_u64_ge(a, b, ...)	assert_cmp(uint64_t, a, b, >=,	\
98510-    <, FMTu64, __VA_ARGS__)
98511-#define assert_u64_gt(a, b, ...)	assert_cmp(uint64_t, a, b, >,	\
98512-    <=, FMTu64, __VA_ARGS__)
98513-
98514-#define assert_b_eq(a, b, ...)	verify_b_eq(true, a, b, __VA_ARGS__)
98515-#define assert_b_ne(a, b, ...)	verify_b_ne(true, a, b, __VA_ARGS__)
98516-
98517-#define assert_true(a, ...)	assert_b_eq(a, true, __VA_ARGS__)
98518-#define assert_false(a, ...)	assert_b_eq(a, false, __VA_ARGS__)
98519-
98520-#define assert_str_eq(a, b, ...) verify_str_eq(true, a, b, __VA_ARGS__)
98521-#define assert_str_ne(a, b, ...) verify_str_ne(true, a, b, __VA_ARGS__)
98522-
98523-#define assert_not_reached(...) verify_not_reached(true, __VA_ARGS__)
98524-
98525-/*
98526- * If this enum changes, corresponding changes in test/test.sh.in are also
98527- * necessary.
98528- */
98529-typedef enum {
98530-	test_status_pass = 0,
98531-	test_status_skip = 1,
98532-	test_status_fail = 2,
98533-
98534-	test_status_count = 3
98535-} test_status_t;
98536-
98537-typedef void (test_t)(void);
98538-
98539-#define TEST_BEGIN(f)							\
98540-static void								\
98541-f(void) {								\
98542-	p_test_init(#f);
98543-
98544-#define TEST_END							\
98545-	goto label_test_end;						\
98546-label_test_end:								\
98547-	p_test_fini();							\
98548-}
98549-
98550-#define test(...)							\
98551-	p_test(__VA_ARGS__, NULL)
98552-
98553-#define test_no_reentrancy(...)							\
98554-	p_test_no_reentrancy(__VA_ARGS__, NULL)
98555-
98556-#define test_no_malloc_init(...)					\
98557-	p_test_no_malloc_init(__VA_ARGS__, NULL)
98558-
98559-#define test_skip_if(e) do {						\
98560-	if (e) {							\
98561-		test_skip("%s:%s:%d: Test skipped: (%s)",		\
98562-		    __func__, __FILE__, __LINE__, #e);			\
98563-		goto label_test_end;					\
98564-	}								\
98565-} while (0)
98566-
98567-bool test_is_reentrant();
98568-
98569-void	test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
98570-void	test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
98571-
98572-/* For private use by macros. */
98573-test_status_t	p_test(test_t *t, ...);
98574-test_status_t	p_test_no_reentrancy(test_t *t, ...);
98575-test_status_t	p_test_no_malloc_init(test_t *t, ...);
98576-void	p_test_init(const char *name);
98577-void	p_test_fini(void);
98578-void	p_test_fail(const char *prefix, const char *message);
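
For orientation, the header removed above supplies the expect_*/assert_* comparison matrix, the TEST_BEGIN/TEST_END wrappers, and the test() driver. A minimal sketch of a test translation unit built on those pieces, patterned on the integration tests later in this diff (TEST_MALLOC_SIZE comes from the same harness):

#include "test/jemalloc_test.h"

/*
 * expect_* records a failure and keeps running; assert_* aborts immediately;
 * test_skip_if() jumps to the label emitted by TEST_END.
 */
TEST_BEGIN(test_example) {
	void *p = malloc(16);

	assert_ptr_not_null(p, "malloc(16) should not fail");
	expect_zu_ge(TEST_MALLOC_SIZE(p), (size_t)16,
	    "Usable size should cover the request");
	free(p);
}
TEST_END

int
main(void) {
	/* test() runs each test_t in order and reports pass/skip/fail. */
	return test(
	    test_example);
}
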
98579diff --git a/jemalloc/test/include/test/thd.h b/jemalloc/test/include/test/thd.h
98580deleted file mode 100644
98581index 47a5126..0000000
98582--- a/jemalloc/test/include/test/thd.h
98583+++ /dev/null
98584@@ -1,9 +0,0 @@
98585-/* Abstraction layer for threading in tests. */
98586-#ifdef _WIN32
98587-typedef HANDLE thd_t;
98588-#else
98589-typedef pthread_t thd_t;
98590-#endif
98591-
98592-void	thd_create(thd_t *thd, void *(*proc)(void *), void *arg);
98593-void	thd_join(thd_t thd, void **ret);
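
thd.h hides the pthread_t vs. HANDLE difference behind thd_create()/thd_join(). A condensed sketch of the spawn-and-join pattern the integration tests below rely on:

#include "test/jemalloc_test.h"

/* Worker receives its index through the void * argument. */
static void *
worker(void *arg) {
	unsigned ind = (unsigned)(uintptr_t)arg;
	void *p = malloc((size_t)ind + 1);

	expect_ptr_not_null(p, "Unexpected malloc() failure");
	free(p);
	return NULL;
}

TEST_BEGIN(test_thd_sketch) {
	thd_t thds[4];
	unsigned i;

	for (i = 0; i < 4; i++) {
		thd_create(&thds[i], worker, (void *)(uintptr_t)i);
	}
	for (i = 0; i < 4; i++) {
		thd_join(thds[i], NULL);
	}
}
TEST_END

int
main(void) {
	return test(
	    test_thd_sketch);
}
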
98594diff --git a/jemalloc/test/include/test/timer.h b/jemalloc/test/include/test/timer.h
98595deleted file mode 100644
98596index ace6191..0000000
98597--- a/jemalloc/test/include/test/timer.h
98598+++ /dev/null
98599@@ -1,11 +0,0 @@
98600-/* Simple timer, for use in benchmark reporting. */
98601-
98602-typedef struct {
98603-	nstime_t t0;
98604-	nstime_t t1;
98605-} timedelta_t;
98606-
98607-void	timer_start(timedelta_t *timer);
98608-void	timer_stop(timedelta_t *timer);
98609-uint64_t	timer_usec(const timedelta_t *timer);
98610-void	timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen);
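
timer.h declares a small stopwatch for benchmark reporting. No caller appears in this hunk, so the usage below is inferred from the prototypes alone; it is a hedged sketch rather than code taken from the suite: time two workloads, then format their ratio.

#include "test/jemalloc_test.h"

TEST_BEGIN(test_timer_sketch) {
	timedelta_t ta, tb;
	char ratio[32];
	unsigned i;
	void *p;

	timer_start(&ta);
	for (i = 0; i < 1000; i++) {
		p = malloc(32);
		expect_ptr_not_null(p, "Unexpected malloc() failure");
		free(p);
	}
	timer_stop(&ta);

	timer_start(&tb);
	for (i = 0; i < 1000; i++) {
		p = malloc(4096);
		expect_ptr_not_null(p, "Unexpected malloc() failure");
		free(p);
	}
	timer_stop(&tb);

	/* timer_usec() reports elapsed microseconds; timer_ratio() formats a/b. */
	timer_ratio(&ta, &tb, ratio, sizeof(ratio));
	expect_zu_gt(strlen(ratio), 0, "Expected a formatted ratio");
}
TEST_END

int
main(void) {
	return test(
	    test_timer_sketch);
}
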
98611diff --git a/jemalloc/test/integration/MALLOCX_ARENA.c b/jemalloc/test/integration/MALLOCX_ARENA.c
98612deleted file mode 100644
98613index 7e61df0..0000000
98614--- a/jemalloc/test/integration/MALLOCX_ARENA.c
98615+++ /dev/null
98616@@ -1,66 +0,0 @@
98617-#include "test/jemalloc_test.h"
98618-
98619-#define NTHREADS 10
98620-
98621-static bool have_dss =
98622-#ifdef JEMALLOC_DSS
98623-    true
98624-#else
98625-    false
98626-#endif
98627-    ;
98628-
98629-void *
98630-thd_start(void *arg) {
98631-	unsigned thread_ind = (unsigned)(uintptr_t)arg;
98632-	unsigned arena_ind;
98633-	void *p;
98634-	size_t sz;
98635-
98636-	sz = sizeof(arena_ind);
98637-	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
98638-	    0, "Error in arenas.create");
98639-
98640-	if (thread_ind % 4 != 3) {
98641-		size_t mib[3];
98642-		size_t miblen = sizeof(mib) / sizeof(size_t);
98643-		const char *dss_precs[] = {"disabled", "primary", "secondary"};
98644-		unsigned prec_ind = thread_ind %
98645-		    (sizeof(dss_precs)/sizeof(char*));
98646-		const char *dss = dss_precs[prec_ind];
98647-		int expected_err = (have_dss || prec_ind == 0) ? 0 : EFAULT;
98648-		expect_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
98649-		    "Error in mallctlnametomib()");
98650-		mib[1] = arena_ind;
98651-		expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss,
98652-		    sizeof(const char *)), expected_err,
98653-		    "Error in mallctlbymib()");
98654-	}
98655-
98656-	p = mallocx(1, MALLOCX_ARENA(arena_ind));
98657-	expect_ptr_not_null(p, "Unexpected mallocx() error");
98658-	dallocx(p, 0);
98659-
98660-	return NULL;
98661-}
98662-
98663-TEST_BEGIN(test_MALLOCX_ARENA) {
98664-	thd_t thds[NTHREADS];
98665-	unsigned i;
98666-
98667-	for (i = 0; i < NTHREADS; i++) {
98668-		thd_create(&thds[i], thd_start,
98669-		    (void *)(uintptr_t)i);
98670-	}
98671-
98672-	for (i = 0; i < NTHREADS; i++) {
98673-		thd_join(thds[i], NULL);
98674-	}
98675-}
98676-TEST_END
98677-
98678-int
98679-main(void) {
98680-	return test(
98681-	    test_MALLOCX_ARENA);
98682-}
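
The per-arena mallctl pattern used above (resolve "arena.0.<op>" to a MIB once with mallctlnametomib(), then patch the target arena index into mib[1]) recurs in several files below. A condensed sketch of just that pattern, using the purge operation as the example:

#include "test/jemalloc_test.h"

TEST_BEGIN(test_arena_mib_sketch) {
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	size_t mib[3];
	size_t miblen = sizeof(mib) / sizeof(size_t);

	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
	    0, "Error in arenas.create");

	/* Translate the name once; the index slot can be rewritten per call. */
	expect_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
	    "Error in mallctlnametomib()");
	mib[1] = (size_t)arena_ind;
	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Error in mallctlbymib()");
}
TEST_END

int
main(void) {
	return test(
	    test_arena_mib_sketch);
}
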
98683diff --git a/jemalloc/test/integration/aligned_alloc.c b/jemalloc/test/integration/aligned_alloc.c
98684deleted file mode 100644
98685index b37d5ba..0000000
98686--- a/jemalloc/test/integration/aligned_alloc.c
98687+++ /dev/null
98688@@ -1,157 +0,0 @@
98689-#include "test/jemalloc_test.h"
98690-
98691-#define MAXALIGN (((size_t)1) << 23)
98692-
98693-/*
98694- * On systems which can't merge extents, tests that call this function generate
98695- * a lot of dirty memory very quickly.  Purging between cycles mitigates
98696- * potential OOM on e.g. 32-bit Windows.
98697- */
98698-static void
98699-purge(void) {
98700-	expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
98701-	    "Unexpected mallctl error");
98702-}
98703-
98704-TEST_BEGIN(test_alignment_errors) {
98705-	size_t alignment;
98706-	void *p;
98707-
98708-	alignment = 0;
98709-	set_errno(0);
98710-	p = aligned_alloc(alignment, 1);
98711-	expect_false(p != NULL || get_errno() != EINVAL,
98712-	    "Expected error for invalid alignment %zu", alignment);
98713-
98714-	for (alignment = sizeof(size_t); alignment < MAXALIGN;
98715-	    alignment <<= 1) {
98716-		set_errno(0);
98717-		p = aligned_alloc(alignment + 1, 1);
98718-		expect_false(p != NULL || get_errno() != EINVAL,
98719-		    "Expected error for invalid alignment %zu",
98720-		    alignment + 1);
98721-	}
98722-}
98723-TEST_END
98724-
98725-
98726-/*
98727- * GCC "-Walloc-size-larger-than" warning detects when one of the memory
98728- * allocation functions is called with a size larger than the maximum size that
98729- * they support. Here we want to explicitly test that the allocation functions
98730- * do indeed fail properly when this is the case, which triggers the warning.
98731- * Therefore we disable the warning for these tests.
98732- */
98733-JEMALLOC_DIAGNOSTIC_PUSH
98734-JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
98735-
98736-TEST_BEGIN(test_oom_errors) {
98737-	size_t alignment, size;
98738-	void *p;
98739-
98740-#if LG_SIZEOF_PTR == 3
98741-	alignment = UINT64_C(0x8000000000000000);
98742-	size      = UINT64_C(0x8000000000000000);
98743-#else
98744-	alignment = 0x80000000LU;
98745-	size      = 0x80000000LU;
98746-#endif
98747-	set_errno(0);
98748-	p = aligned_alloc(alignment, size);
98749-	expect_false(p != NULL || get_errno() != ENOMEM,
98750-	    "Expected error for aligned_alloc(%zu, %zu)",
98751-	    alignment, size);
98752-
98753-#if LG_SIZEOF_PTR == 3
98754-	alignment = UINT64_C(0x4000000000000000);
98755-	size      = UINT64_C(0xc000000000000001);
98756-#else
98757-	alignment = 0x40000000LU;
98758-	size      = 0xc0000001LU;
98759-#endif
98760-	set_errno(0);
98761-	p = aligned_alloc(alignment, size);
98762-	expect_false(p != NULL || get_errno() != ENOMEM,
98763-	    "Expected error for aligned_alloc(%zu, %zu)",
98764-	    alignment, size);
98765-
98766-	alignment = 0x10LU;
98767-#if LG_SIZEOF_PTR == 3
98768-	size = UINT64_C(0xfffffffffffffff0);
98769-#else
98770-	size = 0xfffffff0LU;
98771-#endif
98772-	set_errno(0);
98773-	p = aligned_alloc(alignment, size);
98774-	expect_false(p != NULL || get_errno() != ENOMEM,
98775-	    "Expected error for aligned_alloc(%zu, %zu)",
98776-	    alignment, size);
98777-}
98778-TEST_END
98779-
98780-/* Re-enable the "-Walloc-size-larger-than=" warning */
98781-JEMALLOC_DIAGNOSTIC_POP
98782-
98783-TEST_BEGIN(test_alignment_and_size) {
98784-#define NITER 4
98785-	size_t alignment, size, total;
98786-	unsigned i;
98787-	void *ps[NITER];
98788-
98789-	for (i = 0; i < NITER; i++) {
98790-		ps[i] = NULL;
98791-	}
98792-
98793-	for (alignment = 8;
98794-	    alignment <= MAXALIGN;
98795-	    alignment <<= 1) {
98796-		total = 0;
98797-		for (size = 1;
98798-		    size < 3 * alignment && size < (1U << 31);
98799-		    size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
98800-			for (i = 0; i < NITER; i++) {
98801-				ps[i] = aligned_alloc(alignment, size);
98802-				if (ps[i] == NULL) {
98803-					char buf[BUFERROR_BUF];
98804-
98805-					buferror(get_errno(), buf, sizeof(buf));
98806-					test_fail(
98807-					    "Error for alignment=%zu, "
98808-					    "size=%zu (%#zx): %s",
98809-					    alignment, size, size, buf);
98810-				}
98811-				total += TEST_MALLOC_SIZE(ps[i]);
98812-				if (total >= (MAXALIGN << 1)) {
98813-					break;
98814-				}
98815-			}
98816-			for (i = 0; i < NITER; i++) {
98817-				if (ps[i] != NULL) {
98818-					free(ps[i]);
98819-					ps[i] = NULL;
98820-				}
98821-			}
98822-		}
98823-		purge();
98824-	}
98825-#undef NITER
98826-}
98827-TEST_END
98828-
98829-TEST_BEGIN(test_zero_alloc) {
98830-	void *res = aligned_alloc(8, 0);
98831-	assert(res);
98832-	size_t usable = TEST_MALLOC_SIZE(res);
98833-	assert(usable > 0);
98834-	free(res);
98835-}
98836-TEST_END
98837-
98838-int
98839-main(void) {
98840-	return test(
98841-	    test_alignment_errors,
98842-	    test_oom_errors,
98843-	    test_alignment_and_size,
98844-	    test_zero_alloc);
98845-}
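
The tests above exercise aligned_alloc() failure paths (EINVAL for bad alignments, ENOMEM for oversized requests). A small success-path companion, assuming standard aligned_alloc() semantics, that checks a returned pointer actually honors the requested alignment; the mask test mirrors the one used in mallocx.c later in this diff:

#include "test/jemalloc_test.h"

TEST_BEGIN(test_alignment_honored) {
	size_t alignment;
	void *p;

	for (alignment = sizeof(void *); alignment <= (size_t)4096;
	    alignment <<= 1) {
		/* size == alignment keeps the request valid per C11. */
		p = aligned_alloc(alignment, alignment);
		expect_ptr_not_null(p, "aligned_alloc(%zu, %zu) failed",
		    alignment, alignment);
		expect_ptr_null((void *)((uintptr_t)p & (alignment - 1)),
		    "%p is not %zu-aligned", p, alignment);
		free(p);
	}
}
TEST_END

int
main(void) {
	return test(
	    test_alignment_honored);
}
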
98846diff --git a/jemalloc/test/integration/allocated.c b/jemalloc/test/integration/allocated.c
98847deleted file mode 100644
98848index 0c64272..0000000
98849--- a/jemalloc/test/integration/allocated.c
98850+++ /dev/null
98851@@ -1,124 +0,0 @@
98852-#include "test/jemalloc_test.h"
98853-
98854-static const bool config_stats =
98855-#ifdef JEMALLOC_STATS
98856-    true
98857-#else
98858-    false
98859-#endif
98860-    ;
98861-
98862-void *
98863-thd_start(void *arg) {
98864-	int err;
98865-	void *p;
98866-	uint64_t a0, a1, d0, d1;
98867-	uint64_t *ap0, *ap1, *dp0, *dp1;
98868-	size_t sz, usize;
98869-
98870-	sz = sizeof(a0);
98871-	if ((err = mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0))) {
98872-		if (err == ENOENT) {
98873-			goto label_ENOENT;
98874-		}
98875-		test_fail("%s(): Error in mallctl(): %s", __func__,
98876-		    strerror(err));
98877-	}
98878-	sz = sizeof(ap0);
98879-	if ((err = mallctl("thread.allocatedp", (void *)&ap0, &sz, NULL, 0))) {
98880-		if (err == ENOENT) {
98881-			goto label_ENOENT;
98882-		}
98883-		test_fail("%s(): Error in mallctl(): %s", __func__,
98884-		    strerror(err));
98885-	}
98886-	expect_u64_eq(*ap0, a0,
98887-	    "\"thread.allocatedp\" should provide a pointer to internal "
98888-	    "storage");
98889-
98890-	sz = sizeof(d0);
98891-	if ((err = mallctl("thread.deallocated", (void *)&d0, &sz, NULL, 0))) {
98892-		if (err == ENOENT) {
98893-			goto label_ENOENT;
98894-		}
98895-		test_fail("%s(): Error in mallctl(): %s", __func__,
98896-		    strerror(err));
98897-	}
98898-	sz = sizeof(dp0);
98899-	if ((err = mallctl("thread.deallocatedp", (void *)&dp0, &sz, NULL,
98900-	    0))) {
98901-		if (err == ENOENT) {
98902-			goto label_ENOENT;
98903-		}
98904-		test_fail("%s(): Error in mallctl(): %s", __func__,
98905-		    strerror(err));
98906-	}
98907-	expect_u64_eq(*dp0, d0,
98908-	    "\"thread.deallocatedp\" should provide a pointer to internal "
98909-	    "storage");
98910-
98911-	p = malloc(1);
98912-	expect_ptr_not_null(p, "Unexpected malloc() error");
98913-
98914-	sz = sizeof(a1);
98915-	mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0);
98916-	sz = sizeof(ap1);
98917-	mallctl("thread.allocatedp", (void *)&ap1, &sz, NULL, 0);
98918-	expect_u64_eq(*ap1, a1,
98919-	    "Dereferenced \"thread.allocatedp\" value should equal "
98920-	    "\"thread.allocated\" value");
98921-	expect_ptr_eq(ap0, ap1,
98922-	    "Pointer returned by \"thread.allocatedp\" should not change");
98923-
98924-	usize = TEST_MALLOC_SIZE(p);
98925-	expect_u64_le(a0 + usize, a1,
98926-	    "Allocated memory counter should increase by at least the amount "
98927-	    "explicitly allocated");
98928-
98929-	free(p);
98930-
98931-	sz = sizeof(d1);
98932-	mallctl("thread.deallocated", (void *)&d1, &sz, NULL, 0);
98933-	sz = sizeof(dp1);
98934-	mallctl("thread.deallocatedp", (void *)&dp1, &sz, NULL, 0);
98935-	expect_u64_eq(*dp1, d1,
98936-	    "Dereferenced \"thread.deallocatedp\" value should equal "
98937-	    "\"thread.deallocated\" value");
98938-	expect_ptr_eq(dp0, dp1,
98939-	    "Pointer returned by \"thread.deallocatedp\" should not change");
98940-
98941-	expect_u64_le(d0 + usize, d1,
98942-	    "Deallocated memory counter should increase by at least the amount "
98943-	    "explicitly deallocated");
98944-
98945-	return NULL;
98946-label_ENOENT:
98947-	expect_false(config_stats,
98948-	    "ENOENT should only be returned if stats are disabled");
98949-	test_skip("\"thread.allocated\" mallctl not available");
98950-	return NULL;
98951-}
98952-
98953-TEST_BEGIN(test_main_thread) {
98954-	thd_start(NULL);
98955-}
98956-TEST_END
98957-
98958-TEST_BEGIN(test_subthread) {
98959-	thd_t thd;
98960-
98961-	thd_create(&thd, thd_start, NULL);
98962-	thd_join(thd, NULL);
98963-}
98964-TEST_END
98965-
98966-int
98967-main(void) {
98968-	/* Run tests multiple times to check for bad interactions. */
98969-	return test(
98970-	    test_main_thread,
98971-	    test_subthread,
98972-	    test_main_thread,
98973-	    test_subthread,
98974-	    test_main_thread);
98975-}
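
The test above verifies that "thread.allocatedp" exposes a stable pointer into per-thread counter storage. A condensed sketch of the shortcut that property enables: fetch the pointer once, then read the counter directly instead of issuing mallctl() on every sample (the mallctl names are the ones used above).

#include "test/jemalloc_test.h"

TEST_BEGIN(test_allocatedp_sketch) {
	uint64_t *allocatedp;
	uint64_t before, after;
	size_t sz = sizeof(allocatedp);
	void *p;

	/* Skips (rather than fails) when stats are compiled out. */
	test_skip_if(mallctl("thread.allocatedp", (void *)&allocatedp, &sz,
	    NULL, 0) != 0);

	before = *allocatedp;
	p = malloc(64);
	expect_ptr_not_null(p, "Unexpected malloc() failure");
	after = *allocatedp;
	expect_u64_ge(after, before + TEST_MALLOC_SIZE(p),
	    "Counter should grow by at least the usable size");
	free(p);
}
TEST_END

int
main(void) {
	return test(
	    test_allocatedp_sketch);
}
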
98976diff --git a/jemalloc/test/integration/cpp/basic.cpp b/jemalloc/test/integration/cpp/basic.cpp
98977deleted file mode 100644
98978index c1cf6cd..0000000
98979--- a/jemalloc/test/integration/cpp/basic.cpp
98980+++ /dev/null
98981@@ -1,24 +0,0 @@
98982-#include "test/jemalloc_test.h"
98983-
98984-TEST_BEGIN(test_basic) {
98985-	auto foo = new long(4);
98986-	expect_ptr_not_null(foo, "Unexpected new failure");
98987-	delete foo;
98988-	// Test nullptr handling.
98989-	foo = nullptr;
98990-	delete foo;
98991-
98992-	auto bar = new long;
98993-	expect_ptr_not_null(bar, "Unexpected new failure");
98994-	delete bar;
98995-	// Test nullptr handling.
98996-	bar = nullptr;
98997-	delete bar;
98998-}
98999-TEST_END
99000-
99001-int
99002-main() {
99003-	return test(
99004-	    test_basic);
99005-}
99006diff --git a/jemalloc/test/integration/cpp/infallible_new_false.cpp b/jemalloc/test/integration/cpp/infallible_new_false.cpp
99007deleted file mode 100644
99008index 42196d6..0000000
99009--- a/jemalloc/test/integration/cpp/infallible_new_false.cpp
99010+++ /dev/null
99011@@ -1,23 +0,0 @@
99012-#include <memory>
99013-
99014-#include "test/jemalloc_test.h"
99015-
99016-TEST_BEGIN(test_failing_alloc) {
99017-	bool saw_exception = false;
99018-	try {
99019-		/* Too big of an allocation to succeed. */
99020-		void *volatile ptr = ::operator new((size_t)-1);
99021-		(void)ptr;
99022-	} catch (...) {
99023-		saw_exception = true;
99024-	}
99025-	expect_true(saw_exception, "Didn't get a failure");
99026-}
99027-TEST_END
99028-
99029-int
99030-main(void) {
99031-	return test(
99032-	    test_failing_alloc);
99033-}
99034-
99035diff --git a/jemalloc/test/integration/cpp/infallible_new_false.sh b/jemalloc/test/integration/cpp/infallible_new_false.sh
99036deleted file mode 100644
99037index 7d41812..0000000
99038--- a/jemalloc/test/integration/cpp/infallible_new_false.sh
99039+++ /dev/null
99040@@ -1,8 +0,0 @@
99041-#!/bin/sh
99042-
99043-XMALLOC_STR=""
99044-if [ "x${enable_xmalloc}" = "x1" ] ; then
99045-  XMALLOC_STR="xmalloc:false,"
99046-fi
99047-
99048-export MALLOC_CONF="${XMALLOC_STR}experimental_infallible_new:false"
99049diff --git a/jemalloc/test/integration/cpp/infallible_new_true.cpp b/jemalloc/test/integration/cpp/infallible_new_true.cpp
99050deleted file mode 100644
99051index d675412..0000000
99052--- a/jemalloc/test/integration/cpp/infallible_new_true.cpp
99053+++ /dev/null
99054@@ -1,67 +0,0 @@
99055-#include <stdio.h>
99056-
99057-#include "test/jemalloc_test.h"
99058-
99059-/*
99060- * We can't test C++ in unit tests.  In order to intercept abort, use a secret
99061- * safety check abort hook in integration tests.
99062- */
99063-typedef void (*abort_hook_t)(const char *message);
99064-bool fake_abort_called;
99065-void fake_abort(const char *message) {
99066-	if (strcmp(message, "<jemalloc>: Allocation failed and "
99067-	    "opt.experimental_infallible_new is true. Aborting.\n") != 0) {
99068-		abort();
99069-	}
99070-	fake_abort_called = true;
99071-}
99072-
99073-static bool
99074-own_operator_new(void) {
99075-	uint64_t before, after;
99076-	size_t sz = sizeof(before);
99077-
99078-	/* thread.allocated is always available, even w/o config_stats. */
99079-	expect_d_eq(mallctl("thread.allocated", (void *)&before, &sz, NULL, 0),
99080-	    0, "Unexpected mallctl failure reading stats");
99081-	void *volatile ptr = ::operator new((size_t)8);
99082-	expect_ptr_not_null(ptr, "Unexpected allocation failure");
99083-	expect_d_eq(mallctl("thread.allocated", (void *)&after, &sz, NULL, 0),
99084-	    0, "Unexpected mallctl failure reading stats");
99085-
99086-	return (after != before);
99087-}
99088-
99089-TEST_BEGIN(test_failing_alloc) {
99090-	abort_hook_t abort_hook = &fake_abort;
99091-	expect_d_eq(mallctl("experimental.hooks.safety_check_abort", NULL, NULL,
99092-	    (void *)&abort_hook, sizeof(abort_hook)), 0,
99093-	    "Unexpected mallctl failure setting abort hook");
99094-
99095-	/*
99096-	 * Not owning operator new is only expected to happen on MinGW which
99097-	 * does not support operator new / delete replacement.
99098-	 */
99099-#ifdef _WIN32
99100-	test_skip_if(!own_operator_new());
99101-#else
99102-	expect_true(own_operator_new(), "No operator new overload");
99103-#endif
99104-	void *volatile ptr = (void *)1;
99105-	try {
99106-		/* Too big of an allocation to succeed. */
99107-		ptr = ::operator new((size_t)-1);
99108-	} catch (...) {
99109-		abort();
99110-	}
99111-	expect_ptr_null(ptr, "Allocation should have failed");
99112-	expect_b_eq(fake_abort_called, true, "Abort hook not invoked");
99113-}
99114-TEST_END
99115-
99116-int
99117-main(void) {
99118-	return test(
99119-	    test_failing_alloc);
99120-}
99121-
99122diff --git a/jemalloc/test/integration/cpp/infallible_new_true.sh b/jemalloc/test/integration/cpp/infallible_new_true.sh
99123deleted file mode 100644
99124index 4a0ff54..0000000
99125--- a/jemalloc/test/integration/cpp/infallible_new_true.sh
99126+++ /dev/null
99127@@ -1,8 +0,0 @@
99128-#!/bin/sh
99129-
99130-XMALLOC_STR=""
99131-if [ "x${enable_xmalloc}" = "x1" ] ; then
99132-  XMALLOC_STR="xmalloc:false,"
99133-fi
99134-
99135-export MALLOC_CONF="${XMALLOC_STR}experimental_infallible_new:true"
99136diff --git a/jemalloc/test/integration/extent.c b/jemalloc/test/integration/extent.c
99137deleted file mode 100644
99138index 7a028f1..0000000
99139--- a/jemalloc/test/integration/extent.c
99140+++ /dev/null
99141@@ -1,287 +0,0 @@
99142-#include "test/jemalloc_test.h"
99143-
99144-#include "test/extent_hooks.h"
99145-
99146-#include "jemalloc/internal/arena_types.h"
99147-
99148-static void
99149-test_extent_body(unsigned arena_ind) {
99150-	void *p;
99151-	size_t large0, large1, large2, sz;
99152-	size_t purge_mib[3];
99153-	size_t purge_miblen;
99154-	int flags;
99155-	bool xallocx_success_a, xallocx_success_b, xallocx_success_c;
99156-
99157-	flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
99158-
99159-	/* Get large size classes. */
99160-	sz = sizeof(size_t);
99161-	expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
99162-	    0), 0, "Unexpected arenas.lextent.0.size failure");
99163-	expect_d_eq(mallctl("arenas.lextent.1.size", (void *)&large1, &sz, NULL,
99164-	    0), 0, "Unexpected arenas.lextent.1.size failure");
99165-	expect_d_eq(mallctl("arenas.lextent.2.size", (void *)&large2, &sz, NULL,
99166-	    0), 0, "Unexpected arenas.lextent.2.size failure");
99167-
99168-	/* Test dalloc/decommit/purge cascade. */
99169-	purge_miblen = sizeof(purge_mib)/sizeof(size_t);
99170-	expect_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen),
99171-	    0, "Unexpected mallctlnametomib() failure");
99172-	purge_mib[1] = (size_t)arena_ind;
99173-	called_alloc = false;
99174-	try_alloc = true;
99175-	try_dalloc = false;
99176-	try_decommit = false;
99177-	p = mallocx(large0 * 2, flags);
99178-	expect_ptr_not_null(p, "Unexpected mallocx() error");
99179-	expect_true(called_alloc, "Expected alloc call");
99180-	called_dalloc = false;
99181-	called_decommit = false;
99182-	did_purge_lazy = false;
99183-	did_purge_forced = false;
99184-	called_split = false;
99185-	xallocx_success_a = (xallocx(p, large0, 0, flags) == large0);
99186-	expect_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
99187-	    0, "Unexpected arena.%u.purge error", arena_ind);
99188-	if (xallocx_success_a) {
99189-		expect_true(called_dalloc, "Expected dalloc call");
99190-		expect_true(called_decommit, "Expected decommit call");
99191-		expect_true(did_purge_lazy || did_purge_forced,
99192-		    "Expected purge");
99193-		expect_true(called_split, "Expected split call");
99194-	}
99195-	dallocx(p, flags);
99196-	try_dalloc = true;
99197-
99198-	/* Test decommit/commit and observe split/merge. */
99199-	try_dalloc = false;
99200-	try_decommit = true;
99201-	p = mallocx(large0 * 2, flags);
99202-	expect_ptr_not_null(p, "Unexpected mallocx() error");
99203-	did_decommit = false;
99204-	did_commit = false;
99205-	called_split = false;
99206-	did_split = false;
99207-	did_merge = false;
99208-	xallocx_success_b = (xallocx(p, large0, 0, flags) == large0);
99209-	expect_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0),
99210-	    0, "Unexpected arena.%u.purge error", arena_ind);
99211-	if (xallocx_success_b) {
99212-		expect_true(did_split, "Expected split");
99213-	}
99214-	xallocx_success_c = (xallocx(p, large0 * 2, 0, flags) == large0 * 2);
99215-	if (did_split) {
99216-		expect_b_eq(did_decommit, did_commit,
99217-		    "Expected decommit/commit match");
99218-	}
99219-	if (xallocx_success_b && xallocx_success_c) {
99220-		expect_true(did_merge, "Expected merge");
99221-	}
99222-	dallocx(p, flags);
99223-	try_dalloc = true;
99224-	try_decommit = false;
99225-
99226-	/* Make sure non-large allocation succeeds. */
99227-	p = mallocx(42, flags);
99228-	expect_ptr_not_null(p, "Unexpected mallocx() error");
99229-	dallocx(p, flags);
99230-}
99231-
99232-static void
99233-test_manual_hook_auto_arena(void) {
99234-	unsigned narenas;
99235-	size_t old_size, new_size, sz;
99236-	size_t hooks_mib[3];
99237-	size_t hooks_miblen;
99238-	extent_hooks_t *new_hooks, *old_hooks;
99239-
99240-	extent_hooks_prep();
99241-
99242-	sz = sizeof(unsigned);
99243-	/* Get number of auto arenas. */
99244-	expect_d_eq(mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0),
99245-	    0, "Unexpected mallctl() failure");
99246-	if (narenas == 1) {
99247-		return;
99248-	}
99249-
99250-	/* Install custom extent hooks on arena 1 (might not be initialized). */
99251-	hooks_miblen = sizeof(hooks_mib)/sizeof(size_t);
99252-	expect_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib,
99253-	    &hooks_miblen), 0, "Unexpected mallctlnametomib() failure");
99254-	hooks_mib[1] = 1;
99255-	old_size = sizeof(extent_hooks_t *);
99256-	new_hooks = &hooks;
99257-	new_size = sizeof(extent_hooks_t *);
99258-	expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
99259-	    &old_size, (void *)&new_hooks, new_size), 0,
99260-	    "Unexpected extent_hooks error");
99261-	static bool auto_arena_created = false;
99262-	if (old_hooks != &hooks) {
99263-		expect_b_eq(auto_arena_created, false,
99264-		    "Expected auto arena 1 created only once.");
99265-		auto_arena_created = true;
99266-	}
99267-}
99268-
99269-static void
99270-test_manual_hook_body(void) {
99271-	unsigned arena_ind;
99272-	size_t old_size, new_size, sz;
99273-	size_t hooks_mib[3];
99274-	size_t hooks_miblen;
99275-	extent_hooks_t *new_hooks, *old_hooks;
99276-
99277-	extent_hooks_prep();
99278-
99279-	sz = sizeof(unsigned);
99280-	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
99281-	    0, "Unexpected mallctl() failure");
99282-
99283-	/* Install custom extent hooks. */
99284-	hooks_miblen = sizeof(hooks_mib)/sizeof(size_t);
99285-	expect_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib,
99286-	    &hooks_miblen), 0, "Unexpected mallctlnametomib() failure");
99287-	hooks_mib[1] = (size_t)arena_ind;
99288-	old_size = sizeof(extent_hooks_t *);
99289-	new_hooks = &hooks;
99290-	new_size = sizeof(extent_hooks_t *);
99291-	expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
99292-	    &old_size, (void *)&new_hooks, new_size), 0,
99293-	    "Unexpected extent_hooks error");
99294-	expect_ptr_ne(old_hooks->alloc, extent_alloc_hook,
99295-	    "Unexpected extent_hooks error");
99296-	expect_ptr_ne(old_hooks->dalloc, extent_dalloc_hook,
99297-	    "Unexpected extent_hooks error");
99298-	expect_ptr_ne(old_hooks->commit, extent_commit_hook,
99299-	    "Unexpected extent_hooks error");
99300-	expect_ptr_ne(old_hooks->decommit, extent_decommit_hook,
99301-	    "Unexpected extent_hooks error");
99302-	expect_ptr_ne(old_hooks->purge_lazy, extent_purge_lazy_hook,
99303-	    "Unexpected extent_hooks error");
99304-	expect_ptr_ne(old_hooks->purge_forced, extent_purge_forced_hook,
99305-	    "Unexpected extent_hooks error");
99306-	expect_ptr_ne(old_hooks->split, extent_split_hook,
99307-	    "Unexpected extent_hooks error");
99308-	expect_ptr_ne(old_hooks->merge, extent_merge_hook,
99309-	    "Unexpected extent_hooks error");
99310-
99311-	if (!is_background_thread_enabled()) {
99312-		test_extent_body(arena_ind);
99313-	}
99314-
99315-	/* Restore extent hooks. */
99316-	expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL,
99317-	    (void *)&old_hooks, new_size), 0, "Unexpected extent_hooks error");
99318-	expect_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks,
99319-	    &old_size, NULL, 0), 0, "Unexpected extent_hooks error");
99320-	expect_ptr_eq(old_hooks, default_hooks, "Unexpected extent_hooks error");
99321-	expect_ptr_eq(old_hooks->alloc, default_hooks->alloc,
99322-	    "Unexpected extent_hooks error");
99323-	expect_ptr_eq(old_hooks->dalloc, default_hooks->dalloc,
99324-	    "Unexpected extent_hooks error");
99325-	expect_ptr_eq(old_hooks->commit, default_hooks->commit,
99326-	    "Unexpected extent_hooks error");
99327-	expect_ptr_eq(old_hooks->decommit, default_hooks->decommit,
99328-	    "Unexpected extent_hooks error");
99329-	expect_ptr_eq(old_hooks->purge_lazy, default_hooks->purge_lazy,
99330-	    "Unexpected extent_hooks error");
99331-	expect_ptr_eq(old_hooks->purge_forced, default_hooks->purge_forced,
99332-	    "Unexpected extent_hooks error");
99333-	expect_ptr_eq(old_hooks->split, default_hooks->split,
99334-	    "Unexpected extent_hooks error");
99335-	expect_ptr_eq(old_hooks->merge, default_hooks->merge,
99336-	    "Unexpected extent_hooks error");
99337-}
99338-
99339-TEST_BEGIN(test_extent_manual_hook) {
99340-	test_manual_hook_auto_arena();
99341-	test_manual_hook_body();
99342-
99343-	/* Test failure paths. */
99344-	try_split = false;
99345-	test_manual_hook_body();
99346-	try_merge = false;
99347-	test_manual_hook_body();
99348-	try_purge_lazy = false;
99349-	try_purge_forced = false;
99350-	test_manual_hook_body();
99351-
99352-	try_split = try_merge = try_purge_lazy = try_purge_forced = true;
99353-}
99354-TEST_END
99355-
99356-TEST_BEGIN(test_extent_auto_hook) {
99357-	unsigned arena_ind;
99358-	size_t new_size, sz;
99359-	extent_hooks_t *new_hooks;
99360-
99361-	extent_hooks_prep();
99362-
99363-	sz = sizeof(unsigned);
99364-	new_hooks = &hooks;
99365-	new_size = sizeof(extent_hooks_t *);
99366-	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
99367-	    (void *)&new_hooks, new_size), 0, "Unexpected mallctl() failure");
99368-
99369-	test_skip_if(is_background_thread_enabled());
99370-	test_extent_body(arena_ind);
99371-}
99372-TEST_END
99373-
99374-static void
99375-test_arenas_create_ext_base(arena_config_t config,
99376-	bool expect_hook_data, bool expect_hook_metadata)
99377-{
99378-	unsigned arena, arena1;
99379-	void *ptr;
99380-	size_t sz = sizeof(unsigned);
99381-
99382-	extent_hooks_prep();
99383-
99384-	called_alloc = false;
99385-	expect_d_eq(mallctl("experimental.arenas_create_ext",
99386-	    (void *)&arena, &sz, &config, sizeof(arena_config_t)), 0,
99387-	    "Unexpected mallctl() failure");
99388-	expect_b_eq(called_alloc, expect_hook_metadata,
99389-	    "expected hook metadata alloc mismatch");
99390-
99391-	called_alloc = false;
99392-	ptr = mallocx(42, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
99393-	expect_b_eq(called_alloc, expect_hook_data,
99394-	    "expected hook data alloc mismatch");
99395-
99396-	expect_ptr_not_null(ptr, "Unexpected mallocx() failure");
99397-	expect_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
99398-	    0, "Unexpected mallctl() failure");
99399-	expect_u_eq(arena, arena1, "Unexpected arena index");
99400-	dallocx(ptr, 0);
99401-}
99402-
99403-TEST_BEGIN(test_arenas_create_ext_with_ehooks_no_metadata) {
99404-	arena_config_t config;
99405-	config.extent_hooks = &hooks;
99406-	config.metadata_use_hooks = false;
99407-
99408-	test_arenas_create_ext_base(config, true, false);
99409-}
99410-TEST_END
99411-
99412-TEST_BEGIN(test_arenas_create_ext_with_ehooks_with_metadata) {
99413-	arena_config_t config;
99414-	config.extent_hooks = &hooks;
99415-	config.metadata_use_hooks = true;
99416-
99417-	test_arenas_create_ext_base(config, true, true);
99418-}
99419-TEST_END
99420-
99421-int
99422-main(void) {
99423-	return test(
99424-	    test_extent_manual_hook,
99425-	    test_extent_auto_hook,
99426-	    test_arenas_create_ext_with_ehooks_no_metadata,
99427-	    test_arenas_create_ext_with_ehooks_with_metadata);
99428-}
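
Two hook-installation paths are exercised above: writing arena.<i>.extent_hooks on an existing arena, and handing an extent_hooks_t * to arenas.create. A condensed sketch of the creation-time path, reusing the shared hooks table and bookkeeping flags from test/extent_hooks.h just as the test does:

#include "test/jemalloc_test.h"

#include "test/extent_hooks.h"

TEST_BEGIN(test_create_with_hooks_sketch) {
	unsigned arena_ind;
	size_t sz = sizeof(unsigned);
	extent_hooks_t *new_hooks = &hooks;
	void *p;

	extent_hooks_prep();
	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
	    (void *)&new_hooks, sizeof(extent_hooks_t *)), 0,
	    "Unexpected mallctl() failure");

	/* Allocations from the new arena route through the wrapper hooks. */
	called_alloc = false;
	p = mallocx(42, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
	expect_ptr_not_null(p, "Unexpected mallocx() error");
	expect_true(called_alloc, "Expected the alloc hook to be called");
	dallocx(p, MALLOCX_TCACHE_NONE);
}
TEST_END

int
main(void) {
	return test(
	    test_create_with_hooks_sketch);
}
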
99429diff --git a/jemalloc/test/integration/extent.sh b/jemalloc/test/integration/extent.sh
99430deleted file mode 100644
99431index 0cc2187..0000000
99432--- a/jemalloc/test/integration/extent.sh
99433+++ /dev/null
99434@@ -1,5 +0,0 @@
99435-#!/bin/sh
99436-
99437-if [ "x${enable_fill}" = "x1" ] ; then
99438-  export MALLOC_CONF="junk:false"
99439-fi
99440diff --git a/jemalloc/test/integration/malloc.c b/jemalloc/test/integration/malloc.c
99441deleted file mode 100644
99442index ef44916..0000000
99443--- a/jemalloc/test/integration/malloc.c
99444+++ /dev/null
99445@@ -1,16 +0,0 @@
99446-#include "test/jemalloc_test.h"
99447-
99448-TEST_BEGIN(test_zero_alloc) {
99449-	void *res = malloc(0);
99450-	assert(res);
99451-	size_t usable = TEST_MALLOC_SIZE(res);
99452-	assert(usable > 0);
99453-	free(res);
99454-}
99455-TEST_END
99456-
99457-int
99458-main(void) {
99459-	return test(
99460-	    test_zero_alloc);
99461-}
99462diff --git a/jemalloc/test/integration/mallocx.c b/jemalloc/test/integration/mallocx.c
99463deleted file mode 100644
99464index fdf1e3f..0000000
99465--- a/jemalloc/test/integration/mallocx.c
99466+++ /dev/null
99467@@ -1,274 +0,0 @@
99468-#include "test/jemalloc_test.h"
99469-
99470-static unsigned
99471-get_nsizes_impl(const char *cmd) {
99472-	unsigned ret;
99473-	size_t z;
99474-
99475-	z = sizeof(unsigned);
99476-	expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
99477-	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
99478-
99479-	return ret;
99480-}
99481-
99482-static unsigned
99483-get_nlarge(void) {
99484-	return get_nsizes_impl("arenas.nlextents");
99485-}
99486-
99487-static size_t
99488-get_size_impl(const char *cmd, size_t ind) {
99489-	size_t ret;
99490-	size_t z;
99491-	size_t mib[4];
99492-	size_t miblen = 4;
99493-
99494-	z = sizeof(size_t);
99495-	expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
99496-	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
99497-	mib[2] = ind;
99498-	z = sizeof(size_t);
99499-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
99500-	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
99501-
99502-	return ret;
99503-}
99504-
99505-static size_t
99506-get_large_size(size_t ind) {
99507-	return get_size_impl("arenas.lextent.0.size", ind);
99508-}
99509-
99510-/*
99511- * On systems which can't merge extents, tests that call this function generate
99512- * a lot of dirty memory very quickly.  Purging between cycles mitigates
99513- * potential OOM on e.g. 32-bit Windows.
99514- */
99515-static void
99516-purge(void) {
99517-	expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
99518-	    "Unexpected mallctl error");
99519-}
99520-
99521-/*
99522- * GCC "-Walloc-size-larger-than" warning detects when one of the memory
99523- * allocation functions is called with a size larger than the maximum size that
99524- * they support. Here we want to explicitly test that the allocation functions
99525- * do indeed fail properly when this is the case, which triggers the warning.
99526- * Therefore we disable the warning for these tests.
99527- */
99528-JEMALLOC_DIAGNOSTIC_PUSH
99529-JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
99530-
99531-TEST_BEGIN(test_overflow) {
99532-	size_t largemax;
99533-
99534-	largemax = get_large_size(get_nlarge()-1);
99535-
99536-	expect_ptr_null(mallocx(largemax+1, 0),
99537-	    "Expected OOM for mallocx(size=%#zx, 0)", largemax+1);
99538-
99539-	expect_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0),
99540-	    "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
99541-
99542-	expect_ptr_null(mallocx(SIZE_T_MAX, 0),
99543-	    "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX);
99544-
99545-	expect_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
99546-	    "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))",
99547-	    ZU(PTRDIFF_MAX)+1);
99548-}
99549-TEST_END
99550-
99551-static void *
99552-remote_alloc(void *arg) {
99553-	unsigned arena;
99554-	size_t sz = sizeof(unsigned);
99555-	expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
99556-	    "Unexpected mallctl() failure");
99557-	size_t large_sz;
99558-	sz = sizeof(size_t);
99559-	expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz,
99560-	    NULL, 0), 0, "Unexpected mallctl failure");
99561-
99562-	void *ptr = mallocx(large_sz, MALLOCX_ARENA(arena)
99563-	    | MALLOCX_TCACHE_NONE);
99564-	void **ret = (void **)arg;
99565-	*ret = ptr;
99566-
99567-	return NULL;
99568-}
99569-
99570-TEST_BEGIN(test_remote_free) {
99571-	thd_t thd;
99572-	void *ret;
99573-	thd_create(&thd, remote_alloc, (void *)&ret);
99574-	thd_join(thd, NULL);
99575-	expect_ptr_not_null(ret, "Unexpected mallocx failure");
99576-
99577-	/* Avoid TCACHE_NONE to explicitly test tcache_flush(). */
99578-	dallocx(ret, 0);
99579-	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
99580-}
99581-TEST_END
99582-
99583-TEST_BEGIN(test_oom) {
99584-	size_t largemax;
99585-	bool oom;
99586-	void *ptrs[3];
99587-	unsigned i;
99588-
99589-	/*
99590-	 * It should be impossible to allocate three objects that each consume
99591-	 * nearly half the virtual address space.
99592-	 */
99593-	largemax = get_large_size(get_nlarge()-1);
99594-	oom = false;
99595-	for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
99596-		ptrs[i] = mallocx(largemax, MALLOCX_ARENA(0));
99597-		if (ptrs[i] == NULL) {
99598-			oom = true;
99599-		}
99600-	}
99601-	expect_true(oom,
99602-	    "Expected OOM during series of calls to mallocx(size=%zu, 0)",
99603-	    largemax);
99604-	for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
99605-		if (ptrs[i] != NULL) {
99606-			dallocx(ptrs[i], 0);
99607-		}
99608-	}
99609-	purge();
99610-
99611-#if LG_SIZEOF_PTR == 3
99612-	expect_ptr_null(mallocx(0x8000000000000000ULL,
99613-	    MALLOCX_ALIGN(0x8000000000000000ULL)),
99614-	    "Expected OOM for mallocx()");
99615-	expect_ptr_null(mallocx(0x8000000000000000ULL,
99616-	    MALLOCX_ALIGN(0x80000000)),
99617-	    "Expected OOM for mallocx()");
99618-#else
99619-	expect_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)),
99620-	    "Expected OOM for mallocx()");
99621-#endif
99622-}
99623-TEST_END
99624-
99625-/* Re-enable the "-Walloc-size-larger-than=" warning */
99626-JEMALLOC_DIAGNOSTIC_POP
99627-
99628-TEST_BEGIN(test_basic) {
99629-#define MAXSZ (((size_t)1) << 23)
99630-	size_t sz;
99631-
99632-	for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
99633-		size_t nsz, rsz;
99634-		void *p;
99635-		nsz = nallocx(sz, 0);
99636-		expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
99637-		p = mallocx(sz, 0);
99638-		expect_ptr_not_null(p,
99639-		    "Unexpected mallocx(size=%zx, flags=0) error", sz);
99640-		rsz = sallocx(p, 0);
99641-		expect_zu_ge(rsz, sz, "Real size smaller than expected");
99642-		expect_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
99643-		dallocx(p, 0);
99644-
99645-		p = mallocx(sz, 0);
99646-		expect_ptr_not_null(p,
99647-		    "Unexpected mallocx(size=%zx, flags=0) error", sz);
99648-		dallocx(p, 0);
99649-
99650-		nsz = nallocx(sz, MALLOCX_ZERO);
99651-		expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
99652-		p = mallocx(sz, MALLOCX_ZERO);
99653-		expect_ptr_not_null(p,
99654-		    "Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error",
99655-		    nsz);
99656-		rsz = sallocx(p, 0);
99657-		expect_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
99658-		dallocx(p, 0);
99659-		purge();
99660-	}
99661-#undef MAXSZ
99662-}
99663-TEST_END
99664-
99665-TEST_BEGIN(test_alignment_and_size) {
99666-	const char *percpu_arena;
99667-	size_t sz = sizeof(percpu_arena);
99668-
99669-	if(mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) ||
99670-	    strcmp(percpu_arena, "disabled") != 0) {
99671-		test_skip("test_alignment_and_size skipped: "
99672-		    "not working with percpu arena.");
99673-	};
99674-#define MAXALIGN (((size_t)1) << 23)
99675-#define NITER 4
99676-	size_t nsz, rsz, alignment, total;
99677-	unsigned i;
99678-	void *ps[NITER];
99679-
99680-	for (i = 0; i < NITER; i++) {
99681-		ps[i] = NULL;
99682-	}
99683-
99684-	for (alignment = 8;
99685-	    alignment <= MAXALIGN;
99686-	    alignment <<= 1) {
99687-		total = 0;
99688-		for (sz = 1;
99689-		    sz < 3 * alignment && sz < (1U << 31);
99690-		    sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
99691-			for (i = 0; i < NITER; i++) {
99692-				nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
99693-				    MALLOCX_ZERO | MALLOCX_ARENA(0));
99694-				expect_zu_ne(nsz, 0,
99695-				    "nallocx() error for alignment=%zu, "
99696-				    "size=%zu (%#zx)", alignment, sz, sz);
99697-				ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
99698-				    MALLOCX_ZERO | MALLOCX_ARENA(0));
99699-				expect_ptr_not_null(ps[i],
99700-				    "mallocx() error for alignment=%zu, "
99701-				    "size=%zu (%#zx)", alignment, sz, sz);
99702-				rsz = sallocx(ps[i], 0);
99703-				expect_zu_ge(rsz, sz,
99704-				    "Real size smaller than expected for "
99705-				    "alignment=%zu, size=%zu", alignment, sz);
99706-				expect_zu_eq(nsz, rsz,
99707-				    "nallocx()/sallocx() size mismatch for "
99708-				    "alignment=%zu, size=%zu", alignment, sz);
99709-				expect_ptr_null(
99710-				    (void *)((uintptr_t)ps[i] & (alignment-1)),
99711-				    "%p inadequately aligned for"
99712-				    " alignment=%zu, size=%zu", ps[i],
99713-				    alignment, sz);
99714-				total += rsz;
99715-				if (total >= (MAXALIGN << 1)) {
99716-					break;
99717-				}
99718-			}
99719-			for (i = 0; i < NITER; i++) {
99720-				if (ps[i] != NULL) {
99721-					dallocx(ps[i], 0);
99722-					ps[i] = NULL;
99723-				}
99724-			}
99725-		}
99726-		purge();
99727-	}
99728-#undef MAXALIGN
99729-#undef NITER
99730-}
99731-TEST_END
99732-
99733-int
99734-main(void) {
99735-	return test(
99736-	    test_overflow,
99737-	    test_oom,
99738-	    test_remote_free,
99739-	    test_basic,
99740-	    test_alignment_and_size);
99741-}
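The mallocx.c test removed above hinges on one contract: nallocx(size, flags) predicts exactly the usable size that mallocx(size, flags) will produce, and sallocx() confirms it on the live allocation. A minimal standalone sketch of that contract, assuming a program linked against jemalloc with the non-standard API exposed through <jemalloc/jemalloc.h>:

#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    size_t sz = 100;
    int flags = MALLOCX_ALIGN(64) | MALLOCX_ZERO;

    /* nallocx() reports the usable size mallocx() would return for sz/flags. */
    size_t predicted = nallocx(sz, flags);
    void *p = mallocx(sz, flags);
    if (p == NULL) {
        return 1;
    }
    /* sallocx() on the live allocation must agree with the prediction. */
    size_t usable = sallocx(p, 0);
    printf("requested=%zu nallocx=%zu sallocx=%zu aligned=%d\n",
        sz, predicted, usable, (int)(((uintptr_t)p % 64) == 0));
    dallocx(p, 0);
    return 0;
}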
99742diff --git a/jemalloc/test/integration/mallocx.sh b/jemalloc/test/integration/mallocx.sh
99743deleted file mode 100644
99744index 0cc2187..0000000
99745--- a/jemalloc/test/integration/mallocx.sh
99746+++ /dev/null
99747@@ -1,5 +0,0 @@
99748-#!/bin/sh
99749-
99750-if [ "x${enable_fill}" = "x1" ] ; then
99751-  export MALLOC_CONF="junk:false"
99752-fi
99753diff --git a/jemalloc/test/integration/overflow.c b/jemalloc/test/integration/overflow.c
99754deleted file mode 100644
99755index ce63327..0000000
99756--- a/jemalloc/test/integration/overflow.c
99757+++ /dev/null
99758@@ -1,59 +0,0 @@
99759-#include "test/jemalloc_test.h"
99760-
99761-/*
99762- * GCC "-Walloc-size-larger-than" warning detects when one of the memory
99763- * allocation functions is called with a size larger than the maximum size that
99764- * they support. Here we want to explicitly test that the allocation functions
99765- * do indeed fail properly when this is the case, which triggers the warning.
99766- * Therefore we disable the warning for these tests.
99767- */
99768-JEMALLOC_DIAGNOSTIC_PUSH
99769-JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
99770-
99771-TEST_BEGIN(test_overflow) {
99772-	unsigned nlextents;
99773-	size_t mib[4];
99774-	size_t sz, miblen, max_size_class;
99775-	void *p;
99776-
99777-	sz = sizeof(unsigned);
99778-	expect_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
99779-	    0), 0, "Unexpected mallctl() error");
99780-
99781-	miblen = sizeof(mib) / sizeof(size_t);
99782-	expect_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
99783-	    "Unexpected mallctlnametomib() error");
99784-	mib[2] = nlextents - 1;
99785-
99786-	sz = sizeof(size_t);
99787-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
99788-	    NULL, 0), 0, "Unexpected mallctlbymib() error");
99789-
99790-	expect_ptr_null(malloc(max_size_class + 1),
99791-	    "Expected OOM due to over-sized allocation request");
99792-	expect_ptr_null(malloc(SIZE_T_MAX),
99793-	    "Expected OOM due to over-sized allocation request");
99794-
99795-	expect_ptr_null(calloc(1, max_size_class + 1),
99796-	    "Expected OOM due to over-sized allocation request");
99797-	expect_ptr_null(calloc(1, SIZE_T_MAX),
99798-	    "Expected OOM due to over-sized allocation request");
99799-
99800-	p = malloc(1);
99801-	expect_ptr_not_null(p, "Unexpected malloc() OOM");
99802-	expect_ptr_null(realloc(p, max_size_class + 1),
99803-	    "Expected OOM due to over-sized allocation request");
99804-	expect_ptr_null(realloc(p, SIZE_T_MAX),
99805-	    "Expected OOM due to over-sized allocation request");
99806-	free(p);
99807-}
99808-TEST_END
99809-
99810-/* Re-enable the "-Walloc-size-larger-than=" warning */
99811-JEMALLOC_DIAGNOSTIC_POP
99812-
99813-int
99814-main(void) {
99815-	return test(
99816-	    test_overflow);
99817-}
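overflow.c, deleted above, checks a single property: an allocation request close to SIZE_MAX must fail cleanly with NULL rather than wrap around to a tiny allocation. A standalone sketch of the same checks against the standard malloc/calloc entry points (jemalloc, like any practical allocator, returns NULL here):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    /* A request this large cannot be satisfied; NULL is the only valid result. */
    void *p = malloc(SIZE_MAX);
    printf("malloc(SIZE_MAX)        -> %p\n", p);

    /* calloc() must detect the nmemb * size overflow instead of truncating it. */
    void *q = calloc(SIZE_MAX / 2, 4);
    printf("calloc(SIZE_MAX / 2, 4) -> %p\n", q);

    free(p);
    free(q);
    return 0;
}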
99818diff --git a/jemalloc/test/integration/posix_memalign.c b/jemalloc/test/integration/posix_memalign.c
99819deleted file mode 100644
99820index 2da0549..0000000
99821--- a/jemalloc/test/integration/posix_memalign.c
99822+++ /dev/null
99823@@ -1,128 +0,0 @@
99824-#include "test/jemalloc_test.h"
99825-
99826-#define MAXALIGN (((size_t)1) << 23)
99827-
99828-/*
99829- * On systems which can't merge extents, tests that call this function generate
99830- * a lot of dirty memory very quickly.  Purging between cycles mitigates
99831- * potential OOM on e.g. 32-bit Windows.
99832- */
99833-static void
99834-purge(void) {
99835-	expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
99836-	    "Unexpected mallctl error");
99837-}
99838-
99839-TEST_BEGIN(test_alignment_errors) {
99840-	size_t alignment;
99841-	void *p;
99842-
99843-	for (alignment = 0; alignment < sizeof(void *); alignment++) {
99844-		expect_d_eq(posix_memalign(&p, alignment, 1), EINVAL,
99845-		    "Expected error for invalid alignment %zu",
99846-		    alignment);
99847-	}
99848-
99849-	for (alignment = sizeof(size_t); alignment < MAXALIGN;
99850-	    alignment <<= 1) {
99851-		expect_d_ne(posix_memalign(&p, alignment + 1, 1), 0,
99852-		    "Expected error for invalid alignment %zu",
99853-		    alignment + 1);
99854-	}
99855-}
99856-TEST_END
99857-
99858-TEST_BEGIN(test_oom_errors) {
99859-	size_t alignment, size;
99860-	void *p;
99861-
99862-#if LG_SIZEOF_PTR == 3
99863-	alignment = UINT64_C(0x8000000000000000);
99864-	size      = UINT64_C(0x8000000000000000);
99865-#else
99866-	alignment = 0x80000000LU;
99867-	size      = 0x80000000LU;
99868-#endif
99869-	expect_d_ne(posix_memalign(&p, alignment, size), 0,
99870-	    "Expected error for posix_memalign(&p, %zu, %zu)",
99871-	    alignment, size);
99872-
99873-#if LG_SIZEOF_PTR == 3
99874-	alignment = UINT64_C(0x4000000000000000);
99875-	size      = UINT64_C(0xc000000000000001);
99876-#else
99877-	alignment = 0x40000000LU;
99878-	size      = 0xc0000001LU;
99879-#endif
99880-	expect_d_ne(posix_memalign(&p, alignment, size), 0,
99881-	    "Expected error for posix_memalign(&p, %zu, %zu)",
99882-	    alignment, size);
99883-
99884-	alignment = 0x10LU;
99885-#if LG_SIZEOF_PTR == 3
99886-	size = UINT64_C(0xfffffffffffffff0);
99887-#else
99888-	size = 0xfffffff0LU;
99889-#endif
99890-	expect_d_ne(posix_memalign(&p, alignment, size), 0,
99891-	    "Expected error for posix_memalign(&p, %zu, %zu)",
99892-	    alignment, size);
99893-}
99894-TEST_END
99895-
99896-TEST_BEGIN(test_alignment_and_size) {
99897-#define NITER 4
99898-	size_t alignment, size, total;
99899-	unsigned i;
99900-	int err;
99901-	void *ps[NITER];
99902-
99903-	for (i = 0; i < NITER; i++) {
99904-		ps[i] = NULL;
99905-	}
99906-
99907-	for (alignment = 8;
99908-	    alignment <= MAXALIGN;
99909-	    alignment <<= 1) {
99910-		total = 0;
99911-		for (size = 0;
99912-		    size < 3 * alignment && size < (1U << 31);
99913-		    size += ((size == 0) ? 1 :
99914-		    (alignment >> (LG_SIZEOF_PTR-1)) - 1)) {
99915-			for (i = 0; i < NITER; i++) {
99916-				err = posix_memalign(&ps[i],
99917-				    alignment, size);
99918-				if (err) {
99919-					char buf[BUFERROR_BUF];
99920-
99921-					buferror(get_errno(), buf, sizeof(buf));
99922-					test_fail(
99923-					    "Error for alignment=%zu, "
99924-					    "size=%zu (%#zx): %s",
99925-					    alignment, size, size, buf);
99926-				}
99927-				total += TEST_MALLOC_SIZE(ps[i]);
99928-				if (total >= (MAXALIGN << 1)) {
99929-					break;
99930-				}
99931-			}
99932-			for (i = 0; i < NITER; i++) {
99933-				if (ps[i] != NULL) {
99934-					free(ps[i]);
99935-					ps[i] = NULL;
99936-				}
99937-			}
99938-		}
99939-		purge();
99940-	}
99941-#undef NITER
99942-}
99943-TEST_END
99944-
99945-int
99946-main(void) {
99947-	return test(
99948-	    test_alignment_errors,
99949-	    test_oom_errors,
99950-	    test_alignment_and_size);
99951-}
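posix_memalign.c, deleted above, exercises the standard POSIX error contract: the alignment must be a power of two and a multiple of sizeof(void *), otherwise the call returns EINVAL without allocating. A minimal usage sketch:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    void *p = NULL;
    int err;

    /* Alignment 3 is neither a power of two nor a multiple of sizeof(void *). */
    err = posix_memalign(&p, 3, 64);
    printf("alignment=3    -> %s\n", err == EINVAL ? "EINVAL" : "unexpected");

    /* 4096 is a power of two and a multiple of sizeof(void *), so it is accepted. */
    err = posix_memalign(&p, 4096, 64);
    if (err == 0) {
        printf("alignment=4096 -> %p\n", p);
        free(p);
    }
    return 0;
}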
99952diff --git a/jemalloc/test/integration/rallocx.c b/jemalloc/test/integration/rallocx.c
99953deleted file mode 100644
99954index 68b8f38..0000000
99955--- a/jemalloc/test/integration/rallocx.c
99956+++ /dev/null
99957@@ -1,308 +0,0 @@
99958-#include "test/jemalloc_test.h"
99959-
99960-static unsigned
99961-get_nsizes_impl(const char *cmd) {
99962-	unsigned ret;
99963-	size_t z;
99964-
99965-	z = sizeof(unsigned);
99966-	expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
99967-	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
99968-
99969-	return ret;
99970-}
99971-
99972-static unsigned
99973-get_nlarge(void) {
99974-	return get_nsizes_impl("arenas.nlextents");
99975-}
99976-
99977-static size_t
99978-get_size_impl(const char *cmd, size_t ind) {
99979-	size_t ret;
99980-	size_t z;
99981-	size_t mib[4];
99982-	size_t miblen = 4;
99983-
99984-	z = sizeof(size_t);
99985-	expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
99986-	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
99987-	mib[2] = ind;
99988-	z = sizeof(size_t);
99989-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
99990-	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
99991-
99992-	return ret;
99993-}
99994-
99995-static size_t
99996-get_large_size(size_t ind) {
99997-	return get_size_impl("arenas.lextent.0.size", ind);
99998-}
99999-
100000-TEST_BEGIN(test_grow_and_shrink) {
100001-	/*
100002-	 * Use volatile to workaround buffer overflow false positives
100003-	 * (-D_FORTIFY_SOURCE=3).
100004-	 */
100005-	void *volatile p, *volatile q;
100006-	size_t tsz;
100007-#define NCYCLES 3
100008-	unsigned i, j;
100009-#define NSZS 1024
100010-	size_t szs[NSZS];
100011-#define MAXSZ ZU(12 * 1024 * 1024)
100012-
100013-	p = mallocx(1, 0);
100014-	expect_ptr_not_null(p, "Unexpected mallocx() error");
100015-	szs[0] = sallocx(p, 0);
100016-
100017-	for (i = 0; i < NCYCLES; i++) {
100018-		for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) {
100019-			q = rallocx(p, szs[j-1]+1, 0);
100020-			expect_ptr_not_null(q,
100021-			    "Unexpected rallocx() error for size=%zu-->%zu",
100022-			    szs[j-1], szs[j-1]+1);
100023-			szs[j] = sallocx(q, 0);
100024-			expect_zu_ne(szs[j], szs[j-1]+1,
100025-			    "Expected size to be at least: %zu", szs[j-1]+1);
100026-			p = q;
100027-		}
100028-
100029-		for (j--; j > 0; j--) {
100030-			q = rallocx(p, szs[j-1], 0);
100031-			expect_ptr_not_null(q,
100032-			    "Unexpected rallocx() error for size=%zu-->%zu",
100033-			    szs[j], szs[j-1]);
100034-			tsz = sallocx(q, 0);
100035-			expect_zu_eq(tsz, szs[j-1],
100036-			    "Expected size=%zu, got size=%zu", szs[j-1], tsz);
100037-			p = q;
100038-		}
100039-	}
100040-
100041-	dallocx(p, 0);
100042-#undef MAXSZ
100043-#undef NSZS
100044-#undef NCYCLES
100045-}
100046-TEST_END
100047-
100048-static bool
100049-validate_fill(void *p, uint8_t c, size_t offset, size_t len) {
100050-	bool ret = false;
100051-	/*
100052-	 * Use volatile to workaround buffer overflow false positives
100053-	 * (-D_FORTIFY_SOURCE=3).
100054-	 */
100055-	uint8_t *volatile buf = (uint8_t *)p;
100056-	size_t i;
100057-
100058-	for (i = 0; i < len; i++) {
100059-		uint8_t b = buf[offset+i];
100060-		if (b != c) {
100061-			test_fail("Allocation at %p (len=%zu) contains %#x "
100062-			    "rather than %#x at offset %zu", p, len, b, c,
100063-			    offset+i);
100064-			ret = true;
100065-		}
100066-	}
100067-
100068-	return ret;
100069-}
100070-
100071-TEST_BEGIN(test_zero) {
100072-	/*
100073-	 * Use volatile to workaround buffer overflow false positives
100074-	 * (-D_FORTIFY_SOURCE=3).
100075-	 */
100076-	void *volatile p, *volatile q;
100077-	size_t psz, qsz, i, j;
100078-	size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024};
100079-#define FILL_BYTE 0xaaU
100080-#define RANGE 2048
100081-
100082-	for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) {
100083-		size_t start_size = start_sizes[i];
100084-		p = mallocx(start_size, MALLOCX_ZERO);
100085-		expect_ptr_not_null(p, "Unexpected mallocx() error");
100086-		psz = sallocx(p, 0);
100087-
100088-		expect_false(validate_fill(p, 0, 0, psz),
100089-		    "Expected zeroed memory");
100090-		memset(p, FILL_BYTE, psz);
100091-		expect_false(validate_fill(p, FILL_BYTE, 0, psz),
100092-		    "Expected filled memory");
100093-
100094-		for (j = 1; j < RANGE; j++) {
100095-			q = rallocx(p, start_size+j, MALLOCX_ZERO);
100096-			expect_ptr_not_null(q, "Unexpected rallocx() error");
100097-			qsz = sallocx(q, 0);
100098-			if (q != p || qsz != psz) {
100099-				expect_false(validate_fill(q, FILL_BYTE, 0,
100100-				    psz), "Expected filled memory");
100101-				expect_false(validate_fill(q, 0, psz, qsz-psz),
100102-				    "Expected zeroed memory");
100103-			}
100104-			if (psz != qsz) {
100105-				memset((void *)((uintptr_t)q+psz), FILL_BYTE,
100106-				    qsz-psz);
100107-				psz = qsz;
100108-			}
100109-			p = q;
100110-		}
100111-		expect_false(validate_fill(p, FILL_BYTE, 0, psz),
100112-		    "Expected filled memory");
100113-		dallocx(p, 0);
100114-	}
100115-#undef FILL_BYTE
100116-}
100117-TEST_END
100118-
100119-TEST_BEGIN(test_align) {
100120-	void *p, *q;
100121-	size_t align;
100122-#define MAX_ALIGN (ZU(1) << 25)
100123-
100124-	align = ZU(1);
100125-	p = mallocx(1, MALLOCX_ALIGN(align));
100126-	expect_ptr_not_null(p, "Unexpected mallocx() error");
100127-
100128-	for (align <<= 1; align <= MAX_ALIGN; align <<= 1) {
100129-		q = rallocx(p, 1, MALLOCX_ALIGN(align));
100130-		expect_ptr_not_null(q,
100131-		    "Unexpected rallocx() error for align=%zu", align);
100132-		expect_ptr_null(
100133-		    (void *)((uintptr_t)q & (align-1)),
100134-		    "%p inadequately aligned for align=%zu",
100135-		    q, align);
100136-		p = q;
100137-	}
100138-	dallocx(p, 0);
100139-#undef MAX_ALIGN
100140-}
100141-TEST_END
100142-
100143-TEST_BEGIN(test_align_enum) {
100144-/* Span both small sizes and large sizes. */
100145-#define LG_MIN 12
100146-#define LG_MAX 15
100147-	for (size_t lg_align = LG_MIN; lg_align <= LG_MAX; ++lg_align) {
100148-		for (size_t lg_size = LG_MIN; lg_size <= LG_MAX; ++lg_size) {
100149-			size_t size = 1 << lg_size;
100150-			for (size_t lg_align_next = LG_MIN;
100151-			    lg_align_next <= LG_MAX; ++lg_align_next) {
100152-				int flags = MALLOCX_LG_ALIGN(lg_align);
100153-				void *p = mallocx(1, flags);
100154-				assert_ptr_not_null(p,
100155-				    "Unexpected mallocx() error");
100156-				assert_zu_eq(nallocx(1, flags),
100157-				    TEST_MALLOC_SIZE(p),
100158-				    "Wrong mallocx() usable size");
100159-				int flags_next =
100160-				    MALLOCX_LG_ALIGN(lg_align_next);
100161-				p = rallocx(p, size, flags_next);
100162-				assert_ptr_not_null(p,
100163-				    "Unexpected rallocx() error");
100164-				expect_zu_eq(nallocx(size, flags_next),
100165-				    TEST_MALLOC_SIZE(p),
100166-				    "Wrong rallocx() usable size");
100167-				free(p);
100168-			}
100169-		}
100170-	}
100171-#undef LG_MAX
100172-#undef LG_MIN
100173-}
100174-TEST_END
100175-
100176-TEST_BEGIN(test_lg_align_and_zero) {
100177-	/*
100178-	 * Use volatile to workaround buffer overflow false positives
100179-	 * (-D_FORTIFY_SOURCE=3).
100180-	 */
100181-	void *volatile p, *volatile q;
100182-	unsigned lg_align;
100183-	size_t sz;
100184-#define MAX_LG_ALIGN 25
100185-#define MAX_VALIDATE (ZU(1) << 22)
100186-
100187-	lg_align = 0;
100188-	p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
100189-	expect_ptr_not_null(p, "Unexpected mallocx() error");
100190-
100191-	for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) {
100192-		q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
100193-		expect_ptr_not_null(q,
100194-		    "Unexpected rallocx() error for lg_align=%u", lg_align);
100195-		expect_ptr_null(
100196-		    (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)),
100197-		    "%p inadequately aligned for lg_align=%u", q, lg_align);
100198-		sz = sallocx(q, 0);
100199-		if ((sz << 1) <= MAX_VALIDATE) {
100200-			expect_false(validate_fill(q, 0, 0, sz),
100201-			    "Expected zeroed memory");
100202-		} else {
100203-			expect_false(validate_fill(q, 0, 0, MAX_VALIDATE),
100204-			    "Expected zeroed memory");
100205-			expect_false(validate_fill(
100206-			    (void *)((uintptr_t)q+sz-MAX_VALIDATE),
100207-			    0, 0, MAX_VALIDATE), "Expected zeroed memory");
100208-		}
100209-		p = q;
100210-	}
100211-	dallocx(p, 0);
100212-#undef MAX_VALIDATE
100213-#undef MAX_LG_ALIGN
100214-}
100215-TEST_END
100216-
100217-/*
100218- * GCC "-Walloc-size-larger-than" warning detects when one of the memory
100219- * allocation functions is called with a size larger than the maximum size that
100220- * they support. Here we want to explicitly test that the allocation functions
100221- * do indeed fail properly when this is the case, which triggers the warning.
100222- * Therefore we disable the warning for these tests.
100223- */
100224-JEMALLOC_DIAGNOSTIC_PUSH
100225-JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
100226-
100227-TEST_BEGIN(test_overflow) {
100228-	size_t largemax;
100229-	void *p;
100230-
100231-	largemax = get_large_size(get_nlarge()-1);
100232-
100233-	p = mallocx(1, 0);
100234-	expect_ptr_not_null(p, "Unexpected mallocx() failure");
100235-
100236-	expect_ptr_null(rallocx(p, largemax+1, 0),
100237-	    "Expected OOM for rallocx(p, size=%#zx, 0)", largemax+1);
100238-
100239-	expect_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0),
100240-	    "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
100241-
100242-	expect_ptr_null(rallocx(p, SIZE_T_MAX, 0),
100243-	    "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX);
100244-
100245-	expect_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
100246-	    "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))",
100247-	    ZU(PTRDIFF_MAX)+1);
100248-
100249-	dallocx(p, 0);
100250-}
100251-TEST_END
100252-
100253-/* Re-enable the "-Walloc-size-larger-than=" warning */
100254-JEMALLOC_DIAGNOSTIC_POP
100255-
100256-int
100257-main(void) {
100258-	return test(
100259-	    test_grow_and_shrink,
100260-	    test_zero,
100261-	    test_align,
100262-	    test_align_enum,
100263-	    test_lg_align_and_zero,
100264-	    test_overflow);
100265-}
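The rallocx.c test deleted above leans on two guarantees: rallocx() preserves the old contents up to the smaller of the two usable sizes, and with MALLOCX_ZERO any newly exposed trailing bytes come back zeroed. A condensed sketch of that behavior, assuming linkage against jemalloc and <jemalloc/jemalloc.h>:

#include <jemalloc/jemalloc.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    void *p = mallocx(64, MALLOCX_ZERO);
    if (p == NULL) {
        return 1;
    }
    memset(p, 0xaa, 64);

    /* Grow the allocation; bytes beyond the old usable size must be zero. */
    void *q = rallocx(p, 4096, MALLOCX_ZERO);
    if (q == NULL) {
        dallocx(p, 0);
        return 1;
    }
    unsigned char *bytes = q;
    printf("preserved byte: %#x, newly exposed byte: %#x\n",
        bytes[0], bytes[4095]);
    dallocx(q, 0);
    return 0;
}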
100266diff --git a/jemalloc/test/integration/sdallocx.c b/jemalloc/test/integration/sdallocx.c
100267deleted file mode 100644
100268index ca01448..0000000
100269--- a/jemalloc/test/integration/sdallocx.c
100270+++ /dev/null
100271@@ -1,55 +0,0 @@
100272-#include "test/jemalloc_test.h"
100273-
100274-#define MAXALIGN (((size_t)1) << 22)
100275-#define NITER 3
100276-
100277-TEST_BEGIN(test_basic) {
100278-	void *ptr = mallocx(64, 0);
100279-	sdallocx(ptr, 64, 0);
100280-}
100281-TEST_END
100282-
100283-TEST_BEGIN(test_alignment_and_size) {
100284-	size_t nsz, sz, alignment, total;
100285-	unsigned i;
100286-	void *ps[NITER];
100287-
100288-	for (i = 0; i < NITER; i++) {
100289-		ps[i] = NULL;
100290-	}
100291-
100292-	for (alignment = 8;
100293-	    alignment <= MAXALIGN;
100294-	    alignment <<= 1) {
100295-		total = 0;
100296-		for (sz = 1;
100297-		    sz < 3 * alignment && sz < (1U << 31);
100298-		    sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
100299-			for (i = 0; i < NITER; i++) {
100300-				nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
100301-				    MALLOCX_ZERO);
100302-				ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
100303-				    MALLOCX_ZERO);
100304-				total += nsz;
100305-				if (total >= (MAXALIGN << 1)) {
100306-					break;
100307-				}
100308-			}
100309-			for (i = 0; i < NITER; i++) {
100310-				if (ps[i] != NULL) {
100311-					sdallocx(ps[i], sz,
100312-					    MALLOCX_ALIGN(alignment));
100313-					ps[i] = NULL;
100314-				}
100315-			}
100316-		}
100317-	}
100318-}
100319-TEST_END
100320-
100321-int
100322-main(void) {
100323-	return test_no_reentrancy(
100324-	    test_basic,
100325-	    test_alignment_and_size);
100326-}
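sdallocx.c, deleted above, covers sized deallocation: the caller passes back the original request size (and any alignment flags), which lets jemalloc skip the size lookup that dallocx() has to perform. A minimal sketch:

#include <jemalloc/jemalloc.h>

int main(void) {
    void *p = mallocx(128, MALLOCX_ALIGN(64));
    if (p != NULL) {
        /* Passing the original request size and flags satisfies sdallocx()'s contract. */
        sdallocx(p, 128, MALLOCX_ALIGN(64));
    }
    return 0;
}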
100327diff --git a/jemalloc/test/integration/slab_sizes.c b/jemalloc/test/integration/slab_sizes.c
100328deleted file mode 100644
100329index f6a66f2..0000000
100330--- a/jemalloc/test/integration/slab_sizes.c
100331+++ /dev/null
100332@@ -1,80 +0,0 @@
100333-#include "test/jemalloc_test.h"
100334-
100335-/* Note that this test relies on the unusual slab sizes set in slab_sizes.sh. */
100336-
100337-TEST_BEGIN(test_slab_sizes) {
100338-	unsigned nbins;
100339-	size_t page;
100340-	size_t sizemib[4];
100341-	size_t slabmib[4];
100342-	size_t len;
100343-
100344-	len = sizeof(nbins);
100345-	expect_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0,
100346-	    "nbins mallctl failure");
100347-
100348-	len = sizeof(page);
100349-	expect_d_eq(mallctl("arenas.page", &page, &len, NULL, 0), 0,
100350-	    "page mallctl failure");
100351-
100352-	len = 4;
100353-	expect_d_eq(mallctlnametomib("arenas.bin.0.size", sizemib, &len), 0,
100354-	    "bin size mallctlnametomib failure");
100355-
100356-	len = 4;
100357-	expect_d_eq(mallctlnametomib("arenas.bin.0.slab_size", slabmib, &len),
100358-	    0, "slab size mallctlnametomib failure");
100359-
100360-	size_t biggest_slab_seen = 0;
100361-
100362-	for (unsigned i = 0; i < nbins; i++) {
100363-		size_t bin_size;
100364-		size_t slab_size;
100365-		len = sizeof(size_t);
100366-		sizemib[2] = i;
100367-		slabmib[2] = i;
100368-		expect_d_eq(mallctlbymib(sizemib, 4, (void *)&bin_size, &len,
100369-		    NULL, 0), 0, "bin size mallctlbymib failure");
100370-
100371-		len = sizeof(size_t);
100372-		expect_d_eq(mallctlbymib(slabmib, 4, (void *)&slab_size, &len,
100373-		    NULL, 0), 0, "slab size mallctlbymib failure");
100374-
100375-		if (bin_size < 100) {
100376-			/*
100377-			 * Then we should be as close to 17 as possible.  Since
100378-			 * not all page sizes are valid (because of bitmap
100379-			 * limitations on the number of items in a slab), we
100380-			 * should at least make sure that the number of pages
100381-			 * goes up.
100382-			 */
100383-			expect_zu_ge(slab_size, biggest_slab_seen,
100384-			    "Slab sizes should go up");
100385-			biggest_slab_seen = slab_size;
100386-		} else if (
100387-		    (100 <= bin_size && bin_size < 128)
100388-		    || (128 < bin_size && bin_size <= 200)) {
100389-			expect_zu_eq(slab_size, page,
100390-			    "Forced-small slabs should be small");
100391-		} else if (bin_size == 128) {
100392-			expect_zu_eq(slab_size, 2 * page,
100393-			    "Forced-2-page slab should be 2 pages");
100394-		} else if (200 < bin_size && bin_size <= 4096) {
100395-			expect_zu_ge(slab_size, biggest_slab_seen,
100396-			    "Slab sizes should go up");
100397-			biggest_slab_seen = slab_size;
100398-		}
100399-	}
100400-	/*
100401-	 * For any reasonable configuration, 17 pages should be a valid slab
100402-	 * size for 4096-byte items.
100403-	 */
100404-	expect_zu_eq(biggest_slab_seen, 17 * page, "Didn't hit page target");
100405-}
100406-TEST_END
100407-
100408-int
100409-main(void) {
100410-	return test(
100411-	    test_slab_sizes);
100412-}
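slab_sizes.c, deleted above, walks per-bin metadata through the mallctl MIB interface: the name is translated to a MIB once, then the index component is patched for each bin. A standalone sketch of that pattern, assuming linkage against jemalloc:

#include <jemalloc/jemalloc.h>
#include <stdio.h>

int main(void) {
    unsigned nbins;
    size_t len = sizeof(nbins);
    if (mallctl("arenas.nbins", &nbins, &len, NULL, 0) != 0) {
        return 1;
    }

    size_t mib[4];
    size_t miblen = 4;
    if (mallctlnametomib("arenas.bin.0.slab_size", mib, &miblen) != 0) {
        return 1;
    }

    for (unsigned i = 0; i < nbins; i++) {
        size_t slab_size;
        len = sizeof(slab_size);
        mib[2] = i; /* Replace the "0" component with the bin index. */
        if (mallctlbymib(mib, miblen, &slab_size, &len, NULL, 0) == 0) {
            printf("bin %u: slab_size=%zu\n", i, slab_size);
        }
    }
    return 0;
}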
100413diff --git a/jemalloc/test/integration/slab_sizes.sh b/jemalloc/test/integration/slab_sizes.sh
100414deleted file mode 100644
100415index 07e3db8..0000000
100416--- a/jemalloc/test/integration/slab_sizes.sh
100417+++ /dev/null
100418@@ -1,4 +0,0 @@
100419-#!/bin/sh
100420-
100421-# Some screwy-looking slab sizes.
100422-export MALLOC_CONF="slab_sizes:1-4096:17|100-200:1|128-128:2"
100423diff --git a/jemalloc/test/integration/smallocx.c b/jemalloc/test/integration/smallocx.c
100424deleted file mode 100644
100425index 389319b..0000000
100426--- a/jemalloc/test/integration/smallocx.c
100427+++ /dev/null
100428@@ -1,312 +0,0 @@
100429-#include "test/jemalloc_test.h"
100430-#include "jemalloc/jemalloc_macros.h"
100431-
100432-#define STR_HELPER(x) #x
100433-#define STR(x) STR_HELPER(x)
100434-
100435-#ifndef JEMALLOC_VERSION_GID_IDENT
100436-  #error "JEMALLOC_VERSION_GID_IDENT not defined"
100437-#endif
100438-
100439-#define JOIN(x, y) x ## y
100440-#define JOIN2(x, y) JOIN(x, y)
100441-#define smallocx JOIN2(smallocx_, JEMALLOC_VERSION_GID_IDENT)
100442-
100443-typedef struct {
100444-	void *ptr;
100445-	size_t size;
100446-} smallocx_return_t;
100447-
100448-extern smallocx_return_t
100449-smallocx(size_t size, int flags);
100450-
100451-static unsigned
100452-get_nsizes_impl(const char *cmd) {
100453-	unsigned ret;
100454-	size_t z;
100455-
100456-	z = sizeof(unsigned);
100457-	expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
100458-	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
100459-
100460-	return ret;
100461-}
100462-
100463-static unsigned
100464-get_nlarge(void) {
100465-	return get_nsizes_impl("arenas.nlextents");
100466-}
100467-
100468-static size_t
100469-get_size_impl(const char *cmd, size_t ind) {
100470-	size_t ret;
100471-	size_t z;
100472-	size_t mib[4];
100473-	size_t miblen = 4;
100474-
100475-	z = sizeof(size_t);
100476-	expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
100477-	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
100478-	mib[2] = ind;
100479-	z = sizeof(size_t);
100480-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
100481-	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
100482-
100483-	return ret;
100484-}
100485-
100486-static size_t
100487-get_large_size(size_t ind) {
100488-	return get_size_impl("arenas.lextent.0.size", ind);
100489-}
100490-
100491-/*
100492- * On systems which can't merge extents, tests that call this function generate
100493- * a lot of dirty memory very quickly.  Purging between cycles mitigates
100494- * potential OOM on e.g. 32-bit Windows.
100495- */
100496-static void
100497-purge(void) {
100498-	expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
100499-	    "Unexpected mallctl error");
100500-}
100501-
100502-/*
100503- * GCC "-Walloc-size-larger-than" warning detects when one of the memory
100504- * allocation functions is called with a size larger than the maximum size that
100505- * they support. Here we want to explicitly test that the allocation functions
100506- * do indeed fail properly when this is the case, which triggers the warning.
100507- * Therefore we disable the warning for these tests.
100508- */
100509-JEMALLOC_DIAGNOSTIC_PUSH
100510-JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
100511-
100512-TEST_BEGIN(test_overflow) {
100513-	size_t largemax;
100514-
100515-	largemax = get_large_size(get_nlarge()-1);
100516-
100517-	expect_ptr_null(smallocx(largemax+1, 0).ptr,
100518-	    "Expected OOM for smallocx(size=%#zx, 0)", largemax+1);
100519-
100520-	expect_ptr_null(smallocx(ZU(PTRDIFF_MAX)+1, 0).ptr,
100521-	    "Expected OOM for smallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);
100522-
100523-	expect_ptr_null(smallocx(SIZE_T_MAX, 0).ptr,
100524-	    "Expected OOM for smallocx(size=%#zx, 0)", SIZE_T_MAX);
100525-
100526-	expect_ptr_null(smallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)).ptr,
100527-	    "Expected OOM for smallocx(size=1, MALLOCX_ALIGN(%#zx))",
100528-	    ZU(PTRDIFF_MAX)+1);
100529-}
100530-TEST_END
100531-
100532-static void *
100533-remote_alloc(void *arg) {
100534-	unsigned arena;
100535-	size_t sz = sizeof(unsigned);
100536-	expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
100537-	    "Unexpected mallctl() failure");
100538-	size_t large_sz;
100539-	sz = sizeof(size_t);
100540-	expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large_sz, &sz,
100541-	    NULL, 0), 0, "Unexpected mallctl failure");
100542-
100543-	smallocx_return_t r
100544-	    = smallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
100545-	void *ptr = r.ptr;
100546-	expect_zu_eq(r.size,
100547-	    nallocx(large_sz, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE),
100548-	    "Expected smalloc(size,flags).size == nallocx(size,flags)");
100549-	void **ret = (void **)arg;
100550-	*ret = ptr;
100551-
100552-	return NULL;
100553-}
100554-
100555-TEST_BEGIN(test_remote_free) {
100556-	thd_t thd;
100557-	void *ret;
100558-	thd_create(&thd, remote_alloc, (void *)&ret);
100559-	thd_join(thd, NULL);
100560-	expect_ptr_not_null(ret, "Unexpected smallocx failure");
100561-
100562-	/* Avoid TCACHE_NONE to explicitly test tcache_flush(). */
100563-	dallocx(ret, 0);
100564-	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
100565-}
100566-TEST_END
100567-
100568-TEST_BEGIN(test_oom) {
100569-	size_t largemax;
100570-	bool oom;
100571-	void *ptrs[3];
100572-	unsigned i;
100573-
100574-	/*
100575-	 * It should be impossible to allocate three objects that each consume
100576-	 * nearly half the virtual address space.
100577-	 */
100578-	largemax = get_large_size(get_nlarge()-1);
100579-	oom = false;
100580-	for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
100581-		ptrs[i] = smallocx(largemax, 0).ptr;
100582-		if (ptrs[i] == NULL) {
100583-			oom = true;
100584-		}
100585-	}
100586-	expect_true(oom,
100587-	    "Expected OOM during series of calls to smallocx(size=%zu, 0)",
100588-	    largemax);
100589-	for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
100590-		if (ptrs[i] != NULL) {
100591-			dallocx(ptrs[i], 0);
100592-		}
100593-	}
100594-	purge();
100595-
100596-#if LG_SIZEOF_PTR == 3
100597-	expect_ptr_null(smallocx(0x8000000000000000ULL,
100598-	    MALLOCX_ALIGN(0x8000000000000000ULL)).ptr,
100599-	    "Expected OOM for smallocx()");
100600-	expect_ptr_null(smallocx(0x8000000000000000ULL,
100601-	    MALLOCX_ALIGN(0x80000000)).ptr,
100602-	    "Expected OOM for smallocx()");
100603-#else
100604-	expect_ptr_null(smallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)).ptr,
100605-	    "Expected OOM for smallocx()");
100606-#endif
100607-}
100608-TEST_END
100609-
100610-/* Re-enable the "-Walloc-size-larger-than=" warning */
100611-JEMALLOC_DIAGNOSTIC_POP
100612-
100613-TEST_BEGIN(test_basic) {
100614-#define MAXSZ (((size_t)1) << 23)
100615-	size_t sz;
100616-
100617-	for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
100618-		smallocx_return_t ret;
100619-		size_t nsz, rsz, smz;
100620-		void *p;
100621-		nsz = nallocx(sz, 0);
100622-		expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
100623-		ret = smallocx(sz, 0);
100624-		p = ret.ptr;
100625-		smz = ret.size;
100626-		expect_ptr_not_null(p,
100627-		    "Unexpected smallocx(size=%zx, flags=0) error", sz);
100628-		rsz = sallocx(p, 0);
100629-		expect_zu_ge(rsz, sz, "Real size smaller than expected");
100630-		expect_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
100631-		expect_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch");
100632-		dallocx(p, 0);
100633-
100634-		ret = smallocx(sz, 0);
100635-		p = ret.ptr;
100636-		smz = ret.size;
100637-		expect_ptr_not_null(p,
100638-		    "Unexpected smallocx(size=%zx, flags=0) error", sz);
100639-		dallocx(p, 0);
100640-
100641-		nsz = nallocx(sz, MALLOCX_ZERO);
100642-		expect_zu_ne(nsz, 0, "Unexpected nallocx() error");
100643-		expect_zu_ne(smz, 0, "Unexpected smallocx() error");
100644-		ret = smallocx(sz, MALLOCX_ZERO);
100645-		p = ret.ptr;
100646-		expect_ptr_not_null(p,
100647-		    "Unexpected smallocx(size=%zx, flags=MALLOCX_ZERO) error",
100648-		    nsz);
100649-		rsz = sallocx(p, 0);
100650-		expect_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
100651-		expect_zu_eq(nsz, smz, "nallocx()/smallocx() size mismatch");
100652-		dallocx(p, 0);
100653-		purge();
100654-	}
100655-#undef MAXSZ
100656-}
100657-TEST_END
100658-
100659-TEST_BEGIN(test_alignment_and_size) {
100660-	const char *percpu_arena;
100661-	size_t sz = sizeof(percpu_arena);
100662-
100663-	if(mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) ||
100664-	    strcmp(percpu_arena, "disabled") != 0) {
100665-		test_skip("test_alignment_and_size skipped: "
100666-		    "not working with percpu arena.");
100667-	};
100668-#define MAXALIGN (((size_t)1) << 23)
100669-#define NITER 4
100670-	size_t nsz, rsz, smz, alignment, total;
100671-	unsigned i;
100672-	void *ps[NITER];
100673-
100674-	for (i = 0; i < NITER; i++) {
100675-		ps[i] = NULL;
100676-	}
100677-
100678-	for (alignment = 8;
100679-	    alignment <= MAXALIGN;
100680-	    alignment <<= 1) {
100681-		total = 0;
100682-		for (sz = 1;
100683-		    sz < 3 * alignment && sz < (1U << 31);
100684-		    sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
100685-			for (i = 0; i < NITER; i++) {
100686-				nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
100687-				    MALLOCX_ZERO);
100688-				expect_zu_ne(nsz, 0,
100689-				    "nallocx() error for alignment=%zu, "
100690-				    "size=%zu (%#zx)", alignment, sz, sz);
100691-				smallocx_return_t ret
100692-				    = smallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO);
100693-				ps[i] = ret.ptr;
100694-				expect_ptr_not_null(ps[i],
100695-				    "smallocx() error for alignment=%zu, "
100696-				    "size=%zu (%#zx)", alignment, sz, sz);
100697-				rsz = sallocx(ps[i], 0);
100698-				smz = ret.size;
100699-				expect_zu_ge(rsz, sz,
100700-				    "Real size smaller than expected for "
100701-				    "alignment=%zu, size=%zu", alignment, sz);
100702-				expect_zu_eq(nsz, rsz,
100703-				    "nallocx()/sallocx() size mismatch for "
100704-				    "alignment=%zu, size=%zu", alignment, sz);
100705-				expect_zu_eq(nsz, smz,
100706-				    "nallocx()/smallocx() size mismatch for "
100707-				    "alignment=%zu, size=%zu", alignment, sz);
100708-				expect_ptr_null(
100709-				    (void *)((uintptr_t)ps[i] & (alignment-1)),
100710-				    "%p inadequately aligned for"
100711-				    " alignment=%zu, size=%zu", ps[i],
100712-				    alignment, sz);
100713-				total += rsz;
100714-				if (total >= (MAXALIGN << 1)) {
100715-					break;
100716-				}
100717-			}
100718-			for (i = 0; i < NITER; i++) {
100719-				if (ps[i] != NULL) {
100720-					dallocx(ps[i], 0);
100721-					ps[i] = NULL;
100722-				}
100723-			}
100724-		}
100725-		purge();
100726-	}
100727-#undef MAXALIGN
100728-#undef NITER
100729-}
100730-TEST_END
100731-
100732-int
100733-main(void) {
100734-	return test(
100735-	    test_overflow,
100736-	    test_oom,
100737-	    test_remote_free,
100738-	    test_basic,
100739-	    test_alignment_and_size);
100740-}
100741diff --git a/jemalloc/test/integration/smallocx.sh b/jemalloc/test/integration/smallocx.sh
100742deleted file mode 100644
100743index d07f10f..0000000
100744--- a/jemalloc/test/integration/smallocx.sh
100745+++ /dev/null
100746@@ -1,5 +0,0 @@
100747-#!/bin/sh
100748-
100749-if [ "x${enable_fill}" = "x1" ] ; then
100750-    export MALLOC_CONF="junk:false"
100751-fi
100752diff --git a/jemalloc/test/integration/thread_arena.c b/jemalloc/test/integration/thread_arena.c
100753deleted file mode 100644
100754index 4a6abf6..0000000
100755--- a/jemalloc/test/integration/thread_arena.c
100756+++ /dev/null
100757@@ -1,86 +0,0 @@
100758-#include "test/jemalloc_test.h"
100759-
100760-#define NTHREADS 10
100761-
100762-void *
100763-thd_start(void *arg) {
100764-	unsigned main_arena_ind = *(unsigned *)arg;
100765-	void *p;
100766-	unsigned arena_ind;
100767-	size_t size;
100768-	int err;
100769-
100770-	p = malloc(1);
100771-	expect_ptr_not_null(p, "Error in malloc()");
100772-	free(p);
100773-
100774-	size = sizeof(arena_ind);
100775-	if ((err = mallctl("thread.arena", (void *)&arena_ind, &size,
100776-	    (void *)&main_arena_ind, sizeof(main_arena_ind)))) {
100777-		char buf[BUFERROR_BUF];
100778-
100779-		buferror(err, buf, sizeof(buf));
100780-		test_fail("Error in mallctl(): %s", buf);
100781-	}
100782-
100783-	size = sizeof(arena_ind);
100784-	if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL,
100785-	    0))) {
100786-		char buf[BUFERROR_BUF];
100787-
100788-		buferror(err, buf, sizeof(buf));
100789-		test_fail("Error in mallctl(): %s", buf);
100790-	}
100791-	expect_u_eq(arena_ind, main_arena_ind,
100792-	    "Arena index should be same as for main thread");
100793-
100794-	return NULL;
100795-}
100796-
100797-static void
100798-mallctl_failure(int err) {
100799-	char buf[BUFERROR_BUF];
100800-
100801-	buferror(err, buf, sizeof(buf));
100802-	test_fail("Error in mallctl(): %s", buf);
100803-}
100804-
100805-TEST_BEGIN(test_thread_arena) {
100806-	void *p;
100807-	int err;
100808-	thd_t thds[NTHREADS];
100809-	unsigned i;
100810-
100811-	p = malloc(1);
100812-	expect_ptr_not_null(p, "Error in malloc()");
100813-
100814-	unsigned arena_ind, old_arena_ind;
100815-	size_t sz = sizeof(unsigned);
100816-	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
100817-	    0, "Arena creation failure");
100818-
100819-	size_t size = sizeof(arena_ind);
100820-	if ((err = mallctl("thread.arena", (void *)&old_arena_ind, &size,
100821-	    (void *)&arena_ind, sizeof(arena_ind))) != 0) {
100822-		mallctl_failure(err);
100823-	}
100824-
100825-	for (i = 0; i < NTHREADS; i++) {
100826-		thd_create(&thds[i], thd_start,
100827-		    (void *)&arena_ind);
100828-	}
100829-
100830-	for (i = 0; i < NTHREADS; i++) {
100831-		intptr_t join_ret;
100832-		thd_join(thds[i], (void *)&join_ret);
100833-		expect_zd_eq(join_ret, 0, "Unexpected thread join error");
100834-	}
100835-	free(p);
100836-}
100837-TEST_END
100838-
100839-int
100840-main(void) {
100841-	return test(
100842-	    test_thread_arena);
100843-}
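thread_arena.c, deleted above, relies on the read-modify-write form of mallctl("thread.arena", ...): writing an arena index migrates the calling thread to that arena, while the previous index is reported through the read pointer. A minimal single-threaded sketch:

#include <jemalloc/jemalloc.h>
#include <stdio.h>

int main(void) {
    unsigned new_arena, old_arena;
    size_t sz = sizeof(unsigned);

    /* Create an arena; its index is returned through the "old" pointer. */
    if (mallctl("arenas.create", &new_arena, &sz, NULL, 0) != 0) {
        return 1;
    }

    /* Switch this thread to the new arena, reading back the previous one. */
    sz = sizeof(unsigned);
    if (mallctl("thread.arena", &old_arena, &sz, &new_arena,
        sizeof(new_arena)) != 0) {
        return 1;
    }
    printf("moved from arena %u to arena %u\n", old_arena, new_arena);
    return 0;
}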
100844diff --git a/jemalloc/test/integration/thread_tcache_enabled.c b/jemalloc/test/integration/thread_tcache_enabled.c
100845deleted file mode 100644
100846index d44dbe9..0000000
100847--- a/jemalloc/test/integration/thread_tcache_enabled.c
100848+++ /dev/null
100849@@ -1,87 +0,0 @@
100850-#include "test/jemalloc_test.h"
100851-
100852-void *
100853-thd_start(void *arg) {
100854-	bool e0, e1;
100855-	size_t sz = sizeof(bool);
100856-	expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL,
100857-	    0), 0, "Unexpected mallctl failure");
100858-
100859-	if (e0) {
100860-		e1 = false;
100861-		expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
100862-		    (void *)&e1, sz), 0, "Unexpected mallctl() error");
100863-		expect_true(e0, "tcache should be enabled");
100864-	}
100865-
100866-	e1 = true;
100867-	expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
100868-	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
100869-	expect_false(e0, "tcache should be disabled");
100870-
100871-	e1 = true;
100872-	expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
100873-	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
100874-	expect_true(e0, "tcache should be enabled");
100875-
100876-	e1 = false;
100877-	expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
100878-	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
100879-	expect_true(e0, "tcache should be enabled");
100880-
100881-	e1 = false;
100882-	expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
100883-	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
100884-	expect_false(e0, "tcache should be disabled");
100885-
100886-	free(malloc(1));
100887-	e1 = true;
100888-	expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
100889-	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
100890-	expect_false(e0, "tcache should be disabled");
100891-
100892-	free(malloc(1));
100893-	e1 = true;
100894-	expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
100895-	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
100896-	expect_true(e0, "tcache should be enabled");
100897-
100898-	free(malloc(1));
100899-	e1 = false;
100900-	expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
100901-	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
100902-	expect_true(e0, "tcache should be enabled");
100903-
100904-	free(malloc(1));
100905-	e1 = false;
100906-	expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz,
100907-	    (void *)&e1, sz), 0, "Unexpected mallctl() error");
100908-	expect_false(e0, "tcache should be disabled");
100909-
100910-	free(malloc(1));
100911-	return NULL;
100912-}
100913-
100914-TEST_BEGIN(test_main_thread) {
100915-	thd_start(NULL);
100916-}
100917-TEST_END
100918-
100919-TEST_BEGIN(test_subthread) {
100920-	thd_t thd;
100921-
100922-	thd_create(&thd, thd_start, NULL);
100923-	thd_join(thd, NULL);
100924-}
100925-TEST_END
100926-
100927-int
100928-main(void) {
100929-	/* Run tests multiple times to check for bad interactions. */
100930-	return test(
100931-	    test_main_thread,
100932-	    test_subthread,
100933-	    test_main_thread,
100934-	    test_subthread,
100935-	    test_main_thread);
100936-}
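thread_tcache_enabled.c, deleted above, repeatedly uses the same idiom: a single mallctl("thread.tcache.enabled", ...) call installs a new setting and returns the setting that was in effect before the write. A condensed sketch:

#include <jemalloc/jemalloc.h>
#include <stdbool.h>
#include <stdio.h>

int main(void) {
    bool before, disable = false;
    size_t sz = sizeof(bool);

    /* Disable the thread cache and capture the previous setting. */
    if (mallctl("thread.tcache.enabled", &before, &sz, &disable,
        sizeof(disable)) != 0) {
        return 1;
    }
    printf("tcache was %s, now disabled\n", before ? "enabled" : "disabled");
    return 0;
}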
100937diff --git a/jemalloc/test/integration/xallocx.c b/jemalloc/test/integration/xallocx.c
100938deleted file mode 100644
100939index 1370854..0000000
100940--- a/jemalloc/test/integration/xallocx.c
100941+++ /dev/null
100942@@ -1,384 +0,0 @@
100943-#include "test/jemalloc_test.h"
100944-
100945-/*
100946- * Use a separate arena for xallocx() extension/contraction tests so that
100947- * internal allocation e.g. by heap profiling can't interpose allocations where
100948- * xallocx() would ordinarily be able to extend.
100949- */
100950-static unsigned
100951-arena_ind(void) {
100952-	static unsigned ind = 0;
100953-
100954-	if (ind == 0) {
100955-		size_t sz = sizeof(ind);
100956-		expect_d_eq(mallctl("arenas.create", (void *)&ind, &sz, NULL,
100957-		    0), 0, "Unexpected mallctl failure creating arena");
100958-	}
100959-
100960-	return ind;
100961-}
100962-
100963-TEST_BEGIN(test_same_size) {
100964-	void *p;
100965-	size_t sz, tsz;
100966-
100967-	p = mallocx(42, 0);
100968-	expect_ptr_not_null(p, "Unexpected mallocx() error");
100969-	sz = sallocx(p, 0);
100970-
100971-	tsz = xallocx(p, sz, 0, 0);
100972-	expect_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
100973-
100974-	dallocx(p, 0);
100975-}
100976-TEST_END
100977-
100978-TEST_BEGIN(test_extra_no_move) {
100979-	void *p;
100980-	size_t sz, tsz;
100981-
100982-	p = mallocx(42, 0);
100983-	expect_ptr_not_null(p, "Unexpected mallocx() error");
100984-	sz = sallocx(p, 0);
100985-
100986-	tsz = xallocx(p, sz, sz-42, 0);
100987-	expect_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
100988-
100989-	dallocx(p, 0);
100990-}
100991-TEST_END
100992-
100993-TEST_BEGIN(test_no_move_fail) {
100994-	void *p;
100995-	size_t sz, tsz;
100996-
100997-	p = mallocx(42, 0);
100998-	expect_ptr_not_null(p, "Unexpected mallocx() error");
100999-	sz = sallocx(p, 0);
101000-
101001-	tsz = xallocx(p, sz + 5, 0, 0);
101002-	expect_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);
101003-
101004-	dallocx(p, 0);
101005-}
101006-TEST_END
101007-
101008-static unsigned
101009-get_nsizes_impl(const char *cmd) {
101010-	unsigned ret;
101011-	size_t z;
101012-
101013-	z = sizeof(unsigned);
101014-	expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
101015-	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
101016-
101017-	return ret;
101018-}
101019-
101020-static unsigned
101021-get_nsmall(void) {
101022-	return get_nsizes_impl("arenas.nbins");
101023-}
101024-
101025-static unsigned
101026-get_nlarge(void) {
101027-	return get_nsizes_impl("arenas.nlextents");
101028-}
101029-
101030-static size_t
101031-get_size_impl(const char *cmd, size_t ind) {
101032-	size_t ret;
101033-	size_t z;
101034-	size_t mib[4];
101035-	size_t miblen = 4;
101036-
101037-	z = sizeof(size_t);
101038-	expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
101039-	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
101040-	mib[2] = ind;
101041-	z = sizeof(size_t);
101042-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
101043-	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
101044-
101045-	return ret;
101046-}
101047-
101048-static size_t
101049-get_small_size(size_t ind) {
101050-	return get_size_impl("arenas.bin.0.size", ind);
101051-}
101052-
101053-static size_t
101054-get_large_size(size_t ind) {
101055-	return get_size_impl("arenas.lextent.0.size", ind);
101056-}
101057-
101058-TEST_BEGIN(test_size) {
101059-	size_t small0, largemax;
101060-	void *p;
101061-
101062-	/* Get size classes. */
101063-	small0 = get_small_size(0);
101064-	largemax = get_large_size(get_nlarge()-1);
101065-
101066-	p = mallocx(small0, 0);
101067-	expect_ptr_not_null(p, "Unexpected mallocx() error");
101068-
101069-	/* Test smallest supported size. */
101070-	expect_zu_eq(xallocx(p, 1, 0, 0), small0,
101071-	    "Unexpected xallocx() behavior");
101072-
101073-	/* Test largest supported size. */
101074-	expect_zu_le(xallocx(p, largemax, 0, 0), largemax,
101075-	    "Unexpected xallocx() behavior");
101076-
101077-	/* Test size overflow. */
101078-	expect_zu_le(xallocx(p, largemax+1, 0, 0), largemax,
101079-	    "Unexpected xallocx() behavior");
101080-	expect_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), largemax,
101081-	    "Unexpected xallocx() behavior");
101082-
101083-	dallocx(p, 0);
101084-}
101085-TEST_END
101086-
101087-TEST_BEGIN(test_size_extra_overflow) {
101088-	size_t small0, largemax;
101089-	void *p;
101090-
101091-	/* Get size classes. */
101092-	small0 = get_small_size(0);
101093-	largemax = get_large_size(get_nlarge()-1);
101094-
101095-	p = mallocx(small0, 0);
101096-	expect_ptr_not_null(p, "Unexpected mallocx() error");
101097-
101098-	/* Test overflows that can be resolved by clamping extra. */
101099-	expect_zu_le(xallocx(p, largemax-1, 2, 0), largemax,
101100-	    "Unexpected xallocx() behavior");
101101-	expect_zu_le(xallocx(p, largemax, 1, 0), largemax,
101102-	    "Unexpected xallocx() behavior");
101103-
101104-	/* Test overflow such that largemax-size underflows. */
101105-	expect_zu_le(xallocx(p, largemax+1, 2, 0), largemax,
101106-	    "Unexpected xallocx() behavior");
101107-	expect_zu_le(xallocx(p, largemax+2, 3, 0), largemax,
101108-	    "Unexpected xallocx() behavior");
101109-	expect_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), largemax,
101110-	    "Unexpected xallocx() behavior");
101111-	expect_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), largemax,
101112-	    "Unexpected xallocx() behavior");
101113-
101114-	dallocx(p, 0);
101115-}
101116-TEST_END
101117-
101118-TEST_BEGIN(test_extra_small) {
101119-	size_t small0, small1, largemax;
101120-	void *p;
101121-
101122-	/* Get size classes. */
101123-	small0 = get_small_size(0);
101124-	small1 = get_small_size(1);
101125-	largemax = get_large_size(get_nlarge()-1);
101126-
101127-	p = mallocx(small0, 0);
101128-	expect_ptr_not_null(p, "Unexpected mallocx() error");
101129-
101130-	expect_zu_eq(xallocx(p, small1, 0, 0), small0,
101131-	    "Unexpected xallocx() behavior");
101132-
101133-	expect_zu_eq(xallocx(p, small1, 0, 0), small0,
101134-	    "Unexpected xallocx() behavior");
101135-
101136-	expect_zu_eq(xallocx(p, small0, small1 - small0, 0), small0,
101137-	    "Unexpected xallocx() behavior");
101138-
101139-	/* Test size+extra overflow. */
101140-	expect_zu_eq(xallocx(p, small0, largemax - small0 + 1, 0), small0,
101141-	    "Unexpected xallocx() behavior");
101142-	expect_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0,
101143-	    "Unexpected xallocx() behavior");
101144-
101145-	dallocx(p, 0);
101146-}
101147-TEST_END
101148-
101149-TEST_BEGIN(test_extra_large) {
101150-	int flags = MALLOCX_ARENA(arena_ind());
101151-	size_t smallmax, large1, large2, large3, largemax;
101152-	void *p;
101153-
101154-	/* Get size classes. */
101155-	smallmax = get_small_size(get_nsmall()-1);
101156-	large1 = get_large_size(1);
101157-	large2 = get_large_size(2);
101158-	large3 = get_large_size(3);
101159-	largemax = get_large_size(get_nlarge()-1);
101160-
101161-	p = mallocx(large3, flags);
101162-	expect_ptr_not_null(p, "Unexpected mallocx() error");
101163-
101164-	expect_zu_eq(xallocx(p, large3, 0, flags), large3,
101165-	    "Unexpected xallocx() behavior");
101166-	/* Test size decrease with zero extra. */
101167-	expect_zu_ge(xallocx(p, large1, 0, flags), large1,
101168-	    "Unexpected xallocx() behavior");
101169-	expect_zu_ge(xallocx(p, smallmax, 0, flags), large1,
101170-	    "Unexpected xallocx() behavior");
101171-
101172-	if (xallocx(p, large3, 0, flags) != large3) {
101173-		p = rallocx(p, large3, flags);
101174-		expect_ptr_not_null(p, "Unexpected rallocx() failure");
101175-	}
101176-	/* Test size decrease with non-zero extra. */
101177-	expect_zu_eq(xallocx(p, large1, large3 - large1, flags), large3,
101178-	    "Unexpected xallocx() behavior");
101179-	expect_zu_eq(xallocx(p, large2, large3 - large2, flags), large3,
101180-	    "Unexpected xallocx() behavior");
101181-	expect_zu_ge(xallocx(p, large1, large2 - large1, flags), large2,
101182-	    "Unexpected xallocx() behavior");
101183-	expect_zu_ge(xallocx(p, smallmax, large1 - smallmax, flags), large1,
101184-	    "Unexpected xallocx() behavior");
101185-
101186-	expect_zu_ge(xallocx(p, large1, 0, flags), large1,
101187-	    "Unexpected xallocx() behavior");
101188-	/* Test size increase with zero extra. */
101189-	expect_zu_le(xallocx(p, large3, 0, flags), large3,
101190-	    "Unexpected xallocx() behavior");
101191-	expect_zu_le(xallocx(p, largemax+1, 0, flags), large3,
101192-	    "Unexpected xallocx() behavior");
101193-
101194-	expect_zu_ge(xallocx(p, large1, 0, flags), large1,
101195-	    "Unexpected xallocx() behavior");
101196-	/* Test size increase with non-zero extra. */
101197-	expect_zu_le(xallocx(p, large1, SIZE_T_MAX - large1, flags), largemax,
101198-	    "Unexpected xallocx() behavior");
101199-
101200-	expect_zu_ge(xallocx(p, large1, 0, flags), large1,
101201-	    "Unexpected xallocx() behavior");
101202-	/* Test size increase with non-zero extra. */
101203-	expect_zu_le(xallocx(p, large1, large3 - large1, flags), large3,
101204-	    "Unexpected xallocx() behavior");
101205-
101206-	if (xallocx(p, large3, 0, flags) != large3) {
101207-		p = rallocx(p, large3, flags);
101208-		expect_ptr_not_null(p, "Unexpected rallocx() failure");
101209-	}
101210-	/* Test size+extra overflow. */
101211-	expect_zu_le(xallocx(p, large3, largemax - large3 + 1, flags), largemax,
101212-	    "Unexpected xallocx() behavior");
101213-
101214-	dallocx(p, flags);
101215-}
101216-TEST_END
101217-
101218-static void
101219-print_filled_extents(const void *p, uint8_t c, size_t len) {
101220-	const uint8_t *pc = (const uint8_t *)p;
101221-	size_t i, range0;
101222-	uint8_t c0;
101223-
101224-	malloc_printf("  p=%p, c=%#x, len=%zu:", p, c, len);
101225-	range0 = 0;
101226-	c0 = pc[0];
101227-	for (i = 0; i < len; i++) {
101228-		if (pc[i] != c0) {
101229-			malloc_printf(" %#x[%zu..%zu)", c0, range0, i);
101230-			range0 = i;
101231-			c0 = pc[i];
101232-		}
101233-	}
101234-	malloc_printf(" %#x[%zu..%zu)\n", c0, range0, i);
101235-}
101236-
101237-static bool
101238-validate_fill(const void *p, uint8_t c, size_t offset, size_t len) {
101239-	const uint8_t *pc = (const uint8_t *)p;
101240-	bool err;
101241-	size_t i;
101242-
101243-	for (i = offset, err = false; i < offset+len; i++) {
101244-		if (pc[i] != c) {
101245-			err = true;
101246-		}
101247-	}
101248-
101249-	if (err) {
101250-		print_filled_extents(p, c, offset + len);
101251-	}
101252-
101253-	return err;
101254-}
101255-
101256-static void
101257-test_zero(size_t szmin, size_t szmax) {
101258-	int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO;
101259-	size_t sz, nsz;
101260-	void *p;
101261-#define FILL_BYTE 0x7aU
101262-
101263-	sz = szmax;
101264-	p = mallocx(sz, flags);
101265-	expect_ptr_not_null(p, "Unexpected mallocx() error");
101266-	expect_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu",
101267-	    sz);
101268-
101269-	/*
101270-	 * Fill with non-zero so that non-debug builds are more likely to detect
101271-	 * errors.
101272-	 */
101273-	memset(p, FILL_BYTE, sz);
101274-	expect_false(validate_fill(p, FILL_BYTE, 0, sz),
101275-	    "Memory not filled: sz=%zu", sz);
101276-
101277-	/* Shrink in place so that we can expect growing in place to succeed. */
101278-	sz = szmin;
101279-	if (xallocx(p, sz, 0, flags) != sz) {
101280-		p = rallocx(p, sz, flags);
101281-		expect_ptr_not_null(p, "Unexpected rallocx() failure");
101282-	}
101283-	expect_false(validate_fill(p, FILL_BYTE, 0, sz),
101284-	    "Memory not filled: sz=%zu", sz);
101285-
101286-	for (sz = szmin; sz < szmax; sz = nsz) {
101287-		nsz = nallocx(sz+1, flags);
101288-		if (xallocx(p, sz+1, 0, flags) != nsz) {
101289-			p = rallocx(p, sz+1, flags);
101290-			expect_ptr_not_null(p, "Unexpected rallocx() failure");
101291-		}
101292-		expect_false(validate_fill(p, FILL_BYTE, 0, sz),
101293-		    "Memory not filled: sz=%zu", sz);
101294-		expect_false(validate_fill(p, 0x00, sz, nsz-sz),
101295-		    "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz);
101296-		memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz);
101297-		expect_false(validate_fill(p, FILL_BYTE, 0, nsz),
101298-		    "Memory not filled: nsz=%zu", nsz);
101299-	}
101300-
101301-	dallocx(p, flags);
101302-}
101303-
101304-TEST_BEGIN(test_zero_large) {
101305-	size_t large0, large1;
101306-
101307-	/* Get size classes. */
101308-	large0 = get_large_size(0);
101309-	large1 = get_large_size(1);
101310-
101311-	test_zero(large1, large0 * 2);
101312-}
101313-TEST_END
101314-
101315-int
101316-main(void) {
101317-	return test(
101318-	    test_same_size,
101319-	    test_extra_no_move,
101320-	    test_no_move_fail,
101321-	    test_size,
101322-	    test_size_extra_overflow,
101323-	    test_extra_small,
101324-	    test_extra_large,
101325-	    test_zero_large);
101326-}
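xallocx.c, deleted above, depends on xallocx() never moving the allocation: the call resizes in place as far as it can and returns the resulting usable size, so a result smaller than the requested size means the resize did not happen and the caller must fall back to rallocx(). A short sketch of that fallback pattern:

#include <jemalloc/jemalloc.h>
#include <stdio.h>

int main(void) {
    void *p = mallocx(4096, 0);
    if (p == NULL) {
        return 1;
    }
    size_t got = xallocx(p, 2 * 4096, 0, 0);
    if (got >= 2 * 4096) {
        printf("grew in place to %zu bytes\n", got);
    } else {
        /* Could not grow in place; rallocx() may move the allocation. */
        void *q = rallocx(p, 2 * 4096, 0);
        if (q != NULL) {
            p = q;
        }
    }
    dallocx(p, 0);
    return 0;
}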
101327diff --git a/jemalloc/test/integration/xallocx.sh b/jemalloc/test/integration/xallocx.sh
101328deleted file mode 100644
101329index 0cc2187..0000000
101330--- a/jemalloc/test/integration/xallocx.sh
101331+++ /dev/null
101332@@ -1,5 +0,0 @@
101333-#!/bin/sh
101334-
101335-if [ "x${enable_fill}" = "x1" ] ; then
101336-  export MALLOC_CONF="junk:false"
101337-fi
101338diff --git a/jemalloc/test/src/SFMT.c b/jemalloc/test/src/SFMT.c
101339deleted file mode 100644
101340index c05e218..0000000
101341--- a/jemalloc/test/src/SFMT.c
101342+++ /dev/null
101343@@ -1,719 +0,0 @@
101344-/*
101345- * This file derives from SFMT 1.3.3
101346- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
101347- * released under the terms of the following license:
101348- *
101349- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
101350- *   University. All rights reserved.
101351- *
101352- *   Redistribution and use in source and binary forms, with or without
101353- *   modification, are permitted provided that the following conditions are
101354- *   met:
101355- *
101356- *       * Redistributions of source code must retain the above copyright
101357- *         notice, this list of conditions and the following disclaimer.
101358- *       * Redistributions in binary form must reproduce the above
101359- *         copyright notice, this list of conditions and the following
101360- *         disclaimer in the documentation and/or other materials provided
101361- *         with the distribution.
101362- *       * Neither the name of the Hiroshima University nor the names of
101363- *         its contributors may be used to endorse or promote products
101364- *         derived from this software without specific prior written
101365- *         permission.
101366- *
101367- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
101368- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
101369- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
101370- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
101371- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
101372- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
101373- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
101374- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
101375- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
101376- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
101377- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
101378- */
101379-/**
101380- * @file  SFMT.c
101381- * @brief SIMD oriented Fast Mersenne Twister(SFMT)
101382- *
101383- * @author Mutsuo Saito (Hiroshima University)
101384- * @author Makoto Matsumoto (Hiroshima University)
101385- *
101386- * Copyright (C) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
101387- * University. All rights reserved.
101388- *
101389- * The new BSD License is applied to this software, see LICENSE.txt
101390- */
101391-#define SFMT_C_
101392-#include "test/jemalloc_test.h"
101393-#include "test/SFMT-params.h"
101394-
101395-#if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64)
101396-#define BIG_ENDIAN64 1
101397-#endif
101398-#if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64)
101399-#define BIG_ENDIAN64 1
101400-#endif
101401-#if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64)
101402-#define BIG_ENDIAN64 1
101403-#endif
101404-#if defined(ONLY64) && !defined(BIG_ENDIAN64)
101405-  #if defined(__GNUC__)
101406-    #error "-DONLY64 must be specified with -DBIG_ENDIAN64"
101407-  #endif
101408-#undef ONLY64
101409-#endif
101410-/*------------------------------------------------------
101411-  128-bit SIMD data type for Altivec, SSE2 or standard C
101412-  ------------------------------------------------------*/
101413-#if defined(HAVE_ALTIVEC)
101414-/** 128-bit data structure */
101415-union W128_T {
101416-    vector unsigned int s;
101417-    uint32_t u[4];
101418-};
101419-/** 128-bit data type */
101420-typedef union W128_T w128_t;
101421-
101422-#elif defined(HAVE_SSE2)
101423-/** 128-bit data structure */
101424-union W128_T {
101425-    __m128i si;
101426-    uint32_t u[4];
101427-};
101428-/** 128-bit data type */
101429-typedef union W128_T w128_t;
101430-
101431-#else
101432-
101433-/** 128-bit data structure */
101434-struct W128_T {
101435-    uint32_t u[4];
101436-};
101437-/** 128-bit data type */
101438-typedef struct W128_T w128_t;
101439-
101440-#endif
101441-
101442-struct sfmt_s {
101443-    /** the 128-bit internal state array */
101444-    w128_t sfmt[N];
101445-    /** index counter to the 32-bit internal state array */
101446-    int idx;
101447-    /** a flag: it is 0 if and only if the internal state is not yet
101448-     * initialized. */
101449-    int initialized;
101450-};
101451-
101452-/*--------------------------------------
101453-  FILE GLOBAL VARIABLES
101454-  internal state, index counter and flag
101455-  --------------------------------------*/
101456-
101457- /** a parity check vector which certifies the period of 2^{MEXP} */
101458-static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4};
101459-
101460-/*----------------
101461-  STATIC FUNCTIONS
101462-  ----------------*/
101463-static inline int idxof(int i);
101464-#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
101465-static inline void rshift128(w128_t *out,  w128_t const *in, int shift);
101466-static inline void lshift128(w128_t *out,  w128_t const *in, int shift);
101467-#endif
101468-static inline void gen_rand_all(sfmt_t *ctx);
101469-static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size);
101470-static inline uint32_t func1(uint32_t x);
101471-static inline uint32_t func2(uint32_t x);
101472-static void period_certification(sfmt_t *ctx);
101473-#if defined(BIG_ENDIAN64) && !defined(ONLY64)
101474-static inline void swap(w128_t *array, int size);
101475-#endif
101476-
101477-#if defined(HAVE_ALTIVEC)
101478-  #include "test/SFMT-alti.h"
101479-#elif defined(HAVE_SSE2)
101480-  #include "test/SFMT-sse2.h"
101481-#endif
101482-
101483-/**
101484- * This function simulates a 64-bit index of LITTLE ENDIAN
101485- * on a BIG ENDIAN machine.
101486- */
101487-#ifdef ONLY64
101488-static inline int idxof(int i) {
101489-    return i ^ 1;
101490-}
101491-#else
101492-static inline int idxof(int i) {
101493-    return i;
101494-}
101495-#endif
101496-/**
101497- * This function simulates SIMD 128-bit right shift by the standard C.
101498- * The 128-bit integer given in in is shifted by (shift * 8) bits.
101499- * This function simulates the LITTLE ENDIAN SIMD.
101500- * @param out the output of this function
101501- * @param in the 128-bit data to be shifted
101502- * @param shift the shift value
101503- */
101504-#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
101505-#ifdef ONLY64
101506-static inline void rshift128(w128_t *out, w128_t const *in, int shift) {
101507-    uint64_t th, tl, oh, ol;
101508-
101509-    th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
101510-    tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
101511-
101512-    oh = th >> (shift * 8);
101513-    ol = tl >> (shift * 8);
101514-    ol |= th << (64 - shift * 8);
101515-    out->u[0] = (uint32_t)(ol >> 32);
101516-    out->u[1] = (uint32_t)ol;
101517-    out->u[2] = (uint32_t)(oh >> 32);
101518-    out->u[3] = (uint32_t)oh;
101519-}
101520-#else
101521-static inline void rshift128(w128_t *out, w128_t const *in, int shift) {
101522-    uint64_t th, tl, oh, ol;
101523-
101524-    th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
101525-    tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
101526-
101527-    oh = th >> (shift * 8);
101528-    ol = tl >> (shift * 8);
101529-    ol |= th << (64 - shift * 8);
101530-    out->u[1] = (uint32_t)(ol >> 32);
101531-    out->u[0] = (uint32_t)ol;
101532-    out->u[3] = (uint32_t)(oh >> 32);
101533-    out->u[2] = (uint32_t)oh;
101534-}
101535-#endif
101536-/**
101537- * This function simulates SIMD 128-bit left shift by the standard C.
101538- * The 128-bit integer given in in is shifted by (shift * 8) bits.
101539- * This function simulates the LITTLE ENDIAN SIMD.
101540- * @param out the output of this function
101541- * @param in the 128-bit data to be shifted
101542- * @param shift the shift value
101543- */
101544-#ifdef ONLY64
101545-static inline void lshift128(w128_t *out, w128_t const *in, int shift) {
101546-    uint64_t th, tl, oh, ol;
101547-
101548-    th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
101549-    tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
101550-
101551-    oh = th << (shift * 8);
101552-    ol = tl << (shift * 8);
101553-    oh |= tl >> (64 - shift * 8);
101554-    out->u[0] = (uint32_t)(ol >> 32);
101555-    out->u[1] = (uint32_t)ol;
101556-    out->u[2] = (uint32_t)(oh >> 32);
101557-    out->u[3] = (uint32_t)oh;
101558-}
101559-#else
101560-static inline void lshift128(w128_t *out, w128_t const *in, int shift) {
101561-    uint64_t th, tl, oh, ol;
101562-
101563-    th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
101564-    tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
101565-
101566-    oh = th << (shift * 8);
101567-    ol = tl << (shift * 8);
101568-    oh |= tl >> (64 - shift * 8);
101569-    out->u[1] = (uint32_t)(ol >> 32);
101570-    out->u[0] = (uint32_t)ol;
101571-    out->u[3] = (uint32_t)(oh >> 32);
101572-    out->u[2] = (uint32_t)oh;
101573-}
101574-#endif
101575-#endif
101576-
101577-/**
101578- * This function represents the recursion formula.
101579- * @param r output
101580- * @param a a 128-bit part of the internal state array
101581- * @param b a 128-bit part of the internal state array
101582- * @param c a 128-bit part of the internal state array
101583- * @param d a 128-bit part of the internal state array
101584- */
101585-#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
101586-#ifdef ONLY64
101587-static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
101588-				w128_t *d) {
101589-    w128_t x;
101590-    w128_t y;
101591-
101592-    lshift128(&x, a, SL2);
101593-    rshift128(&y, c, SR2);
101594-    r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0]
101595-	^ (d->u[0] << SL1);
101596-    r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1]
101597-	^ (d->u[1] << SL1);
101598-    r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2]
101599-	^ (d->u[2] << SL1);
101600-    r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3]
101601-	^ (d->u[3] << SL1);
101602-}
101603-#else
101604-static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
101605-				w128_t *d) {
101606-    w128_t x;
101607-    w128_t y;
101608-
101609-    lshift128(&x, a, SL2);
101610-    rshift128(&y, c, SR2);
101611-    r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0]
101612-	^ (d->u[0] << SL1);
101613-    r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1]
101614-	^ (d->u[1] << SL1);
101615-    r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2]
101616-	^ (d->u[2] << SL1);
101617-    r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3]
101618-	^ (d->u[3] << SL1);
101619-}
101620-#endif
101621-#endif
101622-
101623-#if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2))
101624-/**
101625- * This function fills the internal state array with pseudorandom
101626- * integers.
101627- */
101628-static inline void gen_rand_all(sfmt_t *ctx) {
101629-    int i;
101630-    w128_t *r1, *r2;
101631-
101632-    r1 = &ctx->sfmt[N - 2];
101633-    r2 = &ctx->sfmt[N - 1];
101634-    for (i = 0; i < N - POS1; i++) {
101635-	do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1,
101636-	  r2);
101637-	r1 = r2;
101638-	r2 = &ctx->sfmt[i];
101639-    }
101640-    for (; i < N; i++) {
101641-	do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1,
101642-	  r2);
101643-	r1 = r2;
101644-	r2 = &ctx->sfmt[i];
101645-    }
101646-}
101647-
101648-/**
101649- * This function fills the user-specified array with pseudorandom
101650- * integers.
101651- *
101652- * @param array a 128-bit array to be filled with pseudorandom numbers.
101653- * @param size number of 128-bit pseudorandom numbers to be generated.
101654- */
101655-static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
101656-    int i, j;
101657-    w128_t *r1, *r2;
101658-
101659-    r1 = &ctx->sfmt[N - 2];
101660-    r2 = &ctx->sfmt[N - 1];
101661-    for (i = 0; i < N - POS1; i++) {
101662-	do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2);
101663-	r1 = r2;
101664-	r2 = &array[i];
101665-    }
101666-    for (; i < N; i++) {
101667-	do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2);
101668-	r1 = r2;
101669-	r2 = &array[i];
101670-    }
101671-    for (; i < size - N; i++) {
101672-	do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2);
101673-	r1 = r2;
101674-	r2 = &array[i];
101675-    }
101676-    for (j = 0; j < 2 * N - size; j++) {
101677-	ctx->sfmt[j] = array[j + size - N];
101678-    }
101679-    for (; i < size; i++, j++) {
101680-	do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2);
101681-	r1 = r2;
101682-	r2 = &array[i];
101683-	ctx->sfmt[j] = array[i];
101684-    }
101685-}
101686-#endif
101687-
101688-#if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC)
101689-static inline void swap(w128_t *array, int size) {
101690-    int i;
101691-    uint32_t x, y;
101692-
101693-    for (i = 0; i < size; i++) {
101694-	x = array[i].u[0];
101695-	y = array[i].u[2];
101696-	array[i].u[0] = array[i].u[1];
101697-	array[i].u[2] = array[i].u[3];
101698-	array[i].u[1] = x;
101699-	array[i].u[3] = y;
101700-    }
101701-}
101702-#endif
101703-/**
101704- * This function represents a function used in the initialization
101705- * by init_by_array
101706- * @param x 32-bit integer
101707- * @return 32-bit integer
101708- */
101709-static uint32_t func1(uint32_t x) {
101710-    return (x ^ (x >> 27)) * (uint32_t)1664525UL;
101711-}
101712-
101713-/**
101714- * This function represents a function used in the initialization
101715- * by init_by_array
101716- * @param x 32-bit integer
101717- * @return 32-bit integer
101718- */
101719-static uint32_t func2(uint32_t x) {
101720-    return (x ^ (x >> 27)) * (uint32_t)1566083941UL;
101721-}
101722-
101723-/**
101724- * This function certifies the period of 2^{MEXP}
101725- */
101726-static void period_certification(sfmt_t *ctx) {
101727-    int inner = 0;
101728-    int i, j;
101729-    uint32_t work;
101730-    uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
101731-
101732-    for (i = 0; i < 4; i++)
101733-	inner ^= psfmt32[idxof(i)] & parity[i];
101734-    for (i = 16; i > 0; i >>= 1)
101735-	inner ^= inner >> i;
101736-    inner &= 1;
101737-    /* check OK */
101738-    if (inner == 1) {
101739-	return;
101740-    }
101741-    /* check NG, and modification */
101742-    for (i = 0; i < 4; i++) {
101743-	work = 1;
101744-	for (j = 0; j < 32; j++) {
101745-	    if ((work & parity[i]) != 0) {
101746-		psfmt32[idxof(i)] ^= work;
101747-		return;
101748-	    }
101749-	    work = work << 1;
101750-	}
101751-    }
101752-}
101753-
101754-/*----------------
101755-  PUBLIC FUNCTIONS
101756-  ----------------*/
101757-/**
101758- * This function returns the identification string.
101759- * The string shows the word size, the Mersenne exponent,
101760- * and all parameters of this generator.
101761- */
101762-const char *get_idstring(void) {
101763-    return IDSTR;
101764-}
101765-
101766-/**
101767- * This function returns the minimum size of array used for \b
101768- * fill_array32() function.
101769- * @return minimum size of array used for fill_array32() function.
101770- */
101771-int get_min_array_size32(void) {
101772-    return N32;
101773-}
101774-
101775-/**
101776- * This function returns the minimum size of array used for \b
101777- * fill_array64() function.
101778- * @return minimum size of array used for fill_array64() function.
101779- */
101780-int get_min_array_size64(void) {
101781-    return N64;
101782-}
101783-
101784-#ifndef ONLY64
101785-/**
101786- * This function generates and returns a 32-bit pseudorandom number.
101787- * init_gen_rand or init_by_array must be called before this function.
101788- * @return 32-bit pseudorandom number
101789- */
101790-uint32_t gen_rand32(sfmt_t *ctx) {
101791-    uint32_t r;
101792-    uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
101793-
101794-    assert(ctx->initialized);
101795-    if (ctx->idx >= N32) {
101796-	gen_rand_all(ctx);
101797-	ctx->idx = 0;
101798-    }
101799-    r = psfmt32[ctx->idx++];
101800-    return r;
101801-}
101802-
101803-/* Generate a random integer in [0..limit). */
101804-uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) {
101805-    uint32_t ret, above;
101806-
101807-    above = 0xffffffffU - (0xffffffffU % limit);
101808-    while (1) {
101809-	ret = gen_rand32(ctx);
101810-	if (ret < above) {
101811-	    ret %= limit;
101812-	    break;
101813-	}
101814-    }
101815-    return ret;
101816-}
101817-#endif
101818-/**
101819- * This function generates and returns a 64-bit pseudorandom number.
101820- * init_gen_rand or init_by_array must be called before this function.
101821- * The function gen_rand64 should not be called after gen_rand32 unless
101822- * the generator is initialized again.
101823- * @return 64-bit pseudorandom number
101824- */
101825-uint64_t gen_rand64(sfmt_t *ctx) {
101826-#if defined(BIG_ENDIAN64) && !defined(ONLY64)
101827-    uint32_t r1, r2;
101828-    uint32_t *psfmt32 = &ctx->sfmt[0].u[0];
101829-#else
101830-    uint64_t r;
101831-    uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0];
101832-#endif
101833-
101834-    assert(ctx->initialized);
101835-    assert(ctx->idx % 2 == 0);
101836-
101837-    if (ctx->idx >= N32) {
101838-	gen_rand_all(ctx);
101839-	ctx->idx = 0;
101840-    }
101841-#if defined(BIG_ENDIAN64) && !defined(ONLY64)
101842-    r1 = psfmt32[ctx->idx];
101843-    r2 = psfmt32[ctx->idx + 1];
101844-    ctx->idx += 2;
101845-    return ((uint64_t)r2 << 32) | r1;
101846-#else
101847-    r = psfmt64[ctx->idx / 2];
101848-    ctx->idx += 2;
101849-    return r;
101850-#endif
101851-}
101852-
101853-/* Generate a random integer in [0..limit). */
101854-uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) {
101855-    uint64_t ret, above;
101856-
101857-    above = KQU(0xffffffffffffffff) - (KQU(0xffffffffffffffff) % limit);
101858-    while (1) {
101859-	ret = gen_rand64(ctx);
101860-	if (ret < above) {
101861-	    ret %= limit;
101862-	    break;
101863-	}
101864-    }
101865-    return ret;
101866-}
101867-
101868-#ifndef ONLY64
101869-/**
101870- * This function generates pseudorandom 32-bit integers in the
101871- * specified array[] by one call. The number of pseudorandom integers
101872- * is specified by the argument size, which must be at least 624 and a
101873- * multiple of four.  The generation by this function is much faster
101874- * than the following gen_rand function.
101875- *
101876- * For initialization, init_gen_rand or init_by_array must be called
101877- * before the first call of this function. This function cannot be
101878- * used after calling a gen_rand function without re-initialization.
101879- *
101880- * @param array an array where pseudorandom 32-bit integers are filled
101881- * by this function.  The pointer to the array must be \b "aligned"
101882- * (namely, must be a multiple of 16) in the SIMD version, since it
101883- * refers to the address of a 128-bit integer.  In the standard C
101884- * version, the pointer is arbitrary.
101885- *
101886- * @param size the number of 32-bit pseudorandom integers to be
101887- * generated.  size must be a multiple of 4, and greater than or equal
101888- * to (MEXP / 128 + 1) * 4.
101889- *
101890- * @note \b memalign or \b posix_memalign is available to get aligned
101891- * memory. Mac OSX doesn't have these functions, but \b malloc of OSX
101892- * returns the pointer to the aligned memory block.
101893- */
101894-void fill_array32(sfmt_t *ctx, uint32_t *array, int size) {
101895-    assert(ctx->initialized);
101896-    assert(ctx->idx == N32);
101897-    assert(size % 4 == 0);
101898-    assert(size >= N32);
101899-
101900-    gen_rand_array(ctx, (w128_t *)array, size / 4);
101901-    ctx->idx = N32;
101902-}
101903-#endif
101904-
101905-/**
101906- * This function generates pseudorandom 64-bit integers in the
101907- * specified array[] by one call. The number of pseudorandom integers
101908- * is specified by the argument size, which must be at least 312 and a
101909- * multiple of two.  The generation by this function is much faster
101910- * than the following gen_rand function.
101911- *
101912- * For initialization, init_gen_rand or init_by_array must be called
101913- * before the first call of this function. This function cannot be
101914- * used after calling a gen_rand function without re-initialization.
101915- *
101916- * @param array an array where pseudorandom 64-bit integers are filled
101917- * by this function.  The pointer to the array must be "aligned"
101918- * (namely, must be a multiple of 16) in the SIMD version, since it
101919- * refers to the address of a 128-bit integer.  In the standard C
101920- * version, the pointer is arbitrary.
101921- *
101922- * @param size the number of 64-bit pseudorandom integers to be
101923- * generated.  size must be a multiple of 2, and greater than or equal
101924- * to (MEXP / 128 + 1) * 2
101925- *
101926- * @note \b memalign or \b posix_memalign is available to get aligned
101927- * memory. Mac OSX doesn't have these functions, but \b malloc of OSX
101928- * returns the pointer to the aligned memory block.
101929- */
101930-void fill_array64(sfmt_t *ctx, uint64_t *array, int size) {
101931-    assert(ctx->initialized);
101932-    assert(ctx->idx == N32);
101933-    assert(size % 2 == 0);
101934-    assert(size >= N64);
101935-
101936-    gen_rand_array(ctx, (w128_t *)array, size / 2);
101937-    ctx->idx = N32;
101938-
101939-#if defined(BIG_ENDIAN64) && !defined(ONLY64)
101940-    swap((w128_t *)array, size /2);
101941-#endif
101942-}
101943-
101944-/**
101945- * This function initializes the internal state array with a 32-bit
101946- * integer seed.
101947- *
101948- * @param seed a 32-bit integer used as the seed.
101949- */
101950-sfmt_t *init_gen_rand(uint32_t seed) {
101951-    void *p;
101952-    sfmt_t *ctx;
101953-    int i;
101954-    uint32_t *psfmt32;
101955-
101956-    if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) {
101957-	return NULL;
101958-    }
101959-    ctx = (sfmt_t *)p;
101960-    psfmt32 = &ctx->sfmt[0].u[0];
101961-
101962-    psfmt32[idxof(0)] = seed;
101963-    for (i = 1; i < N32; i++) {
101964-	psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)]
101965-					    ^ (psfmt32[idxof(i - 1)] >> 30))
101966-	    + i;
101967-    }
101968-    ctx->idx = N32;
101969-    period_certification(ctx);
101970-    ctx->initialized = 1;
101971-
101972-    return ctx;
101973-}
101974-
101975-/**
101976- * This function initializes the internal state array,
101977- * with an array of 32-bit integers used as the seeds
101978- * @param init_key the array of 32-bit integers, used as a seed.
101979- * @param key_length the length of init_key.
101980- */
101981-sfmt_t *init_by_array(uint32_t *init_key, int key_length) {
101982-    void *p;
101983-    sfmt_t *ctx;
101984-    int i, j, count;
101985-    uint32_t r;
101986-    int lag;
101987-    int mid;
101988-    int size = N * 4;
101989-    uint32_t *psfmt32;
101990-
101991-    if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) {
101992-	return NULL;
101993-    }
101994-    ctx = (sfmt_t *)p;
101995-    psfmt32 = &ctx->sfmt[0].u[0];
101996-
101997-    if (size >= 623) {
101998-	lag = 11;
101999-    } else if (size >= 68) {
102000-	lag = 7;
102001-    } else if (size >= 39) {
102002-	lag = 5;
102003-    } else {
102004-	lag = 3;
102005-    }
102006-    mid = (size - lag) / 2;
102007-
102008-    memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt));
102009-    if (key_length + 1 > N32) {
102010-	count = key_length + 1;
102011-    } else {
102012-	count = N32;
102013-    }
102014-    r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)]
102015-	      ^ psfmt32[idxof(N32 - 1)]);
102016-    psfmt32[idxof(mid)] += r;
102017-    r += key_length;
102018-    psfmt32[idxof(mid + lag)] += r;
102019-    psfmt32[idxof(0)] = r;
102020-
102021-    count--;
102022-    for (i = 1, j = 0; (j < count) && (j < key_length); j++) {
102023-	r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)]
102024-		  ^ psfmt32[idxof((i + N32 - 1) % N32)]);
102025-	psfmt32[idxof((i + mid) % N32)] += r;
102026-	r += init_key[j] + i;
102027-	psfmt32[idxof((i + mid + lag) % N32)] += r;
102028-	psfmt32[idxof(i)] = r;
102029-	i = (i + 1) % N32;
102030-    }
102031-    for (; j < count; j++) {
102032-	r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)]
102033-		  ^ psfmt32[idxof((i + N32 - 1) % N32)]);
102034-	psfmt32[idxof((i + mid) % N32)] += r;
102035-	r += i;
102036-	psfmt32[idxof((i + mid + lag) % N32)] += r;
102037-	psfmt32[idxof(i)] = r;
102038-	i = (i + 1) % N32;
102039-    }
102040-    for (j = 0; j < N32; j++) {
102041-	r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)]
102042-		  + psfmt32[idxof((i + N32 - 1) % N32)]);
102043-	psfmt32[idxof((i + mid) % N32)] ^= r;
102044-	r -= i;
102045-	psfmt32[idxof((i + mid + lag) % N32)] ^= r;
102046-	psfmt32[idxof(i)] = r;
102047-	i = (i + 1) % N32;
102048-    }
102049-
102050-    ctx->idx = N32;
102051-    period_certification(ctx);
102052-    ctx->initialized = 1;
102053-
102054-    return ctx;
102055-}
102056-
102057-void fini_gen_rand(sfmt_t *ctx) {
102058-    assert(ctx != NULL);
102059-
102060-    ctx->initialized = 0;
102061-    free(ctx);
102062-}
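For context, the SFMT routines deleted above are the test suite's pseudorandom number source. A minimal sketch of how that API is driven (assumes the test harness headers that declare it; the seed value is arbitrary):

#include "test/jemalloc_test.h"

static void
sfmt_sketch(void) {
	/* Seed the generator with a 32-bit integer; NULL indicates OOM. */
	sfmt_t *ctx = init_gen_rand(12345);
	if (ctx == NULL) {
		return;
	}
	uint32_t raw = gen_rand32(ctx);              /* full 32-bit draw */
	uint32_t bounded = gen_rand32_range(ctx, 6); /* uniform in [0..6) */
	(void)raw;
	(void)bounded;
	fini_gen_rand(ctx);                          /* clear and free state */
}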
102063diff --git a/jemalloc/test/src/btalloc.c b/jemalloc/test/src/btalloc.c
102064deleted file mode 100644
102065index d570952..0000000
102066--- a/jemalloc/test/src/btalloc.c
102067+++ /dev/null
102068@@ -1,6 +0,0 @@
102069-#include "test/jemalloc_test.h"
102070-
102071-void *
102072-btalloc(size_t size, unsigned bits) {
102073-	return btalloc_0(size, bits);
102074-}
102075diff --git a/jemalloc/test/src/btalloc_0.c b/jemalloc/test/src/btalloc_0.c
102076deleted file mode 100644
102077index 77d8904..0000000
102078--- a/jemalloc/test/src/btalloc_0.c
102079+++ /dev/null
102080@@ -1,3 +0,0 @@
102081-#include "test/jemalloc_test.h"
102082-
102083-btalloc_n_gen(0)
102084diff --git a/jemalloc/test/src/btalloc_1.c b/jemalloc/test/src/btalloc_1.c
102085deleted file mode 100644
102086index 4c126c3..0000000
102087--- a/jemalloc/test/src/btalloc_1.c
102088+++ /dev/null
102089@@ -1,3 +0,0 @@
102090-#include "test/jemalloc_test.h"
102091-
102092-btalloc_n_gen(1)
102093diff --git a/jemalloc/test/src/math.c b/jemalloc/test/src/math.c
102094deleted file mode 100644
102095index 1758c67..0000000
102096--- a/jemalloc/test/src/math.c
102097+++ /dev/null
102098@@ -1,2 +0,0 @@
102099-#define MATH_C_
102100-#include "test/jemalloc_test.h"
102101diff --git a/jemalloc/test/src/mtx.c b/jemalloc/test/src/mtx.c
102102deleted file mode 100644
102103index d9ce375..0000000
102104--- a/jemalloc/test/src/mtx.c
102105+++ /dev/null
102106@@ -1,61 +0,0 @@
102107-#include "test/jemalloc_test.h"
102108-
102109-#ifndef _CRT_SPINCOUNT
102110-#define _CRT_SPINCOUNT 4000
102111-#endif
102112-
102113-bool
102114-mtx_init(mtx_t *mtx) {
102115-#ifdef _WIN32
102116-	if (!InitializeCriticalSectionAndSpinCount(&mtx->lock,
102117-	    _CRT_SPINCOUNT)) {
102118-		return true;
102119-	}
102120-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
102121-	mtx->lock = OS_UNFAIR_LOCK_INIT;
102122-#else
102123-	pthread_mutexattr_t attr;
102124-
102125-	if (pthread_mutexattr_init(&attr) != 0) {
102126-		return true;
102127-	}
102128-	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
102129-	if (pthread_mutex_init(&mtx->lock, &attr) != 0) {
102130-		pthread_mutexattr_destroy(&attr);
102131-		return true;
102132-	}
102133-	pthread_mutexattr_destroy(&attr);
102134-#endif
102135-	return false;
102136-}
102137-
102138-void
102139-mtx_fini(mtx_t *mtx) {
102140-#ifdef _WIN32
102141-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
102142-#else
102143-	pthread_mutex_destroy(&mtx->lock);
102144-#endif
102145-}
102146-
102147-void
102148-mtx_lock(mtx_t *mtx) {
102149-#ifdef _WIN32
102150-	EnterCriticalSection(&mtx->lock);
102151-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
102152-	os_unfair_lock_lock(&mtx->lock);
102153-#else
102154-	pthread_mutex_lock(&mtx->lock);
102155-#endif
102156-}
102157-
102158-void
102159-mtx_unlock(mtx_t *mtx) {
102160-#ifdef _WIN32
102161-	LeaveCriticalSection(&mtx->lock);
102162-#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
102163-	os_unfair_lock_unlock(&mtx->lock);
102164-#else
102165-	pthread_mutex_unlock(&mtx->lock);
102166-#endif
102167-}
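The mutex shim deleted above hides the Windows / os_unfair_lock / pthreads split; note that mtx_init() returns true on failure. A small usage sketch in the same style (the counter names are illustrative, not from the tests):

#include "test/jemalloc_test.h"

static mtx_t counter_mtx;
static unsigned counter;

static void
counter_bump(void) {
	mtx_lock(&counter_mtx);
	counter++;
	mtx_unlock(&counter_mtx);
}

static void
counter_demo(void) {
	if (mtx_init(&counter_mtx)) {
		return;          /* true indicates initialization failure */
	}
	counter_bump();
	mtx_fini(&counter_mtx);
}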
102168diff --git a/jemalloc/test/src/sleep.c b/jemalloc/test/src/sleep.c
102169deleted file mode 100644
102170index 2234b4b..0000000
102171--- a/jemalloc/test/src/sleep.c
102172+++ /dev/null
102173@@ -1,27 +0,0 @@
102174-#include "test/jemalloc_test.h"
102175-
102176-/*
102177- * Sleep for approximately ns nanoseconds.  Neither a lower nor an upper bound
102178- * on sleep time is guaranteed.
102179- */
102180-void
102181-sleep_ns(unsigned ns) {
102182-	assert(ns <= 1000*1000*1000);
102183-
102184-#ifdef _WIN32
102185-	Sleep(ns / 1000 / 1000);
102186-#else
102187-	{
102188-		struct timespec timeout;
102189-
102190-		if (ns < 1000*1000*1000) {
102191-			timeout.tv_sec = 0;
102192-			timeout.tv_nsec = ns;
102193-		} else {
102194-			timeout.tv_sec = 1;
102195-			timeout.tv_nsec = 0;
102196-		}
102197-		nanosleep(&timeout, NULL);
102198-	}
102199-#endif
102200-}
102201diff --git a/jemalloc/test/src/test.c b/jemalloc/test/src/test.c
102202deleted file mode 100644
102203index 4cd803e..0000000
102204--- a/jemalloc/test/src/test.c
102205+++ /dev/null
102206@@ -1,234 +0,0 @@
102207-#include "test/jemalloc_test.h"
102208-
102209-/* Test status state. */
102210-
102211-static unsigned		test_count = 0;
102212-static test_status_t	test_counts[test_status_count] = {0, 0, 0};
102213-static test_status_t	test_status = test_status_pass;
102214-static const char *	test_name = "";
102215-
102216-/* Reentrancy testing helpers. */
102217-
102218-#define NUM_REENTRANT_ALLOCS 20
102219-typedef enum {
102220-	non_reentrant = 0,
102221-	libc_reentrant = 1,
102222-	arena_new_reentrant = 2
102223-} reentrancy_t;
102224-static reentrancy_t reentrancy;
102225-
102226-static bool libc_hook_ran = false;
102227-static bool arena_new_hook_ran = false;
102228-
102229-static const char *
102230-reentrancy_t_str(reentrancy_t r) {
102231-	switch (r) {
102232-	case non_reentrant:
102233-		return "non-reentrant";
102234-	case libc_reentrant:
102235-		return "libc-reentrant";
102236-	case arena_new_reentrant:
102237-		return "arena_new-reentrant";
102238-	default:
102239-		unreachable();
102240-	}
102241-}
102242-
102243-static void
102244-do_hook(bool *hook_ran, void (**hook)()) {
102245-	*hook_ran = true;
102246-	*hook = NULL;
102247-
102248-	size_t alloc_size = 1;
102249-	for (int i = 0; i < NUM_REENTRANT_ALLOCS; i++) {
102250-		free(malloc(alloc_size));
102251-		alloc_size *= 2;
102252-	}
102253-}
102254-
102255-static void
102256-libc_reentrancy_hook() {
102257-	do_hook(&libc_hook_ran, &test_hooks_libc_hook);
102258-}
102259-
102260-static void
102261-arena_new_reentrancy_hook() {
102262-	do_hook(&arena_new_hook_ran, &test_hooks_arena_new_hook);
102263-}
102264-
102265-/* Actual test infrastructure. */
102266-bool
102267-test_is_reentrant() {
102268-	return reentrancy != non_reentrant;
102269-}
102270-
102271-JEMALLOC_FORMAT_PRINTF(1, 2)
102272-void
102273-test_skip(const char *format, ...) {
102274-	va_list ap;
102275-
102276-	va_start(ap, format);
102277-	malloc_vcprintf(NULL, NULL, format, ap);
102278-	va_end(ap);
102279-	malloc_printf("\n");
102280-	test_status = test_status_skip;
102281-}
102282-
102283-JEMALLOC_FORMAT_PRINTF(1, 2)
102284-void
102285-test_fail(const char *format, ...) {
102286-	va_list ap;
102287-
102288-	va_start(ap, format);
102289-	malloc_vcprintf(NULL, NULL, format, ap);
102290-	va_end(ap);
102291-	malloc_printf("\n");
102292-	test_status = test_status_fail;
102293-}
102294-
102295-static const char *
102296-test_status_string(test_status_t current_status) {
102297-	switch (current_status) {
102298-	case test_status_pass: return "pass";
102299-	case test_status_skip: return "skip";
102300-	case test_status_fail: return "fail";
102301-	default: not_reached();
102302-	}
102303-}
102304-
102305-void
102306-p_test_init(const char *name) {
102307-	test_count++;
102308-	test_status = test_status_pass;
102309-	test_name = name;
102310-}
102311-
102312-void
102313-p_test_fini(void) {
102314-	test_counts[test_status]++;
102315-	malloc_printf("%s (%s): %s\n", test_name, reentrancy_t_str(reentrancy),
102316-	    test_status_string(test_status));
102317-}
102318-
102319-static void
102320-check_global_slow(test_status_t *status) {
102321-#ifdef JEMALLOC_UNIT_TEST
102322-	/*
102323-	 * This check needs to peek into tsd internals, which is why it's only
102324-	 * exposed in unit tests.
102325-	 */
102326-	if (tsd_global_slow()) {
102327-		malloc_printf("Testing increased global slow count\n");
102328-		*status = test_status_fail;
102329-	}
102330-#endif
102331-}
102332-
102333-static test_status_t
102334-p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) {
102335-	test_status_t ret;
102336-
102337-	if (do_malloc_init) {
102338-		/*
102339-		 * Make sure initialization occurs prior to running tests.
102340-		 * Tests are special because they may use internal facilities
102341-		 * prior to triggering initialization as a side effect of
102342-		 * calling into the public API.
102343-		 */
102344-		if (nallocx(1, 0) == 0) {
102345-			malloc_printf("Initialization error");
102346-			return test_status_fail;
102347-		}
102348-	}
102349-
102350-	ret = test_status_pass;
102351-	for (; t != NULL; t = va_arg(ap, test_t *)) {
102352-		/* Non-reentrant run. */
102353-		reentrancy = non_reentrant;
102354-		test_hooks_arena_new_hook = test_hooks_libc_hook = NULL;
102355-		t();
102356-		if (test_status > ret) {
102357-			ret = test_status;
102358-		}
102359-		check_global_slow(&ret);
102360-		/* Reentrant run. */
102361-		if (do_reentrant) {
102362-			reentrancy = libc_reentrant;
102363-			test_hooks_arena_new_hook = NULL;
102364-			test_hooks_libc_hook = &libc_reentrancy_hook;
102365-			t();
102366-			if (test_status > ret) {
102367-				ret = test_status;
102368-			}
102369-			check_global_slow(&ret);
102370-
102371-			reentrancy = arena_new_reentrant;
102372-			test_hooks_libc_hook = NULL;
102373-			test_hooks_arena_new_hook = &arena_new_reentrancy_hook;
102374-			t();
102375-			if (test_status > ret) {
102376-				ret = test_status;
102377-			}
102378-			check_global_slow(&ret);
102379-		}
102380-	}
102381-
102382-	malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n",
102383-	    test_status_string(test_status_pass),
102384-	    test_counts[test_status_pass], test_count,
102385-	    test_status_string(test_status_skip),
102386-	    test_counts[test_status_skip], test_count,
102387-	    test_status_string(test_status_fail),
102388-	    test_counts[test_status_fail], test_count);
102389-
102390-	return ret;
102391-}
102392-
102393-test_status_t
102394-p_test(test_t *t, ...) {
102395-	test_status_t ret;
102396-	va_list ap;
102397-
102398-	ret = test_status_pass;
102399-	va_start(ap, t);
102400-	ret = p_test_impl(true, true, t, ap);
102401-	va_end(ap);
102402-
102403-	return ret;
102404-}
102405-
102406-test_status_t
102407-p_test_no_reentrancy(test_t *t, ...) {
102408-	test_status_t ret;
102409-	va_list ap;
102410-
102411-	ret = test_status_pass;
102412-	va_start(ap, t);
102413-	ret = p_test_impl(true, false, t, ap);
102414-	va_end(ap);
102415-
102416-	return ret;
102417-}
102418-
102419-test_status_t
102420-p_test_no_malloc_init(test_t *t, ...) {
102421-	test_status_t ret;
102422-	va_list ap;
102423-
102424-	ret = test_status_pass;
102425-	va_start(ap, t);
102426-	/*
102427-	 * We also omit reentrancy from bootstrapping tests, since we don't
102428-	 * (yet) care about general reentrancy during bootstrapping.
102429-	 */
102430-	ret = p_test_impl(false, false, t, ap);
102431-	va_end(ap);
102432-
102433-	return ret;
102434-}
102435-
102436-void
102437-p_test_fail(const char *prefix, const char *message) {
102438-	malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message);
102439-	test_status = test_status_fail;
102440-}
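The harness deleted above is what every test program links against; tests are declared with TEST_BEGIN/TEST_END and handed to the test()/test_no_reentrancy() wrappers around p_test(), as in the stress tests later in this diff. A minimal sketch:

#include "test/jemalloc_test.h"

TEST_BEGIN(test_example) {
	void *p = mallocx(1, 0);
	assert_ptr_not_null(p, "mallocx shouldn't fail");
	free(p);
}
TEST_END

int
main(void) {
	/* Each test runs non-reentrantly, then with the reentrancy hooks. */
	return test(
	    test_example);
}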
102441diff --git a/jemalloc/test/src/thd.c b/jemalloc/test/src/thd.c
102442deleted file mode 100644
102443index 9a15eab..0000000
102444--- a/jemalloc/test/src/thd.c
102445+++ /dev/null
102446@@ -1,34 +0,0 @@
102447-#include "test/jemalloc_test.h"
102448-
102449-#ifdef _WIN32
102450-void
102451-thd_create(thd_t *thd, void *(*proc)(void *), void *arg) {
102452-	LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc;
102453-	*thd = CreateThread(NULL, 0, routine, arg, 0, NULL);
102454-	if (*thd == NULL) {
102455-		test_fail("Error in CreateThread()\n");
102456-	}
102457-}
102458-
102459-void
102460-thd_join(thd_t thd, void **ret) {
102461-	if (WaitForSingleObject(thd, INFINITE) == WAIT_OBJECT_0 && ret) {
102462-		DWORD exit_code;
102463-		GetExitCodeThread(thd, (LPDWORD) &exit_code);
102464-		*ret = (void *)(uintptr_t)exit_code;
102465-	}
102466-}
102467-
102468-#else
102469-void
102470-thd_create(thd_t *thd, void *(*proc)(void *), void *arg) {
102471-	if (pthread_create(thd, NULL, proc, arg) != 0) {
102472-		test_fail("Error in pthread_create()\n");
102473-	}
102474-}
102475-
102476-void
102477-thd_join(thd_t thd, void **ret) {
102478-	pthread_join(thd, ret);
102479-}
102480-#endif
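The thread shim deleted above wraps CreateThread/pthread_create behind a single thd_create()/thd_join() pair. A sketch of the usual fan-out/join pattern (thread count and body are illustrative):

#include "test/jemalloc_test.h"

#define NTHREADS 4

static void *
thd_start(void *arg) {
	free(mallocx(1, 0));
	return NULL;
}

static void
fan_out_and_join(void) {
	thd_t thds[NTHREADS];
	for (unsigned i = 0; i < NTHREADS; i++) {
		thd_create(&thds[i], thd_start, NULL);
	}
	for (unsigned i = 0; i < NTHREADS; i++) {
		thd_join(thds[i], NULL);
	}
}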
102481diff --git a/jemalloc/test/src/timer.c b/jemalloc/test/src/timer.c
102482deleted file mode 100644
102483index 6e8b8ed..0000000
102484--- a/jemalloc/test/src/timer.c
102485+++ /dev/null
102486@@ -1,55 +0,0 @@
102487-#include "test/jemalloc_test.h"
102488-
102489-void
102490-timer_start(timedelta_t *timer) {
102491-	nstime_init_update(&timer->t0);
102492-}
102493-
102494-void
102495-timer_stop(timedelta_t *timer) {
102496-	nstime_copy(&timer->t1, &timer->t0);
102497-	nstime_update(&timer->t1);
102498-}
102499-
102500-uint64_t
102501-timer_usec(const timedelta_t *timer) {
102502-	nstime_t delta;
102503-
102504-	nstime_copy(&delta, &timer->t1);
102505-	nstime_subtract(&delta, &timer->t0);
102506-	return nstime_ns(&delta) / 1000;
102507-}
102508-
102509-void
102510-timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) {
102511-	uint64_t t0 = timer_usec(a);
102512-	uint64_t t1 = timer_usec(b);
102513-	uint64_t mult;
102514-	size_t i = 0;
102515-	size_t j, n;
102516-
102517-	/* Whole. */
102518-	n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1);
102519-	i += n;
102520-	if (i >= buflen) {
102521-		return;
102522-	}
102523-	mult = 1;
102524-	for (j = 0; j < n; j++) {
102525-		mult *= 10;
102526-	}
102527-
102528-	/* Decimal. */
102529-	n = malloc_snprintf(&buf[i], buflen-i, ".");
102530-	i += n;
102531-
102532-	/* Fraction. */
102533-	while (i < buflen-1) {
102534-		uint64_t round = (i+1 == buflen-1 && ((t0 * mult * 10 / t1) % 10
102535-		    >= 5)) ? 1 : 0;
102536-		n = malloc_snprintf(&buf[i], buflen-i,
102537-		    "%"FMTu64, (t0 * mult / t1) % 10 + round);
102538-		i += n;
102539-		mult *= 10;
102540-	}
102541-}
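The timer helpers deleted above back the stress benchmarks that follow; the typical start/stop/report pattern looks roughly like this (iteration count is arbitrary):

#include "test/jemalloc_test.h"

static void
time_malloc_free(void) {
	timedelta_t timer;

	timer_start(&timer);
	for (int i = 0; i < 1000 * 1000; i++) {
		free(mallocx(1, 0));
	}
	timer_stop(&timer);
	malloc_printf("Elapsed: %"FMTu64"us\n", timer_usec(&timer));
}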
102542diff --git a/jemalloc/test/stress/batch_alloc.c b/jemalloc/test/stress/batch_alloc.c
102543deleted file mode 100644
102544index 427e1cb..0000000
102545--- a/jemalloc/test/stress/batch_alloc.c
102546+++ /dev/null
102547@@ -1,198 +0,0 @@
102548-#include "test/jemalloc_test.h"
102549-#include "test/bench.h"
102550-
102551-#define MIBLEN 8
102552-static size_t mib[MIBLEN];
102553-static size_t miblen = MIBLEN;
102554-
102555-#define TINY_BATCH 10
102556-#define TINY_BATCH_ITER (10 * 1000 * 1000)
102557-#define HUGE_BATCH (1000 * 1000)
102558-#define HUGE_BATCH_ITER 100
102559-#define LEN (100 * 1000 * 1000)
102560-static void *batch_ptrs[LEN];
102561-static size_t batch_ptrs_next = 0;
102562-static void *item_ptrs[LEN];
102563-static size_t item_ptrs_next = 0;
102564-
102565-#define SIZE 7
102566-
102567-typedef struct batch_alloc_packet_s batch_alloc_packet_t;
102568-struct batch_alloc_packet_s {
102569-	void **ptrs;
102570-	size_t num;
102571-	size_t size;
102572-	int flags;
102573-};
102574-
102575-static void
102576-batch_alloc_wrapper(size_t batch) {
102577-	batch_alloc_packet_t batch_alloc_packet =
102578-	    {batch_ptrs + batch_ptrs_next, batch, SIZE, 0};
102579-	size_t filled;
102580-	size_t len = sizeof(size_t);
102581-	assert_d_eq(mallctlbymib(mib, miblen, &filled, &len,
102582-	    &batch_alloc_packet, sizeof(batch_alloc_packet)), 0, "");
102583-	assert_zu_eq(filled, batch, "");
102584-}
102585-
102586-static void
102587-item_alloc_wrapper(size_t batch) {
102588-	for (size_t i = item_ptrs_next, end = i + batch; i < end; ++i) {
102589-		item_ptrs[i] = malloc(SIZE);
102590-	}
102591-}
102592-
102593-static void
102594-release_and_clear(void **ptrs, size_t len) {
102595-	for (size_t i = 0; i < len; ++i) {
102596-		void *p = ptrs[i];
102597-		assert_ptr_not_null(p, "allocation failed");
102598-		sdallocx(p, SIZE, 0);
102599-		ptrs[i] = NULL;
102600-	}
102601-}
102602-
102603-static void
102604-batch_alloc_without_free(size_t batch) {
102605-	batch_alloc_wrapper(batch);
102606-	batch_ptrs_next += batch;
102607-}
102608-
102609-static void
102610-item_alloc_without_free(size_t batch) {
102611-	item_alloc_wrapper(batch);
102612-	item_ptrs_next += batch;
102613-}
102614-
102615-static void
102616-batch_alloc_with_free(size_t batch) {
102617-	batch_alloc_wrapper(batch);
102618-	release_and_clear(batch_ptrs + batch_ptrs_next, batch);
102619-	batch_ptrs_next += batch;
102620-}
102621-
102622-static void
102623-item_alloc_with_free(size_t batch) {
102624-	item_alloc_wrapper(batch);
102625-	release_and_clear(item_ptrs + item_ptrs_next, batch);
102626-	item_ptrs_next += batch;
102627-}
102628-
102629-static void
102630-compare_without_free(size_t batch, size_t iter,
102631-    void (*batch_alloc_without_free_func)(void),
102632-    void (*item_alloc_without_free_func)(void)) {
102633-	assert(batch_ptrs_next == 0);
102634-	assert(item_ptrs_next == 0);
102635-	assert(batch * iter <= LEN);
102636-	for (size_t i = 0; i < iter; ++i) {
102637-		batch_alloc_without_free_func();
102638-		item_alloc_without_free_func();
102639-	}
102640-	release_and_clear(batch_ptrs, batch_ptrs_next);
102641-	batch_ptrs_next = 0;
102642-	release_and_clear(item_ptrs, item_ptrs_next);
102643-	item_ptrs_next = 0;
102644-	compare_funcs(0, iter,
102645-	    "batch allocation", batch_alloc_without_free_func,
102646-	    "item allocation", item_alloc_without_free_func);
102647-	release_and_clear(batch_ptrs, batch_ptrs_next);
102648-	batch_ptrs_next = 0;
102649-	release_and_clear(item_ptrs, item_ptrs_next);
102650-	item_ptrs_next = 0;
102651-}
102652-
102653-static void
102654-compare_with_free(size_t batch, size_t iter,
102655-    void (*batch_alloc_with_free_func)(void),
102656-    void (*item_alloc_with_free_func)(void)) {
102657-	assert(batch_ptrs_next == 0);
102658-	assert(item_ptrs_next == 0);
102659-	assert(batch * iter <= LEN);
102660-	for (size_t i = 0; i < iter; ++i) {
102661-		batch_alloc_with_free_func();
102662-		item_alloc_with_free_func();
102663-	}
102664-	batch_ptrs_next = 0;
102665-	item_ptrs_next = 0;
102666-	compare_funcs(0, iter,
102667-	    "batch allocation", batch_alloc_with_free_func,
102668-	    "item allocation", item_alloc_with_free_func);
102669-	batch_ptrs_next = 0;
102670-	item_ptrs_next = 0;
102671-}
102672-
102673-static void
102674-batch_alloc_without_free_tiny() {
102675-	batch_alloc_without_free(TINY_BATCH);
102676-}
102677-
102678-static void
102679-item_alloc_without_free_tiny() {
102680-	item_alloc_without_free(TINY_BATCH);
102681-}
102682-
102683-TEST_BEGIN(test_tiny_batch_without_free) {
102684-	compare_without_free(TINY_BATCH, TINY_BATCH_ITER,
102685-	    batch_alloc_without_free_tiny, item_alloc_without_free_tiny);
102686-}
102687-TEST_END
102688-
102689-static void
102690-batch_alloc_with_free_tiny() {
102691-	batch_alloc_with_free(TINY_BATCH);
102692-}
102693-
102694-static void
102695-item_alloc_with_free_tiny() {
102696-	item_alloc_with_free(TINY_BATCH);
102697-}
102698-
102699-TEST_BEGIN(test_tiny_batch_with_free) {
102700-	compare_with_free(TINY_BATCH, TINY_BATCH_ITER,
102701-	    batch_alloc_with_free_tiny, item_alloc_with_free_tiny);
102702-}
102703-TEST_END
102704-
102705-static void
102706-batch_alloc_without_free_huge() {
102707-	batch_alloc_without_free(HUGE_BATCH);
102708-}
102709-
102710-static void
102711-item_alloc_without_free_huge() {
102712-	item_alloc_without_free(HUGE_BATCH);
102713-}
102714-
102715-TEST_BEGIN(test_huge_batch_without_free) {
102716-	compare_without_free(HUGE_BATCH, HUGE_BATCH_ITER,
102717-	    batch_alloc_without_free_huge, item_alloc_without_free_huge);
102718-}
102719-TEST_END
102720-
102721-static void
102722-batch_alloc_with_free_huge() {
102723-	batch_alloc_with_free(HUGE_BATCH);
102724-}
102725-
102726-static void
102727-item_alloc_with_free_huge() {
102728-	item_alloc_with_free(HUGE_BATCH);
102729-}
102730-
102731-TEST_BEGIN(test_huge_batch_with_free) {
102732-	compare_with_free(HUGE_BATCH, HUGE_BATCH_ITER,
102733-	    batch_alloc_with_free_huge, item_alloc_with_free_huge);
102734-}
102735-TEST_END
102736-
102737-int main(void) {
102738-	assert_d_eq(mallctlnametomib("experimental.batch_alloc", mib, &miblen),
102739-	    0, "");
102740-	return test_no_reentrancy(
102741-	    test_tiny_batch_without_free,
102742-	    test_tiny_batch_with_free,
102743-	    test_huge_batch_without_free,
102744-	    test_huge_batch_with_free);
102745-}
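The stress test deleted above drives the "experimental.batch_alloc" mallctl; the packet layout below mirrors the one defined in that test (redeclared here only because the original kept it file-local), and the batch size is illustrative:

#include "test/jemalloc_test.h"

/* Mirrors batch_alloc_packet_s from the deleted stress test. */
typedef struct {
	void **ptrs;
	size_t num;
	size_t size;
	int flags;
} batch_alloc_packet_t;

static void
batch_alloc_sketch(void) {
	void *ptrs[16];
	batch_alloc_packet_t packet = {ptrs, 16, 8, 0};
	size_t filled;
	size_t len = sizeof(filled);

	assert_d_eq(mallctl("experimental.batch_alloc", &filled, &len,
	    &packet, sizeof(packet)), 0, "");
	for (size_t i = 0; i < filled; i++) {
		sdallocx(ptrs[i], 8, 0);
	}
}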
102746diff --git a/jemalloc/test/stress/fill_flush.c b/jemalloc/test/stress/fill_flush.c
102747deleted file mode 100644
102748index a2db044..0000000
102749--- a/jemalloc/test/stress/fill_flush.c
102750+++ /dev/null
102751@@ -1,76 +0,0 @@
102752-#include "test/jemalloc_test.h"
102753-#include "test/bench.h"
102754-
102755-#define SMALL_ALLOC_SIZE 128
102756-#define LARGE_ALLOC_SIZE SC_LARGE_MINCLASS
102757-#define NALLOCS 1000
102758-
102759-/*
102760- * We make this volatile so the 1-at-a-time variants can't leave the allocation
102761- * in a register, just to try to get the cache behavior closer.
102762- */
102763-void *volatile allocs[NALLOCS];
102764-
102765-static void
102766-array_alloc_dalloc_small(void) {
102767-	for (int i = 0; i < NALLOCS; i++) {
102768-		void *p = mallocx(SMALL_ALLOC_SIZE, 0);
102769-		assert_ptr_not_null(p, "mallocx shouldn't fail");
102770-		allocs[i] = p;
102771-	}
102772-	for (int i = 0; i < NALLOCS; i++) {
102773-		sdallocx(allocs[i], SMALL_ALLOC_SIZE, 0);
102774-	}
102775-}
102776-
102777-static void
102778-item_alloc_dalloc_small(void) {
102779-	for (int i = 0; i < NALLOCS; i++) {
102780-		void *p = mallocx(SMALL_ALLOC_SIZE, 0);
102781-		assert_ptr_not_null(p, "mallocx shouldn't fail");
102782-		allocs[i] = p;
102783-		sdallocx(allocs[i], SMALL_ALLOC_SIZE, 0);
102784-	}
102785-}
102786-
102787-TEST_BEGIN(test_array_vs_item_small) {
102788-	compare_funcs(1 * 1000, 10 * 1000,
102789-	    "array of small allocations", array_alloc_dalloc_small,
102790-	    "small item allocation", item_alloc_dalloc_small);
102791-}
102792-TEST_END
102793-
102794-static void
102795-array_alloc_dalloc_large(void) {
102796-	for (int i = 0; i < NALLOCS; i++) {
102797-		void *p = mallocx(LARGE_ALLOC_SIZE, 0);
102798-		assert_ptr_not_null(p, "mallocx shouldn't fail");
102799-		allocs[i] = p;
102800-	}
102801-	for (int i = 0; i < NALLOCS; i++) {
102802-		sdallocx(allocs[i], LARGE_ALLOC_SIZE, 0);
102803-	}
102804-}
102805-
102806-static void
102807-item_alloc_dalloc_large(void) {
102808-	for (int i = 0; i < NALLOCS; i++) {
102809-		void *p = mallocx(LARGE_ALLOC_SIZE, 0);
102810-		assert_ptr_not_null(p, "mallocx shouldn't fail");
102811-		allocs[i] = p;
102812-		sdallocx(allocs[i], LARGE_ALLOC_SIZE, 0);
102813-	}
102814-}
102815-
102816-TEST_BEGIN(test_array_vs_item_large) {
102817-	compare_funcs(100, 1000,
102818-	    "array of large allocations", array_alloc_dalloc_large,
102819-	    "large item allocation", item_alloc_dalloc_large);
102820-}
102821-TEST_END
102822-
102823-int main(void) {
102824-	return test_no_reentrancy(
102825-	    test_array_vs_item_small,
102826-	    test_array_vs_item_large);
102827-}
102828diff --git a/jemalloc/test/stress/hookbench.c b/jemalloc/test/stress/hookbench.c
102829deleted file mode 100644
102830index 97e90b0..0000000
102831--- a/jemalloc/test/stress/hookbench.c
102832+++ /dev/null
102833@@ -1,73 +0,0 @@
102834-#include "test/jemalloc_test.h"
102835-
102836-static void
102837-noop_alloc_hook(void *extra, hook_alloc_t type, void *result,
102838-    uintptr_t result_raw, uintptr_t args_raw[3]) {
102839-}
102840-
102841-static void
102842-noop_dalloc_hook(void *extra, hook_dalloc_t type, void *address,
102843-    uintptr_t args_raw[3]) {
102844-}
102845-
102846-static void
102847-noop_expand_hook(void *extra, hook_expand_t type, void *address,
102848-    size_t old_usize, size_t new_usize, uintptr_t result_raw,
102849-    uintptr_t args_raw[4]) {
102850-}
102851-
102852-static void
102853-malloc_free_loop(int iters) {
102854-	for (int i = 0; i < iters; i++) {
102855-		void *p = mallocx(1, 0);
102856-		free(p);
102857-	}
102858-}
102859-
102860-static void
102861-test_hooked(int iters) {
102862-	hooks_t hooks = {&noop_alloc_hook, &noop_dalloc_hook, &noop_expand_hook,
102863-		NULL};
102864-
102865-	int err;
102866-	void *handles[HOOK_MAX];
102867-	size_t sz = sizeof(handles[0]);
102868-
102869-	for (int i = 0; i < HOOK_MAX; i++) {
102870-		err = mallctl("experimental.hooks.install", &handles[i],
102871-		    &sz, &hooks, sizeof(hooks));
102872-		assert(err == 0);
102873-
102874-		timedelta_t timer;
102875-		timer_start(&timer);
102876-		malloc_free_loop(iters);
102877-		timer_stop(&timer);
102878-		malloc_printf("With %d hook%s: %"FMTu64"us\n", i + 1,
102879-		    i + 1 == 1 ? "" : "s", timer_usec(&timer));
102880-	}
102881-	for (int i = 0; i < HOOK_MAX; i++) {
102882-		err = mallctl("experimental.hooks.remove", NULL, NULL,
102883-		    &handles[i], sizeof(handles[i]));
102884-		assert(err == 0);
102885-	}
102886-}
102887-
102888-static void
102889-test_unhooked(int iters) {
102890-	timedelta_t timer;
102891-	timer_start(&timer);
102892-	malloc_free_loop(iters);
102893-	timer_stop(&timer);
102894-
102895-	malloc_printf("Without hooks: %"FMTu64"us\n", timer_usec(&timer));
102896-}
102897-
102898-int
102899-main(void) {
102900-	/* Initialize */
102901-	free(mallocx(1, 0));
102902-	int iters = 10 * 1000 * 1000;
102903-	malloc_printf("Benchmarking hooks with %d iterations:\n", iters);
102904-	test_hooked(iters);
102905-	test_unhooked(iters);
102906-}
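The benchmark deleted above installs no-op hooks through the experimental mallctls; the handle returned by "install" is what "remove" later consumes. A pared-down sketch (leaving the dalloc/expand slots NULL is an assumption not exercised by the original benchmark):

#include "test/jemalloc_test.h"

static void
sketch_alloc_hook(void *extra, hook_alloc_t type, void *result,
    uintptr_t result_raw, uintptr_t args_raw[3]) {
	/* Observe allocations; do nothing. */
}

static void
install_and_remove(void) {
	/* Unused hook slots left NULL (assumed to be skipped). */
	hooks_t hooks = {&sketch_alloc_hook, NULL, NULL, NULL};
	void *handle;
	size_t sz = sizeof(handle);

	if (mallctl("experimental.hooks.install", &handle, &sz, &hooks,
	    sizeof(hooks)) != 0) {
		return;
	}
	free(mallocx(1, 0));    /* traced by the installed hook */
	mallctl("experimental.hooks.remove", NULL, NULL, &handle,
	    sizeof(handle));
}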
102907diff --git a/jemalloc/test/stress/large_microbench.c b/jemalloc/test/stress/large_microbench.c
102908deleted file mode 100644
102909index c66b33a..0000000
102910--- a/jemalloc/test/stress/large_microbench.c
102911+++ /dev/null
102912@@ -1,33 +0,0 @@
102913-#include "test/jemalloc_test.h"
102914-#include "test/bench.h"
102915-
102916-static void
102917-large_mallocx_free(void) {
102918-	/*
102919-	 * We go a bit larger than the large minclass on its own to better
102920-	 * expose costs from things like zeroing.
102921-	 */
102922-	void *p = mallocx(SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
102923-	assert_ptr_not_null(p, "mallocx shouldn't fail");
102924-	free(p);
102925-}
102926-
102927-static void
102928-small_mallocx_free(void) {
102929-	void *p = mallocx(16, 0);
102930-	assert_ptr_not_null(p, "mallocx shouldn't fail");
102931-	free(p);
102932-}
102933-
102934-TEST_BEGIN(test_large_vs_small) {
102935-	compare_funcs(100*1000, 1*1000*1000, "large mallocx",
102936-	    large_mallocx_free, "small mallocx", small_mallocx_free);
102937-}
102938-TEST_END
102939-
102940-int
102941-main(void) {
102942-	return test_no_reentrancy(
102943-	    test_large_vs_small);
102944-}
102945-
102946diff --git a/jemalloc/test/stress/mallctl.c b/jemalloc/test/stress/mallctl.c
102947deleted file mode 100644
102948index d29b311..0000000
102949--- a/jemalloc/test/stress/mallctl.c
102950+++ /dev/null
102951@@ -1,74 +0,0 @@
102952-#include "test/jemalloc_test.h"
102953-#include "test/bench.h"
102954-
102955-static void
102956-mallctl_short(void) {
102957-	const char *version;
102958-	size_t sz = sizeof(version);
102959-	int err = mallctl("version", &version, &sz, NULL, 0);
102960-	assert_d_eq(err, 0, "mallctl failure");
102961-}
102962-
102963-size_t mib_short[1];
102964-
102965-static void
102966-mallctlbymib_short(void) {
102967-	size_t miblen = sizeof(mib_short)/sizeof(mib_short[0]);
102968-	const char *version;
102969-	size_t sz = sizeof(version);
102970-	int err = mallctlbymib(mib_short, miblen, &version, &sz, NULL, 0);
102971-	assert_d_eq(err, 0, "mallctlbymib failure");
102972-}
102973-
102974-TEST_BEGIN(test_mallctl_vs_mallctlbymib_short) {
102975-	size_t miblen = sizeof(mib_short)/sizeof(mib_short[0]);
102976-
102977-	int err = mallctlnametomib("version", mib_short, &miblen);
102978-	assert_d_eq(err, 0, "mallctlnametomib failure");
102979-	compare_funcs(10*1000*1000, 10*1000*1000, "mallctl_short",
102980-	    mallctl_short, "mallctlbymib_short", mallctlbymib_short);
102981-}
102982-TEST_END
102983-
102984-static void
102985-mallctl_long(void) {
102986-	uint64_t nmalloc;
102987-	size_t sz = sizeof(nmalloc);
102988-	int err = mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz, NULL,
102989-	    0);
102990-	assert_d_eq(err, 0, "mallctl failure");
102991-}
102992-
102993-size_t mib_long[6];
102994-
102995-static void
102996-mallctlbymib_long(void) {
102997-	size_t miblen = sizeof(mib_long)/sizeof(mib_long[0]);
102998-	uint64_t nmalloc;
102999-	size_t sz = sizeof(nmalloc);
103000-	int err = mallctlbymib(mib_long, miblen, &nmalloc, &sz, NULL, 0);
103001-	assert_d_eq(err, 0, "mallctlbymib failure");
103002-}
103003-
103004-TEST_BEGIN(test_mallctl_vs_mallctlbymib_long) {
103005-	/*
103006-	 * We want to use the longest mallctl we have; that needs stats support
103007-	 * to be allowed.
103008-	 */
103009-	test_skip_if(!config_stats);
103010-
103011-	size_t miblen = sizeof(mib_long)/sizeof(mib_long[0]);
103012-	int err = mallctlnametomib("stats.arenas.0.bins.0.nmalloc", mib_long,
103013-	    &miblen);
103014-	assert_d_eq(err, 0, "mallctlnametomib failure");
103015-	compare_funcs(10*1000*1000, 10*1000*1000, "mallctl_long",
103016-	    mallctl_long, "mallctlbymib_long", mallctlbymib_long);
103017-}
103018-TEST_END
103019-
103020-int
103021-main(void) {
103022-	return test_no_reentrancy(
103023-	    test_mallctl_vs_mallctlbymib_short,
103024-	    test_mallctl_vs_mallctlbymib_long);
103025-}
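The benchmark deleted above contrasts name-based mallctl() lookups with the MIB path, where the name is resolved once via mallctlnametomib() and then reused; a condensed version of that pattern:

#include "test/jemalloc_test.h"

static void
read_version_by_mib(void) {
	size_t mib[1];
	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	const char *version;
	size_t sz = sizeof(version);

	assert_d_eq(mallctlnametomib("version", mib, &miblen), 0,
	    "mallctlnametomib failure");
	/* The resolved MIB can be queried repeatedly without re-parsing. */
	assert_d_eq(mallctlbymib(mib, miblen, &version, &sz, NULL, 0), 0,
	    "mallctlbymib failure");
	malloc_printf("version: %s\n", version);
}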
103026diff --git a/jemalloc/test/stress/microbench.c b/jemalloc/test/stress/microbench.c
103027deleted file mode 100644
103028index 062e32f..0000000
103029--- a/jemalloc/test/stress/microbench.c
103030+++ /dev/null
103031@@ -1,126 +0,0 @@
103032-#include "test/jemalloc_test.h"
103033-#include "test/bench.h"
103034-
103035-static void
103036-malloc_free(void) {
103037-	/* The compiler can optimize away free(malloc(1))! */
103038-	void *p = malloc(1);
103039-	if (p == NULL) {
103040-		test_fail("Unexpected malloc() failure");
103041-		return;
103042-	}
103043-	free(p);
103044-}
103045-
103046-static void
103047-mallocx_free(void) {
103048-	void *p = mallocx(1, 0);
103049-	if (p == NULL) {
103050-		test_fail("Unexpected mallocx() failure");
103051-		return;
103052-	}
103053-	free(p);
103054-}
103055-
103056-TEST_BEGIN(test_malloc_vs_mallocx) {
103057-	compare_funcs(10*1000*1000, 100*1000*1000, "malloc",
103058-	    malloc_free, "mallocx", mallocx_free);
103059-}
103060-TEST_END
103061-
103062-static void
103063-malloc_dallocx(void) {
103064-	void *p = malloc(1);
103065-	if (p == NULL) {
103066-		test_fail("Unexpected malloc() failure");
103067-		return;
103068-	}
103069-	dallocx(p, 0);
103070-}
103071-
103072-static void
103073-malloc_sdallocx(void) {
103074-	void *p = malloc(1);
103075-	if (p == NULL) {
103076-		test_fail("Unexpected malloc() failure");
103077-		return;
103078-	}
103079-	sdallocx(p, 1, 0);
103080-}
103081-
103082-TEST_BEGIN(test_free_vs_dallocx) {
103083-	compare_funcs(10*1000*1000, 100*1000*1000, "free", malloc_free,
103084-	    "dallocx", malloc_dallocx);
103085-}
103086-TEST_END
103087-
103088-TEST_BEGIN(test_dallocx_vs_sdallocx) {
103089-	compare_funcs(10*1000*1000, 100*1000*1000, "dallocx", malloc_dallocx,
103090-	    "sdallocx", malloc_sdallocx);
103091-}
103092-TEST_END
103093-
103094-static void
103095-malloc_mus_free(void) {
103096-	void *p;
103097-
103098-	p = malloc(1);
103099-	if (p == NULL) {
103100-		test_fail("Unexpected malloc() failure");
103101-		return;
103102-	}
103103-	TEST_MALLOC_SIZE(p);
103104-	free(p);
103105-}
103106-
103107-static void
103108-malloc_sallocx_free(void) {
103109-	void *p;
103110-
103111-	p = malloc(1);
103112-	if (p == NULL) {
103113-		test_fail("Unexpected malloc() failure");
103114-		return;
103115-	}
103116-	if (sallocx(p, 0) < 1) {
103117-		test_fail("Unexpected sallocx() failure");
103118-	}
103119-	free(p);
103120-}
103121-
103122-TEST_BEGIN(test_mus_vs_sallocx) {
103123-	compare_funcs(10*1000*1000, 100*1000*1000, "malloc_usable_size",
103124-	    malloc_mus_free, "sallocx", malloc_sallocx_free);
103125-}
103126-TEST_END
103127-
103128-static void
103129-malloc_nallocx_free(void) {
103130-	void *p;
103131-
103132-	p = malloc(1);
103133-	if (p == NULL) {
103134-		test_fail("Unexpected malloc() failure");
103135-		return;
103136-	}
103137-	if (nallocx(1, 0) < 1) {
103138-		test_fail("Unexpected nallocx() failure");
103139-	}
103140-	free(p);
103141-}
103142-
103143-TEST_BEGIN(test_sallocx_vs_nallocx) {
103144-	compare_funcs(10*1000*1000, 100*1000*1000, "sallocx",
103145-	    malloc_sallocx_free, "nallocx", malloc_nallocx_free);
103146-}
103147-TEST_END
103148-
103149-int
103150-main(void) {
103151-	return test_no_reentrancy(
103152-	    test_malloc_vs_mallocx,
103153-	    test_free_vs_dallocx,
103154-	    test_dallocx_vs_sdallocx,
103155-	    test_mus_vs_sallocx,
103156-	    test_sallocx_vs_nallocx);
103157-}
103158diff --git a/jemalloc/test/test.sh.in b/jemalloc/test/test.sh.in
103159deleted file mode 100644
103160index 39302ff..0000000
103161--- a/jemalloc/test/test.sh.in
103162+++ /dev/null
103163@@ -1,80 +0,0 @@
103164-#!/bin/sh
103165-
103166-case @abi@ in
103167-  macho)
103168-    export DYLD_FALLBACK_LIBRARY_PATH="@objroot@lib"
103169-    ;;
103170-  pecoff)
103171-    export PATH="${PATH}:@objroot@lib"
103172-    ;;
103173-  *)
103174-    ;;
103175-esac
103176-
103177-# Make a copy of the @JEMALLOC_CPREFIX@MALLOC_CONF passed in to this script, so
103178-# it can be repeatedly concatenated with per test settings.
103179-export MALLOC_CONF_ALL=${@JEMALLOC_CPREFIX@MALLOC_CONF}
103180-# Concatenate the individual test's MALLOC_CONF and MALLOC_CONF_ALL.
103181-export_malloc_conf() {
103182-  if [ "x${MALLOC_CONF}" != "x" -a "x${MALLOC_CONF_ALL}" != "x" ] ; then
103183-    export @JEMALLOC_CPREFIX@MALLOC_CONF="${MALLOC_CONF},${MALLOC_CONF_ALL}"
103184-  else
103185-    export @JEMALLOC_CPREFIX@MALLOC_CONF="${MALLOC_CONF}${MALLOC_CONF_ALL}"
103186-  fi
103187-}
103188-
103189-# Corresponds to test_status_t.
103190-pass_code=0
103191-skip_code=1
103192-fail_code=2
103193-
103194-pass_count=0
103195-skip_count=0
103196-fail_count=0
103197-for t in $@; do
103198-  if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count != 0 ] ; then
103199-    echo
103200-  fi
103201-  echo "=== ${t} ==="
103202-  if [ -e "@srcroot@${t}.sh" ] ; then
103203-    # Source the shell script corresponding to the test in a subshell and
103204-    # execute the test.  This allows the shell script to set MALLOC_CONF, which
103205-    # is then used to set @JEMALLOC_CPREFIX@MALLOC_CONF (thus allowing the
103206-    # per test shell script to ignore the @JEMALLOC_CPREFIX@ detail).
103207-    enable_fill=@enable_fill@ \
103208-    enable_prof=@enable_prof@ \
103209-    . @srcroot@${t}.sh && \
103210-    export_malloc_conf && \
103211-    $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@
103212-  else
103213-    export MALLOC_CONF= && \
103214-    export_malloc_conf && \
103215-    $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@
103216-  fi
103217-  result_code=$?
103218-  case ${result_code} in
103219-    ${pass_code})
103220-      pass_count=$((pass_count+1))
103221-      ;;
103222-    ${skip_code})
103223-      skip_count=$((skip_count+1))
103224-      ;;
103225-    ${fail_code})
103226-      fail_count=$((fail_count+1))
103227-      ;;
103228-    *)
103229-      echo "Test harness error: ${t} w/ MALLOC_CONF=\"${MALLOC_CONF}\"" 1>&2
103230-      echo "Use prefix to debug, e.g. JEMALLOC_TEST_PREFIX=\"gdb --args\" sh test/test.sh ${t}" 1>&2
103231-      exit 1
103232-  esac
103233-done
103234-
103235-total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}`
103236-echo
103237-echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}"
103238-
103239-if [ ${fail_count} -eq 0 ] ; then
103240-  exit 0
103241-else
103242-  exit 1
103243-fi
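
The deleted driver above treats each test binary's exit status as a test_status_t value (0 = pass, 1 = skip, 2 = fail; anything else is reported as a harness error), layers the per-test MALLOC_CONF onto the caller's @JEMALLOC_CPREFIX@MALLOC_CONF via export_malloc_conf(), and prints a pass/skip/fail summary at the end. The minimal C sketch below shows the exit-status side of that contract from a test binary's point of view; the example_* names and the JEMALLOC_EXAMPLE_SKIP switch are invented for illustration and are not part of jemalloc's test framework, whose real test_status_t lives in the (also deleted) framework headers.

/*
 * Illustrative only: a test executable that reports its result the way a
 * driver like the deleted test.sh expects to tally it.
 */
#include <stdio.h>

typedef enum {
	example_status_pass = 0,	/* tallied into pass_count */
	example_status_skip = 1,	/* tallied into skip_count */
	example_status_fail = 2		/* tallied into fail_count; suite exits 1 */
} example_status_t;

static example_status_t
example_test(void) {
#ifdef JEMALLOC_EXAMPLE_SKIP
	/* A test that cannot run in this configuration reports "skip". */
	return example_status_skip;
#else
	return example_status_pass;
#endif
}

int
main(void) {
	example_status_t result = example_test();

	fprintf(stderr, "example_test: status %d\n", (int)result);
	/* test.sh's case statement switches on this exit status. */
	return (int)result;
}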
103244diff --git a/jemalloc/test/unit/SFMT.c b/jemalloc/test/unit/SFMT.c
103245deleted file mode 100644
103246index b9f85dd..0000000
103247--- a/jemalloc/test/unit/SFMT.c
103248+++ /dev/null
103249@@ -1,1599 +0,0 @@
103250-/*
103251- * This file derives from SFMT 1.3.3
103252- * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
103253- * released under the terms of the following license:
103254- *
103255- *   Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
103256- *   University. All rights reserved.
103257- *
103258- *   Redistribution and use in source and binary forms, with or without
103259- *   modification, are permitted provided that the following conditions are
103260- *   met:
103261- *
103262- *       * Redistributions of source code must retain the above copyright
103263- *         notice, this list of conditions and the following disclaimer.
103264- *       * Redistributions in binary form must reproduce the above
103265- *         copyright notice, this list of conditions and the following
103266- *         disclaimer in the documentation and/or other materials provided
103267- *         with the distribution.
103268- *       * Neither the name of the Hiroshima University nor the names of
103269- *         its contributors may be used to endorse or promote products
103270- *         derived from this software without specific prior written
103271- *         permission.
103272- *
103273- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
103274- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
103275- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
103276- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
103277- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
103278- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
103279- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
103280- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
103281- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
103282- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
103283- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
103284- */
103285-#include "test/jemalloc_test.h"
103286-
103287-#define BLOCK_SIZE 10000
103288-#define BLOCK_SIZE64 (BLOCK_SIZE / 2)
103289-#define COUNT_1 1000
103290-#define COUNT_2 700
103291-
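
The constants above and the init_gen_rand_32_expected, init_by_array_32_expected and KQU(...) tables that follow are reference outputs for the SFMT generator: the rest of the (deleted) test seeds the generator and asserts that freshly generated values match these tables element by element. The sketch below shows that seed/generate/compare pattern in isolation; rng_seed(), rng_next32() and the placeholder linear-congruential generator are stand-ins, not the SFMT API or the framework's assert macros.

/* Stand-in generator so the sketch is self-contained; the real test drives
 * SFMT via init_gen_rand()/init_by_array() and its gen_rand calls. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t rng_state;

static void
rng_seed(uint32_t seed) {
	rng_state = seed;
}

static uint32_t
rng_next32(void) {
	/* Not SFMT: a throwaway LCG used purely for illustration. */
	rng_state = rng_state * 1664525U + 1013904223U;
	return rng_state;
}

static void
check_against_expected(uint32_t seed, const uint32_t *expected, size_t count) {
	size_t i;

	rng_seed(seed);
	for (i = 0; i < count; i++) {
		uint32_t got = rng_next32();

		if (got != expected[i]) {
			fprintf(stderr, "mismatch at %zu: got %uU, want %uU\n",
			    i, got, expected[i]);
			exit(2);	/* i.e. fail_code in test.sh terms */
		}
	}
}

int
main(void) {
	enum { NVALS = 16 };
	uint32_t expected[NVALS];
	size_t i;

	/* Record a reference sequence once (the jemalloc test instead ships
	 * hard-coded tables captured from a known-good SFMT). */
	rng_seed(1234U);
	for (i = 0; i < NVALS; i++) {
		expected[i] = rng_next32();
	}
	check_against_expected(1234U, expected, NVALS);
	puts("reference sequence reproduced");
	return 0;
}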
103292-static const uint32_t init_gen_rand_32_expected[] = {
103293-	3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U,
103294-	3796268453U,  423124208U, 2143818589U, 3827219408U, 2987036003U,
103295-	2674978610U, 1536842514U, 2027035537U, 2534897563U, 1686527725U,
103296-	 545368292U, 1489013321U, 1370534252U, 4231012796U, 3994803019U,
103297-	1764869045U,  824597505U,  862581900U, 2469764249U,  812862514U,
103298-	 359318673U,  116957936U, 3367389672U, 2327178354U, 1898245200U,
103299-	3206507879U, 2378925033U, 1040214787U, 2524778605U, 3088428700U,
103300-	1417665896U,  964324147U, 2282797708U, 2456269299U,  313400376U,
103301-	2245093271U, 1015729427U, 2694465011U, 3246975184U, 1992793635U,
103302-	 463679346U, 3721104591U, 3475064196U,  856141236U, 1499559719U,
103303-	3522818941U, 3721533109U, 1954826617U, 1282044024U, 1543279136U,
103304-	1301863085U, 2669145051U, 4221477354U, 3896016841U, 3392740262U,
103305-	 462466863U, 1037679449U, 1228140306U,  922298197U, 1205109853U,
103306-	1872938061U, 3102547608U, 2742766808U, 1888626088U, 4028039414U,
103307-	 157593879U, 1136901695U, 4038377686U, 3572517236U, 4231706728U,
103308-	2997311961U, 1189931652U, 3981543765U, 2826166703U,   87159245U,
103309-	1721379072U, 3897926942U, 1790395498U, 2569178939U, 1047368729U,
103310-	2340259131U, 3144212906U, 2301169789U, 2442885464U, 3034046771U,
103311-	3667880593U, 3935928400U, 2372805237U, 1666397115U, 2460584504U,
103312-	 513866770U, 3810869743U, 2147400037U, 2792078025U, 2941761810U,
103313-	3212265810U,  984692259U,  346590253U, 1804179199U, 3298543443U,
103314-	 750108141U, 2880257022U,  243310542U, 1869036465U, 1588062513U,
103315-	2983949551U, 1931450364U, 4034505847U, 2735030199U, 1628461061U,
103316-	2539522841U,  127965585U, 3992448871U,  913388237U,  559130076U,
103317-	1202933193U, 4087643167U, 2590021067U, 2256240196U, 1746697293U,
103318-	1013913783U, 1155864921U, 2715773730U,  915061862U, 1948766573U,
103319-	2322882854U, 3761119102U, 1343405684U, 3078711943U, 3067431651U,
103320-	3245156316U, 3588354584U, 3484623306U, 3899621563U, 4156689741U,
103321-	3237090058U, 3880063844U,  862416318U, 4039923869U, 2303788317U,
103322-	3073590536U,  701653667U, 2131530884U, 3169309950U, 2028486980U,
103323-	 747196777U, 3620218225U,  432016035U, 1449580595U, 2772266392U,
103324-	 444224948U, 1662832057U, 3184055582U, 3028331792U, 1861686254U,
103325-	1104864179U,  342430307U, 1350510923U, 3024656237U, 1028417492U,
103326-	2870772950U,  290847558U, 3675663500U,  508431529U, 4264340390U,
103327-	2263569913U, 1669302976U,  519511383U, 2706411211U, 3764615828U,
103328-	3883162495U, 4051445305U, 2412729798U, 3299405164U, 3991911166U,
103329-	2348767304U, 2664054906U, 3763609282U,  593943581U, 3757090046U,
103330-	2075338894U, 2020550814U, 4287452920U, 4290140003U, 1422957317U,
103331-	2512716667U, 2003485045U, 2307520103U, 2288472169U, 3940751663U,
103332-	4204638664U, 2892583423U, 1710068300U, 3904755993U, 2363243951U,
103333-	3038334120U,  547099465U,  771105860U, 3199983734U, 4282046461U,
103334-	2298388363U,  934810218U, 2837827901U, 3952500708U, 2095130248U,
103335-	3083335297U,   26885281U, 3932155283U, 1531751116U, 1425227133U,
103336-	 495654159U, 3279634176U, 3855562207U, 3957195338U, 4159985527U,
103337-	 893375062U, 1875515536U, 1327247422U, 3754140693U, 1028923197U,
103338-	1729880440U,  805571298U,  448971099U, 2726757106U, 2749436461U,
103339-	2485987104U,  175337042U, 3235477922U, 3882114302U, 2020970972U,
103340-	 943926109U, 2762587195U, 1904195558U, 3452650564U,  108432281U,
103341-	3893463573U, 3977583081U, 2636504348U, 1110673525U, 3548479841U,
103342-	4258854744U,  980047703U, 4057175418U, 3890008292U,  145653646U,
103343-	3141868989U, 3293216228U, 1194331837U, 1254570642U, 3049934521U,
103344-	2868313360U, 2886032750U, 1110873820U,  279553524U, 3007258565U,
103345-	1104807822U, 3186961098U,  315764646U, 2163680838U, 3574508994U,
103346-	3099755655U,  191957684U, 3642656737U, 3317946149U, 3522087636U,
103347-	 444526410U,  779157624U, 1088229627U, 1092460223U, 1856013765U,
103348-	3659877367U,  368270451U,  503570716U, 3000984671U, 2742789647U,
103349-	 928097709U, 2914109539U,  308843566U, 2816161253U, 3667192079U,
103350-	2762679057U, 3395240989U, 2928925038U, 1491465914U, 3458702834U,
103351-	3787782576U, 2894104823U, 1296880455U, 1253636503U,  989959407U,
103352-	2291560361U, 2776790436U, 1913178042U, 1584677829U,  689637520U,
103353-	1898406878U,  688391508U, 3385234998U,  845493284U, 1943591856U,
103354-	2720472050U,  222695101U, 1653320868U, 2904632120U, 4084936008U,
103355-	1080720688U, 3938032556U,  387896427U, 2650839632U,   99042991U,
103356-	1720913794U, 1047186003U, 1877048040U, 2090457659U,  517087501U,
103357-	4172014665U, 2129713163U, 2413533132U, 2760285054U, 4129272496U,
103358-	1317737175U, 2309566414U, 2228873332U, 3889671280U, 1110864630U,
103359-	3576797776U, 2074552772U,  832002644U, 3097122623U, 2464859298U,
103360-	2679603822U, 1667489885U, 3237652716U, 1478413938U, 1719340335U,
103361-	2306631119U,  639727358U, 3369698270U,  226902796U, 2099920751U,
103362-	1892289957U, 2201594097U, 3508197013U, 3495811856U, 3900381493U,
103363-	 841660320U, 3974501451U, 3360949056U, 1676829340U,  728899254U,
103364-	2047809627U, 2390948962U,  670165943U, 3412951831U, 4189320049U,
103365-	1911595255U, 2055363086U,  507170575U,  418219594U, 4141495280U,
103366-	2692088692U, 4203630654U, 3540093932U,  791986533U, 2237921051U,
103367-	2526864324U, 2956616642U, 1394958700U, 1983768223U, 1893373266U,
103368-	 591653646U,  228432437U, 1611046598U, 3007736357U, 1040040725U,
103369-	2726180733U, 2789804360U, 4263568405U,  829098158U, 3847722805U,
103370-	1123578029U, 1804276347U,  997971319U, 4203797076U, 4185199713U,
103371-	2811733626U, 2343642194U, 2985262313U, 1417930827U, 3759587724U,
103372-	1967077982U, 1585223204U, 1097475516U, 1903944948U,  740382444U,
103373-	1114142065U, 1541796065U, 1718384172U, 1544076191U, 1134682254U,
103374-	3519754455U, 2866243923U,  341865437U,  645498576U, 2690735853U,
103375-	1046963033U, 2493178460U, 1187604696U, 1619577821U,  488503634U,
103376-	3255768161U, 2306666149U, 1630514044U, 2377698367U, 2751503746U,
103377-	3794467088U, 1796415981U, 3657173746U,  409136296U, 1387122342U,
103378-	1297726519U,  219544855U, 4270285558U,  437578827U, 1444698679U,
103379-	2258519491U,  963109892U, 3982244073U, 3351535275U,  385328496U,
103380-	1804784013U,  698059346U, 3920535147U,  708331212U,  784338163U,
103381-	 785678147U, 1238376158U, 1557298846U, 2037809321U,  271576218U,
103382-	4145155269U, 1913481602U, 2763691931U,  588981080U, 1201098051U,
103383-	3717640232U, 1509206239U,  662536967U, 3180523616U, 1133105435U,
103384-	2963500837U, 2253971215U, 3153642623U, 1066925709U, 2582781958U,
103385-	3034720222U, 1090798544U, 2942170004U, 4036187520U,  686972531U,
103386-	2610990302U, 2641437026U, 1837562420U,  722096247U, 1315333033U,
103387-	2102231203U, 3402389208U, 3403698140U, 1312402831U, 2898426558U,
103388-	 814384596U,  385649582U, 1916643285U, 1924625106U, 2512905582U,
103389-	2501170304U, 4275223366U, 2841225246U, 1467663688U, 3563567847U,
103390-	2969208552U,  884750901U,  102992576U,  227844301U, 3681442994U,
103391-	3502881894U, 4034693299U, 1166727018U, 1697460687U, 1737778332U,
103392-	1787161139U, 1053003655U, 1215024478U, 2791616766U, 2525841204U,
103393-	1629323443U,    3233815U, 2003823032U, 3083834263U, 2379264872U,
103394-	3752392312U, 1287475550U, 3770904171U, 3004244617U, 1502117784U,
103395-	 918698423U, 2419857538U, 3864502062U, 1751322107U, 2188775056U,
103396-	4018728324U,  983712955U,  440071928U, 3710838677U, 2001027698U,
103397-	3994702151U,   22493119U, 3584400918U, 3446253670U, 4254789085U,
103398-	1405447860U, 1240245579U, 1800644159U, 1661363424U, 3278326132U,
103399-	3403623451U,   67092802U, 2609352193U, 3914150340U, 1814842761U,
103400-	3610830847U,  591531412U, 3880232807U, 1673505890U, 2585326991U,
103401-	1678544474U, 3148435887U, 3457217359U, 1193226330U, 2816576908U,
103402-	 154025329U,  121678860U, 1164915738U,  973873761U,  269116100U,
103403-	  52087970U,  744015362U,  498556057U,   94298882U, 1563271621U,
103404-	2383059628U, 4197367290U, 3958472990U, 2592083636U, 2906408439U,
103405-	1097742433U, 3924840517U,  264557272U, 2292287003U, 3203307984U,
103406-	4047038857U, 3820609705U, 2333416067U, 1839206046U, 3600944252U,
103407-	3412254904U,  583538222U, 2390557166U, 4140459427U, 2810357445U,
103408-	 226777499U, 2496151295U, 2207301712U, 3283683112U,  611630281U,
103409-	1933218215U, 3315610954U, 3889441987U, 3719454256U, 3957190521U,
103410-	1313998161U, 2365383016U, 3146941060U, 1801206260U,  796124080U,
103411-	2076248581U, 1747472464U, 3254365145U,  595543130U, 3573909503U,
103412-	3758250204U, 2020768540U, 2439254210U,   93368951U, 3155792250U,
103413-	2600232980U, 3709198295U, 3894900440U, 2971850836U, 1578909644U,
103414-	1443493395U, 2581621665U, 3086506297U, 2443465861U,  558107211U,
103415-	1519367835U,  249149686U,  908102264U, 2588765675U, 1232743965U,
103416-	1001330373U, 3561331654U, 2259301289U, 1564977624U, 3835077093U,
103417-	 727244906U, 4255738067U, 1214133513U, 2570786021U, 3899704621U,
103418-	1633861986U, 1636979509U, 1438500431U,   58463278U, 2823485629U,
103419-	2297430187U, 2926781924U, 3371352948U, 1864009023U, 2722267973U,
103420-	1444292075U,  437703973U, 1060414512U,  189705863U,  910018135U,
103421-	4077357964U,  884213423U, 2644986052U, 3973488374U, 1187906116U,
103422-	2331207875U,  780463700U, 3713351662U, 3854611290U,  412805574U,
103423-	2978462572U, 2176222820U,  829424696U, 2790788332U, 2750819108U,
103424-	1594611657U, 3899878394U, 3032870364U, 1702887682U, 1948167778U,
103425-	  14130042U,  192292500U,  947227076U,   90719497U, 3854230320U,
103426-	 784028434U, 2142399787U, 1563449646U, 2844400217U,  819143172U,
103427-	2883302356U, 2328055304U, 1328532246U, 2603885363U, 3375188924U,
103428-	 933941291U, 3627039714U, 2129697284U, 2167253953U, 2506905438U,
103429-	1412424497U, 2981395985U, 1418359660U, 2925902456U,   52752784U,
103430-	3713667988U, 3924669405U,  648975707U, 1145520213U, 4018650664U,
103431-	3805915440U, 2380542088U, 2013260958U, 3262572197U, 2465078101U,
103432-	1114540067U, 3728768081U, 2396958768U,  590672271U,  904818725U,
103433-	4263660715U,  700754408U, 1042601829U, 4094111823U, 4274838909U,
103434-	2512692617U, 2774300207U, 2057306915U, 3470942453U,   99333088U,
103435-	1142661026U, 2889931380U,   14316674U, 2201179167U,  415289459U,
103436-	 448265759U, 3515142743U, 3254903683U,  246633281U, 1184307224U,
103437-	2418347830U, 2092967314U, 2682072314U, 2558750234U, 2000352263U,
103438-	1544150531U,  399010405U, 1513946097U,  499682937U,  461167460U,
103439-	3045570638U, 1633669705U,  851492362U, 4052801922U, 2055266765U,
103440-	 635556996U,  368266356U, 2385737383U, 3218202352U, 2603772408U,
103441-	 349178792U,  226482567U, 3102426060U, 3575998268U, 2103001871U,
103442-	3243137071U,  225500688U, 1634718593U, 4283311431U, 4292122923U,
103443-	3842802787U,  811735523U,  105712518U,  663434053U, 1855889273U,
103444-	2847972595U, 1196355421U, 2552150115U, 4254510614U, 3752181265U,
103445-	3430721819U, 3828705396U, 3436287905U, 3441964937U, 4123670631U,
103446-	 353001539U,  459496439U, 3799690868U, 1293777660U, 2761079737U,
103447-	 498096339U, 3398433374U, 4080378380U, 2304691596U, 2995729055U,
103448-	4134660419U, 3903444024U, 3576494993U,  203682175U, 3321164857U,
103449-	2747963611U,   79749085U, 2992890370U, 1240278549U, 1772175713U,
103450-	2111331972U, 2655023449U, 1683896345U, 2836027212U, 3482868021U,
103451-	2489884874U,  756853961U, 2298874501U, 4013448667U, 4143996022U,
103452-	2948306858U, 4132920035U, 1283299272U,  995592228U, 3450508595U,
103453-	1027845759U, 1766942720U, 3861411826U, 1446861231U,   95974993U,
103454-	3502263554U, 1487532194U,  601502472U, 4129619129U,  250131773U,
103455-	2050079547U, 3198903947U, 3105589778U, 4066481316U, 3026383978U,
103456-	2276901713U,  365637751U, 2260718426U, 1394775634U, 1791172338U,
103457-	2690503163U, 2952737846U, 1568710462U,  732623190U, 2980358000U,
103458-	1053631832U, 1432426951U, 3229149635U, 1854113985U, 3719733532U,
103459-	3204031934U,  735775531U,  107468620U, 3734611984U,  631009402U,
103460-	3083622457U, 4109580626U,  159373458U, 1301970201U, 4132389302U,
103461-	1293255004U,  847182752U, 4170022737U,   96712900U, 2641406755U,
103462-	1381727755U,  405608287U, 4287919625U, 1703554290U, 3589580244U,
103463-	2911403488U,    2166565U, 2647306451U, 2330535117U, 1200815358U,
103464-	1165916754U,  245060911U, 4040679071U, 3684908771U, 2452834126U,
103465-	2486872773U, 2318678365U, 2940627908U, 1837837240U, 3447897409U,
103466-	4270484676U, 1495388728U, 3754288477U, 4204167884U, 1386977705U,
103467-	2692224733U, 3076249689U, 4109568048U, 4170955115U, 4167531356U,
103468-	4020189950U, 4261855038U, 3036907575U, 3410399885U, 3076395737U,
103469-	1046178638U,  144496770U,  230725846U, 3349637149U,   17065717U,
103470-	2809932048U, 2054581785U, 3608424964U, 3259628808U,  134897388U,
103471-	3743067463U,  257685904U, 3795656590U, 1562468719U, 3589103904U,
103472-	3120404710U,  254684547U, 2653661580U, 3663904795U, 2631942758U,
103473-	1063234347U, 2609732900U, 2332080715U, 3521125233U, 1180599599U,
103474-	1935868586U, 4110970440U,  296706371U, 2128666368U, 1319875791U,
103475-	1570900197U, 3096025483U, 1799882517U, 1928302007U, 1163707758U,
103476-	1244491489U, 3533770203U,  567496053U, 2757924305U, 2781639343U,
103477-	2818420107U,  560404889U, 2619609724U, 4176035430U, 2511289753U,
103478-	2521842019U, 3910553502U, 2926149387U, 3302078172U, 4237118867U,
103479-	 330725126U,  367400677U,  888239854U,  545570454U, 4259590525U,
103480-	 134343617U, 1102169784U, 1647463719U, 3260979784U, 1518840883U,
103481-	3631537963U, 3342671457U, 1301549147U, 2083739356U,  146593792U,
103482-	3217959080U,  652755743U, 2032187193U, 3898758414U, 1021358093U,
103483-	4037409230U, 2176407931U, 3427391950U, 2883553603U,  985613827U,
103484-	3105265092U, 3423168427U, 3387507672U,  467170288U, 2141266163U,
103485-	3723870208U,  916410914U, 1293987799U, 2652584950U,  769160137U,
103486-	3205292896U, 1561287359U, 1684510084U, 3136055621U, 3765171391U,
103487-	 639683232U, 2639569327U, 1218546948U, 4263586685U, 3058215773U,
103488-	2352279820U,  401870217U, 2625822463U, 1529125296U, 2981801895U,
103489-	1191285226U, 4027725437U, 3432700217U, 4098835661U,  971182783U,
103490-	2443861173U, 3881457123U, 3874386651U,  457276199U, 2638294160U,
103491-	4002809368U,  421169044U, 1112642589U, 3076213779U, 3387033971U,
103492-	2499610950U, 3057240914U, 1662679783U,  461224431U, 1168395933U
103493-};
103494-static const uint32_t init_by_array_32_expected[] = {
103495-	2920711183U, 3885745737U, 3501893680U,  856470934U, 1421864068U,
103496-	 277361036U, 1518638004U, 2328404353U, 3355513634U,   64329189U,
103497-	1624587673U, 3508467182U, 2481792141U, 3706480799U, 1925859037U,
103498-	2913275699U,  882658412U,  384641219U,  422202002U, 1873384891U,
103499-	2006084383U, 3924929912U, 1636718106U, 3108838742U, 1245465724U,
103500-	4195470535U,  779207191U, 1577721373U, 1390469554U, 2928648150U,
103501-	 121399709U, 3170839019U, 4044347501U,  953953814U, 3821710850U,
103502-	3085591323U, 3666535579U, 3577837737U, 2012008410U, 3565417471U,
103503-	4044408017U,  433600965U, 1637785608U, 1798509764U,  860770589U,
103504-	3081466273U, 3982393409U, 2451928325U, 3437124742U, 4093828739U,
103505-	3357389386U, 2154596123U,  496568176U, 2650035164U, 2472361850U,
103506-	   3438299U, 2150366101U, 1577256676U, 3802546413U, 1787774626U,
103507-	4078331588U, 3706103141U,  170391138U, 3806085154U, 1680970100U,
103508-	1961637521U, 3316029766U,  890610272U, 1453751581U, 1430283664U,
103509-	3051057411U, 3597003186U,  542563954U, 3796490244U, 1690016688U,
103510-	3448752238U,  440702173U,  347290497U, 1121336647U, 2540588620U,
103511-	 280881896U, 2495136428U,  213707396U,   15104824U, 2946180358U,
103512-	 659000016U,  566379385U, 2614030979U, 2855760170U,  334526548U,
103513-	2315569495U, 2729518615U,  564745877U, 1263517638U, 3157185798U,
103514-	1604852056U, 1011639885U, 2950579535U, 2524219188U,  312951012U,
103515-	1528896652U, 1327861054U, 2846910138U, 3966855905U, 2536721582U,
103516-	 855353911U, 1685434729U, 3303978929U, 1624872055U, 4020329649U,
103517-	3164802143U, 1642802700U, 1957727869U, 1792352426U, 3334618929U,
103518-	2631577923U, 3027156164U,  842334259U, 3353446843U, 1226432104U,
103519-	1742801369U, 3552852535U, 3471698828U, 1653910186U, 3380330939U,
103520-	2313782701U, 3351007196U, 2129839995U, 1800682418U, 4085884420U,
103521-	1625156629U, 3669701987U,  615211810U, 3294791649U, 4131143784U,
103522-	2590843588U, 3207422808U, 3275066464U,  561592872U, 3957205738U,
103523-	3396578098U,   48410678U, 3505556445U, 1005764855U, 3920606528U,
103524-	2936980473U, 2378918600U, 2404449845U, 1649515163U,  701203563U,
103525-	3705256349U,   83714199U, 3586854132U,  922978446U, 2863406304U,
103526-	3523398907U, 2606864832U, 2385399361U, 3171757816U, 4262841009U,
103527-	3645837721U, 1169579486U, 3666433897U, 3174689479U, 1457866976U,
103528-	3803895110U, 3346639145U, 1907224409U, 1978473712U, 1036712794U,
103529-	 980754888U, 1302782359U, 1765252468U,  459245755U, 3728923860U,
103530-	1512894209U, 2046491914U,  207860527U,  514188684U, 2288713615U,
103531-	1597354672U, 3349636117U, 2357291114U, 3995796221U,  945364213U,
103532-	1893326518U, 3770814016U, 1691552714U, 2397527410U,  967486361U,
103533-	 776416472U, 4197661421U,  951150819U, 1852770983U, 4044624181U,
103534-	1399439738U, 4194455275U, 2284037669U, 1550734958U, 3321078108U,
103535-	1865235926U, 2912129961U, 2664980877U, 1357572033U, 2600196436U,
103536-	2486728200U, 2372668724U, 1567316966U, 2374111491U, 1839843570U,
103537-	  20815612U, 3727008608U, 3871996229U,  824061249U, 1932503978U,
103538-	3404541726U,  758428924U, 2609331364U, 1223966026U, 1299179808U,
103539-	 648499352U, 2180134401U,  880821170U, 3781130950U,  113491270U,
103540-	1032413764U, 4185884695U, 2490396037U, 1201932817U, 4060951446U,
103541-	4165586898U, 1629813212U, 2887821158U,  415045333U,  628926856U,
103542-	2193466079U, 3391843445U, 2227540681U, 1907099846U, 2848448395U,
103543-	1717828221U, 1372704537U, 1707549841U, 2294058813U, 2101214437U,
103544-	2052479531U, 1695809164U, 3176587306U, 2632770465U,   81634404U,
103545-	1603220563U,  644238487U,  302857763U,  897352968U, 2613146653U,
103546-	1391730149U, 4245717312U, 4191828749U, 1948492526U, 2618174230U,
103547-	3992984522U, 2178852787U, 3596044509U, 3445573503U, 2026614616U,
103548-	 915763564U, 3415689334U, 2532153403U, 3879661562U, 2215027417U,
103549-	3111154986U, 2929478371U,  668346391U, 1152241381U, 2632029711U,
103550-	3004150659U, 2135025926U,  948690501U, 2799119116U, 4228829406U,
103551-	1981197489U, 4209064138U,  684318751U, 3459397845U,  201790843U,
103552-	4022541136U, 3043635877U,  492509624U, 3263466772U, 1509148086U,
103553-	 921459029U, 3198857146U,  705479721U, 3835966910U, 3603356465U,
103554-	 576159741U, 1742849431U,  594214882U, 2055294343U, 3634861861U,
103555-	 449571793U, 3246390646U, 3868232151U, 1479156585U, 2900125656U,
103556-	2464815318U, 3960178104U, 1784261920U,   18311476U, 3627135050U,
103557-	 644609697U,  424968996U,  919890700U, 2986824110U,  816423214U,
103558-	4003562844U, 1392714305U, 1757384428U, 2569030598U,  995949559U,
103559-	3875659880U, 2933807823U, 2752536860U, 2993858466U, 4030558899U,
103560-	2770783427U, 2775406005U, 2777781742U, 1931292655U,  472147933U,
103561-	3865853827U, 2726470545U, 2668412860U, 2887008249U,  408979190U,
103562-	3578063323U, 3242082049U, 1778193530U,   27981909U, 2362826515U,
103563-	 389875677U, 1043878156U,  581653903U, 3830568952U,  389535942U,
103564-	3713523185U, 2768373359U, 2526101582U, 1998618197U, 1160859704U,
103565-	3951172488U, 1098005003U,  906275699U, 3446228002U, 2220677963U,
103566-	2059306445U,  132199571U,  476838790U, 1868039399U, 3097344807U,
103567-	 857300945U,  396345050U, 2835919916U, 1782168828U, 1419519470U,
103568-	4288137521U,  819087232U,  596301494U,  872823172U, 1526888217U,
103569-	 805161465U, 1116186205U, 2829002754U, 2352620120U,  620121516U,
103570-	 354159268U, 3601949785U,  209568138U, 1352371732U, 2145977349U,
103571-	4236871834U, 1539414078U, 3558126206U, 3224857093U, 4164166682U,
103572-	3817553440U, 3301780278U, 2682696837U, 3734994768U, 1370950260U,
103573-	1477421202U, 2521315749U, 1330148125U, 1261554731U, 2769143688U,
103574-	3554756293U, 4235882678U, 3254686059U, 3530579953U, 1215452615U,
103575-	3574970923U, 4057131421U,  589224178U, 1000098193U,  171190718U,
103576-	2521852045U, 2351447494U, 2284441580U, 2646685513U, 3486933563U,
103577-	3789864960U, 1190528160U, 1702536782U, 1534105589U, 4262946827U,
103578-	2726686826U, 3584544841U, 2348270128U, 2145092281U, 2502718509U,
103579-	1027832411U, 3571171153U, 1287361161U, 4011474411U, 3241215351U,
103580-	2419700818U,  971242709U, 1361975763U, 1096842482U, 3271045537U,
103581-	  81165449U,  612438025U, 3912966678U, 1356929810U,  733545735U,
103582-	 537003843U, 1282953084U,  884458241U,  588930090U, 3930269801U,
103583-	2961472450U, 1219535534U, 3632251943U,  268183903U, 1441240533U,
103584-	3653903360U, 3854473319U, 2259087390U, 2548293048U, 2022641195U,
103585-	2105543911U, 1764085217U, 3246183186U,  482438805U,  888317895U,
103586-	2628314765U, 2466219854U,  717546004U, 2322237039U,  416725234U,
103587-	1544049923U, 1797944973U, 3398652364U, 3111909456U,  485742908U,
103588-	2277491072U, 1056355088U, 3181001278U,  129695079U, 2693624550U,
103589-	1764438564U, 3797785470U,  195503713U, 3266519725U, 2053389444U,
103590-	1961527818U, 3400226523U, 3777903038U, 2597274307U, 4235851091U,
103591-	4094406648U, 2171410785U, 1781151386U, 1378577117U,  654643266U,
103592-	3424024173U, 3385813322U,  679385799U,  479380913U,  681715441U,
103593-	3096225905U,  276813409U, 3854398070U, 2721105350U,  831263315U,
103594-	3276280337U, 2628301522U, 3984868494U, 1466099834U, 2104922114U,
103595-	1412672743U,  820330404U, 3491501010U,  942735832U,  710652807U,
103596-	3972652090U,  679881088U,   40577009U, 3705286397U, 2815423480U,
103597-	3566262429U,  663396513U, 3777887429U, 4016670678U,  404539370U,
103598-	1142712925U, 1140173408U, 2913248352U, 2872321286U,  263751841U,
103599-	3175196073U, 3162557581U, 2878996619U,   75498548U, 3836833140U,
103600-	3284664959U, 1157523805U,  112847376U,  207855609U, 1337979698U,
103601-	1222578451U,  157107174U,  901174378U, 3883717063U, 1618632639U,
103602-	1767889440U, 4264698824U, 1582999313U,  884471997U, 2508825098U,
103603-	3756370771U, 2457213553U, 3565776881U, 3709583214U,  915609601U,
103604-	 460833524U, 1091049576U,   85522880U,    2553251U,  132102809U,
103605-	2429882442U, 2562084610U, 1386507633U, 4112471229U,   21965213U,
103606-	1981516006U, 2418435617U, 3054872091U, 4251511224U, 2025783543U,
103607-	1916911512U, 2454491136U, 3938440891U, 3825869115U, 1121698605U,
103608-	3463052265U,  802340101U, 1912886800U, 4031997367U, 3550640406U,
103609-	1596096923U,  610150600U,  431464457U, 2541325046U,  486478003U,
103610-	 739704936U, 2862696430U, 3037903166U, 1129749694U, 2611481261U,
103611-	1228993498U,  510075548U, 3424962587U, 2458689681U,  818934833U,
103612-	4233309125U, 1608196251U, 3419476016U, 1858543939U, 2682166524U,
103613-	3317854285U,  631986188U, 3008214764U,  613826412U, 3567358221U,
103614-	3512343882U, 1552467474U, 3316162670U, 1275841024U, 4142173454U,
103615-	 565267881U,  768644821U,  198310105U, 2396688616U, 1837659011U,
103616-	 203429334U,  854539004U, 4235811518U, 3338304926U, 3730418692U,
103617-	3852254981U, 3032046452U, 2329811860U, 2303590566U, 2696092212U,
103618-	3894665932U,  145835667U,  249563655U, 1932210840U, 2431696407U,
103619-	3312636759U,  214962629U, 2092026914U, 3020145527U, 4073039873U,
103620-	2739105705U, 1308336752U,  855104522U, 2391715321U,   67448785U,
103621-	 547989482U,  854411802U, 3608633740U,  431731530U,  537375589U,
103622-	3888005760U,  696099141U,  397343236U, 1864511780U,   44029739U,
103623-	1729526891U, 1993398655U, 2010173426U, 2591546756U,  275223291U,
103624-	1503900299U, 4217765081U, 2185635252U, 1122436015U, 3550155364U,
103625-	 681707194U, 3260479338U,  933579397U, 2983029282U, 2505504587U,
103626-	2667410393U, 2962684490U, 4139721708U, 2658172284U, 2452602383U,
103627-	2607631612U, 1344296217U, 3075398709U, 2949785295U, 1049956168U,
103628-	3917185129U, 2155660174U, 3280524475U, 1503827867U,  674380765U,
103629-	1918468193U, 3843983676U,  634358221U, 2538335643U, 1873351298U,
103630-	3368723763U, 2129144130U, 3203528633U, 3087174986U, 2691698871U,
103631-	2516284287U,   24437745U, 1118381474U, 2816314867U, 2448576035U,
103632-	4281989654U,  217287825U,  165872888U, 2628995722U, 3533525116U,
103633-	2721669106U,  872340568U, 3429930655U, 3309047304U, 3916704967U,
103634-	3270160355U, 1348884255U, 1634797670U,  881214967U, 4259633554U,
103635-	 174613027U, 1103974314U, 1625224232U, 2678368291U, 1133866707U,
103636-	3853082619U, 4073196549U, 1189620777U,  637238656U,  930241537U,
103637-	4042750792U, 3842136042U, 2417007212U, 2524907510U, 1243036827U,
103638-	1282059441U, 3764588774U, 1394459615U, 2323620015U, 1166152231U,
103639-	3307479609U, 3849322257U, 3507445699U, 4247696636U,  758393720U,
103640-	 967665141U, 1095244571U, 1319812152U,  407678762U, 2640605208U,
103641-	2170766134U, 3663594275U, 4039329364U, 2512175520U,  725523154U,
103642-	2249807004U, 3312617979U, 2414634172U, 1278482215U,  349206484U,
103643-	1573063308U, 1196429124U, 3873264116U, 2400067801U,  268795167U,
103644-	 226175489U, 2961367263U, 1968719665U,   42656370U, 1010790699U,
103645-	 561600615U, 2422453992U, 3082197735U, 1636700484U, 3977715296U,
103646-	3125350482U, 3478021514U, 2227819446U, 1540868045U, 3061908980U,
103647-	1087362407U, 3625200291U,  361937537U,  580441897U, 1520043666U,
103648-	2270875402U, 1009161260U, 2502355842U, 4278769785U,  473902412U,
103649-	1057239083U, 1905829039U, 1483781177U, 2080011417U, 1207494246U,
103650-	1806991954U, 2194674403U, 3455972205U,  807207678U, 3655655687U,
103651-	 674112918U,  195425752U, 3917890095U, 1874364234U, 1837892715U,
103652-	3663478166U, 1548892014U, 2570748714U, 2049929836U, 2167029704U,
103653-	 697543767U, 3499545023U, 3342496315U, 1725251190U, 3561387469U,
103654-	2905606616U, 1580182447U, 3934525927U, 4103172792U, 1365672522U,
103655-	1534795737U, 3308667416U, 2841911405U, 3943182730U, 4072020313U,
103656-	3494770452U, 3332626671U,   55327267U,  478030603U,  411080625U,
103657-	3419529010U, 1604767823U, 3513468014U,  570668510U,  913790824U,
103658-	2283967995U,  695159462U, 3825542932U, 4150698144U, 1829758699U,
103659-	 202895590U, 1609122645U, 1267651008U, 2910315509U, 2511475445U,
103660-	2477423819U, 3932081579U,  900879979U, 2145588390U, 2670007504U,
103661-	 580819444U, 1864996828U, 2526325979U, 1019124258U,  815508628U,
103662-	2765933989U, 1277301341U, 3006021786U,  855540956U,  288025710U,
103663-	1919594237U, 2331223864U,  177452412U, 2475870369U, 2689291749U,
103664-	 865194284U,  253432152U, 2628531804U, 2861208555U, 2361597573U,
103665-	1653952120U, 1039661024U, 2159959078U, 3709040440U, 3564718533U,
103666-	2596878672U, 2041442161U,   31164696U, 2662962485U, 3665637339U,
103667-	1678115244U, 2699839832U, 3651968520U, 3521595541U,  458433303U,
103668-	2423096824U,   21831741U,  380011703U, 2498168716U,  861806087U,
103669-	1673574843U, 4188794405U, 2520563651U, 2632279153U, 2170465525U,
103670-	4171949898U, 3886039621U, 1661344005U, 3424285243U,  992588372U,
103671-	2500984144U, 2993248497U, 3590193895U, 1535327365U,  515645636U,
103672-	 131633450U, 3729760261U, 1613045101U, 3254194278U,   15889678U,
103673-	1493590689U,  244148718U, 2991472662U, 1401629333U,  777349878U,
103674-	2501401703U, 4285518317U, 3794656178U,  955526526U, 3442142820U,
103675-	3970298374U,  736025417U, 2737370764U, 1271509744U,  440570731U,
103676-	 136141826U, 1596189518U,  923399175U,  257541519U, 3505774281U,
103677-	2194358432U, 2518162991U, 1379893637U, 2667767062U, 3748146247U,
103678-	1821712620U, 3923161384U, 1947811444U, 2392527197U, 4127419685U,
103679-	1423694998U, 4156576871U, 1382885582U, 3420127279U, 3617499534U,
103680-	2994377493U, 4038063986U, 1918458672U, 2983166794U, 4200449033U,
103681-	 353294540U, 1609232588U,  243926648U, 2332803291U,  507996832U,
103682-	2392838793U, 4075145196U, 2060984340U, 4287475136U,   88232602U,
103683-	2491531140U, 4159725633U, 2272075455U,  759298618U,  201384554U,
103684-	 838356250U, 1416268324U,  674476934U,   90795364U,  141672229U,
103685-	3660399588U, 4196417251U, 3249270244U, 3774530247U,   59587265U,
103686-	3683164208U,   19392575U, 1463123697U, 1882205379U,  293780489U,
103687-	2553160622U, 2933904694U,  675638239U, 2851336944U, 1435238743U,
103688-	2448730183U,  804436302U, 2119845972U,  322560608U, 4097732704U,
103689-	2987802540U,  641492617U, 2575442710U, 4217822703U, 3271835300U,
103690-	2836418300U, 3739921620U, 2138378768U, 2879771855U, 4294903423U,
103691-	3121097946U, 2603440486U, 2560820391U, 1012930944U, 2313499967U,
103692-	 584489368U, 3431165766U,  897384869U, 2062537737U, 2847889234U,
103693-	3742362450U, 2951174585U, 4204621084U, 1109373893U, 3668075775U,
103694-	2750138839U, 3518055702U,  733072558U, 4169325400U,  788493625U
103695-};
103696-static const uint64_t init_gen_rand_64_expected[] = {
103697-	KQU(16924766246869039260), KQU( 8201438687333352714),
103698-	KQU( 2265290287015001750), KQU(18397264611805473832),
103699-	KQU( 3375255223302384358), KQU( 6345559975416828796),
103700-	KQU(18229739242790328073), KQU( 7596792742098800905),
103701-	KQU(  255338647169685981), KQU( 2052747240048610300),
103702-	KQU(18328151576097299343), KQU(12472905421133796567),
103703-	KQU(11315245349717600863), KQU(16594110197775871209),
103704-	KQU(15708751964632456450), KQU(10452031272054632535),
103705-	KQU(11097646720811454386), KQU( 4556090668445745441),
103706-	KQU(17116187693090663106), KQU(14931526836144510645),
103707-	KQU( 9190752218020552591), KQU( 9625800285771901401),
103708-	KQU(13995141077659972832), KQU( 5194209094927829625),
103709-	KQU( 4156788379151063303), KQU( 8523452593770139494),
103710-	KQU(14082382103049296727), KQU( 2462601863986088483),
103711-	KQU( 3030583461592840678), KQU( 5221622077872827681),
103712-	KQU( 3084210671228981236), KQU(13956758381389953823),
103713-	KQU(13503889856213423831), KQU(15696904024189836170),
103714-	KQU( 4612584152877036206), KQU( 6231135538447867881),
103715-	KQU(10172457294158869468), KQU( 6452258628466708150),
103716-	KQU(14044432824917330221), KQU(  370168364480044279),
103717-	KQU(10102144686427193359), KQU(  667870489994776076),
103718-	KQU( 2732271956925885858), KQU(18027788905977284151),
103719-	KQU(15009842788582923859), KQU( 7136357960180199542),
103720-	KQU(15901736243475578127), KQU(16951293785352615701),
103721-	KQU(10551492125243691632), KQU(17668869969146434804),
103722-	KQU(13646002971174390445), KQU( 9804471050759613248),
103723-	KQU( 5511670439655935493), KQU(18103342091070400926),
103724-	KQU(17224512747665137533), KQU(15534627482992618168),
103725-	KQU( 1423813266186582647), KQU(15821176807932930024),
103726-	KQU(   30323369733607156), KQU(11599382494723479403),
103727-	KQU(  653856076586810062), KQU( 3176437395144899659),
103728-	KQU(14028076268147963917), KQU(16156398271809666195),
103729-	KQU( 3166955484848201676), KQU( 5746805620136919390),
103730-	KQU(17297845208891256593), KQU(11691653183226428483),
103731-	KQU(17900026146506981577), KQU(15387382115755971042),
103732-	KQU(16923567681040845943), KQU( 8039057517199388606),
103733-	KQU(11748409241468629263), KQU(  794358245539076095),
103734-	KQU(13438501964693401242), KQU(14036803236515618962),
103735-	KQU( 5252311215205424721), KQU(17806589612915509081),
103736-	KQU( 6802767092397596006), KQU(14212120431184557140),
103737-	KQU( 1072951366761385712), KQU(13098491780722836296),
103738-	KQU( 9466676828710797353), KQU(12673056849042830081),
103739-	KQU(12763726623645357580), KQU(16468961652999309493),
103740-	KQU(15305979875636438926), KQU(17444713151223449734),
103741-	KQU( 5692214267627883674), KQU(13049589139196151505),
103742-	KQU(  880115207831670745), KQU( 1776529075789695498),
103743-	KQU(16695225897801466485), KQU(10666901778795346845),
103744-	KQU( 6164389346722833869), KQU( 2863817793264300475),
103745-	KQU( 9464049921886304754), KQU( 3993566636740015468),
103746-	KQU( 9983749692528514136), KQU(16375286075057755211),
103747-	KQU(16042643417005440820), KQU(11445419662923489877),
103748-	KQU( 7999038846885158836), KQU( 6721913661721511535),
103749-	KQU( 5363052654139357320), KQU( 1817788761173584205),
103750-	KQU(13290974386445856444), KQU( 4650350818937984680),
103751-	KQU( 8219183528102484836), KQU( 1569862923500819899),
103752-	KQU( 4189359732136641860), KQU(14202822961683148583),
103753-	KQU( 4457498315309429058), KQU(13089067387019074834),
103754-	KQU(11075517153328927293), KQU(10277016248336668389),
103755-	KQU( 7070509725324401122), KQU(17808892017780289380),
103756-	KQU(13143367339909287349), KQU( 1377743745360085151),
103757-	KQU( 5749341807421286485), KQU(14832814616770931325),
103758-	KQU( 7688820635324359492), KQU(10960474011539770045),
103759-	KQU(   81970066653179790), KQU(12619476072607878022),
103760-	KQU( 4419566616271201744), KQU(15147917311750568503),
103761-	KQU( 5549739182852706345), KQU( 7308198397975204770),
103762-	KQU(13580425496671289278), KQU(17070764785210130301),
103763-	KQU( 8202832846285604405), KQU( 6873046287640887249),
103764-	KQU( 6927424434308206114), KQU( 6139014645937224874),
103765-	KQU(10290373645978487639), KQU(15904261291701523804),
103766-	KQU( 9628743442057826883), KQU(18383429096255546714),
103767-	KQU( 4977413265753686967), KQU( 7714317492425012869),
103768-	KQU( 9025232586309926193), KQU(14627338359776709107),
103769-	KQU(14759849896467790763), KQU(10931129435864423252),
103770-	KQU( 4588456988775014359), KQU(10699388531797056724),
103771-	KQU(  468652268869238792), KQU( 5755943035328078086),
103772-	KQU( 2102437379988580216), KQU( 9986312786506674028),
103773-	KQU( 2654207180040945604), KQU( 8726634790559960062),
103774-	KQU(  100497234871808137), KQU( 2800137176951425819),
103775-	KQU( 6076627612918553487), KQU( 5780186919186152796),
103776-	KQU( 8179183595769929098), KQU( 6009426283716221169),
103777-	KQU( 2796662551397449358), KQU( 1756961367041986764),
103778-	KQU( 6972897917355606205), KQU(14524774345368968243),
103779-	KQU( 2773529684745706940), KQU( 4853632376213075959),
103780-	KQU( 4198177923731358102), KQU( 8271224913084139776),
103781-	KQU( 2741753121611092226), KQU(16782366145996731181),
103782-	KQU(15426125238972640790), KQU(13595497100671260342),
103783-	KQU( 3173531022836259898), KQU( 6573264560319511662),
103784-	KQU(18041111951511157441), KQU( 2351433581833135952),
103785-	KQU( 3113255578908173487), KQU( 1739371330877858784),
103786-	KQU(16046126562789165480), KQU( 8072101652214192925),
103787-	KQU(15267091584090664910), KQU( 9309579200403648940),
103788-	KQU( 5218892439752408722), KQU(14492477246004337115),
103789-	KQU(17431037586679770619), KQU( 7385248135963250480),
103790-	KQU( 9580144956565560660), KQU( 4919546228040008720),
103791-	KQU(15261542469145035584), KQU(18233297270822253102),
103792-	KQU( 5453248417992302857), KQU( 9309519155931460285),
103793-	KQU(10342813012345291756), KQU(15676085186784762381),
103794-	KQU(15912092950691300645), KQU( 9371053121499003195),
103795-	KQU( 9897186478226866746), KQU(14061858287188196327),
103796-	KQU(  122575971620788119), KQU(12146750969116317754),
103797-	KQU( 4438317272813245201), KQU( 8332576791009527119),
103798-	KQU(13907785691786542057), KQU(10374194887283287467),
103799-	KQU( 2098798755649059566), KQU( 3416235197748288894),
103800-	KQU( 8688269957320773484), KQU( 7503964602397371571),
103801-	KQU(16724977015147478236), KQU( 9461512855439858184),
103802-	KQU(13259049744534534727), KQU( 3583094952542899294),
103803-	KQU( 8764245731305528292), KQU(13240823595462088985),
103804-	KQU(13716141617617910448), KQU(18114969519935960955),
103805-	KQU( 2297553615798302206), KQU( 4585521442944663362),
103806-	KQU(17776858680630198686), KQU( 4685873229192163363),
103807-	KQU(  152558080671135627), KQU(15424900540842670088),
103808-	KQU(13229630297130024108), KQU(17530268788245718717),
103809-	KQU(16675633913065714144), KQU( 3158912717897568068),
103810-	KQU(15399132185380087288), KQU( 7401418744515677872),
103811-	KQU(13135412922344398535), KQU( 6385314346100509511),
103812-	KQU(13962867001134161139), KQU(10272780155442671999),
103813-	KQU(12894856086597769142), KQU(13340877795287554994),
103814-	KQU(12913630602094607396), KQU(12543167911119793857),
103815-	KQU(17343570372251873096), KQU(10959487764494150545),
103816-	KQU( 6966737953093821128), KQU(13780699135496988601),
103817-	KQU( 4405070719380142046), KQU(14923788365607284982),
103818-	KQU( 2869487678905148380), KQU( 6416272754197188403),
103819-	KQU(15017380475943612591), KQU( 1995636220918429487),
103820-	KQU( 3402016804620122716), KQU(15800188663407057080),
103821-	KQU(11362369990390932882), KQU(15262183501637986147),
103822-	KQU(10239175385387371494), KQU( 9352042420365748334),
103823-	KQU( 1682457034285119875), KQU( 1724710651376289644),
103824-	KQU( 2038157098893817966), KQU( 9897825558324608773),
103825-	KQU( 1477666236519164736), KQU(16835397314511233640),
103826-	KQU(10370866327005346508), KQU(10157504370660621982),
103827-	KQU(12113904045335882069), KQU(13326444439742783008),
103828-	KQU(11302769043000765804), KQU(13594979923955228484),
103829-	KQU(11779351762613475968), KQU( 3786101619539298383),
103830-	KQU( 8021122969180846063), KQU(15745904401162500495),
103831-	KQU(10762168465993897267), KQU(13552058957896319026),
103832-	KQU(11200228655252462013), KQU( 5035370357337441226),
103833-	KQU( 7593918984545500013), KQU( 5418554918361528700),
103834-	KQU( 4858270799405446371), KQU( 9974659566876282544),
103835-	KQU(18227595922273957859), KQU( 2772778443635656220),
103836-	KQU(14285143053182085385), KQU( 9939700992429600469),
103837-	KQU(12756185904545598068), KQU( 2020783375367345262),
103838-	KQU(   57026775058331227), KQU(  950827867930065454),
103839-	KQU( 6602279670145371217), KQU( 2291171535443566929),
103840-	KQU( 5832380724425010313), KQU( 1220343904715982285),
103841-	KQU(17045542598598037633), KQU(15460481779702820971),
103842-	KQU(13948388779949365130), KQU(13975040175430829518),
103843-	KQU(17477538238425541763), KQU(11104663041851745725),
103844-	KQU(15860992957141157587), KQU(14529434633012950138),
103845-	KQU( 2504838019075394203), KQU( 7512113882611121886),
103846-	KQU( 4859973559980886617), KQU( 1258601555703250219),
103847-	KQU(15594548157514316394), KQU( 4516730171963773048),
103848-	KQU(11380103193905031983), KQU( 6809282239982353344),
103849-	KQU(18045256930420065002), KQU( 2453702683108791859),
103850-	KQU(  977214582986981460), KQU( 2006410402232713466),
103851-	KQU( 6192236267216378358), KQU( 3429468402195675253),
103852-	KQU(18146933153017348921), KQU(17369978576367231139),
103853-	KQU( 1246940717230386603), KQU(11335758870083327110),
103854-	KQU(14166488801730353682), KQU( 9008573127269635732),
103855-	KQU(10776025389820643815), KQU(15087605441903942962),
103856-	KQU( 1359542462712147922), KQU(13898874411226454206),
103857-	KQU(17911176066536804411), KQU( 9435590428600085274),
103858-	KQU(  294488509967864007), KQU( 8890111397567922046),
103859-	KQU( 7987823476034328778), KQU(13263827582440967651),
103860-	KQU( 7503774813106751573), KQU(14974747296185646837),
103861-	KQU( 8504765037032103375), KQU(17340303357444536213),
103862-	KQU( 7704610912964485743), KQU( 8107533670327205061),
103863-	KQU( 9062969835083315985), KQU(16968963142126734184),
103864-	KQU(12958041214190810180), KQU( 2720170147759570200),
103865-	KQU( 2986358963942189566), KQU(14884226322219356580),
103866-	KQU(  286224325144368520), KQU(11313800433154279797),
103867-	KQU(18366849528439673248), KQU(17899725929482368789),
103868-	KQU( 3730004284609106799), KQU( 1654474302052767205),
103869-	KQU( 5006698007047077032), KQU( 8196893913601182838),
103870-	KQU(15214541774425211640), KQU(17391346045606626073),
103871-	KQU( 8369003584076969089), KQU( 3939046733368550293),
103872-	KQU(10178639720308707785), KQU( 2180248669304388697),
103873-	KQU(   62894391300126322), KQU( 9205708961736223191),
103874-	KQU( 6837431058165360438), KQU( 3150743890848308214),
103875-	KQU(17849330658111464583), KQU(12214815643135450865),
103876-	KQU(13410713840519603402), KQU( 3200778126692046802),
103877-	KQU(13354780043041779313), KQU(  800850022756886036),
103878-	KQU(15660052933953067433), KQU( 6572823544154375676),
103879-	KQU(11030281857015819266), KQU(12682241941471433835),
103880-	KQU(11654136407300274693), KQU( 4517795492388641109),
103881-	KQU( 9757017371504524244), KQU(17833043400781889277),
103882-	KQU(12685085201747792227), KQU(10408057728835019573),
103883-	KQU(   98370418513455221), KQU( 6732663555696848598),
103884-	KQU(13248530959948529780), KQU( 3530441401230622826),
103885-	KQU(18188251992895660615), KQU( 1847918354186383756),
103886-	KQU( 1127392190402660921), KQU(11293734643143819463),
103887-	KQU( 3015506344578682982), KQU(13852645444071153329),
103888-	KQU( 2121359659091349142), KQU( 1294604376116677694),
103889-	KQU( 5616576231286352318), KQU( 7112502442954235625),
103890-	KQU(11676228199551561689), KQU(12925182803007305359),
103891-	KQU( 7852375518160493082), KQU( 1136513130539296154),
103892-	KQU( 5636923900916593195), KQU( 3221077517612607747),
103893-	KQU(17784790465798152513), KQU( 3554210049056995938),
103894-	KQU(17476839685878225874), KQU( 3206836372585575732),
103895-	KQU( 2765333945644823430), KQU(10080070903718799528),
103896-	KQU( 5412370818878286353), KQU( 9689685887726257728),
103897-	KQU( 8236117509123533998), KQU( 1951139137165040214),
103898-	KQU( 4492205209227980349), KQU(16541291230861602967),
103899-	KQU( 1424371548301437940), KQU( 9117562079669206794),
103900-	KQU(14374681563251691625), KQU(13873164030199921303),
103901-	KQU( 6680317946770936731), KQU(15586334026918276214),
103902-	KQU(10896213950976109802), KQU( 9506261949596413689),
103903-	KQU( 9903949574308040616), KQU( 6038397344557204470),
103904-	KQU(  174601465422373648), KQU(15946141191338238030),
103905-	KQU(17142225620992044937), KQU( 7552030283784477064),
103906-	KQU( 2947372384532947997), KQU(  510797021688197711),
103907-	KQU( 4962499439249363461), KQU(   23770320158385357),
103908-	KQU(  959774499105138124), KQU( 1468396011518788276),
103909-	KQU( 2015698006852312308), KQU( 4149400718489980136),
103910-	KQU( 5992916099522371188), KQU(10819182935265531076),
103911-	KQU(16189787999192351131), KQU(  342833961790261950),
103912-	KQU(12470830319550495336), KQU(18128495041912812501),
103913-	KQU( 1193600899723524337), KQU( 9056793666590079770),
103914-	KQU( 2154021227041669041), KQU( 4963570213951235735),
103915-	KQU( 4865075960209211409), KQU( 2097724599039942963),
103916-	KQU( 2024080278583179845), KQU(11527054549196576736),
103917-	KQU(10650256084182390252), KQU( 4808408648695766755),
103918-	KQU( 1642839215013788844), KQU(10607187948250398390),
103919-	KQU( 7076868166085913508), KQU(  730522571106887032),
103920-	KQU(12500579240208524895), KQU( 4484390097311355324),
103921-	KQU(15145801330700623870), KQU( 8055827661392944028),
103922-	KQU( 5865092976832712268), KQU(15159212508053625143),
103923-	KQU( 3560964582876483341), KQU( 4070052741344438280),
103924-	KQU( 6032585709886855634), KQU(15643262320904604873),
103925-	KQU( 2565119772293371111), KQU(  318314293065348260),
103926-	KQU(15047458749141511872), KQU( 7772788389811528730),
103927-	KQU( 7081187494343801976), KQU( 6465136009467253947),
103928-	KQU(10425940692543362069), KQU(  554608190318339115),
103929-	KQU(14796699860302125214), KQU( 1638153134431111443),
103930-	KQU(10336967447052276248), KQU( 8412308070396592958),
103931-	KQU( 4004557277152051226), KQU( 8143598997278774834),
103932-	KQU(16413323996508783221), KQU(13139418758033994949),
103933-	KQU( 9772709138335006667), KQU( 2818167159287157659),
103934-	KQU(17091740573832523669), KQU(14629199013130751608),
103935-	KQU(18268322711500338185), KQU( 8290963415675493063),
103936-	KQU( 8830864907452542588), KQU( 1614839084637494849),
103937-	KQU(14855358500870422231), KQU( 3472996748392519937),
103938-	KQU(15317151166268877716), KQU( 5825895018698400362),
103939-	KQU(16730208429367544129), KQU(10481156578141202800),
103940-	KQU( 4746166512382823750), KQU(12720876014472464998),
103941-	KQU( 8825177124486735972), KQU(13733447296837467838),
103942-	KQU( 6412293741681359625), KQU( 8313213138756135033),
103943-	KQU(11421481194803712517), KQU( 7997007691544174032),
103944-	KQU( 6812963847917605930), KQU( 9683091901227558641),
103945-	KQU(14703594165860324713), KQU( 1775476144519618309),
103946-	KQU( 2724283288516469519), KQU(  717642555185856868),
103947-	KQU( 8736402192215092346), KQU(11878800336431381021),
103948-	KQU( 4348816066017061293), KQU( 6115112756583631307),
103949-	KQU( 9176597239667142976), KQU(12615622714894259204),
103950-	KQU(10283406711301385987), KQU( 5111762509485379420),
103951-	KQU( 3118290051198688449), KQU( 7345123071632232145),
103952-	KQU( 9176423451688682359), KQU( 4843865456157868971),
103953-	KQU(12008036363752566088), KQU(12058837181919397720),
103954-	KQU( 2145073958457347366), KQU( 1526504881672818067),
103955-	KQU( 3488830105567134848), KQU(13208362960674805143),
103956-	KQU( 4077549672899572192), KQU( 7770995684693818365),
103957-	KQU( 1398532341546313593), KQU(12711859908703927840),
103958-	KQU( 1417561172594446813), KQU(17045191024194170604),
103959-	KQU( 4101933177604931713), KQU(14708428834203480320),
103960-	KQU(17447509264469407724), KQU(14314821973983434255),
103961-	KQU(17990472271061617265), KQU( 5087756685841673942),
103962-	KQU(12797820586893859939), KQU( 1778128952671092879),
103963-	KQU( 3535918530508665898), KQU( 9035729701042481301),
103964-	KQU(14808661568277079962), KQU(14587345077537747914),
103965-	KQU(11920080002323122708), KQU( 6426515805197278753),
103966-	KQU( 3295612216725984831), KQU(11040722532100876120),
103967-	KQU(12305952936387598754), KQU(16097391899742004253),
103968-	KQU( 4908537335606182208), KQU(12446674552196795504),
103969-	KQU(16010497855816895177), KQU( 9194378874788615551),
103970-	KQU( 3382957529567613384), KQU( 5154647600754974077),
103971-	KQU( 9801822865328396141), KQU( 9023662173919288143),
103972-	KQU(17623115353825147868), KQU( 8238115767443015816),
103973-	KQU(15811444159859002560), KQU( 9085612528904059661),
103974-	KQU( 6888601089398614254), KQU(  258252992894160189),
103975-	KQU( 6704363880792428622), KQU( 6114966032147235763),
103976-	KQU(11075393882690261875), KQU( 8797664238933620407),
103977-	KQU( 5901892006476726920), KQU( 5309780159285518958),
103978-	KQU(14940808387240817367), KQU(14642032021449656698),
103979-	KQU( 9808256672068504139), KQU( 3670135111380607658),
103980-	KQU(11211211097845960152), KQU( 1474304506716695808),
103981-	KQU(15843166204506876239), KQU( 7661051252471780561),
103982-	KQU(10170905502249418476), KQU( 7801416045582028589),
103983-	KQU( 2763981484737053050), KQU( 9491377905499253054),
103984-	KQU(16201395896336915095), KQU( 9256513756442782198),
103985-	KQU( 5411283157972456034), KQU( 5059433122288321676),
103986-	KQU( 4327408006721123357), KQU( 9278544078834433377),
103987-	KQU( 7601527110882281612), KQU(11848295896975505251),
103988-	KQU(12096998801094735560), KQU(14773480339823506413),
103989-	KQU(15586227433895802149), KQU(12786541257830242872),
103990-	KQU( 6904692985140503067), KQU( 5309011515263103959),
103991-	KQU(12105257191179371066), KQU(14654380212442225037),
103992-	KQU( 2556774974190695009), KQU( 4461297399927600261),
103993-	KQU(14888225660915118646), KQU(14915459341148291824),
103994-	KQU( 2738802166252327631), KQU( 6047155789239131512),
103995-	KQU(12920545353217010338), KQU(10697617257007840205),
103996-	KQU( 2751585253158203504), KQU(13252729159780047496),
103997-	KQU(14700326134672815469), KQU(14082527904374600529),
103998-	KQU(16852962273496542070), KQU(17446675504235853907),
103999-	KQU(15019600398527572311), KQU(12312781346344081551),
104000-	KQU(14524667935039810450), KQU( 5634005663377195738),
104001-	KQU(11375574739525000569), KQU( 2423665396433260040),
104002-	KQU( 5222836914796015410), KQU( 4397666386492647387),
104003-	KQU( 4619294441691707638), KQU(  665088602354770716),
104004-	KQU(13246495665281593610), KQU( 6564144270549729409),
104005-	KQU(10223216188145661688), KQU( 3961556907299230585),
104006-	KQU(11543262515492439914), KQU(16118031437285993790),
104007-	KQU( 7143417964520166465), KQU(13295053515909486772),
104008-	KQU(   40434666004899675), KQU(17127804194038347164),
104009-	KQU( 8599165966560586269), KQU( 8214016749011284903),
104010-	KQU(13725130352140465239), KQU( 5467254474431726291),
104011-	KQU( 7748584297438219877), KQU(16933551114829772472),
104012-	KQU( 2169618439506799400), KQU( 2169787627665113463),
104013-	KQU(17314493571267943764), KQU(18053575102911354912),
104014-	KQU(11928303275378476973), KQU(11593850925061715550),
104015-	KQU(17782269923473589362), KQU( 3280235307704747039),
104016-	KQU( 6145343578598685149), KQU(17080117031114086090),
104017-	KQU(18066839902983594755), KQU( 6517508430331020706),
104018-	KQU( 8092908893950411541), KQU(12558378233386153732),
104019-	KQU( 4476532167973132976), KQU(16081642430367025016),
104020-	KQU( 4233154094369139361), KQU( 8693630486693161027),
104021-	KQU(11244959343027742285), KQU(12273503967768513508),
104022-	KQU(14108978636385284876), KQU( 7242414665378826984),
104023-	KQU( 6561316938846562432), KQU( 8601038474994665795),
104024-	KQU(17532942353612365904), KQU(17940076637020912186),
104025-	KQU( 7340260368823171304), KQU( 7061807613916067905),
104026-	KQU(10561734935039519326), KQU(17990796503724650862),
104027-	KQU( 6208732943911827159), KQU(  359077562804090617),
104028-	KQU(14177751537784403113), KQU(10659599444915362902),
104029-	KQU(15081727220615085833), KQU(13417573895659757486),
104030-	KQU(15513842342017811524), KQU(11814141516204288231),
104031-	KQU( 1827312513875101814), KQU( 2804611699894603103),
104032-	KQU(17116500469975602763), KQU(12270191815211952087),
104033-	KQU(12256358467786024988), KQU(18435021722453971267),
104034-	KQU(  671330264390865618), KQU(  476504300460286050),
104035-	KQU(16465470901027093441), KQU( 4047724406247136402),
104036-	KQU( 1322305451411883346), KQU( 1388308688834322280),
104037-	KQU( 7303989085269758176), KQU( 9323792664765233642),
104038-	KQU( 4542762575316368936), KQU(17342696132794337618),
104039-	KQU( 4588025054768498379), KQU(13415475057390330804),
104040-	KQU(17880279491733405570), KQU(10610553400618620353),
104041-	KQU( 3180842072658960139), KQU(13002966655454270120),
104042-	KQU( 1665301181064982826), KQU( 7083673946791258979),
104043-	KQU(  190522247122496820), KQU(17388280237250677740),
104044-	KQU( 8430770379923642945), KQU(12987180971921668584),
104045-	KQU( 2311086108365390642), KQU( 2870984383579822345),
104046-	KQU(14014682609164653318), KQU(14467187293062251484),
104047-	KQU(  192186361147413298), KQU(15171951713531796524),
104048-	KQU( 9900305495015948728), KQU(17958004775615466344),
104049-	KQU(14346380954498606514), KQU(18040047357617407096),
104050-	KQU( 5035237584833424532), KQU(15089555460613972287),
104051-	KQU( 4131411873749729831), KQU( 1329013581168250330),
104052-	KQU(10095353333051193949), KQU(10749518561022462716),
104053-	KQU( 9050611429810755847), KQU(15022028840236655649),
104054-	KQU( 8775554279239748298), KQU(13105754025489230502),
104055-	KQU(15471300118574167585), KQU(   89864764002355628),
104056-	KQU( 8776416323420466637), KQU( 5280258630612040891),
104057-	KQU( 2719174488591862912), KQU( 7599309137399661994),
104058-	KQU(15012887256778039979), KQU(14062981725630928925),
104059-	KQU(12038536286991689603), KQU( 7089756544681775245),
104060-	KQU(10376661532744718039), KQU( 1265198725901533130),
104061-	KQU(13807996727081142408), KQU( 2935019626765036403),
104062-	KQU( 7651672460680700141), KQU( 3644093016200370795),
104063-	KQU( 2840982578090080674), KQU(17956262740157449201),
104064-	KQU(18267979450492880548), KQU(11799503659796848070),
104065-	KQU( 9942537025669672388), KQU(11886606816406990297),
104066-	KQU( 5488594946437447576), KQU( 7226714353282744302),
104067-	KQU( 3784851653123877043), KQU(  878018453244803041),
104068-	KQU(12110022586268616085), KQU(  734072179404675123),
104069-	KQU(11869573627998248542), KQU(  469150421297783998),
104070-	KQU(  260151124912803804), KQU(11639179410120968649),
104071-	KQU( 9318165193840846253), KQU(12795671722734758075),
104072-	KQU(15318410297267253933), KQU(  691524703570062620),
104073-	KQU( 5837129010576994601), KQU(15045963859726941052),
104074-	KQU( 5850056944932238169), KQU(12017434144750943807),
104075-	KQU( 7447139064928956574), KQU( 3101711812658245019),
104076-	KQU(16052940704474982954), KQU(18195745945986994042),
104077-	KQU( 8932252132785575659), KQU(13390817488106794834),
104078-	KQU(11582771836502517453), KQU( 4964411326683611686),
104079-	KQU( 2195093981702694011), KQU(14145229538389675669),
104080-	KQU(16459605532062271798), KQU(  866316924816482864),
104081-	KQU( 4593041209937286377), KQU( 8415491391910972138),
104082-	KQU( 4171236715600528969), KQU(16637569303336782889),
104083-	KQU( 2002011073439212680), KQU(17695124661097601411),
104084-	KQU( 4627687053598611702), KQU( 7895831936020190403),
104085-	KQU( 8455951300917267802), KQU( 2923861649108534854),
104086-	KQU( 8344557563927786255), KQU( 6408671940373352556),
104087-	KQU(12210227354536675772), KQU(14294804157294222295),
104088-	KQU(10103022425071085127), KQU(10092959489504123771),
104089-	KQU( 6554774405376736268), KQU(12629917718410641774),
104090-	KQU( 6260933257596067126), KQU( 2460827021439369673),
104091-	KQU( 2541962996717103668), KQU(  597377203127351475),
104092-	KQU( 5316984203117315309), KQU( 4811211393563241961),
104093-	KQU(13119698597255811641), KQU( 8048691512862388981),
104094-	KQU(10216818971194073842), KQU( 4612229970165291764),
104095-	KQU(10000980798419974770), KQU( 6877640812402540687),
104096-	KQU( 1488727563290436992), KQU( 2227774069895697318),
104097-	KQU(11237754507523316593), KQU(13478948605382290972),
104098-	KQU( 1963583846976858124), KQU( 5512309205269276457),
104099-	KQU( 3972770164717652347), KQU( 3841751276198975037),
104100-	KQU(10283343042181903117), KQU( 8564001259792872199),
104101-	KQU(16472187244722489221), KQU( 8953493499268945921),
104102-	KQU( 3518747340357279580), KQU( 4003157546223963073),
104103-	KQU( 3270305958289814590), KQU( 3966704458129482496),
104104-	KQU( 8122141865926661939), KQU(14627734748099506653),
104105-	KQU(13064426990862560568), KQU( 2414079187889870829),
104106-	KQU( 5378461209354225306), KQU(10841985740128255566),
104107-	KQU(  538582442885401738), KQU( 7535089183482905946),
104108-	KQU(16117559957598879095), KQU( 8477890721414539741),
104109-	KQU( 1459127491209533386), KQU(17035126360733620462),
104110-	KQU( 8517668552872379126), KQU(10292151468337355014),
104111-	KQU(17081267732745344157), KQU(13751455337946087178),
104112-	KQU(14026945459523832966), KQU( 6653278775061723516),
104113-	KQU(10619085543856390441), KQU( 2196343631481122885),
104114-	KQU(10045966074702826136), KQU(10082317330452718282),
104115-	KQU( 5920859259504831242), KQU( 9951879073426540617),
104116-	KQU( 7074696649151414158), KQU(15808193543879464318),
104117-	KQU( 7385247772746953374), KQU( 3192003544283864292),
104118-	KQU(18153684490917593847), KQU(12423498260668568905),
104119-	KQU(10957758099756378169), KQU(11488762179911016040),
104120-	KQU( 2099931186465333782), KQU(11180979581250294432),
104121-	KQU( 8098916250668367933), KQU( 3529200436790763465),
104122-	KQU(12988418908674681745), KQU( 6147567275954808580),
104123-	KQU( 3207503344604030989), KQU(10761592604898615360),
104124-	KQU(  229854861031893504), KQU( 8809853962667144291),
104125-	KQU(13957364469005693860), KQU( 7634287665224495886),
104126-	KQU(12353487366976556874), KQU( 1134423796317152034),
104127-	KQU( 2088992471334107068), KQU( 7393372127190799698),
104128-	KQU( 1845367839871058391), KQU(  207922563987322884),
104129-	KQU(11960870813159944976), KQU(12182120053317317363),
104130-	KQU(17307358132571709283), KQU(13871081155552824936),
104131-	KQU(18304446751741566262), KQU( 7178705220184302849),
104132-	KQU(10929605677758824425), KQU(16446976977835806844),
104133-	KQU(13723874412159769044), KQU( 6942854352100915216),
104134-	KQU( 1726308474365729390), KQU( 2150078766445323155),
104135-	KQU(15345558947919656626), KQU(12145453828874527201),
104136-	KQU( 2054448620739726849), KQU( 2740102003352628137),
104137-	KQU(11294462163577610655), KQU(  756164283387413743),
104138-	KQU(17841144758438810880), KQU(10802406021185415861),
104139-	KQU( 8716455530476737846), KQU( 6321788834517649606),
104140-	KQU(14681322910577468426), KQU(17330043563884336387),
104141-	KQU(12701802180050071614), KQU(14695105111079727151),
104142-	KQU( 5112098511654172830), KQU( 4957505496794139973),
104143-	KQU( 8270979451952045982), KQU(12307685939199120969),
104144-	KQU(12425799408953443032), KQU( 8376410143634796588),
104145-	KQU(16621778679680060464), KQU( 3580497854566660073),
104146-	KQU( 1122515747803382416), KQU(  857664980960597599),
104147-	KQU( 6343640119895925918), KQU(12878473260854462891),
104148-	KQU(10036813920765722626), KQU(14451335468363173812),
104149-	KQU( 5476809692401102807), KQU(16442255173514366342),
104150-	KQU(13060203194757167104), KQU(14354124071243177715),
104151-	KQU(15961249405696125227), KQU(13703893649690872584),
104152-	KQU(  363907326340340064), KQU( 6247455540491754842),
104153-	KQU(12242249332757832361), KQU(  156065475679796717),
104154-	KQU( 9351116235749732355), KQU( 4590350628677701405),
104155-	KQU( 1671195940982350389), KQU(13501398458898451905),
104156-	KQU( 6526341991225002255), KQU( 1689782913778157592),
104157-	KQU( 7439222350869010334), KQU(13975150263226478308),
104158-	KQU(11411961169932682710), KQU(17204271834833847277),
104159-	KQU(  541534742544435367), KQU( 6591191931218949684),
104160-	KQU( 2645454775478232486), KQU( 4322857481256485321),
104161-	KQU( 8477416487553065110), KQU(12902505428548435048),
104162-	KQU(  971445777981341415), KQU(14995104682744976712),
104163-	KQU( 4243341648807158063), KQU( 8695061252721927661),
104164-	KQU( 5028202003270177222), KQU( 2289257340915567840),
104165-	KQU(13870416345121866007), KQU(13994481698072092233),
104166-	KQU( 6912785400753196481), KQU( 2278309315841980139),
104167-	KQU( 4329765449648304839), KQU( 5963108095785485298),
104168-	KQU( 4880024847478722478), KQU(16015608779890240947),
104169-	KQU( 1866679034261393544), KQU(  914821179919731519),
104170-	KQU( 9643404035648760131), KQU( 2418114953615593915),
104171-	KQU(  944756836073702374), KQU(15186388048737296834),
104172-	KQU( 7723355336128442206), KQU( 7500747479679599691),
104173-	KQU(18013961306453293634), KQU( 2315274808095756456),
104174-	KQU(13655308255424029566), KQU(17203800273561677098),
104175-	KQU( 1382158694422087756), KQU( 5090390250309588976),
104176-	KQU(  517170818384213989), KQU( 1612709252627729621),
104177-	KQU( 1330118955572449606), KQU(  300922478056709885),
104178-	KQU(18115693291289091987), KQU(13491407109725238321),
104179-	KQU(15293714633593827320), KQU( 5151539373053314504),
104180-	KQU( 5951523243743139207), KQU(14459112015249527975),
104181-	KQU( 5456113959000700739), KQU( 3877918438464873016),
104182-	KQU(12534071654260163555), KQU(15871678376893555041),
104183-	KQU(11005484805712025549), KQU(16353066973143374252),
104184-	KQU( 4358331472063256685), KQU( 8268349332210859288),
104185-	KQU(12485161590939658075), KQU(13955993592854471343),
104186-	KQU( 5911446886848367039), KQU(14925834086813706974),
104187-	KQU( 6590362597857994805), KQU( 1280544923533661875),
104188-	KQU( 1637756018947988164), KQU( 4734090064512686329),
104189-	KQU(16693705263131485912), KQU( 6834882340494360958),
104190-	KQU( 8120732176159658505), KQU( 2244371958905329346),
104191-	KQU(10447499707729734021), KQU( 7318742361446942194),
104192-	KQU( 8032857516355555296), KQU(14023605983059313116),
104193-	KQU( 1032336061815461376), KQU( 9840995337876562612),
104194-	KQU( 9869256223029203587), KQU(12227975697177267636),
104195-	KQU(12728115115844186033), KQU( 7752058479783205470),
104196-	KQU(  729733219713393087), KQU(12954017801239007622)
104197-};
104198-static const uint64_t init_by_array_64_expected[] = {
104199-	KQU( 2100341266307895239), KQU( 8344256300489757943),
104200-	KQU(15687933285484243894), KQU( 8268620370277076319),
104201-	KQU(12371852309826545459), KQU( 8800491541730110238),
104202-	KQU(18113268950100835773), KQU( 2886823658884438119),
104203-	KQU( 3293667307248180724), KQU( 9307928143300172731),
104204-	KQU( 7688082017574293629), KQU(  900986224735166665),
104205-	KQU( 9977972710722265039), KQU( 6008205004994830552),
104206-	KQU(  546909104521689292), KQU( 7428471521869107594),
104207-	KQU(14777563419314721179), KQU(16116143076567350053),
104208-	KQU( 5322685342003142329), KQU( 4200427048445863473),
104209-	KQU( 4693092150132559146), KQU(13671425863759338582),
104210-	KQU( 6747117460737639916), KQU( 4732666080236551150),
104211-	KQU( 5912839950611941263), KQU( 3903717554504704909),
104212-	KQU( 2615667650256786818), KQU(10844129913887006352),
104213-	KQU(13786467861810997820), KQU(14267853002994021570),
104214-	KQU(13767807302847237439), KQU(16407963253707224617),
104215-	KQU( 4802498363698583497), KQU( 2523802839317209764),
104216-	KQU( 3822579397797475589), KQU( 8950320572212130610),
104217-	KQU( 3745623504978342534), KQU(16092609066068482806),
104218-	KQU( 9817016950274642398), KQU(10591660660323829098),
104219-	KQU(11751606650792815920), KQU( 5122873818577122211),
104220-	KQU(17209553764913936624), KQU( 6249057709284380343),
104221-	KQU(15088791264695071830), KQU(15344673071709851930),
104222-	KQU( 4345751415293646084), KQU( 2542865750703067928),
104223-	KQU(13520525127852368784), KQU(18294188662880997241),
104224-	KQU( 3871781938044881523), KQU( 2873487268122812184),
104225-	KQU(15099676759482679005), KQU(15442599127239350490),
104226-	KQU( 6311893274367710888), KQU( 3286118760484672933),
104227-	KQU( 4146067961333542189), KQU(13303942567897208770),
104228-	KQU( 8196013722255630418), KQU( 4437815439340979989),
104229-	KQU(15433791533450605135), KQU( 4254828956815687049),
104230-	KQU( 1310903207708286015), KQU(10529182764462398549),
104231-	KQU(14900231311660638810), KQU( 9727017277104609793),
104232-	KQU( 1821308310948199033), KQU(11628861435066772084),
104233-	KQU( 9469019138491546924), KQU( 3145812670532604988),
104234-	KQU( 9938468915045491919), KQU( 1562447430672662142),
104235-	KQU(13963995266697989134), KQU( 3356884357625028695),
104236-	KQU( 4499850304584309747), KQU( 8456825817023658122),
104237-	KQU(10859039922814285279), KQU( 8099512337972526555),
104238-	KQU(  348006375109672149), KQU(11919893998241688603),
104239-	KQU( 1104199577402948826), KQU(16689191854356060289),
104240-	KQU(10992552041730168078), KQU( 7243733172705465836),
104241-	KQU( 5668075606180319560), KQU(18182847037333286970),
104242-	KQU( 4290215357664631322), KQU( 4061414220791828613),
104243-	KQU(13006291061652989604), KQU( 7140491178917128798),
104244-	KQU(12703446217663283481), KQU( 5500220597564558267),
104245-	KQU(10330551509971296358), KQU(15958554768648714492),
104246-	KQU( 5174555954515360045), KQU( 1731318837687577735),
104247-	KQU( 3557700801048354857), KQU(13764012341928616198),
104248-	KQU(13115166194379119043), KQU( 7989321021560255519),
104249-	KQU( 2103584280905877040), KQU( 9230788662155228488),
104250-	KQU(16396629323325547654), KQU(  657926409811318051),
104251-	KQU(15046700264391400727), KQU( 5120132858771880830),
104252-	KQU( 7934160097989028561), KQU( 6963121488531976245),
104253-	KQU(17412329602621742089), KQU(15144843053931774092),
104254-	KQU(17204176651763054532), KQU(13166595387554065870),
104255-	KQU( 8590377810513960213), KQU( 5834365135373991938),
104256-	KQU( 7640913007182226243), KQU( 3479394703859418425),
104257-	KQU(16402784452644521040), KQU( 4993979809687083980),
104258-	KQU(13254522168097688865), KQU(15643659095244365219),
104259-	KQU( 5881437660538424982), KQU(11174892200618987379),
104260-	KQU(  254409966159711077), KQU(17158413043140549909),
104261-	KQU( 3638048789290376272), KQU( 1376816930299489190),
104262-	KQU( 4622462095217761923), KQU(15086407973010263515),
104263-	KQU(13253971772784692238), KQU( 5270549043541649236),
104264-	KQU(11182714186805411604), KQU(12283846437495577140),
104265-	KQU( 5297647149908953219), KQU(10047451738316836654),
104266-	KQU( 4938228100367874746), KQU(12328523025304077923),
104267-	KQU( 3601049438595312361), KQU( 9313624118352733770),
104268-	KQU(13322966086117661798), KQU(16660005705644029394),
104269-	KQU(11337677526988872373), KQU(13869299102574417795),
104270-	KQU(15642043183045645437), KQU( 3021755569085880019),
104271-	KQU( 4979741767761188161), KQU(13679979092079279587),
104272-	KQU( 3344685842861071743), KQU(13947960059899588104),
104273-	KQU(  305806934293368007), KQU( 5749173929201650029),
104274-	KQU(11123724852118844098), KQU(15128987688788879802),
104275-	KQU(15251651211024665009), KQU( 7689925933816577776),
104276-	KQU(16732804392695859449), KQU(17087345401014078468),
104277-	KQU(14315108589159048871), KQU( 4820700266619778917),
104278-	KQU(16709637539357958441), KQU( 4936227875177351374),
104279-	KQU( 2137907697912987247), KQU(11628565601408395420),
104280-	KQU( 2333250549241556786), KQU( 5711200379577778637),
104281-	KQU( 5170680131529031729), KQU(12620392043061335164),
104282-	KQU(   95363390101096078), KQU( 5487981914081709462),
104283-	KQU( 1763109823981838620), KQU( 3395861271473224396),
104284-	KQU( 1300496844282213595), KQU( 6894316212820232902),
104285-	KQU(10673859651135576674), KQU( 5911839658857903252),
104286-	KQU(17407110743387299102), KQU( 8257427154623140385),
104287-	KQU(11389003026741800267), KQU( 4070043211095013717),
104288-	KQU(11663806997145259025), KQU(15265598950648798210),
104289-	KQU(  630585789434030934), KQU( 3524446529213587334),
104290-	KQU( 7186424168495184211), KQU(10806585451386379021),
104291-	KQU(11120017753500499273), KQU( 1586837651387701301),
104292-	KQU(17530454400954415544), KQU( 9991670045077880430),
104293-	KQU( 7550997268990730180), KQU( 8640249196597379304),
104294-	KQU( 3522203892786893823), KQU(10401116549878854788),
104295-	KQU(13690285544733124852), KQU( 8295785675455774586),
104296-	KQU(15535716172155117603), KQU( 3112108583723722511),
104297-	KQU(17633179955339271113), KQU(18154208056063759375),
104298-	KQU( 1866409236285815666), KQU(13326075895396412882),
104299-	KQU( 8756261842948020025), KQU( 6281852999868439131),
104300-	KQU(15087653361275292858), KQU(10333923911152949397),
104301-	KQU( 5265567645757408500), KQU(12728041843210352184),
104302-	KQU( 6347959327507828759), KQU(  154112802625564758),
104303-	KQU(18235228308679780218), KQU( 3253805274673352418),
104304-	KQU( 4849171610689031197), KQU(17948529398340432518),
104305-	KQU(13803510475637409167), KQU(13506570190409883095),
104306-	KQU(15870801273282960805), KQU( 8451286481299170773),
104307-	KQU( 9562190620034457541), KQU( 8518905387449138364),
104308-	KQU(12681306401363385655), KQU( 3788073690559762558),
104309-	KQU( 5256820289573487769), KQU( 2752021372314875467),
104310-	KQU( 6354035166862520716), KQU( 4328956378309739069),
104311-	KQU(  449087441228269600), KQU( 5533508742653090868),
104312-	KQU( 1260389420404746988), KQU(18175394473289055097),
104313-	KQU( 1535467109660399420), KQU( 8818894282874061442),
104314-	KQU(12140873243824811213), KQU(15031386653823014946),
104315-	KQU( 1286028221456149232), KQU( 6329608889367858784),
104316-	KQU( 9419654354945132725), KQU( 6094576547061672379),
104317-	KQU(17706217251847450255), KQU( 1733495073065878126),
104318-	KQU(16918923754607552663), KQU( 8881949849954945044),
104319-	KQU(12938977706896313891), KQU(14043628638299793407),
104320-	KQU(18393874581723718233), KQU( 6886318534846892044),
104321-	KQU(14577870878038334081), KQU(13541558383439414119),
104322-	KQU(13570472158807588273), KQU(18300760537910283361),
104323-	KQU(  818368572800609205), KQU( 1417000585112573219),
104324-	KQU(12337533143867683655), KQU(12433180994702314480),
104325-	KQU(  778190005829189083), KQU(13667356216206524711),
104326-	KQU( 9866149895295225230), KQU(11043240490417111999),
104327-	KQU( 1123933826541378598), KQU( 6469631933605123610),
104328-	KQU(14508554074431980040), KQU(13918931242962026714),
104329-	KQU( 2870785929342348285), KQU(14786362626740736974),
104330-	KQU(13176680060902695786), KQU( 9591778613541679456),
104331-	KQU( 9097662885117436706), KQU(  749262234240924947),
104332-	KQU( 1944844067793307093), KQU( 4339214904577487742),
104333-	KQU( 8009584152961946551), KQU(16073159501225501777),
104334-	KQU( 3335870590499306217), KQU(17088312653151202847),
104335-	KQU( 3108893142681931848), KQU(16636841767202792021),
104336-	KQU(10423316431118400637), KQU( 8008357368674443506),
104337-	KQU(11340015231914677875), KQU(17687896501594936090),
104338-	KQU(15173627921763199958), KQU(  542569482243721959),
104339-	KQU(15071714982769812975), KQU( 4466624872151386956),
104340-	KQU( 1901780715602332461), KQU( 9822227742154351098),
104341-	KQU( 1479332892928648780), KQU( 6981611948382474400),
104342-	KQU( 7620824924456077376), KQU(14095973329429406782),
104343-	KQU( 7902744005696185404), KQU(15830577219375036920),
104344-	KQU(10287076667317764416), KQU(12334872764071724025),
104345-	KQU( 4419302088133544331), KQU(14455842851266090520),
104346-	KQU(12488077416504654222), KQU( 7953892017701886766),
104347-	KQU( 6331484925529519007), KQU( 4902145853785030022),
104348-	KQU(17010159216096443073), KQU(11945354668653886087),
104349-	KQU(15112022728645230829), KQU(17363484484522986742),
104350-	KQU( 4423497825896692887), KQU( 8155489510809067471),
104351-	KQU(  258966605622576285), KQU( 5462958075742020534),
104352-	KQU( 6763710214913276228), KQU( 2368935183451109054),
104353-	KQU(14209506165246453811), KQU( 2646257040978514881),
104354-	KQU( 3776001911922207672), KQU( 1419304601390147631),
104355-	KQU(14987366598022458284), KQU( 3977770701065815721),
104356-	KQU(  730820417451838898), KQU( 3982991703612885327),
104357-	KQU( 2803544519671388477), KQU(17067667221114424649),
104358-	KQU( 2922555119737867166), KQU( 1989477584121460932),
104359-	KQU(15020387605892337354), KQU( 9293277796427533547),
104360-	KQU(10722181424063557247), KQU(16704542332047511651),
104361-	KQU( 5008286236142089514), KQU(16174732308747382540),
104362-	KQU(17597019485798338402), KQU(13081745199110622093),
104363-	KQU( 8850305883842258115), KQU(12723629125624589005),
104364-	KQU( 8140566453402805978), KQU(15356684607680935061),
104365-	KQU(14222190387342648650), KQU(11134610460665975178),
104366-	KQU( 1259799058620984266), KQU(13281656268025610041),
104367-	KQU(  298262561068153992), KQU(12277871700239212922),
104368-	KQU(13911297774719779438), KQU(16556727962761474934),
104369-	KQU(17903010316654728010), KQU( 9682617699648434744),
104370-	KQU(14757681836838592850), KQU( 1327242446558524473),
104371-	KQU(11126645098780572792), KQU( 1883602329313221774),
104372-	KQU( 2543897783922776873), KQU(15029168513767772842),
104373-	KQU(12710270651039129878), KQU(16118202956069604504),
104374-	KQU(15010759372168680524), KQU( 2296827082251923948),
104375-	KQU(10793729742623518101), KQU(13829764151845413046),
104376-	KQU(17769301223184451213), KQU( 3118268169210783372),
104377-	KQU(17626204544105123127), KQU( 7416718488974352644),
104378-	KQU(10450751996212925994), KQU( 9352529519128770586),
104379-	KQU(  259347569641110140), KQU( 8048588892269692697),
104380-	KQU( 1774414152306494058), KQU(10669548347214355622),
104381-	KQU(13061992253816795081), KQU(18432677803063861659),
104382-	KQU( 8879191055593984333), KQU(12433753195199268041),
104383-	KQU(14919392415439730602), KQU( 6612848378595332963),
104384-	KQU( 6320986812036143628), KQU(10465592420226092859),
104385-	KQU( 4196009278962570808), KQU( 3747816564473572224),
104386-	KQU(17941203486133732898), KQU( 2350310037040505198),
104387-	KQU( 5811779859134370113), KQU(10492109599506195126),
104388-	KQU( 7699650690179541274), KQU( 1954338494306022961),
104389-	KQU(14095816969027231152), KQU( 5841346919964852061),
104390-	KQU(14945969510148214735), KQU( 3680200305887550992),
104391-	KQU( 6218047466131695792), KQU( 8242165745175775096),
104392-	KQU(11021371934053307357), KQU( 1265099502753169797),
104393-	KQU( 4644347436111321718), KQU( 3609296916782832859),
104394-	KQU( 8109807992218521571), KQU(18387884215648662020),
104395-	KQU(14656324896296392902), KQU(17386819091238216751),
104396-	KQU(17788300878582317152), KQU( 7919446259742399591),
104397-	KQU( 4466613134576358004), KQU(12928181023667938509),
104398-	KQU(13147446154454932030), KQU(16552129038252734620),
104399-	KQU( 8395299403738822450), KQU(11313817655275361164),
104400-	KQU(  434258809499511718), KQU( 2074882104954788676),
104401-	KQU( 7929892178759395518), KQU( 9006461629105745388),
104402-	KQU( 5176475650000323086), KQU(11128357033468341069),
104403-	KQU(12026158851559118955), KQU(14699716249471156500),
104404-	KQU(  448982497120206757), KQU( 4156475356685519900),
104405-	KQU( 6063816103417215727), KQU(10073289387954971479),
104406-	KQU( 8174466846138590962), KQU( 2675777452363449006),
104407-	KQU( 9090685420572474281), KQU( 6659652652765562060),
104408-	KQU(12923120304018106621), KQU(11117480560334526775),
104409-	KQU(  937910473424587511), KQU( 1838692113502346645),
104410-	KQU(11133914074648726180), KQU( 7922600945143884053),
104411-	KQU(13435287702700959550), KQU( 5287964921251123332),
104412-	KQU(11354875374575318947), KQU(17955724760748238133),
104413-	KQU(13728617396297106512), KQU( 4107449660118101255),
104414-	KQU( 1210269794886589623), KQU(11408687205733456282),
104415-	KQU( 4538354710392677887), KQU(13566803319341319267),
104416-	KQU(17870798107734050771), KQU( 3354318982568089135),
104417-	KQU( 9034450839405133651), KQU(13087431795753424314),
104418-	KQU(  950333102820688239), KQU( 1968360654535604116),
104419-	KQU(16840551645563314995), KQU( 8867501803892924995),
104420-	KQU(11395388644490626845), KQU( 1529815836300732204),
104421-	KQU(13330848522996608842), KQU( 1813432878817504265),
104422-	KQU( 2336867432693429560), KQU(15192805445973385902),
104423-	KQU( 2528593071076407877), KQU(  128459777936689248),
104424-	KQU( 9976345382867214866), KQU( 6208885766767996043),
104425-	KQU(14982349522273141706), KQU( 3099654362410737822),
104426-	KQU(13776700761947297661), KQU( 8806185470684925550),
104427-	KQU( 8151717890410585321), KQU(  640860591588072925),
104428-	KQU(14592096303937307465), KQU( 9056472419613564846),
104429-	KQU(14861544647742266352), KQU(12703771500398470216),
104430-	KQU( 3142372800384138465), KQU( 6201105606917248196),
104431-	KQU(18337516409359270184), KQU(15042268695665115339),
104432-	KQU(15188246541383283846), KQU(12800028693090114519),
104433-	KQU( 5992859621101493472), KQU(18278043971816803521),
104434-	KQU( 9002773075219424560), KQU( 7325707116943598353),
104435-	KQU( 7930571931248040822), KQU( 5645275869617023448),
104436-	KQU( 7266107455295958487), KQU( 4363664528273524411),
104437-	KQU(14313875763787479809), KQU(17059695613553486802),
104438-	KQU( 9247761425889940932), KQU(13704726459237593128),
104439-	KQU( 2701312427328909832), KQU(17235532008287243115),
104440-	KQU(14093147761491729538), KQU( 6247352273768386516),
104441-	KQU( 8268710048153268415), KQU( 7985295214477182083),
104442-	KQU(15624495190888896807), KQU( 3772753430045262788),
104443-	KQU( 9133991620474991698), KQU( 5665791943316256028),
104444-	KQU( 7551996832462193473), KQU(13163729206798953877),
104445-	KQU( 9263532074153846374), KQU( 1015460703698618353),
104446-	KQU(17929874696989519390), KQU(18257884721466153847),
104447-	KQU(16271867543011222991), KQU( 3905971519021791941),
104448-	KQU(16814488397137052085), KQU( 1321197685504621613),
104449-	KQU( 2870359191894002181), KQU(14317282970323395450),
104450-	KQU(13663920845511074366), KQU( 2052463995796539594),
104451-	KQU(14126345686431444337), KQU( 1727572121947022534),
104452-	KQU(17793552254485594241), KQU( 6738857418849205750),
104453-	KQU( 1282987123157442952), KQU(16655480021581159251),
104454-	KQU( 6784587032080183866), KQU(14726758805359965162),
104455-	KQU( 7577995933961987349), KQU(12539609320311114036),
104456-	KQU(10789773033385439494), KQU( 8517001497411158227),
104457-	KQU(10075543932136339710), KQU(14838152340938811081),
104458-	KQU( 9560840631794044194), KQU(17445736541454117475),
104459-	KQU(10633026464336393186), KQU(15705729708242246293),
104460-	KQU( 1117517596891411098), KQU( 4305657943415886942),
104461-	KQU( 4948856840533979263), KQU(16071681989041789593),
104462-	KQU(13723031429272486527), KQU( 7639567622306509462),
104463-	KQU(12670424537483090390), KQU( 9715223453097197134),
104464-	KQU( 5457173389992686394), KQU(  289857129276135145),
104465-	KQU(17048610270521972512), KQU(  692768013309835485),
104466-	KQU(14823232360546632057), KQU(18218002361317895936),
104467-	KQU( 3281724260212650204), KQU(16453957266549513795),
104468-	KQU( 8592711109774511881), KQU(  929825123473369579),
104469-	KQU(15966784769764367791), KQU( 9627344291450607588),
104470-	KQU(10849555504977813287), KQU( 9234566913936339275),
104471-	KQU( 6413807690366911210), KQU(10862389016184219267),
104472-	KQU(13842504799335374048), KQU( 1531994113376881174),
104473-	KQU( 2081314867544364459), KQU(16430628791616959932),
104474-	KQU( 8314714038654394368), KQU( 9155473892098431813),
104475-	KQU(12577843786670475704), KQU( 4399161106452401017),
104476-	KQU( 1668083091682623186), KQU( 1741383777203714216),
104477-	KQU( 2162597285417794374), KQU(15841980159165218736),
104478-	KQU( 1971354603551467079), KQU( 1206714764913205968),
104479-	KQU( 4790860439591272330), KQU(14699375615594055799),
104480-	KQU( 8374423871657449988), KQU(10950685736472937738),
104481-	KQU(  697344331343267176), KQU(10084998763118059810),
104482-	KQU(12897369539795983124), KQU(12351260292144383605),
104483-	KQU( 1268810970176811234), KQU( 7406287800414582768),
104484-	KQU(  516169557043807831), KQU( 5077568278710520380),
104485-	KQU( 3828791738309039304), KQU( 7721974069946943610),
104486-	KQU( 3534670260981096460), KQU( 4865792189600584891),
104487-	KQU(16892578493734337298), KQU( 9161499464278042590),
104488-	KQU(11976149624067055931), KQU(13219479887277343990),
104489-	KQU(14161556738111500680), KQU(14670715255011223056),
104490-	KQU( 4671205678403576558), KQU(12633022931454259781),
104491-	KQU(14821376219869187646), KQU(  751181776484317028),
104492-	KQU( 2192211308839047070), KQU(11787306362361245189),
104493-	KQU(10672375120744095707), KQU( 4601972328345244467),
104494-	KQU(15457217788831125879), KQU( 8464345256775460809),
104495-	KQU(10191938789487159478), KQU( 6184348739615197613),
104496-	KQU(11425436778806882100), KQU( 2739227089124319793),
104497-	KQU(  461464518456000551), KQU( 4689850170029177442),
104498-	KQU( 6120307814374078625), KQU(11153579230681708671),
104499-	KQU( 7891721473905347926), KQU(10281646937824872400),
104500-	KQU( 3026099648191332248), KQU( 8666750296953273818),
104501-	KQU(14978499698844363232), KQU(13303395102890132065),
104502-	KQU( 8182358205292864080), KQU(10560547713972971291),
104503-	KQU(11981635489418959093), KQU( 3134621354935288409),
104504-	KQU(11580681977404383968), KQU(14205530317404088650),
104505-	KQU( 5997789011854923157), KQU(13659151593432238041),
104506-	KQU(11664332114338865086), KQU( 7490351383220929386),
104507-	KQU( 7189290499881530378), KQU(15039262734271020220),
104508-	KQU( 2057217285976980055), KQU(  555570804905355739),
104509-	KQU(11235311968348555110), KQU(13824557146269603217),
104510-	KQU(16906788840653099693), KQU( 7222878245455661677),
104511-	KQU( 5245139444332423756), KQU( 4723748462805674292),
104512-	KQU(12216509815698568612), KQU(17402362976648951187),
104513-	KQU(17389614836810366768), KQU( 4880936484146667711),
104514-	KQU( 9085007839292639880), KQU(13837353458498535449),
104515-	KQU(11914419854360366677), KQU(16595890135313864103),
104516-	KQU( 6313969847197627222), KQU(18296909792163910431),
104517-	KQU(10041780113382084042), KQU( 2499478551172884794),
104518-	KQU(11057894246241189489), KQU( 9742243032389068555),
104519-	KQU(12838934582673196228), KQU(13437023235248490367),
104520-	KQU(13372420669446163240), KQU( 6752564244716909224),
104521-	KQU( 7157333073400313737), KQU(12230281516370654308),
104522-	KQU( 1182884552219419117), KQU( 2955125381312499218),
104523-	KQU(10308827097079443249), KQU( 1337648572986534958),
104524-	KQU(16378788590020343939), KQU(  108619126514420935),
104525-	KQU( 3990981009621629188), KQU( 5460953070230946410),
104526-	KQU( 9703328329366531883), KQU(13166631489188077236),
104527-	KQU( 1104768831213675170), KQU( 3447930458553877908),
104528-	KQU( 8067172487769945676), KQU( 5445802098190775347),
104529-	KQU( 3244840981648973873), KQU(17314668322981950060),
104530-	KQU( 5006812527827763807), KQU(18158695070225526260),
104531-	KQU( 2824536478852417853), KQU(13974775809127519886),
104532-	KQU( 9814362769074067392), KQU(17276205156374862128),
104533-	KQU(11361680725379306967), KQU( 3422581970382012542),
104534-	KQU(11003189603753241266), KQU(11194292945277862261),
104535-	KQU( 6839623313908521348), KQU(11935326462707324634),
104536-	KQU( 1611456788685878444), KQU(13112620989475558907),
104537-	KQU(  517659108904450427), KQU(13558114318574407624),
104538-	KQU(15699089742731633077), KQU( 4988979278862685458),
104539-	KQU( 8111373583056521297), KQU( 3891258746615399627),
104540-	KQU( 8137298251469718086), KQU(12748663295624701649),
104541-	KQU( 4389835683495292062), KQU( 5775217872128831729),
104542-	KQU( 9462091896405534927), KQU( 8498124108820263989),
104543-	KQU( 8059131278842839525), KQU(10503167994254090892),
104544-	KQU(11613153541070396656), KQU(18069248738504647790),
104545-	KQU(  570657419109768508), KQU( 3950574167771159665),
104546-	KQU( 5514655599604313077), KQU( 2908460854428484165),
104547-	KQU(10777722615935663114), KQU(12007363304839279486),
104548-	KQU( 9800646187569484767), KQU( 8795423564889864287),
104549-	KQU(14257396680131028419), KQU( 6405465117315096498),
104550-	KQU( 7939411072208774878), KQU(17577572378528990006),
104551-	KQU(14785873806715994850), KQU(16770572680854747390),
104552-	KQU(18127549474419396481), KQU(11637013449455757750),
104553-	KQU(14371851933996761086), KQU( 3601181063650110280),
104554-	KQU( 4126442845019316144), KQU(10198287239244320669),
104555-	KQU(18000169628555379659), KQU(18392482400739978269),
104556-	KQU( 6219919037686919957), KQU( 3610085377719446052),
104557-	KQU( 2513925039981776336), KQU(16679413537926716955),
104558-	KQU(12903302131714909434), KQU( 5581145789762985009),
104559-	KQU(12325955044293303233), KQU(17216111180742141204),
104560-	KQU( 6321919595276545740), KQU( 3507521147216174501),
104561-	KQU( 9659194593319481840), KQU(11473976005975358326),
104562-	KQU(14742730101435987026), KQU(  492845897709954780),
104563-	KQU(16976371186162599676), KQU(17712703422837648655),
104564-	KQU( 9881254778587061697), KQU( 8413223156302299551),
104565-	KQU( 1563841828254089168), KQU( 9996032758786671975),
104566-	KQU(  138877700583772667), KQU(13003043368574995989),
104567-	KQU( 4390573668650456587), KQU( 8610287390568126755),
104568-	KQU(15126904974266642199), KQU( 6703637238986057662),
104569-	KQU( 2873075592956810157), KQU( 6035080933946049418),
104570-	KQU(13382846581202353014), KQU( 7303971031814642463),
104571-	KQU(18418024405307444267), KQU( 5847096731675404647),
104572-	KQU( 4035880699639842500), KQU(11525348625112218478),
104573-	KQU( 3041162365459574102), KQU( 2604734487727986558),
104574-	KQU(15526341771636983145), KQU(14556052310697370254),
104575-	KQU(12997787077930808155), KQU( 9601806501755554499),
104576-	KQU(11349677952521423389), KQU(14956777807644899350),
104577-	KQU(16559736957742852721), KQU(12360828274778140726),
104578-	KQU( 6685373272009662513), KQU(16932258748055324130),
104579-	KQU(15918051131954158508), KQU( 1692312913140790144),
104580-	KQU(  546653826801637367), KQU( 5341587076045986652),
104581-	KQU(14975057236342585662), KQU(12374976357340622412),
104582-	KQU(10328833995181940552), KQU(12831807101710443149),
104583-	KQU(10548514914382545716), KQU( 2217806727199715993),
104584-	KQU(12627067369242845138), KQU( 4598965364035438158),
104585-	KQU(  150923352751318171), KQU(14274109544442257283),
104586-	KQU( 4696661475093863031), KQU( 1505764114384654516),
104587-	KQU(10699185831891495147), KQU( 2392353847713620519),
104588-	KQU( 3652870166711788383), KQU( 8640653276221911108),
104589-	KQU( 3894077592275889704), KQU( 4918592872135964845),
104590-	KQU(16379121273281400789), KQU(12058465483591683656),
104591-	KQU(11250106829302924945), KQU( 1147537556296983005),
104592-	KQU( 6376342756004613268), KQU(14967128191709280506),
104593-	KQU(18007449949790627628), KQU( 9497178279316537841),
104594-	KQU( 7920174844809394893), KQU(10037752595255719907),
104595-	KQU(15875342784985217697), KQU(15311615921712850696),
104596-	KQU( 9552902652110992950), KQU(14054979450099721140),
104597-	KQU( 5998709773566417349), KQU(18027910339276320187),
104598-	KQU( 8223099053868585554), KQU( 7842270354824999767),
104599-	KQU( 4896315688770080292), KQU(12969320296569787895),
104600-	KQU( 2674321489185759961), KQU( 4053615936864718439),
104601-	KQU(11349775270588617578), KQU( 4743019256284553975),
104602-	KQU( 5602100217469723769), KQU(14398995691411527813),
104603-	KQU( 7412170493796825470), KQU(  836262406131744846),
104604-	KQU( 8231086633845153022), KQU( 5161377920438552287),
104605-	KQU( 8828731196169924949), KQU(16211142246465502680),
104606-	KQU( 3307990879253687818), KQU( 5193405406899782022),
104607-	KQU( 8510842117467566693), KQU( 6070955181022405365),
104608-	KQU(14482950231361409799), KQU(12585159371331138077),
104609-	KQU( 3511537678933588148), KQU( 2041849474531116417),
104610-	KQU(10944936685095345792), KQU(18303116923079107729),
104611-	KQU( 2720566371239725320), KQU( 4958672473562397622),
104612-	KQU( 3032326668253243412), KQU(13689418691726908338),
104613-	KQU( 1895205511728843996), KQU( 8146303515271990527),
104614-	KQU(16507343500056113480), KQU(  473996939105902919),
104615-	KQU( 9897686885246881481), KQU(14606433762712790575),
104616-	KQU( 6732796251605566368), KQU( 1399778120855368916),
104617-	KQU(  935023885182833777), KQU(16066282816186753477),
104618-	KQU( 7291270991820612055), KQU(17530230393129853844),
104619-	KQU(10223493623477451366), KQU(15841725630495676683),
104620-	KQU(17379567246435515824), KQU( 8588251429375561971),
104621-	KQU(18339511210887206423), KQU(17349587430725976100),
104622-	KQU(12244876521394838088), KQU( 6382187714147161259),
104623-	KQU(12335807181848950831), KQU(16948885622305460665),
104624-	KQU(13755097796371520506), KQU(14806740373324947801),
104625-	KQU( 4828699633859287703), KQU( 8209879281452301604),
104626-	KQU(12435716669553736437), KQU(13970976859588452131),
104627-	KQU( 6233960842566773148), KQU(12507096267900505759),
104628-	KQU( 1198713114381279421), KQU(14989862731124149015),
104629-	KQU(15932189508707978949), KQU( 2526406641432708722),
104630-	KQU(   29187427817271982), KQU( 1499802773054556353),
104631-	KQU(10816638187021897173), KQU( 5436139270839738132),
104632-	KQU( 6659882287036010082), KQU( 2154048955317173697),
104633-	KQU(10887317019333757642), KQU(16281091802634424955),
104634-	KQU(10754549879915384901), KQU(10760611745769249815),
104635-	KQU( 2161505946972504002), KQU( 5243132808986265107),
104636-	KQU(10129852179873415416), KQU(  710339480008649081),
104637-	KQU( 7802129453068808528), KQU(17967213567178907213),
104638-	KQU(15730859124668605599), KQU(13058356168962376502),
104639-	KQU( 3701224985413645909), KQU(14464065869149109264),
104640-	KQU( 9959272418844311646), KQU(10157426099515958752),
104641-	KQU(14013736814538268528), KQU(17797456992065653951),
104642-	KQU(17418878140257344806), KQU(15457429073540561521),
104643-	KQU( 2184426881360949378), KQU( 2062193041154712416),
104644-	KQU( 8553463347406931661), KQU( 4913057625202871854),
104645-	KQU( 2668943682126618425), KQU(17064444737891172288),
104646-	KQU( 4997115903913298637), KQU(12019402608892327416),
104647-	KQU(17603584559765897352), KQU(11367529582073647975),
104648-	KQU( 8211476043518436050), KQU( 8676849804070323674),
104649-	KQU(18431829230394475730), KQU(10490177861361247904),
104650-	KQU( 9508720602025651349), KQU( 7409627448555722700),
104651-	KQU( 5804047018862729008), KQU(11943858176893142594),
104652-	KQU(11908095418933847092), KQU( 5415449345715887652),
104653-	KQU( 1554022699166156407), KQU( 9073322106406017161),
104654-	KQU( 7080630967969047082), KQU(18049736940860732943),
104655-	KQU(12748714242594196794), KQU( 1226992415735156741),
104656-	KQU(17900981019609531193), KQU(11720739744008710999),
104657-	KQU( 3006400683394775434), KQU(11347974011751996028),
104658-	KQU( 3316999628257954608), KQU( 8384484563557639101),
104659-	KQU(18117794685961729767), KQU( 1900145025596618194),
104660-	KQU(17459527840632892676), KQU( 5634784101865710994),
104661-	KQU( 7918619300292897158), KQU( 3146577625026301350),
104662-	KQU( 9955212856499068767), KQU( 1873995843681746975),
104663-	KQU( 1561487759967972194), KQU( 8322718804375878474),
104664-	KQU(11300284215327028366), KQU( 4667391032508998982),
104665-	KQU( 9820104494306625580), KQU(17922397968599970610),
104666-	KQU( 1784690461886786712), KQU(14940365084341346821),
104667-	KQU( 5348719575594186181), KQU(10720419084507855261),
104668-	KQU(14210394354145143274), KQU( 2426468692164000131),
104669-	KQU(16271062114607059202), KQU(14851904092357070247),
104670-	KQU( 6524493015693121897), KQU( 9825473835127138531),
104671-	KQU(14222500616268569578), KQU(15521484052007487468),
104672-	KQU(14462579404124614699), KQU(11012375590820665520),
104673-	KQU(11625327350536084927), KQU(14452017765243785417),
104674-	KQU( 9989342263518766305), KQU( 3640105471101803790),
104675-	KQU( 4749866455897513242), KQU(13963064946736312044),
104676-	KQU(10007416591973223791), KQU(18314132234717431115),
104677-	KQU( 3286596588617483450), KQU( 7726163455370818765),
104678-	KQU( 7575454721115379328), KQU( 5308331576437663422),
104679-	KQU(18288821894903530934), KQU( 8028405805410554106),
104680-	KQU(15744019832103296628), KQU(  149765559630932100),
104681-	KQU( 6137705557200071977), KQU(14513416315434803615),
104682-	KQU(11665702820128984473), KQU(  218926670505601386),
104683-	KQU( 6868675028717769519), KQU(15282016569441512302),
104684-	KQU( 5707000497782960236), KQU( 6671120586555079567),
104685-	KQU( 2194098052618985448), KQU(16849577895477330978),
104686-	KQU(12957148471017466283), KQU( 1997805535404859393),
104687-	KQU( 1180721060263860490), KQU(13206391310193756958),
104688-	KQU(12980208674461861797), KQU( 3825967775058875366),
104689-	KQU(17543433670782042631), KQU( 1518339070120322730),
104690-	KQU(16344584340890991669), KQU( 2611327165318529819),
104691-	KQU(11265022723283422529), KQU( 4001552800373196817),
104692-	KQU(14509595890079346161), KQU( 3528717165416234562),
104693-	KQU(18153222571501914072), KQU( 9387182977209744425),
104694-	KQU(10064342315985580021), KQU(11373678413215253977),
104695-	KQU( 2308457853228798099), KQU( 9729042942839545302),
104696-	KQU( 7833785471140127746), KQU( 6351049900319844436),
104697-	KQU(14454610627133496067), KQU(12533175683634819111),
104698-	KQU(15570163926716513029), KQU(13356980519185762498)
104699-};
104700-
104701-TEST_BEGIN(test_gen_rand_32) {
104702-	uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
104703-	uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
104704-	int i;
104705-	uint32_t r32;
104706-	sfmt_t *ctx;
104707-
104708-	expect_d_le(get_min_array_size32(), BLOCK_SIZE,
104709-	    "Array size too small");
104710-	ctx = init_gen_rand(1234);
104711-	fill_array32(ctx, array32, BLOCK_SIZE);
104712-	fill_array32(ctx, array32_2, BLOCK_SIZE);
104713-	fini_gen_rand(ctx);
104714-
104715-	ctx = init_gen_rand(1234);
104716-	for (i = 0; i < BLOCK_SIZE; i++) {
104717-		if (i < COUNT_1) {
104718-			expect_u32_eq(array32[i], init_gen_rand_32_expected[i],
104719-			    "Output mismatch for i=%d", i);
104720-		}
104721-		r32 = gen_rand32(ctx);
104722-		expect_u32_eq(r32, array32[i],
104723-		    "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
104724-	}
104725-	for (i = 0; i < COUNT_2; i++) {
104726-		r32 = gen_rand32(ctx);
104727-		expect_u32_eq(r32, array32_2[i],
104728-		    "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
104729-		    r32);
104730-	}
104731-	fini_gen_rand(ctx);
104732-}
104733-TEST_END
104734-
104735-TEST_BEGIN(test_by_array_32) {
104736-	uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
104737-	uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16));
104738-	int i;
104739-	uint32_t ini[4] = {0x1234, 0x5678, 0x9abc, 0xdef0};
104740-	uint32_t r32;
104741-	sfmt_t *ctx;
104742-
104743-	expect_d_le(get_min_array_size32(), BLOCK_SIZE,
104744-	    "Array size too small");
104745-	ctx = init_by_array(ini, 4);
104746-	fill_array32(ctx, array32, BLOCK_SIZE);
104747-	fill_array32(ctx, array32_2, BLOCK_SIZE);
104748-	fini_gen_rand(ctx);
104749-
104750-	ctx = init_by_array(ini, 4);
104751-	for (i = 0; i < BLOCK_SIZE; i++) {
104752-		if (i < COUNT_1) {
104753-			expect_u32_eq(array32[i], init_by_array_32_expected[i],
104754-			    "Output mismatch for i=%d", i);
104755-		}
104756-		r32 = gen_rand32(ctx);
104757-		expect_u32_eq(r32, array32[i],
104758-		    "Mismatch at array32[%d]=%x, gen=%x", i, array32[i], r32);
104759-	}
104760-	for (i = 0; i < COUNT_2; i++) {
104761-		r32 = gen_rand32(ctx);
104762-		expect_u32_eq(r32, array32_2[i],
104763-		    "Mismatch at array32_2[%d]=%x, gen=%x", i, array32_2[i],
104764-		    r32);
104765-	}
104766-	fini_gen_rand(ctx);
104767-}
104768-TEST_END
104769-
104770-TEST_BEGIN(test_gen_rand_64) {
104771-	uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
104772-	uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
104773-	int i;
104774-	uint64_t r;
104775-	sfmt_t *ctx;
104776-
104777-	expect_d_le(get_min_array_size64(), BLOCK_SIZE64,
104778-	    "Array size too small");
104779-	ctx = init_gen_rand(4321);
104780-	fill_array64(ctx, array64, BLOCK_SIZE64);
104781-	fill_array64(ctx, array64_2, BLOCK_SIZE64);
104782-	fini_gen_rand(ctx);
104783-
104784-	ctx = init_gen_rand(4321);
104785-	for (i = 0; i < BLOCK_SIZE64; i++) {
104786-		if (i < COUNT_1) {
104787-			expect_u64_eq(array64[i], init_gen_rand_64_expected[i],
104788-			    "Output mismatch for i=%d", i);
104789-		}
104790-		r = gen_rand64(ctx);
104791-		expect_u64_eq(r, array64[i],
104792-		    "Mismatch at array64[%d]=%"FMTx64", gen=%"FMTx64, i,
104793-		    array64[i], r);
104794-	}
104795-	for (i = 0; i < COUNT_2; i++) {
104796-		r = gen_rand64(ctx);
104797-		expect_u64_eq(r, array64_2[i],
104798-		    "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64"", i,
104799-		    array64_2[i], r);
104800-	}
104801-	fini_gen_rand(ctx);
104802-}
104803-TEST_END
104804-
104805-TEST_BEGIN(test_by_array_64) {
104806-	uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
104807-	uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16));
104808-	int i;
104809-	uint64_t r;
104810-	uint32_t ini[] = {5, 4, 3, 2, 1};
104811-	sfmt_t *ctx;
104812-
104813-	expect_d_le(get_min_array_size64(), BLOCK_SIZE64,
104814-	    "Array size too small");
104815-	ctx = init_by_array(ini, 5);
104816-	fill_array64(ctx, array64, BLOCK_SIZE64);
104817-	fill_array64(ctx, array64_2, BLOCK_SIZE64);
104818-	fini_gen_rand(ctx);
104819-
104820-	ctx = init_by_array(ini, 5);
104821-	for (i = 0; i < BLOCK_SIZE64; i++) {
104822-		if (i < COUNT_1) {
104823-			expect_u64_eq(array64[i], init_by_array_64_expected[i],
104824-			    "Output mismatch for i=%d", i);
104825-		}
104826-		r = gen_rand64(ctx);
104827-		expect_u64_eq(r, array64[i],
104828-		    "Mismatch at array64[%d]=%"FMTx64" gen=%"FMTx64, i,
104829-		    array64[i], r);
104830-	}
104831-	for (i = 0; i < COUNT_2; i++) {
104832-		r = gen_rand64(ctx);
104833-		expect_u64_eq(r, array64_2[i],
104834-		    "Mismatch at array64_2[%d]=%"FMTx64" gen=%"FMTx64, i,
104835-		    array64_2[i], r);
104836-	}
104837-	fini_gen_rand(ctx);
104838-}
104839-TEST_END
104840-
104841-int
104842-main(void) {
104843-	return test(
104844-	    test_gen_rand_32,
104845-	    test_by_array_32,
104846-	    test_gen_rand_64,
104847-	    test_by_array_64);
104848-}
104849diff --git a/jemalloc/test/unit/a0.c b/jemalloc/test/unit/a0.c
104850deleted file mode 100644
104851index c1be79a..0000000
104852--- a/jemalloc/test/unit/a0.c
104853+++ /dev/null
104854@@ -1,16 +0,0 @@
104855-#include "test/jemalloc_test.h"
104856-
104857-TEST_BEGIN(test_a0) {
104858-	void *p;
104859-
104860-	p = a0malloc(1);
104861-	expect_ptr_not_null(p, "Unexpected a0malloc() error");
104862-	a0dalloc(p);
104863-}
104864-TEST_END
104865-
104866-int
104867-main(void) {
104868-	return test_no_malloc_init(
104869-	    test_a0);
104870-}
104871diff --git a/jemalloc/test/unit/arena_decay.c b/jemalloc/test/unit/arena_decay.c
104872deleted file mode 100644
104873index e991f4d..0000000
104874--- a/jemalloc/test/unit/arena_decay.c
104875+++ /dev/null
104876@@ -1,436 +0,0 @@
104877-#include "test/jemalloc_test.h"
104878-#include "test/arena_util.h"
104879-
104880-#include "jemalloc/internal/ticker.h"
104881-
104882-static nstime_monotonic_t *nstime_monotonic_orig;
104883-static nstime_update_t *nstime_update_orig;
104884-
104885-static unsigned nupdates_mock;
104886-static nstime_t time_mock;
104887-static bool monotonic_mock;
104888-
104889-static bool
104890-nstime_monotonic_mock(void) {
104891-	return monotonic_mock;
104892-}
104893-
104894-static void
104895-nstime_update_mock(nstime_t *time) {
104896-	nupdates_mock++;
104897-	if (monotonic_mock) {
104898-		nstime_copy(time, &time_mock);
104899-	}
104900-}
104901-
104902-TEST_BEGIN(test_decay_ticks) {
104903-	test_skip_if(is_background_thread_enabled());
104904-	test_skip_if(opt_hpa);
104905-
104906-	ticker_geom_t *decay_ticker;
104907-	unsigned tick0, tick1, arena_ind;
104908-	size_t sz, large0;
104909-	void *p;
104910-
104911-	sz = sizeof(size_t);
104912-	expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
104913-	    0), 0, "Unexpected mallctl failure");
104914-
104915-	/* Set up a manually managed arena for test. */
104916-	arena_ind = do_arena_create(0, 0);
104917-
104918-	/* Migrate to the new arena, and get the ticker. */
104919-	unsigned old_arena_ind;
104920-	size_t sz_arena_ind = sizeof(old_arena_ind);
104921-	expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind,
104922-	    &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0,
104923-	    "Unexpected mallctl() failure");
104924-	decay_ticker = tsd_arena_decay_tickerp_get(tsd_fetch());
104925-	expect_ptr_not_null(decay_ticker,
104926-	    "Unexpected failure getting decay ticker");
104927-
104928-	/*
104929-	 * Test the standard APIs using a large size class, since we can't
104930-	 * control tcache interactions for small size classes (except by
104931-	 * completely disabling tcache for the entire test program).
104932-	 */
104933-
104934-	/* malloc(). */
104935-	tick0 = ticker_geom_read(decay_ticker);
104936-	p = malloc(large0);
104937-	expect_ptr_not_null(p, "Unexpected malloc() failure");
104938-	tick1 = ticker_geom_read(decay_ticker);
104939-	expect_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
104940-	/* free(). */
104941-	tick0 = ticker_geom_read(decay_ticker);
104942-	free(p);
104943-	tick1 = ticker_geom_read(decay_ticker);
104944-	expect_u32_ne(tick1, tick0, "Expected ticker to tick during free()");
104945-
104946-	/* calloc(). */
104947-	tick0 = ticker_geom_read(decay_ticker);
104948-	p = calloc(1, large0);
104949-	expect_ptr_not_null(p, "Unexpected calloc() failure");
104950-	tick1 = ticker_geom_read(decay_ticker);
104951-	expect_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
104952-	free(p);
104953-
104954-	/* posix_memalign(). */
104955-	tick0 = ticker_geom_read(decay_ticker);
104956-	expect_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0,
104957-	    "Unexpected posix_memalign() failure");
104958-	tick1 = ticker_geom_read(decay_ticker);
104959-	expect_u32_ne(tick1, tick0,
104960-	    "Expected ticker to tick during posix_memalign()");
104961-	free(p);
104962-
104963-	/* aligned_alloc(). */
104964-	tick0 = ticker_geom_read(decay_ticker);
104965-	p = aligned_alloc(sizeof(size_t), large0);
104966-	expect_ptr_not_null(p, "Unexpected aligned_alloc() failure");
104967-	tick1 = ticker_geom_read(decay_ticker);
104968-	expect_u32_ne(tick1, tick0,
104969-	    "Expected ticker to tick during aligned_alloc()");
104970-	free(p);
104971-
104972-	/* realloc(). */
104973-	/* Allocate. */
104974-	tick0 = ticker_geom_read(decay_ticker);
104975-	p = realloc(NULL, large0);
104976-	expect_ptr_not_null(p, "Unexpected realloc() failure");
104977-	tick1 = ticker_geom_read(decay_ticker);
104978-	expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
104979-	/* Reallocate. */
104980-	tick0 = ticker_geom_read(decay_ticker);
104981-	p = realloc(p, large0);
104982-	expect_ptr_not_null(p, "Unexpected realloc() failure");
104983-	tick1 = ticker_geom_read(decay_ticker);
104984-	expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
104985-	/* Deallocate. */
104986-	tick0 = ticker_geom_read(decay_ticker);
104987-	realloc(p, 0);
104988-	tick1 = ticker_geom_read(decay_ticker);
104989-	expect_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
104990-
104991-	/*
104992-	 * Test the *allocx() APIs using large and small size classes, with
104993-	 * tcache explicitly disabled.
104994-	 */
104995-	{
104996-		unsigned i;
104997-		size_t allocx_sizes[2];
104998-		allocx_sizes[0] = large0;
104999-		allocx_sizes[1] = 1;
105000-
105001-		for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
105002-			sz = allocx_sizes[i];
105003-
105004-			/* mallocx(). */
105005-			tick0 = ticker_geom_read(decay_ticker);
105006-			p = mallocx(sz, MALLOCX_TCACHE_NONE);
105007-			expect_ptr_not_null(p, "Unexpected mallocx() failure");
105008-			tick1 = ticker_geom_read(decay_ticker);
105009-			expect_u32_ne(tick1, tick0,
105010-			    "Expected ticker to tick during mallocx() (sz=%zu)",
105011-			    sz);
105012-			/* rallocx(). */
105013-			tick0 = ticker_geom_read(decay_ticker);
105014-			p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
105015-			expect_ptr_not_null(p, "Unexpected rallocx() failure");
105016-			tick1 = ticker_geom_read(decay_ticker);
105017-			expect_u32_ne(tick1, tick0,
105018-			    "Expected ticker to tick during rallocx() (sz=%zu)",
105019-			    sz);
105020-			/* xallocx(). */
105021-			tick0 = ticker_geom_read(decay_ticker);
105022-			xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
105023-			tick1 = ticker_geom_read(decay_ticker);
105024-			expect_u32_ne(tick1, tick0,
105025-			    "Expected ticker to tick during xallocx() (sz=%zu)",
105026-			    sz);
105027-			/* dallocx(). */
105028-			tick0 = ticker_geom_read(decay_ticker);
105029-			dallocx(p, MALLOCX_TCACHE_NONE);
105030-			tick1 = ticker_geom_read(decay_ticker);
105031-			expect_u32_ne(tick1, tick0,
105032-			    "Expected ticker to tick during dallocx() (sz=%zu)",
105033-			    sz);
105034-			/* sdallocx(). */
105035-			p = mallocx(sz, MALLOCX_TCACHE_NONE);
105036-			expect_ptr_not_null(p, "Unexpected mallocx() failure");
105037-			tick0 = ticker_geom_read(decay_ticker);
105038-			sdallocx(p, sz, MALLOCX_TCACHE_NONE);
105039-			tick1 = ticker_geom_read(decay_ticker);
105040-			expect_u32_ne(tick1, tick0,
105041-			    "Expected ticker to tick during sdallocx() "
105042-			    "(sz=%zu)", sz);
105043-		}
105044-	}
105045-
105046-	/*
105047-	 * Test tcache fill/flush interactions for large and small size classes,
105048-	 * using an explicit tcache.
105049-	 */
105050-	unsigned tcache_ind, i;
105051-	size_t tcache_sizes[2];
105052-	tcache_sizes[0] = large0;
105053-	tcache_sizes[1] = 1;
105054-
105055-	size_t tcache_max, sz_tcache_max;
105056-	sz_tcache_max = sizeof(tcache_max);
105057-	expect_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
105058-	    &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure");
105059-
105060-	sz = sizeof(unsigned);
105061-	expect_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
105062-	    NULL, 0), 0, "Unexpected mallctl failure");
105063-
105064-	for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
105065-		sz = tcache_sizes[i];
105066-
105067-		/* tcache fill. */
105068-		tick0 = ticker_geom_read(decay_ticker);
105069-		p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
105070-		expect_ptr_not_null(p, "Unexpected mallocx() failure");
105071-		tick1 = ticker_geom_read(decay_ticker);
105072-		expect_u32_ne(tick1, tick0,
105073-		    "Expected ticker to tick during tcache fill "
105074-		    "(sz=%zu)", sz);
105075-		/* tcache flush. */
105076-		dallocx(p, MALLOCX_TCACHE(tcache_ind));
105077-		tick0 = ticker_geom_read(decay_ticker);
105078-		expect_d_eq(mallctl("tcache.flush", NULL, NULL,
105079-		    (void *)&tcache_ind, sizeof(unsigned)), 0,
105080-		    "Unexpected mallctl failure");
105081-		tick1 = ticker_geom_read(decay_ticker);
105082-
105083-		/* Will only tick if it's in tcache. */
105084-		expect_u32_ne(tick1, tick0,
105085-		    "Expected ticker to tick during tcache flush (sz=%zu)", sz);
105086-	}
105087-}
105088-TEST_END
105089-
105090-static void
105091-decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt,
105092-    uint64_t dirty_npurge0, uint64_t muzzy_npurge0, bool terminate_asap) {
105093-#define NINTERVALS 101
105094-	nstime_t time, update_interval, decay_ms, deadline;
105095-
105096-	nstime_init_update(&time);
105097-
105098-	nstime_init2(&decay_ms, dt, 0);
105099-	nstime_copy(&deadline, &time);
105100-	nstime_add(&deadline, &decay_ms);
105101-
105102-	nstime_init2(&update_interval, dt, 0);
105103-	nstime_idivide(&update_interval, NINTERVALS);
105104-
105105-	/*
105106-	 * Keep q's slab from being deallocated during the looping below.  If a
105107-	 * cached slab were to repeatedly come and go during looping, it could
105108-	 * prevent the decay backlog ever becoming empty.
105109-	 */
105110-	void *p = do_mallocx(1, flags);
105111-	uint64_t dirty_npurge1, muzzy_npurge1;
105112-	do {
105113-		for (unsigned i = 0; i < ARENA_DECAY_NTICKS_PER_UPDATE / 2;
105114-		    i++) {
105115-			void *q = do_mallocx(1, flags);
105116-			dallocx(q, flags);
105117-		}
105118-		dirty_npurge1 = get_arena_dirty_npurge(arena_ind);
105119-		muzzy_npurge1 = get_arena_muzzy_npurge(arena_ind);
105120-
105121-		nstime_add(&time_mock, &update_interval);
105122-		nstime_update(&time);
105123-	} while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 ==
105124-	    dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) ||
105125-	    !terminate_asap));
105126-	dallocx(p, flags);
105127-
105128-	if (config_stats) {
105129-		expect_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 +
105130-		    muzzy_npurge0, "Expected purging to occur");
105131-	}
105132-#undef NINTERVALS
105133-}
105134-
105135-TEST_BEGIN(test_decay_ticker) {
105136-	test_skip_if(is_background_thread_enabled());
105137-	test_skip_if(opt_hpa);
105138-#define NPS 2048
105139-	ssize_t ddt = opt_dirty_decay_ms;
105140-	ssize_t mdt = opt_muzzy_decay_ms;
105141-	unsigned arena_ind = do_arena_create(ddt, mdt);
105142-	int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
105143-	void *ps[NPS];
105144-
105145-	/*
105146-	 * Allocate a bunch of large objects, pause the clock, deallocate every
105147-	 * other object (to fragment virtual memory), restore the clock, then
105148-	 * [md]allocx() in a tight loop while advancing time rapidly to verify
105149-	 * the ticker triggers purging.
105150-	 */
105151-	size_t large;
105152-	size_t sz = sizeof(size_t);
105153-	expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large, &sz, NULL,
105154-	    0), 0, "Unexpected mallctl failure");
105155-
105156-	do_purge(arena_ind);
105157-	uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind);
105158-	uint64_t muzzy_npurge0 = get_arena_muzzy_npurge(arena_ind);
105159-
105160-	for (unsigned i = 0; i < NPS; i++) {
105161-		ps[i] = do_mallocx(large, flags);
105162-	}
105163-
105164-	nupdates_mock = 0;
105165-	nstime_init_update(&time_mock);
105166-	monotonic_mock = true;
105167-
105168-	nstime_monotonic_orig = nstime_monotonic;
105169-	nstime_update_orig = nstime_update;
105170-	nstime_monotonic = nstime_monotonic_mock;
105171-	nstime_update = nstime_update_mock;
105172-
105173-	for (unsigned i = 0; i < NPS; i += 2) {
105174-		dallocx(ps[i], flags);
105175-		unsigned nupdates0 = nupdates_mock;
105176-		do_decay(arena_ind);
105177-		expect_u_gt(nupdates_mock, nupdates0,
105178-		    "Expected nstime_update() to be called");
105179-	}
105180-
105181-	decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0,
105182-	    muzzy_npurge0, true);
105183-	decay_ticker_helper(arena_ind, flags, false, ddt+mdt, dirty_npurge0,
105184-	    muzzy_npurge0, false);
105185-
105186-	do_arena_destroy(arena_ind);
105187-
105188-	nstime_monotonic = nstime_monotonic_orig;
105189-	nstime_update = nstime_update_orig;
105190-#undef NPS
105191-}
105192-TEST_END
105193-
105194-TEST_BEGIN(test_decay_nonmonotonic) {
105195-	test_skip_if(is_background_thread_enabled());
105196-	test_skip_if(opt_hpa);
105197-#define NPS (SMOOTHSTEP_NSTEPS + 1)
105198-	int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
105199-	void *ps[NPS];
105200-	uint64_t npurge0 = 0;
105201-	uint64_t npurge1 = 0;
105202-	size_t sz, large0;
105203-	unsigned i, nupdates0;
105204-
105205-	sz = sizeof(size_t);
105206-	expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
105207-	    0), 0, "Unexpected mallctl failure");
105208-
105209-	expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
105210-	    "Unexpected mallctl failure");
105211-	do_epoch();
105212-	sz = sizeof(uint64_t);
105213-	npurge0 = get_arena_npurge(0);
105214-
105215-	nupdates_mock = 0;
105216-	nstime_init_update(&time_mock);
105217-	monotonic_mock = false;
105218-
105219-	nstime_monotonic_orig = nstime_monotonic;
105220-	nstime_update_orig = nstime_update;
105221-	nstime_monotonic = nstime_monotonic_mock;
105222-	nstime_update = nstime_update_mock;
105223-
105224-	for (i = 0; i < NPS; i++) {
105225-		ps[i] = mallocx(large0, flags);
105226-		expect_ptr_not_null(ps[i], "Unexpected mallocx() failure");
105227-	}
105228-
105229-	for (i = 0; i < NPS; i++) {
105230-		dallocx(ps[i], flags);
105231-		nupdates0 = nupdates_mock;
105232-		expect_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
105233-		    "Unexpected arena.0.decay failure");
105234-		expect_u_gt(nupdates_mock, nupdates0,
105235-		    "Expected nstime_update() to be called");
105236-	}
105237-
105238-	do_epoch();
105239-	sz = sizeof(uint64_t);
105240-	npurge1 = get_arena_npurge(0);
105241-
105242-	if (config_stats) {
105243-		expect_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
105244-	}
105245-
105246-	nstime_monotonic = nstime_monotonic_orig;
105247-	nstime_update = nstime_update_orig;
105248-#undef NPS
105249-}
105250-TEST_END
105251-
105252-TEST_BEGIN(test_decay_now) {
105253-	test_skip_if(is_background_thread_enabled());
105254-	test_skip_if(opt_hpa);
105255-
105256-	unsigned arena_ind = do_arena_create(0, 0);
105257-	expect_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
105258-	expect_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
105259-	size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
105260-	/* Verify that dirty/muzzy pages never linger after deallocation. */
105261-	for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
105262-		size_t size = sizes[i];
105263-		generate_dirty(arena_ind, size);
105264-		expect_zu_eq(get_arena_pdirty(arena_ind), 0,
105265-		    "Unexpected dirty pages");
105266-		expect_zu_eq(get_arena_pmuzzy(arena_ind), 0,
105267-		    "Unexpected muzzy pages");
105268-	}
105269-	do_arena_destroy(arena_ind);
105270-}
105271-TEST_END
105272-
105273-TEST_BEGIN(test_decay_never) {
105274-	test_skip_if(is_background_thread_enabled() || !config_stats);
105275-	test_skip_if(opt_hpa);
105276-
105277-	unsigned arena_ind = do_arena_create(-1, -1);
105278-	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
105279-	expect_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
105280-	expect_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
105281-	size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
105282-	void *ptrs[sizeof(sizes)/sizeof(size_t)];
105283-	for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
105284-		ptrs[i] = do_mallocx(sizes[i], flags);
105285-	}
105286-	/* Verify that each deallocation generates additional dirty pages. */
105287-	size_t pdirty_prev = get_arena_pdirty(arena_ind);
105288-	size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind);
105289-	expect_zu_eq(pdirty_prev, 0, "Unexpected dirty pages");
105290-	expect_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages");
105291-	for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
105292-		dallocx(ptrs[i], flags);
105293-		size_t pdirty = get_arena_pdirty(arena_ind);
105294-		size_t pmuzzy = get_arena_pmuzzy(arena_ind);
105295-		expect_zu_gt(pdirty + (size_t)get_arena_dirty_purged(arena_ind),
105296-		    pdirty_prev, "Expected dirty pages to increase.");
105297-		expect_zu_eq(pmuzzy, 0, "Unexpected muzzy pages");
105298-		pdirty_prev = pdirty;
105299-	}
105300-	do_arena_destroy(arena_ind);
105301-}
105302-TEST_END
105303-
105304-int
105305-main(void) {
105306-	return test(
105307-	    test_decay_ticks,
105308-	    test_decay_ticker,
105309-	    test_decay_nonmonotonic,
105310-	    test_decay_now,
105311-	    test_decay_never);
105312-}
105313diff --git a/jemalloc/test/unit/arena_decay.sh b/jemalloc/test/unit/arena_decay.sh
105314deleted file mode 100644
105315index 52f1b20..0000000
105316--- a/jemalloc/test/unit/arena_decay.sh
105317+++ /dev/null
105318@@ -1,3 +0,0 @@
105319-#!/bin/sh
105320-
105321-export MALLOC_CONF="dirty_decay_ms:1000,muzzy_decay_ms:1000,tcache_max:1024"
105322diff --git a/jemalloc/test/unit/arena_reset.c b/jemalloc/test/unit/arena_reset.c
105323deleted file mode 100644
105324index 8ef0786..0000000
105325--- a/jemalloc/test/unit/arena_reset.c
105326+++ /dev/null
105327@@ -1,361 +0,0 @@
105328-#ifndef ARENA_RESET_PROF_C_
105329-#include "test/jemalloc_test.h"
105330-#endif
105331-
105332-#include "jemalloc/internal/extent_mmap.h"
105333-#include "jemalloc/internal/rtree.h"
105334-
105335-#include "test/extent_hooks.h"
105336-
105337-static unsigned
105338-get_nsizes_impl(const char *cmd) {
105339-	unsigned ret;
105340-	size_t z;
105341-
105342-	z = sizeof(unsigned);
105343-	expect_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
105344-	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
105345-
105346-	return ret;
105347-}
105348-
105349-static unsigned
105350-get_nsmall(void) {
105351-	return get_nsizes_impl("arenas.nbins");
105352-}
105353-
105354-static unsigned
105355-get_nlarge(void) {
105356-	return get_nsizes_impl("arenas.nlextents");
105357-}
105358-
105359-static size_t
105360-get_size_impl(const char *cmd, size_t ind) {
105361-	size_t ret;
105362-	size_t z;
105363-	size_t mib[4];
105364-	size_t miblen = 4;
105365-
105366-	z = sizeof(size_t);
105367-	expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
105368-	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
105369-	mib[2] = ind;
105370-	z = sizeof(size_t);
105371-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
105372-	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
105373-
105374-	return ret;
105375-}
105376-
105377-static size_t
105378-get_small_size(size_t ind) {
105379-	return get_size_impl("arenas.bin.0.size", ind);
105380-}
105381-
105382-static size_t
105383-get_large_size(size_t ind) {
105384-	return get_size_impl("arenas.lextent.0.size", ind);
105385-}
105386-
105387-/* Like ivsalloc(), but safe to call on discarded allocations. */
105388-static size_t
105389-vsalloc(tsdn_t *tsdn, const void *ptr) {
105390-	emap_full_alloc_ctx_t full_alloc_ctx;
105391-	bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global,
105392-	    ptr, &full_alloc_ctx);
105393-	if (missing) {
105394-		return 0;
105395-	}
105396-
105397-	if (full_alloc_ctx.edata == NULL) {
105398-		return 0;
105399-	}
105400-	if (edata_state_get(full_alloc_ctx.edata) != extent_state_active) {
105401-		return 0;
105402-	}
105403-
105404-	if (full_alloc_ctx.szind == SC_NSIZES) {
105405-		return 0;
105406-	}
105407-
105408-	return sz_index2size(full_alloc_ctx.szind);
105409-}
105410-
105411-static unsigned
105412-do_arena_create(extent_hooks_t *h) {
105413-	unsigned arena_ind;
105414-	size_t sz = sizeof(unsigned);
105415-	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
105416-	    (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
105417-	    "Unexpected mallctl() failure");
105418-	return arena_ind;
105419-}
105420-
105421-static void
105422-do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) {
105423-#define NLARGE	32
105424-	unsigned nsmall, nlarge, i;
105425-	size_t sz;
105426-	int flags;
105427-	tsdn_t *tsdn;
105428-
105429-	flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
105430-
105431-	nsmall = get_nsmall();
105432-	nlarge = get_nlarge() > NLARGE ? NLARGE : get_nlarge();
105433-	*nptrs = nsmall + nlarge;
105434-	*ptrs = (void **)malloc(*nptrs * sizeof(void *));
105435-	expect_ptr_not_null(*ptrs, "Unexpected malloc() failure");
105436-
105437-	/* Allocate objects with a wide range of sizes. */
105438-	for (i = 0; i < nsmall; i++) {
105439-		sz = get_small_size(i);
105440-		(*ptrs)[i] = mallocx(sz, flags);
105441-		expect_ptr_not_null((*ptrs)[i],
105442-		    "Unexpected mallocx(%zu, %#x) failure", sz, flags);
105443-	}
105444-	for (i = 0; i < nlarge; i++) {
105445-		sz = get_large_size(i);
105446-		(*ptrs)[nsmall + i] = mallocx(sz, flags);
105447-		expect_ptr_not_null((*ptrs)[nsmall + i],
105448-		    "Unexpected mallocx(%zu, %#x) failure", sz, flags);
105449-	}
105450-
105451-	tsdn = tsdn_fetch();
105452-
105453-	/* Verify allocations. */
105454-	for (i = 0; i < *nptrs; i++) {
105455-		expect_zu_gt(ivsalloc(tsdn, (*ptrs)[i]), 0,
105456-		    "Allocation should have queryable size");
105457-	}
105458-}
105459-
105460-static void
105461-do_arena_reset_post(void **ptrs, unsigned nptrs, unsigned arena_ind) {
105462-	tsdn_t *tsdn;
105463-	unsigned i;
105464-
105465-	tsdn = tsdn_fetch();
105466-
105467-	if (have_background_thread) {
105468-		malloc_mutex_lock(tsdn,
105469-		    &background_thread_info_get(arena_ind)->mtx);
105470-	}
105471-	/* Verify allocations no longer exist. */
105472-	for (i = 0; i < nptrs; i++) {
105473-		expect_zu_eq(vsalloc(tsdn, ptrs[i]), 0,
105474-		    "Allocation should no longer exist");
105475-	}
105476-	if (have_background_thread) {
105477-		malloc_mutex_unlock(tsdn,
105478-		    &background_thread_info_get(arena_ind)->mtx);
105479-	}
105480-
105481-	free(ptrs);
105482-}
105483-
105484-static void
105485-do_arena_reset_destroy(const char *name, unsigned arena_ind) {
105486-	size_t mib[3];
105487-	size_t miblen;
105488-
105489-	miblen = sizeof(mib)/sizeof(size_t);
105490-	expect_d_eq(mallctlnametomib(name, mib, &miblen), 0,
105491-	    "Unexpected mallctlnametomib() failure");
105492-	mib[1] = (size_t)arena_ind;
105493-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
105494-	    "Unexpected mallctlbymib() failure");
105495-}
105496-
105497-static void
105498-do_arena_reset(unsigned arena_ind) {
105499-	do_arena_reset_destroy("arena.0.reset", arena_ind);
105500-}
105501-
105502-static void
105503-do_arena_destroy(unsigned arena_ind) {
105504-	do_arena_reset_destroy("arena.0.destroy", arena_ind);
105505-}
105506-
105507-TEST_BEGIN(test_arena_reset) {
105508-	unsigned arena_ind;
105509-	void **ptrs;
105510-	unsigned nptrs;
105511-
105512-	arena_ind = do_arena_create(NULL);
105513-	do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
105514-	do_arena_reset(arena_ind);
105515-	do_arena_reset_post(ptrs, nptrs, arena_ind);
105516-}
105517-TEST_END
105518-
105519-static bool
105520-arena_i_initialized(unsigned arena_ind, bool refresh) {
105521-	bool initialized;
105522-	size_t mib[3];
105523-	size_t miblen, sz;
105524-
105525-	if (refresh) {
105526-		uint64_t epoch = 1;
105527-		expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
105528-		    sizeof(epoch)), 0, "Unexpected mallctl() failure");
105529-	}
105530-
105531-	miblen = sizeof(mib)/sizeof(size_t);
105532-	expect_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
105533-	    "Unexpected mallctlnametomib() failure");
105534-	mib[1] = (size_t)arena_ind;
105535-	sz = sizeof(initialized);
105536-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&initialized, &sz, NULL,
105537-	    0), 0, "Unexpected mallctlbymib() failure");
105538-
105539-	return initialized;
105540-}
105541-
105542-TEST_BEGIN(test_arena_destroy_initial) {
105543-	expect_false(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
105544-	    "Destroyed arena stats should not be initialized");
105545-}
105546-TEST_END
105547-
105548-TEST_BEGIN(test_arena_destroy_hooks_default) {
105549-	unsigned arena_ind, arena_ind_another, arena_ind_prev;
105550-	void **ptrs;
105551-	unsigned nptrs;
105552-
105553-	arena_ind = do_arena_create(NULL);
105554-	do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
105555-
105556-	expect_false(arena_i_initialized(arena_ind, false),
105557-	    "Arena stats should not be initialized");
105558-	expect_true(arena_i_initialized(arena_ind, true),
105559-	    "Arena stats should be initialized");
105560-
105561-	/*
105562-	 * Create another arena before destroying one, to better verify arena
105563-	 * index reuse.
105564-	 */
105565-	arena_ind_another = do_arena_create(NULL);
105566-
105567-	do_arena_destroy(arena_ind);
105568-
105569-	expect_false(arena_i_initialized(arena_ind, true),
105570-	    "Arena stats should not be initialized");
105571-	expect_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
105572-	    "Destroyed arena stats should be initialized");
105573-
105574-	do_arena_reset_post(ptrs, nptrs, arena_ind);
105575-
105576-	arena_ind_prev = arena_ind;
105577-	arena_ind = do_arena_create(NULL);
105578-	do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
105579-	expect_u_eq(arena_ind, arena_ind_prev,
105580-	    "Arena index should have been recycled");
105581-	do_arena_destroy(arena_ind);
105582-	do_arena_reset_post(ptrs, nptrs, arena_ind);
105583-
105584-	do_arena_destroy(arena_ind_another);
105585-
105586-	/* Try arena.create with custom hooks. */
105587-	size_t sz = sizeof(extent_hooks_t *);
105588-	extent_hooks_t *a0_default_hooks;
105589-	expect_d_eq(mallctl("arena.0.extent_hooks", (void *)&a0_default_hooks,
105590-	    &sz, NULL, 0), 0, "Unexpected mallctlnametomib() failure");
105591-
105592-	/* Default impl, but wrapped as "customized". */
105593-	extent_hooks_t new_hooks = *a0_default_hooks;
105594-	extent_hooks_t *hook = &new_hooks;
105595-	sz = sizeof(unsigned);
105596-	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
105597-	    (void *)&hook, sizeof(void *)), 0,
105598-	    "Unexpected mallctl() failure");
105599-	do_arena_destroy(arena_ind);
105600-}
105601-TEST_END
105602-
105603-/*
105604- * Actually unmap extents, regardless of opt_retain, so that attempts to access
105605- * a destroyed arena's memory will segfault.
105606- */
105607-static bool
105608-extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size,
105609-    bool committed, unsigned arena_ind) {
105610-	TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, "
105611-	    "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ?
105612-	    "true" : "false", arena_ind);
105613-	expect_ptr_eq(extent_hooks, &hooks,
105614-	    "extent_hooks should be same as pointer used to set hooks");
105615-	expect_ptr_eq(extent_hooks->dalloc, extent_dalloc_unmap,
105616-	    "Wrong hook function");
105617-	called_dalloc = true;
105618-	if (!try_dalloc) {
105619-		return true;
105620-	}
105621-	did_dalloc = true;
105622-	if (!maps_coalesce && opt_retain) {
105623-		return true;
105624-	}
105625-	pages_unmap(addr, size);
105626-	return false;
105627-}
105628-
105629-static extent_hooks_t hooks_orig;
105630-
105631-static extent_hooks_t hooks_unmap = {
105632-	extent_alloc_hook,
105633-	extent_dalloc_unmap, /* dalloc */
105634-	extent_destroy_hook,
105635-	extent_commit_hook,
105636-	extent_decommit_hook,
105637-	extent_purge_lazy_hook,
105638-	extent_purge_forced_hook,
105639-	extent_split_hook,
105640-	extent_merge_hook
105641-};
105642-
105643-TEST_BEGIN(test_arena_destroy_hooks_unmap) {
105644-	unsigned arena_ind;
105645-	void **ptrs;
105646-	unsigned nptrs;
105647-
105648-	extent_hooks_prep();
105649-	if (maps_coalesce) {
105650-		try_decommit = false;
105651-	}
105652-	memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
105653-	memcpy(&hooks, &hooks_unmap, sizeof(extent_hooks_t));
105654-
105655-	did_alloc = false;
105656-	arena_ind = do_arena_create(&hooks);
105657-	do_arena_reset_pre(arena_ind, &ptrs, &nptrs);
105658-
105659-	expect_true(did_alloc, "Expected alloc");
105660-
105661-	expect_false(arena_i_initialized(arena_ind, false),
105662-	    "Arena stats should not be initialized");
105663-	expect_true(arena_i_initialized(arena_ind, true),
105664-	    "Arena stats should be initialized");
105665-
105666-	did_dalloc = false;
105667-	do_arena_destroy(arena_ind);
105668-	expect_true(did_dalloc, "Expected dalloc");
105669-
105670-	expect_false(arena_i_initialized(arena_ind, true),
105671-	    "Arena stats should not be initialized");
105672-	expect_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false),
105673-	    "Destroyed arena stats should be initialized");
105674-
105675-	do_arena_reset_post(ptrs, nptrs, arena_ind);
105676-
105677-	memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t));
105678-}
105679-TEST_END
105680-
105681-int
105682-main(void) {
105683-	return test(
105684-	    test_arena_reset,
105685-	    test_arena_destroy_initial,
105686-	    test_arena_destroy_hooks_default,
105687-	    test_arena_destroy_hooks_unmap);
105688-}
105689diff --git a/jemalloc/test/unit/arena_reset_prof.c b/jemalloc/test/unit/arena_reset_prof.c
105690deleted file mode 100644
105691index 38d8012..0000000
105692--- a/jemalloc/test/unit/arena_reset_prof.c
105693+++ /dev/null
105694@@ -1,4 +0,0 @@
105695-#include "test/jemalloc_test.h"
105696-#define ARENA_RESET_PROF_C_
105697-
105698-#include "arena_reset.c"
105699diff --git a/jemalloc/test/unit/arena_reset_prof.sh b/jemalloc/test/unit/arena_reset_prof.sh
105700deleted file mode 100644
105701index 041dc1c..0000000
105702--- a/jemalloc/test/unit/arena_reset_prof.sh
105703+++ /dev/null
105704@@ -1,3 +0,0 @@
105705-#!/bin/sh
105706-
105707-export MALLOC_CONF="prof:true,lg_prof_sample:0"
105708diff --git a/jemalloc/test/unit/atomic.c b/jemalloc/test/unit/atomic.c
105709deleted file mode 100644
105710index c2ec8c7..0000000
105711--- a/jemalloc/test/unit/atomic.c
105712+++ /dev/null
105713@@ -1,229 +0,0 @@
105714-#include "test/jemalloc_test.h"
105715-
105716-/*
105717- * We *almost* have consistent short names (e.g. "u32" for uint32_t, "b" for
105718- * bool, etc.).  The one exception is that the short name for void * is "p" in
105719- * some places and "ptr" in others.  In the long run it would be nice to unify
105720- * these, but in the short run we'll use this shim.
105721- */
105722-#define expect_p_eq expect_ptr_eq
105723-
105724-/*
105725- * t: the non-atomic type, like "uint32_t".
105726- * ta: the short name for the type, like "u32".
105727- * val[1,2,3]: Values of the given type.  The CAS tests use val2 for expected,
105728- * and val3 for desired.
105729- */
105730-
105731-#define DO_TESTS(t, ta, val1, val2, val3) do {				\
105732-	t val;								\
105733-	t expected;							\
105734-	bool success;							\
105735-	/* This (along with the load below) also tests ATOMIC_LOAD. */	\
105736-	atomic_##ta##_t atom = ATOMIC_INIT(val1);			\
105737-									\
105738-	/* ATOMIC_INIT and load. */					\
105739-	val = atomic_load_##ta(&atom, ATOMIC_RELAXED);			\
105740-	expect_##ta##_eq(val1, val, "Load or init failed");		\
105741-									\
105742-	/* Store. */							\
105743-	atomic_store_##ta(&atom, val1, ATOMIC_RELAXED);			\
105744-	atomic_store_##ta(&atom, val2, ATOMIC_RELAXED);			\
105745-	val = atomic_load_##ta(&atom, ATOMIC_RELAXED);			\
105746-	expect_##ta##_eq(val2, val, "Store failed");			\
105747-									\
105748-	/* Exchange. */							\
105749-	atomic_store_##ta(&atom, val1, ATOMIC_RELAXED);			\
105750-	val = atomic_exchange_##ta(&atom, val2, ATOMIC_RELAXED);	\
105751-	expect_##ta##_eq(val1, val, "Exchange returned invalid value");	\
105752-	val = atomic_load_##ta(&atom, ATOMIC_RELAXED);			\
105753-	expect_##ta##_eq(val2, val, "Exchange store invalid value");	\
105754-									\
105755-	/* 								\
105756-	 * Weak CAS.  Spurious failures are allowed, so we loop a few	\
105757-	 * times.							\
105758-	 */								\
105759-	atomic_store_##ta(&atom, val1, ATOMIC_RELAXED);			\
105760-	success = false;						\
105761-	for (int retry = 0; retry < 10 && !success; retry++) {		\
105762-		expected = val2;					\
105763-		success = atomic_compare_exchange_weak_##ta(&atom,	\
105764-		    &expected, val3, ATOMIC_RELAXED, ATOMIC_RELAXED);	\
105765-		expect_##ta##_eq(val1, expected, 			\
105766-		    "CAS should update expected");			\
105767-	}								\
105768-	expect_b_eq(val1 == val2, success,				\
105769-	    "Weak CAS did the wrong state update");			\
105770-	val = atomic_load_##ta(&atom, ATOMIC_RELAXED);			\
105771-	if (success) {							\
105772-		expect_##ta##_eq(val3, val,				\
105773-		    "Successful CAS should update atomic");		\
105774-	} else {							\
105775-		expect_##ta##_eq(val1, val,				\
105776-		    "Unsuccessful CAS should not update atomic");	\
105777-	}								\
105778-									\
105779-	/* Strong CAS. */						\
105780-	atomic_store_##ta(&atom, val1, ATOMIC_RELAXED);			\
105781-	expected = val2;						\
105782-	success = atomic_compare_exchange_strong_##ta(&atom, &expected,	\
105783-	    val3, ATOMIC_RELAXED, ATOMIC_RELAXED);			\
105784-	expect_b_eq(val1 == val2, success,				\
105785-	    "Strong CAS did the wrong state update");			\
105786-	val = atomic_load_##ta(&atom, ATOMIC_RELAXED);			\
105787-	if (success) {							\
105788-		expect_##ta##_eq(val3, val,				\
105789-		    "Successful CAS should update atomic");		\
105790-	} else {							\
105791-		expect_##ta##_eq(val1, val,				\
105792-		    "Unsuccessful CAS should not update atomic");	\
105793-	}								\
105794-									\
105795-									\
105796-} while (0)
105797-
105798-#define DO_INTEGER_TESTS(t, ta, val1, val2) do {			\
105799-	atomic_##ta##_t atom;						\
105800-	t val;								\
105801-									\
105802-	/* Fetch-add. */						\
105803-	atomic_store_##ta(&atom, val1, ATOMIC_RELAXED);			\
105804-	val = atomic_fetch_add_##ta(&atom, val2, ATOMIC_RELAXED);	\
105805-	expect_##ta##_eq(val1, val,					\
105806-	    "Fetch-add should return previous value");			\
105807-	val = atomic_load_##ta(&atom, ATOMIC_RELAXED);			\
105808-	expect_##ta##_eq(val1 + val2, val,				\
105809-	    "Fetch-add should update atomic");				\
105810-									\
105811-	/* Fetch-sub. */						\
105812-	atomic_store_##ta(&atom, val1, ATOMIC_RELAXED);			\
105813-	val = atomic_fetch_sub_##ta(&atom, val2, ATOMIC_RELAXED);	\
105814-	expect_##ta##_eq(val1, val,					\
105815-	    "Fetch-sub should return previous value");			\
105816-	val = atomic_load_##ta(&atom, ATOMIC_RELAXED);			\
105817-	expect_##ta##_eq(val1 - val2, val,				\
105818-	    "Fetch-sub should update atomic");				\
105819-									\
105820-	/* Fetch-and. */						\
105821-	atomic_store_##ta(&atom, val1, ATOMIC_RELAXED);			\
105822-	val = atomic_fetch_and_##ta(&atom, val2, ATOMIC_RELAXED);	\
105823-	expect_##ta##_eq(val1, val,					\
105824-	    "Fetch-and should return previous value");			\
105825-	val = atomic_load_##ta(&atom, ATOMIC_RELAXED);			\
105826-	expect_##ta##_eq(val1 & val2, val,				\
105827-	    "Fetch-and should update atomic");				\
105828-									\
105829-	/* Fetch-or. */							\
105830-	atomic_store_##ta(&atom, val1, ATOMIC_RELAXED);			\
105831-	val = atomic_fetch_or_##ta(&atom, val2, ATOMIC_RELAXED);	\
105832-	expect_##ta##_eq(val1, val,					\
105833-	    "Fetch-or should return previous value");			\
105834-	val = atomic_load_##ta(&atom, ATOMIC_RELAXED);			\
105835-	expect_##ta##_eq(val1 | val2, val,				\
105836-	    "Fetch-or should update atomic");				\
105837-									\
105838-	/* Fetch-xor. */						\
105839-	atomic_store_##ta(&atom, val1, ATOMIC_RELAXED);			\
105840-	val = atomic_fetch_xor_##ta(&atom, val2, ATOMIC_RELAXED);	\
105841-	expect_##ta##_eq(val1, val,					\
105842-	    "Fetch-xor should return previous value");			\
105843-	val = atomic_load_##ta(&atom, ATOMIC_RELAXED);			\
105844-	expect_##ta##_eq(val1 ^ val2, val,				\
105845-	    "Fetch-xor should update atomic");				\
105846-} while (0)
105847-
105848-#define TEST_STRUCT(t, ta)						\
105849-typedef struct {							\
105850-	t val1;								\
105851-	t val2;								\
105852-	t val3;								\
105853-} ta##_test_t;
105854-
105855-#define TEST_CASES(t) {							\
105856-	{(t)-1, (t)-1, (t)-2},						\
105857-	{(t)-1, (t) 0, (t)-2},						\
105858-	{(t)-1, (t) 1, (t)-2},						\
105859-									\
105860-	{(t) 0, (t)-1, (t)-2},						\
105861-	{(t) 0, (t) 0, (t)-2},						\
105862-	{(t) 0, (t) 1, (t)-2},						\
105863-									\
105864-	{(t) 1, (t)-1, (t)-2},						\
105865-	{(t) 1, (t) 0, (t)-2},						\
105866-	{(t) 1, (t) 1, (t)-2},						\
105867-									\
105868-	{(t)0, (t)-(1 << 22), (t)-2},					\
105869-	{(t)0, (t)(1 << 22), (t)-2},					\
105870-	{(t)(1 << 22), (t)-(1 << 22), (t)-2},				\
105871-	{(t)(1 << 22), (t)(1 << 22), (t)-2}				\
105872-}
105873-
105874-#define TEST_BODY(t, ta) do {						\
105875-	const ta##_test_t tests[] = TEST_CASES(t);			\
105876-	for (unsigned i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) {	\
105877-		ta##_test_t test = tests[i];				\
105878-		DO_TESTS(t, ta, test.val1, test.val2, test.val3);	\
105879-	}								\
105880-} while (0)
105881-
105882-#define INTEGER_TEST_BODY(t, ta) do {					\
105883-	const ta##_test_t tests[] = TEST_CASES(t);			\
105884-	for (unsigned i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) {	\
105885-		ta##_test_t test = tests[i];				\
105886-		DO_TESTS(t, ta, test.val1, test.val2, test.val3);	\
105887-		DO_INTEGER_TESTS(t, ta, test.val1, test.val2);		\
105888-	}								\
105889-} while (0)
105890-
105891-TEST_STRUCT(uint64_t, u64);
105892-TEST_BEGIN(test_atomic_u64) {
105893-#if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
105894-	test_skip("64-bit atomic operations not supported");
105895-#else
105896-	INTEGER_TEST_BODY(uint64_t, u64);
105897-#endif
105898-}
105899-TEST_END
105900-
105901-
105902-TEST_STRUCT(uint32_t, u32);
105903-TEST_BEGIN(test_atomic_u32) {
105904-	INTEGER_TEST_BODY(uint32_t, u32);
105905-}
105906-TEST_END
105907-
105908-TEST_STRUCT(void *, p);
105909-TEST_BEGIN(test_atomic_p) {
105910-	TEST_BODY(void *, p);
105911-}
105912-TEST_END
105913-
105914-TEST_STRUCT(size_t, zu);
105915-TEST_BEGIN(test_atomic_zu) {
105916-	INTEGER_TEST_BODY(size_t, zu);
105917-}
105918-TEST_END
105919-
105920-TEST_STRUCT(ssize_t, zd);
105921-TEST_BEGIN(test_atomic_zd) {
105922-	INTEGER_TEST_BODY(ssize_t, zd);
105923-}
105924-TEST_END
105925-
105926-
105927-TEST_STRUCT(unsigned, u);
105928-TEST_BEGIN(test_atomic_u) {
105929-	INTEGER_TEST_BODY(unsigned, u);
105930-}
105931-TEST_END
105932-
105933-int
105934-main(void) {
105935-	return test(
105936-	    test_atomic_u64,
105937-	    test_atomic_u32,
105938-	    test_atomic_p,
105939-	    test_atomic_zu,
105940-	    test_atomic_zd,
105941-	    test_atomic_u);
105942-}
105943diff --git a/jemalloc/test/unit/background_thread.c b/jemalloc/test/unit/background_thread.c
105944deleted file mode 100644
105945index c60010a..0000000
105946--- a/jemalloc/test/unit/background_thread.c
105947+++ /dev/null
105948@@ -1,118 +0,0 @@
105949-#include "test/jemalloc_test.h"
105950-
105951-#include "jemalloc/internal/util.h"
105952-
105953-static void
105954-test_switch_background_thread_ctl(bool new_val) {
105955-	bool e0, e1;
105956-	size_t sz = sizeof(bool);
105957-
105958-	e1 = new_val;
105959-	expect_d_eq(mallctl("background_thread", (void *)&e0, &sz,
105960-	    &e1, sz), 0, "Unexpected mallctl() failure");
105961-	expect_b_eq(e0, !e1,
105962-	    "background_thread should be %d before.\n", !e1);
105963-	if (e1) {
105964-		expect_zu_gt(n_background_threads, 0,
105965-		    "Number of background threads should be non zero.\n");
105966-	} else {
105967-		expect_zu_eq(n_background_threads, 0,
105968-		    "Number of background threads should be zero.\n");
105969-	}
105970-}
105971-
105972-static void
105973-test_repeat_background_thread_ctl(bool before) {
105974-	bool e0, e1;
105975-	size_t sz = sizeof(bool);
105976-
105977-	e1 = before;
105978-	expect_d_eq(mallctl("background_thread", (void *)&e0, &sz,
105979-	    &e1, sz), 0, "Unexpected mallctl() failure");
105980-	expect_b_eq(e0, before,
105981-	    "background_thread should be %d.\n", before);
105982-	if (e1) {
105983-		expect_zu_gt(n_background_threads, 0,
105984-		    "Number of background threads should be non zero.\n");
105985-	} else {
105986-		expect_zu_eq(n_background_threads, 0,
105987-		    "Number of background threads should be zero.\n");
105988-	}
105989-}
105990-
105991-TEST_BEGIN(test_background_thread_ctl) {
105992-	test_skip_if(!have_background_thread);
105993-
105994-	bool e0, e1;
105995-	size_t sz = sizeof(bool);
105996-
105997-	expect_d_eq(mallctl("opt.background_thread", (void *)&e0, &sz,
105998-	    NULL, 0), 0, "Unexpected mallctl() failure");
105999-	expect_d_eq(mallctl("background_thread", (void *)&e1, &sz,
106000-	    NULL, 0), 0, "Unexpected mallctl() failure");
106001-	expect_b_eq(e0, e1,
106002-	    "Default and opt.background_thread do not match.\n");
106003-	if (e0) {
106004-		test_switch_background_thread_ctl(false);
106005-	}
106006-	expect_zu_eq(n_background_threads, 0,
106007-	    "Number of background threads should be 0.\n");
106008-
106009-	for (unsigned i = 0; i < 4; i++) {
106010-		test_switch_background_thread_ctl(true);
106011-		test_repeat_background_thread_ctl(true);
106012-		test_repeat_background_thread_ctl(true);
106013-
106014-		test_switch_background_thread_ctl(false);
106015-		test_repeat_background_thread_ctl(false);
106016-		test_repeat_background_thread_ctl(false);
106017-	}
106018-}
106019-TEST_END
106020-
106021-TEST_BEGIN(test_background_thread_running) {
106022-	test_skip_if(!have_background_thread);
106023-	test_skip_if(!config_stats);
106024-
106025-#if defined(JEMALLOC_BACKGROUND_THREAD)
106026-	tsd_t *tsd = tsd_fetch();
106027-	background_thread_info_t *info = &background_thread_info[0];
106028-
106029-	test_repeat_background_thread_ctl(false);
106030-	test_switch_background_thread_ctl(true);
106031-	expect_b_eq(info->state, background_thread_started,
106032-	    "Background_thread did not start.\n");
106033-
106034-	nstime_t start;
106035-	nstime_init_update(&start);
106036-
106037-	bool ran = false;
106038-	while (true) {
106039-		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
106040-		if (info->tot_n_runs > 0) {
106041-			ran = true;
106042-		}
106043-		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
106044-		if (ran) {
106045-			break;
106046-		}
106047-
106048-		nstime_t now;
106049-		nstime_init_update(&now);
106050-		nstime_subtract(&now, &start);
106051-		expect_u64_lt(nstime_sec(&now), 1000,
106052-		    "Background threads did not run within 1000 seconds.");
106053-		sleep(1);
106054-	}
106055-	test_switch_background_thread_ctl(false);
106056-#endif
106057-}
106058-TEST_END
106059-
106060-int
106061-main(void) {
106062-	/* Background_thread creation tests reentrancy naturally. */
106063-	return test_no_reentrancy(
106064-	    test_background_thread_ctl,
106065-	    test_background_thread_running);
106066-}
106067diff --git a/jemalloc/test/unit/background_thread_enable.c b/jemalloc/test/unit/background_thread_enable.c
106068deleted file mode 100644
106069index 44034ac..0000000
106070--- a/jemalloc/test/unit/background_thread_enable.c
106071+++ /dev/null
106072@@ -1,96 +0,0 @@
106073-#include "test/jemalloc_test.h"
106074-
106075-const char *malloc_conf = "background_thread:false,narenas:1,max_background_threads:20";
106076-
106077-static unsigned
106078-max_test_narenas(void) {
106079-	/*
106080-	 * 10 here is somewhat arbitrary, except insofar as we want to ensure
106081-	 * that the number of background threads is smaller than the number of
106082-	 * arenas.  I'll ragequit long before we have to spin up 10 threads per
106083-	 * cpu to handle background purging, so this is a conservative
106084-	 * approximation.
106085-	 */
106086-	unsigned ret = 10 * ncpus;
106087-	/* Limit the max to avoid VM exhaustion on 32-bit systems. */
106088-	if (ret > 512) {
106089-		ret = 512;
106090-	}
106091-
106092-	return ret;
106093-}
106094-
106095-TEST_BEGIN(test_deferred) {
106096-	test_skip_if(!have_background_thread);
106097-
106098-	unsigned id;
106099-	size_t sz_u = sizeof(unsigned);
106100-
106101-	for (unsigned i = 0; i < max_test_narenas(); i++) {
106102-		expect_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0,
106103-		    "Failed to create arena");
106104-	}
106105-
106106-	bool enable = true;
106107-	size_t sz_b = sizeof(bool);
106108-	expect_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
106109-	    "Failed to enable background threads");
106110-	enable = false;
106111-	expect_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
106112-	    "Failed to disable background threads");
106113-}
106114-TEST_END
106115-
106116-TEST_BEGIN(test_max_background_threads) {
106117-	test_skip_if(!have_background_thread);
106118-
106119-	size_t max_n_thds;
106120-	size_t opt_max_n_thds;
106121-	size_t sz_m = sizeof(max_n_thds);
106122-	expect_d_eq(mallctl("opt.max_background_threads",
106123-	    &opt_max_n_thds, &sz_m, NULL, 0), 0,
106124-	    "Failed to get opt.max_background_threads");
106125-	expect_d_eq(mallctl("max_background_threads", &max_n_thds, &sz_m, NULL,
106126-	    0), 0, "Failed to get max background threads");
106127-	expect_zu_eq(opt_max_n_thds, max_n_thds,
106128-	    "max_background_threads and "
106129-	    "opt.max_background_threads should match");
106130-	expect_d_eq(mallctl("max_background_threads", NULL, NULL, &max_n_thds,
106131-	    sz_m), 0, "Failed to set max background threads");
106132-
106133-	unsigned id;
106134-	size_t sz_u = sizeof(unsigned);
106135-
106136-	for (unsigned i = 0; i < max_test_narenas(); i++) {
106137-		expect_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0,
106138-		    "Failed to create arena");
106139-	}
106140-
106141-	bool enable = true;
106142-	size_t sz_b = sizeof(bool);
106143-	expect_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0,
106144-	    "Failed to enable background threads");
106145-	expect_zu_eq(n_background_threads, max_n_thds,
106146-	    "Number of background threads should not change.\n");
106147-	size_t new_max_thds = max_n_thds - 1;
106148-	if (new_max_thds > 0) {
106149-		expect_d_eq(mallctl("max_background_threads", NULL, NULL,
106150-		    &new_max_thds, sz_m), 0,
106151-		    "Failed to set max background threads");
106152-		expect_zu_eq(n_background_threads, new_max_thds,
106153-		    "Number of background threads should decrease by 1.\n");
106154-	}
106155-	new_max_thds = 1;
106156-	expect_d_eq(mallctl("max_background_threads", NULL, NULL, &new_max_thds,
106157-	    sz_m), 0, "Failed to set max background threads");
106158-	expect_zu_eq(n_background_threads, new_max_thds,
106159-	    "Number of background threads should be 1.\n");
106160-}
106161-TEST_END
106162-
106163-int
106164-main(void) {
106165-	return test_no_reentrancy(
106166-		test_deferred,
106167-		test_max_background_threads);
106168-}
106169diff --git a/jemalloc/test/unit/base.c b/jemalloc/test/unit/base.c
106170deleted file mode 100644
106171index 15e04a8..0000000
106172--- a/jemalloc/test/unit/base.c
106173+++ /dev/null
106174@@ -1,265 +0,0 @@
106175-#include "test/jemalloc_test.h"
106176-
106177-#include "test/extent_hooks.h"
106178-
106179-static extent_hooks_t hooks_null = {
106180-	extent_alloc_hook,
106181-	NULL, /* dalloc */
106182-	NULL, /* destroy */
106183-	NULL, /* commit */
106184-	NULL, /* decommit */
106185-	NULL, /* purge_lazy */
106186-	NULL, /* purge_forced */
106187-	NULL, /* split */
106188-	NULL /* merge */
106189-};
106190-
106191-static extent_hooks_t hooks_not_null = {
106192-	extent_alloc_hook,
106193-	extent_dalloc_hook,
106194-	extent_destroy_hook,
106195-	NULL, /* commit */
106196-	extent_decommit_hook,
106197-	extent_purge_lazy_hook,
106198-	extent_purge_forced_hook,
106199-	NULL, /* split */
106200-	NULL /* merge */
106201-};
106202-
106203-TEST_BEGIN(test_base_hooks_default) {
106204-	base_t *base;
106205-	size_t allocated0, allocated1, resident, mapped, n_thp;
106206-
106207-	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
106208-	base = base_new(tsdn, 0,
106209-	    (extent_hooks_t *)&ehooks_default_extent_hooks,
106210-	    /* metadata_use_hooks */ true);
106211-
106212-	if (config_stats) {
106213-		base_stats_get(tsdn, base, &allocated0, &resident, &mapped,
106214-		    &n_thp);
106215-		expect_zu_ge(allocated0, sizeof(base_t),
106216-		    "Base header should count as allocated");
106217-		if (opt_metadata_thp == metadata_thp_always) {
106218-			expect_zu_gt(n_thp, 0,
106219-			    "Base should have 1 THP at least.");
106220-		}
106221-	}
106222-
106223-	expect_ptr_not_null(base_alloc(tsdn, base, 42, 1),
106224-	    "Unexpected base_alloc() failure");
106225-
106226-	if (config_stats) {
106227-		base_stats_get(tsdn, base, &allocated1, &resident, &mapped,
106228-		    &n_thp);
106229-		expect_zu_ge(allocated1 - allocated0, 42,
106230-		    "At least 42 bytes were allocated by base_alloc()");
106231-	}
106232-
106233-	base_delete(tsdn, base);
106234-}
106235-TEST_END
106236-
106237-TEST_BEGIN(test_base_hooks_null) {
106238-	extent_hooks_t hooks_orig;
106239-	base_t *base;
106240-	size_t allocated0, allocated1, resident, mapped, n_thp;
106241-
106242-	extent_hooks_prep();
106243-	try_dalloc = false;
106244-	try_destroy = true;
106245-	try_decommit = false;
106246-	try_purge_lazy = false;
106247-	try_purge_forced = false;
106248-	memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
106249-	memcpy(&hooks, &hooks_null, sizeof(extent_hooks_t));
106250-
106251-	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
106252-	base = base_new(tsdn, 0, &hooks, /* metadata_use_hooks */ true);
106253-	expect_ptr_not_null(base, "Unexpected base_new() failure");
106254-
106255-	if (config_stats) {
106256-		base_stats_get(tsdn, base, &allocated0, &resident, &mapped,
106257-		    &n_thp);
106258-		expect_zu_ge(allocated0, sizeof(base_t),
106259-		    "Base header should count as allocated");
106260-		if (opt_metadata_thp == metadata_thp_always) {
106261-			expect_zu_gt(n_thp, 0,
106262-			    "Base should have 1 THP at least.");
106263-		}
106264-	}
106265-
106266-	expect_ptr_not_null(base_alloc(tsdn, base, 42, 1),
106267-	    "Unexpected base_alloc() failure");
106268-
106269-	if (config_stats) {
106270-		base_stats_get(tsdn, base, &allocated1, &resident, &mapped,
106271-		    &n_thp);
106272-		expect_zu_ge(allocated1 - allocated0, 42,
106273-		    "At least 42 bytes were allocated by base_alloc()");
106274-	}
106275-
106276-	base_delete(tsdn, base);
106277-
106278-	memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t));
106279-}
106280-TEST_END
106281-
106282-TEST_BEGIN(test_base_hooks_not_null) {
106283-	extent_hooks_t hooks_orig;
106284-	base_t *base;
106285-	void *p, *q, *r, *r_exp;
106286-
106287-	extent_hooks_prep();
106288-	try_dalloc = false;
106289-	try_destroy = true;
106290-	try_decommit = false;
106291-	try_purge_lazy = false;
106292-	try_purge_forced = false;
106293-	memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t));
106294-	memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t));
106295-
106296-	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
106297-	did_alloc = false;
106298-	base = base_new(tsdn, 0, &hooks, /* metadata_use_hooks */ true);
106299-	expect_ptr_not_null(base, "Unexpected base_new() failure");
106300-	expect_true(did_alloc, "Expected alloc");
106301-
106302-	/*
106303-	 * Check for tight packing at specified alignment under simple
106304-	 * conditions.
106305-	 */
106306-	{
106307-		const size_t alignments[] = {
106308-			1,
106309-			QUANTUM,
106310-			QUANTUM << 1,
106311-			CACHELINE,
106312-			CACHELINE << 1,
106313-		};
106314-		unsigned i;
106315-
106316-		for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) {
106317-			size_t alignment = alignments[i];
106318-			size_t align_ceil = ALIGNMENT_CEILING(alignment,
106319-			    QUANTUM);
106320-			p = base_alloc(tsdn, base, 1, alignment);
106321-			expect_ptr_not_null(p,
106322-			    "Unexpected base_alloc() failure");
106323-			expect_ptr_eq(p,
106324-			    (void *)(ALIGNMENT_CEILING((uintptr_t)p,
106325-			    alignment)), "Expected quantum alignment");
106326-			q = base_alloc(tsdn, base, alignment, alignment);
106327-			expect_ptr_not_null(q,
106328-			    "Unexpected base_alloc() failure");
106329-			expect_ptr_eq((void *)((uintptr_t)p + align_ceil), q,
106330-			    "Minimal allocation should take up %zu bytes",
106331-			    align_ceil);
106332-			r = base_alloc(tsdn, base, 1, alignment);
106333-			expect_ptr_not_null(r,
106334-			    "Unexpected base_alloc() failure");
106335-			expect_ptr_eq((void *)((uintptr_t)q + align_ceil), r,
106336-			    "Minimal allocation should take up %zu bytes",
106337-			    align_ceil);
106338-		}
106339-	}
106340-
106341-	/*
106342-	 * Allocate an object that cannot fit in the first block, then verify
106343-	 * that the first block's remaining space is considered for subsequent
106344-	 * allocation.
106345-	 */
106346-	expect_zu_ge(edata_bsize_get(&base->blocks->edata), QUANTUM,
106347-	    "Remainder insufficient for test");
106348-	/* Use up all but one quantum of block. */
106349-	while (edata_bsize_get(&base->blocks->edata) > QUANTUM) {
106350-		p = base_alloc(tsdn, base, QUANTUM, QUANTUM);
106351-		expect_ptr_not_null(p, "Unexpected base_alloc() failure");
106352-	}
106353-	r_exp = edata_addr_get(&base->blocks->edata);
106354-	expect_zu_eq(base->extent_sn_next, 1, "One extant block expected");
106355-	q = base_alloc(tsdn, base, QUANTUM + 1, QUANTUM);
106356-	expect_ptr_not_null(q, "Unexpected base_alloc() failure");
106357-	expect_ptr_ne(q, r_exp, "Expected allocation from new block");
106358-	expect_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected");
106359-	r = base_alloc(tsdn, base, QUANTUM, QUANTUM);
106360-	expect_ptr_not_null(r, "Unexpected base_alloc() failure");
106361-	expect_ptr_eq(r, r_exp, "Expected allocation from first block");
106362-	expect_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected");
106363-
106364-	/*
106365-	 * Check for proper alignment support when normal blocks are too small.
106366-	 */
106367-	{
106368-		const size_t alignments[] = {
106369-			HUGEPAGE,
106370-			HUGEPAGE << 1
106371-		};
106372-		unsigned i;
106373-
106374-		for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) {
106375-			size_t alignment = alignments[i];
106376-			p = base_alloc(tsdn, base, QUANTUM, alignment);
106377-			expect_ptr_not_null(p,
106378-			    "Unexpected base_alloc() failure");
106379-			expect_ptr_eq(p,
106380-			    (void *)(ALIGNMENT_CEILING((uintptr_t)p,
106381-			    alignment)), "Expected %zu-byte alignment",
106382-			    alignment);
106383-		}
106384-	}
106385-
106386-	called_dalloc = called_destroy = called_decommit = called_purge_lazy =
106387-	    called_purge_forced = false;
106388-	base_delete(tsdn, base);
106389-	expect_true(called_dalloc, "Expected dalloc call");
106390-	expect_true(!called_destroy, "Unexpected destroy call");
106391-	expect_true(called_decommit, "Expected decommit call");
106392-	expect_true(called_purge_lazy, "Expected purge_lazy call");
106393-	expect_true(called_purge_forced, "Expected purge_forced call");
106394-
106395-	try_dalloc = true;
106396-	try_destroy = true;
106397-	try_decommit = true;
106398-	try_purge_lazy = true;
106399-	try_purge_forced = true;
106400-	memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t));
106401-}
106402-TEST_END
106403-
106404-TEST_BEGIN(test_base_ehooks_get_for_metadata_default_hook) {
106405-	extent_hooks_prep();
106406-	memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t));
106407-	base_t *base;
106408-	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
106409-	base = base_new(tsdn, 0, &hooks, /* metadata_use_hooks */ false);
106410-	ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
106411-	expect_true(ehooks_are_default(ehooks),
106412-		"Expected default extent hook functions pointer");
106413-	base_delete(tsdn, base);
106414-}
106415-TEST_END
106416-
106417-
106418-TEST_BEGIN(test_base_ehooks_get_for_metadata_custom_hook) {
106419-	extent_hooks_prep();
106420-	memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t));
106421-	base_t *base;
106422-	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
106423-	base = base_new(tsdn, 0, &hooks, /* metadata_use_hooks */ true);
106424-	ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
106425-	expect_ptr_eq(&hooks, ehooks_get_extent_hooks_ptr(ehooks),
106426-		"Expected user-specified extent hook functions pointer");
106427-	base_delete(tsdn, base);
106428-}
106429-TEST_END
106430-
106431-int
106432-main(void) {
106433-	return test(
106434-	    test_base_hooks_default,
106435-	    test_base_hooks_null,
106436-	    test_base_hooks_not_null,
106437-	    test_base_ehooks_get_for_metadata_default_hook,
106438-	    test_base_ehooks_get_for_metadata_custom_hook);
106439-}
106440diff --git a/jemalloc/test/unit/batch_alloc.c b/jemalloc/test/unit/batch_alloc.c
106441deleted file mode 100644
106442index 901c52b..0000000
106443--- a/jemalloc/test/unit/batch_alloc.c
106444+++ /dev/null
106445@@ -1,189 +0,0 @@
106446-#include "test/jemalloc_test.h"
106447-
106448-#define BATCH_MAX ((1U << 16) + 1024)
106449-static void *global_ptrs[BATCH_MAX];
106450-
106451-#define PAGE_ALIGNED(ptr) (((uintptr_t)ptr & PAGE_MASK) == 0)
106452-
106453-static void
106454-verify_batch_basic(tsd_t *tsd, void **ptrs, size_t batch, size_t usize,
106455-    bool zero) {
106456-	for (size_t i = 0; i < batch; ++i) {
106457-		void *p = ptrs[i];
106458-		expect_zu_eq(isalloc(tsd_tsdn(tsd), p), usize, "");
106459-		if (zero) {
106460-			for (size_t k = 0; k < usize; ++k) {
106461-				expect_true(*((unsigned char *)p + k) == 0, "");
106462-			}
106463-		}
106464-	}
106465-}
106466-
106467-static void
106468-verify_batch_locality(tsd_t *tsd, void **ptrs, size_t batch, size_t usize,
106469-    arena_t *arena, unsigned nregs) {
106470-	if (config_prof && opt_prof) {
106471-		/*
106472-		 * Checking batch locality when prof is on is feasible but
106473-		 * complicated, while checking the non-prof case suffices for
106474-		 * unit-test purposes.
106475-		 */
106476-		return;
106477-	}
106478-	for (size_t i = 0, j = 0; i < batch; ++i, ++j) {
106479-		if (j == nregs) {
106480-			j = 0;
106481-		}
106482-		if (j == 0 && batch - i < nregs) {
106483-			break;
106484-		}
106485-		void *p = ptrs[i];
106486-		expect_ptr_eq(iaalloc(tsd_tsdn(tsd), p), arena, "");
106487-		if (j == 0) {
106488-			expect_true(PAGE_ALIGNED(p), "");
106489-			continue;
106490-		}
106491-		assert(i > 0);
106492-		void *q = ptrs[i - 1];
106493-		expect_true((uintptr_t)p > (uintptr_t)q
106494-		    && (size_t)((uintptr_t)p - (uintptr_t)q) == usize, "");
106495-	}
106496-}
106497-
106498-static void
106499-release_batch(void **ptrs, size_t batch, size_t size) {
106500-	for (size_t i = 0; i < batch; ++i) {
106501-		sdallocx(ptrs[i], size, 0);
106502-	}
106503-}
106504-
106505-typedef struct batch_alloc_packet_s batch_alloc_packet_t;
106506-struct batch_alloc_packet_s {
106507-	void **ptrs;
106508-	size_t num;
106509-	size_t size;
106510-	int flags;
106511-};
106512-
106513-static size_t
106514-batch_alloc_wrapper(void **ptrs, size_t num, size_t size, int flags) {
106515-	batch_alloc_packet_t batch_alloc_packet = {ptrs, num, size, flags};
106516-	size_t filled;
106517-	size_t len = sizeof(size_t);
106518-	assert_d_eq(mallctl("experimental.batch_alloc", &filled, &len,
106519-	    &batch_alloc_packet, sizeof(batch_alloc_packet)), 0, "");
106520-	return filled;
106521-}
106522-
106523-static void
106524-test_wrapper(size_t size, size_t alignment, bool zero, unsigned arena_flag) {
106525-	tsd_t *tsd = tsd_fetch();
106526-	assert(tsd != NULL);
106527-	const size_t usize =
106528-	    (alignment != 0 ? sz_sa2u(size, alignment) : sz_s2u(size));
106529-	const szind_t ind = sz_size2index(usize);
106530-	const bin_info_t *bin_info = &bin_infos[ind];
106531-	const unsigned nregs = bin_info->nregs;
106532-	assert(nregs > 0);
106533-	arena_t *arena;
106534-	if (arena_flag != 0) {
106535-		arena = arena_get(tsd_tsdn(tsd), MALLOCX_ARENA_GET(arena_flag),
106536-		    false);
106537-	} else {
106538-		arena = arena_choose(tsd, NULL);
106539-	}
106540-	assert(arena != NULL);
106541-	int flags = arena_flag;
106542-	if (alignment != 0) {
106543-		flags |= MALLOCX_ALIGN(alignment);
106544-	}
106545-	if (zero) {
106546-		flags |= MALLOCX_ZERO;
106547-	}
106548-
106549-	/*
106550-	 * Allocate for the purpose of bootstrapping arena_tdata, so that the
106551-	 * change in bin stats won't contaminate the stats to be verified below.
106552-	 */
106553-	void *p = mallocx(size, flags | MALLOCX_TCACHE_NONE);
106554-
106555-	for (size_t i = 0; i < 4; ++i) {
106556-		size_t base = 0;
106557-		if (i == 1) {
106558-			base = nregs;
106559-		} else if (i == 2) {
106560-			base = nregs * 2;
106561-		} else if (i == 3) {
106562-			base = (1 << 16);
106563-		}
106564-		for (int j = -1; j <= 1; ++j) {
106565-			if (base == 0 && j == -1) {
106566-				continue;
106567-			}
106568-			size_t batch = base + (size_t)j;
106569-			assert(batch < BATCH_MAX);
106570-			size_t filled = batch_alloc_wrapper(global_ptrs, batch,
106571-			    size, flags);
106572-			assert_zu_eq(filled, batch, "");
106573-			verify_batch_basic(tsd, global_ptrs, batch, usize,
106574-			    zero);
106575-			verify_batch_locality(tsd, global_ptrs, batch, usize,
106576-			    arena, nregs);
106577-			release_batch(global_ptrs, batch, usize);
106578-		}
106579-	}
106580-
106581-	free(p);
106582-}
106583-
106584-TEST_BEGIN(test_batch_alloc) {
106585-	test_wrapper(11, 0, false, 0);
106586-}
106587-TEST_END
106588-
106589-TEST_BEGIN(test_batch_alloc_zero) {
106590-	test_wrapper(11, 0, true, 0);
106591-}
106592-TEST_END
106593-
106594-TEST_BEGIN(test_batch_alloc_aligned) {
106595-	test_wrapper(7, 16, false, 0);
106596-}
106597-TEST_END
106598-
106599-TEST_BEGIN(test_batch_alloc_manual_arena) {
106600-	unsigned arena_ind;
106601-	size_t len_unsigned = sizeof(unsigned);
106602-	assert_d_eq(mallctl("arenas.create", &arena_ind, &len_unsigned, NULL,
106603-	    0), 0, "");
106604-	test_wrapper(11, 0, false, MALLOCX_ARENA(arena_ind));
106605-}
106606-TEST_END
106607-
106608-TEST_BEGIN(test_batch_alloc_large) {
106609-	size_t size = SC_LARGE_MINCLASS;
106610-	for (size_t batch = 0; batch < 4; ++batch) {
106611-		assert(batch < BATCH_MAX);
106612-		size_t filled = batch_alloc(global_ptrs, batch, size, 0);
106613-		assert_zu_eq(filled, batch, "");
106614-		release_batch(global_ptrs, batch, size);
106615-	}
106616-	size = tcache_maxclass + 1;
106617-	for (size_t batch = 0; batch < 4; ++batch) {
106618-		assert(batch < BATCH_MAX);
106619-		size_t filled = batch_alloc(global_ptrs, batch, size, 0);
106620-		assert_zu_eq(filled, batch, "");
106621-		release_batch(global_ptrs, batch, size);
106622-	}
106623-}
106624-TEST_END
106625-
106626-int
106627-main(void) {
106628-	return test(
106629-	    test_batch_alloc,
106630-	    test_batch_alloc_zero,
106631-	    test_batch_alloc_aligned,
106632-	    test_batch_alloc_manual_arena,
106633-	    test_batch_alloc_large);
106634-}
106635diff --git a/jemalloc/test/unit/batch_alloc.sh b/jemalloc/test/unit/batch_alloc.sh
106636deleted file mode 100644
106637index 9d81010..0000000
106638--- a/jemalloc/test/unit/batch_alloc.sh
106639+++ /dev/null
106640@@ -1,3 +0,0 @@
106641-#!/bin/sh
106642-
106643-export MALLOC_CONF="tcache_gc_incr_bytes:2147483648"
106644diff --git a/jemalloc/test/unit/batch_alloc_prof.c b/jemalloc/test/unit/batch_alloc_prof.c
106645deleted file mode 100644
106646index ef64458..0000000
106647--- a/jemalloc/test/unit/batch_alloc_prof.c
106648+++ /dev/null
106649@@ -1 +0,0 @@
106650-#include "batch_alloc.c"
106651diff --git a/jemalloc/test/unit/batch_alloc_prof.sh b/jemalloc/test/unit/batch_alloc_prof.sh
106652deleted file mode 100644
106653index a2697a6..0000000
106654--- a/jemalloc/test/unit/batch_alloc_prof.sh
106655+++ /dev/null
106656@@ -1,3 +0,0 @@
106657-#!/bin/sh
106658-
106659-export MALLOC_CONF="prof:true,lg_prof_sample:14"
106660diff --git a/jemalloc/test/unit/binshard.c b/jemalloc/test/unit/binshard.c
106661deleted file mode 100644
106662index 040ea54..0000000
106663--- a/jemalloc/test/unit/binshard.c
106664+++ /dev/null
106665@@ -1,154 +0,0 @@
106666-#include "test/jemalloc_test.h"
106667-
106668-/* Config -- "narenas:1,bin_shards:1-160:16|129-512:4|256-256:8" */
106669-
106670-#define NTHREADS 16
106671-#define REMOTE_NALLOC 256
106672-
106673-static void *
106674-thd_producer(void *varg) {
106675-	void **mem = varg;
106676-	unsigned arena, i;
106677-	size_t sz;
106678-
106679-	sz = sizeof(arena);
106680-	/* Remote arena. */
106681-	expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
106682-	    "Unexpected mallctl() failure");
106683-	for (i = 0; i < REMOTE_NALLOC / 2; i++) {
106684-		mem[i] = mallocx(1, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena));
106685-	}
106686-
106687-	/* Remote bin. */
106688-	for (; i < REMOTE_NALLOC; i++) {
106689-		mem[i] = mallocx(1, MALLOCX_TCACHE_NONE | MALLOCX_ARENA(0));
106690-	}
106691-
106692-	return NULL;
106693-}
106694-
106695-TEST_BEGIN(test_producer_consumer) {
106696-	thd_t thds[NTHREADS];
106697-	void *mem[NTHREADS][REMOTE_NALLOC];
106698-	unsigned i;
106699-
106700-	/* Create producer threads to allocate. */
106701-	for (i = 0; i < NTHREADS; i++) {
106702-		thd_create(&thds[i], thd_producer, mem[i]);
106703-	}
106704-	for (i = 0; i < NTHREADS; i++) {
106705-		thd_join(thds[i], NULL);
106706-	}
106707-	/* Remote deallocation by the current thread. */
106708-	for (i = 0; i < NTHREADS; i++) {
106709-		for (unsigned j = 0; j < REMOTE_NALLOC; j++) {
106710-			expect_ptr_not_null(mem[i][j],
106711-			    "Unexpected remote allocation failure");
106712-			dallocx(mem[i][j], 0);
106713-		}
106714-	}
106715-}
106716-TEST_END
106717-
106718-static void *
106719-thd_start(void *varg) {
106720-	void *ptr, *ptr2;
106721-	edata_t *edata;
106722-	unsigned shard1, shard2;
106723-
106724-	tsdn_t *tsdn = tsdn_fetch();
106725-	/* Try triggering allocations from sharded bins. */
106726-	for (unsigned i = 0; i < 1024; i++) {
106727-		ptr = mallocx(1, MALLOCX_TCACHE_NONE);
106728-		ptr2 = mallocx(129, MALLOCX_TCACHE_NONE);
106729-
106730-		edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
106731-		shard1 = edata_binshard_get(edata);
106732-		dallocx(ptr, 0);
106733-		expect_u_lt(shard1, 16, "Unexpected bin shard used");
106734-
106735-		edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr2);
106736-		shard2 = edata_binshard_get(edata);
106737-		dallocx(ptr2, 0);
106738-		expect_u_lt(shard2, 4, "Unexpected bin shard used");
106739-
106740-		if (shard1 > 0 || shard2 > 0) {
106741-			/* Triggered sharded bin usage. */
106742-			return (void *)(uintptr_t)shard1;
106743-		}
106744-	}
106745-
106746-	return NULL;
106747-}
106748-
106749-TEST_BEGIN(test_bin_shard_mt) {
106750-	test_skip_if(have_percpu_arena &&
106751-	    PERCPU_ARENA_ENABLED(opt_percpu_arena));
106752-
106753-	thd_t thds[NTHREADS];
106754-	unsigned i;
106755-	for (i = 0; i < NTHREADS; i++) {
106756-		thd_create(&thds[i], thd_start, NULL);
106757-	}
106758-	bool sharded = false;
106759-	for (i = 0; i < NTHREADS; i++) {
106760-		void *ret;
106761-		thd_join(thds[i], &ret);
106762-		if (ret != NULL) {
106763-			sharded = true;
106764-		}
106765-	}
106766-	expect_b_eq(sharded, true, "Did not find sharded bins");
106767-}
106768-TEST_END
106769-
106770-TEST_BEGIN(test_bin_shard) {
106771-	unsigned nbins, i;
106772-	size_t mib[4], mib2[4];
106773-	size_t miblen, miblen2, len;
106774-
106775-	len = sizeof(nbins);
106776-	expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
106777-	    "Unexpected mallctl() failure");
106778-
106779-	miblen = 4;
106780-	expect_d_eq(mallctlnametomib("arenas.bin.0.nshards", mib, &miblen), 0,
106781-	    "Unexpected mallctlnametomib() failure");
106782-	miblen2 = 4;
106783-	expect_d_eq(mallctlnametomib("arenas.bin.0.size", mib2, &miblen2), 0,
106784-	    "Unexpected mallctlnametomib() failure");
106785-
106786-	for (i = 0; i < nbins; i++) {
106787-		uint32_t nshards;
106788-		size_t size, sz1, sz2;
106789-
106790-		mib[2] = i;
106791-		sz1 = sizeof(nshards);
106792-		expect_d_eq(mallctlbymib(mib, miblen, (void *)&nshards, &sz1,
106793-		    NULL, 0), 0, "Unexpected mallctlbymib() failure");
106794-
106795-		mib2[2] = i;
106796-		sz2 = sizeof(size);
106797-		expect_d_eq(mallctlbymib(mib2, miblen2, (void *)&size, &sz2,
106798-		    NULL, 0), 0, "Unexpected mallctlbymib() failure");
106799-
106800-		if (size >= 1 && size <= 128) {
106801-			expect_u_eq(nshards, 16, "Unexpected nshards");
106802-		} else if (size == 256) {
106803-			expect_u_eq(nshards, 8, "Unexpected nshards");
106804-		} else if (size > 128 && size <= 512) {
106805-			expect_u_eq(nshards, 4, "Unexpected nshards");
106806-		} else {
106807-			expect_u_eq(nshards, 1, "Unexpected nshards");
106808-		}
106809-	}
106810-}
106811-TEST_END
106812-
106813-int
106814-main(void) {
106815-	return test_no_reentrancy(
106816-	    test_bin_shard,
106817-	    test_bin_shard_mt,
106818-	    test_producer_consumer);
106819-}
106820diff --git a/jemalloc/test/unit/binshard.sh b/jemalloc/test/unit/binshard.sh
106821deleted file mode 100644
106822index c1d58c8..0000000
106823--- a/jemalloc/test/unit/binshard.sh
106824+++ /dev/null
106825@@ -1,3 +0,0 @@
106826-#!/bin/sh
106827-
106828-export MALLOC_CONF="narenas:1,bin_shards:1-160:16|129-512:4|256-256:8"
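
The MALLOC_CONF above drives the shard counts that the deleted test_bin_shard checks: `bin_shards:1-160:16|129-512:4|256-256:8` gives 16 shards to size classes up to 128 bytes, 4 shards to the 129-512 byte classes, and 8 shards to the 256-byte class, which implies that later entries take precedence where ranges overlap. A minimal sketch of that mapping, restating the test's if/else chain rather than jemalloc's config parser:

#include <stddef.h>

/*
 * Expected shard count per bin size under the MALLOC_CONF above,
 * as asserted by the deleted test_bin_shard (illustrative only).
 */
static unsigned
expected_nshards(size_t size) {
	if (size == 256) {
		return 8;	/* 256-256:8 overrides 129-512:4. */
	}
	if (size >= 1 && size <= 128) {
		return 16;	/* Covered only by 1-160:16 below 129 bytes. */
	}
	if (size > 128 && size <= 512) {
		return 4;	/* 129-512:4 overrides 1-160:16 up to 160 bytes. */
	}
	return 1;		/* Remaining size classes keep a single shard. */
}
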
106829diff --git a/jemalloc/test/unit/bit_util.c b/jemalloc/test/unit/bit_util.c
106830deleted file mode 100644
106831index 7d31b21..0000000
106832--- a/jemalloc/test/unit/bit_util.c
106833+++ /dev/null
106834@@ -1,307 +0,0 @@
106835-#include "test/jemalloc_test.h"
106836-
106837-#include "jemalloc/internal/bit_util.h"
106838-
106839-#define TEST_POW2_CEIL(t, suf, pri) do {				\
106840-	unsigned i, pow2;						\
106841-	t x;								\
106842-									\
106843-	expect_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result");	\
106844-									\
106845-	for (i = 0; i < sizeof(t) * 8; i++) {				\
106846-		expect_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1)	\
106847-		    << i, "Unexpected result");				\
106848-	}								\
106849-									\
106850-	for (i = 2; i < sizeof(t) * 8; i++) {				\
106851-		expect_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1),	\
106852-		    ((t)1) << i, "Unexpected result");			\
106853-	}								\
106854-									\
106855-	for (i = 0; i < sizeof(t) * 8 - 1; i++) {			\
106856-		expect_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1),	\
106857-		    ((t)1) << (i+1), "Unexpected result");		\
106858-	}								\
106859-									\
106860-	for (pow2 = 1; pow2 < 25; pow2++) {				\
106861-		for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2;	\
106862-		    x++) {						\
106863-			expect_##suf##_eq(pow2_ceil_##suf(x),		\
106864-			    ((t)1) << pow2,				\
106865-			    "Unexpected result, x=%"pri, x);		\
106866-		}							\
106867-	}								\
106868-} while (0)
106869-
106870-TEST_BEGIN(test_pow2_ceil_u64) {
106871-	TEST_POW2_CEIL(uint64_t, u64, FMTu64);
106872-}
106873-TEST_END
106874-
106875-TEST_BEGIN(test_pow2_ceil_u32) {
106876-	TEST_POW2_CEIL(uint32_t, u32, FMTu32);
106877-}
106878-TEST_END
106879-
106880-TEST_BEGIN(test_pow2_ceil_zu) {
106881-	TEST_POW2_CEIL(size_t, zu, "zu");
106882-}
106883-TEST_END
106884-
106885-void
106886-expect_lg_ceil_range(size_t input, unsigned answer) {
106887-	if (input == 1) {
106888-		expect_u_eq(0, answer, "Got %u as lg_ceil of 1", answer);
106889-		return;
106890-	}
106891-	expect_zu_le(input, (ZU(1) << answer),
106892-	    "Got %u as lg_ceil of %zu", answer, input);
106893-	expect_zu_gt(input, (ZU(1) << (answer - 1)),
106894-	    "Got %u as lg_ceil of %zu", answer, input);
106895-}
106896-
106897-void
106898-expect_lg_floor_range(size_t input, unsigned answer) {
106899-	if (input == 1) {
106900-		expect_u_eq(0, answer, "Got %u as lg_floor of 1", answer);
106901-		return;
106902-	}
106903-	expect_zu_ge(input, (ZU(1) << answer),
106904-	    "Got %u as lg_floor of %zu", answer, input);
106905-	expect_zu_lt(input, (ZU(1) << (answer + 1)),
106906-	    "Got %u as lg_floor of %zu", answer, input);
106907-}
106908-
106909-TEST_BEGIN(test_lg_ceil_floor) {
106910-	for (size_t i = 1; i < 10 * 1000 * 1000; i++) {
106911-		expect_lg_ceil_range(i, lg_ceil(i));
106912-		expect_lg_ceil_range(i, LG_CEIL(i));
106913-		expect_lg_floor_range(i, lg_floor(i));
106914-		expect_lg_floor_range(i, LG_FLOOR(i));
106915-	}
106916-	for (int i = 10; i < 8 * (1 << LG_SIZEOF_PTR) - 5; i++) {
106917-		for (size_t j = 0; j < (1 << 4); j++) {
106918-			size_t num1 = ((size_t)1 << i)
106919-			    - j * ((size_t)1 << (i - 4));
106920-			size_t num2 = ((size_t)1 << i)
106921-			    + j * ((size_t)1 << (i - 4));
106922-			expect_zu_ne(num1, 0, "Invalid lg argument");
106923-			expect_zu_ne(num2, 0, "Invalid lg argument");
106924-			expect_lg_ceil_range(num1, lg_ceil(num1));
106925-			expect_lg_ceil_range(num1, LG_CEIL(num1));
106926-			expect_lg_ceil_range(num2, lg_ceil(num2));
106927-			expect_lg_ceil_range(num2, LG_CEIL(num2));
106928-
106929-			expect_lg_floor_range(num1, lg_floor(num1));
106930-			expect_lg_floor_range(num1, LG_FLOOR(num1));
106931-			expect_lg_floor_range(num2, lg_floor(num2));
106932-			expect_lg_floor_range(num2, LG_FLOOR(num2));
106933-		}
106934-	}
106935-}
106936-TEST_END
106937-
106938-#define TEST_FFS(t, suf, test_suf, pri) do {				\
106939-	for (unsigned i = 0; i < sizeof(t) * 8; i++) {			\
106940-		for (unsigned j = 0; j <= i; j++) {			\
106941-			for (unsigned k = 0; k <= j; k++) {		\
106942-				t x = (t)1 << i;			\
106943-				x |= (t)1 << j;				\
106944-				x |= (t)1 << k;				\
106945-				expect_##test_suf##_eq(ffs_##suf(x), k,	\
106946-				    "Unexpected result, x=%"pri, x);	\
106947-			}						\
106948-		}							\
106949-	}								\
106950-} while(0)
106951-
106952-TEST_BEGIN(test_ffs_u) {
106953-	TEST_FFS(unsigned, u, u,"u");
106954-}
106955-TEST_END
106956-
106957-TEST_BEGIN(test_ffs_lu) {
106958-	TEST_FFS(unsigned long, lu, lu, "lu");
106959-}
106960-TEST_END
106961-
106962-TEST_BEGIN(test_ffs_llu) {
106963-	TEST_FFS(unsigned long long, llu, qd, "llu");
106964-}
106965-TEST_END
106966-
106967-TEST_BEGIN(test_ffs_u32) {
106968-	TEST_FFS(uint32_t, u32, u32, FMTu32);
106969-}
106970-TEST_END
106971-
106972-TEST_BEGIN(test_ffs_u64) {
106973-	TEST_FFS(uint64_t, u64, u64, FMTu64);
106974-}
106975-TEST_END
106976-
106977-TEST_BEGIN(test_ffs_zu) {
106978-	TEST_FFS(size_t, zu, zu, "zu");
106979-}
106980-TEST_END
106981-
106982-#define TEST_FLS(t, suf, test_suf, pri) do {				\
106983-	for (unsigned i = 0; i < sizeof(t) * 8; i++) {			\
106984-		for (unsigned j = 0; j <= i; j++) {			\
106985-			for (unsigned k = 0; k <= j; k++) {		\
106986-				t x = (t)1 << i;			\
106987-				x |= (t)1 << j;				\
106988-				x |= (t)1 << k;				\
106989-				expect_##test_suf##_eq(fls_##suf(x), i,	\
106990-				    "Unexpected result, x=%"pri, x);	\
106991-			}						\
106992-		}							\
106993-	}								\
106994-} while(0)
106995-
106996-TEST_BEGIN(test_fls_u) {
106997-	TEST_FLS(unsigned, u, u,"u");
106998-}
106999-TEST_END
107000-
107001-TEST_BEGIN(test_fls_lu) {
107002-	TEST_FLS(unsigned long, lu, lu, "lu");
107003-}
107004-TEST_END
107005-
107006-TEST_BEGIN(test_fls_llu) {
107007-	TEST_FLS(unsigned long long, llu, qd, "llu");
107008-}
107009-TEST_END
107010-
107011-TEST_BEGIN(test_fls_u32) {
107012-	TEST_FLS(uint32_t, u32, u32, FMTu32);
107013-}
107014-TEST_END
107015-
107016-TEST_BEGIN(test_fls_u64) {
107017-	TEST_FLS(uint64_t, u64, u64, FMTu64);
107018-}
107019-TEST_END
107020-
107021-TEST_BEGIN(test_fls_zu) {
107022-	TEST_FLS(size_t, zu, zu, "zu");
107023-}
107024-TEST_END
107025-
107026-TEST_BEGIN(test_fls_u_slow) {
107027-	TEST_FLS(unsigned, u_slow, u,"u");
107028-}
107029-TEST_END
107030-
107031-TEST_BEGIN(test_fls_lu_slow) {
107032-	TEST_FLS(unsigned long, lu_slow, lu, "lu");
107033-}
107034-TEST_END
107035-
107036-TEST_BEGIN(test_fls_llu_slow) {
107037-	TEST_FLS(unsigned long long, llu_slow, qd, "llu");
107038-}
107039-TEST_END
107040-
107041-static unsigned
107042-popcount_byte(unsigned byte) {
107043-	int count = 0;
107044-	for (int i = 0; i < 8; i++) {
107045-		if ((byte & (1 << i)) != 0) {
107046-			count++;
107047-		}
107048-	}
107049-	return count;
107050-}
107051-
107052-static uint64_t
107053-expand_byte_to_mask(unsigned byte) {
107054-	uint64_t result = 0;
107055-	for (int i = 0; i < 8; i++) {
107056-		if ((byte & (1 << i)) != 0) {
107057-			result |= ((uint64_t)0xFF << (i * 8));
107058-		}
107059-	}
107060-	return result;
107061-}
107062-
107063-#define TEST_POPCOUNT(t, suf, pri_hex) do {				\
107064-	t bmul = (t)0x0101010101010101ULL;				\
107065-	for (unsigned i = 0; i < (1 << sizeof(t)); i++) {		\
107066-		for (unsigned j = 0; j < 256; j++) {			\
107067-			/*						\
107068-			 * Replicate the byte j into various		\
107069-			 * bytes of the integer (as indicated by the	\
107070-			 * mask in i), and ensure that the popcount of	\
107071-			 * the result is popcount(i) * popcount(j)	\
107072-			 */						\
107073-			t mask = (t)expand_byte_to_mask(i);		\
107074-			t x = (bmul * j) & mask;			\
107075-			expect_u_eq(					\
107076-			    popcount_byte(i) * popcount_byte(j),	\
107077-			    popcount_##suf(x),				\
107078-			    "Unexpected result, x=0x%"pri_hex, x);	\
107079-		}							\
107080-	}								\
107081-} while (0)
107082-
107083-TEST_BEGIN(test_popcount_u) {
107084-	TEST_POPCOUNT(unsigned, u, "x");
107085-}
107086-TEST_END
107087-
107088-TEST_BEGIN(test_popcount_u_slow) {
107089-	TEST_POPCOUNT(unsigned, u_slow, "x");
107090-}
107091-TEST_END
107092-
107093-TEST_BEGIN(test_popcount_lu) {
107094-	TEST_POPCOUNT(unsigned long, lu, "lx");
107095-}
107096-TEST_END
107097-
107098-TEST_BEGIN(test_popcount_lu_slow) {
107099-	TEST_POPCOUNT(unsigned long, lu_slow, "lx");
107100-}
107101-TEST_END
107102-
107103-TEST_BEGIN(test_popcount_llu) {
107104-	TEST_POPCOUNT(unsigned long long, llu, "llx");
107105-}
107106-TEST_END
107107-
107108-TEST_BEGIN(test_popcount_llu_slow) {
107109-	TEST_POPCOUNT(unsigned long long, llu_slow, "llx");
107110-}
107111-TEST_END
107112-
107113-int
107114-main(void) {
107115-	return test_no_reentrancy(
107116-	    test_pow2_ceil_u64,
107117-	    test_pow2_ceil_u32,
107118-	    test_pow2_ceil_zu,
107119-	    test_lg_ceil_floor,
107120-	    test_ffs_u,
107121-	    test_ffs_lu,
107122-	    test_ffs_llu,
107123-	    test_ffs_u32,
107124-	    test_ffs_u64,
107125-	    test_ffs_zu,
107126-	    test_fls_u,
107127-	    test_fls_lu,
107128-	    test_fls_llu,
107129-	    test_fls_u32,
107130-	    test_fls_u64,
107131-	    test_fls_zu,
107132-	    test_fls_u_slow,
107133-	    test_fls_lu_slow,
107134-	    test_fls_llu_slow,
107135-	    test_popcount_u,
107136-	    test_popcount_u_slow,
107137-	    test_popcount_lu,
107138-	    test_popcount_lu_slow,
107139-	    test_popcount_llu,
107140-	    test_popcount_llu_slow);
107141-}
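
The bit_util tests above verify power-of-two rounding (pow2_ceil_*), base-2 logarithms (lg_ceil/lg_floor), find-first/last-set (ffs_*/fls_*), and popcount. A portable sketch of the first two, with hypothetical helper names; jemalloc's own versions in bit_util.h generally rely on compiler intrinsics instead:

#include <stdint.h>

/* Round up to the next power of two; pow2_ceil(0) == 0, pow2_ceil(1) == 1,
 * matching the edge cases the deleted tests assert. */
static uint64_t
pow2_ceil_u64_sketch(uint64_t x) {
	if (x <= 1) {
		return x;
	}
	x--;
	x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
	x |= x >> 8;  x |= x >> 16; x |= x >> 32;
	return x + 1;
}

/* Largest lg such that (1 << lg) <= x, for x >= 1. */
static unsigned
lg_floor_sketch(uint64_t x) {
	unsigned lg = 0;
	while (x >>= 1) {
		lg++;
	}
	return lg;
}
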
107142diff --git a/jemalloc/test/unit/bitmap.c b/jemalloc/test/unit/bitmap.c
107143deleted file mode 100644
107144index 78e542b..0000000
107145--- a/jemalloc/test/unit/bitmap.c
107146+++ /dev/null
107147@@ -1,343 +0,0 @@
107148-#include "test/jemalloc_test.h"
107149-
107150-#include "test/nbits.h"
107151-
107152-static void
107153-test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits) {
107154-	bitmap_info_t binfo_dyn;
107155-	bitmap_info_init(&binfo_dyn, nbits);
107156-
107157-	expect_zu_eq(bitmap_size(binfo), bitmap_size(&binfo_dyn),
107158-	    "Unexpected difference between static and dynamic initialization, "
107159-	    "nbits=%zu", nbits);
107160-	expect_zu_eq(binfo->nbits, binfo_dyn.nbits,
107161-	    "Unexpected difference between static and dynamic initialization, "
107162-	    "nbits=%zu", nbits);
107163-#ifdef BITMAP_USE_TREE
107164-	expect_u_eq(binfo->nlevels, binfo_dyn.nlevels,
107165-	    "Unexpected difference between static and dynamic initialization, "
107166-	    "nbits=%zu", nbits);
107167-	{
107168-		unsigned i;
107169-
107170-		for (i = 0; i < binfo->nlevels; i++) {
107171-			expect_zu_eq(binfo->levels[i].group_offset,
107172-			    binfo_dyn.levels[i].group_offset,
107173-			    "Unexpected difference between static and dynamic "
107174-			    "initialization, nbits=%zu, level=%u", nbits, i);
107175-		}
107176-	}
107177-#else
107178-	expect_zu_eq(binfo->ngroups, binfo_dyn.ngroups,
107179-	    "Unexpected difference between static and dynamic initialization");
107180-#endif
107181-}
107182-
107183-TEST_BEGIN(test_bitmap_initializer) {
107184-#define NB(nbits) {							\
107185-		if (nbits <= BITMAP_MAXBITS) {				\
107186-			bitmap_info_t binfo =				\
107187-			    BITMAP_INFO_INITIALIZER(nbits);		\
107188-			test_bitmap_initializer_body(&binfo, nbits);	\
107189-		}							\
107190-	}
107191-	NBITS_TAB
107192-#undef NB
107193-}
107194-TEST_END
107195-
107196-static size_t
107197-test_bitmap_size_body(const bitmap_info_t *binfo, size_t nbits,
107198-    size_t prev_size) {
107199-	size_t size = bitmap_size(binfo);
107200-	expect_zu_ge(size, (nbits >> 3),
107201-	    "Bitmap size is smaller than expected");
107202-	expect_zu_ge(size, prev_size, "Bitmap size is smaller than expected");
107203-	return size;
107204-}
107205-
107206-TEST_BEGIN(test_bitmap_size) {
107207-	size_t nbits, prev_size;
107208-
107209-	prev_size = 0;
107210-	for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
107211-		bitmap_info_t binfo;
107212-		bitmap_info_init(&binfo, nbits);
107213-		prev_size = test_bitmap_size_body(&binfo, nbits, prev_size);
107214-	}
107215-#define NB(nbits) {							\
107216-		bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits);	\
107217-		prev_size = test_bitmap_size_body(&binfo, nbits,	\
107218-		    prev_size);						\
107219-	}
107220-	prev_size = 0;
107221-	NBITS_TAB
107222-#undef NB
107223-}
107224-TEST_END
107225-
107226-static void
107227-test_bitmap_init_body(const bitmap_info_t *binfo, size_t nbits) {
107228-	size_t i;
107229-	bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
107230-	expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
107231-
107232-	bitmap_init(bitmap, binfo, false);
107233-	for (i = 0; i < nbits; i++) {
107234-		expect_false(bitmap_get(bitmap, binfo, i),
107235-		    "Bit should be unset");
107236-	}
107237-
107238-	bitmap_init(bitmap, binfo, true);
107239-	for (i = 0; i < nbits; i++) {
107240-		expect_true(bitmap_get(bitmap, binfo, i), "Bit should be set");
107241-	}
107242-
107243-	free(bitmap);
107244-}
107245-
107246-TEST_BEGIN(test_bitmap_init) {
107247-	size_t nbits;
107248-
107249-	for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
107250-		bitmap_info_t binfo;
107251-		bitmap_info_init(&binfo, nbits);
107252-		test_bitmap_init_body(&binfo, nbits);
107253-	}
107254-#define NB(nbits) {							\
107255-		bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits);	\
107256-		test_bitmap_init_body(&binfo, nbits);			\
107257-	}
107258-	NBITS_TAB
107259-#undef NB
107260-}
107261-TEST_END
107262-
107263-static void
107264-test_bitmap_set_body(const bitmap_info_t *binfo, size_t nbits) {
107265-	size_t i;
107266-	bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
107267-	expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
107268-	bitmap_init(bitmap, binfo, false);
107269-
107270-	for (i = 0; i < nbits; i++) {
107271-		bitmap_set(bitmap, binfo, i);
107272-	}
107273-	expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
107274-	free(bitmap);
107275-}
107276-
107277-TEST_BEGIN(test_bitmap_set) {
107278-	size_t nbits;
107279-
107280-	for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
107281-		bitmap_info_t binfo;
107282-		bitmap_info_init(&binfo, nbits);
107283-		test_bitmap_set_body(&binfo, nbits);
107284-	}
107285-#define NB(nbits) {							\
107286-		bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits);	\
107287-		test_bitmap_set_body(&binfo, nbits);			\
107288-	}
107289-	NBITS_TAB
107290-#undef NB
107291-}
107292-TEST_END
107293-
107294-static void
107295-test_bitmap_unset_body(const bitmap_info_t *binfo, size_t nbits) {
107296-	size_t i;
107297-	bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
107298-	expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
107299-	bitmap_init(bitmap, binfo, false);
107300-
107301-	for (i = 0; i < nbits; i++) {
107302-		bitmap_set(bitmap, binfo, i);
107303-	}
107304-	expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
107305-	for (i = 0; i < nbits; i++) {
107306-		bitmap_unset(bitmap, binfo, i);
107307-	}
107308-	for (i = 0; i < nbits; i++) {
107309-		bitmap_set(bitmap, binfo, i);
107310-	}
107311-	expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
107312-	free(bitmap);
107313-}
107314-
107315-TEST_BEGIN(test_bitmap_unset) {
107316-	size_t nbits;
107317-
107318-	for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) {
107319-		bitmap_info_t binfo;
107320-		bitmap_info_init(&binfo, nbits);
107321-		test_bitmap_unset_body(&binfo, nbits);
107322-	}
107323-#define NB(nbits) {							\
107324-		bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits);	\
107325-		test_bitmap_unset_body(&binfo, nbits);			\
107326-	}
107327-	NBITS_TAB
107328-#undef NB
107329-}
107330-TEST_END
107331-
107332-static void
107333-test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) {
107334-	bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
107335-	expect_ptr_not_null(bitmap, "Unexpected malloc() failure");
107336-	bitmap_init(bitmap, binfo, false);
107337-
107338-	/* Iteratively set bits starting at the beginning. */
107339-	for (size_t i = 0; i < nbits; i++) {
107340-		expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
107341-		    "First unset bit should be just after previous first unset "
107342-		    "bit");
107343-		expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
107344-		    "First unset bit should be just after previous first unset "
107345-		    "bit");
107346-		expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
107347-		    "First unset bit should be just after previous first unset "
107348-		    "bit");
107349-		expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
107350-		    "First unset bit should be just after previous first unset "
107351-		    "bit");
107352-	}
107353-	expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
107354-
107355-	/*
107356-	 * Iteratively unset bits starting at the end, and verify that
107357-	 * bitmap_sfu() reaches the unset bits.
107358-	 */
107359-	for (size_t i = nbits - 1; i < nbits; i--) { /* (nbits..0] */
107360-		bitmap_unset(bitmap, binfo, i);
107361-		expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
107362-		    "First unset bit should be the bit previously unset");
107363-		expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
107364-		    "First unset bit should be the bit previously unset");
107365-		expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
107366-		    "First unset bit should be the bit previously unset");
107367-		expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
107368-		    "First unset bit should be the bit previously unset");
107369-		bitmap_unset(bitmap, binfo, i);
107370-	}
107371-	expect_false(bitmap_get(bitmap, binfo, 0), "Bit should be unset");
107372-
107373-	/*
107374-	 * Iteratively set bits starting at the beginning, and verify that
107375-	 * bitmap_sfu() looks past them.
107376-	 */
107377-	for (size_t i = 1; i < nbits; i++) {
107378-		bitmap_set(bitmap, binfo, i - 1);
107379-		expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), i,
107380-		    "First unset bit should be just after the bit previously "
107381-		    "set");
107382-		expect_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i,
107383-		    "First unset bit should be just after the bit previously "
107384-		    "set");
107385-		expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
107386-		    "First unset bit should be just after the bit previously "
107387-		    "set");
107388-		expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
107389-		    "First unset bit should be just after the bit previously "
107390-		    "set");
107391-		bitmap_unset(bitmap, binfo, i);
107392-	}
107393-	expect_zu_eq(bitmap_ffu(bitmap, binfo, 0), nbits - 1,
107394-	    "First unset bit should be the last bit");
107395-	expect_zu_eq(bitmap_ffu(bitmap, binfo, (nbits > 1) ? nbits-2 : nbits-1),
107396-	    nbits - 1, "First unset bit should be the last bit");
107397-	expect_zu_eq(bitmap_ffu(bitmap, binfo, nbits - 1), nbits - 1,
107398-	    "First unset bit should be the last bit");
107399-	expect_zu_eq(bitmap_sfu(bitmap, binfo), nbits - 1,
107400-	    "First unset bit should be the last bit");
107401-	expect_true(bitmap_full(bitmap, binfo), "All bits should be set");
107402-
107403-	/*
107404-	 * Bubble a "usu" pattern through the bitmap and verify that
107405-	 * bitmap_ffu() finds the correct bit for all five min_bit cases.
107406-	 */
107407-	if (nbits >= 3) {
107408-		for (size_t i = 0; i < nbits-2; i++) {
107409-			bitmap_unset(bitmap, binfo, i);
107410-			bitmap_unset(bitmap, binfo, i+2);
107411-			if (i > 0) {
107412-				expect_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i,
107413-				    "Unexpected first unset bit");
107414-			}
107415-			expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
107416-			    "Unexpected first unset bit");
107417-			expect_zu_eq(bitmap_ffu(bitmap, binfo, i+1), i+2,
107418-			    "Unexpected first unset bit");
107419-			expect_zu_eq(bitmap_ffu(bitmap, binfo, i+2), i+2,
107420-			    "Unexpected first unset bit");
107421-			if (i + 3 < nbits) {
107422-				expect_zu_eq(bitmap_ffu(bitmap, binfo, i+3),
107423-				    nbits, "Unexpected first unset bit");
107424-			}
107425-			expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
107426-			    "Unexpected first unset bit");
107427-			expect_zu_eq(bitmap_sfu(bitmap, binfo), i+2,
107428-			    "Unexpected first unset bit");
107429-		}
107430-	}
107431-
107432-	/*
107433-	 * Unset the last bit, bubble another unset bit through the bitmap, and
107434-	 * verify that bitmap_ffu() finds the correct bit for all four min_bit
107435-	 * cases.
107436-	 */
107437-	if (nbits >= 3) {
107438-		bitmap_unset(bitmap, binfo, nbits-1);
107439-		for (size_t i = 0; i < nbits-1; i++) {
107440-			bitmap_unset(bitmap, binfo, i);
107441-			if (i > 0) {
107442-				expect_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i,
107443-				    "Unexpected first unset bit");
107444-			}
107445-			expect_zu_eq(bitmap_ffu(bitmap, binfo, i), i,
107446-			    "Unexpected first unset bit");
107447-			expect_zu_eq(bitmap_ffu(bitmap, binfo, i+1), nbits-1,
107448-			    "Unexpected first unset bit");
107449-			expect_zu_eq(bitmap_ffu(bitmap, binfo, nbits-1),
107450-			    nbits-1, "Unexpected first unset bit");
107451-
107452-			expect_zu_eq(bitmap_sfu(bitmap, binfo), i,
107453-			    "Unexpected first unset bit");
107454-		}
107455-		expect_zu_eq(bitmap_sfu(bitmap, binfo), nbits-1,
107456-		    "Unexpected first unset bit");
107457-	}
107458-
107459-	free(bitmap);
107460-}
107461-
107462-TEST_BEGIN(test_bitmap_xfu) {
107463-	size_t nbits, nbits_max;
107464-
107465-	/* The test is O(n^2); large page sizes may slow down too much. */
107466-	nbits_max = BITMAP_MAXBITS > 512 ? 512 : BITMAP_MAXBITS;
107467-	for (nbits = 1; nbits <= nbits_max; nbits++) {
107468-		bitmap_info_t binfo;
107469-		bitmap_info_init(&binfo, nbits);
107470-		test_bitmap_xfu_body(&binfo, nbits);
107471-	}
107472-#define NB(nbits) {							\
107473-		bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits);	\
107474-		test_bitmap_xfu_body(&binfo, nbits);			\
107475-	}
107476-	NBITS_TAB
107477-#undef NB
107478-}
107479-TEST_END
107480-
107481-int
107482-main(void) {
107483-	return test(
107484-	    test_bitmap_initializer,
107485-	    test_bitmap_size,
107486-	    test_bitmap_init,
107487-	    test_bitmap_set,
107488-	    test_bitmap_unset,
107489-	    test_bitmap_xfu);
107490-}
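
The bitmap tests exercise set/unset plus first-fit search: bitmap_ffu(min_bit) returns the first unset bit at or after min_bit (or nbits when the bitmap is full), and bitmap_sfu additionally sets the bit it finds. A minimal flat sketch over a word array, with hypothetical names; jemalloc's bitmap adds a tree of summary levels for large nbits (BITMAP_USE_TREE):

#include <stddef.h>
#include <stdint.h>

#define WORD_BITS 64

static void
bm_set(uint64_t *bm, size_t bit) {
	bm[bit / WORD_BITS] |= (uint64_t)1 << (bit % WORD_BITS);
}

static int
bm_get(const uint64_t *bm, size_t bit) {
	return (int)((bm[bit / WORD_BITS] >> (bit % WORD_BITS)) & 1);
}

/* First unset bit at index >= min_bit, or nbits if every bit is set. */
static size_t
bm_ffu(const uint64_t *bm, size_t nbits, size_t min_bit) {
	for (size_t i = min_bit; i < nbits; i++) {
		if (!bm_get(bm, i)) {
			return i;
		}
	}
	return nbits;
}
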
107491diff --git a/jemalloc/test/unit/buf_writer.c b/jemalloc/test/unit/buf_writer.c
107492deleted file mode 100644
107493index d5e63a0..0000000
107494--- a/jemalloc/test/unit/buf_writer.c
107495+++ /dev/null
107496@@ -1,196 +0,0 @@
107497-#include "test/jemalloc_test.h"
107498-
107499-#include "jemalloc/internal/buf_writer.h"
107500-
107501-#define TEST_BUF_SIZE 16
107502-#define UNIT_MAX (TEST_BUF_SIZE * 3)
107503-
107504-static size_t test_write_len;
107505-static char test_buf[TEST_BUF_SIZE];
107506-static uint64_t arg;
107507-static uint64_t arg_store;
107508-
107509-static void
107510-test_write_cb(void *cbopaque, const char *s) {
107511-	size_t prev_test_write_len = test_write_len;
107512-	test_write_len += strlen(s); /* only increase the length */
107513-	arg_store = *(uint64_t *)cbopaque; /* only pass along the argument */
107514-	assert_zu_le(prev_test_write_len, test_write_len,
107515-	    "Test write overflowed");
107516-}
107517-
107518-static void
107519-test_buf_writer_body(tsdn_t *tsdn, buf_writer_t *buf_writer) {
107520-	char s[UNIT_MAX + 1];
107521-	size_t n_unit, remain, i;
107522-	ssize_t unit;
107523-
107524-	assert(buf_writer->buf != NULL);
107525-	memset(s, 'a', UNIT_MAX);
107526-	arg = 4; /* Starting value of random argument. */
107527-	arg_store = arg;
107528-	for (unit = UNIT_MAX; unit >= 0; --unit) {
107529-		/* unit keeps decreasing, so strlen(s) is always unit. */
107530-		s[unit] = '\0';
107531-		for (n_unit = 1; n_unit <= 3; ++n_unit) {
107532-			test_write_len = 0;
107533-			remain = 0;
107534-			for (i = 1; i <= n_unit; ++i) {
107535-				arg = prng_lg_range_u64(&arg, 64);
107536-				buf_writer_cb(buf_writer, s);
107537-				remain += unit;
107538-				if (remain > buf_writer->buf_size) {
107539-					/* Flushes should have happened. */
107540-					assert_u64_eq(arg_store, arg, "Call "
107541-					    "back argument didn't get through");
107542-					remain %= buf_writer->buf_size;
107543-					if (remain == 0) {
107544-						/* Last flush should be lazy. */
107545-						remain += buf_writer->buf_size;
107546-					}
107547-				}
107548-				assert_zu_eq(test_write_len + remain, i * unit,
107549-				    "Incorrect length after writing %zu strings"
107550-				    " of length %zu", i, unit);
107551-			}
107552-			buf_writer_flush(buf_writer);
107553-			expect_zu_eq(test_write_len, n_unit * unit,
107554-			    "Incorrect length after flushing at the end of"
107555-			    " writing %zu strings of length %zu", n_unit, unit);
107556-		}
107557-	}
107558-	buf_writer_terminate(tsdn, buf_writer);
107559-}
107560-
107561-TEST_BEGIN(test_buf_write_static) {
107562-	buf_writer_t buf_writer;
107563-	tsdn_t *tsdn = tsdn_fetch();
107564-	assert_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
107565-	    test_buf, TEST_BUF_SIZE),
107566-	    "buf_writer_init() should not encounter error on static buffer");
107567-	test_buf_writer_body(tsdn, &buf_writer);
107568-}
107569-TEST_END
107570-
107571-TEST_BEGIN(test_buf_write_dynamic) {
107572-	buf_writer_t buf_writer;
107573-	tsdn_t *tsdn = tsdn_fetch();
107574-	assert_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
107575-	    NULL, TEST_BUF_SIZE), "buf_writer_init() should not OOM");
107576-	test_buf_writer_body(tsdn, &buf_writer);
107577-}
107578-TEST_END
107579-
107580-TEST_BEGIN(test_buf_write_oom) {
107581-	buf_writer_t buf_writer;
107582-	tsdn_t *tsdn = tsdn_fetch();
107583-	assert_true(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
107584-	    NULL, SC_LARGE_MAXCLASS + 1), "buf_writer_init() should OOM");
107585-	assert(buf_writer.buf == NULL);
107586-
107587-	char s[UNIT_MAX + 1];
107588-	size_t n_unit, i;
107589-	ssize_t unit;
107590-
107591-	memset(s, 'a', UNIT_MAX);
107592-	arg = 4; /* Starting value of random argument. */
107593-	arg_store = arg;
107594-	for (unit = UNIT_MAX; unit >= 0; unit -= UNIT_MAX / 4) {
107595-		/* unit keeps decreasing, so strlen(s) is always unit. */
107596-		s[unit] = '\0';
107597-		for (n_unit = 1; n_unit <= 3; ++n_unit) {
107598-			test_write_len = 0;
107599-			for (i = 1; i <= n_unit; ++i) {
107600-				arg = prng_lg_range_u64(&arg, 64);
107601-				buf_writer_cb(&buf_writer, s);
107602-				assert_u64_eq(arg_store, arg,
107603-				    "Call back argument didn't get through");
107604-				assert_zu_eq(test_write_len, i * unit,
107605-				    "Incorrect length after writing %zu strings"
107606-				    " of length %zu", i, unit);
107607-			}
107608-			buf_writer_flush(&buf_writer);
107609-			expect_zu_eq(test_write_len, n_unit * unit,
107610-			    "Incorrect length after flushing at the end of"
107611-			    " writing %zu strings of length %zu", n_unit, unit);
107612-		}
107613-	}
107614-	buf_writer_terminate(tsdn, &buf_writer);
107615-}
107616-TEST_END
107617-
107618-static int test_read_count;
107619-static size_t test_read_len;
107620-static uint64_t arg_sum;
107621-
107622-ssize_t
107623-test_read_cb(void *cbopaque, void *buf, size_t limit) {
107624-	static uint64_t rand = 4;
107625-
107626-	arg_sum += *(uint64_t *)cbopaque;
107627-	assert_zu_gt(limit, 0, "Limit for read_cb must be positive");
107628-	--test_read_count;
107629-	if (test_read_count == 0) {
107630-		return -1;
107631-	} else {
107632-		size_t read_len = limit;
107633-		if (limit > 1) {
107634-			rand = prng_range_u64(&rand, (uint64_t)limit);
107635-			read_len -= (size_t)rand;
107636-		}
107637-		assert(read_len > 0);
107638-		memset(buf, 'a', read_len);
107639-		size_t prev_test_read_len = test_read_len;
107640-		test_read_len += read_len;
107641-		assert_zu_le(prev_test_read_len, test_read_len,
107642-		    "Test read overflowed");
107643-		return read_len;
107644-	}
107645-}
107646-
107647-static void
107648-test_buf_writer_pipe_body(tsdn_t *tsdn, buf_writer_t *buf_writer) {
107649-	arg = 4; /* Starting value of random argument. */
107650-	for (int count = 5; count > 0; --count) {
107651-		arg = prng_lg_range_u64(&arg, 64);
107652-		arg_sum = 0;
107653-		test_read_count = count;
107654-		test_read_len = 0;
107655-		test_write_len = 0;
107656-		buf_writer_pipe(buf_writer, test_read_cb, &arg);
107657-		assert(test_read_count == 0);
107658-		expect_u64_eq(arg_sum, arg * count, "");
107659-		expect_zu_eq(test_write_len, test_read_len,
107660-		    "Write length should be equal to read length");
107661-	}
107662-	buf_writer_terminate(tsdn, buf_writer);
107663-}
107664-
107665-TEST_BEGIN(test_buf_write_pipe) {
107666-	buf_writer_t buf_writer;
107667-	tsdn_t *tsdn = tsdn_fetch();
107668-	assert_false(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
107669-	    test_buf, TEST_BUF_SIZE),
107670-	    "buf_writer_init() should not encounter error on static buffer");
107671-	test_buf_writer_pipe_body(tsdn, &buf_writer);
107672-}
107673-TEST_END
107674-
107675-TEST_BEGIN(test_buf_write_pipe_oom) {
107676-	buf_writer_t buf_writer;
107677-	tsdn_t *tsdn = tsdn_fetch();
107678-	assert_true(buf_writer_init(tsdn, &buf_writer, test_write_cb, &arg,
107679-	    NULL, SC_LARGE_MAXCLASS + 1), "buf_writer_init() should OOM");
107680-	test_buf_writer_pipe_body(tsdn, &buf_writer);
107681-}
107682-TEST_END
107683-
107684-int
107685-main(void) {
107686-	return test(
107687-	    test_buf_write_static,
107688-	    test_buf_write_dynamic,
107689-	    test_buf_write_oom,
107690-	    test_buf_write_pipe,
107691-	    test_buf_write_pipe_oom);
107692-}
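
buf_writer batches small writes into a fixed buffer and forwards data to the write callback only when the next write would not fit or when explicitly flushed; the OOM tests above confirm that, when no buffer could be allocated, it degrades to calling the callback directly. A standalone sketch of that batching pattern under those assumptions, with hypothetical names (not jemalloc's buf_writer API):

#include <string.h>

typedef void (write_cb_t)(void *opaque, const char *s);

/* Caller zero-initializes `used` and sets cb/opaque before use. */
struct sketch_writer {
	write_cb_t *cb;
	void *opaque;
	char buf[16];
	size_t used;
};

static void
sketch_flush(struct sketch_writer *w) {
	w->buf[w->used] = '\0';
	w->cb(w->opaque, w->buf);
	w->used = 0;
}

static void
sketch_write(struct sketch_writer *w, const char *s) {
	size_t len = strlen(s);
	if (len >= sizeof(w->buf)) {
		/* Oversized input: drain pending data, then write through. */
		if (w->used != 0) {
			sketch_flush(w);
		}
		w->cb(w->opaque, s);
		return;
	}
	if (w->used + len >= sizeof(w->buf)) {
		/* Next write would not fit (room kept for the terminator). */
		sketch_flush(w);
	}
	memcpy(w->buf + w->used, s, len);
	w->used += len;
}
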
107693diff --git a/jemalloc/test/unit/cache_bin.c b/jemalloc/test/unit/cache_bin.c
107694deleted file mode 100644
107695index 3b6dbab..0000000
107696--- a/jemalloc/test/unit/cache_bin.c
107697+++ /dev/null
107698@@ -1,384 +0,0 @@
107699-#include "test/jemalloc_test.h"
107700-
107701-static void
107702-do_fill_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
107703-    cache_bin_sz_t ncached_max, cache_bin_sz_t nfill_attempt,
107704-    cache_bin_sz_t nfill_succeed) {
107705-	bool success;
107706-	void *ptr;
107707-	assert_true(cache_bin_ncached_get_local(bin, info) == 0, "");
107708-	CACHE_BIN_PTR_ARRAY_DECLARE(arr, nfill_attempt);
107709-	cache_bin_init_ptr_array_for_fill(bin, info, &arr, nfill_attempt);
107710-	for (cache_bin_sz_t i = 0; i < nfill_succeed; i++) {
107711-		arr.ptr[i] = &ptrs[i];
107712-	}
107713-	cache_bin_finish_fill(bin, info, &arr, nfill_succeed);
107714-	expect_true(cache_bin_ncached_get_local(bin, info) == nfill_succeed,
107715-	    "");
107716-	cache_bin_low_water_set(bin);
107717-
107718-	for (cache_bin_sz_t i = 0; i < nfill_succeed; i++) {
107719-		ptr = cache_bin_alloc(bin, &success);
107720-		expect_true(success, "");
107721-		expect_ptr_eq(ptr, (void *)&ptrs[i],
107722-		    "Should pop in order filled");
107723-		expect_true(cache_bin_low_water_get(bin, info)
107724-		    == nfill_succeed - i - 1, "");
107725-	}
107726-	expect_true(cache_bin_ncached_get_local(bin, info) == 0, "");
107727-	expect_true(cache_bin_low_water_get(bin, info) == 0, "");
107728-}
107729-
107730-static void
107731-do_flush_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
107732-    cache_bin_sz_t nfill, cache_bin_sz_t nflush) {
107733-	bool success;
107734-	assert_true(cache_bin_ncached_get_local(bin, info) == 0, "");
107735-
107736-	for (cache_bin_sz_t i = 0; i < nfill; i++) {
107737-		success = cache_bin_dalloc_easy(bin, &ptrs[i]);
107738-		expect_true(success, "");
107739-	}
107740-
107741-	CACHE_BIN_PTR_ARRAY_DECLARE(arr, nflush);
107742-	cache_bin_init_ptr_array_for_flush(bin, info, &arr, nflush);
107743-	for (cache_bin_sz_t i = 0; i < nflush; i++) {
107744-		expect_ptr_eq(arr.ptr[i], &ptrs[nflush - i - 1], "");
107745-	}
107746-	cache_bin_finish_flush(bin, info, &arr, nflush);
107747-
107748-	expect_true(cache_bin_ncached_get_local(bin, info) == nfill - nflush,
107749-	    "");
107750-	while (cache_bin_ncached_get_local(bin, info) > 0) {
107751-		cache_bin_alloc(bin, &success);
107752-	}
107753-}
107754-
107755-static void
107756-do_batch_alloc_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
107757-    cache_bin_sz_t nfill, size_t batch) {
107758-	assert_true(cache_bin_ncached_get_local(bin, info) == 0, "");
107759-	CACHE_BIN_PTR_ARRAY_DECLARE(arr, nfill);
107760-	cache_bin_init_ptr_array_for_fill(bin, info, &arr, nfill);
107761-	for (cache_bin_sz_t i = 0; i < nfill; i++) {
107762-		arr.ptr[i] = &ptrs[i];
107763-	}
107764-	cache_bin_finish_fill(bin, info, &arr, nfill);
107765-	assert_true(cache_bin_ncached_get_local(bin, info) == nfill, "");
107766-	cache_bin_low_water_set(bin);
107767-
107768-	void **out = malloc((batch + 1) * sizeof(void *));
107769-	size_t n = cache_bin_alloc_batch(bin, batch, out);
107770-	assert_true(n == ((size_t)nfill < batch ? (size_t)nfill : batch), "");
107771-	for (cache_bin_sz_t i = 0; i < (cache_bin_sz_t)n; i++) {
107772-		expect_ptr_eq(out[i], &ptrs[i], "");
107773-	}
107774-	expect_true(cache_bin_low_water_get(bin, info) == nfill -
107775-	    (cache_bin_sz_t)n, "");
107776-	while (cache_bin_ncached_get_local(bin, info) > 0) {
107777-		bool success;
107778-		cache_bin_alloc(bin, &success);
107779-	}
107780-	free(out);
107781-}
107782-
107783-static void
107784-test_bin_init(cache_bin_t *bin, cache_bin_info_t *info) {
107785-	size_t size;
107786-	size_t alignment;
107787-	cache_bin_info_compute_alloc(info, 1, &size, &alignment);
107788-	void *mem = mallocx(size, MALLOCX_ALIGN(alignment));
107789-	assert_ptr_not_null(mem, "Unexpected mallocx failure");
107790-
107791-	size_t cur_offset = 0;
107792-	cache_bin_preincrement(info, 1, mem, &cur_offset);
107793-	cache_bin_init(bin, info, mem, &cur_offset);
107794-	cache_bin_postincrement(info, 1, mem, &cur_offset);
107795-	assert_zu_eq(cur_offset, size, "Should use all requested memory");
107796-}
107797-
107798-TEST_BEGIN(test_cache_bin) {
107799-	const int ncached_max = 100;
107800-	bool success;
107801-	void *ptr;
107802-
107803-	cache_bin_info_t info;
107804-	cache_bin_info_init(&info, ncached_max);
107805-	cache_bin_t bin;
107806-	test_bin_init(&bin, &info);
107807-
107808-	/* Initialize to empty; should then have 0 elements. */
107809-	expect_d_eq(ncached_max, cache_bin_info_ncached_max(&info), "");
107810-	expect_true(cache_bin_ncached_get_local(&bin, &info) == 0, "");
107811-	expect_true(cache_bin_low_water_get(&bin, &info) == 0, "");
107812-
107813-	ptr = cache_bin_alloc_easy(&bin, &success);
107814-	expect_false(success, "Shouldn't successfully allocate when empty");
107815-	expect_ptr_null(ptr, "Shouldn't get a non-null pointer on failure");
107816-
107817-	ptr = cache_bin_alloc(&bin, &success);
107818-	expect_false(success, "Shouldn't successfully allocate when empty");
107819-	expect_ptr_null(ptr, "Shouldn't get a non-null pointer on failure");
107820-
107821-	/*
107822-	 * We allocate one more item than ncached_max, so we can test cache bin
107823-	 * exhaustion.
107824-	 */
107825-	void **ptrs = mallocx(sizeof(void *) * (ncached_max + 1), 0);
107826-	assert_ptr_not_null(ptrs, "Unexpected mallocx failure");
107827-	for  (cache_bin_sz_t i = 0; i < ncached_max; i++) {
107828-		expect_true(cache_bin_ncached_get_local(&bin, &info) == i, "");
107829-		success = cache_bin_dalloc_easy(&bin, &ptrs[i]);
107830-		expect_true(success,
107831-		    "Should be able to dalloc into a non-full cache bin.");
107832-		expect_true(cache_bin_low_water_get(&bin, &info) == 0,
107833-		    "Pushes and pops shouldn't change low water of zero.");
107834-	}
107835-	expect_true(cache_bin_ncached_get_local(&bin, &info) == ncached_max,
107836-	    "");
107837-	success = cache_bin_dalloc_easy(&bin, &ptrs[ncached_max]);
107838-	expect_false(success, "Shouldn't be able to dalloc into a full bin.");
107839-
107840-	cache_bin_low_water_set(&bin);
107841-
107842-	for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
107843-		expect_true(cache_bin_low_water_get(&bin, &info)
107844-		    == ncached_max - i, "");
107845-		expect_true(cache_bin_ncached_get_local(&bin, &info)
107846-		    == ncached_max - i, "");
107847-		/*
107848-		 * This should fail -- the easy variant can't change the low
107849-		 * water mark.
107850-		 */
107851-		ptr = cache_bin_alloc_easy(&bin, &success);
107852-		expect_ptr_null(ptr, "");
107853-		expect_false(success, "");
107854-		expect_true(cache_bin_low_water_get(&bin, &info)
107855-		    == ncached_max - i, "");
107856-		expect_true(cache_bin_ncached_get_local(&bin, &info)
107857-		    == ncached_max - i, "");
107858-
107859-		/* This should succeed, though. */
107860-		ptr = cache_bin_alloc(&bin, &success);
107861-		expect_true(success, "");
107862-		expect_ptr_eq(ptr, &ptrs[ncached_max - i - 1],
107863-		    "Alloc should pop in stack order");
107864-		expect_true(cache_bin_low_water_get(&bin, &info)
107865-		    == ncached_max - i - 1, "");
107866-		expect_true(cache_bin_ncached_get_local(&bin, &info)
107867-		    == ncached_max - i - 1, "");
107868-	}
107869-	/* Now we're empty -- all alloc attempts should fail. */
107870-	expect_true(cache_bin_ncached_get_local(&bin, &info) == 0, "");
107871-	ptr = cache_bin_alloc_easy(&bin, &success);
107872-	expect_ptr_null(ptr, "");
107873-	expect_false(success, "");
107874-	ptr = cache_bin_alloc(&bin, &success);
107875-	expect_ptr_null(ptr, "");
107876-	expect_false(success, "");
107877-
107878-	for (cache_bin_sz_t i = 0; i < ncached_max / 2; i++) {
107879-		cache_bin_dalloc_easy(&bin, &ptrs[i]);
107880-	}
107881-	cache_bin_low_water_set(&bin);
107882-
107883-	for (cache_bin_sz_t i = ncached_max / 2; i < ncached_max; i++) {
107884-		cache_bin_dalloc_easy(&bin, &ptrs[i]);
107885-	}
107886-	expect_true(cache_bin_ncached_get_local(&bin, &info) == ncached_max,
107887-	    "");
107888-	for (cache_bin_sz_t i = ncached_max - 1; i >= ncached_max / 2; i--) {
107889-		/*
107890-		 * Size is bigger than low water -- the reduced version should
107891-		 * succeed.
107892-		 */
107893-		ptr = cache_bin_alloc_easy(&bin, &success);
107894-		expect_true(success, "");
107895-		expect_ptr_eq(ptr, &ptrs[i], "");
107896-	}
107897-	/* But now, we've hit low-water. */
107898-	ptr = cache_bin_alloc_easy(&bin, &success);
107899-	expect_false(success, "");
107900-	expect_ptr_null(ptr, "");
107901-
107902-	/* We're going to test filling -- we must be empty to start. */
107903-	while (cache_bin_ncached_get_local(&bin, &info)) {
107904-		cache_bin_alloc(&bin, &success);
107905-		expect_true(success, "");
107906-	}
107907-
107908-	/* Test fill. */
107909-	/* Try to fill all, succeed fully. */
107910-	do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max, ncached_max);
107911-	/* Try to fill all, succeed partially. */
107912-	do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max,
107913-	    ncached_max / 2);
107914-	/* Try to fill all, fail completely. */
107915-	do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max, 0);
107916-
107917-	/* Try to fill some, succeed fully. */
107918-	do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max / 2,
107919-	    ncached_max / 2);
107920-	/* Try to fill some, succeed partially. */
107921-	do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max / 2,
107922-	    ncached_max / 4);
107923-	/* Try to fill some, fail completely. */
107924-	do_fill_test(&bin, &info, ptrs, ncached_max, ncached_max / 2, 0);
107925-
107926-	do_flush_test(&bin, &info, ptrs, ncached_max, ncached_max);
107927-	do_flush_test(&bin, &info, ptrs, ncached_max, ncached_max / 2);
107928-	do_flush_test(&bin, &info, ptrs, ncached_max, 0);
107929-	do_flush_test(&bin, &info, ptrs, ncached_max / 2, ncached_max / 2);
107930-	do_flush_test(&bin, &info, ptrs, ncached_max / 2, ncached_max / 4);
107931-	do_flush_test(&bin, &info, ptrs, ncached_max / 2, 0);
107932-
107933-	do_batch_alloc_test(&bin, &info, ptrs, ncached_max, ncached_max);
107934-	do_batch_alloc_test(&bin, &info, ptrs, ncached_max, ncached_max * 2);
107935-	do_batch_alloc_test(&bin, &info, ptrs, ncached_max, ncached_max / 2);
107936-	do_batch_alloc_test(&bin, &info, ptrs, ncached_max, 2);
107937-	do_batch_alloc_test(&bin, &info, ptrs, ncached_max, 1);
107938-	do_batch_alloc_test(&bin, &info, ptrs, ncached_max, 0);
107939-	do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2,
107940-	    ncached_max / 2);
107941-	do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2, ncached_max);
107942-	do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2,
107943-	    ncached_max / 4);
107944-	do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2, 2);
107945-	do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2, 1);
107946-	do_batch_alloc_test(&bin, &info, ptrs, ncached_max / 2, 0);
107947-	do_batch_alloc_test(&bin, &info, ptrs, 2, ncached_max);
107948-	do_batch_alloc_test(&bin, &info, ptrs, 2, 2);
107949-	do_batch_alloc_test(&bin, &info, ptrs, 2, 1);
107950-	do_batch_alloc_test(&bin, &info, ptrs, 2, 0);
107951-	do_batch_alloc_test(&bin, &info, ptrs, 1, 2);
107952-	do_batch_alloc_test(&bin, &info, ptrs, 1, 1);
107953-	do_batch_alloc_test(&bin, &info, ptrs, 1, 0);
107954-	do_batch_alloc_test(&bin, &info, ptrs, 0, 2);
107955-	do_batch_alloc_test(&bin, &info, ptrs, 0, 1);
107956-	do_batch_alloc_test(&bin, &info, ptrs, 0, 0);
107957-
107958-	free(ptrs);
107959-}
107960-TEST_END
107961-
107962-static void
107963-do_flush_stashed_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
107964-    cache_bin_sz_t nfill, cache_bin_sz_t nstash) {
107965-	expect_true(cache_bin_ncached_get_local(bin, info) == 0,
107966-	    "Bin not empty");
107967-	expect_true(cache_bin_nstashed_get_local(bin, info) == 0,
107968-	    "Bin not empty");
107969-	expect_true(nfill + nstash <= info->ncached_max, "Exceeded max");
107970-
107971-	bool ret;
107972-	/* Fill */
107973-	for (cache_bin_sz_t i = 0; i < nfill; i++) {
107974-		ret = cache_bin_dalloc_easy(bin, &ptrs[i]);
107975-		expect_true(ret, "Unexpected fill failure");
107976-	}
107977-	expect_true(cache_bin_ncached_get_local(bin, info) == nfill,
107978-	    "Wrong cached count");
107979-
107980-	/* Stash */
107981-	for (cache_bin_sz_t i = 0; i < nstash; i++) {
107982-		ret = cache_bin_stash(bin, &ptrs[i + nfill]);
107983-		expect_true(ret, "Unexpected stash failure");
107984-	}
107985-	expect_true(cache_bin_nstashed_get_local(bin, info) == nstash,
107986-	    "Wrong stashed count");
107987-
107988-	if (nfill + nstash == info->ncached_max) {
107989-		ret = cache_bin_dalloc_easy(bin, &ptrs[0]);
107990-		expect_false(ret, "Should not dalloc into a full bin");
107991-		ret = cache_bin_stash(bin, &ptrs[0]);
107992-		expect_false(ret, "Should not stash into a full bin");
107993-	}
107994-
107995-	/* Alloc filled ones */
107996-	for (cache_bin_sz_t i = 0; i < nfill; i++) {
107997-		void *ptr = cache_bin_alloc(bin, &ret);
107998-		expect_true(ret, "Unexpected alloc failure");
107999-		/* Verify it's not from the stashed range. */
108000-		expect_true((uintptr_t)ptr < (uintptr_t)&ptrs[nfill],
108001-		    "Should not alloc stashed ptrs");
108002-	}
108003-	expect_true(cache_bin_ncached_get_local(bin, info) == 0,
108004-	    "Wrong cached count");
108005-	expect_true(cache_bin_nstashed_get_local(bin, info) == nstash,
108006-	    "Wrong stashed count");
108007-
108008-	cache_bin_alloc(bin, &ret);
108009-	expect_false(ret, "Should not alloc stashed");
108010-
108011-	/* Clear stashed ones */
108012-	cache_bin_finish_flush_stashed(bin, info);
108013-	expect_true(cache_bin_ncached_get_local(bin, info) == 0,
108014-	    "Wrong cached count");
108015-	expect_true(cache_bin_nstashed_get_local(bin, info) == 0,
108016-	    "Wrong stashed count");
108017-
108018-	cache_bin_alloc(bin, &ret);
108019-	expect_false(ret, "Should not alloc from empty bin");
108020-}
108021-
108022-TEST_BEGIN(test_cache_bin_stash) {
108023-	const int ncached_max = 100;
108024-
108025-	cache_bin_t bin;
108026-	cache_bin_info_t info;
108027-	cache_bin_info_init(&info, ncached_max);
108028-	test_bin_init(&bin, &info);
108029-
108030-	/*
108031-	 * The content of this array is not accessed; instead the interior
108032-	 * addresses are used to insert / stash into the bins as test pointers.
108033-	 */
108034-	void **ptrs = mallocx(sizeof(void *) * (ncached_max + 1), 0);
108035-	assert_ptr_not_null(ptrs, "Unexpected mallocx failure");
108036-	bool ret;
108037-	for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
108038-		expect_true(cache_bin_ncached_get_local(&bin, &info) ==
108039-		    (i / 2 + i % 2), "Wrong ncached value");
108040-		expect_true(cache_bin_nstashed_get_local(&bin, &info) == i / 2,
108041-		    "Wrong nstashed value");
108042-		if (i % 2 == 0) {
108043-			cache_bin_dalloc_easy(&bin, &ptrs[i]);
108044-		} else {
108045-			ret = cache_bin_stash(&bin, &ptrs[i]);
108046-			expect_true(ret, "Should be able to stash into a "
108047-			    "non-full cache bin");
108048-		}
108049-	}
108050-	ret = cache_bin_dalloc_easy(&bin, &ptrs[0]);
108051-	expect_false(ret, "Should not dalloc into a full cache bin");
108052-	ret = cache_bin_stash(&bin, &ptrs[0]);
108053-	expect_false(ret, "Should not stash into a full cache bin");
108054-	for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
108055-		void *ptr = cache_bin_alloc(&bin, &ret);
108056-		if (i < ncached_max / 2) {
108057-			expect_true(ret, "Should be able to alloc");
108058-			uintptr_t diff = ((uintptr_t)ptr - (uintptr_t)&ptrs[0])
108059-			    / sizeof(void *);
108060-			expect_true(diff % 2 == 0, "Should be able to alloc");
108061-		} else {
108062-			expect_false(ret, "Should not alloc stashed");
108063-			expect_true(cache_bin_nstashed_get_local(&bin, &info) ==
108064-			    ncached_max / 2, "Wrong nstashed value");
108065-		}
108066-	}
108067-
108068-	test_bin_init(&bin, &info);
108069-	do_flush_stashed_test(&bin, &info, ptrs, ncached_max, 0);
108070-	do_flush_stashed_test(&bin, &info, ptrs, 0, ncached_max);
108071-	do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 2, ncached_max / 2);
108072-	do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 4, ncached_max / 2);
108073-	do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 2, ncached_max / 4);
108074-	do_flush_stashed_test(&bin, &info, ptrs, ncached_max / 4, ncached_max / 4);
108075-}
108076-TEST_END
108077-
108078-int
108079-main(void) {
108080-	return test(test_cache_bin,
108081-		test_cache_bin_stash);
108082-}
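
A cache bin is in essence a bounded LIFO stack of pointers with a low-water mark: dalloc pushes, alloc pops (and may lower the mark), and cache_bin_low_water_set resets the mark to the current fill level, which is what the assertions above rely on. A compact sketch of that bookkeeping with a hypothetical struct; jemalloc's cache_bin_t packs the same state differently and adds the stash range used by test_cache_bin_stash:

#include <stdbool.h>
#include <stddef.h>

struct sketch_bin {
	void *slots[100];
	size_t ncached;
	size_t low_water;	/* Minimum ncached seen since the last reset. */
};

static void
sketch_low_water_set(struct sketch_bin *b) {
	b->low_water = b->ncached;	/* Reset the mark to the current fill level. */
}

static bool
sketch_dalloc(struct sketch_bin *b, void *ptr) {
	if (b->ncached == sizeof(b->slots) / sizeof(b->slots[0])) {
		return false;	/* Full bin: caller must flush first. */
	}
	b->slots[b->ncached++] = ptr;
	return true;
}

static void *
sketch_alloc(struct sketch_bin *b, bool *success) {
	if (b->ncached == 0) {
		*success = false;
		return NULL;
	}
	void *ptr = b->slots[--b->ncached];	/* Pops in reverse fill order. */
	if (b->ncached < b->low_water) {
		b->low_water = b->ncached;	/* Track the new minimum. */
	}
	*success = true;
	return ptr;
}
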
108083diff --git a/jemalloc/test/unit/ckh.c b/jemalloc/test/unit/ckh.c
108084deleted file mode 100644
108085index 36142ac..0000000
108086--- a/jemalloc/test/unit/ckh.c
108087+++ /dev/null
108088@@ -1,211 +0,0 @@
108089-#include "test/jemalloc_test.h"
108090-
108091-TEST_BEGIN(test_new_delete) {
108092-	tsd_t *tsd;
108093-	ckh_t ckh;
108094-
108095-	tsd = tsd_fetch();
108096-
108097-	expect_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
108098-	    ckh_string_keycomp), "Unexpected ckh_new() error");
108099-	ckh_delete(tsd, &ckh);
108100-
108101-	expect_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash,
108102-	    ckh_pointer_keycomp), "Unexpected ckh_new() error");
108103-	ckh_delete(tsd, &ckh);
108104-}
108105-TEST_END
108106-
108107-TEST_BEGIN(test_count_insert_search_remove) {
108108-	tsd_t *tsd;
108109-	ckh_t ckh;
108110-	const char *strs[] = {
108111-	    "a string",
108112-	    "A string",
108113-	    "a string.",
108114-	    "A string."
108115-	};
108116-	const char *missing = "A string not in the hash table.";
108117-	size_t i;
108118-
108119-	tsd = tsd_fetch();
108120-
108121-	expect_false(ckh_new(tsd, &ckh, 2, ckh_string_hash,
108122-	    ckh_string_keycomp), "Unexpected ckh_new() error");
108123-	expect_zu_eq(ckh_count(&ckh), 0,
108124-	    "ckh_count() should return %zu, but it returned %zu", ZU(0),
108125-	    ckh_count(&ckh));
108126-
108127-	/* Insert. */
108128-	for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
108129-		ckh_insert(tsd, &ckh, strs[i], strs[i]);
108130-		expect_zu_eq(ckh_count(&ckh), i+1,
108131-		    "ckh_count() should return %zu, but it returned %zu", i+1,
108132-		    ckh_count(&ckh));
108133-	}
108134-
108135-	/* Search. */
108136-	for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
108137-		union {
108138-			void *p;
108139-			const char *s;
108140-		} k, v;
108141-		void **kp, **vp;
108142-		const char *ks, *vs;
108143-
108144-		kp = (i & 1) ? &k.p : NULL;
108145-		vp = (i & 2) ? &v.p : NULL;
108146-		k.p = NULL;
108147-		v.p = NULL;
108148-		expect_false(ckh_search(&ckh, strs[i], kp, vp),
108149-		    "Unexpected ckh_search() error");
108150-
108151-		ks = (i & 1) ? strs[i] : (const char *)NULL;
108152-		vs = (i & 2) ? strs[i] : (const char *)NULL;
108153-		expect_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
108154-		    i);
108155-		expect_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
108156-		    i);
108157-	}
108158-	expect_true(ckh_search(&ckh, missing, NULL, NULL),
108159-	    "Unexpected ckh_search() success");
108160-
108161-	/* Remove. */
108162-	for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
108163-		union {
108164-			void *p;
108165-			const char *s;
108166-		} k, v;
108167-		void **kp, **vp;
108168-		const char *ks, *vs;
108169-
108170-		kp = (i & 1) ? &k.p : NULL;
108171-		vp = (i & 2) ? &v.p : NULL;
108172-		k.p = NULL;
108173-		v.p = NULL;
108174-		expect_false(ckh_remove(tsd, &ckh, strs[i], kp, vp),
108175-		    "Unexpected ckh_remove() error");
108176-
108177-		ks = (i & 1) ? strs[i] : (const char *)NULL;
108178-		vs = (i & 2) ? strs[i] : (const char *)NULL;
108179-		expect_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu",
108180-		    i);
108181-		expect_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu",
108182-		    i);
108183-		expect_zu_eq(ckh_count(&ckh),
108184-		    sizeof(strs)/sizeof(const char *) - i - 1,
108185-		    "ckh_count() should return %zu, but it returned %zu",
108186-		        sizeof(strs)/sizeof(const char *) - i - 1,
108187-		    ckh_count(&ckh));
108188-	}
108189-
108190-	ckh_delete(tsd, &ckh);
108191-}
108192-TEST_END
108193-
108194-TEST_BEGIN(test_insert_iter_remove) {
108195-#define NITEMS ZU(1000)
108196-	tsd_t *tsd;
108197-	ckh_t ckh;
108198-	void **p[NITEMS];
108199-	void *q, *r;
108200-	size_t i;
108201-
108202-	tsd = tsd_fetch();
108203-
108204-	expect_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash,
108205-	    ckh_pointer_keycomp), "Unexpected ckh_new() error");
108206-
108207-	for (i = 0; i < NITEMS; i++) {
108208-		p[i] = mallocx(i+1, 0);
108209-		expect_ptr_not_null(p[i], "Unexpected mallocx() failure");
108210-	}
108211-
108212-	for (i = 0; i < NITEMS; i++) {
108213-		size_t j;
108214-
108215-		for (j = i; j < NITEMS; j++) {
108216-			expect_false(ckh_insert(tsd, &ckh, p[j], p[j]),
108217-			    "Unexpected ckh_insert() failure");
108218-			expect_false(ckh_search(&ckh, p[j], &q, &r),
108219-			    "Unexpected ckh_search() failure");
108220-			expect_ptr_eq(p[j], q, "Key pointer mismatch");
108221-			expect_ptr_eq(p[j], r, "Value pointer mismatch");
108222-		}
108223-
108224-		expect_zu_eq(ckh_count(&ckh), NITEMS,
108225-		    "ckh_count() should return %zu, but it returned %zu",
108226-		    NITEMS, ckh_count(&ckh));
108227-
108228-		for (j = i + 1; j < NITEMS; j++) {
108229-			expect_false(ckh_search(&ckh, p[j], NULL, NULL),
108230-			    "Unexpected ckh_search() failure");
108231-			expect_false(ckh_remove(tsd, &ckh, p[j], &q, &r),
108232-			    "Unexpected ckh_remove() failure");
108233-			expect_ptr_eq(p[j], q, "Key pointer mismatch");
108234-			expect_ptr_eq(p[j], r, "Value pointer mismatch");
108235-			expect_true(ckh_search(&ckh, p[j], NULL, NULL),
108236-			    "Unexpected ckh_search() success");
108237-			expect_true(ckh_remove(tsd, &ckh, p[j], &q, &r),
108238-			    "Unexpected ckh_remove() success");
108239-		}
108240-
108241-		{
108242-			bool seen[NITEMS];
108243-			size_t tabind;
108244-
108245-			memset(seen, 0, sizeof(seen));
108246-
108247-			for (tabind = 0; !ckh_iter(&ckh, &tabind, &q, &r);) {
108248-				size_t k;
108249-
108250-				expect_ptr_eq(q, r, "Key and val not equal");
108251-
108252-				for (k = 0; k < NITEMS; k++) {
108253-					if (p[k] == q) {
108254-						expect_false(seen[k],
108255-						    "Item %zu already seen", k);
108256-						seen[k] = true;
108257-						break;
108258-					}
108259-				}
108260-			}
108261-
108262-			for (j = 0; j < i + 1; j++) {
108263-				expect_true(seen[j], "Item %zu not seen", j);
108264-			}
108265-			for (; j < NITEMS; j++) {
108266-				expect_false(seen[j], "Item %zu seen", j);
108267-			}
108268-		}
108269-	}
108270-
108271-	for (i = 0; i < NITEMS; i++) {
108272-		expect_false(ckh_search(&ckh, p[i], NULL, NULL),
108273-		    "Unexpected ckh_search() failure");
108274-		expect_false(ckh_remove(tsd, &ckh, p[i], &q, &r),
108275-		    "Unexpected ckh_remove() failure");
108276-		expect_ptr_eq(p[i], q, "Key pointer mismatch");
108277-		expect_ptr_eq(p[i], r, "Value pointer mismatch");
108278-		expect_true(ckh_search(&ckh, p[i], NULL, NULL),
108279-		    "Unexpected ckh_search() success");
108280-		expect_true(ckh_remove(tsd, &ckh, p[i], &q, &r),
108281-		    "Unexpected ckh_remove() success");
108282-		dallocx(p[i], 0);
108283-	}
108284-
108285-	expect_zu_eq(ckh_count(&ckh), 0,
108286-	    "ckh_count() should return %zu, but it returned %zu",
108287-	    ZU(0), ckh_count(&ckh));
108288-	ckh_delete(tsd, &ckh);
108289-#undef NITEMS
108290-}
108291-TEST_END
108292-
108293-int
108294-main(void) {
108295-	return test(
108296-	    test_new_delete,
108297-	    test_count_insert_search_remove,
108298-	    test_insert_iter_remove);
108299-}
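
The ckh cuckoo-hash API used above follows a simple new/insert/search/remove/delete lifecycle, with ckh_search and ckh_remove returning false on success and reporting the stored key and value through out-pointers. A condensed usage sketch reusing the calls from the deleted test; the surrounding skeleton is illustrative:

#include "test/jemalloc_test.h"

#include <stdbool.h>

static void
ckh_usage_sketch(void) {
	tsd_t *tsd = tsd_fetch();
	ckh_t ckh;
	void *k, *v;

	if (ckh_new(tsd, &ckh, 2, ckh_pointer_hash, ckh_pointer_keycomp)) {
		return;	/* Allocation failure. */
	}
	int x = 0;
	ckh_insert(tsd, &ckh, &x, &x);		/* Key and value may alias. */
	bool missing = ckh_search(&ckh, &x, &k, &v);
	/* missing == false; k == &x and v == &x; ckh_count(&ckh) == 1. */
	(void)missing;
	ckh_remove(tsd, &ckh, &x, &k, &v);	/* Returns false on success. */
	ckh_delete(tsd, &ckh);
}
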
108300diff --git a/jemalloc/test/unit/counter.c b/jemalloc/test/unit/counter.c
108301deleted file mode 100644
108302index 277baac..0000000
108303--- a/jemalloc/test/unit/counter.c
108304+++ /dev/null
108305@@ -1,80 +0,0 @@
108306-#include "test/jemalloc_test.h"
108307-
108308-static const uint64_t interval = 1 << 20;
108309-
108310-TEST_BEGIN(test_counter_accum) {
108311-	uint64_t increment = interval >> 4;
108312-	unsigned n = interval / increment;
108313-	uint64_t accum = 0;
108314-
108315-	counter_accum_t c;
108316-	counter_accum_init(&c, interval);
108317-
108318-	tsd_t *tsd = tsd_fetch();
108319-	bool trigger;
108320-	for (unsigned i = 0; i < n; i++) {
108321-		trigger = counter_accum(tsd_tsdn(tsd), &c, increment);
108322-		accum += increment;
108323-		if (accum < interval) {
108324-			expect_b_eq(trigger, false, "Should not trigger");
108325-		} else {
108326-			expect_b_eq(trigger, true, "Should have triggered");
108327-		}
108328-	}
108329-	expect_b_eq(trigger, true, "Should have triggered");
108330-}
108331-TEST_END
108332-
108333-void
108334-expect_counter_value(counter_accum_t *c, uint64_t v) {
108335-	uint64_t accum = locked_read_u64_unsynchronized(&c->accumbytes);
108336-	expect_u64_eq(accum, v, "Counter value mismatch");
108337-}
108338-
108339-#define N_THDS (16)
108340-#define N_ITER_THD (1 << 12)
108341-#define ITER_INCREMENT (interval >> 4)
108342-
108343-static void *
108344-thd_start(void *varg) {
108345-	counter_accum_t *c = (counter_accum_t *)varg;
108346-
108347-	tsd_t *tsd = tsd_fetch();
108348-	bool trigger;
108349-	uintptr_t n_triggered = 0;
108350-	for (unsigned i = 0; i < N_ITER_THD; i++) {
108351-		trigger = counter_accum(tsd_tsdn(tsd), c, ITER_INCREMENT);
108352-		n_triggered += trigger ? 1 : 0;
108353-	}
108354-
108355-	return (void *)n_triggered;
108356-}
108357-
108358-
108359-TEST_BEGIN(test_counter_mt) {
108360-	counter_accum_t shared_c;
108361-	counter_accum_init(&shared_c, interval);
108362-
108363-	thd_t thds[N_THDS];
108364-	unsigned i;
108365-	for (i = 0; i < N_THDS; i++) {
108366-		thd_create(&thds[i], thd_start, (void *)&shared_c);
108367-	}
108368-
108369-	uint64_t sum = 0;
108370-	for (i = 0; i < N_THDS; i++) {
108371-		void *ret;
108372-		thd_join(thds[i], &ret);
108373-		sum += (uintptr_t)ret;
108374-	}
108375-	expect_u64_eq(sum, N_THDS * N_ITER_THD / (interval / ITER_INCREMENT),
108376-	    "Incorrect number of triggers");
108377-}
108378-TEST_END
108379-
108380-int
108381-main(void) {
108382-	return test(
108383-	    test_counter_accum,
108384-	    test_counter_mt);
108385-}
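
The expected trigger count in test_counter_mt follows from the constants: ITER_INCREMENT is interval >> 4, so a trigger fires once per 16 accumulated increments, and 16 threads times 4096 iterations divided by 16 yields 4096 triggers. A worked check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int
main(void) {
	const uint64_t interval = 1 << 20;
	const uint64_t increment = interval >> 4;	/* ITER_INCREMENT */
	const uint64_t n_thds = 16, n_iter = 1 << 12;

	/* One trigger per (interval / increment) = 16 accumulated increments. */
	uint64_t expected = n_thds * n_iter / (interval / increment);
	printf("%llu\n", (unsigned long long)expected);	/* Prints 4096. */
	return 0;
}
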
108386diff --git a/jemalloc/test/unit/decay.c b/jemalloc/test/unit/decay.c
108387deleted file mode 100644
108388index bdb6d0a..0000000
108389--- a/jemalloc/test/unit/decay.c
108390+++ /dev/null
108391@@ -1,283 +0,0 @@
108392-#include "test/jemalloc_test.h"
108393-
108394-#include "jemalloc/internal/decay.h"
108395-
108396-TEST_BEGIN(test_decay_init) {
108397-	decay_t decay;
108398-	memset(&decay, 0, sizeof(decay));
108399-
108400-	nstime_t curtime;
108401-	nstime_init(&curtime, 0);
108402-
108403-	ssize_t decay_ms = 1000;
108404-	assert_true(decay_ms_valid(decay_ms), "");
108405-
108406-	expect_false(decay_init(&decay, &curtime, decay_ms),
108407-	    "Failed to initialize decay");
108408-	expect_zd_eq(decay_ms_read(&decay), decay_ms,
108409-	    "Decay_ms was initialized incorrectly");
108410-	expect_u64_ne(decay_epoch_duration_ns(&decay), 0,
108411-	    "Epoch duration was initialized incorrectly");
108412-}
108413-TEST_END
108414-
108415-TEST_BEGIN(test_decay_ms_valid) {
108416-	expect_false(decay_ms_valid(-7),
108417-	    "Misclassified negative decay as valid");
108418-	expect_true(decay_ms_valid(-1),
108419-	    "Misclassified -1 (never decay) as invalid decay");
108420-	expect_true(decay_ms_valid(8943),
108421-	    "Misclassified valid decay");
108422-	if (SSIZE_MAX > NSTIME_SEC_MAX) {
108423-		expect_false(
108424-		    decay_ms_valid((ssize_t)(NSTIME_SEC_MAX * KQU(1000) + 39)),
108425-		    "Misclassified too large decay");
108426-	}
108427-}
108428-TEST_END
108429-
108430-TEST_BEGIN(test_decay_npages_purge_in) {
108431-	decay_t decay;
108432-	memset(&decay, 0, sizeof(decay));
108433-
108434-	nstime_t curtime;
108435-	nstime_init(&curtime, 0);
108436-
108437-	uint64_t decay_ms = 1000;
108438-	nstime_t decay_nstime;
108439-	nstime_init(&decay_nstime, decay_ms * 1000 * 1000);
108440-	expect_false(decay_init(&decay, &curtime, (ssize_t)decay_ms),
108441-	    "Failed to initialize decay");
108442-
108443-	size_t new_pages = 100;
108444-
108445-	nstime_t time;
108446-	nstime_copy(&time, &decay_nstime);
108447-	expect_u64_eq(decay_npages_purge_in(&decay, &time, new_pages),
108448-	    new_pages, "Not all pages are expected to decay in decay_ms");
108449-
108450-	nstime_init(&time, 0);
108451-	expect_u64_eq(decay_npages_purge_in(&decay, &time, new_pages), 0,
108452-	    "More than zero pages are expected to instantly decay");
108453-
108454-	nstime_copy(&time, &decay_nstime);
108455-	nstime_idivide(&time, 2);
108456-	expect_u64_eq(decay_npages_purge_in(&decay, &time, new_pages),
108457-	    new_pages / 2, "Not half of pages decay in half the decay period");
108458-}
108459-TEST_END
108460-
108461-TEST_BEGIN(test_decay_maybe_advance_epoch) {
108462-	decay_t decay;
108463-	memset(&decay, 0, sizeof(decay));
108464-
108465-	nstime_t curtime;
108466-	nstime_init(&curtime, 0);
108467-
108468-	uint64_t decay_ms = 1000;
108469-
108470-	bool err = decay_init(&decay, &curtime, (ssize_t)decay_ms);
108471-	expect_false(err, "");
108472-
108473-	bool advanced;
108474-	advanced = decay_maybe_advance_epoch(&decay, &curtime, 0);
108475-	expect_false(advanced, "Epoch advanced while time didn't");
108476-
108477-	nstime_t interval;
108478-	nstime_init(&interval, decay_epoch_duration_ns(&decay));
108479-
108480-	nstime_add(&curtime, &interval);
108481-	advanced = decay_maybe_advance_epoch(&decay, &curtime, 0);
108482-	expect_false(advanced, "Epoch advanced after first interval");
108483-
108484-	nstime_add(&curtime, &interval);
108485-	advanced = decay_maybe_advance_epoch(&decay, &curtime, 0);
108486-	expect_true(advanced, "Epoch didn't advance after two intervals");
108487-}
108488-TEST_END
108489-
108490-TEST_BEGIN(test_decay_empty) {
108491-	/* If we never have any decaying pages, npages_limit should be 0. */
108492-	decay_t decay;
108493-	memset(&decay, 0, sizeof(decay));
108494-
108495-	nstime_t curtime;
108496-	nstime_init(&curtime, 0);
108497-
108498-	uint64_t decay_ms = 1000;
108499-	uint64_t decay_ns = decay_ms * 1000 * 1000;
108500-
108501-	bool err = decay_init(&decay, &curtime, (ssize_t)decay_ms);
108502-	assert_false(err, "");
108503-
108504-	uint64_t time_between_calls = decay_epoch_duration_ns(&decay) / 5;
108505-	int nepochs = 0;
108506-	for (uint64_t i = 0; i < decay_ns / time_between_calls * 10; i++) {
108507-		size_t dirty_pages = 0;
108508-		nstime_init(&curtime, i * time_between_calls);
108509-		bool epoch_advanced = decay_maybe_advance_epoch(&decay,
108510-		    &curtime, dirty_pages);
108511-		if (epoch_advanced) {
108512-			nepochs++;
108513-			expect_zu_eq(decay_npages_limit_get(&decay), 0,
108514-			    "Unexpectedly increased npages_limit");
108515-		}
108516-	}
108517-	expect_d_gt(nepochs, 0, "Epochs never advanced");
108518-}
108519-TEST_END
108520-
108521-/*
108522- * Verify that npages_limit correctly decays as the time goes.
108523- *
108524- * During first 'nepoch_init' epochs, add new dirty pages.
108525- * After that, let them decay and verify npages_limit decreases.
108526- * Then proceed with another 'nepoch_init' epochs and check that
108527- * all dirty pages are flushed out of backlog, bringing npages_limit
108528- * down to zero.
108529- */
108530-TEST_BEGIN(test_decay) {
108531-	const uint64_t nepoch_init = 10;
108532-
108533-	decay_t decay;
108534-	memset(&decay, 0, sizeof(decay));
108535-
108536-	nstime_t curtime;
108537-	nstime_init(&curtime, 0);
108538-
108539-	uint64_t decay_ms = 1000;
108540-	uint64_t decay_ns = decay_ms * 1000 * 1000;
108541-
108542-	bool err = decay_init(&decay, &curtime, (ssize_t)decay_ms);
108543-	assert_false(err, "");
108544-
108545-	expect_zu_eq(decay_npages_limit_get(&decay), 0,
108546-	    "Empty decay returned nonzero npages_limit");
108547-
108548-	nstime_t epochtime;
108549-	nstime_init(&epochtime, decay_epoch_duration_ns(&decay));
108550-
108551-	const size_t dirty_pages_per_epoch = 1000;
108552-	size_t dirty_pages = 0;
108553-	uint64_t epoch_ns = decay_epoch_duration_ns(&decay);
108554-	bool epoch_advanced = false;
108555-
108556-	/* Populate backlog with some dirty pages */
108557-	for (uint64_t i = 0; i < nepoch_init; i++) {
108558-		nstime_add(&curtime, &epochtime);
108559-		dirty_pages += dirty_pages_per_epoch;
108560-		epoch_advanced |= decay_maybe_advance_epoch(&decay, &curtime,
108561-		    dirty_pages);
108562-	}
108563-	expect_true(epoch_advanced, "Epoch never advanced");
108564-
108565-	size_t npages_limit = decay_npages_limit_get(&decay);
108566-	expect_zu_gt(npages_limit, 0, "npages_limit is incorrectly equal "
108567-	    "to zero after dirty pages have been added");
108568-
108569-	/* Keep dirty pages unchanged and verify that npages_limit decreases */
108570-	for (uint64_t i = nepoch_init; i * epoch_ns < decay_ns; ++i) {
108571-		nstime_add(&curtime, &epochtime);
108572-		epoch_advanced = decay_maybe_advance_epoch(&decay, &curtime,
108573-				    dirty_pages);
108574-		if (epoch_advanced) {
108575-			size_t npages_limit_new = decay_npages_limit_get(&decay);
108576-			expect_zu_lt(npages_limit_new, npages_limit,
108577-			    "npages_limit failed to decay");
108578-
108579-			npages_limit = npages_limit_new;
108580-		}
108581-	}
108582-
108583-	expect_zu_gt(npages_limit, 0, "npages_limit decayed to zero earlier "
108584-	    "than decay_ms since last dirty page was added");
108585-
108586-	/* Completely push all dirty pages out of the backlog */
108587-	epoch_advanced = false;
108588-	for (uint64_t i = 0; i < nepoch_init; i++) {
108589-		nstime_add(&curtime, &epochtime);
108590-		epoch_advanced |= decay_maybe_advance_epoch(&decay, &curtime,
108591-		    dirty_pages);
108592-	}
108593-	expect_true(epoch_advanced, "Epoch never advanced");
108594-
108595-	npages_limit = decay_npages_limit_get(&decay);
108596-	expect_zu_eq(npages_limit, 0, "npages_limit didn't decay to 0 after "
108597-	    "decay_ms since last bump in dirty pages");
108598-}
108599-TEST_END
108600-
108601-TEST_BEGIN(test_decay_ns_until_purge) {
108602-	const uint64_t nepoch_init = 10;
108603-
108604-	decay_t decay;
108605-	memset(&decay, 0, sizeof(decay));
108606-
108607-	nstime_t curtime;
108608-	nstime_init(&curtime, 0);
108609-
108610-	uint64_t decay_ms = 1000;
108611-	uint64_t decay_ns = decay_ms * 1000 * 1000;
108612-
108613-	bool err = decay_init(&decay, &curtime, (ssize_t)decay_ms);
108614-	assert_false(err, "");
108615-
108616-	nstime_t epochtime;
108617-	nstime_init(&epochtime, decay_epoch_duration_ns(&decay));
108618-
108619-	uint64_t ns_until_purge_empty = decay_ns_until_purge(&decay, 0, 0);
108620-	expect_u64_eq(ns_until_purge_empty, DECAY_UNBOUNDED_TIME_TO_PURGE,
108621-	    "Failed to return unbounded wait time for zero threshold");
108622-
108623-	const size_t dirty_pages_per_epoch = 1000;
108624-	size_t dirty_pages = 0;
108625-	bool epoch_advanced = false;
108626-	for (uint64_t i = 0; i < nepoch_init; i++) {
108627-		nstime_add(&curtime, &epochtime);
108628-		dirty_pages += dirty_pages_per_epoch;
108629-		epoch_advanced |= decay_maybe_advance_epoch(&decay, &curtime,
108630-		    dirty_pages);
108631-	}
108632-	expect_true(epoch_advanced, "Epoch never advanced");
108633-
108634-	uint64_t ns_until_purge_all = decay_ns_until_purge(&decay,
108635-	    dirty_pages, dirty_pages);
108636-	expect_u64_ge(ns_until_purge_all, decay_ns,
108637-	    "Incorrectly calculated time to purge all pages");
108638-
108639-	uint64_t ns_until_purge_none = decay_ns_until_purge(&decay,
108640-	    dirty_pages, 0);
108641-	expect_u64_eq(ns_until_purge_none, decay_epoch_duration_ns(&decay) * 2,
108642-	    "Incorrectly calculated time to purge 0 pages");
108643-
108644-	uint64_t npages_threshold = dirty_pages / 2;
108645-	uint64_t ns_until_purge_half = decay_ns_until_purge(&decay,
108646-	    dirty_pages, npages_threshold);
108647-
108648-	nstime_t waittime;
108649-	nstime_init(&waittime, ns_until_purge_half);
108650-	nstime_add(&curtime, &waittime);
108651-
108652-	decay_maybe_advance_epoch(&decay, &curtime, dirty_pages);
108653-	size_t npages_limit = decay_npages_limit_get(&decay);
108654-	expect_zu_lt(npages_limit, dirty_pages,
108655-	    "npages_limit failed to decrease after waiting");
108656-	size_t expected = dirty_pages - npages_limit;
108657-	int deviation = abs((int)expected - (int)(npages_threshold));
108658-	expect_d_lt(deviation, (int)(npages_threshold / 2),
108659-	    "After waiting, number of pages is out of the expected interval "
108660-	    "[0.5 * npages_threshold .. 1.5 * npages_threshold]");
108661-}
108662-TEST_END
108663-
108664-int
108665-main(void) {
108666-	return test(
108667-	    test_decay_init,
108668-	    test_decay_ms_valid,
108669-	    test_decay_npages_purge_in,
108670-	    test_decay_maybe_advance_epoch,
108671-	    test_decay_empty,
108672-	    test_decay,
108673-	    test_decay_ns_until_purge);
108674-}
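For orientation, the three checks in test_decay_npages_purge_in above are consistent with a simple proportional model: pages that became dirty "now" are expected to be purged spread evenly across the decay window, so with decay_ms = 1000 all 100 pages are due after the full window, none instantly, and 50 after half the window. The sketch below captures that relationship only (hypothetical toy_ name; the real decay_npages_purge_in works off decay_t state, and this is just the behaviour the test samples at three points):

#include <stddef.h>
#include <stdint.h>

/* Pages expected to be purged within time_ns, out of new_pages added now. */
static uint64_t
toy_npages_purge_in(uint64_t decay_ns, uint64_t time_ns, size_t new_pages) {
	if (time_ns >= decay_ns) {
		return new_pages;	/* full window elapsed: all of them */
	}
	return (uint64_t)new_pages * time_ns / decay_ns;
}

With decay_ns = 1000 * 1000 * 1000 this returns 100, 0, and 50 for the three times the test uses.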
108675diff --git a/jemalloc/test/unit/div.c b/jemalloc/test/unit/div.c
108676deleted file mode 100644
108677index 29aea66..0000000
108678--- a/jemalloc/test/unit/div.c
108679+++ /dev/null
108680@@ -1,29 +0,0 @@
108681-#include "test/jemalloc_test.h"
108682-
108683-#include "jemalloc/internal/div.h"
108684-
108685-TEST_BEGIN(test_div_exhaustive) {
108686-	for (size_t divisor = 2; divisor < 1000 * 1000; ++divisor) {
108687-		div_info_t div_info;
108688-		div_init(&div_info, divisor);
108689-		size_t max = 1000 * divisor;
108690-		if (max < 1000 * 1000) {
108691-			max = 1000 * 1000;
108692-		}
108693-		for (size_t dividend = 0; dividend < max;
108694-		    dividend += divisor) {
108695-			size_t quotient = div_compute(
108696-			    &div_info, dividend);
108697-			expect_zu_eq(dividend, quotient * divisor,
108698-			    "With divisor = %zu, dividend = %zu, "
108699-			    "got quotient %zu", divisor, dividend, quotient);
108700-		}
108701-	}
108702-}
108703-TEST_END
108704-
108705-int
108706-main(void) {
108707-	return test_no_reentrancy(
108708-	    test_div_exhaustive);
108709-}
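The exhaustive test above only ever feeds div_compute dividends that are exact multiples of the divisor, which is the case where division by a precomputed multiply-and-shift is easy to get right. A self-contained sketch of that classic technique (hypothetical toy_* names, not the actual div_init/div_compute implementation): for a fixed divisor d >= 2, precompute magic = floor(2^32 / d) + 1; then for any dividend that is a multiple of d and fits in 32 bits, (dividend * magic) >> 32 recovers the quotient exactly.

#include <assert.h>
#include <stdint.h>

typedef struct {
	uint64_t magic;	/* floor(2^32 / divisor) + 1 */
} toy_div_info_t;

static void
toy_div_init(toy_div_info_t *info, uint32_t divisor) {
	assert(divisor >= 2);
	info->magic = (((uint64_t)1 << 32) / divisor) + 1;
}

/* Exact whenever dividend is a multiple of the divisor (and < 2^32). */
static uint32_t
toy_div_compute(const toy_div_info_t *info, uint32_t dividend) {
	return (uint32_t)(((uint64_t)dividend * info->magic) >> 32);
}

For example, with divisor 7 the magic is 613566757, and toy_div_compute(&info, 21) evaluates to (21 * 613566757) >> 32 = 3.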
108710diff --git a/jemalloc/test/unit/double_free.c b/jemalloc/test/unit/double_free.c
108711deleted file mode 100644
108712index 12122c1..0000000
108713--- a/jemalloc/test/unit/double_free.c
108714+++ /dev/null
108715@@ -1,77 +0,0 @@
108716-#include "test/jemalloc_test.h"
108717-#include "test/san.h"
108718-
108719-#include "jemalloc/internal/safety_check.h"
108720-
108721-bool fake_abort_called;
108722-void fake_abort(const char *message) {
108723-	(void)message;
108724-	fake_abort_called = true;
108725-}
108726-
108727-void
108728-test_large_double_free_pre(void) {
108729-	safety_check_set_abort(&fake_abort);
108730-	fake_abort_called = false;
108731-}
108732-
108733-void
108734-test_large_double_free_post() {
108735-	expect_b_eq(fake_abort_called, true, "Double-free check didn't fire.");
108736-	safety_check_set_abort(NULL);
108737-}
108738-
108739-TEST_BEGIN(test_large_double_free_tcache) {
108740-	test_skip_if(!config_opt_safety_checks);
108741-	/*
108742-	 * Skip debug builds, since too many assertions will be triggered with
108743-	 * double-free before hitting the one we are interested in.
108744-	 */
108745-	test_skip_if(config_debug);
108746-
108747-	test_large_double_free_pre();
108748-	char *ptr = malloc(SC_LARGE_MINCLASS);
108749-	bool guarded = extent_is_guarded(tsdn_fetch(), ptr);
108750-	free(ptr);
108751-	if (!guarded) {
108752-		free(ptr);
108753-	} else {
108754-		/*
108755-		 * Skip because guarded extents may unguard immediately on
108756-		 * deallocation, in which case the second free will crash before
108757-		 * reaching the intended safety check.
108758-		 */
108759-		fake_abort_called = true;
108760-	}
108761-	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
108762-	test_large_double_free_post();
108763-}
108764-TEST_END
108765-
108766-TEST_BEGIN(test_large_double_free_no_tcache) {
108767-	test_skip_if(!config_opt_safety_checks);
108768-	test_skip_if(config_debug);
108769-
108770-	test_large_double_free_pre();
108771-	char *ptr = mallocx(SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
108772-	bool guarded = extent_is_guarded(tsdn_fetch(), ptr);
108773-	dallocx(ptr, MALLOCX_TCACHE_NONE);
108774-	if (!guarded) {
108775-		dallocx(ptr, MALLOCX_TCACHE_NONE);
108776-	} else {
108777-		/*
108778-		 * Skip because guarded extents may unguard immediately on
108779-		 * deallocation, in which case the second free will crash before
108780-		 * reaching the intended safety check.
108781-		 */
108782-		fake_abort_called = true;
108783-	}
108784-	test_large_double_free_post();
108785-}
108786-TEST_END
108787-
108788-int
108789-main(void) {
108790-	return test(test_large_double_free_no_tcache,
108791-	    test_large_double_free_tcache);
108792-}
108793diff --git a/jemalloc/test/unit/double_free.h b/jemalloc/test/unit/double_free.h
108794deleted file mode 100644
108795index 8b13789..0000000
108796--- a/jemalloc/test/unit/double_free.h
108797+++ /dev/null
108798@@ -1 +0,0 @@
108799-
108800diff --git a/jemalloc/test/unit/edata_cache.c b/jemalloc/test/unit/edata_cache.c
108801deleted file mode 100644
108802index af1110a..0000000
108803--- a/jemalloc/test/unit/edata_cache.c
108804+++ /dev/null
108805@@ -1,226 +0,0 @@
108806-#include "test/jemalloc_test.h"
108807-
108808-#include "jemalloc/internal/edata_cache.h"
108809-
108810-static void
108811-test_edata_cache_init(edata_cache_t *edata_cache) {
108812-	base_t *base = base_new(TSDN_NULL, /* ind */ 1,
108813-	    &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
108814-	assert_ptr_not_null(base, "");
108815-	bool err = edata_cache_init(edata_cache, base);
108816-	assert_false(err, "");
108817-}
108818-
108819-static void
108820-test_edata_cache_destroy(edata_cache_t *edata_cache) {
108821-	base_delete(TSDN_NULL, edata_cache->base);
108822-}
108823-
108824-TEST_BEGIN(test_edata_cache) {
108825-	edata_cache_t ec;
108826-	test_edata_cache_init(&ec);
108827-
108828-	/* Get one */
108829-	edata_t *ed1 = edata_cache_get(TSDN_NULL, &ec);
108830-	assert_ptr_not_null(ed1, "");
108831-
108832-	/* Cache should be empty */
108833-	assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
108834-
108835-	/* Get another */
108836-	edata_t *ed2 = edata_cache_get(TSDN_NULL, &ec);
108837-	assert_ptr_not_null(ed2, "");
108838-
108839-	/* Still empty */
108840-	assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
108841-
108842-	/* Put one back, and the cache should now have one item */
108843-	edata_cache_put(TSDN_NULL, &ec, ed1);
108844-	assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 1, "");
108845-
108846-	/* Reallocating should reuse the item, and leave an empty cache. */
108847-	edata_t *ed1_again = edata_cache_get(TSDN_NULL, &ec);
108848-	assert_ptr_eq(ed1, ed1_again, "");
108849-	assert_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
108850-
108851-	test_edata_cache_destroy(&ec);
108852-}
108853-TEST_END
108854-
108855-static size_t
108856-ecf_count(edata_cache_fast_t *ecf) {
108857-	size_t count = 0;
108858-	edata_t *cur;
108859-	ql_foreach(cur, &ecf->list.head, ql_link_inactive) {
108860-		count++;
108861-	}
108862-	return count;
108863-}
108864-
108865-TEST_BEGIN(test_edata_cache_fast_simple) {
108866-	edata_cache_t ec;
108867-	edata_cache_fast_t ecf;
108868-
108869-	test_edata_cache_init(&ec);
108870-	edata_cache_fast_init(&ecf, &ec);
108871-
108872-	edata_t *ed1 = edata_cache_fast_get(TSDN_NULL, &ecf);
108873-	expect_ptr_not_null(ed1, "");
108874-	expect_zu_eq(ecf_count(&ecf), 0, "");
108875-	expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
108876-
108877-	edata_t *ed2 = edata_cache_fast_get(TSDN_NULL, &ecf);
108878-	expect_ptr_not_null(ed2, "");
108879-	expect_zu_eq(ecf_count(&ecf), 0, "");
108880-	expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
108881-
108882-	edata_cache_fast_put(TSDN_NULL, &ecf, ed1);
108883-	expect_zu_eq(ecf_count(&ecf), 1, "");
108884-	expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
108885-
108886-	edata_cache_fast_put(TSDN_NULL, &ecf, ed2);
108887-	expect_zu_eq(ecf_count(&ecf), 2, "");
108888-	expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
108889-
108890-	/* LIFO ordering. */
108891-	expect_ptr_eq(ed2, edata_cache_fast_get(TSDN_NULL, &ecf), "");
108892-	expect_zu_eq(ecf_count(&ecf), 1, "");
108893-	expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
108894-
108895-	expect_ptr_eq(ed1, edata_cache_fast_get(TSDN_NULL, &ecf), "");
108896-	expect_zu_eq(ecf_count(&ecf), 0, "");
108897-	expect_zu_eq(atomic_load_zu(&ec.count, ATOMIC_RELAXED), 0, "");
108898-
108899-	test_edata_cache_destroy(&ec);
108900-}
108901-TEST_END
108902-
108903-TEST_BEGIN(test_edata_cache_fill) {
108904-	edata_cache_t ec;
108905-	edata_cache_fast_t ecf;
108906-
108907-	test_edata_cache_init(&ec);
108908-	edata_cache_fast_init(&ecf, &ec);
108909-
108910-	edata_t *allocs[EDATA_CACHE_FAST_FILL * 2];
108911-
108912-	/*
108913-	 * If the fallback cache can't satisfy the request, we shouldn't do
108914-	 * extra allocations until compelled to.  Put half the fill goal in the
108915-	 * fallback.
108916-	 */
108917-	for (int i = 0; i < EDATA_CACHE_FAST_FILL / 2; i++) {
108918-		allocs[i] = edata_cache_get(TSDN_NULL, &ec);
108919-	}
108920-	for (int i = 0; i < EDATA_CACHE_FAST_FILL / 2; i++) {
108921-		edata_cache_put(TSDN_NULL, &ec, allocs[i]);
108922-	}
108923-	expect_zu_eq(EDATA_CACHE_FAST_FILL / 2,
108924-	    atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
108925-
108926-	allocs[0] = edata_cache_fast_get(TSDN_NULL, &ecf);
108927-	expect_zu_eq(EDATA_CACHE_FAST_FILL / 2 - 1, ecf_count(&ecf),
108928-	    "Should have grabbed all edatas available but no more.");
108929-
108930-	for (int i = 1; i < EDATA_CACHE_FAST_FILL / 2; i++) {
108931-		allocs[i] = edata_cache_fast_get(TSDN_NULL, &ecf);
108932-		expect_ptr_not_null(allocs[i], "");
108933-	}
108934-	expect_zu_eq(0, ecf_count(&ecf), "");
108935-
108936-	/* When forced, we should alloc from the base. */
108937-	edata_t *edata = edata_cache_fast_get(TSDN_NULL, &ecf);
108938-	expect_ptr_not_null(edata, "");
108939-	expect_zu_eq(0, ecf_count(&ecf), "Allocated more than necessary");
108940-	expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED),
108941-	    "Allocated more than necessary");
108942-
108943-	/*
108944-	 * We should correctly fill in the common case where the fallback isn't
108945-	 * exhausted, too.
108946-	 */
108947-	for (int i = 0; i < EDATA_CACHE_FAST_FILL * 2; i++) {
108948-		allocs[i] = edata_cache_get(TSDN_NULL, &ec);
108949-		expect_ptr_not_null(allocs[i], "");
108950-	}
108951-	for (int i = 0; i < EDATA_CACHE_FAST_FILL * 2; i++) {
108952-		edata_cache_put(TSDN_NULL, &ec, allocs[i]);
108953-	}
108954-
108955-	allocs[0] = edata_cache_fast_get(TSDN_NULL, &ecf);
108956-	expect_zu_eq(EDATA_CACHE_FAST_FILL - 1, ecf_count(&ecf), "");
108957-	expect_zu_eq(EDATA_CACHE_FAST_FILL,
108958-	    atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
108959-	for (int i = 1; i < EDATA_CACHE_FAST_FILL; i++) {
108960-		expect_zu_eq(EDATA_CACHE_FAST_FILL - i, ecf_count(&ecf), "");
108961-		expect_zu_eq(EDATA_CACHE_FAST_FILL,
108962-		    atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
108963-		allocs[i] = edata_cache_fast_get(TSDN_NULL, &ecf);
108964-		expect_ptr_not_null(allocs[i], "");
108965-	}
108966-	expect_zu_eq(0, ecf_count(&ecf), "");
108967-	expect_zu_eq(EDATA_CACHE_FAST_FILL,
108968-	    atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
108969-
108970-	allocs[0] = edata_cache_fast_get(TSDN_NULL, &ecf);
108971-	expect_zu_eq(EDATA_CACHE_FAST_FILL - 1, ecf_count(&ecf), "");
108972-	expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
108973-	for (int i = 1; i < EDATA_CACHE_FAST_FILL; i++) {
108974-		expect_zu_eq(EDATA_CACHE_FAST_FILL - i, ecf_count(&ecf), "");
108975-		expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
108976-		allocs[i] = edata_cache_fast_get(TSDN_NULL, &ecf);
108977-		expect_ptr_not_null(allocs[i], "");
108978-	}
108979-	expect_zu_eq(0, ecf_count(&ecf), "");
108980-	expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
108981-
108982-	test_edata_cache_destroy(&ec);
108983-}
108984-TEST_END
108985-
108986-TEST_BEGIN(test_edata_cache_disable) {
108987-	edata_cache_t ec;
108988-	edata_cache_fast_t ecf;
108989-
108990-	test_edata_cache_init(&ec);
108991-	edata_cache_fast_init(&ecf, &ec);
108992-
108993-	for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) {
108994-		edata_t *edata = edata_cache_get(TSDN_NULL, &ec);
108995-		expect_ptr_not_null(edata, "");
108996-		edata_cache_fast_put(TSDN_NULL, &ecf, edata);
108997-	}
108998-
108999-	expect_zu_eq(EDATA_CACHE_FAST_FILL, ecf_count(&ecf), "");
109000-	expect_zu_eq(0, atomic_load_zu(&ec.count, ATOMIC_RELAXED), "");
109001-
109002-	edata_cache_fast_disable(TSDN_NULL, &ecf);
109003-
109004-	expect_zu_eq(0, ecf_count(&ecf), "");
109005-	expect_zu_eq(EDATA_CACHE_FAST_FILL,
109006-	    atomic_load_zu(&ec.count, ATOMIC_RELAXED), "Disabling should flush");
109007-
109008-	edata_t *edata = edata_cache_fast_get(TSDN_NULL, &ecf);
109009-	expect_zu_eq(0, ecf_count(&ecf), "");
109010-	expect_zu_eq(EDATA_CACHE_FAST_FILL - 1,
109011-	    atomic_load_zu(&ec.count, ATOMIC_RELAXED),
109012-	    "Disabled ecf should forward on get");
109013-
109014-	edata_cache_fast_put(TSDN_NULL, &ecf, edata);
109015-	expect_zu_eq(0, ecf_count(&ecf), "");
109016-	expect_zu_eq(EDATA_CACHE_FAST_FILL,
109017-	    atomic_load_zu(&ec.count, ATOMIC_RELAXED),
109018-	    "Disabled ecf should forward on put");
109019-
109020-	test_edata_cache_destroy(&ec);
109021-}
109022-TEST_END
109023-
109024-int
109025-main(void) {
109026-	return test(
109027-	    test_edata_cache,
109028-	    test_edata_cache_fast_simple,
109029-	    test_edata_cache_fill,
109030-	    test_edata_cache_disable);
109031-}
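The LIFO ordering asserted in test_edata_cache_fast_simple above (the last edata put is the first one handed back) is what falls out of using a singly linked list as the front-end cache. A minimal sketch of that structure alone (hypothetical toy_* names; the real edata_cache_fast also refills in batches from the fallback edata_cache, which the fill and disable tests above cover):

#include <stddef.h>

typedef struct toy_node_s toy_node_t;
struct toy_node_s {
	toy_node_t *next;
};

typedef struct {
	toy_node_t *head;	/* most recently put node */
} toy_freelist_t;

static void
toy_freelist_put(toy_freelist_t *fl, toy_node_t *node) {
	node->next = fl->head;
	fl->head = node;
}

static toy_node_t *
toy_freelist_get(toy_freelist_t *fl) {
	toy_node_t *node = fl->head;
	if (node != NULL) {
		fl->head = node->next;
	}
	return node;
}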
109032diff --git a/jemalloc/test/unit/emitter.c b/jemalloc/test/unit/emitter.c
109033deleted file mode 100644
109034index ef8f9ff..0000000
109035--- a/jemalloc/test/unit/emitter.c
109036+++ /dev/null
109037@@ -1,533 +0,0 @@
109038-#include "test/jemalloc_test.h"
109039-#include "jemalloc/internal/emitter.h"
109040-
109041-/*
109042- * This is so useful for debugging and feature work, we'll leave printing
109043- * functionality committed but disabled by default.
109044- */
109045-/* Print the text as it will appear. */
109046-static bool print_raw = false;
109047-/* Print the text escaped, so it can be copied back into the test case. */
109048-static bool print_escaped = false;
109049-
109050-typedef struct buf_descriptor_s buf_descriptor_t;
109051-struct buf_descriptor_s {
109052-	char *buf;
109053-	size_t len;
109054-	bool mid_quote;
109055-};
109056-
109057-/*
109058- * Forwards all writes to the passed-in buf_v (which should be cast from a
109059- * buf_descriptor_t *).
109060- */
109061-static void
109062-forwarding_cb(void *buf_descriptor_v, const char *str) {
109063-	buf_descriptor_t *buf_descriptor = (buf_descriptor_t *)buf_descriptor_v;
109064-
109065-	if (print_raw) {
109066-		malloc_printf("%s", str);
109067-	}
109068-	if (print_escaped) {
109069-		const char *it = str;
109070-		while (*it != '\0') {
109071-			if (!buf_descriptor->mid_quote) {
109072-				malloc_printf("\"");
109073-				buf_descriptor->mid_quote = true;
109074-			}
109075-			switch (*it) {
109076-			case '\\':
109077-				malloc_printf("\\");
109078-				break;
109079-			case '\"':
109080-				malloc_printf("\\\"");
109081-				break;
109082-			case '\t':
109083-				malloc_printf("\\t");
109084-				break;
109085-			case '\n':
109086-				malloc_printf("\\n\"\n");
109087-				buf_descriptor->mid_quote = false;
109088-				break;
109089-			default:
109090-				malloc_printf("%c", *it);
109091-			}
109092-			it++;
109093-		}
109094-	}
109095-
109096-	size_t written = malloc_snprintf(buf_descriptor->buf,
109097-	    buf_descriptor->len, "%s", str);
109098-	expect_zu_eq(written, strlen(str), "Buffer overflow!");
109099-	buf_descriptor->buf += written;
109100-	buf_descriptor->len -= written;
109101-	expect_zu_gt(buf_descriptor->len, 0, "Buffer out of space!");
109102-}
109103-
109104-static void
109105-expect_emit_output(void (*emit_fn)(emitter_t *),
109106-    const char *expected_json_output,
109107-    const char *expected_json_compact_output,
109108-    const char *expected_table_output) {
109109-	emitter_t emitter;
109110-	char buf[MALLOC_PRINTF_BUFSIZE];
109111-	buf_descriptor_t buf_descriptor;
109112-
109113-	buf_descriptor.buf = buf;
109114-	buf_descriptor.len = MALLOC_PRINTF_BUFSIZE;
109115-	buf_descriptor.mid_quote = false;
109116-
109117-	emitter_init(&emitter, emitter_output_json, &forwarding_cb,
109118-	    &buf_descriptor);
109119-	(*emit_fn)(&emitter);
109120-	expect_str_eq(expected_json_output, buf, "json output failure");
109121-
109122-	buf_descriptor.buf = buf;
109123-	buf_descriptor.len = MALLOC_PRINTF_BUFSIZE;
109124-	buf_descriptor.mid_quote = false;
109125-
109126-	emitter_init(&emitter, emitter_output_json_compact, &forwarding_cb,
109127-	    &buf_descriptor);
109128-	(*emit_fn)(&emitter);
109129-	expect_str_eq(expected_json_compact_output, buf,
109130-	    "compact json output failure");
109131-
109132-	buf_descriptor.buf = buf;
109133-	buf_descriptor.len = MALLOC_PRINTF_BUFSIZE;
109134-	buf_descriptor.mid_quote = false;
109135-
109136-	emitter_init(&emitter, emitter_output_table, &forwarding_cb,
109137-	    &buf_descriptor);
109138-	(*emit_fn)(&emitter);
109139-	expect_str_eq(expected_table_output, buf, "table output failure");
109140-}
109141-
109142-static void
109143-emit_dict(emitter_t *emitter) {
109144-	bool b_false = false;
109145-	bool b_true = true;
109146-	int i_123 = 123;
109147-	const char *str = "a string";
109148-
109149-	emitter_begin(emitter);
109150-	emitter_dict_begin(emitter, "foo", "This is the foo table:");
109151-	emitter_kv(emitter, "abc", "ABC", emitter_type_bool, &b_false);
109152-	emitter_kv(emitter, "def", "DEF", emitter_type_bool, &b_true);
109153-	emitter_kv_note(emitter, "ghi", "GHI", emitter_type_int, &i_123,
109154-	    "note_key1", emitter_type_string, &str);
109155-	emitter_kv_note(emitter, "jkl", "JKL", emitter_type_string, &str,
109156-	    "note_key2", emitter_type_bool, &b_false);
109157-	emitter_dict_end(emitter);
109158-	emitter_end(emitter);
109159-}
109160-
109161-static const char *dict_json =
109162-"{\n"
109163-"\t\"foo\": {\n"
109164-"\t\t\"abc\": false,\n"
109165-"\t\t\"def\": true,\n"
109166-"\t\t\"ghi\": 123,\n"
109167-"\t\t\"jkl\": \"a string\"\n"
109168-"\t}\n"
109169-"}\n";
109170-static const char *dict_json_compact =
109171-"{"
109172-	"\"foo\":{"
109173-		"\"abc\":false,"
109174-		"\"def\":true,"
109175-		"\"ghi\":123,"
109176-		"\"jkl\":\"a string\""
109177-	"}"
109178-"}";
109179-static const char *dict_table =
109180-"This is the foo table:\n"
109181-"  ABC: false\n"
109182-"  DEF: true\n"
109183-"  GHI: 123 (note_key1: \"a string\")\n"
109184-"  JKL: \"a string\" (note_key2: false)\n";
109185-
109186-static void
109187-emit_table_printf(emitter_t *emitter) {
109188-	emitter_begin(emitter);
109189-	emitter_table_printf(emitter, "Table note 1\n");
109190-	emitter_table_printf(emitter, "Table note 2 %s\n",
109191-	    "with format string");
109192-	emitter_end(emitter);
109193-}
109194-
109195-static const char *table_printf_json =
109196-"{\n"
109197-"}\n";
109198-static const char *table_printf_json_compact = "{}";
109199-static const char *table_printf_table =
109200-"Table note 1\n"
109201-"Table note 2 with format string\n";
109202-
109203-static void emit_nested_dict(emitter_t *emitter) {
109204-	int val = 123;
109205-	emitter_begin(emitter);
109206-	emitter_dict_begin(emitter, "json1", "Dict 1");
109207-	emitter_dict_begin(emitter, "json2", "Dict 2");
109208-	emitter_kv(emitter, "primitive", "A primitive", emitter_type_int, &val);
109209-	emitter_dict_end(emitter); /* Close 2 */
109210-	emitter_dict_begin(emitter, "json3", "Dict 3");
109211-	emitter_dict_end(emitter); /* Close 3 */
109212-	emitter_dict_end(emitter); /* Close 1 */
109213-	emitter_dict_begin(emitter, "json4", "Dict 4");
109214-	emitter_kv(emitter, "primitive", "Another primitive",
109215-	    emitter_type_int, &val);
109216-	emitter_dict_end(emitter); /* Close 4 */
109217-	emitter_end(emitter);
109218-}
109219-
109220-static const char *nested_dict_json =
109221-"{\n"
109222-"\t\"json1\": {\n"
109223-"\t\t\"json2\": {\n"
109224-"\t\t\t\"primitive\": 123\n"
109225-"\t\t},\n"
109226-"\t\t\"json3\": {\n"
109227-"\t\t}\n"
109228-"\t},\n"
109229-"\t\"json4\": {\n"
109230-"\t\t\"primitive\": 123\n"
109231-"\t}\n"
109232-"}\n";
109233-static const char *nested_dict_json_compact =
109234-"{"
109235-	"\"json1\":{"
109236-		"\"json2\":{"
109237-			"\"primitive\":123"
109238-		"},"
109239-		"\"json3\":{"
109240-		"}"
109241-	"},"
109242-	"\"json4\":{"
109243-		"\"primitive\":123"
109244-	"}"
109245-"}";
109246-static const char *nested_dict_table =
109247-"Dict 1\n"
109248-"  Dict 2\n"
109249-"    A primitive: 123\n"
109250-"  Dict 3\n"
109251-"Dict 4\n"
109252-"  Another primitive: 123\n";
109253-
109254-static void
109255-emit_types(emitter_t *emitter) {
109256-	bool b = false;
109257-	int i = -123;
109258-	unsigned u = 123;
109259-	ssize_t zd = -456;
109260-	size_t zu = 456;
109261-	const char *str = "string";
109262-	uint32_t u32 = 789;
109263-	uint64_t u64 = 10000000000ULL;
109264-
109265-	emitter_begin(emitter);
109266-	emitter_kv(emitter, "k1", "K1", emitter_type_bool, &b);
109267-	emitter_kv(emitter, "k2", "K2", emitter_type_int, &i);
109268-	emitter_kv(emitter, "k3", "K3", emitter_type_unsigned, &u);
109269-	emitter_kv(emitter, "k4", "K4", emitter_type_ssize, &zd);
109270-	emitter_kv(emitter, "k5", "K5", emitter_type_size, &zu);
109271-	emitter_kv(emitter, "k6", "K6", emitter_type_string, &str);
109272-	emitter_kv(emitter, "k7", "K7", emitter_type_uint32, &u32);
109273-	emitter_kv(emitter, "k8", "K8", emitter_type_uint64, &u64);
109274-	/*
109275-	 * We don't test the title type, since it's only used for tables.  It's
109276-	 * tested in the emitter_table_row tests.
109277-	 */
109278-	emitter_end(emitter);
109279-}
109280-
109281-static const char *types_json =
109282-"{\n"
109283-"\t\"k1\": false,\n"
109284-"\t\"k2\": -123,\n"
109285-"\t\"k3\": 123,\n"
109286-"\t\"k4\": -456,\n"
109287-"\t\"k5\": 456,\n"
109288-"\t\"k6\": \"string\",\n"
109289-"\t\"k7\": 789,\n"
109290-"\t\"k8\": 10000000000\n"
109291-"}\n";
109292-static const char *types_json_compact =
109293-"{"
109294-	"\"k1\":false,"
109295-	"\"k2\":-123,"
109296-	"\"k3\":123,"
109297-	"\"k4\":-456,"
109298-	"\"k5\":456,"
109299-	"\"k6\":\"string\","
109300-	"\"k7\":789,"
109301-	"\"k8\":10000000000"
109302-"}";
109303-static const char *types_table =
109304-"K1: false\n"
109305-"K2: -123\n"
109306-"K3: 123\n"
109307-"K4: -456\n"
109308-"K5: 456\n"
109309-"K6: \"string\"\n"
109310-"K7: 789\n"
109311-"K8: 10000000000\n";
109312-
109313-static void
109314-emit_modal(emitter_t *emitter) {
109315-	int val = 123;
109316-	emitter_begin(emitter);
109317-	emitter_dict_begin(emitter, "j0", "T0");
109318-	emitter_json_key(emitter, "j1");
109319-	emitter_json_object_begin(emitter);
109320-	emitter_kv(emitter, "i1", "I1", emitter_type_int, &val);
109321-	emitter_json_kv(emitter, "i2", emitter_type_int, &val);
109322-	emitter_table_kv(emitter, "I3", emitter_type_int, &val);
109323-	emitter_table_dict_begin(emitter, "T1");
109324-	emitter_kv(emitter, "i4", "I4", emitter_type_int, &val);
109325-	emitter_json_object_end(emitter); /* Close j1 */
109326-	emitter_kv(emitter, "i5", "I5", emitter_type_int, &val);
109327-	emitter_table_dict_end(emitter); /* Close T1 */
109328-	emitter_kv(emitter, "i6", "I6", emitter_type_int, &val);
109329-	emitter_dict_end(emitter); /* Close j0 / T0 */
109330-	emitter_end(emitter);
109331-}
109332-
109333-const char *modal_json =
109334-"{\n"
109335-"\t\"j0\": {\n"
109336-"\t\t\"j1\": {\n"
109337-"\t\t\t\"i1\": 123,\n"
109338-"\t\t\t\"i2\": 123,\n"
109339-"\t\t\t\"i4\": 123\n"
109340-"\t\t},\n"
109341-"\t\t\"i5\": 123,\n"
109342-"\t\t\"i6\": 123\n"
109343-"\t}\n"
109344-"}\n";
109345-const char *modal_json_compact =
109346-"{"
109347-	"\"j0\":{"
109348-		"\"j1\":{"
109349-			"\"i1\":123,"
109350-			"\"i2\":123,"
109351-			"\"i4\":123"
109352-		"},"
109353-		"\"i5\":123,"
109354-		"\"i6\":123"
109355-	"}"
109356-"}";
109357-const char *modal_table =
109358-"T0\n"
109359-"  I1: 123\n"
109360-"  I3: 123\n"
109361-"  T1\n"
109362-"    I4: 123\n"
109363-"    I5: 123\n"
109364-"  I6: 123\n";
109365-
109366-static void
109367-emit_json_array(emitter_t *emitter) {
109368-	int ival = 123;
109369-
109370-	emitter_begin(emitter);
109371-	emitter_json_key(emitter, "dict");
109372-	emitter_json_object_begin(emitter);
109373-	emitter_json_key(emitter, "arr");
109374-	emitter_json_array_begin(emitter);
109375-	emitter_json_object_begin(emitter);
109376-	emitter_json_kv(emitter, "foo", emitter_type_int, &ival);
109377-	emitter_json_object_end(emitter); /* Close arr[0] */
109378-	/* arr[1] and arr[2] are primitives. */
109379-	emitter_json_value(emitter, emitter_type_int, &ival);
109380-	emitter_json_value(emitter, emitter_type_int, &ival);
109381-	emitter_json_object_begin(emitter);
109382-	emitter_json_kv(emitter, "bar", emitter_type_int, &ival);
109383-	emitter_json_kv(emitter, "baz", emitter_type_int, &ival);
109384-	emitter_json_object_end(emitter); /* Close arr[3]. */
109385-	emitter_json_array_end(emitter); /* Close arr. */
109386-	emitter_json_object_end(emitter); /* Close dict. */
109387-	emitter_end(emitter);
109388-}
109389-
109390-static const char *json_array_json =
109391-"{\n"
109392-"\t\"dict\": {\n"
109393-"\t\t\"arr\": [\n"
109394-"\t\t\t{\n"
109395-"\t\t\t\t\"foo\": 123\n"
109396-"\t\t\t},\n"
109397-"\t\t\t123,\n"
109398-"\t\t\t123,\n"
109399-"\t\t\t{\n"
109400-"\t\t\t\t\"bar\": 123,\n"
109401-"\t\t\t\t\"baz\": 123\n"
109402-"\t\t\t}\n"
109403-"\t\t]\n"
109404-"\t}\n"
109405-"}\n";
109406-static const char *json_array_json_compact =
109407-"{"
109408-	"\"dict\":{"
109409-		"\"arr\":["
109410-			"{"
109411-				"\"foo\":123"
109412-			"},"
109413-			"123,"
109414-			"123,"
109415-			"{"
109416-				"\"bar\":123,"
109417-				"\"baz\":123"
109418-			"}"
109419-		"]"
109420-	"}"
109421-"}";
109422-static const char *json_array_table = "";
109423-
109424-static void
109425-emit_json_nested_array(emitter_t *emitter) {
109426-	int ival = 123;
109427-	char *sval = "foo";
109428-	emitter_begin(emitter);
109429-	emitter_json_array_begin(emitter);
109430-		emitter_json_array_begin(emitter);
109431-		emitter_json_value(emitter, emitter_type_int, &ival);
109432-		emitter_json_value(emitter, emitter_type_string, &sval);
109433-		emitter_json_value(emitter, emitter_type_int, &ival);
109434-		emitter_json_value(emitter, emitter_type_string, &sval);
109435-		emitter_json_array_end(emitter);
109436-		emitter_json_array_begin(emitter);
109437-		emitter_json_value(emitter, emitter_type_int, &ival);
109438-		emitter_json_array_end(emitter);
109439-		emitter_json_array_begin(emitter);
109440-		emitter_json_value(emitter, emitter_type_string, &sval);
109441-		emitter_json_value(emitter, emitter_type_int, &ival);
109442-		emitter_json_array_end(emitter);
109443-		emitter_json_array_begin(emitter);
109444-		emitter_json_array_end(emitter);
109445-	emitter_json_array_end(emitter);
109446-	emitter_end(emitter);
109447-}
109448-
109449-static const char *json_nested_array_json =
109450-"{\n"
109451-"\t[\n"
109452-"\t\t[\n"
109453-"\t\t\t123,\n"
109454-"\t\t\t\"foo\",\n"
109455-"\t\t\t123,\n"
109456-"\t\t\t\"foo\"\n"
109457-"\t\t],\n"
109458-"\t\t[\n"
109459-"\t\t\t123\n"
109460-"\t\t],\n"
109461-"\t\t[\n"
109462-"\t\t\t\"foo\",\n"
109463-"\t\t\t123\n"
109464-"\t\t],\n"
109465-"\t\t[\n"
109466-"\t\t]\n"
109467-"\t]\n"
109468-"}\n";
109469-static const char *json_nested_array_json_compact =
109470-"{"
109471-	"["
109472-		"["
109473-			"123,"
109474-			"\"foo\","
109475-			"123,"
109476-			"\"foo\""
109477-		"],"
109478-		"["
109479-			"123"
109480-		"],"
109481-		"["
109482-			"\"foo\","
109483-			"123"
109484-		"],"
109485-		"["
109486-		"]"
109487-	"]"
109488-"}";
109489-static const char *json_nested_array_table = "";
109490-
109491-static void
109492-emit_table_row(emitter_t *emitter) {
109493-	emitter_begin(emitter);
109494-	emitter_row_t row;
109495-	emitter_col_t abc = {emitter_justify_left, 10, emitter_type_title, {0}, {0, 0}};
109496-	abc.str_val = "ABC title";
109497-	emitter_col_t def = {emitter_justify_right, 15, emitter_type_title, {0}, {0, 0}};
109498-	def.str_val = "DEF title";
109499-	emitter_col_t ghi = {emitter_justify_right, 5, emitter_type_title, {0}, {0, 0}};
109500-	ghi.str_val = "GHI";
109501-
109502-	emitter_row_init(&row);
109503-	emitter_col_init(&abc, &row);
109504-	emitter_col_init(&def, &row);
109505-	emitter_col_init(&ghi, &row);
109506-
109507-	emitter_table_row(emitter, &row);
109508-
109509-	abc.type = emitter_type_int;
109510-	def.type = emitter_type_bool;
109511-	ghi.type = emitter_type_int;
109512-
109513-	abc.int_val = 123;
109514-	def.bool_val = true;
109515-	ghi.int_val = 456;
109516-	emitter_table_row(emitter, &row);
109517-
109518-	abc.int_val = 789;
109519-	def.bool_val = false;
109520-	ghi.int_val = 1011;
109521-	emitter_table_row(emitter, &row);
109522-
109523-	abc.type = emitter_type_string;
109524-	abc.str_val = "a string";
109525-	def.bool_val = false;
109526-	ghi.type = emitter_type_title;
109527-	ghi.str_val = "ghi";
109528-	emitter_table_row(emitter, &row);
109529-
109530-	emitter_end(emitter);
109531-}
109532-
109533-static const char *table_row_json =
109534-"{\n"
109535-"}\n";
109536-static const char *table_row_json_compact = "{}";
109537-static const char *table_row_table =
109538-"ABC title       DEF title  GHI\n"
109539-"123                  true  456\n"
109540-"789                 false 1011\n"
109541-"\"a string\"          false  ghi\n";
109542-
109543-#define GENERATE_TEST(feature)					\
109544-TEST_BEGIN(test_##feature) {					\
109545-	expect_emit_output(emit_##feature, feature##_json,	\
109546-	    feature##_json_compact, feature##_table);		\
109547-}								\
109548-TEST_END
109549-
109550-GENERATE_TEST(dict)
109551-GENERATE_TEST(table_printf)
109552-GENERATE_TEST(nested_dict)
109553-GENERATE_TEST(types)
109554-GENERATE_TEST(modal)
109555-GENERATE_TEST(json_array)
109556-GENERATE_TEST(json_nested_array)
109557-GENERATE_TEST(table_row)
109558-
109559-int
109560-main(void) {
109561-	return test_no_reentrancy(
109562-	    test_dict,
109563-	    test_table_printf,
109564-	    test_nested_dict,
109565-	    test_types,
109566-	    test_modal,
109567-	    test_json_array,
109568-	    test_json_nested_array,
109569-	    test_table_row);
109570-}
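For readability, the GENERATE_TEST macro above is purely mechanical: GENERATE_TEST(dict), for instance, expands to a test that hands one emit function and its three expected outputs to expect_emit_output:

TEST_BEGIN(test_dict) {
	expect_emit_output(emit_dict, dict_json,
	    dict_json_compact, dict_table);
}
TEST_END

Each of the other GENERATE_TEST invocations expands the same way for its feature name.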
109571diff --git a/jemalloc/test/unit/extent_quantize.c b/jemalloc/test/unit/extent_quantize.c
109572deleted file mode 100644
109573index e6bbd53..0000000
109574--- a/jemalloc/test/unit/extent_quantize.c
109575+++ /dev/null
109576@@ -1,141 +0,0 @@
109577-#include "test/jemalloc_test.h"
109578-
109579-TEST_BEGIN(test_small_extent_size) {
109580-	unsigned nbins, i;
109581-	size_t sz, extent_size;
109582-	size_t mib[4];
109583-	size_t miblen = sizeof(mib) / sizeof(size_t);
109584-
109585-	/*
109586-	 * Iterate over all small size classes, get their extent sizes, and
109587-	 * verify that the quantized size is the same as the extent size.
109588-	 */
109589-
109590-	sz = sizeof(unsigned);
109591-	expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
109592-	    "Unexpected mallctl failure");
109593-
109594-	expect_d_eq(mallctlnametomib("arenas.bin.0.slab_size", mib, &miblen), 0,
109595-	    "Unexpected mallctlnametomib failure");
109596-	for (i = 0; i < nbins; i++) {
109597-		mib[2] = i;
109598-		sz = sizeof(size_t);
109599-		expect_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size, &sz,
109600-		    NULL, 0), 0, "Unexpected mallctlbymib failure");
109601-		expect_zu_eq(extent_size,
109602-		    sz_psz_quantize_floor(extent_size),
109603-		    "Small extent quantization should be a no-op "
109604-		    "(extent_size=%zu)", extent_size);
109605-		expect_zu_eq(extent_size,
109606-		    sz_psz_quantize_ceil(extent_size),
109607-		    "Small extent quantization should be a no-op "
109608-		    "(extent_size=%zu)", extent_size);
109609-	}
109610-}
109611-TEST_END
109612-
109613-TEST_BEGIN(test_large_extent_size) {
109614-	bool cache_oblivious;
109615-	unsigned nlextents, i;
109616-	size_t sz, extent_size_prev, ceil_prev;
109617-	size_t mib[4];
109618-	size_t miblen = sizeof(mib) / sizeof(size_t);
109619-
109620-	/*
109621-	 * Iterate over all large size classes, get their extent sizes, and
109622-	 * verify that the quantized size is the same as the extent size.
109623-	 */
109624-
109625-	sz = sizeof(bool);
109626-	expect_d_eq(mallctl("opt.cache_oblivious", (void *)&cache_oblivious,
109627-	    &sz, NULL, 0), 0, "Unexpected mallctl failure");
109628-
109629-	sz = sizeof(unsigned);
109630-	expect_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
109631-	    0), 0, "Unexpected mallctl failure");
109632-
109633-	expect_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
109634-	    "Unexpected mallctlnametomib failure");
109635-	for (i = 0; i < nlextents; i++) {
109636-		size_t lextent_size, extent_size, floor, ceil;
109637-
109638-		mib[2] = i;
109639-		sz = sizeof(size_t);
109640-		expect_d_eq(mallctlbymib(mib, miblen, (void *)&lextent_size,
109641-		    &sz, NULL, 0), 0, "Unexpected mallctlbymib failure");
109642-		extent_size = cache_oblivious ? lextent_size + PAGE :
109643-		    lextent_size;
109644-		floor = sz_psz_quantize_floor(extent_size);
109645-		ceil = sz_psz_quantize_ceil(extent_size);
109646-
109647-		expect_zu_eq(extent_size, floor,
109648-		    "Extent quantization should be a no-op for precise size "
109649-		    "(lextent_size=%zu, extent_size=%zu)", lextent_size,
109650-		    extent_size);
109651-		expect_zu_eq(extent_size, ceil,
109652-		    "Extent quantization should be a no-op for precise size "
109653-		    "(lextent_size=%zu, extent_size=%zu)", lextent_size,
109654-		    extent_size);
109655-
109656-		if (i > 0) {
109657-			expect_zu_eq(extent_size_prev,
109658-			    sz_psz_quantize_floor(extent_size - PAGE),
109659-			    "Floor should be a precise size");
109660-			if (extent_size_prev < ceil_prev) {
109661-				expect_zu_eq(ceil_prev, extent_size,
109662-				    "Ceiling should be a precise size "
109663-				    "(extent_size_prev=%zu, ceil_prev=%zu, "
109664-				    "extent_size=%zu)", extent_size_prev,
109665-				    ceil_prev, extent_size);
109666-			}
109667-		}
109668-		if (i + 1 < nlextents) {
109669-			extent_size_prev = floor;
109670-			ceil_prev = sz_psz_quantize_ceil(extent_size +
109671-			    PAGE);
109672-		}
109673-	}
109674-}
109675-TEST_END
109676-
109677-TEST_BEGIN(test_monotonic) {
109678-#define SZ_MAX	ZU(4 * 1024 * 1024)
109679-	unsigned i;
109680-	size_t floor_prev, ceil_prev;
109681-
109682-	floor_prev = 0;
109683-	ceil_prev = 0;
109684-	for (i = 1; i <= SZ_MAX >> LG_PAGE; i++) {
109685-		size_t extent_size, floor, ceil;
109686-
109687-		extent_size = i << LG_PAGE;
109688-		floor = sz_psz_quantize_floor(extent_size);
109689-		ceil = sz_psz_quantize_ceil(extent_size);
109690-
109691-		expect_zu_le(floor, extent_size,
109692-		    "Floor should be <= (floor=%zu, extent_size=%zu, ceil=%zu)",
109693-		    floor, extent_size, ceil);
109694-		expect_zu_ge(ceil, extent_size,
109695-		    "Ceiling should be >= (floor=%zu, extent_size=%zu, "
109696-		    "ceil=%zu)", floor, extent_size, ceil);
109697-
109698-		expect_zu_le(floor_prev, floor, "Floor should be monotonic "
109699-		    "(floor_prev=%zu, floor=%zu, extent_size=%zu, ceil=%zu)",
109700-		    floor_prev, floor, extent_size, ceil);
109701-		expect_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
109702-		    "(floor=%zu, extent_size=%zu, ceil_prev=%zu, ceil=%zu)",
109703-		    floor, extent_size, ceil_prev, ceil);
109704-
109705-		floor_prev = floor;
109706-		ceil_prev = ceil;
109707-	}
109708-}
109709-TEST_END
109710-
109711-int
109712-main(void) {
109713-	return test(
109714-	    test_small_extent_size,
109715-	    test_large_extent_size,
109716-	    test_monotonic);
109717-}
109718diff --git a/jemalloc/test/unit/fb.c b/jemalloc/test/unit/fb.c
109719deleted file mode 100644
109720index ad72c75..0000000
109721--- a/jemalloc/test/unit/fb.c
109722+++ /dev/null
109723@@ -1,954 +0,0 @@
109724-#include "test/jemalloc_test.h"
109725-
109726-#include "jemalloc/internal/fb.h"
109727-#include "test/nbits.h"
109728-
109729-static void
109730-do_test_init(size_t nbits) {
109731-	size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
109732-	fb_group_t *fb = malloc(sz);
109733-	/* Junk fb's contents. */
109734-	memset(fb, 99, sz);
109735-	fb_init(fb, nbits);
109736-	for (size_t i = 0; i < nbits; i++) {
109737-		expect_false(fb_get(fb, nbits, i),
109738-		    "bitmap should start empty");
109739-	}
109740-	free(fb);
109741-}
109742-
109743-TEST_BEGIN(test_fb_init) {
109744-#define NB(nbits) \
109745-	do_test_init(nbits);
109746-	NBITS_TAB
109747-#undef NB
109748-}
109749-TEST_END
109750-
109751-static void
109752-do_test_get_set_unset(size_t nbits) {
109753-	size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
109754-	fb_group_t *fb = malloc(sz);
109755-	fb_init(fb, nbits);
109756-	/* Set the bits divisible by 3. */
109757-	for (size_t i = 0; i < nbits; i++) {
109758-		if (i % 3 == 0) {
109759-			fb_set(fb, nbits, i);
109760-		}
109761-	}
109762-	/* Check them. */
109763-	for (size_t i = 0; i < nbits; i++) {
109764-		expect_b_eq(i % 3 == 0, fb_get(fb, nbits, i),
109765-		    "Unexpected bit at position %zu", i);
109766-	}
109767-	/* Unset those divisible by 5. */
109768-	for (size_t i = 0; i < nbits; i++) {
109769-		if (i % 5 == 0) {
109770-			fb_unset(fb, nbits, i);
109771-		}
109772-	}
109773-	/* Check them. */
109774-	for (size_t i = 0; i < nbits; i++) {
109775-		expect_b_eq(i % 3 == 0 && i % 5 != 0, fb_get(fb, nbits, i),
109776-		    "Unexpected bit at position %zu", i);
109777-	}
109778-	free(fb);
109779-}
109780-
109781-TEST_BEGIN(test_get_set_unset) {
109782-#define NB(nbits) \
109783-	do_test_get_set_unset(nbits);
109784-	NBITS_TAB
109785-#undef NB
109786-}
109787-TEST_END
109788-
109789-static ssize_t
109790-find_3_5_compute(ssize_t i, size_t nbits, bool bit, bool forward) {
109791-	for(; i < (ssize_t)nbits && i >= 0; i += (forward ? 1 : -1)) {
109792-		bool expected_bit = i % 3 == 0 || i % 5 == 0;
109793-		if (expected_bit == bit) {
109794-			return i;
109795-		}
109796-	}
109797-	return forward ? (ssize_t)nbits : (ssize_t)-1;
109798-}
109799-
109800-static void
109801-do_test_search_simple(size_t nbits) {
109802-	size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
109803-	fb_group_t *fb = malloc(sz);
109804-	fb_init(fb, nbits);
109805-
109806-	/* We pick multiples of 3 or 5. */
109807-	for (size_t i = 0; i < nbits; i++) {
109808-		if (i % 3 == 0) {
109809-			fb_set(fb, nbits, i);
109810-		}
109811-		/* This tests double-setting a little, too. */
109812-		if (i % 5 == 0) {
109813-			fb_set(fb, nbits, i);
109814-		}
109815-	}
109816-	for (size_t i = 0; i < nbits; i++) {
109817-		size_t ffs_compute = find_3_5_compute(i, nbits, true, true);
109818-		size_t ffs_search = fb_ffs(fb, nbits, i);
109819-		expect_zu_eq(ffs_compute, ffs_search, "ffs mismatch at %zu", i);
109820-
109821-		ssize_t fls_compute = find_3_5_compute(i, nbits, true, false);
109822-		size_t fls_search = fb_fls(fb, nbits, i);
109823-		expect_zu_eq(fls_compute, fls_search, "fls mismatch at %zu", i);
109824-
109825-		size_t ffu_compute = find_3_5_compute(i, nbits, false, true);
109826-		size_t ffu_search = fb_ffu(fb, nbits, i);
109827-		expect_zu_eq(ffu_compute, ffu_search, "ffu mismatch at %zu", i);
109828-
109829-		size_t flu_compute = find_3_5_compute(i, nbits, false, false);
109830-		size_t flu_search = fb_flu(fb, nbits, i);
109831-		expect_zu_eq(flu_compute, flu_search, "flu mismatch at %zu", i);
109832-	}
109833-
109834-	free(fb);
109835-}
109836-
109837-TEST_BEGIN(test_search_simple) {
109838-#define NB(nbits) \
109839-	do_test_search_simple(nbits);
109840-	NBITS_TAB
109841-#undef NB
109842-}
109843-TEST_END
109844-
109845-static void
109846-expect_exhaustive_results(fb_group_t *mostly_full, fb_group_t *mostly_empty,
109847-    size_t nbits, size_t special_bit, size_t position) {
109848-	if (position < special_bit) {
109849-		expect_zu_eq(special_bit, fb_ffs(mostly_empty, nbits, position),
109850-		    "mismatch at %zu, %zu", position, special_bit);
109851-		expect_zd_eq(-1, fb_fls(mostly_empty, nbits, position),
109852-		    "mismatch at %zu, %zu", position, special_bit);
109853-		expect_zu_eq(position, fb_ffu(mostly_empty, nbits, position),
109854-		    "mismatch at %zu, %zu", position, special_bit);
109855-		expect_zd_eq(position, fb_flu(mostly_empty, nbits, position),
109856-		    "mismatch at %zu, %zu", position, special_bit);
109857-
109858-		expect_zu_eq(position, fb_ffs(mostly_full, nbits, position),
109859-		    "mismatch at %zu, %zu", position, special_bit);
109860-		expect_zd_eq(position, fb_fls(mostly_full, nbits, position),
109861-		    "mismatch at %zu, %zu", position, special_bit);
109862-		expect_zu_eq(special_bit, fb_ffu(mostly_full, nbits, position),
109863-		    "mismatch at %zu, %zu", position, special_bit);
109864-		expect_zd_eq(-1, fb_flu(mostly_full, nbits, position),
109865-		    "mismatch at %zu, %zu", position, special_bit);
109866-	} else if (position == special_bit) {
109867-		expect_zu_eq(special_bit, fb_ffs(mostly_empty, nbits, position),
109868-		    "mismatch at %zu, %zu", position, special_bit);
109869-		expect_zd_eq(special_bit, fb_fls(mostly_empty, nbits, position),
109870-		    "mismatch at %zu, %zu", position, special_bit);
109871-		expect_zu_eq(position + 1, fb_ffu(mostly_empty, nbits, position),
109872-		    "mismatch at %zu, %zu", position, special_bit);
109873-		expect_zd_eq(position - 1, fb_flu(mostly_empty, nbits,
109874-		    position), "mismatch at %zu, %zu", position, special_bit);
109875-
109876-		expect_zu_eq(position + 1, fb_ffs(mostly_full, nbits, position),
109877-		    "mismatch at %zu, %zu", position, special_bit);
109878-		expect_zd_eq(position - 1, fb_fls(mostly_full, nbits,
109879-		    position), "mismatch at %zu, %zu", position, special_bit);
109880-		expect_zu_eq(position, fb_ffu(mostly_full, nbits, position),
109881-		    "mismatch at %zu, %zu", position, special_bit);
109882-		expect_zd_eq(position, fb_flu(mostly_full, nbits, position),
109883-		    "mismatch at %zu, %zu", position, special_bit);
109884-	} else {
109885-		/* position > special_bit. */
109886-		expect_zu_eq(nbits, fb_ffs(mostly_empty, nbits, position),
109887-		    "mismatch at %zu, %zu", position, special_bit);
109888-		expect_zd_eq(special_bit, fb_fls(mostly_empty, nbits,
109889-		    position), "mismatch at %zu, %zu", position, special_bit);
109890-		expect_zu_eq(position, fb_ffu(mostly_empty, nbits, position),
109891-		    "mismatch at %zu, %zu", position, special_bit);
109892-		expect_zd_eq(position, fb_flu(mostly_empty, nbits, position),
109893-		    "mismatch at %zu, %zu", position, special_bit);
109894-
109895-		expect_zu_eq(position, fb_ffs(mostly_full, nbits, position),
109896-		    "mismatch at %zu, %zu", position, special_bit);
109897-		expect_zd_eq(position, fb_fls(mostly_full, nbits, position),
109898-		    "mismatch at %zu, %zu", position, special_bit);
109899-		expect_zu_eq(nbits, fb_ffu(mostly_full, nbits, position),
109900-		    "mismatch at %zu, %zu", position, special_bit);
109901-		expect_zd_eq(special_bit, fb_flu(mostly_full, nbits, position),
109902-		    "mismatch at %zu, %zu", position, special_bit);
109903-	}
109904-}
109905-
109906-static void
109907-do_test_search_exhaustive(size_t nbits) {
109908-	/* This test is quadratic; let's not get too big. */
109909-	if (nbits > 1000) {
109910-		return;
109911-	}
109912-	size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
109913-	fb_group_t *empty = malloc(sz);
109914-	fb_init(empty, nbits);
109915-	fb_group_t *full = malloc(sz);
109916-	fb_init(full, nbits);
109917-	fb_set_range(full, nbits, 0, nbits);
109918-
109919-	for (size_t i = 0; i < nbits; i++) {
109920-		fb_set(empty, nbits, i);
109921-		fb_unset(full, nbits, i);
109922-
109923-		for (size_t j = 0; j < nbits; j++) {
109924-			expect_exhaustive_results(full, empty, nbits, i, j);
109925-		}
109926-		fb_unset(empty, nbits, i);
109927-		fb_set(full, nbits, i);
109928-	}
109929-
109930-	free(empty);
109931-	free(full);
109932-}
109933-
109934-TEST_BEGIN(test_search_exhaustive) {
109935-#define NB(nbits) \
109936-	do_test_search_exhaustive(nbits);
109937-	NBITS_TAB
109938-#undef NB
109939-}
109940-TEST_END
109941-
109942-TEST_BEGIN(test_range_simple) {
109943-	/*
109944-	 * Just pick a constant big enough to have nontrivial middle sizes, and
109945-	 * big enough that usages of things like weirdnum (below) near the
109946-	 * beginning fit comfortably into the beginning of the bitmap.
109947-	 */
109948-	size_t nbits = 64 * 10;
109949-	size_t ngroups = FB_NGROUPS(nbits);
109950-	fb_group_t *fb = malloc(sizeof(fb_group_t) * ngroups);
109951-	fb_init(fb, nbits);
109952-	for (size_t i = 0; i < nbits; i++) {
109953-		if (i % 2 == 0) {
109954-			fb_set_range(fb, nbits, i, 1);
109955-		}
109956-	}
109957-	for (size_t i = 0; i < nbits; i++) {
109958-		expect_b_eq(i % 2 == 0, fb_get(fb, nbits, i),
109959-		    "mismatch at position %zu", i);
109960-	}
109961-	fb_set_range(fb, nbits, 0, nbits / 2);
109962-	fb_unset_range(fb, nbits, nbits / 2, nbits / 2);
109963-	for (size_t i = 0; i < nbits; i++) {
109964-		expect_b_eq(i < nbits / 2, fb_get(fb, nbits, i),
109965-		    "mismatch at position %zu", i);
109966-	}
109967-
109968-	static const size_t weirdnum = 7;
109969-	fb_set_range(fb, nbits, 0, nbits);
109970-	fb_unset_range(fb, nbits, weirdnum, FB_GROUP_BITS + weirdnum);
109971-	for (size_t i = 0; i < nbits; i++) {
109972-		expect_b_eq(7 <= i && i <= 2 * weirdnum + FB_GROUP_BITS - 1,
109973-		    !fb_get(fb, nbits, i), "mismatch at position %zu", i);
109974-	}
109975-	free(fb);
109976-}
109977-TEST_END
109978-
109979-static void
109980-do_test_empty_full_exhaustive(size_t nbits) {
109981-	size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
109982-	fb_group_t *empty = malloc(sz);
109983-	fb_init(empty, nbits);
109984-	fb_group_t *full = malloc(sz);
109985-	fb_init(full, nbits);
109986-	fb_set_range(full, nbits, 0, nbits);
109987-
109988-	expect_true(fb_full(full, nbits), "");
109989-	expect_false(fb_empty(full, nbits), "");
109990-	expect_false(fb_full(empty, nbits), "");
109991-	expect_true(fb_empty(empty, nbits), "");
109992-
109993-	for (size_t i = 0; i < nbits; i++) {
109994-		fb_set(empty, nbits, i);
109995-		fb_unset(full, nbits, i);
109996-
109997-		expect_false(fb_empty(empty, nbits), "error at bit %zu", i);
109998-		if (nbits != 1) {
109999-			expect_false(fb_full(empty, nbits),
110000-			    "error at bit %zu", i);
110001-			expect_false(fb_empty(full, nbits),
110002-			    "error at bit %zu", i);
110003-		} else {
110004-			expect_true(fb_full(empty, nbits),
110005-			    "error at bit %zu", i);
110006-			expect_true(fb_empty(full, nbits),
110007-			    "error at bit %zu", i);
110008-		}
110009-		expect_false(fb_full(full, nbits), "error at bit %zu", i);
110010-
110011-		fb_unset(empty, nbits, i);
110012-		fb_set(full, nbits, i);
110013-	}
110014-
110015-	free(empty);
110016-	free(full);
110017-}
110018-
110019-TEST_BEGIN(test_empty_full) {
110020-#define NB(nbits) \
110021-	do_test_empty_full_exhaustive(nbits);
110022-	NBITS_TAB
110023-#undef NB
110024-}
110025-TEST_END
110026-
110027-/*
110028- * This tests both iter_range and the longest range functionality, which is
110029- * built closely on top of it.
110030- */
110031-TEST_BEGIN(test_iter_range_simple) {
110032-	size_t set_limit = 30;
110033-	size_t nbits = 100;
110034-	fb_group_t fb[FB_NGROUPS(100)];
110035-
110036-	fb_init(fb, nbits);
110037-
110038-	/*
110039-	 * Failing to initialize these can lead to build failures with -Wall;
110040-	 * the compiler can't prove that they're set.
110041-	 */
110042-	size_t begin = (size_t)-1;
110043-	size_t len = (size_t)-1;
110044-	bool result;
110045-
110046-	/* A set of checks with only the first set_limit bits *set*. */
110047-	fb_set_range(fb, nbits, 0, set_limit);
110048-	expect_zu_eq(set_limit, fb_srange_longest(fb, nbits),
110049-	    "Incorrect longest set range");
110050-	expect_zu_eq(nbits - set_limit, fb_urange_longest(fb, nbits),
110051-	    "Incorrect longest unset range");
110052-	for (size_t i = 0; i < set_limit; i++) {
110053-		result = fb_srange_iter(fb, nbits, i, &begin, &len);
110054-		expect_true(result, "Should have found a range at %zu", i);
110055-		expect_zu_eq(i, begin, "Incorrect begin at %zu", i);
110056-		expect_zu_eq(set_limit - i, len, "Incorrect len at %zu", i);
110057-
110058-		result = fb_urange_iter(fb, nbits, i, &begin, &len);
110059-		expect_true(result, "Should have found a range at %zu", i);
110060-		expect_zu_eq(set_limit, begin, "Incorrect begin at %zu", i);
110061-		expect_zu_eq(nbits - set_limit, len, "Incorrect len at %zu", i);
110062-
110063-		result = fb_srange_riter(fb, nbits, i, &begin, &len);
110064-		expect_true(result, "Should have found a range at %zu", i);
110065-		expect_zu_eq(0, begin, "Incorrect begin at %zu", i);
110066-		expect_zu_eq(i + 1, len, "Incorrect len at %zu", i);
110067-
110068-		result = fb_urange_riter(fb, nbits, i, &begin, &len);
110069-		expect_false(result, "Should not have found a range at %zu", i);
110070-	}
110071-	for (size_t i = set_limit; i < nbits; i++) {
110072-		result = fb_srange_iter(fb, nbits, i, &begin, &len);
110073-		expect_false(result, "Should not have found a range at %zu", i);
110074-
110075-		result = fb_urange_iter(fb, nbits, i, &begin, &len);
110076-		expect_true(result, "Should have found a range at %zu", i);
110077-		expect_zu_eq(i, begin, "Incorrect begin at %zu", i);
110078-		expect_zu_eq(nbits - i, len, "Incorrect len at %zu", i);
110079-
110080-		result = fb_srange_riter(fb, nbits, i, &begin, &len);
110081-		expect_true(result, "Should have found a range at %zu", i);
110082-		expect_zu_eq(0, begin, "Incorrect begin at %zu", i);
110083-		expect_zu_eq(set_limit, len, "Incorrect len at %zu", i);
110084-
110085-		result = fb_urange_riter(fb, nbits, i, &begin, &len);
110086-		expect_true(result, "Should have found a range at %zu", i);
110087-		expect_zu_eq(set_limit, begin, "Incorrect begin at %zu", i);
110088-		expect_zu_eq(i - set_limit + 1, len, "Incorrect len at %zu", i);
110089-	}
110090-
110091-	/* A set of checks with only the first set_limit bits *unset*. */
110092-	fb_unset_range(fb, nbits, 0, set_limit);
110093-	fb_set_range(fb, nbits, set_limit, nbits - set_limit);
110094-	expect_zu_eq(nbits - set_limit, fb_srange_longest(fb, nbits),
110095-	    "Incorrect longest set range");
110096-	expect_zu_eq(set_limit, fb_urange_longest(fb, nbits),
110097-	    "Incorrect longest unset range");
110098-	for (size_t i = 0; i < set_limit; i++) {
110099-		result = fb_srange_iter(fb, nbits, i, &begin, &len);
110100-		expect_true(result, "Should have found a range at %zu", i);
110101-		expect_zu_eq(set_limit, begin, "Incorrect begin at %zu", i);
110102-		expect_zu_eq(nbits - set_limit, len, "Incorrect len at %zu", i);
110103-
110104-		result = fb_urange_iter(fb, nbits, i, &begin, &len);
110105-		expect_true(result, "Should have found a range at %zu", i);
110106-		expect_zu_eq(i, begin, "Incorrect begin at %zu", i);
110107-		expect_zu_eq(set_limit - i, len, "Incorrect len at %zu", i);
110108-
110109-		result = fb_srange_riter(fb, nbits, i, &begin, &len);
110110-		expect_false(result, "Should not have found a range at %zu", i);
110111-
110112-		result = fb_urange_riter(fb, nbits, i, &begin, &len);
110113-		expect_true(result, "Should have found a range at %zu", i);
110114-		expect_zu_eq(0, begin, "Incorrect begin at %zu", i);
110115-		expect_zu_eq(i + 1, len, "Incorrect len at %zu", i);
110116-	}
110117-	for (size_t i = set_limit; i < nbits; i++) {
110118-		result = fb_srange_iter(fb, nbits, i, &begin, &len);
110119-		expect_true(result, "Should have found a range at %zu", i);
110120-		expect_zu_eq(i, begin, "Incorrect begin at %zu", i);
110121-		expect_zu_eq(nbits - i, len, "Incorrect len at %zu", i);
110122-
110123-		result = fb_urange_iter(fb, nbits, i, &begin, &len);
110124-		expect_false(result, "Should not have found a range at %zu", i);
110125-
110126-		result = fb_srange_riter(fb, nbits, i, &begin, &len);
110127-		expect_true(result, "Should have found a range at %zu", i);
110128-		expect_zu_eq(set_limit, begin, "Incorrect begin at %zu", i);
110129-		expect_zu_eq(i - set_limit + 1, len, "Incorrect len at %zu", i);
110130-
110131-		result = fb_urange_riter(fb, nbits, i, &begin, &len);
110132-		expect_true(result, "Should have found a range at %zu", i);
110133-		expect_zu_eq(0, begin, "Incorrect begin at %zu", i);
110134-		expect_zu_eq(set_limit, len, "Incorrect len at %zu", i);
110135-	}
110136-
110137-}
110138-TEST_END
110139-
110140-/*
110141- * Doing this bit-by-bit is too slow for a real implementation, but for testing
110142- * code, it's easy to get right.  In the exhaustive tests, we'll compare the
110143- * (fast but tricky) real implementation against the (slow but simple) testing
110144- * one.
110145- */
110146-static bool
110147-fb_iter_simple(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
110148-    size_t *r_len, bool val, bool forward) {
110149-	ssize_t stride = (forward ? (ssize_t)1 : (ssize_t)-1);
110150-	ssize_t range_begin = (ssize_t)start;
110151-	for (; range_begin != (ssize_t)nbits && range_begin != -1;
110152-	    range_begin += stride) {
110153-		if (fb_get(fb, nbits, range_begin) == val) {
110154-			ssize_t range_end = range_begin;
110155-			for (; range_end != (ssize_t)nbits && range_end != -1;
110156-			    range_end += stride) {
110157-				if (fb_get(fb, nbits, range_end) != val) {
110158-					break;
110159-				}
110160-			}
110161-			if (forward) {
110162-				*r_begin = range_begin;
110163-				*r_len = range_end - range_begin;
110164-			} else {
110165-				*r_begin = range_end + 1;
110166-				*r_len = range_begin - range_end;
110167-			}
110168-			return true;
110169-		}
110170-	}
110171-	return false;
110172-}
110173-
110174-/* Similar, but for finding longest ranges. */
110175-static size_t
110176-fb_range_longest_simple(fb_group_t *fb, size_t nbits, bool val) {
110177-	size_t longest_so_far = 0;
110178-	for (size_t begin = 0; begin < nbits; begin++) {
110179-		if (fb_get(fb, nbits, begin) != val) {
110180-			continue;
110181-		}
110182-		size_t end = begin + 1;
110183-		for (; end < nbits; end++) {
110184-			if (fb_get(fb, nbits, end) != val) {
110185-				break;
110186-			}
110187-		}
110188-		if (end - begin > longest_so_far) {
110189-			longest_so_far = end - begin;
110190-		}
110191-	}
110192-	return longest_so_far;
110193-}
110194-
110195-static void
110196-expect_iter_results_at(fb_group_t *fb, size_t nbits, size_t pos,
110197-    bool val, bool forward) {
110198-	bool iter_res;
110199-	size_t iter_begin JEMALLOC_CC_SILENCE_INIT(0);
110200-	size_t iter_len JEMALLOC_CC_SILENCE_INIT(0);
110201-	if (val) {
110202-		if (forward) {
110203-			iter_res = fb_srange_iter(fb, nbits, pos,
110204-			    &iter_begin, &iter_len);
110205-		} else {
110206-			iter_res = fb_srange_riter(fb, nbits, pos,
110207-			    &iter_begin, &iter_len);
110208-		}
110209-	} else {
110210-		if (forward) {
110211-			iter_res = fb_urange_iter(fb, nbits, pos,
110212-			    &iter_begin, &iter_len);
110213-		} else {
110214-			iter_res = fb_urange_riter(fb, nbits, pos,
110215-			    &iter_begin, &iter_len);
110216-		}
110217-	}
110218-
110219-	bool simple_iter_res;
110220-	/*
110221-	 * These are dead stores, but the compiler can't always figure that out
110222-	 * statically, and warns on the uninitialized variable.
110223-	 */
110224-	size_t simple_iter_begin = 0;
110225-	size_t simple_iter_len = 0;
110226-	simple_iter_res = fb_iter_simple(fb, nbits, pos, &simple_iter_begin,
110227-	    &simple_iter_len, val, forward);
110228-
110229-	expect_b_eq(iter_res, simple_iter_res, "Result mismatch at %zu", pos);
110230-	if (iter_res && simple_iter_res) {
110231-		assert_zu_eq(iter_begin, simple_iter_begin,
110232-		    "Begin mismatch at %zu", pos);
110233-		expect_zu_eq(iter_len, simple_iter_len,
110234-		    "Length mismatch at %zu", pos);
110235-	}
110236-}
110237-
110238-static void
110239-expect_iter_results(fb_group_t *fb, size_t nbits) {
110240-	for (size_t i = 0; i < nbits; i++) {
110241-		expect_iter_results_at(fb, nbits, i, false, false);
110242-		expect_iter_results_at(fb, nbits, i, false, true);
110243-		expect_iter_results_at(fb, nbits, i, true, false);
110244-		expect_iter_results_at(fb, nbits, i, true, true);
110245-	}
110246-	expect_zu_eq(fb_range_longest_simple(fb, nbits, true),
110247-	    fb_srange_longest(fb, nbits), "Longest range mismatch");
110248-	expect_zu_eq(fb_range_longest_simple(fb, nbits, false),
110249-	    fb_urange_longest(fb, nbits), "Longest range mismatch");
110250-}
110251-
110252-static void
110253-set_pattern_3(fb_group_t *fb, size_t nbits, bool zero_val) {
110254-	for (size_t i = 0; i < nbits; i++) {
110255-		if ((i % 6 < 3 && zero_val) || (i % 6 >= 3 && !zero_val)) {
110256-			fb_set(fb, nbits, i);
110257-		} else {
110258-			fb_unset(fb, nbits, i);
110259-		}
110260-	}
110261-}
110262-
110263-static void
110264-do_test_iter_range_exhaustive(size_t nbits) {
110265-	/* This test is also pretty slow. */
110266-	if (nbits > 1000) {
110267-		return;
110268-	}
110269-	size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
110270-	fb_group_t *fb = malloc(sz);
110271-	fb_init(fb, nbits);
110272-
110273-	set_pattern_3(fb, nbits, /* zero_val */ true);
110274-	expect_iter_results(fb, nbits);
110275-
110276-	set_pattern_3(fb, nbits, /* zero_val */ false);
110277-	expect_iter_results(fb, nbits);
110278-
110279-	fb_set_range(fb, nbits, 0, nbits);
110280-	fb_unset_range(fb, nbits, 0, nbits / 2 == 0 ? 1 : nbits / 2);
110281-	expect_iter_results(fb, nbits);
110282-
110283-	fb_unset_range(fb, nbits, 0, nbits);
110284-	fb_set_range(fb, nbits, 0, nbits / 2 == 0 ? 1 : nbits / 2);
110285-	expect_iter_results(fb, nbits);
110286-
110287-	free(fb);
110288-}
110289-
110290-/*
110291- * Like test_iter_range_simple, this tests both iteration and longest-range
110292- * computation.
110293- */
110294-TEST_BEGIN(test_iter_range_exhaustive) {
110295-#define NB(nbits) \
110296-	do_test_iter_range_exhaustive(nbits);
110297-	NBITS_TAB
110298-#undef NB
110299-}
110300-TEST_END
110301-
110302-/*
110303- * If all set bits in the bitmap are contiguous, in [set_start, set_end),
110304- * returns the number of set bits in [scount_start, scount_end).
110305- */
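-/*
- * A worked example of the contract above, with arbitrarily chosen values: if
- * bits are set only in [3, 10), then a query over [5, 20) overlaps the set
- * range in [5, 10), so scount_contiguous(3, 10, 5, 20) == 5, and the matching
- * unset count over that query is (20 - 5) - 5 == 10.
- */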
110306-static size_t
110307-scount_contiguous(size_t set_start, size_t set_end, size_t scount_start,
110308-    size_t scount_end) {
110309-	/* No overlap. */
110310-	if (set_end <= scount_start || scount_end <= set_start) {
110311-		return 0;
110312-	}
110313-	/* set range contains scount range. */
110314-	if (set_start <= scount_start && set_end >= scount_end) {
110315-		return scount_end - scount_start;
110316-	}
110317-	/* scount range contains set range. */
110318-	if (scount_start <= set_start && scount_end >= set_end) {
110319-		return set_end - set_start;
110320-	}
110321-	/* Partial overlap, with set range starting first. */
110322-	if (set_start < scount_start && set_end < scount_end) {
110323-		return set_end - scount_start;
110324-	}
110325-	/* Partial overlap, with scount range starting first. */
110326-	if (scount_start < set_start && scount_end < set_end) {
110327-		return scount_end - set_start;
110328-	}
110329-	/*
110330-	 * Trigger an assert failure; the above list should have been
110331-	 * exhaustive.
110332-	 */
110333-	unreachable();
110334-}
110335-
110336-static size_t
110337-ucount_contiguous(size_t set_start, size_t set_end, size_t ucount_start,
110338-    size_t ucount_end) {
110339-	/* No overlap. */
110340-	if (set_end <= ucount_start || ucount_end <= set_start) {
110341-		return ucount_end - ucount_start;
110342-	}
110343-	/* set range contains ucount range. */
110344-	if (set_start <= ucount_start && set_end >= ucount_end) {
110345-		return 0;
110346-	}
110347-	/* ucount range contains set range. */
110348-	if (ucount_start <= set_start && ucount_end >= set_end) {
110349-		return (ucount_end - ucount_start) - (set_end - set_start);
110350-	}
110351-	/* Partial overlap, with set range starting first. */
110352-	if (set_start < ucount_start && set_end < ucount_end) {
110353-		return ucount_end - set_end;
110354-	}
110355-	/* Partial overlap, with ucount range starting first. */
110356-	if (ucount_start < set_start && ucount_end < set_end) {
110357-		return set_start - ucount_start;
110358-	}
110359-	/*
110360-	 * Trigger an assert failure; the above list should have been
110361-	 * exhaustive.
110362-	 */
110363-	unreachable();
110364-}
110365-
110366-static void
110367-expect_count_match_contiguous(fb_group_t *fb, size_t nbits, size_t set_start,
110368-    size_t set_end) {
110369-	for (size_t i = 0; i < nbits; i++) {
110370-		for (size_t j = i + 1; j <= nbits; j++) {
110371-			size_t cnt = j - i;
110372-			size_t scount_expected = scount_contiguous(set_start,
110373-			    set_end, i, j);
110374-			size_t scount_computed = fb_scount(fb, nbits, i, cnt);
110375-			expect_zu_eq(scount_expected, scount_computed,
110376-			    "fb_scount error with nbits=%zu, start=%zu, "
110377-			    "cnt=%zu, with bits set in [%zu, %zu)",
110378-			    nbits, i, cnt, set_start, set_end);
110379-
110380-			size_t ucount_expected = ucount_contiguous(set_start,
110381-			    set_end, i, j);
110382-			size_t ucount_computed = fb_ucount(fb, nbits, i, cnt);
110383-			assert_zu_eq(ucount_expected, ucount_computed,
110384-			    "fb_ucount error with nbits=%zu, start=%zu, "
110385-			    "cnt=%zu, with bits set in [%zu, %zu)",
110386-			    nbits, i, cnt, set_start, set_end);
110387-
110388-		}
110389-	}
110390-}
110391-
110392-static void
110393-do_test_count_contiguous(size_t nbits) {
110394-	size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
110395-	fb_group_t *fb = malloc(sz);
110396-
110397-	fb_init(fb, nbits);
110398-
110399-	expect_count_match_contiguous(fb, nbits, 0, 0);
110400-	for (size_t i = 0; i < nbits; i++) {
110401-		fb_set(fb, nbits, i);
110402-		expect_count_match_contiguous(fb, nbits, 0, i + 1);
110403-	}
110404-
110405-	for (size_t i = 0; i < nbits; i++) {
110406-		fb_unset(fb, nbits, i);
110407-		expect_count_match_contiguous(fb, nbits, i + 1, nbits);
110408-	}
110409-
110410-	free(fb);
110411-}
110412-
110413-TEST_BEGIN(test_count_contiguous_simple) {
110414-	enum {nbits = 300};
110415-	fb_group_t fb[FB_NGROUPS(nbits)];
110416-	fb_init(fb, nbits);
110417-	/* Just an arbitrary number. */
110418-	size_t start = 23;
110419-
110420-	fb_set_range(fb, nbits, start, 30 - start);
110421-	expect_count_match_contiguous(fb, nbits, start, 30);
110422-
110423-	fb_set_range(fb, nbits, start, 40 - start);
110424-	expect_count_match_contiguous(fb, nbits, start, 40);
110425-
110426-	fb_set_range(fb, nbits, start, 70 - start);
110427-	expect_count_match_contiguous(fb, nbits, start, 70);
110428-
110429-	fb_set_range(fb, nbits, start, 120 - start);
110430-	expect_count_match_contiguous(fb, nbits, start, 120);
110431-
110432-	fb_set_range(fb, nbits, start, 150 - start);
110433-	expect_count_match_contiguous(fb, nbits, start, 150);
110434-
110435-	fb_set_range(fb, nbits, start, 200 - start);
110436-	expect_count_match_contiguous(fb, nbits, start, 200);
110437-
110438-	fb_set_range(fb, nbits, start, 290 - start);
110439-	expect_count_match_contiguous(fb, nbits, start, 290);
110440-}
110441-TEST_END
110442-
110443-TEST_BEGIN(test_count_contiguous) {
110444-#define NB(nbits) \
110445-	/* This test is *particularly* slow in debug builds. */ \
110446-	if ((!config_debug && nbits < 300) || nbits < 150) { \
110447-		do_test_count_contiguous(nbits); \
110448-	}
110449-	NBITS_TAB
110450-#undef NB
110451-}
110452-TEST_END
110453-
110454-static void
110455-expect_count_match_alternating(fb_group_t *fb_even, fb_group_t *fb_odd,
110456-    size_t nbits) {
110457-	for (size_t i = 0; i < nbits; i++) {
110458-		for (size_t j = i + 1; j <= nbits; j++) {
110459-			size_t cnt = j - i;
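-			/*
-			 * With alternating bits, a window of cnt bits holds
-			 * cnt/2 bits of each parity, plus one more when cnt is
-			 * odd and the window starts on that parity.  For
-			 * example, in fb_odd the window [1, 4) covers bits 1,
-			 * 2, 3 and has 3/2 + 1 == 2 set bits.
-			 */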
110460-			size_t odd_scount = cnt / 2
110461-			    + (size_t)(cnt % 2 == 1 && i % 2 == 1);
110462-			size_t odd_scount_computed = fb_scount(fb_odd, nbits,
110463-			    i, j - i);
110464-			assert_zu_eq(odd_scount, odd_scount_computed,
110465-			    "fb_scount error with nbits=%zu, start=%zu, "
110466-			    "cnt=%zu, with alternating bits set.",
110467-			    nbits, i, j - i);
110468-
110469-			size_t odd_ucount = cnt / 2
110470-			    + (size_t)(cnt % 2 == 1 && i % 2 == 0);
110471-			size_t odd_ucount_computed = fb_ucount(fb_odd, nbits,
110472-			    i, j - i);
110473-			assert_zu_eq(odd_ucount, odd_ucount_computed,
110474-			    "fb_ucount error with nbits=%zu, start=%zu, "
110475-			    "cnt=%zu, with alternating bits set.",
110476-			    nbits, i, j - i);
110477-
110478-			size_t even_scount = cnt / 2
110479-			    + (size_t)(cnt % 2 == 1 && i % 2 == 0);
110480-			size_t even_scount_computed = fb_scount(fb_even, nbits,
110481-			    i, j - i);
110482-			assert_zu_eq(even_scount, even_scount_computed,
110483-			    "fb_scount error with nbits=%zu, start=%zu, "
110484-			    "cnt=%zu, with alternating bits set.",
110485-			    nbits, i, j - i);
110486-
110487-			size_t even_ucount = cnt / 2
110488-			    + (size_t)(cnt % 2 == 1 && i % 2 == 1);
110489-			size_t even_ucount_computed = fb_ucount(fb_even, nbits,
110490-			    i, j - i);
110491-			assert_zu_eq(even_ucount, even_ucount_computed,
110492-			    "fb_ucount error with nbits=%zu, start=%zu, "
110493-			    "cnt=%zu, with alternating bits set.",
110494-			    nbits, i, j - i);
110495-		}
110496-	}
110497-}
110498-
110499-static void
110500-do_test_count_alternating(size_t nbits) {
110501-	if (nbits > 1000) {
110502-		return;
110503-	}
110504-	size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
110505-	fb_group_t *fb_even = malloc(sz);
110506-	fb_group_t *fb_odd = malloc(sz);
110507-
110508-	fb_init(fb_even, nbits);
110509-	fb_init(fb_odd, nbits);
110510-
110511-	for (size_t i = 0; i < nbits; i++) {
110512-		if (i % 2 == 0) {
110513-			fb_set(fb_even, nbits, i);
110514-		} else {
110515-			fb_set(fb_odd, nbits, i);
110516-		}
110517-	}
110518-
110519-	expect_count_match_alternating(fb_even, fb_odd, nbits);
110520-
110521-	free(fb_even);
110522-	free(fb_odd);
110523-}
110524-
110525-TEST_BEGIN(test_count_alternating) {
110526-#define NB(nbits) \
110527-	do_test_count_alternating(nbits);
110528-	NBITS_TAB
110529-#undef NB
110530-}
110531-TEST_END
110532-
110533-static void
110534-do_test_bit_op(size_t nbits, bool (*op)(bool a, bool b),
110535-    void (*fb_op)(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits)) {
110536-	size_t sz = FB_NGROUPS(nbits) * sizeof(fb_group_t);
110537-	fb_group_t *fb1 = malloc(sz);
110538-	fb_group_t *fb2 = malloc(sz);
110539-	fb_group_t *fb_result = malloc(sz);
110540-	fb_init(fb1, nbits);
110541-	fb_init(fb2, nbits);
110542-	fb_init(fb_result, nbits);
110543-
110544-	/* Just two random numbers. */
110545-	const uint64_t prng_init1 = (uint64_t)0X4E9A9DE6A35691CDULL;
110546-	const uint64_t prng_init2 = (uint64_t)0X7856E396B063C36EULL;
110547-
110548-	uint64_t prng1 = prng_init1;
110549-	uint64_t prng2 = prng_init2;
110550-
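-	/*
-	 * Each iteration below consumes one bit of the current prng words; the
-	 * words are advanced once every 64 iterations.  The verification loop
-	 * further down replays the exact same sequence.
-	 */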
110551-	for (size_t i = 0; i < nbits; i++) {
110552-		bool bit1 = ((prng1 & (1ULL << (i % 64))) != 0);
110553-		bool bit2 = ((prng2 & (1ULL << (i % 64))) != 0);
110554-
110555-		if (bit1) {
110556-			fb_set(fb1, nbits, i);
110557-		}
110558-		if (bit2) {
110559-			fb_set(fb2, nbits, i);
110560-		}
110561-
110562-		if (i % 64 == 0) {
110563-			prng1 = prng_state_next_u64(prng1);
110564-			prng2 = prng_state_next_u64(prng2);
110565-		}
110566-	}
110567-
110568-	fb_op(fb_result, fb1, fb2, nbits);
110569-
110570-	/* Reset the prngs to replay them. */
110571-	prng1 = prng_init1;
110572-	prng2 = prng_init2;
110573-
110574-	for (size_t i = 0; i < nbits; i++) {
110575-		bool bit1 = ((prng1 & (1ULL << (i % 64))) != 0);
110576-		bool bit2 = ((prng2 & (1ULL << (i % 64))) != 0);
110577-
110578-		/* Original bitmaps shouldn't change. */
110579-		expect_b_eq(bit1, fb_get(fb1, nbits, i), "difference at bit %zu", i);
110580-		expect_b_eq(bit2, fb_get(fb2, nbits, i), "difference at bit %zu", i);
110581-
110582-		/* New one should be the result of applying the bit op. */
110583-		expect_b_eq(op(bit1, bit2), fb_get(fb_result, nbits, i),
110584-		    "difference at bit %zu", i);
110585-
110586-		/* Update the same way we did last time. */
110587-		if (i % 64 == 0) {
110588-			prng1 = prng_state_next_u64(prng1);
110589-			prng2 = prng_state_next_u64(prng2);
110590-		}
110591-	}
110592-
110593-	free(fb1);
110594-	free(fb2);
110595-	free(fb_result);
110596-}
110597-
110598-static bool
110599-binary_and(bool a, bool b) {
110600-	return a & b;
110601-}
110602-
110603-static void
110604-do_test_bit_and(size_t nbits) {
110605-	do_test_bit_op(nbits, &binary_and, &fb_bit_and);
110606-}
110607-
110608-TEST_BEGIN(test_bit_and) {
110609-#define NB(nbits) \
110610-	do_test_bit_and(nbits);
110611-	NBITS_TAB
110612-#undef NB
110613-}
110614-TEST_END
110615-
110616-static bool
110617-binary_or(bool a, bool b) {
110618-	return a | b;
110619-}
110620-
110621-static void
110622-do_test_bit_or(size_t nbits) {
110623-	do_test_bit_op(nbits, &binary_or, &fb_bit_or);
110624-}
110625-
110626-TEST_BEGIN(test_bit_or) {
110627-#define NB(nbits) \
110628-	do_test_bit_or(nbits);
110629-	NBITS_TAB
110630-#undef NB
110631-}
110632-TEST_END
110633-
110634-static bool
110635-binary_not(bool a, bool b) {
110636-	(void)b;
110637-	return !a;
110638-}
110639-
110640-static void
110641-fb_bit_not_shim(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2,
110642-    size_t nbits) {
110643-	(void)src2;
110644-	fb_bit_not(dst, src1, nbits);
110645-}
110646-
110647-static void
110648-do_test_bit_not(size_t nbits) {
110649-	do_test_bit_op(nbits, &binary_not, &fb_bit_not_shim);
110650-}
110651-
110652-TEST_BEGIN(test_bit_not) {
110653-#define NB(nbits) \
110654-	do_test_bit_not(nbits);
110655-	NBITS_TAB
110656-#undef NB
110657-}
110658-TEST_END
110659-
110660-int
110661-main(void) {
110662-	return test_no_reentrancy(
110663-	    test_fb_init,
110664-	    test_get_set_unset,
110665-	    test_search_simple,
110666-	    test_search_exhaustive,
110667-	    test_range_simple,
110668-	    test_empty_full,
110669-	    test_iter_range_simple,
110670-	    test_iter_range_exhaustive,
110671-	    test_count_contiguous_simple,
110672-	    test_count_contiguous,
110673-	    test_count_alternating,
110674-	    test_bit_and,
110675-	    test_bit_or,
110676-	    test_bit_not);
110677-}
110678diff --git a/jemalloc/test/unit/fork.c b/jemalloc/test/unit/fork.c
110679deleted file mode 100644
110680index 4137423..0000000
110681--- a/jemalloc/test/unit/fork.c
110682+++ /dev/null
110683@@ -1,141 +0,0 @@
110684-#include "test/jemalloc_test.h"
110685-
110686-#ifndef _WIN32
110687-#include <sys/wait.h>
110688-#endif
110689-
110690-#ifndef _WIN32
110691-static void
110692-wait_for_child_exit(int pid) {
110693-	int status;
110694-	while (true) {
110695-		if (waitpid(pid, &status, 0) == -1) {
110696-			test_fail("Unexpected waitpid() failure.");
110697-		}
110698-		if (WIFSIGNALED(status)) {
110699-			test_fail("Unexpected child termination due to "
110700-			    "signal %d", WTERMSIG(status));
110701-			break;
110702-		}
110703-		if (WIFEXITED(status)) {
110704-			if (WEXITSTATUS(status) != 0) {
110705-				test_fail("Unexpected child exit value %d",
110706-				    WEXITSTATUS(status));
110707-			}
110708-			break;
110709-		}
110710-	}
110711-}
110712-#endif
110713-
110714-TEST_BEGIN(test_fork) {
110715-#ifndef _WIN32
110716-	void *p;
110717-	pid_t pid;
110718-
110719-	/* Set up a manually managed arena for test. */
110720-	unsigned arena_ind;
110721-	size_t sz = sizeof(unsigned);
110722-	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
110723-	    0, "Unexpected mallctl() failure");
110724-
110725-	/* Migrate to the new arena. */
110726-	unsigned old_arena_ind;
110727-	sz = sizeof(old_arena_ind);
110728-	expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
110729-	    (void *)&arena_ind, sizeof(arena_ind)), 0,
110730-	    "Unexpected mallctl() failure");
110731-
110732-	p = malloc(1);
110733-	expect_ptr_not_null(p, "Unexpected malloc() failure");
110734-
110735-	pid = fork();
110736-
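-	/*
-	 * Both the parent and the child run the following allocations,
-	 * exercising the allocator on each side of the fork.
-	 */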
110737-	free(p);
110738-
110739-	p = malloc(64);
110740-	expect_ptr_not_null(p, "Unexpected malloc() failure");
110741-	free(p);
110742-
110743-	if (pid == -1) {
110744-		/* Error. */
110745-		test_fail("Unexpected fork() failure");
110746-	} else if (pid == 0) {
110747-		/* Child. */
110748-		_exit(0);
110749-	} else {
110750-		wait_for_child_exit(pid);
110751-	}
110752-#else
110753-	test_skip("fork(2) is irrelevant to Windows");
110754-#endif
110755-}
110756-TEST_END
110757-
110758-#ifndef _WIN32
110759-static void *
110760-do_fork_thd(void *arg) {
110761-	malloc(1);
110762-	int pid = fork();
110763-	if (pid == -1) {
110764-		/* Error. */
110765-		test_fail("Unexpected fork() failure");
110766-	} else if (pid == 0) {
110767-		/* Child. */
110768-		char *args[] = {"true", NULL};
110769-		execvp(args[0], args);
110770-		test_fail("Exec failed");
110771-	} else {
110772-		/* Parent */
110773-		wait_for_child_exit(pid);
110774-	}
110775-	return NULL;
110776-}
110777-#endif
110778-
110779-#ifndef _WIN32
110780-static void
110781-do_test_fork_multithreaded() {
110782-	thd_t child;
110783-	thd_create(&child, do_fork_thd, NULL);
110784-	do_fork_thd(NULL);
110785-	thd_join(child, NULL);
110786-}
110787-#endif
110788-
110789-TEST_BEGIN(test_fork_multithreaded) {
110790-#ifndef _WIN32
110791-	/*
110792-	 * We've seen bugs involving hanging on arenas_lock (though the same
110793-	 * class of bugs can happen on any mutex).  The bugs are intermittent
110794-	 * though, so we want to run the test multiple times.  Since we hold the
110795-	 * arenas lock only early in the process lifetime, we can't just run
110796-	 * this test in a loop (since, after all the arenas are initialized, we
110797-	 * won't acquire arenas_lock any further).  We therefore repeat the test
110798-	 * with multiple processes.
110799-	 */
110800-	for (int i = 0; i < 100; i++) {
110801-		int pid = fork();
110802-		if (pid == -1) {
110803-			/* Error. */
110804-			test_fail("Unexpected fork() failure");
110805-		} else if (pid == 0) {
110806-			/* Child. */
110807-			do_test_fork_multithreaded();
110808-			_exit(0);
110809-		} else {
110810-			wait_for_child_exit(pid);
110811-		}
110812-	}
110813-#else
110814-	test_skip("fork(2) is irrelevant to Windows");
110815-#endif
110816-}
110817-TEST_END
110818-
110819-int
110820-main(void) {
110821-	return test_no_reentrancy(
110822-	    test_fork,
110823-	    test_fork_multithreaded);
110824-}
110825diff --git a/jemalloc/test/unit/fxp.c b/jemalloc/test/unit/fxp.c
110826deleted file mode 100644
110827index 27f1097..0000000
110828--- a/jemalloc/test/unit/fxp.c
110829+++ /dev/null
110830@@ -1,394 +0,0 @@
110831-#include "test/jemalloc_test.h"
110832-
110833-#include "jemalloc/internal/fxp.h"
110834-
110835-static double
110836-fxp2double(fxp_t a) {
110837-	double intpart = (double)(a >> 16);
110838-	double fracpart = (double)(a & ((1U << 16) - 1)) / (1U << 16);
110839-	return intpart + fracpart;
110840-}
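-/*
- * For illustration, assuming the 16.16 layout this helper implies: 0x00018000
- * converts to 1 + 0x8000/65536 == 1.5, and 0x00024000 converts to
- * 2 + 0x4000/65536 == 2.25.
- */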
110841-
110842-/* Is a close to b? */
110843-static bool
110844-double_close(double a, double b) {
110845-	/*
110846-	 * Our implementation doesn't try for precision.  Correspondingly, don't
110847-	 * enforce it too strenuously here; accept values that are close in
110848-	 * either relative or absolute terms.
110849-	 */
110850-	return fabs(a - b) < 0.01 || fabs(a - b) / a < 0.01;
110851-}
110852-
110853-static bool
110854-fxp_close(fxp_t a, fxp_t b) {
110855-	return double_close(fxp2double(a), fxp2double(b));
110856-}
110857-
110858-static fxp_t
110859-xparse_fxp(const char *str) {
110860-	fxp_t result;
110861-	bool err = fxp_parse(&result, str, NULL);
110862-	assert_false(err, "Invalid fxp string: %s", str);
110863-	return result;
110864-}
110865-
110866-static void
110867-expect_parse_accurate(const char *str, const char *parse_str) {
110868-	double true_val = strtod(str, NULL);
110869-	fxp_t fxp_val;
110870-	char *end;
110871-	bool err = fxp_parse(&fxp_val, parse_str, &end);
110872-	expect_false(err, "Unexpected parse failure");
110873-	expect_ptr_eq(parse_str + strlen(str), end,
110874-	    "Didn't parse whole string");
110875-	expect_true(double_close(fxp2double(fxp_val), true_val),
110876-	    "Misparsed %s", str);
110877-}
110878-
110879-static void
110880-parse_valid_trial(const char *str) {
110881-	/* The value it parses should be correct. */
110882-	expect_parse_accurate(str, str);
110883-	char buf[100];
110884-	snprintf(buf, sizeof(buf), "%swith_some_trailing_text", str);
110885-	expect_parse_accurate(str, buf);
110886-	snprintf(buf, sizeof(buf), "%s with a space", str);
110887-	expect_parse_accurate(str, buf);
110888-	snprintf(buf, sizeof(buf), "%s,in_a_malloc_conf_string:1", str);
110889-	expect_parse_accurate(str, buf);
110890-}
110891-
110892-TEST_BEGIN(test_parse_valid) {
110893-	parse_valid_trial("0");
110894-	parse_valid_trial("1");
110895-	parse_valid_trial("2");
110896-	parse_valid_trial("100");
110897-	parse_valid_trial("345");
110898-	parse_valid_trial("00000000123");
110899-	parse_valid_trial("00000000987");
110900-
110901-	parse_valid_trial("0.0");
110902-	parse_valid_trial("0.00000000000456456456");
110903-	parse_valid_trial("100.00000000000456456456");
110904-
110905-	parse_valid_trial("123.1");
110906-	parse_valid_trial("123.01");
110907-	parse_valid_trial("123.001");
110908-	parse_valid_trial("123.0001");
110909-	parse_valid_trial("123.00001");
110910-	parse_valid_trial("123.000001");
110911-	parse_valid_trial("123.0000001");
110912-
110913-	parse_valid_trial(".0");
110914-	parse_valid_trial(".1");
110915-	parse_valid_trial(".01");
110916-	parse_valid_trial(".001");
110917-	parse_valid_trial(".0001");
110918-	parse_valid_trial(".00001");
110919-	parse_valid_trial(".000001");
110920-
110921-	parse_valid_trial(".1");
110922-	parse_valid_trial(".10");
110923-	parse_valid_trial(".100");
110924-	parse_valid_trial(".1000");
110925-	parse_valid_trial(".100000");
110926-}
110927-TEST_END
110928-
110929-static void
110930-expect_parse_failure(const char *str) {
110931-	fxp_t result = FXP_INIT_INT(333);
110932-	char *end = (void *)0x123;
110933-	bool err = fxp_parse(&result, str, &end);
110934-	expect_true(err, "Expected a parse error on: %s", str);
110935-	expect_ptr_eq((void *)0x123, end,
110936-	    "Parse error shouldn't change results");
110937-	expect_u32_eq(result, FXP_INIT_INT(333),
110938-	    "Parse error shouldn't change results");
110939-}
110940-
110941-TEST_BEGIN(test_parse_invalid) {
110942-	expect_parse_failure("123.");
110943-	expect_parse_failure("3.a");
110944-	expect_parse_failure(".a");
110945-	expect_parse_failure("a.1");
110946-	expect_parse_failure("a");
110947-	/* A valid string, but one that overflows. */
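-	/*
-	 * (Assuming the 16.16 representation implied by fxp2double() above,
-	 * the integer part must fit in 16 bits, so anything >= 65536 cannot be
-	 * represented.)
-	 */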
110948-	expect_parse_failure("123456789");
110949-	expect_parse_failure("0000000123456789");
110950-	expect_parse_failure("1000000");
110951-}
110952-TEST_END
110953-
110954-static void
110955-expect_init_percent(unsigned percent, const char *str) {
110956-	fxp_t result_init = FXP_INIT_PERCENT(percent);
110957-	fxp_t result_parse = xparse_fxp(str);
110958-	expect_u32_eq(result_init, result_parse,
110959-	    "Expect representations of FXP_INIT_PERCENT(%u) and "
110960-	    "fxp_parse(\"%s\") to be equal; got %x and %x",
110961-	    percent, str, result_init, result_parse);
110962-
110963-}
110964-
110965-/*
110966- * Every other test uses either parsing or FXP_INIT_INT; it gets tested in those
110967- * ways.  We need a one-off for the percent-based initialization, though.
110968- */
110969-TEST_BEGIN(test_init_percent) {
110970-	expect_init_percent(100, "1");
110971-	expect_init_percent(75, ".75");
110972-	expect_init_percent(1, ".01");
110973-	expect_init_percent(50, ".5");
110974-}
110975-TEST_END
110976-
110977-static void
110978-expect_add(const char *astr, const char *bstr, const char* resultstr) {
110979-	fxp_t a = xparse_fxp(astr);
110980-	fxp_t b = xparse_fxp(bstr);
110981-	fxp_t result = xparse_fxp(resultstr);
110982-	expect_true(fxp_close(fxp_add(a, b), result),
110983-	    "Expected %s + %s == %s", astr, bstr, resultstr);
110984-}
110985-
110986-TEST_BEGIN(test_add_simple) {
110987-	expect_add("0", "0", "0");
110988-	expect_add("0", "1", "1");
110989-	expect_add("1", "1", "2");
110990-	expect_add("1.5", "1.5", "3");
110991-	expect_add("0.1", "0.1", "0.2");
110992-	expect_add("123", "456", "579");
110993-}
110994-TEST_END
110995-
110996-static void
110997-expect_sub(const char *astr, const char *bstr, const char* resultstr) {
110998-	fxp_t a = xparse_fxp(astr);
110999-	fxp_t b = xparse_fxp(bstr);
111000-	fxp_t result = xparse_fxp(resultstr);
111001-	expect_true(fxp_close(fxp_sub(a, b), result),
111002-	    "Expected %s - %s == %s", astr, bstr, resultstr);
111003-}
111004-
111005-TEST_BEGIN(test_sub_simple) {
111006-	expect_sub("0", "0", "0");
111007-	expect_sub("1", "0", "1");
111008-	expect_sub("1", "1", "0");
111009-	expect_sub("3.5", "1.5", "2");
111010-	expect_sub("0.3", "0.1", "0.2");
111011-	expect_sub("456", "123", "333");
111012-}
111013-TEST_END
111014-
111015-static void
111016-expect_mul(const char *astr, const char *bstr, const char* resultstr) {
111017-	fxp_t a = xparse_fxp(astr);
111018-	fxp_t b = xparse_fxp(bstr);
111019-	fxp_t result = xparse_fxp(resultstr);
111020-	expect_true(fxp_close(fxp_mul(a, b), result),
111021-	    "Expected %s * %s == %s", astr, bstr, resultstr);
111022-}
111023-
111024-TEST_BEGIN(test_mul_simple) {
111025-	expect_mul("0", "0", "0");
111026-	expect_mul("1", "0", "0");
111027-	expect_mul("1", "1", "1");
111028-	expect_mul("1.5", "1.5", "2.25");
111029-	expect_mul("100.0", "10", "1000");
111030-	expect_mul(".1", "10", "1");
111031-}
111032-TEST_END
111033-
111034-static void
111035-expect_div(const char *astr, const char *bstr, const char* resultstr) {
111036-	fxp_t a = xparse_fxp(astr);
111037-	fxp_t b = xparse_fxp(bstr);
111038-	fxp_t result = xparse_fxp(resultstr);
111039-	expect_true(fxp_close(fxp_div(a, b), result),
111040-	    "Expected %s / %s == %s", astr, bstr, resultstr);
111041-}
111042-
111043-TEST_BEGIN(test_div_simple) {
111044-	expect_div("1", "1", "1");
111045-	expect_div("0", "1", "0");
111046-	expect_div("2", "1", "2");
111047-	expect_div("3", "2", "1.5");
111048-	expect_div("3", "1.5", "2");
111049-	expect_div("10", ".1", "100");
111050-	expect_div("123", "456", ".2697368421");
111051-}
111052-TEST_END
111053-
111054-static void
111055-expect_round(const char *str, uint32_t rounded_down, uint32_t rounded_nearest) {
111056-	fxp_t fxp = xparse_fxp(str);
111057-	uint32_t fxp_rounded_down = fxp_round_down(fxp);
111058-	uint32_t fxp_rounded_nearest = fxp_round_nearest(fxp);
111059-	expect_u32_eq(rounded_down, fxp_rounded_down,
111060-	    "Mistake rounding %s down", str);
111061-	expect_u32_eq(rounded_nearest, fxp_rounded_nearest,
111062-	    "Mistake rounding %s to nearest", str);
111063-}
111064-
111065-TEST_BEGIN(test_round_simple) {
111066-	expect_round("1.5", 1, 2);
111067-	expect_round("0", 0, 0);
111068-	expect_round("0.1", 0, 0);
111069-	expect_round("0.4", 0, 0);
111070-	expect_round("0.40000", 0, 0);
111071-	expect_round("0.5", 0, 1);
111072-	expect_round("0.6", 0, 1);
111073-	expect_round("123", 123, 123);
111074-	expect_round("123.4", 123, 123);
111075-	expect_round("123.5", 123, 124);
111076-}
111077-TEST_END
111078-
111079-static void
111080-expect_mul_frac(size_t a, const char *fracstr, size_t expected) {
111081-	fxp_t frac = xparse_fxp(fracstr);
111082-	size_t result = fxp_mul_frac(a, frac);
111083-	expect_true(double_close(expected, result),
111084-	    "Expected %zu * %s == %zu (fracmul); got %zu", a, fracstr,
111085-	    expected, result);
111086-}
111087-
111088-TEST_BEGIN(test_mul_frac_simple) {
111089-	expect_mul_frac(SIZE_MAX, "1.0", SIZE_MAX);
111090-	expect_mul_frac(SIZE_MAX, ".75", SIZE_MAX / 4 * 3);
111091-	expect_mul_frac(SIZE_MAX, ".5", SIZE_MAX / 2);
111092-	expect_mul_frac(SIZE_MAX, ".25", SIZE_MAX / 4);
111093-	expect_mul_frac(1U << 16, "1.0", 1U << 16);
111094-	expect_mul_frac(1U << 30, "0.5", 1U << 29);
111095-	expect_mul_frac(1U << 30, "0.25", 1U << 28);
111096-	expect_mul_frac(1U << 30, "0.125", 1U << 27);
111097-	expect_mul_frac((1U << 30) + 1, "0.125", 1U << 27);
111098-	expect_mul_frac(100, "0.25", 25);
111099-	expect_mul_frac(1000 * 1000, "0.001", 1000);
111100-}
111101-TEST_END
111102-
111103-static void
111104-expect_print(const char *str) {
111105-	fxp_t fxp = xparse_fxp(str);
111106-	char buf[FXP_BUF_SIZE];
111107-	fxp_print(fxp, buf);
111108-	expect_d_eq(0, strcmp(str, buf), "Couldn't round-trip print %s", str);
111109-}
111110-
111111-TEST_BEGIN(test_print_simple) {
111112-	expect_print("0.0");
111113-	expect_print("1.0");
111114-	expect_print("2.0");
111115-	expect_print("123.0");
111116-	/*
111117-	 * We hit the possibility of roundoff errors whenever the fractional
111118-	 * component isn't a round binary number; only check these here (we
111119-	 * round-trip properly in the stress test).
111120-	 */
111121-	expect_print("1.5");
111122-	expect_print("3.375");
111123-	expect_print("0.25");
111124-	expect_print("0.125");
111125-	/* 1 / 2**14 */
111126-	expect_print("0.00006103515625");
111127-}
111128-TEST_END
111129-
111130-TEST_BEGIN(test_stress) {
111131-	const char *numbers[] = {
111132-		"0.0", "0.1", "0.2", "0.3", "0.4",
111133-		"0.5", "0.6", "0.7", "0.8", "0.9",
111134-
111135-		"1.0", "1.1", "1.2", "1.3", "1.4",
111136-		"1.5", "1.6", "1.7", "1.8", "1.9",
111137-
111138-		"2.0", "2.1", "2.2", "2.3", "2.4",
111139-		"2.5", "2.6", "2.7", "2.8", "2.9",
111140-
111141-		"17.0", "17.1", "17.2", "17.3", "17.4",
111142-		"17.5", "17.6", "17.7", "17.8", "17.9",
111143-
111144-		"18.0", "18.1", "18.2", "18.3", "18.4",
111145-		"18.5", "18.6", "18.7", "18.8", "18.9",
111146-
111147-		"123.0", "123.1", "123.2", "123.3", "123.4",
111148-		"123.5", "123.6", "123.7", "123.8", "123.9",
111149-
111150-		"124.0", "124.1", "124.2", "124.3", "124.4",
111151-		"124.5", "124.6", "124.7", "124.8", "124.9",
111152-
111153-		"125.0", "125.1", "125.2", "125.3", "125.4",
111154-		"125.5", "125.6", "125.7", "125.8", "125.9"};
111155-	size_t numbers_len = sizeof(numbers)/sizeof(numbers[0]);
111156-	for (size_t i = 0; i < numbers_len; i++) {
111157-		fxp_t fxp_a = xparse_fxp(numbers[i]);
111158-		double double_a = strtod(numbers[i], NULL);
111159-
111160-		uint32_t fxp_rounded_down = fxp_round_down(fxp_a);
111161-		uint32_t fxp_rounded_nearest = fxp_round_nearest(fxp_a);
111162-		uint32_t double_rounded_down = (uint32_t)double_a;
111163-		uint32_t double_rounded_nearest = (uint32_t)round(double_a);
111164-
111165-		expect_u32_eq(double_rounded_down, fxp_rounded_down,
111166-		    "Incorrectly rounded down %s", numbers[i]);
111167-		expect_u32_eq(double_rounded_nearest, fxp_rounded_nearest,
111168-		    "Incorrectly rounded-to-nearest %s", numbers[i]);
111169-
111170-		for (size_t j = 0; j < numbers_len; j++) {
111171-			fxp_t fxp_b = xparse_fxp(numbers[j]);
111172-			double double_b = strtod(numbers[j], NULL);
111173-
111174-			fxp_t fxp_sum = fxp_add(fxp_a, fxp_b);
111175-			double double_sum = double_a + double_b;
111176-			expect_true(
111177-			    double_close(fxp2double(fxp_sum), double_sum),
111178-			    "Miscomputed %s + %s", numbers[i], numbers[j]);
111179-
111180-			if (double_a > double_b) {
111181-				fxp_t fxp_diff = fxp_sub(fxp_a, fxp_b);
111182-				double double_diff = double_a - double_b;
111183-				expect_true(
111184-				    double_close(fxp2double(fxp_diff),
111185-				    double_diff),
111186-				    "Miscomputed %s - %s", numbers[i],
111187-				    numbers[j]);
111188-			}
111189-
111190-			fxp_t fxp_prod = fxp_mul(fxp_a, fxp_b);
111191-			double double_prod = double_a * double_b;
111192-			expect_true(
111193-			    double_close(fxp2double(fxp_prod), double_prod),
111194-			    "Miscomputed %s * %s", numbers[i], numbers[j]);
111195-
111196-			if (double_b != 0.0) {
111197-				fxp_t fxp_quot = fxp_div(fxp_a, fxp_b);
111198-				double double_quot = double_a / double_b;
111199-				expect_true(
111200-				    double_close(fxp2double(fxp_quot),
111201-				    double_quot),
111202-				    "Miscomputed %s / %s", numbers[i],
111203-				    numbers[j]);
111204-			}
111205-		}
111206-	}
111207-}
111208-TEST_END
111209-
111210-int
111211-main(void) {
111212-	return test_no_reentrancy(
111213-	    test_parse_valid,
111214-	    test_parse_invalid,
111215-	    test_init_percent,
111216-	    test_add_simple,
111217-	    test_sub_simple,
111218-	    test_mul_simple,
111219-	    test_div_simple,
111220-	    test_round_simple,
111221-	    test_mul_frac_simple,
111222-	    test_print_simple,
111223-	    test_stress);
111224-}
111225diff --git a/jemalloc/test/unit/hash.c b/jemalloc/test/unit/hash.c
111226deleted file mode 100644
111227index 49f0823..0000000
111228--- a/jemalloc/test/unit/hash.c
111229+++ /dev/null
111230@@ -1,173 +0,0 @@
111231-/*
111232- * This file is based on code that is part of SMHasher
111233- * (https://code.google.com/p/smhasher/), and is subject to the MIT license
111234- * (http://www.opensource.org/licenses/mit-license.php).  Both email addresses
111235- * associated with the source code's revision history belong to Austin Appleby,
111236- * and the revision history ranges from 2010 to 2012.  Therefore the copyright
111237- * and license are here taken to be:
111238- *
111239- * Copyright (c) 2010-2012 Austin Appleby
111240- *
111241- * Permission is hereby granted, free of charge, to any person obtaining a copy
111242- * of this software and associated documentation files (the "Software"), to deal
111243- * in the Software without restriction, including without limitation the rights
111244- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
111245- * copies of the Software, and to permit persons to whom the Software is
111246- * furnished to do so, subject to the following conditions:
111247- *
111248- * The above copyright notice and this permission notice shall be included in
111249- * all copies or substantial portions of the Software.
111250- *
111251- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
111252- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
111253- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
111254- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
111255- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
111256- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
111257- * THE SOFTWARE.
111258- */
111259-
111260-#include "test/jemalloc_test.h"
111261-#include "jemalloc/internal/hash.h"
111262-
111263-typedef enum {
111264-	hash_variant_x86_32,
111265-	hash_variant_x86_128,
111266-	hash_variant_x64_128
111267-} hash_variant_t;
111268-
111269-static int
111270-hash_variant_bits(hash_variant_t variant) {
111271-	switch (variant) {
111272-	case hash_variant_x86_32: return 32;
111273-	case hash_variant_x86_128: return 128;
111274-	case hash_variant_x64_128: return 128;
111275-	default: not_reached();
111276-	}
111277-}
111278-
111279-static const char *
111280-hash_variant_string(hash_variant_t variant) {
111281-	switch (variant) {
111282-	case hash_variant_x86_32: return "hash_x86_32";
111283-	case hash_variant_x86_128: return "hash_x86_128";
111284-	case hash_variant_x64_128: return "hash_x64_128";
111285-	default: not_reached();
111286-	}
111287-}
111288-
111289-#define KEY_SIZE	256
111290-static void
111291-hash_variant_verify_key(hash_variant_t variant, uint8_t *key) {
111292-	const int hashbytes = hash_variant_bits(variant) / 8;
111293-	const int hashes_size = hashbytes * 256;
111294-	VARIABLE_ARRAY(uint8_t, hashes, hashes_size);
111295-	VARIABLE_ARRAY(uint8_t, final, hashbytes);
111296-	unsigned i;
111297-	uint32_t computed, expected;
111298-
111299-	memset(key, 0, KEY_SIZE);
111300-	memset(hashes, 0, hashes_size);
111301-	memset(final, 0, hashbytes);
111302-
111303-	/*
111304-	 * Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255}, using
111305-	 * 256-i as the seed.
111306-	 */
111307-	for (i = 0; i < 256; i++) {
111308-		key[i] = (uint8_t)i;
111309-		switch (variant) {
111310-		case hash_variant_x86_32: {
111311-			uint32_t out;
111312-			out = hash_x86_32(key, i, 256-i);
111313-			memcpy(&hashes[i*hashbytes], &out, hashbytes);
111314-			break;
111315-		} case hash_variant_x86_128: {
111316-			uint64_t out[2];
111317-			hash_x86_128(key, i, 256-i, out);
111318-			memcpy(&hashes[i*hashbytes], out, hashbytes);
111319-			break;
111320-		} case hash_variant_x64_128: {
111321-			uint64_t out[2];
111322-			hash_x64_128(key, i, 256-i, out);
111323-			memcpy(&hashes[i*hashbytes], out, hashbytes);
111324-			break;
111325-		} default: not_reached();
111326-		}
111327-	}
111328-
111329-	/* Hash the result array. */
111330-	switch (variant) {
111331-	case hash_variant_x86_32: {
111332-		uint32_t out = hash_x86_32(hashes, hashes_size, 0);
111333-		memcpy(final, &out, sizeof(out));
111334-		break;
111335-	} case hash_variant_x86_128: {
111336-		uint64_t out[2];
111337-		hash_x86_128(hashes, hashes_size, 0, out);
111338-		memcpy(final, out, sizeof(out));
111339-		break;
111340-	} case hash_variant_x64_128: {
111341-		uint64_t out[2];
111342-		hash_x64_128(hashes, hashes_size, 0, out);
111343-		memcpy(final, out, sizeof(out));
111344-		break;
111345-	} default: not_reached();
111346-	}
111347-
111348-	computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) |
111349-	    (final[3] << 24);
111350-
111351-	switch (variant) {
111352-#ifdef JEMALLOC_BIG_ENDIAN
111353-	case hash_variant_x86_32: expected = 0x6213303eU; break;
111354-	case hash_variant_x86_128: expected = 0x266820caU; break;
111355-	case hash_variant_x64_128: expected = 0xcc622b6fU; break;
111356-#else
111357-	case hash_variant_x86_32: expected = 0xb0f57ee3U; break;
111358-	case hash_variant_x86_128: expected = 0xb3ece62aU; break;
111359-	case hash_variant_x64_128: expected = 0x6384ba69U; break;
111360-#endif
111361-	default: not_reached();
111362-	}
111363-
111364-	expect_u32_eq(computed, expected,
111365-	    "Hash mismatch for %s(): expected %#x but got %#x",
111366-	    hash_variant_string(variant), expected, computed);
111367-}
111368-
111369-static void
111370-hash_variant_verify(hash_variant_t variant) {
111371-#define MAX_ALIGN	16
111372-	uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)];
111373-	unsigned i;
111374-
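-	/* Check the reference value at every possible key-buffer alignment. */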
111375-	for (i = 0; i < MAX_ALIGN; i++) {
111376-		hash_variant_verify_key(variant, &key[i]);
111377-	}
111378-#undef MAX_ALIGN
111379-}
111380-#undef KEY_SIZE
111381-
111382-TEST_BEGIN(test_hash_x86_32) {
111383-	hash_variant_verify(hash_variant_x86_32);
111384-}
111385-TEST_END
111386-
111387-TEST_BEGIN(test_hash_x86_128) {
111388-	hash_variant_verify(hash_variant_x86_128);
111389-}
111390-TEST_END
111391-
111392-TEST_BEGIN(test_hash_x64_128) {
111393-	hash_variant_verify(hash_variant_x64_128);
111394-}
111395-TEST_END
111396-
111397-int
111398-main(void) {
111399-	return test(
111400-	    test_hash_x86_32,
111401-	    test_hash_x86_128,
111402-	    test_hash_x64_128);
111403-}
111404diff --git a/jemalloc/test/unit/hook.c b/jemalloc/test/unit/hook.c
111405deleted file mode 100644
111406index 16a6f1b..0000000
111407--- a/jemalloc/test/unit/hook.c
111408+++ /dev/null
111409@@ -1,586 +0,0 @@
111410-#include "test/jemalloc_test.h"
111411-
111412-#include "jemalloc/internal/hook.h"
111413-
111414-static void *arg_extra;
111415-static int arg_type;
111416-static void *arg_result;
111417-static void *arg_address;
111418-static size_t arg_old_usize;
111419-static size_t arg_new_usize;
111420-static uintptr_t arg_result_raw;
111421-static uintptr_t arg_args_raw[4];
111422-
111423-static int call_count = 0;
111424-
111425-static void
111426-reset_args() {
111427-	arg_extra = NULL;
111428-	arg_type = 12345;
111429-	arg_result = NULL;
111430-	arg_address = NULL;
111431-	arg_old_usize = 0;
111432-	arg_new_usize = 0;
111433-	arg_result_raw = 0;
111434-	memset(arg_args_raw, 77, sizeof(arg_args_raw));
111435-}
111436-
111437-static void
111438-alloc_free_size(size_t sz) {
111439-	void *ptr = mallocx(sz, 0);
111440-	free(ptr);
111441-	ptr = mallocx(sz, 0);
111442-	free(ptr);
111443-	ptr = mallocx(sz, MALLOCX_TCACHE_NONE);
111444-	dallocx(ptr, MALLOCX_TCACHE_NONE);
111445-}
111446-
111447-/*
111448- * We want to support a degree of user reentrancy.  This tests a variety of
111449- * allocation scenarios.
111450- */
111451-static void
111452-be_reentrant() {
111453-	/* Let's make sure the tcache is non-empty if enabled. */
111454-	alloc_free_size(1);
111455-	alloc_free_size(1024);
111456-	alloc_free_size(64 * 1024);
111457-	alloc_free_size(256 * 1024);
111458-	alloc_free_size(1024 * 1024);
111459-
111460-	/* Some reallocation. */
111461-	void *ptr = mallocx(129, 0);
111462-	ptr = rallocx(ptr, 130, 0);
111463-	free(ptr);
111464-
111465-	ptr = mallocx(2 * 1024 * 1024, 0);
111466-	free(ptr);
111467-	ptr = mallocx(1 * 1024 * 1024, 0);
111468-	ptr = rallocx(ptr, 2 * 1024 * 1024, 0);
111469-	free(ptr);
111470-
111471-	ptr = mallocx(1, 0);
111472-	ptr = rallocx(ptr, 1000, 0);
111473-	free(ptr);
111474-}
111475-
111476-static void
111477-set_args_raw(uintptr_t *args_raw, int nargs) {
111478-	memcpy(arg_args_raw, args_raw, sizeof(uintptr_t) * nargs);
111479-}
111480-
111481-static void
111482-expect_args_raw(uintptr_t *args_raw_expected, int nargs) {
111483-	int cmp = memcmp(args_raw_expected, arg_args_raw,
111484-	    sizeof(uintptr_t) * nargs);
111485-	expect_d_eq(cmp, 0, "Raw args mismatch");
111486-}
111487-
111488-static void
111489-reset() {
111490-	call_count = 0;
111491-	reset_args();
111492-}
111493-
111494-static void
111495-test_alloc_hook(void *extra, hook_alloc_t type, void *result,
111496-    uintptr_t result_raw, uintptr_t args_raw[3]) {
111497-	call_count++;
111498-	arg_extra = extra;
111499-	arg_type = (int)type;
111500-	arg_result = result;
111501-	arg_result_raw = result_raw;
111502-	set_args_raw(args_raw, 3);
111503-	be_reentrant();
111504-}
111505-
111506-static void
111507-test_dalloc_hook(void *extra, hook_dalloc_t type, void *address,
111508-    uintptr_t args_raw[3]) {
111509-	call_count++;
111510-	arg_extra = extra;
111511-	arg_type = (int)type;
111512-	arg_address = address;
111513-	set_args_raw(args_raw, 3);
111514-	be_reentrant();
111515-}
111516-
111517-static void
111518-test_expand_hook(void *extra, hook_expand_t type, void *address,
111519-    size_t old_usize, size_t new_usize, uintptr_t result_raw,
111520-    uintptr_t args_raw[4]) {
111521-	call_count++;
111522-	arg_extra = extra;
111523-	arg_type = (int)type;
111524-	arg_address = address;
111525-	arg_old_usize = old_usize;
111526-	arg_new_usize = new_usize;
111527-	arg_result_raw = result_raw;
111528-	set_args_raw(args_raw, 4);
111529-	be_reentrant();
111530-}
111531-
111532-TEST_BEGIN(test_hooks_basic) {
111533-	/* Just verify that the hooks record their arguments correctly. */
111534-	hooks_t hooks = {
111535-		&test_alloc_hook, &test_dalloc_hook, &test_expand_hook,
111536-		(void *)111};
111537-	void *handle = hook_install(TSDN_NULL, &hooks);
111538-	uintptr_t args_raw[4] = {10, 20, 30, 40};
111539-
111540-	/* Alloc */
111541-	reset_args();
111542-	hook_invoke_alloc(hook_alloc_posix_memalign, (void *)222, 333,
111543-	    args_raw);
111544-	expect_ptr_eq(arg_extra, (void *)111, "Passed wrong user pointer");
111545-	expect_d_eq((int)hook_alloc_posix_memalign, arg_type,
111546-	    "Passed wrong alloc type");
111547-	expect_ptr_eq((void *)222, arg_result, "Passed wrong result address");
111548-	expect_u64_eq(333, arg_result_raw, "Passed wrong result");
111549-	expect_args_raw(args_raw, 3);
111550-
111551-	/* Dalloc */
111552-	reset_args();
111553-	hook_invoke_dalloc(hook_dalloc_sdallocx, (void *)222, args_raw);
111554-	expect_d_eq((int)hook_dalloc_sdallocx, arg_type,
111555-	    "Passed wrong dalloc type");
111556-	expect_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
111557-	expect_ptr_eq((void *)222, arg_address, "Passed wrong address");
111558-	expect_args_raw(args_raw, 3);
111559-
111560-	/* Expand */
111561-	reset_args();
111562-	hook_invoke_expand(hook_expand_xallocx, (void *)222, 333, 444, 555,
111563-	    args_raw);
111564-	expect_d_eq((int)hook_expand_xallocx, arg_type,
111565-	    "Passed wrong expand type");
111566-	expect_ptr_eq((void *)111, arg_extra, "Passed wrong user pointer");
111567-	expect_ptr_eq((void *)222, arg_address, "Passed wrong address");
111568-	expect_zu_eq(333, arg_old_usize, "Passed wrong old usize");
111569-	expect_zu_eq(444, arg_new_usize, "Passed wrong new usize");
111570-	expect_zu_eq(555, arg_result_raw, "Passed wrong result");
111571-	expect_args_raw(args_raw, 4);
111572-
111573-	hook_remove(TSDN_NULL, handle);
111574-}
111575-TEST_END
111576-
111577-TEST_BEGIN(test_hooks_null) {
111578-	/* Null hooks should be ignored, not crash. */
111579-	hooks_t hooks1 = {NULL, NULL, NULL, NULL};
111580-	hooks_t hooks2 = {&test_alloc_hook, NULL, NULL, NULL};
111581-	hooks_t hooks3 = {NULL, &test_dalloc_hook, NULL, NULL};
111582-	hooks_t hooks4 = {NULL, NULL, &test_expand_hook, NULL};
111583-
111584-	void *handle1 = hook_install(TSDN_NULL, &hooks1);
111585-	void *handle2 = hook_install(TSDN_NULL, &hooks2);
111586-	void *handle3 = hook_install(TSDN_NULL, &hooks3);
111587-	void *handle4 = hook_install(TSDN_NULL, &hooks4);
111588-
111589-	expect_ptr_ne(handle1, NULL, "Hook installation failed");
111590-	expect_ptr_ne(handle2, NULL, "Hook installation failed");
111591-	expect_ptr_ne(handle3, NULL, "Hook installation failed");
111592-	expect_ptr_ne(handle4, NULL, "Hook installation failed");
111593-
111594-	uintptr_t args_raw[4] = {10, 20, 30, 40};
111595-
111596-	call_count = 0;
111597-	hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw);
111598-	expect_d_eq(call_count, 1, "Called wrong number of times");
111599-
111600-	call_count = 0;
111601-	hook_invoke_dalloc(hook_dalloc_free, NULL, args_raw);
111602-	expect_d_eq(call_count, 1, "Called wrong number of times");
111603-
111604-	call_count = 0;
111605-	hook_invoke_expand(hook_expand_realloc, NULL, 0, 0, 0, args_raw);
111606-	expect_d_eq(call_count, 1, "Called wrong number of times");
111607-
111608-	hook_remove(TSDN_NULL, handle1);
111609-	hook_remove(TSDN_NULL, handle2);
111610-	hook_remove(TSDN_NULL, handle3);
111611-	hook_remove(TSDN_NULL, handle4);
111612-}
111613-TEST_END
111614-
111615-TEST_BEGIN(test_hooks_remove) {
111616-	hooks_t hooks = {&test_alloc_hook, NULL, NULL, NULL};
111617-	void *handle = hook_install(TSDN_NULL, &hooks);
111618-	expect_ptr_ne(handle, NULL, "Hook installation failed");
111619-	call_count = 0;
111620-	uintptr_t args_raw[4] = {10, 20, 30, 40};
111621-	hook_invoke_alloc(hook_alloc_malloc, NULL, 0, args_raw);
111622-	expect_d_eq(call_count, 1, "Hook not invoked");
111623-
111624-	call_count = 0;
111625-	hook_remove(TSDN_NULL, handle);
111626-	hook_invoke_alloc(hook_alloc_malloc, NULL, 0, NULL);
111627-	expect_d_eq(call_count, 0, "Hook invoked after removal");
111628-
111629-}
111630-TEST_END
111631-
111632-TEST_BEGIN(test_hooks_alloc_simple) {
111633-	/* "Simple" in the sense that we're not in a realloc variant. */
111634-	hooks_t hooks = {&test_alloc_hook, NULL, NULL, (void *)123};
111635-	void *handle = hook_install(TSDN_NULL, &hooks);
111636-	expect_ptr_ne(handle, NULL, "Hook installation failed");
111637-
111638-	/* Stop malloc from being optimized away. */
111639-	volatile int err;
111640-	void *volatile ptr;
111641-
111642-	/* malloc */
111643-	reset();
111644-	ptr = malloc(1);
111645-	expect_d_eq(call_count, 1, "Hook not called");
111646-	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
111647-	expect_d_eq(arg_type, (int)hook_alloc_malloc, "Wrong hook type");
111648-	expect_ptr_eq(ptr, arg_result, "Wrong result");
111649-	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
111650-	    "Wrong raw result");
111651-	expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
111652-	free(ptr);
111653-
111654-	/* posix_memalign */
111655-	reset();
111656-	err = posix_memalign((void **)&ptr, 1024, 1);
111657-	expect_d_eq(call_count, 1, "Hook not called");
111658-	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
111659-	expect_d_eq(arg_type, (int)hook_alloc_posix_memalign,
111660-	    "Wrong hook type");
111661-	expect_ptr_eq(ptr, arg_result, "Wrong result");
111662-	expect_u64_eq((uintptr_t)err, (uintptr_t)arg_result_raw,
111663-	    "Wrong raw result");
111664-	expect_u64_eq((uintptr_t)&ptr, arg_args_raw[0], "Wrong argument");
111665-	expect_u64_eq((uintptr_t)1024, arg_args_raw[1], "Wrong argument");
111666-	expect_u64_eq((uintptr_t)1, arg_args_raw[2], "Wrong argument");
111667-	free(ptr);
111668-
111669-	/* aligned_alloc */
111670-	reset();
111671-	ptr = aligned_alloc(1024, 1);
111672-	expect_d_eq(call_count, 1, "Hook not called");
111673-	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
111674-	expect_d_eq(arg_type, (int)hook_alloc_aligned_alloc,
111675-	    "Wrong hook type");
111676-	expect_ptr_eq(ptr, arg_result, "Wrong result");
111677-	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
111678-	    "Wrong raw result");
111679-	expect_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
111680-	expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
111681-	free(ptr);
111682-
111683-	/* calloc */
111684-	reset();
111685-	ptr = calloc(11, 13);
111686-	expect_d_eq(call_count, 1, "Hook not called");
111687-	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
111688-	expect_d_eq(arg_type, (int)hook_alloc_calloc, "Wrong hook type");
111689-	expect_ptr_eq(ptr, arg_result, "Wrong result");
111690-	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
111691-	    "Wrong raw result");
111692-	expect_u64_eq((uintptr_t)11, arg_args_raw[0], "Wrong argument");
111693-	expect_u64_eq((uintptr_t)13, arg_args_raw[1], "Wrong argument");
111694-	free(ptr);
111695-
111696-	/* memalign */
111697-#ifdef JEMALLOC_OVERRIDE_MEMALIGN
111698-	reset();
111699-	ptr = memalign(1024, 1);
111700-	expect_d_eq(call_count, 1, "Hook not called");
111701-	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
111702-	expect_d_eq(arg_type, (int)hook_alloc_memalign, "Wrong hook type");
111703-	expect_ptr_eq(ptr, arg_result, "Wrong result");
111704-	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
111705-	    "Wrong raw result");
111706-	expect_u64_eq((uintptr_t)1024, arg_args_raw[0], "Wrong argument");
111707-	expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
111708-	free(ptr);
111709-#endif /* JEMALLOC_OVERRIDE_MEMALIGN */
111710-
111711-	/* valloc */
111712-#ifdef JEMALLOC_OVERRIDE_VALLOC
111713-	reset();
111714-	ptr = valloc(1);
111715-	expect_d_eq(call_count, 1, "Hook not called");
111716-	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
111717-	expect_d_eq(arg_type, (int)hook_alloc_valloc, "Wrong hook type");
111718-	expect_ptr_eq(ptr, arg_result, "Wrong result");
111719-	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
111720-	    "Wrong raw result");
111721-	expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
111722-	free(ptr);
111723-#endif /* JEMALLOC_OVERRIDE_VALLOC */
111724-
111725-	/* mallocx */
111726-	reset();
111727-	ptr = mallocx(1, MALLOCX_LG_ALIGN(10));
111728-	expect_d_eq(call_count, 1, "Hook not called");
111729-	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
111730-	expect_d_eq(arg_type, (int)hook_alloc_mallocx, "Wrong hook type");
111731-	expect_ptr_eq(ptr, arg_result, "Wrong result");
111732-	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
111733-	    "Wrong raw result");
111734-	expect_u64_eq((uintptr_t)1, arg_args_raw[0], "Wrong argument");
111735-	expect_u64_eq((uintptr_t)MALLOCX_LG_ALIGN(10), arg_args_raw[1],
111736-	    "Wrong flags");
111737-	free(ptr);
111738-
111739-	hook_remove(TSDN_NULL, handle);
111740-}
111741-TEST_END
111742-
111743-TEST_BEGIN(test_hooks_dalloc_simple) {
111744-	/* "Simple" in the sense that we're not in a realloc variant. */
111745-	hooks_t hooks = {NULL, &test_dalloc_hook, NULL, (void *)123};
111746-	void *handle = hook_install(TSDN_NULL, &hooks);
111747-	expect_ptr_ne(handle, NULL, "Hook installation failed");
111748-
111749-	void *volatile ptr;
111750-
111751-	/* free() */
111752-	reset();
111753-	ptr = malloc(1);
111754-	free(ptr);
111755-	expect_d_eq(call_count, 1, "Hook not called");
111756-	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
111757-	expect_d_eq(arg_type, (int)hook_dalloc_free, "Wrong hook type");
111758-	expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
111759-	expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
111760-
111761-	/* dallocx() */
111762-	reset();
111763-	ptr = malloc(1);
111764-	dallocx(ptr, MALLOCX_TCACHE_NONE);
111765-	expect_d_eq(call_count, 1, "Hook not called");
111766-	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
111767-	expect_d_eq(arg_type, (int)hook_dalloc_dallocx, "Wrong hook type");
111768-	expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
111769-	expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
111770-	expect_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[1],
111771-	    "Wrong raw arg");
111772-
111773-	/* sdallocx() */
111774-	reset();
111775-	ptr = malloc(1);
111776-	sdallocx(ptr, 1, MALLOCX_TCACHE_NONE);
111777-	expect_d_eq(call_count, 1, "Hook not called");
111778-	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
111779-	expect_d_eq(arg_type, (int)hook_dalloc_sdallocx, "Wrong hook type");
111780-	expect_ptr_eq(ptr, arg_address, "Wrong pointer freed");
111781-	expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
111782-	expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong raw arg");
111783-	expect_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[2],
111784-	    "Wrong raw arg");
111785-
111786-	hook_remove(TSDN_NULL, handle);
111787-}
111788-TEST_END
111789-
111790-TEST_BEGIN(test_hooks_expand_simple) {
111791-	/* "Simple" in the sense that we're not in a realloc variant. */
111792-	hooks_t hooks = {NULL, NULL, &test_expand_hook, (void *)123};
111793-	void *handle = hook_install(TSDN_NULL, &hooks);
111794-	expect_ptr_ne(handle, NULL, "Hook installation failed");
111795-
111796-	void *volatile ptr;
111797-
111798-	/* xallocx() */
111799-	reset();
111800-	ptr = malloc(1);
111801-	size_t new_usize = xallocx(ptr, 100, 200, MALLOCX_TCACHE_NONE);
111802-	expect_d_eq(call_count, 1, "Hook not called");
111803-	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
111804-	expect_d_eq(arg_type, (int)hook_expand_xallocx, "Wrong hook type");
111805-	expect_ptr_eq(ptr, arg_address, "Wrong pointer expanded");
111806-	expect_u64_eq(arg_old_usize, nallocx(1, 0), "Wrong old usize");
111807-	expect_u64_eq(arg_new_usize, sallocx(ptr, 0), "Wrong new usize");
111808-	expect_u64_eq(new_usize, arg_result_raw, "Wrong result");
111809-	expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong arg");
111810-	expect_u64_eq(100, arg_args_raw[1], "Wrong arg");
111811-	expect_u64_eq(200, arg_args_raw[2], "Wrong arg");
111812-	expect_u64_eq(MALLOCX_TCACHE_NONE, arg_args_raw[3], "Wrong arg");
111813-
111814-	hook_remove(TSDN_NULL, handle);
111815-}
111816-TEST_END
111817-
111818-TEST_BEGIN(test_hooks_realloc_as_malloc_or_free) {
111819-	hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook,
111820-		&test_expand_hook, (void *)123};
111821-	void *handle = hook_install(TSDN_NULL, &hooks);
111822-	expect_ptr_ne(handle, NULL, "Hook installation failed");
111823-
111824-	void *volatile ptr;
111825-
111826-	/* realloc(NULL, size) as malloc */
111827-	reset();
111828-	ptr = realloc(NULL, 1);
111829-	expect_d_eq(call_count, 1, "Hook not called");
111830-	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
111831-	expect_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
111832-	expect_ptr_eq(ptr, arg_result, "Wrong result");
111833-	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
111834-	    "Wrong raw result");
111835-	expect_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
111836-	expect_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong argument");
111837-	free(ptr);
111838-
111839-	/* realloc(ptr, 0) as free */
111840-	if (opt_zero_realloc_action == zero_realloc_action_free) {
111841-		ptr = malloc(1);
111842-		reset();
111843-		realloc(ptr, 0);
111844-		expect_d_eq(call_count, 1, "Hook not called");
111845-		expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
111846-		expect_d_eq(arg_type, (int)hook_dalloc_realloc,
111847-		    "Wrong hook type");
111848-		expect_ptr_eq(ptr, arg_address,
111849-		    "Wrong pointer freed");
111850-		expect_u64_eq((uintptr_t)ptr, arg_args_raw[0],
111851-		    "Wrong raw arg");
111852-		expect_u64_eq((uintptr_t)0, arg_args_raw[1],
111853-		    "Wrong raw arg");
111854-	}
111855-
111856-	/* realloc(NULL, 0) as malloc(0) */
111857-	reset();
111858-	ptr = realloc(NULL, 0);
111859-	expect_d_eq(call_count, 1, "Hook not called");
111860-	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
111861-	expect_d_eq(arg_type, (int)hook_alloc_realloc, "Wrong hook type");
111862-	expect_ptr_eq(ptr, arg_result, "Wrong result");
111863-	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
111864-	    "Wrong raw result");
111865-	expect_u64_eq((uintptr_t)NULL, arg_args_raw[0], "Wrong argument");
111866-	expect_u64_eq((uintptr_t)0, arg_args_raw[1], "Wrong argument");
111867-	free(ptr);
111868-
111869-	hook_remove(TSDN_NULL, handle);
111870-}
111871-TEST_END
111872-
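-/*
- * Shared body for the realloc() and rallocx() tests: an in-place grow should
- * fire only the expand hook, while a moving realloc should fire the dalloc
- * and alloc hooks.
- */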
111873-static void
111874-do_realloc_test(void *(*ralloc)(void *, size_t, int), int flags,
111875-    int expand_type, int dalloc_type) {
111876-	hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook,
111877-		&test_expand_hook, (void *)123};
111878-	void *handle = hook_install(TSDN_NULL, &hooks);
111879-	expect_ptr_ne(handle, NULL, "Hook installation failed");
111880-
111881-	void *volatile ptr;
111882-	void *volatile ptr2;
111883-
111884-	/* Realloc in-place, small. */
111885-	ptr = malloc(129);
111886-	reset();
111887-	ptr2 = ralloc(ptr, 130, flags);
111888-	expect_ptr_eq(ptr, ptr2, "Small realloc moved");
111889-
111890-	expect_d_eq(call_count, 1, "Hook not called");
111891-	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
111892-	expect_d_eq(arg_type, expand_type, "Wrong hook type");
111893-	expect_ptr_eq(ptr, arg_address, "Wrong address");
111894-	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
111895-	    "Wrong raw result");
111896-	expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
111897-	expect_u64_eq((uintptr_t)130, arg_args_raw[1], "Wrong argument");
111898-	free(ptr);
111899-
111900-	/*
111901-	 * Realloc in-place, large.  Since we can't guarantee the large case
111902-	 * across all platforms, we stay resilient to moving results.
111903-	 */
111904-	ptr = malloc(2 * 1024 * 1024);
111905-	free(ptr);
111906-	ptr2 = malloc(1 * 1024 * 1024);
111907-	reset();
111908-	ptr = ralloc(ptr2, 2 * 1024 * 1024, flags);
111909-	/* ptr is the new address, ptr2 is the old address. */
111910-	if (ptr == ptr2) {
111911-		expect_d_eq(call_count, 1, "Hook not called");
111912-		expect_d_eq(arg_type, expand_type, "Wrong hook type");
111913-	} else {
111914-		expect_d_eq(call_count, 2, "Wrong hooks called");
111915-		expect_ptr_eq(ptr, arg_result, "Wrong address");
111916-		expect_d_eq(arg_type, dalloc_type, "Wrong hook type");
111917-	}
111918-	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
111919-	expect_ptr_eq(ptr2, arg_address, "Wrong address");
111920-	expect_u64_eq((uintptr_t)ptr, (uintptr_t)arg_result_raw,
111921-	    "Wrong raw result");
111922-	expect_u64_eq((uintptr_t)ptr2, arg_args_raw[0], "Wrong argument");
111923-	expect_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1],
111924-	    "Wrong argument");
111925-	free(ptr);
111926-
111927-	/* Realloc with move, small. */
111928-	ptr = malloc(8);
111929-	reset();
111930-	ptr2 = ralloc(ptr, 128, flags);
111931-	expect_ptr_ne(ptr, ptr2, "Small realloc didn't move");
111932-
111933-	expect_d_eq(call_count, 2, "Wrong hooks called");
111934-	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
111935-	expect_d_eq(arg_type, dalloc_type, "Wrong hook type");
111936-	expect_ptr_eq(ptr, arg_address, "Wrong address");
111937-	expect_ptr_eq(ptr2, arg_result, "Wrong address");
111938-	expect_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw,
111939-	    "Wrong raw result");
111940-	expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
111941-	expect_u64_eq((uintptr_t)128, arg_args_raw[1], "Wrong argument");
111942-	free(ptr2);
111943-
111944-	/* Realloc with move, large. */
111945-	ptr = malloc(1);
111946-	reset();
111947-	ptr2 = ralloc(ptr, 2 * 1024 * 1024, flags);
111948-	expect_ptr_ne(ptr, ptr2, "Large realloc didn't move");
111949-
111950-	expect_d_eq(call_count, 2, "Wrong hooks called");
111951-	expect_ptr_eq(arg_extra, (void *)123, "Wrong extra");
111952-	expect_d_eq(arg_type, dalloc_type, "Wrong hook type");
111953-	expect_ptr_eq(ptr, arg_address, "Wrong address");
111954-	expect_ptr_eq(ptr2, arg_result, "Wrong address");
111955-	expect_u64_eq((uintptr_t)ptr2, (uintptr_t)arg_result_raw,
111956-	    "Wrong raw result");
111957-	expect_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong argument");
111958-	expect_u64_eq((uintptr_t)2 * 1024 * 1024, arg_args_raw[1],
111959-	    "Wrong argument");
111960-	free(ptr2);
111961-
111962-	hook_remove(TSDN_NULL, handle);
111963-}
111964-
111965-static void *
111966-realloc_wrapper(void *ptr, size_t size, UNUSED int flags) {
111967-	return realloc(ptr, size);
111968-}
111969-
111970-TEST_BEGIN(test_hooks_realloc) {
111971-	do_realloc_test(&realloc_wrapper, 0, hook_expand_realloc,
111972-	    hook_dalloc_realloc);
111973-}
111974-TEST_END
111975-
111976-TEST_BEGIN(test_hooks_rallocx) {
111977-	do_realloc_test(&rallocx, MALLOCX_TCACHE_NONE, hook_expand_rallocx,
111978-	    hook_dalloc_rallocx);
111979-}
111980-TEST_END
111981-
111982-int
111983-main(void) {
111984-	/* We assert on call counts. */
111985-	return test_no_reentrancy(
111986-	    test_hooks_basic,
111987-	    test_hooks_null,
111988-	    test_hooks_remove,
111989-	    test_hooks_alloc_simple,
111990-	    test_hooks_dalloc_simple,
111991-	    test_hooks_expand_simple,
111992-	    test_hooks_realloc_as_malloc_or_free,
111993-	    test_hooks_realloc,
111994-	    test_hooks_rallocx);
111995-}
111996diff --git a/jemalloc/test/unit/hpa.c b/jemalloc/test/unit/hpa.c
111997deleted file mode 100644
111998index dfd57f3..0000000
111999--- a/jemalloc/test/unit/hpa.c
112000+++ /dev/null
112001@@ -1,459 +0,0 @@
112002-#include "test/jemalloc_test.h"
112003-
112004-#include "jemalloc/internal/hpa.h"
112005-#include "jemalloc/internal/nstime.h"
112006-
112007-#define SHARD_IND 111
112008-
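-/* Largest request the test shard serves; wired into slab_max_alloc below. */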
112009-#define ALLOC_MAX (HUGEPAGE / 4)
112010-
112011-typedef struct test_data_s test_data_t;
112012-struct test_data_s {
112013-	/*
112014-	 * Must be the first member -- we convert back and forth between the
112015-	 * test_data_t and the hpa_shard_t.
112016-	 */
112017-	hpa_shard_t shard;
112018-	hpa_central_t central;
112019-	base_t *base;
112020-	edata_cache_t shard_edata_cache;
112021-
112022-	emap_t emap;
112023-};
112024-
112025-static hpa_shard_opts_t test_hpa_shard_opts_default = {
112026-	/* slab_max_alloc */
112027-	ALLOC_MAX,
112028-	/* hugification threshold */
112029-	HUGEPAGE,
112030-	/* dirty_mult */
112031-	FXP_INIT_PERCENT(25),
112032-	/* deferral_allowed */
112033-	false,
112034-	/* hugify_delay_ms */
112035-	10 * 1000,
112036-};
112037-
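-/*
- * Builds the base, edata cache, emap, central, and shard.  Returning the
- * shard pointer works because it is the first member of test_data_t above.
- */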
112038-static hpa_shard_t *
112039-create_test_data(hpa_hooks_t *hooks, hpa_shard_opts_t *opts) {
112040-	bool err;
112041-	base_t *base = base_new(TSDN_NULL, /* ind */ SHARD_IND,
112042-	    &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
112043-	assert_ptr_not_null(base, "");
112044-
112045-	test_data_t *test_data = malloc(sizeof(test_data_t));
112046-	assert_ptr_not_null(test_data, "");
112047-
112048-	test_data->base = base;
112049-
112050-	err = edata_cache_init(&test_data->shard_edata_cache, base);
112051-	assert_false(err, "");
112052-
112053-	err = emap_init(&test_data->emap, test_data->base, /* zeroed */ false);
112054-	assert_false(err, "");
112055-
112056-	err = hpa_central_init(&test_data->central, test_data->base, hooks);
112057-	assert_false(err, "");
112058-
112059-	err = hpa_shard_init(&test_data->shard, &test_data->central,
112060-	    &test_data->emap, test_data->base, &test_data->shard_edata_cache,
112061-	    SHARD_IND, opts);
112062-	assert_false(err, "");
112063-
112064-	return (hpa_shard_t *)test_data;
112065-}
112066-
112067-static void
112068-destroy_test_data(hpa_shard_t *shard) {
112069-	test_data_t *test_data = (test_data_t *)shard;
112070-	base_delete(TSDN_NULL, test_data->base);
112071-	free(test_data);
112072-}
112073-
112074-TEST_BEGIN(test_alloc_max) {
112075-	test_skip_if(!hpa_supported());
112076-
112077-	hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
112078-	    &test_hpa_shard_opts_default);
112079-	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
112080-
112081-	edata_t *edata;
112082-
112083-	/* Small max */
112084-	bool deferred_work_generated = false;
112085-	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false, false,
112086-	    false, &deferred_work_generated);
112087-	expect_ptr_not_null(edata, "Allocation of small max failed");
112088-	edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX + PAGE, PAGE, false,
112089-	    false, false, &deferred_work_generated);
112090-	expect_ptr_null(edata, "Allocation of larger than small max succeeded");
112091-
112092-	destroy_test_data(shard);
112093-}
112094-TEST_END
112095-
112096-typedef struct mem_contents_s mem_contents_t;
112097-struct mem_contents_s {
112098-	uintptr_t my_addr;
112099-	size_t size;
112100-	edata_t *my_edata;
112101-	rb_node(mem_contents_t) link;
112102-};
112103-
112104-static int
112105-mem_contents_cmp(const mem_contents_t *a, const mem_contents_t *b) {
112106-	return (a->my_addr > b->my_addr) - (a->my_addr < b->my_addr);
112107-}
112108-
112109-typedef rb_tree(mem_contents_t) mem_tree_t;
112110-rb_gen(static, mem_tree_, mem_tree_t, mem_contents_t, link,
112111-    mem_contents_cmp);
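-/*
- * The tree keeps live extents ordered by address so that node_check() can
- * verify that neighboring extents never overlap.
- */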
112112-
112113-static void
112114-node_assert_ordered(mem_contents_t *a, mem_contents_t *b) {
112115-	assert_zu_lt(a->my_addr, a->my_addr + a->size, "Overflow");
112116-	assert_zu_le(a->my_addr + a->size, b->my_addr, "");
112117-}
112118-
112119-static void
112120-node_check(mem_tree_t *tree, mem_contents_t *contents) {
112121-	edata_t *edata = contents->my_edata;
112122-	assert_ptr_eq(contents, (void *)contents->my_addr, "");
112123-	assert_ptr_eq(contents, edata_base_get(edata), "");
112124-	assert_zu_eq(contents->size, edata_size_get(edata), "");
112125-	assert_ptr_eq(contents->my_edata, edata, "");
112126-
112127-	mem_contents_t *next = mem_tree_next(tree, contents);
112128-	if (next != NULL) {
112129-		node_assert_ordered(contents, next);
112130-	}
112131-	mem_contents_t *prev = mem_tree_prev(tree, contents);
112132-	if (prev != NULL) {
112133-		node_assert_ordered(prev, contents);
112134-	}
112135-}
112136-
112137-static void
112138-node_insert(mem_tree_t *tree, edata_t *edata, size_t npages) {
112139-	mem_contents_t *contents = (mem_contents_t *)edata_base_get(edata);
112140-	contents->my_addr = (uintptr_t)edata_base_get(edata);
112141-	contents->size = edata_size_get(edata);
112142-	contents->my_edata = edata;
112143-	mem_tree_insert(tree, contents);
112144-	node_check(tree, contents);
112145-}
112146-
112147-static void
112148-node_remove(mem_tree_t *tree, edata_t *edata) {
112149-	mem_contents_t *contents = (mem_contents_t *)edata_base_get(edata);
112150-	node_check(tree, contents);
112151-	mem_tree_remove(tree, contents);
112152-}
112153-
112154-TEST_BEGIN(test_stress) {
112155-	test_skip_if(!hpa_supported());
112156-
112157-	hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
112158-	    &test_hpa_shard_opts_default);
112159-
112160-	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
112161-
112162-	const size_t nlive_edatas_max = 500;
112163-	size_t nlive_edatas = 0;
112164-	edata_t **live_edatas = calloc(nlive_edatas_max, sizeof(edata_t *));
112165-	/*
112166-	 * Nothing special about this constant; we're only fixing it for
112167-	 * consistency across runs.
112168-	 */
112169-	size_t prng_state = (size_t)0x76999ffb014df07c;
112170-
112171-	mem_tree_t tree;
112172-	mem_tree_new(&tree);
112173-
112174-	bool deferred_work_generated = false;
112175-
112176-	for (size_t i = 0; i < 100 * 1000; i++) {
112177-		size_t operation = prng_range_zu(&prng_state, 2);
112178-		if (operation == 0) {
112179-			/* Alloc */
112180-			if (nlive_edatas == nlive_edatas_max) {
112181-				continue;
112182-			}
112183-
112184-			/*
112185-			 * We make sure to get an even balance of small and
112186-			 * large allocations.
112187-			 */
112188-			size_t npages_min = 1;
112189-			size_t npages_max = ALLOC_MAX / PAGE;
112190-			size_t npages = npages_min + prng_range_zu(&prng_state,
112191-			    npages_max - npages_min);
112192-			edata_t *edata = pai_alloc(tsdn, &shard->pai,
112193-			    npages * PAGE, PAGE, false, false, false,
112194-			    &deferred_work_generated);
112195-			assert_ptr_not_null(edata,
112196-			    "Unexpected allocation failure");
112197-			live_edatas[nlive_edatas] = edata;
112198-			nlive_edatas++;
112199-			node_insert(&tree, edata, npages);
112200-		} else {
112201-			/* Free. */
112202-			if (nlive_edatas == 0) {
112203-				continue;
112204-			}
112205-			size_t victim = prng_range_zu(&prng_state, nlive_edatas);
112206-			edata_t *to_free = live_edatas[victim];
112207-			live_edatas[victim] = live_edatas[nlive_edatas - 1];
112208-			nlive_edatas--;
112209-			node_remove(&tree, to_free);
112210-			pai_dalloc(tsdn, &shard->pai, to_free,
112211-			    &deferred_work_generated);
112212-		}
112213-	}
112214-
112215-	size_t ntreenodes = 0;
112216-	for (mem_contents_t *contents = mem_tree_first(&tree); contents != NULL;
112217-	    contents = mem_tree_next(&tree, contents)) {
112218-		ntreenodes++;
112219-		node_check(&tree, contents);
112220-	}
112221-	expect_zu_eq(ntreenodes, nlive_edatas, "");
112222-
112223-	/*
112224-	 * Test hpa_shard_destroy, which requires as a precondition that all its
112225-	 * extents have been deallocated.
112226-	 */
112227-	for (size_t i = 0; i < nlive_edatas; i++) {
112228-		edata_t *to_free = live_edatas[i];
112229-		node_remove(&tree, to_free);
112230-		pai_dalloc(tsdn, &shard->pai, to_free,
112231-		    &deferred_work_generated);
112232-	}
112233-	hpa_shard_destroy(tsdn, shard);
112234-
112235-	free(live_edatas);
112236-	destroy_test_data(shard);
112237-}
112238-TEST_END
112239-
112240-static void
112241-expect_contiguous(edata_t **edatas, size_t nedatas) {
112242-	for (size_t i = 0; i < nedatas; i++) {
112243-		size_t expected = (size_t)edata_base_get(edatas[0])
112244-		    + i * PAGE;
112245-		expect_zu_eq(expected, (size_t)edata_base_get(edatas[i]),
112246-		    "Mismatch at index %zu", i);
112247-	}
112248-}
112249-
112250-TEST_BEGIN(test_alloc_dalloc_batch) {
112251-	test_skip_if(!hpa_supported());
112252-
112253-	hpa_shard_t *shard = create_test_data(&hpa_hooks_default,
112254-	    &test_hpa_shard_opts_default);
112255-	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
112256-
112257-	bool deferred_work_generated = false;
112258-
112259-	enum {NALLOCS = 8};
112260-
112261-	edata_t *allocs[NALLOCS];
112262-	/*
112263-	 * Allocate a mix of ways; first half from regular alloc, second half
112264-	 * from alloc_batch.
112265-	 */
112266-	for (size_t i = 0; i < NALLOCS / 2; i++) {
112267-		allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
112268-		    /* zero */ false, /* guarded */ false,
112269-		    /* frequent_reuse */ false, &deferred_work_generated);
112270-		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
112271-	}
112272-	edata_list_active_t allocs_list;
112273-	edata_list_active_init(&allocs_list);
112274-	size_t nsuccess = pai_alloc_batch(tsdn, &shard->pai, PAGE, NALLOCS / 2,
112275-	    &allocs_list, &deferred_work_generated);
112276-	expect_zu_eq(NALLOCS / 2, nsuccess, "Unexpected oom");
112277-	for (size_t i = NALLOCS / 2; i < NALLOCS; i++) {
112278-		allocs[i] = edata_list_active_first(&allocs_list);
112279-		edata_list_active_remove(&allocs_list, allocs[i]);
112280-	}
112281-
112282-	/*
112283-	 * Should have allocated them contiguously, despite the differing
112284-	 * methods used.
112285-	 */
112286-	void *orig_base = edata_base_get(allocs[0]);
112287-	expect_contiguous(allocs, NALLOCS);
112288-
112289-	/*
112290-	 * Batch dalloc the first half, individually deallocate the second half.
112291-	 */
112292-	for (size_t i = 0; i < NALLOCS / 2; i++) {
112293-		edata_list_active_append(&allocs_list, allocs[i]);
112294-	}
112295-	pai_dalloc_batch(tsdn, &shard->pai, &allocs_list,
112296-	    &deferred_work_generated);
112297-	for (size_t i = NALLOCS / 2; i < NALLOCS; i++) {
112298-		pai_dalloc(tsdn, &shard->pai, allocs[i],
112299-		    &deferred_work_generated);
112300-	}
112301-
112302-	/* Reallocate (individually), and ensure reuse and contiguity. */
112303-	for (size_t i = 0; i < NALLOCS; i++) {
112304-		allocs[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE,
112305-		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
112306-		    false, &deferred_work_generated);
112307-		expect_ptr_not_null(allocs[i], "Unexpected alloc failure.");
112308-	}
112309-	void *new_base = edata_base_get(allocs[0]);
112310-	expect_ptr_eq(orig_base, new_base,
112311-	    "Failed to reuse the allocated memory.");
112312-	expect_contiguous(allocs, NALLOCS);
112313-
112314-	destroy_test_data(shard);
112315-}
112316-TEST_END
112317-
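-/*
- * Stub hooks for test_defer_time: map() just bumps a fake address, the purge,
- * hugify, and dehugify stubs only record that they ran, and the clock is
- * driven manually through defer_curtime.
- */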
112318-static uintptr_t defer_bump_ptr = HUGEPAGE * 123;
112319-static void *
112320-defer_test_map(size_t size) {
112321-	void *result = (void *)defer_bump_ptr;
112322-	defer_bump_ptr += size;
112323-	return result;
112324-}
112325-
112326-static void
112327-defer_test_unmap(void *ptr, size_t size) {
112328-	(void)ptr;
112329-	(void)size;
112330-}
112331-
112332-static bool defer_purge_called = false;
112333-static void
112334-defer_test_purge(void *ptr, size_t size) {
112335-	(void)ptr;
112336-	(void)size;
112337-	defer_purge_called = true;
112338-}
112339-
112340-static bool defer_hugify_called = false;
112341-static void
112342-defer_test_hugify(void *ptr, size_t size) {
112343-	defer_hugify_called = true;
112344-}
112345-
112346-static bool defer_dehugify_called = false;
112347-static void
112348-defer_test_dehugify(void *ptr, size_t size) {
112349-	defer_dehugify_called = true;
112350-}
112351-
112352-static nstime_t defer_curtime;
112353-static void
112354-defer_test_curtime(nstime_t *r_time, bool first_reading) {
112355-	*r_time = defer_curtime;
112356-}
112357-
112358-static uint64_t
112359-defer_test_ms_since(nstime_t *past_time) {
112360-	return (nstime_ns(&defer_curtime) - nstime_ns(past_time)) / 1000 / 1000;
112361-}
112362-
112363-TEST_BEGIN(test_defer_time) {
112364-	test_skip_if(!hpa_supported());
112365-
112366-	hpa_hooks_t hooks;
112367-	hooks.map = &defer_test_map;
112368-	hooks.unmap = &defer_test_unmap;
112369-	hooks.purge = &defer_test_purge;
112370-	hooks.hugify = &defer_test_hugify;
112371-	hooks.dehugify = &defer_test_dehugify;
112372-	hooks.curtime = &defer_test_curtime;
112373-	hooks.ms_since = &defer_test_ms_since;
112374-
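-	/*
-	 * Allow deferral so that purging and hugifying are expected to happen
-	 * only inside the explicit hpa_shard_do_deferred_work() calls below.
-	 */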
112375-	hpa_shard_opts_t opts = test_hpa_shard_opts_default;
112376-	opts.deferral_allowed = true;
112377-
112378-	hpa_shard_t *shard = create_test_data(&hooks, &opts);
112379-
112380-	bool deferred_work_generated = false;
112381-
112382-	nstime_init(&defer_curtime, 0);
112383-	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
112384-	edata_t *edatas[HUGEPAGE_PAGES];
112385-	for (int i = 0; i < (int)HUGEPAGE_PAGES; i++) {
112386-		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
112387-		    false, false, &deferred_work_generated);
112388-		expect_ptr_not_null(edatas[i], "Unexpected null edata");
112389-	}
112390-	hpa_shard_do_deferred_work(tsdn, shard);
112391-	expect_false(defer_hugify_called, "Hugified too early");
112392-
112393-	/* Hugification delay is set to 10 seconds in options. */
112394-	nstime_init2(&defer_curtime, 11, 0);
112395-	hpa_shard_do_deferred_work(tsdn, shard);
112396-	expect_true(defer_hugify_called, "Failed to hugify");
112397-
112398-	defer_hugify_called = false;
112399-
112400-	/* Purge.  Recall that dirty_mult is .25. */
112401-	for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
112402-		pai_dalloc(tsdn, &shard->pai, edatas[i],
112403-		    &deferred_work_generated);
112404-	}
112405-
112406-	hpa_shard_do_deferred_work(tsdn, shard);
112407-
112408-	expect_false(defer_hugify_called, "Hugified too early");
112409-	expect_true(defer_dehugify_called, "Should have dehugified");
112410-	expect_true(defer_purge_called, "Should have purged");
112411-	defer_hugify_called = false;
112412-	defer_dehugify_called = false;
112413-	defer_purge_called = false;
112414-
112415-	/*
112416-	 * Refill the page.  We now meet the hugification threshold; we should
112417-	 * be marked for pending hugify.
112418-	 */
112419-	for (int i = 0; i < (int)HUGEPAGE_PAGES / 2; i++) {
112420-		edatas[i] = pai_alloc(tsdn, &shard->pai, PAGE, PAGE, false,
112421-		    false, false, &deferred_work_generated);
112422-		expect_ptr_not_null(edatas[i], "Unexpected null edata");
112423-	}
112424-	/*
112425-	 * We would be ineligible for hugification, had we not already met the
112426-	 * threshold before dipping below it.
112427-	 */
112428-	pai_dalloc(tsdn, &shard->pai, edatas[0],
112429-	    &deferred_work_generated);
112430-	/* Wait for the threshold again. */
112431-	nstime_init2(&defer_curtime, 22, 0);
112432-	hpa_shard_do_deferred_work(tsdn, shard);
112433-	expect_true(defer_hugify_called, "Hugified too early");
112434-	expect_false(defer_dehugify_called, "Unexpected dehugify");
112435-	expect_false(defer_purge_called, "Unexpected purge");
112436-
112437-	destroy_test_data(shard);
112438-}
112439-TEST_END
112440-
112441-int
112442-main(void) {
112443-	/*
112444-	 * These trigger unused-function warnings on CI runs, even if declared
112445-	 * with static inline.
112446-	 */
112447-	(void)mem_tree_empty;
112448-	(void)mem_tree_last;
112449-	(void)mem_tree_search;
112450-	(void)mem_tree_nsearch;
112451-	(void)mem_tree_psearch;
112452-	(void)mem_tree_iter;
112453-	(void)mem_tree_reverse_iter;
112454-	(void)mem_tree_destroy;
112455-	return test_no_reentrancy(
112456-	    test_alloc_max,
112457-	    test_stress,
112458-	    test_alloc_dalloc_batch,
112459-	    test_defer_time);
112460-}
112461diff --git a/jemalloc/test/unit/hpa_background_thread.c b/jemalloc/test/unit/hpa_background_thread.c
112462deleted file mode 100644
112463index 81c2561..0000000
112464--- a/jemalloc/test/unit/hpa_background_thread.c
112465+++ /dev/null
112466@@ -1,188 +0,0 @@
112467-#include "test/jemalloc_test.h"
112468-#include "test/sleep.h"
112469-
112470-static void
112471-sleep_for_background_thread_interval() {
112472-	/*
112473-	 * The sleep interval set in our .sh file is 50ms, so the background
112474-	 * thread will likely have run if we sleep for four times that.
112475-	 */
112476-	sleep_ns(200 * 1000 * 1000);
112477-}
112478-
112479-static unsigned
112480-create_arena() {
112481-	unsigned arena_ind;
112482-	size_t sz;
112483-
112484-	sz = sizeof(unsigned);
112485-	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 2),
112486-	    0, "Unexpected mallctl() failure");
112487-	return arena_ind;
112488-}
112489-
112490-static size_t
112491-get_empty_ndirty(unsigned arena_ind) {
112492-	int err;
112493-	size_t ndirty_huge;
112494-	size_t ndirty_nonhuge;
112495-	uint64_t epoch = 1;
112496-	size_t sz = sizeof(epoch);
112497-	err = je_mallctl("epoch", (void *)&epoch, &sz, (void *)&epoch,
112498-	    sizeof(epoch));
112499-	expect_d_eq(0, err, "Unexpected mallctl() failure");
112500-
112501-	size_t mib[6];
112502-	size_t miblen = sizeof(mib)/sizeof(mib[0]);
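-	/*
-	 * Translate the stat names to MIBs once, then patch the arena index
-	 * into mib[2] (the "0" placeholder in "stats.arenas.0....").
-	 */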
112503-	err = mallctlnametomib(
112504-	    "stats.arenas.0.hpa_shard.empty_slabs.ndirty_nonhuge", mib,
112505-	    &miblen);
112506-	expect_d_eq(0, err, "Unexpected mallctlnametomib() failure");
112507-
112508-	sz = sizeof(ndirty_nonhuge);
112509-	mib[2] = arena_ind;
112510-	err = mallctlbymib(mib, miblen, &ndirty_nonhuge, &sz, NULL, 0);
112511-	expect_d_eq(0, err, "Unexpected mallctlbymib() failure");
112512-
112513-	err = mallctlnametomib(
112514-	    "stats.arenas.0.hpa_shard.empty_slabs.ndirty_huge", mib,
112515-	    &miblen);
112516-	expect_d_eq(0, err, "Unexpected mallctlnametomib() failure");
112517-
112518-	sz = sizeof(ndirty_huge);
112519-	mib[2] = arena_ind;
112520-	err = mallctlbymib(mib, miblen, &ndirty_huge, &sz, NULL, 0);
112521-	expect_d_eq(0, err, "Unexpected mallctlbymib() failure");
112522-
112523-	return ndirty_huge + ndirty_nonhuge;
112524-}
112525-
112526-static void
112527-set_background_thread_enabled(bool enabled) {
112528-	int err;
112529-	err = je_mallctl("background_thread", NULL, NULL, &enabled,
112530-	    sizeof(enabled));
112531-	expect_d_eq(0, err, "Unexpected mallctl failure");
112532-}
112533-
112534-static void
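-/*
- * Spin until the arena's background thread has reached its indefinite sleep,
- * i.e. it has finished starting up.
- */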
112535-wait_until_thread_is_enabled(unsigned arena_id) {
112536-	tsd_t* tsd = tsd_fetch();
112537-
112538-	bool sleeping = false;
112539-	int iterations = 0;
112540-	do {
112541-		background_thread_info_t *info =
112542-		    background_thread_info_get(arena_id);
112543-		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
112544-		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
112545-		sleeping = background_thread_indefinite_sleep(info);
-		iterations++;
112546-		assert_d_lt(iterations, UINT64_C(1000000),
112547-		    "Waited too long for the background thread to start");
112548-	} while (!sleeping);
112549-}
112550-
112551-static void
112552-expect_purging(unsigned arena_ind, bool expect_deferred) {
112553-	size_t empty_ndirty;
112554-
112555-	empty_ndirty = get_empty_ndirty(arena_ind);
112556-	expect_zu_eq(0, empty_ndirty, "Expected arena to start unused.");
112557-
112558-	/*
112559-	 * It's possible that we get unlucky with our stats collection timing,
112560-	 * and the background thread runs in between the deallocation and the
112561-	 * stats collection.  So we retry 10 times, and see if we *ever* see
112562-	 * deferred reclamation.
112563-	 */
112564-	bool observed_dirty_page = false;
112565-	for (int i = 0; i < 10; i++) {
112566-		void *ptr = mallocx(PAGE,
112567-		    MALLOCX_TCACHE_NONE | MALLOCX_ARENA(arena_ind));
112568-		empty_ndirty = get_empty_ndirty(arena_ind);
112569-		expect_zu_eq(0, empty_ndirty, "All pages should be active");
112570-		dallocx(ptr, MALLOCX_TCACHE_NONE);
112571-		empty_ndirty = get_empty_ndirty(arena_ind);
112572-		if (expect_deferred) {
112573-			expect_true(empty_ndirty == 0 || empty_ndirty == 1 ||
112574-			    opt_prof, "Unexpected extra dirty page count: %zu",
112575-			    empty_ndirty);
112576-		} else {
112577-			assert_zu_eq(0, empty_ndirty,
112578-			    "Saw dirty pages without deferred purging");
112579-		}
112580-		if (empty_ndirty > 0) {
112581-			observed_dirty_page = true;
112582-			break;
112583-		}
112584-	}
112585-	expect_b_eq(expect_deferred, observed_dirty_page, "");
112586-
112587-	/*
112588-	 * Under high concurrency / heavy test load (e.g. using run_test.sh),
112589-	 * the background thread may not get scheduled for a long period of
112590-	 * time.  Retry at most 100 times before bailing out.
112591-	 */
112592-	unsigned retry = 0;
112593-	while ((empty_ndirty = get_empty_ndirty(arena_ind)) > 0 &&
112594-	    expect_deferred && (retry++ < 100)) {
112595-		sleep_for_background_thread_interval();
112596-	}
112597-
112598-	expect_zu_eq(0, empty_ndirty, "Should have seen a background purge");
112599-}
112600-
112601-TEST_BEGIN(test_hpa_background_thread_purges) {
112602-	test_skip_if(!config_stats);
112603-	test_skip_if(!hpa_supported());
112604-	test_skip_if(!have_background_thread);
112605-	/* Skip since guarded pages cannot be allocated from hpa. */
112606-	test_skip_if(san_guard_enabled());
112607-
112608-	unsigned arena_ind = create_arena();
112609-	/*
112610-	 * Our .sh sets dirty mult to 0, so all dirty pages should get purged
112611-	 * any time any thread frees.
112612-	 */
112613-	expect_purging(arena_ind, /* expect_deferred */ true);
112614-}
112615-TEST_END
112616-
112617-TEST_BEGIN(test_hpa_background_thread_enable_disable) {
112618-	test_skip_if(!config_stats);
112619-	test_skip_if(!hpa_supported());
112620-	test_skip_if(!have_background_thread);
112621-	/* Skip since guarded pages cannot be allocated from hpa. */
112622-	test_skip_if(san_guard_enabled());
112623-
112624-	unsigned arena_ind = create_arena();
112625-
112626-	set_background_thread_enabled(false);
112627-	expect_purging(arena_ind, false);
112628-
112629-	set_background_thread_enabled(true);
112630-	wait_until_thread_is_enabled(arena_ind);
112631-	expect_purging(arena_ind, true);
112632-}
112633-TEST_END
112634-
112635-int
112636-main(void) {
112637-	/*
112638-	 * OK, this is a sort of nasty hack.  We don't want to add *another*
112639-	 * config option for HPA (the intent is that it becomes available on
112640-	 * more platforms over time, and we're trying to prune back config
112641-	 * options generally).  But we'll get initialization errors on other
112642-	 * platforms if we set hpa:true in the MALLOC_CONF (even if we set
112643-	 * abort_conf:false as well).  So we reach into the internals and set
112644-	 * them directly, but only if we know that we're actually going to do
112645-	 * something nontrivial in the tests.
112646-	 */
112647-	if (config_stats && hpa_supported() && have_background_thread) {
112648-		opt_hpa = true;
112649-		opt_background_thread = true;
112650-	}
112651-	return test_no_reentrancy(
112652-	    test_hpa_background_thread_purges,
112653-	    test_hpa_background_thread_enable_disable);
112654-}
112655diff --git a/jemalloc/test/unit/hpa_background_thread.sh b/jemalloc/test/unit/hpa_background_thread.sh
112656deleted file mode 100644
112657index 65a56a0..0000000
112658--- a/jemalloc/test/unit/hpa_background_thread.sh
112659+++ /dev/null
112660@@ -1,4 +0,0 @@
112661-#!/bin/sh
112662-
112663-export MALLOC_CONF="hpa_dirty_mult:0,hpa_min_purge_interval_ms:50,hpa_sec_nshards:0"
112664-
112665diff --git a/jemalloc/test/unit/hpdata.c b/jemalloc/test/unit/hpdata.c
112666deleted file mode 100644
112667index 288e71d..0000000
112668--- a/jemalloc/test/unit/hpdata.c
112669+++ /dev/null
112670@@ -1,244 +0,0 @@
112671-#include "test/jemalloc_test.h"
112672-
112673-#define HPDATA_ADDR ((void *)(10 * HUGEPAGE))
112674-#define HPDATA_AGE 123
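-/*
- * Arbitrary values: these tests only exercise hpdata bookkeeping, so nothing
- * is ever mapped or dereferenced at HPDATA_ADDR.
- */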
112675-
112676-TEST_BEGIN(test_reserve_alloc) {
112677-	hpdata_t hpdata;
112678-	hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);
112679-
112680-	/* Allocating a page at a time, we should do first fit. */
112681-	for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
112682-		expect_true(hpdata_consistent(&hpdata), "");
112683-		expect_zu_eq(HUGEPAGE_PAGES - i,
112684-		    hpdata_longest_free_range_get(&hpdata), "");
112685-		void *alloc = hpdata_reserve_alloc(&hpdata, PAGE);
112686-		expect_ptr_eq((char *)HPDATA_ADDR + i * PAGE, alloc, "");
112687-		expect_true(hpdata_consistent(&hpdata), "");
112688-	}
112689-	expect_true(hpdata_consistent(&hpdata), "");
112690-	expect_zu_eq(0, hpdata_longest_free_range_get(&hpdata), "");
112691-
112692-	/*
112693-	 * Build up a bigger free-range, 2 pages at a time, until we've got 6
112694-	 * adjacent free pages total.  Pages 8-13 should be unreserved after
112695-	 * this.
112696-	 */
112697-	hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 10 * PAGE, 2 * PAGE);
112698-	expect_true(hpdata_consistent(&hpdata), "");
112699-	expect_zu_eq(2, hpdata_longest_free_range_get(&hpdata), "");
112700-
112701-	hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 12 * PAGE, 2 * PAGE);
112702-	expect_true(hpdata_consistent(&hpdata), "");
112703-	expect_zu_eq(4, hpdata_longest_free_range_get(&hpdata), "");
112704-
112705-	hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 8 * PAGE, 2 * PAGE);
112706-	expect_true(hpdata_consistent(&hpdata), "");
112707-	expect_zu_eq(6, hpdata_longest_free_range_get(&hpdata), "");
112708-
112709-	/*
112710-	 * Leave page 14 reserved, but free page 15 (this tests the case where
112711-	 * unreserving combines two ranges).
112712-	 */
112713-	hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 15 * PAGE, PAGE);
112714-	/*
112715-	 * Longest free range shouldn't change; we've got a free range of size
112716-	 * 6, then a reserved page, then another free range.
112717-	 */
112718-	expect_true(hpdata_consistent(&hpdata), "");
112719-	expect_zu_eq(6, hpdata_longest_free_range_get(&hpdata), "");
112720-
112721-	/* After freeing page 14, the two ranges get combined. */
112722-	hpdata_unreserve(&hpdata, (char *)HPDATA_ADDR + 14 * PAGE, PAGE);
112723-	expect_true(hpdata_consistent(&hpdata), "");
112724-	expect_zu_eq(8, hpdata_longest_free_range_get(&hpdata), "");
112725-}
112726-TEST_END
112727-
112728-TEST_BEGIN(test_purge_simple) {
112729-	hpdata_t hpdata;
112730-	hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);
112731-
112732-	void *alloc = hpdata_reserve_alloc(&hpdata, HUGEPAGE_PAGES / 2 * PAGE);
112733-	expect_ptr_eq(alloc, HPDATA_ADDR, "");
112734-
112735-	/* Create HUGEPAGE_PAGES / 4 dirty inactive pages at the beginning. */
112736-	hpdata_unreserve(&hpdata, alloc, HUGEPAGE_PAGES / 4 * PAGE);
112737-
112738-	expect_zu_eq(hpdata_ntouched_get(&hpdata), HUGEPAGE_PAGES / 2, "");
112739-
112740-	hpdata_alloc_allowed_set(&hpdata, false);
112741-	hpdata_purge_state_t purge_state;
112742-	size_t to_purge = hpdata_purge_begin(&hpdata, &purge_state);
112743-	expect_zu_eq(HUGEPAGE_PAGES / 4, to_purge, "");
112744-
112745-	void *purge_addr;
112746-	size_t purge_size;
112747-	bool got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
112748-	    &purge_size);
112749-	expect_true(got_result, "");
112750-	expect_ptr_eq(HPDATA_ADDR, purge_addr, "");
112751-	expect_zu_eq(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");
112752-
112753-	got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
112754-	    &purge_size);
112755-	expect_false(got_result, "Unexpected additional purge range: "
112756-	    "extent at %p of size %zu", purge_addr, purge_size);
112757-
112758-	hpdata_purge_end(&hpdata, &purge_state);
112759-	expect_zu_eq(hpdata_ntouched_get(&hpdata), HUGEPAGE_PAGES / 4, "");
112760-}
112761-TEST_END
112762-
112763-/*
112764- * We only test intervening dallocs, not intervening allocs; the latter are
112765- * disallowed as a purging precondition (because they interfere with purging
112766- * across a retained extent, saving a purge call).
112767- */
112768-TEST_BEGIN(test_purge_intervening_dalloc) {
112769-	hpdata_t hpdata;
112770-	hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);
112771-
112772-	/* Allocate the first 3/4 of the pages. */
112773-	void *alloc = hpdata_reserve_alloc(&hpdata, 3 * HUGEPAGE_PAGES / 4 * PAGE);
112774-	expect_ptr_eq(alloc, HPDATA_ADDR, "");
112775-
112776-	/* Free the first 1/4 and the third 1/4 of the pages. */
112777-	hpdata_unreserve(&hpdata, alloc, HUGEPAGE_PAGES / 4 * PAGE);
112778-	hpdata_unreserve(&hpdata,
112779-	    (void *)((uintptr_t)alloc + 2 * HUGEPAGE_PAGES / 4 * PAGE),
112780-	    HUGEPAGE_PAGES / 4 * PAGE);
112781-
112782-	expect_zu_eq(hpdata_ntouched_get(&hpdata), 3 * HUGEPAGE_PAGES / 4, "");
112783-
112784-	hpdata_alloc_allowed_set(&hpdata, false);
112785-	hpdata_purge_state_t purge_state;
112786-	size_t to_purge = hpdata_purge_begin(&hpdata, &purge_state);
112787-	expect_zu_eq(HUGEPAGE_PAGES / 2, to_purge, "");
112788-
112789-	void *purge_addr;
112790-	size_t purge_size;
112791-	/* First purge. */
112792-	bool got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
112793-	    &purge_size);
112794-	expect_true(got_result, "");
112795-	expect_ptr_eq(HPDATA_ADDR, purge_addr, "");
112796-	expect_zu_eq(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");
112797-
112798-	/* Deallocate the second 1/4 before the second purge occurs. */
112799-	hpdata_unreserve(&hpdata,
112800-	    (void *)((uintptr_t)alloc + 1 * HUGEPAGE_PAGES / 4 * PAGE),
112801-	    HUGEPAGE_PAGES / 4 * PAGE);
112802-
112803-	/* Now continue purging. */
112804-	got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
112805-	    &purge_size);
112806-	expect_true(got_result, "");
112807-	expect_ptr_eq(
112808-	    (void *)((uintptr_t)alloc + 2 * HUGEPAGE_PAGES / 4 * PAGE),
112809-	    purge_addr, "");
112810-	expect_zu_ge(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");
112811-
112812-	got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
112813-	    &purge_size);
112814-	expect_false(got_result, "Unexpected additional purge range: "
112815-	    "extent at %p of size %zu", purge_addr, purge_size);
112816-
112817-	hpdata_purge_end(&hpdata, &purge_state);
112818-
112819-	expect_zu_eq(hpdata_ntouched_get(&hpdata), HUGEPAGE_PAGES / 4, "");
112820-}
112821-TEST_END
112822-
112823-TEST_BEGIN(test_purge_over_retained) {
112824-	void *purge_addr;
112825-	size_t purge_size;
112826-
112827-	hpdata_t hpdata;
112828-	hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);
112829-
112830-	/* Allocate the first 3/4 of the pages. */
112831-	void *alloc = hpdata_reserve_alloc(&hpdata, 3 * HUGEPAGE_PAGES / 4 * PAGE);
112832-	expect_ptr_eq(alloc, HPDATA_ADDR, "");
112833-
112834-	/* Free the second quarter. */
112835-	void *second_quarter =
112836-	    (void *)((uintptr_t)alloc + HUGEPAGE_PAGES / 4 * PAGE);
112837-	hpdata_unreserve(&hpdata, second_quarter, HUGEPAGE_PAGES / 4 * PAGE);
112838-
112839-	expect_zu_eq(hpdata_ntouched_get(&hpdata), 3 * HUGEPAGE_PAGES / 4, "");
112840-
112841-	/* Purge the second quarter. */
112842-	hpdata_alloc_allowed_set(&hpdata, false);
112843-	hpdata_purge_state_t purge_state;
112844-	size_t to_purge_dirty = hpdata_purge_begin(&hpdata, &purge_state);
112845-	expect_zu_eq(HUGEPAGE_PAGES / 4, to_purge_dirty, "");
112846-
112847-	bool got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
112848-	    &purge_size);
112849-	expect_true(got_result, "");
112850-	expect_ptr_eq(second_quarter, purge_addr, "");
112851-	expect_zu_eq(HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");
112852-
112853-	got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
112854-	    &purge_size);
112855-	expect_false(got_result, "Unexpected additional purge range: "
112856-	    "extent at %p of size %zu", purge_addr, purge_size);
112857-	hpdata_purge_end(&hpdata, &purge_state);
112858-
112859-	expect_zu_eq(hpdata_ntouched_get(&hpdata), HUGEPAGE_PAGES / 2, "");
112860-
112861-	/* Free the first and third quarter. */
112862-	hpdata_unreserve(&hpdata, HPDATA_ADDR, HUGEPAGE_PAGES / 4 * PAGE);
112863-	hpdata_unreserve(&hpdata,
112864-	    (void *)((uintptr_t)alloc + 2 * HUGEPAGE_PAGES / 4 * PAGE),
112865-	    HUGEPAGE_PAGES / 4 * PAGE);
112866-
112867-	/*
112868-	 * Purge again.  The second quarter is retained, so we can safely
112869-	 * re-purge it.  We expect a single purge of 3/4 of the hugepage,
112870-	 * purging half its pages.
112871-	 */
112872-	to_purge_dirty = hpdata_purge_begin(&hpdata, &purge_state);
112873-	expect_zu_eq(HUGEPAGE_PAGES / 2, to_purge_dirty, "");
112874-
112875-	got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
112876-	    &purge_size);
112877-	expect_true(got_result, "");
112878-	expect_ptr_eq(HPDATA_ADDR, purge_addr, "");
112879-	expect_zu_eq(3 * HUGEPAGE_PAGES / 4 * PAGE, purge_size, "");
112880-
112881-	got_result = hpdata_purge_next(&hpdata, &purge_state, &purge_addr,
112882-	    &purge_size);
112883-	expect_false(got_result, "Unexpected additional purge range: "
112884-	    "extent at %p of size %zu", purge_addr, purge_size);
112885-	hpdata_purge_end(&hpdata, &purge_state);
112886-
112887-	expect_zu_eq(hpdata_ntouched_get(&hpdata), 0, "");
112888-}
112889-TEST_END
112890-
112891-TEST_BEGIN(test_hugify) {
112892-	hpdata_t hpdata;
112893-	hpdata_init(&hpdata, HPDATA_ADDR, HPDATA_AGE);
112894-
112895-	void *alloc = hpdata_reserve_alloc(&hpdata, HUGEPAGE / 2);
112896-	expect_ptr_eq(alloc, HPDATA_ADDR, "");
112897-
112898-	expect_zu_eq(HUGEPAGE_PAGES / 2, hpdata_ntouched_get(&hpdata), "");
112899-
112900-	hpdata_hugify(&hpdata);
112901-
112902-	/* Hugifying should have increased the dirty page count. */
112903-	expect_zu_eq(HUGEPAGE_PAGES, hpdata_ntouched_get(&hpdata), "");
112904-}
112905-TEST_END
112906-
112907-int main(void) {
112908-	return test_no_reentrancy(
112909-	    test_reserve_alloc,
112910-	    test_purge_simple,
112911-	    test_purge_intervening_dalloc,
112912-	    test_purge_over_retained,
112913-	    test_hugify);
112914-}
112915diff --git a/jemalloc/test/unit/huge.c b/jemalloc/test/unit/huge.c
112916deleted file mode 100644
112917index ec64e50..0000000
112918--- a/jemalloc/test/unit/huge.c
112919+++ /dev/null
112920@@ -1,108 +0,0 @@
112921-#include "test/jemalloc_test.h"
112922-
112923-/* Threshold: 2 << 20 = 2097152. */
112924-const char *malloc_conf = "oversize_threshold:2097152";
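-/*
- * Allocations at or above the threshold should be served from a separate
- * automatic huge arena rather than the caller's arena; the tests below check
- * that via arenas.lookup.
- */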
112925-
112926-#define HUGE_SZ (2 << 20)
112927-#define SMALL_SZ (8)
112928-
112929-TEST_BEGIN(huge_bind_thread) {
112930-	unsigned arena1, arena2;
112931-	size_t sz = sizeof(unsigned);
112932-
112933-	/* Bind to a manual arena. */
112934-	expect_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
112935-	    "Failed to create arena");
112936-	expect_d_eq(mallctl("thread.arena", NULL, NULL, &arena1,
112937-	    sizeof(arena1)), 0, "Fail to bind thread");
112938-
112939-	void *ptr = mallocx(HUGE_SZ, 0);
112940-	expect_ptr_not_null(ptr, "Fail to allocate huge size");
112941-	expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
112942-	    sizeof(ptr)), 0, "Unexpected mallctl() failure");
112943-	expect_u_eq(arena1, arena2, "Wrong arena used after binding");
112944-	dallocx(ptr, 0);
112945-
112946-	/* Switch back to arena 0. */
112947-	test_skip_if(have_percpu_arena &&
112948-	    PERCPU_ARENA_ENABLED(opt_percpu_arena));
112949-	arena2 = 0;
112950-	expect_d_eq(mallctl("thread.arena", NULL, NULL, &arena2,
112951-	    sizeof(arena2)), 0, "Fail to bind thread");
112952-	ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE);
112953-	expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
112954-	    sizeof(ptr)), 0, "Unexpected mallctl() failure");
112955-	expect_u_eq(arena2, 0, "Wrong arena used after binding");
112956-	dallocx(ptr, MALLOCX_TCACHE_NONE);
112957-
112958-	/* Then huge allocation should use the huge arena. */
112959-	ptr = mallocx(HUGE_SZ, 0);
112960-	expect_ptr_not_null(ptr, "Fail to allocate huge size");
112961-	expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
112962-	    sizeof(ptr)), 0, "Unexpected mallctl() failure");
112963-	expect_u_ne(arena2, 0, "Wrong arena used after binding");
112964-	expect_u_ne(arena1, arena2, "Wrong arena used after binding");
112965-	dallocx(ptr, 0);
112966-}
112967-TEST_END
112968-
112969-TEST_BEGIN(huge_mallocx) {
112970-	unsigned arena1, arena2;
112971-	size_t sz = sizeof(unsigned);
112972-
112973-	expect_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
112974-	    "Failed to create arena");
112975-	void *huge = mallocx(HUGE_SZ, MALLOCX_ARENA(arena1));
112976-	expect_ptr_not_null(huge, "Fail to allocate huge size");
112977-	expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge,
112978-	    sizeof(huge)), 0, "Unexpected mallctl() failure");
112979-	expect_u_eq(arena1, arena2, "Wrong arena used for mallocx");
112980-	dallocx(huge, MALLOCX_ARENA(arena1));
112981-
112982-	void *huge2 = mallocx(HUGE_SZ, 0);
112983-	expect_ptr_not_null(huge2, "Fail to allocate huge size");
112984-	expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge2,
112985-	    sizeof(huge2)), 0, "Unexpected mallctl() failure");
112986-	expect_u_ne(arena1, arena2,
112987-	    "Huge allocation should not come from the manual arena.");
112988-	expect_u_ne(arena2, 0,
112989-	    "Huge allocation should not come from the arena 0.");
112990-	dallocx(huge2, 0);
112991-}
112992-TEST_END
112993-
112994-TEST_BEGIN(huge_allocation) {
112995-	unsigned arena1, arena2;
112996-
112997-	void *ptr = mallocx(HUGE_SZ, 0);
112998-	expect_ptr_not_null(ptr, "Fail to allocate huge size");
112999-	size_t sz = sizeof(unsigned);
113000-	expect_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
113001-	    0, "Unexpected mallctl() failure");
113002-	expect_u_gt(arena1, 0, "Huge allocation should not come from arena 0");
113003-	dallocx(ptr, 0);
113004-
113005-	ptr = mallocx(HUGE_SZ >> 1, 0);
113006-	expect_ptr_not_null(ptr, "Fail to allocate half huge size");
113007-	expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
113008-	    sizeof(ptr)), 0, "Unexpected mallctl() failure");
113009-	expect_u_ne(arena1, arena2, "Wrong arena used for half huge");
113010-	dallocx(ptr, 0);
113011-
113012-	ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE);
113013-	expect_ptr_not_null(ptr, "Fail to allocate small size");
113014-	expect_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr,
113015-	    sizeof(ptr)), 0, "Unexpected mallctl() failure");
113016-	expect_u_ne(arena1, arena2,
113017-	    "Huge and small should be from different arenas");
113018-	dallocx(ptr, 0);
113019-}
113020-TEST_END
113021-
113022-int
113023-main(void) {
113024-	return test(
113025-	    huge_allocation,
113026-	    huge_mallocx,
113027-	    huge_bind_thread);
113028-}
113029diff --git a/jemalloc/test/unit/inspect.c b/jemalloc/test/unit/inspect.c
113030deleted file mode 100644
113031index fe59e59..0000000
113032--- a/jemalloc/test/unit/inspect.c
113033+++ /dev/null
113034@@ -1,278 +0,0 @@
113035-#include "test/jemalloc_test.h"
113036-
113037-#define TEST_UTIL_EINVAL(node, a, b, c, d, why_inval) do {		\
113038-	assert_d_eq(mallctl("experimental.utilization." node,		\
113039-	    a, b, c, d), EINVAL, "Should fail when " why_inval);	\
113040-	assert_zu_eq(out_sz, out_sz_ref,				\
113041-	    "Output size touched when given invalid arguments");	\
113042-	assert_d_eq(memcmp(out, out_ref, out_sz_ref), 0,		\
113043-	    "Output content touched when given invalid arguments");	\
113044-} while (0)
113045-
113046-#define TEST_UTIL_QUERY_EINVAL(a, b, c, d, why_inval)			\
113047-	TEST_UTIL_EINVAL("query", a, b, c, d, why_inval)
113048-#define TEST_UTIL_BATCH_EINVAL(a, b, c, d, why_inval)			\
113049-	TEST_UTIL_EINVAL("batch_query", a, b, c, d, why_inval)
113050-
113051-#define TEST_UTIL_VALID(node) do {					\
113052-	assert_d_eq(mallctl("experimental.utilization." node,		\
113053-	    out, &out_sz, in, in_sz), 0,				\
113054-	    "Should return 0 on correct arguments");			\
113055-	expect_zu_eq(out_sz, out_sz_ref, "incorrect output size");	\
113056-	expect_d_ne(memcmp(out, out_ref, out_sz_ref), 0,		\
113057-	    "Output content should be changed");			\
113058-} while (0)
113059-
113060-#define TEST_UTIL_BATCH_VALID TEST_UTIL_VALID("batch_query")
113061-
113062-#define TEST_MAX_SIZE (1 << 20)
113063-
113064-TEST_BEGIN(test_query) {
113065-	size_t sz;
113066-	/*
113067-	 * Select some sizes that can span both small and large sizes, and are
113068-	 * numerically unrelated to any size boundaries.
113069-	 */
113070-	for (sz = 7; sz <= TEST_MAX_SIZE && sz <= SC_LARGE_MAXCLASS;
113071-	    sz += (sz <= SC_SMALL_MAXCLASS ? 1009 : 99989)) {
113072-		void *p = mallocx(sz, 0);
113073-		void **in = &p;
113074-		size_t in_sz = sizeof(const void *);
113075-		size_t out_sz = sizeof(void *) + sizeof(size_t) * 5;
113076-		void *out = mallocx(out_sz, 0);
113077-		void *out_ref = mallocx(out_sz, 0);
113078-		size_t out_sz_ref = out_sz;
113079-
113080-		assert_ptr_not_null(p,
113081-		    "test pointer allocation failed");
113082-		assert_ptr_not_null(out,
113083-		    "test output allocation failed");
113084-		assert_ptr_not_null(out_ref,
113085-		    "test reference output allocation failed");
113086-
113087-#define SLABCUR_READ(out) (*(void **)out)
113088-#define COUNTS(out) ((size_t *)((void **)out + 1))
113089-#define NFREE_READ(out) COUNTS(out)[0]
113090-#define NREGS_READ(out) COUNTS(out)[1]
113091-#define SIZE_READ(out) COUNTS(out)[2]
113092-#define BIN_NFREE_READ(out) COUNTS(out)[3]
113093-#define BIN_NREGS_READ(out) COUNTS(out)[4]
113094-
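-		/*
-		 * The output buffer is a slab pointer followed by five size_t
-		 * counters; the macros above index into that layout.
-		 */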
113095-		SLABCUR_READ(out) = NULL;
113096-		NFREE_READ(out) = NREGS_READ(out) = SIZE_READ(out) = -1;
113097-		BIN_NFREE_READ(out) = BIN_NREGS_READ(out) = -1;
113098-		memcpy(out_ref, out, out_sz);
113099-
113100-		/* Test invalid argument(s) errors */
113101-		TEST_UTIL_QUERY_EINVAL(NULL, &out_sz, in, in_sz,
113102-		    "old is NULL");
113103-		TEST_UTIL_QUERY_EINVAL(out, NULL, in, in_sz,
113104-		    "oldlenp is NULL");
113105-		TEST_UTIL_QUERY_EINVAL(out, &out_sz, NULL, in_sz,
113106-		    "newp is NULL");
113107-		TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, 0,
113108-		    "newlen is zero");
113109-		in_sz -= 1;
113110-		TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz,
113111-		    "invalid newlen");
113112-		in_sz += 1;
113113-		out_sz_ref = out_sz -= 2 * sizeof(size_t);
113114-		TEST_UTIL_QUERY_EINVAL(out, &out_sz, in, in_sz,
113115-		    "invalid *oldlenp");
113116-		out_sz_ref = out_sz += 2 * sizeof(size_t);
113117-
113118-		/* Examine output for valid call */
113119-		TEST_UTIL_VALID("query");
113120-		expect_zu_le(sz, SIZE_READ(out),
113121-		    "Extent size should be at least allocation size");
113122-		expect_zu_eq(SIZE_READ(out) & (PAGE - 1), 0,
113123-		    "Extent size should be a multiple of page size");
113124-
113125-		/*
113126-		 * We don't do much bin checking if prof is on, since profiling
113127-		 * can produce extents that are for small size classes but not
113128-		 * slabs, which interferes with things like region counts.
113129-		 */
113130-		if (!opt_prof && sz <= SC_SMALL_MAXCLASS) {
113131-			expect_zu_le(NFREE_READ(out), NREGS_READ(out),
113132-			    "Extent free count exceeded region count");
113133-			expect_zu_le(NREGS_READ(out), SIZE_READ(out),
113134-			    "Extent region count exceeded size");
113135-			expect_zu_ne(NREGS_READ(out), 0,
113136-			    "Extent region count must be positive");
113137-			expect_true(NFREE_READ(out) == 0 || (SLABCUR_READ(out)
113138-			    != NULL && SLABCUR_READ(out) <= p),
113139-			    "Allocation should follow first fit principle");
113140-
113141-			if (config_stats) {
113142-				expect_zu_le(BIN_NFREE_READ(out),
113143-				    BIN_NREGS_READ(out),
113144-				    "Bin free count exceeded region count");
113145-				expect_zu_ne(BIN_NREGS_READ(out), 0,
113146-				    "Bin region count must be positive");
113147-				expect_zu_le(NFREE_READ(out),
113148-				    BIN_NFREE_READ(out),
113149-				    "Extent free count exceeded bin free count");
113150-				expect_zu_le(NREGS_READ(out),
113151-				    BIN_NREGS_READ(out),
113152-				    "Extent region count exceeded "
113153-				    "bin region count");
113154-				expect_zu_eq(BIN_NREGS_READ(out)
113155-				    % NREGS_READ(out), 0,
113156-				    "Bin region count isn't a multiple of "
113157-				    "extent region count");
113158-				expect_zu_le(
113159-				    BIN_NFREE_READ(out) - NFREE_READ(out),
113160-				    BIN_NREGS_READ(out) - NREGS_READ(out),
113161-				    "Free count in other extents in the bin "
113162-				    "exceeded region count in other extents "
113163-				    "in the bin");
113164-				expect_zu_le(NREGS_READ(out) - NFREE_READ(out),
113165-				    BIN_NREGS_READ(out) - BIN_NFREE_READ(out),
113166-				    "Extent utilized count exceeded "
113167-				    "bin utilized count");
113168-			}
113169-		} else if (sz > SC_SMALL_MAXCLASS) {
113170-			expect_zu_eq(NFREE_READ(out), 0,
113171-			    "Extent free count should be zero");
113172-			expect_zu_eq(NREGS_READ(out), 1,
113173-			    "Extent region count should be one");
113174-			expect_ptr_null(SLABCUR_READ(out),
113175-			    "Current slab must be null for large size classes");
113176-			if (config_stats) {
113177-				expect_zu_eq(BIN_NFREE_READ(out), 0,
113178-				    "Bin free count must be zero for "
113179-				    "large sizes");
113180-				expect_zu_eq(BIN_NREGS_READ(out), 0,
113181-				    "Bin region count must be zero for "
113182-				    "large sizes");
113183-			}
113184-		}
113185-
113186-#undef BIN_NREGS_READ
113187-#undef BIN_NFREE_READ
113188-#undef SIZE_READ
113189-#undef NREGS_READ
113190-#undef NFREE_READ
113191-#undef COUNTS
113192-#undef SLABCUR_READ
113193-
113194-		free(out_ref);
113195-		free(out);
113196-		free(p);
113197-	}
113198-}
113199-TEST_END
113200-
113201-TEST_BEGIN(test_batch) {
113202-	size_t sz;
113203-	/*
113204-	 * Select some sizes that can span both small and large sizes, and are
113205-	 * numerically unrelated to any size boundaries.
113206-	 */
113207-	for (sz = 17; sz <= TEST_MAX_SIZE && sz <= SC_LARGE_MAXCLASS;
113208-	    sz += (sz <= SC_SMALL_MAXCLASS ? 1019 : 99991)) {
113209-		void *p = mallocx(sz, 0);
113210-		void *q = mallocx(sz, 0);
113211-		void *in[] = {p, q};
113212-		size_t in_sz = sizeof(const void *) * 2;
113213-		size_t out[] = {-1, -1, -1, -1, -1, -1};
113214-		size_t out_sz = sizeof(size_t) * 6;
113215-		size_t out_ref[] = {-1, -1, -1, -1, -1, -1};
113216-		size_t out_sz_ref = out_sz;
113217-
113218-		assert_ptr_not_null(p, "test pointer allocation failed");
113219-		assert_ptr_not_null(q, "test pointer allocation failed");
113220-
113221-		/* Test invalid argument(s) errors */
113222-		TEST_UTIL_BATCH_EINVAL(NULL, &out_sz, in, in_sz,
113223-		    "old is NULL");
113224-		TEST_UTIL_BATCH_EINVAL(out, NULL, in, in_sz,
113225-		    "oldlenp is NULL");
113226-		TEST_UTIL_BATCH_EINVAL(out, &out_sz, NULL, in_sz,
113227-		    "newp is NULL");
113228-		TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, 0,
113229-		    "newlen is zero");
113230-		in_sz -= 1;
113231-		TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz,
113232-		    "newlen is not an exact multiple");
113233-		in_sz += 1;
113234-		out_sz_ref = out_sz -= 2 * sizeof(size_t);
113235-		TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz,
113236-		    "*oldlenp is not an exact multiple");
113237-		out_sz_ref = out_sz += 2 * sizeof(size_t);
113238-		in_sz -= sizeof(const void *);
113239-		TEST_UTIL_BATCH_EINVAL(out, &out_sz, in, in_sz,
113240-		    "*oldlenp and newlen do not match");
113241-		in_sz += sizeof(const void *);
113242-
113243-	/* Examine output for valid calls */
113244-#define TEST_EQUAL_REF(i, message) \
113245-	assert_d_eq(memcmp(out + (i) * 3, out_ref + (i) * 3, 3 * sizeof(size_t)), 0, message)
113246-
113247-#define NFREE_READ(out, i) out[(i) * 3]
113248-#define NREGS_READ(out, i) out[(i) * 3 + 1]
113249-#define SIZE_READ(out, i) out[(i) * 3 + 2]
113250-
113251-		out_sz_ref = out_sz /= 2;
113252-		in_sz /= 2;
113253-		TEST_UTIL_BATCH_VALID;
113254-		expect_zu_le(sz, SIZE_READ(out, 0),
113255-		    "Extent size should be at least allocation size");
113256-		expect_zu_eq(SIZE_READ(out, 0) & (PAGE - 1), 0,
113257-		    "Extent size should be a multiple of page size");
113258-		/*
113259-		 * See the corresponding comment in test_query; profiling breaks
113260-		 * our slab count expectations.
113261-		 */
113262-		if (sz <= SC_SMALL_MAXCLASS && !opt_prof) {
113263-			expect_zu_le(NFREE_READ(out, 0), NREGS_READ(out, 0),
113264-			    "Extent free count exceeded region count");
113265-			expect_zu_le(NREGS_READ(out, 0), SIZE_READ(out, 0),
113266-			    "Extent region count exceeded size");
113267-			expect_zu_ne(NREGS_READ(out, 0), 0,
113268-			    "Extent region count must be positive");
113269-		} else if (sz > SC_SMALL_MAXCLASS) {
113270-			expect_zu_eq(NFREE_READ(out, 0), 0,
113271-			    "Extent free count should be zero");
113272-			expect_zu_eq(NREGS_READ(out, 0), 1,
113273-			    "Extent region count should be one");
113274-		}
113275-		TEST_EQUAL_REF(1,
113276-		    "Should not overwrite content beyond what's needed");
113277-		in_sz *= 2;
113278-		out_sz_ref = out_sz *= 2;
113279-
113280-		memcpy(out_ref, out, 3 * sizeof(size_t));
113281-		TEST_UTIL_BATCH_VALID;
113282-		TEST_EQUAL_REF(0, "Statistics should be stable across calls");
113283-		if (sz <= SC_SMALL_MAXCLASS) {
113284-			expect_zu_le(NFREE_READ(out, 1), NREGS_READ(out, 1),
113285-			    "Extent free count exceeded region count");
113286-		} else {
113287-			expect_zu_eq(NFREE_READ(out, 0), 0,
113288-			    "Extent free count should be zero");
113289-		}
113290-		expect_zu_eq(NREGS_READ(out, 0), NREGS_READ(out, 1),
113291-		    "Extent region count should be same for same region size");
113292-		expect_zu_eq(SIZE_READ(out, 0), SIZE_READ(out, 1),
113293-		    "Extent size should be same for same region size");
113294-
113295-#undef SIZE_READ
113296-#undef NREGS_READ
113297-#undef NFREE_READ
113298-
113299-#undef TEST_EQUAL_REF
113300-
113301-		free(q);
113302-		free(p);
113303-	}
113304-}
113305-TEST_END
113306-
113307-int
113308-main(void) {
113309-	assert_zu_lt(SC_SMALL_MAXCLASS + 100000, TEST_MAX_SIZE,
113310-	    "Test case cannot cover large classes");
113311-	return test(test_query, test_batch);
113312-}
113313diff --git a/jemalloc/test/unit/inspect.sh b/jemalloc/test/unit/inspect.sh
113314deleted file mode 100644
113315index 352d110..0000000
113316--- a/jemalloc/test/unit/inspect.sh
113317+++ /dev/null
113318@@ -1,5 +0,0 @@
113319-#!/bin/sh
113320-
113321-if [ "x${enable_prof}" = "x1" ] ; then
113322-  export MALLOC_CONF="prof:false"
113323-fi
113324diff --git a/jemalloc/test/unit/junk.c b/jemalloc/test/unit/junk.c
113325deleted file mode 100644
113326index 543092f..0000000
113327--- a/jemalloc/test/unit/junk.c
113328+++ /dev/null
113329@@ -1,195 +0,0 @@
113330-#include "test/jemalloc_test.h"
113331-
113332-#define arraylen(arr) (sizeof(arr)/sizeof(arr[0]))
113333-static size_t ptr_ind;
113334-static void *volatile ptrs[100];
113335-static void *last_junked_ptr;
113336-static size_t last_junked_usize;
113337-
113338-static void
113339-reset() {
113340-	ptr_ind = 0;
113341-	last_junked_ptr = NULL;
113342-	last_junked_usize = 0;
113343-}
113344-
113345-static void
113346-test_junk(void *ptr, size_t usize) {
113347-	last_junked_ptr = ptr;
113348-	last_junked_usize = usize;
113349-}
113350-
113351-static void
113352-do_allocs(size_t size, bool zero, size_t lg_align) {
113353-#define JUNK_ALLOC(...)							\
113354-	do {								\
113355-		assert(ptr_ind + 1 < arraylen(ptrs));			\
113356-		void *ptr = __VA_ARGS__;				\
113357-		assert_ptr_not_null(ptr, "");				\
113358-		ptrs[ptr_ind++] = ptr;					\
113359-		if (opt_junk_alloc && !zero) {				\
113360-			expect_ptr_eq(ptr, last_junked_ptr, "");	\
113361-			expect_zu_eq(last_junked_usize,			\
113362-			    TEST_MALLOC_SIZE(ptr), "");			\
113363-		}							\
113364-	} while (0)
113365-	if (!zero && lg_align == 0) {
113366-		JUNK_ALLOC(malloc(size));
113367-	}
113368-	if (!zero) {
113369-		JUNK_ALLOC(aligned_alloc(1 << lg_align, size));
113370-	}
113371-#ifdef JEMALLOC_OVERRIDE_MEMALIGN
113372-	if (!zero) {
113373-		JUNK_ALLOC(je_memalign(1 << lg_align, size));
113374-	}
113375-#endif
113376-#ifdef JEMALLOC_OVERRIDE_VALLOC
113377-	if (!zero && lg_align == LG_PAGE) {
113378-		JUNK_ALLOC(je_valloc(size));
113379-	}
113380-#endif
113381-	int zero_flag = zero ? MALLOCX_ZERO : 0;
113382-	JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)));
113383-	JUNK_ALLOC(mallocx(size, zero_flag | MALLOCX_LG_ALIGN(lg_align)
113384-	    | MALLOCX_TCACHE_NONE));
113385-	if (lg_align >= LG_SIZEOF_PTR) {
113386-		void *memalign_result;
113387-		int err = posix_memalign(&memalign_result, (1 << lg_align),
113388-		    size);
113389-		assert_d_eq(err, 0, "");
113390-		JUNK_ALLOC(memalign_result);
113391-	}
113392-}
113393-
113394-TEST_BEGIN(test_junk_alloc_free) {
113395-	bool zerovals[] = {false, true};
113396-	size_t sizevals[] = {
113397-		1, 8, 100, 1000, 100*1000
113398-	/*
113399-	 * Memory allocation failure is a real possibility in 32-bit mode.
113400-	 * Rather than try to check in the face of resource exhaustion, we just
113401-	 * rely more on the 64-bit tests.  This is a little bit white-box-y in
113402-	 * the sense that this is only a good test strategy if we know that the
113403-	 * junk pathways don't interact with the allocation selection
113404-	 * mechanisms; but this is in fact the case.
113405-	 */
113406-#if LG_SIZEOF_PTR == 3
113407-		    , 10 * 1000 * 1000
113408-#endif
113409-	};
113410-	size_t lg_alignvals[] = {
113411-		0, 4, 10, 15, 16, LG_PAGE
113412-#if LG_SIZEOF_PTR == 3
113413-		    , 20, 24
113414-#endif
113415-	};
113416-
113417-#define JUNK_FREE(...)							\
113418-	do {								\
113419-		do_allocs(size, zero, lg_align);			\
113420-		for (size_t n = 0; n < ptr_ind; n++) {			\
113421-			void *ptr = ptrs[n];				\
113422-			__VA_ARGS__;					\
113423-			if (opt_junk_free) {				\
113424-				assert_ptr_eq(ptr, last_junked_ptr,	\
113425-				    "");				\
113426-				assert_zu_eq(usize, last_junked_usize,	\
113427-				    "");				\
113428-			}						\
113429-			reset();					\
113430-		}							\
113431-	} while (0)
113432-	for (size_t i = 0; i < arraylen(zerovals); i++) {
113433-		for (size_t j = 0; j < arraylen(sizevals); j++) {
113434-			for (size_t k = 0; k < arraylen(lg_alignvals); k++) {
113435-				bool zero = zerovals[i];
113436-				size_t size = sizevals[j];
113437-				size_t lg_align = lg_alignvals[k];
113438-				size_t usize = nallocx(size,
113439-				    MALLOCX_LG_ALIGN(lg_align));
113440-
113441-				JUNK_FREE(free(ptr));
113442-				JUNK_FREE(dallocx(ptr, 0));
113443-				JUNK_FREE(dallocx(ptr, MALLOCX_TCACHE_NONE));
113444-				JUNK_FREE(dallocx(ptr, MALLOCX_LG_ALIGN(
113445-				    lg_align)));
113446-				JUNK_FREE(sdallocx(ptr, usize, MALLOCX_LG_ALIGN(
113447-				    lg_align)));
113448-				JUNK_FREE(sdallocx(ptr, usize,
113449-				    MALLOCX_TCACHE_NONE | MALLOCX_LG_ALIGN(lg_align)));
113450-				if (opt_zero_realloc_action
113451-				    == zero_realloc_action_free) {
113452-					JUNK_FREE(realloc(ptr, 0));
113453-				}
113454-			}
113455-		}
113456-	}
113457-}
113458-TEST_END
113459-
113460-TEST_BEGIN(test_realloc_expand) {
113461-	char *volatile ptr;
113462-	char *volatile expanded;
113463-
113464-	test_skip_if(!opt_junk_alloc);
113465-
113466-	/* Realloc */
113467-	ptr = malloc(SC_SMALL_MAXCLASS);
113468-	expanded = realloc(ptr, SC_LARGE_MINCLASS);
113469-	expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
113470-	expect_zu_eq(last_junked_usize,
113471-	    SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
113472-	free(expanded);
113473-
113474-	/* rallocx(..., 0) */
113475-	ptr = malloc(SC_SMALL_MAXCLASS);
113476-	expanded = rallocx(ptr, SC_LARGE_MINCLASS, 0);
113477-	expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
113478-	expect_zu_eq(last_junked_usize,
113479-	    SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
113480-	free(expanded);
113481-
113482-	/* rallocx(..., nonzero) */
113483-	ptr = malloc(SC_SMALL_MAXCLASS);
113484-	expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_TCACHE_NONE);
113485-	expect_ptr_eq(last_junked_ptr, &expanded[SC_SMALL_MAXCLASS], "");
113486-	expect_zu_eq(last_junked_usize,
113487-	    SC_LARGE_MINCLASS - SC_SMALL_MAXCLASS, "");
113488-	free(expanded);
113489-
113490-	/* rallocx(..., MALLOCX_ZERO) */
113491-	ptr = malloc(SC_SMALL_MAXCLASS);
113492-	last_junked_ptr = (void *)-1;
113493-	last_junked_usize = (size_t)-1;
113494-	expanded = rallocx(ptr, SC_LARGE_MINCLASS, MALLOCX_ZERO);
113495-	expect_ptr_eq(last_junked_ptr, (void *)-1, "");
113496-	expect_zu_eq(last_junked_usize, (size_t)-1, "");
113497-	free(expanded);
113498-
113499-	/*
113500-	 * Unfortunately, testing xallocx reliably is difficult to do portably
113501-	 * (since allocations can be expanded / not expanded differently on
113502-	 * different platforms).  We rely on manual inspection there -- the
113503-	 * xallocx pathway is easy to inspect, though.
113504-	 *
113505-	 * Likewise, we don't test the shrinking pathways.  It's difficult to do
113506-	 * so consistently (because of the risk of split failure or memory
113507-	 * exhaustion, in which case no junking should happen).  This is fine
113508-	 * -- junking is a best-effort debug mechanism in the first place.
113509-	 */
113510-}
113511-TEST_END
113512-
113513-int
113514-main(void) {
113515-	junk_alloc_callback = &test_junk;
113516-	junk_free_callback = &test_junk;
113517-	/*
113518-	 * We check the last pointer junked.  If a reentrant call happens, that
113519-	 * might be an internal allocation.
113520-	 */
113521-	return test_no_reentrancy(
113522-	    test_junk_alloc_free,
113523-	    test_realloc_expand);
113524-}
113525diff --git a/jemalloc/test/unit/junk.sh b/jemalloc/test/unit/junk.sh
113526deleted file mode 100644
113527index 97cd8ca..0000000
113528--- a/jemalloc/test/unit/junk.sh
113529+++ /dev/null
113530@@ -1,5 +0,0 @@
113531-#!/bin/sh
113532-
113533-if [ "x${enable_fill}" = "x1" ] ; then
113534-  export MALLOC_CONF="abort:false,zero:false,junk:true"
113535-fi
113536diff --git a/jemalloc/test/unit/junk_alloc.c b/jemalloc/test/unit/junk_alloc.c
113537deleted file mode 100644
113538index a442a0c..0000000
113539--- a/jemalloc/test/unit/junk_alloc.c
113540+++ /dev/null
113541@@ -1 +0,0 @@
113542-#include "junk.c"
113543diff --git a/jemalloc/test/unit/junk_alloc.sh b/jemalloc/test/unit/junk_alloc.sh
113544deleted file mode 100644
113545index e1008c2..0000000
113546--- a/jemalloc/test/unit/junk_alloc.sh
113547+++ /dev/null
113548@@ -1,5 +0,0 @@
113549-#!/bin/sh
113550-
113551-if [ "x${enable_fill}" = "x1" ] ; then
113552-  export MALLOC_CONF="abort:false,zero:false,junk:alloc"
113553-fi
113554diff --git a/jemalloc/test/unit/junk_free.c b/jemalloc/test/unit/junk_free.c
113555deleted file mode 100644
113556index a442a0c..0000000
113557--- a/jemalloc/test/unit/junk_free.c
113558+++ /dev/null
113559@@ -1 +0,0 @@
113560-#include "junk.c"
113561diff --git a/jemalloc/test/unit/junk_free.sh b/jemalloc/test/unit/junk_free.sh
113562deleted file mode 100644
113563index 402196c..0000000
113564--- a/jemalloc/test/unit/junk_free.sh
113565+++ /dev/null
113566@@ -1,5 +0,0 @@
113567-#!/bin/sh
113568-
113569-if [ "x${enable_fill}" = "x1" ] ; then
113570-  export MALLOC_CONF="abort:false,zero:false,junk:free"
113571-fi
113572diff --git a/jemalloc/test/unit/log.c b/jemalloc/test/unit/log.c
113573deleted file mode 100644
113574index c09b589..0000000
113575--- a/jemalloc/test/unit/log.c
113576+++ /dev/null
113577@@ -1,198 +0,0 @@
113578-#include "test/jemalloc_test.h"
113579-
113580-#include "jemalloc/internal/log.h"
113581-
113582-static void
113583-update_log_var_names(const char *names) {
113584-	strncpy(log_var_names, names, sizeof(log_var_names));
113585-}
113586-
113587-static void
113588-expect_no_logging(const char *names) {
113589-	log_var_t log_l1 = LOG_VAR_INIT("l1");
113590-	log_var_t log_l2 = LOG_VAR_INIT("l2");
113591-	log_var_t log_l2_a = LOG_VAR_INIT("l2.a");
113592-
113593-	update_log_var_names(names);
113594-
113595-	int count = 0;
113596-
113597-	for (int i = 0; i < 10; i++) {
113598-		log_do_begin(log_l1)
113599-			count++;
113600-		log_do_end(log_l1)
113601-
113602-		log_do_begin(log_l2)
113603-			count++;
113604-		log_do_end(log_l2)
113605-
113606-		log_do_begin(log_l2_a)
113607-			count++;
113608-		log_do_end(log_l2_a)
113609-	}
113610-	expect_d_eq(count, 0, "Disabled logging not ignored!");
113611-}
113612-
113613-TEST_BEGIN(test_log_disabled) {
113614-	test_skip_if(!config_log);
113615-	atomic_store_b(&log_init_done, true, ATOMIC_RELAXED);
113616-	expect_no_logging("");
113617-	expect_no_logging("abc");
113618-	expect_no_logging("a.b.c");
113619-	expect_no_logging("l12");
113620-	expect_no_logging("l123|a456|b789");
113621-	expect_no_logging("|||");
113622-}
113623-TEST_END
113624-
113625-TEST_BEGIN(test_log_enabled_direct) {
113626-	test_skip_if(!config_log);
113627-	atomic_store_b(&log_init_done, true, ATOMIC_RELAXED);
113628-	log_var_t log_l1 = LOG_VAR_INIT("l1");
113629-	log_var_t log_l1_a = LOG_VAR_INIT("l1.a");
113630-	log_var_t log_l2 = LOG_VAR_INIT("l2");
113631-
113632-	int count;
113633-
113634-	count = 0;
113635-	update_log_var_names("l1");
113636-	for (int i = 0; i < 10; i++) {
113637-		log_do_begin(log_l1)
113638-			count++;
113639-		log_do_end(log_l1)
113640-	}
113641-	expect_d_eq(count, 10, "Mis-logged!");
113642-
113643-	count = 0;
113644-	update_log_var_names("l1.a");
113645-	for (int i = 0; i < 10; i++) {
113646-		log_do_begin(log_l1_a)
113647-			count++;
113648-		log_do_end(log_l1_a)
113649-	}
113650-	expect_d_eq(count, 10, "Mis-logged!");
113651-
113652-	count = 0;
113653-	update_log_var_names("l1.a|abc|l2|def");
113654-	for (int i = 0; i < 10; i++) {
113655-		log_do_begin(log_l1_a)
113656-			count++;
113657-		log_do_end(log_l1_a)
113658-
113659-		log_do_begin(log_l2)
113660-			count++;
113661-		log_do_end(log_l2)
113662-	}
113663-	expect_d_eq(count, 20, "Mis-logged!");
113664-}
113665-TEST_END
113666-
113667-TEST_BEGIN(test_log_enabled_indirect) {
113668-	test_skip_if(!config_log);
113669-	atomic_store_b(&log_init_done, true, ATOMIC_RELAXED);
113670-	update_log_var_names("l0|l1|abc|l2.b|def");
113671-
113672-	/* On. */
113673-	log_var_t log_l1 = LOG_VAR_INIT("l1");
113674-	/* Off. */
113675-	log_var_t log_l1a = LOG_VAR_INIT("l1a");
113676-	/* On. */
113677-	log_var_t log_l1_a = LOG_VAR_INIT("l1.a");
113678-	/* Off. */
113679-	log_var_t log_l2_a = LOG_VAR_INIT("l2.a");
113680-	/* On. */
113681-	log_var_t log_l2_b_a = LOG_VAR_INIT("l2.b.a");
113682-	/* On. */
113683-	log_var_t log_l2_b_b = LOG_VAR_INIT("l2.b.b");
113684-
113685-	/* 4 are on in total, so the count should sum to 40. */
113686-	int count = 0;
113687-	for (int i = 0; i < 10; i++) {
113688-		log_do_begin(log_l1)
113689-			count++;
113690-		log_do_end(log_l1)
113691-
113692-		log_do_begin(log_l1a)
113693-			count++;
113694-		log_do_end(log_l1a)
113695-
113696-		log_do_begin(log_l1_a)
113697-			count++;
113698-		log_do_end(log_l1_a)
113699-
113700-		log_do_begin(log_l2_a)
113701-			count++;
113702-		log_do_end(log_l2_a)
113703-
113704-		log_do_begin(log_l2_b_a)
113705-			count++;
113706-		log_do_end(log_l2_b_a)
113707-
113708-		log_do_begin(log_l2_b_b)
113709-			count++;
113710-		log_do_end(log_l2_b_b)
113711-	}
113712-
113713-	expect_d_eq(count, 40, "Mis-logged!");
113714-}
113715-TEST_END
113716-
113717-TEST_BEGIN(test_log_enabled_global) {
113718-	test_skip_if(!config_log);
113719-	atomic_store_b(&log_init_done, true, ATOMIC_RELAXED);
113720-	update_log_var_names("abc|.|def");
113721-
113722-	log_var_t log_l1 = LOG_VAR_INIT("l1");
113723-	log_var_t log_l2_a_a = LOG_VAR_INIT("l2.a.a");
113724-
113725-	int count = 0;
113726-	for (int i = 0; i < 10; i++) {
113727-		log_do_begin(log_l1)
113728-		    count++;
113729-		log_do_end(log_l1)
113730-
113731-		log_do_begin(log_l2_a_a)
113732-		    count++;
113733-		log_do_end(log_l2_a_a)
113734-	}
113735-	expect_d_eq(count, 20, "Mis-logged!");
113736-}
113737-TEST_END
113738-
113739-TEST_BEGIN(test_logs_if_no_init) {
113740-	test_skip_if(!config_log);
113741-	atomic_store_b(&log_init_done, false, ATOMIC_RELAXED);
113742-
113743-	log_var_t l = LOG_VAR_INIT("definitely.not.enabled");
113744-
113745-	int count = 0;
113746-	for (int i = 0; i < 10; i++) {
113747-		log_do_begin(l)
113748-			count++;
113749-		log_do_end(l)
113750-	}
113751-	expect_d_eq(count, 0, "Logging shouldn't happen if not initialized.");
113752-}
113753-TEST_END
113754-
113755-/*
113756- * This really just checks to make sure that this usage compiles; we don't have
113757- * any test code to run.
113758- */
113759-TEST_BEGIN(test_log_only_format_string) {
113760-	if (false) {
113761-		LOG("log_str", "No arguments follow this format string.");
113762-	}
113763-}
113764-TEST_END
113765-
113766-int
113767-main(void) {
113768-	return test(
113769-	    test_log_disabled,
113770-	    test_log_enabled_direct,
113771-	    test_log_enabled_indirect,
113772-	    test_log_enabled_global,
113773-	    test_logs_if_no_init,
113774-	    test_log_only_format_string);
113775-}
113776diff --git a/jemalloc/test/unit/mallctl.c b/jemalloc/test/unit/mallctl.c
113777deleted file mode 100644
113778index 6efc8f1..0000000
113779--- a/jemalloc/test/unit/mallctl.c
113780+++ /dev/null
113781@@ -1,1274 +0,0 @@
113782-#include "test/jemalloc_test.h"
113783-
113784-#include "jemalloc/internal/ctl.h"
113785-#include "jemalloc/internal/hook.h"
113786-#include "jemalloc/internal/util.h"
113787-
113788-TEST_BEGIN(test_mallctl_errors) {
113789-	uint64_t epoch;
113790-	size_t sz;
113791-
113792-	expect_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT,
113793-	    "mallctl() should return ENOENT for non-existent names");
113794-
113795-	expect_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")),
113796-	    EPERM, "mallctl() should return EPERM on attempt to write "
113797-	    "read-only value");
113798-
113799-	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
113800-	    sizeof(epoch)-1), EINVAL,
113801-	    "mallctl() should return EINVAL for input size mismatch");
113802-	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
113803-	    sizeof(epoch)+1), EINVAL,
113804-	    "mallctl() should return EINVAL for input size mismatch");
113805-
113806-	sz = sizeof(epoch)-1;
113807-	expect_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
113808-	    "mallctl() should return EINVAL for output size mismatch");
113809-	sz = sizeof(epoch)+1;
113810-	expect_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL,
113811-	    "mallctl() should return EINVAL for output size mismatch");
113812-}
113813-TEST_END
113814-
113815-TEST_BEGIN(test_mallctlnametomib_errors) {
113816-	size_t mib[1];
113817-	size_t miblen;
113818-
113819-	miblen = sizeof(mib)/sizeof(size_t);
113820-	expect_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT,
113821-	    "mallctlnametomib() should return ENOENT for non-existent names");
113822-}
113823-TEST_END
113824-
113825-TEST_BEGIN(test_mallctlbymib_errors) {
113826-	uint64_t epoch;
113827-	size_t sz;
113828-	size_t mib[1];
113829-	size_t miblen;
113830-
113831-	miblen = sizeof(mib)/sizeof(size_t);
113832-	expect_d_eq(mallctlnametomib("version", mib, &miblen), 0,
113833-	    "Unexpected mallctlnametomib() failure");
113834-
113835-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0",
113836-	    strlen("0.0.0")), EPERM, "mallctl() should return EPERM on "
113837-	    "attempt to write read-only value");
113838-
113839-	miblen = sizeof(mib)/sizeof(size_t);
113840-	expect_d_eq(mallctlnametomib("epoch", mib, &miblen), 0,
113841-	    "Unexpected mallctlnametomib() failure");
113842-
113843-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
113844-	    sizeof(epoch)-1), EINVAL,
113845-	    "mallctlbymib() should return EINVAL for input size mismatch");
113846-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch,
113847-	    sizeof(epoch)+1), EINVAL,
113848-	    "mallctlbymib() should return EINVAL for input size mismatch");
113849-
113850-	sz = sizeof(epoch)-1;
113851-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
113852-	    EINVAL,
113853-	    "mallctlbymib() should return EINVAL for output size mismatch");
113854-	sz = sizeof(epoch)+1;
113855-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0),
113856-	    EINVAL,
113857-	    "mallctlbymib() should return EINVAL for output size mismatch");
113858-}
113859-TEST_END
113860-
113861-TEST_BEGIN(test_mallctl_read_write) {
113862-	uint64_t old_epoch, new_epoch;
113863-	size_t sz = sizeof(old_epoch);
113864-
113865-	/* Blind. */
113866-	expect_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0,
113867-	    "Unexpected mallctl() failure");
113868-	expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
113869-
113870-	/* Read. */
113871-	expect_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0,
113872-	    "Unexpected mallctl() failure");
113873-	expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
113874-
113875-	/* Write. */
113876-	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch,
113877-	    sizeof(new_epoch)), 0, "Unexpected mallctl() failure");
113878-	expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
113879-
113880-	/* Read+write. */
113881-	expect_d_eq(mallctl("epoch", (void *)&old_epoch, &sz,
113882-	    (void *)&new_epoch, sizeof(new_epoch)), 0,
113883-	    "Unexpected mallctl() failure");
113884-	expect_zu_eq(sz, sizeof(old_epoch), "Unexpected output size");
113885-}
113886-TEST_END
113887-
113888-TEST_BEGIN(test_mallctlnametomib_short_mib) {
113889-	size_t mib[4];
113890-	size_t miblen;
113891-
113892-	miblen = 3;
113893-	mib[3] = 42;
113894-	expect_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
113895-	    "Unexpected mallctlnametomib() failure");
113896-	expect_zu_eq(miblen, 3, "Unexpected mib output length");
113897-	expect_zu_eq(mib[3], 42,
113898-	    "mallctlnametomib() wrote past the end of the input mib");
113899-}
113900-TEST_END
113901-
113902-TEST_BEGIN(test_mallctlnametomib_short_name) {
113903-	size_t mib[4];
113904-	size_t miblen;
113905-
113906-	miblen = 4;
113907-	mib[3] = 42;
113908-	expect_d_eq(mallctlnametomib("arenas.bin.0", mib, &miblen), 0,
113909-	    "Unexpected mallctlnametomib() failure");
113910-	expect_zu_eq(miblen, 3, "Unexpected mib output length");
113911-	expect_zu_eq(mib[3], 42,
113912-	    "mallctlnametomib() wrote past the end of the input mib");
113913-}
113914-TEST_END
113915-
113916-TEST_BEGIN(test_mallctlmibnametomib) {
113917-	size_t mib[4];
113918-	size_t miblen = 4;
113919-	uint32_t result, result_ref;
113920-	size_t len_result = sizeof(uint32_t);
113921-
113922-	tsd_t *tsd = tsd_fetch();
113923-
113924-	/* Error cases */
113925-	assert_d_eq(ctl_mibnametomib(tsd, mib, 0, "bob", &miblen), ENOENT, "");
113926-	assert_zu_eq(miblen, 4, "");
113927-	assert_d_eq(ctl_mibnametomib(tsd, mib, 0, "9999", &miblen), ENOENT, "");
113928-	assert_zu_eq(miblen, 4, "");
113929-
113930-	/* Valid case. */
113931-	assert_d_eq(ctl_mibnametomib(tsd, mib, 0, "arenas", &miblen), 0, "");
113932-	assert_zu_eq(miblen, 1, "");
113933-	miblen = 4;
113934-	assert_d_eq(ctl_mibnametomib(tsd, mib, 1, "bin", &miblen), 0, "");
113935-	assert_zu_eq(miblen, 2, "");
113936-	expect_d_eq(mallctlbymib(mib, miblen, &result, &len_result, NULL, 0),
113937-	    ENOENT, "mallctlbymib() should fail on partial path");
113938-
113939-	/* Error cases. */
113940-	miblen = 4;
113941-	assert_d_eq(ctl_mibnametomib(tsd, mib, 2, "bob", &miblen), ENOENT, "");
113942-	assert_zu_eq(miblen, 4, "");
113943-	assert_d_eq(ctl_mibnametomib(tsd, mib, 2, "9999", &miblen), ENOENT, "");
113944-	assert_zu_eq(miblen, 4, "");
113945-
113946-	/* Valid case. */
113947-	assert_d_eq(ctl_mibnametomib(tsd, mib, 2, "0", &miblen), 0, "");
113948-	assert_zu_eq(miblen, 3, "");
113949-	expect_d_eq(mallctlbymib(mib, miblen, &result, &len_result, NULL, 0),
113950-	    ENOENT, "mallctlbymib() should fail on partial path");
113951-
113952-	/* Error cases. */
113953-	miblen = 4;
113954-	assert_d_eq(ctl_mibnametomib(tsd, mib, 3, "bob", &miblen), ENOENT, "");
113955-	assert_zu_eq(miblen, 4, "");
113956-	assert_d_eq(ctl_mibnametomib(tsd, mib, 3, "9999", &miblen), ENOENT, "");
113957-	assert_zu_eq(miblen, 4, "");
113958-
113959-	/* Valid case. */
113960-	assert_d_eq(ctl_mibnametomib(tsd, mib, 3, "nregs", &miblen), 0, "");
113961-	assert_zu_eq(miblen, 4, "");
113962-	assert_d_eq(mallctlbymib(mib, miblen, &result, &len_result, NULL, 0),
113963-	    0, "Unexpected mallctlbymib() failure");
113964-	assert_d_eq(mallctl("arenas.bin.0.nregs", &result_ref, &len_result,
113965-	    NULL, 0), 0, "Unexpected mallctl() failure");
113966-	expect_zu_eq(result, result_ref,
113967-	    "mallctlbymib() and mallctl() returned different result");
113968-}
113969-TEST_END
113970-
113971-TEST_BEGIN(test_mallctlbymibname) {
113972-	size_t mib[4];
113973-	size_t miblen = 4;
113974-	uint32_t result, result_ref;
113975-	size_t len_result = sizeof(uint32_t);
113976-
113977-	tsd_t *tsd = tsd_fetch();
113978-
113979-	/* Error cases. */
113980-
113981-	assert_d_eq(mallctlnametomib("arenas", mib, &miblen), 0,
113982-	    "Unexpected mallctlnametomib() failure");
113983-	assert_zu_eq(miblen, 1, "");
113984-
113985-	miblen = 4;
113986-	assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0", &miblen,
113987-	    &result, &len_result, NULL, 0), ENOENT, "");
113988-	miblen = 4;
113989-	assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0.bob", &miblen,
113990-	    &result, &len_result, NULL, 0), ENOENT, "");
113991-	assert_zu_eq(miblen, 4, "");
113992-
113993-	/* Valid cases. */
113994-
113995-	assert_d_eq(mallctl("arenas.bin.0.nregs", &result_ref, &len_result,
113996-	    NULL, 0), 0, "Unexpected mallctl() failure");
113997-	miblen = 4;
113998-
113999-	assert_d_eq(ctl_bymibname(tsd, mib, 0, "arenas.bin.0.nregs", &miblen,
114000-	    &result, &len_result, NULL, 0), 0, "");
114001-	assert_zu_eq(miblen, 4, "");
114002-	expect_zu_eq(result, result_ref, "Unexpected result");
114003-
114004-	assert_d_eq(ctl_bymibname(tsd, mib, 1, "bin.0.nregs", &miblen, &result,
114005-	    &len_result, NULL, 0), 0, "");
114006-	assert_zu_eq(miblen, 4, "");
114007-	expect_zu_eq(result, result_ref, "Unexpected result");
114008-
114009-	assert_d_eq(ctl_bymibname(tsd, mib, 2, "0.nregs", &miblen, &result,
114010-	    &len_result, NULL, 0), 0, "");
114011-	assert_zu_eq(miblen, 4, "");
114012-	expect_zu_eq(result, result_ref, "Unexpected result");
114013-
114014-	assert_d_eq(ctl_bymibname(tsd, mib, 3, "nregs", &miblen, &result,
114015-	    &len_result, NULL, 0), 0, "");
114016-	assert_zu_eq(miblen, 4, "");
114017-	expect_zu_eq(result, result_ref, "Unexpected result");
114018-}
114019-TEST_END
114020-
114021-TEST_BEGIN(test_mallctl_config) {
114022-#define TEST_MALLCTL_CONFIG(config, t) do {				\
114023-	t oldval;							\
114024-	size_t sz = sizeof(oldval);					\
114025-	expect_d_eq(mallctl("config."#config, (void *)&oldval, &sz,	\
114026-	    NULL, 0), 0, "Unexpected mallctl() failure");		\
114027-	expect_b_eq(oldval, config_##config, "Incorrect config value");	\
114028-	expect_zu_eq(sz, sizeof(oldval), "Unexpected output size");	\
114029-} while (0)
114030-
114031-	TEST_MALLCTL_CONFIG(cache_oblivious, bool);
114032-	TEST_MALLCTL_CONFIG(debug, bool);
114033-	TEST_MALLCTL_CONFIG(fill, bool);
114034-	TEST_MALLCTL_CONFIG(lazy_lock, bool);
114035-	TEST_MALLCTL_CONFIG(malloc_conf, const char *);
114036-	TEST_MALLCTL_CONFIG(prof, bool);
114037-	TEST_MALLCTL_CONFIG(prof_libgcc, bool);
114038-	TEST_MALLCTL_CONFIG(prof_libunwind, bool);
114039-	TEST_MALLCTL_CONFIG(stats, bool);
114040-	TEST_MALLCTL_CONFIG(utrace, bool);
114041-	TEST_MALLCTL_CONFIG(xmalloc, bool);
114042-
114043-#undef TEST_MALLCTL_CONFIG
114044-}
114045-TEST_END
114046-
114047-TEST_BEGIN(test_mallctl_opt) {
114048-	bool config_always = true;
114049-
114050-#define TEST_MALLCTL_OPT(t, opt, config) do {				\
114051-	t oldval;							\
114052-	size_t sz = sizeof(oldval);					\
114053-	int expected = config_##config ? 0 : ENOENT;			\
114054-	int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL,	\
114055-	    0);								\
114056-	expect_d_eq(result, expected,					\
114057-	    "Unexpected mallctl() result for opt."#opt);		\
114058-	expect_zu_eq(sz, sizeof(oldval), "Unexpected output size");	\
114059-} while (0)
114060-
114061-	TEST_MALLCTL_OPT(bool, abort, always);
114062-	TEST_MALLCTL_OPT(bool, abort_conf, always);
114063-	TEST_MALLCTL_OPT(bool, cache_oblivious, always);
114064-	TEST_MALLCTL_OPT(bool, trust_madvise, always);
114065-	TEST_MALLCTL_OPT(bool, confirm_conf, always);
114066-	TEST_MALLCTL_OPT(const char *, metadata_thp, always);
114067-	TEST_MALLCTL_OPT(bool, retain, always);
114068-	TEST_MALLCTL_OPT(const char *, dss, always);
114069-	TEST_MALLCTL_OPT(bool, hpa, always);
114070-	TEST_MALLCTL_OPT(size_t, hpa_slab_max_alloc, always);
114071-	TEST_MALLCTL_OPT(size_t, hpa_sec_nshards, always);
114072-	TEST_MALLCTL_OPT(size_t, hpa_sec_max_alloc, always);
114073-	TEST_MALLCTL_OPT(size_t, hpa_sec_max_bytes, always);
114074-	TEST_MALLCTL_OPT(size_t, hpa_sec_bytes_after_flush, always);
114075-	TEST_MALLCTL_OPT(size_t, hpa_sec_batch_fill_extra, always);
114076-	TEST_MALLCTL_OPT(unsigned, narenas, always);
114077-	TEST_MALLCTL_OPT(const char *, percpu_arena, always);
114078-	TEST_MALLCTL_OPT(size_t, oversize_threshold, always);
114079-	TEST_MALLCTL_OPT(bool, background_thread, always);
114080-	TEST_MALLCTL_OPT(ssize_t, dirty_decay_ms, always);
114081-	TEST_MALLCTL_OPT(ssize_t, muzzy_decay_ms, always);
114082-	TEST_MALLCTL_OPT(bool, stats_print, always);
114083-	TEST_MALLCTL_OPT(const char *, stats_print_opts, always);
114084-	TEST_MALLCTL_OPT(int64_t, stats_interval, always);
114085-	TEST_MALLCTL_OPT(const char *, stats_interval_opts, always);
114086-	TEST_MALLCTL_OPT(const char *, junk, fill);
114087-	TEST_MALLCTL_OPT(bool, zero, fill);
114088-	TEST_MALLCTL_OPT(bool, utrace, utrace);
114089-	TEST_MALLCTL_OPT(bool, xmalloc, xmalloc);
114090-	TEST_MALLCTL_OPT(bool, tcache, always);
114091-	TEST_MALLCTL_OPT(size_t, lg_extent_max_active_fit, always);
114092-	TEST_MALLCTL_OPT(size_t, tcache_max, always);
114093-	TEST_MALLCTL_OPT(const char *, thp, always);
114094-	TEST_MALLCTL_OPT(const char *, zero_realloc, always);
114095-	TEST_MALLCTL_OPT(bool, prof, prof);
114096-	TEST_MALLCTL_OPT(const char *, prof_prefix, prof);
114097-	TEST_MALLCTL_OPT(bool, prof_active, prof);
114098-	TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof);
114099-	TEST_MALLCTL_OPT(bool, prof_accum, prof);
114100-	TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof);
114101-	TEST_MALLCTL_OPT(bool, prof_gdump, prof);
114102-	TEST_MALLCTL_OPT(bool, prof_final, prof);
114103-	TEST_MALLCTL_OPT(bool, prof_leak, prof);
114104-	TEST_MALLCTL_OPT(bool, prof_leak_error, prof);
114105-	TEST_MALLCTL_OPT(ssize_t, prof_recent_alloc_max, prof);
114106-	TEST_MALLCTL_OPT(bool, prof_stats, prof);
114107-	TEST_MALLCTL_OPT(bool, prof_sys_thread_name, prof);
114108-	TEST_MALLCTL_OPT(ssize_t, lg_san_uaf_align, uaf_detection);
114109-
114110-#undef TEST_MALLCTL_OPT
114111-}
114112-TEST_END
114113-
114114-TEST_BEGIN(test_manpage_example) {
114115-	unsigned nbins, i;
114116-	size_t mib[4];
114117-	size_t len, miblen;
114118-
114119-	len = sizeof(nbins);
114120-	expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0,
114121-	    "Unexpected mallctl() failure");
114122-
114123-	miblen = 4;
114124-	expect_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0,
114125-	    "Unexpected mallctlnametomib() failure");
114126-	for (i = 0; i < nbins; i++) {
114127-		size_t bin_size;
114128-
114129-		mib[2] = i;
114130-		len = sizeof(bin_size);
114131-		expect_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len,
114132-		    NULL, 0), 0, "Unexpected mallctlbymib() failure");
114133-		/* Do something with bin_size... */
114134-	}
114135-}
114136-TEST_END
114137-
114138-TEST_BEGIN(test_tcache_none) {
114139-	test_skip_if(!opt_tcache);
114140-
114141-	/* Allocate p and q. */
114142-	void *p0 = mallocx(42, 0);
114143-	expect_ptr_not_null(p0, "Unexpected mallocx() failure");
114144-	void *q = mallocx(42, 0);
114145-	expect_ptr_not_null(q, "Unexpected mallocx() failure");
114146-
114147-	/* Deallocate p and q, but bypass the tcache for q. */
114148-	dallocx(p0, 0);
114149-	dallocx(q, MALLOCX_TCACHE_NONE);
114150-
114151-	/* Make sure that tcache-based allocation returns p, not q. */
114152-	void *p1 = mallocx(42, 0);
114153-	expect_ptr_not_null(p1, "Unexpected mallocx() failure");
114154-	if (!opt_prof && !san_uaf_detection_enabled()) {
114155-		expect_ptr_eq(p0, p1,
114156-		    "Expected tcache to allocate cached region");
114157-	}
114158-
114159-	/* Clean up. */
114160-	dallocx(p1, MALLOCX_TCACHE_NONE);
114161-}
114162-TEST_END
114163-
114164-TEST_BEGIN(test_tcache) {
114165-#define NTCACHES	10
114166-	unsigned tis[NTCACHES];
114167-	void *ps[NTCACHES];
114168-	void *qs[NTCACHES];
114169-	unsigned i;
114170-	size_t sz, psz, qsz;
114171-
114172-	psz = 42;
114173-	qsz = nallocx(psz, 0) + 1;
114174-
114175-	/* Create tcaches. */
114176-	for (i = 0; i < NTCACHES; i++) {
114177-		sz = sizeof(unsigned);
114178-		expect_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
114179-		    0), 0, "Unexpected mallctl() failure, i=%u", i);
114180-	}
114181-
114182-	/* Exercise tcache ID recycling. */
114183-	for (i = 0; i < NTCACHES; i++) {
114184-		expect_d_eq(mallctl("tcache.destroy", NULL, NULL,
114185-		    (void *)&tis[i], sizeof(unsigned)), 0,
114186-		    "Unexpected mallctl() failure, i=%u", i);
114187-	}
114188-	for (i = 0; i < NTCACHES; i++) {
114189-		sz = sizeof(unsigned);
114190-		expect_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL,
114191-		    0), 0, "Unexpected mallctl() failure, i=%u", i);
114192-	}
114193-
114194-	/* Flush empty tcaches. */
114195-	for (i = 0; i < NTCACHES; i++) {
114196-		expect_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
114197-		    sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
114198-		    i);
114199-	}
114200-
114201-	/* Cache some allocations. */
114202-	for (i = 0; i < NTCACHES; i++) {
114203-		ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
114204-		expect_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
114205-		    i);
114206-		dallocx(ps[i], MALLOCX_TCACHE(tis[i]));
114207-
114208-		qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i]));
114209-		expect_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u",
114210-		    i);
114211-		dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
114212-	}
114213-
114214-	/* Verify that tcaches allocate cached regions. */
114215-	for (i = 0; i < NTCACHES; i++) {
114216-		void *p0 = ps[i];
114217-		ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i]));
114218-		expect_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u",
114219-		    i);
114220-		if (!san_uaf_detection_enabled()) {
114221-			expect_ptr_eq(ps[i], p0, "Expected mallocx() to "
114222-			    "allocate cached region, i=%u", i);
114223-		}
114224-	}
114225-
114226-	/* Verify that reallocation uses cached regions. */
114227-	for (i = 0; i < NTCACHES; i++) {
114228-		void *q0 = qs[i];
114229-		qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i]));
114230-		expect_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u",
114231-		    i);
114232-		if (!san_uaf_detection_enabled()) {
114233-			expect_ptr_eq(qs[i], q0, "Expected rallocx() to "
114234-			    "allocate cached region, i=%u", i);
114235-		}
114236-		/* Avoid undefined behavior in case of test failure. */
114237-		if (qs[i] == NULL) {
114238-			qs[i] = ps[i];
114239-		}
114240-	}
114241-	for (i = 0; i < NTCACHES; i++) {
114242-		dallocx(qs[i], MALLOCX_TCACHE(tis[i]));
114243-	}
114244-
114245-	/* Flush some non-empty tcaches. */
114246-	for (i = 0; i < NTCACHES/2; i++) {
114247-		expect_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i],
114248-		    sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u",
114249-		    i);
114250-	}
114251-
114252-	/* Destroy tcaches. */
114253-	for (i = 0; i < NTCACHES; i++) {
114254-		expect_d_eq(mallctl("tcache.destroy", NULL, NULL,
114255-		    (void *)&tis[i], sizeof(unsigned)), 0,
114256-		    "Unexpected mallctl() failure, i=%u", i);
114257-	}
114258-}
114259-TEST_END
114260-
114261-TEST_BEGIN(test_thread_arena) {
114262-	unsigned old_arena_ind, new_arena_ind, narenas;
114263-
114264-	const char *opa;
114265-	size_t sz = sizeof(opa);
114266-	expect_d_eq(mallctl("opt.percpu_arena", (void *)&opa, &sz, NULL, 0), 0,
114267-	    "Unexpected mallctl() failure");
114268-
114269-	sz = sizeof(unsigned);
114270-	expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
114271-	    0, "Unexpected mallctl() failure");
114272-	if (opt_oversize_threshold != 0) {
114273-		narenas--;
114274-	}
114275-	expect_u_eq(narenas, opt_narenas, "Number of arenas incorrect");
114276-
114277-	if (strcmp(opa, "disabled") == 0) {
114278-		new_arena_ind = narenas - 1;
114279-		expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
114280-		    (void *)&new_arena_ind, sizeof(unsigned)), 0,
114281-		    "Unexpected mallctl() failure");
114282-		new_arena_ind = 0;
114283-		expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
114284-		    (void *)&new_arena_ind, sizeof(unsigned)), 0,
114285-		    "Unexpected mallctl() failure");
114286-	} else {
114287-		expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
114288-		    NULL, 0), 0, "Unexpected mallctl() failure");
114289-		new_arena_ind = percpu_arena_ind_limit(opt_percpu_arena) - 1;
114290-		if (old_arena_ind != new_arena_ind) {
114291-			expect_d_eq(mallctl("thread.arena",
114292-			    (void *)&old_arena_ind, &sz, (void *)&new_arena_ind,
114293-			    sizeof(unsigned)), EPERM, "thread.arena ctl "
114294-			    "should not be allowed with percpu arena");
114295-		}
114296-	}
114297-}
114298-TEST_END
114299-
114300-TEST_BEGIN(test_arena_i_initialized) {
114301-	unsigned narenas, i;
114302-	size_t sz;
114303-	size_t mib[3];
114304-	size_t miblen = sizeof(mib) / sizeof(size_t);
114305-	bool initialized;
114306-
114307-	sz = sizeof(narenas);
114308-	expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
114309-	    0, "Unexpected mallctl() failure");
114310-
114311-	expect_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0,
114312-	    "Unexpected mallctlnametomib() failure");
114313-	for (i = 0; i < narenas; i++) {
114314-		mib[1] = i;
114315-		sz = sizeof(initialized);
114316-		expect_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL,
114317-		    0), 0, "Unexpected mallctl() failure");
114318-	}
114319-
114320-	mib[1] = MALLCTL_ARENAS_ALL;
114321-	sz = sizeof(initialized);
114322-	expect_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0,
114323-	    "Unexpected mallctl() failure");
114324-	expect_true(initialized,
114325-	    "Merged arena statistics should always be initialized");
114326-
114327-	/* Equivalent to the above but using mallctl() directly. */
114328-	sz = sizeof(initialized);
114329-	expect_d_eq(mallctl(
114330-	    "arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".initialized",
114331-	    (void *)&initialized, &sz, NULL, 0), 0,
114332-	    "Unexpected mallctl() failure");
114333-	expect_true(initialized,
114334-	    "Merged arena statistics should always be initialized");
114335-}
114336-TEST_END
114337-
114338-TEST_BEGIN(test_arena_i_dirty_decay_ms) {
114339-	ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms;
114340-	size_t sz = sizeof(ssize_t);
114341-
114342-	expect_d_eq(mallctl("arena.0.dirty_decay_ms",
114343-	    (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0,
114344-	    "Unexpected mallctl() failure");
114345-
114346-	dirty_decay_ms = -2;
114347-	expect_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
114348-	    (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT,
114349-	    "Unexpected mallctl() success");
114350-
114351-	dirty_decay_ms = 0x7fffffff;
114352-	expect_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL,
114353-	    (void *)&dirty_decay_ms, sizeof(ssize_t)), 0,
114354-	    "Unexpected mallctl() failure");
114355-
114356-	for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1;
114357-	    dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms,
114358-	    dirty_decay_ms++) {
114359-		ssize_t old_dirty_decay_ms;
114360-
114361-		expect_d_eq(mallctl("arena.0.dirty_decay_ms",
114362-		    (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms,
114363-		    sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
114364-		expect_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
114365-		    "Unexpected old arena.0.dirty_decay_ms");
114366-	}
114367-}
114368-TEST_END
114369-
114370-TEST_BEGIN(test_arena_i_muzzy_decay_ms) {
114371-	ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms;
114372-	size_t sz = sizeof(ssize_t);
114373-
114374-	expect_d_eq(mallctl("arena.0.muzzy_decay_ms",
114375-	    (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0,
114376-	    "Unexpected mallctl() failure");
114377-
114378-	muzzy_decay_ms = -2;
114379-	expect_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
114380-	    (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT,
114381-	    "Unexpected mallctl() success");
114382-
114383-	muzzy_decay_ms = 0x7fffffff;
114384-	expect_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL,
114385-	    (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0,
114386-	    "Unexpected mallctl() failure");
114387-
114388-	for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1;
114389-	    muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms,
114390-	    muzzy_decay_ms++) {
114391-		ssize_t old_muzzy_decay_ms;
114392-
114393-		expect_d_eq(mallctl("arena.0.muzzy_decay_ms",
114394-		    (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms,
114395-		    sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
114396-		expect_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
114397-		    "Unexpected old arena.0.muzzy_decay_ms");
114398-	}
114399-}
114400-TEST_END
114401-
114402-TEST_BEGIN(test_arena_i_purge) {
114403-	unsigned narenas;
114404-	size_t sz = sizeof(unsigned);
114405-	size_t mib[3];
114406-	size_t miblen = 3;
114407-
114408-	expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
114409-	    "Unexpected mallctl() failure");
114410-
114411-	expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
114412-	    0, "Unexpected mallctl() failure");
114413-	expect_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
114414-	    "Unexpected mallctlnametomib() failure");
114415-	mib[1] = narenas;
114416-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
114417-	    "Unexpected mallctlbymib() failure");
114418-
114419-	mib[1] = MALLCTL_ARENAS_ALL;
114420-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
114421-	    "Unexpected mallctlbymib() failure");
114422-}
114423-TEST_END
114424-
114425-TEST_BEGIN(test_arena_i_decay) {
114426-	unsigned narenas;
114427-	size_t sz = sizeof(unsigned);
114428-	size_t mib[3];
114429-	size_t miblen = 3;
114430-
114431-	expect_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
114432-	    "Unexpected mallctl() failure");
114433-
114434-	expect_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
114435-	    0, "Unexpected mallctl() failure");
114436-	expect_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
114437-	    "Unexpected mallctlnametomib() failure");
114438-	mib[1] = narenas;
114439-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
114440-	    "Unexpected mallctlbymib() failure");
114441-
114442-	mib[1] = MALLCTL_ARENAS_ALL;
114443-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
114444-	    "Unexpected mallctlbymib() failure");
114445-}
114446-TEST_END
114447-
114448-TEST_BEGIN(test_arena_i_dss) {
114449-	const char *dss_prec_old, *dss_prec_new;
114450-	size_t sz = sizeof(dss_prec_old);
114451-	size_t mib[3];
114452-	size_t miblen;
114453-
114454-	miblen = sizeof(mib)/sizeof(size_t);
114455-	expect_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
114456-	    "Unexpected mallctlnametomib() error");
114457-
114458-	dss_prec_new = "disabled";
114459-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
114460-	    (void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
114461-	    "Unexpected mallctl() failure");
114462-	expect_str_ne(dss_prec_old, "primary",
114463-	    "Unexpected default for dss precedence");
114464-
114465-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
114466-	    (void *)&dss_prec_old, sizeof(dss_prec_old)), 0,
114467-	    "Unexpected mallctl() failure");
114468-
114469-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
114470-	    0), 0, "Unexpected mallctl() failure");
114471-	expect_str_ne(dss_prec_old, "primary",
114472-	    "Unexpected value for dss precedence");
114473-
114474-	mib[1] = narenas_total_get();
114475-	dss_prec_new = "disabled";
114476-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz,
114477-	    (void *)&dss_prec_new, sizeof(dss_prec_new)), 0,
114478-	    "Unexpected mallctl() failure");
114479-	expect_str_ne(dss_prec_old, "primary",
114480-	    "Unexpected default for dss precedence");
114481-
114482-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz,
114483-	    (void *)&dss_prec_old, sizeof(dss_prec_new)), 0,
114484-	    "Unexpected mallctl() failure");
114485-
114486-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL,
114487-	    0), 0, "Unexpected mallctl() failure");
114488-	expect_str_ne(dss_prec_old, "primary",
114489-	    "Unexpected value for dss precedence");
114490-}
114491-TEST_END
114492-
114493-TEST_BEGIN(test_arena_i_retain_grow_limit) {
114494-	size_t old_limit, new_limit, default_limit;
114495-	size_t mib[3];
114496-	size_t miblen;
114497-
114498-	bool retain_enabled;
114499-	size_t sz = sizeof(retain_enabled);
114500-	expect_d_eq(mallctl("opt.retain", &retain_enabled, &sz, NULL, 0),
114501-	    0, "Unexpected mallctl() failure");
114502-	test_skip_if(!retain_enabled);
114503-
114504-	sz = sizeof(default_limit);
114505-	miblen = sizeof(mib)/sizeof(size_t);
114506-	expect_d_eq(mallctlnametomib("arena.0.retain_grow_limit", mib, &miblen),
114507-	    0, "Unexpected mallctlnametomib() error");
114508-
114509-	expect_d_eq(mallctlbymib(mib, miblen, &default_limit, &sz, NULL, 0), 0,
114510-	    "Unexpected mallctl() failure");
114511-	expect_zu_eq(default_limit, SC_LARGE_MAXCLASS,
114512-	    "Unexpected default for retain_grow_limit");
114513-
114514-	new_limit = PAGE - 1;
114515-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
114516-	    sizeof(new_limit)), EFAULT, "Unexpected mallctl() success");
114517-
114518-	new_limit = PAGE + 1;
114519-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
114520-	    sizeof(new_limit)), 0, "Unexpected mallctl() failure");
114521-	expect_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
114522-	    "Unexpected mallctl() failure");
114523-	expect_zu_eq(old_limit, PAGE,
114524-	    "Unexpected value for retain_grow_limit");
114525-
114526-	/* Expect the grow limit to round down to below psize class 10. */
114527-	new_limit = sz_pind2sz(10) - 1;
114528-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit,
114529-	    sizeof(new_limit)), 0, "Unexpected mallctl() failure");
114530-	expect_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0,
114531-	    "Unexpected mallctl() failure");
114532-	expect_zu_eq(old_limit, sz_pind2sz(9),
114533-	    "Unexpected value for retain_grow_limit");
114534-
114535-	/* Restore to default. */
114536-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &default_limit,
114537-	    sizeof(default_limit)), 0, "Unexpected mallctl() failure");
114538-}
114539-TEST_END
114540-
114541-TEST_BEGIN(test_arenas_dirty_decay_ms) {
114542-	ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms;
114543-	size_t sz = sizeof(ssize_t);
114544-
114545-	expect_d_eq(mallctl("arenas.dirty_decay_ms",
114546-	    (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0,
114547-	    "Unexpected mallctl() failure");
114548-
114549-	dirty_decay_ms = -2;
114550-	expect_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
114551-	    (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT,
114552-	    "Unexpected mallctl() success");
114553-
114554-	dirty_decay_ms = 0x7fffffff;
114555-	expect_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL,
114556-	    (void *)&dirty_decay_ms, sizeof(ssize_t)), 0,
114557-	    "Expected mallctl() failure");
114558-
114559-	for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1;
114560-	    dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms,
114561-	    dirty_decay_ms++) {
114562-		ssize_t old_dirty_decay_ms;
114563-
114564-		expect_d_eq(mallctl("arenas.dirty_decay_ms",
114565-		    (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms,
114566-		    sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
114567-		expect_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms,
114568-		    "Unexpected old arenas.dirty_decay_ms");
114569-	}
114570-}
114571-TEST_END
114572-
114573-TEST_BEGIN(test_arenas_muzzy_decay_ms) {
114574-	ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms;
114575-	size_t sz = sizeof(ssize_t);
114576-
114577-	expect_d_eq(mallctl("arenas.muzzy_decay_ms",
114578-	    (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0,
114579-	    "Unexpected mallctl() failure");
114580-
114581-	muzzy_decay_ms = -2;
114582-	expect_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
114583-	    (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT,
114584-	    "Unexpected mallctl() success");
114585-
114586-	muzzy_decay_ms = 0x7fffffff;
114587-	expect_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL,
114588-	    (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0,
114589-	    "Expected mallctl() failure");
114590-
114591-	for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1;
114592-	    muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms,
114593-	    muzzy_decay_ms++) {
114594-		ssize_t old_muzzy_decay_ms;
114595-
114596-		expect_d_eq(mallctl("arenas.muzzy_decay_ms",
114597-		    (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms,
114598-		    sizeof(ssize_t)), 0, "Unexpected mallctl() failure");
114599-		expect_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms,
114600-		    "Unexpected old arenas.muzzy_decay_ms");
114601-	}
114602-}
114603-TEST_END
114604-
114605-TEST_BEGIN(test_arenas_constants) {
114606-#define TEST_ARENAS_CONSTANT(t, name, expected) do {			\
114607-	t name;								\
114608-	size_t sz = sizeof(t);						\
114609-	expect_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL,	\
114610-	    0), 0, "Unexpected mallctl() failure");			\
114611-	expect_zu_eq(name, expected, "Incorrect "#name" size");		\
114612-} while (0)
114613-
114614-	TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
114615-	TEST_ARENAS_CONSTANT(size_t, page, PAGE);
114616-	TEST_ARENAS_CONSTANT(unsigned, nbins, SC_NBINS);
114617-	TEST_ARENAS_CONSTANT(unsigned, nlextents, SC_NSIZES - SC_NBINS);
114618-
114619-#undef TEST_ARENAS_CONSTANT
114620-}
114621-TEST_END
114622-
114623-TEST_BEGIN(test_arenas_bin_constants) {
114624-#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do {		\
114625-	t name;								\
114626-	size_t sz = sizeof(t);						\
114627-	expect_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz,	\
114628-	    NULL, 0), 0, "Unexpected mallctl() failure");		\
114629-	expect_zu_eq(name, expected, "Incorrect "#name" size");		\
114630-} while (0)
114631-
114632-	TEST_ARENAS_BIN_CONSTANT(size_t, size, bin_infos[0].reg_size);
114633-	TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, bin_infos[0].nregs);
114634-	TEST_ARENAS_BIN_CONSTANT(size_t, slab_size,
114635-	    bin_infos[0].slab_size);
114636-	TEST_ARENAS_BIN_CONSTANT(uint32_t, nshards, bin_infos[0].n_shards);
114637-
114638-#undef TEST_ARENAS_BIN_CONSTANT
114639-}
114640-TEST_END
114641-
114642-TEST_BEGIN(test_arenas_lextent_constants) {
114643-#define TEST_ARENAS_LEXTENT_CONSTANT(t, name, expected) do {		\
114644-	t name;								\
114645-	size_t sz = sizeof(t);						\
114646-	expect_d_eq(mallctl("arenas.lextent.0."#name, (void *)&name,	\
114647-	    &sz, NULL, 0), 0, "Unexpected mallctl() failure");		\
114648-	expect_zu_eq(name, expected, "Incorrect "#name" size");		\
114649-} while (0)
114650-
114651-	TEST_ARENAS_LEXTENT_CONSTANT(size_t, size,
114652-	    SC_LARGE_MINCLASS);
114653-
114654-#undef TEST_ARENAS_LEXTENT_CONSTANT
114655-}
114656-TEST_END
114657-
114658-TEST_BEGIN(test_arenas_create) {
114659-	unsigned narenas_before, arena, narenas_after;
114660-	size_t sz = sizeof(unsigned);
114661-
114662-	expect_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz,
114663-	    NULL, 0), 0, "Unexpected mallctl() failure");
114664-	expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
114665-	    "Unexpected mallctl() failure");
114666-	expect_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL,
114667-	    0), 0, "Unexpected mallctl() failure");
114668-
114669-	expect_u_eq(narenas_before+1, narenas_after,
114670-	    "Unexpected number of arenas before versus after extension");
114671-	expect_u_eq(arena, narenas_after-1, "Unexpected arena index");
114672-}
114673-TEST_END
114674-
114675-TEST_BEGIN(test_arenas_lookup) {
114676-	unsigned arena, arena1;
114677-	void *ptr;
114678-	size_t sz = sizeof(unsigned);
114679-
114680-	expect_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
114681-	    "Unexpected mallctl() failure");
114682-	ptr = mallocx(42, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
114683-	expect_ptr_not_null(ptr, "Unexpected mallocx() failure");
114684-	expect_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
114685-	    0, "Unexpected mallctl() failure");
114686-	expect_u_eq(arena, arena1, "Unexpected arena index");
114687-	dallocx(ptr, 0);
114688-}
114689-TEST_END
114690-
114691-TEST_BEGIN(test_prof_active) {
114692-	/*
114693-	 * If config_prof is off, then the test for prof_active in
114694-	 * test_mallctl_opt was already enough.
114695-	 */
114696-	test_skip_if(!config_prof);
114697-	test_skip_if(opt_prof);
114698-
114699-	bool active, old;
114700-	size_t len = sizeof(bool);
114701-
114702-	active = true;
114703-	expect_d_eq(mallctl("prof.active", NULL, NULL, &active, len), ENOENT,
114704-	    "Setting prof_active to true should fail when opt_prof is off");
114705-	old = true;
114706-	expect_d_eq(mallctl("prof.active", &old, &len, &active, len), ENOENT,
114707-	    "Setting prof_active to true should fail when opt_prof is off");
114708-	expect_true(old, "old value should not be touched when mallctl fails");
114709-	active = false;
114710-	expect_d_eq(mallctl("prof.active", NULL, NULL, &active, len), 0,
114711-	    "Setting prof_active to false should succeed when opt_prof is off");
114712-	expect_d_eq(mallctl("prof.active", &old, &len, &active, len), 0,
114713-	    "Setting prof_active to false should succeed when opt_prof is off");
114714-	expect_false(old, "prof_active should be false when opt_prof is off");
114715-}
114716-TEST_END
114717-
114718-TEST_BEGIN(test_stats_arenas) {
114719-#define TEST_STATS_ARENAS(t, name) do {					\
114720-	t name;								\
114721-	size_t sz = sizeof(t);						\
114722-	expect_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz,	\
114723-	    NULL, 0), 0, "Unexpected mallctl() failure");		\
114724-} while (0)
114725-
114726-	TEST_STATS_ARENAS(unsigned, nthreads);
114727-	TEST_STATS_ARENAS(const char *, dss);
114728-	TEST_STATS_ARENAS(ssize_t, dirty_decay_ms);
114729-	TEST_STATS_ARENAS(ssize_t, muzzy_decay_ms);
114730-	TEST_STATS_ARENAS(size_t, pactive);
114731-	TEST_STATS_ARENAS(size_t, pdirty);
114732-
114733-#undef TEST_STATS_ARENAS
114734-}
114735-TEST_END
114736-
114737-static void
114738-alloc_hook(void *extra, UNUSED hook_alloc_t type, UNUSED void *result,
114739-    UNUSED uintptr_t result_raw, UNUSED uintptr_t args_raw[3]) {
114740-	*(bool *)extra = true;
114741-}
114742-
114743-static void
114744-dalloc_hook(void *extra, UNUSED hook_dalloc_t type,
114745-    UNUSED void *address, UNUSED uintptr_t args_raw[3]) {
114746-	*(bool *)extra = true;
114747-}
114748-
114749-TEST_BEGIN(test_hooks) {
114750-	bool hook_called = false;
114751-	hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called};
114752-	void *handle = NULL;
114753-	size_t sz = sizeof(handle);
114754-	int err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
114755-	    sizeof(hooks));
114756-	expect_d_eq(err, 0, "Hook installation failed");
114757-	expect_ptr_ne(handle, NULL, "Hook installation gave null handle");
114758-	void *ptr = mallocx(1, 0);
114759-	expect_true(hook_called, "Alloc hook not called");
114760-	hook_called = false;
114761-	free(ptr);
114762-	expect_true(hook_called, "Free hook not called");
114763-
114764-	err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
114765-	    sizeof(handle));
114766-	expect_d_eq(err, 0, "Hook removal failed");
114767-	hook_called = false;
114768-	ptr = mallocx(1, 0);
114769-	free(ptr);
114770-	expect_false(hook_called, "Hook called after removal");
114771-}
114772-TEST_END
114773-
114774-TEST_BEGIN(test_hooks_exhaustion) {
114775-	bool hook_called = false;
114776-	hooks_t hooks = {&alloc_hook, &dalloc_hook, NULL, &hook_called};
114777-
114778-	void *handle;
114779-	void *handles[HOOK_MAX];
114780-	size_t sz = sizeof(handle);
114781-	int err;
114782-	for (int i = 0; i < HOOK_MAX; i++) {
114783-		handle = NULL;
114784-		err = mallctl("experimental.hooks.install", &handle, &sz,
114785-		    &hooks, sizeof(hooks));
114786-		expect_d_eq(err, 0, "Error installing hooks");
114787-		expect_ptr_ne(handle, NULL, "Got NULL handle");
114788-		handles[i] = handle;
114789-	}
114790-	err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
114791-	    sizeof(hooks));
114792-	expect_d_eq(err, EAGAIN, "Should have failed hook installation");
114793-	for (int i = 0; i < HOOK_MAX; i++) {
114794-		err = mallctl("experimental.hooks.remove", NULL, NULL,
114795-		    &handles[i], sizeof(handles[i]));
114796-		expect_d_eq(err, 0, "Hook removal failed");
114797-	}
114798-	/* Insertion failed, but then we removed some; it should work now. */
114799-	handle = NULL;
114800-	err = mallctl("experimental.hooks.install", &handle, &sz, &hooks,
114801-	    sizeof(hooks));
114802-	expect_d_eq(err, 0, "Hook insertion failed");
114803-	expect_ptr_ne(handle, NULL, "Got NULL handle");
114804-	err = mallctl("experimental.hooks.remove", NULL, NULL, &handle,
114805-	    sizeof(handle));
114806-	expect_d_eq(err, 0, "Hook removal failed");
114807-}
114808-TEST_END
114809-
114810-TEST_BEGIN(test_thread_idle) {
114811-	/*
114812-	 * We're cheating a little bit in this test, and inferring things about
114813-	 * implementation internals (like tcache details).  We have to;
114814-	 * thread.idle has no guaranteed effects.  We need stats to make these
114815-	 * inferences.
114816-	 */
114817-	test_skip_if(!config_stats);
114818-
114819-	int err;
114820-	size_t sz;
114821-	size_t miblen;
114822-
114823-	bool tcache_enabled = false;
114824-	sz = sizeof(tcache_enabled);
114825-	err = mallctl("thread.tcache.enabled", &tcache_enabled, &sz, NULL, 0);
114826-	expect_d_eq(err, 0, "");
114827-	test_skip_if(!tcache_enabled);
114828-
114829-	size_t tcache_max;
114830-	sz = sizeof(tcache_max);
114831-	err = mallctl("arenas.tcache_max", &tcache_max, &sz, NULL, 0);
114832-	expect_d_eq(err, 0, "");
114833-	test_skip_if(tcache_max == 0);
114834-
114835-	unsigned arena_ind;
114836-	sz = sizeof(arena_ind);
114837-	err = mallctl("thread.arena", &arena_ind, &sz, NULL, 0);
114838-	expect_d_eq(err, 0, "");
114839-
114840-	/* We're going to do an allocation of size 1, which we know is small. */
114841-	size_t mib[5];
114842-	miblen = sizeof(mib)/sizeof(mib[0]);
114843-	err = mallctlnametomib("stats.arenas.0.small.ndalloc", mib, &miblen);
114844-	expect_d_eq(err, 0, "");
114845-	mib[2] = arena_ind;
114846-
114847-	/*
114848-	 * This alloc and dalloc should leave something in the tcache, in a
114849-	 * small size's cache bin.
114850-	 */
114851-	void *ptr = mallocx(1, 0);
114852-	dallocx(ptr, 0);
114853-
114854-	uint64_t epoch;
114855-	err = mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
114856-	expect_d_eq(err, 0, "");
114857-
114858-	uint64_t small_dalloc_pre_idle;
114859-	sz = sizeof(small_dalloc_pre_idle);
114860-	err = mallctlbymib(mib, miblen, &small_dalloc_pre_idle, &sz, NULL, 0);
114861-	expect_d_eq(err, 0, "");
114862-
114863-	err = mallctl("thread.idle", NULL, NULL, NULL, 0);
114864-	expect_d_eq(err, 0, "");
114865-
114866-	err = mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
114867-	expect_d_eq(err, 0, "");
114868-
114869-	uint64_t small_dalloc_post_idle;
114870-	sz = sizeof(small_dalloc_post_idle);
114871-	err = mallctlbymib(mib, miblen, &small_dalloc_post_idle, &sz, NULL, 0);
114872-	expect_d_eq(err, 0, "");
114873-
114874-	expect_u64_lt(small_dalloc_pre_idle, small_dalloc_post_idle,
114875-	    "thread.idle didn't flush the tcache");
114876-}
114877-TEST_END
114878-
114879-TEST_BEGIN(test_thread_peak) {
114880-	test_skip_if(!config_stats);
114881-
114882-	/*
114883-	 * We don't commit to any stable amount of accuracy for peak tracking
114884-	 * (in practice, when this test was written, we made sure to be within
114885-	 * 100k).  But 10MB is big for more or less any definition of big.
114886-	 */
114887-	size_t big_size = 10 * 1024 * 1024;
114888-	size_t small_size = 256;
114889-
114890-	void *ptr;
114891-	int err;
114892-	size_t sz;
114893-	uint64_t peak;
114894-	sz = sizeof(uint64_t);
114895-
114896-	err = mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
114897-	expect_d_eq(err, 0, "");
114898-	ptr = mallocx(SC_SMALL_MAXCLASS, 0);
114899-	err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
114900-	expect_d_eq(err, 0, "");
114901-	expect_u64_eq(peak, SC_SMALL_MAXCLASS, "Missed an update");
114902-	free(ptr);
114903-	err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
114904-	expect_d_eq(err, 0, "");
114905-	expect_u64_eq(peak, SC_SMALL_MAXCLASS, "Freeing changed peak");
114906-	ptr = mallocx(big_size, 0);
114907-	free(ptr);
114908-	/*
114909-	 * The peak should have hit big_size in the last two lines, even though
114910-	 * the net allocated bytes has since dropped back down to zero.  We
114911-	 * should have noticed the peak change without having done any mallctl
114912-	 * calls while net allocated bytes was high.
114913-	 */
114914-	err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
114915-	expect_d_eq(err, 0, "");
114916-	expect_u64_ge(peak, big_size, "Missed a peak change.");
114917-
114918-	/* Allocate big_size, but using small allocations. */
114919-	size_t nallocs = big_size / small_size;
114920-	void **ptrs = calloc(nallocs, sizeof(void *));
114921-	err = mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
114922-	expect_d_eq(err, 0, "");
114923-	err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
114924-	expect_d_eq(err, 0, "");
114925-	expect_u64_eq(0, peak, "Missed a reset.");
114926-	for (size_t i = 0; i < nallocs; i++) {
114927-		ptrs[i] = mallocx(small_size, 0);
114928-	}
114929-	for (size_t i = 0; i < nallocs; i++) {
114930-		free(ptrs[i]);
114931-	}
114932-	err = mallctl("thread.peak.read", &peak, &sz, NULL, 0);
114933-	expect_d_eq(err, 0, "");
114934-	/*
114935-	 * We don't guarantee exactness; make sure we're within 10% of the peak,
114936-	 * though.
114937-	 */
114938-	expect_u64_ge(peak, nallocx(small_size, 0) * nallocs * 9 / 10,
114939-	    "Missed some peak changes.");
114940-	expect_u64_le(peak, nallocx(small_size, 0) * nallocs * 11 / 10,
114941-	    "Overcounted peak changes.");
114942-	free(ptrs);
114943-}
114944-TEST_END
114945-
114946-typedef struct activity_test_data_s activity_test_data_t;
114947-struct activity_test_data_s {
114948-	uint64_t obtained_alloc;
114949-	uint64_t obtained_dalloc;
114950-};
114951-
114952-static void
114953-activity_test_callback(void *uctx, uint64_t alloc, uint64_t dalloc) {
114954-	activity_test_data_t *test_data = (activity_test_data_t *)uctx;
114955-	test_data->obtained_alloc = alloc;
114956-	test_data->obtained_dalloc = dalloc;
114957-}
114958-
114959-TEST_BEGIN(test_thread_activity_callback) {
114960-	test_skip_if(!config_stats);
114961-
114962-	const size_t big_size = 10 * 1024 * 1024;
114963-	void *ptr;
114964-	int err;
114965-	size_t sz;
114966-
114967-	uint64_t *allocatedp;
114968-	uint64_t *deallocatedp;
114969-	sz = sizeof(allocatedp);
114970-	err = mallctl("thread.allocatedp", &allocatedp, &sz, NULL, 0);
114971-	assert_d_eq(0, err, "");
114972-	err = mallctl("thread.deallocatedp", &deallocatedp, &sz, NULL, 0);
114973-	assert_d_eq(0, err, "");
114974-
114975-	activity_callback_thunk_t old_thunk = {(activity_callback_t)111,
114976-		(void *)222};
114977-
114978-	activity_test_data_t test_data = {333, 444};
114979-	activity_callback_thunk_t new_thunk =
114980-	    {&activity_test_callback, &test_data};
114981-
114982-	sz = sizeof(old_thunk);
114983-	err = mallctl("experimental.thread.activity_callback", &old_thunk, &sz,
114984-	    &new_thunk, sizeof(new_thunk));
114985-	assert_d_eq(0, err, "");
114986-
114987-	expect_true(old_thunk.callback == NULL, "Callback already installed");
114988-	expect_true(old_thunk.uctx == NULL, "Callback data already installed");
114989-
114990-	ptr = mallocx(big_size, 0);
114991-	expect_u64_eq(test_data.obtained_alloc, *allocatedp, "");
114992-	expect_u64_eq(test_data.obtained_dalloc, *deallocatedp, "");
114993-
114994-	free(ptr);
114995-	expect_u64_eq(test_data.obtained_alloc, *allocatedp, "");
114996-	expect_u64_eq(test_data.obtained_dalloc, *deallocatedp, "");
114997-
114998-	sz = sizeof(old_thunk);
114999-	new_thunk = (activity_callback_thunk_t){ NULL, NULL };
115000-	err = mallctl("experimental.thread.activity_callback", &old_thunk, &sz,
115001-	    &new_thunk, sizeof(new_thunk));
115002-	assert_d_eq(0, err, "");
115003-
115004-	expect_true(old_thunk.callback == &activity_test_callback, "");
115005-	expect_true(old_thunk.uctx == &test_data, "");
115006-
115007-	/* Inserting NULL should have turned off tracking. */
115008-	test_data.obtained_alloc = 333;
115009-	test_data.obtained_dalloc = 444;
115010-	ptr = mallocx(big_size, 0);
115011-	free(ptr);
115012-	expect_u64_eq(333, test_data.obtained_alloc, "");
115013-	expect_u64_eq(444, test_data.obtained_dalloc, "");
115014-}
115015-TEST_END
115016-
115017-int
115018-main(void) {
115019-	return test(
115020-	    test_mallctl_errors,
115021-	    test_mallctlnametomib_errors,
115022-	    test_mallctlbymib_errors,
115023-	    test_mallctl_read_write,
115024-	    test_mallctlnametomib_short_mib,
115025-	    test_mallctlnametomib_short_name,
115026-	    test_mallctlmibnametomib,
115027-	    test_mallctlbymibname,
115028-	    test_mallctl_config,
115029-	    test_mallctl_opt,
115030-	    test_manpage_example,
115031-	    test_tcache_none,
115032-	    test_tcache,
115033-	    test_thread_arena,
115034-	    test_arena_i_initialized,
115035-	    test_arena_i_dirty_decay_ms,
115036-	    test_arena_i_muzzy_decay_ms,
115037-	    test_arena_i_purge,
115038-	    test_arena_i_decay,
115039-	    test_arena_i_dss,
115040-	    test_arena_i_retain_grow_limit,
115041-	    test_arenas_dirty_decay_ms,
115042-	    test_arenas_muzzy_decay_ms,
115043-	    test_arenas_constants,
115044-	    test_arenas_bin_constants,
115045-	    test_arenas_lextent_constants,
115046-	    test_arenas_create,
115047-	    test_arenas_lookup,
115048-	    test_prof_active,
115049-	    test_stats_arenas,
115050-	    test_hooks,
115051-	    test_hooks_exhaustion,
115052-	    test_thread_idle,
115053-	    test_thread_peak,
115054-	    test_thread_activity_callback);
115055-}
115056diff --git a/jemalloc/test/unit/malloc_conf_2.c b/jemalloc/test/unit/malloc_conf_2.c
115057deleted file mode 100644
115058index ecfa499..0000000
115059--- a/jemalloc/test/unit/malloc_conf_2.c
115060+++ /dev/null
115061@@ -1,29 +0,0 @@
115062-#include "test/jemalloc_test.h"
115063-
115064-const char *malloc_conf = "dirty_decay_ms:1000";
115065-const char *malloc_conf_2_conf_harder = "dirty_decay_ms:1234";
115066-
115067-TEST_BEGIN(test_malloc_conf_2) {
115068-#ifdef _WIN32
115069-	bool windows = true;
115070-#else
115071-	bool windows = false;
115072-#endif
115073-	/* Windows doesn't support weak symbol linker trickery. */
115074-	test_skip_if(windows);
115075-
115076-	ssize_t dirty_decay_ms;
115077-	size_t sz = sizeof(dirty_decay_ms);
115078-
115079-	int err = mallctl("opt.dirty_decay_ms", &dirty_decay_ms, &sz, NULL, 0);
115080-	assert_d_eq(err, 0, "Unexpected mallctl failure");
115081-	expect_zd_eq(dirty_decay_ms, 1234,
115082-	    "malloc_conf_2 setting didn't take effect");
115083-}
115084-TEST_END
115085-
115086-int
115087-main(void) {
115088-	return test(
115089-	    test_malloc_conf_2);
115090-}
115091diff --git a/jemalloc/test/unit/malloc_conf_2.sh b/jemalloc/test/unit/malloc_conf_2.sh
115092deleted file mode 100644
115093index 2c780f1..0000000
115094--- a/jemalloc/test/unit/malloc_conf_2.sh
115095+++ /dev/null
115096@@ -1 +0,0 @@
115097-export MALLOC_CONF="dirty_decay_ms:500"
115098diff --git a/jemalloc/test/unit/malloc_io.c b/jemalloc/test/unit/malloc_io.c
115099deleted file mode 100644
115100index 385f745..0000000
115101--- a/jemalloc/test/unit/malloc_io.c
115102+++ /dev/null
115103@@ -1,268 +0,0 @@
115104-#include "test/jemalloc_test.h"
115105-
115106-TEST_BEGIN(test_malloc_strtoumax_no_endptr) {
115107-	int err;
115108-
115109-	set_errno(0);
115110-	expect_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result");
115111-	err = get_errno();
115112-	expect_d_eq(err, 0, "Unexpected failure");
115113-}
115114-TEST_END
115115-
115116-TEST_BEGIN(test_malloc_strtoumax) {
115117-	struct test_s {
115118-		const char *input;
115119-		const char *expected_remainder;
115120-		int base;
115121-		int expected_errno;
115122-		const char *expected_errno_name;
115123-		uintmax_t expected_x;
115124-	};
115125-#define ERR(e)		e, #e
115126-#define KUMAX(x)	((uintmax_t)x##ULL)
115127-#define KSMAX(x)	((uintmax_t)(intmax_t)x##LL)
115128-	struct test_s tests[] = {
115129-		{"0",		"0",	-1,	ERR(EINVAL),	UINTMAX_MAX},
115130-		{"0",		"0",	1,	ERR(EINVAL),	UINTMAX_MAX},
115131-		{"0",		"0",	37,	ERR(EINVAL),	UINTMAX_MAX},
115132-
115133-		{"",		"",	0,	ERR(EINVAL),	UINTMAX_MAX},
115134-		{"+",		"+",	0,	ERR(EINVAL),	UINTMAX_MAX},
115135-		{"++3",		"++3",	0,	ERR(EINVAL),	UINTMAX_MAX},
115136-		{"-",		"-",	0,	ERR(EINVAL),	UINTMAX_MAX},
115137-
115138-		{"42",		"",	0,	ERR(0),		KUMAX(42)},
115139-		{"+42",		"",	0,	ERR(0),		KUMAX(42)},
115140-		{"-42",		"",	0,	ERR(0),		KSMAX(-42)},
115141-		{"042",		"",	0,	ERR(0),		KUMAX(042)},
115142-		{"+042",	"",	0,	ERR(0),		KUMAX(042)},
115143-		{"-042",	"",	0,	ERR(0),		KSMAX(-042)},
115144-		{"0x42",	"",	0,	ERR(0),		KUMAX(0x42)},
115145-		{"+0x42",	"",	0,	ERR(0),		KUMAX(0x42)},
115146-		{"-0x42",	"",	0,	ERR(0),		KSMAX(-0x42)},
115147-
115148-		{"0",		"",	0,	ERR(0),		KUMAX(0)},
115149-		{"1",		"",	0,	ERR(0),		KUMAX(1)},
115150-
115151-		{"42",		"",	0,	ERR(0),		KUMAX(42)},
115152-		{" 42",		"",	0,	ERR(0),		KUMAX(42)},
115153-		{"42 ",		" ",	0,	ERR(0),		KUMAX(42)},
115154-		{"0x",		"x",	0,	ERR(0),		KUMAX(0)},
115155-		{"42x",		"x",	0,	ERR(0),		KUMAX(42)},
115156-
115157-		{"07",		"",	0,	ERR(0),		KUMAX(7)},
115158-		{"010",		"",	0,	ERR(0),		KUMAX(8)},
115159-		{"08",		"8",	0,	ERR(0),		KUMAX(0)},
115160-		{"0_",		"_",	0,	ERR(0),		KUMAX(0)},
115161-
115162-		{"0x",		"x",	0,	ERR(0),		KUMAX(0)},
115163-		{"0X",		"X",	0,	ERR(0),		KUMAX(0)},
115164-		{"0xg",		"xg",	0,	ERR(0),		KUMAX(0)},
115165-		{"0XA",		"",	0,	ERR(0),		KUMAX(10)},
115166-
115167-		{"010",		"",	10,	ERR(0),		KUMAX(10)},
115168-		{"0x3",		"x3",	10,	ERR(0),		KUMAX(0)},
115169-
115170-		{"12",		"2",	2,	ERR(0),		KUMAX(1)},
115171-		{"78",		"8",	8,	ERR(0),		KUMAX(7)},
115172-		{"9a",		"a",	10,	ERR(0),		KUMAX(9)},
115173-		{"9A",		"A",	10,	ERR(0),		KUMAX(9)},
115174-		{"fg",		"g",	16,	ERR(0),		KUMAX(15)},
115175-		{"FG",		"G",	16,	ERR(0),		KUMAX(15)},
115176-		{"0xfg",	"g",	16,	ERR(0),		KUMAX(15)},
115177-		{"0XFG",	"G",	16,	ERR(0),		KUMAX(15)},
115178-		{"z_",		"_",	36,	ERR(0),		KUMAX(35)},
115179-		{"Z_",		"_",	36,	ERR(0),		KUMAX(35)}
115180-	};
115181-#undef ERR
115182-#undef KUMAX
115183-#undef KSMAX
115184-	unsigned i;
115185-
115186-	for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) {
115187-		struct test_s *test = &tests[i];
115188-		int err;
115189-		uintmax_t result;
115190-		char *remainder;
115191-
115192-		set_errno(0);
115193-		result = malloc_strtoumax(test->input, &remainder, test->base);
115194-		err = get_errno();
115195-		expect_d_eq(err, test->expected_errno,
115196-		    "Expected errno %s for \"%s\", base %d",
115197-		    test->expected_errno_name, test->input, test->base);
115198-		expect_str_eq(remainder, test->expected_remainder,
115199-		    "Unexpected remainder for \"%s\", base %d",
115200-		    test->input, test->base);
115201-		if (err == 0) {
115202-			expect_ju_eq(result, test->expected_x,
115203-			    "Unexpected result for \"%s\", base %d",
115204-			    test->input, test->base);
115205-		}
115206-	}
115207-}
115208-TEST_END
115209-
115210-TEST_BEGIN(test_malloc_snprintf_truncated) {
115211-#define BUFLEN	15
115212-	char buf[BUFLEN];
115213-	size_t result;
115214-	size_t len;
115215-#define TEST(expected_str_untruncated, ...) do {			\
115216-	result = malloc_snprintf(buf, len, __VA_ARGS__);		\
115217-	expect_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0,	\
115218-	    "Unexpected string inequality (\"%s\" vs \"%s\")",		\
115219-	    buf, expected_str_untruncated);				\
115220-	expect_zu_eq(result, strlen(expected_str_untruncated),		\
115221-	    "Unexpected result");					\
115222-} while (0)
115223-
115224-	for (len = 1; len < BUFLEN; len++) {
115225-		TEST("012346789",	"012346789");
115226-		TEST("a0123b",		"a%sb", "0123");
115227-		TEST("a01234567",	"a%s%s", "0123", "4567");
115228-		TEST("a0123  ",		"a%-6s", "0123");
115229-		TEST("a  0123",		"a%6s", "0123");
115230-		TEST("a   012",		"a%6.3s", "0123");
115231-		TEST("a   012",		"a%*.*s", 6, 3, "0123");
115232-		TEST("a 123b",		"a% db", 123);
115233-		TEST("a123b",		"a%-db", 123);
115234-		TEST("a-123b",		"a%-db", -123);
115235-		TEST("a+123b",		"a%+db", 123);
115236-	}
115237-#undef BUFLEN
115238-#undef TEST
115239-}
115240-TEST_END
115241-
115242-TEST_BEGIN(test_malloc_snprintf) {
115243-#define BUFLEN	128
115244-	char buf[BUFLEN];
115245-	size_t result;
115246-#define TEST(expected_str, ...) do {					\
115247-	result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__);	\
115248-	expect_str_eq(buf, expected_str, "Unexpected output");		\
115249-	expect_zu_eq(result, strlen(expected_str), "Unexpected result");\
115250-} while (0)
115251-
115252-	TEST("hello", "hello");
115253-
115254-	TEST("50%, 100%", "50%%, %d%%", 100);
115255-
115256-	TEST("a0123b", "a%sb", "0123");
115257-
115258-	TEST("a 0123b", "a%5sb", "0123");
115259-	TEST("a 0123b", "a%*sb", 5, "0123");
115260-
115261-	TEST("a0123 b", "a%-5sb", "0123");
115262-	TEST("a0123b", "a%*sb", -1, "0123");
115263-	TEST("a0123 b", "a%*sb", -5, "0123");
115264-	TEST("a0123 b", "a%-*sb", -5, "0123");
115265-
115266-	TEST("a012b", "a%.3sb", "0123");
115267-	TEST("a012b", "a%.*sb", 3, "0123");
115268-	TEST("a0123b", "a%.*sb", -3, "0123");
115269-
115270-	TEST("a  012b", "a%5.3sb", "0123");
115271-	TEST("a  012b", "a%5.*sb", 3, "0123");
115272-	TEST("a  012b", "a%*.3sb", 5, "0123");
115273-	TEST("a  012b", "a%*.*sb", 5, 3, "0123");
115274-	TEST("a 0123b", "a%*.*sb", 5, -3, "0123");
115275-
115276-	TEST("_abcd_", "_%x_", 0xabcd);
115277-	TEST("_0xabcd_", "_%#x_", 0xabcd);
115278-	TEST("_1234_", "_%o_", 01234);
115279-	TEST("_01234_", "_%#o_", 01234);
115280-	TEST("_1234_", "_%u_", 1234);
115281-	TEST("01234", "%05u", 1234);
115282-
115283-	TEST("_1234_", "_%d_", 1234);
115284-	TEST("_ 1234_", "_% d_", 1234);
115285-	TEST("_+1234_", "_%+d_", 1234);
115286-	TEST("_-1234_", "_%d_", -1234);
115287-	TEST("_-1234_", "_% d_", -1234);
115288-	TEST("_-1234_", "_%+d_", -1234);
115289-
115290-	/*
115291-	 * Morally, we should test these too, but 0-padded signed types are not
115292-	 * yet supported.
115293-	 *
115294-	 * TEST("01234", "%05d", 1234);
115295-	 * TEST("-1234", "%05d", -1234);
115296-	 * TEST("-01234", "%06d", -1234);
115297-	 */
115298-
115299-	TEST("_-1234_", "_%d_", -1234);
115300-	TEST("_1234_", "_%d_", 1234);
115301-	TEST("_-1234_", "_%i_", -1234);
115302-	TEST("_1234_", "_%i_", 1234);
115303-	TEST("_01234_", "_%#o_", 01234);
115304-	TEST("_1234_", "_%u_", 1234);
115305-	TEST("_0x1234abc_", "_%#x_", 0x1234abc);
115306-	TEST("_0X1234ABC_", "_%#X_", 0x1234abc);
115307-	TEST("_c_", "_%c_", 'c');
115308-	TEST("_string_", "_%s_", "string");
115309-	TEST("_0x42_", "_%p_", ((void *)0x42));
115310-
115311-	TEST("_-1234_", "_%ld_", ((long)-1234));
115312-	TEST("_1234_", "_%ld_", ((long)1234));
115313-	TEST("_-1234_", "_%li_", ((long)-1234));
115314-	TEST("_1234_", "_%li_", ((long)1234));
115315-	TEST("_01234_", "_%#lo_", ((long)01234));
115316-	TEST("_1234_", "_%lu_", ((long)1234));
115317-	TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc));
115318-	TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC));
115319-
115320-	TEST("_-1234_", "_%lld_", ((long long)-1234));
115321-	TEST("_1234_", "_%lld_", ((long long)1234));
115322-	TEST("_-1234_", "_%lli_", ((long long)-1234));
115323-	TEST("_1234_", "_%lli_", ((long long)1234));
115324-	TEST("_01234_", "_%#llo_", ((long long)01234));
115325-	TEST("_1234_", "_%llu_", ((long long)1234));
115326-	TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc));
115327-	TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC));
115328-
115329-	TEST("_-1234_", "_%qd_", ((long long)-1234));
115330-	TEST("_1234_", "_%qd_", ((long long)1234));
115331-	TEST("_-1234_", "_%qi_", ((long long)-1234));
115332-	TEST("_1234_", "_%qi_", ((long long)1234));
115333-	TEST("_01234_", "_%#qo_", ((long long)01234));
115334-	TEST("_1234_", "_%qu_", ((long long)1234));
115335-	TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc));
115336-	TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC));
115337-
115338-	TEST("_-1234_", "_%jd_", ((intmax_t)-1234));
115339-	TEST("_1234_", "_%jd_", ((intmax_t)1234));
115340-	TEST("_-1234_", "_%ji_", ((intmax_t)-1234));
115341-	TEST("_1234_", "_%ji_", ((intmax_t)1234));
115342-	TEST("_01234_", "_%#jo_", ((intmax_t)01234));
115343-	TEST("_1234_", "_%ju_", ((intmax_t)1234));
115344-	TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc));
115345-	TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC));
115346-
115347-	TEST("_1234_", "_%td_", ((ptrdiff_t)1234));
115348-	TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234));
115349-	TEST("_1234_", "_%ti_", ((ptrdiff_t)1234));
115350-	TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234));
115351-
115352-	TEST("_-1234_", "_%zd_", ((ssize_t)-1234));
115353-	TEST("_1234_", "_%zd_", ((ssize_t)1234));
115354-	TEST("_-1234_", "_%zi_", ((ssize_t)-1234));
115355-	TEST("_1234_", "_%zi_", ((ssize_t)1234));
115356-	TEST("_01234_", "_%#zo_", ((ssize_t)01234));
115357-	TEST("_1234_", "_%zu_", ((ssize_t)1234));
115358-	TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc));
115359-	TEST("_0X1234ABC_", "_%#zX_", ((ssize_t)0x1234ABC));
115360-#undef BUFLEN
115361-}
115362-TEST_END
115363-
115364-int
115365-main(void) {
115366-	return test(
115367-	    test_malloc_strtoumax_no_endptr,
115368-	    test_malloc_strtoumax,
115369-	    test_malloc_snprintf_truncated,
115370-	    test_malloc_snprintf);
115371-}
115372diff --git a/jemalloc/test/unit/math.c b/jemalloc/test/unit/math.c
115373deleted file mode 100644
115374index a32767c..0000000
115375--- a/jemalloc/test/unit/math.c
115376+++ /dev/null
115377@@ -1,390 +0,0 @@
115378-#include "test/jemalloc_test.h"
115379-
115380-#define MAX_REL_ERR 1.0e-9
115381-#define MAX_ABS_ERR 1.0e-9
115382-
115383-#include <float.h>
115384-
115385-#ifdef __PGI
115386-#undef INFINITY
115387-#endif
115388-
115389-#ifndef INFINITY
115390-#define INFINITY (DBL_MAX + DBL_MAX)
115391-#endif
115392-
115393-static bool
115394-double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) {
115395-	double rel_err;
115396-
115397-	if (fabs(a - b) < max_abs_err) {
115398-		return true;
115399-	}
115400-	rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a);
115401-	return (rel_err < max_rel_err);
115402-}
115403-
115404-static uint64_t
115405-factorial(unsigned x) {
115406-	uint64_t ret = 1;
115407-	unsigned i;
115408-
115409-	for (i = 2; i <= x; i++) {
115410-		ret *= (uint64_t)i;
115411-	}
115412-
115413-	return ret;
115414-}
115415-
115416-TEST_BEGIN(test_ln_gamma_factorial) {
115417-	unsigned x;
115418-
115419-	/* exp(ln_gamma(x)) == (x-1)! for integer x. */
115420-	for (x = 1; x <= 21; x++) {
115421-		expect_true(double_eq_rel(exp(ln_gamma(x)),
115422-		    (double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR),
115423-		    "Incorrect factorial result for x=%u", x);
115424-	}
115425-}
115426-TEST_END
115427-
115428-/* Expected ln_gamma([0.0..100.0] increment=0.25). */
115429-static const double ln_gamma_misc_expected[] = {
115430-	INFINITY,
115431-	1.28802252469807743, 0.57236494292470008, 0.20328095143129538,
115432-	0.00000000000000000, -0.09827183642181320, -0.12078223763524518,
115433-	-0.08440112102048555, 0.00000000000000000, 0.12487171489239651,
115434-	0.28468287047291918, 0.47521466691493719, 0.69314718055994529,
115435-	0.93580193110872523, 1.20097360234707429, 1.48681557859341718,
115436-	1.79175946922805496, 2.11445692745037128, 2.45373657084244234,
115437-	2.80857141857573644, 3.17805383034794575, 3.56137591038669710,
115438-	3.95781396761871651, 4.36671603662228680, 4.78749174278204581,
115439-	5.21960398699022932, 5.66256205985714178, 6.11591589143154568,
115440-	6.57925121201010121, 7.05218545073853953, 7.53436423675873268,
115441-	8.02545839631598312, 8.52516136106541467, 9.03318691960512332,
115442-	9.54926725730099690, 10.07315123968123949, 10.60460290274525086,
115443-	11.14340011995171231, 11.68933342079726856, 12.24220494005076176,
115444-	12.80182748008146909, 13.36802367147604720, 13.94062521940376342,
115445-	14.51947222506051816, 15.10441257307551943, 15.69530137706046524,
115446-	16.29200047656724237, 16.89437797963419285, 17.50230784587389010,
115447-	18.11566950571089407, 18.73434751193644843, 19.35823122022435427,
115448-	19.98721449566188468, 20.62119544270163018, 21.26007615624470048,
115449-	21.90376249182879320, 22.55216385312342098, 23.20519299513386002,
115450-	23.86276584168908954, 24.52480131594137802, 25.19122118273868338,
115451-	25.86194990184851861, 26.53691449111561340, 27.21604439872720604,
115452-	27.89927138384089389, 28.58652940490193828, 29.27775451504081516,
115453-	29.97288476399884871, 30.67186010608067548, 31.37462231367769050,
115454-	32.08111489594735843, 32.79128302226991565, 33.50507345013689076,
115455-	34.22243445715505317, 34.94331577687681545, 35.66766853819134298,
115456-	36.39544520803305261, 37.12659953718355865, 37.86108650896109395,
115457-	38.59886229060776230, 39.33988418719949465, 40.08411059791735198,
115458-	40.83150097453079752, 41.58201578195490100, 42.33561646075348506,
115459-	43.09226539146988699, 43.85192586067515208, 44.61456202863158893,
115460-	45.38013889847690052, 46.14862228684032885, 46.91997879580877395,
115461-	47.69417578616628361, 48.47118135183522014, 49.25096429545256882,
115462-	50.03349410501914463, 50.81874093156324790, 51.60667556776436982,
115463-	52.39726942748592364, 53.19049452616926743, 53.98632346204390586,
115464-	54.78472939811231157, 55.58568604486942633, 56.38916764371992940,
115465-	57.19514895105859864, 58.00360522298051080, 58.81451220059079787,
115466-	59.62784609588432261, 60.44358357816834371, 61.26170176100199427,
115467-	62.08217818962842927, 62.90499082887649962, 63.73011805151035958,
115468-	64.55753862700632340, 65.38723171073768015, 66.21917683354901385,
115469-	67.05335389170279825, 67.88974313718154008, 68.72832516833013017,
115470-	69.56908092082363737, 70.41199165894616385, 71.25703896716800045,
115471-	72.10420474200799390, 72.95347118416940191, 73.80482079093779646,
115472-	74.65823634883015814, 75.51370092648485866, 76.37119786778275454,
115473-	77.23071078519033961, 78.09222355331530707, 78.95572030266725960,
115474-	79.82118541361435859, 80.68860351052903468, 81.55795945611502873,
115475-	82.42923834590904164, 83.30242550295004378, 84.17750647261028973,
115476-	85.05446701758152983, 85.93329311301090456, 86.81397094178107920,
115477-	87.69648688992882057, 88.58082754219766741, 89.46697967771913795,
115478-	90.35493026581838194, 91.24466646193963015, 92.13617560368709292,
115479-	93.02944520697742803, 93.92446296229978486, 94.82121673107967297,
115480-	95.71969454214321615, 96.61988458827809723, 97.52177522288820910,
115481-	98.42535495673848800, 99.33061245478741341, 100.23753653310367895,
115482-	101.14611615586458981, 102.05634043243354370, 102.96819861451382394,
115483-	103.88168009337621811, 104.79677439715833032, 105.71347118823287303,
115484-	106.63176026064346047, 107.55163153760463501, 108.47307506906540198,
115485-	109.39608102933323153, 110.32063971475740516, 111.24674154146920557,
115486-	112.17437704317786995, 113.10353686902013237, 114.03421178146170689,
115487-	114.96639265424990128, 115.90007047041454769, 116.83523632031698014,
115488-	117.77188139974506953, 118.70999700805310795, 119.64957454634490830,
115489-	120.59060551569974962, 121.53308151543865279, 122.47699424143097247,
115490-	123.42233548443955726, 124.36909712850338394, 125.31727114935689826,
115491-	126.26684961288492559, 127.21782467361175861, 128.17018857322420899,
115492-	129.12393363912724453, 130.07905228303084755, 131.03553699956862033,
115493-	131.99338036494577864, 132.95257503561629164, 133.91311374698926784,
115494-	134.87498931216194364, 135.83819462068046846, 136.80272263732638294,
115495-	137.76856640092901785, 138.73571902320256299, 139.70417368760718091,
115496-	140.67392364823425055, 141.64496222871400732, 142.61728282114600574,
115497-	143.59087888505104047, 144.56574394634486680, 145.54187159633210058,
115498-	146.51925549072063859, 147.49788934865566148, 148.47776695177302031,
115499-	149.45888214327129617, 150.44122882700193600, 151.42480096657754984,
115500-	152.40959258449737490, 153.39559776128982094, 154.38281063467164245,
115501-	155.37122539872302696, 156.36083630307879844, 157.35163765213474107,
115502-	158.34362380426921391, 159.33678917107920370, 160.33112821663092973,
115503-	161.32663545672428995, 162.32330545817117695, 163.32113283808695314,
115504-	164.32011226319519892, 165.32023844914485267, 166.32150615984036790,
115505-	167.32391020678358018, 168.32744544842768164, 169.33210678954270634,
115506-	170.33788918059275375, 171.34478761712384198, 172.35279713916281707,
115507-	173.36191283062726143, 174.37212981874515094, 175.38344327348534080,
115508-	176.39584840699734514, 177.40934047306160437, 178.42391476654847793,
115509-	179.43956662288721304, 180.45629141754378111, 181.47408456550741107,
115510-	182.49294152078630304, 183.51285777591152737, 184.53382886144947861,
115511-	185.55585034552262869, 186.57891783333786861, 187.60302696672312095,
115512-	188.62817342367162610, 189.65435291789341932, 190.68156119837468054,
115513-	191.70979404894376330, 192.73904728784492590, 193.76931676731820176,
115514-	194.80059837318714244, 195.83288802445184729, 196.86618167288995096,
115515-	197.90047530266301123, 198.93576492992946214, 199.97204660246373464,
115516-	201.00931639928148797, 202.04757043027063901, 203.08680483582807597,
115517-	204.12701578650228385, 205.16819948264117102, 206.21035215404597807,
115518-	207.25347005962987623, 208.29754948708190909, 209.34258675253678916,
115519-	210.38857820024875878, 211.43552020227099320, 212.48340915813977858,
115520-	213.53224149456323744, 214.58201366511514152, 215.63272214993284592,
115521-	216.68436345542014010, 217.73693411395422004, 218.79043068359703739,
115522-	219.84484974781133815, 220.90018791517996988, 221.95644181913033322,
115523-	223.01360811766215875, 224.07168349307951871, 225.13066465172661879,
115524-	226.19054832372759734, 227.25133126272962159, 228.31301024565024704,
115525-	229.37558207242807384, 230.43904356577689896, 231.50339157094342113,
115526-	232.56862295546847008, 233.63473460895144740, 234.70172344281823484,
115527-	235.76958639009222907, 236.83832040516844586, 237.90792246359117712,
115528-	238.97838956183431947, 240.04971871708477238, 241.12190696702904802,
115529-	242.19495136964280846, 243.26884900298270509, 244.34359696498191283,
115530-	245.41919237324782443, 246.49563236486270057, 247.57291409618682110,
115531-	248.65103474266476269, 249.72999149863338175, 250.80978157713354904,
115532-	251.89040220972316320, 252.97185064629374551, 254.05412415488834199,
115533-	255.13722002152300661, 256.22113555000953511, 257.30586806178126835,
115534-	258.39141489572085675, 259.47777340799029844, 260.56494097186322279,
115535-	261.65291497755913497, 262.74169283208021852, 263.83127195904967266,
115536-	264.92164979855277807, 266.01282380697938379, 267.10479145686849733,
115537-	268.19755023675537586, 269.29109765101975427, 270.38543121973674488,
115538-	271.48054847852881721, 272.57644697842033565, 273.67312428569374561,
115539-	274.77057798174683967, 275.86880566295326389, 276.96780494052313770,
115540-	278.06757344036617496, 279.16810880295668085, 280.26940868320008349,
115541-	281.37147075030043197, 282.47429268763045229, 283.57787219260217171,
115542-	284.68220697654078322, 285.78729476455760050, 286.89313329542699194,
115543-	287.99972032146268930, 289.10705360839756395, 290.21513093526289140,
115544-	291.32395009427028754, 292.43350889069523646, 293.54380514276073200,
115545-	294.65483668152336350, 295.76660135076059532, 296.87909700685889902,
115546-	297.99232151870342022, 299.10627276756946458, 300.22094864701409733,
115547-	301.33634706277030091, 302.45246593264130297, 303.56930318639643929,
115548-	304.68685676566872189, 305.80512462385280514, 306.92410472600477078,
115549-	308.04379504874236773, 309.16419358014690033, 310.28529831966631036,
115550-	311.40710727801865687, 312.52961847709792664, 313.65282994987899201,
115551-	314.77673974032603610, 315.90134590329950015, 317.02664650446632777,
115552-	318.15263962020929966, 319.27932333753892635, 320.40669575400545455,
115553-	321.53475497761127144, 322.66349912672620803, 323.79292633000159185,
115554-	324.92303472628691452, 326.05382246454587403, 327.18528770377525916,
115555-	328.31742861292224234, 329.45024337080525356, 330.58373016603343331,
115556-	331.71788719692847280, 332.85271267144611329, 333.98820480709991898,
115557-	335.12436183088397001, 336.26118197919845443, 337.39866349777429377,
115558-	338.53680464159958774, 339.67560367484657036, 340.81505887079896411,
115559-	341.95516851178109619, 343.09593088908627578, 344.23734430290727460,
115560-	345.37940706226686416, 346.52211748494903532, 347.66547389743118401,
115561-	348.80947463481720661, 349.95411804077025408, 351.09940246744753267,
115562-	352.24532627543504759, 353.39188783368263103, 354.53908551944078908,
115563-	355.68691771819692349, 356.83538282361303118, 357.98447923746385868,
115564-	359.13420536957539753
115565-};
115566-
115567-TEST_BEGIN(test_ln_gamma_misc) {
115568-	unsigned i;
115569-
115570-	for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) {
115571-		double x = (double)i * 0.25;
115572-		expect_true(double_eq_rel(ln_gamma(x),
115573-		    ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR),
115574-		    "Incorrect ln_gamma result for i=%u", i);
115575-	}
115576-}
115577-TEST_END
115578-
115579-/* Expected pt_norm([0.01..0.99] increment=0.01). */
115580-static const double pt_norm_expected[] = {
115581-	-INFINITY,
115582-	-2.32634787404084076, -2.05374891063182252, -1.88079360815125085,
115583-	-1.75068607125216946, -1.64485362695147264, -1.55477359459685305,
115584-	-1.47579102817917063, -1.40507156030963221, -1.34075503369021654,
115585-	-1.28155156554460081, -1.22652812003661049, -1.17498679206608991,
115586-	-1.12639112903880045, -1.08031934081495606, -1.03643338949378938,
115587-	-0.99445788320975281, -0.95416525314619416, -0.91536508784281390,
115588-	-0.87789629505122846, -0.84162123357291418, -0.80642124701824025,
115589-	-0.77219321418868492, -0.73884684918521371, -0.70630256284008752,
115590-	-0.67448975019608171, -0.64334540539291685, -0.61281299101662701,
115591-	-0.58284150727121620, -0.55338471955567281, -0.52440051270804067,
115592-	-0.49585034734745320, -0.46769879911450812, -0.43991316567323380,
115593-	-0.41246312944140462, -0.38532046640756751, -0.35845879325119373,
115594-	-0.33185334643681652, -0.30548078809939738, -0.27931903444745404,
115595-	-0.25334710313579978, -0.22754497664114931, -0.20189347914185077,
115596-	-0.17637416478086135, -0.15096921549677725, -0.12566134685507399,
115597-	-0.10043372051146975, -0.07526986209982976, -0.05015358346473352,
115598-	-0.02506890825871106, 0.00000000000000000, 0.02506890825871106,
115599-	0.05015358346473366, 0.07526986209982990, 0.10043372051146990,
115600-	0.12566134685507413, 0.15096921549677739, 0.17637416478086146,
115601-	0.20189347914185105, 0.22754497664114931, 0.25334710313579978,
115602-	0.27931903444745404, 0.30548078809939738, 0.33185334643681652,
115603-	0.35845879325119373, 0.38532046640756762, 0.41246312944140484,
115604-	0.43991316567323391, 0.46769879911450835, 0.49585034734745348,
115605-	0.52440051270804111, 0.55338471955567303, 0.58284150727121620,
115606-	0.61281299101662701, 0.64334540539291685, 0.67448975019608171,
115607-	0.70630256284008752, 0.73884684918521371, 0.77219321418868492,
115608-	0.80642124701824036, 0.84162123357291441, 0.87789629505122879,
115609-	0.91536508784281423, 0.95416525314619460, 0.99445788320975348,
115610-	1.03643338949378938, 1.08031934081495606, 1.12639112903880045,
115611-	1.17498679206608991, 1.22652812003661049, 1.28155156554460081,
115612-	1.34075503369021654, 1.40507156030963265, 1.47579102817917085,
115613-	1.55477359459685394, 1.64485362695147308, 1.75068607125217102,
115614-	1.88079360815125041, 2.05374891063182208, 2.32634787404084076
115615-};
115616-
115617-TEST_BEGIN(test_pt_norm) {
115618-	unsigned i;
115619-
115620-	for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) {
115621-		double p = (double)i * 0.01;
115622-		expect_true(double_eq_rel(pt_norm(p), pt_norm_expected[i],
115623-		    MAX_REL_ERR, MAX_ABS_ERR),
115624-		    "Incorrect pt_norm result for i=%u", i);
115625-	}
115626-}
115627-TEST_END
115628-
115629-/*
115630- * Expected pt_chi2(p=[0.01..0.99] increment=0.07,
115631- *                  df={0.1, 1.1, 10.1, 100.1, 1000.1}).
115632- */
115633-static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1};
115634-static const double pt_chi2_expected[] = {
115635-	1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17,
115636-	8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09,
115637-	5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05,
115638-	1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03,
115639-	4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00,
115640-
115641-	0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113,
115642-	0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931,
115643-	0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259,
115644-	0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304,
115645-	2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839,
115646-
115647-	2.606673548632508, 4.602913725294877, 5.646152813924212,
115648-	6.488971315540869, 7.249823275816285, 7.977314231410841,
115649-	8.700354939944047, 9.441728024225892, 10.224338321374127,
115650-	11.076435368801061, 12.039320937038386, 13.183878752697167,
115651-	14.657791935084575, 16.885728216339373, 23.361991680031817,
115652-
115653-	70.14844087392152, 80.92379498849355, 85.53325420085891,
115654-	88.94433120715347, 91.83732712857017, 94.46719943606301,
115655-	96.96896479994635, 99.43412843510363, 101.94074719829733,
115656-	104.57228644307247, 107.43900093448734, 110.71844673417287,
115657-	114.76616819871325, 120.57422505959563, 135.92318818757556,
115658-
115659-	899.0072447849649, 937.9271278858220, 953.8117189560207,
115660-	965.3079371501154, 974.8974061207954, 983.4936235182347,
115661-	991.5691170518946, 999.4334123954690, 1007.3391826856553,
115662-	1015.5445154999951, 1024.3777075619569, 1034.3538789836223,
115663-	1046.4872561869577, 1063.5717461999654, 1107.0741966053859
115664-};
115665-
115666-TEST_BEGIN(test_pt_chi2) {
115667-	unsigned i, j;
115668-	unsigned e = 0;
115669-
115670-	for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) {
115671-		double df = pt_chi2_df[i];
115672-		double ln_gamma_df = ln_gamma(df * 0.5);
115673-		for (j = 1; j < 100; j += 7) {
115674-			double p = (double)j * 0.01;
115675-			expect_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df),
115676-			    pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR),
115677-			    "Incorrect pt_chi2 result for i=%u, j=%u", i, j);
115678-			e++;
115679-		}
115680-	}
115681-}
115682-TEST_END
115683-
115684-/*
115685- * Expected pt_gamma(p=[0.01..0.99] increment=0.07,
115686- *                   shape=[0.5..3.0] increment=0.5).
115687- */
115688-static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0};
115689-static const double pt_gamma_expected[] = {
115690-	7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02,
115691-	3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01,
115692-	1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01,
115693-	4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01,
115694-	1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00,
115695-
115696-	0.01005033585350144, 0.08338160893905107, 0.16251892949777497,
115697-	0.24846135929849966, 0.34249030894677596, 0.44628710262841947,
115698-	0.56211891815354142, 0.69314718055994529, 0.84397007029452920,
115699-	1.02165124753198167, 1.23787435600161766, 1.51412773262977574,
115700-	1.89711998488588196, 2.52572864430825783, 4.60517018598809091,
115701-
115702-	0.05741590094955853, 0.24747378084860744, 0.39888572212236084,
115703-	0.54394139997444901, 0.69048812513915159, 0.84311389861296104,
115704-	1.00580622221479898, 1.18298694218766931, 1.38038096305861213,
115705-	1.60627736383027453, 1.87396970522337947, 2.20749220408081070,
115706-	2.65852391865854942, 3.37934630984842244, 5.67243336507218476,
115707-
115708-	0.1485547402532659, 0.4657458011640391, 0.6832386130709406,
115709-	0.8794297834672100, 1.0700752852474524, 1.2629614217350744,
115710-	1.4638400448580779, 1.6783469900166610, 1.9132338090606940,
115711-	2.1778589228618777, 2.4868823970010991, 2.8664695666264195,
115712-	3.3724415436062114, 4.1682658512758071, 6.6383520679938108,
115713-
115714-	0.2771490383641385, 0.7195001279643727, 0.9969081732265243,
115715-	1.2383497880608061, 1.4675206597269927, 1.6953064251816552,
115716-	1.9291243435606809, 2.1757300955477641, 2.4428032131216391,
115717-	2.7406534569230616, 3.0851445039665513, 3.5043101122033367,
115718-	4.0575997065264637, 4.9182956424675286, 7.5431362346944937,
115719-
115720-	0.4360451650782932, 0.9983600902486267, 1.3306365880734528,
115721-	1.6129750834753802, 1.8767241606994294, 2.1357032436097660,
115722-	2.3988853336865565, 2.6740603137235603, 2.9697561737517959,
115723-	3.2971457713883265, 3.6731795898504660, 4.1275751617770631,
115724-	4.7230515633946677, 5.6417477865306020, 8.4059469148854635
115725-};
115726-
115727-TEST_BEGIN(test_pt_gamma_shape) {
115728-	unsigned i, j;
115729-	unsigned e = 0;
115730-
115731-	for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) {
115732-		double shape = pt_gamma_shape[i];
115733-		double ln_gamma_shape = ln_gamma(shape);
115734-		for (j = 1; j < 100; j += 7) {
115735-			double p = (double)j * 0.01;
115736-			expect_true(double_eq_rel(pt_gamma(p, shape, 1.0,
115737-			    ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR,
115738-			    MAX_ABS_ERR),
115739-			    "Incorrect pt_gamma result for i=%u, j=%u", i, j);
115740-			e++;
115741-		}
115742-	}
115743-}
115744-TEST_END
115745-
115746-TEST_BEGIN(test_pt_gamma_scale) {
115747-	double shape = 1.0;
115748-	double ln_gamma_shape = ln_gamma(shape);
115749-
115750-	expect_true(double_eq_rel(
115751-	    pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0,
115752-	    pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR,
115753-	    MAX_ABS_ERR),
115754-	    "Scale should be trivially equivalent to external multiplication");
115755-}
115756-TEST_END
115757-
115758-int
115759-main(void) {
115760-	return test(
115761-	    test_ln_gamma_factorial,
115762-	    test_ln_gamma_misc,
115763-	    test_pt_norm,
115764-	    test_pt_chi2,
115765-	    test_pt_gamma_shape,
115766-	    test_pt_gamma_scale);
115767-}
115768diff --git a/jemalloc/test/unit/mpsc_queue.c b/jemalloc/test/unit/mpsc_queue.c
115769deleted file mode 100644
115770index 895edf8..0000000
115771--- a/jemalloc/test/unit/mpsc_queue.c
115772+++ /dev/null
115773@@ -1,304 +0,0 @@
115774-#include "test/jemalloc_test.h"
115775-
115776-#include "jemalloc/internal/mpsc_queue.h"
115777-
115778-typedef struct elem_s elem_t;
115779-typedef ql_head(elem_t) elem_list_t;
115780-typedef mpsc_queue(elem_t) elem_mpsc_queue_t;
115781-struct elem_s {
115782-	int thread;
115783-	int idx;
115784-	ql_elm(elem_t) link;
115785-};
115786-
115787-/* Include both proto and gen to make sure they match up. */
115788-mpsc_queue_proto(static, elem_mpsc_queue_, elem_mpsc_queue_t, elem_t,
115789-    elem_list_t);
115790-mpsc_queue_gen(static, elem_mpsc_queue_, elem_mpsc_queue_t, elem_t,
115791-    elem_list_t, link);
115792-
115793-static void
115794-init_elems_simple(elem_t *elems, int nelems, int thread) {
115795-	for (int i = 0; i < nelems; i++) {
115796-		elems[i].thread = thread;
115797-		elems[i].idx = i;
115798-		ql_elm_new(&elems[i], link);
115799-	}
115800-}
115801-
115802-static void
115803-check_elems_simple(elem_list_t *list, int nelems, int thread) {
115804-	elem_t *elem;
115805-	int next_idx = 0;
115806-	ql_foreach(elem, list, link) {
115807-		expect_d_lt(next_idx, nelems, "Too many list items");
115808-		expect_d_eq(thread, elem->thread, "");
115809-		expect_d_eq(next_idx, elem->idx, "List out of order");
115810-		next_idx++;
115811-	}
115812-}
115813-
115814-TEST_BEGIN(test_simple) {
115815-	enum {NELEMS = 10};
115816-	elem_t elems[NELEMS];
115817-	elem_list_t list;
115818-	elem_mpsc_queue_t queue;
115819-
115820-	/* Pop empty queue onto empty list -> empty list */
115821-	ql_new(&list);
115822-	elem_mpsc_queue_new(&queue);
115823-	elem_mpsc_queue_pop_batch(&queue, &list);
115824-	expect_true(ql_empty(&list), "");
115825-
115826-	/* Pop empty queue onto nonempty list -> list unchanged */
115827-	ql_new(&list);
115828-	elem_mpsc_queue_new(&queue);
115829-	init_elems_simple(elems, NELEMS, 0);
115830-	for (int i = 0; i < NELEMS; i++) {
115831-		ql_tail_insert(&list, &elems[i], link);
115832-	}
115833-	elem_mpsc_queue_pop_batch(&queue, &list);
115834-	check_elems_simple(&list, NELEMS, 0);
115835-
115836-	/* Pop nonempty queue onto empty list -> list takes queue contents */
115837-	ql_new(&list);
115838-	elem_mpsc_queue_new(&queue);
115839-	init_elems_simple(elems, NELEMS, 0);
115840-	for (int i = 0; i < NELEMS; i++) {
115841-		elem_mpsc_queue_push(&queue, &elems[i]);
115842-	}
115843-	elem_mpsc_queue_pop_batch(&queue, &list);
115844-	check_elems_simple(&list, NELEMS, 0);
115845-
115846-	/* Pop nonempty queue onto nonempty list -> list gains queue contents */
115847-	ql_new(&list);
115848-	elem_mpsc_queue_new(&queue);
115849-	init_elems_simple(elems, NELEMS, 0);
115850-	for (int i = 0; i < NELEMS / 2; i++) {
115851-		ql_tail_insert(&list, &elems[i], link);
115852-	}
115853-	for (int i = NELEMS / 2; i < NELEMS; i++) {
115854-		elem_mpsc_queue_push(&queue, &elems[i]);
115855-	}
115856-	elem_mpsc_queue_pop_batch(&queue, &list);
115857-	check_elems_simple(&list, NELEMS, 0);
115858-
115859-}
115860-TEST_END
115861-
115862-TEST_BEGIN(test_push_single_or_batch) {
115863-	enum {
115864-		BATCH_MAX = 10,
115865-		/*
115866-		 * We'll push i items one-at-a-time, then i items as a batch,
115867-		 * then i items as a batch again, as i ranges from 1 to
115868-		 * BATCH_MAX - 1.  So we need 3 times the sum of the numbers
115869-		 * from 1 to BATCH_MAX - 1 elements total.
115870-		 */
115871-		NELEMS = 3 * BATCH_MAX * (BATCH_MAX - 1) / 2
115872-	};
115873-	elem_t elems[NELEMS];
115874-	init_elems_simple(elems, NELEMS, 0);
115875-	elem_list_t list;
115876-	ql_new(&list);
115877-	elem_mpsc_queue_t queue;
115878-	elem_mpsc_queue_new(&queue);
115879-	int next_idx = 0;
115880-	for (int i = 1; i < BATCH_MAX; i++) {
115881-		/* Push i items 1 at a time. */
115882-		for (int j = 0; j < i; j++) {
115883-			elem_mpsc_queue_push(&queue, &elems[next_idx]);
115884-			next_idx++;
115885-		}
115886-		/* Push i items in batch. */
115887-		for (int j = 0; j < i; j++) {
115888-			ql_tail_insert(&list, &elems[next_idx], link);
115889-			next_idx++;
115890-		}
115891-		elem_mpsc_queue_push_batch(&queue, &list);
115892-		expect_true(ql_empty(&list), "Batch push should empty source");
115893-		/*
115894-		 * Push i items in batch, again.  This tests two batches
115895-		 * proceeding one after the other.
115896-		 */
115897-		for (int j = 0; j < i; j++) {
115898-			ql_tail_insert(&list, &elems[next_idx], link);
115899-			next_idx++;
115900-		}
115901-		elem_mpsc_queue_push_batch(&queue, &list);
115902-		expect_true(ql_empty(&list), "Batch push should empty source");
115903-	}
115904-	expect_d_eq(NELEMS, next_idx, "Miscomputed number of elems to push.");
115905-
115906-	expect_true(ql_empty(&list), "");
115907-	elem_mpsc_queue_pop_batch(&queue, &list);
115908-	check_elems_simple(&list, NELEMS, 0);
115909-}
115910-TEST_END
115911-
115912-TEST_BEGIN(test_multi_op) {
115913-	enum {NELEMS = 20};
115914-	elem_t elems[NELEMS];
115915-	init_elems_simple(elems, NELEMS, 0);
115916-	elem_list_t push_list;
115917-	ql_new(&push_list);
115918-	elem_list_t result_list;
115919-	ql_new(&result_list);
115920-	elem_mpsc_queue_t queue;
115921-	elem_mpsc_queue_new(&queue);
115922-
115923-	int next_idx = 0;
115924-	/* Push first quarter 1-at-a-time. */
115925-	for (int i = 0; i < NELEMS / 4; i++) {
115926-		elem_mpsc_queue_push(&queue, &elems[next_idx]);
115927-		next_idx++;
115928-	}
115929-	/* Push second quarter in batch. */
115930-	for (int i = NELEMS / 4; i < NELEMS / 2; i++) {
115931-		ql_tail_insert(&push_list, &elems[next_idx], link);
115932-		next_idx++;
115933-	}
115934-	elem_mpsc_queue_push_batch(&queue, &push_list);
115935-	/* Batch pop all pushed elements. */
115936-	elem_mpsc_queue_pop_batch(&queue, &result_list);
115937-	/* Push third quarter in batch. */
115938-	for (int i = NELEMS / 2; i < 3 * NELEMS / 4; i++) {
115939-		ql_tail_insert(&push_list, &elems[next_idx], link);
115940-		next_idx++;
115941-	}
115942-	elem_mpsc_queue_push_batch(&queue, &push_list);
115943-	/* Push last quarter one-at-a-time. */
115944-	for (int i = 3 * NELEMS / 4; i < NELEMS; i++) {
115945-		elem_mpsc_queue_push(&queue, &elems[next_idx]);
115946-		next_idx++;
115947-	}
115948-	/* Pop them again.  Order of existing list should be preserved. */
115949-	elem_mpsc_queue_pop_batch(&queue, &result_list);
115950-
115951-	check_elems_simple(&result_list, NELEMS, 0);
115952-
115953-}
115954-TEST_END
115955-
115956-typedef struct pusher_arg_s pusher_arg_t;
115957-struct pusher_arg_s {
115958-	elem_mpsc_queue_t *queue;
115959-	int thread;
115960-	elem_t *elems;
115961-	int nelems;
115962-};
115963-
115964-typedef struct popper_arg_s popper_arg_t;
115965-struct popper_arg_s {
115966-	elem_mpsc_queue_t *queue;
115967-	int npushers;
115968-	int nelems_per_pusher;
115969-	int *pusher_counts;
115970-};
115971-
115972-static void *
115973-thd_pusher(void *void_arg) {
115974-	pusher_arg_t *arg = (pusher_arg_t *)void_arg;
115975-	int next_idx = 0;
115976-	while (next_idx < arg->nelems) {
115977-		/* Push 10 items in batch. */
115978-		elem_list_t list;
115979-		ql_new(&list);
115980-		int limit = next_idx + 10;
115981-		while (next_idx < arg->nelems && next_idx < limit) {
115982-			ql_tail_insert(&list, &arg->elems[next_idx], link);
115983-			next_idx++;
115984-		}
115985-		elem_mpsc_queue_push_batch(arg->queue, &list);
115986-		/* Push 10 items one-at-a-time. */
115987-		limit = next_idx + 10;
115988-		while (next_idx < arg->nelems && next_idx < limit) {
115989-			elem_mpsc_queue_push(arg->queue, &arg->elems[next_idx]);
115990-			next_idx++;
115991-		}
115992-
115993-	}
115994-	return NULL;
115995-}
115996-
115997-static void *
115998-thd_popper(void *void_arg) {
115999-	popper_arg_t *arg = (popper_arg_t *)void_arg;
116000-	int done_pushers = 0;
116001-	while (done_pushers < arg->npushers) {
116002-		elem_list_t list;
116003-		ql_new(&list);
116004-		elem_mpsc_queue_pop_batch(arg->queue, &list);
116005-		elem_t *elem;
116006-		ql_foreach(elem, &list, link) {
116007-			int thread = elem->thread;
116008-			int idx = elem->idx;
116009-			expect_d_eq(arg->pusher_counts[thread], idx,
116010-			    "Thread's pushes reordered");
116011-			arg->pusher_counts[thread]++;
116012-			if (arg->pusher_counts[thread]
116013-			    == arg->nelems_per_pusher) {
116014-				done_pushers++;
116015-			}
116016-		}
116017-	}
116018-	return NULL;
116019-}
116020-
116021-TEST_BEGIN(test_multiple_threads) {
116022-	enum {
116023-		NPUSHERS = 4,
116024-		NELEMS_PER_PUSHER = 1000*1000,
116025-	};
116026-	thd_t pushers[NPUSHERS];
116027-	pusher_arg_t pusher_arg[NPUSHERS];
116028-
116029-	thd_t popper;
116030-	popper_arg_t popper_arg;
116031-
116032-	elem_mpsc_queue_t queue;
116033-	elem_mpsc_queue_new(&queue);
116034-
116035-	elem_t *elems = calloc(NPUSHERS * NELEMS_PER_PUSHER, sizeof(elem_t));
116036-	elem_t *elem_iter = elems;
116037-	for (int i = 0; i < NPUSHERS; i++) {
116038-		pusher_arg[i].queue = &queue;
116039-		pusher_arg[i].thread = i;
116040-		pusher_arg[i].elems = elem_iter;
116041-		pusher_arg[i].nelems = NELEMS_PER_PUSHER;
116042-
116043-		init_elems_simple(elem_iter, NELEMS_PER_PUSHER, i);
116044-		elem_iter += NELEMS_PER_PUSHER;
116045-	}
116046-	popper_arg.queue = &queue;
116047-	popper_arg.npushers = NPUSHERS;
116048-	popper_arg.nelems_per_pusher = NELEMS_PER_PUSHER;
116049-	int pusher_counts[NPUSHERS] = {0};
116050-	popper_arg.pusher_counts = pusher_counts;
116051-
116052-	thd_create(&popper, thd_popper, (void *)&popper_arg);
116053-	for (int i = 0; i < NPUSHERS; i++) {
116054-		thd_create(&pushers[i], thd_pusher, &pusher_arg[i]);
116055-	}
116056-
116057-	thd_join(popper, NULL);
116058-	for (int i = 0; i < NPUSHERS; i++) {
116059-		thd_join(pushers[i], NULL);
116060-	}
116061-
116062-	for (int i = 0; i < NPUSHERS; i++) {
116063-		expect_d_eq(NELEMS_PER_PUSHER, pusher_counts[i], "");
116064-	}
116065-
116066-	free(elems);
116067-}
116068-TEST_END
116069-
116070-int
116071-main(void) {
116072-	return test_no_reentrancy(
116073-	    test_simple,
116074-	    test_push_single_or_batch,
116075-	    test_multi_op,
116076-	    test_multiple_threads);
116077-}
116078diff --git a/jemalloc/test/unit/mq.c b/jemalloc/test/unit/mq.c
116079deleted file mode 100644
116080index f833f77..0000000
116081--- a/jemalloc/test/unit/mq.c
116082+++ /dev/null
116083@@ -1,89 +0,0 @@
116084-#include "test/jemalloc_test.h"
116085-
116086-#define NSENDERS	3
116087-#define NMSGS		100000
116088-
116089-typedef struct mq_msg_s mq_msg_t;
116090-struct mq_msg_s {
116091-	mq_msg(mq_msg_t)	link;
116092-};
116093-mq_gen(static, mq_, mq_t, mq_msg_t, link)
116094-
116095-TEST_BEGIN(test_mq_basic) {
116096-	mq_t mq;
116097-	mq_msg_t msg;
116098-
116099-	expect_false(mq_init(&mq), "Unexpected mq_init() failure");
116100-	expect_u_eq(mq_count(&mq), 0, "mq should be empty");
116101-	expect_ptr_null(mq_tryget(&mq),
116102-	    "mq_tryget() should fail when the queue is empty");
116103-
116104-	mq_put(&mq, &msg);
116105-	expect_u_eq(mq_count(&mq), 1, "mq should contain one message");
116106-	expect_ptr_eq(mq_tryget(&mq), &msg, "mq_tryget() should return msg");
116107-
116108-	mq_put(&mq, &msg);
116109-	expect_ptr_eq(mq_get(&mq), &msg, "mq_get() should return msg");
116110-
116111-	mq_fini(&mq);
116112-}
116113-TEST_END
116114-
116115-static void *
116116-thd_receiver_start(void *arg) {
116117-	mq_t *mq = (mq_t *)arg;
116118-	unsigned i;
116119-
116120-	for (i = 0; i < (NSENDERS * NMSGS); i++) {
116121-		mq_msg_t *msg = mq_get(mq);
116122-		expect_ptr_not_null(msg, "mq_get() should never return NULL");
116123-		dallocx(msg, 0);
116124-	}
116125-	return NULL;
116126-}
116127-
116128-static void *
116129-thd_sender_start(void *arg) {
116130-	mq_t *mq = (mq_t *)arg;
116131-	unsigned i;
116132-
116133-	for (i = 0; i < NMSGS; i++) {
116134-		mq_msg_t *msg;
116135-		void *p;
116136-		p = mallocx(sizeof(mq_msg_t), 0);
116137-		expect_ptr_not_null(p, "Unexpected mallocx() failure");
116138-		msg = (mq_msg_t *)p;
116139-		mq_put(mq, msg);
116140-	}
116141-	return NULL;
116142-}
116143-
116144-TEST_BEGIN(test_mq_threaded) {
116145-	mq_t mq;
116146-	thd_t receiver;
116147-	thd_t senders[NSENDERS];
116148-	unsigned i;
116149-
116150-	expect_false(mq_init(&mq), "Unexpected mq_init() failure");
116151-
116152-	thd_create(&receiver, thd_receiver_start, (void *)&mq);
116153-	for (i = 0; i < NSENDERS; i++) {
116154-		thd_create(&senders[i], thd_sender_start, (void *)&mq);
116155-	}
116156-
116157-	thd_join(receiver, NULL);
116158-	for (i = 0; i < NSENDERS; i++) {
116159-		thd_join(senders[i], NULL);
116160-	}
116161-
116162-	mq_fini(&mq);
116163-}
116164-TEST_END
116165-
116166-int
116167-main(void) {
116168-	return test(
116169-	    test_mq_basic,
116170-	    test_mq_threaded);
116171-}
116172-
116173diff --git a/jemalloc/test/unit/mtx.c b/jemalloc/test/unit/mtx.c
116174deleted file mode 100644
116175index 4aeebc1..0000000
116176--- a/jemalloc/test/unit/mtx.c
116177+++ /dev/null
116178@@ -1,57 +0,0 @@
116179-#include "test/jemalloc_test.h"
116180-
116181-#define NTHREADS	2
116182-#define NINCRS		2000000
116183-
116184-TEST_BEGIN(test_mtx_basic) {
116185-	mtx_t mtx;
116186-
116187-	expect_false(mtx_init(&mtx), "Unexpected mtx_init() failure");
116188-	mtx_lock(&mtx);
116189-	mtx_unlock(&mtx);
116190-	mtx_fini(&mtx);
116191-}
116192-TEST_END
116193-
116194-typedef struct {
116195-	mtx_t		mtx;
116196-	unsigned	x;
116197-} thd_start_arg_t;
116198-
116199-static void *
116200-thd_start(void *varg) {
116201-	thd_start_arg_t *arg = (thd_start_arg_t *)varg;
116202-	unsigned i;
116203-
116204-	for (i = 0; i < NINCRS; i++) {
116205-		mtx_lock(&arg->mtx);
116206-		arg->x++;
116207-		mtx_unlock(&arg->mtx);
116208-	}
116209-	return NULL;
116210-}
116211-
116212-TEST_BEGIN(test_mtx_race) {
116213-	thd_start_arg_t arg;
116214-	thd_t thds[NTHREADS];
116215-	unsigned i;
116216-
116217-	expect_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure");
116218-	arg.x = 0;
116219-	for (i = 0; i < NTHREADS; i++) {
116220-		thd_create(&thds[i], thd_start, (void *)&arg);
116221-	}
116222-	for (i = 0; i < NTHREADS; i++) {
116223-		thd_join(thds[i], NULL);
116224-	}
116225-	expect_u_eq(arg.x, NTHREADS * NINCRS,
116226-	    "Race-related counter corruption");
116227-}
116228-TEST_END
116229-
116230-int
116231-main(void) {
116232-	return test(
116233-	    test_mtx_basic,
116234-	    test_mtx_race);
116235-}
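For readers without the jemalloc test harness, test_mtx_race above reduces to the classic protected-counter pattern.  A plain-pthreads sketch of the same check; the thread and increment counts here are illustrative rather than the values from the deleted file.

#include <assert.h>
#include <pthread.h>

#define NTHREADS	2
#define NINCRS		200000

typedef struct {
	pthread_mutex_t	mtx;
	unsigned	x;
} counter_t;

static void *
incr_thread(void *varg) {
	counter_t *c = (counter_t *)varg;
	for (unsigned i = 0; i < NINCRS; i++) {
		pthread_mutex_lock(&c->mtx);
		c->x++;	/* Protected increment; no lost updates. */
		pthread_mutex_unlock(&c->mtx);
	}
	return NULL;
}

int
main(void) {
	counter_t c = {PTHREAD_MUTEX_INITIALIZER, 0};
	pthread_t thds[NTHREADS];
	for (unsigned i = 0; i < NTHREADS; i++) {
		pthread_create(&thds[i], NULL, incr_thread, &c);
	}
	for (unsigned i = 0; i < NTHREADS; i++) {
		pthread_join(thds[i], NULL);
	}
	assert(c.x == NTHREADS * NINCRS);
	return 0;
}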
116236diff --git a/jemalloc/test/unit/nstime.c b/jemalloc/test/unit/nstime.c
116237deleted file mode 100644
116238index 56238ab..0000000
116239--- a/jemalloc/test/unit/nstime.c
116240+++ /dev/null
116241@@ -1,252 +0,0 @@
116242-#include "test/jemalloc_test.h"
116243-
116244-#define BILLION	UINT64_C(1000000000)
116245-
116246-TEST_BEGIN(test_nstime_init) {
116247-	nstime_t nst;
116248-
116249-	nstime_init(&nst, 42000000043);
116250-	expect_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read");
116251-	expect_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
116252-	expect_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
116253-}
116254-TEST_END
116255-
116256-TEST_BEGIN(test_nstime_init2) {
116257-	nstime_t nst;
116258-
116259-	nstime_init2(&nst, 42, 43);
116260-	expect_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read");
116261-	expect_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read");
116262-}
116263-TEST_END
116264-
116265-TEST_BEGIN(test_nstime_copy) {
116266-	nstime_t nsta, nstb;
116267-
116268-	nstime_init2(&nsta, 42, 43);
116269-	nstime_init_zero(&nstb);
116270-	nstime_copy(&nstb, &nsta);
116271-	expect_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied");
116272-	expect_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied");
116273-}
116274-TEST_END
116275-
116276-TEST_BEGIN(test_nstime_compare) {
116277-	nstime_t nsta, nstb;
116278-
116279-	nstime_init2(&nsta, 42, 43);
116280-	nstime_copy(&nstb, &nsta);
116281-	expect_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal");
116282-	expect_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal");
116283-
116284-	nstime_init2(&nstb, 42, 42);
116285-	expect_d_eq(nstime_compare(&nsta, &nstb), 1,
116286-	    "nsta should be greater than nstb");
116287-	expect_d_eq(nstime_compare(&nstb, &nsta), -1,
116288-	    "nstb should be less than nsta");
116289-
116290-	nstime_init2(&nstb, 42, 44);
116291-	expect_d_eq(nstime_compare(&nsta, &nstb), -1,
116292-	    "nsta should be less than nstb");
116293-	expect_d_eq(nstime_compare(&nstb, &nsta), 1,
116294-	    "nstb should be greater than nsta");
116295-
116296-	nstime_init2(&nstb, 41, BILLION - 1);
116297-	expect_d_eq(nstime_compare(&nsta, &nstb), 1,
116298-	    "nsta should be greater than nstb");
116299-	expect_d_eq(nstime_compare(&nstb, &nsta), -1,
116300-	    "nstb should be less than nsta");
116301-
116302-	nstime_init2(&nstb, 43, 0);
116303-	expect_d_eq(nstime_compare(&nsta, &nstb), -1,
116304-	    "nsta should be less than nstb");
116305-	expect_d_eq(nstime_compare(&nstb, &nsta), 1,
116306-	    "nstb should be greater than nsta");
116307-}
116308-TEST_END
116309-
116310-TEST_BEGIN(test_nstime_add) {
116311-	nstime_t nsta, nstb;
116312-
116313-	nstime_init2(&nsta, 42, 43);
116314-	nstime_copy(&nstb, &nsta);
116315-	nstime_add(&nsta, &nstb);
116316-	nstime_init2(&nstb, 84, 86);
116317-	expect_d_eq(nstime_compare(&nsta, &nstb), 0,
116318-	    "Incorrect addition result");
116319-
116320-	nstime_init2(&nsta, 42, BILLION - 1);
116321-	nstime_copy(&nstb, &nsta);
116322-	nstime_add(&nsta, &nstb);
116323-	nstime_init2(&nstb, 85, BILLION - 2);
116324-	expect_d_eq(nstime_compare(&nsta, &nstb), 0,
116325-	    "Incorrect addition result");
116326-}
116327-TEST_END
116328-
116329-TEST_BEGIN(test_nstime_iadd) {
116330-	nstime_t nsta, nstb;
116331-
116332-	nstime_init2(&nsta, 42, BILLION - 1);
116333-	nstime_iadd(&nsta, 1);
116334-	nstime_init2(&nstb, 43, 0);
116335-	expect_d_eq(nstime_compare(&nsta, &nstb), 0,
116336-	    "Incorrect addition result");
116337-
116338-	nstime_init2(&nsta, 42, 1);
116339-	nstime_iadd(&nsta, BILLION + 1);
116340-	nstime_init2(&nstb, 43, 2);
116341-	expect_d_eq(nstime_compare(&nsta, &nstb), 0,
116342-	    "Incorrect addition result");
116343-}
116344-TEST_END
116345-
116346-TEST_BEGIN(test_nstime_subtract) {
116347-	nstime_t nsta, nstb;
116348-
116349-	nstime_init2(&nsta, 42, 43);
116350-	nstime_copy(&nstb, &nsta);
116351-	nstime_subtract(&nsta, &nstb);
116352-	nstime_init_zero(&nstb);
116353-	expect_d_eq(nstime_compare(&nsta, &nstb), 0,
116354-	    "Incorrect subtraction result");
116355-
116356-	nstime_init2(&nsta, 42, 43);
116357-	nstime_init2(&nstb, 41, 44);
116358-	nstime_subtract(&nsta, &nstb);
116359-	nstime_init2(&nstb, 0, BILLION - 1);
116360-	expect_d_eq(nstime_compare(&nsta, &nstb), 0,
116361-	    "Incorrect subtraction result");
116362-}
116363-TEST_END
116364-
116365-TEST_BEGIN(test_nstime_isubtract) {
116366-	nstime_t nsta, nstb;
116367-
116368-	nstime_init2(&nsta, 42, 43);
116369-	nstime_isubtract(&nsta, 42*BILLION + 43);
116370-	nstime_init_zero(&nstb);
116371-	expect_d_eq(nstime_compare(&nsta, &nstb), 0,
116372-	    "Incorrect subtraction result");
116373-
116374-	nstime_init2(&nsta, 42, 43);
116375-	nstime_isubtract(&nsta, 41*BILLION + 44);
116376-	nstime_init2(&nstb, 0, BILLION - 1);
116377-	expect_d_eq(nstime_compare(&nsta, &nstb), 0,
116378-	    "Incorrect subtraction result");
116379-}
116380-TEST_END
116381-
116382-TEST_BEGIN(test_nstime_imultiply) {
116383-	nstime_t nsta, nstb;
116384-
116385-	nstime_init2(&nsta, 42, 43);
116386-	nstime_imultiply(&nsta, 10);
116387-	nstime_init2(&nstb, 420, 430);
116388-	expect_d_eq(nstime_compare(&nsta, &nstb), 0,
116389-	    "Incorrect multiplication result");
116390-
116391-	nstime_init2(&nsta, 42, 666666666);
116392-	nstime_imultiply(&nsta, 3);
116393-	nstime_init2(&nstb, 127, 999999998);
116394-	expect_d_eq(nstime_compare(&nsta, &nstb), 0,
116395-	    "Incorrect multiplication result");
116396-}
116397-TEST_END
116398-
116399-TEST_BEGIN(test_nstime_idivide) {
116400-	nstime_t nsta, nstb;
116401-
116402-	nstime_init2(&nsta, 42, 43);
116403-	nstime_copy(&nstb, &nsta);
116404-	nstime_imultiply(&nsta, 10);
116405-	nstime_idivide(&nsta, 10);
116406-	expect_d_eq(nstime_compare(&nsta, &nstb), 0,
116407-	    "Incorrect division result");
116408-
116409-	nstime_init2(&nsta, 42, 666666666);
116410-	nstime_copy(&nstb, &nsta);
116411-	nstime_imultiply(&nsta, 3);
116412-	nstime_idivide(&nsta, 3);
116413-	expect_d_eq(nstime_compare(&nsta, &nstb), 0,
116414-	    "Incorrect division result");
116415-}
116416-TEST_END
116417-
116418-TEST_BEGIN(test_nstime_divide) {
116419-	nstime_t nsta, nstb, nstc;
116420-
116421-	nstime_init2(&nsta, 42, 43);
116422-	nstime_copy(&nstb, &nsta);
116423-	nstime_imultiply(&nsta, 10);
116424-	expect_u64_eq(nstime_divide(&nsta, &nstb), 10,
116425-	    "Incorrect division result");
116426-
116427-	nstime_init2(&nsta, 42, 43);
116428-	nstime_copy(&nstb, &nsta);
116429-	nstime_imultiply(&nsta, 10);
116430-	nstime_init(&nstc, 1);
116431-	nstime_add(&nsta, &nstc);
116432-	expect_u64_eq(nstime_divide(&nsta, &nstb), 10,
116433-	    "Incorrect division result");
116434-
116435-	nstime_init2(&nsta, 42, 43);
116436-	nstime_copy(&nstb, &nsta);
116437-	nstime_imultiply(&nsta, 10);
116438-	nstime_init(&nstc, 1);
116439-	nstime_subtract(&nsta, &nstc);
116440-	expect_u64_eq(nstime_divide(&nsta, &nstb), 9,
116441-	    "Incorrect division result");
116442-}
116443-TEST_END
116444-
116445-void
116446-test_nstime_since_once(nstime_t *t) {
116447-	nstime_t old_t;
116448-	nstime_copy(&old_t, t);
116449-
116450-	uint64_t ns_since = nstime_ns_since(t);
116451-	nstime_update(t);
116452-
116453-	nstime_t new_t;
116454-	nstime_copy(&new_t, t);
116455-	nstime_subtract(&new_t, &old_t);
116456-
116457-	expect_u64_ge(nstime_ns(&new_t), ns_since,
116458-	    "Incorrect time since result");
116459-}
116460-
116461-TEST_BEGIN(test_nstime_ns_since) {
116462-	nstime_t t;
116463-
116464-	nstime_init_update(&t);
116465-	for (uint64_t i = 0; i < 10000; i++) {
116466-		/* Keeps updating t and verifies ns_since is valid. */
116467-		test_nstime_since_once(&t);
116468-	}
116469-}
116470-TEST_END
116471-
116472-TEST_BEGIN(test_nstime_monotonic) {
116473-	nstime_monotonic();
116474-}
116475-TEST_END
116476-
116477-int
116478-main(void) {
116479-	return test(
116480-	    test_nstime_init,
116481-	    test_nstime_init2,
116482-	    test_nstime_copy,
116483-	    test_nstime_compare,
116484-	    test_nstime_add,
116485-	    test_nstime_iadd,
116486-	    test_nstime_subtract,
116487-	    test_nstime_isubtract,
116488-	    test_nstime_imultiply,
116489-	    test_nstime_idivide,
116490-	    test_nstime_divide,
116491-	    test_nstime_ns_since,
116492-	    test_nstime_monotonic);
116493-}
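The carry case in test_nstime_add (42.999999999 s plus itself yielding 85.999999998 s) can be reproduced with a minimal standalone struct; the ts_t layout below is illustrative and not the internal nstime_t representation.

#include <assert.h>
#include <stdint.h>

#define BILLION UINT64_C(1000000000)

typedef struct {
	uint64_t sec;
	uint64_t nsec;	/* Invariant: nsec < BILLION. */
} ts_t;

static void
ts_add(ts_t *a, const ts_t *b) {
	a->sec += b->sec;
	a->nsec += b->nsec;
	if (a->nsec >= BILLION) {	/* Carry into seconds. */
		a->sec++;
		a->nsec -= BILLION;
	}
}

int
main(void) {
	/* 42.999999999 s + 42.999999999 s == 85.999999998 s, as the test expects. */
	ts_t a = {42, BILLION - 1};
	ts_t b = {42, BILLION - 1};
	ts_add(&a, &b);
	assert(a.sec == 85 && a.nsec == BILLION - 2);
	return 0;
}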
116494diff --git a/jemalloc/test/unit/oversize_threshold.c b/jemalloc/test/unit/oversize_threshold.c
116495deleted file mode 100644
116496index 44a8f76..0000000
116497--- a/jemalloc/test/unit/oversize_threshold.c
116498+++ /dev/null
116499@@ -1,133 +0,0 @@
116500-#include "test/jemalloc_test.h"
116501-
116502-#include "jemalloc/internal/ctl.h"
116503-
116504-static void
116505-arena_mallctl(const char *mallctl_str, unsigned arena, void *oldp,
116506-    size_t *oldlen, void *newp, size_t newlen) {
116507-	int err;
116508-	char buf[100];
116509-	malloc_snprintf(buf, sizeof(buf), mallctl_str, arena);
116510-
116511-	err = mallctl(buf, oldp, oldlen, newp, newlen);
116512-	expect_d_eq(0, err, "Mallctl failed; %s", buf);
116513-}
116514-
116515-TEST_BEGIN(test_oversize_threshold_get_set) {
116516-	int err;
116517-	size_t old_threshold;
116518-	size_t new_threshold;
116519-	size_t threshold_sz = sizeof(old_threshold);
116520-
116521-	unsigned arena;
116522-	size_t arena_sz = sizeof(arena);
116523-	err = mallctl("arenas.create", (void *)&arena, &arena_sz, NULL, 0);
116524-	expect_d_eq(0, err, "Arena creation failed");
116525-
116526-	/* Just a write. */
116527-	new_threshold = 1024 * 1024;
116528-	arena_mallctl("arena.%u.oversize_threshold", arena, NULL, NULL,
116529-	    &new_threshold, threshold_sz);
116530-
116531-	/* Read and write */
116532-	new_threshold = 2 * 1024 * 1024;
116533-	arena_mallctl("arena.%u.oversize_threshold", arena, &old_threshold,
116534-	    &threshold_sz, &new_threshold, threshold_sz);
116535-	expect_zu_eq(1024 * 1024, old_threshold, "Should have read old value");
116536-
116537-	/* Just a read */
116538-	arena_mallctl("arena.%u.oversize_threshold", arena, &old_threshold,
116539-	    &threshold_sz, NULL, 0);
116540-	expect_zu_eq(2 * 1024 * 1024, old_threshold, "Should have read old value");
116541-}
116542-TEST_END
116543-
116544-static size_t max_purged = 0;
116545-static bool
116546-purge_forced_record_max(extent_hooks_t* hooks, void *addr, size_t sz,
116547-    size_t offset, size_t length, unsigned arena_ind) {
116548-	if (length > max_purged) {
116549-		max_purged = length;
116550-	}
116551-	return false;
116552-}
116553-
116554-static bool
116555-dalloc_record_max(extent_hooks_t *extent_hooks, void *addr, size_t sz,
116556-    bool committed, unsigned arena_ind) {
116557-	if (sz > max_purged) {
116558-		max_purged = sz;
116559-	}
116560-	return false;
116561-}
116562-
116563-extent_hooks_t max_recording_extent_hooks;
116564-
116565-TEST_BEGIN(test_oversize_threshold) {
116566-	max_recording_extent_hooks = ehooks_default_extent_hooks;
116567-	max_recording_extent_hooks.purge_forced = &purge_forced_record_max;
116568-	max_recording_extent_hooks.dalloc = &dalloc_record_max;
116569-
116570-	extent_hooks_t *extent_hooks = &max_recording_extent_hooks;
116571-
116572-	int err;
116573-
116574-	unsigned arena;
116575-	size_t arena_sz = sizeof(arena);
116576-	err = mallctl("arenas.create", (void *)&arena, &arena_sz, NULL, 0);
116577-	expect_d_eq(0, err, "Arena creation failed");
116578-	arena_mallctl("arena.%u.extent_hooks", arena, NULL, NULL, &extent_hooks,
116579-	    sizeof(extent_hooks));
116580-
116581-	/*
116582-	 * This test will fundamentally race with purging, since we're going to
116583-	 * check the dirty stats to see if our oversized allocation got purged.
116584-	 * We don't want other purging to happen accidentally.  We can't just
116585-	 * disable purging entirely, though, since that will also disable
116586-	 * oversize purging.  Just set purging intervals to be very large.
116587-	 */
116588-	ssize_t decay_ms = 100 * 1000;
116589-	ssize_t decay_ms_sz = sizeof(decay_ms);
116590-	arena_mallctl("arena.%u.dirty_decay_ms", arena, NULL, NULL, &decay_ms,
116591-	    decay_ms_sz);
116592-	arena_mallctl("arena.%u.muzzy_decay_ms", arena, NULL, NULL, &decay_ms,
116593-	    decay_ms_sz);
116594-
116595-	/* Clean everything out. */
116596-	arena_mallctl("arena.%u.purge", arena, NULL, NULL, NULL, 0);
116597-	max_purged = 0;
116598-
116599-	/* Set threshold to 1MB. */
116600-	size_t threshold = 1024 * 1024;
116601-	size_t threshold_sz = sizeof(threshold);
116602-	arena_mallctl("arena.%u.oversize_threshold", arena, NULL, NULL,
116603-	    &threshold, threshold_sz);
116604-
116605-	/* Allocating and freeing half a megabyte should leave them dirty. */
116606-	void *ptr = mallocx(512 * 1024, MALLOCX_ARENA(arena));
116607-	dallocx(ptr, MALLOCX_TCACHE_NONE);
116608-	if (!is_background_thread_enabled()) {
116609-		expect_zu_lt(max_purged, 512 * 1024, "Expected no 512k purge");
116610-	}
116611-
116612-	/* Purge again to reset everything out. */
116613-	arena_mallctl("arena.%u.purge", arena, NULL, NULL, NULL, 0);
116614-	max_purged = 0;
116615-
116616-	/*
116617-	 * Allocating and freeing 2 megabytes should have them purged because of
116618-	 * the oversize threshold.
116619-	 */
116620-	ptr = mallocx(2 * 1024 * 1024, MALLOCX_ARENA(arena));
116621-	dallocx(ptr, MALLOCX_TCACHE_NONE);
116622-	expect_zu_ge(max_purged, 2 * 1024 * 1024, "Expected a 2MB purge");
116623-}
116624-TEST_END
116625-
116626-int
116627-main(void) {
116628-	return test_no_reentrancy(
116629-	    test_oversize_threshold_get_set,
116630-	    test_oversize_threshold);
116631-}
116632-
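Outside the test harness, the same per-arena knob is reachable through jemalloc's public mallctl() interface.  A minimal sketch with error handling trimmed; the "arena.<i>.oversize_threshold" name is the one exercised by the test above.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	unsigned arena;
	size_t sz = sizeof(arena);
	if (mallctl("arenas.create", &arena, &sz, NULL, 0) != 0) {
		return 1;
	}

	char name[128];
	snprintf(name, sizeof(name), "arena.%u.oversize_threshold", arena);

	/* Read the current threshold while raising it to 2 MiB. */
	size_t old_thresh;
	size_t new_thresh = 2 * 1024 * 1024;
	size_t thresh_sz = sizeof(old_thresh);
	if (mallctl(name, &old_thresh, &thresh_sz, &new_thresh,
	    sizeof(new_thresh)) != 0) {
		return 1;
	}
	printf("oversize_threshold: %zu -> %zu\n", old_thresh, new_thresh);
	return 0;
}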
116633diff --git a/jemalloc/test/unit/pa.c b/jemalloc/test/unit/pa.c
116634deleted file mode 100644
116635index b1e2f6e..0000000
116636--- a/jemalloc/test/unit/pa.c
116637+++ /dev/null
116638@@ -1,126 +0,0 @@
116639-#include "test/jemalloc_test.h"
116640-
116641-#include "jemalloc/internal/pa.h"
116642-
116643-static void *
116644-alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
116645-    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
116646-	void *ret = pages_map(new_addr, size, alignment, commit);
116647-	return ret;
116648-}
116649-
116650-static bool
116651-merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
116652-    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
116653-	return !maps_coalesce;
116654-}
116655-
116656-static bool
116657-split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
116658-    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
116659-	return !maps_coalesce;
116660-}
116661-
116662-static void
116663-init_test_extent_hooks(extent_hooks_t *hooks) {
116664-	/*
116665-	 * The default hooks are mostly fine for testing.  A few of them,
116666-	 * though, access globals (alloc for dss setting in an arena, split and
116667-	 * merge touch the global emap to find head state).  The first of these
116668-	 * can be fixed by keeping that state with the hooks, where it logically
116669-	 * belongs.  The second, though, we can only fix when we use the extent
116670-	 * hook API.
116671-	 */
116672-	memcpy(hooks, &ehooks_default_extent_hooks, sizeof(extent_hooks_t));
116673-	hooks->alloc = &alloc_hook;
116674-	hooks->merge = &merge_hook;
116675-	hooks->split = &split_hook;
116676-}
116677-
116678-typedef struct test_data_s test_data_t;
116679-struct test_data_s {
116680-	pa_shard_t shard;
116681-	pa_central_t central;
116682-	base_t *base;
116683-	emap_t emap;
116684-	pa_shard_stats_t stats;
116685-	malloc_mutex_t stats_mtx;
116686-	extent_hooks_t hooks;
116687-};
116688-
116689-test_data_t *init_test_data(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
116690-	test_data_t *test_data = calloc(1, sizeof(test_data_t));
116691-	assert_ptr_not_null(test_data, "");
116692-	init_test_extent_hooks(&test_data->hooks);
116693-
116694-	base_t *base = base_new(TSDN_NULL, /* ind */ 1, &test_data->hooks,
116695-	    /* metadata_use_hooks */ true);
116696-	assert_ptr_not_null(base, "");
116697-
116698-	test_data->base = base;
116699-	bool err = emap_init(&test_data->emap, test_data->base,
116700-	    /* zeroed */ true);
116701-	assert_false(err, "");
116702-
116703-	nstime_t time;
116704-	nstime_init(&time, 0);
116705-
116706-	err = pa_central_init(&test_data->central, base, opt_hpa,
116707-	    &hpa_hooks_default);
116708-	assert_false(err, "");
116709-
116710-	const size_t pa_oversize_threshold = 8 * 1024 * 1024;
116711-	err = pa_shard_init(TSDN_NULL, &test_data->shard, &test_data->central,
116712-	    &test_data->emap, test_data->base, /* ind */ 1, &test_data->stats,
116713-	    &test_data->stats_mtx, &time, pa_oversize_threshold, dirty_decay_ms,
116714-	    muzzy_decay_ms);
116715-	assert_false(err, "");
116716-
116717-	return test_data;
116718-}
116719-
116720-void destroy_test_data(test_data_t *data) {
116721-	base_delete(TSDN_NULL, data->base);
116722-	free(data);
116723-}
116724-
116725-static void *
116726-do_alloc_free_purge(void *arg) {
116727-	test_data_t *test_data = (test_data_t *)arg;
116728-	for (int i = 0; i < 10 * 1000; i++) {
116729-		bool deferred_work_generated = false;
116730-		edata_t *edata = pa_alloc(TSDN_NULL, &test_data->shard, PAGE,
116731-		    PAGE, /* slab */ false, /* szind */ 0, /* zero */ false,
116732-		    /* guarded */ false, &deferred_work_generated);
116733-		assert_ptr_not_null(edata, "");
116734-		pa_dalloc(TSDN_NULL, &test_data->shard, edata,
116735-		    &deferred_work_generated);
116736-		malloc_mutex_lock(TSDN_NULL,
116737-		    &test_data->shard.pac.decay_dirty.mtx);
116738-		pac_decay_all(TSDN_NULL, &test_data->shard.pac,
116739-		    &test_data->shard.pac.decay_dirty,
116740-		    &test_data->shard.pac.stats->decay_dirty,
116741-		    &test_data->shard.pac.ecache_dirty, true);
116742-		malloc_mutex_unlock(TSDN_NULL,
116743-		    &test_data->shard.pac.decay_dirty.mtx);
116744-	}
116745-	return NULL;
116746-}
116747-
116748-TEST_BEGIN(test_alloc_free_purge_thds) {
116749-	test_data_t *test_data = init_test_data(0, 0);
116750-	thd_t thds[4];
116751-	for (int i = 0; i < 4; i++) {
116752-		thd_create(&thds[i], do_alloc_free_purge, test_data);
116753-	}
116754-	for (int i = 0; i < 4; i++) {
116755-		thd_join(thds[i], NULL);
116756-	}
116757-}
116758-TEST_END
116759-
116760-int
116761-main(void) {
116762-	return test(
116763-	    test_alloc_free_purge_thds);
116764-}
116765diff --git a/jemalloc/test/unit/pack.c b/jemalloc/test/unit/pack.c
116766deleted file mode 100644
116767index e639282..0000000
116768--- a/jemalloc/test/unit/pack.c
116769+++ /dev/null
116770@@ -1,166 +0,0 @@
116771-#include "test/jemalloc_test.h"
116772-
116773-/*
116774- * Size class that is a divisor of the page size, ideally 4+ regions per run.
116775- */
116776-#if LG_PAGE <= 14
116777-#define SZ	(ZU(1) << (LG_PAGE - 2))
116778-#else
116779-#define SZ	ZU(4096)
116780-#endif
116781-
116782-/*
116783- * Number of slabs to consume at high water mark.  Should be at least 2 so
116784- * that downward growth of mmap()ed memory gets exercised on systems where
116785- * mappings grow downward.
116786- */
116787-#define NSLABS	8
116788-
116789-static unsigned
116790-binind_compute(void) {
116791-	size_t sz;
116792-	unsigned nbins, i;
116793-
116794-	sz = sizeof(nbins);
116795-	expect_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
116796-	    "Unexpected mallctl failure");
116797-
116798-	for (i = 0; i < nbins; i++) {
116799-		size_t mib[4];
116800-		size_t miblen = sizeof(mib)/sizeof(size_t);
116801-		size_t size;
116802-
116803-		expect_d_eq(mallctlnametomib("arenas.bin.0.size", mib,
116804-		    &miblen), 0, "Unexpected mallctlnametomib failure");
116805-		mib[2] = (size_t)i;
116806-
116807-		sz = sizeof(size);
116808-		expect_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,
116809-		    0), 0, "Unexpected mallctlbymib failure");
116810-		if (size == SZ) {
116811-			return i;
116812-		}
116813-	}
116814-
116815-	test_fail("Unable to compute binind");
116816-	return 0;
116817-}
116818-
116819-static size_t
116820-nregs_per_run_compute(void) {
116821-	uint32_t nregs;
116822-	size_t sz;
116823-	unsigned binind = binind_compute();
116824-	size_t mib[4];
116825-	size_t miblen = sizeof(mib)/sizeof(size_t);
116826-
116827-	expect_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
116828-	    "Unexpected mallctlnametomib failure");
116829-	mib[2] = (size_t)binind;
116830-	sz = sizeof(nregs);
116831-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL,
116832-	    0), 0, "Unexpected mallctlbymib failure");
116833-	return nregs;
116834-}
116835-
116836-static unsigned
116837-arenas_create_mallctl(void) {
116838-	unsigned arena_ind;
116839-	size_t sz;
116840-
116841-	sz = sizeof(arena_ind);
116842-	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
116843-	    0, "Error in arenas.create");
116844-
116845-	return arena_ind;
116846-}
116847-
116848-static void
116849-arena_reset_mallctl(unsigned arena_ind) {
116850-	size_t mib[3];
116851-	size_t miblen = sizeof(mib)/sizeof(size_t);
116852-
116853-	expect_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
116854-	    "Unexpected mallctlnametomib() failure");
116855-	mib[1] = (size_t)arena_ind;
116856-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
116857-	    "Unexpected mallctlbymib() failure");
116858-}
116859-
116860-TEST_BEGIN(test_pack) {
116861-	bool prof_enabled;
116862-	size_t sz = sizeof(prof_enabled);
116863-	if (mallctl("opt.prof", (void *)&prof_enabled, &sz, NULL, 0) == 0) {
116864-		test_skip_if(prof_enabled);
116865-	}
116866-
116867-	unsigned arena_ind = arenas_create_mallctl();
116868-	size_t nregs_per_run = nregs_per_run_compute();
116869-	size_t nregs = nregs_per_run * NSLABS;
116870-	VARIABLE_ARRAY(void *, ptrs, nregs);
116871-	size_t i, j, offset;
116872-
116873-	/* Fill matrix. */
116874-	for (i = offset = 0; i < NSLABS; i++) {
116875-		for (j = 0; j < nregs_per_run; j++) {
116876-			void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
116877-			    MALLOCX_TCACHE_NONE);
116878-			expect_ptr_not_null(p,
116879-			    "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |"
116880-			    " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu",
116881-			    SZ, arena_ind, i, j);
116882-			ptrs[(i * nregs_per_run) + j] = p;
116883-		}
116884-	}
116885-
116886-	/*
116887-	 * Free all but one region of each run, but rotate which region is
116888-	 * preserved, so that subsequent allocations exercise the within-run
116889-	 * layout policy.
116890-	 */
116891-	offset = 0;
116892-	for (i = offset = 0;
116893-	    i < NSLABS;
116894-	    i++, offset = (offset + 1) % nregs_per_run) {
116895-		for (j = 0; j < nregs_per_run; j++) {
116896-			void *p = ptrs[(i * nregs_per_run) + j];
116897-			if (offset == j) {
116898-				continue;
116899-			}
116900-			dallocx(p, MALLOCX_ARENA(arena_ind) |
116901-			    MALLOCX_TCACHE_NONE);
116902-		}
116903-	}
116904-
116905-	/*
116906-	 * Logically refill matrix, skipping preserved regions and verifying
116907-	 * that the matrix is unmodified.
116908-	 */
116909-	offset = 0;
116910-	for (i = offset = 0;
116911-	    i < NSLABS;
116912-	    i++, offset = (offset + 1) % nregs_per_run) {
116913-		for (j = 0; j < nregs_per_run; j++) {
116914-			void *p;
116915-
116916-			if (offset == j) {
116917-				continue;
116918-			}
116919-			p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
116920-			    MALLOCX_TCACHE_NONE);
116921-			expect_ptr_eq(p, ptrs[(i * nregs_per_run) + j],
116922-			    "Unexpected refill discrepancy, run=%zu, reg=%zu\n",
116923-			    i, j);
116924-		}
116925-	}
116926-
116927-	/* Clean up. */
116928-	arena_reset_mallctl(arena_ind);
116929-}
116930-TEST_END
116931-
116932-int
116933-main(void) {
116934-	return test(
116935-	    test_pack);
116936-}
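The rotation in test_pack that decides which region survives in each slab (offset advancing by one per slab, modulo nregs_per_run) is easier to see in isolation.  A standalone sketch with made-up slab and region counts:

#include <stdio.h>

int
main(void) {
	/* Illustrative sizes only; the real values come from mallctl. */
	const size_t nslabs = 4;
	const size_t nregs_per_run = 3;

	size_t offset = 0;
	for (size_t i = 0; i < nslabs;
	    i++, offset = (offset + 1) % nregs_per_run) {
		/* Region `offset` is the one left allocated in slab i. */
		printf("slab %zu keeps region %zu\n", i, offset);
	}
	return 0;
}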
116937diff --git a/jemalloc/test/unit/pack.sh b/jemalloc/test/unit/pack.sh
116938deleted file mode 100644
116939index 6f45148..0000000
116940--- a/jemalloc/test/unit/pack.sh
116941+++ /dev/null
116942@@ -1,4 +0,0 @@
116943-#!/bin/sh
116944-
116945-# Immediately purge to minimize fragmentation.
116946-export MALLOC_CONF="dirty_decay_ms:0,muzzy_decay_ms:0"
116947diff --git a/jemalloc/test/unit/pages.c b/jemalloc/test/unit/pages.c
116948deleted file mode 100644
116949index 8dfd1a7..0000000
116950--- a/jemalloc/test/unit/pages.c
116951+++ /dev/null
116952@@ -1,29 +0,0 @@
116953-#include "test/jemalloc_test.h"
116954-
116955-TEST_BEGIN(test_pages_huge) {
116956-	size_t alloc_size;
116957-	bool commit;
116958-	void *pages, *hugepage;
116959-
116960-	alloc_size = HUGEPAGE * 2 - PAGE;
116961-	commit = true;
116962-	pages = pages_map(NULL, alloc_size, PAGE, &commit);
116963-	expect_ptr_not_null(pages, "Unexpected pages_map() error");
116964-
116965-	if (init_system_thp_mode == thp_mode_default) {
116966-	    hugepage = (void *)(ALIGNMENT_CEILING((uintptr_t)pages, HUGEPAGE));
116967-	    expect_b_ne(pages_huge(hugepage, HUGEPAGE), have_madvise_huge,
116968-	        "Unexpected pages_huge() result");
116969-	    expect_false(pages_nohuge(hugepage, HUGEPAGE),
116970-	        "Unexpected pages_nohuge() result");
116971-	}
116972-
116973-	pages_unmap(pages, alloc_size);
116974-}
116975-TEST_END
116976-
116977-int
116978-main(void) {
116979-	return test(
116980-	    test_pages_huge);
116981-}
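The ALIGNMENT_CEILING() step above rounds the mapping's base address up to the next hugepage boundary.  A standalone sketch of that arithmetic, assuming a 2 MiB hugepage size:

#include <assert.h>
#include <stdint.h>

/* Round x up to a multiple of `align` (align must be a power of two). */
static uintptr_t
align_ceiling(uintptr_t x, uintptr_t align) {
	return (x + align - 1) & ~(align - 1);
}

int
main(void) {
	const uintptr_t hugepage = 2u << 20;	/* Assumed 2 MiB. */
	assert(align_ceiling(0x200000, hugepage) == 0x200000);	/* Already aligned. */
	assert(align_ceiling(0x200001, hugepage) == 0x400000);	/* Rounds up. */
	return 0;
}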
116982diff --git a/jemalloc/test/unit/peak.c b/jemalloc/test/unit/peak.c
116983deleted file mode 100644
116984index 1112978..0000000
116985--- a/jemalloc/test/unit/peak.c
116986+++ /dev/null
116987@@ -1,47 +0,0 @@
116988-#include "test/jemalloc_test.h"
116989-
116990-#include "jemalloc/internal/peak.h"
116991-
116992-TEST_BEGIN(test_peak) {
116993-	peak_t peak = PEAK_INITIALIZER;
116994-	expect_u64_eq(0, peak_max(&peak),
116995-	    "Peak should be zero at initialization");
116996-	peak_update(&peak, 100, 50);
116997-	expect_u64_eq(50, peak_max(&peak),
116998-	    "Missed update");
116999-	peak_update(&peak, 100, 100);
117000-	expect_u64_eq(50, peak_max(&peak), "Dallocs shouldn't change peak");
117001-	peak_update(&peak, 100, 200);
117002-	expect_u64_eq(50, peak_max(&peak), "Dallocs shouldn't change peak");
117003-	peak_update(&peak, 200, 200);
117004-	expect_u64_eq(50, peak_max(&peak), "Haven't reached peak again");
117005-	peak_update(&peak, 300, 200);
117006-	expect_u64_eq(100, peak_max(&peak), "Missed an update.");
117007-	peak_set_zero(&peak, 300, 200);
117008-	expect_u64_eq(0, peak_max(&peak), "No effect from zeroing");
117009-	peak_update(&peak, 300, 300);
117010-	expect_u64_eq(0, peak_max(&peak), "Dalloc shouldn't change peak");
117011-	peak_update(&peak, 400, 300);
117012-	expect_u64_eq(0, peak_max(&peak), "Should still be net negative");
117013-	peak_update(&peak, 500, 300);
117014-	expect_u64_eq(100, peak_max(&peak), "Missed an update.");
117015-	/*
117016-	 * Above, we set to zero while a net allocator; let's try as a
117017-	 * net-deallocator.
117018-	 */
117019-	peak_set_zero(&peak, 600, 700);
117020-	expect_u64_eq(0, peak_max(&peak), "No effect from zeroing.");
117021-	peak_update(&peak, 600, 800);
117022-	expect_u64_eq(0, peak_max(&peak), "Dalloc shouldn't change peak.");
117023-	peak_update(&peak, 700, 800);
117024-	expect_u64_eq(0, peak_max(&peak), "Should still be net negative.");
117025-	peak_update(&peak, 800, 800);
117026-	expect_u64_eq(100, peak_max(&peak), "Missed an update.");
117027-}
117028-TEST_END
117029-
117030-int
117031-main(void) {
117032-	return test_no_reentrancy(
117033-	    test_peak);
117034-}
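The behavior test_peak checks amounts to tracking the maximum of (allocated - deallocated) since the last zeroing.  A standalone model of that observable contract, not jemalloc's internal peak_t representation, reproducing a few of the expected values:

#include <assert.h>
#include <stdint.h>

typedef struct {
	int64_t baseline;	/* Net (alloced - dalloced) at the last zeroing. */
	int64_t max;		/* Largest net seen since then, never negative. */
} peak_model_t;

static void
model_update(peak_model_t *p, uint64_t alloced, uint64_t dalloced) {
	int64_t net = (int64_t)alloced - (int64_t)dalloced - p->baseline;
	if (net > p->max) {
		p->max = net;
	}
}

static void
model_zero(peak_model_t *p, uint64_t alloced, uint64_t dalloced) {
	p->baseline = (int64_t)alloced - (int64_t)dalloced;
	p->max = 0;
}

int
main(void) {
	peak_model_t p = {0, 0};
	model_update(&p, 100, 50);
	assert(p.max == 50);		/* Matches the first expectation above. */
	model_update(&p, 300, 200);
	assert(p.max == 100);
	model_zero(&p, 300, 200);
	model_update(&p, 500, 300);
	assert(p.max == 100);		/* Net 200, minus the baseline of 100. */
	return 0;
}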
117035diff --git a/jemalloc/test/unit/ph.c b/jemalloc/test/unit/ph.c
117036deleted file mode 100644
117037index 28f5e48..0000000
117038--- a/jemalloc/test/unit/ph.c
117039+++ /dev/null
117040@@ -1,330 +0,0 @@
117041-#include "test/jemalloc_test.h"
117042-
117043-#include "jemalloc/internal/ph.h"
117044-
117045-typedef struct node_s node_t;
117046-ph_structs(heap, node_t);
117047-
117048-struct node_s {
117049-#define NODE_MAGIC 0x9823af7e
117050-	uint32_t magic;
117051-	heap_link_t link;
117052-	uint64_t key;
117053-};
117054-
117055-static int
117056-node_cmp(const node_t *a, const node_t *b) {
117057-	int ret;
117058-
117059-	ret = (a->key > b->key) - (a->key < b->key);
117060-	if (ret == 0) {
117061-		/*
117062-		 * Duplicates are not allowed in the heap, so force an
117063-		 * arbitrary ordering for non-identical items with equal keys.
117064-		 */
117065-		ret = (((uintptr_t)a) > ((uintptr_t)b))
117066-		    - (((uintptr_t)a) < ((uintptr_t)b));
117067-	}
117068-	return ret;
117069-}
117070-
117071-static int
117072-node_cmp_magic(const node_t *a, const node_t *b) {
117073-
117074-	expect_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
117075-	expect_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
117076-
117077-	return node_cmp(a, b);
117078-}
117079-
117080-ph_gen(static, heap, node_t, link, node_cmp_magic);
117081-
117082-static node_t *
117083-node_next_get(const node_t *node) {
117084-	return phn_next_get((node_t *)node, offsetof(node_t, link));
117085-}
117086-
117087-static node_t *
117088-node_prev_get(const node_t *node) {
117089-	return phn_prev_get((node_t *)node, offsetof(node_t, link));
117090-}
117091-
117092-static node_t *
117093-node_lchild_get(const node_t *node) {
117094-	return phn_lchild_get((node_t *)node, offsetof(node_t, link));
117095-}
117096-
117097-static void
117098-node_print(const node_t *node, unsigned depth) {
117099-	unsigned i;
117100-	node_t *leftmost_child, *sibling;
117101-
117102-	for (i = 0; i < depth; i++) {
117103-		malloc_printf("\t");
117104-	}
117105-	malloc_printf("%2"FMTu64"\n", node->key);
117106-
117107-	leftmost_child = node_lchild_get(node);
117108-	if (leftmost_child == NULL) {
117109-		return;
117110-	}
117111-	node_print(leftmost_child, depth + 1);
117112-
117113-	for (sibling = node_next_get(leftmost_child); sibling !=
117114-	    NULL; sibling = node_next_get(sibling)) {
117115-		node_print(sibling, depth + 1);
117116-	}
117117-}
117118-
117119-static void
117120-heap_print(const heap_t *heap) {
117121-	node_t *auxelm;
117122-
117123-	malloc_printf("vvv heap %p vvv\n", heap);
117124-	if (heap->ph.root == NULL) {
117125-		goto label_return;
117126-	}
117127-
117128-	node_print(heap->ph.root, 0);
117129-
117130-	for (auxelm = node_next_get(heap->ph.root); auxelm != NULL;
117131-	    auxelm = node_next_get(auxelm)) {
117132-		expect_ptr_eq(node_next_get(node_prev_get(auxelm)), auxelm,
117133-		    "auxelm's prev doesn't link to auxelm");
117134-		node_print(auxelm, 0);
117135-	}
117136-
117137-label_return:
117138-	malloc_printf("^^^ heap %p ^^^\n", heap);
117139-}
117140-
117141-static unsigned
117142-node_validate(const node_t *node, const node_t *parent) {
117143-	unsigned nnodes = 1;
117144-	node_t *leftmost_child, *sibling;
117145-
117146-	if (parent != NULL) {
117147-		expect_d_ge(node_cmp_magic(node, parent), 0,
117148-		    "Child is less than parent");
117149-	}
117150-
117151-	leftmost_child = node_lchild_get(node);
117152-	if (leftmost_child == NULL) {
117153-		return nnodes;
117154-	}
117155-	expect_ptr_eq(node_prev_get(leftmost_child),
117156-	    (void *)node, "Leftmost child does not link to node");
117157-	nnodes += node_validate(leftmost_child, node);
117158-
117159-	for (sibling = node_next_get(leftmost_child); sibling !=
117160-	    NULL; sibling = node_next_get(sibling)) {
117161-		expect_ptr_eq(node_next_get(node_prev_get(sibling)), sibling,
117162-		    "sibling's prev doesn't link to sibling");
117163-		nnodes += node_validate(sibling, node);
117164-	}
117165-	return nnodes;
117166-}
117167-
117168-static unsigned
117169-heap_validate(const heap_t *heap) {
117170-	unsigned nnodes = 0;
117171-	node_t *auxelm;
117172-
117173-	if (heap->ph.root == NULL) {
117174-		goto label_return;
117175-	}
117176-
117177-	nnodes += node_validate(heap->ph.root, NULL);
117178-
117179-	for (auxelm = node_next_get(heap->ph.root); auxelm != NULL;
117180-	    auxelm = node_next_get(auxelm)) {
117181-		expect_ptr_eq(node_next_get(node_prev_get(auxelm)), auxelm,
117182-		    "auxelm's prev doesn't link to auxelm");
117183-		nnodes += node_validate(auxelm, NULL);
117184-	}
117185-
117186-label_return:
117187-	if (false) {
117188-		heap_print(heap);
117189-	}
117190-	return nnodes;
117191-}
117192-
117193-TEST_BEGIN(test_ph_empty) {
117194-	heap_t heap;
117195-
117196-	heap_new(&heap);
117197-	expect_true(heap_empty(&heap), "Heap should be empty");
117198-	expect_ptr_null(heap_first(&heap), "Unexpected node");
117199-	expect_ptr_null(heap_any(&heap), "Unexpected node");
117200-}
117201-TEST_END
117202-
117203-static void
117204-node_remove(heap_t *heap, node_t *node) {
117205-	heap_remove(heap, node);
117206-
117207-	node->magic = 0;
117208-}
117209-
117210-static node_t *
117211-node_remove_first(heap_t *heap) {
117212-	node_t *node = heap_remove_first(heap);
117213-	node->magic = 0;
117214-	return node;
117215-}
117216-
117217-static node_t *
117218-node_remove_any(heap_t *heap) {
117219-	node_t *node = heap_remove_any(heap);
117220-	node->magic = 0;
117221-	return node;
117222-}
117223-
117224-TEST_BEGIN(test_ph_random) {
117225-#define NNODES 25
117226-#define NBAGS 250
117227-#define SEED 42
117228-	sfmt_t *sfmt;
117229-	uint64_t bag[NNODES];
117230-	heap_t heap;
117231-	node_t nodes[NNODES];
117232-	unsigned i, j, k;
117233-
117234-	sfmt = init_gen_rand(SEED);
117235-	for (i = 0; i < NBAGS; i++) {
117236-		switch (i) {
117237-		case 0:
117238-			/* Insert in order. */
117239-			for (j = 0; j < NNODES; j++) {
117240-				bag[j] = j;
117241-			}
117242-			break;
117243-		case 1:
117244-			/* Insert in reverse order. */
117245-			for (j = 0; j < NNODES; j++) {
117246-				bag[j] = NNODES - j - 1;
117247-			}
117248-			break;
117249-		default:
117250-			for (j = 0; j < NNODES; j++) {
117251-				bag[j] = gen_rand64_range(sfmt, NNODES);
117252-			}
117253-		}
117254-
117255-		for (j = 1; j <= NNODES; j++) {
117256-			/* Initialize heap and nodes. */
117257-			heap_new(&heap);
117258-			expect_u_eq(heap_validate(&heap), 0,
117259-			    "Incorrect node count");
117260-			for (k = 0; k < j; k++) {
117261-				nodes[k].magic = NODE_MAGIC;
117262-				nodes[k].key = bag[k];
117263-			}
117264-
117265-			/* Insert nodes. */
117266-			for (k = 0; k < j; k++) {
117267-				heap_insert(&heap, &nodes[k]);
117268-				if (i % 13 == 12) {
117269-					expect_ptr_not_null(heap_any(&heap),
117270-					    "Heap should not be empty");
117271-					/* Trigger merging. */
117272-					expect_ptr_not_null(heap_first(&heap),
117273-					    "Heap should not be empty");
117274-				}
117275-				expect_u_eq(heap_validate(&heap), k + 1,
117276-				    "Incorrect node count");
117277-			}
117278-
117279-			expect_false(heap_empty(&heap),
117280-			    "Heap should not be empty");
117281-
117282-			/* Remove nodes. */
117283-			switch (i % 6) {
117284-			case 0:
117285-				for (k = 0; k < j; k++) {
117286-					expect_u_eq(heap_validate(&heap), j - k,
117287-					    "Incorrect node count");
117288-					node_remove(&heap, &nodes[k]);
117289-					expect_u_eq(heap_validate(&heap), j - k
117290-					    - 1, "Incorrect node count");
117291-				}
117292-				break;
117293-			case 1:
117294-				for (k = j; k > 0; k--) {
117295-					node_remove(&heap, &nodes[k-1]);
117296-					expect_u_eq(heap_validate(&heap), k - 1,
117297-					    "Incorrect node count");
117298-				}
117299-				break;
117300-			case 2: {
117301-				node_t *prev = NULL;
117302-				for (k = 0; k < j; k++) {
117303-					node_t *node = node_remove_first(&heap);
117304-					expect_u_eq(heap_validate(&heap), j - k
117305-					    - 1, "Incorrect node count");
117306-					if (prev != NULL) {
117307-						expect_d_ge(node_cmp(node,
117308-						    prev), 0,
117309-						    "Bad removal order");
117310-					}
117311-					prev = node;
117312-				}
117313-				break;
117314-			} case 3: {
117315-				node_t *prev = NULL;
117316-				for (k = 0; k < j; k++) {
117317-					node_t *node = heap_first(&heap);
117318-					expect_u_eq(heap_validate(&heap), j - k,
117319-					    "Incorrect node count");
117320-					if (prev != NULL) {
117321-						expect_d_ge(node_cmp(node,
117322-						    prev), 0,
117323-						    "Bad removal order");
117324-					}
117325-					node_remove(&heap, node);
117326-					expect_u_eq(heap_validate(&heap), j - k
117327-					    - 1, "Incorrect node count");
117328-					prev = node;
117329-				}
117330-				break;
117331-			} case 4: {
117332-				for (k = 0; k < j; k++) {
117333-					node_remove_any(&heap);
117334-					expect_u_eq(heap_validate(&heap), j - k
117335-					    - 1, "Incorrect node count");
117336-				}
117337-				break;
117338-			} case 5: {
117339-				for (k = 0; k < j; k++) {
117340-					node_t *node = heap_any(&heap);
117341-					expect_u_eq(heap_validate(&heap), j - k,
117342-					    "Incorrect node count");
117343-					node_remove(&heap, node);
117344-					expect_u_eq(heap_validate(&heap), j - k
117345-					    - 1, "Incorrect node count");
117346-				}
117347-				break;
117348-			} default:
117349-				not_reached();
117350-			}
117351-
117352-			expect_ptr_null(heap_first(&heap),
117353-			    "Heap should be empty");
117354-			expect_ptr_null(heap_any(&heap),
117355-			    "Heap should be empty");
117356-			expect_true(heap_empty(&heap), "Heap should be empty");
117357-		}
117358-	}
117359-	fini_gen_rand(sfmt);
117360-#undef NNODES
117361-#undef SEED
117362-}
117363-TEST_END
117364-
117365-int
117366-main(void) {
117367-	return test(
117368-	    test_ph_empty,
117369-	    test_ph_random);
117370-}
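The tie-break in node_cmp above, falling back to an address comparison so that distinct nodes with equal keys still order consistently, is a reusable pattern.  A standalone sketch:

#include <assert.h>
#include <stdint.h>

typedef struct {
	uint64_t key;
} item_t;

/* Order by key; break ties by address so distinct items never compare equal. */
static int
item_cmp(const item_t *a, const item_t *b) {
	int ret = (a->key > b->key) - (a->key < b->key);
	if (ret == 0) {
		ret = ((uintptr_t)a > (uintptr_t)b)
		    - ((uintptr_t)a < (uintptr_t)b);
	}
	return ret;
}

int
main(void) {
	item_t items[2] = {{7}, {7}};
	/* Same key, different addresses: ordering is consistent, never 0. */
	assert(item_cmp(&items[0], &items[1]) != 0);
	assert(item_cmp(&items[0], &items[1]) == -item_cmp(&items[1], &items[0]));
	return 0;
}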
117371diff --git a/jemalloc/test/unit/prng.c b/jemalloc/test/unit/prng.c
117372deleted file mode 100644
117373index a6d9b01..0000000
117374--- a/jemalloc/test/unit/prng.c
117375+++ /dev/null
117376@@ -1,189 +0,0 @@
117377-#include "test/jemalloc_test.h"
117378-
117379-TEST_BEGIN(test_prng_lg_range_u32) {
117380-	uint32_t sa, sb;
117381-	uint32_t ra, rb;
117382-	unsigned lg_range;
117383-
117384-	sa = 42;
117385-	ra = prng_lg_range_u32(&sa, 32);
117386-	sa = 42;
117387-	rb = prng_lg_range_u32(&sa, 32);
117388-	expect_u32_eq(ra, rb,
117389-	    "Repeated generation should produce repeated results");
117390-
117391-	sb = 42;
117392-	rb = prng_lg_range_u32(&sb, 32);
117393-	expect_u32_eq(ra, rb,
117394-	    "Equivalent generation should produce equivalent results");
117395-
117396-	sa = 42;
117397-	ra = prng_lg_range_u32(&sa, 32);
117398-	rb = prng_lg_range_u32(&sa, 32);
117399-	expect_u32_ne(ra, rb,
117400-	    "Full-width results must not immediately repeat");
117401-
117402-	sa = 42;
117403-	ra = prng_lg_range_u32(&sa, 32);
117404-	for (lg_range = 31; lg_range > 0; lg_range--) {
117405-		sb = 42;
117406-		rb = prng_lg_range_u32(&sb, lg_range);
117407-		expect_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)),
117408-		    0, "High order bits should be 0, lg_range=%u", lg_range);
117409-		expect_u32_eq(rb, (ra >> (32 - lg_range)),
117410-		    "Expected high order bits of full-width result, "
117411-		    "lg_range=%u", lg_range);
117412-	}
117413-
117414-}
117415-TEST_END
117416-
117417-TEST_BEGIN(test_prng_lg_range_u64) {
117418-	uint64_t sa, sb, ra, rb;
117419-	unsigned lg_range;
117420-
117421-	sa = 42;
117422-	ra = prng_lg_range_u64(&sa, 64);
117423-	sa = 42;
117424-	rb = prng_lg_range_u64(&sa, 64);
117425-	expect_u64_eq(ra, rb,
117426-	    "Repeated generation should produce repeated results");
117427-
117428-	sb = 42;
117429-	rb = prng_lg_range_u64(&sb, 64);
117430-	expect_u64_eq(ra, rb,
117431-	    "Equivalent generation should produce equivalent results");
117432-
117433-	sa = 42;
117434-	ra = prng_lg_range_u64(&sa, 64);
117435-	rb = prng_lg_range_u64(&sa, 64);
117436-	expect_u64_ne(ra, rb,
117437-	    "Full-width results must not immediately repeat");
117438-
117439-	sa = 42;
117440-	ra = prng_lg_range_u64(&sa, 64);
117441-	for (lg_range = 63; lg_range > 0; lg_range--) {
117442-		sb = 42;
117443-		rb = prng_lg_range_u64(&sb, lg_range);
117444-		expect_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)),
117445-		    0, "High order bits should be 0, lg_range=%u", lg_range);
117446-		expect_u64_eq(rb, (ra >> (64 - lg_range)),
117447-		    "Expected high order bits of full-width result, "
117448-		    "lg_range=%u", lg_range);
117449-	}
117450-}
117451-TEST_END
117452-
117453-TEST_BEGIN(test_prng_lg_range_zu) {
117454-	size_t sa, sb;
117455-	size_t ra, rb;
117456-	unsigned lg_range;
117457-
117458-	sa = 42;
117459-	ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
117460-	sa = 42;
117461-	rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
117462-	expect_zu_eq(ra, rb,
117463-	    "Repeated generation should produce repeated results");
117464-
117465-	sb = 42;
117466-	rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR));
117467-	expect_zu_eq(ra, rb,
117468-	    "Equivalent generation should produce equivalent results");
117469-
117470-	sa = 42;
117471-	ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
117472-	rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
117473-	expect_zu_ne(ra, rb,
117474-	    "Full-width results must not immediately repeat");
117475-
117476-	sa = 42;
117477-	ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
117478-	for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0;
117479-	    lg_range--) {
117480-		sb = 42;
117481-		rb = prng_lg_range_zu(&sb, lg_range);
117482-		expect_zu_eq((rb & (SIZE_T_MAX << lg_range)),
117483-		    0, "High order bits should be 0, lg_range=%u", lg_range);
117484-		expect_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) -
117485-		    lg_range)), "Expected high order bits of full-width "
117486-		    "result, lg_range=%u", lg_range);
117487-	}
117488-
117489-}
117490-TEST_END
117491-
117492-TEST_BEGIN(test_prng_range_u32) {
117493-	uint32_t range;
117494-
117495-	const uint32_t max_range = 10000000;
117496-	const uint32_t range_step = 97;
117497-	const unsigned nreps = 10;
117498-
117499-	for (range = 2; range < max_range; range += range_step) {
117500-		uint32_t s;
117501-		unsigned rep;
117502-
117503-		s = range;
117504-		for (rep = 0; rep < nreps; rep++) {
117505-			uint32_t r = prng_range_u32(&s, range);
117506-
117507-			expect_u32_lt(r, range, "Out of range");
117508-		}
117509-	}
117510-}
117511-TEST_END
117512-
117513-TEST_BEGIN(test_prng_range_u64) {
117514-	uint64_t range;
117515-
117516-	const uint64_t max_range = 10000000;
117517-	const uint64_t range_step = 97;
117518-	const unsigned nreps = 10;
117519-
117520-	for (range = 2; range < max_range; range += range_step) {
117521-		uint64_t s;
117522-		unsigned rep;
117523-
117524-		s = range;
117525-		for (rep = 0; rep < nreps; rep++) {
117526-			uint64_t r = prng_range_u64(&s, range);
117527-
117528-			expect_u64_lt(r, range, "Out of range");
117529-		}
117530-	}
117531-}
117532-TEST_END
117533-
117534-TEST_BEGIN(test_prng_range_zu) {
117535-	size_t range;
117536-
117537-	const size_t max_range = 10000000;
117538-	const size_t range_step = 97;
117539-	const unsigned nreps = 10;
117540-
117541-
117542-	for (range = 2; range < max_range; range += range_step) {
117543-		size_t s;
117544-		unsigned rep;
117545-
117546-		s = range;
117547-		for (rep = 0; rep < nreps; rep++) {
117548-			size_t r = prng_range_zu(&s, range);
117549-
117550-			expect_zu_lt(r, range, "Out of range");
117551-		}
117552-	}
117553-}
117554-TEST_END
117555-
117556-int
117557-main(void) {
117558-	return test_no_reentrancy(
117559-	    test_prng_lg_range_u32,
117560-	    test_prng_lg_range_u64,
117561-	    test_prng_lg_range_zu,
117562-	    test_prng_range_u32,
117563-	    test_prng_range_u64,
117564-	    test_prng_range_zu);
117565-}
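The lg_range invariant these tests check, that a k-bit draw equals the top k bits of a full-width draw from the same seed, holds for any generator whose narrow outputs are the high bits of a single state update.  A standalone sketch using an LCG with Knuth's MMIX constants, which are assumptions here rather than jemalloc's exact parameters:

#include <assert.h>
#include <stdint.h>

/* One LCG step; constants are Knuth's MMIX parameters (illustrative). */
static uint64_t
lcg_step(uint64_t *state) {
	*state = *state * UINT64_C(6364136223846793005)
	    + UINT64_C(1442695040888963407);
	return *state;
}

/* Return the lg_range high-order bits of the next state. */
static uint64_t
lcg_lg_range(uint64_t *state, unsigned lg_range) {
	return lcg_step(state) >> (64 - lg_range);
}

int
main(void) {
	for (unsigned lg_range = 1; lg_range <= 63; lg_range++) {
		uint64_t sa = 42, sb = 42;
		uint64_t full = lcg_lg_range(&sa, 64);
		uint64_t narrow = lcg_lg_range(&sb, lg_range);
		/* Narrow result == top lg_range bits of the full-width result. */
		assert(narrow == (full >> (64 - lg_range)));
	}
	return 0;
}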
117566diff --git a/jemalloc/test/unit/prof_accum.c b/jemalloc/test/unit/prof_accum.c
117567deleted file mode 100644
117568index ef392ac..0000000
117569--- a/jemalloc/test/unit/prof_accum.c
117570+++ /dev/null
117571@@ -1,84 +0,0 @@
117572-#include "test/jemalloc_test.h"
117573-
117574-#include "jemalloc/internal/prof_data.h"
117575-#include "jemalloc/internal/prof_sys.h"
117576-
117577-#define NTHREADS		4
117578-#define NALLOCS_PER_THREAD	50
117579-#define DUMP_INTERVAL		1
117580-#define BT_COUNT_CHECK_INTERVAL	5
117581-
117582-static int
117583-prof_dump_open_file_intercept(const char *filename, int mode) {
117584-	int fd;
117585-
117586-	fd = open("/dev/null", O_WRONLY);
117587-	assert_d_ne(fd, -1, "Unexpected open() failure");
117588-
117589-	return fd;
117590-}
117591-
117592-static void *
117593-alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) {
117594-	return btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration);
117595-}
117596-
117597-static void *
117598-thd_start(void *varg) {
117599-	unsigned thd_ind = *(unsigned *)varg;
117600-	size_t bt_count_prev, bt_count;
117601-	unsigned i_prev, i;
117602-
117603-	i_prev = 0;
117604-	bt_count_prev = 0;
117605-	for (i = 0; i < NALLOCS_PER_THREAD; i++) {
117606-		void *p = alloc_from_permuted_backtrace(thd_ind, i);
117607-		dallocx(p, 0);
117608-		if (i % DUMP_INTERVAL == 0) {
117609-			expect_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
117610-			    0, "Unexpected error while dumping heap profile");
117611-		}
117612-
117613-		if (i % BT_COUNT_CHECK_INTERVAL == 0 ||
117614-		    i+1 == NALLOCS_PER_THREAD) {
117615-			bt_count = prof_bt_count();
117616-			expect_zu_le(bt_count_prev+(i-i_prev), bt_count,
117617-			    "Expected larger backtrace count increase");
117618-			i_prev = i;
117619-			bt_count_prev = bt_count;
117620-		}
117621-	}
117622-
117623-	return NULL;
117624-}
117625-
117626-TEST_BEGIN(test_idump) {
117627-	bool active;
117628-	thd_t thds[NTHREADS];
117629-	unsigned thd_args[NTHREADS];
117630-	unsigned i;
117631-
117632-	test_skip_if(!config_prof);
117633-
117634-	active = true;
117635-	expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
117636-	    sizeof(active)), 0,
117637-	    "Unexpected mallctl failure while activating profiling");
117638-
117639-	prof_dump_open_file = prof_dump_open_file_intercept;
117640-
117641-	for (i = 0; i < NTHREADS; i++) {
117642-		thd_args[i] = i;
117643-		thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
117644-	}
117645-	for (i = 0; i < NTHREADS; i++) {
117646-		thd_join(thds[i], NULL);
117647-	}
117648-}
117649-TEST_END
117650-
117651-int
117652-main(void) {
117653-	return test_no_reentrancy(
117654-	    test_idump);
117655-}
117656diff --git a/jemalloc/test/unit/prof_accum.sh b/jemalloc/test/unit/prof_accum.sh
117657deleted file mode 100644
117658index b3e13fc..0000000
117659--- a/jemalloc/test/unit/prof_accum.sh
117660+++ /dev/null
117661@@ -1,5 +0,0 @@
117662-#!/bin/sh
117663-
117664-if [ "x${enable_prof}" = "x1" ] ; then
117665-  export MALLOC_CONF="prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0"
117666-fi
117667diff --git a/jemalloc/test/unit/prof_active.c b/jemalloc/test/unit/prof_active.c
117668deleted file mode 100644
117669index af29e7a..0000000
117670--- a/jemalloc/test/unit/prof_active.c
117671+++ /dev/null
117672@@ -1,119 +0,0 @@
117673-#include "test/jemalloc_test.h"
117674-
117675-#include "jemalloc/internal/prof_data.h"
117676-
117677-static void
117678-mallctl_bool_get(const char *name, bool expected, const char *func, int line) {
117679-	bool old;
117680-	size_t sz;
117681-
117682-	sz = sizeof(old);
117683-	expect_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 0,
117684-	    "%s():%d: Unexpected mallctl failure reading %s", func, line, name);
117685-	expect_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line,
117686-	    name);
117687-}
117688-
117689-static void
117690-mallctl_bool_set(const char *name, bool old_expected, bool val_new,
117691-    const char *func, int line) {
117692-	bool old;
117693-	size_t sz;
117694-
117695-	sz = sizeof(old);
117696-	expect_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new,
117697-	    sizeof(val_new)), 0,
117698-	    "%s():%d: Unexpected mallctl failure reading/writing %s", func,
117699-	    line, name);
117700-	expect_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func,
117701-	    line, name);
117702-}
117703-
117704-static void
117705-mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func,
117706-    int line) {
117707-	mallctl_bool_get("prof.active", prof_active_old_expected, func, line);
117708-}
117709-#define mallctl_prof_active_get(a)					\
117710-	mallctl_prof_active_get_impl(a, __func__, __LINE__)
117711-
117712-static void
117713-mallctl_prof_active_set_impl(bool prof_active_old_expected,
117714-    bool prof_active_new, const char *func, int line) {
117715-	mallctl_bool_set("prof.active", prof_active_old_expected,
117716-	    prof_active_new, func, line);
117717-}
117718-#define mallctl_prof_active_set(a, b)					\
117719-	mallctl_prof_active_set_impl(a, b, __func__, __LINE__)
117720-
117721-static void
117722-mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected,
117723-    const char *func, int line) {
117724-	mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected,
117725-	    func, line);
117726-}
117727-#define mallctl_thread_prof_active_get(a)				\
117728-	mallctl_thread_prof_active_get_impl(a, __func__, __LINE__)
117729-
117730-static void
117731-mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected,
117732-    bool thread_prof_active_new, const char *func, int line) {
117733-	mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected,
117734-	    thread_prof_active_new, func, line);
117735-}
117736-#define mallctl_thread_prof_active_set(a, b)				\
117737-	mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__)
117738-
117739-static void
117740-prof_sampling_probe_impl(bool expect_sample, const char *func, int line) {
117741-	void *p;
117742-	size_t expected_backtraces = expect_sample ? 1 : 0;
117743-
117744-	expect_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func,
117745-	    line);
117746-	p = mallocx(1, 0);
117747-	expect_ptr_not_null(p, "Unexpected mallocx() failure");
117748-	expect_zu_eq(prof_bt_count(), expected_backtraces,
117749-	    "%s():%d: Unexpected backtrace count", func, line);
117750-	dallocx(p, 0);
117751-}
117752-#define prof_sampling_probe(a)						\
117753-	prof_sampling_probe_impl(a, __func__, __LINE__)
117754-
117755-TEST_BEGIN(test_prof_active) {
117756-	test_skip_if(!config_prof);
117757-
117758-	mallctl_prof_active_get(true);
117759-	mallctl_thread_prof_active_get(false);
117760-
117761-	mallctl_prof_active_set(true, true);
117762-	mallctl_thread_prof_active_set(false, false);
117763-	/* prof.active, !thread.prof.active. */
117764-	prof_sampling_probe(false);
117765-
117766-	mallctl_prof_active_set(true, false);
117767-	mallctl_thread_prof_active_set(false, false);
117768-	/* !prof.active, !thread.prof.active. */
117769-	prof_sampling_probe(false);
117770-
117771-	mallctl_prof_active_set(false, false);
117772-	mallctl_thread_prof_active_set(false, true);
117773-	/* !prof.active, thread.prof.active. */
117774-	prof_sampling_probe(false);
117775-
117776-	mallctl_prof_active_set(false, true);
117777-	mallctl_thread_prof_active_set(true, true);
117778-	/* prof.active, thread.prof.active. */
117779-	prof_sampling_probe(true);
117780-
117781-	/* Restore settings. */
117782-	mallctl_prof_active_set(true, true);
117783-	mallctl_thread_prof_active_set(true, false);
117784-}
117785-TEST_END
117786-
117787-int
117788-main(void) {
117789-	return test_no_reentrancy(
117790-	    test_prof_active);
117791-}
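
The mallctl_bool_set() helper in the deleted prof_active.c wraps jemalloc's read-old/write-new mallctl() convention. Below is a minimal sketch of the same pattern against "thread.prof.active", assuming a profiling-enabled jemalloc build; error handling is reduced to an assert:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Write the new setting and return the previous one in a single call. */
static bool
toggle_thread_prof_active(bool enable) {
	bool old;
	size_t sz = sizeof(old);
	int err = mallctl("thread.prof.active", (void *)&old, &sz,
	    (void *)&enable, sizeof(enable));
	assert(err == 0);
	(void)err;
	return old;
}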
117792diff --git a/jemalloc/test/unit/prof_active.sh b/jemalloc/test/unit/prof_active.sh
117793deleted file mode 100644
117794index 9749674..0000000
117795--- a/jemalloc/test/unit/prof_active.sh
117796+++ /dev/null
117797@@ -1,5 +0,0 @@
117798-#!/bin/sh
117799-
117800-if [ "x${enable_prof}" = "x1" ] ; then
117801-  export MALLOC_CONF="prof:true,prof_active:true,prof_thread_active_init:false,lg_prof_sample:0"
117802-fi
117803diff --git a/jemalloc/test/unit/prof_gdump.c b/jemalloc/test/unit/prof_gdump.c
117804deleted file mode 100644
117805index 46e4503..0000000
117806--- a/jemalloc/test/unit/prof_gdump.c
117807+++ /dev/null
117808@@ -1,77 +0,0 @@
117809-#include "test/jemalloc_test.h"
117810-
117811-#include "jemalloc/internal/prof_sys.h"
117812-
117813-static bool did_prof_dump_open;
117814-
117815-static int
117816-prof_dump_open_file_intercept(const char *filename, int mode) {
117817-	int fd;
117818-
117819-	did_prof_dump_open = true;
117820-
117821-	fd = open("/dev/null", O_WRONLY);
117822-	assert_d_ne(fd, -1, "Unexpected open() failure");
117823-
117824-	return fd;
117825-}
117826-
117827-TEST_BEGIN(test_gdump) {
117828-	test_skip_if(opt_hpa);
117829-	bool active, gdump, gdump_old;
117830-	void *p, *q, *r, *s;
117831-	size_t sz;
117832-
117833-	test_skip_if(!config_prof);
117834-
117835-	active = true;
117836-	expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
117837-	    sizeof(active)), 0,
117838-	    "Unexpected mallctl failure while activating profiling");
117839-
117840-	prof_dump_open_file = prof_dump_open_file_intercept;
117841-
117842-	did_prof_dump_open = false;
117843-	p = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
117844-	expect_ptr_not_null(p, "Unexpected mallocx() failure");
117845-	expect_true(did_prof_dump_open, "Expected a profile dump");
117846-
117847-	did_prof_dump_open = false;
117848-	q = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
117849-	expect_ptr_not_null(q, "Unexpected mallocx() failure");
117850-	expect_true(did_prof_dump_open, "Expected a profile dump");
117851-
117852-	gdump = false;
117853-	sz = sizeof(gdump_old);
117854-	expect_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
117855-	    (void *)&gdump, sizeof(gdump)), 0,
117856-	    "Unexpected mallctl failure while disabling prof.gdump");
117857-	assert(gdump_old);
117858-	did_prof_dump_open = false;
117859-	r = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
117860-	expect_ptr_not_null(r, "Unexpected mallocx() failure");
117861-	expect_false(did_prof_dump_open, "Unexpected profile dump");
117862-
117863-	gdump = true;
117864-	sz = sizeof(gdump_old);
117865-	expect_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz,
117866-	    (void *)&gdump, sizeof(gdump)), 0,
117867-	    "Unexpected mallctl failure while enabling prof.gdump");
117868-	assert(!gdump_old);
117869-	did_prof_dump_open = false;
117870-	s = mallocx((1U << SC_LG_LARGE_MINCLASS), 0);
117871-	expect_ptr_not_null(s, "Unexpected mallocx() failure");
117872-	expect_true(did_prof_dump_open, "Expected a profile dump");
117873-
117874-	dallocx(p, 0);
117875-	dallocx(q, 0);
117876-	dallocx(r, 0);
117877-	dallocx(s, 0);
117878-}
117879-TEST_END
117880-
117881-int
117882-main(void) {
117883-	return test_no_reentrancy(
117884-	    test_gdump);
117885-}
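
The deleted prof_gdump.c flips "prof.gdump" on and off at run time through the same old/new mallctl() idiom; isolated, that toggle looks roughly like this (profiling-enabled build assumed, errors ignored for brevity):

#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Returns the previous prof.gdump setting so it can be restored later. */
static bool
set_gdump(bool enable) {
	bool old = false;
	size_t sz = sizeof(old);
	mallctl("prof.gdump", (void *)&old, &sz, (void *)&enable,
	    sizeof(enable));
	return old;
}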
117886diff --git a/jemalloc/test/unit/prof_gdump.sh b/jemalloc/test/unit/prof_gdump.sh
117887deleted file mode 100644
117888index 3f600d2..0000000
117889--- a/jemalloc/test/unit/prof_gdump.sh
117890+++ /dev/null
117891@@ -1,6 +0,0 @@
117892-#!/bin/sh
117893-
117894-if [ "x${enable_prof}" = "x1" ] ; then
117895-  export MALLOC_CONF="prof:true,prof_active:false,prof_gdump:true"
117896-fi
117897-
117898diff --git a/jemalloc/test/unit/prof_hook.c b/jemalloc/test/unit/prof_hook.c
117899deleted file mode 100644
117900index 6480d93..0000000
117901--- a/jemalloc/test/unit/prof_hook.c
117902+++ /dev/null
117903@@ -1,169 +0,0 @@
117904-#include "test/jemalloc_test.h"
117905-
117906-const char *dump_filename = "/dev/null";
117907-
117908-prof_backtrace_hook_t default_hook;
117909-
117910-bool mock_bt_hook_called = false;
117911-bool mock_dump_hook_called = false;
117912-
117913-void
117914-mock_bt_hook(void **vec, unsigned *len, unsigned max_len) {
117915-	*len = max_len;
117916-	for (unsigned i = 0; i < max_len; ++i) {
117917-		vec[i] = (void *)((uintptr_t)i);
117918-	}
117919-	mock_bt_hook_called = true;
117920-}
117921-
117922-void
117923-mock_bt_augmenting_hook(void **vec, unsigned *len, unsigned max_len) {
117924-	default_hook(vec, len, max_len);
117925-	expect_u_gt(*len, 0, "Default backtrace hook returned empty backtrace");
117926-	expect_u_lt(*len, max_len,
117927-	    "Default backtrace hook returned too large backtrace");
117928-
117929-	/* Add a separator between default frames and augmented */
117930-	vec[*len] = (void *)0x030303030;
117931-	(*len)++;
117932-
117933-	/* Add more stack frames */
117934-	for (unsigned i = 0; i < 3; ++i) {
117935-		if (*len == max_len) {
117936-			break;
117937-		}
117938-		vec[*len] = (void *)((uintptr_t)i);
117939-		(*len)++;
117940-	}
117941-
117942-
117943-	mock_bt_hook_called = true;
117944-}
117945-
117946-void
117947-mock_dump_hook(const char *filename) {
117948-	mock_dump_hook_called = true;
117949-	expect_str_eq(filename, dump_filename,
117950-	    "Incorrect file name passed to the dump hook");
117951-}
117952-
117953-TEST_BEGIN(test_prof_backtrace_hook_replace) {
117954-
117955-	test_skip_if(!config_prof);
117956-
117957-	mock_bt_hook_called = false;
117958-
117959-	void *p0 = mallocx(1, 0);
117960-	assert_ptr_not_null(p0, "Failed to allocate");
117961-
117962-	expect_false(mock_bt_hook_called, "Called mock hook before it's set");
117963-
117964-	prof_backtrace_hook_t null_hook = NULL;
117965-	expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
117966-	    NULL, 0, (void *)&null_hook,  sizeof(null_hook)),
117967-		EINVAL, "Incorrectly allowed NULL backtrace hook");
117968-
117969-	size_t default_hook_sz = sizeof(prof_backtrace_hook_t);
117970-	prof_backtrace_hook_t hook = &mock_bt_hook;
117971-	expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
117972-	    (void *)&default_hook, &default_hook_sz, (void *)&hook,
117973-	    sizeof(hook)), 0, "Unexpected mallctl failure setting hook");
117974-
117975-	void *p1 = mallocx(1, 0);
117976-	assert_ptr_not_null(p1, "Failed to allocate");
117977-
117978-	expect_true(mock_bt_hook_called, "Didn't call mock hook");
117979-
117980-	prof_backtrace_hook_t current_hook;
117981-	size_t current_hook_sz = sizeof(prof_backtrace_hook_t);
117982-	expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
117983-	    (void *)&current_hook, &current_hook_sz, (void *)&default_hook,
117984-	    sizeof(default_hook)), 0,
117985-	    "Unexpected mallctl failure resetting hook to default");
117986-
117987-	expect_ptr_eq(current_hook, hook,
117988-	    "Hook returned by mallctl is not equal to mock hook");
117989-
117990-	dallocx(p1, 0);
117991-	dallocx(p0, 0);
117992-}
117993-TEST_END
117994-
117995-TEST_BEGIN(test_prof_backtrace_hook_augment) {
117996-
117997-	test_skip_if(!config_prof);
117998-
117999-	mock_bt_hook_called = false;
118000-
118001-	void *p0 = mallocx(1, 0);
118002-	assert_ptr_not_null(p0, "Failed to allocate");
118003-
118004-	expect_false(mock_bt_hook_called, "Called mock hook before it's set");
118005-
118006-	size_t default_hook_sz = sizeof(prof_backtrace_hook_t);
118007-	prof_backtrace_hook_t hook = &mock_bt_augmenting_hook;
118008-	expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
118009-	    (void *)&default_hook, &default_hook_sz, (void *)&hook,
118010-	    sizeof(hook)), 0, "Unexpected mallctl failure setting hook");
118011-
118012-	void *p1 = mallocx(1, 0);
118013-	assert_ptr_not_null(p1, "Failed to allocate");
118014-
118015-	expect_true(mock_bt_hook_called, "Didn't call mock hook");
118016-
118017-	prof_backtrace_hook_t current_hook;
118018-	size_t current_hook_sz = sizeof(prof_backtrace_hook_t);
118019-	expect_d_eq(mallctl("experimental.hooks.prof_backtrace",
118020-	    (void *)&current_hook, &current_hook_sz, (void *)&default_hook,
118021-	    sizeof(default_hook)), 0,
118022-	    "Unexpected mallctl failure resetting hook to default");
118023-
118024-	expect_ptr_eq(current_hook, hook,
118025-	    "Hook returned by mallctl is not equal to mock hook");
118026-
118027-	dallocx(p1, 0);
118028-	dallocx(p0, 0);
118029-}
118030-TEST_END
118031-
118032-TEST_BEGIN(test_prof_dump_hook) {
118033-
118034-	test_skip_if(!config_prof);
118035-
118036-	mock_dump_hook_called = false;
118037-
118038-	expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&dump_filename,
118039-	    sizeof(dump_filename)), 0, "Failed to dump heap profile");
118040-
118041-	expect_false(mock_dump_hook_called, "Called dump hook before it's set");
118042-
118043-	size_t default_hook_sz = sizeof(prof_dump_hook_t);
118044-	prof_dump_hook_t hook = &mock_dump_hook;
118045-	expect_d_eq(mallctl("experimental.hooks.prof_dump",
118046-	    (void *)&default_hook, &default_hook_sz, (void *)&hook,
118047-	    sizeof(hook)), 0, "Unexpected mallctl failure setting hook");
118048-
118049-	expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&dump_filename,
118050-	    sizeof(dump_filename)), 0, "Failed to dump heap profile");
118051-
118052-	expect_true(mock_dump_hook_called, "Didn't call mock hook");
118053-
118054-	prof_dump_hook_t current_hook;
118055-	size_t current_hook_sz = sizeof(prof_dump_hook_t);
118056-	expect_d_eq(mallctl("experimental.hooks.prof_dump",
118057-	    (void *)&current_hook, &current_hook_sz, (void *)&default_hook,
118058-	    sizeof(default_hook)), 0,
118059-	    "Unexpected mallctl failure resetting hook to default");
118060-
118061-	expect_ptr_eq(current_hook, hook,
118062-	    "Hook returned by mallctl is not equal to mock hook");
118063-}
118064-TEST_END
118065-
118066-int
118067-main(void) {
118068-	return test(
118069-	    test_prof_backtrace_hook_replace,
118070-	    test_prof_backtrace_hook_augment,
118071-	    test_prof_dump_hook);
118072-}
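
prof_hook.c above swaps the backtrace hook through "experimental.hooks.prof_backtrace". The sketch below shows that installation step; the bt_hook_t typedef is a local stand-in mirroring the signature the test's mock hooks use (not necessarily the header's own name), and the fake frame is purely illustrative:

#include <stddef.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

typedef void (*bt_hook_t)(void **vec, unsigned *len, unsigned max_len);

/* Record a single fake frame; a real hook would walk the actual stack. */
static void
my_bt_hook(void **vec, unsigned *len, unsigned max_len) {
	if (max_len > 0) {
		vec[0] = (void *)(uintptr_t)0x1;
		*len = 1;
	} else {
		*len = 0;
	}
}

/* Install my_bt_hook and hand back the previous hook for later restore. */
static bt_hook_t
install_bt_hook(void) {
	bt_hook_t old = NULL;
	bt_hook_t hook = my_bt_hook;
	size_t sz = sizeof(old);
	mallctl("experimental.hooks.prof_backtrace", (void *)&old, &sz,
	    (void *)&hook, sizeof(hook));
	return old;
}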
118073diff --git a/jemalloc/test/unit/prof_hook.sh b/jemalloc/test/unit/prof_hook.sh
118074deleted file mode 100644
118075index c7ebd8f..0000000
118076--- a/jemalloc/test/unit/prof_hook.sh
118077+++ /dev/null
118078@@ -1,6 +0,0 @@
118079-#!/bin/sh
118080-
118081-if [ "x${enable_prof}" = "x1" ] ; then
118082-  export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0"
118083-fi
118084-
118085diff --git a/jemalloc/test/unit/prof_idump.c b/jemalloc/test/unit/prof_idump.c
118086deleted file mode 100644
118087index 455ac52..0000000
118088--- a/jemalloc/test/unit/prof_idump.c
118089+++ /dev/null
118090@@ -1,57 +0,0 @@
118091-#include "test/jemalloc_test.h"
118092-
118093-#include "jemalloc/internal/prof_sys.h"
118094-
118095-#define TEST_PREFIX "test_prefix"
118096-
118097-static bool did_prof_dump_open;
118098-
118099-static int
118100-prof_dump_open_file_intercept(const char *filename, int mode) {
118101-	int fd;
118102-
118103-	did_prof_dump_open = true;
118104-
118105-	const char filename_prefix[] = TEST_PREFIX ".";
118106-	expect_d_eq(strncmp(filename_prefix, filename, sizeof(filename_prefix)
118107-	    - 1), 0, "Dump file name should start with \"" TEST_PREFIX ".\"");
118108-
118109-	fd = open("/dev/null", O_WRONLY);
118110-	assert_d_ne(fd, -1, "Unexpected open() failure");
118111-
118112-	return fd;
118113-}
118114-
118115-TEST_BEGIN(test_idump) {
118116-	bool active;
118117-	void *p;
118118-
118119-	const char *test_prefix = TEST_PREFIX;
118120-
118121-	test_skip_if(!config_prof);
118122-
118123-	active = true;
118124-
118125-	expect_d_eq(mallctl("prof.prefix", NULL, NULL, (void *)&test_prefix,
118126-	    sizeof(test_prefix)), 0,
118127-	    "Unexpected mallctl failure while overwriting dump prefix");
118128-
118129-	expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
118130-	    sizeof(active)), 0,
118131-	    "Unexpected mallctl failure while activating profiling");
118132-
118133-	prof_dump_open_file = prof_dump_open_file_intercept;
118134-
118135-	did_prof_dump_open = false;
118136-	p = mallocx(1, 0);
118137-	expect_ptr_not_null(p, "Unexpected mallocx() failure");
118138-	dallocx(p, 0);
118139-	expect_true(did_prof_dump_open, "Expected a profile dump");
118140-}
118141-TEST_END
118142-
118143-int
118144-main(void) {
118145-	return test(
118146-	    test_idump);
118147-}
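
prof_idump.c sets a dump-file prefix via "prof.prefix" before turning profiling on; the same two calls in isolation (the prefix string here is just an example value, and return codes are left unchecked for brevity):

#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static void
start_profiling_with_prefix(void) {
	const char *prefix = "myapp_prof";  /* Illustrative prefix. */
	bool active = true;

	mallctl("prof.prefix", NULL, NULL, (void *)&prefix, sizeof(prefix));
	mallctl("prof.active", NULL, NULL, (void *)&active, sizeof(active));
}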
118148diff --git a/jemalloc/test/unit/prof_idump.sh b/jemalloc/test/unit/prof_idump.sh
118149deleted file mode 100644
118150index 4dc599a..0000000
118151--- a/jemalloc/test/unit/prof_idump.sh
118152+++ /dev/null
118153@@ -1,8 +0,0 @@
118154-#!/bin/sh
118155-
118156-export MALLOC_CONF="tcache:false"
118157-if [ "x${enable_prof}" = "x1" ] ; then
118158-  export MALLOC_CONF="${MALLOC_CONF},prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,lg_prof_interval:0"
118159-fi
118160-
118161-
118162diff --git a/jemalloc/test/unit/prof_log.c b/jemalloc/test/unit/prof_log.c
118163deleted file mode 100644
118164index 5ff208e..0000000
118165--- a/jemalloc/test/unit/prof_log.c
118166+++ /dev/null
118167@@ -1,151 +0,0 @@
118168-#include "test/jemalloc_test.h"
118169-#include "jemalloc/internal/prof_log.h"
118170-
118171-#define N_PARAM 100
118172-#define N_THREADS 10
118173-
118174-static void expect_rep() {
118175-	expect_b_eq(prof_log_rep_check(), false, "Rep check failed");
118176-}
118177-
118178-static void expect_log_empty() {
118179-	expect_zu_eq(prof_log_bt_count(), 0,
118180-	    "The log has backtraces; it isn't empty");
118181-	expect_zu_eq(prof_log_thr_count(), 0,
118182-	    "The log has threads; it isn't empty");
118183-	expect_zu_eq(prof_log_alloc_count(), 0,
118184-	    "The log has allocations; it isn't empty");
118185-}
118186-
118187-void *buf[N_PARAM];
118188-
118189-static void f() {
118190-	int i;
118191-	for (i = 0; i < N_PARAM; i++) {
118192-		buf[i] = malloc(100);
118193-	}
118194-	for (i = 0; i < N_PARAM; i++) {
118195-		free(buf[i]);
118196-	}
118197-}
118198-
118199-TEST_BEGIN(test_prof_log_many_logs) {
118200-	int i;
118201-
118202-	test_skip_if(!config_prof);
118203-
118204-	for (i = 0; i < N_PARAM; i++) {
118205-		expect_b_eq(prof_log_is_logging(), false,
118206-		    "Logging shouldn't have started yet");
118207-		expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
118208-		    "Unexpected mallctl failure when starting logging");
118209-		expect_b_eq(prof_log_is_logging(), true,
118210-		    "Logging should be started by now");
118211-		expect_log_empty();
118212-		expect_rep();
118213-		f();
118214-		expect_zu_eq(prof_log_thr_count(), 1, "Wrong thread count");
118215-		expect_rep();
118216-		expect_b_eq(prof_log_is_logging(), true,
118217-		    "Logging should still be on");
118218-		expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
118219-		    "Unexpected mallctl failure when stopping logging");
118220-		expect_b_eq(prof_log_is_logging(), false,
118221-		    "Logging should have turned off");
118222-	}
118223-}
118224-TEST_END
118225-
118226-thd_t thr_buf[N_THREADS];
118227-
118228-static void *f_thread(void *unused) {
118229-	int i;
118230-	for (i = 0; i < N_PARAM; i++) {
118231-		void *p = malloc(100);
118232-		memset(p, 100, 1);
118233-		free(p);
118234-	}
118235-
118236-	return NULL;
118237-}
118238-
118239-TEST_BEGIN(test_prof_log_many_threads) {
118240-
118241-	test_skip_if(!config_prof);
118242-
118243-	int i;
118244-	expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
118245-	    "Unexpected mallctl failure when starting logging");
118246-	for (i = 0; i < N_THREADS; i++) {
118247-		thd_create(&thr_buf[i], &f_thread, NULL);
118248-	}
118249-
118250-	for (i = 0; i < N_THREADS; i++) {
118251-		thd_join(thr_buf[i], NULL);
118252-	}
118253-	expect_zu_eq(prof_log_thr_count(), N_THREADS,
118254-	    "Wrong number of thread entries");
118255-	expect_rep();
118256-	expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
118257-	    "Unexpected mallctl failure when stopping logging");
118258-}
118259-TEST_END
118260-
118261-static void f3() {
118262-	void *p = malloc(100);
118263-	free(p);
118264-}
118265-
118266-static void f1() {
118267-	void *p = malloc(100);
118268-	f3();
118269-	free(p);
118270-}
118271-
118272-static void f2() {
118273-	void *p = malloc(100);
118274-	free(p);
118275-}
118276-
118277-TEST_BEGIN(test_prof_log_many_traces) {
118278-
118279-	test_skip_if(!config_prof);
118280-
118281-	expect_d_eq(mallctl("prof.log_start", NULL, NULL, NULL, 0), 0,
118282-	    "Unexpected mallctl failure when starting logging");
118283-	int i;
118284-	expect_rep();
118285-	expect_log_empty();
118286-	for (i = 0; i < N_PARAM; i++) {
118287-		expect_rep();
118288-		f1();
118289-		expect_rep();
118290-		f2();
118291-		expect_rep();
118292-		f3();
118293-		expect_rep();
118294-	}
118295-	/*
118296-	 * There should be 8 total backtraces: two for malloc/free in f1(), two
118297-	 * for malloc/free in f2(), two for malloc/free in f3(), and then two
118298-	 * for malloc/free in f1()'s call to f3().  However, compiler
118299-	 * optimizations such as loop unrolling might generate more call sites.
118300-	 * So >= 8 traces are expected.
118301-	 */
118302-	expect_zu_ge(prof_log_bt_count(), 8,
118303-	    "Expect at least 8 backtraces given sample workload");
118304-	expect_d_eq(mallctl("prof.log_stop", NULL, NULL, NULL, 0), 0,
118305-	    "Unexpected mallctl failure when stopping logging");
118306-}
118307-TEST_END
118308-
118309-int
118310-main(void) {
118311-	if (config_prof) {
118312-		prof_log_dummy_set(true);
118313-	}
118314-	return test_no_reentrancy(
118315-	    test_prof_log_many_logs,
118316-	    test_prof_log_many_traces,
118317-	    test_prof_log_many_threads);
118318-}
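
The prof_log.c test brackets its workloads with "prof.log_start" and "prof.log_stop"; both are write-only controls, so the minimal usage is just (profiling-enabled build assumed):

#include <stdlib.h>
#include <jemalloc/jemalloc.h>

static void
log_one_burst(void) {
	mallctl("prof.log_start", NULL, NULL, NULL, 0);
	for (int i = 0; i < 100; i++) {
		free(malloc(100));  /* Workload to record, as in f() above. */
	}
	mallctl("prof.log_stop", NULL, NULL, NULL, 0);
}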
118319diff --git a/jemalloc/test/unit/prof_log.sh b/jemalloc/test/unit/prof_log.sh
118320deleted file mode 100644
118321index 485f9bf..0000000
118322--- a/jemalloc/test/unit/prof_log.sh
118323+++ /dev/null
118324@@ -1,5 +0,0 @@
118325-#!/bin/sh
118326-
118327-if [ "x${enable_prof}" = "x1" ] ; then
118328-  export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0"
118329-fi
118330diff --git a/jemalloc/test/unit/prof_mdump.c b/jemalloc/test/unit/prof_mdump.c
118331deleted file mode 100644
118332index 75b3a51..0000000
118333--- a/jemalloc/test/unit/prof_mdump.c
118334+++ /dev/null
118335@@ -1,216 +0,0 @@
118336-#include "test/jemalloc_test.h"
118337-
118338-#include "jemalloc/internal/prof_sys.h"
118339-
118340-static const char *test_filename = "test_filename";
118341-static bool did_prof_dump_open;
118342-
118343-static int
118344-prof_dump_open_file_intercept(const char *filename, int mode) {
118345-	int fd;
118346-
118347-	did_prof_dump_open = true;
118348-
118349-	/*
118350-	 * Stronger than a strcmp() - verifying that we internally use the
118351-	 * caller-supplied char pointer directly.
118352-	 */
118353-	expect_ptr_eq(filename, test_filename,
118354-	    "Dump file name should be \"%s\"", test_filename);
118355-
118356-	fd = open("/dev/null", O_WRONLY);
118357-	assert_d_ne(fd, -1, "Unexpected open() failure");
118358-
118359-	return fd;
118360-}
118361-
118362-TEST_BEGIN(test_mdump_normal) {
118363-	test_skip_if(!config_prof);
118364-
118365-	prof_dump_open_file_t *open_file_orig = prof_dump_open_file;
118366-
118367-	void *p = mallocx(1, 0);
118368-	assert_ptr_not_null(p, "Unexpected mallocx() failure");
118369-
118370-	prof_dump_open_file = prof_dump_open_file_intercept;
118371-	did_prof_dump_open = false;
118372-	expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&test_filename,
118373-	    sizeof(test_filename)), 0,
118374-	    "Unexpected mallctl failure while dumping");
118375-	expect_true(did_prof_dump_open, "Expected a profile dump");
118376-
118377-	dallocx(p, 0);
118378-
118379-	prof_dump_open_file = open_file_orig;
118380-}
118381-TEST_END
118382-
118383-static int
118384-prof_dump_open_file_error(const char *filename, int mode) {
118385-	return -1;
118386-}
118387-
118388-/*
118389- * In the context of test_mdump_output_error, prof_dump_write_file_count is the
118390- * total number of times prof_dump_write_file_error() is expected to be called.
118391- * In the context of test_mdump_maps_error, prof_dump_write_file_count is the
118392- * total number of times prof_dump_write_file_error() is expected to be called
118393- * starting from the one that contains an 'M' (beginning the "MAPPED_LIBRARIES"
118394- * header).
118395- */
118396-static int prof_dump_write_file_count;
118397-
118398-static ssize_t
118399-prof_dump_write_file_error(int fd, const void *s, size_t len) {
118400-	--prof_dump_write_file_count;
118401-
118402-	expect_d_ge(prof_dump_write_file_count, 0,
118403-	    "Write is called after error occurs");
118404-
118405-	if (prof_dump_write_file_count == 0) {
118406-		return -1;
118407-	} else {
118408-		/*
118409-		 * Any non-negative number indicates success, and for
118410-		 * simplicity we just use 0.  When prof_dump_write_file_count
118411-		 * is positive, it means that we haven't reached the write that
118412-		 * we want to fail; when prof_dump_write_file_count is
118413-		 * negative, it means that we've already violated the
118414-		 * expect_d_ge(prof_dump_write_file_count, 0) statement above,
118415-		 * but instead of aborting, we continue the rest of the test,
118416-		 * and we indicate that all the writes after the failed write
118417-		 * are successful.
118418-		 */
118419-		return 0;
118420-	}
118421-}
118422-
118423-static void
118424-expect_write_failure(int count) {
118425-	prof_dump_write_file_count = count;
118426-	expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&test_filename,
118427-	    sizeof(test_filename)), EFAULT, "Dump should err");
118428-	expect_d_eq(prof_dump_write_file_count, 0,
118429-	    "Dumping stopped after a wrong number of writes");
118430-}
118431-
118432-TEST_BEGIN(test_mdump_output_error) {
118433-	test_skip_if(!config_prof);
118434-	test_skip_if(!config_debug);
118435-
118436-	prof_dump_open_file_t *open_file_orig = prof_dump_open_file;
118437-	prof_dump_write_file_t *write_file_orig = prof_dump_write_file;
118438-
118439-	prof_dump_write_file = prof_dump_write_file_error;
118440-
118441-	void *p = mallocx(1, 0);
118442-	assert_ptr_not_null(p, "Unexpected mallocx() failure");
118443-
118444-	/*
118445-	 * When opening the dump file fails, there shouldn't be any write, and
118446-	 * mallctl() should return failure.
118447-	 */
118448-	prof_dump_open_file = prof_dump_open_file_error;
118449-	expect_write_failure(0);
118450-
118451-	/*
118452-	 * When the n-th write fails, there shouldn't be any more write, and
118453-	 * mallctl() should return failure.
118454-	 */
118455-	prof_dump_open_file = prof_dump_open_file_intercept;
118456-	expect_write_failure(1); /* First write fails. */
118457-	expect_write_failure(2); /* Second write fails. */
118458-
118459-	dallocx(p, 0);
118460-
118461-	prof_dump_open_file = open_file_orig;
118462-	prof_dump_write_file = write_file_orig;
118463-}
118464-TEST_END
118465-
118466-static int
118467-prof_dump_open_maps_error() {
118468-	return -1;
118469-}
118470-
118471-static bool started_piping_maps_file;
118472-
118473-static ssize_t
118474-prof_dump_write_maps_file_error(int fd, const void *s, size_t len) {
118475-	/* The main dump doesn't contain any capital 'M'. */
118476-	if (!started_piping_maps_file && strchr(s, 'M') != NULL) {
118477-		started_piping_maps_file = true;
118478-	}
118479-
118480-	if (started_piping_maps_file) {
118481-		return prof_dump_write_file_error(fd, s, len);
118482-	} else {
118483-		/* Return success when we haven't started piping maps. */
118484-		return 0;
118485-	}
118486-}
118487-
118488-static void
118489-expect_maps_write_failure(int count) {
118490-	int mfd = prof_dump_open_maps();
118491-	if (mfd == -1) {
118492-		/* No need to continue if we just can't find the maps file. */
118493-		return;
118494-	}
118495-	close(mfd);
118496-	started_piping_maps_file = false;
118497-	expect_write_failure(count);
118498-	expect_true(started_piping_maps_file, "Should start piping maps");
118499-}
118500-
118501-TEST_BEGIN(test_mdump_maps_error) {
118502-	test_skip_if(!config_prof);
118503-	test_skip_if(!config_debug);
118504-
118505-	prof_dump_open_file_t *open_file_orig = prof_dump_open_file;
118506-	prof_dump_write_file_t *write_file_orig = prof_dump_write_file;
118507-	prof_dump_open_maps_t *open_maps_orig = prof_dump_open_maps;
118508-
118509-	prof_dump_open_file = prof_dump_open_file_intercept;
118510-	prof_dump_write_file = prof_dump_write_maps_file_error;
118511-
118512-	void *p = mallocx(1, 0);
118513-	assert_ptr_not_null(p, "Unexpected mallocx() failure");
118514-
118515-	/*
118516-	 * When opening the maps file fails, there shouldn't be any maps write,
118517-	 * and mallctl() should return success.
118518-	 */
118519-	prof_dump_open_maps = prof_dump_open_maps_error;
118520-	started_piping_maps_file = false;
118521-	prof_dump_write_file_count = 0;
118522-	expect_d_eq(mallctl("prof.dump", NULL, NULL, (void *)&test_filename,
118523-	    sizeof(test_filename)), 0,
118524-	    "mallctl should not fail in case of maps file opening failure");
118525-	expect_false(started_piping_maps_file, "Shouldn't start piping maps");
118526-	expect_d_eq(prof_dump_write_file_count, 0,
118527-	    "Dumping stopped after a wrong number of writes");
118528-
118529-	/*
118530-	 * When the n-th maps write fails (given that we are able to find the
118531-	 * maps file), there shouldn't be any more maps write, and mallctl()
118532-	 * should return failure.
118533-	 */
118534-	prof_dump_open_maps = open_maps_orig;
118535-	expect_maps_write_failure(1); /* First write fails. */
118536-	expect_maps_write_failure(2); /* Second write fails. */
118537-
118538-	dallocx(p, 0);
118539-
118540-	prof_dump_open_file = open_file_orig;
118541-	prof_dump_write_file = write_file_orig;
118542-}
118543-TEST_END
118544-
118545-int
118546-main(void) {
118547-	return test(
118548-	    test_mdump_normal,
118549-	    test_mdump_output_error,
118550-	    test_mdump_maps_error);
118551-}
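
prof_mdump.c triggers manual dumps by writing a pointer to the file name through "prof.dump"; in isolation that call is just the following (profiling-enabled build assumed, return value left to the caller):

#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Dump the current heap profile to `path`; returns 0 on success. */
static int
dump_heap_profile(const char *path) {
	return mallctl("prof.dump", NULL, NULL, (void *)&path, sizeof(path));
}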
118552diff --git a/jemalloc/test/unit/prof_mdump.sh b/jemalloc/test/unit/prof_mdump.sh
118553deleted file mode 100644
118554index d14cb8c..0000000
118555--- a/jemalloc/test/unit/prof_mdump.sh
118556+++ /dev/null
118557@@ -1,6 +0,0 @@
118558-#!/bin/sh
118559-
118560-if [ "x${enable_prof}" = "x1" ] ; then
118561-  export MALLOC_CONF="prof:true,lg_prof_sample:0"
118562-fi
118563-
118564diff --git a/jemalloc/test/unit/prof_recent.c b/jemalloc/test/unit/prof_recent.c
118565deleted file mode 100644
118566index 4fb3723..0000000
118567--- a/jemalloc/test/unit/prof_recent.c
118568+++ /dev/null
118569@@ -1,678 +0,0 @@
118570-#include "test/jemalloc_test.h"
118571-
118572-#include "jemalloc/internal/prof_recent.h"
118573-
118574-/* As specified in the shell script */
118575-#define OPT_ALLOC_MAX 3
118576-
118577-/* Invariant before and after every test (when config_prof is on) */
118578-static void
118579-confirm_prof_setup() {
118580-	/* Options */
118581-	assert_true(opt_prof, "opt_prof not on");
118582-	assert_true(opt_prof_active, "opt_prof_active not on");
118583-	assert_zd_eq(opt_prof_recent_alloc_max, OPT_ALLOC_MAX,
118584-	    "opt_prof_recent_alloc_max not set correctly");
118585-
118586-	/* Dynamics */
118587-	assert_true(prof_active_state, "prof_active not on");
118588-	assert_zd_eq(prof_recent_alloc_max_ctl_read(), OPT_ALLOC_MAX,
118589-	    "prof_recent_alloc_max not set correctly");
118590-}
118591-
118592-TEST_BEGIN(test_confirm_setup) {
118593-	test_skip_if(!config_prof);
118594-	confirm_prof_setup();
118595-}
118596-TEST_END
118597-
118598-TEST_BEGIN(test_prof_recent_off) {
118599-	test_skip_if(config_prof);
118600-
118601-	const ssize_t past_ref = 0, future_ref = 0;
118602-	const size_t len_ref = sizeof(ssize_t);
118603-
118604-	ssize_t past = past_ref, future = future_ref;
118605-	size_t len = len_ref;
118606-
118607-#define ASSERT_SHOULD_FAIL(opt, a, b, c, d) do {			\
118608-	assert_d_eq(mallctl("experimental.prof_recent." opt, a, b, c,	\
118609-	    d), ENOENT, "Should return ENOENT when config_prof is off");\
118610-	assert_zd_eq(past, past_ref, "output was touched");		\
118611-	assert_zu_eq(len, len_ref, "output length was touched");	\
118612-	assert_zd_eq(future, future_ref, "input was touched");		\
118613-} while (0)
118614-
118615-	ASSERT_SHOULD_FAIL("alloc_max", NULL, NULL, NULL, 0);
118616-	ASSERT_SHOULD_FAIL("alloc_max", &past, &len, NULL, 0);
118617-	ASSERT_SHOULD_FAIL("alloc_max", NULL, NULL, &future, len);
118618-	ASSERT_SHOULD_FAIL("alloc_max", &past, &len, &future, len);
118619-
118620-#undef ASSERT_SHOULD_FAIL
118621-}
118622-TEST_END
118623-
118624-TEST_BEGIN(test_prof_recent_on) {
118625-	test_skip_if(!config_prof);
118626-
118627-	ssize_t past, future;
118628-	size_t len = sizeof(ssize_t);
118629-
118630-	confirm_prof_setup();
118631-
118632-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
118633-	    NULL, NULL, NULL, 0), 0, "no-op mallctl should be allowed");
118634-	confirm_prof_setup();
118635-
118636-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
118637-	    &past, &len, NULL, 0), 0, "Read error");
118638-	expect_zd_eq(past, OPT_ALLOC_MAX, "Wrong read result");
118639-	future = OPT_ALLOC_MAX + 1;
118640-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
118641-	    NULL, NULL, &future, len), 0, "Write error");
118642-	future = -1;
118643-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
118644-	    &past, &len, &future, len), 0, "Read/write error");
118645-	expect_zd_eq(past, OPT_ALLOC_MAX + 1, "Wrong read result");
118646-	future = -2;
118647-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
118648-	    &past, &len, &future, len), EINVAL,
118649-	    "Invalid write should return EINVAL");
118650-	expect_zd_eq(past, OPT_ALLOC_MAX + 1,
118651-	    "Output should not be touched given invalid write");
118652-	future = OPT_ALLOC_MAX;
118653-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
118654-	    &past, &len, &future, len), 0, "Read/write error");
118655-	expect_zd_eq(past, -1, "Wrong read result");
118656-	future = OPT_ALLOC_MAX + 2;
118657-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
118658-	    &past, &len, &future, len * 2), EINVAL,
118659-	    "Invalid write should return EINVAL");
118660-	expect_zd_eq(past, -1,
118661-	    "Output should not be touched given invalid write");
118662-
118663-	confirm_prof_setup();
118664-}
118665-TEST_END
118666-
118667-/* Reproducible sequence of request sizes */
118668-#define NTH_REQ_SIZE(n) ((n) * 97 + 101)
118669-
118670-static void
118671-confirm_malloc(void *p) {
118672-	assert_ptr_not_null(p, "malloc failed unexpectedly");
118673-	edata_t *e = emap_edata_lookup(TSDN_NULL, &arena_emap_global, p);
118674-	assert_ptr_not_null(e, "NULL edata for living pointer");
118675-	prof_recent_t *n = edata_prof_recent_alloc_get_no_lock_test(e);
118676-	assert_ptr_not_null(n, "Record in edata should not be NULL");
118677-	expect_ptr_not_null(n->alloc_tctx,
118678-	    "alloc_tctx in record should not be NULL");
118679-	expect_ptr_eq(e, prof_recent_alloc_edata_get_no_lock_test(n),
118680-	    "edata pointer in record is not correct");
118681-	expect_ptr_null(n->dalloc_tctx, "dalloc_tctx in record should be NULL");
118682-}
118683-
118684-static void
118685-confirm_record_size(prof_recent_t *n, unsigned kth) {
118686-	expect_zu_eq(n->size, NTH_REQ_SIZE(kth),
118687-	    "Recorded allocation size is wrong");
118688-}
118689-
118690-static void
118691-confirm_record_living(prof_recent_t *n) {
118692-	expect_ptr_not_null(n->alloc_tctx,
118693-	    "alloc_tctx in record should not be NULL");
118694-	edata_t *edata = prof_recent_alloc_edata_get_no_lock_test(n);
118695-	assert_ptr_not_null(edata,
118696-	    "Recorded edata should not be NULL for living pointer");
118697-	expect_ptr_eq(n, edata_prof_recent_alloc_get_no_lock_test(edata),
118698-	    "Record in edata is not correct");
118699-	expect_ptr_null(n->dalloc_tctx, "dalloc_tctx in record should be NULL");
118700-}
118701-
118702-static void
118703-confirm_record_released(prof_recent_t *n) {
118704-	expect_ptr_not_null(n->alloc_tctx,
118705-	    "alloc_tctx in record should not be NULL");
118706-	expect_ptr_null(prof_recent_alloc_edata_get_no_lock_test(n),
118707-	    "Recorded edata should be NULL for released pointer");
118708-	expect_ptr_not_null(n->dalloc_tctx,
118709-	    "dalloc_tctx in record should not be NULL for released pointer");
118710-}
118711-
118712-TEST_BEGIN(test_prof_recent_alloc) {
118713-	test_skip_if(!config_prof);
118714-
118715-	bool b;
118716-	unsigned i, c;
118717-	size_t req_size;
118718-	void *p;
118719-	prof_recent_t *n;
118720-	ssize_t future;
118721-
118722-	confirm_prof_setup();
118723-
118724-	/*
118725-	 * First batch of 2 * OPT_ALLOC_MAX allocations.  After the
118726-	 * (OPT_ALLOC_MAX - 1)'th allocation the recorded allocations should
118727-	 * always be the last OPT_ALLOC_MAX allocations coming from here.
118728-	 */
118729-	for (i = 0; i < 2 * OPT_ALLOC_MAX; ++i) {
118730-		req_size = NTH_REQ_SIZE(i);
118731-		p = malloc(req_size);
118732-		confirm_malloc(p);
118733-		if (i < OPT_ALLOC_MAX - 1) {
118734-			assert_false(ql_empty(&prof_recent_alloc_list),
118735-			    "Empty recent allocation");
118736-			free(p);
118737-			/*
118738-			 * The recorded allocations may still include some
118739-			 * other allocations before the test run started,
118740-			 * so keep allocating without checking anything.
118741-			 */
118742-			continue;
118743-		}
118744-		c = 0;
118745-		ql_foreach(n, &prof_recent_alloc_list, link) {
118746-			++c;
118747-			confirm_record_size(n, i + c - OPT_ALLOC_MAX);
118748-			if (c == OPT_ALLOC_MAX) {
118749-				confirm_record_living(n);
118750-			} else {
118751-				confirm_record_released(n);
118752-			}
118753-		}
118754-		assert_u_eq(c, OPT_ALLOC_MAX,
118755-		    "Incorrect total number of allocations");
118756-		free(p);
118757-	}
118758-
118759-	confirm_prof_setup();
118760-
118761-	b = false;
118762-	assert_d_eq(mallctl("prof.active", NULL, NULL, &b, sizeof(bool)), 0,
118763-	    "mallctl for turning off prof_active failed");
118764-
118765-	/*
118766-	 * Second batch of OPT_ALLOC_MAX allocations.  Since prof_active is
118767-	 * turned off, this batch shouldn't be recorded.
118768-	 */
118769-	for (; i < 3 * OPT_ALLOC_MAX; ++i) {
118770-		req_size = NTH_REQ_SIZE(i);
118771-		p = malloc(req_size);
118772-		assert_ptr_not_null(p, "malloc failed unexpectedly");
118773-		c = 0;
118774-		ql_foreach(n, &prof_recent_alloc_list, link) {
118775-			confirm_record_size(n, c + OPT_ALLOC_MAX);
118776-			confirm_record_released(n);
118777-			++c;
118778-		}
118779-		assert_u_eq(c, OPT_ALLOC_MAX,
118780-		    "Incorrect total number of allocations");
118781-		free(p);
118782-	}
118783-
118784-	b = true;
118785-	assert_d_eq(mallctl("prof.active", NULL, NULL, &b, sizeof(bool)), 0,
118786-	    "mallctl for turning on prof_active failed");
118787-
118788-	confirm_prof_setup();
118789-
118790-	/*
118791-	 * Third batch of OPT_ALLOC_MAX allocations.  Since prof_active is
118792-	 * turned back on, they should be recorded, and in the list of recorded
118793-	 * allocations they should follow the first batch rather than the
118794-	 * second batch.
118795-	 */
118796-	for (; i < 4 * OPT_ALLOC_MAX; ++i) {
118797-		req_size = NTH_REQ_SIZE(i);
118798-		p = malloc(req_size);
118799-		confirm_malloc(p);
118800-		c = 0;
118801-		ql_foreach(n, &prof_recent_alloc_list, link) {
118802-			++c;
118803-			confirm_record_size(n,
118804-			    /* Is the allocation from the third batch? */
118805-			    i + c - OPT_ALLOC_MAX >= 3 * OPT_ALLOC_MAX ?
118806-			    /* If yes, then it's just recorded. */
118807-			    i + c - OPT_ALLOC_MAX :
118808-			    /*
118809-			     * Otherwise, it should come from the first batch
118810-			     * instead of the second batch.
118811-			     */
118812-			    i + c - 2 * OPT_ALLOC_MAX);
118813-			if (c == OPT_ALLOC_MAX) {
118814-				confirm_record_living(n);
118815-			} else {
118816-				confirm_record_released(n);
118817-			}
118818-		}
118819-		assert_u_eq(c, OPT_ALLOC_MAX,
118820-		    "Incorrect total number of allocations");
118821-		free(p);
118822-	}
118823-
118824-	/* Increasing the limit shouldn't alter the list of records. */
118825-	future = OPT_ALLOC_MAX + 1;
118826-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
118827-	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
118828-	c = 0;
118829-	ql_foreach(n, &prof_recent_alloc_list, link) {
118830-		confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
118831-		confirm_record_released(n);
118832-		++c;
118833-	}
118834-	assert_u_eq(c, OPT_ALLOC_MAX,
118835-	    "Incorrect total number of allocations");
118836-
118837-	/*
118838-	 * Decreasing the limit shouldn't alter the list of records as long as
118839-	 * the new limit is still no less than the length of the list.
118840-	 */
118841-	future = OPT_ALLOC_MAX;
118842-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
118843-	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
118844-	c = 0;
118845-	ql_foreach(n, &prof_recent_alloc_list, link) {
118846-		confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
118847-		confirm_record_released(n);
118848-		++c;
118849-	}
118850-	assert_u_eq(c, OPT_ALLOC_MAX,
118851-	    "Incorrect total number of allocations");
118852-
118853-	/*
118854-	 * Decreasing the limit should shorten the list of records if the new
118855-	 * limit is less than the length of the list.
118856-	 */
118857-	future = OPT_ALLOC_MAX - 1;
118858-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
118859-	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
118860-	c = 0;
118861-	ql_foreach(n, &prof_recent_alloc_list, link) {
118862-		++c;
118863-		confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
118864-		confirm_record_released(n);
118865-	}
118866-	assert_u_eq(c, OPT_ALLOC_MAX - 1,
118867-	    "Incorrect total number of allocations");
118868-
118869-	/* Setting to unlimited shouldn't alter the list of records. */
118870-	future = -1;
118871-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
118872-	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
118873-	c = 0;
118874-	ql_foreach(n, &prof_recent_alloc_list, link) {
118875-		++c;
118876-		confirm_record_size(n, c + 3 * OPT_ALLOC_MAX);
118877-		confirm_record_released(n);
118878-	}
118879-	assert_u_eq(c, OPT_ALLOC_MAX - 1,
118880-	    "Incorrect total number of allocations");
118881-
118882-	/* Downshift to only one record. */
118883-	future = 1;
118884-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
118885-	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
118886-	assert_false(ql_empty(&prof_recent_alloc_list), "Recent list is empty");
118887-	n = ql_first(&prof_recent_alloc_list);
118888-	confirm_record_size(n, 4 * OPT_ALLOC_MAX - 1);
118889-	confirm_record_released(n);
118890-	n = ql_next(&prof_recent_alloc_list, n, link);
118891-	assert_ptr_null(n, "Recent list should only contain one record");
118892-
118893-	/* Completely turn off. */
118894-	future = 0;
118895-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
118896-	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
118897-	assert_true(ql_empty(&prof_recent_alloc_list),
118898-	    "Recent list should be empty");
118899-
118900-	/* Restore the settings. */
118901-	future = OPT_ALLOC_MAX;
118902-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
118903-	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
118904-	assert_true(ql_empty(&prof_recent_alloc_list),
118905-	    "Recent list should be empty");
118906-
118907-	confirm_prof_setup();
118908-}
118909-TEST_END
118910-
118911-#undef NTH_REQ_SIZE
118912-
118913-#define DUMP_OUT_SIZE 4096
118914-static char dump_out[DUMP_OUT_SIZE];
118915-static size_t dump_out_len = 0;
118916-
118917-static void
118918-test_dump_write_cb(void *not_used, const char *str) {
118919-	size_t len = strlen(str);
118920-	assert(dump_out_len + len < DUMP_OUT_SIZE);
118921-	memcpy(dump_out + dump_out_len, str, len + 1);
118922-	dump_out_len += len;
118923-}
118924-
118925-static void
118926-call_dump() {
118927-	static void *in[2] = {test_dump_write_cb, NULL};
118928-	dump_out_len = 0;
118929-	assert_d_eq(mallctl("experimental.prof_recent.alloc_dump",
118930-	    NULL, NULL, in, sizeof(in)), 0, "Dump mallctl raised error");
118931-}
118932-
118933-typedef struct {
118934-	size_t size;
118935-	size_t usize;
118936-	bool released;
118937-} confirm_record_t;
118938-
118939-#define DUMP_ERROR "Dump output is wrong"
118940-
118941-static void
118942-confirm_record(const char *template, const confirm_record_t *records,
118943-    const size_t n_records) {
118944-	static const char *types[2] = {"alloc", "dalloc"};
118945-	static char buf[64];
118946-
118947-	/*
118948-	 * The template string would be in the form of:
118949-	 * "{...,\"recent_alloc\":[]}",
118950-	 * and dump_out would be in the form of:
118951-	 * "{...,\"recent_alloc\":[...]}".
118952-	 * Using "- 2" serves to cut right before the ending "]}".
118953-	 */
118954-	assert_d_eq(memcmp(dump_out, template, strlen(template) - 2), 0,
118955-	    DUMP_ERROR);
118956-	assert_d_eq(memcmp(dump_out + strlen(dump_out) - 2,
118957-	    template + strlen(template) - 2, 2), 0, DUMP_ERROR);
118958-
118959-	const char *start = dump_out + strlen(template) - 2;
118960-	const char *end = dump_out + strlen(dump_out) - 2;
118961-	const confirm_record_t *record;
118962-	for (record = records; record < records + n_records; ++record) {
118963-
118964-#define ASSERT_CHAR(c) do {						\
118965-	assert_true(start < end, DUMP_ERROR);				\
118966-	assert_c_eq(*start++, c, DUMP_ERROR);				\
118967-} while (0)
118968-
118969-#define ASSERT_STR(s) do {						\
118970-	const size_t len = strlen(s);					\
118971-	assert_true(start + len <= end, DUMP_ERROR);			\
118972-	assert_d_eq(memcmp(start, s, len), 0, DUMP_ERROR);		\
118973-	start += len;							\
118974-} while (0)
118975-
118976-#define ASSERT_FORMATTED_STR(s, ...) do {				\
118977-	malloc_snprintf(buf, sizeof(buf), s, __VA_ARGS__);		\
118978-	ASSERT_STR(buf);						\
118979-} while (0)
118980-
118981-		if (record != records) {
118982-			ASSERT_CHAR(',');
118983-		}
118984-
118985-		ASSERT_CHAR('{');
118986-
118987-		ASSERT_STR("\"size\"");
118988-		ASSERT_CHAR(':');
118989-		ASSERT_FORMATTED_STR("%zu", record->size);
118990-		ASSERT_CHAR(',');
118991-
118992-		ASSERT_STR("\"usize\"");
118993-		ASSERT_CHAR(':');
118994-		ASSERT_FORMATTED_STR("%zu", record->usize);
118995-		ASSERT_CHAR(',');
118996-
118997-		ASSERT_STR("\"released\"");
118998-		ASSERT_CHAR(':');
118999-		ASSERT_STR(record->released ? "true" : "false");
119000-		ASSERT_CHAR(',');
119001-
119002-		const char **type = types;
119003-		while (true) {
119004-			ASSERT_FORMATTED_STR("\"%s_thread_uid\"", *type);
119005-			ASSERT_CHAR(':');
119006-			while (isdigit(*start)) {
119007-				++start;
119008-			}
119009-			ASSERT_CHAR(',');
119010-
119011-			if (opt_prof_sys_thread_name) {
119012-				ASSERT_FORMATTED_STR("\"%s_thread_name\"",
119013-				    *type);
119014-				ASSERT_CHAR(':');
119015-				ASSERT_CHAR('"');
119016-				while (*start != '"') {
119017-					++start;
119018-				}
119019-				ASSERT_CHAR('"');
119020-				ASSERT_CHAR(',');
119021-			}
119022-
119023-			ASSERT_FORMATTED_STR("\"%s_time\"", *type);
119024-			ASSERT_CHAR(':');
119025-			while (isdigit(*start)) {
119026-				++start;
119027-			}
119028-			ASSERT_CHAR(',');
119029-
119030-			ASSERT_FORMATTED_STR("\"%s_trace\"", *type);
119031-			ASSERT_CHAR(':');
119032-			ASSERT_CHAR('[');
119033-			while (isdigit(*start) || *start == 'x' ||
119034-			    (*start >= 'a' && *start <= 'f') ||
119035-			    *start == '\"' || *start == ',') {
119036-				++start;
119037-			}
119038-			ASSERT_CHAR(']');
119039-
119040-			if (strcmp(*type, "dalloc") == 0) {
119041-				break;
119042-			}
119043-
119044-			assert(strcmp(*type, "alloc") == 0);
119045-			if (!record->released) {
119046-				break;
119047-			}
119048-
119049-			ASSERT_CHAR(',');
119050-			++type;
119051-		}
119052-
119053-		ASSERT_CHAR('}');
119054-
119055-#undef ASSERT_FORMATTED_STR
119056-#undef ASSERT_STR
119057-#undef ASSERT_CHAR
119058-
119059-	}
119060-	assert_ptr_eq(record, records + n_records, DUMP_ERROR);
119061-	assert_ptr_eq(start, end, DUMP_ERROR);
119062-}
119063-
119064-TEST_BEGIN(test_prof_recent_alloc_dump) {
119065-	test_skip_if(!config_prof);
119066-
119067-	confirm_prof_setup();
119068-
119069-	ssize_t future;
119070-	void *p, *q;
119071-	confirm_record_t records[2];
119072-
119073-	assert_zu_eq(lg_prof_sample, (size_t)0,
119074-	    "lg_prof_sample not set correctly");
119075-
119076-	future = 0;
119077-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
119078-	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
119079-	call_dump();
119080-	expect_str_eq(dump_out, "{\"sample_interval\":1,"
119081-	    "\"recent_alloc_max\":0,\"recent_alloc\":[]}", DUMP_ERROR);
119082-
119083-	future = 2;
119084-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
119085-	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
119086-	call_dump();
119087-	const char *template = "{\"sample_interval\":1,"
119088-	    "\"recent_alloc_max\":2,\"recent_alloc\":[]}";
119089-	expect_str_eq(dump_out, template, DUMP_ERROR);
119090-
119091-	p = malloc(7);
119092-	call_dump();
119093-	records[0].size = 7;
119094-	records[0].usize = sz_s2u(7);
119095-	records[0].released = false;
119096-	confirm_record(template, records, 1);
119097-
119098-	q = mallocx(17, MALLOCX_ALIGN(128));
119099-	call_dump();
119100-	records[1].size = 17;
119101-	records[1].usize = sz_sa2u(17, 128);
119102-	records[1].released = false;
119103-	confirm_record(template, records, 2);
119104-
119105-	free(q);
119106-	call_dump();
119107-	records[1].released = true;
119108-	confirm_record(template, records, 2);
119109-
119110-	free(p);
119111-	call_dump();
119112-	records[0].released = true;
119113-	confirm_record(template, records, 2);
119114-
119115-	future = OPT_ALLOC_MAX;
119116-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
119117-	    NULL, NULL, &future, sizeof(ssize_t)), 0, "Write error");
119118-	confirm_prof_setup();
119119-}
119120-TEST_END
119121-
119122-#undef DUMP_ERROR
119123-#undef DUMP_OUT_SIZE
119124-
119125-#define N_THREADS 8
119126-#define N_PTRS 512
119127-#define N_CTLS 8
119128-#define N_ITERS 2048
119129-#define STRESS_ALLOC_MAX 4096
119130-
119131-typedef struct {
119132-	thd_t thd;
119133-	size_t id;
119134-	void *ptrs[N_PTRS];
119135-	size_t count;
119136-} thd_data_t;
119137-
119138-static thd_data_t thd_data[N_THREADS];
119139-static ssize_t test_max;
119140-
119141-static void
119142-test_write_cb(void *cbopaque, const char *str) {
119143-	sleep_ns(1000 * 1000);
119144-}
119145-
119146-static void *
119147-f_thread(void *arg) {
119148-	const size_t thd_id = *(size_t *)arg;
119149-	thd_data_t *data_p = thd_data + thd_id;
119150-	assert(data_p->id == thd_id);
119151-	data_p->count = 0;
119152-	uint64_t rand = (uint64_t)thd_id;
119153-	tsd_t *tsd = tsd_fetch();
119154-	assert(test_max > 1);
119155-	ssize_t last_max = -1;
119156-	for (int i = 0; i < N_ITERS; i++) {
119157-		rand = prng_range_u64(&rand, N_PTRS + N_CTLS * 5);
119158-		assert(data_p->count <= N_PTRS);
119159-		if (rand < data_p->count) {
119160-			assert(data_p->count > 0);
119161-			if (rand != data_p->count - 1) {
119162-				assert(data_p->count > 1);
119163-				void *temp = data_p->ptrs[rand];
119164-				data_p->ptrs[rand] =
119165-				    data_p->ptrs[data_p->count - 1];
119166-				data_p->ptrs[data_p->count - 1] = temp;
119167-			}
119168-			free(data_p->ptrs[--data_p->count]);
119169-		} else if (rand < N_PTRS) {
119170-			assert(data_p->count < N_PTRS);
119171-			data_p->ptrs[data_p->count++] = malloc(1);
119172-		} else if (rand % 5 == 0) {
119173-			prof_recent_alloc_dump(tsd, test_write_cb, NULL);
119174-		} else if (rand % 5 == 1) {
119175-			last_max = prof_recent_alloc_max_ctl_read();
119176-		} else if (rand % 5 == 2) {
119177-			last_max =
119178-			    prof_recent_alloc_max_ctl_write(tsd, test_max * 2);
119179-		} else if (rand % 5 == 3) {
119180-			last_max =
119181-			    prof_recent_alloc_max_ctl_write(tsd, test_max);
119182-		} else {
119183-			assert(rand % 5 == 4);
119184-			last_max =
119185-			    prof_recent_alloc_max_ctl_write(tsd, test_max / 2);
119186-		}
119187-		assert_zd_ge(last_max, -1, "Illegal last-N max");
119188-	}
119189-
119190-	while (data_p->count > 0) {
119191-		free(data_p->ptrs[--data_p->count]);
119192-	}
119193-
119194-	return NULL;
119195-}
119196-
119197-TEST_BEGIN(test_prof_recent_stress) {
119198-	test_skip_if(!config_prof);
119199-
119200-	confirm_prof_setup();
119201-
119202-	test_max = OPT_ALLOC_MAX;
119203-	for (size_t i = 0; i < N_THREADS; i++) {
119204-		thd_data_t *data_p = thd_data + i;
119205-		data_p->id = i;
119206-		thd_create(&data_p->thd, &f_thread, &data_p->id);
119207-	}
119208-	for (size_t i = 0; i < N_THREADS; i++) {
119209-		thd_data_t *data_p = thd_data + i;
119210-		thd_join(data_p->thd, NULL);
119211-	}
119212-
119213-	test_max = STRESS_ALLOC_MAX;
119214-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
119215-	    NULL, NULL, &test_max, sizeof(ssize_t)), 0, "Write error");
119216-	for (size_t i = 0; i < N_THREADS; i++) {
119217-		thd_data_t *data_p = thd_data + i;
119218-		data_p->id = i;
119219-		thd_create(&data_p->thd, &f_thread, &data_p->id);
119220-	}
119221-	for (size_t i = 0; i < N_THREADS; i++) {
119222-		thd_data_t *data_p = thd_data + i;
119223-		thd_join(data_p->thd, NULL);
119224-	}
119225-
119226-	test_max = OPT_ALLOC_MAX;
119227-	assert_d_eq(mallctl("experimental.prof_recent.alloc_max",
119228-	    NULL, NULL, &test_max, sizeof(ssize_t)), 0, "Write error");
119229-	confirm_prof_setup();
119230-}
119231-TEST_END
119232-
119233-#undef STRESS_ALLOC_MAX
119234-#undef N_ITERS
119235-#undef N_PTRS
119236-#undef N_THREADS
119237-
119238-int
119239-main(void) {
119240-	return test(
119241-	    test_confirm_setup,
119242-	    test_prof_recent_off,
119243-	    test_prof_recent_on,
119244-	    test_prof_recent_alloc,
119245-	    test_prof_recent_alloc_dump,
119246-	    test_prof_recent_stress);
119247-}
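
The recent-allocation tests above drive "experimental.prof_recent.alloc_max", where -1 means unlimited and 0 empties the list (per the assertions in test_prof_recent_alloc). A small sketch of the read-modify call used throughout, with error handling omitted:

#include <stddef.h>
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

/* Set a new limit and return the previous one (-1 = unlimited, 0 = off). */
static ssize_t
set_recent_alloc_max(ssize_t new_max) {
	ssize_t old = 0;
	size_t sz = sizeof(old);
	mallctl("experimental.prof_recent.alloc_max", (void *)&old, &sz,
	    (void *)&new_max, sizeof(new_max));
	return old;
}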
119248diff --git a/jemalloc/test/unit/prof_recent.sh b/jemalloc/test/unit/prof_recent.sh
119249deleted file mode 100644
119250index 58a54a4..0000000
119251--- a/jemalloc/test/unit/prof_recent.sh
119252+++ /dev/null
119253@@ -1,5 +0,0 @@
119254-#!/bin/sh
119255-
119256-if [ "x${enable_prof}" = "x1" ] ; then
119257-  export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0,prof_recent_alloc_max:3"
119258-fi
119259diff --git a/jemalloc/test/unit/prof_reset.c b/jemalloc/test/unit/prof_reset.c
119260deleted file mode 100644
119261index 9b33b20..0000000
119262--- a/jemalloc/test/unit/prof_reset.c
119263+++ /dev/null
119264@@ -1,266 +0,0 @@
119265-#include "test/jemalloc_test.h"
119266-
119267-#include "jemalloc/internal/prof_data.h"
119268-#include "jemalloc/internal/prof_sys.h"
119269-
119270-static int
119271-prof_dump_open_file_intercept(const char *filename, int mode) {
119272-	int fd;
119273-
119274-	fd = open("/dev/null", O_WRONLY);
119275-	assert_d_ne(fd, -1, "Unexpected open() failure");
119276-
119277-	return fd;
119278-}
119279-
119280-static void
119281-set_prof_active(bool active) {
119282-	expect_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
119283-	    sizeof(active)), 0, "Unexpected mallctl failure");
119284-}
119285-
119286-static size_t
119287-get_lg_prof_sample(void) {
119288-	size_t ret;
119289-	size_t sz = sizeof(size_t);
119290-
119291-	expect_d_eq(mallctl("prof.lg_sample", (void *)&ret, &sz, NULL, 0), 0,
119292-	    "Unexpected mallctl failure while reading profiling sample rate");
119293-	return ret;
119294-}
119295-
119296-static void
119297-do_prof_reset(size_t lg_prof_sample_input) {
119298-	expect_d_eq(mallctl("prof.reset", NULL, NULL,
119299-	    (void *)&lg_prof_sample_input, sizeof(size_t)), 0,
119300-	    "Unexpected mallctl failure while resetting profile data");
119301-	expect_zu_eq(lg_prof_sample_input, get_lg_prof_sample(),
119302-	    "Expected profile sample rate change");
119303-}
119304-
119305-TEST_BEGIN(test_prof_reset_basic) {
119306-	size_t lg_prof_sample_orig, lg_prof_sample_cur, lg_prof_sample_next;
119307-	size_t sz;
119308-	unsigned i;
119309-
119310-	test_skip_if(!config_prof);
119311-
119312-	sz = sizeof(size_t);
119313-	expect_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig,
119314-	    &sz, NULL, 0), 0,
119315-	    "Unexpected mallctl failure while reading profiling sample rate");
119316-	expect_zu_eq(lg_prof_sample_orig, 0,
119317-	    "Unexpected profiling sample rate");
119318-	lg_prof_sample_cur = get_lg_prof_sample();
119319-	expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
119320-	    "Unexpected disagreement between \"opt.lg_prof_sample\" and "
119321-	    "\"prof.lg_sample\"");
119322-
119323-	/* Test simple resets. */
119324-	for (i = 0; i < 2; i++) {
119325-		expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
119326-		    "Unexpected mallctl failure while resetting profile data");
119327-		lg_prof_sample_cur = get_lg_prof_sample();
119328-		expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
119329-		    "Unexpected profile sample rate change");
119330-	}
119331-
119332-	/* Test resets with prof.lg_sample changes. */
119333-	lg_prof_sample_next = 1;
119334-	for (i = 0; i < 2; i++) {
119335-		do_prof_reset(lg_prof_sample_next);
119336-		lg_prof_sample_cur = get_lg_prof_sample();
119337-		expect_zu_eq(lg_prof_sample_cur, lg_prof_sample_next,
119338-		    "Expected profile sample rate change");
119339-		lg_prof_sample_next = lg_prof_sample_orig;
119340-	}
119341-
119342-	/* Make sure the test code restored prof.lg_sample. */
119343-	lg_prof_sample_cur = get_lg_prof_sample();
119344-	expect_zu_eq(lg_prof_sample_orig, lg_prof_sample_cur,
119345-	    "Unexpected disagreement between \"opt.lg_prof_sample\" and "
119346-	    "\"prof.lg_sample\"");
119347-}
119348-TEST_END
119349-
119350-TEST_BEGIN(test_prof_reset_cleanup) {
119351-	test_skip_if(!config_prof);
119352-
119353-	set_prof_active(true);
119354-
119355-	expect_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
119356-	void *p = mallocx(1, 0);
119357-	expect_ptr_not_null(p, "Unexpected mallocx() failure");
119358-	expect_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
119359-
119360-	prof_cnt_t cnt_all;
119361-	prof_cnt_all(&cnt_all);
119362-	expect_u64_eq(cnt_all.curobjs, 1, "Expected 1 allocation");
119363-
119364-	expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
119365-	    "Unexpected error while resetting heap profile data");
119366-	prof_cnt_all(&cnt_all);
119367-	expect_u64_eq(cnt_all.curobjs, 0, "Expected 0 allocations");
119368-	expect_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");
119369-
119370-	dallocx(p, 0);
119371-	expect_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
119372-
119373-	set_prof_active(false);
119374-}
119375-TEST_END
119376-
119377-#define NTHREADS		4
119378-#define NALLOCS_PER_THREAD	(1U << 13)
119379-#define OBJ_RING_BUF_COUNT	1531
119380-#define RESET_INTERVAL		(1U << 10)
119381-#define DUMP_INTERVAL		3677
119382-static void *
119383-thd_start(void *varg) {
119384-	unsigned thd_ind = *(unsigned *)varg;
119385-	unsigned i;
119386-	void *objs[OBJ_RING_BUF_COUNT];
119387-
119388-	memset(objs, 0, sizeof(objs));
119389-
119390-	for (i = 0; i < NALLOCS_PER_THREAD; i++) {
119391-		if (i % RESET_INTERVAL == 0) {
119392-			expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0),
119393-			    0, "Unexpected error while resetting heap profile "
119394-			    "data");
119395-		}
119396-
119397-		if (i % DUMP_INTERVAL == 0) {
119398-			expect_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
119399-			    0, "Unexpected error while dumping heap profile");
119400-		}
119401-
119402-		{
119403-			void **pp = &objs[i % OBJ_RING_BUF_COUNT];
119404-			if (*pp != NULL) {
119405-				dallocx(*pp, 0);
119406-				*pp = NULL;
119407-			}
119408-			*pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i);
119409-			expect_ptr_not_null(*pp,
119410-			    "Unexpected btalloc() failure");
119411-		}
119412-	}
119413-
119414-	/* Clean up any remaining objects. */
119415-	for (i = 0; i < OBJ_RING_BUF_COUNT; i++) {
119416-		void **pp = &objs[i % OBJ_RING_BUF_COUNT];
119417-		if (*pp != NULL) {
119418-			dallocx(*pp, 0);
119419-			*pp = NULL;
119420-		}
119421-	}
119422-
119423-	return NULL;
119424-}
119425-
119426-TEST_BEGIN(test_prof_reset) {
119427-	size_t lg_prof_sample_orig;
119428-	thd_t thds[NTHREADS];
119429-	unsigned thd_args[NTHREADS];
119430-	unsigned i;
119431-	size_t bt_count, tdata_count;
119432-
119433-	test_skip_if(!config_prof);
119434-
119435-	bt_count = prof_bt_count();
119436-	expect_zu_eq(bt_count, 0,
119437-	    "Unexpected pre-existing backtraces");
119438-	tdata_count = prof_tdata_count();
119439-
119440-	lg_prof_sample_orig = get_lg_prof_sample();
119441-	do_prof_reset(5);
119442-
119443-	set_prof_active(true);
119444-
119445-	for (i = 0; i < NTHREADS; i++) {
119446-		thd_args[i] = i;
119447-		thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
119448-	}
119449-	for (i = 0; i < NTHREADS; i++) {
119450-		thd_join(thds[i], NULL);
119451-	}
119452-
119453-	expect_zu_eq(prof_bt_count(), bt_count,
119454-	    "Unexpected backtrace count change");
119455-	expect_zu_eq(prof_tdata_count(), tdata_count,
119456-	    "Unexpected remaining tdata structures");
119457-
119458-	set_prof_active(false);
119459-
119460-	do_prof_reset(lg_prof_sample_orig);
119461-}
119462-TEST_END
119463-#undef NTHREADS
119464-#undef NALLOCS_PER_THREAD
119465-#undef OBJ_RING_BUF_COUNT
119466-#undef RESET_INTERVAL
119467-#undef DUMP_INTERVAL
119468-
119469-/* Test sampling at the same allocation site across resets. */
119470-#define NITER 10
119471-TEST_BEGIN(test_xallocx) {
119472-	size_t lg_prof_sample_orig;
119473-	unsigned i;
119474-	void *ptrs[NITER];
119475-
119476-	test_skip_if(!config_prof);
119477-
119478-	lg_prof_sample_orig = get_lg_prof_sample();
119479-	set_prof_active(true);
119480-
119481-	/* Reset profiling. */
119482-	do_prof_reset(0);
119483-
119484-	for (i = 0; i < NITER; i++) {
119485-		void *p;
119486-		size_t sz, nsz;
119487-
119488-		/* Reset profiling. */
119489-		do_prof_reset(0);
119490-
119491-		/* Allocate small object (which will be promoted). */
119492-		p = ptrs[i] = mallocx(1, 0);
119493-		expect_ptr_not_null(p, "Unexpected mallocx() failure");
119494-
119495-		/* Reset profiling. */
119496-		do_prof_reset(0);
119497-
119498-		/* Perform successful xallocx(). */
119499-		sz = sallocx(p, 0);
119500-		expect_zu_eq(xallocx(p, sz, 0, 0), sz,
119501-		    "Unexpected xallocx() failure");
119502-
119503-		/* Perform unsuccessful xallocx(). */
119504-		nsz = nallocx(sz+1, 0);
119505-		expect_zu_eq(xallocx(p, nsz, 0, 0), sz,
119506-		    "Unexpected xallocx() success");
119507-	}
119508-
119509-	for (i = 0; i < NITER; i++) {
119510-		/* dallocx. */
119511-		dallocx(ptrs[i], 0);
119512-	}
119513-
119514-	set_prof_active(false);
119515-	do_prof_reset(lg_prof_sample_orig);
119516-}
119517-TEST_END
119518-#undef NITER
119519-
119520-int
119521-main(void) {
119522-	/* Intercept dumping prior to running any tests. */
119523-	prof_dump_open_file = prof_dump_open_file_intercept;
119524-
119525-	return test_no_reentrancy(
119526-	    test_prof_reset_basic,
119527-	    test_prof_reset_cleanup,
119528-	    test_prof_reset,
119529-	    test_xallocx);
119530-}
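For reference, a minimal standalone sketch of driving the "prof.reset" and "prof.lg_sample" mallctls that the deleted test above exercises, assuming a profiling-enabled jemalloc build and a process started with MALLOC_CONF="prof:true"; the chosen sample rate and the error handling are illustrative only.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/* Illustrative sketch; assumes profiling was enabled at build time. */
	size_t lg_sample;
	size_t sz = sizeof(lg_sample);

	/* Read the current sample rate (log2 of average bytes per sample). */
	if (mallctl("prof.lg_sample", &lg_sample, &sz, NULL, 0) != 0) {
		fprintf(stderr, "profiling not available\n");
		return 1;
	}

	/* Reset profile data and switch to a 2^5-byte sample interval. */
	size_t new_lg_sample = 5;
	if (mallctl("prof.reset", NULL, NULL, &new_lg_sample,
	    sizeof(new_lg_sample)) != 0) {
		fprintf(stderr, "prof.reset failed\n");
		return 1;
	}
	printf("lg_prof_sample: %zu -> %zu\n", lg_sample, new_lg_sample);
	return 0;
}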
119531diff --git a/jemalloc/test/unit/prof_reset.sh b/jemalloc/test/unit/prof_reset.sh
119532deleted file mode 100644
119533index daefeb7..0000000
119534--- a/jemalloc/test/unit/prof_reset.sh
119535+++ /dev/null
119536@@ -1,5 +0,0 @@
119537-#!/bin/sh
119538-
119539-if [ "x${enable_prof}" = "x1" ] ; then
119540-  export MALLOC_CONF="prof:true,prof_active:false,lg_prof_sample:0,prof_recent_alloc_max:0"
119541-fi
119542diff --git a/jemalloc/test/unit/prof_stats.c b/jemalloc/test/unit/prof_stats.c
119543deleted file mode 100644
119544index c88c4ae..0000000
119545--- a/jemalloc/test/unit/prof_stats.c
119546+++ /dev/null
119547@@ -1,151 +0,0 @@
119548-#include "test/jemalloc_test.h"
119549-
119550-#define N_PTRS 3
119551-
119552-static void
119553-test_combinations(szind_t ind, size_t sizes_array[N_PTRS],
119554-    int flags_array[N_PTRS]) {
119555-#define MALLCTL_STR_LEN 64
119556-	assert(opt_prof && opt_prof_stats);
119557-
119558-	char mallctl_live_str[MALLCTL_STR_LEN];
119559-	char mallctl_accum_str[MALLCTL_STR_LEN];
119560-	if (ind < SC_NBINS) {
119561-		malloc_snprintf(mallctl_live_str, MALLCTL_STR_LEN,
119562-		    "prof.stats.bins.%u.live", (unsigned)ind);
119563-		malloc_snprintf(mallctl_accum_str, MALLCTL_STR_LEN,
119564-		    "prof.stats.bins.%u.accum", (unsigned)ind);
119565-	} else {
119566-		malloc_snprintf(mallctl_live_str, MALLCTL_STR_LEN,
119567-		    "prof.stats.lextents.%u.live", (unsigned)(ind - SC_NBINS));
119568-		malloc_snprintf(mallctl_accum_str, MALLCTL_STR_LEN,
119569-		    "prof.stats.lextents.%u.accum", (unsigned)(ind - SC_NBINS));
119570-	}
119571-
119572-	size_t stats_len = 2 * sizeof(uint64_t);
119573-
119574-	uint64_t live_stats_orig[2];
119575-	assert_d_eq(mallctl(mallctl_live_str, &live_stats_orig, &stats_len,
119576-	    NULL, 0), 0, "");
119577-	uint64_t accum_stats_orig[2];
119578-	assert_d_eq(mallctl(mallctl_accum_str, &accum_stats_orig, &stats_len,
119579-	    NULL, 0), 0, "");
119580-
119581-	void *ptrs[N_PTRS];
119582-
119583-	uint64_t live_req_sum = 0;
119584-	uint64_t live_count = 0;
119585-	uint64_t accum_req_sum = 0;
119586-	uint64_t accum_count = 0;
119587-
119588-	for (size_t i = 0; i < N_PTRS; ++i) {
119589-		size_t sz = sizes_array[i];
119590-		int flags = flags_array[i];
119591-		void *p = mallocx(sz, flags);
119592-		assert_ptr_not_null(p, "mallocx() failed");
119593-		assert(TEST_MALLOC_SIZE(p) == sz_index2size(ind));
119594-		ptrs[i] = p;
119595-		live_req_sum += sz;
119596-		live_count++;
119597-		accum_req_sum += sz;
119598-		accum_count++;
119599-		uint64_t live_stats[2];
119600-		assert_d_eq(mallctl(mallctl_live_str, &live_stats, &stats_len,
119601-		    NULL, 0), 0, "");
119602-		expect_u64_eq(live_stats[0] - live_stats_orig[0],
119603-		    live_req_sum, "");
119604-		expect_u64_eq(live_stats[1] - live_stats_orig[1],
119605-		    live_count, "");
119606-		uint64_t accum_stats[2];
119607-		assert_d_eq(mallctl(mallctl_accum_str, &accum_stats, &stats_len,
119608-		    NULL, 0), 0, "");
119609-		expect_u64_eq(accum_stats[0] - accum_stats_orig[0],
119610-		    accum_req_sum, "");
119611-		expect_u64_eq(accum_stats[1] - accum_stats_orig[1],
119612-		    accum_count, "");
119613-	}
119614-
119615-	for (size_t i = 0; i < N_PTRS; ++i) {
119616-		size_t sz = sizes_array[i];
119617-		int flags = flags_array[i];
119618-		sdallocx(ptrs[i], sz, flags);
119619-		live_req_sum -= sz;
119620-		live_count--;
119621-		uint64_t live_stats[2];
119622-		assert_d_eq(mallctl(mallctl_live_str, &live_stats, &stats_len,
119623-		    NULL, 0), 0, "");
119624-		expect_u64_eq(live_stats[0] - live_stats_orig[0],
119625-		    live_req_sum, "");
119626-		expect_u64_eq(live_stats[1] - live_stats_orig[1],
119627-		    live_count, "");
119628-		uint64_t accum_stats[2];
119629-		assert_d_eq(mallctl(mallctl_accum_str, &accum_stats, &stats_len,
119630-		    NULL, 0), 0, "");
119631-		expect_u64_eq(accum_stats[0] - accum_stats_orig[0],
119632-		    accum_req_sum, "");
119633-		expect_u64_eq(accum_stats[1] - accum_stats_orig[1],
119634-		    accum_count, "");
119635-	}
119636-#undef MALLCTL_STR_LEN
119637-}
119638-
119639-static void
119640-test_szind_wrapper(szind_t ind) {
119641-	size_t sizes_array[N_PTRS];
119642-	int flags_array[N_PTRS];
119643-	for (size_t i = 0, sz = sz_index2size(ind) - N_PTRS; i < N_PTRS;
119644-	    ++i, ++sz) {
119645-		sizes_array[i] = sz;
119646-		flags_array[i] = 0;
119647-	}
119648-	test_combinations(ind, sizes_array, flags_array);
119649-}
119650-
119651-TEST_BEGIN(test_prof_stats) {
119652-	test_skip_if(!config_prof);
119653-	test_szind_wrapper(0);
119654-	test_szind_wrapper(1);
119655-	test_szind_wrapper(2);
119656-	test_szind_wrapper(SC_NBINS);
119657-	test_szind_wrapper(SC_NBINS + 1);
119658-	test_szind_wrapper(SC_NBINS + 2);
119659-}
119660-TEST_END
119661-
119662-static void
119663-test_szind_aligned_wrapper(szind_t ind, unsigned lg_align) {
119664-	size_t sizes_array[N_PTRS];
119665-	int flags_array[N_PTRS];
119666-	int flags = MALLOCX_LG_ALIGN(lg_align);
119667-	for (size_t i = 0, sz = sz_index2size(ind) - N_PTRS; i < N_PTRS;
119668-	    ++i, ++sz) {
119669-		sizes_array[i] = sz;
119670-		flags_array[i] = flags;
119671-	}
119672-	test_combinations(
119673-	    sz_size2index(sz_sa2u(sz_index2size(ind), 1 << lg_align)),
119674-	    sizes_array, flags_array);
119675-}
119676-
119677-TEST_BEGIN(test_prof_stats_aligned) {
119678-	test_skip_if(!config_prof);
119679-	for (szind_t ind = 0; ind < 10; ++ind) {
119680-		for (unsigned lg_align = 0; lg_align < 10; ++lg_align) {
119681-			test_szind_aligned_wrapper(ind, lg_align);
119682-		}
119683-	}
119684-	for (szind_t ind = SC_NBINS - 5; ind < SC_NBINS + 5; ++ind) {
119685-		for (unsigned lg_align = SC_LG_LARGE_MINCLASS - 5;
119686-		    lg_align < SC_LG_LARGE_MINCLASS + 5; ++lg_align) {
119687-			test_szind_aligned_wrapper(ind, lg_align);
119688-		}
119689-	}
119690-}
119691-TEST_END
119692-
119693-int
119694-main(void) {
119695-	return test(
119696-	    test_prof_stats,
119697-	    test_prof_stats_aligned);
119698-}
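A hedged sketch of reading one bin's live counters through the same "prof.stats.bins.<i>.live" mallctl path that test_combinations() above builds with malloc_snprintf(), assuming the process runs with MALLOC_CONF="prof:true,prof_stats:true"; bin index 0 is arbitrary.

#include <stdio.h>
#include <inttypes.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/* Illustrative sketch; bin index 0 is arbitrary. */
	char name[64];
	uint64_t live[2];	/* [0]: requested bytes, [1]: object count. */
	size_t sz = sizeof(live);

	snprintf(name, sizeof(name), "prof.stats.bins.%u.live", 0U);
	if (mallctl(name, live, &sz, NULL, 0) != 0) {
		fprintf(stderr, "prof stats not available\n");
		return 1;
	}
	printf("bin 0: %" PRIu64 " bytes requested over %" PRIu64
	    " live objects\n", live[0], live[1]);
	return 0;
}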
119699diff --git a/jemalloc/test/unit/prof_stats.sh b/jemalloc/test/unit/prof_stats.sh
119700deleted file mode 100644
119701index f3c819b..0000000
119702--- a/jemalloc/test/unit/prof_stats.sh
119703+++ /dev/null
119704@@ -1,5 +0,0 @@
119705-#!/bin/sh
119706-
119707-if [ "x${enable_prof}" = "x1" ] ; then
119708-  export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0,prof_stats:true"
119709-fi
119710diff --git a/jemalloc/test/unit/prof_sys_thread_name.c b/jemalloc/test/unit/prof_sys_thread_name.c
119711deleted file mode 100644
119712index affc788..0000000
119713--- a/jemalloc/test/unit/prof_sys_thread_name.c
119714+++ /dev/null
119715@@ -1,77 +0,0 @@
119716-#include "test/jemalloc_test.h"
119717-
119718-#include "jemalloc/internal/prof_sys.h"
119719-
119720-static const char *test_thread_name = "test_name";
119721-
119722-static int
119723-test_prof_sys_thread_name_read_error(char *buf, size_t limit) {
119724-	return ENOSYS;
119725-}
119726-
119727-static int
119728-test_prof_sys_thread_name_read(char *buf, size_t limit) {
119729-	assert(strlen(test_thread_name) < limit);
119730-	strncpy(buf, test_thread_name, limit);
119731-	return 0;
119732-}
119733-
119734-static int
119735-test_prof_sys_thread_name_read_clear(char *buf, size_t limit) {
119736-	assert(limit > 0);
119737-	buf[0] = '\0';
119738-	return 0;
119739-}
119740-
119741-TEST_BEGIN(test_prof_sys_thread_name) {
119742-	test_skip_if(!config_prof);
119743-
119744-	bool oldval;
119745-	size_t sz = sizeof(oldval);
119746-	assert_d_eq(mallctl("opt.prof_sys_thread_name", &oldval, &sz, NULL, 0),
119747-	    0, "mallctl failed");
119748-	assert_true(oldval, "option was not set correctly");
119749-
119750-	const char *thread_name;
119751-	sz = sizeof(thread_name);
119752-	assert_d_eq(mallctl("thread.prof.name", &thread_name, &sz, NULL, 0), 0,
119753-	    "mallctl read for thread name should not fail");
119754-	expect_str_eq(thread_name, "", "Initial thread name should be empty");
119755-
119756-	thread_name = test_thread_name;
119757-	assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, sz),
119758-	    ENOENT, "mallctl write for thread name should fail");
119759-	assert_ptr_eq(thread_name, test_thread_name,
119760-	    "Thread name should not be touched");
119761-
119762-	prof_sys_thread_name_read = test_prof_sys_thread_name_read_error;
119763-	void *p = malloc(1);
119764-	free(p);
119765-	assert_d_eq(mallctl("thread.prof.name", &thread_name, &sz, NULL, 0), 0,
119766-	    "mallctl read for thread name should not fail");
119767-	assert_str_eq(thread_name, "",
119768-	    "Thread name should stay the same if the system call fails");
119769-
119770-	prof_sys_thread_name_read = test_prof_sys_thread_name_read;
119771-	p = malloc(1);
119772-	free(p);
119773-	assert_d_eq(mallctl("thread.prof.name", &thread_name, &sz, NULL, 0), 0,
119774-	    "mallctl read for thread name should not fail");
119775-	assert_str_eq(thread_name, test_thread_name,
119776-	    "Thread name should be changed if the system call succeeds");
119777-
119778-	prof_sys_thread_name_read = test_prof_sys_thread_name_read_clear;
119779-	p = malloc(1);
119780-	free(p);
119781-	assert_d_eq(mallctl("thread.prof.name", &thread_name, &sz, NULL, 0), 0,
119782-	    "mallctl read for thread name should not fail");
119783-	expect_str_eq(thread_name, "", "Thread name should be updated if the "
119784-	    "system call returns a different name");
119785-}
119786-TEST_END
119787-
119788-int
119789-main(void) {
119790-	return test(
119791-	    test_prof_sys_thread_name);
119792-}
119793diff --git a/jemalloc/test/unit/prof_sys_thread_name.sh b/jemalloc/test/unit/prof_sys_thread_name.sh
119794deleted file mode 100644
119795index 1f02a8a..0000000
119796--- a/jemalloc/test/unit/prof_sys_thread_name.sh
119797+++ /dev/null
119798@@ -1,5 +0,0 @@
119799-#!/bin/sh
119800-
119801-if [ "x${enable_prof}" = "x1" ] ; then
119802-  export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0,prof_sys_thread_name:true"
119803-fi
119804diff --git a/jemalloc/test/unit/prof_tctx.c b/jemalloc/test/unit/prof_tctx.c
119805deleted file mode 100644
119806index e0efdc3..0000000
119807--- a/jemalloc/test/unit/prof_tctx.c
119808+++ /dev/null
119809@@ -1,48 +0,0 @@
119810-#include "test/jemalloc_test.h"
119811-
119812-#include "jemalloc/internal/prof_data.h"
119813-
119814-TEST_BEGIN(test_prof_realloc) {
119815-	tsd_t *tsd;
119816-	int flags;
119817-	void *p, *q;
119818-	prof_info_t prof_info_p, prof_info_q;
119819-	prof_cnt_t cnt_0, cnt_1, cnt_2, cnt_3;
119820-
119821-	test_skip_if(!config_prof);
119822-
119823-	tsd = tsd_fetch();
119824-	flags = MALLOCX_TCACHE_NONE;
119825-
119826-	prof_cnt_all(&cnt_0);
119827-	p = mallocx(1024, flags);
119828-	expect_ptr_not_null(p, "Unexpected mallocx() failure");
119829-	prof_info_get(tsd, p, NULL, &prof_info_p);
119830-	expect_ptr_ne(prof_info_p.alloc_tctx, (prof_tctx_t *)(uintptr_t)1U,
119831-	    "Expected valid tctx");
119832-	prof_cnt_all(&cnt_1);
119833-	expect_u64_eq(cnt_0.curobjs + 1, cnt_1.curobjs,
119834-	    "Allocation should have increased sample size");
119835-
119836-	q = rallocx(p, 2048, flags);
119837-	expect_ptr_ne(p, q, "Expected move");
119838-	expect_ptr_not_null(q, "Unexpected rallocx() failure");
119839-	prof_info_get(tsd, q, NULL, &prof_info_q);
119840-	expect_ptr_ne(prof_info_q.alloc_tctx, (prof_tctx_t *)(uintptr_t)1U,
119841-	    "Expected valid tctx");
119842-	prof_cnt_all(&cnt_2);
119843-	expect_u64_eq(cnt_1.curobjs, cnt_2.curobjs,
119844-	    "Reallocation should not have changed sample size");
119845-
119846-	dallocx(q, flags);
119847-	prof_cnt_all(&cnt_3);
119848-	expect_u64_eq(cnt_0.curobjs, cnt_3.curobjs,
119849-	    "Sample size should have returned to base level");
119850-}
119851-TEST_END
119852-
119853-int
119854-main(void) {
119855-	return test_no_reentrancy(
119856-	    test_prof_realloc);
119857-}
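A minimal sketch of the mallocx()/rallocx()/dallocx() sequence that the deleted test above samples, bypassing the thread cache with MALLOCX_TCACHE_NONE as the test does; the sizes are arbitrary and no profiling mallctls are touched here.

#include <jemalloc/jemalloc.h>

int
main(void) {
	/* Illustrative sketch; sizes are arbitrary. */
	int flags = MALLOCX_TCACHE_NONE;

	void *p = mallocx(1024, flags);
	if (p == NULL) {
		return 1;
	}
	/* rallocx() may move the object; q supersedes p on success. */
	void *q = rallocx(p, 2048, flags);
	if (q == NULL) {
		dallocx(p, flags);
		return 1;
	}
	dallocx(q, flags);
	return 0;
}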
119858diff --git a/jemalloc/test/unit/prof_tctx.sh b/jemalloc/test/unit/prof_tctx.sh
119859deleted file mode 100644
119860index 485f9bf..0000000
119861--- a/jemalloc/test/unit/prof_tctx.sh
119862+++ /dev/null
119863@@ -1,5 +0,0 @@
119864-#!/bin/sh
119865-
119866-if [ "x${enable_prof}" = "x1" ] ; then
119867-  export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0"
119868-fi
119869diff --git a/jemalloc/test/unit/prof_thread_name.c b/jemalloc/test/unit/prof_thread_name.c
119870deleted file mode 100644
119871index 3c4614f..0000000
119872--- a/jemalloc/test/unit/prof_thread_name.c
119873+++ /dev/null
119874@@ -1,122 +0,0 @@
119875-#include "test/jemalloc_test.h"
119876-
119877-static void
119878-mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func,
119879-    int line) {
119880-	const char *thread_name_old;
119881-	size_t sz;
119882-
119883-	sz = sizeof(thread_name_old);
119884-	expect_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz,
119885-	    NULL, 0), 0,
119886-	    "%s():%d: Unexpected mallctl failure reading thread.prof.name",
119887-	    func, line);
119888-	expect_str_eq(thread_name_old, thread_name_expected,
119889-	    "%s():%d: Unexpected thread.prof.name value", func, line);
119890-}
119891-#define mallctl_thread_name_get(a)					\
119892-	mallctl_thread_name_get_impl(a, __func__, __LINE__)
119893-
119894-static void
119895-mallctl_thread_name_set_impl(const char *thread_name, const char *func,
119896-    int line) {
119897-	expect_d_eq(mallctl("thread.prof.name", NULL, NULL,
119898-	    (void *)&thread_name, sizeof(thread_name)), 0,
119899-	    "%s():%d: Unexpected mallctl failure writing thread.prof.name",
119900-	    func, line);
119901-	mallctl_thread_name_get_impl(thread_name, func, line);
119902-}
119903-#define mallctl_thread_name_set(a)					\
119904-	mallctl_thread_name_set_impl(a, __func__, __LINE__)
119905-
119906-TEST_BEGIN(test_prof_thread_name_validation) {
119907-	const char *thread_name;
119908-
119909-	test_skip_if(!config_prof);
119910-	test_skip_if(opt_prof_sys_thread_name);
119911-
119912-	mallctl_thread_name_get("");
119913-	mallctl_thread_name_set("hi there");
119914-
119915-	/* NULL input shouldn't be allowed. */
119916-	thread_name = NULL;
119917-	expect_d_eq(mallctl("thread.prof.name", NULL, NULL,
119918-	    (void *)&thread_name, sizeof(thread_name)), EFAULT,
119919-	    "Unexpected mallctl result writing \"%s\" to thread.prof.name",
119920-	    thread_name);
119921-
119922-	/* '\n' shouldn't be allowed. */
119923-	thread_name = "hi\nthere";
119924-	expect_d_eq(mallctl("thread.prof.name", NULL, NULL,
119925-	    (void *)&thread_name, sizeof(thread_name)), EFAULT,
119926-	    "Unexpected mallctl result writing \"%s\" to thread.prof.name",
119927-	    thread_name);
119928-
119929-	/* Simultaneous read/write shouldn't be allowed. */
119930-	{
119931-		const char *thread_name_old;
119932-		size_t sz;
119933-
119934-		sz = sizeof(thread_name_old);
119935-		expect_d_eq(mallctl("thread.prof.name",
119936-		    (void *)&thread_name_old, &sz, (void *)&thread_name,
119937-		    sizeof(thread_name)), EPERM,
119938-		    "Unexpected mallctl result writing \"%s\" to "
119939-		    "thread.prof.name", thread_name);
119940-	}
119941-
119942-	mallctl_thread_name_set("");
119943-}
119944-TEST_END
119945-
119946-#define NTHREADS	4
119947-#define NRESET		25
119948-static void *
119949-thd_start(void *varg) {
119950-	unsigned thd_ind = *(unsigned *)varg;
119951-	char thread_name[16] = "";
119952-	unsigned i;
119953-
119954-	malloc_snprintf(thread_name, sizeof(thread_name), "thread %u", thd_ind);
119955-
119956-	mallctl_thread_name_get("");
119957-	mallctl_thread_name_set(thread_name);
119958-
119959-	for (i = 0; i < NRESET; i++) {
119960-		expect_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
119961-		    "Unexpected error while resetting heap profile data");
119962-		mallctl_thread_name_get(thread_name);
119963-	}
119964-
119965-	mallctl_thread_name_set(thread_name);
119966-	mallctl_thread_name_set("");
119967-
119968-	return NULL;
119969-}
119970-
119971-TEST_BEGIN(test_prof_thread_name_threaded) {
119972-	test_skip_if(!config_prof);
119973-	test_skip_if(opt_prof_sys_thread_name);
119974-
119975-	thd_t thds[NTHREADS];
119976-	unsigned thd_args[NTHREADS];
119977-	unsigned i;
119978-
119979-	for (i = 0; i < NTHREADS; i++) {
119980-		thd_args[i] = i;
119981-		thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
119982-	}
119983-	for (i = 0; i < NTHREADS; i++) {
119984-		thd_join(thds[i], NULL);
119985-	}
119986-}
119987-TEST_END
119988-#undef NTHREADS
119989-#undef NRESET
119990-
119991-int
119992-main(void) {
119993-	return test(
119994-	    test_prof_thread_name_validation,
119995-	    test_prof_thread_name_threaded);
119996-}
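A hedged sketch of tagging the calling thread in heap profiles via the "thread.prof.name" mallctl, mirroring the mallctl_thread_name_set_impl() helper above; it assumes profiling is enabled with opt.prof_sys_thread_name off, and the name string is arbitrary.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/* Illustrative sketch; the thread name is arbitrary. */
	const char *name = "worker-0";

	/* Write: pass a pointer to the string pointer, as the helpers do. */
	if (mallctl("thread.prof.name", NULL, NULL, (void *)&name,
	    sizeof(name)) != 0) {
		fprintf(stderr, "could not set thread.prof.name\n");
		return 1;
	}

	/* Read the name back. */
	const char *cur;
	size_t sz = sizeof(cur);
	if (mallctl("thread.prof.name", (void *)&cur, &sz, NULL, 0) == 0) {
		printf("thread.prof.name = \"%s\"\n", cur);
	}
	return 0;
}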
119997diff --git a/jemalloc/test/unit/prof_thread_name.sh b/jemalloc/test/unit/prof_thread_name.sh
119998deleted file mode 100644
119999index 298c105..0000000
120000--- a/jemalloc/test/unit/prof_thread_name.sh
120001+++ /dev/null
120002@@ -1,5 +0,0 @@
120003-#!/bin/sh
120004-
120005-if [ "x${enable_prof}" = "x1" ] ; then
120006-  export MALLOC_CONF="prof:true,prof_active:false"
120007-fi
120008diff --git a/jemalloc/test/unit/psset.c b/jemalloc/test/unit/psset.c
120009deleted file mode 100644
120010index 6ff7201..0000000
120011--- a/jemalloc/test/unit/psset.c
120012+++ /dev/null
120013@@ -1,748 +0,0 @@
120014-#include "test/jemalloc_test.h"
120015-
120016-#include "jemalloc/internal/psset.h"
120017-
120018-#define PAGESLAB_ADDR ((void *)(1234 * HUGEPAGE))
120019-#define PAGESLAB_AGE 5678
120020-
120021-#define ALLOC_ARENA_IND 111
120022-#define ALLOC_ESN 222
120023-
120024-static void
120025-edata_init_test(edata_t *edata) {
120026-	memset(edata, 0, sizeof(*edata));
120027-	edata_arena_ind_set(edata, ALLOC_ARENA_IND);
120028-	edata_esn_set(edata, ALLOC_ESN);
120029-}
120030-
120031-static void
120032-test_psset_fake_purge(hpdata_t *ps) {
120033-	hpdata_purge_state_t purge_state;
120034-	hpdata_alloc_allowed_set(ps, false);
120035-	hpdata_purge_begin(ps, &purge_state);
120036-	void *addr;
120037-	size_t size;
120038-	while (hpdata_purge_next(ps, &purge_state, &addr, &size)) {
120039-	}
120040-	hpdata_purge_end(ps, &purge_state);
120041-	hpdata_alloc_allowed_set(ps, true);
120042-}
120043-
120044-static void
120045-test_psset_alloc_new(psset_t *psset, hpdata_t *ps, edata_t *r_edata,
120046-    size_t size) {
120047-	hpdata_assert_empty(ps);
120048-
120049-	test_psset_fake_purge(ps);
120050-
120051-	psset_insert(psset, ps);
120052-	psset_update_begin(psset, ps);
120053-
120054-	void *addr = hpdata_reserve_alloc(ps, size);
120055-	edata_init(r_edata, edata_arena_ind_get(r_edata), addr, size,
120056-	    /* slab */ false, SC_NSIZES, /* sn */ 0, extent_state_active,
120057-	    /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
120058-	    EXTENT_NOT_HEAD);
120059-	edata_ps_set(r_edata, ps);
120060-	psset_update_end(psset, ps);
120061-}
120062-
120063-static bool
120064-test_psset_alloc_reuse(psset_t *psset, edata_t *r_edata, size_t size) {
120065-	hpdata_t *ps = psset_pick_alloc(psset, size);
120066-	if (ps == NULL) {
120067-		return true;
120068-	}
120069-	psset_update_begin(psset, ps);
120070-	void *addr = hpdata_reserve_alloc(ps, size);
120071-	edata_init(r_edata, edata_arena_ind_get(r_edata), addr, size,
120072-	    /* slab */ false, SC_NSIZES, /* sn */ 0, extent_state_active,
120073-	    /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
120074-	    EXTENT_NOT_HEAD);
120075-	edata_ps_set(r_edata, ps);
120076-	psset_update_end(psset, ps);
120077-	return false;
120078-}
120079-
120080-static hpdata_t *
120081-test_psset_dalloc(psset_t *psset, edata_t *edata) {
120082-	hpdata_t *ps = edata_ps_get(edata);
120083-	psset_update_begin(psset, ps);
120084-	hpdata_unreserve(ps, edata_addr_get(edata), edata_size_get(edata));
120085-	psset_update_end(psset, ps);
120086-	if (hpdata_empty(ps)) {
120087-		psset_remove(psset, ps);
120088-		return ps;
120089-	} else {
120090-		return NULL;
120091-	}
120092-}
120093-
120094-static void
120095-edata_expect(edata_t *edata, size_t page_offset, size_t page_cnt) {
120096-	/*
120097-	 * Note that allocations should get the arena ind of their home
120098-	 * arena, *not* the arena ind of the pageslab allocator.
120099-	 */
120100-	expect_u_eq(ALLOC_ARENA_IND, edata_arena_ind_get(edata),
120101-	    "Arena ind changed");
120102-	expect_ptr_eq(
120103-	    (void *)((uintptr_t)PAGESLAB_ADDR + (page_offset << LG_PAGE)),
120104-	    edata_addr_get(edata), "Didn't allocate in order");
120105-	expect_zu_eq(page_cnt << LG_PAGE, edata_size_get(edata), "");
120106-	expect_false(edata_slab_get(edata), "");
120107-	expect_u_eq(SC_NSIZES, edata_szind_get_maybe_invalid(edata),
120108-	    "");
120109-	expect_u64_eq(0, edata_sn_get(edata), "");
120110-	expect_d_eq(edata_state_get(edata), extent_state_active, "");
120111-	expect_false(edata_zeroed_get(edata), "");
120112-	expect_true(edata_committed_get(edata), "");
120113-	expect_d_eq(EXTENT_PAI_HPA, edata_pai_get(edata), "");
120114-	expect_false(edata_is_head_get(edata), "");
120115-}
120116-
120117-TEST_BEGIN(test_empty) {
120118-	bool err;
120119-	hpdata_t pageslab;
120120-	hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
120121-
120122-	edata_t alloc;
120123-	edata_init_test(&alloc);
120124-
120125-	psset_t psset;
120126-	psset_init(&psset);
120127-
120128-	/* An empty psset should fail all allocations. */
120129-	err = test_psset_alloc_reuse(&psset, &alloc, PAGE);
120130-	expect_true(err, "Empty psset succeeded in an allocation.");
120131-}
120132-TEST_END
120133-
120134-TEST_BEGIN(test_fill) {
120135-	bool err;
120136-
120137-	hpdata_t pageslab;
120138-	hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
120139-
120140-	edata_t alloc[HUGEPAGE_PAGES];
120141-
120142-	psset_t psset;
120143-	psset_init(&psset);
120144-
120145-	edata_init_test(&alloc[0]);
120146-	test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
120147-	for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
120148-		edata_init_test(&alloc[i]);
120149-		err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
120150-		expect_false(err, "Nonempty psset failed page allocation.");
120151-	}
120152-
120153-	for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
120154-		edata_t *edata = &alloc[i];
120155-		edata_expect(edata, i, 1);
120156-	}
120157-
120158-	/* The pageslab, and thus the psset, should now be full. */
120159-	edata_t extra_alloc;
120160-	edata_init_test(&extra_alloc);
120161-	err = test_psset_alloc_reuse(&psset, &extra_alloc, PAGE);
120162-	expect_true(err, "Alloc succeeded even though psset should be empty");
120163-}
120164-TEST_END
120165-
120166-TEST_BEGIN(test_reuse) {
120167-	bool err;
120168-	hpdata_t *ps;
120169-
120170-	hpdata_t pageslab;
120171-	hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
120172-
120173-	edata_t alloc[HUGEPAGE_PAGES];
120174-
120175-	psset_t psset;
120176-	psset_init(&psset);
120177-
120178-	edata_init_test(&alloc[0]);
120179-	test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
120180-	for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
120181-		edata_init_test(&alloc[i]);
120182-		err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
120183-		expect_false(err, "Nonempty psset failed page allocation.");
120184-	}
120185-
120186-	/* Free odd indices. */
120187-	for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
120188-		if (i % 2 == 0) {
120189-			continue;
120190-		}
120191-		ps = test_psset_dalloc(&psset, &alloc[i]);
120192-		expect_ptr_null(ps, "Nonempty pageslab evicted");
120193-	}
120194-	/* Realloc into them. */
120195-	for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
120196-		if (i % 2 == 0) {
120197-			continue;
120198-		}
120199-		err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
120200-		expect_false(err, "Nonempty psset failed page allocation.");
120201-		edata_expect(&alloc[i], i, 1);
120202-	}
120203-	/* Now free the pages at indices 0 or 1 mod 4. */
120204-	for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
120205-		if (i % 4 > 1) {
120206-			continue;
120207-		}
120208-		ps = test_psset_dalloc(&psset, &alloc[i]);
120209-		expect_ptr_null(ps, "Nonempty pageslab evicted");
120210-	}
120211-	/* And realloc 2-page allocations into them. */
120212-	for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
120213-		if (i % 4 != 0) {
120214-			continue;
120215-		}
120216-		err = test_psset_alloc_reuse(&psset, &alloc[i], 2 * PAGE);
120217-		expect_false(err, "Nonempty psset failed page allocation.");
120218-		edata_expect(&alloc[i], i, 2);
120219-	}
120220-	/* Free all the 2-page allocations. */
120221-	for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
120222-		if (i % 4 != 0) {
120223-			continue;
120224-		}
120225-		ps = test_psset_dalloc(&psset, &alloc[i]);
120226-		expect_ptr_null(ps, "Nonempty pageslab evicted");
120227-	}
120228-	/*
120229-	 * Free up a 1-page hole next to a 2-page hole, but somewhere in the
120230-	 * middle of the pageslab.  Index 11 should be right before such a hole
120231-	 * (since 12 % 4 == 0).
120232-	 */
120233-	size_t index_of_3 = 11;
120234-	ps = test_psset_dalloc(&psset, &alloc[index_of_3]);
120235-	expect_ptr_null(ps, "Nonempty pageslab evicted");
120236-	err = test_psset_alloc_reuse(&psset, &alloc[index_of_3], 3 * PAGE);
120237-	expect_false(err, "Should have been able to find alloc.");
120238-	edata_expect(&alloc[index_of_3], index_of_3, 3);
120239-
120240-	/*
120241-	 * Free up a 4-page hole at the end.  Recall that the pages at offsets 0
120242-	 * and 1 mod 4 were freed above, so we just have to free the last
120243-	 * allocations.
120244-	 */
120245-	ps = test_psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 1]);
120246-	expect_ptr_null(ps, "Nonempty pageslab evicted");
120247-	ps = test_psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 2]);
120248-	expect_ptr_null(ps, "Nonempty pageslab evicted");
120249-
120250-	/* Make sure we can satisfy an allocation at the very end of a slab. */
120251-	size_t index_of_4 = HUGEPAGE_PAGES - 4;
120252-	err = test_psset_alloc_reuse(&psset, &alloc[index_of_4], 4 * PAGE);
120253-	expect_false(err, "Should have been able to find alloc.");
120254-	edata_expect(&alloc[index_of_4], index_of_4, 4);
120255-}
120256-TEST_END
120257-
120258-TEST_BEGIN(test_evict) {
120259-	bool err;
120260-	hpdata_t *ps;
120261-
120262-	hpdata_t pageslab;
120263-	hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
120264-
120265-	edata_t alloc[HUGEPAGE_PAGES];
120266-
120267-	psset_t psset;
120268-	psset_init(&psset);
120269-
120270-	/* Alloc the whole slab. */
120271-	edata_init_test(&alloc[0]);
120272-	test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
120273-	for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
120274-		edata_init_test(&alloc[i]);
120275-		err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
120276-		expect_false(err, "Unexpected allocation failure");
120277-	}
120278-
120279-	/* Dealloc the whole slab, going forwards. */
120280-	for (size_t i = 0; i < HUGEPAGE_PAGES - 1; i++) {
120281-		ps = test_psset_dalloc(&psset, &alloc[i]);
120282-		expect_ptr_null(ps, "Nonempty pageslab evicted");
120283-	}
120284-	ps = test_psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 1]);
120285-	expect_ptr_eq(&pageslab, ps, "Empty pageslab not evicted.");
120286-
120287-	err = test_psset_alloc_reuse(&psset, &alloc[0], PAGE);
120288-	expect_true(err, "psset should be empty.");
120289-}
120290-TEST_END
120291-
120292-TEST_BEGIN(test_multi_pageslab) {
120293-	bool err;
120294-	hpdata_t *ps;
120295-
120296-	hpdata_t pageslab[2];
120297-	hpdata_init(&pageslab[0], PAGESLAB_ADDR, PAGESLAB_AGE);
120298-	hpdata_init(&pageslab[1],
120299-	    (void *)((uintptr_t)PAGESLAB_ADDR + HUGEPAGE),
120300-	    PAGESLAB_AGE + 1);
120301-
120302-	edata_t alloc[2][HUGEPAGE_PAGES];
120303-
120304-	psset_t psset;
120305-	psset_init(&psset);
120306-
120307-	/* Insert both slabs. */
120308-	edata_init_test(&alloc[0][0]);
120309-	test_psset_alloc_new(&psset, &pageslab[0], &alloc[0][0], PAGE);
120310-	edata_init_test(&alloc[1][0]);
120311-	test_psset_alloc_new(&psset, &pageslab[1], &alloc[1][0], PAGE);
120312-
120313-	/* Fill them both up; make sure we do so in first-fit order. */
120314-	for (size_t i = 0; i < 2; i++) {
120315-		for (size_t j = 1; j < HUGEPAGE_PAGES; j++) {
120316-			edata_init_test(&alloc[i][j]);
120317-			err = test_psset_alloc_reuse(&psset, &alloc[i][j], PAGE);
120318-			expect_false(err,
120319-			    "Nonempty psset failed page allocation.");
120320-			assert_ptr_eq(&pageslab[i], edata_ps_get(&alloc[i][j]),
120321-			    "Didn't pick pageslabs in first-fit");
120322-		}
120323-	}
120324-
120325-	/*
120326-	 * Free up a 2-page hole in the earlier slab, and a 1-page one in the
120327-	 * later one.  We should still pick the later one.
120328-	 */
120329-	ps = test_psset_dalloc(&psset, &alloc[0][0]);
120330-	expect_ptr_null(ps, "Unexpected eviction");
120331-	ps = test_psset_dalloc(&psset, &alloc[0][1]);
120332-	expect_ptr_null(ps, "Unexpected eviction");
120333-	ps = test_psset_dalloc(&psset, &alloc[1][0]);
120334-	expect_ptr_null(ps, "Unexpected eviction");
120335-	err = test_psset_alloc_reuse(&psset, &alloc[0][0], PAGE);
120336-	expect_ptr_eq(&pageslab[1], edata_ps_get(&alloc[0][0]),
120337-	    "Should have picked the fuller pageslab");
120338-
120339-	/*
120340-	 * Now both slabs have 1-page holes. Free up a second one in the later
120341-	 * slab.
120342-	 */
120343-	ps = test_psset_dalloc(&psset, &alloc[1][1]);
120344-	expect_ptr_null(ps, "Unexpected eviction");
120345-
120346-	/*
120347-	 * We should be able to allocate a 2-page object, even though an earlier
120348-	 * size class is nonempty.
120349-	 */
120350-	err = test_psset_alloc_reuse(&psset, &alloc[1][0], 2 * PAGE);
120351-	expect_false(err, "Allocation should have succeeded");
120352-}
120353-TEST_END
120354-
120355-static void
120356-stats_expect_empty(psset_bin_stats_t *stats) {
120357-	assert_zu_eq(0, stats->npageslabs,
120358-	    "Supposedly empty bin had positive npageslabs");
120359-	expect_zu_eq(0, stats->nactive,
120360-	    "Supposedly empty bin had positive nactive");
120361-}
120362-
120363-static void
120364-stats_expect(psset_t *psset, size_t nactive) {
120365-	if (nactive == HUGEPAGE_PAGES) {
120366-		expect_zu_eq(1, psset->stats.full_slabs[0].npageslabs,
120367-		    "Expected a full slab");
120368-		expect_zu_eq(HUGEPAGE_PAGES,
120369-		    psset->stats.full_slabs[0].nactive,
120370-		    "Should have exactly filled the bin");
120371-	} else {
120372-		stats_expect_empty(&psset->stats.full_slabs[0]);
120373-	}
120374-	size_t ninactive = HUGEPAGE_PAGES - nactive;
120375-	pszind_t nonempty_pind = PSSET_NPSIZES;
120376-	if (ninactive != 0 && ninactive < HUGEPAGE_PAGES) {
120377-		nonempty_pind = sz_psz2ind(sz_psz_quantize_floor(
120378-		    ninactive << LG_PAGE));
120379-	}
120380-	for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
120381-		if (i == nonempty_pind) {
120382-			assert_zu_eq(1,
120383-			    psset->stats.nonfull_slabs[i][0].npageslabs,
120384-			    "Should have found a slab");
120385-			expect_zu_eq(nactive,
120386-			    psset->stats.nonfull_slabs[i][0].nactive,
120387-			    "Mismatch in active pages");
120388-		} else {
120389-			stats_expect_empty(&psset->stats.nonfull_slabs[i][0]);
120390-		}
120391-	}
120392-	expect_zu_eq(nactive, psset_nactive(psset), "");
120393-}
120394-
120395-TEST_BEGIN(test_stats) {
120396-	bool err;
120397-
120398-	hpdata_t pageslab;
120399-	hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
120400-
120401-	edata_t alloc[HUGEPAGE_PAGES];
120402-
120403-	psset_t psset;
120404-	psset_init(&psset);
120405-	stats_expect(&psset, 0);
120406-
120407-	edata_init_test(&alloc[0]);
120408-	test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
120409-	for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
120410-		stats_expect(&psset, i);
120411-		edata_init_test(&alloc[i]);
120412-		err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
120413-		expect_false(err, "Nonempty psset failed page allocation.");
120414-	}
120415-	stats_expect(&psset, HUGEPAGE_PAGES);
120416-	hpdata_t *ps;
120417-	for (ssize_t i = HUGEPAGE_PAGES - 1; i >= 0; i--) {
120418-		ps = test_psset_dalloc(&psset, &alloc[i]);
120419-		expect_true((ps == NULL) == (i != 0),
120420-		    "test_psset_dalloc should only evict a slab on the last "
120421-		    "free");
120422-		stats_expect(&psset, i);
120423-	}
120424-
120425-	test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
120426-	stats_expect(&psset, 1);
120427-	psset_update_begin(&psset, &pageslab);
120428-	stats_expect(&psset, 0);
120429-	psset_update_end(&psset, &pageslab);
120430-	stats_expect(&psset, 1);
120431-}
120432-TEST_END
120433-
120434-/*
120435- * Fills in and inserts two pageslabs, with the first better than the second,
120436- * and each fully allocated (into the allocations in allocs and worse_allocs,
120437- * each of which should be HUGEPAGE_PAGES long), except for a single free page
120438- * at the end.
120439- *
120440- * (There's nothing magic about these numbers; it's just useful to share the
120441- * setup between the oldest fit and the insert/remove test).
120442- */
120443-static void
120444-init_test_pageslabs(psset_t *psset, hpdata_t *pageslab,
120445-    hpdata_t *worse_pageslab, edata_t *alloc, edata_t *worse_alloc) {
120446-	bool err;
120447-
120448-	hpdata_init(pageslab, (void *)(10 * HUGEPAGE), PAGESLAB_AGE);
120449-	/*
120450-	 * This pageslab would be better from an address-first-fit POV, but
120451-	 * worse from an age POV.
120452-	 */
120453-	hpdata_init(worse_pageslab, (void *)(9 * HUGEPAGE), PAGESLAB_AGE + 1);
120454-
120455-	psset_init(psset);
120456-
120457-	edata_init_test(&alloc[0]);
120458-	test_psset_alloc_new(psset, pageslab, &alloc[0], PAGE);
120459-	for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
120460-		edata_init_test(&alloc[i]);
120461-		err = test_psset_alloc_reuse(psset, &alloc[i], PAGE);
120462-		expect_false(err, "Nonempty psset failed page allocation.");
120463-		expect_ptr_eq(pageslab, edata_ps_get(&alloc[i]),
120464-		    "Allocated from the wrong pageslab");
120465-	}
120466-
120467-	edata_init_test(&worse_alloc[0]);
120468-	test_psset_alloc_new(psset, worse_pageslab, &worse_alloc[0], PAGE);
120469-	expect_ptr_eq(worse_pageslab, edata_ps_get(&worse_alloc[0]),
120470-	    "Allocated from the wrong pageslab");
120471-	/*
120472-	 * Make the two pssets otherwise indistinguishable; all full except for
120473-	 * a single page.
120474-	 */
120475-	for (size_t i = 1; i < HUGEPAGE_PAGES - 1; i++) {
120476-		edata_init_test(&worse_alloc[i]);
120477-		err = test_psset_alloc_reuse(psset, &worse_alloc[i], PAGE);
120478-		expect_false(err, "Nonempty psset failed page allocation.");
120479-		expect_ptr_eq(worse_pageslab, edata_ps_get(&worse_alloc[i]),
120480-		    "Allocated from the wrong pageslab");
120481-	}
120482-
120483-	/* Deallocate the last page from the older pageslab. */
120484-	hpdata_t *evicted = test_psset_dalloc(psset,
120485-	    &alloc[HUGEPAGE_PAGES - 1]);
120486-	expect_ptr_null(evicted, "Unexpected eviction");
120487-}
120488-
120489-TEST_BEGIN(test_oldest_fit) {
120490-	bool err;
120491-	edata_t alloc[HUGEPAGE_PAGES];
120492-	edata_t worse_alloc[HUGEPAGE_PAGES];
120493-
120494-	hpdata_t pageslab;
120495-	hpdata_t worse_pageslab;
120496-
120497-	psset_t psset;
120498-
120499-	init_test_pageslabs(&psset, &pageslab, &worse_pageslab, alloc,
120500-	    worse_alloc);
120501-
120502-	/* The edata should come from the better pageslab. */
120503-	edata_t test_edata;
120504-	edata_init_test(&test_edata);
120505-	err = test_psset_alloc_reuse(&psset, &test_edata, PAGE);
120506-	expect_false(err, "Nonempty psset failed page allocation");
120507-	expect_ptr_eq(&pageslab, edata_ps_get(&test_edata),
120508-	    "Allocated from the wrong pageslab");
120509-}
120510-TEST_END
120511-
120512-TEST_BEGIN(test_insert_remove) {
120513-	bool err;
120514-	hpdata_t *ps;
120515-	edata_t alloc[HUGEPAGE_PAGES];
120516-	edata_t worse_alloc[HUGEPAGE_PAGES];
120517-
120518-	hpdata_t pageslab;
120519-	hpdata_t worse_pageslab;
120520-
120521-	psset_t psset;
120522-
120523-	init_test_pageslabs(&psset, &pageslab, &worse_pageslab, alloc,
120524-	    worse_alloc);
120525-
120526-	/* Remove better; should still be able to alloc from worse. */
120527-	psset_update_begin(&psset, &pageslab);
120528-	err = test_psset_alloc_reuse(&psset, &worse_alloc[HUGEPAGE_PAGES - 1],
120529-	    PAGE);
120530-	expect_false(err, "Removal should still leave an empty page");
120531-	expect_ptr_eq(&worse_pageslab,
120532-	    edata_ps_get(&worse_alloc[HUGEPAGE_PAGES - 1]),
120533-	    "Allocated out of wrong ps");
120534-
120535-	/*
120536-	 * After deallocating the previous alloc and reinserting better, it
120537-	 * should be preferred for future allocations.
120538-	 */
120539-	ps = test_psset_dalloc(&psset, &worse_alloc[HUGEPAGE_PAGES - 1]);
120540-	expect_ptr_null(ps, "Incorrect eviction of nonempty pageslab");
120541-	psset_update_end(&psset, &pageslab);
120542-	err = test_psset_alloc_reuse(&psset, &alloc[HUGEPAGE_PAGES - 1], PAGE);
120543-	expect_false(err, "psset should be nonempty");
120544-	expect_ptr_eq(&pageslab, edata_ps_get(&alloc[HUGEPAGE_PAGES - 1]),
120545-	    "Removal/reinsertion shouldn't change ordering");
120546-	/*
120547-	 * After deallocating and removing both, allocations should fail.
120548-	 */
120549-	ps = test_psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 1]);
120550-	expect_ptr_null(ps, "Incorrect eviction");
120551-	psset_update_begin(&psset, &pageslab);
120552-	psset_update_begin(&psset, &worse_pageslab);
120553-	err = test_psset_alloc_reuse(&psset, &alloc[HUGEPAGE_PAGES - 1], PAGE);
120554-	expect_true(err, "psset should be empty, but an alloc succeeded");
120555-}
120556-TEST_END
120557-
120558-TEST_BEGIN(test_purge_prefers_nonhuge) {
120559-	/*
120560-	 * All else being equal, we should prefer purging non-huge pages over
120561-	 * huge ones for non-empty extents.
120562-	 */
120563-
120564-	/* Nothing magic about this constant. */
120565-	enum {
120566-		NHP = 23,
120567-	};
120568-	hpdata_t *hpdata;
120569-
120570-	psset_t psset;
120571-	psset_init(&psset);
120572-
120573-	hpdata_t hpdata_huge[NHP];
120574-	uintptr_t huge_begin = (uintptr_t)&hpdata_huge[0];
120575-	uintptr_t huge_end = (uintptr_t)&hpdata_huge[NHP];
120576-	hpdata_t hpdata_nonhuge[NHP];
120577-	uintptr_t nonhuge_begin = (uintptr_t)&hpdata_nonhuge[0];
120578-	uintptr_t nonhuge_end = (uintptr_t)&hpdata_nonhuge[NHP];
120579-
120580-	for (size_t i = 0; i < NHP; i++) {
120581-		hpdata_init(&hpdata_huge[i], (void *)((10 + i) * HUGEPAGE),
120582-		    123 + i);
120583-		psset_insert(&psset, &hpdata_huge[i]);
120584-
120585-		hpdata_init(&hpdata_nonhuge[i],
120586-		    (void *)((10 + NHP + i) * HUGEPAGE),
120587-		    456 + i);
120588-		psset_insert(&psset, &hpdata_nonhuge[i]);
120589-
120590-	}
120591-	for (int i = 0; i < 2 * NHP; i++) {
120592-		hpdata = psset_pick_alloc(&psset, HUGEPAGE * 3 / 4);
120593-		psset_update_begin(&psset, hpdata);
120594-		void *ptr;
120595-		ptr = hpdata_reserve_alloc(hpdata, HUGEPAGE * 3 / 4);
120596-		/* Ignore the first alloc, which will stick around. */
120597-		(void)ptr;
120598-		/*
120599-		 * The second alloc is to dirty the pages; free it immediately
120600-		 * after allocating.
120601-		 */
120602-		ptr = hpdata_reserve_alloc(hpdata, HUGEPAGE / 4);
120603-		hpdata_unreserve(hpdata, ptr, HUGEPAGE / 4);
120604-
120605-		if (huge_begin <= (uintptr_t)hpdata
120606-		    && (uintptr_t)hpdata < huge_end) {
120607-			hpdata_hugify(hpdata);
120608-		}
120609-
120610-		hpdata_purge_allowed_set(hpdata, true);
120611-		psset_update_end(&psset, hpdata);
120612-	}
120613-
120614-	/*
120615-	 * We've got a bunch of 1/8th dirty hpdatas.  It should give us all the
120616-	 * non-huge ones to purge, then all the huge ones, then refuse to purge
120617-	 * further.
120618-	 */
120619-	for (int i = 0; i < NHP; i++) {
120620-		hpdata = psset_pick_purge(&psset);
120621-		assert_true(nonhuge_begin <= (uintptr_t)hpdata
120622-		    && (uintptr_t)hpdata < nonhuge_end, "");
120623-		psset_update_begin(&psset, hpdata);
120624-		test_psset_fake_purge(hpdata);
120625-		hpdata_purge_allowed_set(hpdata, false);
120626-		psset_update_end(&psset, hpdata);
120627-	}
120628-	for (int i = 0; i < NHP; i++) {
120629-		hpdata = psset_pick_purge(&psset);
120630-		expect_true(huge_begin <= (uintptr_t)hpdata
120631-		    && (uintptr_t)hpdata < huge_end, "");
120632-		psset_update_begin(&psset, hpdata);
120633-		hpdata_dehugify(hpdata);
120634-		test_psset_fake_purge(hpdata);
120635-		hpdata_purge_allowed_set(hpdata, false);
120636-		psset_update_end(&psset, hpdata);
120637-	}
120638-}
120639-TEST_END
120640-
120641-TEST_BEGIN(test_purge_prefers_empty) {
120642-	void *ptr;
120643-
120644-	psset_t psset;
120645-	psset_init(&psset);
120646-
120647-	hpdata_t hpdata_empty;
120648-	hpdata_t hpdata_nonempty;
120649-	hpdata_init(&hpdata_empty, (void *)(10 * HUGEPAGE), 123);
120650-	psset_insert(&psset, &hpdata_empty);
120651-	hpdata_init(&hpdata_nonempty, (void *)(11 * HUGEPAGE), 456);
120652-	psset_insert(&psset, &hpdata_nonempty);
120653-
120654-	psset_update_begin(&psset, &hpdata_empty);
120655-	ptr = hpdata_reserve_alloc(&hpdata_empty, PAGE);
120656-	expect_ptr_eq(hpdata_addr_get(&hpdata_empty), ptr, "");
120657-	hpdata_unreserve(&hpdata_empty, ptr, PAGE);
120658-	hpdata_purge_allowed_set(&hpdata_empty, true);
120659-	psset_update_end(&psset, &hpdata_empty);
120660-
120661-	psset_update_begin(&psset, &hpdata_nonempty);
120662-	ptr = hpdata_reserve_alloc(&hpdata_nonempty, 10 * PAGE);
120663-	expect_ptr_eq(hpdata_addr_get(&hpdata_nonempty), ptr, "");
120664-	hpdata_unreserve(&hpdata_nonempty, ptr, 9 * PAGE);
120665-	hpdata_purge_allowed_set(&hpdata_nonempty, true);
120666-	psset_update_end(&psset, &hpdata_nonempty);
120667-
120668-	/*
120669-	 * The nonempty slab has 9 dirty pages, while the empty one has only 1.
120670-	 * We should still pick the empty one for purging.
120671-	 */
120672-	hpdata_t *to_purge = psset_pick_purge(&psset);
120673-	expect_ptr_eq(&hpdata_empty, to_purge, "");
120674-}
120675-TEST_END
120676-
120677-TEST_BEGIN(test_purge_prefers_empty_huge) {
120678-	void *ptr;
120679-
120680-	psset_t psset;
120681-	psset_init(&psset);
120682-
120683-	enum { NHP = 10 };
120684-
120685-	hpdata_t hpdata_huge[NHP];
120686-	hpdata_t hpdata_nonhuge[NHP];
120687-
120688-	uintptr_t cur_addr = 100 * HUGEPAGE;
120689-	uint64_t cur_age = 123;
120690-	for (int i = 0; i < NHP; i++) {
120691-		hpdata_init(&hpdata_huge[i], (void *)cur_addr, cur_age);
120692-		cur_addr += HUGEPAGE;
120693-		cur_age++;
120694-		psset_insert(&psset, &hpdata_huge[i]);
120695-
120696-		hpdata_init(&hpdata_nonhuge[i], (void *)cur_addr, cur_age);
120697-		cur_addr += HUGEPAGE;
120698-		cur_age++;
120699-		psset_insert(&psset, &hpdata_nonhuge[i]);
120700-
120701-		/*
120702-		 * Make the hpdata_huge[i] fully dirty, empty, purgable, and
120703-		 * huge.
120704-		 */
120705-		psset_update_begin(&psset, &hpdata_huge[i]);
120706-		ptr = hpdata_reserve_alloc(&hpdata_huge[i], HUGEPAGE);
120707-		expect_ptr_eq(hpdata_addr_get(&hpdata_huge[i]), ptr, "");
120708-		hpdata_hugify(&hpdata_huge[i]);
120709-		hpdata_unreserve(&hpdata_huge[i], ptr, HUGEPAGE);
120710-		hpdata_purge_allowed_set(&hpdata_huge[i], true);
120711-		psset_update_end(&psset, &hpdata_huge[i]);
120712-
120713-		/*
120714-		 * Make hpdata_nonhuge[i] fully dirty, empty, purgable, and
120715-		 * non-huge.
120716-		 */
120717-		psset_update_begin(&psset, &hpdata_nonhuge[i]);
120718-		ptr = hpdata_reserve_alloc(&hpdata_nonhuge[i], HUGEPAGE);
120719-		expect_ptr_eq(hpdata_addr_get(&hpdata_nonhuge[i]), ptr, "");
120720-		hpdata_unreserve(&hpdata_nonhuge[i], ptr, HUGEPAGE);
120721-		hpdata_purge_allowed_set(&hpdata_nonhuge[i], true);
120722-		psset_update_end(&psset, &hpdata_nonhuge[i]);
120723-	}
120724-
120725-	/*
120726-	 * We have a bunch of empty slabs, half huge, half nonhuge, inserted in
120727-	 * alternating order.  We should pop all the huge ones before popping
120728-	 * any of the non-huge ones for purging.
120729-	 */
120730-	for (int i = 0; i < NHP; i++) {
120731-		hpdata_t *to_purge = psset_pick_purge(&psset);
120732-		expect_ptr_eq(&hpdata_huge[i], to_purge, "");
120733-		psset_update_begin(&psset, to_purge);
120734-		hpdata_purge_allowed_set(to_purge, false);
120735-		psset_update_end(&psset, to_purge);
120736-	}
120737-	for (int i = 0; i < NHP; i++) {
120738-		hpdata_t *to_purge = psset_pick_purge(&psset);
120739-		expect_ptr_eq(&hpdata_nonhuge[i], to_purge, "");
120740-		psset_update_begin(&psset, to_purge);
120741-		hpdata_purge_allowed_set(to_purge, false);
120742-		psset_update_end(&psset, to_purge);
120743-	}
120744-}
120745-TEST_END
120746-
120747-int
120748-main(void) {
120749-	return test_no_reentrancy(
120750-	    test_empty,
120751-	    test_fill,
120752-	    test_reuse,
120753-	    test_evict,
120754-	    test_multi_pageslab,
120755-	    test_stats,
120756-	    test_oldest_fit,
120757-	    test_insert_remove,
120758-	    test_purge_prefers_nonhuge,
120759-	    test_purge_prefers_empty,
120760-	    test_purge_prefers_empty_huge);
120761-}
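A compact restatement of the pick/update/reserve protocol that test_psset_alloc_reuse() above wraps, shown as a hedged sketch against the jemalloc-internal psset/hpdata APIs used by this test; it summarizes the deleted helper rather than adding a new implementation.

#include "jemalloc/internal/psset.h"

/* Illustrative sketch mirroring test_psset_alloc_reuse() above. */
static void *
psset_alloc_sketch(psset_t *psset, size_t size) {
	hpdata_t *ps = psset_pick_alloc(psset, size);
	if (ps == NULL) {
		return NULL;	/* No pageslab can satisfy the request. */
	}
	psset_update_begin(psset, ps);	/* Take the slab offline. */
	void *addr = hpdata_reserve_alloc(ps, size);
	psset_update_end(psset, ps);	/* Publish the new fill level. */
	return addr;
}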
120762diff --git a/jemalloc/test/unit/ql.c b/jemalloc/test/unit/ql.c
120763deleted file mode 100644
120764index f913058..0000000
120765--- a/jemalloc/test/unit/ql.c
120766+++ /dev/null
120767@@ -1,317 +0,0 @@
120768-#include "test/jemalloc_test.h"
120769-
120770-#include "jemalloc/internal/ql.h"
120771-
120772-/* Number of ring entries, in [2..26]. */
120773-#define NENTRIES 9
120774-
120775-typedef struct list_s list_t;
120776-typedef ql_head(list_t) list_head_t;
120777-
120778-struct list_s {
120779-	ql_elm(list_t) link;
120780-	char id;
120781-};
120782-
120783-static void
120784-test_empty_list(list_head_t *head) {
120785-	list_t *t;
120786-	unsigned i;
120787-
120788-	expect_true(ql_empty(head), "Unexpected element for empty list");
120789-	expect_ptr_null(ql_first(head), "Unexpected element for empty list");
120790-	expect_ptr_null(ql_last(head, link),
120791-	    "Unexpected element for empty list");
120792-
120793-	i = 0;
120794-	ql_foreach(t, head, link) {
120795-		i++;
120796-	}
120797-	expect_u_eq(i, 0, "Unexpected element for empty list");
120798-
120799-	i = 0;
120800-	ql_reverse_foreach(t, head, link) {
120801-		i++;
120802-	}
120803-	expect_u_eq(i, 0, "Unexpected element for empty list");
120804-}
120805-
120806-TEST_BEGIN(test_ql_empty) {
120807-	list_head_t head;
120808-
120809-	ql_new(&head);
120810-	test_empty_list(&head);
120811-}
120812-TEST_END
120813-
120814-static void
120815-init_entries(list_t *entries, unsigned nentries) {
120816-	unsigned i;
120817-
120818-	for (i = 0; i < nentries; i++) {
120819-		entries[i].id = 'a' + i;
120820-		ql_elm_new(&entries[i], link);
120821-	}
120822-}
120823-
120824-static void
120825-test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) {
120826-	list_t *t;
120827-	unsigned i;
120828-
120829-	expect_false(ql_empty(head), "List should not be empty");
120830-	expect_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch");
120831-	expect_c_eq(ql_last(head, link)->id, entries[nentries-1].id,
120832-	    "Element id mismatch");
120833-
120834-	i = 0;
120835-	ql_foreach(t, head, link) {
120836-		expect_c_eq(t->id, entries[i].id, "Element id mismatch");
120837-		i++;
120838-	}
120839-
120840-	i = 0;
120841-	ql_reverse_foreach(t, head, link) {
120842-		expect_c_eq(t->id, entries[nentries-i-1].id,
120843-		    "Element id mismatch");
120844-		i++;
120845-	}
120846-
120847-	for (i = 0; i < nentries-1; i++) {
120848-		t = ql_next(head, &entries[i], link);
120849-		expect_c_eq(t->id, entries[i+1].id, "Element id mismatch");
120850-	}
120851-	expect_ptr_null(ql_next(head, &entries[nentries-1], link),
120852-	    "Unexpected element");
120853-
120854-	expect_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element");
120855-	for (i = 1; i < nentries; i++) {
120856-		t = ql_prev(head, &entries[i], link);
120857-		expect_c_eq(t->id, entries[i-1].id, "Element id mismatch");
120858-	}
120859-}
120860-
120861-TEST_BEGIN(test_ql_tail_insert) {
120862-	list_head_t head;
120863-	list_t entries[NENTRIES];
120864-	unsigned i;
120865-
120866-	ql_new(&head);
120867-	init_entries(entries, sizeof(entries)/sizeof(list_t));
120868-	for (i = 0; i < NENTRIES; i++) {
120869-		ql_tail_insert(&head, &entries[i], link);
120870-	}
120871-
120872-	test_entries_list(&head, entries, NENTRIES);
120873-}
120874-TEST_END
120875-
120876-TEST_BEGIN(test_ql_tail_remove) {
120877-	list_head_t head;
120878-	list_t entries[NENTRIES];
120879-	unsigned i;
120880-
120881-	ql_new(&head);
120882-	init_entries(entries, sizeof(entries)/sizeof(list_t));
120883-	for (i = 0; i < NENTRIES; i++) {
120884-		ql_tail_insert(&head, &entries[i], link);
120885-	}
120886-
120887-	for (i = 0; i < NENTRIES; i++) {
120888-		test_entries_list(&head, entries, NENTRIES-i);
120889-		ql_tail_remove(&head, list_t, link);
120890-	}
120891-	test_empty_list(&head);
120892-}
120893-TEST_END
120894-
120895-TEST_BEGIN(test_ql_head_insert) {
120896-	list_head_t head;
120897-	list_t entries[NENTRIES];
120898-	unsigned i;
120899-
120900-	ql_new(&head);
120901-	init_entries(entries, sizeof(entries)/sizeof(list_t));
120902-	for (i = 0; i < NENTRIES; i++) {
120903-		ql_head_insert(&head, &entries[NENTRIES-i-1], link);
120904-	}
120905-
120906-	test_entries_list(&head, entries, NENTRIES);
120907-}
120908-TEST_END
120909-
120910-TEST_BEGIN(test_ql_head_remove) {
120911-	list_head_t head;
120912-	list_t entries[NENTRIES];
120913-	unsigned i;
120914-
120915-	ql_new(&head);
120916-	init_entries(entries, sizeof(entries)/sizeof(list_t));
120917-	for (i = 0; i < NENTRIES; i++) {
120918-		ql_head_insert(&head, &entries[NENTRIES-i-1], link);
120919-	}
120920-
120921-	for (i = 0; i < NENTRIES; i++) {
120922-		test_entries_list(&head, &entries[i], NENTRIES-i);
120923-		ql_head_remove(&head, list_t, link);
120924-	}
120925-	test_empty_list(&head);
120926-}
120927-TEST_END
120928-
120929-TEST_BEGIN(test_ql_insert) {
120930-	list_head_t head;
120931-	list_t entries[8];
120932-	list_t *a, *b, *c, *d, *e, *f, *g, *h;
120933-
120934-	ql_new(&head);
120935-	init_entries(entries, sizeof(entries)/sizeof(list_t));
120936-	a = &entries[0];
120937-	b = &entries[1];
120938-	c = &entries[2];
120939-	d = &entries[3];
120940-	e = &entries[4];
120941-	f = &entries[5];
120942-	g = &entries[6];
120943-	h = &entries[7];
120944-
120945-	/*
120946-	 * ql_remove(), ql_before_insert(), and ql_after_insert() are used
120947-	 * internally by other macros that are already tested, so there's no
120948-	 * need to test them completely.  However, insertion/deletion from the
120949-	 * middle of lists is not otherwise tested; do so here.
120950-	 */
120951-	ql_tail_insert(&head, f, link);
120952-	ql_before_insert(&head, f, b, link);
120953-	ql_before_insert(&head, f, c, link);
120954-	ql_after_insert(f, h, link);
120955-	ql_after_insert(f, g, link);
120956-	ql_before_insert(&head, b, a, link);
120957-	ql_after_insert(c, d, link);
120958-	ql_before_insert(&head, f, e, link);
120959-
120960-	test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t));
120961-}
120962-TEST_END
120963-
120964-static void
120965-test_concat_split_entries(list_t *entries, unsigned nentries_a,
120966-    unsigned nentries_b) {
120967-	init_entries(entries, nentries_a + nentries_b);
120968-
120969-	list_head_t head_a;
120970-	ql_new(&head_a);
120971-	for (unsigned i = 0; i < nentries_a; i++) {
120972-		ql_tail_insert(&head_a, &entries[i], link);
120973-	}
120974-	if (nentries_a == 0) {
120975-		test_empty_list(&head_a);
120976-	} else {
120977-		test_entries_list(&head_a, entries, nentries_a);
120978-	}
120979-
120980-	list_head_t head_b;
120981-	ql_new(&head_b);
120982-	for (unsigned i = 0; i < nentries_b; i++) {
120983-		ql_tail_insert(&head_b, &entries[nentries_a + i], link);
120984-	}
120985-	if (nentries_b == 0) {
120986-		test_empty_list(&head_b);
120987-	} else {
120988-		test_entries_list(&head_b, entries + nentries_a, nentries_b);
120989-	}
120990-
120991-	ql_concat(&head_a, &head_b, link);
120992-	if (nentries_a + nentries_b == 0) {
120993-		test_empty_list(&head_a);
120994-	} else {
120995-		test_entries_list(&head_a, entries, nentries_a + nentries_b);
120996-	}
120997-	test_empty_list(&head_b);
120998-
120999-	if (nentries_b == 0) {
121000-		return;
121001-	}
121002-
121003-	list_head_t head_c;
121004-	ql_split(&head_a, &entries[nentries_a], &head_c, link);
121005-	if (nentries_a == 0) {
121006-		test_empty_list(&head_a);
121007-	} else {
121008-		test_entries_list(&head_a, entries, nentries_a);
121009-	}
121010-	test_entries_list(&head_c, entries + nentries_a, nentries_b);
121011-}
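As a concrete instance of the helper above, the sketch below traces nentries_a = 2, nentries_b = 3, following the calls exactly as written:

/*
 * Sketch of test_concat_split_entries(entries, 2, 3):
 *
 *   head_a = [a b],  head_b = [c d e]
 *   ql_concat(&head_a, &head_b)  ->  head_a = [a b c d e],  head_b = []
 *   ql_split(&head_a, &entries[2], &head_c)
 *                                ->  head_a = [a b],  head_c = [c d e]
 *
 * Splitting at the first entry that originally belonged to head_b
 * undoes the concatenation, which is what the assertions check.
 */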
121012-
121013-TEST_BEGIN(test_ql_concat_split) {
121014-	list_t entries[NENTRIES];
121015-
121016-	test_concat_split_entries(entries, 0, 0);
121017-
121018-	test_concat_split_entries(entries, 0, 1);
121019-	test_concat_split_entries(entries, 1, 0);
121020-
121021-	test_concat_split_entries(entries, 0, NENTRIES);
121022-	test_concat_split_entries(entries, 1, NENTRIES - 1);
121023-	test_concat_split_entries(entries, NENTRIES / 2,
121024-	    NENTRIES - NENTRIES / 2);
121025-	test_concat_split_entries(entries, NENTRIES - 1, 1);
121026-	test_concat_split_entries(entries, NENTRIES, 0);
121027-}
121028-TEST_END
121029-
121030-TEST_BEGIN(test_ql_rotate) {
121031-	list_head_t head;
121032-	list_t entries[NENTRIES];
121033-	unsigned i;
121034-
121035-	ql_new(&head);
121036-	init_entries(entries, sizeof(entries)/sizeof(list_t));
121037-	for (i = 0; i < NENTRIES; i++) {
121038-		ql_tail_insert(&head, &entries[i], link);
121039-	}
121040-
121041-	char head_id = ql_first(&head)->id;
121042-	for (i = 0; i < NENTRIES; i++) {
121043-		assert_c_eq(ql_first(&head)->id, head_id, "");
121044-		ql_rotate(&head, link);
121045-		assert_c_eq(ql_last(&head, link)->id, head_id, "");
121046-		head_id++;
121047-	}
121048-	test_entries_list(&head, entries, NENTRIES);
121049-}
121050-TEST_END
121051-
121052-TEST_BEGIN(test_ql_move) {
121053-	list_head_t head_dest, head_src;
121054-	list_t entries[NENTRIES];
121055-	unsigned i;
121056-
121057-	ql_new(&head_src);
121058-	ql_move(&head_dest, &head_src);
121059-	test_empty_list(&head_src);
121060-	test_empty_list(&head_dest);
121061-
121062-	init_entries(entries, sizeof(entries)/sizeof(list_t));
121063-	for (i = 0; i < NENTRIES; i++) {
121064-		ql_tail_insert(&head_src, &entries[i], link);
121065-	}
121066-	ql_move(&head_dest, &head_src);
121067-	test_empty_list(&head_src);
121068-	test_entries_list(&head_dest, entries, NENTRIES);
121069-}
121070-TEST_END
121071-
121072-int
121073-main(void) {
121074-	return test(
121075-	    test_ql_empty,
121076-	    test_ql_tail_insert,
121077-	    test_ql_tail_remove,
121078-	    test_ql_head_insert,
121079-	    test_ql_head_remove,
121080-	    test_ql_insert,
121081-	    test_ql_concat_split,
121082-	    test_ql_rotate,
121083-	    test_ql_move);
121084-}
121085diff --git a/jemalloc/test/unit/qr.c b/jemalloc/test/unit/qr.c
121086deleted file mode 100644
121087index 16eed0e..0000000
121088--- a/jemalloc/test/unit/qr.c
121089+++ /dev/null
121090@@ -1,243 +0,0 @@
121091-#include "test/jemalloc_test.h"
121092-
121093-#include "jemalloc/internal/qr.h"
121094-
121095-/* Number of ring entries, in [2..26]. */
121096-#define NENTRIES 9
121097-/* Split index, in [1..NENTRIES). */
121098-#define SPLIT_INDEX 5
121099-
121100-typedef struct ring_s ring_t;
121101-
121102-struct ring_s {
121103-	qr(ring_t) link;
121104-	char id;
121105-};
121106-
121107-static void
121108-init_entries(ring_t *entries) {
121109-	unsigned i;
121110-
121111-	for (i = 0; i < NENTRIES; i++) {
121112-		qr_new(&entries[i], link);
121113-		entries[i].id = 'a' + i;
121114-	}
121115-}
121116-
121117-static void
121118-test_independent_entries(ring_t *entries) {
121119-	ring_t *t;
121120-	unsigned i, j;
121121-
121122-	for (i = 0; i < NENTRIES; i++) {
121123-		j = 0;
121124-		qr_foreach(t, &entries[i], link) {
121125-			j++;
121126-		}
121127-		expect_u_eq(j, 1,
121128-		    "Iteration over single-element ring should visit precisely "
121129-		    "one element");
121130-	}
121131-	for (i = 0; i < NENTRIES; i++) {
121132-		j = 0;
121133-		qr_reverse_foreach(t, &entries[i], link) {
121134-			j++;
121135-		}
121136-		expect_u_eq(j, 1,
121137-		    "Iteration over single-element ring should visit precisely "
121138-		    "one element");
121139-	}
121140-	for (i = 0; i < NENTRIES; i++) {
121141-		t = qr_next(&entries[i], link);
121142-		expect_ptr_eq(t, &entries[i],
121143-		    "Next element in single-element ring should be same as "
121144-		    "current element");
121145-	}
121146-	for (i = 0; i < NENTRIES; i++) {
121147-		t = qr_prev(&entries[i], link);
121148-		expect_ptr_eq(t, &entries[i],
121149-		    "Previous element in single-element ring should be same as "
121150-		    "current element");
121151-	}
121152-}
121153-
121154-TEST_BEGIN(test_qr_one) {
121155-	ring_t entries[NENTRIES];
121156-
121157-	init_entries(entries);
121158-	test_independent_entries(entries);
121159-}
121160-TEST_END
121161-
121162-static void
121163-test_entries_ring(ring_t *entries) {
121164-	ring_t *t;
121165-	unsigned i, j;
121166-
121167-	for (i = 0; i < NENTRIES; i++) {
121168-		j = 0;
121169-		qr_foreach(t, &entries[i], link) {
121170-			expect_c_eq(t->id, entries[(i+j) % NENTRIES].id,
121171-			    "Element id mismatch");
121172-			j++;
121173-		}
121174-	}
121175-	for (i = 0; i < NENTRIES; i++) {
121176-		j = 0;
121177-		qr_reverse_foreach(t, &entries[i], link) {
121178-			expect_c_eq(t->id, entries[(NENTRIES+i-j-1) %
121179-			    NENTRIES].id, "Element id mismatch");
121180-			j++;
121181-		}
121182-	}
121183-	for (i = 0; i < NENTRIES; i++) {
121184-		t = qr_next(&entries[i], link);
121185-		expect_c_eq(t->id, entries[(i+1) % NENTRIES].id,
121186-		    "Element id mismatch");
121187-	}
121188-	for (i = 0; i < NENTRIES; i++) {
121189-		t = qr_prev(&entries[i], link);
121190-		expect_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
121191-		    "Element id mismatch");
121192-	}
121193-}
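The modular index arithmetic above is easier to read with a small worked example (ids run 'a'..'i' for NENTRIES == 9):

/*
 * Forward iteration starting at entries[7] visits
 *   h i a b c d e f g        (entries[(7+j) % 9], j = 0..8)
 * while the reverse expectation entries[(NENTRIES+i-j-1) % NENTRIES]
 * implies that reverse iteration begins at the predecessor of the
 * starting element; from entries[7] it visits
 *   g f e d c b a i h
 */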
121194-
121195-TEST_BEGIN(test_qr_after_insert) {
121196-	ring_t entries[NENTRIES];
121197-	unsigned i;
121198-
121199-	init_entries(entries);
121200-	for (i = 1; i < NENTRIES; i++) {
121201-		qr_after_insert(&entries[i - 1], &entries[i], link);
121202-	}
121203-	test_entries_ring(entries);
121204-}
121205-TEST_END
121206-
121207-TEST_BEGIN(test_qr_remove) {
121208-	ring_t entries[NENTRIES];
121209-	ring_t *t;
121210-	unsigned i, j;
121211-
121212-	init_entries(entries);
121213-	for (i = 1; i < NENTRIES; i++) {
121214-		qr_after_insert(&entries[i - 1], &entries[i], link);
121215-	}
121216-
121217-	for (i = 0; i < NENTRIES; i++) {
121218-		j = 0;
121219-		qr_foreach(t, &entries[i], link) {
121220-			expect_c_eq(t->id, entries[i+j].id,
121221-			    "Element id mismatch");
121222-			j++;
121223-		}
121224-		j = 0;
121225-		qr_reverse_foreach(t, &entries[i], link) {
121226-			expect_c_eq(t->id, entries[NENTRIES - 1 - j].id,
121227-			    "Element id mismatch");
121228-			j++;
121229-		}
121230-		qr_remove(&entries[i], link);
121231-	}
121232-	test_independent_entries(entries);
121233-}
121234-TEST_END
121235-
121236-TEST_BEGIN(test_qr_before_insert) {
121237-	ring_t entries[NENTRIES];
121238-	ring_t *t;
121239-	unsigned i, j;
121240-
121241-	init_entries(entries);
121242-	for (i = 1; i < NENTRIES; i++) {
121243-		qr_before_insert(&entries[i - 1], &entries[i], link);
121244-	}
121245-	for (i = 0; i < NENTRIES; i++) {
121246-		j = 0;
121247-		qr_foreach(t, &entries[i], link) {
121248-			expect_c_eq(t->id, entries[(NENTRIES+i-j) %
121249-			    NENTRIES].id, "Element id mismatch");
121250-			j++;
121251-		}
121252-	}
121253-	for (i = 0; i < NENTRIES; i++) {
121254-		j = 0;
121255-		qr_reverse_foreach(t, &entries[i], link) {
121256-			expect_c_eq(t->id, entries[(i+j+1) % NENTRIES].id,
121257-			    "Element id mismatch");
121258-			j++;
121259-		}
121260-	}
121261-	for (i = 0; i < NENTRIES; i++) {
121262-		t = qr_next(&entries[i], link);
121263-		expect_c_eq(t->id, entries[(NENTRIES+i-1) % NENTRIES].id,
121264-		    "Element id mismatch");
121265-	}
121266-	for (i = 0; i < NENTRIES; i++) {
121267-		t = qr_prev(&entries[i], link);
121268-		expect_c_eq(t->id, entries[(i+1) % NENTRIES].id,
121269-		    "Element id mismatch");
121270-	}
121271-}
121272-TEST_END
121273-
121274-static void
121275-test_split_entries(ring_t *entries) {
121276-	ring_t *t;
121277-	unsigned i, j;
121278-
121279-	for (i = 0; i < NENTRIES; i++) {
121280-		j = 0;
121281-		qr_foreach(t, &entries[i], link) {
121282-			if (i < SPLIT_INDEX) {
121283-				expect_c_eq(t->id,
121284-				    entries[(i+j) % SPLIT_INDEX].id,
121285-				    "Element id mismatch");
121286-			} else {
121287-				expect_c_eq(t->id, entries[(i+j-SPLIT_INDEX) %
121288-				    (NENTRIES-SPLIT_INDEX) + SPLIT_INDEX].id,
121289-				    "Element id mismatch");
121290-			}
121291-			j++;
121292-		}
121293-	}
121294-}
121295-
121296-TEST_BEGIN(test_qr_meld_split) {
121297-	ring_t entries[NENTRIES];
121298-	unsigned i;
121299-
121300-	init_entries(entries);
121301-	for (i = 1; i < NENTRIES; i++) {
121302-		qr_after_insert(&entries[i - 1], &entries[i], link);
121303-	}
121304-
121305-	qr_split(&entries[0], &entries[SPLIT_INDEX], link);
121306-	test_split_entries(entries);
121307-
121308-	qr_meld(&entries[0], &entries[SPLIT_INDEX], link);
121309-	test_entries_ring(entries);
121310-
121311-	qr_meld(&entries[0], &entries[SPLIT_INDEX], link);
121312-	test_split_entries(entries);
121313-
121314-	qr_split(&entries[0], &entries[SPLIT_INDEX], link);
121315-	test_entries_ring(entries);
121316-
121317-	qr_split(&entries[0], &entries[0], link);
121318-	test_entries_ring(entries);
121319-
121320-	qr_meld(&entries[0], &entries[0], link);
121321-	test_entries_ring(entries);
121322-}
121323-TEST_END
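The symmetry in the sequence above (meld undoing split, a second meld re-splitting, and self-split/self-meld leaving the ring intact) is consistent with qr_split() and qr_meld() being the same self-inverse link exchange on the two elements; spelled out:

/*
 * With a = &entries[0] and f = &entries[SPLIT_INDEX]:
 *
 *   split(a, f)  ->  two rings   (test_split_entries)
 *   meld(a, f)   ->  one ring    (test_entries_ring)
 *   meld(a, f)   ->  two rings   (test_split_entries)
 *   split(a, f)  ->  one ring    (test_entries_ring)
 *   split(a, a), meld(a, a)      ->  no-ops (test_entries_ring twice)
 */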
121324-
121325-int
121326-main(void) {
121327-	return test(
121328-	    test_qr_one,
121329-	    test_qr_after_insert,
121330-	    test_qr_remove,
121331-	    test_qr_before_insert,
121332-	    test_qr_meld_split);
121333-}
121334diff --git a/jemalloc/test/unit/rb.c b/jemalloc/test/unit/rb.c
121335deleted file mode 100644
121336index 827ec51..0000000
121337--- a/jemalloc/test/unit/rb.c
121338+++ /dev/null
121339@@ -1,1019 +0,0 @@
121340-#include "test/jemalloc_test.h"
121341-
121342-#include <stdlib.h>
121343-
121344-#include "jemalloc/internal/rb.h"
121345-
121346-#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do {	\
121347-	a_type *rbp_bh_t;						\
121348-	for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; rbp_bh_t !=	\
121349-	    NULL; rbp_bh_t = rbtn_left_get(a_type, a_field,		\
121350-	    rbp_bh_t)) {						\
121351-		if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) {		\
121352-			(r_height)++;					\
121353-		}							\
121354-	}								\
121355-} while (0)
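The macro above counts black nodes along the leftmost spine only; a note on why that is enough for the consistency checks that follow:

/*
 * The red-black invariant requires every root-to-NULL path to contain
 * the same number of black nodes, so the single value computed here
 * can be compared against black_depth at every NULL child in
 * tree_recurse(), and each mismatch is reported as an imbalance.
 */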
121356-
121357-static bool summarize_always_returns_true = false;
121358-
121359-typedef struct node_s node_t;
121360-struct node_s {
121361-#define NODE_MAGIC 0x9823af7e
121362-	uint32_t magic;
121363-	rb_node(node_t) link;
121364-	/* Order used by nodes. */
121365-	uint64_t key;
121366-	/*
121367-	 * Our made-up summary property is "specialness", with summarization
121368-	 * taking the max.
121369-	 */
121370-	uint64_t specialness;
121371-
121372-	/*
121373-	 * Used by some of the test randomization to avoid double-removing
121374-	 * nodes.
121375-	 */
121376-	bool mid_remove;
121377-
121378-	/*
121379-	 * To test searching functionality, we want to temporarily weaken the
121380-	 * ordering to allow non-equal nodes that nevertheless compare equal.
121381-	 */
121382-	bool allow_duplicates;
121383-
121384-	/*
121385-	 * In check_consistency, it's handy to know a node's rank in the tree;
121386-	 * this tracks it (but only there; not all tests use this).
121387-	 */
121388-	int rank;
121389-	int filtered_rank;
121390-
121391-	/*
121392-	 * Replicate the internal structure of the tree, to make sure the
121393-	 * implementation doesn't miss any updates.
121394-	 */
121395-	const node_t *summary_lchild;
121396-	const node_t *summary_rchild;
121397-	uint64_t summary_max_specialness;
121398-};
121399-
121400-static int
121401-node_cmp(const node_t *a, const node_t *b) {
121402-	int ret;
121403-
121404-	expect_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
121405-	expect_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
121406-
121407-	ret = (a->key > b->key) - (a->key < b->key);
121408-	if (ret == 0 && !a->allow_duplicates) {
121409-		/*
121410-		 * Duplicates are not allowed in the tree, so force an
121411-		 * arbitrary ordering for non-identical items with equal keys,
121412-		 * unless the user is searching and wants to allow the
121413-		 * duplicate.
121414-		 */
121415-		ret = (((uintptr_t)a) > ((uintptr_t)b))
121416-		    - (((uintptr_t)a) < ((uintptr_t)b));
121417-	}
121418-	return ret;
121419-}
121420-
121421-static uint64_t
121422-node_subtree_specialness(node_t *n, const node_t *lchild,
121423-    const node_t *rchild) {
121424-	uint64_t subtree_specialness = n->specialness;
121425-	if (lchild != NULL
121426-	    && lchild->summary_max_specialness > subtree_specialness) {
121427-		subtree_specialness = lchild->summary_max_specialness;
121428-	}
121429-	if (rchild != NULL
121430-	    && rchild->summary_max_specialness > subtree_specialness) {
121431-		subtree_specialness = rchild->summary_max_specialness;
121432-	}
121433-	return subtree_specialness;
121434-}
121435-
121436-static bool
121437-node_summarize(node_t *a, const node_t *lchild, const node_t *rchild) {
121438-	uint64_t new_summary_max_specialness = node_subtree_specialness(
121439-	    a, lchild, rchild);
121440-	bool changed = (a->summary_lchild != lchild)
121441-	    || (a->summary_rchild != rchild)
121442-	    || (new_summary_max_specialness != a->summary_max_specialness);
121443-	a->summary_max_specialness = new_summary_max_specialness;
121444-	a->summary_lchild = lchild;
121445-	a->summary_rchild = rchild;
121446-	return changed || summarize_always_returns_true;
121447-}
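A worked instance of the summarization above, with made-up values:

/*
 * If a node has specialness 3, its left child's stored subtree maximum
 * is 5 and its right child's is 2, then node_subtree_specialness()
 * returns max(3, 5, 2) == 5.  node_summarize() then reports a change
 * only if the stored children or the stored maximum differ from the
 * new values (or unconditionally when summarize_always_returns_true is
 * set, which the random test toggles on purpose).
 */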
121448-
121449-typedef rb_tree(node_t) tree_t;
121450-rb_summarized_proto(static, tree_, tree_t, node_t);
121451-rb_summarized_gen(static, tree_, tree_t, node_t, link, node_cmp,
121452-    node_summarize);
121453-
121454-static bool
121455-specialness_filter_node(void *ctx, node_t *node) {
121456-	uint64_t specialness = *(uint64_t *)ctx;
121457-	return node->specialness >= specialness;
121458-}
121459-
121460-static bool
121461-specialness_filter_subtree(void *ctx, node_t *node) {
121462-	uint64_t specialness = *(uint64_t *)ctx;
121463-	return node->summary_max_specialness >= specialness;
121464-}
121465-
121466-static node_t *
121467-tree_iterate_cb(tree_t *tree, node_t *node, void *data) {
121468-	unsigned *i = (unsigned *)data;
121469-	node_t *search_node;
121470-
121471-	expect_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
121472-
121473-	/* Test rb_search(). */
121474-	search_node = tree_search(tree, node);
121475-	expect_ptr_eq(search_node, node,
121476-	    "tree_search() returned unexpected node");
121477-
121478-	/* Test rb_nsearch(). */
121479-	search_node = tree_nsearch(tree, node);
121480-	expect_ptr_eq(search_node, node,
121481-	    "tree_nsearch() returned unexpected node");
121482-
121483-	/* Test rb_psearch(). */
121484-	search_node = tree_psearch(tree, node);
121485-	expect_ptr_eq(search_node, node,
121486-	    "tree_psearch() returned unexpected node");
121487-
121488-	(*i)++;
121489-
121490-	return NULL;
121491-}
121492-
121493-TEST_BEGIN(test_rb_empty) {
121494-	tree_t tree;
121495-	node_t key;
121496-
121497-	tree_new(&tree);
121498-
121499-	expect_true(tree_empty(&tree), "Tree should be empty");
121500-	expect_ptr_null(tree_first(&tree), "Unexpected node");
121501-	expect_ptr_null(tree_last(&tree), "Unexpected node");
121502-
121503-	key.key = 0;
121504-	key.magic = NODE_MAGIC;
121505-	expect_ptr_null(tree_search(&tree, &key), "Unexpected node");
121506-
121507-	key.key = 0;
121508-	key.magic = NODE_MAGIC;
121509-	expect_ptr_null(tree_nsearch(&tree, &key), "Unexpected node");
121510-
121511-	key.key = 0;
121512-	key.magic = NODE_MAGIC;
121513-	expect_ptr_null(tree_psearch(&tree, &key), "Unexpected node");
121514-
121515-	unsigned nodes = 0;
121516-	tree_iter_filtered(&tree, NULL, &tree_iterate_cb,
121517-	    &nodes, &specialness_filter_node, &specialness_filter_subtree,
121518-	    NULL);
121519-	expect_u_eq(0, nodes, "");
121520-
121521-	nodes = 0;
121522-	tree_reverse_iter_filtered(&tree, NULL, &tree_iterate_cb,
121523-	    &nodes, &specialness_filter_node, &specialness_filter_subtree,
121524-	    NULL);
121525-	expect_u_eq(0, nodes, "");
121526-
121527-	expect_ptr_null(tree_first_filtered(&tree, &specialness_filter_node,
121528-	    &specialness_filter_subtree, NULL), "");
121529-	expect_ptr_null(tree_last_filtered(&tree, &specialness_filter_node,
121530-	    &specialness_filter_subtree, NULL), "");
121531-
121532-	key.key = 0;
121533-	key.magic = NODE_MAGIC;
121534-	expect_ptr_null(tree_search_filtered(&tree, &key,
121535-	    &specialness_filter_node, &specialness_filter_subtree, NULL), "");
121536-	expect_ptr_null(tree_nsearch_filtered(&tree, &key,
121537-	    &specialness_filter_node, &specialness_filter_subtree, NULL), "");
121538-	expect_ptr_null(tree_psearch_filtered(&tree, &key,
121539-	    &specialness_filter_node, &specialness_filter_subtree, NULL), "");
121540-}
121541-TEST_END
121542-
121543-static unsigned
121544-tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) {
121545-	unsigned ret = 0;
121546-	node_t *left_node;
121547-	node_t *right_node;
121548-
121549-	if (node == NULL) {
121550-		return ret;
121551-	}
121552-
121553-	left_node = rbtn_left_get(node_t, link, node);
121554-	right_node = rbtn_right_get(node_t, link, node);
121555-
121556-	expect_ptr_eq(left_node, node->summary_lchild,
121557-	    "summary missed a tree update");
121558-	expect_ptr_eq(right_node, node->summary_rchild,
121559-	    "summary missed a tree update");
121560-
121561-	uint64_t expected_subtree_specialness = node_subtree_specialness(node,
121562-	    left_node, right_node);
121563-	expect_u64_eq(expected_subtree_specialness,
121564-	    node->summary_max_specialness, "Incorrect summary");
121565-
121566-	if (!rbtn_red_get(node_t, link, node)) {
121567-		black_depth++;
121568-	}
121569-
121570-	/* Red nodes must be interleaved with black nodes. */
121571-	if (rbtn_red_get(node_t, link, node)) {
121572-		if (left_node != NULL) {
121573-			expect_false(rbtn_red_get(node_t, link, left_node),
121574-			    "Node should be black");
121575-		}
121576-		if (right_node != NULL) {
121577-			expect_false(rbtn_red_get(node_t, link, right_node),
121578-			    "Node should be black");
121579-		}
121580-	}
121581-
121582-	/* Self. */
121583-	expect_u32_eq(node->magic, NODE_MAGIC, "Bad magic");
121584-
121585-	/* Left subtree. */
121586-	if (left_node != NULL) {
121587-		ret += tree_recurse(left_node, black_height, black_depth);
121588-	} else {
121589-		ret += (black_depth != black_height);
121590-	}
121591-
121592-	/* Right subtree. */
121593-	if (right_node != NULL) {
121594-		ret += tree_recurse(right_node, black_height, black_depth);
121595-	} else {
121596-		ret += (black_depth != black_height);
121597-	}
121598-
121599-	return ret;
121600-}
121601-
121602-static unsigned
121603-tree_iterate(tree_t *tree) {
121604-	unsigned i;
121605-
121606-	i = 0;
121607-	tree_iter(tree, NULL, tree_iterate_cb, (void *)&i);
121608-
121609-	return i;
121610-}
121611-
121612-static unsigned
121613-tree_iterate_reverse(tree_t *tree) {
121614-	unsigned i;
121615-
121616-	i = 0;
121617-	tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i);
121618-
121619-	return i;
121620-}
121621-
121622-static void
121623-node_remove(tree_t *tree, node_t *node, unsigned nnodes) {
121624-	node_t *search_node;
121625-	unsigned black_height, imbalances;
121626-
121627-	tree_remove(tree, node);
121628-
121629-	/* Test rb_nsearch(). */
121630-	search_node = tree_nsearch(tree, node);
121631-	if (search_node != NULL) {
121632-		expect_u64_ge(search_node->key, node->key,
121633-		    "Key ordering error");
121634-	}
121635-
121636-	/* Test rb_psearch(). */
121637-	search_node = tree_psearch(tree, node);
121638-	if (search_node != NULL) {
121639-		expect_u64_le(search_node->key, node->key,
121640-		    "Key ordering error");
121641-	}
121642-
121643-	node->magic = 0;
121644-
121645-	rbtn_black_height(node_t, link, tree, black_height);
121646-	imbalances = tree_recurse(tree->rbt_root, black_height, 0);
121647-	expect_u_eq(imbalances, 0, "Tree is unbalanced");
121648-	expect_u_eq(tree_iterate(tree), nnodes-1,
121649-	    "Unexpected node iteration count");
121650-	expect_u_eq(tree_iterate_reverse(tree), nnodes-1,
121651-	    "Unexpected node iteration count");
121652-}
121653-
121654-static node_t *
121655-remove_iterate_cb(tree_t *tree, node_t *node, void *data) {
121656-	unsigned *nnodes = (unsigned *)data;
121657-	node_t *ret = tree_next(tree, node);
121658-
121659-	node_remove(tree, node, *nnodes);
121660-
121661-	return ret;
121662-}
121663-
121664-static node_t *
121665-remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) {
121666-	unsigned *nnodes = (unsigned *)data;
121667-	node_t *ret = tree_prev(tree, node);
121668-
121669-	node_remove(tree, node, *nnodes);
121670-
121671-	return ret;
121672-}
121673-
121674-static void
121675-destroy_cb(node_t *node, void *data) {
121676-	unsigned *nnodes = (unsigned *)data;
121677-
121678-	expect_u_gt(*nnodes, 0, "Destruction removed too many nodes");
121679-	(*nnodes)--;
121680-}
121681-
121682-TEST_BEGIN(test_rb_random) {
121683-	enum {
121684-		NNODES = 25,
121685-		NBAGS = 500,
121686-		SEED = 42
121687-	};
121688-	sfmt_t *sfmt;
121689-	uint64_t bag[NNODES];
121690-	tree_t tree;
121691-	node_t nodes[NNODES];
121692-	unsigned i, j, k, black_height, imbalances;
121693-
121694-	sfmt = init_gen_rand(SEED);
121695-	for (i = 0; i < NBAGS; i++) {
121696-		switch (i) {
121697-		case 0:
121698-			/* Insert in order. */
121699-			for (j = 0; j < NNODES; j++) {
121700-				bag[j] = j;
121701-			}
121702-			break;
121703-		case 1:
121704-			/* Insert in reverse order. */
121705-			for (j = 0; j < NNODES; j++) {
121706-				bag[j] = NNODES - j - 1;
121707-			}
121708-			break;
121709-		default:
121710-			for (j = 0; j < NNODES; j++) {
121711-				bag[j] = gen_rand64_range(sfmt, NNODES);
121712-			}
121713-		}
121714-
121715-		/*
121716-		 * We alternate test behavior with a period of 2 here, and a
121717-		 * period of 5 down below; since 2 and 5 are coprime, every
121718-		 * combination of the two behaviors is eventually exercised.
121719-		 */
121720-		summarize_always_returns_true = (i % 2 == 0);
121721-
121722-		for (j = 1; j <= NNODES; j++) {
121723-			/* Initialize tree and nodes. */
121724-			tree_new(&tree);
121725-			for (k = 0; k < j; k++) {
121726-				nodes[k].magic = NODE_MAGIC;
121727-				nodes[k].key = bag[k];
121728-				nodes[k].specialness = gen_rand64_range(sfmt,
121729-				    NNODES);
121730-				nodes[k].mid_remove = false;
121731-				nodes[k].allow_duplicates = false;
121732-				nodes[k].summary_lchild = NULL;
121733-				nodes[k].summary_rchild = NULL;
121734-				nodes[k].summary_max_specialness = 0;
121735-			}
121736-
121737-			/* Insert nodes. */
121738-			for (k = 0; k < j; k++) {
121739-				tree_insert(&tree, &nodes[k]);
121740-
121741-				rbtn_black_height(node_t, link, &tree,
121742-				    black_height);
121743-				imbalances = tree_recurse(tree.rbt_root,
121744-				    black_height, 0);
121745-				expect_u_eq(imbalances, 0,
121746-				    "Tree is unbalanced");
121747-
121748-				expect_u_eq(tree_iterate(&tree), k+1,
121749-				    "Unexpected node iteration count");
121750-				expect_u_eq(tree_iterate_reverse(&tree), k+1,
121751-				    "Unexpected node iteration count");
121752-
121753-				expect_false(tree_empty(&tree),
121754-				    "Tree should not be empty");
121755-				expect_ptr_not_null(tree_first(&tree),
121756-				    "Tree should not be empty");
121757-				expect_ptr_not_null(tree_last(&tree),
121758-				    "Tree should not be empty");
121759-
121760-				tree_next(&tree, &nodes[k]);
121761-				tree_prev(&tree, &nodes[k]);
121762-			}
121763-
121764-			/* Remove nodes. */
121765-			switch (i % 5) {
121766-			case 0:
121767-				for (k = 0; k < j; k++) {
121768-					node_remove(&tree, &nodes[k], j - k);
121769-				}
121770-				break;
121771-			case 1:
121772-				for (k = j; k > 0; k--) {
121773-					node_remove(&tree, &nodes[k-1], k);
121774-				}
121775-				break;
121776-			case 2: {
121777-				node_t *start;
121778-				unsigned nnodes = j;
121779-
121780-				start = NULL;
121781-				do {
121782-					start = tree_iter(&tree, start,
121783-					    remove_iterate_cb, (void *)&nnodes);
121784-					nnodes--;
121785-				} while (start != NULL);
121786-				expect_u_eq(nnodes, 0,
121787-				    "Removal terminated early");
121788-				break;
121789-			} case 3: {
121790-				node_t *start;
121791-				unsigned nnodes = j;
121792-
121793-				start = NULL;
121794-				do {
121795-					start = tree_reverse_iter(&tree, start,
121796-					    remove_reverse_iterate_cb,
121797-					    (void *)&nnodes);
121798-					nnodes--;
121799-				} while (start != NULL);
121800-				expect_u_eq(nnodes, 0,
121801-				    "Removal terminated early");
121802-				break;
121803-			} case 4: {
121804-				unsigned nnodes = j;
121805-				tree_destroy(&tree, destroy_cb, &nnodes);
121806-				expect_u_eq(nnodes, 0,
121807-				    "Destruction terminated early");
121808-				break;
121809-			} default:
121810-				not_reached();
121811-			}
121812-		}
121813-	}
121814-	fini_gen_rand(sfmt);
121815-}
121816-TEST_END
121817-
121818-static void
121819-expect_simple_consistency(tree_t *tree, uint64_t specialness,
121820-    bool expected_empty, node_t *expected_first, node_t *expected_last) {
121821-	bool empty;
121822-	node_t *first;
121823-	node_t *last;
121824-
121825-	empty = tree_empty_filtered(tree, &specialness_filter_node,
121826-	    &specialness_filter_subtree, &specialness);
121827-	expect_b_eq(expected_empty, empty, "");
121828-
121829-	first = tree_first_filtered(tree,
121830-	    &specialness_filter_node, &specialness_filter_subtree,
121831-	    (void *)&specialness);
121832-	expect_ptr_eq(expected_first, first, "");
121833-
121834-	last = tree_last_filtered(tree,
121835-	    &specialness_filter_node, &specialness_filter_subtree,
121836-	    (void *)&specialness);
121837-	expect_ptr_eq(expected_last, last, "");
121838-}
121839-
121840-TEST_BEGIN(test_rb_filter_simple) {
121841-	enum {FILTER_NODES = 10};
121842-	node_t nodes[FILTER_NODES];
121843-	for (unsigned i = 0; i < FILTER_NODES; i++) {
121844-		nodes[i].magic = NODE_MAGIC;
121845-		nodes[i].key = i;
121846-		if (i == 0) {
121847-			nodes[i].specialness = 0;
121848-		} else {
121849-			nodes[i].specialness = ffs_u(i);
121850-		}
121851-		nodes[i].mid_remove = false;
121852-		nodes[i].allow_duplicates = false;
121853-		nodes[i].summary_lchild = NULL;
121854-		nodes[i].summary_rchild = NULL;
121855-		nodes[i].summary_max_specialness = 0;
121856-	}
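Spelled out, the specialness assignment above (assuming ffs_u() returns the 0-based index of the lowest set bit, which is the reading the comments below rely on) counts the trailing zero bits of the key:

/*
 *   key:         0 1 2 3 4 5 6 7 8 9
 *   specialness: 0 0 1 0 2 0 1 0 3 0
 *
 * so among the nonzero keys a filter value of k selects exactly the
 * multiples of 2^k (node 0 is special-cased to 0).
 */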
121857-
121858-	summarize_always_returns_true = false;
121859-
121860-	tree_t tree;
121861-	tree_new(&tree);
121862-
121863-	/* Should be empty */
121864-	expect_simple_consistency(&tree, /* specialness */ 0, /* empty */ true,
121865-	    /* first */ NULL, /* last */ NULL);
121866-
121867-	/* Fill in just the odd nodes. */
121868-	for (int i = 1; i < FILTER_NODES; i += 2) {
121869-		tree_insert(&tree, &nodes[i]);
121870-	}
121871-
121872-	/* A search for an odd node should succeed. */
121873-	expect_simple_consistency(&tree, /* specialness */ 0, /* empty */ false,
121874-	    /* first */ &nodes[1], /* last */ &nodes[9]);
121875-
121876-	/* But a search for an even one should fail. */
121877-	expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ true,
121878-	    /* first */ NULL, /* last */ NULL);
121879-
121880-	/* Now we add an even. */
121881-	tree_insert(&tree, &nodes[4]);
121882-	expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ false,
121883-	    /* first */ &nodes[4], /* last */ &nodes[4]);
121884-
121885-	/* A smaller even, and a larger even. */
121886-	tree_insert(&tree, &nodes[2]);
121887-	tree_insert(&tree, &nodes[8]);
121888-
121889-	/*
121890-	 * A first-search (resp. last-search) for an even should switch to the
121891-	 * lower (higher) one, now that it's been added.
121892-	 */
121893-	expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ false,
121894-	    /* first */ &nodes[2], /* last */ &nodes[8]);
121895-
121896-	/*
121897-	 * If we remove 2, a first-search should go back to 4, while a
121898-	 * last-search should remain unchanged.
121899-	 */
121900-	tree_remove(&tree, &nodes[2]);
121901-	expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ false,
121902-	    /* first */ &nodes[4], /* last */ &nodes[8]);
121903-
121904-	/* Reinsert 2, then find it again. */
121905-	tree_insert(&tree, &nodes[2]);
121906-	expect_simple_consistency(&tree, /* specialness */ 1, /* empty */ false,
121907-	    /* first */ &nodes[2], /* last */ &nodes[8]);
121908-
121909-	/* Searching for a multiple of 4 should not have changed. */
121910-	expect_simple_consistency(&tree, /* specialness */ 2, /* empty */ false,
121911-	    /* first */ &nodes[4], /* last */ &nodes[8]);
121912-
121913-	/* And a multiple of 8 */
121914-	expect_simple_consistency(&tree, /* specialness */ 3, /* empty */ false,
121915-	    /* first */ &nodes[8], /* last */ &nodes[8]);
121916-
121917-	/* But not a multiple of 16 */
121918-	expect_simple_consistency(&tree, /* specialness */ 4, /* empty */ true,
121919-	    /* first */ NULL, /* last */ NULL);
121920-}
121921-TEST_END
121922-
121923-typedef struct iter_ctx_s iter_ctx_t;
121924-struct iter_ctx_s {
121925-	int ncalls;
121926-	node_t *last_node;
121927-
121928-	int ncalls_max;
121929-	bool forward;
121930-};
121931-
121932-static node_t *
121933-tree_iterate_filtered_cb(tree_t *tree, node_t *node, void *arg) {
121934-	iter_ctx_t *ctx = (iter_ctx_t *)arg;
121935-	ctx->ncalls++;
121936-	expect_u64_ge(node->specialness, 1,
121937-	    "Should only invoke cb on nodes that pass the filter");
121938-	if (ctx->last_node != NULL) {
121939-		if (ctx->forward) {
121940-			expect_d_lt(node_cmp(ctx->last_node, node), 0,
121941-			    "Incorrect iteration order");
121942-		} else {
121943-			expect_d_gt(node_cmp(ctx->last_node, node), 0,
121944-			    "Incorrect iteration order");
121945-		}
121946-	}
121947-	ctx->last_node = node;
121948-	if (ctx->ncalls == ctx->ncalls_max) {
121949-		return node;
121950-	}
121951-	return NULL;
121952-}
121953-
121954-static int
121955-qsort_node_cmp(const void *ap, const void *bp) {
121956-	node_t *a = *(node_t **)ap;
121957-	node_t *b = *(node_t **)bp;
121958-	return node_cmp(a, b);
121959-}
121960-
121961-#define UPDATE_TEST_MAX 100
121962-static void
121963-check_consistency(tree_t *tree, node_t nodes[UPDATE_TEST_MAX], int nnodes) {
121964-	uint64_t specialness = 1;
121965-
121966-	bool empty;
121967-	bool real_empty = true;
121968-	node_t *first;
121969-	node_t *real_first = NULL;
121970-	node_t *last;
121971-	node_t *real_last = NULL;
121972-	for (int i = 0; i < nnodes; i++) {
121973-		if (nodes[i].specialness >= specialness) {
121974-			real_empty = false;
121975-			if (real_first == NULL
121976-			    || node_cmp(&nodes[i], real_first) < 0) {
121977-				real_first = &nodes[i];
121978-			}
121979-			if (real_last == NULL
121980-			    || node_cmp(&nodes[i], real_last) > 0) {
121981-				real_last = &nodes[i];
121982-			}
121983-		}
121984-	}
121985-
121986-	empty = tree_empty_filtered(tree, &specialness_filter_node,
121987-	    &specialness_filter_subtree, &specialness);
121988-	expect_b_eq(real_empty, empty, "");
121989-
121990-	first = tree_first_filtered(tree, &specialness_filter_node,
121991-	    &specialness_filter_subtree, &specialness);
121992-	expect_ptr_eq(real_first, first, "");
121993-
121994-	last = tree_last_filtered(tree, &specialness_filter_node,
121995-	    &specialness_filter_subtree, &specialness);
121996-	expect_ptr_eq(real_last, last, "");
121997-
121998-	for (int i = 0; i < nnodes; i++) {
121999-		node_t *next_filtered;
122000-		node_t *real_next_filtered = NULL;
122001-		node_t *prev_filtered;
122002-		node_t *real_prev_filtered = NULL;
122003-		for (int j = 0; j < nnodes; j++) {
122004-			if (nodes[j].specialness < specialness) {
122005-				continue;
122006-			}
122007-			if (node_cmp(&nodes[j], &nodes[i]) < 0
122008-			    && (real_prev_filtered == NULL
122009-			    || node_cmp(&nodes[j], real_prev_filtered) > 0)) {
122010-				real_prev_filtered = &nodes[j];
122011-			}
122012-			if (node_cmp(&nodes[j], &nodes[i]) > 0
122013-			    && (real_next_filtered == NULL
122014-			    || node_cmp(&nodes[j], real_next_filtered) < 0)) {
122015-				real_next_filtered = &nodes[j];
122016-			}
122017-		}
122018-		next_filtered = tree_next_filtered(tree, &nodes[i],
122019-		    &specialness_filter_node, &specialness_filter_subtree,
122020-		    &specialness);
122021-		expect_ptr_eq(real_next_filtered, next_filtered, "");
122022-
122023-		prev_filtered = tree_prev_filtered(tree, &nodes[i],
122024-		    &specialness_filter_node, &specialness_filter_subtree,
122025-		    &specialness);
122026-		expect_ptr_eq(real_prev_filtered, prev_filtered, "");
122027-
122028-		node_t *search_filtered;
122029-		node_t *real_search_filtered;
122030-		node_t *nsearch_filtered;
122031-		node_t *real_nsearch_filtered;
122032-		node_t *psearch_filtered;
122033-		node_t *real_psearch_filtered;
122034-
122035-		/*
122036-		 * search, nsearch, psearch from a node before nodes[i] in the
122037-		 * ordering.
122038-		 */
122039-		node_t before;
122040-		before.magic = NODE_MAGIC;
122041-		before.key = nodes[i].key - 1;
122042-		before.allow_duplicates = false;
122043-		real_search_filtered = NULL;
122044-		search_filtered = tree_search_filtered(tree, &before,
122045-		    &specialness_filter_node, &specialness_filter_subtree,
122046-		    &specialness);
122047-		expect_ptr_eq(real_search_filtered, search_filtered, "");
122048-
122049-		real_nsearch_filtered = (nodes[i].specialness >= specialness ?
122050-		    &nodes[i] : real_next_filtered);
122051-		nsearch_filtered = tree_nsearch_filtered(tree, &before,
122052-		    &specialness_filter_node, &specialness_filter_subtree,
122053-		    &specialness);
122054-		expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, "");
122055-
122056-		real_psearch_filtered = real_prev_filtered;
122057-		psearch_filtered = tree_psearch_filtered(tree, &before,
122058-		    &specialness_filter_node, &specialness_filter_subtree,
122059-		    &specialness);
122060-		expect_ptr_eq(real_psearch_filtered, psearch_filtered, "");
122061-
122062-		/* search, nsearch, psearch from nodes[i] */
122063-		real_search_filtered = (nodes[i].specialness >= specialness ?
122064-		    &nodes[i] : NULL);
122065-		search_filtered = tree_search_filtered(tree, &nodes[i],
122066-		    &specialness_filter_node, &specialness_filter_subtree,
122067-		    &specialness);
122068-		expect_ptr_eq(real_search_filtered, search_filtered, "");
122069-
122070-		real_nsearch_filtered = (nodes[i].specialness >= specialness ?
122071-		    &nodes[i] : real_next_filtered);
122072-		nsearch_filtered = tree_nsearch_filtered(tree, &nodes[i],
122073-		    &specialness_filter_node, &specialness_filter_subtree,
122074-		    &specialness);
122075-		expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, "");
122076-
122077-		real_psearch_filtered = (nodes[i].specialness >= specialness ?
122078-		    &nodes[i] : real_prev_filtered);
122079-		psearch_filtered = tree_psearch_filtered(tree, &nodes[i],
122080-		    &specialness_filter_node, &specialness_filter_subtree,
122081-		    &specialness);
122082-		expect_ptr_eq(real_psearch_filtered, psearch_filtered, "");
122083-
122084-		/*
122085-		 * search, nsearch, psearch from a node equivalent to but
122086-		 * distinct from nodes[i].
122087-		 */
122088-		node_t equiv;
122089-		equiv.magic = NODE_MAGIC;
122090-		equiv.key = nodes[i].key;
122091-		equiv.allow_duplicates = true;
122092-		real_search_filtered = (nodes[i].specialness >= specialness ?
122093-		    &nodes[i] : NULL);
122094-		search_filtered = tree_search_filtered(tree, &equiv,
122095-		    &specialness_filter_node, &specialness_filter_subtree,
122096-		    &specialness);
122097-		expect_ptr_eq(real_search_filtered, search_filtered, "");
122098-
122099-		real_nsearch_filtered = (nodes[i].specialness >= specialness ?
122100-		    &nodes[i] : real_next_filtered);
122101-		nsearch_filtered = tree_nsearch_filtered(tree, &equiv,
122102-		    &specialness_filter_node, &specialness_filter_subtree,
122103-		    &specialness);
122104-		expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, "");
122105-
122106-		real_psearch_filtered = (nodes[i].specialness >= specialness ?
122107-		    &nodes[i] : real_prev_filtered);
122108-		psearch_filtered = tree_psearch_filtered(tree, &equiv,
122109-		    &specialness_filter_node, &specialness_filter_subtree,
122110-		    &specialness);
122111-		expect_ptr_eq(real_psearch_filtered, psearch_filtered, "");
122112-
122113-		/*
122114-		 * search, nsearch, psearch from a node after nodes[i] in the
122115-		 * ordering.
122116-		 */
122117-		node_t after;
122118-		after.magic = NODE_MAGIC;
122119-		after.key = nodes[i].key + 1;
122120-		after.allow_duplicates = false;
122121-		real_search_filtered = NULL;
122122-		search_filtered = tree_search_filtered(tree, &after,
122123-		    &specialness_filter_node, &specialness_filter_subtree,
122124-		    &specialness);
122125-		expect_ptr_eq(real_search_filtered, search_filtered, "");
122126-
122127-		real_nsearch_filtered = real_next_filtered;
122128-		nsearch_filtered = tree_nsearch_filtered(tree, &after,
122129-		    &specialness_filter_node, &specialness_filter_subtree,
122130-		    &specialness);
122131-		expect_ptr_eq(real_nsearch_filtered, nsearch_filtered, "");
122132-
122133-		real_psearch_filtered = (nodes[i].specialness >= specialness ?
122134-		    &nodes[i] : real_prev_filtered);
122135-		psearch_filtered = tree_psearch_filtered(tree, &after,
122136-		    &specialness_filter_node, &specialness_filter_subtree,
122137-		    &specialness);
122138-		expect_ptr_eq(real_psearch_filtered, psearch_filtered, "");
122139-	}
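As implied by the expected values computed above:

/*
 * Summary of the cases exercised above, for a probe key p:
 *   search_filtered(p):  the filtered node equal to p, or NULL
 *   nsearch_filtered(p): that node, or else the least filtered node > p
 *   psearch_filtered(p): that node, or else the greatest filtered node < p
 * The "before" (key - 1) and "after" (key + 1) probes work because every
 * real key is even, so the probes never collide with a tree node.
 */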
122140-
122141-	/* Filtered iteration test setup. */
122142-	int nspecial = 0;
122143-	node_t *sorted_nodes[UPDATE_TEST_MAX];
122144-	node_t *sorted_filtered_nodes[UPDATE_TEST_MAX];
122145-	for (int i = 0; i < nnodes; i++) {
122146-		sorted_nodes[i] = &nodes[i];
122147-	}
122148-	qsort(sorted_nodes, nnodes, sizeof(node_t *), &qsort_node_cmp);
122149-	for (int i = 0; i < nnodes; i++) {
122150-		sorted_nodes[i]->rank = i;
122151-		sorted_nodes[i]->filtered_rank = nspecial;
122152-		if (sorted_nodes[i]->specialness >= 1) {
122153-			sorted_filtered_nodes[nspecial] = sorted_nodes[i];
122154-			nspecial++;
122155-		}
122156-	}
122157-
122158-	node_t *iter_result;
122159-
122160-	iter_ctx_t ctx;
122161-	ctx.ncalls = 0;
122162-	ctx.last_node = NULL;
122163-	ctx.ncalls_max = INT_MAX;
122164-	ctx.forward = true;
122165-
122166-	/* Filtered forward iteration from the beginning. */
122167-	iter_result = tree_iter_filtered(tree, NULL, &tree_iterate_filtered_cb,
122168-	    &ctx, &specialness_filter_node, &specialness_filter_subtree,
122169-	    &specialness);
122170-	expect_ptr_null(iter_result, "");
122171-	expect_d_eq(nspecial, ctx.ncalls, "");
122172-	/* Filtered forward iteration from a starting point. */
122173-	for (int i = 0; i < nnodes; i++) {
122174-		ctx.ncalls = 0;
122175-		ctx.last_node = NULL;
122176-		iter_result = tree_iter_filtered(tree, &nodes[i],
122177-		    &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
122178-		    &specialness_filter_subtree, &specialness);
122179-		expect_ptr_null(iter_result, "");
122180-		expect_d_eq(nspecial - nodes[i].filtered_rank, ctx.ncalls, "");
122181-	}
122182-	/* Filtered forward iteration from the beginning, with stopping */
122183-	for (int i = 0; i < nspecial; i++) {
122184-		ctx.ncalls = 0;
122185-		ctx.last_node = NULL;
122186-		ctx.ncalls_max = i + 1;
122187-		iter_result = tree_iter_filtered(tree, NULL,
122188-		    &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
122189-		    &specialness_filter_subtree, &specialness);
122190-		expect_ptr_eq(sorted_filtered_nodes[i], iter_result, "");
122191-		expect_d_eq(ctx.ncalls, i + 1, "");
122192-	}
122193-	/* Filtered forward iteration from a starting point, with stopping. */
122194-	for (int i = 0; i < nnodes; i++) {
122195-		for (int j = 0; j < nspecial - nodes[i].filtered_rank; j++) {
122196-			ctx.ncalls = 0;
122197-			ctx.last_node = NULL;
122198-			ctx.ncalls_max = j + 1;
122199-			iter_result = tree_iter_filtered(tree, &nodes[i],
122200-			    &tree_iterate_filtered_cb, &ctx,
122201-			    &specialness_filter_node,
122202-			    &specialness_filter_subtree, &specialness);
122203-			expect_d_eq(j + 1, ctx.ncalls, "");
122204-			expect_ptr_eq(sorted_filtered_nodes[
122205-			    nodes[i].filtered_rank + j], iter_result, "");
122206-		}
122207-	}
122208-
122209-	/* Backwards iteration. */
122210-	ctx.ncalls = 0;
122211-	ctx.last_node = NULL;
122212-	ctx.ncalls_max = INT_MAX;
122213-	ctx.forward = false;
122214-
122215-	/* Filtered backward iteration from the end. */
122216-	iter_result = tree_reverse_iter_filtered(tree, NULL,
122217-	    &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
122218-	    &specialness_filter_subtree, &specialness);
122219-	expect_ptr_null(iter_result, "");
122220-	expect_d_eq(nspecial, ctx.ncalls, "");
122221-	/* Filtered backward iteration from a starting point. */
122222-	for (int i = 0; i < nnodes; i++) {
122223-		ctx.ncalls = 0;
122224-		ctx.last_node = NULL;
122225-		iter_result = tree_reverse_iter_filtered(tree, &nodes[i],
122226-		    &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
122227-		    &specialness_filter_subtree, &specialness);
122228-		expect_ptr_null(iter_result, "");
122229-		int surplus_rank = (nodes[i].specialness >= 1 ? 1 : 0);
122230-		expect_d_eq(nodes[i].filtered_rank + surplus_rank, ctx.ncalls,
122231-		    "");
122232-	}
122233-	/* Filtered backward iteration from the end, with stopping */
122234-	for (int i = 0; i < nspecial; i++) {
122235-		ctx.ncalls = 0;
122236-		ctx.last_node = NULL;
122237-		ctx.ncalls_max = i + 1;
122238-		iter_result = tree_reverse_iter_filtered(tree, NULL,
122239-		    &tree_iterate_filtered_cb, &ctx, &specialness_filter_node,
122240-		    &specialness_filter_subtree, &specialness);
122241-		expect_ptr_eq(sorted_filtered_nodes[nspecial - i - 1],
122242-		    iter_result, "");
122243-		expect_d_eq(ctx.ncalls, i + 1, "");
122244-	}
122245-	/* Filtered backward iteration from a starting point, with stopping. */
122246-	for (int i = 0; i < nnodes; i++) {
122247-		int surplus_rank = (nodes[i].specialness >= 1 ? 1 : 0);
122248-		for (int j = 0; j < nodes[i].filtered_rank + surplus_rank;
122249-		    j++) {
122250-			ctx.ncalls = 0;
122251-			ctx.last_node = NULL;
122252-			ctx.ncalls_max = j + 1;
122253-			iter_result = tree_reverse_iter_filtered(tree,
122254-			    &nodes[i], &tree_iterate_filtered_cb, &ctx,
122255-			    &specialness_filter_node,
122256-			    &specialness_filter_subtree, &specialness);
122257-			expect_d_eq(j + 1, ctx.ncalls, "");
122258-			expect_ptr_eq(sorted_filtered_nodes[
122259-			    nodes[i].filtered_rank - j - 1 + surplus_rank],
122260-			    iter_result, "");
122261-		}
122262-	}
122263-}
122264-
122265-static void
122266-do_update_search_test(int nnodes, int ntrees, int nremovals,
122267-    int nupdates) {
122268-	node_t nodes[UPDATE_TEST_MAX];
122269-	assert(nnodes <= UPDATE_TEST_MAX);
122270-
122271-	sfmt_t *sfmt = init_gen_rand(12345);
122272-	for (int i = 0; i < ntrees; i++) {
122273-		tree_t tree;
122274-		tree_new(&tree);
122275-		for (int j = 0; j < nnodes; j++) {
122276-			nodes[j].magic = NODE_MAGIC;
122277-			/*
122278-			 * In consistency checking, we increment or decrement a
122279-			 * key and assume that the result is not a key in the
122280-			 * tree.  This isn't a *real* concern with 64-bit keys
122281-			 * and a good PRNG, but why not be correct anyways?
122282-			 */
122283-			nodes[j].key = 2 * gen_rand64(sfmt);
122284-			nodes[j].specialness = 0;
122285-			nodes[j].mid_remove = false;
122286-			nodes[j].allow_duplicates = false;
122287-			nodes[j].summary_lchild = NULL;
122288-			nodes[j].summary_rchild = NULL;
122289-			nodes[j].summary_max_specialness = 0;
122290-			tree_insert(&tree, &nodes[j]);
122291-		}
122292-		for (int j = 0; j < nremovals; j++) {
122293-			int victim = (int)gen_rand64_range(sfmt, nnodes);
122294-			if (!nodes[victim].mid_remove) {
122295-				tree_remove(&tree, &nodes[victim]);
122296-				nodes[victim].mid_remove = true;
122297-			}
122298-		}
122299-		for (int j = 0; j < nnodes; j++) {
122300-			if (nodes[j].mid_remove) {
122301-				nodes[j].mid_remove = false;
122302-				nodes[j].key = 2 * gen_rand64(sfmt);
122303-				tree_insert(&tree, &nodes[j]);
122304-			}
122305-		}
122306-		for (int j = 0; j < nupdates; j++) {
122307-			uint32_t ind = gen_rand32_range(sfmt, nnodes);
122308-			nodes[ind].specialness = 1 - nodes[ind].specialness;
122309-			tree_update_summaries(&tree, &nodes[ind]);
122310-			check_consistency(&tree, nodes, nnodes);
122311-		}
122312-	}
122313-}
122314-
122315-TEST_BEGIN(test_rb_update_search) {
122316-	summarize_always_returns_true = false;
122317-	do_update_search_test(2, 100, 3, 50);
122318-	do_update_search_test(5, 100, 3, 50);
122319-	do_update_search_test(12, 100, 5, 1000);
122320-	do_update_search_test(100, 1, 50, 500);
122321-}
122322-TEST_END
122323-
122324-typedef rb_tree(node_t) unsummarized_tree_t;
122325-rb_gen(static UNUSED, unsummarized_tree_, unsummarized_tree_t, node_t, link,
122326-    node_cmp);
122327-
122328-static node_t *
122329-unsummarized_tree_iterate_cb(unsummarized_tree_t *tree, node_t *node,
122330-    void *data) {
122331-	unsigned *i = (unsigned *)data;
122332-	(*i)++;
122333-	return NULL;
122334-}
122335-/*
122336- * The unsummarized and summarized functionality is implemented via the same
122337- * functions; we don't really need to do much more than test that we can exclude
122338- * the filtered functionality without anything breaking.
122339- */
122340-TEST_BEGIN(test_rb_unsummarized) {
122341-	unsummarized_tree_t tree;
122342-	unsummarized_tree_new(&tree);
122343-	unsigned nnodes = 0;
122344-	unsummarized_tree_iter(&tree, NULL, &unsummarized_tree_iterate_cb,
122345-	    &nnodes);
122346-	expect_u_eq(0, nnodes, "");
122347-}
122348-TEST_END
122349-
122350-int
122351-main(void) {
122352-	return test_no_reentrancy(
122353-	    test_rb_empty,
122354-	    test_rb_random,
122355-	    test_rb_filter_simple,
122356-	    test_rb_update_search,
122357-	    test_rb_unsummarized);
122358-}
122359diff --git a/jemalloc/test/unit/retained.c b/jemalloc/test/unit/retained.c
122360deleted file mode 100644
122361index aa9f684..0000000
122362--- a/jemalloc/test/unit/retained.c
122363+++ /dev/null
122364@@ -1,188 +0,0 @@
122365-#include "test/jemalloc_test.h"
122366-
122367-#include "jemalloc/internal/san.h"
122368-#include "jemalloc/internal/spin.h"
122369-
122370-static unsigned		arena_ind;
122371-static size_t		sz;
122372-static size_t		esz;
122373-#define NEPOCHS		8
122374-#define PER_THD_NALLOCS	1
122375-static atomic_u_t	epoch;
122376-static atomic_u_t	nfinished;
122377-
122378-static unsigned
122379-do_arena_create(extent_hooks_t *h) {
122380-	unsigned new_arena_ind;
122381-	size_t ind_sz = sizeof(unsigned);
122382-	expect_d_eq(mallctl("arenas.create", (void *)&new_arena_ind, &ind_sz,
122383-	    (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
122384-	    "Unexpected mallctl() failure");
122385-	return new_arena_ind;
122386-}
122387-
122388-static void
122389-do_arena_destroy(unsigned ind) {
122390-	size_t mib[3];
122391-	size_t miblen;
122392-
122393-	miblen = sizeof(mib)/sizeof(size_t);
122394-	expect_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
122395-	    "Unexpected mallctlnametomib() failure");
122396-	mib[1] = (size_t)ind;
122397-	expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
122398-	    "Unexpected mallctlbymib() failure");
122399-}
122400-
122401-static void
122402-do_refresh(void) {
122403-	uint64_t refresh_epoch = 1;
122404-	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&refresh_epoch,
122405-	    sizeof(refresh_epoch)), 0, "Unexpected mallctl() failure");
122406-}
122407-
122408-static size_t
122409-do_get_size_impl(const char *cmd, unsigned ind) {
122410-	size_t mib[4];
122411-	size_t miblen = sizeof(mib) / sizeof(size_t);
122412-	size_t z = sizeof(size_t);
122413-
122414-	expect_d_eq(mallctlnametomib(cmd, mib, &miblen),
122415-	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
122416-	mib[2] = ind;
122417-	size_t size;
122418-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0),
122419-	    0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd);
122420-
122421-	return size;
122422-}
122423-
122424-static size_t
122425-do_get_active(unsigned ind) {
122426-	return do_get_size_impl("stats.arenas.0.pactive", ind) * PAGE;
122427-}
122428-
122429-static size_t
122430-do_get_mapped(unsigned ind) {
122431-	return do_get_size_impl("stats.arenas.0.mapped", ind);
122432-}
122433-
122434-static void *
122435-thd_start(void *arg) {
122436-	for (unsigned next_epoch = 1; next_epoch < NEPOCHS; next_epoch++) {
122437-		/* Busy-wait for next epoch. */
122438-		unsigned cur_epoch;
122439-		spin_t spinner = SPIN_INITIALIZER;
122440-		while ((cur_epoch = atomic_load_u(&epoch, ATOMIC_ACQUIRE)) !=
122441-		    next_epoch) {
122442-			spin_adaptive(&spinner);
122443-		}
122444-		expect_u_eq(cur_epoch, next_epoch, "Unexpected epoch");
122445-
122446-		/*
122447-		 * Allocate.  The main thread will reset the arena, so there's
122448-		 * no need to deallocate.
122449-		 */
122450-		for (unsigned i = 0; i < PER_THD_NALLOCS; i++) {
122451-			void *p = mallocx(sz, MALLOCX_ARENA(arena_ind) |
122452-			    MALLOCX_TCACHE_NONE
122453-			    );
122454-			expect_ptr_not_null(p,
122455-			    "Unexpected mallocx() failure\n");
122456-		}
122457-
122458-		/* Let the main thread know we've finished this iteration. */
122459-		atomic_fetch_add_u(&nfinished, 1, ATOMIC_RELEASE);
122460-	}
122461-
122462-	return NULL;
122463-}
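A note on the synchronization in thd_start() above:

/*
 * The worker's acquire load of `epoch` pairs with the main thread's
 * release store, so the allocations for epoch e happen after the main
 * thread publishes e; the worker's release fetch_add on `nfinished`
 * pairs with the main thread's acquire loads, so the main thread sees
 * those allocations before it reads the arena statistics.
 */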
122464-
122465-TEST_BEGIN(test_retained) {
122466-	test_skip_if(!config_stats);
122467-	test_skip_if(opt_hpa);
122468-
122469-	arena_ind = do_arena_create(NULL);
122470-	sz = nallocx(HUGEPAGE, 0);
122471-	size_t guard_sz = san_guard_enabled() ? SAN_PAGE_GUARDS_SIZE : 0;
122472-	esz = sz + sz_large_pad + guard_sz;
122473-
122474-	atomic_store_u(&epoch, 0, ATOMIC_RELAXED);
122475-
122476-	unsigned nthreads = ncpus * 2;
122477-	if (LG_SIZEOF_PTR < 3 && nthreads > 16) {
122478-		nthreads = 16; /* 32-bit platform could run out of vaddr. */
122479-	}
122480-	VARIABLE_ARRAY(thd_t, threads, nthreads);
122481-	for (unsigned i = 0; i < nthreads; i++) {
122482-		thd_create(&threads[i], thd_start, NULL);
122483-	}
122484-
122485-	for (unsigned e = 1; e < NEPOCHS; e++) {
122486-		atomic_store_u(&nfinished, 0, ATOMIC_RELEASE);
122487-		atomic_store_u(&epoch, e, ATOMIC_RELEASE);
122488-
122489-		/* Wait for threads to finish allocating. */
122490-		spin_t spinner = SPIN_INITIALIZER;
122491-		while (atomic_load_u(&nfinished, ATOMIC_ACQUIRE) < nthreads) {
122492-			spin_adaptive(&spinner);
122493-		}
122494-
122495-		/*
122496-		 * Assert that retained is no more than the sum of size classes
122497-		 * that should have been used to satisfy the worker threads'
122498-		 * requests, discounting per-growth fragmentation.
122499-		 */
122500-		do_refresh();
122501-
122502-		size_t allocated = (esz - guard_sz) * nthreads *
122503-		    PER_THD_NALLOCS;
122504-		size_t active = do_get_active(arena_ind);
122505-		expect_zu_le(allocated, active, "Unexpected active memory");
122506-		size_t mapped = do_get_mapped(arena_ind);
122507-		expect_zu_le(active, mapped, "Unexpected mapped memory");
122508-
122509-		arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false);
122510-		size_t usable = 0;
122511-		size_t fragmented = 0;
122512-		for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind <
122513-		    arena->pa_shard.pac.exp_grow.next; pind++) {
122514-			size_t psz = sz_pind2sz(pind);
122515-			size_t psz_fragmented = psz % esz;
122516-			size_t psz_usable = psz - psz_fragmented;
122517-			/*
122518-			 * Only consider size classes that wouldn't be skipped.
122519-			 */
122520-			if (psz_usable > 0) {
122521-				expect_zu_lt(usable, allocated,
122522-				    "Excessive retained memory "
122523-				    "(%#zx[+%#zx] > %#zx)", usable, psz_usable,
122524-				    allocated);
122525-				fragmented += psz_fragmented;
122526-				usable += psz_usable;
122527-			}
122528-		}
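With made-up numbers, the per-size-class arithmetic above works out as follows:

/*
 * If esz were 3 MiB and a size class psz were 8 MiB, then
 *   psz_fragmented = 8 MiB % 3 MiB = 2 MiB
 *   psz_usable     = 8 MiB - 2 MiB = 6 MiB
 * i.e. that class holds two whole allocations, with 2 MiB counted as
 * per-growth fragmentation.
 */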
122529-
122530-		/*
122531-		 * Clean up arena.  Destroying and recreating the arena
122532-		 * is simpler than specifying extent hooks that deallocate
122533-		 * (rather than retaining) during reset.
122534-		 */
122535-		do_arena_destroy(arena_ind);
122536-		expect_u_eq(do_arena_create(NULL), arena_ind,
122537-		    "Unexpected arena index");
122538-	}
122539-
122540-	for (unsigned i = 0; i < nthreads; i++) {
122541-		thd_join(threads[i], NULL);
122542-	}
122543-
122544-	do_arena_destroy(arena_ind);
122545-}
122546-TEST_END
122547-
122548-int
122549-main(void) {
122550-	return test(
122551-	    test_retained);
122552-}
122553diff --git a/jemalloc/test/unit/rtree.c b/jemalloc/test/unit/rtree.c
122554deleted file mode 100644
122555index 4101b72..0000000
122556--- a/jemalloc/test/unit/rtree.c
122557+++ /dev/null
122558@@ -1,289 +0,0 @@
122559-#include "test/jemalloc_test.h"
122560-
122561-#include "jemalloc/internal/rtree.h"
122562-
122563-#define INVALID_ARENA_IND ((1U << MALLOCX_ARENA_BITS) - 1)
122564-
122565-/* Potentially too large to safely place on the stack. */
122566-rtree_t test_rtree;
122567-
122568-TEST_BEGIN(test_rtree_read_empty) {
122569-	tsdn_t *tsdn;
122570-
122571-	tsdn = tsdn_fetch();
122572-
122573-	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
122574-	    /* metadata_use_hooks */ true);
122575-	expect_ptr_not_null(base, "Unexpected base_new failure");
122576-
122577-	rtree_t *rtree = &test_rtree;
122578-	rtree_ctx_t rtree_ctx;
122579-	rtree_ctx_data_init(&rtree_ctx);
122580-	expect_false(rtree_new(rtree, base, false),
122581-	    "Unexpected rtree_new() failure");
122582-	rtree_contents_t contents;
122583-	expect_true(rtree_read_independent(tsdn, rtree, &rtree_ctx, PAGE,
122584-	    &contents), "rtree_read_independent() should fail on empty rtree.");
122585-
122586-	base_delete(tsdn, base);
122587-}
122588-TEST_END
122589-
122590-#undef NTHREADS
122591-#undef NITERS
122592-#undef SEED
122593-
122594-static edata_t *
122595-alloc_edata(void) {
122596-	void *ret = mallocx(sizeof(edata_t), MALLOCX_ALIGN(EDATA_ALIGNMENT));
122597-	assert_ptr_not_null(ret, "Unexpected mallocx() failure");
122598-
122599-	return ret;
122600-}
122601-
122602-TEST_BEGIN(test_rtree_extrema) {
122603-	edata_t *edata_a, *edata_b;
122604-	edata_a = alloc_edata();
122605-	edata_b = alloc_edata();
122606-	edata_init(edata_a, INVALID_ARENA_IND, NULL, SC_LARGE_MINCLASS,
122607-	    false, sz_size2index(SC_LARGE_MINCLASS), 0,
122608-	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
122609-	edata_init(edata_b, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
122610-	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
122611-
122612-	tsdn_t *tsdn = tsdn_fetch();
122613-
122614-	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
122615-	    /* metadata_use_hooks */ true);
122616-	expect_ptr_not_null(base, "Unexpected base_new failure");
122617-
122618-	rtree_t *rtree = &test_rtree;
122619-	rtree_ctx_t rtree_ctx;
122620-	rtree_ctx_data_init(&rtree_ctx);
122621-	expect_false(rtree_new(rtree, base, false),
122622-	    "Unexpected rtree_new() failure");
122623-
122624-	rtree_contents_t contents_a;
122625-	contents_a.edata = edata_a;
122626-	contents_a.metadata.szind = edata_szind_get(edata_a);
122627-	contents_a.metadata.slab = edata_slab_get(edata_a);
122628-	contents_a.metadata.is_head = edata_is_head_get(edata_a);
122629-	contents_a.metadata.state = edata_state_get(edata_a);
122630-	expect_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, contents_a),
122631-	    "Unexpected rtree_write() failure");
122632-	expect_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, contents_a),
122633-	    "Unexpected rtree_write() failure");
122634-	rtree_contents_t read_contents_a = rtree_read(tsdn, rtree, &rtree_ctx,
122635-	    PAGE);
122636-	expect_true(contents_a.edata == read_contents_a.edata
122637-	    && contents_a.metadata.szind == read_contents_a.metadata.szind
122638-	    && contents_a.metadata.slab == read_contents_a.metadata.slab
122639-	    && contents_a.metadata.is_head == read_contents_a.metadata.is_head
122640-	    && contents_a.metadata.state == read_contents_a.metadata.state,
122641-	    "rtree_read() should return previously set value");
122642-
122643-	rtree_contents_t contents_b;
122644-	contents_b.edata = edata_b;
122645-	contents_b.metadata.szind = edata_szind_get_maybe_invalid(edata_b);
122646-	contents_b.metadata.slab = edata_slab_get(edata_b);
122647-	contents_b.metadata.is_head = edata_is_head_get(edata_b);
122648-	contents_b.metadata.state = edata_state_get(edata_b);
122649-	expect_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0),
122650-	    contents_b), "Unexpected rtree_write() failure");
122651-	rtree_contents_t read_contents_b = rtree_read(tsdn, rtree, &rtree_ctx,
122652-	    ~((uintptr_t)0));
122653-	assert_true(contents_b.edata == read_contents_b.edata
122654-	    && contents_b.metadata.szind == read_contents_b.metadata.szind
122655-	    && contents_b.metadata.slab == read_contents_b.metadata.slab
122656-	    && contents_b.metadata.is_head == read_contents_b.metadata.is_head
122657-	    && contents_b.metadata.state == read_contents_b.metadata.state,
122658-	    "rtree_read() should return previously set value");
122659-
122660-	base_delete(tsdn, base);
122661-}
122662-TEST_END
122663-
122664-TEST_BEGIN(test_rtree_bits) {
122665-	tsdn_t *tsdn = tsdn_fetch();
122666-	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
122667-	    /* metadata_use_hooks */ true);
122668-	expect_ptr_not_null(base, "Unexpected base_new failure");
122669-
122670-	uintptr_t keys[] = {PAGE, PAGE + 1,
122671-	    PAGE + (((uintptr_t)1) << LG_PAGE) - 1};
122672-	edata_t *edata_c = alloc_edata();
122673-	edata_init(edata_c, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
122674-	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
122675-
122676-	rtree_t *rtree = &test_rtree;
122677-	rtree_ctx_t rtree_ctx;
122678-	rtree_ctx_data_init(&rtree_ctx);
122679-	expect_false(rtree_new(rtree, base, false),
122680-	    "Unexpected rtree_new() failure");
122681-
122682-	for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) {
122683-		rtree_contents_t contents;
122684-		contents.edata = edata_c;
122685-		contents.metadata.szind = SC_NSIZES;
122686-		contents.metadata.slab = false;
122687-		contents.metadata.is_head = false;
122688-		contents.metadata.state = extent_state_active;
122689-
122690-		expect_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i],
122691-		    contents), "Unexpected rtree_write() failure");
122692-		for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
122693-			expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
122694-			    keys[j]).edata, edata_c,
122695-			    "rtree_edata_read() should return previously set "
122696-			    "value and ignore insignificant key bits; i=%u, "
122697-			    "j=%u, set key=%#"FMTxPTR", get key=%#"FMTxPTR, i,
122698-			    j, keys[i], keys[j]);
122699-		}
122700-		expect_ptr_null(rtree_read(tsdn, rtree, &rtree_ctx,
122701-		    (((uintptr_t)2) << LG_PAGE)).edata,
122702-		    "Only leftmost rtree leaf should be set; i=%u", i);
122703-		rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
122704-	}
122705-
122706-	base_delete(tsdn, base);
122707-}
122708-TEST_END
122709-
122710-TEST_BEGIN(test_rtree_random) {
122711-#define NSET 16
122712-#define SEED 42
122713-	sfmt_t *sfmt = init_gen_rand(SEED);
122714-	tsdn_t *tsdn = tsdn_fetch();
122715-
122716-	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
122717-	    /* metadata_use_hooks */ true);
122718-	expect_ptr_not_null(base, "Unexpected base_new failure");
122719-
122720-	uintptr_t keys[NSET];
122721-	rtree_t *rtree = &test_rtree;
122722-	rtree_ctx_t rtree_ctx;
122723-	rtree_ctx_data_init(&rtree_ctx);
122724-
122725-	edata_t *edata_d = alloc_edata();
122726-	edata_init(edata_d, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
122727-	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
122728-
122729-	expect_false(rtree_new(rtree, base, false),
122730-	    "Unexpected rtree_new() failure");
122731-
122732-	for (unsigned i = 0; i < NSET; i++) {
122733-		keys[i] = (uintptr_t)gen_rand64(sfmt);
122734-		rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree,
122735-		    &rtree_ctx, keys[i], false, true);
122736-		expect_ptr_not_null(elm,
122737-		    "Unexpected rtree_leaf_elm_lookup() failure");
122738-		rtree_contents_t contents;
122739-		contents.edata = edata_d;
122740-		contents.metadata.szind = SC_NSIZES;
122741-		contents.metadata.slab = false;
122742-		contents.metadata.is_head = false;
122743-		contents.metadata.state = edata_state_get(edata_d);
122744-		rtree_leaf_elm_write(tsdn, rtree, elm, contents);
122745-		expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
122746-		    keys[i]).edata, edata_d,
122747-		    "rtree_edata_read() should return previously set value");
122748-	}
122749-	for (unsigned i = 0; i < NSET; i++) {
122750-		expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
122751-		    keys[i]).edata, edata_d,
122752-		    "rtree_edata_read() should return previously set value, "
122753-		    "i=%u", i);
122754-	}
122755-
122756-	for (unsigned i = 0; i < NSET; i++) {
122757-		rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]);
122758-		expect_ptr_null(rtree_read(tsdn, rtree, &rtree_ctx,
122759-		    keys[i]).edata,
122760-		    "rtree_read() should return NULL after rtree_clear()");
122761-	}
122762-	for (unsigned i = 0; i < NSET; i++) {
122763-		expect_ptr_null(rtree_read(tsdn, rtree, &rtree_ctx,
122764-		    keys[i]).edata,
122765-		    "rtree_read() should return NULL after rtree_clear()");
122766-	}
122767-
122768-	base_delete(tsdn, base);
122769-	fini_gen_rand(sfmt);
122770-#undef NSET
122771-#undef SEED
122772-}
122773-TEST_END
122774-
122775-static void
122776-test_rtree_range_write(tsdn_t *tsdn, rtree_t *rtree, uintptr_t start,
122777-    uintptr_t end) {
122778-	rtree_ctx_t rtree_ctx;
122779-	rtree_ctx_data_init(&rtree_ctx);
122780-
122781-	edata_t *edata_e = alloc_edata();
122782-	edata_init(edata_e, INVALID_ARENA_IND, NULL, 0, false, SC_NSIZES, 0,
122783-	    extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
122784-	rtree_contents_t contents;
122785-	contents.edata = edata_e;
122786-	contents.metadata.szind = SC_NSIZES;
122787-	contents.metadata.slab = false;
122788-	contents.metadata.is_head = false;
122789-	contents.metadata.state = extent_state_active;
122790-
122791-	expect_false(rtree_write(tsdn, rtree, &rtree_ctx, start,
122792-	    contents), "Unexpected rtree_write() failure");
122793-	expect_false(rtree_write(tsdn, rtree, &rtree_ctx, end,
122794-	    contents), "Unexpected rtree_write() failure");
122795-
122796-	rtree_write_range(tsdn, rtree, &rtree_ctx, start, end, contents);
122797-	for (uintptr_t i = 0; i < ((end - start) >> LG_PAGE); i++) {
122798-		expect_ptr_eq(rtree_read(tsdn, rtree, &rtree_ctx,
122799-		    start + (i << LG_PAGE)).edata, edata_e,
122800-		    "rtree_edata_read() should return previously set value");
122801-	}
122802-	rtree_clear_range(tsdn, rtree, &rtree_ctx, start, end);
122803-	rtree_leaf_elm_t *elm;
122804-	for (uintptr_t i = 0; i < ((end - start) >> LG_PAGE); i++) {
122805-		elm = rtree_leaf_elm_lookup(tsdn, rtree, &rtree_ctx,
122806-		    start + (i << LG_PAGE), false, false);
122807-		expect_ptr_not_null(elm, "Should have been initialized.");
122808-		expect_ptr_null(rtree_leaf_elm_read(tsdn, rtree, elm,
122809-		    false).edata, "Should have been cleared.");
122810-	}
122811-}
122812-
122813-TEST_BEGIN(test_rtree_range) {
122814-	tsdn_t *tsdn = tsdn_fetch();
122815-	base_t *base = base_new(tsdn, 0, &ehooks_default_extent_hooks,
122816-	    /* metadata_use_hooks */ true);
122817-	expect_ptr_not_null(base, "Unexpected base_new failure");
122818-
122819-	rtree_t *rtree = &test_rtree;
122820-	expect_false(rtree_new(rtree, base, false),
122821-	    "Unexpected rtree_new() failure");
122822-
122823-	/* Not crossing rtree node boundary first. */
122824-	uintptr_t start = ZU(1) << rtree_leaf_maskbits();
122825-	uintptr_t end = start + (ZU(100) << LG_PAGE);
122826-	test_rtree_range_write(tsdn, rtree, start, end);
122827-
122828-	/* Crossing rtree node boundary. */
122829-	start = (ZU(1) << rtree_leaf_maskbits()) - (ZU(10) << LG_PAGE);
122830-	end = start + (ZU(100) << LG_PAGE);
122831-	assert_ptr_ne((void *)rtree_leafkey(start), (void *)rtree_leafkey(end),
122832-	    "The range should span across two rtree nodes");
122833-	test_rtree_range_write(tsdn, rtree, start, end);
122834-
122835-	base_delete(tsdn, base);
122836-}
122837-TEST_END
122838-
122839-int
122840-main(void) {
122841-	return test(
122842-	    test_rtree_read_empty,
122843-	    test_rtree_extrema,
122844-	    test_rtree_bits,
122845-	    test_rtree_random,
122846-	    test_rtree_range);
122847-}
122848diff --git a/jemalloc/test/unit/safety_check.c b/jemalloc/test/unit/safety_check.c
122849deleted file mode 100644
122850index 8472667..0000000
122851--- a/jemalloc/test/unit/safety_check.c
122852+++ /dev/null
122853@@ -1,163 +0,0 @@
122854-#include "test/jemalloc_test.h"
122855-
122856-#include "jemalloc/internal/safety_check.h"
122857-
122858-/*
122859- * Note that we get called through safety_check.sh, which turns on sampling for
122860- * everything.
122861- */
122862-
122863-bool fake_abort_called;
122864-void fake_abort(const char *message) {
122865-	(void)message;
122866-	fake_abort_called = true;
122867-}
122868-
122869-static void
122870-buffer_overflow_write(char *ptr, size_t size) {
122871-	/* Avoid overflow warnings. */
122872-	volatile size_t idx = size;
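-	/*
-	 * The volatile index keeps the compiler from proving the store is out
-	 * of bounds, so the overflowing write is actually emitted.
-	 */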
122873-	ptr[idx] = 0;
122874-}
122875-
122876-TEST_BEGIN(test_malloc_free_overflow) {
122877-	test_skip_if(!config_prof);
122878-	test_skip_if(!config_opt_safety_checks);
122879-
122880-	safety_check_set_abort(&fake_abort);
122881-	/* Buffer overflow! */
122882-	char* ptr = malloc(128);
122883-	buffer_overflow_write(ptr, 128);
122884-	free(ptr);
122885-	safety_check_set_abort(NULL);
122886-
122887-	expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
122888-	fake_abort_called = false;
122889-}
122890-TEST_END
122891-
122892-TEST_BEGIN(test_mallocx_dallocx_overflow) {
122893-	test_skip_if(!config_prof);
122894-	test_skip_if(!config_opt_safety_checks);
122895-
122896-	safety_check_set_abort(&fake_abort);
122897-	/* Buffer overflow! */
122898-	char* ptr = mallocx(128, 0);
122899-	buffer_overflow_write(ptr, 128);
122900-	dallocx(ptr, 0);
122901-	safety_check_set_abort(NULL);
122902-
122903-	expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
122904-	fake_abort_called = false;
122905-}
122906-TEST_END
122907-
122908-TEST_BEGIN(test_malloc_sdallocx_overflow) {
122909-	test_skip_if(!config_prof);
122910-	test_skip_if(!config_opt_safety_checks);
122911-
122912-	safety_check_set_abort(&fake_abort);
122913-	/* Buffer overflow! */
122914-	char* ptr = malloc(128);
122915-	buffer_overflow_write(ptr, 128);
122916-	sdallocx(ptr, 128, 0);
122917-	safety_check_set_abort(NULL);
122918-
122919-	expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
122920-	fake_abort_called = false;
122921-}
122922-TEST_END
122923-
122924-TEST_BEGIN(test_realloc_overflow) {
122925-	test_skip_if(!config_prof);
122926-	test_skip_if(!config_opt_safety_checks);
122927-
122928-	safety_check_set_abort(&fake_abort);
122929-	/* Buffer overflow! */
122930-	char* ptr = malloc(128);
122931-	buffer_overflow_write(ptr, 128);
122932-	ptr = realloc(ptr, 129);
122933-	safety_check_set_abort(NULL);
122934-	free(ptr);
122935-
122936-	expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
122937-	fake_abort_called = false;
122938-}
122939-TEST_END
122940-
122941-TEST_BEGIN(test_rallocx_overflow) {
122942-	test_skip_if(!config_prof);
122943-	test_skip_if(!config_opt_safety_checks);
122944-
122945-	safety_check_set_abort(&fake_abort);
122946-	/* Buffer overflow! */
122947-	char* ptr = malloc(128);
122948-	buffer_overflow_write(ptr, 128);
122949-	ptr = rallocx(ptr, 129, 0);
122950-	safety_check_set_abort(NULL);
122951-	free(ptr);
122952-
122953-	expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
122954-	fake_abort_called = false;
122955-}
122956-TEST_END
122957-
122958-TEST_BEGIN(test_xallocx_overflow) {
122959-	test_skip_if(!config_prof);
122960-	test_skip_if(!config_opt_safety_checks);
122961-
122962-	safety_check_set_abort(&fake_abort);
122963-	/* Buffer overflow! */
122964-	char* ptr = malloc(128);
122965-	buffer_overflow_write(ptr, 128);
122966-	size_t result = xallocx(ptr, 129, 0, 0);
122967-	expect_zu_eq(result, 128, "");
122968-	free(ptr);
122969-	expect_b_eq(fake_abort_called, true, "Redzone check didn't fire.");
122970-	fake_abort_called = false;
122971-	safety_check_set_abort(NULL);
122972-}
122973-TEST_END
122974-
122975-TEST_BEGIN(test_realloc_no_overflow) {
122976-	char* ptr = malloc(128);
122977-	ptr = realloc(ptr, 256);
122978-	ptr[128] = 0;
122979-	ptr[255] = 0;
122980-	free(ptr);
122981-
122982-	ptr = malloc(128);
122983-	ptr = realloc(ptr, 64);
122984-	ptr[63] = 0;
122985-	ptr[0] = 0;
122986-	free(ptr);
122987-}
122988-TEST_END
122989-
122990-TEST_BEGIN(test_rallocx_no_overflow) {
122991-	char* ptr = malloc(128);
122992-	ptr = rallocx(ptr, 256, 0);
122993-	ptr[128] = 0;
122994-	ptr[255] = 0;
122995-	free(ptr);
122996-
122997-	ptr = malloc(128);
122998-	ptr = rallocx(ptr, 64, 0);
122999-	ptr[63] = 0;
123000-	ptr[0] = 0;
123001-	free(ptr);
123002-}
123003-TEST_END
123004-
123005-int
123006-main(void) {
123007-	return test(
123008-	    test_malloc_free_overflow,
123009-	    test_mallocx_dallocx_overflow,
123010-	    test_malloc_sdallocx_overflow,
123011-	    test_realloc_overflow,
123012-	    test_rallocx_overflow,
123013-	    test_xallocx_overflow,
123014-	    test_realloc_no_overflow,
123015-	    test_rallocx_no_overflow);
123016-}
123017diff --git a/jemalloc/test/unit/safety_check.sh b/jemalloc/test/unit/safety_check.sh
123018deleted file mode 100644
123019index 485f9bf..0000000
123020--- a/jemalloc/test/unit/safety_check.sh
123021+++ /dev/null
123022@@ -1,5 +0,0 @@
123023-#!/bin/sh
123024-
123025-if [ "x${enable_prof}" = "x1" ] ; then
123026-  export MALLOC_CONF="prof:true,prof_active:true,lg_prof_sample:0"
123027-fi
123028diff --git a/jemalloc/test/unit/san.c b/jemalloc/test/unit/san.c
123029deleted file mode 100644
123030index 5b98f52..0000000
123031--- a/jemalloc/test/unit/san.c
123032+++ /dev/null
123033@@ -1,207 +0,0 @@
123034-#include "test/jemalloc_test.h"
123035-#include "test/arena_util.h"
123036-#include "test/san.h"
123037-
123038-#include "jemalloc/internal/san.h"
123039-
123040-static void
123041-verify_extent_guarded(tsdn_t *tsdn, void *ptr) {
123042-	expect_true(extent_is_guarded(tsdn, ptr),
123043-	    "All extents should be guarded.");
123044-}
123045-
123046-#define MAX_SMALL_ALLOCATIONS 4096
123047-void *small_alloc[MAX_SMALL_ALLOCATIONS];
123048-
123049-/*
123050- * This test allocates page-sized slabs and checks that any two slabs have
123051- * at least one page in between them. That page is supposed to be the guard
123052- * page.
123053- */
123054-TEST_BEGIN(test_guarded_small) {
123055-	test_skip_if(opt_prof);
123056-
123057-	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
123058-	unsigned npages = 16, pages_found = 0, ends_found = 0;
123059-	VARIABLE_ARRAY(uintptr_t, pages, npages);
123060-
123061-	/* Allocate to get sanitized pointers. */
123062-	size_t slab_sz = PAGE;
123063-	size_t sz = slab_sz / 8;
123064-	unsigned n_alloc = 0;
123065-	while (n_alloc < MAX_SMALL_ALLOCATIONS) {
123066-		void *ptr = malloc(sz);
123067-		expect_ptr_not_null(ptr, "Unexpected malloc() failure");
123068-		small_alloc[n_alloc] = ptr;
123069-		verify_extent_guarded(tsdn, ptr);
123070-		if ((uintptr_t)ptr % PAGE == 0) {
123071-			assert_u_lt(pages_found, npages,
123072-			    "Unexpectedly large number of page aligned allocs");
123073-			pages[pages_found++] = (uintptr_t)ptr;
123074-		}
123075-		if (((uintptr_t)ptr + (uintptr_t)sz) % PAGE == 0) {
123076-			ends_found++;
123077-		}
123078-		n_alloc++;
123079-		if (pages_found == npages && ends_found == npages) {
123080-			break;
123081-		}
123082-	}
123083-	/* Should have found the ptrs being checked for overflow and underflow. */
123084-	expect_u_eq(pages_found, npages, "Could not find the expected pages.");
123085-	expect_u_eq(ends_found, npages, "Could not find the expected page ends.");
123086-
123087-	/* Verify the pages are not contiguous, i.e. they are separated by guards. */
123088-	for (unsigned i = 0; i < npages - 1; i++) {
123089-		for (unsigned j = i + 1; j < npages; j++) {
123090-			uintptr_t ptr_diff = pages[i] > pages[j] ?
123091-			    pages[i] - pages[j] : pages[j] - pages[i];
123092-			expect_zu_ge((size_t)ptr_diff, slab_sz + PAGE,
123093-			    "There should be at least one page between "
123094-			    "guarded slabs");
123095-		}
123096-	}
123097-
123098-	for (unsigned i = 0; i < n_alloc; i++) {
123099-		free(small_alloc[i]);
123100-	}
123101-}
123102-TEST_END
123103-
123104-TEST_BEGIN(test_guarded_large) {
123105-	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
123106-	unsigned nlarge = 32;
123107-	VARIABLE_ARRAY(uintptr_t, large, nlarge);
123108-
123109-	/* Allocate to get sanitized pointers. */
123110-	size_t large_sz = SC_LARGE_MINCLASS;
123111-	for (unsigned i = 0; i < nlarge; i++) {
123112-		void *ptr = malloc(large_sz);
123113-		verify_extent_guarded(tsdn, ptr);
123114-		expect_ptr_not_null(ptr, "Unexpected malloc() failure");
123115-		large[i] = (uintptr_t)ptr;
123116-	}
123117-
123118-	/* Verify the pages are not contiguous, i.e. they are separated by guards. */
123119-	for (unsigned i = 0; i < nlarge; i++) {
123120-		for (unsigned j = i + 1; j < nlarge; j++) {
123121-			uintptr_t ptr_diff = large[i] > large[j] ?
123122-			    large[i] - large[j] : large[j] - large[i];
123123-			expect_zu_ge((size_t)ptr_diff, large_sz + 2 * PAGE,
123124-			    "There should be at least two pages between "
123125-			    "guarded large allocations");
123126-		}
123127-	}
123128-
123129-	for (unsigned i = 0; i < nlarge; i++) {
123130-		free((void *)large[i]);
123131-	}
123132-}
123133-TEST_END
123134-
123135-static void
123136-verify_pdirty(unsigned arena_ind, uint64_t expected) {
123137-	uint64_t pdirty = get_arena_pdirty(arena_ind);
123138-	expect_u64_eq(pdirty, expected / PAGE,
123139-	    "Unexpected dirty page amount.");
123140-}
123141-
123142-static void
123143-verify_pmuzzy(unsigned arena_ind, uint64_t expected) {
123144-	uint64_t pmuzzy = get_arena_pmuzzy(arena_ind);
123145-	expect_u64_eq(pmuzzy, expected / PAGE,
123146-	    "Unexpected muzzy page amount.");
123147-}
123148-
123149-TEST_BEGIN(test_guarded_decay) {
123150-	unsigned arena_ind = do_arena_create(-1, -1);
123151-	do_decay(arena_ind);
123152-	do_purge(arena_ind);
123153-
123154-	verify_pdirty(arena_ind, 0);
123155-	verify_pmuzzy(arena_ind, 0);
123156-
123157-	/* Verify that guarded extents are counted as dirty. */
123158-	size_t sz1 = PAGE, sz2 = PAGE * 2;
123159-	/* W/o maps_coalesce, guarded extents are unguarded eagerly. */
123160-	size_t add_guard_size = maps_coalesce ? 0 : SAN_PAGE_GUARDS_SIZE;
123161-	generate_dirty(arena_ind, sz1);
123162-	verify_pdirty(arena_ind, sz1 + add_guard_size);
123163-	verify_pmuzzy(arena_ind, 0);
123164-
123165-	/* Should reuse the first extent. */
123166-	generate_dirty(arena_ind, sz1);
123167-	verify_pdirty(arena_ind, sz1 + add_guard_size);
123168-	verify_pmuzzy(arena_ind, 0);
123169-
123170-	/* Should not reuse; expect new dirty pages. */
123171-	generate_dirty(arena_ind, sz2);
123172-	verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
123173-	verify_pmuzzy(arena_ind, 0);
123174-
123175-	tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
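-	/*
-	 * Pin allocations to the test arena and bypass the tcache so that the
-	 * mallocx/dallocx calls below show up in the arena's dirty-page
-	 * counters immediately.
-	 */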
123176-	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
123177-
123178-	/* Should reuse dirty extents for the two mallocx. */
123179-	void *p1 = do_mallocx(sz1, flags);
123180-	verify_extent_guarded(tsdn, p1);
123181-	verify_pdirty(arena_ind, sz2 + add_guard_size);
123182-
123183-	void *p2 = do_mallocx(sz2, flags);
123184-	verify_extent_guarded(tsdn, p2);
123185-	verify_pdirty(arena_ind, 0);
123186-	verify_pmuzzy(arena_ind, 0);
123187-
123188-	dallocx(p1, flags);
123189-	verify_pdirty(arena_ind, sz1 + add_guard_size);
123190-	dallocx(p2, flags);
123191-	verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
123192-	verify_pmuzzy(arena_ind, 0);
123193-
123194-	do_purge(arena_ind);
123195-	verify_pdirty(arena_ind, 0);
123196-	verify_pmuzzy(arena_ind, 0);
123197-
123198-	if (config_stats) {
123199-		expect_u64_eq(get_arena_npurge(arena_ind), 1,
123200-		    "Expected purging to occur");
123201-		expect_u64_eq(get_arena_dirty_npurge(arena_ind), 1,
123202-		    "Expected purging to occur");
123203-		expect_u64_eq(get_arena_dirty_purged(arena_ind),
123204-		    (sz1 + sz2 + 2 * add_guard_size) / PAGE,
123205-		    "Expected purging to occur");
123206-		expect_u64_eq(get_arena_muzzy_npurge(arena_ind), 0,
123207-		    "Expected purging to occur");
123208-	}
123209-
123210-	if (opt_retain) {
123211-		/*
123212-		 * With retain, guarded extents are not mergeable and will be
123213-		 * cached in ecache_retained.  They should be reused.
123214-		 */
123215-		void *new_p1 = do_mallocx(sz1, flags);
123216-		verify_extent_guarded(tsdn, p1);
123217-		expect_ptr_eq(p1, new_p1, "Expect to reuse p1");
123218-
123219-		void *new_p2 = do_mallocx(sz2, flags);
123220-		verify_extent_guarded(tsdn, p2);
123221-		expect_ptr_eq(p2, new_p2, "Expect to reuse p2");
123222-
123223-		dallocx(new_p1, flags);
123224-		verify_pdirty(arena_ind, sz1 + add_guard_size);
123225-		dallocx(new_p2, flags);
123226-		verify_pdirty(arena_ind, sz1 + sz2 + 2 * add_guard_size);
123227-		verify_pmuzzy(arena_ind, 0);
123228-	}
123229-
123230-	do_arena_destroy(arena_ind);
123231-}
123232-TEST_END
123233-
123234-int
123235-main(void) {
123236-	return test(
123237-	    test_guarded_small,
123238-	    test_guarded_large,
123239-	    test_guarded_decay);
123240-}
123241diff --git a/jemalloc/test/unit/san.sh b/jemalloc/test/unit/san.sh
123242deleted file mode 100644
123243index 933b4a4..0000000
123244--- a/jemalloc/test/unit/san.sh
123245+++ /dev/null
123246@@ -1,3 +0,0 @@
123247-#!/bin/sh
123248-
123249-export MALLOC_CONF="san_guard_large:1,san_guard_small:1"
123250diff --git a/jemalloc/test/unit/san_bump.c b/jemalloc/test/unit/san_bump.c
123251deleted file mode 100644
123252index cafa37f..0000000
123253--- a/jemalloc/test/unit/san_bump.c
123254+++ /dev/null
123255@@ -1,111 +0,0 @@
123256-#include "test/jemalloc_test.h"
123257-#include "test/arena_util.h"
123258-
123259-#include "jemalloc/internal/arena_structs.h"
123260-#include "jemalloc/internal/san_bump.h"
123261-
123262-TEST_BEGIN(test_san_bump_alloc) {
123263-	test_skip_if(!maps_coalesce || !opt_retain);
123264-
123265-	tsdn_t *tsdn = tsdn_fetch();
123266-
123267-	san_bump_alloc_t sba;
123268-	san_bump_alloc_init(&sba);
123269-
123270-	unsigned arena_ind = do_arena_create(0, 0);
123271-	assert_u_ne(arena_ind, UINT_MAX, "Failed to create an arena");
123272-
123273-	arena_t *arena = arena_get(tsdn, arena_ind, false);
123274-	pac_t *pac = &arena->pa_shard.pac;
123275-
123276-	size_t alloc_size = PAGE * 16;
123277-	size_t alloc_n = alloc_size / sizeof(unsigned);
123278-	edata_t* edata = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac),
123279-	    alloc_size, /* zero */ false);
123280-
123281-	expect_ptr_not_null(edata, "Failed to allocate edata");
123282-	expect_u_eq(edata_arena_ind_get(edata), arena_ind,
123283-	    "Edata was assigned an incorrect arena id");
123284-	expect_zu_eq(edata_size_get(edata), alloc_size,
123285-	    "Allocated edata of incorrect size");
123286-	expect_false(edata_slab_get(edata),
123287-	    "Bump allocator incorrectly assigned 'slab' to true");
123288-	expect_true(edata_committed_get(edata), "Edata is not committed");
123289-
123290-	void *ptr = edata_addr_get(edata);
123291-	expect_ptr_not_null(ptr, "Edata was assigned an invalid address");
123292-	/* Test that memory is allocated; no guard pages are misplaced */
123293-	for (unsigned i = 0; i < alloc_n; ++i) {
123294-		((unsigned *)ptr)[i] = 1;
123295-	}
123296-
123297-	size_t alloc_size2 = PAGE * 28;
123298-	size_t alloc_n2 = alloc_size2 / sizeof(unsigned);
123299-	edata_t *edata2 = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac),
123300-	    alloc_size2, /* zero */ true);
123301-
123302-	expect_ptr_not_null(edata2, "Failed to allocate edata");
123303-	expect_u_eq(edata_arena_ind_get(edata2), arena_ind,
123304-	    "Edata was assigned an incorrect arena id");
123305-	expect_zu_eq(edata_size_get(edata2), alloc_size2,
123306-	    "Allocated edata of incorrect size");
123307-	expect_false(edata_slab_get(edata2),
123308-	    "Bump allocator incorrectly assigned 'slab' to true");
123309-	expect_true(edata_committed_get(edata2), "Edata is not committed");
123310-
123311-	void *ptr2 = edata_addr_get(edata2);
123312-	expect_ptr_not_null(ptr2, "Edata was assigned an invalid address");
123313-
123314-	uintptr_t ptrdiff = ptr2 > ptr ? (uintptr_t)ptr2 - (uintptr_t)ptr
123315-	    : (uintptr_t)ptr - (uintptr_t)ptr2;
123316-	size_t between_allocs = (size_t)ptrdiff - alloc_size;
123317-
123318-	expect_zu_ge(between_allocs, PAGE,
123319-	    "Guard page between allocs is missing");
123320-
123321-	for (unsigned i = 0; i < alloc_n2; ++i) {
123322-		expect_u_eq(((unsigned *)ptr2)[i], 0, "Memory is not zeroed");
123323-	}
123324-}
123325-TEST_END
123326-
123327-TEST_BEGIN(test_large_alloc_size) {
123328-	test_skip_if(!maps_coalesce || !opt_retain);
123329-
123330-	tsdn_t *tsdn = tsdn_fetch();
123331-
123332-	san_bump_alloc_t sba;
123333-	san_bump_alloc_init(&sba);
123334-
123335-	unsigned arena_ind = do_arena_create(0, 0);
123336-	assert_u_ne(arena_ind, UINT_MAX, "Failed to create an arena");
123337-
123338-	arena_t *arena = arena_get(tsdn, arena_ind, false);
123339-	pac_t *pac = &arena->pa_shard.pac;
123340-
123341-	size_t alloc_size = SBA_RETAINED_ALLOC_SIZE * 2;
123342-	edata_t* edata = san_bump_alloc(tsdn, &sba, pac, pac_ehooks_get(pac),
123343-	    alloc_size, /* zero */ false);
123344-	expect_u_eq(edata_arena_ind_get(edata), arena_ind,
123345-	    "Edata was assigned an incorrect arena id");
123346-	expect_zu_eq(edata_size_get(edata), alloc_size,
123347-	    "Allocated edata of incorrect size");
123348-	expect_false(edata_slab_get(edata),
123349-	    "Bump allocator incorrectly assigned 'slab' to true");
123350-	expect_true(edata_committed_get(edata), "Edata is not committed");
123351-
123352-	void *ptr = edata_addr_get(edata);
123353-	expect_ptr_not_null(ptr, "Edata was assigned an invalid address");
123354-	/* Test that memory is allocated; no guard pages are misplaced */
123355-	for (unsigned i = 0; i < alloc_size / PAGE; ++i) {
123356-		*((char *)ptr + PAGE * i) = 1;
123357-	}
123358-}
123359-TEST_END
123360-
123361-int
123362-main(void) {
123363-	return test(
123364-	    test_san_bump_alloc,
123365-	    test_large_alloc_size);
123366-}
123367diff --git a/jemalloc/test/unit/sc.c b/jemalloc/test/unit/sc.c
123368deleted file mode 100644
123369index d207481..0000000
123370--- a/jemalloc/test/unit/sc.c
123371+++ /dev/null
123372@@ -1,33 +0,0 @@
123373-#include "test/jemalloc_test.h"
123374-
123375-TEST_BEGIN(test_update_slab_size) {
123376-	sc_data_t data;
123377-	memset(&data, 0, sizeof(data));
123378-	sc_data_init(&data);
123379-	sc_t *tiny = &data.sc[0];
123380-	size_t tiny_size = (ZU(1) << tiny->lg_base)
123381-	    + (ZU(tiny->ndelta) << tiny->lg_delta);
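-	/*
-	 * A slab holds at most BITMAP_MAXBITS regions, so a page-count hint
-	 * this large can never be needed and must not be honored as-is.
-	 */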
123382-	size_t pgs_too_big = (tiny_size * BITMAP_MAXBITS + PAGE - 1) / PAGE + 1;
123383-	sc_data_update_slab_size(&data, tiny_size, tiny_size, (int)pgs_too_big);
123384-	expect_zu_lt((size_t)tiny->pgs, pgs_too_big, "Allowed excessive pages");
123385-
123386-	sc_data_update_slab_size(&data, 1, 10 * PAGE, 1);
123387-	for (int i = 0; i < data.nbins; i++) {
123388-		sc_t *sc = &data.sc[i];
123389-		size_t reg_size = (ZU(1) << sc->lg_base)
123390-		    + (ZU(sc->ndelta) << sc->lg_delta);
123391-		if (reg_size <= PAGE) {
123392-			expect_d_eq(sc->pgs, 1, "Ignored valid page size hint");
123393-		} else {
123394-			expect_d_gt(sc->pgs, 1,
123395-			    "Allowed invalid page size hint");
123396-		}
123397-	}
123398-}
123399-TEST_END
123400-
123401-int
123402-main(void) {
123403-	return test(
123404-	    test_update_slab_size);
123405-}
123406diff --git a/jemalloc/test/unit/sec.c b/jemalloc/test/unit/sec.c
123407deleted file mode 100644
123408index f3ec403..0000000
123409--- a/jemalloc/test/unit/sec.c
123410+++ /dev/null
123411@@ -1,634 +0,0 @@
123412-#include "test/jemalloc_test.h"
123413-
123414-#include "jemalloc/internal/sec.h"
123415-
123416-typedef struct pai_test_allocator_s pai_test_allocator_t;
123417-struct pai_test_allocator_s {
123418-	pai_t pai;
123419-	bool alloc_fail;
123420-	size_t alloc_count;
123421-	size_t alloc_batch_count;
123422-	size_t dalloc_count;
123423-	size_t dalloc_batch_count;
123424-	/*
123425-	 * We use a simple bump allocator as the implementation.  This isn't
123426-	 * *really* correct, since we may allow expansion into a subsequent
123427-	 * allocation, but it's not like the SEC is really examining the
123428-	 * pointers it gets back; this is mostly just helpful for debugging.
123429-	 */
123430-	uintptr_t next_ptr;
123431-	size_t expand_count;
123432-	bool expand_return_value;
123433-	size_t shrink_count;
123434-	bool shrink_return_value;
123435-};
123436-
123437-static void
123438-test_sec_init(sec_t *sec, pai_t *fallback, size_t nshards, size_t max_alloc,
123439-    size_t max_bytes) {
123440-	sec_opts_t opts;
123441-	opts.nshards = 1;
123442-	opts.max_alloc = max_alloc;
123443-	opts.max_bytes = max_bytes;
123444-	/*
123445-	 * Just choose reasonable defaults for these; most tests don't care
123446-	 * about the exact values.
123447-	 */
123448-	opts.bytes_after_flush = max_bytes / 2;
123449-	opts.batch_fill_extra = 4;
123450-
123451-	/*
123452-	 * We end up leaking this base, but that's fine; this test is
123453-	 * short-running, and SECs are arena-scoped in reality.
123454-	 */
123455-	base_t *base = base_new(TSDN_NULL, /* ind */ 123,
123456-	    &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
123457-
123458-	bool err = sec_init(TSDN_NULL, sec, base, fallback, &opts);
123459-	assert_false(err, "Unexpected initialization failure");
123460-	assert_u_gt(sec->npsizes, 0, "Zero size classes allowed for caching");
123461-}
123462-
123463-static inline edata_t *
123464-pai_test_allocator_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
123465-    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
123466-    bool *deferred_work_generated) {
123467-	assert(!guarded);
123468-	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
123469-	if (ta->alloc_fail) {
123470-		return NULL;
123471-	}
123472-	edata_t *edata = malloc(sizeof(edata_t));
123473-	assert_ptr_not_null(edata, "");
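-	/* Bump past any misalignment so the address handed out is aligned. */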
123474-	ta->next_ptr += alignment - 1;
123475-	edata_init(edata, /* arena_ind */ 0,
123476-	    (void *)(ta->next_ptr & ~(alignment - 1)), size,
123477-	    /* slab */ false,
123478-	    /* szind */ 0, /* sn */ 1, extent_state_active, /* zero */ zero,
123479-	    /* committed */ true, /* ranged */ false, EXTENT_NOT_HEAD);
123480-	ta->next_ptr += size;
123481-	ta->alloc_count++;
123482-	return edata;
123483-}
123484-
123485-static inline size_t
123486-pai_test_allocator_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
123487-    size_t nallocs, edata_list_active_t *results,
123488-    bool *deferred_work_generated) {
123489-	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
123490-	if (ta->alloc_fail) {
123491-		return 0;
123492-	}
123493-	for (size_t i = 0; i < nallocs; i++) {
123494-		edata_t *edata = malloc(sizeof(edata_t));
123495-		assert_ptr_not_null(edata, "");
123496-		edata_init(edata, /* arena_ind */ 0,
123497-		    (void *)ta->next_ptr, size,
123498-		    /* slab */ false, /* szind */ 0, /* sn */ 1,
123499-		    extent_state_active, /* zero */ false, /* committed */ true,
123500-		    /* ranged */ false, EXTENT_NOT_HEAD);
123501-		ta->next_ptr += size;
123502-		ta->alloc_batch_count++;
123503-		edata_list_active_append(results, edata);
123504-	}
123505-	return nallocs;
123506-}
123507-
123508-static bool
123509-pai_test_allocator_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
123510-    size_t old_size, size_t new_size, bool zero,
123511-    bool *deferred_work_generated) {
123512-	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
123513-	ta->expand_count++;
123514-	return ta->expand_return_value;
123515-}
123516-
123517-static bool
123518-pai_test_allocator_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
123519-    size_t old_size, size_t new_size, bool *deferred_work_generated) {
123520-	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
123521-	ta->shrink_count++;
123522-	return ta->shrink_return_value;
123523-}
123524-
123525-static void
123526-pai_test_allocator_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
123527-    bool *deferred_work_generated) {
123528-	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
123529-	ta->dalloc_count++;
123530-	free(edata);
123531-}
123532-
123533-static void
123534-pai_test_allocator_dalloc_batch(tsdn_t *tsdn, pai_t *self,
123535-    edata_list_active_t *list, bool *deferred_work_generated) {
123536-	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
123537-
123538-	edata_t *edata;
123539-	while ((edata = edata_list_active_first(list)) != NULL) {
123540-		edata_list_active_remove(list, edata);
123541-		ta->dalloc_batch_count++;
123542-		free(edata);
123543-	}
123544-}
123545-
123546-static inline void
123547-pai_test_allocator_init(pai_test_allocator_t *ta) {
123548-	ta->alloc_fail = false;
123549-	ta->alloc_count = 0;
123550-	ta->alloc_batch_count = 0;
123551-	ta->dalloc_count = 0;
123552-	ta->dalloc_batch_count = 0;
123553-	/* Just don't start the edata at 0. */
123554-	ta->next_ptr = 10 * PAGE;
123555-	ta->expand_count = 0;
123556-	ta->expand_return_value = false;
123557-	ta->shrink_count = 0;
123558-	ta->shrink_return_value = false;
123559-	ta->pai.alloc = &pai_test_allocator_alloc;
123560-	ta->pai.alloc_batch = &pai_test_allocator_alloc_batch;
123561-	ta->pai.expand = &pai_test_allocator_expand;
123562-	ta->pai.shrink = &pai_test_allocator_shrink;
123563-	ta->pai.dalloc = &pai_test_allocator_dalloc;
123564-	ta->pai.dalloc_batch = &pai_test_allocator_dalloc_batch;
123565-}
123566-
123567-TEST_BEGIN(test_reuse) {
123568-	pai_test_allocator_t ta;
123569-	pai_test_allocator_init(&ta);
123570-	sec_t sec;
123571-	/*
123572-	 * We can't use the "real" tsd, since we malloc within the test
123573-	 * allocator hooks; we'd get lock inversion crashes.  Eventually, we
123574-	 * should have a way to mock tsds, but for now just don't do any
123575-	 * lock-order checking.
123576-	 */
123577-	tsdn_t *tsdn = TSDN_NULL;
123578-	/*
123579-	 * 11 allocs apiece of 1-PAGE and 2-PAGE objects mean that we should be
123580-	 * able to get to 33 pages in the cache before triggering a flush.  We
123581-	 * set the flush limit to twice this amount, to avoid accidentally
123582-	 * triggering a flush via the batch allocation done down the cache-fill
123583-	 * pathway, which would disrupt the ordering.
123584-	 */
123585-	enum { NALLOCS = 11 };
123586-	edata_t *one_page[NALLOCS];
123587-	edata_t *two_page[NALLOCS];
123588-	bool deferred_work_generated = false;
123589-	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 2 * PAGE,
123590-	    /* max_bytes */ 2 * (NALLOCS * PAGE + NALLOCS * 2 * PAGE));
123591-	for (int i = 0; i < NALLOCS; i++) {
123592-		one_page[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
123593-		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
123594-		    false, &deferred_work_generated);
123595-		expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
123596-		two_page[i] = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
123597-		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
123598-		    false, &deferred_work_generated);
123599-		expect_ptr_not_null(one_page[i], "Unexpected alloc failure");
123600-	}
123601-	expect_zu_eq(0, ta.alloc_count, "Should be using batch allocs");
123602-	size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
123603-	expect_zu_le(2 * NALLOCS, max_allocs,
123604-	    "Incorrect number of allocations");
123605-	expect_zu_eq(0, ta.dalloc_count,
123606-	    "Incorrect number of deallocations");
123607-	/*
123608-	 * Free in a different order than we allocated, to make sure free-list
123609-	 * separation works correctly.
123610-	 */
123611-	for (int i = NALLOCS - 1; i >= 0; i--) {
123612-		pai_dalloc(tsdn, &sec.pai, one_page[i],
123613-		    &deferred_work_generated);
123614-	}
123615-	for (int i = NALLOCS - 1; i >= 0; i--) {
123616-		pai_dalloc(tsdn, &sec.pai, two_page[i],
123617-		    &deferred_work_generated);
123618-	}
123619-	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
123620-	    "Incorrect number of allocations");
123621-	expect_zu_eq(0, ta.dalloc_count,
123622-	    "Incorrect number of deallocations");
123623-	/*
123624-	 * Check that the n'th most recent deallocated extent is returned for
123625-	 * the n'th alloc request of a given size.
123626-	 */
123627-	for (int i = 0; i < NALLOCS; i++) {
123628-		edata_t *alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
123629-		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
123630-		    false, &deferred_work_generated);
123631-		edata_t *alloc2 = pai_alloc(tsdn, &sec.pai, 2 * PAGE, PAGE,
123632-		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
123633-		    false, &deferred_work_generated);
123634-		expect_ptr_eq(one_page[i], alloc1,
123635-		    "Got unexpected allocation");
123636-		expect_ptr_eq(two_page[i], alloc2,
123637-		    "Got unexpected allocation");
123638-	}
123639-	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
123640-	    "Incorrect number of allocations");
123641-	expect_zu_eq(0, ta.dalloc_count,
123642-	    "Incorrect number of deallocations");
123643-}
123644-TEST_END
123645-
123646-
123647-TEST_BEGIN(test_auto_flush) {
123648-	pai_test_allocator_t ta;
123649-	pai_test_allocator_init(&ta);
123650-	sec_t sec;
123651-	/* See the note above -- we can't use the real tsd. */
123652-	tsdn_t *tsdn = TSDN_NULL;
123653-	/*
123654-	 * 10 allocs apiece of 1-PAGE and 2-PAGE objects mean that we should be
123655-	 * able to get to 30 pages in the cache before triggering a flush.  The
123656-	 * value of NALLOCS here is chosen to match the batch allocation
123657-	 * default (4 extra + 1 == 5; so 10 allocations leaves the cache exactly
123658-	 * empty, even in the presence of batch allocation on fill).
123659-	 * Eventually, once our allocation batching strategies become smarter,
123660-	 * this should change.
123661-	 */
123662-	enum { NALLOCS = 10 };
123663-	edata_t *extra_alloc;
123664-	edata_t *allocs[NALLOCS];
123665-	bool deferred_work_generated = false;
123666-	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
123667-	    /* max_bytes */ NALLOCS * PAGE);
123668-	for (int i = 0; i < NALLOCS; i++) {
123669-		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
123670-		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
123671-		    false, &deferred_work_generated);
123672-		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
123673-	}
123674-	extra_alloc = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
123675-	    /* guarded */ false, /* frequent_reuse */ false,
123676-	    &deferred_work_generated);
123677-	expect_ptr_not_null(extra_alloc, "Unexpected alloc failure");
123678-	size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
123679-	expect_zu_le(NALLOCS + 1, max_allocs,
123680-	    "Incorrect number of allocations");
123681-	expect_zu_eq(0, ta.dalloc_count,
123682-	    "Incorrect number of deallocations");
123683-	/* Free until the SEC is full, but should not have flushed yet. */
123684-	for (int i = 0; i < NALLOCS; i++) {
123685-		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
123686-	}
123687-	expect_zu_le(NALLOCS + 1, max_allocs,
123688-	    "Incorrect number of allocations");
123689-	expect_zu_eq(0, ta.dalloc_count,
123690-	    "Incorrect number of deallocations");
123691-	/*
123692-	 * Free the extra allocation; this should trigger a flush.  The internal
123693-	 * flushing logic is allowed to get complicated; for now, we rely on our
123694-	 * whitebox knowledge of the fact that the SEC flushes bins in their
123695-	 * entirety when it decides to do so, and it has only one bin active
123696-	 * right now.
123697-	 */
123698-	pai_dalloc(tsdn, &sec.pai, extra_alloc, &deferred_work_generated);
123699-	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
123700-	    "Incorrect number of allocations");
123701-	expect_zu_eq(0, ta.dalloc_count,
123702-	    "Incorrect number of (non-batch) deallocations");
123703-	expect_zu_eq(NALLOCS + 1, ta.dalloc_batch_count,
123704-	    "Incorrect number of batch deallocations");
123705-}
123706-TEST_END
123707-
123708-/*
123709- * A disable and a flush are *almost* equivalent; the only difference is what
123710- * happens afterwards; disabling disallows all future caching as well.
123711- */
123712-static void
123713-do_disable_flush_test(bool is_disable) {
123714-	pai_test_allocator_t ta;
123715-	pai_test_allocator_init(&ta);
123716-	sec_t sec;
123717-	/* See the note above -- we can't use the real tsd. */
123718-	tsdn_t *tsdn = TSDN_NULL;
123719-
123720-	enum { NALLOCS = 11 };
123721-	edata_t *allocs[NALLOCS];
123722-	bool deferred_work_generated = false;
123723-	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
123724-	    /* max_bytes */ NALLOCS * PAGE);
123725-	for (int i = 0; i < NALLOCS; i++) {
123726-		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
123727-		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
123728-		    false, &deferred_work_generated);
123729-		expect_ptr_not_null(allocs[i], "Unexpected alloc failure");
123730-	}
123731-	/* Free all but the last alloc. */
123732-	for (int i = 0; i < NALLOCS - 1; i++) {
123733-		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
123734-	}
123735-	size_t max_allocs = ta.alloc_count + ta.alloc_batch_count;
123736-
123737-	expect_zu_le(NALLOCS, max_allocs, "Incorrect number of allocations");
123738-	expect_zu_eq(0, ta.dalloc_count,
123739-	    "Incorrect number of deallocations");
123740-
123741-	if (is_disable) {
123742-		sec_disable(tsdn, &sec);
123743-	} else {
123744-		sec_flush(tsdn, &sec);
123745-	}
123746-
123747-	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
123748-	    "Incorrect number of allocations");
123749-	expect_zu_eq(0, ta.dalloc_count,
123750-	    "Incorrect number of (non-batch) deallocations");
123751-	expect_zu_le(NALLOCS - 1, ta.dalloc_batch_count,
123752-	    "Incorrect number of batch deallocations");
123753-	size_t old_dalloc_batch_count = ta.dalloc_batch_count;
123754-
123755-	/*
123756-	 * If we free into a disabled SEC, it should forward to the fallback.
123757-	 * Otherwise, the SEC should accept the allocation.
123758-	 */
123759-	pai_dalloc(tsdn, &sec.pai, allocs[NALLOCS - 1],
123760-	    &deferred_work_generated);
123761-
123762-	expect_zu_eq(max_allocs, ta.alloc_count + ta.alloc_batch_count,
123763-	    "Incorrect number of allocations");
123764-	expect_zu_eq(is_disable ? 1 : 0, ta.dalloc_count,
123765-	    "Incorrect number of (non-batch) deallocations");
123766-	expect_zu_eq(old_dalloc_batch_count, ta.dalloc_batch_count,
123767-	    "Incorrect number of batch deallocations");
123768-}
123769-
123770-TEST_BEGIN(test_disable) {
123771-	do_disable_flush_test(/* is_disable */ true);
123772-}
123773-TEST_END
123774-
123775-TEST_BEGIN(test_flush) {
123776-	do_disable_flush_test(/* is_disable */ false);
123777-}
123778-TEST_END
123779-
123780-TEST_BEGIN(test_max_alloc_respected) {
123781-	pai_test_allocator_t ta;
123782-	pai_test_allocator_init(&ta);
123783-	sec_t sec;
123784-	/* See the note above -- we can't use the real tsd. */
123785-	tsdn_t *tsdn = TSDN_NULL;
123786-
123787-	size_t max_alloc = 2 * PAGE;
123788-	size_t attempted_alloc = 3 * PAGE;
123789-
123790-	bool deferred_work_generated = false;
123791-
123792-	test_sec_init(&sec, &ta.pai, /* nshards */ 1, max_alloc,
123793-	    /* max_bytes */ 1000 * PAGE);
123794-
123795-	for (size_t i = 0; i < 100; i++) {
123796-		expect_zu_eq(i, ta.alloc_count,
123797-		    "Incorrect number of allocations");
123798-		expect_zu_eq(i, ta.dalloc_count,
123799-		    "Incorrect number of deallocations");
123800-		edata_t *edata = pai_alloc(tsdn, &sec.pai, attempted_alloc,
123801-		    PAGE, /* zero */ false, /* guarded */ false,
123802-		    /* frequent_reuse */ false, &deferred_work_generated);
123803-		expect_ptr_not_null(edata, "Unexpected alloc failure");
123804-		expect_zu_eq(i + 1, ta.alloc_count,
123805-		    "Incorrect number of allocations");
123806-		expect_zu_eq(i, ta.dalloc_count,
123807-		    "Incorrect number of deallocations");
123808-		pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);
123809-	}
123810-}
123811-TEST_END
123812-
123813-TEST_BEGIN(test_expand_shrink_delegate) {
123814-	/*
123815-	 * Expand and shrink shouldn't affect sec state; they should just
123816-	 * delegate to the fallback PAI.
123817-	 */
123818-	pai_test_allocator_t ta;
123819-	pai_test_allocator_init(&ta);
123820-	sec_t sec;
123821-	/* See the note above -- we can't use the real tsd. */
123822-	tsdn_t *tsdn = TSDN_NULL;
123823-
123824-	bool deferred_work_generated = false;
123825-
123826-	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 10 * PAGE,
123827-	    /* max_bytes */ 1000 * PAGE);
123828-	edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
123829-	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
123830-	    &deferred_work_generated);
123831-	expect_ptr_not_null(edata, "Unexpected alloc failure");
123832-
123833-	bool err = pai_expand(tsdn, &sec.pai, edata, PAGE, 4 * PAGE,
123834-	    /* zero */ false, &deferred_work_generated);
123835-	expect_false(err, "Unexpected expand failure");
123836-	expect_zu_eq(1, ta.expand_count, "");
123837-	ta.expand_return_value = true;
123838-	err = pai_expand(tsdn, &sec.pai, edata, 4 * PAGE, 3 * PAGE,
123839-	    /* zero */ false, &deferred_work_generated);
123840-	expect_true(err, "Unexpected expand success");
123841-	expect_zu_eq(2, ta.expand_count, "");
123842-
123843-	err = pai_shrink(tsdn, &sec.pai, edata, 4 * PAGE, 2 * PAGE,
123844-	    &deferred_work_generated);
123845-	expect_false(err, "Unexpected shrink failure");
123846-	expect_zu_eq(1, ta.shrink_count, "");
123847-	ta.shrink_return_value = true;
123848-	err = pai_shrink(tsdn, &sec.pai, edata, 2 * PAGE, PAGE,
123849-	    &deferred_work_generated);
123850-	expect_true(err, "Unexpected shrink success");
123851-	expect_zu_eq(2, ta.shrink_count, "");
123852-}
123853-TEST_END
123854-
123855-TEST_BEGIN(test_nshards_0) {
123856-	pai_test_allocator_t ta;
123857-	pai_test_allocator_init(&ta);
123858-	sec_t sec;
123859-	/* See the note above -- we can't use the real tsd. */
123860-	tsdn_t *tsdn = TSDN_NULL;
123861-	base_t *base = base_new(TSDN_NULL, /* ind */ 123,
123862-	    &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
123863-
123864-	sec_opts_t opts = SEC_OPTS_DEFAULT;
123865-	opts.nshards = 0;
123866-	sec_init(TSDN_NULL, &sec, base, &ta.pai, &opts);
123867-
123868-	bool deferred_work_generated = false;
123869-	edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
123870-	    /* zero */ false, /* guarded */ false, /* frequent_reuse */ false,
123871-	    &deferred_work_generated);
123872-	pai_dalloc(tsdn, &sec.pai, edata, &deferred_work_generated);
123873-
123874-	/* Both operations should have gone directly to the fallback. */
123875-	expect_zu_eq(1, ta.alloc_count, "");
123876-	expect_zu_eq(1, ta.dalloc_count, "");
123877-}
123878-TEST_END
123879-
123880-static void
123881-expect_stats_pages(tsdn_t *tsdn, sec_t *sec, size_t npages) {
123882-	sec_stats_t stats;
123883-	/*
123884-	 * Check that the stats merging accumulates rather than overwrites by
123885-	 * putting some (made up) data there to begin with.
123886-	 */
123887-	stats.bytes = 123;
123888-	sec_stats_merge(tsdn, sec, &stats);
123889-	assert_zu_le(npages * PAGE + 123, stats.bytes, "");
123890-}
123891-
123892-TEST_BEGIN(test_stats_simple) {
123893-	pai_test_allocator_t ta;
123894-	pai_test_allocator_init(&ta);
123895-	sec_t sec;
123896-
123897-	/* See the note above -- we can't use the real tsd. */
123898-	tsdn_t *tsdn = TSDN_NULL;
123899-
123900-	enum {
123901-		NITERS = 100,
123902-		FLUSH_PAGES = 20,
123903-	};
123904-
123905-	bool deferred_work_generated = false;
123906-
123907-	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
123908-	    /* max_bytes */ FLUSH_PAGES * PAGE);
123909-
123910-	edata_t *allocs[FLUSH_PAGES];
123911-	for (size_t i = 0; i < FLUSH_PAGES; i++) {
123912-		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
123913-		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
123914-		    false, &deferred_work_generated);
123915-		expect_stats_pages(tsdn, &sec, 0);
123916-	}
123917-
123918-	/* Increase and decrease, without flushing. */
123919-	for (size_t i = 0; i < NITERS; i++) {
123920-		for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
123921-			pai_dalloc(tsdn, &sec.pai, allocs[j],
123922-			    &deferred_work_generated);
123923-			expect_stats_pages(tsdn, &sec, j + 1);
123924-		}
123925-		for (size_t j = 0; j < FLUSH_PAGES / 2; j++) {
123926-			allocs[j] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
123927-			    /* zero */ false, /* guarded */ false,
123928-			    /* frequent_reuse */ false,
123929-			    &deferred_work_generated);
123930-			expect_stats_pages(tsdn, &sec, FLUSH_PAGES / 2 - j - 1);
123931-		}
123932-	}
123933-}
123934-TEST_END
123935-
123936-TEST_BEGIN(test_stats_auto_flush) {
123937-	pai_test_allocator_t ta;
123938-	pai_test_allocator_init(&ta);
123939-	sec_t sec;
123940-
123941-	/* See the note above -- we can't use the real tsd. */
123942-	tsdn_t *tsdn = TSDN_NULL;
123943-
123944-	enum {
123945-		FLUSH_PAGES = 10,
123946-	};
123947-
123948-	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
123949-	    /* max_bytes */ FLUSH_PAGES * PAGE);
123950-
123951-	edata_t *extra_alloc0;
123952-	edata_t *extra_alloc1;
123953-	edata_t *allocs[2 * FLUSH_PAGES];
123954-
123955-	bool deferred_work_generated = false;
123956-
123957-	extra_alloc0 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
123958-	    /* guarded */ false, /* frequent_reuse */ false,
123959-	    &deferred_work_generated);
123960-	extra_alloc1 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
123961-	    /* guarded */ false, /* frequent_reuse */ false,
123962-	    &deferred_work_generated);
123963-
123964-	for (size_t i = 0; i < 2 * FLUSH_PAGES; i++) {
123965-		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
123966-		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
123967-		    false, &deferred_work_generated);
123968-	}
123969-
123970-	for (size_t i = 0; i < FLUSH_PAGES; i++) {
123971-		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
123972-	}
123973-	pai_dalloc(tsdn, &sec.pai, extra_alloc0, &deferred_work_generated);
123974-
123975-	/* Flush the remaining pages; stats should still work. */
123976-	for (size_t i = 0; i < FLUSH_PAGES; i++) {
123977-		pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES + i],
123978-		    &deferred_work_generated);
123979-	}
123980-
123981-	pai_dalloc(tsdn, &sec.pai, extra_alloc1, &deferred_work_generated);
123982-
123983-	expect_stats_pages(tsdn, &sec, ta.alloc_count + ta.alloc_batch_count
123984-	    - ta.dalloc_count - ta.dalloc_batch_count);
123985-}
123986-TEST_END
123987-
123988-TEST_BEGIN(test_stats_manual_flush) {
123989-	pai_test_allocator_t ta;
123990-	pai_test_allocator_init(&ta);
123991-	sec_t sec;
123992-
123993-	/* See the note above -- we can't use the real tsd. */
123994-	tsdn_t *tsdn = TSDN_NULL;
123995-
123996-	enum {
123997-		FLUSH_PAGES = 10,
123998-	};
123999-
124000-	test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
124001-	    /* max_bytes */ FLUSH_PAGES * PAGE);
124002-
124003-	bool deferred_work_generated = false;
124004-	edata_t *allocs[FLUSH_PAGES];
124005-	for (size_t i = 0; i < FLUSH_PAGES; i++) {
124006-		allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
124007-		    /* zero */ false, /* guarded */ false, /* frequent_reuse */
124008-		    false, &deferred_work_generated);
124009-		expect_stats_pages(tsdn, &sec, 0);
124010-	}
124011-
124012-	/* Dalloc the first half of the allocations. */
124013-	for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
124014-		pai_dalloc(tsdn, &sec.pai, allocs[i], &deferred_work_generated);
124015-		expect_stats_pages(tsdn, &sec, i + 1);
124016-	}
124017-
124018-	sec_flush(tsdn, &sec);
124019-	expect_stats_pages(tsdn, &sec, 0);
124020-
124021-	/* Flush the remaining pages. */
124022-	for (size_t i = 0; i < FLUSH_PAGES / 2; i++) {
124023-		pai_dalloc(tsdn, &sec.pai, allocs[FLUSH_PAGES / 2 + i],
124024-		    &deferred_work_generated);
124025-		expect_stats_pages(tsdn, &sec, i + 1);
124026-	}
124027-	sec_disable(tsdn, &sec);
124028-	expect_stats_pages(tsdn, &sec, 0);
124029-}
124030-TEST_END
124031-
124032-int
124033-main(void) {
124034-	return test(
124035-	    test_reuse,
124036-	    test_auto_flush,
124037-	    test_disable,
124038-	    test_flush,
124039-	    test_max_alloc_respected,
124040-	    test_expand_shrink_delegate,
124041-	    test_nshards_0,
124042-	    test_stats_simple,
124043-	    test_stats_auto_flush,
124044-	    test_stats_manual_flush);
124045-}
124046diff --git a/jemalloc/test/unit/seq.c b/jemalloc/test/unit/seq.c
124047deleted file mode 100644
124048index 06ed683..0000000
124049--- a/jemalloc/test/unit/seq.c
124050+++ /dev/null
124051@@ -1,95 +0,0 @@
124052-#include "test/jemalloc_test.h"
124053-
124054-#include "jemalloc/internal/seq.h"
124055-
124056-typedef struct data_s data_t;
124057-struct data_s {
124058-	int arr[10];
124059-};
124060-
124061-static void
124062-set_data(data_t *data, int num) {
124063-	for (int i = 0; i < 10; i++) {
124064-		data->arr[i] = num;
124065-	}
124066-}
124067-
124068-static void
124069-expect_data(data_t *data) {
124070-	int num = data->arr[0];
124071-	for (int i = 0; i < 10; i++) {
124072-		expect_d_eq(num, data->arr[i], "Data consistency error");
124073-	}
124074-}
124075-
124076-seq_define(data_t, data)
124077-
124078-typedef struct thd_data_s thd_data_t;
124079-struct thd_data_s {
124080-	seq_data_t data;
124081-};
124082-
124083-static void *
124084-seq_reader_thd(void *arg) {
124085-	thd_data_t *thd_data = (thd_data_t *)arg;
124086-	int iter = 0;
124087-	data_t local_data;
124088-	while (iter < 1000 * 1000 - 1) {
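-		/*
-		 * Loads may fail while the writer is mid-update; only
-		 * consistent snapshots are checked, and they must never move
-		 * backwards.
-		 */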
124089-		bool success = seq_try_load_data(&local_data, &thd_data->data);
124090-		if (success) {
124091-			expect_data(&local_data);
124092-			expect_d_le(iter, local_data.arr[0],
124093-			    "Seq read went back in time.");
124094-			iter = local_data.arr[0];
124095-		}
124096-	}
124097-	return NULL;
124098-}
124099-
124100-static void *
124101-seq_writer_thd(void *arg) {
124102-	thd_data_t *thd_data = (thd_data_t *)arg;
124103-	data_t local_data;
124104-	memset(&local_data, 0, sizeof(local_data));
124105-	for (int i = 0; i < 1000 * 1000; i++) {
124106-		set_data(&local_data, i);
124107-		seq_store_data(&thd_data->data, &local_data);
124108-	}
124109-	return NULL;
124110-}
124111-
124112-TEST_BEGIN(test_seq_threaded) {
124113-	thd_data_t thd_data;
124114-	memset(&thd_data, 0, sizeof(thd_data));
124115-
124116-	thd_t reader;
124117-	thd_t writer;
124118-
124119-	thd_create(&reader, seq_reader_thd, &thd_data);
124120-	thd_create(&writer, seq_writer_thd, &thd_data);
124121-
124122-	thd_join(reader, NULL);
124123-	thd_join(writer, NULL);
124124-}
124125-TEST_END
124126-
124127-TEST_BEGIN(test_seq_simple) {
124128-	data_t data;
124129-	seq_data_t seq;
124130-	memset(&seq, 0, sizeof(seq));
124131-	for (int i = 0; i < 1000 * 1000; i++) {
124132-		set_data(&data, i);
124133-		seq_store_data(&seq, &data);
124134-		set_data(&data, 0);
124135-		bool success = seq_try_load_data(&data, &seq);
124136-		expect_b_eq(success, true, "Failed non-racing read");
124137-		expect_data(&data);
124138-	}
124139-}
124140-TEST_END
124141-
124142-int main(void) {
124143-	return test_no_reentrancy(
124144-	    test_seq_simple,
124145-	    test_seq_threaded);
124146-}
124147diff --git a/jemalloc/test/unit/size_check.c b/jemalloc/test/unit/size_check.c
124148deleted file mode 100644
124149index accdc40..0000000
124150--- a/jemalloc/test/unit/size_check.c
124151+++ /dev/null
124152@@ -1,79 +0,0 @@
124153-#include "test/jemalloc_test.h"
124154-
124155-#include "jemalloc/internal/safety_check.h"
124156-
124157-bool fake_abort_called;
124158-void fake_abort(const char *message) {
124159-	(void)message;
124160-	fake_abort_called = true;
124161-}
124162-
124163-#define SMALL_SIZE1 SC_SMALL_MAXCLASS
124164-#define SMALL_SIZE2 (SC_SMALL_MAXCLASS / 2)
124165-
124166-#define LARGE_SIZE1 SC_LARGE_MINCLASS
124167-#define LARGE_SIZE2 (LARGE_SIZE1 * 2)
124168-
124169-void *
124170-test_invalid_size_pre(size_t sz) {
124171-	safety_check_set_abort(&fake_abort);
124172-
124173-	fake_abort_called = false;
124174-	void *ptr = malloc(sz);
124175-	assert_ptr_not_null(ptr, "Unexpected failure");
124176-
124177-	return ptr;
124178-}
124179-
124180-void
124181-test_invalid_size_post(void) {
124182-	expect_true(fake_abort_called, "Safety check didn't fire");
124183-	safety_check_set_abort(NULL);
124184-}
124185-
124186-TEST_BEGIN(test_invalid_size_sdallocx) {
124187-	test_skip_if(!config_opt_size_checks);
124188-
124189-	void *ptr = test_invalid_size_pre(SMALL_SIZE1);
124190-	sdallocx(ptr, SMALL_SIZE2, 0);
124191-	test_invalid_size_post();
124192-
124193-	ptr = test_invalid_size_pre(LARGE_SIZE1);
124194-	sdallocx(ptr, LARGE_SIZE2, 0);
124195-	test_invalid_size_post();
124196-}
124197-TEST_END
124198-
124199-TEST_BEGIN(test_invalid_size_sdallocx_nonzero_flag) {
124200-	test_skip_if(!config_opt_size_checks);
124201-
124202-	void *ptr = test_invalid_size_pre(SMALL_SIZE1);
124203-	sdallocx(ptr, SMALL_SIZE2, MALLOCX_TCACHE_NONE);
124204-	test_invalid_size_post();
124205-
124206-	ptr = test_invalid_size_pre(LARGE_SIZE1);
124207-	sdallocx(ptr, LARGE_SIZE2, MALLOCX_TCACHE_NONE);
124208-	test_invalid_size_post();
124209-}
124210-TEST_END
124211-
124212-TEST_BEGIN(test_invalid_size_sdallocx_noflags) {
124213-	test_skip_if(!config_opt_size_checks);
124214-
124215-	void *ptr = test_invalid_size_pre(SMALL_SIZE1);
124216-	je_sdallocx_noflags(ptr, SMALL_SIZE2);
124217-	test_invalid_size_post();
124218-
124219-	ptr = test_invalid_size_pre(LARGE_SIZE1);
124220-	je_sdallocx_noflags(ptr, LARGE_SIZE2);
124221-	test_invalid_size_post();
124222-}
124223-TEST_END
124224-
124225-int
124226-main(void) {
124227-	return test(
124228-	    test_invalid_size_sdallocx,
124229-	    test_invalid_size_sdallocx_nonzero_flag,
124230-	    test_invalid_size_sdallocx_noflags);
124231-}
124232diff --git a/jemalloc/test/unit/size_check.sh b/jemalloc/test/unit/size_check.sh
124233deleted file mode 100644
124234index 352d110..0000000
124235--- a/jemalloc/test/unit/size_check.sh
124236+++ /dev/null
124237@@ -1,5 +0,0 @@
124238-#!/bin/sh
124239-
124240-if [ "x${enable_prof}" = "x1" ] ; then
124241-  export MALLOC_CONF="prof:false"
124242-fi
124243diff --git a/jemalloc/test/unit/size_classes.c b/jemalloc/test/unit/size_classes.c
124244deleted file mode 100644
124245index c70eb59..0000000
124246--- a/jemalloc/test/unit/size_classes.c
124247+++ /dev/null
124248@@ -1,188 +0,0 @@
124249-#include "test/jemalloc_test.h"
124250-
124251-static size_t
124252-get_max_size_class(void) {
124253-	unsigned nlextents;
124254-	size_t mib[4];
124255-	size_t sz, miblen, max_size_class;
124256-
124257-	sz = sizeof(unsigned);
124258-	expect_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL,
124259-	    0), 0, "Unexpected mallctl() error");
124260-
124261-	miblen = sizeof(mib) / sizeof(size_t);
124262-	expect_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0,
124263-	    "Unexpected mallctlnametomib() error");
124264-	mib[2] = nlextents - 1;
124265-
124266-	sz = sizeof(size_t);
124267-	expect_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
124268-	    NULL, 0), 0, "Unexpected mallctlbymib() error");
124269-
124270-	return max_size_class;
124271-}
124272-
124273-TEST_BEGIN(test_size_classes) {
124274-	size_t size_class, max_size_class;
124275-	szind_t index, max_index;
124276-
124277-	max_size_class = get_max_size_class();
124278-	max_index = sz_size2index(max_size_class);
124279-
124280-	for (index = 0, size_class = sz_index2size(index); index < max_index ||
124281-	    size_class < max_size_class; index++, size_class =
124282-	    sz_index2size(index)) {
124283-		expect_true(index < max_index,
124284-		    "Loop conditionals should be equivalent; index=%u, "
124285-		    "size_class=%zu (%#zx)", index, size_class, size_class);
124286-		expect_true(size_class < max_size_class,
124287-		    "Loop conditionals should be equivalent; index=%u, "
124288-		    "size_class=%zu (%#zx)", index, size_class, size_class);
124289-
124290-		expect_u_eq(index, sz_size2index(size_class),
124291-		    "sz_size2index() does not reverse sz_index2size(): index=%u"
124292-		    " --> size_class=%zu --> index=%u --> size_class=%zu",
124293-		    index, size_class, sz_size2index(size_class),
124294-		    sz_index2size(sz_size2index(size_class)));
124295-		expect_zu_eq(size_class,
124296-		    sz_index2size(sz_size2index(size_class)),
124297-		    "sz_index2size() does not reverse sz_size2index(): index=%u"
124298-		    " --> size_class=%zu --> index=%u --> size_class=%zu",
124299-		    index, size_class, sz_size2index(size_class),
124300-		    sz_index2size(sz_size2index(size_class)));
124301-
124302-		expect_u_eq(index+1, sz_size2index(size_class+1),
124303-		    "Next size_class does not round up properly");
124304-
124305-		expect_zu_eq(size_class, (index > 0) ?
124306-		    sz_s2u(sz_index2size(index-1)+1) : sz_s2u(1),
124307-		    "sz_s2u() does not round up to size class");
124308-		expect_zu_eq(size_class, sz_s2u(size_class-1),
124309-		    "sz_s2u() does not round up to size class");
124310-		expect_zu_eq(size_class, sz_s2u(size_class),
124311-		    "sz_s2u() does not compute same size class");
124312-		expect_zu_eq(sz_s2u(size_class+1), sz_index2size(index+1),
124313-		    "sz_s2u() does not round up to next size class");
124314-	}
124315-
124316-	expect_u_eq(index, sz_size2index(sz_index2size(index)),
124317-	    "sz_size2index() does not reverse sz_index2size()");
124318-	expect_zu_eq(max_size_class, sz_index2size(
124319-	    sz_size2index(max_size_class)),
124320-	    "sz_index2size() does not reverse sz_size2index()");
124321-
124322-	expect_zu_eq(size_class, sz_s2u(sz_index2size(index-1)+1),
124323-	    "sz_s2u() does not round up to size class");
124324-	expect_zu_eq(size_class, sz_s2u(size_class-1),
124325-	    "sz_s2u() does not round up to size class");
124326-	expect_zu_eq(size_class, sz_s2u(size_class),
124327-	    "sz_s2u() does not compute same size class");
124328-}
124329-TEST_END
124330-
124331-TEST_BEGIN(test_psize_classes) {
124332-	size_t size_class, max_psz;
124333-	pszind_t pind, max_pind;
124334-
124335-	max_psz = get_max_size_class() + PAGE;
124336-	max_pind = sz_psz2ind(max_psz);
124337-
124338-	for (pind = 0, size_class = sz_pind2sz(pind);
124339-	    pind < max_pind || size_class < max_psz;
124340-	    pind++, size_class = sz_pind2sz(pind)) {
124341-		expect_true(pind < max_pind,
124342-		    "Loop conditionals should be equivalent; pind=%u, "
124343-		    "size_class=%zu (%#zx)", pind, size_class, size_class);
124344-		expect_true(size_class < max_psz,
124345-		    "Loop conditionals should be equivalent; pind=%u, "
124346-		    "size_class=%zu (%#zx)", pind, size_class, size_class);
124347-
124348-		expect_u_eq(pind, sz_psz2ind(size_class),
124349-		    "sz_psz2ind() does not reverse sz_pind2sz(): pind=%u -->"
124350-		    " size_class=%zu --> pind=%u --> size_class=%zu", pind,
124351-		    size_class, sz_psz2ind(size_class),
124352-		    sz_pind2sz(sz_psz2ind(size_class)));
124353-		expect_zu_eq(size_class, sz_pind2sz(sz_psz2ind(size_class)),
124354-		    "sz_pind2sz() does not reverse sz_psz2ind(): pind=%u -->"
124355-		    " size_class=%zu --> pind=%u --> size_class=%zu", pind,
124356-		    size_class, sz_psz2ind(size_class),
124357-		    sz_pind2sz(sz_psz2ind(size_class)));
124358-
124359-		if (size_class == SC_LARGE_MAXCLASS) {
124360-			expect_u_eq(SC_NPSIZES, sz_psz2ind(size_class + 1),
124361-			    "Next size_class does not round up properly");
124362-		} else {
124363-			expect_u_eq(pind + 1, sz_psz2ind(size_class + 1),
124364-			    "Next size_class does not round up properly");
124365-		}
124366-
124367-		expect_zu_eq(size_class, (pind > 0) ?
124368-		    sz_psz2u(sz_pind2sz(pind-1)+1) : sz_psz2u(1),
124369-		    "sz_psz2u() does not round up to size class");
124370-		expect_zu_eq(size_class, sz_psz2u(size_class-1),
124371-		    "sz_psz2u() does not round up to size class");
124372-		expect_zu_eq(size_class, sz_psz2u(size_class),
124373-		    "sz_psz2u() does not compute same size class");
124374-		expect_zu_eq(sz_psz2u(size_class+1), sz_pind2sz(pind+1),
124375-		    "sz_psz2u() does not round up to next size class");
124376-	}
124377-
124378-	expect_u_eq(pind, sz_psz2ind(sz_pind2sz(pind)),
124379-	    "sz_psz2ind() does not reverse sz_pind2sz()");
124380-	expect_zu_eq(max_psz, sz_pind2sz(sz_psz2ind(max_psz)),
124381-	    "sz_pind2sz() does not reverse sz_psz2ind()");
124382-
124383-	expect_zu_eq(size_class, sz_psz2u(sz_pind2sz(pind-1)+1),
124384-	    "sz_psz2u() does not round up to size class");
124385-	expect_zu_eq(size_class, sz_psz2u(size_class-1),
124386-	    "sz_psz2u() does not round up to size class");
124387-	expect_zu_eq(size_class, sz_psz2u(size_class),
124388-	    "sz_psz2u() does not compute same size class");
124389-}
124390-TEST_END
124391-
124392-TEST_BEGIN(test_overflow) {
124393-	size_t max_size_class, max_psz;
124394-
124395-	max_size_class = get_max_size_class();
124396-	max_psz = max_size_class + PAGE;
124397-
124398-	expect_u_eq(sz_size2index(max_size_class+1), SC_NSIZES,
124399-	    "sz_size2index() should return NSIZES on overflow");
124400-	expect_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), SC_NSIZES,
124401-	    "sz_size2index() should return NSIZES on overflow");
124402-	expect_u_eq(sz_size2index(SIZE_T_MAX), SC_NSIZES,
124403-	    "sz_size2index() should return NSIZES on overflow");
124404-
124405-	expect_zu_eq(sz_s2u(max_size_class+1), 0,
124406-	    "sz_s2u() should return 0 for unsupported size");
124407-	expect_zu_eq(sz_s2u(ZU(PTRDIFF_MAX)+1), 0,
124408-	    "sz_s2u() should return 0 for unsupported size");
124409-	expect_zu_eq(sz_s2u(SIZE_T_MAX), 0,
124410-	    "sz_s2u() should return 0 on overflow");
124411-
124412-	expect_u_eq(sz_psz2ind(max_size_class+1), SC_NPSIZES,
124413-	    "sz_psz2ind() should return NPSIZES on overflow");
124414-	expect_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), SC_NPSIZES,
124415-	    "sz_psz2ind() should return NPSIZES on overflow");
124416-	expect_u_eq(sz_psz2ind(SIZE_T_MAX), SC_NPSIZES,
124417-	    "sz_psz2ind() should return NPSIZES on overflow");
124418-
124419-	expect_zu_eq(sz_psz2u(max_size_class+1), max_psz,
124420-	    "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported"
124421-	    " size");
124422-	expect_zu_eq(sz_psz2u(ZU(PTRDIFF_MAX)+1), max_psz,
124423-	    "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported "
124424-	    "size");
124425-	expect_zu_eq(sz_psz2u(SIZE_T_MAX), max_psz,
124426-	    "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) on overflow");
124427-}
124428-TEST_END
124429-
124430-int
124431-main(void) {
124432-	return test(
124433-	    test_size_classes,
124434-	    test_psize_classes,
124435-	    test_overflow);
124436-}
124437diff --git a/jemalloc/test/unit/slab.c b/jemalloc/test/unit/slab.c
124438deleted file mode 100644
124439index 70fc5c7..0000000
124440--- a/jemalloc/test/unit/slab.c
124441+++ /dev/null
124442@@ -1,39 +0,0 @@
124443-#include "test/jemalloc_test.h"
124444-
124445-#define INVALID_ARENA_IND ((1U << MALLOCX_ARENA_BITS) - 1)
124446-
124447-TEST_BEGIN(test_arena_slab_regind) {
124448-	szind_t binind;
124449-
124450-	for (binind = 0; binind < SC_NBINS; binind++) {
124451-		size_t regind;
124452-		edata_t slab;
124453-		const bin_info_t *bin_info = &bin_infos[binind];
124454-		edata_init(&slab, INVALID_ARENA_IND,
124455-		    mallocx(bin_info->slab_size, MALLOCX_LG_ALIGN(LG_PAGE)),
124456-		    bin_info->slab_size, true,
124457-		    binind, 0, extent_state_active, false, true, EXTENT_PAI_PAC,
124458-		    EXTENT_NOT_HEAD);
124459-		expect_ptr_not_null(edata_addr_get(&slab),
124460-		    "Unexpected malloc() failure");
124461-		arena_dalloc_bin_locked_info_t dalloc_info;
124462-		arena_dalloc_bin_locked_begin(&dalloc_info, binind);
124463-		for (regind = 0; regind < bin_info->nregs; regind++) {
124464-			void *reg = (void *)((uintptr_t)edata_addr_get(&slab) +
124465-			    (bin_info->reg_size * regind));
124466-			expect_zu_eq(arena_slab_regind(&dalloc_info, binind,
124467-			    &slab, reg),
124468-			    regind,
124469-			    "Incorrect region index computed for size %zu",
124470-			    bin_info->reg_size);
124471-		}
124472-		free(edata_addr_get(&slab));
124473-	}
124474-}
124475-TEST_END
124476-
124477-int
124478-main(void) {
124479-	return test(
124480-	    test_arena_slab_regind);
124481-}
124482diff --git a/jemalloc/test/unit/smoothstep.c b/jemalloc/test/unit/smoothstep.c
124483deleted file mode 100644
124484index 588c9f4..0000000
124485--- a/jemalloc/test/unit/smoothstep.c
124486+++ /dev/null
124487@@ -1,102 +0,0 @@
124488-#include "test/jemalloc_test.h"
124489-
124490-static const uint64_t smoothstep_tab[] = {
124491-#define STEP(step, h, x, y)			\
124492-	h,
124493-	SMOOTHSTEP
124494-#undef STEP
124495-};
124496-
124497-TEST_BEGIN(test_smoothstep_integral) {
124498-	uint64_t sum, min, max;
124499-	unsigned i;
124500-
124501-	/*
124502-	 * The integral of smoothstep in the [0..1] range equals 1/2.  Verify
124503-	 * that the fixed point representation's integral is no more than
124504-	 * rounding error distant from 1/2.  Regarding rounding, each table
124505-	 * element is rounded down to the nearest fixed point value, so the
124506-	 * integral may be off by as much as SMOOTHSTEP_NSTEPS ulps.
124507-	 */
124508-	sum = 0;
124509-	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
124510-		sum += smoothstep_tab[i];
124511-	}
124512-
124513-	max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1);
124514-	min = max - SMOOTHSTEP_NSTEPS;
124515-
124516-	expect_u64_ge(sum, min,
124517-	    "Integral too small, even accounting for truncation");
124518-	expect_u64_le(sum, max, "Integral exceeds 1/2");
124519-	if (false) {
124520-		malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n",
124521-		    max - sum, SMOOTHSTEP_NSTEPS);
124522-	}
124523-}
124524-TEST_END
124525-
124526-TEST_BEGIN(test_smoothstep_monotonic) {
124527-	uint64_t prev_h;
124528-	unsigned i;
124529-
124530-	/*
124531-	 * The smoothstep function is monotonic in [0..1], i.e. its slope is
124532-	 * non-negative.  In practice we want to parametrize table generation
124533-	 * such that piecewise slope is greater than zero, but do not require
124534-	 * that here.
124535-	 */
124536-	prev_h = 0;
124537-	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
124538-		uint64_t h = smoothstep_tab[i];
124539-		expect_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i);
124540-		prev_h = h;
124541-	}
124542-	expect_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1],
124543-	    (KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1");
124544-}
124545-TEST_END
124546-
124547-TEST_BEGIN(test_smoothstep_slope) {
124548-	uint64_t prev_h, prev_delta;
124549-	unsigned i;
124550-
124551-	/*
124552-	 * The smoothstep slope strictly increases until x=0.5, and then
124553-	 * strictly decreases until x=1.0.  Verify the slightly weaker
124554-	 * requirement of monotonicity, so that inadequate table precision does
124555-	 * not cause false test failures.
124556-	 */
124557-	prev_h = 0;
124558-	prev_delta = 0;
124559-	for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) {
124560-		uint64_t h = smoothstep_tab[i];
124561-		uint64_t delta = h - prev_h;
124562-		expect_u64_ge(delta, prev_delta,
124563-		    "Slope must monotonically increase in 0.0 <= x <= 0.5, "
124564-		    "i=%u", i);
124565-		prev_h = h;
124566-		prev_delta = delta;
124567-	}
124568-
124569-	prev_h = KQU(1) << SMOOTHSTEP_BFP;
124570-	prev_delta = 0;
124571-	for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) {
124572-		uint64_t h = smoothstep_tab[i];
124573-		uint64_t delta = prev_h - h;
124574-		expect_u64_ge(delta, prev_delta,
124575-		    "Slope must monotonically decrease in 0.5 <= x <= 1.0, "
124576-		    "i=%u", i);
124577-		prev_h = h;
124578-		prev_delta = delta;
124579-	}
124580-}
124581-TEST_END
124582-
124583-int
124584-main(void) {
124585-	return test(
124586-	    test_smoothstep_integral,
124587-	    test_smoothstep_monotonic,
124588-	    test_smoothstep_slope);
124589-}
124590diff --git a/jemalloc/test/unit/spin.c b/jemalloc/test/unit/spin.c
124591deleted file mode 100644
124592index b965f74..0000000
124593--- a/jemalloc/test/unit/spin.c
124594+++ /dev/null
124595@@ -1,18 +0,0 @@
124596-#include "test/jemalloc_test.h"
124597-
124598-#include "jemalloc/internal/spin.h"
124599-
124600-TEST_BEGIN(test_spin) {
124601-	spin_t spinner = SPIN_INITIALIZER;
124602-
124603-	for (unsigned i = 0; i < 100; i++) {
124604-		spin_adaptive(&spinner);
124605-	}
124606-}
124607-TEST_END
124608-
124609-int
124610-main(void) {
124611-	return test(
124612-	    test_spin);
124613-}
124614diff --git a/jemalloc/test/unit/stats.c b/jemalloc/test/unit/stats.c
124615deleted file mode 100644
124616index bbdbd18..0000000
124617--- a/jemalloc/test/unit/stats.c
124618+++ /dev/null
124619@@ -1,431 +0,0 @@
124620-#include "test/jemalloc_test.h"
124621-
124622-#define STRINGIFY_HELPER(x) #x
124623-#define STRINGIFY(x) STRINGIFY_HELPER(x)
124624-
124625-TEST_BEGIN(test_stats_summary) {
124626-	size_t sz, allocated, active, resident, mapped;
124627-	int expected = config_stats ? 0 : ENOENT;
124628-
124629-	sz = sizeof(size_t);
124630-	expect_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL,
124631-	    0), expected, "Unexpected mallctl() result");
124632-	expect_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0),
124633-	    expected, "Unexpected mallctl() result");
124634-	expect_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0),
124635-	    expected, "Unexpected mallctl() result");
124636-	expect_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0),
124637-	    expected, "Unexpected mallctl() result");
124638-
124639-	if (config_stats) {
124640-		expect_zu_le(allocated, active,
124641-		    "allocated should be no larger than active");
124642-		expect_zu_lt(active, resident,
124643-		    "active should be less than resident");
124644-		expect_zu_lt(active, mapped,
124645-		    "active should be less than mapped");
124646-	}
124647-}
124648-TEST_END
124649-
124650-TEST_BEGIN(test_stats_large) {
124651-	void *p;
124652-	uint64_t epoch;
124653-	size_t allocated;
124654-	uint64_t nmalloc, ndalloc, nrequests;
124655-	size_t sz;
124656-	int expected = config_stats ? 0 : ENOENT;
124657-
124658-	p = mallocx(SC_SMALL_MAXCLASS + 1, MALLOCX_ARENA(0));
124659-	expect_ptr_not_null(p, "Unexpected mallocx() failure");
124660-
124661-	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
124662-	    0, "Unexpected mallctl() failure");
124663-
124664-	sz = sizeof(size_t);
124665-	expect_d_eq(mallctl("stats.arenas.0.large.allocated",
124666-	    (void *)&allocated, &sz, NULL, 0), expected,
124667-	    "Unexpected mallctl() result");
124668-	sz = sizeof(uint64_t);
124669-	expect_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
124670-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
124671-	expect_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
124672-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
124673-	expect_d_eq(mallctl("stats.arenas.0.large.nrequests",
124674-	    (void *)&nrequests, &sz, NULL, 0), expected,
124675-	    "Unexpected mallctl() result");
124676-
124677-	if (config_stats) {
124678-		expect_zu_gt(allocated, 0,
124679-		    "allocated should be greater than zero");
124680-		expect_u64_ge(nmalloc, ndalloc,
124681-		    "nmalloc should be at least as large as ndalloc");
124682-		expect_u64_le(nmalloc, nrequests,
124683-		    "nmalloc should be no larger than nrequests");
124684-	}
124685-
124686-	dallocx(p, 0);
124687-}
124688-TEST_END
124689-
124690-TEST_BEGIN(test_stats_arenas_summary) {
124691-	void *little, *large;
124692-	uint64_t epoch;
124693-	size_t sz;
124694-	int expected = config_stats ? 0 : ENOENT;
124695-	size_t mapped;
124696-	uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
124697-	uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
124698-
124699-	little = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
124700-	expect_ptr_not_null(little, "Unexpected mallocx() failure");
124701-	large = mallocx((1U << SC_LG_LARGE_MINCLASS),
124702-	    MALLOCX_ARENA(0));
124703-	expect_ptr_not_null(large, "Unexpected mallocx() failure");
124704-
124705-	dallocx(little, 0);
124706-	dallocx(large, 0);
124707-
124708-	expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
124709-	    opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
124710-	expect_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
124711-	    "Unexpected mallctl() failure");
124712-
124713-	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
124714-	    0, "Unexpected mallctl() failure");
124715-
124716-	sz = sizeof(size_t);
124717-	expect_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL,
124718-	    0), expected, "Unexpected mallctl() result");
124719-
124720-	sz = sizeof(uint64_t);
124721-	expect_d_eq(mallctl("stats.arenas.0.dirty_npurge",
124722-	    (void *)&dirty_npurge, &sz, NULL, 0), expected,
124723-	    "Unexpected mallctl() result");
124724-	expect_d_eq(mallctl("stats.arenas.0.dirty_nmadvise",
124725-	    (void *)&dirty_nmadvise, &sz, NULL, 0), expected,
124726-	    "Unexpected mallctl() result");
124727-	expect_d_eq(mallctl("stats.arenas.0.dirty_purged",
124728-	    (void *)&dirty_purged, &sz, NULL, 0), expected,
124729-	    "Unexpected mallctl() result");
124730-	expect_d_eq(mallctl("stats.arenas.0.muzzy_npurge",
124731-	    (void *)&muzzy_npurge, &sz, NULL, 0), expected,
124732-	    "Unexpected mallctl() result");
124733-	expect_d_eq(mallctl("stats.arenas.0.muzzy_nmadvise",
124734-	    (void *)&muzzy_nmadvise, &sz, NULL, 0), expected,
124735-	    "Unexpected mallctl() result");
124736-	expect_d_eq(mallctl("stats.arenas.0.muzzy_purged",
124737-	    (void *)&muzzy_purged, &sz, NULL, 0), expected,
124738-	    "Unexpected mallctl() result");
124739-
124740-	if (config_stats) {
124741-		if (!is_background_thread_enabled() && !opt_hpa) {
124742-			expect_u64_gt(dirty_npurge + muzzy_npurge, 0,
124743-			    "At least one purge should have occurred");
124744-		}
124745-		expect_u64_le(dirty_nmadvise, dirty_purged,
124746-		    "dirty_nmadvise should be no greater than dirty_purged");
124747-		expect_u64_le(muzzy_nmadvise, muzzy_purged,
124748-		    "muzzy_nmadvise should be no greater than muzzy_purged");
124749-	}
124750-}
124751-TEST_END
124752-
124753-void *
124754-thd_start(void *arg) {
124755-	return NULL;
124756-}
124757-
124758-static void
124759-no_lazy_lock(void) {
124760-	thd_t thd;
124761-
124762-	thd_create(&thd, thd_start, NULL);
124763-	thd_join(thd, NULL);
124764-}
124765-
124766-TEST_BEGIN(test_stats_arenas_small) {
124767-	void *p;
124768-	size_t sz, allocated;
124769-	uint64_t epoch, nmalloc, ndalloc, nrequests;
124770-	int expected = config_stats ? 0 : ENOENT;
124771-
124772-	no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
124773-
124774-	p = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
124775-	expect_ptr_not_null(p, "Unexpected mallocx() failure");
124776-
124777-	expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
124778-	    opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
124779-
124780-	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
124781-	    0, "Unexpected mallctl() failure");
124782-
124783-	sz = sizeof(size_t);
124784-	expect_d_eq(mallctl("stats.arenas.0.small.allocated",
124785-	    (void *)&allocated, &sz, NULL, 0), expected,
124786-	    "Unexpected mallctl() result");
124787-	sz = sizeof(uint64_t);
124788-	expect_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc,
124789-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
124790-	expect_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc,
124791-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
124792-	expect_d_eq(mallctl("stats.arenas.0.small.nrequests",
124793-	    (void *)&nrequests, &sz, NULL, 0), expected,
124794-	    "Unexpected mallctl() result");
124795-
124796-	if (config_stats) {
124797-		expect_zu_gt(allocated, 0,
124798-		    "allocated should be greater than zero");
124799-		expect_u64_gt(nmalloc, 0,
124800-		    "nmalloc should be greater than zero");
124801-		expect_u64_ge(nmalloc, ndalloc,
124802-		    "nmalloc should be at least as large as ndalloc");
124803-		expect_u64_gt(nrequests, 0,
124804-		    "nrequests should be greater than zero");
124805-	}
124806-
124807-	dallocx(p, 0);
124808-}
124809-TEST_END
124810-
124811-TEST_BEGIN(test_stats_arenas_large) {
124812-	void *p;
124813-	size_t sz, allocated;
124814-	uint64_t epoch, nmalloc, ndalloc;
124815-	int expected = config_stats ? 0 : ENOENT;
124816-
124817-	p = mallocx((1U << SC_LG_LARGE_MINCLASS), MALLOCX_ARENA(0));
124818-	expect_ptr_not_null(p, "Unexpected mallocx() failure");
124819-
124820-	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
124821-	    0, "Unexpected mallctl() failure");
124822-
124823-	sz = sizeof(size_t);
124824-	expect_d_eq(mallctl("stats.arenas.0.large.allocated",
124825-	    (void *)&allocated, &sz, NULL, 0), expected,
124826-	    "Unexpected mallctl() result");
124827-	sz = sizeof(uint64_t);
124828-	expect_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
124829-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
124830-	expect_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
124831-	    &sz, NULL, 0), expected, "Unexpected mallctl() result");
124832-
124833-	if (config_stats) {
124834-		expect_zu_gt(allocated, 0,
124835-		    "allocated should be greater than zero");
124836-		expect_u64_gt(nmalloc, 0,
124837-		    "nmalloc should be greater than zero");
124838-		expect_u64_ge(nmalloc, ndalloc,
124839-		    "nmalloc should be at least as large as ndalloc");
124840-	}
124841-
124842-	dallocx(p, 0);
124843-}
124844-TEST_END
124845-
124846-static void
124847-gen_mallctl_str(char *cmd, char *name, unsigned arena_ind) {
124848-	sprintf(cmd, "stats.arenas.%u.bins.0.%s", arena_ind, name);
124849-}
124850-
124851-TEST_BEGIN(test_stats_arenas_bins) {
124852-	void *p;
124853-	size_t sz, curslabs, curregs, nonfull_slabs;
124854-	uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes;
124855-	uint64_t nslabs, nreslabs;
124856-	int expected = config_stats ? 0 : ENOENT;
124857-
124858-	/* Make sure allocation below isn't satisfied by tcache. */
124859-	expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
124860-	    opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
124861-
124862-	unsigned arena_ind, old_arena_ind;
124863-	sz = sizeof(unsigned);
124864-	expect_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
124865-	    0, "Arena creation failure");
124866-	sz = sizeof(arena_ind);
124867-	expect_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
124868-	    (void *)&arena_ind, sizeof(arena_ind)), 0,
124869-	    "Unexpected mallctl() failure");
124870-
124871-	p = malloc(bin_infos[0].reg_size);
124872-	expect_ptr_not_null(p, "Unexpected malloc() failure");
124873-
124874-	expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
124875-	    opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
124876-
124877-	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
124878-	    0, "Unexpected mallctl() failure");
124879-
124880-	char cmd[128];
124881-	sz = sizeof(uint64_t);
124882-	gen_mallctl_str(cmd, "nmalloc", arena_ind);
124883-	expect_d_eq(mallctl(cmd, (void *)&nmalloc, &sz, NULL, 0), expected,
124884-	    "Unexpected mallctl() result");
124885-	gen_mallctl_str(cmd, "ndalloc", arena_ind);
124886-	expect_d_eq(mallctl(cmd, (void *)&ndalloc, &sz, NULL, 0), expected,
124887-	    "Unexpected mallctl() result");
124888-	gen_mallctl_str(cmd, "nrequests", arena_ind);
124889-	expect_d_eq(mallctl(cmd, (void *)&nrequests, &sz, NULL, 0), expected,
124890-	    "Unexpected mallctl() result");
124891-	sz = sizeof(size_t);
124892-	gen_mallctl_str(cmd, "curregs", arena_ind);
124893-	expect_d_eq(mallctl(cmd, (void *)&curregs, &sz, NULL, 0), expected,
124894-	    "Unexpected mallctl() result");
124895-
124896-	sz = sizeof(uint64_t);
124897-	gen_mallctl_str(cmd, "nfills", arena_ind);
124898-	expect_d_eq(mallctl(cmd, (void *)&nfills, &sz, NULL, 0), expected,
124899-	    "Unexpected mallctl() result");
124900-	gen_mallctl_str(cmd, "nflushes", arena_ind);
124901-	expect_d_eq(mallctl(cmd, (void *)&nflushes, &sz, NULL, 0), expected,
124902-	    "Unexpected mallctl() result");
124903-
124904-	gen_mallctl_str(cmd, "nslabs", arena_ind);
124905-	expect_d_eq(mallctl(cmd, (void *)&nslabs, &sz, NULL, 0), expected,
124906-	    "Unexpected mallctl() result");
124907-	gen_mallctl_str(cmd, "nreslabs", arena_ind);
124908-	expect_d_eq(mallctl(cmd, (void *)&nreslabs, &sz, NULL, 0), expected,
124909-	    "Unexpected mallctl() result");
124910-	sz = sizeof(size_t);
124911-	gen_mallctl_str(cmd, "curslabs", arena_ind);
124912-	expect_d_eq(mallctl(cmd, (void *)&curslabs, &sz, NULL, 0), expected,
124913-	    "Unexpected mallctl() result");
124914-	gen_mallctl_str(cmd, "nonfull_slabs", arena_ind);
124915-	expect_d_eq(mallctl(cmd, (void *)&nonfull_slabs, &sz, NULL, 0),
124916-	    expected, "Unexpected mallctl() result");
124917-
124918-	if (config_stats) {
124919-		expect_u64_gt(nmalloc, 0,
124920-		    "nmalloc should be greater than zero");
124921-		expect_u64_ge(nmalloc, ndalloc,
124922-		    "nmalloc should be at least as large as ndalloc");
124923-		expect_u64_gt(nrequests, 0,
124924-		    "nrequests should be greater than zero");
124925-		expect_zu_gt(curregs, 0,
124926-		    "curregs should be greater than zero");
124927-		if (opt_tcache) {
124928-			expect_u64_gt(nfills, 0,
124929-			    "At least one fill should have occurred");
124930-			expect_u64_gt(nflushes, 0,
124931-			    "At least one flush should have occurred");
124932-		}
124933-		expect_u64_gt(nslabs, 0,
124934-		    "At least one slab should have been allocated");
124935-		expect_zu_gt(curslabs, 0,
124936-		    "At least one slab should be currently allocated");
124937-		expect_zu_eq(nonfull_slabs, 0,
124938-		    "slabs_nonfull should be empty");
124939-	}
124940-
124941-	dallocx(p, 0);
124942-}
124943-TEST_END
124944-
124945-TEST_BEGIN(test_stats_arenas_lextents) {
124946-	void *p;
124947-	uint64_t epoch, nmalloc, ndalloc;
124948-	size_t curlextents, sz, hsize;
124949-	int expected = config_stats ? 0 : ENOENT;
124950-
124951-	sz = sizeof(size_t);
124952-	expect_d_eq(mallctl("arenas.lextent.0.size", (void *)&hsize, &sz, NULL,
124953-	    0), 0, "Unexpected mallctl() failure");
124954-
124955-	p = mallocx(hsize, MALLOCX_ARENA(0));
124956-	expect_ptr_not_null(p, "Unexpected mallocx() failure");
124957-
124958-	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
124959-	    0, "Unexpected mallctl() failure");
124960-
124961-	sz = sizeof(uint64_t);
124962-	expect_d_eq(mallctl("stats.arenas.0.lextents.0.nmalloc",
124963-	    (void *)&nmalloc, &sz, NULL, 0), expected,
124964-	    "Unexpected mallctl() result");
124965-	expect_d_eq(mallctl("stats.arenas.0.lextents.0.ndalloc",
124966-	    (void *)&ndalloc, &sz, NULL, 0), expected,
124967-	    "Unexpected mallctl() result");
124968-	sz = sizeof(size_t);
124969-	expect_d_eq(mallctl("stats.arenas.0.lextents.0.curlextents",
124970-	    (void *)&curlextents, &sz, NULL, 0), expected,
124971-	    "Unexpected mallctl() result");
124972-
124973-	if (config_stats) {
124974-		expect_u64_gt(nmalloc, 0,
124975-		    "nmalloc should be greater than zero");
124976-		expect_u64_ge(nmalloc, ndalloc,
124977-		    "nmalloc should be at least as large as ndalloc");
124978-		expect_u64_gt(curlextents, 0,
124979-		    "At least one extent should be currently allocated");
124980-	}
124981-
124982-	dallocx(p, 0);
124983-}
124984-TEST_END
124985-
124986-static void
124987-test_tcache_bytes_for_usize(size_t usize) {
124988-	uint64_t epoch;
124989-	size_t tcache_bytes, tcache_stashed_bytes;
124990-	size_t sz = sizeof(tcache_bytes);
124991-
124992-	void *ptr = mallocx(usize, 0);
124993-
124994-	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
124995-	    0, "Unexpected mallctl() failure");
124996-	assert_d_eq(mallctl(
124997-	    "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".tcache_bytes",
124998-	    &tcache_bytes, &sz, NULL, 0), 0, "Unexpected mallctl failure");
124999-	assert_d_eq(mallctl(
125000-	    "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL)
125001-	    ".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0,
125002-	    "Unexpected mallctl failure");
125003-	size_t tcache_bytes_before = tcache_bytes + tcache_stashed_bytes;
125004-	dallocx(ptr, 0);
125005-
125006-	expect_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
125007-	    0, "Unexpected mallctl() failure");
125008-	assert_d_eq(mallctl(
125009-	    "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".tcache_bytes",
125010-	    &tcache_bytes, &sz, NULL, 0), 0, "Unexpected mallctl failure");
125011-	assert_d_eq(mallctl(
125012-	    "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL)
125013-	    ".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0,
125014-	    "Unexpected mallctl failure");
125015-	size_t tcache_bytes_after = tcache_bytes + tcache_stashed_bytes;
125016-	assert_zu_eq(tcache_bytes_after - tcache_bytes_before,
125017-	    usize, "Incorrectly attributed a free");
125018-}
125019-
125020-TEST_BEGIN(test_stats_tcache_bytes_small) {
125021-	test_skip_if(!config_stats);
125022-	test_skip_if(!opt_tcache);
125023-	test_skip_if(opt_tcache_max < SC_SMALL_MAXCLASS);
125024-
125025-	test_tcache_bytes_for_usize(SC_SMALL_MAXCLASS);
125026-}
125027-TEST_END
125028-
125029-TEST_BEGIN(test_stats_tcache_bytes_large) {
125030-	test_skip_if(!config_stats);
125031-	test_skip_if(!opt_tcache);
125032-	test_skip_if(opt_tcache_max < SC_LARGE_MINCLASS);
125033-
125034-	test_tcache_bytes_for_usize(SC_LARGE_MINCLASS);
125035-}
125036-TEST_END
125037-
125038-int
125039-main(void) {
125040-	return test_no_reentrancy(
125041-	    test_stats_summary,
125042-	    test_stats_large,
125043-	    test_stats_arenas_summary,
125044-	    test_stats_arenas_small,
125045-	    test_stats_arenas_large,
125046-	    test_stats_arenas_bins,
125047-	    test_stats_arenas_lextents,
125048-	    test_stats_tcache_bytes_small,
125049-	    test_stats_tcache_bytes_large);
125050-}
125051diff --git a/jemalloc/test/unit/stats_print.c b/jemalloc/test/unit/stats_print.c
125052deleted file mode 100644
125053index 3b31775..0000000
125054--- a/jemalloc/test/unit/stats_print.c
125055+++ /dev/null
125056@@ -1,999 +0,0 @@
125057-#include "test/jemalloc_test.h"
125058-
125059-#include "jemalloc/internal/util.h"
125060-
125061-typedef enum {
125062-	TOKEN_TYPE_NONE,
125063-	TOKEN_TYPE_ERROR,
125064-	TOKEN_TYPE_EOI,
125065-	TOKEN_TYPE_NULL,
125066-	TOKEN_TYPE_FALSE,
125067-	TOKEN_TYPE_TRUE,
125068-	TOKEN_TYPE_LBRACKET,
125069-	TOKEN_TYPE_RBRACKET,
125070-	TOKEN_TYPE_LBRACE,
125071-	TOKEN_TYPE_RBRACE,
125072-	TOKEN_TYPE_COLON,
125073-	TOKEN_TYPE_COMMA,
125074-	TOKEN_TYPE_STRING,
125075-	TOKEN_TYPE_NUMBER
125076-} token_type_t;
125077-
125078-typedef struct parser_s parser_t;
125079-typedef struct {
125080-	parser_t	*parser;
125081-	token_type_t	token_type;
125082-	size_t		pos;
125083-	size_t		len;
125084-	size_t		line;
125085-	size_t		col;
125086-} token_t;
125087-
125088-struct parser_s {
125089-	bool verbose;
125090-	char	*buf; /* '\0'-terminated. */
125091-	size_t	len; /* Number of characters preceding '\0' in buf. */
125092-	size_t	pos;
125093-	size_t	line;
125094-	size_t	col;
125095-	token_t	token;
125096-};
125097-
125098-static void
125099-token_init(token_t *token, parser_t *parser, token_type_t token_type,
125100-    size_t pos, size_t len, size_t line, size_t col) {
125101-	token->parser = parser;
125102-	token->token_type = token_type;
125103-	token->pos = pos;
125104-	token->len = len;
125105-	token->line = line;
125106-	token->col = col;
125107-}
125108-
125109-static void
125110-token_error(token_t *token) {
125111-	if (!token->parser->verbose) {
125112-		return;
125113-	}
125114-	switch (token->token_type) {
125115-	case TOKEN_TYPE_NONE:
125116-		not_reached();
125117-	case TOKEN_TYPE_ERROR:
125118-		malloc_printf("%zu:%zu: Unexpected character in token: ",
125119-		    token->line, token->col);
125120-		break;
125121-	default:
125122-		malloc_printf("%zu:%zu: Unexpected token: ", token->line,
125123-		    token->col);
125124-		break;
125125-	}
125126-	UNUSED ssize_t err = malloc_write_fd(STDERR_FILENO,
125127-	    &token->parser->buf[token->pos], token->len);
125128-	malloc_printf("\n");
125129-}
125130-
125131-static void
125132-parser_init(parser_t *parser, bool verbose) {
125133-	parser->verbose = verbose;
125134-	parser->buf = NULL;
125135-	parser->len = 0;
125136-	parser->pos = 0;
125137-	parser->line = 1;
125138-	parser->col = 0;
125139-}
125140-
125141-static void
125142-parser_fini(parser_t *parser) {
125143-	if (parser->buf != NULL) {
125144-		dallocx(parser->buf, MALLOCX_TCACHE_NONE);
125145-	}
125146-}
125147-
125148-static bool
125149-parser_append(parser_t *parser, const char *str) {
125150-	size_t len = strlen(str);
125151-	char *buf = (parser->buf == NULL) ? mallocx(len + 1,
125152-	    MALLOCX_TCACHE_NONE) : rallocx(parser->buf, parser->len + len + 1,
125153-	    MALLOCX_TCACHE_NONE);
125154-	if (buf == NULL) {
125155-		return true;
125156-	}
125157-	memcpy(&buf[parser->len], str, len + 1);
125158-	parser->buf = buf;
125159-	parser->len += len;
125160-	return false;
125161-}
125162-
125163-static bool
125164-parser_tokenize(parser_t *parser) {
125165-	enum {
125166-		STATE_START,
125167-		STATE_EOI,
125168-		STATE_N, STATE_NU, STATE_NUL, STATE_NULL,
125169-		STATE_F, STATE_FA, STATE_FAL, STATE_FALS, STATE_FALSE,
125170-		STATE_T, STATE_TR, STATE_TRU, STATE_TRUE,
125171-		STATE_LBRACKET,
125172-		STATE_RBRACKET,
125173-		STATE_LBRACE,
125174-		STATE_RBRACE,
125175-		STATE_COLON,
125176-		STATE_COMMA,
125177-		STATE_CHARS,
125178-		STATE_CHAR_ESCAPE,
125179-		STATE_CHAR_U, STATE_CHAR_UD, STATE_CHAR_UDD, STATE_CHAR_UDDD,
125180-		STATE_STRING,
125181-		STATE_MINUS,
125182-		STATE_LEADING_ZERO,
125183-		STATE_DIGITS,
125184-		STATE_DECIMAL,
125185-		STATE_FRAC_DIGITS,
125186-		STATE_EXP,
125187-		STATE_EXP_SIGN,
125188-		STATE_EXP_DIGITS,
125189-		STATE_ACCEPT
125190-	} state = STATE_START;
125191-	size_t token_pos JEMALLOC_CC_SILENCE_INIT(0);
125192-	size_t token_line JEMALLOC_CC_SILENCE_INIT(1);
125193-	size_t token_col JEMALLOC_CC_SILENCE_INIT(0);
125194-
125195-	expect_zu_le(parser->pos, parser->len,
125196-	    "Position is past end of buffer");
125197-
125198-	while (state != STATE_ACCEPT) {
125199-		char c = parser->buf[parser->pos];
125200-
125201-		switch (state) {
125202-		case STATE_START:
125203-			token_pos = parser->pos;
125204-			token_line = parser->line;
125205-			token_col = parser->col;
125206-			switch (c) {
125207-			case ' ': case '\b': case '\n': case '\r': case '\t':
125208-				break;
125209-			case '\0':
125210-				state = STATE_EOI;
125211-				break;
125212-			case 'n':
125213-				state = STATE_N;
125214-				break;
125215-			case 'f':
125216-				state = STATE_F;
125217-				break;
125218-			case 't':
125219-				state = STATE_T;
125220-				break;
125221-			case '[':
125222-				state = STATE_LBRACKET;
125223-				break;
125224-			case ']':
125225-				state = STATE_RBRACKET;
125226-				break;
125227-			case '{':
125228-				state = STATE_LBRACE;
125229-				break;
125230-			case '}':
125231-				state = STATE_RBRACE;
125232-				break;
125233-			case ':':
125234-				state = STATE_COLON;
125235-				break;
125236-			case ',':
125237-				state = STATE_COMMA;
125238-				break;
125239-			case '"':
125240-				state = STATE_CHARS;
125241-				break;
125242-			case '-':
125243-				state = STATE_MINUS;
125244-				break;
125245-			case '0':
125246-				state = STATE_LEADING_ZERO;
125247-				break;
125248-			case '1': case '2': case '3': case '4':
125249-			case '5': case '6': case '7': case '8': case '9':
125250-				state = STATE_DIGITS;
125251-				break;
125252-			default:
125253-				token_init(&parser->token, parser,
125254-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125255-				    - token_pos, token_line, token_col);
125256-				return true;
125257-			}
125258-			break;
125259-		case STATE_EOI:
125260-			token_init(&parser->token, parser,
125261-			    TOKEN_TYPE_EOI, token_pos, parser->pos -
125262-			    token_pos, token_line, token_col);
125263-			state = STATE_ACCEPT;
125264-			break;
125265-		case STATE_N:
125266-			switch (c) {
125267-			case 'u':
125268-				state = STATE_NU;
125269-				break;
125270-			default:
125271-				token_init(&parser->token, parser,
125272-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125273-				    - token_pos, token_line, token_col);
125274-				return true;
125275-			}
125276-			break;
125277-		case STATE_NU:
125278-			switch (c) {
125279-			case 'l':
125280-				state = STATE_NUL;
125281-				break;
125282-			default:
125283-				token_init(&parser->token, parser,
125284-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125285-				    - token_pos, token_line, token_col);
125286-				return true;
125287-			}
125288-			break;
125289-		case STATE_NUL:
125290-			switch (c) {
125291-			case 'l':
125292-				state = STATE_NULL;
125293-				break;
125294-			default:
125295-				token_init(&parser->token, parser,
125296-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125297-				    - token_pos, token_line, token_col);
125298-				return true;
125299-			}
125300-			break;
125301-		case STATE_NULL:
125302-			switch (c) {
125303-			case ' ': case '\b': case '\n': case '\r': case '\t':
125304-			case '\0':
125305-			case '[': case ']': case '{': case '}': case ':':
125306-			case ',':
125307-				break;
125308-			default:
125309-				token_init(&parser->token, parser,
125310-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125311-				    - token_pos, token_line, token_col);
125312-				return true;
125313-			}
125314-			token_init(&parser->token, parser, TOKEN_TYPE_NULL,
125315-			    token_pos, parser->pos - token_pos, token_line,
125316-			    token_col);
125317-			state = STATE_ACCEPT;
125318-			break;
125319-		case STATE_F:
125320-			switch (c) {
125321-			case 'a':
125322-				state = STATE_FA;
125323-				break;
125324-			default:
125325-				token_init(&parser->token, parser,
125326-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125327-				    - token_pos, token_line, token_col);
125328-				return true;
125329-			}
125330-			break;
125331-		case STATE_FA:
125332-			switch (c) {
125333-			case 'l':
125334-				state = STATE_FAL;
125335-				break;
125336-			default:
125337-				token_init(&parser->token, parser,
125338-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125339-				    - token_pos, token_line, token_col);
125340-				return true;
125341-			}
125342-			break;
125343-		case STATE_FAL:
125344-			switch (c) {
125345-			case 's':
125346-				state = STATE_FALS;
125347-				break;
125348-			default:
125349-				token_init(&parser->token, parser,
125350-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125351-				    - token_pos, token_line, token_col);
125352-				return true;
125353-			}
125354-			break;
125355-		case STATE_FALS:
125356-			switch (c) {
125357-			case 'e':
125358-				state = STATE_FALSE;
125359-				break;
125360-			default:
125361-				token_init(&parser->token, parser,
125362-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125363-				    - token_pos, token_line, token_col);
125364-				return true;
125365-			}
125366-			break;
125367-		case STATE_FALSE:
125368-			switch (c) {
125369-			case ' ': case '\b': case '\n': case '\r': case '\t':
125370-			case '\0':
125371-			case '[': case ']': case '{': case '}': case ':':
125372-			case ',':
125373-				break;
125374-			default:
125375-				token_init(&parser->token, parser,
125376-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125377-				    - token_pos, token_line, token_col);
125378-				return true;
125379-			}
125380-			token_init(&parser->token, parser,
125381-			    TOKEN_TYPE_FALSE, token_pos, parser->pos -
125382-			    token_pos, token_line, token_col);
125383-			state = STATE_ACCEPT;
125384-			break;
125385-		case STATE_T:
125386-			switch (c) {
125387-			case 'r':
125388-				state = STATE_TR;
125389-				break;
125390-			default:
125391-				token_init(&parser->token, parser,
125392-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125393-				    - token_pos, token_line, token_col);
125394-				return true;
125395-			}
125396-			break;
125397-		case STATE_TR:
125398-			switch (c) {
125399-			case 'u':
125400-				state = STATE_TRU;
125401-				break;
125402-			default:
125403-				token_init(&parser->token, parser,
125404-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125405-				    - token_pos, token_line, token_col);
125406-				return true;
125407-			}
125408-			break;
125409-		case STATE_TRU:
125410-			switch (c) {
125411-			case 'e':
125412-				state = STATE_TRUE;
125413-				break;
125414-			default:
125415-				token_init(&parser->token, parser,
125416-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125417-				    - token_pos, token_line, token_col);
125418-				return true;
125419-			}
125420-			break;
125421-		case STATE_TRUE:
125422-			switch (c) {
125423-			case ' ': case '\b': case '\n': case '\r': case '\t':
125424-			case '\0':
125425-			case '[': case ']': case '{': case '}': case ':':
125426-			case ',':
125427-				break;
125428-			default:
125429-				token_init(&parser->token, parser,
125430-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125431-				    - token_pos, token_line, token_col);
125432-				return true;
125433-			}
125434-			token_init(&parser->token, parser, TOKEN_TYPE_TRUE,
125435-			    token_pos, parser->pos - token_pos, token_line,
125436-			    token_col);
125437-			state = STATE_ACCEPT;
125438-			break;
125439-		case STATE_LBRACKET:
125440-			token_init(&parser->token, parser, TOKEN_TYPE_LBRACKET,
125441-			    token_pos, parser->pos - token_pos, token_line,
125442-			    token_col);
125443-			state = STATE_ACCEPT;
125444-			break;
125445-		case STATE_RBRACKET:
125446-			token_init(&parser->token, parser, TOKEN_TYPE_RBRACKET,
125447-			    token_pos, parser->pos - token_pos, token_line,
125448-			    token_col);
125449-			state = STATE_ACCEPT;
125450-			break;
125451-		case STATE_LBRACE:
125452-			token_init(&parser->token, parser, TOKEN_TYPE_LBRACE,
125453-			    token_pos, parser->pos - token_pos, token_line,
125454-			    token_col);
125455-			state = STATE_ACCEPT;
125456-			break;
125457-		case STATE_RBRACE:
125458-			token_init(&parser->token, parser, TOKEN_TYPE_RBRACE,
125459-			    token_pos, parser->pos - token_pos, token_line,
125460-			    token_col);
125461-			state = STATE_ACCEPT;
125462-			break;
125463-		case STATE_COLON:
125464-			token_init(&parser->token, parser, TOKEN_TYPE_COLON,
125465-			    token_pos, parser->pos - token_pos, token_line,
125466-			    token_col);
125467-			state = STATE_ACCEPT;
125468-			break;
125469-		case STATE_COMMA:
125470-			token_init(&parser->token, parser, TOKEN_TYPE_COMMA,
125471-			    token_pos, parser->pos - token_pos, token_line,
125472-			    token_col);
125473-			state = STATE_ACCEPT;
125474-			break;
125475-		case STATE_CHARS:
125476-			switch (c) {
125477-			case '\\':
125478-				state = STATE_CHAR_ESCAPE;
125479-				break;
125480-			case '"':
125481-				state = STATE_STRING;
125482-				break;
125483-			case 0x00: case 0x01: case 0x02: case 0x03: case 0x04:
125484-			case 0x05: case 0x06: case 0x07: case 0x08: case 0x09:
125485-			case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e:
125486-			case 0x0f: case 0x10: case 0x11: case 0x12: case 0x13:
125487-			case 0x14: case 0x15: case 0x16: case 0x17: case 0x18:
125488-			case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d:
125489-			case 0x1e: case 0x1f:
125490-				token_init(&parser->token, parser,
125491-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125492-				    - token_pos, token_line, token_col);
125493-				return true;
125494-			default:
125495-				break;
125496-			}
125497-			break;
125498-		case STATE_CHAR_ESCAPE:
125499-			switch (c) {
125500-			case '"': case '\\': case '/': case 'b': case 'n':
125501-			case 'r': case 't':
125502-				state = STATE_CHARS;
125503-				break;
125504-			case 'u':
125505-				state = STATE_CHAR_U;
125506-				break;
125507-			default:
125508-				token_init(&parser->token, parser,
125509-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125510-				    - token_pos, token_line, token_col);
125511-				return true;
125512-			}
125513-			break;
125514-		case STATE_CHAR_U:
125515-			switch (c) {
125516-			case '0': case '1': case '2': case '3': case '4':
125517-			case '5': case '6': case '7': case '8': case '9':
125518-			case 'a': case 'b': case 'c': case 'd': case 'e':
125519-			case 'f':
125520-			case 'A': case 'B': case 'C': case 'D': case 'E':
125521-			case 'F':
125522-				state = STATE_CHAR_UD;
125523-				break;
125524-			default:
125525-				token_init(&parser->token, parser,
125526-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125527-				    - token_pos, token_line, token_col);
125528-				return true;
125529-			}
125530-			break;
125531-		case STATE_CHAR_UD:
125532-			switch (c) {
125533-			case '0': case '1': case '2': case '3': case '4':
125534-			case '5': case '6': case '7': case '8': case '9':
125535-			case 'a': case 'b': case 'c': case 'd': case 'e':
125536-			case 'f':
125537-			case 'A': case 'B': case 'C': case 'D': case 'E':
125538-			case 'F':
125539-				state = STATE_CHAR_UDD;
125540-				break;
125541-			default:
125542-				token_init(&parser->token, parser,
125543-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125544-				    - token_pos, token_line, token_col);
125545-				return true;
125546-			}
125547-			break;
125548-		case STATE_CHAR_UDD:
125549-			switch (c) {
125550-			case '0': case '1': case '2': case '3': case '4':
125551-			case '5': case '6': case '7': case '8': case '9':
125552-			case 'a': case 'b': case 'c': case 'd': case 'e':
125553-			case 'f':
125554-			case 'A': case 'B': case 'C': case 'D': case 'E':
125555-			case 'F':
125556-				state = STATE_CHAR_UDDD;
125557-				break;
125558-			default:
125559-				token_init(&parser->token, parser,
125560-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125561-				    - token_pos, token_line, token_col);
125562-				return true;
125563-			}
125564-			break;
125565-		case STATE_CHAR_UDDD:
125566-			switch (c) {
125567-			case '0': case '1': case '2': case '3': case '4':
125568-			case '5': case '6': case '7': case '8': case '9':
125569-			case 'a': case 'b': case 'c': case 'd': case 'e':
125570-			case 'f':
125571-			case 'A': case 'B': case 'C': case 'D': case 'E':
125572-			case 'F':
125573-				state = STATE_CHARS;
125574-				break;
125575-			default:
125576-				token_init(&parser->token, parser,
125577-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125578-				    - token_pos, token_line, token_col);
125579-				return true;
125580-			}
125581-			break;
125582-		case STATE_STRING:
125583-			token_init(&parser->token, parser, TOKEN_TYPE_STRING,
125584-			    token_pos, parser->pos - token_pos, token_line,
125585-			    token_col);
125586-			state = STATE_ACCEPT;
125587-			break;
125588-		case STATE_MINUS:
125589-			switch (c) {
125590-			case '0':
125591-				state = STATE_LEADING_ZERO;
125592-				break;
125593-			case '1': case '2': case '3': case '4':
125594-			case '5': case '6': case '7': case '8': case '9':
125595-				state = STATE_DIGITS;
125596-				break;
125597-			default:
125598-				token_init(&parser->token, parser,
125599-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125600-				    - token_pos, token_line, token_col);
125601-				return true;
125602-			}
125603-			break;
125604-		case STATE_LEADING_ZERO:
125605-			switch (c) {
125606-			case '.':
125607-				state = STATE_DECIMAL;
125608-				break;
125609-			default:
125610-				token_init(&parser->token, parser,
125611-				    TOKEN_TYPE_NUMBER, token_pos, parser->pos -
125612-				    token_pos, token_line, token_col);
125613-				state = STATE_ACCEPT;
125614-				break;
125615-			}
125616-			break;
125617-		case STATE_DIGITS:
125618-			switch (c) {
125619-			case '0': case '1': case '2': case '3': case '4':
125620-			case '5': case '6': case '7': case '8': case '9':
125621-				break;
125622-			case '.':
125623-				state = STATE_DECIMAL;
125624-				break;
125625-			default:
125626-				token_init(&parser->token, parser,
125627-				    TOKEN_TYPE_NUMBER, token_pos, parser->pos -
125628-				    token_pos, token_line, token_col);
125629-				state = STATE_ACCEPT;
125630-				break;
125631-			}
125632-			break;
125633-		case STATE_DECIMAL:
125634-			switch (c) {
125635-			case '0': case '1': case '2': case '3': case '4':
125636-			case '5': case '6': case '7': case '8': case '9':
125637-				state = STATE_FRAC_DIGITS;
125638-				break;
125639-			default:
125640-				token_init(&parser->token, parser,
125641-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125642-				    - token_pos, token_line, token_col);
125643-				return true;
125644-			}
125645-			break;
125646-		case STATE_FRAC_DIGITS:
125647-			switch (c) {
125648-			case '0': case '1': case '2': case '3': case '4':
125649-			case '5': case '6': case '7': case '8': case '9':
125650-				break;
125651-			case 'e': case 'E':
125652-				state = STATE_EXP;
125653-				break;
125654-			default:
125655-				token_init(&parser->token, parser,
125656-				    TOKEN_TYPE_NUMBER, token_pos, parser->pos -
125657-				    token_pos, token_line, token_col);
125658-				state = STATE_ACCEPT;
125659-				break;
125660-			}
125661-			break;
125662-		case STATE_EXP:
125663-			switch (c) {
125664-			case '-': case '+':
125665-				state = STATE_EXP_SIGN;
125666-				break;
125667-			case '0': case '1': case '2': case '3': case '4':
125668-			case '5': case '6': case '7': case '8': case '9':
125669-				state = STATE_EXP_DIGITS;
125670-				break;
125671-			default:
125672-				token_init(&parser->token, parser,
125673-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125674-				    - token_pos, token_line, token_col);
125675-				return true;
125676-			}
125677-			break;
125678-		case STATE_EXP_SIGN:
125679-			switch (c) {
125680-			case '0': case '1': case '2': case '3': case '4':
125681-			case '5': case '6': case '7': case '8': case '9':
125682-				state = STATE_EXP_DIGITS;
125683-				break;
125684-			default:
125685-				token_init(&parser->token, parser,
125686-				    TOKEN_TYPE_ERROR, token_pos, parser->pos + 1
125687-				    - token_pos, token_line, token_col);
125688-				return true;
125689-			}
125690-			break;
125691-		case STATE_EXP_DIGITS:
125692-			switch (c) {
125693-			case '0': case '1': case '2': case '3': case '4':
125694-			case '5': case '6': case '7': case '8': case '9':
125695-				break;
125696-			default:
125697-				token_init(&parser->token, parser,
125698-				    TOKEN_TYPE_NUMBER, token_pos, parser->pos -
125699-				    token_pos, token_line, token_col);
125700-				state = STATE_ACCEPT;
125701-				break;
125702-			}
125703-			break;
125704-		default:
125705-			not_reached();
125706-		}
125707-
125708-		if (state != STATE_ACCEPT) {
125709-			if (c == '\n') {
125710-				parser->line++;
125711-				parser->col = 0;
125712-			} else {
125713-				parser->col++;
125714-			}
125715-			parser->pos++;
125716-		}
125717-	}
125718-	return false;
125719-}
125720-
125721-static bool	parser_parse_array(parser_t *parser);
125722-static bool	parser_parse_object(parser_t *parser);
125723-
125724-static bool
125725-parser_parse_value(parser_t *parser) {
125726-	switch (parser->token.token_type) {
125727-	case TOKEN_TYPE_NULL:
125728-	case TOKEN_TYPE_FALSE:
125729-	case TOKEN_TYPE_TRUE:
125730-	case TOKEN_TYPE_STRING:
125731-	case TOKEN_TYPE_NUMBER:
125732-		return false;
125733-	case TOKEN_TYPE_LBRACE:
125734-		return parser_parse_object(parser);
125735-	case TOKEN_TYPE_LBRACKET:
125736-		return parser_parse_array(parser);
125737-	default:
125738-		return true;
125739-	}
125740-	not_reached();
125741-}
125742-
125743-static bool
125744-parser_parse_pair(parser_t *parser) {
125745-	expect_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
125746-	    "Pair should start with string");
125747-	if (parser_tokenize(parser)) {
125748-		return true;
125749-	}
125750-	switch (parser->token.token_type) {
125751-	case TOKEN_TYPE_COLON:
125752-		if (parser_tokenize(parser)) {
125753-			return true;
125754-		}
125755-		return parser_parse_value(parser);
125756-	default:
125757-		return true;
125758-	}
125759-}
125760-
125761-static bool
125762-parser_parse_values(parser_t *parser) {
125763-	if (parser_parse_value(parser)) {
125764-		return true;
125765-	}
125766-
125767-	while (true) {
125768-		if (parser_tokenize(parser)) {
125769-			return true;
125770-		}
125771-		switch (parser->token.token_type) {
125772-		case TOKEN_TYPE_COMMA:
125773-			if (parser_tokenize(parser)) {
125774-				return true;
125775-			}
125776-			if (parser_parse_value(parser)) {
125777-				return true;
125778-			}
125779-			break;
125780-		case TOKEN_TYPE_RBRACKET:
125781-			return false;
125782-		default:
125783-			return true;
125784-		}
125785-	}
125786-}
125787-
125788-static bool
125789-parser_parse_array(parser_t *parser) {
125790-	expect_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACKET,
125791-	    "Array should start with [");
125792-	if (parser_tokenize(parser)) {
125793-		return true;
125794-	}
125795-	switch (parser->token.token_type) {
125796-	case TOKEN_TYPE_RBRACKET:
125797-		return false;
125798-	default:
125799-		return parser_parse_values(parser);
125800-	}
125801-	not_reached();
125802-}
125803-
125804-static bool
125805-parser_parse_pairs(parser_t *parser) {
125806-	expect_d_eq(parser->token.token_type, TOKEN_TYPE_STRING,
125807-	    "Object should start with string");
125808-	if (parser_parse_pair(parser)) {
125809-		return true;
125810-	}
125811-
125812-	while (true) {
125813-		if (parser_tokenize(parser)) {
125814-			return true;
125815-		}
125816-		switch (parser->token.token_type) {
125817-		case TOKEN_TYPE_COMMA:
125818-			if (parser_tokenize(parser)) {
125819-				return true;
125820-			}
125821-			switch (parser->token.token_type) {
125822-			case TOKEN_TYPE_STRING:
125823-				if (parser_parse_pair(parser)) {
125824-					return true;
125825-				}
125826-				break;
125827-			default:
125828-				return true;
125829-			}
125830-			break;
125831-		case TOKEN_TYPE_RBRACE:
125832-			return false;
125833-		default:
125834-			return true;
125835-		}
125836-	}
125837-}
125838-
125839-static bool
125840-parser_parse_object(parser_t *parser) {
125841-	expect_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACE,
125842-	    "Object should start with {");
125843-	if (parser_tokenize(parser)) {
125844-		return true;
125845-	}
125846-	switch (parser->token.token_type) {
125847-	case TOKEN_TYPE_STRING:
125848-		return parser_parse_pairs(parser);
125849-	case TOKEN_TYPE_RBRACE:
125850-		return false;
125851-	default:
125852-		return true;
125853-	}
125854-	not_reached();
125855-}
125856-
125857-static bool
125858-parser_parse(parser_t *parser) {
125859-	if (parser_tokenize(parser)) {
125860-		goto label_error;
125861-	}
125862-	if (parser_parse_value(parser)) {
125863-		goto label_error;
125864-	}
125865-
125866-	if (parser_tokenize(parser)) {
125867-		goto label_error;
125868-	}
125869-	switch (parser->token.token_type) {
125870-	case TOKEN_TYPE_EOI:
125871-		return false;
125872-	default:
125873-		goto label_error;
125874-	}
125875-	not_reached();
125876-
125877-label_error:
125878-	token_error(&parser->token);
125879-	return true;
125880-}
125881-
125882-TEST_BEGIN(test_json_parser) {
125883-	size_t i;
125884-	const char *invalid_inputs[] = {
125885-		/* Tokenizer error test cases. */
125886-		"{ \"string\": X }",
125887-		"{ \"string\": nXll }",
125888-		"{ \"string\": nuXl }",
125889-		"{ \"string\": nulX }",
125890-		"{ \"string\": nullX }",
125891-		"{ \"string\": fXlse }",
125892-		"{ \"string\": faXse }",
125893-		"{ \"string\": falXe }",
125894-		"{ \"string\": falsX }",
125895-		"{ \"string\": falseX }",
125896-		"{ \"string\": tXue }",
125897-		"{ \"string\": trXe }",
125898-		"{ \"string\": truX }",
125899-		"{ \"string\": trueX }",
125900-		"{ \"string\": \"\n\" }",
125901-		"{ \"string\": \"\\z\" }",
125902-		"{ \"string\": \"\\uX000\" }",
125903-		"{ \"string\": \"\\u0X00\" }",
125904-		"{ \"string\": \"\\u00X0\" }",
125905-		"{ \"string\": \"\\u000X\" }",
125906-		"{ \"string\": -X }",
125907-		"{ \"string\": 0.X }",
125908-		"{ \"string\": 0.0eX }",
125909-		"{ \"string\": 0.0e+X }",
125910-
125911-		/* Parser error test cases. */
125912-		"{\"string\": }",
125913-		"{\"string\" }",
125914-		"{\"string\": [ 0 }",
125915-		"{\"string\": {\"a\":0, 1 } }",
125916-		"{\"string\": {\"a\":0: } }",
125917-		"{",
125918-		"{}{",
125919-	};
125920-	const char *valid_inputs[] = {
125921-		/* Token tests. */
125922-		"null",
125923-		"false",
125924-		"true",
125925-		"{}",
125926-		"{\"a\": 0}",
125927-		"[]",
125928-		"[0, 1]",
125929-		"0",
125930-		"1",
125931-		"10",
125932-		"-10",
125933-		"10.23",
125934-		"10.23e4",
125935-		"10.23e-4",
125936-		"10.23e+4",
125937-		"10.23E4",
125938-		"10.23E-4",
125939-		"10.23E+4",
125940-		"-10.23",
125941-		"-10.23e4",
125942-		"-10.23e-4",
125943-		"-10.23e+4",
125944-		"-10.23E4",
125945-		"-10.23E-4",
125946-		"-10.23E+4",
125947-		"\"value\"",
125948-		"\" \\\" \\/ \\b \\n \\r \\t \\u0abc \\u1DEF \"",
125949-
125950-		/* Parser test with various nesting. */
125951-		"{\"a\":null, \"b\":[1,[{\"c\":2},3]], \"d\":{\"e\":true}}",
125952-	};
125953-
125954-	for (i = 0; i < sizeof(invalid_inputs)/sizeof(const char *); i++) {
125955-		const char *input = invalid_inputs[i];
125956-		parser_t parser;
125957-		parser_init(&parser, false);
125958-		expect_false(parser_append(&parser, input),
125959-		    "Unexpected input appending failure");
125960-		expect_true(parser_parse(&parser),
125961-		    "Unexpected parse success for input: %s", input);
125962-		parser_fini(&parser);
125963-	}
125964-
125965-	for (i = 0; i < sizeof(valid_inputs)/sizeof(const char *); i++) {
125966-		const char *input = valid_inputs[i];
125967-		parser_t parser;
125968-		parser_init(&parser, true);
125969-		expect_false(parser_append(&parser, input),
125970-		    "Unexpected input appending failure");
125971-		expect_false(parser_parse(&parser),
125972-		    "Unexpected parse error for input: %s", input);
125973-		parser_fini(&parser);
125974-	}
125975-}
125976-TEST_END
125977-
125978-void
125979-write_cb(void *opaque, const char *str) {
125980-	parser_t *parser = (parser_t *)opaque;
125981-	if (parser_append(parser, str)) {
125982-		test_fail("Unexpected input appending failure");
125983-	}
125984-}
125985-
125986-TEST_BEGIN(test_stats_print_json) {
125987-	const char *opts[] = {
125988-		"J",
125989-		"Jg",
125990-		"Jm",
125991-		"Jd",
125992-		"Jmd",
125993-		"Jgd",
125994-		"Jgm",
125995-		"Jgmd",
125996-		"Ja",
125997-		"Jb",
125998-		"Jl",
125999-		"Jx",
126000-		"Jbl",
126001-		"Jal",
126002-		"Jab",
126003-		"Jabl",
126004-		"Jax",
126005-		"Jbx",
126006-		"Jlx",
126007-		"Jablx",
126008-		"Jgmdablx",
126009-	};
126010-	unsigned arena_ind, i;
126011-
126012-	for (i = 0; i < 3; i++) {
126013-		unsigned j;
126014-
126015-		switch (i) {
126016-		case 0:
126017-			break;
126018-		case 1: {
126019-			size_t sz = sizeof(arena_ind);
126020-			expect_d_eq(mallctl("arenas.create", (void *)&arena_ind,
126021-			    &sz, NULL, 0), 0, "Unexpected mallctl failure");
126022-			break;
126023-		} case 2: {
126024-			size_t mib[3];
126025-			size_t miblen = sizeof(mib)/sizeof(size_t);
126026-			expect_d_eq(mallctlnametomib("arena.0.destroy",
126027-			    mib, &miblen), 0,
126028-			    "Unexpected mallctlnametomib failure");
126029-			mib[1] = arena_ind;
126030-			expect_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL,
126031-			    0), 0, "Unexpected mallctlbymib failure");
126032-			break;
126033-		} default:
126034-			not_reached();
126035-		}
126036-
126037-		for (j = 0; j < sizeof(opts)/sizeof(const char *); j++) {
126038-			parser_t parser;
126039-
126040-			parser_init(&parser, true);
126041-			malloc_stats_print(write_cb, (void *)&parser, opts[j]);
126042-			expect_false(parser_parse(&parser),
126043-			    "Unexpected parse error, opts=\"%s\"", opts[j]);
126044-			parser_fini(&parser);
126045-		}
126046-	}
126047-}
126048-TEST_END
126049-
126050-int
126051-main(void) {
126052-	return test(
126053-	    test_json_parser,
126054-	    test_stats_print_json);
126055-}
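
For context on the interface the deleted test above exercises: malloc_stats_print() never assembles the report in memory itself; it streams chunks of text into a caller-supplied callback, which is why write_cb() can feed the output straight into the JSON parser. The following is a minimal standalone sketch of collecting the JSON report into a heap buffer. It assumes a program linked against jemalloc with the unprefixed public API available via <jemalloc/jemalloc.h>; the buf_t type and collect_cb() helper are illustrative, not part of jemalloc.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <jemalloc/jemalloc.h>

/* Illustrative growable buffer; not part of jemalloc. */
typedef struct {
	char *data;
	size_t len;
} buf_t;

/* Same shape as write_cb(void *opaque, const char *str) in the test above. */
static void
collect_cb(void *opaque, const char *str) {
	buf_t *buf = (buf_t *)opaque;
	size_t n = strlen(str);
	char *p = realloc(buf->data, buf->len + n + 1);
	if (p == NULL) {
		return;	/* Drop output on OOM; acceptable for a diagnostics dump. */
	}
	memcpy(p + buf->len, str, n + 1);
	buf->data = p;
	buf->len += n;
}

int
main(void) {
	buf_t buf = {NULL, 0};
	/* "J" selects JSON output, as in the opts[] table above. */
	malloc_stats_print(collect_cb, (void *)&buf, "J");
	if (buf.data != NULL) {
		fputs(buf.data, stdout);
		free(buf.data);
	}
	return 0;
}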
126056diff --git a/jemalloc/test/unit/sz.c b/jemalloc/test/unit/sz.c
126057deleted file mode 100644
126058index 8ae04b9..0000000
126059--- a/jemalloc/test/unit/sz.c
126060+++ /dev/null
126061@@ -1,66 +0,0 @@
126062-#include "test/jemalloc_test.h"
126063-
126064-TEST_BEGIN(test_sz_psz2ind) {
126065-	/*
126066-	 * Test the page size classes that precede the first regular group in
126067-	 * which all size classes are divisible by the page size.
126068-	 * For x86_64 Linux, it's 4096, 8192, 12288, 16384, with corresponding
126069-	 * pszind 0, 1, 2 and 3.
126070-	 */
126071-	for (size_t i = 0; i < SC_NGROUP; i++) {
126072-		for (size_t psz = i * PAGE + 1; psz <= (i + 1) * PAGE; psz++) {
126073-			pszind_t ind = sz_psz2ind(psz);
126074-			expect_zu_eq(ind, i, "Got %u as sz_psz2ind of %zu", ind,
126075-			    psz);
126076-		}
126077-	}
126078-
126079-	sc_data_t data;
126080-	memset(&data, 0, sizeof(data));
126081-	sc_data_init(&data);
126082-	/*
126083-	 * 'base' is the base of the first regular group with all size classes
126084-	 * divisible by page size.
126085-	 * For x86_64 Linux, it's 16384, and base_ind is 36.
126086-	 */
126087-	size_t base_psz = 1 << (SC_LG_NGROUP + LG_PAGE);
126088-	size_t base_ind = 0;
126089-	while (base_ind < SC_NSIZES &&
126090-	    reg_size_compute(data.sc[base_ind].lg_base,
126091-		data.sc[base_ind].lg_delta,
126092-		data.sc[base_ind].ndelta) < base_psz) {
126093-		base_ind++;
126094-	}
126095-	expect_zu_eq(
126096-	    reg_size_compute(data.sc[base_ind].lg_base,
126097-		data.sc[base_ind].lg_delta, data.sc[base_ind].ndelta),
126098-	    base_psz, "Size class equal to %zu not found", base_psz);
126099-	/*
126100-	 * Test different sizes falling into groups after the 'base'. The
126101-	 * increment is PAGE / 3 to keep the execution time reasonable.
126102-	 */
126103-	base_ind -= SC_NGROUP;
126104-	for (size_t psz = base_psz; psz <= 64 * 1024 * 1024; psz += PAGE / 3) {
126105-		pszind_t ind = sz_psz2ind(psz);
126106-		sc_t gt_sc = data.sc[ind + base_ind];
126107-		expect_zu_gt(psz,
126108-		    reg_size_compute(gt_sc.lg_base, gt_sc.lg_delta,
126109-			gt_sc.ndelta),
126110-		    "Got %u as sz_psz2ind of %zu", ind, psz);
126111-		sc_t le_sc = data.sc[ind + base_ind + 1];
126112-		expect_zu_le(psz,
126113-		    reg_size_compute(le_sc.lg_base, le_sc.lg_delta,
126114-			le_sc.ndelta),
126115-		    "Got %u as sz_psz2ind of %zu", ind, psz);
126116-	}
126117-
126118-	pszind_t max_ind = sz_psz2ind(SC_LARGE_MAXCLASS + 1);
126119-	expect_lu_eq(max_ind, SC_NPSIZES,
126120-	    "Got %u as sz_psz2ind of %llu", max_ind, SC_LARGE_MAXCLASS);
126121-}
126122-TEST_END
126123-
126124-int
126125-main(void) {
126126-	return test(test_sz_psz2ind);
126127-}
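
As background for the sz test above: it leans on the size-class encoding in which each class is described by (lg_base, lg_delta, ndelta) and its size is (1 << lg_base) + (ndelta << lg_delta). The helper below is a sketch of that formula for illustration only, under that stated assumption; it is not a copy of the project's reg_size_compute().

#include <stddef.h>
#include <stdio.h>

/*
 * Sketch of the size-class formula the deleted sz test relies on (assumed
 * encoding): size = (1 << lg_base) + (ndelta << lg_delta).
 */
static size_t
reg_size_compute(unsigned lg_base, unsigned lg_delta, unsigned ndelta) {
	return ((size_t)1 << lg_base) + ((size_t)ndelta << lg_delta);
}

int
main(void) {
	/* Example: 16384 + 2 * 4096 = 24576. */
	printf("%zu\n", reg_size_compute(14, 12, 2));
	return 0;
}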
126128diff --git a/jemalloc/test/unit/tcache_max.c b/jemalloc/test/unit/tcache_max.c
126129deleted file mode 100644
126130index 1f657c8..0000000
126131--- a/jemalloc/test/unit/tcache_max.c
126132+++ /dev/null
126133@@ -1,175 +0,0 @@
126134-#include "test/jemalloc_test.h"
126135-#include "test/san.h"
126136-
126137-const char *malloc_conf = TEST_SAN_UAF_ALIGN_DISABLE;
126138-
126139-enum {
126140-	alloc_option_start = 0,
126141-	use_malloc = 0,
126142-	use_mallocx,
126143-	alloc_option_end
126144-};
126145-
126146-enum {
126147-	dalloc_option_start = 0,
126148-	use_free = 0,
126149-	use_dallocx,
126150-	use_sdallocx,
126151-	dalloc_option_end
126152-};
126153-
126154-static unsigned alloc_option, dalloc_option;
126155-static size_t tcache_max;
126156-
126157-static void *
126158-alloc_func(size_t sz) {
126159-	void *ret;
126160-
126161-	switch (alloc_option) {
126162-	case use_malloc:
126163-		ret = malloc(sz);
126164-		break;
126165-	case use_mallocx:
126166-		ret = mallocx(sz, 0);
126167-		break;
126168-	default:
126169-		unreachable();
126170-	}
126171-	expect_ptr_not_null(ret, "Unexpected malloc / mallocx failure");
126172-
126173-	return ret;
126174-}
126175-
126176-static void
126177-dalloc_func(void *ptr, size_t sz) {
126178-	switch (dalloc_option) {
126179-	case use_free:
126180-		free(ptr);
126181-		break;
126182-	case use_dallocx:
126183-		dallocx(ptr, 0);
126184-		break;
126185-	case use_sdallocx:
126186-		sdallocx(ptr, sz, 0);
126187-		break;
126188-	default:
126189-		unreachable();
126190-	}
126191-}
126192-
126193-static size_t
126194-tcache_bytes_read(void) {
126195-	uint64_t epoch;
126196-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
126197-	    0, "Unexpected mallctl() failure");
126198-
126199-	size_t tcache_bytes;
126200-	size_t sz = sizeof(tcache_bytes);
126201-	assert_d_eq(mallctl(
126202-	    "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL) ".tcache_bytes",
126203-	    &tcache_bytes, &sz, NULL, 0), 0, "Unexpected mallctl failure");
126204-
126205-	return tcache_bytes;
126206-}
126207-
126208-static void
126209-tcache_bytes_check_update(size_t *prev, ssize_t diff) {
126210-	size_t tcache_bytes = tcache_bytes_read();
126211-	expect_zu_eq(tcache_bytes, *prev + diff, "Unexpected tcache_bytes");
126212-
126213-	*prev += diff;
126214-}
126215-
126216-static void
126217-test_tcache_bytes_alloc(size_t alloc_size) {
126218-	expect_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), 0,
126219-	    "Unexpected tcache flush failure");
126220-
126221-	size_t usize = sz_s2u(alloc_size);
126222-	/* No change is expected if usize is outside of tcache_max range. */
126223-	bool cached = (usize <= tcache_max);
126224-	ssize_t diff = cached ? usize : 0;
126225-
126226-	void *ptr1 = alloc_func(alloc_size);
126227-	void *ptr2 = alloc_func(alloc_size);
126228-
126229-	size_t bytes = tcache_bytes_read();
126230-	dalloc_func(ptr2, alloc_size);
126231-	/* Expect tcache_bytes increase after dalloc */
126232-	tcache_bytes_check_update(&bytes, diff);
126233-
126234-	dalloc_func(ptr1, alloc_size);
126235-	/* Expect tcache_bytes increase again */
126236-	tcache_bytes_check_update(&bytes, diff);
126237-
126238-	void *ptr3 = alloc_func(alloc_size);
126239-	if (cached) {
126240-		expect_ptr_eq(ptr1, ptr3, "Unexpected cached ptr");
126241-	}
126242-	/* Expect tcache_bytes decrease after alloc */
126243-	tcache_bytes_check_update(&bytes, -diff);
126244-
126245-	void *ptr4 = alloc_func(alloc_size);
126246-	if (cached) {
126247-		expect_ptr_eq(ptr2, ptr4, "Unexpected cached ptr");
126248-	}
126249-	/* Expect tcache_bytes decrease again */
126250-	tcache_bytes_check_update(&bytes, -diff);
126251-
126252-	dalloc_func(ptr3, alloc_size);
126253-	tcache_bytes_check_update(&bytes, diff);
126254-	dalloc_func(ptr4, alloc_size);
126255-	tcache_bytes_check_update(&bytes, diff);
126256-}
126257-
126258-static void
126259-test_tcache_max_impl(void) {
126260-	size_t sz;
126261-	sz = sizeof(tcache_max);
126262-	assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
126263-	    &sz, NULL, 0), 0, "Unexpected mallctl() failure");
126264-
126265-	/* opt.tcache_max set to 1024 in tcache_max.sh */
126266-	expect_zu_eq(tcache_max, 1024, "Unexpected tcache_max value");
126267-
126268-	test_tcache_bytes_alloc(1);
126269-	test_tcache_bytes_alloc(tcache_max - 1);
126270-	test_tcache_bytes_alloc(tcache_max);
126271-	test_tcache_bytes_alloc(tcache_max + 1);
126272-
126273-	test_tcache_bytes_alloc(PAGE - 1);
126274-	test_tcache_bytes_alloc(PAGE);
126275-	test_tcache_bytes_alloc(PAGE + 1);
126276-
126277-	size_t large;
126278-	sz = sizeof(large);
126279-	assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large, &sz, NULL,
126280-	    0), 0, "Unexpected mallctl() failure");
126281-
126282-	test_tcache_bytes_alloc(large - 1);
126283-	test_tcache_bytes_alloc(large);
126284-	test_tcache_bytes_alloc(large + 1);
126285-}
126286-
126287-TEST_BEGIN(test_tcache_max) {
126288-	test_skip_if(!config_stats);
126289-	test_skip_if(!opt_tcache);
126290-	test_skip_if(opt_prof);
126291-	test_skip_if(san_uaf_detection_enabled());
126292-
126293-	for (alloc_option = alloc_option_start;
126294-	     alloc_option < alloc_option_end;
126295-	     alloc_option++) {
126296-		for (dalloc_option = dalloc_option_start;
126297-		     dalloc_option < dalloc_option_end;
126298-		     dalloc_option++) {
126299-			test_tcache_max_impl();
126300-		}
126301-	}
126302-}
126303-TEST_END
126304-
126305-int
126306-main(void) {
126307-	return test(test_tcache_max);
126308-}
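
The tcache_bytes_read() helper above illustrates a pattern that recurs throughout these tests: statistics exposed through mallctl() are cached, so a read of any stats.* value is preceded by bumping the "epoch" control. A reduced standalone sketch of that pattern follows; it assumes the unprefixed public API, uses the documented stats.allocated counter rather than the per-arena tcache statistic, and returns a boolean instead of asserting.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

/* Refresh jemalloc's cached statistics, then read one stats value. */
static bool
read_stat_allocated(size_t *out) {
	uint64_t epoch = 1;
	if (mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)) != 0) {
		return false;
	}
	size_t sz = sizeof(*out);
	return mallctl("stats.allocated", (void *)out, &sz, NULL, 0) == 0;
}

int
main(void) {
	void *p = malloc(1);
	size_t before, after;
	if (read_stat_allocated(&before)) {
		free(p);
		if (read_stat_allocated(&after)) {
			printf("allocated: %zu -> %zu\n", before, after);
		}
	} else {
		free(p);
	}
	return 0;
}

Read before and after an allocation or free, the delta between two such values is exactly what tcache_bytes_check_update() above asserts on.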
126309diff --git a/jemalloc/test/unit/tcache_max.sh b/jemalloc/test/unit/tcache_max.sh
126310deleted file mode 100644
126311index 4480d73..0000000
126312--- a/jemalloc/test/unit/tcache_max.sh
126313+++ /dev/null
126314@@ -1,3 +0,0 @@
126315-#!/bin/sh
126316-
126317-export MALLOC_CONF="tcache_max:1024"
126318diff --git a/jemalloc/test/unit/test_hooks.c b/jemalloc/test/unit/test_hooks.c
126319deleted file mode 100644
126320index 8cd2b3b..0000000
126321--- a/jemalloc/test/unit/test_hooks.c
126322+++ /dev/null
126323@@ -1,38 +0,0 @@
126324-#include "test/jemalloc_test.h"
126325-
126326-static bool hook_called = false;
126327-
126328-static void
126329-hook() {
126330-	hook_called = true;
126331-}
126332-
126333-static int
126334-func_to_hook(int arg1, int arg2) {
126335-	return arg1 + arg2;
126336-}
126337-
126338-#define func_to_hook JEMALLOC_TEST_HOOK(func_to_hook, test_hooks_libc_hook)
126339-
126340-TEST_BEGIN(unhooked_call) {
126341-	test_hooks_libc_hook = NULL;
126342-	hook_called = false;
126343-	expect_d_eq(3, func_to_hook(1, 2), "Hooking changed return value.");
126344-	expect_false(hook_called, "Nulling out hook didn't take.");
126345-}
126346-TEST_END
126347-
126348-TEST_BEGIN(hooked_call) {
126349-	test_hooks_libc_hook = &hook;
126350-	hook_called = false;
126351-	expect_d_eq(3, func_to_hook(1, 2), "Hooking changed return value.");
126352-	expect_true(hook_called, "Hook should have executed.");
126353-}
126354-TEST_END
126355-
126356-int
126357-main(void) {
126358-	return test(
126359-	    unhooked_call,
126360-	    hooked_call);
126361-}
126362diff --git a/jemalloc/test/unit/thread_event.c b/jemalloc/test/unit/thread_event.c
126363deleted file mode 100644
126364index e0b88a9..0000000
126365--- a/jemalloc/test/unit/thread_event.c
126366+++ /dev/null
126367@@ -1,34 +0,0 @@
126368-#include "test/jemalloc_test.h"
126369-
126370-TEST_BEGIN(test_next_event_fast) {
126371-	tsd_t *tsd = tsd_fetch();
126372-	te_ctx_t ctx;
126373-	te_ctx_get(tsd, &ctx, true);
126374-
126375-	te_ctx_last_event_set(&ctx, 0);
126376-	te_ctx_current_bytes_set(&ctx, TE_NEXT_EVENT_FAST_MAX - 8U);
126377-	te_ctx_next_event_set(tsd, &ctx, TE_NEXT_EVENT_FAST_MAX);
126378-#define E(event, condition, is_alloc)					\
126379-	if (is_alloc && condition) {					\
126380-		event##_event_wait_set(tsd, TE_NEXT_EVENT_FAST_MAX);	\
126381-	}
126382-	ITERATE_OVER_ALL_EVENTS
126383-#undef E
126384-
126385-	/* Test next_event_fast rolling back to 0. */
126386-	void *p = malloc(16U);
126387-	assert_ptr_not_null(p, "malloc() failed");
126388-	free(p);
126389-
126390-	/* Test next_event_fast resuming to be equal to next_event. */
126391-	void *q = malloc(SC_LOOKUP_MAXCLASS);
126392-	assert_ptr_not_null(q, "malloc() failed");
126393-	free(q);
126394-}
126395-TEST_END
126396-
126397-int
126398-main(void) {
126399-	return test(
126400-	    test_next_event_fast);
126401-}
126402diff --git a/jemalloc/test/unit/thread_event.sh b/jemalloc/test/unit/thread_event.sh
126403deleted file mode 100644
126404index 8fcc7d8..0000000
126405--- a/jemalloc/test/unit/thread_event.sh
126406+++ /dev/null
126407@@ -1,5 +0,0 @@
126408-#!/bin/sh
126409-
126410-if [ "x${enable_prof}" = "x1" ] ; then
126411-  export MALLOC_CONF="prof:true,lg_prof_sample:0"
126412-fi
126413diff --git a/jemalloc/test/unit/ticker.c b/jemalloc/test/unit/ticker.c
126414deleted file mode 100644
126415index 0dd7786..0000000
126416--- a/jemalloc/test/unit/ticker.c
126417+++ /dev/null
126418@@ -1,100 +0,0 @@
126419-#include "test/jemalloc_test.h"
126420-
126421-#include "jemalloc/internal/ticker.h"
126422-
126423-TEST_BEGIN(test_ticker_tick) {
126424-#define NREPS 2
126425-#define NTICKS 3
126426-	ticker_t ticker;
126427-	int32_t i, j;
126428-
126429-	ticker_init(&ticker, NTICKS);
126430-	for (i = 0; i < NREPS; i++) {
126431-		for (j = 0; j < NTICKS; j++) {
126432-			expect_u_eq(ticker_read(&ticker), NTICKS - j,
126433-			    "Unexpected ticker value (i=%d, j=%d)", i, j);
126434-			expect_false(ticker_tick(&ticker),
126435-			    "Unexpected ticker fire (i=%d, j=%d)", i, j);
126436-		}
126437-		expect_u32_eq(ticker_read(&ticker), 0,
126438-		    "Expected ticker depletion");
126439-		expect_true(ticker_tick(&ticker),
126440-		    "Expected ticker fire (i=%d)", i);
126441-		expect_u32_eq(ticker_read(&ticker), NTICKS,
126442-		    "Expected ticker reset");
126443-	}
126444-#undef NTICKS
126445-}
126446-TEST_END
126447-
126448-TEST_BEGIN(test_ticker_ticks) {
126449-#define NTICKS 3
126450-	ticker_t ticker;
126451-
126452-	ticker_init(&ticker, NTICKS);
126453-
126454-	expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
126455-	expect_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire");
126456-	expect_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value");
126457-	expect_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire");
126458-	expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
126459-
126460-	expect_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire");
126461-	expect_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value");
126462-#undef NTICKS
126463-}
126464-TEST_END
126465-
126466-TEST_BEGIN(test_ticker_copy) {
126467-#define NTICKS 3
126468-	ticker_t ta, tb;
126469-
126470-	ticker_init(&ta, NTICKS);
126471-	ticker_copy(&tb, &ta);
126472-	expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
126473-	expect_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire");
126474-	expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
126475-
126476-	ticker_tick(&ta);
126477-	ticker_copy(&tb, &ta);
126478-	expect_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value");
126479-	expect_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire");
126480-	expect_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value");
126481-#undef NTICKS
126482-}
126483-TEST_END
126484-
126485-TEST_BEGIN(test_ticker_geom) {
126486-	const int32_t ticks = 100;
126487-	const uint64_t niters = 100 * 1000;
126488-
126489-	ticker_geom_t ticker;
126490-	ticker_geom_init(&ticker, ticks);
126491-	uint64_t total_ticks = 0;
126492-	/* Just some random constant. */
126493-	uint64_t prng_state = 0x343219f93496db9fULL;
126494-	for (uint64_t i = 0; i < niters; i++) {
126495-		while (!ticker_geom_tick(&ticker, &prng_state)) {
126496-			total_ticks++;
126497-		}
126498-	}
126499-	/*
126500-	 * In fact, with this choice of random seed and the PRNG implementation
126501-	 * used at the time this was tested, total_ticks is 95.1% of the
126502-	 * expected ticks.
126503-	 */
126504-	expect_u64_ge(total_ticks, niters * ticks * 9 / 10,
126505-	    "Mean off by > 10%%");
126506-	expect_u64_le(total_ticks, niters * ticks * 11 / 10,
126507-	    "Mean off by > 10%%");
126508-}
126509-TEST_END
126510-
126511-int
126512-main(void) {
126513-	return test(
126514-	    test_ticker_tick,
126515-	    test_ticker_ticks,
126516-	    test_ticker_copy,
126517-	    test_ticker_geom);
126518-}
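
A quick sanity check on the bounds asserted in test_ticker_geom above: the geometric ticker is configured with ticks = 100 and the loop forces niters = 100 * 1000 firings, so the count accumulated in total_ticks is expected to be on the order of niters * ticks = 100,000 * 100 = 10,000,000 non-firing ticks. The two assertions only require the observed total to fall within 10% of that figure, i.e. between 9,000,000 and 11,000,000, which comfortably accommodates the ~95% ratio the comment reports for this particular PRNG seed.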
126519diff --git a/jemalloc/test/unit/tsd.c b/jemalloc/test/unit/tsd.c
126520deleted file mode 100644
126521index 205d870..0000000
126522--- a/jemalloc/test/unit/tsd.c
126523+++ /dev/null
126524@@ -1,274 +0,0 @@
126525-#include "test/jemalloc_test.h"
126526-
126527-/*
126528- * If we're e.g. in debug mode, we *never* enter the fast path, and so shouldn't
126529- * be asserting that we're on one.
126530- */
126531-static bool originally_fast;
126532-static int data_cleanup_count;
126533-
126534-void
126535-data_cleanup(int *data) {
126536-	if (data_cleanup_count == 0) {
126537-		expect_x_eq(*data, MALLOC_TSD_TEST_DATA_INIT,
126538-		    "Argument passed into cleanup function should match tsd "
126539-		    "value");
126540-	}
126541-	++data_cleanup_count;
126542-
126543-	/*
126544-	 * Allocate during cleanup for two rounds, in order to ensure that
126545-	 * jemalloc's internal tsd reinitialization happens.
126546-	 */
126547-	bool reincarnate = false;
126548-	switch (*data) {
126549-	case MALLOC_TSD_TEST_DATA_INIT:
126550-		*data = 1;
126551-		reincarnate = true;
126552-		break;
126553-	case 1:
126554-		*data = 2;
126555-		reincarnate = true;
126556-		break;
126557-	case 2:
126558-		return;
126559-	default:
126560-		not_reached();
126561-	}
126562-
126563-	if (reincarnate) {
126564-		void *p = mallocx(1, 0);
126565-		expect_ptr_not_null(p, "Unexpected mallocx() failure");
126566-		dallocx(p, 0);
126567-	}
126568-}
126569-
126570-static void *
126571-thd_start(void *arg) {
126572-	int d = (int)(uintptr_t)arg;
126573-	void *p;
126574-
126575-	/*
126576-	 * Test free before tsd init -- the free fast path (which does not
126577-	 * explicitly check for NULL) has to tolerate this case, and fall back
126578-	 * to free_default.
126579-	 */
126580-	free(NULL);
126581-
126582-	tsd_t *tsd = tsd_fetch();
126583-	expect_x_eq(tsd_test_data_get(tsd), MALLOC_TSD_TEST_DATA_INIT,
126584-	    "Initial tsd get should return initialization value");
126585-
126586-	p = malloc(1);
126587-	expect_ptr_not_null(p, "Unexpected malloc() failure");
126588-
126589-	tsd_test_data_set(tsd, d);
126590-	expect_x_eq(tsd_test_data_get(tsd), d,
126591-	    "After tsd set, tsd get should return the value that was set");
126592-
126593-	d = 0;
126594-	expect_x_eq(tsd_test_data_get(tsd), (int)(uintptr_t)arg,
126595-	    "Resetting local data should have no effect on tsd");
126596-
126597-	tsd_test_callback_set(tsd, &data_cleanup);
126598-
126599-	free(p);
126600-	return NULL;
126601-}
126602-
126603-TEST_BEGIN(test_tsd_main_thread) {
126604-	thd_start((void *)(uintptr_t)0xa5f3e329);
126605-}
126606-TEST_END
126607-
126608-TEST_BEGIN(test_tsd_sub_thread) {
126609-	thd_t thd;
126610-
126611-	data_cleanup_count = 0;
126612-	thd_create(&thd, thd_start, (void *)MALLOC_TSD_TEST_DATA_INIT);
126613-	thd_join(thd, NULL);
126614-	/*
126615-	 * We reincarnate twice in the data cleanup, so it should execute at
126616-	 * least 3 times.
126617-	 */
126618-	expect_x_ge(data_cleanup_count, 3,
126619-	    "Cleanup function should have executed multiple times.");
126620-}
126621-TEST_END
126622-
126623-static void *
126624-thd_start_reincarnated(void *arg) {
126625-	tsd_t *tsd = tsd_fetch();
126626-	assert(tsd);
126627-
126628-	void *p = malloc(1);
126629-	expect_ptr_not_null(p, "Unexpected malloc() failure");
126630-
126631-	/* Manually trigger reincarnation. */
126632-	expect_ptr_not_null(tsd_arena_get(tsd),
126633-	    "Should have tsd arena set.");
126634-	tsd_cleanup((void *)tsd);
126635-	expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
126636-	    "TSD arena should have been cleared.");
126637-	expect_u_eq(tsd_state_get(tsd), tsd_state_purgatory,
126638-	    "TSD state should be purgatory\n");
126639-
126640-	free(p);
126641-	expect_u_eq(tsd_state_get(tsd), tsd_state_reincarnated,
126642-	    "TSD state should be reincarnated\n");
126643-	p = mallocx(1, MALLOCX_TCACHE_NONE);
126644-	expect_ptr_not_null(p, "Unexpected mallocx() failure");
126645-	expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
126646-	    "Should not have tsd arena set after reincarnation.");
126647-
126648-	free(p);
126649-	tsd_cleanup((void *)tsd);
126650-	expect_ptr_null(*tsd_arenap_get_unsafe(tsd),
126651-	    "TSD arena should have been cleared after 2nd cleanup.");
126652-
126653-	return NULL;
126654-}
126655-
126656-TEST_BEGIN(test_tsd_reincarnation) {
126657-	thd_t thd;
126658-	thd_create(&thd, thd_start_reincarnated, NULL);
126659-	thd_join(thd, NULL);
126660-}
126661-TEST_END
126662-
126663-typedef struct {
126664-	atomic_u32_t phase;
126665-	atomic_b_t error;
126666-} global_slow_data_t;
126667-
126668-static void *
126669-thd_start_global_slow(void *arg) {
126670-	/* PHASE 0 */
126671-	global_slow_data_t *data = (global_slow_data_t *)arg;
126672-	free(mallocx(1, 0));
126673-
126674-	tsd_t *tsd = tsd_fetch();
126675-	/*
126676-	 * No global slowness has happened yet; there was an error if we were
126677-	 * originally fast but aren't now.
126678-	 */
126679-	atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
126680-	    ATOMIC_SEQ_CST);
126681-	atomic_store_u32(&data->phase, 1, ATOMIC_SEQ_CST);
126682-
126683-	/* PHASE 2 */
126684-	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 2) {
126685-	}
126686-	free(mallocx(1, 0));
126687-	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
126688-	atomic_store_u32(&data->phase, 3, ATOMIC_SEQ_CST);
126689-
126690-	/* PHASE 4 */
126691-	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 4) {
126692-	}
126693-	free(mallocx(1, 0));
126694-	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
126695-	atomic_store_u32(&data->phase, 5, ATOMIC_SEQ_CST);
126696-
126697-	/* PHASE 6 */
126698-	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 6) {
126699-	}
126700-	free(mallocx(1, 0));
126701-	/* Only one decrement so far. */
126702-	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
126703-	atomic_store_u32(&data->phase, 7, ATOMIC_SEQ_CST);
126704-
126705-	/* PHASE 8 */
126706-	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 8) {
126707-	}
126708-	free(mallocx(1, 0));
126709-	/*
126710-	 * Both decrements happened; we should be fast again (if we ever
126711-	 * were).
126712-	 */
126713-	atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
126714-	    ATOMIC_SEQ_CST);
126715-	atomic_store_u32(&data->phase, 9, ATOMIC_SEQ_CST);
126716-
126717-	return NULL;
126718-}
126719-
126720-TEST_BEGIN(test_tsd_global_slow) {
126721-	global_slow_data_t data = {ATOMIC_INIT(0), ATOMIC_INIT(false)};
126722-	/*
126723-	 * Note that the "mallocx" here (vs. malloc) is important, since the
126724-	 * compiler is allowed to optimize away free(malloc(1)) but not
126725-	 * free(mallocx(1)).
126726-	 */
126727-	free(mallocx(1, 0));
126728-	tsd_t *tsd = tsd_fetch();
126729-	originally_fast = tsd_fast(tsd);
126730-
126731-	thd_t thd;
126732-	thd_create(&thd, thd_start_global_slow, (void *)&data.phase);
126733-	/* PHASE 1 */
126734-	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 1) {
126735-		/*
126736-		 * We don't have a portable condvar/semaphore mechanism.
126737-		 * Spin-wait.
126738-		 */
126739-	}
126740-	expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
126741-	tsd_global_slow_inc(tsd_tsdn(tsd));
126742-	free(mallocx(1, 0));
126743-	expect_false(tsd_fast(tsd), "");
126744-	atomic_store_u32(&data.phase, 2, ATOMIC_SEQ_CST);
126745-
126746-	/* PHASE 3 */
126747-	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 3) {
126748-	}
126749-	expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
126750-	/* Increase again, so that we can test multiple fast/slow changes. */
126751-	tsd_global_slow_inc(tsd_tsdn(tsd));
126752-	atomic_store_u32(&data.phase, 4, ATOMIC_SEQ_CST);
126753-	free(mallocx(1, 0));
126754-	expect_false(tsd_fast(tsd), "");
126755-
126756-	/* PHASE 5 */
126757-	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 5) {
126758-	}
126759-	expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
126760-	tsd_global_slow_dec(tsd_tsdn(tsd));
126761-	atomic_store_u32(&data.phase, 6, ATOMIC_SEQ_CST);
126762-	/* We only decreased once; things should still be slow. */
126763-	free(mallocx(1, 0));
126764-	expect_false(tsd_fast(tsd), "");
126765-
126766-	/* PHASE 7 */
126767-	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 7) {
126768-	}
126769-	expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
126770-	tsd_global_slow_dec(tsd_tsdn(tsd));
126771-	atomic_store_u32(&data.phase, 8, ATOMIC_SEQ_CST);
126772-	/* We incremented and then decremented twice; we should be fast now. */
126773-	free(mallocx(1, 0));
126774-	expect_true(!originally_fast || tsd_fast(tsd), "");
126775-
126776-	/* PHASE 9 */
126777-	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 9) {
126778-	}
126779-	expect_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
126780-
126781-	thd_join(thd, NULL);
126782-}
126783-TEST_END
126784-
126785-int
126786-main(void) {
126787-	/* Ensure tsd bootstrapped. */
126788-	if (nallocx(1, 0) == 0) {
126789-		malloc_printf("Initialization error");
126790-		return test_status_fail;
126791-	}
126792-
126793-	return test_no_reentrancy(
126794-	    test_tsd_main_thread,
126795-	    test_tsd_sub_thread,
126796-	    test_tsd_reincarnation,
126797-	    test_tsd_global_slow);
126798-}
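
The PHASE 0..9 choreography in test_tsd_global_slow above is a plain spin-wait handshake on an atomic phase counter, used because, as the comment notes, the harness has no portable condvar or semaphore. Reduced to standard C11 atomics and pthreads rather than jemalloc's internal atomic wrappers, the pattern looks roughly like the sketch below; all names in it are illustrative.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint phase;

/* Spin until the shared phase counter reaches `want`. */
static void
phase_wait(unsigned want) {
	while (atomic_load_explicit(&phase, memory_order_seq_cst) != want) {
		/* Spin-wait; no portable condvar in the harness. */
	}
}

static void
phase_set(unsigned next) {
	atomic_store_explicit(&phase, next, memory_order_seq_cst);
}

static void *
worker(void *arg) {
	(void)arg;
	/* PHASE 0: do some work, then hand control back. */
	phase_set(1);
	/* PHASE 2: wait for the main thread's next step. */
	phase_wait(2);
	phase_set(3);
	return NULL;
}

int
main(void) {
	pthread_t thd;
	atomic_init(&phase, 0);
	pthread_create(&thd, NULL, worker, NULL);
	phase_wait(1);	/* Worker finished phase 0. */
	phase_set(2);	/* Let it run phase 2. */
	phase_wait(3);
	pthread_join(thd, NULL);
	puts("handshake complete");
	return 0;
}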
126799diff --git a/jemalloc/test/unit/uaf.c b/jemalloc/test/unit/uaf.c
126800deleted file mode 100644
126801index a8433c2..0000000
126802--- a/jemalloc/test/unit/uaf.c
126803+++ /dev/null
126804@@ -1,262 +0,0 @@
126805-#include "test/jemalloc_test.h"
126806-#include "test/arena_util.h"
126807-#include "test/san.h"
126808-
126809-#include "jemalloc/internal/cache_bin.h"
126810-#include "jemalloc/internal/san.h"
126811-#include "jemalloc/internal/safety_check.h"
126812-
126813-const char *malloc_conf = TEST_SAN_UAF_ALIGN_ENABLE;
126814-
126815-static size_t san_uaf_align;
126816-
126817-static bool fake_abort_called;
126818-void fake_abort(const char *message) {
126819-	(void)message;
126820-	fake_abort_called = true;
126821-}
126822-
126823-static void
126824-test_write_after_free_pre(void) {
126825-	safety_check_set_abort(&fake_abort);
126826-	fake_abort_called = false;
126827-}
126828-
126829-static void
126830-test_write_after_free_post(void) {
126831-	assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
126832-	    0, "Unexpected tcache flush failure");
126833-	expect_true(fake_abort_called, "Use-after-free check didn't fire.");
126834-	safety_check_set_abort(NULL);
126835-}
126836-
126837-static bool
126838-uaf_detection_enabled(void) {
126839-	if (!config_uaf_detection || !san_uaf_detection_enabled()) {
126840-		return false;
126841-	}
126842-
126843-	ssize_t lg_san_uaf_align;
126844-	size_t sz = sizeof(lg_san_uaf_align);
126845-	assert_d_eq(mallctl("opt.lg_san_uaf_align", &lg_san_uaf_align, &sz,
126846-	    NULL, 0), 0, "Unexpected mallctl failure");
126847-	if (lg_san_uaf_align < 0) {
126848-		return false;
126849-	}
126850-	assert_zd_ge(lg_san_uaf_align, LG_PAGE, "san_uaf_align out of range");
126851-	san_uaf_align = (size_t)1 << lg_san_uaf_align;
126852-
126853-	bool tcache_enabled;
126854-	sz = sizeof(tcache_enabled);
126855-	assert_d_eq(mallctl("thread.tcache.enabled", &tcache_enabled, &sz, NULL,
126856-	    0), 0, "Unexpected mallctl failure");
126857-	if (!tcache_enabled) {
126858-		return false;
126859-	}
126860-
126861-	return true;
126862-}
126863-
126864-static size_t
126865-read_tcache_stashed_bytes(unsigned arena_ind) {
126866-	if (!config_stats) {
126867-		return 0;
126868-	}
126869-
126870-	uint64_t epoch;
126871-	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
126872-	    0, "Unexpected mallctl() failure");
126873-
126874-	size_t tcache_stashed_bytes;
126875-	size_t sz = sizeof(tcache_stashed_bytes);
126876-	assert_d_eq(mallctl(
126877-	    "stats.arenas." STRINGIFY(MALLCTL_ARENAS_ALL)
126878-	    ".tcache_stashed_bytes", &tcache_stashed_bytes, &sz, NULL, 0), 0,
126879-	    "Unexpected mallctl failure");
126880-
126881-	return tcache_stashed_bytes;
126882-}
126883-
126884-static void
126885-test_use_after_free(size_t alloc_size, bool write_after_free) {
126886-	void *ptr = (void *)(uintptr_t)san_uaf_align;
126887-	assert_true(cache_bin_nonfast_aligned(ptr), "Wrong alignment");
126888-	ptr = (void *)((uintptr_t)123 * (uintptr_t)san_uaf_align);
126889-	assert_true(cache_bin_nonfast_aligned(ptr), "Wrong alignment");
126890-	ptr = (void *)((uintptr_t)san_uaf_align + 1);
126891-	assert_false(cache_bin_nonfast_aligned(ptr), "Wrong alignment");
126892-
126893-	/*
126894-	 * Disable purging (-1) so that all dirty pages remain committed, to
126895-	 * make use-after-free tolerable.
126896-	 */
126897-	unsigned arena_ind = do_arena_create(-1, -1);
126898-	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
126899-
126900-	size_t n_max = san_uaf_align * 2;
126901-	void **items = mallocx(n_max * sizeof(void *), flags);
126902-	assert_ptr_not_null(items, "Unexpected mallocx failure");
126903-
126904-	bool found = false;
126905-	size_t iter = 0;
126906-	char magic = 's';
126907-	assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
126908-	    0, "Unexpected tcache flush failure");
126909-	while (!found) {
126910-		ptr = mallocx(alloc_size, flags);
126911-		assert_ptr_not_null(ptr, "Unexpected mallocx failure");
126912-
126913-		found = cache_bin_nonfast_aligned(ptr);
126914-		*(char *)ptr = magic;
126915-		items[iter] = ptr;
126916-		assert_zu_lt(iter++, n_max, "No aligned ptr found");
126917-	}
126918-
126919-	if (write_after_free) {
126920-		test_write_after_free_pre();
126921-	}
126922-	bool junked = false;
126923-	while (iter-- != 0) {
126924-		char *volatile mem = items[iter];
126925-		assert_c_eq(*mem, magic, "Unexpected memory content");
126926-		size_t stashed_before = read_tcache_stashed_bytes(arena_ind);
126927-		free(mem);
126928-		if (*mem != magic) {
126929-			junked = true;
126930-			assert_c_eq(*mem, (char)uaf_detect_junk,
126931-			    "Unexpected junk-filling bytes");
126932-			if (write_after_free) {
126933-				*(char *)mem = magic + 1;
126934-			}
126935-
126936-			size_t stashed_after = read_tcache_stashed_bytes(
126937-			    arena_ind);
126938-			/*
126939-			 * An edge case is the deallocation above triggering the
126940-			 * tcache GC event, in which case the stashed pointers
126941-			 * may get flushed immediately, before returning from
126942-			 * free().  Treat these cases as checked already.
126943-			 */
126944-			if (stashed_after <= stashed_before) {
126945-				fake_abort_called = true;
126946-			}
126947-		}
126948-		/* Flush tcache (including stashed). */
126949-		assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
126950-		    0, "Unexpected tcache flush failure");
126951-	}
126952-	expect_true(junked, "Aligned ptr not junked");
126953-	if (write_after_free) {
126954-		test_write_after_free_post();
126955-	}
126956-
126957-	dallocx(items, flags);
126958-	do_arena_destroy(arena_ind);
126959-}
126960-
126961-TEST_BEGIN(test_read_after_free) {
126962-	test_skip_if(!uaf_detection_enabled());
126963-
126964-	test_use_after_free(sizeof(void *), /* write_after_free */ false);
126965-	test_use_after_free(sizeof(void *) + 1, /* write_after_free */ false);
126966-	test_use_after_free(16, /* write_after_free */ false);
126967-	test_use_after_free(20, /* write_after_free */ false);
126968-	test_use_after_free(32, /* write_after_free */ false);
126969-	test_use_after_free(33, /* write_after_free */ false);
126970-	test_use_after_free(48, /* write_after_free */ false);
126971-	test_use_after_free(64, /* write_after_free */ false);
126972-	test_use_after_free(65, /* write_after_free */ false);
126973-	test_use_after_free(129, /* write_after_free */ false);
126974-	test_use_after_free(255, /* write_after_free */ false);
126975-	test_use_after_free(256, /* write_after_free */ false);
126976-}
126977-TEST_END
126978-
126979-TEST_BEGIN(test_write_after_free) {
126980-	test_skip_if(!uaf_detection_enabled());
126981-
126982-	test_use_after_free(sizeof(void *), /* write_after_free */ true);
126983-	test_use_after_free(sizeof(void *) + 1, /* write_after_free */ true);
126984-	test_use_after_free(16, /* write_after_free */ true);
126985-	test_use_after_free(20, /* write_after_free */ true);
126986-	test_use_after_free(32, /* write_after_free */ true);
126987-	test_use_after_free(33, /* write_after_free */ true);
126988-	test_use_after_free(48, /* write_after_free */ true);
126989-	test_use_after_free(64, /* write_after_free */ true);
126990-	test_use_after_free(65, /* write_after_free */ true);
126991-	test_use_after_free(129, /* write_after_free */ true);
126992-	test_use_after_free(255, /* write_after_free */ true);
126993-	test_use_after_free(256, /* write_after_free */ true);
126994-}
126995-TEST_END
126996-
126997-static bool
126998-check_allocated_intact(void **allocated, size_t n_alloc) {
126999-	for (unsigned i = 0; i < n_alloc; i++) {
127000-		void *ptr = *(void **)allocated[i];
127001-		bool found = false;
127002-		for (unsigned j = 0; j < n_alloc; j++) {
127003-			if (ptr == allocated[j]) {
127004-				found = true;
127005-				break;
127006-			}
127007-		}
127008-		if (!found) {
127009-			return false;
127010-		}
127011-	}
127012-
127013-	return true;
127014-}
127015-
127016-TEST_BEGIN(test_use_after_free_integration) {
127017-	test_skip_if(!uaf_detection_enabled());
127018-
127019-	unsigned arena_ind = do_arena_create(-1, -1);
127020-	int flags = MALLOCX_ARENA(arena_ind);
127021-
127022-	size_t n_alloc = san_uaf_align * 2;
127023-	void **allocated = mallocx(n_alloc * sizeof(void *), flags);
127024-	assert_ptr_not_null(allocated, "Unexpected mallocx failure");
127025-
127026-	for (unsigned i = 0; i < n_alloc; i++) {
127027-		allocated[i] = mallocx(sizeof(void *) * 8, flags);
127028-		assert_ptr_not_null(allocated[i], "Unexpected mallocx failure");
127029-		if (i > 0) {
127030-			/* Emulate a circular list. */
127031-			*(void **)allocated[i] = allocated[i - 1];
127032-		}
127033-	}
127034-	*(void **)allocated[0] = allocated[n_alloc - 1];
127035-	expect_true(check_allocated_intact(allocated, n_alloc),
127036-	    "Allocated data corrupted");
127037-
127038-	for (unsigned i = 0; i < n_alloc; i++) {
127039-		free(allocated[i]);
127040-	}
127041-	/* Read-after-free */
127042-	expect_false(check_allocated_intact(allocated, n_alloc),
127043-	    "Junk-filling not detected");
127044-
127045-	test_write_after_free_pre();
127046-	for (unsigned i = 0; i < n_alloc; i++) {
127047-		allocated[i] = mallocx(sizeof(void *), flags);
127048-		assert_ptr_not_null(allocated[i], "Unexpected mallocx failure");
127049-		*(void **)allocated[i] = (void *)(uintptr_t)i;
127050-	}
127051-	/* Write-after-free */
127052-	for (unsigned i = 0; i < n_alloc; i++) {
127053-		free(allocated[i]);
127054-		*(void **)allocated[i] = NULL;
127055-	}
127056-	test_write_after_free_post();
127057-}
127058-TEST_END
127059-
127060-int
127061-main(void) {
127062-	return test(
127063-	    test_read_after_free,
127064-	    test_write_after_free,
127065-	    test_use_after_free_integration);
127066-}
127067diff --git a/jemalloc/test/unit/witness.c b/jemalloc/test/unit/witness.c
127068deleted file mode 100644
127069index 5a6c448..0000000
127070--- a/jemalloc/test/unit/witness.c
127071+++ /dev/null
127072@@ -1,280 +0,0 @@
127073-#include "test/jemalloc_test.h"
127074-
127075-static witness_lock_error_t *witness_lock_error_orig;
127076-static witness_owner_error_t *witness_owner_error_orig;
127077-static witness_not_owner_error_t *witness_not_owner_error_orig;
127078-static witness_depth_error_t *witness_depth_error_orig;
127079-
127080-static bool saw_lock_error;
127081-static bool saw_owner_error;
127082-static bool saw_not_owner_error;
127083-static bool saw_depth_error;
127084-
127085-static void
127086-witness_lock_error_intercept(const witness_list_t *witnesses,
127087-    const witness_t *witness) {
127088-	saw_lock_error = true;
127089-}
127090-
127091-static void
127092-witness_owner_error_intercept(const witness_t *witness) {
127093-	saw_owner_error = true;
127094-}
127095-
127096-static void
127097-witness_not_owner_error_intercept(const witness_t *witness) {
127098-	saw_not_owner_error = true;
127099-}
127100-
127101-static void
127102-witness_depth_error_intercept(const witness_list_t *witnesses,
127103-    witness_rank_t rank_inclusive, unsigned depth) {
127104-	saw_depth_error = true;
127105-}
127106-
127107-static int
127108-witness_comp(const witness_t *a, void *oa, const witness_t *b, void *ob) {
127109-	expect_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
127110-
127111-	assert(oa == (void *)a);
127112-	assert(ob == (void *)b);
127113-
127114-	return strcmp(a->name, b->name);
127115-}
127116-
127117-static int
127118-witness_comp_reverse(const witness_t *a, void *oa, const witness_t *b,
127119-    void *ob) {
127120-	expect_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
127121-
127122-	assert(oa == (void *)a);
127123-	assert(ob == (void *)b);
127124-
127125-	return -strcmp(a->name, b->name);
127126-}
127127-
127128-TEST_BEGIN(test_witness) {
127129-	witness_t a, b;
127130-	witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
127131-
127132-	test_skip_if(!config_debug);
127133-
127134-	witness_assert_lockless(&witness_tsdn);
127135-	witness_assert_depth(&witness_tsdn, 0);
127136-	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 0);
127137-
127138-	witness_init(&a, "a", 1, NULL, NULL);
127139-	witness_assert_not_owner(&witness_tsdn, &a);
127140-	witness_lock(&witness_tsdn, &a);
127141-	witness_assert_owner(&witness_tsdn, &a);
127142-	witness_assert_depth(&witness_tsdn, 1);
127143-	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 1);
127144-	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 0);
127145-
127146-	witness_init(&b, "b", 2, NULL, NULL);
127147-	witness_assert_not_owner(&witness_tsdn, &b);
127148-	witness_lock(&witness_tsdn, &b);
127149-	witness_assert_owner(&witness_tsdn, &b);
127150-	witness_assert_depth(&witness_tsdn, 2);
127151-	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 2);
127152-	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 1);
127153-	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)3U, 0);
127154-
127155-	witness_unlock(&witness_tsdn, &a);
127156-	witness_assert_depth(&witness_tsdn, 1);
127157-	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 1);
127158-	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 1);
127159-	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)3U, 0);
127160-	witness_unlock(&witness_tsdn, &b);
127161-
127162-	witness_assert_lockless(&witness_tsdn);
127163-	witness_assert_depth(&witness_tsdn, 0);
127164-	witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 0);
127165-}
127166-TEST_END
127167-
127168-TEST_BEGIN(test_witness_comp) {
127169-	witness_t a, b, c, d;
127170-	witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
127171-
127172-	test_skip_if(!config_debug);
127173-
127174-	witness_assert_lockless(&witness_tsdn);
127175-
127176-	witness_init(&a, "a", 1, witness_comp, &a);
127177-	witness_assert_not_owner(&witness_tsdn, &a);
127178-	witness_lock(&witness_tsdn, &a);
127179-	witness_assert_owner(&witness_tsdn, &a);
127180-	witness_assert_depth(&witness_tsdn, 1);
127181-
127182-	witness_init(&b, "b", 1, witness_comp, &b);
127183-	witness_assert_not_owner(&witness_tsdn, &b);
127184-	witness_lock(&witness_tsdn, &b);
127185-	witness_assert_owner(&witness_tsdn, &b);
127186-	witness_assert_depth(&witness_tsdn, 2);
127187-	witness_unlock(&witness_tsdn, &b);
127188-	witness_assert_depth(&witness_tsdn, 1);
127189-
127190-	witness_lock_error_orig = witness_lock_error;
127191-	witness_lock_error = witness_lock_error_intercept;
127192-	saw_lock_error = false;
127193-
127194-	witness_init(&c, "c", 1, witness_comp_reverse, &c);
127195-	witness_assert_not_owner(&witness_tsdn, &c);
127196-	expect_false(saw_lock_error, "Unexpected witness lock error");
127197-	witness_lock(&witness_tsdn, &c);
127198-	expect_true(saw_lock_error, "Expected witness lock error");
127199-	witness_unlock(&witness_tsdn, &c);
127200-	witness_assert_depth(&witness_tsdn, 1);
127201-
127202-	saw_lock_error = false;
127203-
127204-	witness_init(&d, "d", 1, NULL, NULL);
127205-	witness_assert_not_owner(&witness_tsdn, &d);
127206-	expect_false(saw_lock_error, "Unexpected witness lock error");
127207-	witness_lock(&witness_tsdn, &d);
127208-	expect_true(saw_lock_error, "Expected witness lock error");
127209-	witness_unlock(&witness_tsdn, &d);
127210-	witness_assert_depth(&witness_tsdn, 1);
127211-
127212-	witness_unlock(&witness_tsdn, &a);
127213-
127214-	witness_assert_lockless(&witness_tsdn);
127215-
127216-	witness_lock_error = witness_lock_error_orig;
127217-}
127218-TEST_END
127219-
127220-TEST_BEGIN(test_witness_reversal) {
127221-	witness_t a, b;
127222-	witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
127223-
127224-	test_skip_if(!config_debug);
127225-
127226-	witness_lock_error_orig = witness_lock_error;
127227-	witness_lock_error = witness_lock_error_intercept;
127228-	saw_lock_error = false;
127229-
127230-	witness_assert_lockless(&witness_tsdn);
127231-
127232-	witness_init(&a, "a", 1, NULL, NULL);
127233-	witness_init(&b, "b", 2, NULL, NULL);
127234-
127235-	witness_lock(&witness_tsdn, &b);
127236-	witness_assert_depth(&witness_tsdn, 1);
127237-	expect_false(saw_lock_error, "Unexpected witness lock error");
127238-	witness_lock(&witness_tsdn, &a);
127239-	expect_true(saw_lock_error, "Expected witness lock error");
127240-
127241-	witness_unlock(&witness_tsdn, &a);
127242-	witness_assert_depth(&witness_tsdn, 1);
127243-	witness_unlock(&witness_tsdn, &b);
127244-
127245-	witness_assert_lockless(&witness_tsdn);
127246-
127247-	witness_lock_error = witness_lock_error_orig;
127248-}
127249-TEST_END
127250-
127251-TEST_BEGIN(test_witness_recursive) {
127252-	witness_t a;
127253-	witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
127254-
127255-	test_skip_if(!config_debug);
127256-
127257-	witness_not_owner_error_orig = witness_not_owner_error;
127258-	witness_not_owner_error = witness_not_owner_error_intercept;
127259-	saw_not_owner_error = false;
127260-
127261-	witness_lock_error_orig = witness_lock_error;
127262-	witness_lock_error = witness_lock_error_intercept;
127263-	saw_lock_error = false;
127264-
127265-	witness_assert_lockless(&witness_tsdn);
127266-
127267-	witness_init(&a, "a", 1, NULL, NULL);
127268-
127269-	witness_lock(&witness_tsdn, &a);
127270-	expect_false(saw_lock_error, "Unexpected witness lock error");
127271-	expect_false(saw_not_owner_error, "Unexpected witness not owner error");
127272-	witness_lock(&witness_tsdn, &a);
127273-	expect_true(saw_lock_error, "Expected witness lock error");
127274-	expect_true(saw_not_owner_error, "Expected witness not owner error");
127275-
127276-	witness_unlock(&witness_tsdn, &a);
127277-
127278-	witness_assert_lockless(&witness_tsdn);
127279-
127280-	witness_not_owner_error = witness_not_owner_error_orig;
127281-	witness_lock_error = witness_lock_error_orig;
127282-
127283-}
127284-TEST_END
127285-
127286-TEST_BEGIN(test_witness_unlock_not_owned) {
127287-	witness_t a;
127288-	witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
127289-
127290-	test_skip_if(!config_debug);
127291-
127292-	witness_owner_error_orig = witness_owner_error;
127293-	witness_owner_error = witness_owner_error_intercept;
127294-	saw_owner_error = false;
127295-
127296-	witness_assert_lockless(&witness_tsdn);
127297-
127298-	witness_init(&a, "a", 1, NULL, NULL);
127299-
127300-	expect_false(saw_owner_error, "Unexpected owner error");
127301-	witness_unlock(&witness_tsdn, &a);
127302-	expect_true(saw_owner_error, "Expected owner error");
127303-
127304-	witness_assert_lockless(&witness_tsdn);
127305-
127306-	witness_owner_error = witness_owner_error_orig;
127307-}
127308-TEST_END
127309-
127310-TEST_BEGIN(test_witness_depth) {
127311-	witness_t a;
127312-	witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER };
127313-
127314-	test_skip_if(!config_debug);
127315-
127316-	witness_depth_error_orig = witness_depth_error;
127317-	witness_depth_error = witness_depth_error_intercept;
127318-	saw_depth_error = false;
127319-
127320-	witness_assert_lockless(&witness_tsdn);
127321-	witness_assert_depth(&witness_tsdn, 0);
127322-
127323-	witness_init(&a, "a", 1, NULL, NULL);
127324-
127325-	expect_false(saw_depth_error, "Unexpected depth error");
127326-	witness_assert_lockless(&witness_tsdn);
127327-	witness_assert_depth(&witness_tsdn, 0);
127328-
127329-	witness_lock(&witness_tsdn, &a);
127330-	witness_assert_lockless(&witness_tsdn);
127331-	witness_assert_depth(&witness_tsdn, 0);
127332-	expect_true(saw_depth_error, "Expected depth error");
127333-
127334-	witness_unlock(&witness_tsdn, &a);
127335-
127336-	witness_assert_lockless(&witness_tsdn);
127337-	witness_assert_depth(&witness_tsdn, 0);
127338-
127339-	witness_depth_error = witness_depth_error_orig;
127340-}
127341-TEST_END
127342-
127343-int
127344-main(void) {
127345-	return test(
127346-	    test_witness,
127347-	    test_witness_comp,
127348-	    test_witness_reversal,
127349-	    test_witness_recursive,
127350-	    test_witness_unlock_not_owned,
127351-	    test_witness_depth);
127352-}
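
The witness tests above all check one lock-ordering invariant: a witness (lock) may only be acquired when its rank is strictly greater than the rank of every witness already held, equal ranks being permitted only when a comparison callback orders the two witnesses, and release being allowed in any order. A jemalloc-independent sketch of that rank check follows; every name in it is illustrative, and the comparator refinement is deliberately omitted.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Illustrative stand-ins for the witness machinery exercised above. */
typedef struct {
	const char *name;
	unsigned rank;
} lock_witness_t;

#define HELD_MAX 16
static _Thread_local const lock_witness_t *held[HELD_MAX];
static _Thread_local size_t n_held;

static unsigned
max_held_rank(void) {
	unsigned max = 0;
	for (size_t i = 0; i < n_held; i++) {
		if (held[i]->rank > max) {
			max = held[i]->rank;
		}
	}
	return max;
}

/*
 * Core ordering rule: a new witness's rank must be strictly greater than
 * every rank already held.  (The equal-rank-with-comparator case allowed by
 * jemalloc is omitted in this sketch.)
 */
static void
witness_lock_sketch(const lock_witness_t *w) {
	assert((n_held == 0 || w->rank > max_held_rank()) &&
	    "lock order reversal");
	assert(n_held < HELD_MAX);
	held[n_held++] = w;
}

/* Unlocking is permitted in any order, as in test_witness above. */
static void
witness_unlock_sketch(const lock_witness_t *w) {
	for (size_t i = 0; i < n_held; i++) {
		if (held[i] == w) {
			held[i] = held[n_held - 1];
			n_held--;
			return;
		}
	}
	assert(false && "unlocking a witness that is not held");
}

int
main(void) {
	lock_witness_t a = {"a", 1}, b = {"b", 2};
	witness_lock_sketch(&a);
	witness_lock_sketch(&b);	/* OK: rank 2 > rank 1. */
	witness_unlock_sketch(&a);	/* Out-of-order unlock is allowed. */
	witness_unlock_sketch(&b);
	return 0;
}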
127353diff --git a/jemalloc/test/unit/zero.c b/jemalloc/test/unit/zero.c
127354deleted file mode 100644
127355index d3e81f1..0000000
127356--- a/jemalloc/test/unit/zero.c
127357+++ /dev/null
127358@@ -1,59 +0,0 @@
127359-#include "test/jemalloc_test.h"
127360-
127361-static void
127362-test_zero(size_t sz_min, size_t sz_max) {
127363-	uint8_t *s;
127364-	size_t sz_prev, sz, i;
127365-#define MAGIC	((uint8_t)0x61)
127366-
127367-	sz_prev = 0;
127368-	s = (uint8_t *)mallocx(sz_min, 0);
127369-	expect_ptr_not_null((void *)s, "Unexpected mallocx() failure");
127370-
127371-	for (sz = sallocx(s, 0); sz <= sz_max;
127372-	    sz_prev = sz, sz = sallocx(s, 0)) {
127373-		if (sz_prev > 0) {
127374-			expect_u_eq(s[0], MAGIC,
127375-			    "Previously allocated byte %zu/%zu is corrupted",
127376-			    ZU(0), sz_prev);
127377-			expect_u_eq(s[sz_prev-1], MAGIC,
127378-			    "Previously allocated byte %zu/%zu is corrupted",
127379-			    sz_prev-1, sz_prev);
127380-		}
127381-
127382-		for (i = sz_prev; i < sz; i++) {
127383-			expect_u_eq(s[i], 0x0,
127384-			    "Newly allocated byte %zu/%zu isn't zero-filled",
127385-			    i, sz);
127386-			s[i] = MAGIC;
127387-		}
127388-
127389-		if (xallocx(s, sz+1, 0, 0) == sz) {
127390-			s = (uint8_t *)rallocx(s, sz+1, 0);
127391-			expect_ptr_not_null((void *)s,
127392-			    "Unexpected rallocx() failure");
127393-		}
127394-	}
127395-
127396-	dallocx(s, 0);
127397-#undef MAGIC
127398-}
127399-
127400-TEST_BEGIN(test_zero_small) {
127401-	test_skip_if(!config_fill);
127402-	test_zero(1, SC_SMALL_MAXCLASS - 1);
127403-}
127404-TEST_END
127405-
127406-TEST_BEGIN(test_zero_large) {
127407-	test_skip_if(!config_fill);
127408-	test_zero(SC_SMALL_MAXCLASS + 1, 1U << (SC_LG_LARGE_MINCLASS + 1));
127409-}
127410-TEST_END
127411-
127412-int
127413-main(void) {
127414-	return test(
127415-	    test_zero_small,
127416-	    test_zero_large);
127417-}
127418diff --git a/jemalloc/test/unit/zero.sh b/jemalloc/test/unit/zero.sh
127419deleted file mode 100644
127420index b4540b2..0000000
127421--- a/jemalloc/test/unit/zero.sh
127422+++ /dev/null
127423@@ -1,5 +0,0 @@
127424-#!/bin/sh
127425-
127426-if [ "x${enable_fill}" = "x1" ] ; then
127427-  export MALLOC_CONF="abort:false,junk:false,zero:true"
127428-fi
127429diff --git a/jemalloc/test/unit/zero_realloc_abort.c b/jemalloc/test/unit/zero_realloc_abort.c
127430deleted file mode 100644
127431index a880d10..0000000
127432--- a/jemalloc/test/unit/zero_realloc_abort.c
127433+++ /dev/null
127434@@ -1,26 +0,0 @@
127435-#include "test/jemalloc_test.h"
127436-
127437-#include <signal.h>
127438-
127439-static bool abort_called = false;
127440-
127441-void set_abort_called() {
127442-	abort_called = true;
127443-}
127444-
127445-TEST_BEGIN(test_realloc_abort) {
127446-	abort_called = false;
127447-	safety_check_set_abort(&set_abort_called);
127448-	void *ptr = mallocx(42, 0);
127449-	expect_ptr_not_null(ptr, "Unexpected mallocx error");
127450-	ptr = realloc(ptr, 0);
127451-	expect_true(abort_called, "Realloc with zero size didn't abort");
127452-}
127453-TEST_END
127454-
127455-int
127456-main(void) {
127457-	return test(
127458-	    test_realloc_abort);
127459-}
127460-
127461diff --git a/jemalloc/test/unit/zero_realloc_abort.sh b/jemalloc/test/unit/zero_realloc_abort.sh
127462deleted file mode 100644
127463index 37daeea..0000000
127464--- a/jemalloc/test/unit/zero_realloc_abort.sh
127465+++ /dev/null
127466@@ -1,3 +0,0 @@
127467-#!/bin/sh
127468-
127469-export MALLOC_CONF="zero_realloc:abort"
127470diff --git a/jemalloc/test/unit/zero_realloc_alloc.c b/jemalloc/test/unit/zero_realloc_alloc.c
127471deleted file mode 100644
127472index 65e07bd..0000000
127473--- a/jemalloc/test/unit/zero_realloc_alloc.c
127474+++ /dev/null
127475@@ -1,48 +0,0 @@
127476-#include "test/jemalloc_test.h"
127477-
127478-static uint64_t
127479-allocated() {
127480-	if (!config_stats) {
127481-		return 0;
127482-	}
127483-	uint64_t allocated;
127484-	size_t sz = sizeof(allocated);
127485-	expect_d_eq(mallctl("thread.allocated", (void *)&allocated, &sz, NULL,
127486-	    0), 0, "Unexpected mallctl failure");
127487-	return allocated;
127488-}
127489-
127490-static uint64_t
127491-deallocated() {
127492-	if (!config_stats) {
127493-		return 0;
127494-	}
127495-	uint64_t deallocated;
127496-	size_t sz = sizeof(deallocated);
127497-	expect_d_eq(mallctl("thread.deallocated", (void *)&deallocated, &sz,
127498-	    NULL, 0), 0, "Unexpected mallctl failure");
127499-	return deallocated;
127500-}
127501-
127502-TEST_BEGIN(test_realloc_alloc) {
127503-	void *ptr = mallocx(1, 0);
127504-	expect_ptr_not_null(ptr, "Unexpected mallocx error");
127505-	uint64_t allocated_before = allocated();
127506-	uint64_t deallocated_before = deallocated();
127507-	ptr = realloc(ptr, 0);
127508-	uint64_t allocated_after = allocated();
127509-	uint64_t deallocated_after = deallocated();
127510-	if (config_stats) {
127511-		expect_u64_lt(allocated_before, allocated_after,
127512-		    "Unexpected stats change");
127513-		expect_u64_lt(deallocated_before, deallocated_after,
127514-		    "Unexpected stats change");
127515-	}
127516-	dallocx(ptr, 0);
127517-}
127518-TEST_END
127519-int
127520-main(void) {
127521-	return test(
127522-	    test_realloc_alloc);
127523-}
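
The allocated() and deallocated() helpers above read two per-thread byte counters through mallctl(). Below is a small sketch of that read pattern, assuming a stats-enabled (config_stats) unprefixed jemalloc; the read_u64 helper name is illustrative, not taken from the test.

#include <jemalloc/jemalloc.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Read a uint64_t mallctl counter; returns 0 if stats are compiled out. */
static uint64_t
read_u64(const char *name) {
	uint64_t val = 0;
	size_t sz = sizeof(val);
	if (mallctl(name, (void *)&val, &sz, NULL, 0) != 0) {
		return 0;
	}
	return val;
}

int
main(void) {
	uint64_t before = read_u64("thread.allocated");
	void *p = mallocx(1, 0);
	if (p == NULL) {
		return 1;
	}
	uint64_t after = read_u64("thread.allocated");
	printf("thread.allocated grew by %" PRIu64 " bytes\n", after - before);
	dallocx(p, 0);
	return 0;
}
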
127524diff --git a/jemalloc/test/unit/zero_realloc_alloc.sh b/jemalloc/test/unit/zero_realloc_alloc.sh
127525deleted file mode 100644
127526index 802687c..0000000
127527--- a/jemalloc/test/unit/zero_realloc_alloc.sh
127528+++ /dev/null
127529@@ -1,3 +0,0 @@
127530-#!/bin/sh
127531-
127532-export MALLOC_CONF="zero_realloc:alloc"
127533diff --git a/jemalloc/test/unit/zero_realloc_free.c b/jemalloc/test/unit/zero_realloc_free.c
127534deleted file mode 100644
127535index baed86c..0000000
127536--- a/jemalloc/test/unit/zero_realloc_free.c
127537+++ /dev/null
127538@@ -1,33 +0,0 @@
127539-#include "test/jemalloc_test.h"
127540-
127541-static uint64_t
127542-deallocated() {
127543-	if (!config_stats) {
127544-		return 0;
127545-	}
127546-	uint64_t deallocated;
127547-	size_t sz = sizeof(deallocated);
127548-	expect_d_eq(mallctl("thread.deallocated", (void *)&deallocated, &sz,
127549-	    NULL, 0), 0, "Unexpected mallctl failure");
127550-	return deallocated;
127551-}
127552-
127553-TEST_BEGIN(test_realloc_free) {
127554-	void *ptr = mallocx(42, 0);
127555-	expect_ptr_not_null(ptr, "Unexpected mallocx error");
127556-	uint64_t deallocated_before = deallocated();
127557-	ptr = realloc(ptr, 0);
127558-	uint64_t deallocated_after = deallocated();
127559-	expect_ptr_null(ptr, "Realloc didn't free");
127560-	if (config_stats) {
127561-		expect_u64_gt(deallocated_after, deallocated_before,
127562-		    "Realloc didn't free");
127563-	}
127564-}
127565-TEST_END
127566-
127567-int
127568-main(void) {
127569-	return test(
127570-	    test_realloc_free);
127571-}
127572diff --git a/jemalloc/test/unit/zero_realloc_free.sh b/jemalloc/test/unit/zero_realloc_free.sh
127573deleted file mode 100644
127574index 51b01c9..0000000
127575--- a/jemalloc/test/unit/zero_realloc_free.sh
127576+++ /dev/null
127577@@ -1,3 +0,0 @@
127578-#!/bin/sh
127579-
127580-export MALLOC_CONF="zero_realloc:free"
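
Together, zero_realloc_free.c and its script pin down the zero_realloc:free mode: realloc(p, 0) behaves like free(p), returns NULL, and bumps the thread's deallocated counter (the neighbouring tests cover the alloc and abort modes). The sketch below illustrates that behaviour; it assumes malloc/realloc resolve to an unprefixed jemalloc and that MALLOC_CONF="zero_realloc:free" is set in the environment, as the script does.

#include <stdio.h>
#include <stdlib.h>

int
main(void) {
	/* Run with MALLOC_CONF="zero_realloc:free", as zero_realloc_free.sh sets. */
	void *p = malloc(42);
	if (p == NULL) {
		return 1;
	}
	p = realloc(p, 0);
	/* Under zero_realloc:free the old allocation is released and NULL is
	 * returned, so there is nothing left to free here. */
	printf("realloc(p, 0) returned %s\n", p == NULL ? "NULL" : "a live pointer");
	free(p);	/* free(NULL) is a no-op; harmless if another mode is active. */
	return 0;
}
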
127581diff --git a/jemalloc/test/unit/zero_reallocs.c b/jemalloc/test/unit/zero_reallocs.c
127582deleted file mode 100644
127583index 66c7a40..0000000
127584--- a/jemalloc/test/unit/zero_reallocs.c
127585+++ /dev/null
127586@@ -1,40 +0,0 @@
127587-#include "test/jemalloc_test.h"
127588-
127589-static size_t
127590-zero_reallocs() {
127591-	if (!config_stats) {
127592-		return 0;
127593-	}
127594-	size_t count = 12345;
127595-	size_t sz = sizeof(count);
127596-
127597-	expect_d_eq(mallctl("stats.zero_reallocs", (void *)&count, &sz,
127598-	    NULL, 0), 0, "Unexpected mallctl failure");
127599-	return count;
127600-}
127601-
127602-TEST_BEGIN(test_zero_reallocs) {
127603-	test_skip_if(!config_stats);
127604-
127605-	for (size_t i = 0; i < 100; ++i) {
127606-		void *ptr = mallocx(i * i + 1, 0);
127607-		expect_ptr_not_null(ptr, "Unexpected mallocx error");
127608-		size_t count = zero_reallocs();
127609-		expect_zu_eq(i, count, "Incorrect zero realloc count");
127610-		ptr = realloc(ptr, 0);
127611-		expect_ptr_null(ptr, "Realloc didn't free");
127612-		count = zero_reallocs();
127613-		expect_zu_eq(i + 1, count, "Realloc didn't adjust count");
127614-	}
127615-}
127616-TEST_END
127617-
127618-int
127619-main(void) {
127620-	/*
127621-	 * We expect explicit counts; reentrant tests run multiple times, so
127622-	 * counts leak across runs.
127623-	 */
127624-	return test_no_reentrancy(
127625-	    test_zero_reallocs);
127626-}
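
The zero_reallocs() helper above reads the global stats.zero_reallocs counter, which the test shows advancing by one for every realloc(ptr, 0). A sketch of the same mallctl read follows, assuming a stats-enabled unprefixed jemalloc running with the zero_realloc:free setting from zero_reallocs.sh below.

#include <jemalloc/jemalloc.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void) {
	void *p = malloc(1);
	if (p == NULL) {
		return 1;
	}
	p = realloc(p, 0);	/* counted as one zero realloc */
	free(p);		/* no-op when the free mode returned NULL */

	size_t count = 0;
	size_t sz = sizeof(count);
	if (mallctl("stats.zero_reallocs", (void *)&count, &sz, NULL, 0) == 0) {
		printf("stats.zero_reallocs = %zu\n", count);
	}
	return 0;
}
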
127627diff --git a/jemalloc/test/unit/zero_reallocs.sh b/jemalloc/test/unit/zero_reallocs.sh
127628deleted file mode 100644
127629index 51b01c9..0000000
127630--- a/jemalloc/test/unit/zero_reallocs.sh
127631+++ /dev/null
127632@@ -1,3 +0,0 @@
127633-#!/bin/sh
127634-
127635-export MALLOC_CONF="zero_realloc:free"
127636