diff --git a/.gitattributes b/.gitattributes
index acdbdbf..0332e03 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -2,9 +2,11 @@
* text eol=lf
*.png binary
*.pdn binary
+*.jpg binary
*.sln binary
*.suo binary
*.vcproj binary
*.patch binary
*.dll binary
*.lib binary
+*.exe binary
diff --git a/.gitignore b/.gitignore
index 3639d32..f8b7f5e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@ ide/vs20??/*.opendb
ide/vs20??/*.user
ide/vs20??/*.vcxproj.filters
ide/vs20??/.vs
+ide/vs20??/VTune*
out/
docs/
*.zip
diff --git a/Aurora.json b/Aurora.json
new file mode 100644
index 0000000..6c0eeb2
--- /dev/null
+++ b/Aurora.json
@@ -0,0 +1,15 @@
+{
+ "name": "mimalloc",
+ "type": "generic",
+ "sourcePaths": "src",
+ "include": "include",
+ "excludes": [
+ "src/page-queue.c",
+ "src/alloc-override.c",
+ "src/prim/*/*.*",
+ "src/static.c"
+ ],
+ "sources": [
+ "src/prim/prim.c"
+ ]
+}
\ No newline at end of file
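Note on the manifest above: in upstream mimalloc, src/page-queue.c and src/alloc-override.c are #include'd by other translation units (page.c and alloc.c respectively), src/static.c is an amalgamation that itself #includes the rest, and src/prim/prim.c selects the platform backend at preprocessing time — which is why the per-platform files under src/prim/*/ are excluded while prim.c alone is compiled. A minimal sketch of that dispatch pattern (the exact backend list is an assumption, not a quote from the tree):

    /* sketch of src/prim/prim.c: the only compiled file under src/prim/,
       pulling in the platform-specific primitives at preprocessing time */
    #if defined(_WIN32)
      #include "windows/prim.c"   /* Windows primitives */
    #elif defined(__APPLE__)
      #include "osx/prim.c"       /* macOS primitives   */
    #elif defined(__wasi__)
      #include "wasi/prim.c"      /* WASI primitives    */
    #else
      #include "unix/prim.c"      /* POSIX primitives   */
    #endif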
diff --git a/CMakeLists.txt b/CMakeLists.txt
deleted file mode 100644
index 35460e8..0000000
--- a/CMakeLists.txt
+++ /dev/null
@@ -1,367 +0,0 @@
-cmake_minimum_required(VERSION 3.0)
-project(libmimalloc C CXX)
-
-set(CMAKE_C_STANDARD 11)
-set(CMAKE_CXX_STANDARD 17)
-
-option(MI_SECURE "Use full security mitigations (like guard pages, allocation randomization, double-free mitigation, and free-list corruption detection)" OFF)
-option(MI_DEBUG_FULL "Use full internal heap invariant checking in DEBUG mode (expensive)" OFF)
-option(MI_PADDING "Enable padding to detect heap block overflow (used only in DEBUG mode)" ON)
-option(MI_OVERRIDE "Override the standard malloc interface (e.g. define entry points for malloc() etc)" ON)
-option(MI_XMALLOC "Enable abort() call on memory allocation failure by default" OFF)
-option(MI_SHOW_ERRORS "Show error and warning messages by default (only enabled by default in DEBUG mode)" OFF)
-option(MI_USE_CXX "Use the C++ compiler to compile the library (instead of the C compiler)" OFF)
-option(MI_SEE_ASM "Generate assembly files" OFF)
-option(MI_INTERPOSE "Use interpose to override standard malloc on macOS" ON)
-option(MI_OSX_ZONE "Use malloc zone to override standard malloc on macOS" OFF) # enables interpose as well
-option(MI_LOCAL_DYNAMIC_TLS "Use slightly slower, dlopen-compatible TLS mechanism (Unix)" OFF)
-option(MI_BUILD_SHARED "Build shared library" ON)
-option(MI_BUILD_STATIC "Build static library" ON)
-option(MI_BUILD_OBJECT "Build object library" ON)
-option(MI_BUILD_TESTS "Build test executables" ON)
-option(MI_DEBUG_TSAN "Build with thread sanitizer (needs clang)" OFF)
-option(MI_DEBUG_UBSAN "Build with undefined-behavior sanitizer (needs clang++)" OFF)
-option(MI_CHECK_FULL "Use full internal invariant checking in DEBUG mode (deprecated, use MI_DEBUG_FULL instead)" OFF)
-
-include("cmake/mimalloc-config-version.cmake")
-
-set(mi_sources
- src/stats.c
- src/random.c
- src/os.c
- src/arena.c
- src/region.c
- src/segment.c
- src/page.c
- src/alloc.c
- src/alloc-aligned.c
- src/alloc-posix.c
- src/heap.c
- src/options.c
- src/init.c)
-
-# -----------------------------------------------------------------------------
-# Convenience: set default build type depending on the build directory
-# -----------------------------------------------------------------------------
-
-if (NOT CMAKE_BUILD_TYPE)
- if ("${CMAKE_BINARY_DIR}" MATCHES ".*(D|d)ebug$" OR MI_DEBUG_FULL MATCHES "ON")
- message(STATUS "No build type selected, default to: Debug")
- set(CMAKE_BUILD_TYPE "Debug")
- else()
- message(STATUS "No build type selected, default to: Release")
- set(CMAKE_BUILD_TYPE "Release")
- endif()
-endif()
-
-if("${CMAKE_BINARY_DIR}" MATCHES ".*(S|s)ecure$")
- message(STATUS "Default to secure build")
- set(MI_SECURE "ON")
-endif()
-
-# -----------------------------------------------------------------------------
-# Process options
-# -----------------------------------------------------------------------------
-
-if(CMAKE_C_COMPILER_ID MATCHES "MSVC|Intel")
- set(MI_USE_CXX "ON")
-endif()
-
-if(MI_OVERRIDE MATCHES "ON")
- message(STATUS "Override standard malloc (MI_OVERRIDE=ON)")
- if(APPLE)
- if(MI_OSX_ZONE MATCHES "ON")
- # use zones on macOS
- message(STATUS " Use malloc zone to override malloc (MI_OSX_ZONE=ON)")
- list(APPEND mi_sources src/alloc-override-osx.c)
- list(APPEND mi_defines MI_OSX_ZONE=1)
- if(NOT MI_INTERPOSE MATCHES "ON")
- message(STATUS " (enabling INTERPOSE as well since zone's require this)")
- set(MI_INTERPOSE "ON")
- endif()
- endif()
- if(MI_INTERPOSE MATCHES "ON")
- # use interpose on macOS
- message(STATUS " Use interpose to override malloc (MI_INTERPOSE=ON)")
- list(APPEND mi_defines MI_INTERPOSE)
- endif()
- endif()
-endif()
-
-if(MI_SECURE MATCHES "ON")
- message(STATUS "Set full secure build (MI_SECURE=ON)")
- list(APPEND mi_defines MI_SECURE=4)
-endif()
-
-if(MI_SEE_ASM MATCHES "ON")
- message(STATUS "Generate assembly listings (MI_SEE_ASM=ON)")
- list(APPEND mi_cflags -save-temps)
-endif()
-
-if(MI_CHECK_FULL MATCHES "ON")
- message(STATUS "The MI_CHECK_FULL option is deprecated, use MI_DEBUG_FULL instead")
- set(MI_DEBUG_FULL "ON")
-endif()
-
-if(MI_DEBUG_FULL MATCHES "ON")
- message(STATUS "Set debug level to full internal invariant checking (MI_DEBUG_FULL=ON)")
- list(APPEND mi_defines MI_DEBUG=3) # full invariant checking
-endif()
-
-if(MI_PADDING MATCHES "OFF")
- message(STATUS "Disable padding of heap blocks in debug mode (MI_PADDING=OFF)")
- list(APPEND mi_defines MI_PADDING=0)
-endif()
-
-if(MI_XMALLOC MATCHES "ON")
- message(STATUS "Enable abort() calls on memory allocation failure (MI_XMALLOC=ON)")
- list(APPEND mi_defines MI_XMALLOC=1)
-endif()
-
-if(MI_SHOW_ERRORS MATCHES "ON")
- message(STATUS "Enable printing of error and warning messages by default (MI_SHOW_ERRORS=ON)")
- list(APPEND mi_defines MI_SHOW_ERRORS=1)
-endif()
-
-if(MI_DEBUG_TSAN MATCHES "ON")
- if(CMAKE_C_COMPILER_ID MATCHES "Clang")
- message(STATUS "Build with thread sanitizer (MI_DEBUG_TSAN=ON)")
- list(APPEND mi_defines MI_TSAN=1)
- list(APPEND mi_cflags -fsanitize=thread -g -O1)
- list(APPEND CMAKE_EXE_LINKER_FLAGS -fsanitize=thread)
- else()
- message(WARNING "Can only use thread sanitizer with clang (MI_DEBUG_TSAN=ON but ignored)")
- endif()
-endif()
-
-if(MI_DEBUG_UBSAN MATCHES "ON")
- if(CMAKE_BUILD_TYPE MATCHES "Debug")
- if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
- message(STATUS "Build with undefined-behavior sanitizer (MI_DEBUG_UBSAN=ON)")
- list(APPEND mi_cflags -fsanitize=undefined -g)
- list(APPEND CMAKE_EXE_LINKER_FLAGS -fsanitize=undefined)
- if (MI_USE_CXX MATCHES "OFF")
- message(STATUS "(switch to use C++ due to MI_DEBUG_UBSAN)")
- set(MI_USE_CXX "ON")
- endif()
- else()
- message(WARNING "Can only use undefined-behavior sanitizer with clang++ (MI_DEBUG_UBSAN=ON but ignored)")
- endif()
- else()
- message(WARNING "Can only use thread sanitizer with a debug build (CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE})")
- endif()
-endif()
-
-if(MI_USE_CXX MATCHES "ON")
- message(STATUS "Use the C++ compiler to compile (MI_USE_CXX=ON)")
- set_source_files_properties(${mi_sources} PROPERTIES LANGUAGE CXX )
- set_source_files_properties(src/static.c test/test-api.c test/test-stress PROPERTIES LANGUAGE CXX )
- if(CMAKE_CXX_COMPILER_ID MATCHES "AppleClang|Clang")
- list(APPEND mi_cflags -Wno-deprecated)
- endif()
- if(CMAKE_CXX_COMPILER_ID MATCHES "Intel")
- list(APPEND mi_cflags -Kc++)
- endif()
-endif()
-
-# Compiler flags
-if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU")
- list(APPEND mi_cflags -Wall -Wextra -Wno-unknown-pragmas -fvisibility=hidden)
- if(CMAKE_C_COMPILER_ID MATCHES "GNU")
- list(APPEND mi_cflags -Wno-invalid-memory-model)
- endif()
-endif()
-
-if(CMAKE_C_COMPILER_ID MATCHES "Intel")
- list(APPEND mi_cflags -Wall -fvisibility=hidden)
-endif()
-
-if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU|Intel" AND NOT CMAKE_SYSTEM_NAME MATCHES "Haiku")
- if(MI_LOCAL_DYNAMIC_TLS MATCHES "ON")
- list(APPEND mi_cflags -ftls-model=local-dynamic)
- else()
- list(APPEND mi_cflags -ftls-model=initial-exec)
- endif()
-endif()
-
-# Architecture flags
-if(${CMAKE_HOST_SYSTEM_PROCESSOR} MATCHES "arm")
- list(APPEND mi_cflags -march=native)
-endif()
-
-# extra needed libraries
-if(WIN32)
- list(APPEND mi_libraries psapi shell32 user32 advapi32 bcrypt)
-else()
- if(NOT ${CMAKE_C_COMPILER} MATCHES "android")
- list(APPEND mi_libraries pthread)
- find_library(LIBRT rt)
- if(LIBRT)
- list(APPEND mi_libraries ${LIBRT})
- endif()
- endif()
-endif()
-
-# -----------------------------------------------------------------------------
-# Install and output names
-# -----------------------------------------------------------------------------
-
-set(mi_install_dir "${CMAKE_INSTALL_PREFIX}/lib/mimalloc-${mi_version}")
-if(MI_SECURE MATCHES "ON")
- set(mi_basename "mimalloc-secure")
-else()
- set(mi_basename "mimalloc")
-endif()
-string(TOLOWER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_LC)
-if(NOT(CMAKE_BUILD_TYPE_LC MATCHES "^(release|relwithdebinfo|minsizerel)$"))
- set(mi_basename "${mi_basename}-${CMAKE_BUILD_TYPE_LC}") #append build type (e.g. -debug) if not a release version
-endif()
-if(MI_BUILD_SHARED)
- list(APPEND mi_build_targets "shared")
-endif()
-if(MI_BUILD_STATIC)
- list(APPEND mi_build_targets "static")
-endif()
-if(MI_BUILD_OBJECT)
- list(APPEND mi_build_targets "object")
-endif()
-if(MI_BUILD_TESTS)
- list(APPEND mi_build_targets "tests")
-endif()
-message(STATUS "")
-message(STATUS "Library base name: ${mi_basename}")
-message(STATUS "Build type : ${CMAKE_BUILD_TYPE_LC}")
-if(MI_USE_CXX MATCHES "ON")
- message(STATUS "Compiler : ${CMAKE_CXX_COMPILER}")
-else()
- message(STATUS "Compiler : ${CMAKE_C_COMPILER}")
-endif()
-message(STATUS "Install directory: ${mi_install_dir}")
-message(STATUS "Build targets : ${mi_build_targets}")
-message(STATUS "")
-
-# -----------------------------------------------------------------------------
-# Main targets
-# -----------------------------------------------------------------------------
-
-# shared library
-if(MI_BUILD_SHARED)
- add_library(mimalloc SHARED ${mi_sources})
- set_target_properties(mimalloc PROPERTIES VERSION ${mi_version} OUTPUT_NAME ${mi_basename} )
- target_compile_definitions(mimalloc PRIVATE ${mi_defines} MI_SHARED_LIB MI_SHARED_LIB_EXPORT)
- target_compile_options(mimalloc PRIVATE ${mi_cflags})
- target_link_libraries(mimalloc PUBLIC ${mi_libraries})
- target_include_directories(mimalloc PUBLIC
- $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
- $<INSTALL_INTERFACE:${mi_install_dir}/include>
- )
- if(WIN32)
- # On windows copy the mimalloc redirection dll too.
- target_link_libraries(mimalloc PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/bin/mimalloc-redirect.lib)
- add_custom_command(TARGET mimalloc POST_BUILD
- COMMAND "${CMAKE_COMMAND}" -E copy "${CMAKE_CURRENT_SOURCE_DIR}/bin/mimalloc-redirect.dll" $
- COMMENT "Copy mimalloc-redirect.dll to output directory")
- endif()
-
- install(TARGETS mimalloc EXPORT mimalloc DESTINATION ${mi_install_dir} LIBRARY)
- install(EXPORT mimalloc DESTINATION ${mi_install_dir}/cmake)
-endif()
-
-# static library
-if (MI_BUILD_STATIC)
- add_library(mimalloc-static STATIC ${mi_sources})
- set_property(TARGET mimalloc-static PROPERTY POSITION_INDEPENDENT_CODE ON)
- target_compile_definitions(mimalloc-static PRIVATE ${mi_defines} MI_STATIC_LIB)
- target_compile_options(mimalloc-static PRIVATE ${mi_cflags})
- target_link_libraries(mimalloc-static PUBLIC ${mi_libraries})
- target_include_directories(mimalloc-static PUBLIC
- $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
- $<INSTALL_INTERFACE:${mi_install_dir}/include>
- )
- if(WIN32)
- # When building both static and shared libraries on Windows, a static library should use a
- # different output name to avoid the conflict with the import library of a shared one.
- string(REPLACE "mimalloc" "mimalloc-static" mi_output_name ${mi_basename})
- set_target_properties(mimalloc-static PROPERTIES OUTPUT_NAME ${mi_output_name})
- else()
- set_target_properties(mimalloc-static PROPERTIES OUTPUT_NAME ${mi_basename})
- endif()
-
- install(TARGETS mimalloc-static EXPORT mimalloc DESTINATION ${mi_install_dir})
-endif()
-
-# install include files
-install(FILES include/mimalloc.h DESTINATION ${mi_install_dir}/include)
-install(FILES include/mimalloc-override.h DESTINATION ${mi_install_dir}/include)
-install(FILES include/mimalloc-new-delete.h DESTINATION ${mi_install_dir}/include)
-install(FILES cmake/mimalloc-config.cmake DESTINATION ${mi_install_dir}/cmake)
-install(FILES cmake/mimalloc-config-version.cmake DESTINATION ${mi_install_dir}/cmake)
-
-if(NOT WIN32 AND MI_BUILD_SHARED)
- # install a symlink in the /usr/local/lib to the versioned library
- set(mi_symlink "${CMAKE_SHARED_MODULE_PREFIX}${mi_basename}${CMAKE_SHARED_LIBRARY_SUFFIX}")
- set(mi_soname "mimalloc-${mi_version}/${mi_symlink}.${mi_version}")
- install(CODE "execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${mi_soname} ${mi_symlink} WORKING_DIRECTORY ${mi_install_dir}/..)")
- install(CODE "MESSAGE(\"-- Symbolic link: ${CMAKE_INSTALL_PREFIX}/lib/${mi_symlink} -> ${mi_soname}\")")
-endif()
-
-# single object file for more predictable static overriding
-if (MI_BUILD_OBJECT)
- add_library(mimalloc-obj OBJECT src/static.c)
- set_property(TARGET mimalloc-obj PROPERTY POSITION_INDEPENDENT_CODE ON)
- target_compile_definitions(mimalloc-obj PRIVATE ${mi_defines})
- target_compile_options(mimalloc-obj PRIVATE ${mi_cflags})
- target_include_directories(mimalloc-obj PUBLIC
- $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
- $<INSTALL_INTERFACE:${mi_install_dir}/include>
- )
-
- # the following seems to lead to cmake warnings/errors on some systems, disable for now :-(
- # install(TARGETS mimalloc-obj EXPORT mimalloc DESTINATION ${mi_install_dir})
-
- # the FILES expression can also be: $<TARGET_OBJECTS:mimalloc-obj>
- # but that fails cmake versions less than 3.10 so we leave it as is for now
- install(FILES ${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/mimalloc-obj.dir/src/static.c${CMAKE_C_OUTPUT_EXTENSION}
- DESTINATION ${mi_install_dir}
- RENAME ${mi_basename}${CMAKE_C_OUTPUT_EXTENSION} )
-endif()
-
-# -----------------------------------------------------------------------------
-# API surface testing
-# -----------------------------------------------------------------------------
-
-if (MI_BUILD_TESTS MATCHES "ON")
- add_executable(mimalloc-test-api test/test-api.c)
- target_compile_definitions(mimalloc-test-api PRIVATE ${mi_defines})
- target_compile_options(mimalloc-test-api PRIVATE ${mi_cflags})
- target_include_directories(mimalloc-test-api PRIVATE include)
- target_link_libraries(mimalloc-test-api PRIVATE mimalloc-static ${mi_libraries})
-
- add_executable(mimalloc-test-stress test/test-stress.c)
- target_compile_definitions(mimalloc-test-stress PRIVATE ${mi_defines})
- target_compile_options(mimalloc-test-stress PRIVATE ${mi_cflags})
- target_include_directories(mimalloc-test-stress PRIVATE include)
- target_link_libraries(mimalloc-test-stress PRIVATE mimalloc ${mi_libraries})
-
- enable_testing()
- add_test(test_api, mimalloc-test-api)
- add_test(test_stress, mimalloc-test-stress)
-endif()
-
-# -----------------------------------------------------------------------------
-# Set override properties
-# -----------------------------------------------------------------------------
-if (MI_OVERRIDE MATCHES "ON")
- if (MI_BUILD_SHARED)
- target_compile_definitions(mimalloc PRIVATE MI_MALLOC_OVERRIDE)
- endif()
- if(NOT WIN32)
- # It is only possible to override malloc on Windows when building as a DLL.
- if (MI_BUILD_STATIC)
- target_compile_definitions(mimalloc-static PRIVATE MI_MALLOC_OVERRIDE)
- endif()
- if (MI_BUILD_OBJECT)
- target_compile_definitions(mimalloc-obj PRIVATE MI_MALLOC_OVERRIDE)
- endif()
- endif()
-endif()
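The override block at the end of the deleted file encodes a platform asymmetry: plain malloc/free can only be redirected on Windows when mimalloc is built as a DLL (with mimalloc-redirect), while elsewhere the static and object libraries get MI_MALLOC_OVERRIDE as well. A minimal consumer sketch using the documented mi_ entry points; the program and sizes are illustrative only:

    #include <stdlib.h>
    #include <mimalloc.h>

    int main(void) {
      void* p = mi_malloc(42);   /* explicit API, available in every build */
      mi_free(p);
      /* In a build with MI_MALLOC_OVERRIDE (on Windows: the shared library
         plus mimalloc-redirect.dll), plain malloc/free resolve to mimalloc. */
      void* q = malloc(42);
      free(q);
      return 0;
    }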
diff --git a/LICENSE b/LICENSE
index 4151dbe..670b668 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2019 Microsoft Corporation, Daan Leijen
+Copyright (c) 2018-2021 Microsoft Corporation, Daan Leijen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
deleted file mode 100644
index 85e8942..0000000
--- a/azure-pipelines.yml
+++ /dev/null
@@ -1,168 +0,0 @@
-# Starter pipeline
-# Start with a minimal pipeline that you can customize to build and deploy your code.
-# Add steps that build, run tests, deploy, and more:
-# https://aka.ms/yaml
-
-trigger:
-- master
-- dev
-
-jobs:
-- job:
- displayName: Windows
- pool:
- vmImage:
- windows-2019
- strategy:
- matrix:
- Debug:
- BuildType: debug
- cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON
- MSBuildConfiguration: Debug
- Release:
- BuildType: release
- cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release
- MSBuildConfiguration: Release
- Secure:
- BuildType: secure
- cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON
- MSBuildConfiguration: Release
- steps:
- - task: CMake@1
- inputs:
- workingDirectory: $(BuildType)
- cmakeArgs: .. $(cmakeExtraArgs)
- - task: MSBuild@1
- inputs:
- solution: $(BuildType)/libmimalloc.sln
- configuration: '$(MSBuildConfiguration)'
- msbuildArguments: -m
- - script: ctest --verbose --timeout 120
- workingDirectory: $(BuildType)
- displayName: CTest
- #- script: $(BuildType)\$(BuildType)\mimalloc-test-stress
- # displayName: TestStress
- #- upload: $(Build.SourcesDirectory)/$(BuildType)
- # artifact: mimalloc-windows-$(BuildType)
-
-- job:
- displayName: Linux
- pool:
- vmImage:
- ubuntu-18.04
- strategy:
- matrix:
- Debug:
- CC: gcc
- CXX: g++
- BuildType: debug
- cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON
- Release:
- CC: gcc
- CXX: g++
- BuildType: release
- cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release
- Secure:
- CC: gcc
- CXX: g++
- BuildType: secure
- cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON
- Debug++:
- CC: gcc
- CXX: g++
- BuildType: debug-cxx
- cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_USE_CXX=ON
- Debug Clang:
- CC: clang
- CXX: clang++
- BuildType: debug-clang
- cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON
- Release Clang:
- CC: clang
- CXX: clang++
- BuildType: release-clang
- cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release
- Secure Clang:
- CC: clang
- CXX: clang++
- BuildType: secure-clang
- cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON
- Debug++ Clang:
- CC: clang
- CXX: clang++
- BuildType: debug-clang-cxx
- cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_USE_CXX=ON
- steps:
- - task: CMake@1
- inputs:
- workingDirectory: $(BuildType)
- cmakeArgs: .. $(cmakeExtraArgs)
- - script: make -j$(nproc) -C $(BuildType)
- displayName: Make
- - script: ctest --verbose --timeout 120
- workingDirectory: $(BuildType)
- displayName: CTest
-# - upload: $(Build.SourcesDirectory)/$(BuildType)
-# artifact: mimalloc-ubuntu-$(BuildType)
-
-- job:
- displayName: macOS
- pool:
- vmImage:
- macOS-10.14
- strategy:
- matrix:
- Debug:
- BuildType: debug
- cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON
- Release:
- BuildType: release
- cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release
- Secure:
- BuildType: secure
- cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON
- steps:
- - task: CMake@1
- inputs:
- workingDirectory: $(BuildType)
- cmakeArgs: .. $(cmakeExtraArgs)
- - script: make -j$(sysctl -n hw.ncpu) -C $(BuildType)
- displayName: Make
- - script: ctest --verbose --timeout 120
- workingDirectory: $(BuildType)
- displayName: CTest
-# - upload: $(Build.SourcesDirectory)/$(BuildType)
-# artifact: mimalloc-macos-$(BuildType)
-
-# - job:
-# displayName: Windows-2017
-# pool:
-# vmImage:
-# vs2017-win2016
-# strategy:
-# matrix:
-# Debug:
-# BuildType: debug
-# cmakeExtraArgs: -A x64 -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON
-# MSBuildConfiguration: Debug
-# Release:
-# BuildType: release
-# cmakeExtraArgs: -A x64 -DCMAKE_BUILD_TYPE=Release
-# MSBuildConfiguration: Release
-# Secure:
-# BuildType: secure
-# cmakeExtraArgs: -A x64 -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON
-# MSBuildConfiguration: Release
-# steps:
-# - task: CMake@1
-# inputs:
-# workingDirectory: $(BuildType)
-# cmakeArgs: .. $(cmakeExtraArgs)
-# - task: MSBuild@1
-# inputs:
-# solution: $(BuildType)/libmimalloc.sln
-# configuration: '$(MSBuildConfiguration)'
-# - script: |
-# cd $(BuildType)
-# ctest --verbose --timeout 120
-# displayName: CTest
diff --git a/bin/mimalloc-redirect.dll b/bin/mimalloc-redirect.dll
deleted file mode 100644
index b7bf1d0..0000000
Binary files a/bin/mimalloc-redirect.dll and /dev/null differ
diff --git a/bin/mimalloc-redirect.lib b/bin/mimalloc-redirect.lib
deleted file mode 100644
index 550db8e..0000000
Binary files a/bin/mimalloc-redirect.lib and /dev/null differ
diff --git a/bin/mimalloc-redirect32.dll b/bin/mimalloc-redirect32.dll
deleted file mode 100644
index 7ba303a..0000000
Binary files a/bin/mimalloc-redirect32.dll and /dev/null differ
diff --git a/bin/mimalloc-redirect32.lib b/bin/mimalloc-redirect32.lib
deleted file mode 100644
index 6617306..0000000
Binary files a/bin/mimalloc-redirect32.lib and /dev/null differ
diff --git a/cmake/mimalloc-config-version.cmake b/cmake/mimalloc-config-version.cmake
deleted file mode 100644
index edffeea..0000000
--- a/cmake/mimalloc-config-version.cmake
+++ /dev/null
@@ -1,18 +0,0 @@
-set(mi_version_major 1)
-set(mi_version_minor 6)
-set(mi_version ${mi_version_major}.${mi_version_minor})
-
-set(PACKAGE_VERSION ${mi_version})
-if(PACKAGE_FIND_VERSION_MAJOR)
- if("${PACKAGE_FIND_VERSION_MAJOR}" EQUAL "${mi_version_major}")
- if ("${PACKAGE_FIND_VERSION_MINOR}" EQUAL "${mi_version_minor}")
- set(PACKAGE_VERSION_EXACT TRUE)
- elseif("${PACKAGE_FIND_VERSION_MINOR}" LESS "${mi_version_minor}")
- set(PACKAGE_VERSION_COMPATIBLE TRUE)
- else()
- set(PACKAGE_VERSION_UNSUITABLE TRUE)
- endif()
- else()
- set(PACKAGE_VERSION_UNSUITABLE TRUE)
- endif()
-endif()
diff --git a/cmake/mimalloc-config.cmake b/cmake/mimalloc-config.cmake
deleted file mode 100644
index 12da076..0000000
--- a/cmake/mimalloc-config.cmake
+++ /dev/null
@@ -1,2 +0,0 @@
-include(${CMAKE_CURRENT_LIST_DIR}/mimalloc.cmake)
-get_filename_component(MIMALLOC_TARGET_DIR "${CMAKE_CURRENT_LIST_DIR}" PATH)
diff --git a/doc/bench-c5-18xlarge-2020-01-20-a.svg b/doc/bench-2020/bench-c5-18xlarge-2020-01-20-a.svg
similarity index 99%
rename from doc/bench-c5-18xlarge-2020-01-20-a.svg
rename to doc/bench-2020/bench-c5-18xlarge-2020-01-20-a.svg
index 0e55093..9005097 100644
--- a/doc/bench-c5-18xlarge-2020-01-20-a.svg
+++ b/doc/bench-2020/bench-c5-18xlarge-2020-01-20-a.svg
@@ -1,6 +1,7 @@
[... 1 added SVG line omitted ...]
diff --git a/doc/bench-c5-18xlarge-2020-01-20-b.svg b/doc/bench-2020/bench-c5-18xlarge-2020-01-20-b.svg
similarity index 99%
rename from doc/bench-c5-18xlarge-2020-01-20-b.svg
rename to doc/bench-2020/bench-c5-18xlarge-2020-01-20-b.svg
index 22bfa5c..2d853ed 100644
--- a/doc/bench-c5-18xlarge-2020-01-20-b.svg
+++ b/doc/bench-2020/bench-c5-18xlarge-2020-01-20-b.svg
@@ -1,6 +1,7 @@
[... 1 added SVG line omitted ...]
diff --git a/doc/bench-c5-18xlarge-2020-01-20-rss-a.svg b/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-a.svg
similarity index 99%
rename from doc/bench-c5-18xlarge-2020-01-20-rss-a.svg
rename to doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-a.svg
index 6b15ebe..393bfad 100644
--- a/doc/bench-c5-18xlarge-2020-01-20-rss-a.svg
+++ b/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-a.svg
@@ -1,6 +1,7 @@
[... 1 added SVG line omitted ...]
diff --git a/doc/bench-c5-18xlarge-2020-01-20-rss-b.svg b/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-b.svg
similarity index 99%
rename from doc/bench-c5-18xlarge-2020-01-20-rss-b.svg
rename to doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-b.svg
index e3eb774..419dc25 100644
--- a/doc/bench-c5-18xlarge-2020-01-20-rss-b.svg
+++ b/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-b.svg
@@ -1,6 +1,7 @@
[... 1 added SVG line omitted ...]
diff --git a/doc/bench-r5a-1.svg b/doc/bench-2020/bench-r5a-1.svg
similarity index 99%
rename from doc/bench-r5a-1.svg
rename to doc/bench-2020/bench-r5a-1.svg
index 127d6de..c296a04 100644
--- a/doc/bench-r5a-1.svg
+++ b/doc/bench-2020/bench-r5a-1.svg
@@ -1,6 +1,7 @@
[... 1 added SVG line omitted ...]
diff --git a/doc/bench-r5a-12xlarge-2020-01-16-a.svg b/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-a.svg
similarity index 99%
rename from doc/bench-r5a-12xlarge-2020-01-16-a.svg
rename to doc/bench-2020/bench-r5a-12xlarge-2020-01-16-a.svg
index b110ff4..b8a2f20 100644
--- a/doc/bench-r5a-12xlarge-2020-01-16-a.svg
+++ b/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-a.svg
@@ -1,6 +1,7 @@
[... 1 added SVG line omitted ...]
diff --git a/doc/bench-r5a-12xlarge-2020-01-16-b.svg b/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-b.svg
similarity index 99%
rename from doc/bench-r5a-12xlarge-2020-01-16-b.svg
rename to doc/bench-2020/bench-r5a-12xlarge-2020-01-16-b.svg
index f7a3287..4a7e21e 100644
--- a/doc/bench-r5a-12xlarge-2020-01-16-b.svg
+++ b/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-b.svg
@@ -1,6 +1,7 @@
[... 1 added SVG line omitted ...]
diff --git a/doc/bench-r5a-2.svg b/doc/bench-2020/bench-r5a-2.svg
similarity index 99%
rename from doc/bench-r5a-2.svg
rename to doc/bench-2020/bench-r5a-2.svg
index 8b7b2da..917ea57 100644
--- a/doc/bench-r5a-2.svg
+++ b/doc/bench-2020/bench-r5a-2.svg
@@ -1,6 +1,7 @@
[... 1 added SVG line omitted ...]
diff --git a/doc/bench-r5a-rss-1.svg b/doc/bench-2020/bench-r5a-rss-1.svg
similarity index 99%
rename from doc/bench-r5a-rss-1.svg
rename to doc/bench-2020/bench-r5a-rss-1.svg
index 1c7f856..375ebd2 100644
--- a/doc/bench-r5a-rss-1.svg
+++ b/doc/bench-2020/bench-r5a-rss-1.svg
@@ -1,6 +1,7 @@
[... 1 added SVG line omitted ...]
diff --git a/doc/bench-r5a-rss-2.svg b/doc/bench-2020/bench-r5a-rss-2.svg
similarity index 99%
rename from doc/bench-r5a-rss-2.svg
rename to doc/bench-2020/bench-r5a-rss-2.svg
index e819884..cb2bbc8 100644
--- a/doc/bench-r5a-rss-2.svg
+++ b/doc/bench-2020/bench-r5a-rss-2.svg
@@ -1,6 +1,7 @@
[... 1 added SVG line omitted ...]
diff --git a/doc/bench-spec-rss.svg b/doc/bench-2020/bench-spec-rss.svg
similarity index 100%
rename from doc/bench-spec-rss.svg
rename to doc/bench-2020/bench-spec-rss.svg
diff --git a/doc/bench-spec.svg b/doc/bench-2020/bench-spec.svg
similarity index 100%
rename from doc/bench-spec.svg
rename to doc/bench-2020/bench-spec.svg
diff --git a/doc/bench-z4-1.svg b/doc/bench-2020/bench-z4-1.svg
similarity index 100%
rename from doc/bench-z4-1.svg
rename to doc/bench-2020/bench-z4-1.svg
diff --git a/doc/bench-z4-2.svg b/doc/bench-2020/bench-z4-2.svg
similarity index 100%
rename from doc/bench-z4-2.svg
rename to doc/bench-2020/bench-z4-2.svg
diff --git a/doc/bench-z4-rss-1.svg b/doc/bench-2020/bench-z4-rss-1.svg
similarity index 100%
rename from doc/bench-z4-rss-1.svg
rename to doc/bench-2020/bench-z4-rss-1.svg
diff --git a/doc/bench-z4-rss-2.svg b/doc/bench-2020/bench-z4-rss-2.svg
similarity index 100%
rename from doc/bench-z4-rss-2.svg
rename to doc/bench-2020/bench-z4-rss-2.svg
diff --git a/doc/bench-2021/bench-amd5950x-2021-01-30-a.svg b/doc/bench-2021/bench-amd5950x-2021-01-30-a.svg
new file mode 100644
index 0000000..86a97bf
--- /dev/null
+++ b/doc/bench-2021/bench-amd5950x-2021-01-30-a.svg
@@ -0,0 +1,952 @@
[... 952 lines of added SVG markup omitted ...]
\ No newline at end of file
diff --git a/doc/bench-2021/bench-amd5950x-2021-01-30-b.svg b/doc/bench-2021/bench-amd5950x-2021-01-30-b.svg
new file mode 100644
index 0000000..c748877
--- /dev/null
+++ b/doc/bench-2021/bench-amd5950x-2021-01-30-b.svg
@@ -0,0 +1,1255 @@
[... 1255 lines of added SVG markup omitted ...]
\ No newline at end of file
diff --git a/doc/bench-2021/bench-c5-18xlarge-2021-01-30-a.svg b/doc/bench-2021/bench-c5-18xlarge-2021-01-30-a.svg
new file mode 100644
index 0000000..bc91c21
--- /dev/null
+++ b/doc/bench-2021/bench-c5-18xlarge-2021-01-30-a.svg
@@ -0,0 +1,955 @@
[... 955 lines of added SVG markup omitted ...]
\ No newline at end of file
diff --git a/doc/bench-2021/bench-c5-18xlarge-2021-01-30-b.svg b/doc/bench-2021/bench-c5-18xlarge-2021-01-30-b.svg
new file mode 100644
index 0000000..e8b04a0
--- /dev/null
+++ b/doc/bench-2021/bench-c5-18xlarge-2021-01-30-b.svg
@@ -0,0 +1,1269 @@
[... 1269 lines of added SVG markup omitted ...]
\ No newline at end of file
diff --git a/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-a.svg b/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-a.svg
new file mode 100644
index 0000000..6cd36aa
--- /dev/null
+++ b/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-a.svg
@@ -0,0 +1,836 @@
[... 836 lines of added SVG markup omitted ...]
\ No newline at end of file
diff --git a/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-b.svg b/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-b.svg
new file mode 100644
index 0000000..c81072e
--- /dev/null
+++ b/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-b.svg
@@ -0,0 +1,1131 @@
[... 1131 lines of added SVG markup omitted ...]
\ No newline at end of file
diff --git a/doc/bench-2021/bench-macmini-2021-01-30.svg b/doc/bench-2021/bench-macmini-2021-01-30.svg
new file mode 100644
index 0000000..ece6418
--- /dev/null
+++ b/doc/bench-2021/bench-macmini-2021-01-30.svg
@@ -0,0 +1,766 @@
[... 766 lines of added SVG markup omitted ...]
\ No newline at end of file
diff --git a/doc/doxyfile b/doc/doxyfile
index 6c1e30a..55cae8b 100644
--- a/doc/doxyfile
+++ b/doc/doxyfile
@@ -1,4 +1,4 @@
-# Doxyfile 1.8.15
+# Doxyfile 1.9.1
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
@@ -38,7 +38,7 @@ PROJECT_NAME = mi-malloc
# could be handy for archiving the generated documentation or if some version
# control system is used.
-PROJECT_NUMBER = 1.6
+PROJECT_NUMBER = 1.8/2.1
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
@@ -197,6 +197,16 @@ SHORT_NAMES = NO
JAVADOC_AUTOBRIEF = YES
+# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line
+# such as
+# /***************
+# as being the beginning of a Javadoc-style comment "banner". If set to NO, the
+# Javadoc-style will behave just like regular comments and it will not be
+# interpreted by doxygen.
+# The default value is: NO.
+
+JAVADOC_BANNER = NO
+
# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
# line (until the first dot) of a Qt-style comment as the brief description. If
# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
@@ -217,6 +227,14 @@ QT_AUTOBRIEF = NO
MULTILINE_CPP_IS_BRIEF = NO
+# By default Python docstrings are displayed as preformatted text and doxygen's
+# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the
+# doxygen's special commands can be used and the contents of the docstring
+# documentation blocks is shown as doxygen documentation.
+# The default value is: YES.
+
+PYTHON_DOCSTRING = YES
+
# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
# documentation from any documented member that it re-implements.
# The default value is: YES.
@@ -253,12 +271,6 @@ TAB_SIZE = 2
ALIASES =
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding "class=itcl::class"
-# will allow you to use the command class in the itcl::class meaning.
-
-TCL_SUBST =
-
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
# only. Doxygen will then generate output that is more tailored for C. For
# instance, some of the names that are used will be different. The list of all
@@ -299,19 +311,22 @@ OPTIMIZE_OUTPUT_SLICE = NO
# parses. With this tag you can assign which parser to use for a given
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension, and
-# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
-# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice,
+# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
+# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL,
# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
# FortranFree, unknown formatted Fortran: Fortran. In the latter case the parser
# tries to guess whether the code is fixed or free formatted code, this is the
-# default for Fortran type files), VHDL, tcl. For instance to make doxygen treat
-# .inc files as Fortran files (default is PHP), and .f files as C (default is
-# Fortran), use: inc=Fortran f=C.
+# default for Fortran type files). For instance to make doxygen treat .inc files
+# as Fortran files (default is PHP), and .f files as C (default is Fortran),
+# use: inc=Fortran f=C.
#
# Note: For files without extension you can use no_extension as a placeholder.
#
# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
-# the files are not read by doxygen.
+# the files are not read by doxygen. When specifying no_extension you should add
+# * to the FILE_PATTERNS.
+#
+# Note see also the list of default file extension mappings.
EXTENSION_MAPPING =
@@ -329,7 +344,7 @@ MARKDOWN_SUPPORT = YES
# to that level are automatically included in the table of contents, even if
# they do not have an id attribute.
# Note: This feature currently applies only to Markdown headings.
-# Minimum value: 0, maximum value: 99, default value: 0.
+# Minimum value: 0, maximum value: 99, default value: 5.
# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
TOC_INCLUDE_HEADINGS = 0
@@ -445,6 +460,19 @@ TYPEDEF_HIDES_STRUCT = YES
LOOKUP_CACHE_SIZE = 0
+# The NUM_PROC_THREADS specifies the number threads doxygen is allowed to use
+# during processing. When set to 0 doxygen will base this on the number of
+# cores available in the system. You can set it explicitly to a value larger
+# than 0 to get more control over the balance between CPU load and processing
+# speed. At this moment only the input processing can be done using multiple
+# threads. Since this is still an experimental feature the default is set to 1,
+# which effectively disables parallel processing. Please report any issues you
+# encounter. Generating dot graphs in parallel is controlled by the
+# DOT_NUM_THREADS setting.
+# Minimum value: 0, maximum value: 32, default value: 1.
+
+NUM_PROC_THREADS = 1
+
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
@@ -465,6 +493,12 @@ EXTRACT_ALL = YES
EXTRACT_PRIVATE = NO
+# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual
+# methods of a class will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIV_VIRTUAL = NO
+
# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
# scope will be included in the documentation.
# The default value is: NO.
@@ -502,6 +536,13 @@ EXTRACT_LOCAL_METHODS = NO
EXTRACT_ANON_NSPACES = NO
+# If this flag is set to YES, the name of an unnamed parameter in a declaration
+# will be determined by the corresponding definition. By default unnamed
+# parameters remain unnamed in the output.
+# The default value is: YES.
+
+RESOLVE_UNNAMED_PARAMS = YES
+
# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
# undocumented members inside documented classes or files. If set to NO these
# members will be included in the various overviews, but no documentation
@@ -519,8 +560,8 @@ HIDE_UNDOC_MEMBERS = NO
HIDE_UNDOC_CLASSES = NO
# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
-# (class|struct|union) declarations. If set to NO, these declarations will be
-# included in the documentation.
+# declarations. If set to NO, these declarations will be included in the
+# documentation.
# The default value is: NO.
HIDE_FRIEND_COMPOUNDS = NO
@@ -539,11 +580,18 @@ HIDE_IN_BODY_DOCS = NO
INTERNAL_DOCS = NO
-# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
-# names in lower-case letters. If set to YES, upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
+# With the correct setting of option CASE_SENSE_NAMES doxygen will better be
+# able to match the capabilities of the underlying filesystem. In case the
+# filesystem is case sensitive (i.e. it supports files in the same directory
+# whose names only differ in casing), the option must be set to YES to properly
+# deal with such files in case they appear in the input. For filesystems that
+# are not case sensitive the option should be set to NO to properly deal with
+# output files written for symbols that only differ in casing, such as for two
+# classes, one named CLASS and the other named Class, and to also support
+# references to files without having to specify the exact matching casing. On
+# Windows (including Cygwin) and MacOS, users should typically set this option
+# to NO, whereas on Linux or other Unix flavors it should typically be set to
+# YES.
# The default value is: system dependent.
CASE_SENSE_NAMES = NO
@@ -782,7 +830,10 @@ WARN_IF_DOC_ERROR = YES
WARN_NO_PARAMDOC = NO
# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
-# a warning is encountered.
+# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS
+# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but
+# at the end of the doxygen process doxygen will return with a non-zero status.
+# Possible values are: NO, YES and FAIL_ON_WARNINGS.
# The default value is: NO.
WARN_AS_ERROR = NO
@@ -818,8 +869,8 @@ INPUT = mimalloc-doc.h
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
-# documentation (see: https://www.gnu.org/software/libiconv/) for the list of
-# possible encodings.
+# documentation (see:
+# https://www.gnu.org/software/libiconv/) for the list of possible encodings.
# The default value is: UTF-8.
INPUT_ENCODING = UTF-8
@@ -832,11 +883,15 @@ INPUT_ENCODING = UTF-8
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# read by doxygen.
#
+# Note the list of default checked file patterns might differ from the list of
+# default file extension mappings.
+#
# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp,
# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
-# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08,
-# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, *.qsf and *.ice.
+# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment),
+# *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, *.vhdl,
+# *.ucf, *.qsf and *.ice.
FILE_PATTERNS = *.c \
*.cc \
@@ -1094,16 +1149,22 @@ USE_HTAGS = NO
VERBATIM_HEADERS = YES
# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
-# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
-# cost of reduced performance. This can be particularly helpful with template
-# rich C++ code for which doxygen's built-in parser lacks the necessary type
-# information.
+# clang parser (see:
+# http://clang.llvm.org/) for more accurate parsing at the cost of reduced
+# performance. This can be particularly helpful with template rich C++ code for
+# which doxygen's built-in parser lacks the necessary type information.
# Note: The availability of this option depends on whether or not doxygen was
# generated with the -Duse_libclang=ON option for CMake.
# The default value is: NO.
CLANG_ASSISTED_PARSING = NO
+# If clang assisted parsing is enabled and the CLANG_ADD_INC_PATHS tag is set to
+# YES then doxygen will add the directory of each input to the include path.
+# The default value is: YES.
+
+CLANG_ADD_INC_PATHS = YES
+
# If clang assisted parsing is enabled you can provide the compiler with command
# line options that you would normally use when invoking the compiler. Note that
# the include paths will already be set by doxygen for the files and directories
@@ -1113,10 +1174,13 @@ CLANG_ASSISTED_PARSING = NO
CLANG_OPTIONS =
# If clang assisted parsing is enabled you can provide the clang parser with the
-# path to the compilation database (see:
-# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) used when the files
-# were built. This is equivalent to specifying the "-p" option to a clang tool,
-# such as clang-check. These options will then be passed to the parser.
+# path to the directory containing a file called compile_commands.json. This
+# file is the compilation database (see:
+# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) containing the
+# options used when the source files were built. This is equivalent to
+# specifying the -p option to a clang tool, such as clang-check. These options
+# will then be passed to the parser. Any options specified with CLANG_OPTIONS
+# will be added as well.
# Note: The availability of this option depends on whether or not doxygen was
# generated with the -Duse_libclang=ON option for CMake.
@@ -1133,13 +1197,6 @@ CLANG_DATABASE_PATH =
ALPHABETICAL_INDEX = YES
-# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
-# which the alphabetical index list will be split.
-# Minimum value: 1, maximum value: 20, default value: 5.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-COLS_IN_ALPHA_INDEX = 5
-
# In case all classes in a project start with a common prefix, all classes will
# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
# can be used to specify a prefix (or a list of prefixes) that should be ignored
@@ -1278,9 +1335,9 @@ HTML_TIMESTAMP = NO
# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
# documentation will contain a main index with vertical navigation menus that
-# are dynamically created via Javascript. If disabled, the navigation index will
+# are dynamically created via JavaScript. If disabled, the navigation index will
# consists of multiple levels of tabs that are statically embedded in every HTML
-# page. Disable this option to support browsers that do not have Javascript,
+# page. Disable this option to support browsers that do not have JavaScript,
# like the Qt help browser.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
@@ -1310,10 +1367,11 @@ HTML_INDEX_NUM_ENTRIES = 100
# If the GENERATE_DOCSET tag is set to YES, additional index files will be
# generated that can be used as input for Apple's Xcode 3 integrated development
-# environment (see: https://developer.apple.com/xcode/), introduced with OSX
-# 10.5 (Leopard). To create a documentation set, doxygen will generate a
-# Makefile in the HTML output directory. Running make will produce the docset in
-# that directory and running make install will install the docset in
+# environment (see:
+# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To
+# create a documentation set, doxygen will generate a Makefile in the HTML
+# output directory. Running make will produce the docset in that directory and
+# running make install will install the docset in
# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy
# genXcode/_index.html for more information.
@@ -1355,8 +1413,8 @@ DOCSET_PUBLISHER_NAME = Publisher
# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
-# (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on
-# Windows.
+# (see:
+# https://www.microsoft.com/en-us/download/details.aspx?id=21138) on Windows.
#
# The HTML Help Workshop contains a compiler that can convert all HTML output
# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
@@ -1386,7 +1444,7 @@ CHM_FILE =
HHC_LOCATION =
# The GENERATE_CHI flag controls if a separate .chi index file is generated
-# (YES) or that it should be included in the master .chm file (NO).
+# (YES) or that it should be included in the main .chm file (NO).
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
@@ -1431,7 +1489,8 @@ QCH_FILE =
# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
# Project output. For more information please see Qt Help Project / Namespace
-# (see: http://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
+# (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_QHP is set to YES.
@@ -1439,8 +1498,8 @@ QHP_NAMESPACE = org.doxygen.Project
# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
# Help Project output. For more information please see Qt Help Project / Virtual
-# Folders (see: http://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-
-# folders).
+# Folders (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders).
# The default value is: doc.
# This tag requires that the tag GENERATE_QHP is set to YES.
@@ -1448,30 +1507,30 @@ QHP_VIRTUAL_FOLDER = doc
# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
# filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-
-# filters).
+# Filters (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_NAME =
# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-
-# filters).
+# Filters (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_ATTRS =
# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
# project's filter section matches. Qt Help Project / Filter Attributes (see:
-# http://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes).
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_SECT_FILTER_ATTRS =
-# The QHG_LOCATION tag can be used to specify the location of Qt's
-# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
-# generated .qhp file.
+# The QHG_LOCATION tag can be used to specify the location (absolute path
+# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to
+# run qhelpgenerator on the generated .qhp file.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHG_LOCATION =
@@ -1548,6 +1607,17 @@ TREEVIEW_WIDTH = 180
EXT_LINKS_IN_WINDOW = NO
+# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg
+# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see
+# https://inkscape.org) to generate formulas as SVG images instead of PNGs for
+# the HTML output. These images will generally look nicer at scaled resolutions.
+# Possible values are: png (the default) and svg (looks nicer but requires the
+# pdf2svg or inkscape tool).
+# The default value is: png.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FORMULA_FORMAT = png
+
# Use this tag to change the font size of LaTeX formulas included as images in
# the HTML documentation. When you change the font size after a successful
# doxygen run you need to manually remove any form_*.png images from the HTML
@@ -1568,8 +1638,14 @@ FORMULA_FONTSIZE = 10
FORMULA_TRANSPARENT = YES
+# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
+# to create new LaTeX commands to be used in formulas as building blocks. See
+# the section "Including formulas" for details.
+
+FORMULA_MACROFILE =
+
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
-# https://www.mathjax.org) which uses client side Javascript for the rendering
+# https://www.mathjax.org) which uses client side JavaScript for the rendering
# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
# installed or if you want the formulas to look prettier in the HTML output. When
# enabled you may also need to install MathJax separately and configure the path
@@ -1581,7 +1657,7 @@ USE_MATHJAX = NO
# When MathJax is enabled you can set the default output format to be used for
# the MathJax output. See the MathJax site (see:
-# http://docs.mathjax.org/en/latest/output.html) for more details.
+# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details.
# Possible values are: HTML-CSS (which is slower, but has the best
# compatibility), NativeMML (i.e. MathML) and SVG.
# The default value is: HTML-CSS.
@@ -1597,7 +1673,7 @@ MATHJAX_FORMAT = HTML-CSS
# Content Delivery Network so you can quickly see the result without installing
# MathJax. However, it is strongly recommended to install a local copy of
# MathJax from https://www.mathjax.org before deployment.
-# The default value is: https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/.
+# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
@@ -1611,7 +1687,8 @@ MATHJAX_EXTENSIONS =
# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
# of code that will be used on startup of the MathJax code. See the MathJax site
-# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# (see:
+# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an
# example see the documentation.
# This tag requires that the tag USE_MATHJAX is set to YES.
@@ -1639,7 +1716,7 @@ MATHJAX_CODEFILE =
SEARCHENGINE = YES
# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a web server instead of a web client using Javascript. There
+# implemented using a web server instead of a web client using JavaScript. There
# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
# setting. When disabled, doxygen will generate a PHP script for searching and
# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
@@ -1658,7 +1735,8 @@ SERVER_BASED_SEARCH = NO
#
# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
-# Xapian (see: https://xapian.org/).
+# Xapian (see:
+# https://xapian.org/).
#
# See the section "External Indexing and Searching" for details.
# The default value is: NO.
@@ -1671,8 +1749,9 @@ EXTERNAL_SEARCH = NO
#
# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
-# Xapian (see: https://xapian.org/). See the section "External Indexing and
-# Searching" for details.
+# Xapian (see:
+# https://xapian.org/). See the section "External Indexing and Searching" for
+# details.
# This tag requires that the tag SEARCHENGINE is set to YES.
SEARCHENGINE_URL =
@@ -1743,10 +1822,11 @@ LATEX_CMD_NAME = latex
MAKEINDEX_CMD_NAME = makeindex
# The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to
-# generate index for LaTeX.
+# generate the index for LaTeX. If there is no backslash (\) as the first
+# character, it will be added automatically in the LaTeX code.
# Note: This tag is used in the generated output file (.tex).
# See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat.
-# The default value is: \makeindex.
+# The default value is: makeindex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_MAKEINDEX_CMD = \makeindex
@@ -1835,9 +1915,11 @@ LATEX_EXTRA_FILES =
PDF_HYPERLINKS = YES
-# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
-# the PDF file directly from the LaTeX files. Set this option to YES, to get a
-# higher quality PDF documentation.
+# If the USE_PDFLATEX tag is set to YES, doxygen will use the engine as
+# specified with LATEX_CMD_NAME to generate the PDF file directly from the LaTeX
+# files. Set this option to YES to get higher quality PDF documentation.
+#
+# See also section LATEX_CMD_NAME for selecting the engine.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
@@ -2076,6 +2158,10 @@ DOCBOOK_PROGRAMLISTING = NO
GENERATE_AUTOGEN_DEF = NO
+#---------------------------------------------------------------------------
+# Configuration options related to Sqlite3 output
+#---------------------------------------------------------------------------
+
#---------------------------------------------------------------------------
# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
@@ -2238,12 +2324,6 @@ EXTERNAL_GROUPS = YES
EXTERNAL_PAGES = YES
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of 'which perl').
-# The default file (with absolute path) is: /usr/bin/perl.
-
-PERL_PATH = /usr/bin/perl
-
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
@@ -2257,15 +2337,6 @@ PERL_PATH = /usr/bin/perl
CLASS_DIAGRAMS = YES
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see:
-# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH =
-
# You can include diagrams made with dia in doxygen documentation. Doxygen will
# then run dia to produce the diagram and insert it in the documentation. The
# DIA_PATH tag allows you to specify the directory where the dia binary resides.
@@ -2363,10 +2434,32 @@ UML_LOOK = NO
# but if the number exceeds 15, the total amount of fields shown is limited to
# 10.
# Minimum value: 0, maximum value: 100, default value: 10.
-# This tag requires that the tag HAVE_DOT is set to YES.
+# This tag requires that the tag UML_LOOK is set to YES.
UML_LIMIT_NUM_FIELDS = 10
+# If the DOT_UML_DETAILS tag is set to NO, doxygen will show attributes and
+# methods without types and arguments in the UML graphs. If the DOT_UML_DETAILS
+# tag is set to YES, doxygen will add type and arguments for attributes and
+# methods in the UML graphs. If the DOT_UML_DETAILS tag is set to NONE, doxygen
+# will not generate fields with class member information in the UML graphs. The
+# class diagrams will look similar to the default class diagrams but using UML
+# notation for the relationships.
+# Possible values are: NO, YES and NONE.
+# The default value is: NO.
+# This tag requires that the tag UML_LOOK is set to YES.
+
+DOT_UML_DETAILS = NO
+
+# The DOT_WRAP_THRESHOLD tag can be used to set the maximum number of characters
+# to display on a single line. If the actual line length exceeds this threshold
+# significantly it will be wrapped across multiple lines. Some heuristics are
+# applied to avoid ugly line breaks.
+# Minimum value: 0, maximum value: 1000, default value: 17.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_WRAP_THRESHOLD = 17
+
# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
# collaboration graphs will show the relations between templates and their
# instances.
@@ -2556,9 +2649,11 @@ DOT_MULTI_TARGETS = NO
GENERATE_LEGEND = YES
-# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
+# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate
# files that are used to generate the various graphs.
+#
+# Note: This setting is not only used for dot files but also for msc and
+# plantuml temporary files.
# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
DOT_CLEANUP = YES
diff --git a/doc/ds-logo.jpg b/doc/ds-logo.jpg
new file mode 100644
index 0000000..c9abb1a
Binary files /dev/null and b/doc/ds-logo.jpg differ
diff --git a/doc/ds-logo.png b/doc/ds-logo.png
new file mode 100644
index 0000000..93b84e4
Binary files /dev/null and b/doc/ds-logo.png differ
diff --git a/doc/mimalloc-doc.h b/doc/mimalloc-doc.h
index 7c238d2..3e75243 100644
--- a/doc/mimalloc-doc.h
+++ b/doc/mimalloc-doc.h
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -40,7 +40,7 @@ Notable aspects of the design include:
per mimalloc page, but for each page we have multiple free lists. In particular, there
is one list for thread-local `free` operations, and another one for concurrent `free`
operations. Free-ing from another thread can now be a single CAS without needing
- sophisticated coordination between threads. Since there will be
+ sophisticated coordination between threads. Since there will be
thousands of separate free lists, contention is naturally distributed over the heap,
and the chance of contending on a single location will be low -- this is quite
similar to randomized algorithms like skip lists where adding
@@ -51,12 +51,12 @@ Notable aspects of the design include:
programs.
- __secure__: _mimalloc_ can be build in secure mode, adding guard pages,
randomized allocation, encrypted free lists, etc. to protect against various
- heap vulnerabilities. The performance penalty is only around 3% on average
+ heap vulnerabilities. The performance penalty is only around 5% on average
over our benchmarks.
- __first-class heaps__: efficiently create and use multiple heaps to allocate across different regions.
A heap can be destroyed at once instead of deallocating each object separately.
- __bounded__: it does not suffer from _blowup_ \[1\], has bounded worst-case allocation
- times (_wcat_), bounded space overhead (~0.2% meta-data, with at most 12.5% waste in allocation sizes),
+ times (_wcat_), bounded space overhead (~0.2% meta-data, with low internal fragmentation),
and has no internal points of contention using only atomic operations.
- __fast__: In our benchmarks (see [below](#performance)),
_mimalloc_ outperforms all other leading allocators (_jemalloc_, _tcmalloc_, _Hoard_, etc),
@@ -413,6 +413,28 @@ void mi_register_error(mi_error_fun* errfun, void* arg);
/// This function is relatively fast.
bool mi_is_in_heap_region(const void* p);
+/// Reserve OS memory for use by mimalloc. Reserved areas are used
+/// before allocating from the OS again. By reserving a large area upfront,
+/// allocation can be more efficient, and can be better managed on systems
+/// without `mmap`/`VirtualAlloc` (like WASM for example).
+/// @param size The size to reserve.
+/// @param commit Commit the memory upfront.
+/// @param allow_large Allow large OS pages (2MiB) to be used?
+/// @return \a 0 if successful, and an error code otherwise (e.g. `ENOMEM`).
+int mi_reserve_os_memory(size_t size, bool commit, bool allow_large);
+
+/// Manage a particular memory area for use by mimalloc.
+/// This is just like `mi_reserve_os_memory` except that the area should already be
+/// allocated in some manner and available for use by mimalloc.
+/// @param start Start of the memory area
+/// @param size The size of the memory area.
+/// @param is_committed Is the area already committed?
+/// @param is_large Does it consist of large OS pages? Set this to \a true as well for memory
+/// that should not be decommitted or protected (like rdma etc.)
+/// @param is_zero Does the area consist of zeros?
+/// @param numa_node Possible associated numa node or `-1`.
+/// @return \a true if successful, and \a false on error.
+bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node);
/// Reserve \a pages of huge OS pages (1GiB) evenly divided over \a numa_nodes nodes,
/// but stops after at most `timeout_msecs` seconds.
@@ -476,9 +498,12 @@ void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_m
///
/// \{
+/// The maximum supported alignment size (currently 1MiB).
+#define MI_ALIGNMENT_MAX (1024*1024UL)
+
/// Allocate \a size bytes aligned by \a alignment.
/// @param size number of bytes to allocate.
-/// @param alignment the minimal alignment of the allocated memory.
+/// @param alignment the minimal alignment of the allocated memory. Must be less than #MI_ALIGNMENT_MAX.
/// @returns pointer to the allocated memory or \a NULL if out of memory.
/// The returned pointer is aligned by \a alignment, i.e.
/// `(uintptr_t)p % alignment == 0`.
@@ -777,19 +802,32 @@ typedef enum mi_option_e {
mi_option_show_errors, ///< Print error messages to `stderr`.
mi_option_show_stats, ///< Print statistics to `stderr` when the program is done.
mi_option_verbose, ///< Print verbose messages to `stderr`.
+
// the following options are experimental
mi_option_eager_commit, ///< Eagerly commit segments (4MiB) (enabled by default).
- mi_option_eager_region_commit, ///< Eagerly commit large (256MiB) memory regions (enabled by default, except on Windows)
mi_option_large_os_pages, ///< Use large OS pages (2MiB in size) if possible
mi_option_reserve_huge_os_pages, ///< The number of huge OS pages (1GiB in size) to reserve at the start of the program.
- mi_option_segment_cache, ///< The number of segments per thread to keep cached.
+ mi_option_reserve_huge_os_pages_at, ///< Reserve huge OS pages at node N.
+ mi_option_reserve_os_memory, ///< Reserve specified amount of OS memory at startup, e.g. "1g" or "512m".
+ mi_option_segment_cache, ///< The number of segments per thread to keep cached (0).
mi_option_page_reset, ///< Reset page memory after \a mi_option_reset_delay milliseconds when it becomes free.
+ mi_option_abandoned_page_reset, ///< Reset free page memory when a thread terminates.
+ mi_option_use_numa_nodes, ///< Pretend there are at most N NUMA nodes; use 0 to use the actual detected NUMA nodes at runtime.
+ mi_option_eager_commit_delay, ///< The first N segments per thread are not eagerly committed (=1).
+ mi_option_os_tag, ///< OS tag to assign to mimalloc'd memory
+ mi_option_limit_os_alloc, ///< If set to 1, do not use OS memory for allocation (but only pre-reserved arenas)
+
+ // v1.x specific options
+ mi_option_eager_region_commit, ///< Eagerly commit large (256MiB) memory regions (enabled by default, except on Windows)
mi_option_segment_reset, ///< Experimental
mi_option_reset_delay, ///< Delay in milli-seconds before resetting a page (100ms by default)
- mi_option_use_numa_nodes, ///< Pretend there are at most N NUMA nodes
- mi_option_reset_decommits, ///< Experimental
- mi_option_eager_commit_delay, ///< Experimental
- mi_option_os_tag, ///< OS tag to assign to mimalloc'd memory
+ mi_option_purge_decommits, ///< Experimental
+
+ // v2.x specific options
+ mi_option_allow_purge, ///< Enable decommitting memory (=on)
+ mi_option_purge_delay, ///< Decommit page memory after N milli-seconds delay (25ms).
+ mi_option_segment_purge_delay, ///< Decommit large segment memory after N milli-seconds delay (500ms).
+
_mi_option_last
} mi_option_t;
@@ -828,8 +866,14 @@ void* mi_valloc(size_t size);
void* mi_pvalloc(size_t size);
void* mi_aligned_alloc(size_t alignment, size_t size);
+
+/// Corresponds to [reallocarray](https://www.freebsd.org/cgi/man.cgi?query=reallocarray&sektion=3&manpath=freebsd-release-ports)
+/// in FreeBSD.
void* mi_reallocarray(void* p, size_t count, size_t size);
+/// Corresponds to [reallocarr](https://man.netbsd.org/reallocarr.3) in NetBSD.
+int mi_reallocarr(void* p, size_t count, size_t size);
+
void mi_free_size(void* p, size_t size);
void mi_free_size_aligned(void* p, size_t size, size_t alignment);
void mi_free_aligned(void* p, size_t alignment);
@@ -883,7 +927,7 @@ template struct mi_stl_allocator { }
/*! \page build Building
-Checkout the sources from Github:
+Check out the sources from GitHub:
```
git clone https://github.com/microsoft/mimalloc
```
@@ -1036,7 +1080,7 @@ or via environment variables.
- `MIMALLOC_PAGE_RESET=0`: by default, mimalloc will reset (or purge) OS pages when not in use to signal to the OS
that the underlying physical memory can be reused. This can reduce memory fragmentation in long running (server)
programs. By setting it to `0` no such page resets will be done which can improve performance for programs that are not long
- running. As an alternative, the `MIMALLOC_RESET_DELAY=` can be set higher (100ms by default) to make the page
+ running. As an alternative, the `MIMALLOC_DECOMMIT_DELAY=` can be set higher (100ms by default) to make the page
reset occur less frequently instead of turning it off completely.
- `MIMALLOC_LARGE_OS_PAGES=1`: use large OS pages (2MiB) when available; for some workloads this can significantly
improve performance. Use `MIMALLOC_VERBOSE` to check if the large OS pages are enabled -- usually one needs
@@ -1053,6 +1097,8 @@ or via environment variables.
`MIMALLOC_EAGER_COMMIT_DELAY=N` (`N` is 1 by default) to delay the initial `N` segments (of 4MiB)
of a thread to not allocate in the huge OS pages; this prevents threads that are short lived
and allocate just a little to take up space in the huge OS page area (which cannot be reset).
+- `MIMALLOC_RESERVE_HUGE_OS_PAGES_AT=N`: where `N` is the numa node. This reserves the huge pages at a specific numa node
+  (`N` is -1 by default, which reserves the huge pages evenly among the given number of numa nodes, or among those detected as available).
Use caution when using `fork` in combination with either large or huge OS pages: on a fork, the OS uses copy-on-write
for all pages in the original process including the huge OS pages. When any memory is now written in that area, the
@@ -1158,6 +1204,12 @@ void* calloc(size_t size, size_t n);
void* realloc(void* p, size_t newsize);
void free(void* p);
+void* aligned_alloc(size_t alignment, size_t size);
+char* strdup(const char* s);
+char* strndup(const char* s, size_t n);
+char* realpath(const char* fname, char* resolved_name);
+
+
// C++
void operator delete(void* p);
void operator delete[](void* p);
@@ -1177,16 +1229,24 @@ int posix_memalign(void** p, size_t alignment, size_t size);
// Linux
void* memalign(size_t alignment, size_t size);
-void* aligned_alloc(size_t alignment, size_t size);
void* valloc(size_t size);
void* pvalloc(size_t size);
size_t malloc_usable_size(void *p);
+void* reallocf(void* p, size_t newsize);
+
+// macOS
+void vfree(void* p);
+size_t malloc_size(const void* p);
+size_t malloc_good_size(size_t size);
// BSD
void* reallocarray( void* p, size_t count, size_t size );
void* reallocf(void* p, size_t newsize);
void cfree(void* p);
+// NetBSD
+int reallocarr(void* p, size_t count, size_t size);
+
// Windows
void* _expand(void* p, size_t newsize);
size_t _msize(void* p);
@@ -1209,7 +1269,7 @@ synthetic benchmarks that see how the allocator behaves under more
extreme circumstances.
In our benchmarks, _mimalloc_ always outperforms all other leading
-allocators (_jemalloc_, _tcmalloc_, _Hoard_, etc) (Apr 2019),
+allocators (_jemalloc_, _tcmalloc_, _Hoard_, etc) (Jan 2021),
and usually uses less memory (up to 25% more in the worst case).
A nice property is that it does *consistently* well over the wide
range of benchmarks.
diff --git a/doc/spades-logo.png b/doc/spades-logo.png
new file mode 100644
index 0000000..d8c73fe
Binary files /dev/null and b/doc/spades-logo.png differ
diff --git a/doc/unreal-logo.svg b/doc/unreal-logo.svg
new file mode 100644
index 0000000..5d5192a
--- /dev/null
+++ b/doc/unreal-logo.svg
@@ -0,0 +1,43 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/annotated.html b/docs/annotated.html
index feba243..948a886 100644
--- a/docs/annotated.html
+++ b/docs/annotated.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Data Structures
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('annotated.html','');});
-
@@ -113,9 +109,7 @@ $(document).ready(function(){initNavTree('annotated.html','');});
diff --git a/docs/bench.html b/docs/bench.html
index f39fade..d54f5fd 100644
--- a/docs/bench.html
+++ b/docs/bench.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Performance
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('bench.html','');});
-
@@ -103,17 +99,15 @@ $(document).ready(function(){initNavTree('bench.html','');});
We tested mimalloc against many other top allocators over a wide range of benchmarks, ranging from various real world programs to synthetic benchmarks that see how the allocator behaves under more extreme circumstances.
-
In our benchmarks, mimalloc always outperforms all other leading allocators (jemalloc , tcmalloc , Hoard , etc) (Apr 2019), and usually uses less memory (up to 25% more in the worst case). A nice property is that it does consistently well over the wide range of benchmarks.
+
In our benchmarks, mimalloc always outperforms all other leading allocators (jemalloc , tcmalloc , Hoard , etc) (Jan 2021), and usually uses less memory (up to 25% more in the worst case). A nice property is that it does consistently well over the wide range of benchmarks.
See the Performance section in the mimalloc repository for benchmark results, or the the technical report for detailed benchmark results.
-
-
+
+
diff --git a/docs/build.html b/docs/build.html
index 2bd06f1..41e0199 100644
--- a/docs/build.html
+++ b/docs/build.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Building
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('build.html','');});
-
@@ -102,28 +98,39 @@ $(document).ready(function(){initNavTree('build.html','');});
Building
-
Checkout the sources from Github:
Windows
+
Checkout the sources from GitHub:
git clone https://github.com/microsoft/mimalloc
+
Windows
Open ide/vs2019/mimalloc.sln
in Visual Studio 2019 and build (or ide/vs2017/mimalloc.sln
). The mimalloc
project builds a static library (in out/msvc-x64
), while the mimalloc-override
project builds a DLL for overriding malloc in the entire program.
macOS, Linux, BSD, etc.
We use cmake
1 as the build system:
-
> mkdir -p out/release
> cd out/release
> cmake ../..
> make
This builds the library as a shared (dynamic) library (.so
or .dylib
), a static library (.a
), and as a single object file (.o
).
+
> mkdir -p out/release
+
> cd out/release
+
> cmake ../..
+
> make
+
This builds the library as a shared (dynamic) library (.so
or .dylib
), a static library (.a
), and as a single object file (.o
).
> sudo make install
(install the library and header files in /usr/local/lib
and /usr/local/include
)
You can build the debug version which does many internal checks and maintains detailed statistics as:
-
> mkdir -p out/debug
> cd out/debug
> cmake -DCMAKE_BUILD_TYPE=Debug ../..
> make
This will name the shared library as libmimalloc-debug.so
.
-
Finally, you can build a secure version that uses guard pages, encrypted free lists, etc, as:
> mkdir -p out/secure
> cd out/secure
> cmake -DMI_SECURE=ON ../..
> make
This will name the shared library as libmimalloc-secure.so
. Use ccmake
2 instead of cmake
to see and customize all the available build options.
+
> mkdir -p out/debug
+
> cd out/debug
+
> cmake -DCMAKE_BUILD_TYPE=Debug ../..
+
> make
+
This will name the shared library as libmimalloc-debug.so
.
+
Finally, you can build a secure version that uses guard pages, encrypted free lists, etc, as:
> mkdir -p out/secure
+
> cd out/secure
+
> cmake -DMI_SECURE=ON ../..
+
> make
+
This will name the shared library as libmimalloc-secure.so
. Use ccmake
2 instead of cmake
to see and customize all the available build options.
Notes:
Install CMake: sudo apt-get install cmake
Install CCMake: sudo apt-get install cmake-curses-gui
-
-
+
+
diff --git a/docs/classes.html b/docs/classes.html
index e74a0a2..3baa0db 100644
--- a/docs/classes.html
+++ b/docs/classes.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Data Structure Index
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('classes.html','');});
-
@@ -102,23 +98,18 @@ $(document).ready(function(){initNavTree('classes.html','');});
Data Structure Index
diff --git a/docs/doxygen.css b/docs/doxygen.css
index 7238471..3809180 100644
--- a/docs/doxygen.css
+++ b/docs/doxygen.css
@@ -1,4 +1,4 @@
-/* The standard CSS for doxygen 1.8.15 */
+/* The standard CSS for doxygen 1.9.1 */
body, table, div, p, dl {
font: 400 14px/22px Roboto,sans-serif;
@@ -53,17 +53,24 @@ dt {
font-weight: bold;
}
-div.multicol {
+ul.multicol {
-moz-column-gap: 1em;
-webkit-column-gap: 1em;
+ column-gap: 1em;
-moz-column-count: 3;
-webkit-column-count: 3;
+ column-count: 3;
}
p.startli, p.startdd {
margin-top: 2px;
}
+th p.starttd, th p.intertd, th p.endtd {
+ font-size: 100%;
+ font-weight: 700;
+}
+
p.starttd {
margin-top: 0px;
}
@@ -96,30 +103,96 @@ caption {
}
span.legend {
- font-size: 70%;
- text-align: center;
-}
-
-h3.version {
- font-size: 90%;
- text-align: center;
-}
-
-div.qindex, div.navtab{
- background-color: #D6D9D9;
- border: 1px solid #636C6D;
+ font-size: 70%;
text-align: center;
}
-div.qindex, div.navpath {
- width: 100%;
- line-height: 140%;
+h3.version {
+ font-size: 90%;
+ text-align: center;
}
div.navtab {
- margin-right: 15px;
+ border-right: 1px solid #636C6D;
+ padding-right: 15px;
+ text-align: right;
+ line-height: 110%;
}
+div.navtab table {
+ border-spacing: 0;
+}
+
+td.navtab {
+ padding-right: 6px;
+ padding-left: 6px;
+}
+td.navtabHL {
+ background-image: url('tab_a.png');
+ background-repeat:repeat-x;
+ padding-right: 6px;
+ padding-left: 6px;
+}
+
+td.navtabHL a, td.navtabHL a:visited {
+ color: #fff;
+ text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0);
+}
+
+a.navtab {
+ font-weight: bold;
+}
+
+div.qindex{
+ text-align: center;
+ width: 100%;
+ line-height: 140%;
+ font-size: 130%;
+ color: #A0A0A0;
+}
+
+dt.alphachar{
+ font-size: 180%;
+ font-weight: bold;
+}
+
+.alphachar a{
+ color: black;
+}
+
+.alphachar a:hover, .alphachar a:visited{
+ text-decoration: none;
+}
+
+.classindex dl {
+ padding: 25px;
+ column-count:1
+}
+
+.classindex dd {
+ display:inline-block;
+ margin-left: 50px;
+ width: 90%;
+ line-height: 1.15em;
+}
+
+.classindex dl.odd {
+ background-color: #F0F1F1;
+}
+
+@media(min-width: 1120px) {
+ .classindex dl {
+ column-count:2
+ }
+}
+
+@media(min-width: 1320px) {
+ .classindex dl {
+ column-count:3
+ }
+}
+
+
/* @group Link Styling */
a {
@@ -136,17 +209,6 @@ a:hover {
text-decoration: underline;
}
-a.qindex {
- font-weight: bold;
-}
-
-a.qindexHL {
- font-weight: bold;
- background-color: #5B6364;
- color: #FFFFFF;
- border: 1px double #464C4D;
-}
-
.contents a.qindexHL:visited {
color: #FFFFFF;
}
@@ -159,11 +221,11 @@ a.elRef {
}
a.code, a.code:visited, a.line, a.line:visited {
- color: #171919;
+ color: #171919;
}
a.codeRef, a.codeRef:visited, a.lineRef, a.lineRef:visited {
- color: #171919;
+ color: #171919;
}
/* @end */
@@ -349,7 +411,7 @@ p.formulaDsp {
}
img.formulaDsp {
-
+
}
img.formulaInl, img.inline {
@@ -407,20 +469,20 @@ span.charliteral {
color: #008080
}
-span.vhdldigit {
- color: #ff00ff
+span.vhdldigit {
+ color: #ff00ff
}
-span.vhdlchar {
- color: #000000
+span.vhdlchar {
+ color: #000000
}
-span.vhdlkeyword {
- color: #700070
+span.vhdlkeyword {
+ color: #700070
}
-span.vhdllogic {
- color: #ff0000
+span.vhdllogic {
+ color: #ff0000
}
blockquote {
@@ -533,7 +595,7 @@ table.memberdecls {
white-space: nowrap;
}
-.memItemRight {
+.memItemRight, .memTemplItemRight {
width: 100%;
}
@@ -645,9 +707,9 @@ table.memberdecls {
}
.memdoc, dl.reflist dd {
- border-bottom: 1px solid #697273;
- border-left: 1px solid #697273;
- border-right: 1px solid #697273;
+ border-bottom: 1px solid #697273;
+ border-left: 1px solid #697273;
+ border-right: 1px solid #697273;
padding: 6px 10px 2px 10px;
background-color: #F7F8F8;
border-top-width: 0;
@@ -699,18 +761,18 @@ dl.reflist dd {
.params, .retval, .exception, .tparams {
margin-left: 0px;
padding-left: 0px;
-}
+}
-.params .paramname, .retval .paramname, .tparams .paramname {
+.params .paramname, .retval .paramname, .tparams .paramname, .exception .paramname {
font-weight: bold;
vertical-align: top;
}
-
+
.params .paramtype, .tparams .paramtype {
font-style: italic;
vertical-align: top;
-}
-
+}
+
.params .paramdir, .tparams .paramdir {
font-family: "courier new",courier,monospace;
vertical-align: top;
@@ -966,8 +1028,8 @@ table.fieldtable {
.fieldtable td.fielddoc p:first-child {
margin-top: 0px;
-}
-
+}
+
.fieldtable td.fielddoc p:last-child {
margin-bottom: 2px;
}
@@ -1042,7 +1104,7 @@ table.fieldtable {
color: #040404;
font-family: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif;
text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9);
- text-decoration: none;
+ text-decoration: none;
}
.navpath li.navelem a:hover
@@ -1071,7 +1133,7 @@ div.summary
padding-right: 5px;
width: 50%;
text-align: right;
-}
+}
div.summary a
{
@@ -1086,7 +1148,7 @@ table.classindex
margin-right: 3%;
width: 94%;
border: 0;
- border-spacing: 0;
+ border-spacing: 0;
padding: 0;
}
@@ -1266,12 +1328,12 @@ dl.section dd {
vertical-align: bottom;
border-collapse: separate;
}
-
+
#projectlogo img
-{
+{
border: 0px none;
}
-
+
#projectalign
{
vertical-align: middle;
@@ -1283,7 +1345,7 @@ dl.section dd {
margin: 0px;
padding: 2px 0px;
}
-
+
#projectbrief
{
font: 120% Tahoma, Arial,sans-serif;
@@ -1351,10 +1413,12 @@ dl.citelist dt {
font-weight:bold;
margin-right:10px;
padding:5px;
+ text-align:right;
+ width:52px;
}
dl.citelist dd {
- margin:2px 0;
+ margin:2px 0 2px 72px;
padding:5px 0;
}
@@ -1399,7 +1463,7 @@ div.toc ul {
list-style: none outside none;
border: medium none;
padding: 0px;
-}
+}
div.toc li.level1 {
margin-left: 0px;
@@ -1417,6 +1481,12 @@ div.toc li.level4 {
margin-left: 45px;
}
+span.emoji {
+ /* font family used at the site: https://unicode.org/emoji/charts/full-emoji-list.html
+ * font-family: "Noto Color Emoji", "Apple Color Emoji", "Segoe UI Emoji", Times, Symbola, Aegyptus, Code2000, Code2001, Code2002, Musica, serif, LastResort;
+ */
+}
+
.PageDocRTL-title div.toc li.level1 {
margin-left: 0 !important;
margin-right: 0;
@@ -1654,47 +1724,6 @@ tr.heading h2 {
/* @group Markdown */
-/*
-table.markdownTable {
- border-collapse:collapse;
- margin-top: 4px;
- margin-bottom: 4px;
-}
-
-table.markdownTable td, table.markdownTable th {
- border: 1px solid #060606;
- padding: 3px 7px 2px;
-}
-
-table.markdownTableHead tr {
-}
-
-table.markdownTableBodyLeft td, table.markdownTable th {
- border: 1px solid #060606;
- padding: 3px 7px 2px;
-}
-
-th.markdownTableHeadLeft th.markdownTableHeadRight th.markdownTableHeadCenter th.markdownTableHeadNone {
- background-color: #0B0C0C;
- color: #FFFFFF;
- font-size: 110%;
- padding-bottom: 4px;
- padding-top: 5px;
-}
-
-th.markdownTableHeadLeft {
- text-align: left
-}
-
-th.markdownTableHeadRight {
- text-align: right
-}
-
-th.markdownTableHeadCenter {
- text-align: center
-}
-*/
-
table.markdownTable {
border-collapse:collapse;
margin-top: 4px;
@@ -1754,11 +1783,10 @@ table.DocNodeLTR {
tt, code, kbd, samp
{
display: inline-block;
- direction:ltr;
+ direction:ltr;
}
/* @end */
u {
text-decoration: underline;
}
-
diff --git a/docs/dynsections.js b/docs/dynsections.js
index ea0a7b3..3174bd7 100644
--- a/docs/dynsections.js
+++ b/docs/dynsections.js
@@ -1,25 +1,26 @@
/*
- @licstart The following is the entire license notice for the
- JavaScript code in this file.
+ @licstart The following is the entire license notice for the JavaScript code in this file.
- Copyright (C) 1997-2017 by Dimitri van Heesch
+ The MIT License (MIT)
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+ Copyright (C) 1997-2020 by Dimitri van Heesch
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software
+ and associated documentation files (the "Software"), to deal in the Software without restriction,
+ including without limitation the rights to use, copy, modify, merge, publish, distribute,
+ sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ The above copyright notice and this permission notice shall be included in all copies or
+ substantial portions of the Software.
- @licend The above is the entire license notice
- for the JavaScript code in this file
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ @licend The above is the entire license notice for the JavaScript code in this file
*/
function toggleVisibility(linkObj)
{
diff --git a/docs/environment.html b/docs/environment.html
index 87d67e4..58d20e1 100644
--- a/docs/environment.html
+++ b/docs/environment.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Environment Options
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('environment.html','');});
-
@@ -110,17 +106,16 @@ $(document).ready(function(){initNavTree('environment.html','');});
MIMALLOC_PAGE_RESET=0
: by default, mimalloc will reset (or purge) OS pages when not in use to signal to the OS that the underlying physical memory can be reused. This can reduce memory fragmentation in long running (server) programs. By setting it to 0
no such page resets will be done which can improve performance for programs that are not long running. As an alternative, the MIMALLOC_RESET_DELAY=
<msecs> can be set higher (100ms by default) to make the page reset occur less frequently instead of turning it off completely.
MIMALLOC_LARGE_OS_PAGES=1
: use large OS pages (2MiB) when available; for some workloads this can significantly improve performance. Use MIMALLOC_VERBOSE
to check if the large OS pages are enabled – usually one needs to explicitly allow large OS pages (as on Windows and Linux ). However, sometimes the OS is very slow to reserve contiguous physical memory for large OS pages so use with care on systems that can have fragmented memory (for that reason, we generally recommend to use MIMALLOC_RESERVE_HUGE_OS_PAGES
instead when possible).
MIMALLOC_RESERVE_HUGE_OS_PAGES=N
: where N is the number of 1GiB huge OS pages. This reserves the huge pages at startup and sometimes this can give a large (latency) performance improvement on big workloads. Usually it is better to not use MIMALLOC_LARGE_OS_PAGES
in combination with this setting. Just like large OS pages, use with care as reserving contiguous physical memory can take a long time when memory is fragmented (but reserving the huge pages is done at startup only once). Note that we usually need to explicitly enable huge OS pages (as on Windows and Linux )). With huge OS pages, it may be beneficial to set the setting MIMALLOC_EAGER_COMMIT_DELAY=N
(N
is 1 by default) to delay the initial N
segments (of 4MiB) of a thread to not allocate in the huge OS pages; this prevents threads that are short lived and allocate just a little to take up space in the huge OS page area (which cannot be reset).
+
MIMALLOC_RESERVE_HUGE_OS_PAGES_AT=N
: where N is the numa node. This reserves the huge pages at a specific numa node. (N
is -1 by default to reserve huge pages evenly among the given number of numa nodes (or use the available ones as detected))
Use caution when using fork
in combination with either large or huge OS pages: on a fork, the OS uses copy-on-write for all pages in the original process including the huge OS pages. When any memory is now written in that area, the OS will copy the entire 1GiB huge page (or 2MiB large page) which can cause the memory usage to grow in big increments.
-
-
+
+
diff --git a/docs/functions.html b/docs/functions.html
index 6202121..0419c96 100644
--- a/docs/functions.html
+++ b/docs/functions.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Data Fields
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('functions.html','');});
-
@@ -120,9 +116,7 @@ $(document).ready(function(){initNavTree('functions.html','');});
diff --git a/docs/functions_vars.html b/docs/functions_vars.html
index 7d41d10..d5252ae 100644
--- a/docs/functions_vars.html
+++ b/docs/functions_vars.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Data Fields - Variables
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('functions_vars.html','');});
-
@@ -120,9 +116,7 @@ $(document).ready(function(){initNavTree('functions_vars.html','');});
diff --git a/docs/group__aligned.html b/docs/group__aligned.html
index a3eaacf..c9ad48e 100644
--- a/docs/group__aligned.html
+++ b/docs/group__aligned.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Aligned Allocation
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,26 +88,33 @@ $(document).ready(function(){initNavTree('group__aligned.html','');});
-
-
Allocating aligned memory blocks.
+
Allocating aligned memory blocks.
More...
void * mi_malloc_aligned (size_t size, size_t alignment)
- Allocate size bytes aligned by alignment . More...
+ Allocate size bytes aligned by alignment . More...
void * mi_zalloc_aligned (size_t size, size_t alignment)
@@ -120,7 +123,7 @@ Functions
void * mi_realloc_aligned (void *p, size_t newsize, size_t alignment)
void * mi_malloc_aligned_at (size_t size, size_t alignment, size_t offset)
- Allocate size bytes aligned by alignment at a specified offset . More...
+ Allocate size bytes aligned by alignment at a specified offset . More...
void * mi_zalloc_aligned_at (size_t size, size_t alignment, size_t offset)
@@ -131,6 +134,23 @@ Functions
Allocating aligned memory blocks.
+
+
+
◆ MI_ALIGNMENT_MAX
+
+
+
+
+
+ #define MI_ALIGNMENT_MAX
+
+
+
+
+
The maximum supported alignment size (currently 1MiB).
+
+
+
◆ mi_calloc_aligned()
@@ -236,7 +256,7 @@ Functions
Parameters
size number of bytes to allocate.
- alignment the minimal alignment of the allocated memory.
+ alignment the minimal alignment of the allocated memory. Must be less than MI_ALIGNMENT_MAX .
@@ -438,9 +458,7 @@ Functions
diff --git a/docs/group__aligned.js b/docs/group__aligned.js
index 0a5aa5c..06ccb0c 100644
--- a/docs/group__aligned.js
+++ b/docs/group__aligned.js
@@ -1,5 +1,6 @@
var group__aligned =
[
+ [ "MI_ALIGNMENT_MAX", "group__aligned.html#ga83c03016066b438f51a8095e9140be06", null ],
[ "mi_calloc_aligned", "group__aligned.html#ga53dddb4724042a90315b94bc268fb4c9", null ],
[ "mi_calloc_aligned_at", "group__aligned.html#ga08647c4593f3b2eef24a919a73eba3a3", null ],
[ "mi_malloc_aligned", "group__aligned.html#ga68930196751fa2cca9e1fd0d71bade56", null ],
diff --git a/docs/group__analysis.html b/docs/group__analysis.html
index 2487c24..6ceb4d5 100644
--- a/docs/group__analysis.html
+++ b/docs/group__analysis.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Heap Introspection
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('group__analysis.html','');});
-
@@ -107,7 +103,7 @@ $(document).ready(function(){initNavTree('group__analysis.html','');});
-
Inspect the heap at runtime.
+
Inspect the heap at runtime.
More...
@@ -376,9 +372,7 @@ bytes in use by allocated blocks
diff --git a/docs/group__cpp.html b/docs/group__cpp.html
index 88c7588..2ad5303 100644
--- a/docs/group__cpp.html
+++ b/docs/group__cpp.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: C++ wrappers
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('group__cpp.html','');});
-
@@ -106,7 +102,7 @@ $(document).ready(function(){initNavTree('group__cpp.html','');});
-
mi_
prefixed implementations of various allocation functions that use C++ semantics on out-of-memory, generally calling std::get_new_handler
and raising a std::bad_alloc
exception on failure.
+
mi_
prefixed implementations of various allocation functions that use C++ semantics on out-of-memory, generally calling std::get_new_handler
and raising a std::bad_alloc
exception on failure.
More...
void * mi_new (std::size_t n) noexcept(false)
- like mi_malloc() , but when out of memory, use std::get_new_handler
and raise std::bad_alloc
exception on failure. More...
+ like mi_malloc() , but when out of memory, use std::get_new_handler
and raise std::bad_alloc
exception on failure. More...
void * mi_new_n (size_t count, size_t size) noexcept(false)
- like mi_mallocn() , but when out of memory, use std::get_new_handler
and raise std::bad_alloc
exception on failure. More...
+ like mi_mallocn() , but when out of memory, use std::get_new_handler
and raise std::bad_alloc
exception on failure. More...
void * mi_new_aligned (std::size_t n, std::align_val_t alignment) noexcept(false)
- like mi_malloc_aligned() , but when out of memory, use std::get_new_handler
and raise std::bad_alloc
exception on failure. More...
+ like mi_malloc_aligned() , but when out of memory, use std::get_new_handler
and raise std::bad_alloc
exception on failure. More...
void * mi_new_nothrow (size_t n)
- like mi_malloc
, but when out of memory, use std::get_new_handler
but return NULL on failure. More...
+ like mi_malloc
, but when out of memory, use std::get_new_handler
but return NULL on failure. More...
void * mi_new_aligned_nothrow (size_t n, size_t alignment)
- like mi_malloc_aligned
, but when out of memory, use std::get_new_handler
but return NULL on failure. More...
+ like mi_malloc_aligned
, but when out of memory, use std::get_new_handler
but return NULL on failure. More...
void * mi_new_realloc (void *p, size_t newsize)
- like mi_realloc() , but when out of memory, use std::get_new_handler
and raise std::bad_alloc
exception on failure. More...
+ like mi_realloc() , but when out of memory, use std::get_new_handler
and raise std::bad_alloc
exception on failure. More...
void * mi_new_reallocn (void *p, size_t newcount, size_t size)
- like mi_reallocn() , but when out of memory, use std::get_new_handler
and raise std::bad_alloc
exception on failure. More...
+ like mi_reallocn() , but when out of memory, use std::get_new_handler
and raise std::bad_alloc
exception on failure. More...
@@ -158,7 +154,10 @@ Functions
struct mi_stl_allocator< T >
std::allocator implementation for mimalloc for use in STL containers.
-
For example:
std::vector<int, mi_stl_allocator<int> > vec;
vec.push_back(1);
vec.pop_back();
+For example:
std::vector<int, mi_stl_allocator<int> > vec;
+
vec.push_back(1);
+
vec.pop_back();
+
@@ -387,9 +386,7 @@ struct mi_stl_allocator< T >
diff --git a/docs/group__extended.html b/docs/group__extended.html
index 12e51cb..bf71e92 100644
--- a/docs/group__extended.html
+++ b/docs/group__extended.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Extended Functions
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('group__extended.html','');});
-
@@ -107,88 +103,94 @@ $(document).ready(function(){initNavTree('group__extended.html','');});
-
Extended functionality.
+
Extended functionality.
More...
typedef void() mi_deferred_free_fun (bool force, unsigned long long heartbeat, void *arg)
- Type of deferred free functions. More...
+ Type of deferred free functions. More...
typedef void() mi_output_fun (const char *msg, void *arg)
- Type of output functions. More...
+ Type of output functions. More...
typedef void() mi_error_fun (int err, void *arg)
- Type of error callback functions. More...
+ Type of error callback functions. More...
void * mi_malloc_small (size_t size)
- Allocate a small object. More...
+ Allocate a small object. More...
void * mi_zalloc_small (size_t size)
- Allocate a zero initialized small object. More...
+ Allocate a zero initialized small object. More...
size_t mi_usable_size (void *p)
- Return the available bytes in a memory block. More...
+ Return the available bytes in a memory block. More...
size_t mi_good_size (size_t size)
- Return the used allocation size. More...
+ Return the used allocation size. More...
void mi_collect (bool force)
- Eagerly free memory. More...
+ Eagerly free memory. More...
void mi_stats_print (void *out)
- Deprecated. More...
+ Deprecated. More...
void mi_stats_print_out (mi_output_fun *out, void *arg)
- Print the main statistics. More...
+ Print the main statistics. More...
void mi_stats_reset (void)
- Reset statistics. More...
+ Reset statistics. More...
void mi_stats_merge (void)
- Merge thread local statistics with the main statistics and reset. More...
+ Merge thread local statistics with the main statistics and reset. More...
void mi_thread_init (void)
- Initialize mimalloc on a thread. More...
+ Initialize mimalloc on a thread. More...
void mi_thread_done (void)
- Uninitialize mimalloc on a thread. More...
+ Uninitialize mimalloc on a thread. More...
void mi_thread_stats_print_out (mi_output_fun *out, void *arg)
- Print out heap statistics for this thread. More...
+ Print out heap statistics for this thread. More...
void mi_register_deferred_free (mi_deferred_free_fun *deferred_free, void *arg)
- Register a deferred free function. More...
+ Register a deferred free function. More...
void mi_register_output (mi_output_fun *out, void *arg)
- Register an output function. More...
+ Register an output function. More...
void mi_register_error (mi_error_fun *errfun, void *arg)
- Register an error callback function. More...
+ Register an error callback function. More...
bool mi_is_in_heap_region (const void *p)
- Is a pointer part of our heap? More...
+ Is a pointer part of our heap? More...
+int mi_reserve_os_memory (size_t size, bool commit, bool allow_large)
+ Reserve OS memory for use by mimalloc. More...
+
+bool mi_manage_os_memory (void *start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node)
+ Manage a particular memory area for use by mimalloc. More...
+
int mi_reserve_huge_os_pages_interleave (size_t pages, size_t numa_nodes, size_t timeout_msecs)
- Reserve pages of huge OS pages (1GiB) evenly divided over numa_nodes nodes, but stops after at most timeout_msecs
seconds. More...
+ Reserve pages of huge OS pages (1GiB) evenly divided over numa_nodes nodes, but stops after at most timeout_msecs
seconds. More...
int mi_reserve_huge_os_pages_at (size_t pages, int numa_node, size_t timeout_msecs)
- Reserve pages of huge OS pages (1GiB) at a specific numa_node , but stops after at most timeout_msecs
seconds. More...
+ Reserve pages of huge OS pages (1GiB) at a specific numa_node , but stops after at most timeout_msecs
seconds. More...
bool mi_is_redirected ()
- Is the C runtime malloc API redirected? More...
+ Is the C runtime malloc API redirected? More...
void mi_process_info (size_t *elapsed_msecs, size_t *user_msecs, size_t *system_msecs, size_t *current_rss, size_t *peak_rss, size_t *current_commit, size_t *peak_commit, size_t *page_faults)
- Return process information (time and memory usage). More...
+ Return process information (time and memory usage). More...
@@ -414,6 +416,72 @@ Functions
Returns a pointer to newly allocated memory of at least size bytes, or NULL if out of memory. This function is meant for use in run-time systems for best performance and does not check if size was indeed small – use with care!
+
+
+
+◆ mi_manage_os_memory()
+
+
+
+
+
+ bool mi_manage_os_memory
+ (
+ void *
+ start ,
+
+
+
+
+ size_t
+ size ,
+
+
+
+
+ bool
+ is_committed ,
+
+
+
+
+ bool
+ is_large ,
+
+
+
+
+ bool
+ is_zero ,
+
+
+
+
+ int
+ numa_node
+
+
+
+ )
+
+
+
+
+
+
Manage a particular memory area for use by mimalloc.
+
This is just like mi_reserve_os_memory
except that the area should already be allocated in some manner and available for use by mimalloc.
Parameters
+
+ start Start of the memory area
+ size The size of the memory area.
+ is_committed Is the area already committed?
+ is_large Does it consist of large OS pages? Set this to true as well for memory that should not be decommitted or protected (like rdma etc.)
+ is_zero Does the area consist of zeros?
+ numa_node Possible associated numa node or -1
.
+
+
+
+
Returns true if successful, and false on error.
+
@@ -706,6 +774,51 @@ Functions
Returns 0 if successful, ENOMEM if running out of memory, or ETIMEDOUT if timed out.
The reserved memory is used by mimalloc to satisfy allocations. May quit before timeout_msecs are expired if it estimates it will take more than 1.5 times timeout_msecs . The time limit is needed because on some operating systems it can take a long time to reserve contiguous memory if the physical memory is fragmented.
+
+
+
+◆ mi_reserve_os_memory()
+
+ int mi_reserve_os_memory (size_t size, bool commit, bool allow_large)
+
Reserve OS memory for use by mimalloc.
+
Reserved areas are used before allocating from the OS again. By reserving a large area upfront, allocation can be more efficient, and memory can be better managed on systems without mmap/VirtualAlloc (like WASM, for example).
Parameters
+
+ size The size to reserve.
+ commit Commit the memory upfront.
+ allow_large Allow large OS pages (2MiB) to be used?
+
+
+
+
Returns 0 if successful, and an error code otherwise (e.g. ENOMEM).
+
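For illustration, a minimal sketch (assumption: called once at startup, before other allocations) of reserving an area upfront:

#include <mimalloc.h>

int main(void) {
  // Reserve 1 GiB upfront: commit immediately and allow 2 MiB large pages.
  if (mi_reserve_os_memory((size_t)1024 * 1024 * 1024, true, true) != 0) {
    // Reservation failed (e.g. ENOMEM); mimalloc still works,
    // it just allocates from the OS on demand instead.
  }
  void* p = mi_malloc(32 * 1024);  // likely served from the reserved area
  mi_free(p);
  return 0;
}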
@@ -958,9 +1071,7 @@ Functions
diff --git a/docs/group__extended.js b/docs/group__extended.js
index ed4a8b4..c217aac 100644
--- a/docs/group__extended.js
+++ b/docs/group__extended.js
@@ -9,12 +9,14 @@ var group__extended =
[ "mi_is_in_heap_region", "group__extended.html#ga5f071b10d4df1c3658e04e7fd67a94e6", null ],
[ "mi_is_redirected", "group__extended.html#gaad25050b19f30cd79397b227e0157a3f", null ],
[ "mi_malloc_small", "group__extended.html#ga7136c2e55cb22c98ecf95d08d6debb99", null ],
+ [ "mi_manage_os_memory", "group__extended.html#ga4c6486a1fdcd7a423b5f25fe4be8e0cf", null ],
[ "mi_process_info", "group__extended.html#ga7d862c2affd5790381da14eb102a364d", null ],
[ "mi_register_deferred_free", "group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece", null ],
[ "mi_register_error", "group__extended.html#gaa1d55e0e894be240827e5d87ec3a1f45", null ],
[ "mi_register_output", "group__extended.html#gae5b17ff027cd2150b43a33040250cf3f", null ],
[ "mi_reserve_huge_os_pages_at", "group__extended.html#ga7795a13d20087447281858d2c771cca1", null ],
[ "mi_reserve_huge_os_pages_interleave", "group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50", null ],
+ [ "mi_reserve_os_memory", "group__extended.html#ga00ec3324b6b2591c7fe3677baa30a767", null ],
[ "mi_stats_merge", "group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1", null ],
[ "mi_stats_print", "group__extended.html#ga2d126e5c62d3badc35445e5d84166df2", null ],
[ "mi_stats_print_out", "group__extended.html#ga537f13b299ddf801e49a5a94fde02c79", null ],
diff --git a/docs/group__heap.html b/docs/group__heap.html
index 1a38c93..5f97698 100644
--- a/docs/group__heap.html
+++ b/docs/group__heap.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Heap Allocation
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('group__heap.html','');});
-
@@ -106,61 +102,61 @@ $(document).ready(function(){initNavTree('group__heap.html','');});
-
First-class heaps that can be destroyed in one go.
+
First-class heaps that can be destroyed in one go.
More...
mi_heap_t * mi_heap_new ()
- Create a new heap that can be used for allocation. More...
+ Create a new heap that can be used for allocation. More...
void mi_heap_delete (mi_heap_t *heap)
- Delete a previously allocated heap. More...
+ Delete a previously allocated heap. More...
void mi_heap_destroy (mi_heap_t *heap)
- Destroy a heap, freeing all its still allocated blocks. More...
+ Destroy a heap, freeing all its still allocated blocks. More...
mi_heap_t * mi_heap_set_default (mi_heap_t *heap)
- Set the default heap to use for mi_malloc() et al. More...
+ Set the default heap to use for mi_malloc() et al. More...
mi_heap_t * mi_heap_get_default ()
- Get the default heap that is used for mi_malloc() et al. More...
+ Get the default heap that is used for mi_malloc() et al. More...
mi_heap_t * mi_heap_get_backing ()
- Get the backing heap. More...
+ Get the backing heap. More...
void mi_heap_collect (mi_heap_t *heap, bool force)
- Release outstanding resources in a specific heap. More...
+ Release outstanding resources in a specific heap. More...
void * mi_heap_malloc (mi_heap_t *heap, size_t size)
- Allocate in a specific heap. More...
+ Allocate in a specific heap. More...
void * mi_heap_malloc_small (mi_heap_t *heap, size_t size)
- Allocate a small object in a specific heap. More...
+ Allocate a small object in a specific heap. More...
void * mi_heap_zalloc (mi_heap_t *heap, size_t size)
- Allocate zero-initialized in a specific heap. More...
+ Allocate zero-initialized in a specific heap. More...
void * mi_heap_calloc (mi_heap_t *heap, size_t count, size_t size)
- Allocate count zero-initialized elements in a specific heap. More...
+ Allocate count zero-initialized elements in a specific heap. More...
void * mi_heap_mallocn (mi_heap_t *heap, size_t count, size_t size)
- Allocate count elements in a specific heap. More...
+ Allocate count elements in a specific heap. More...
char * mi_heap_strdup (mi_heap_t *heap, const char *s)
- Duplicate a string in a specific heap. More...
+ Duplicate a string in a specific heap. More...
char * mi_heap_strndup (mi_heap_t *heap, const char *s, size_t n)
- Duplicate a string of at most length n in a specific heap. More...
+ Duplicate a string of at most length n in a specific heap. More...
char * mi_heap_realpath (mi_heap_t *heap, const char *fname, char *resolved_name)
- Resolve a file path name using a specific heap to allocate the result. More...
+ Resolve a file path name using a specific heap to allocate the result. More...
void * mi_heap_realloc (mi_heap_t *heap, void *p, size_t newsize)
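To make the heap API listed above concrete, a small illustrative sketch (not from the generated docs) of the create/allocate/destroy lifecycle:

#include <mimalloc.h>

// Build many temporary objects in their own heap and release them in one go.
void build_and_discard(void) {
  mi_heap_t* heap = mi_heap_new();
  if (heap == NULL) return;
  for (int i = 0; i < 1000; i++) {
    void* obj = mi_heap_malloc(heap, 128);  // allocated in this heap only
    (void)obj;                              // no per-object mi_free needed
  }
  mi_heap_destroy(heap);  // frees all blocks still allocated in the heap
}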
@@ -1071,9 +1067,7 @@ Functions
diff --git a/docs/group__malloc.html b/docs/group__malloc.html
index 224c4b0..c110fdb 100644
--- a/docs/group__malloc.html
+++ b/docs/group__malloc.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Basic Allocation
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('group__malloc.html','');});
-
@@ -105,49 +101,49 @@ $(document).ready(function(){initNavTree('group__malloc.html','');});
-
The basic allocation interface.
+
The basic allocation interface.
More...
void mi_free (void *p)
- Free previously allocated memory. More...
+ Free previously allocated memory. More...
void * mi_malloc (size_t size)
- Allocate size bytes. More...
+ Allocate size bytes. More...
void * mi_zalloc (size_t size)
- Allocate zero-initialized size
bytes. More...
+ Allocate zero-initialized size bytes. More...
void * mi_calloc (size_t count, size_t size)
- Allocate zero-initialized count elements of size bytes. More...
+ Allocate zero-initialized count elements of size bytes. More...
void * mi_realloc (void *p, size_t newsize)
- Re-allocate memory to newsize bytes. More...
+ Re-allocate memory to newsize bytes. More...
void * mi_recalloc (void *p, size_t count, size_t size)
- Re-allocate memory to count elements of size bytes, with extra memory initialized to zero. More...
+ Re-allocate memory to count elements of size bytes, with extra memory initialized to zero. More...
void * mi_expand (void *p, size_t newsize)
- Try to re-allocate memory to newsize bytes in place . More...
+ Try to re-allocate memory to newsize bytes in place . More...
void * mi_mallocn (size_t count, size_t size)
- Allocate count elements of size bytes. More...
+ Allocate count elements of size bytes. More...
void * mi_reallocn (void *p, size_t count, size_t size)
- Re-allocate memory to count elements of size bytes. More...
+ Re-allocate memory to count elements of size bytes. More...
void * mi_reallocf (void *p, size_t newsize)
- Re-allocate memory to newsize bytes,. More...
+ Re-allocate memory to newsize bytes. More...
char * mi_strdup (const char *s)
- Allocate and duplicate a string. More...
+ Allocate and duplicate a string. More...
char * mi_strndup (const char *s, size_t n)
- Allocate and duplicate a string up to n bytes. More...
+ Allocate and duplicate a string up to n bytes. More...
char * mi_realpath (const char *fname, char *resolved_name)
- Resolve a file path name. More...
+ Resolve a file path name. More...
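A short sketch (not part of the generated docs) showing the count-based variants, which check for overflow of count*size:

#include <mimalloc.h>

int main(void) {
  int* a = (int*)mi_mallocn(100, sizeof(int));  // 100 ints; NULL if count*size overflows
  int* b = (int*)mi_calloc(100, sizeof(int));   // 100 zero-initialized ints
  if (a == NULL || b == NULL) return 1;
  a = (int*)mi_reallocn(a, 200, sizeof(int));   // grow to 200 elements
  mi_free(a);
  mi_free(b);
  return 0;
}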
@@ -635,9 +631,7 @@ mi_zallocn()
diff --git a/docs/group__options.html b/docs/group__options.html
index 9425765..6c63e17 100644
--- a/docs/group__options.html
+++ b/docs/group__options.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Runtime Options
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('group__options.html','');});
-
@@ -106,31 +102,33 @@ $(document).ready(function(){initNavTree('group__options.html','');});
-
Set runtime behavior.
+
Set runtime behavior.
More...
enum mi_option_t {
- mi_option_show_errors ,
-mi_option_show_stats ,
-mi_option_verbose ,
-mi_option_eager_commit ,
-
- mi_option_eager_region_commit ,
-mi_option_large_os_pages ,
-mi_option_reserve_huge_os_pages ,
-mi_option_segment_cache ,
-
- mi_option_page_reset ,
-mi_option_segment_reset ,
-mi_option_reset_delay ,
-mi_option_use_numa_nodes ,
-
- mi_option_reset_decommits ,
-mi_option_eager_commit_delay ,
-mi_option_os_tag ,
-_mi_option_last
+ mi_option_show_errors
+, mi_option_show_stats
+, mi_option_verbose
+, mi_option_eager_commit
+,
+ mi_option_eager_region_commit
+, mi_option_large_os_pages
+, mi_option_reserve_huge_os_pages
+, mi_option_reserve_huge_os_pages_at
+,
+ mi_option_segment_cache
+, mi_option_page_reset
+, mi_option_segment_reset
+, mi_option_reset_delay
+,
+ mi_option_use_numa_nodes
+, mi_option_reset_decommits
+, mi_option_eager_commit_delay
+, mi_option_os_tag
+,
+ _mi_option_last
}
Runtime options. More...
@@ -186,6 +184,8 @@ Functions
mi_option_reserve_huge_os_pages The number of huge OS pages (1GiB in size) to reserve at the start of the program.
+ mi_option_reserve_huge_os_pages_at Reserve huge OS pages at node N.
+
mi_option_segment_cache The number of segments per thread to keep cached.
mi_option_page_reset Reset page memory after mi_option_reset_delay milliseconds when it becomes free.
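As an illustration (hedged: options are typically read when first used, so set them early), options can be set programmatically or through the equivalent MIMALLOC_ environment variables:

#include <mimalloc.h>

int main(void) {
  // Set options before the first allocations that depend on them;
  // the same settings are available as environment variables,
  // e.g. MIMALLOC_SHOW_STATS=1 or MIMALLOC_RESERVE_HUGE_OS_PAGES=4.
  mi_option_enable(mi_option_show_errors);
  mi_option_set(mi_option_reserve_huge_os_pages, 4);  // reserve 4 x 1GiB at startup
  void* p = mi_malloc(64);
  mi_free(p);
  return 0;
}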
@@ -397,9 +397,7 @@ Functions
diff --git a/docs/group__options.js b/docs/group__options.js
index 9aaf231..c8836cd 100644
--- a/docs/group__options.js
+++ b/docs/group__options.js
@@ -8,6 +8,7 @@ var group__options =
[ "mi_option_eager_region_commit", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca32ce97ece29f69e82579679cf8a307ad", null ],
[ "mi_option_large_os_pages", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4192d491200d0055df0554d4cf65054e", null ],
[ "mi_option_reserve_huge_os_pages", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caca7ed041be3b0b9d0b82432c7bf41af2", null ],
+ [ "mi_option_reserve_huge_os_pages_at", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caa13e7926d4339d2aa6fbf61d4473fd5c", null ],
[ "mi_option_segment_cache", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca2ecbe7ef32f5c84de3739aa4f0b805a1", null ],
[ "mi_option_page_reset", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cada854dd272c66342f18a93ee254a2968", null ],
[ "mi_option_segment_reset", "group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafb121d30d87591850d5410ccc3a95c6d", null ],
diff --git a/docs/group__posix.html b/docs/group__posix.html
index fe3a88e..37c8702 100644
--- a/docs/group__posix.html
+++ b/docs/group__posix.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Posix
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('group__posix.html','');});
-
@@ -105,7 +101,7 @@ $(document).ready(function(){initNavTree('group__posix.html','');});
-
mi_
prefixed implementations of various Posix, Unix, and C++ allocation functions.
+
mi_
prefixed implementations of various Posix, Unix, and C++ allocation functions.
More...
size_t mi_malloc_usable_size (const void *p)
void mi_cfree (void *p)
- Just as free
but also checks if the pointer p
belongs to our heap. More...
+ Just as free but also checks if the pointer p belongs to our heap. More...
int mi_posix_memalign (void **p, size_t alignment, size_t size)
@@ -130,7 +126,11 @@ Functions
void * mi_aligned_alloc (size_t alignment, size_t size)
void * mi_reallocarray (void *p, size_t count, size_t size)
+ Corresponds to reallocarray in FreeBSD. More...
+int mi_reallocarr (void *p, size_t count, size_t size)
+ Corresponds to reallocarr in NetBSD. More...
+
void mi_free_size (void *p, size_t size)
void mi_free_size_aligned (void *p, size_t size, size_t alignment)
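For example, a minimal sketch of the POSIX-style entry points (assuming the usual posix_memalign contract of a power-of-two alignment that is a multiple of sizeof(void*)):

#include <mimalloc.h>

int main(void) {
  void* p = NULL;
  if (mi_posix_memalign(&p, 64, 1024) == 0) {  // 1 KiB block, 64-byte aligned
    mi_free(p);
  }
  void* q = mi_aligned_alloc(64, 1024);        // C11-style variant
  mi_free(q);
  return 0;
}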
@@ -428,6 +428,42 @@ Functions
+
+
+
+◆ mi_reallocarr()
+
+ int mi_reallocarr (void *p, size_t count, size_t size)
+
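Since the section gives no example, here is a hedged sketch assuming mi_reallocarr follows NetBSD's reallocarr contract: p points to the pointer variable itself, which is updated in place, and 0 is returned on success:

#include <mimalloc.h>

int main(void) {
  int* arr = NULL;
  // Grow arr to 16 elements; on failure arr is left untouched.
  if (mi_reallocarr(&arr, 16, sizeof(int)) != 0) return 1;
  arr[0] = 42;
  mi_free(arr);
  return 0;
}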
@@ -462,6 +498,8 @@ Functions
@@ -487,9 +525,7 @@ Functions
diff --git a/docs/group__posix.js b/docs/group__posix.js
index e43453d..50c248c 100644
--- a/docs/group__posix.js
+++ b/docs/group__posix.js
@@ -11,6 +11,7 @@ var group__posix =
[ "mi_memalign", "group__posix.html#gaab7fa71ea93b96873f5d9883db57d40e", null ],
[ "mi_posix_memalign", "group__posix.html#gacff84f226ba9feb2031b8992e5579447", null ],
[ "mi_pvalloc", "group__posix.html#gaeb325c39b887d3b90d85d1eb1712fb1e", null ],
+ [ "mi_reallocarr", "group__posix.html#ga7e1934d60a3e697950eeb48e042bfad5", null ],
[ "mi_reallocarray", "group__posix.html#ga48fad8648a2f1dab9c87ea9448a52088", null ],
[ "mi_valloc", "group__posix.html#ga73baaf5951f5165ba0763d0c06b6a93b", null ]
];
\ No newline at end of file
diff --git a/docs/group__typed.html b/docs/group__typed.html
index 5cbfbd6..3c00adb 100644
--- a/docs/group__typed.html
+++ b/docs/group__typed.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Typed Macros
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('group__typed.html','');});
-
@@ -105,48 +101,50 @@ $(document).ready(function(){initNavTree('group__typed.html','');});
-
Typed allocation macros.
+
Typed allocation macros.
More...
#define mi_malloc_tp (tp)
- Allocate a block of type tp . More...
+ Allocate a block of type tp . More...
#define mi_zalloc_tp (tp)
- Allocate a zero-initialized block of type tp . More...
+ Allocate a zero-initialized block of type tp . More...
#define mi_calloc_tp (tp, count)
- Allocate count zero-initialized blocks of type tp . More...
+ Allocate count zero-initialized blocks of type tp . More...
#define mi_mallocn_tp (tp, count)
- Allocate count blocks of type tp . More...
+ Allocate count blocks of type tp . More...
#define mi_reallocn_tp (p, tp, count)
- Re-allocate to count blocks of type tp . More...
+ Re-allocate to count blocks of type tp . More...
#define mi_heap_malloc_tp (hp, tp)
- Allocate a block of type tp in a heap hp . More...
+ Allocate a block of type tp in a heap hp . More...
#define mi_heap_zalloc_tp (hp, tp)
- Allocate a zero-initialized block of type tp in a heap hp . More...
+ Allocate a zero-initialized block of type tp in a heap hp . More...
#define mi_heap_calloc_tp (hp, tp, count)
- Allocate count zero-initialized blocks of type tp in a heap hp . More...
+ Allocate count zero-initialized blocks of type tp in a heap hp . More...
#define mi_heap_mallocn_tp (hp, tp, count)
- Allocate count blocks of type tp in a heap hp . More...
+ Allocate count blocks of type tp in a heap hp . More...
#define mi_heap_reallocn_tp (hp, p, tp, count)
- Re-allocate to count blocks of type tp in a heap hp . More...
+ Re-allocate to count blocks of type tp in a heap hp . More...
#define mi_heap_recalloc_tp (hp, p, tp, count)
- Re-allocate to count zero initialized blocks of type tp in a heap hp . More...
+ Re-allocate to count zero initialized blocks of type tp in a heap hp . More...
Typed allocation macros.
-
For example:
+
For example:
+
#define mi_malloc_tp(tp)
Allocate a block of type tp.
Definition: mimalloc-doc.h:692
+
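A small illustrative sketch (the point_t type is hypothetical) of the typed macros, which avoid repeating sizeof and casts:

#include <mimalloc.h>

typedef struct point_s { double x, y; } point_t;

int main(void) {
  point_t* p  = mi_malloc_tp(point_t);       // one point, uninitialized
  point_t* ps = mi_calloc_tp(point_t, 16);   // 16 zero-initialized points
  ps = mi_reallocn_tp(ps, point_t, 32);      // grow to 32 points
  mi_free(p);
  mi_free(ps);
  return 0;
}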
◆ mi_calloc_tp
@@ -417,7 +415,8 @@ Macros
Returns A pointer to an object of type tp , or NULL if out of memory.
-
Example:
See also mi_malloc()
+
Example:
See also mi_malloc()
@@ -512,9 +511,7 @@ Macros
diff --git a/docs/group__zeroinit.html b/docs/group__zeroinit.html
index 3c04a5a..41af9a2 100644
--- a/docs/group__zeroinit.html
+++ b/docs/group__zeroinit.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Zero initialized re-allocation
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('group__zeroinit.html','');});
-
@@ -105,7 +101,7 @@ $(document).ready(function(){initNavTree('group__zeroinit.html','');});
-
The zero-initialized re-allocations are only valid on memory that was originally allocated with zero initialization too.
+
The zero-initialized re-allocations are only valid on memory that was originally allocated with zero initialization too.
More...
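For instance, a minimal sketch of the rule stated above (mi_rezalloc is only valid on memory that started out zero-initialized, e.g. from mi_zalloc or mi_calloc):

#include <mimalloc.h>

int main(void) {
  int* a = (int*)mi_zalloc(100 * sizeof(int));  // zero-initialized from the start
  if (a == NULL) return 1;
  a = (int*)mi_rezalloc(a, 200 * sizeof(int));  // newly added bytes are zero too
  mi_free(a);
  return 0;
}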
diff --git a/docs/index.html b/docs/index.html
index 01af9be..6cc439d 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Main Page
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('index.html','');});
-
@@ -103,13 +99,14 @@ $(document).ready(function(){initNavTree('index.html','');});
This is the API documentation of the mimalloc allocator (pronounced "me-malloc") – a general purpose allocator with excellent performance characteristics. Initially developed by Daan Leijen for the run-time systems of the Koka and Lean languages.
-
It is a drop-in replacement for malloc
and can be used in other programs without code changes, for example, on Unix you can use it as:
> LD_PRELOAD=/usr/bin/libmimalloc.so myprogram
Notable aspects of the design include:
+
It is a drop-in replacement for malloc
and can be used in other programs without code changes, for example, on Unix you can use it as:
> LD_PRELOAD=/usr/bin/libmimalloc.so myprogram
+
Notable aspects of the design include:
small and consistent : the library is about 8k LOC using simple and consistent data structures. This makes it very suitable to integrate and adapt in other projects. For runtime systems it provides hooks for a monotonic heartbeat and deferred freeing (for bounded worst-case times with reference counting).
free list sharding : instead of one big free list (per size class) we have many smaller lists per "mimalloc page" which reduces fragmentation and increases locality – things that are allocated close in time get allocated close in memory. (A mimalloc page contains blocks of one size class and is usually 64KiB on a 64-bit system).
free list multi-sharding : the big idea! Not only do we shard the free list per mimalloc page, but for each page we have multiple free lists. In particular, there is one list for thread-local free
operations, and another one for concurrent free
operations. Free-ing from another thread can now be a single CAS without needing sophisticated coordination between threads. Since there will be thousands of separate free lists, contention is naturally distributed over the heap, and the chance of contending on a single location will be low – this is quite similar to randomized algorithms like skip lists where adding a random oracle removes the need for a more complex algorithm.
eager page reset : when a "page" becomes empty (with increased chance due to free list sharding) the memory is marked to the OS as unused ("reset" or "purged") reducing (real) memory pressure and fragmentation, especially in long running programs.
-secure : mimalloc can be build in secure mode, adding guard pages, randomized allocation, encrypted free lists, etc. to protect against various heap vulnerabilities. The performance penalty is only around 3% on average over our benchmarks.
+secure : mimalloc can be built in secure mode, adding guard pages, randomized allocation, encrypted free lists, etc. to protect against various heap vulnerabilities. The performance penalty is only around 5% on average over our benchmarks.
first-class heaps : efficiently create and use multiple heaps to allocate across different regions. A heap can be destroyed at once instead of deallocating each object separately.
bounded : it does not suffer from blowup [1], has bounded worst-case allocation times (wcat ), bounded space overhead (~0.2% meta-data, with at most 12.5% waste in allocation sizes), and has no internal points of contention using only atomic operations.
fast : In our benchmarks (see below ), mimalloc outperforms all other leading allocators (jemalloc , tcmalloc , Hoard , etc), and usually uses less memory (up to 25% more in the worst case). A nice property is that it does consistently well over a wide range of benchmarks.
@@ -138,9 +135,7 @@ $(document).ready(function(){initNavTree('index.html','');});
diff --git a/docs/jquery.js b/docs/jquery.js
index 1ee895c..103c32d 100644
--- a/docs/jquery.js
+++ b/docs/jquery.js
@@ -1,71 +1,26 @@
+/*! jQuery v3.4.1 | (c) JS Foundation and other contributors | jquery.org/license */
+[... minified jQuery 3.4.1 source ...]
diff --git a/docs/mimalloc-doc_8h_source.html b/docs/mimalloc-doc_8h_source.html
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
-
@@ -102,154 +98,463 @@ $(document).ready(function(){initNavTree('mimalloc-doc_8h_source.html','');});
mimalloc-doc.h
-
8 #error "documentation file only!"
122 void * mi_calloc (size_t count, size_t size);
148 void * mi_recalloc (void * p, size_t count, size_t size);
163 void * mi_expand (void * p, size_t newsize);
185 void * mi_reallocn (void * p, size_t count, size_t size);
239 char * mi_realpath (const char * fname, char * resolved_name);
253 #define MI_SMALL_SIZE_MAX (128*sizeof(void*))
465 void mi_process_info (size_t * elapsed_msecs, size_t * user_msecs, size_t * system_msecs, size_t * current_rss, size_t * peak_rss, size_t * current_commit, size_t * peak_commit, size_t * page_faults);
630 void * mi_recalloc (void * p, size_t newcount, size_t size);
667 #define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp)))
670 #define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp)))
673 #define mi_calloc_tp(tp,count) ((tp*)mi_calloc(count,sizeof(tp)))
676 #define mi_mallocn_tp(tp,count) ((tp*)mi_mallocn(count,sizeof(tp)))
679 #define mi_reallocn_tp(p,tp,count) ((tp*)mi_reallocn(p,count,sizeof(tp)))
682 #define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp)))
685 #define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp)))
688 #define mi_heap_calloc_tp(hp,tp,count) ((tp*)mi_heap_calloc(hp,count,sizeof(tp)))
691 #define mi_heap_mallocn_tp(hp,tp,count) ((tp*)mi_heap_mallocn(hp,count,sizeof(tp)))
694 #define mi_heap_reallocn_tp(hp,p,tp,count) ((tp*)mi_heap_reallocn(p,count,sizeof(tp)))
697 #define mi_heap_recalloc_tp(hp,p,tp,count) ((tp*)mi_heap_recalloc(p,count,sizeof(tp)))
736 typedef struct mi_heap_area_s {
775 typedef enum mi_option_e {
817 void * mi_recalloc (void * p, size_t count, size_t size);
853 void * mi_new (std::size_t n) noexcept(false);
856 void * mi_new_n (size_t count, size_t size) noexcept(false);
859 void * mi_new_aligned (std::size_t n, std::align_val_t alignment) noexcept(false);
size_t mi_usable_size(void *p)
Return the available bytes in a memory block.
-
void * mi_new_nothrow(size_t n)
like mi_malloc, but when out of memory, use std::get_new_handler but return NULL on failure.
-
void * mi_reallocn(void *p, size_t count, size_t size)
Re-allocate memory to count elements of size bytes.
-
void * mi_malloc_aligned(size_t size, size_t alignment)
Allocate size bytes aligned by alignment.
-
void * mi_recalloc_aligned_at(void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
-
void mi_stats_reset(void)
Reset statistics.
-
void * mi_heap_realloc_aligned(mi_heap_t *heap, void *p, size_t newsize, size_t alignment)
-
bool mi_option_is_enabled(mi_option_t option)
-
void * mi_new_realloc(void *p, size_t newsize)
like mi_realloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exceptio...
-
void * mi_recalloc(void *p, size_t count, size_t size)
Re-allocate memory to count elements of size bytes, with extra memory initialized to zero.
-
void * mi_mallocn(size_t count, size_t size)
Allocate count elements of size bytes.
-
size_t mi_malloc_size(const void *p)
-
void mi_option_set_enabled(mi_option_t option, bool enable)
-
int mi_posix_memalign(void **p, size_t alignment, size_t size)
-
void mi_stats_merge(void)
Merge thread local statistics with the main statistics and reset.
-
void * mi_new_n(size_t count, size_t size) noexcept(false)
like mi_mallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exceptio...
-
void mi_option_set_default(mi_option_t option, long value)
-
void mi_stats_print_out(mi_output_fun *out, void *arg)
Print the main statistics.
-
void() mi_error_fun(int err, void *arg)
Type of error callback functions.
Definition: mimalloc-doc.h:391
-
void * mi_rezalloc(void *p, size_t newsize)
-
Eagerly commit segments (4MiB) (enabled by default).
Definition: mimalloc-doc.h:781
-
void * mi_heap_zalloc(mi_heap_t *heap, size_t size)
Allocate zero-initialized in a specific heap.
-
void mi_option_set(mi_option_t option, long value)
-
Eagerly commit large (256MiB) memory regions (enabled by default, except on Windows)
Definition: mimalloc-doc.h:782
-
void mi_cfree(void *p)
Just as free but also checks if the pointer p belongs to our heap.
-
void * mi_recalloc_aligned(void *p, size_t newcount, size_t size, size_t alignment)
-
Definition: mimalloc-doc.h:793
-
void * mi_realloc_aligned_at(void *p, size_t newsize, size_t alignment, size_t offset)
-
void * blocks
start of the area containing heap blocks
Definition: mimalloc-doc.h:737
-
void * mi_realloc_aligned(void *p, size_t newsize, size_t alignment)
-
void mi_option_enable(mi_option_t option)
-
int mi__posix_memalign(void **p, size_t alignment, size_t size)
-
void mi_free(void *p)
Free previously allocated memory.
-
char * mi_heap_strdup(mi_heap_t *heap, const char *s)
Duplicate a string in a specific heap.
-
char * mi_heap_realpath(mi_heap_t *heap, const char *fname, char *resolved_name)
Resolve a file path name using a specific heap to allocate the result.
-
void * mi_heap_calloc_aligned_at(mi_heap_t *heap, size_t count, size_t size, size_t alignment, size_t offset)
-
void mi_process_info(size_t *elapsed_msecs, size_t *user_msecs, size_t *system_msecs, size_t *current_rss, size_t *peak_rss, size_t *current_commit, size_t *peak_commit, size_t *page_faults)
Return process information (time and memory usage).
-
void * mi_calloc_aligned(size_t count, size_t size, size_t alignment)
-
void * mi_heap_zalloc_aligned(mi_heap_t *heap, size_t size, size_t alignment)
-
void * mi_zalloc_small(size_t size)
Allocate a zero initialized small object.
-
char * mi_strndup(const char *s, size_t n)
Allocate and duplicate a string up to n bytes.
-
void * mi_expand(void *p, size_t newsize)
Try to re-allocate memory to newsize bytes in place.
-
void * mi_pvalloc(size_t size)
-
void mi_option_set_enabled_default(mi_option_t option, bool enable)
-
void * mi_heap_rezalloc_aligned_at(mi_heap_t *heap, void *p, size_t newsize, size_t alignment, size_t offset)
-
void * mi_zalloc(size_t size)
Allocate zero-initialized size bytes.
-
void * mi_heap_rezalloc(mi_heap_t *heap, void *p, size_t newsize)
-
The number of segments per thread to keep cached.
Definition: mimalloc-doc.h:785
-
void * mi_heap_calloc(mi_heap_t *heap, size_t count, size_t size)
Allocate count zero-initialized elements in a specific heap.
-
void * mi_heap_calloc_aligned(mi_heap_t *heap, size_t count, size_t size, size_t alignment)
-
bool mi_is_redirected()
Is the C runtime malloc API redirected?
-
size_t block_size
size in bytes of one block
Definition: mimalloc-doc.h:741
-
void * mi_reallocarray(void *p, size_t count, size_t size)
-
int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs)
Reserve pages of huge OS pages (1GiB) evenly divided over numa_nodes nodes, but stops after at most t...
-
void() mi_deferred_free_fun(bool force, unsigned long long heartbeat, void *arg)
Type of deferred free functions.
Definition: mimalloc-doc.h:352
-
bool mi_is_in_heap_region(const void *p)
Is a pointer part of our heap?
-
void * mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false)
like mi_malloc_aligned(), but when out of memory, use std::get_new_handler and raise std::bad_alloc e...
-
void * mi_realloc(void *p, size_t newsize)
Re-allocate memory to newsize bytes.
-
The number of huge OS pages (1GiB in size) to reserve at the start of the program.
Definition: mimalloc-doc.h:784
-
void * mi_heap_reallocf(mi_heap_t *heap, void *p, size_t newsize)
-
void mi_free_size_aligned(void *p, size_t size, size_t alignment)
-
void * mi_rezalloc_aligned_at(void *p, size_t newsize, size_t alignment, size_t offset)
-
Reset page memory after mi_option_reset_delay milliseconds when it becomes free.
Definition: mimalloc-doc.h:786
-
void mi_thread_done(void)
Uninitialize mimalloc on a thread.
-
bool mi_heap_visit_blocks(const mi_heap_t *heap, bool visit_all_blocks, mi_block_visit_fun *visitor, void *arg)
Visit all areas and blocks in a heap.
-
Pretend there are at most N NUMA nodes.
Definition: mimalloc-doc.h:789
-
void * mi_malloc(size_t size)
Allocate size bytes.
-
void mi_register_error(mi_error_fun *errfun, void *arg)
Register an error callback function.
-
Experimental.
Definition: mimalloc-doc.h:790
-
char * mi_heap_strndup(mi_heap_t *heap, const char *s, size_t n)
Duplicate a string of at most length n in a specific heap.
-
bool() mi_block_visit_fun(const mi_heap_t *heap, const mi_heap_area_t *area, void *block, size_t block_size, void *arg)
Visitor function passed to mi_heap_visit_blocks()
Definition: mimalloc-doc.h:751
-
void * mi_heap_recalloc(mi_heap_t *heap, void *p, size_t newcount, size_t size)
-
void * mi_heap_malloc_aligned_at(mi_heap_t *heap, size_t size, size_t alignment, size_t offset)
-
char * mi_realpath(const char *fname, char *resolved_name)
Resolve a file path name.
-
Print error messages to stderr.
Definition: mimalloc-doc.h:777
-
Experimental.
Definition: mimalloc-doc.h:787
-
void * mi_heap_rezalloc_aligned(mi_heap_t *heap, void *p, size_t newsize, size_t alignment)
-
void * mi_new_aligned_nothrow(size_t n, size_t alignment)
like mi_malloc_aligned, but when out of memory, use std::get_new_handler but return NULL on failure.
-
void * mi_memalign(size_t alignment, size_t size)
-
void * mi_rezalloc_aligned(void *p, size_t newsize, size_t alignment)
-
bool mi_heap_contains_block(mi_heap_t *heap, const void *p)
Does a heap contain a pointer to a previously allocated block?
-
void mi_heap_collect(mi_heap_t *heap, bool force)
Release outstanding resources in a specific heap.
-
void * mi_heap_recalloc_aligned_at(mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
-
Print verbose messages to stderr.
Definition: mimalloc-doc.h:779
-
void * mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset)
-
void * mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset)
Allocate size bytes aligned by alignment at a specified offset.
-
void mi_heap_delete(mi_heap_t *heap)
Delete a previously allocated heap.
-
OS tag to assign to mimalloc'd memory.
Definition: mimalloc-doc.h:792
-
mi_heap_t * mi_heap_get_default()
Get the default heap that is used for mi_malloc() et al.
-
int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs)
Reserve pages of huge OS pages (1GiB) at a specific numa_node, but stops after at most timeout_msecs ...
-
void mi_option_disable(mi_option_t option)
-
void * mi_aligned_alloc(size_t alignment, size_t size)
-
void * mi_valloc(size_t size)
-
void mi_thread_init(void)
Initialize mimalloc on a thread.
-
size_t mi_good_size(size_t size)
Return the used allocation size.
-
void mi_stats_print(void *out)
Deprecated.
-
Experimental.
Definition: mimalloc-doc.h:791
-
void * mi_heap_recalloc_aligned(mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment)
-
void * mi_heap_mallocn(mi_heap_t *heap, size_t count, size_t size)
Allocate count elements in a specific heap.
-
An area of heap space contains blocks of a single size.
Definition: mimalloc-doc.h:736
-
void mi_thread_stats_print_out(mi_output_fun *out, void *arg)
Print out heap statistics for this thread.
-
Print statistics to stderr when the program is done.
Definition: mimalloc-doc.h:778
-
void * mi_zalloc_aligned(size_t size, size_t alignment)
-
size_t reserved
bytes reserved for this area
Definition: mimalloc-doc.h:738
-
struct mi_heap_s mi_heap_t
Type of first-class heaps.
Definition: mimalloc-doc.h:529
-
size_t used
bytes in use by allocated blocks
Definition: mimalloc-doc.h:740
-
void mi_register_deferred_free(mi_deferred_free_fun *deferred_free, void *arg)
Register a deferred free function.
-
void mi_free_size(void *p, size_t size)
-
void mi_collect(bool force)
Eagerly free memory.
-
void * mi_new_reallocn(void *p, size_t newcount, size_t size)
like mi_reallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc excepti...
-
void mi_heap_destroy(mi_heap_t *heap)
Destroy a heap, freeing all its still allocated blocks.
-
void * mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset)
-
Use large OS pages (2MiB in size) if possible.
Definition: mimalloc-doc.h:783
-
void * mi_heap_reallocn(mi_heap_t *heap, void *p, size_t count, size_t size)
-
void mi_register_output(mi_output_fun *out, void *arg)
Register an output function.
-
std::allocator implementation for mimalloc for use in STL containers.
Definition: mimalloc-doc.h:880
-
void * mi_heap_malloc_small(mi_heap_t *heap, size_t size)
Allocate a small object in a specific heap.
-
void * mi_heap_realloc(mi_heap_t *heap, void *p, size_t newsize)
-
size_t mi_malloc_usable_size(const void *p)
-
void() mi_output_fun(const char *msg, void *arg)
Type of output functions.
Definition: mimalloc-doc.h:376
-
char * mi_strdup(const char *s)
Allocate and duplicate a string.
-
void * mi_heap_realloc_aligned_at(mi_heap_t *heap, void *p, size_t newsize, size_t alignment, size_t offset)
-
void * mi_reallocf(void *p, size_t newsize)
Re-allocate memory to newsize bytes,.
-
void * mi_calloc(size_t count, size_t size)
Allocate zero-initialized count elements of size bytes.
-
void * mi_heap_zalloc_aligned_at(mi_heap_t *heap, size_t size, size_t alignment, size_t offset)
-
void * mi_malloc_small(size_t size)
Allocate a small object.
-
bool mi_check_owned(const void *p)
Check safely if any pointer is part of the default heap of this thread.
-
void * mi_heap_malloc_aligned(mi_heap_t *heap, size_t size, size_t alignment)
-
long mi_option_get(mi_option_t option)
-
mi_heap_t * mi_heap_get_backing()
Get the backing heap.
-
void mi_free_aligned(void *p, size_t alignment)
-
void * mi_new(std::size_t n) noexcept(false)
like mi_malloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception...
-
Delay in milli-seconds before resetting a page (100ms by default)
Definition: mimalloc-doc.h:788
-
mi_heap_t * mi_heap_new()
Create a new heap that can be used for allocation.
-
void * mi_heap_malloc(mi_heap_t *heap, size_t size)
Allocate in a specific heap.
-
size_t committed
current committed bytes of this area
Definition: mimalloc-doc.h:739
-
mi_option_t
Runtime options.
Definition: mimalloc-doc.h:775
-
bool mi_heap_check_owned(mi_heap_t *heap, const void *p)
Check safely if any pointer is part of a heap.
-
mi_heap_t * mi_heap_set_default(mi_heap_t *heap)
Set the default heap to use for mi_malloc() et al.
+
8 #error "documentation file only!"
+
253 #define MI_SMALL_SIZE_MAX (128*sizeof(void*))
+
437 bool mi_manage_os_memory (void * start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node);
+
487 void mi_process_info (size_t * elapsed_msecs, size_t * user_msecs, size_t * system_msecs, size_t * current_rss, size_t * peak_rss, size_t * current_commit, size_t * peak_commit, size_t * page_faults);
+
502 #define MI_ALIGNMENT_MAX (1024*1024UL)
+
655 void * mi_recalloc (void * p, size_t newcount, size_t size);
+
692 #define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp)))
+
+
695 #define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp)))
+
+
698 #define mi_calloc_tp(tp,count) ((tp*)mi_calloc(count,sizeof(tp)))
+
+
701 #define mi_mallocn_tp(tp,count) ((tp*)mi_mallocn(count,sizeof(tp)))
+
+
704 #define mi_reallocn_tp(p,tp,count) ((tp*)mi_reallocn(p,count,sizeof(tp)))
+
+
707 #define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp)))
+
+
710 #define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp)))
+
+
713 #define mi_heap_calloc_tp(hp,tp,count) ((tp*)mi_heap_calloc(hp,count,sizeof(tp)))
+
+
716 #define mi_heap_mallocn_tp(hp,tp,count) ((tp*)mi_heap_mallocn(hp,count,sizeof(tp)))
+
+
719 #define mi_heap_reallocn_tp(hp,p,tp,count) ((tp*)mi_heap_reallocn(p,count,sizeof(tp)))
+
+
722 #define mi_heap_recalloc_tp(hp,p,tp,count) ((tp*)mi_heap_recalloc(p,count,sizeof(tp)))
+
761 typedef struct mi_heap_area_s {
+
800 typedef enum mi_option_e {
+
843 void * mi_recalloc (void * p, size_t count, size_t size);
+
885 void * mi_new (std::size_t n) noexcept(false);
+
+
888 void * mi_new_n (size_t count, size_t size) noexcept(false);
+
void * mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset)
+
void * mi_zalloc_aligned(size_t size, size_t alignment)
+
void * mi_realloc_aligned(void *p, size_t newsize, size_t alignment)
+
void * mi_calloc_aligned(size_t count, size_t size, size_t alignment)
+
void * mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset)
Allocate size bytes aligned by alignment at a specified offset.
+
void * mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset)
+
void * mi_malloc_aligned(size_t size, size_t alignment)
Allocate size bytes aligned by alignment.
+
void * mi_realloc_aligned_at(void *p, size_t newsize, size_t alignment, size_t offset)
+
size_t block_size
size in bytes of one block
Definition: mimalloc-doc.h:766
+
size_t committed
current committed bytes of this area
Definition: mimalloc-doc.h:764
+
size_t used
bytes in use by allocated blocks
Definition: mimalloc-doc.h:765
+
void * blocks
start of the area containing heap blocks
Definition: mimalloc-doc.h:762
+
size_t reserved
bytes reserved for this area
Definition: mimalloc-doc.h:763
+
bool mi_heap_check_owned(mi_heap_t *heap, const void *p)
Check safely if any pointer is part of a heap.
+
bool mi_check_owned(const void *p)
Check safely if any pointer is part of the default heap of this thread.
+
bool mi_heap_visit_blocks(const mi_heap_t *heap, bool visit_all_blocks, mi_block_visit_fun *visitor, void *arg)
Visit all areas and blocks in a heap.
+
bool mi_heap_contains_block(mi_heap_t *heap, const void *p)
Does a heap contain a pointer to a previously allocated block?
+
bool() mi_block_visit_fun(const mi_heap_t *heap, const mi_heap_area_t *area, void *block, size_t block_size, void *arg)
Visitor function passed to mi_heap_visit_blocks()
Definition: mimalloc-doc.h:776
+
An area of heap space contains blocks of a single size.
Definition: mimalloc-doc.h:761
+
void * mi_new_reallocn(void *p, size_t newcount, size_t size)
like mi_reallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc excepti...
+
void * mi_new_realloc(void *p, size_t newsize)
like mi_realloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exceptio...
+
void * mi_new(std::size_t n) noexcept(false)
like mi_malloc(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exception...
+
void * mi_new_aligned_nothrow(size_t n, size_t alignment)
like mi_malloc_aligned, but when out of memory, use std::get_new_handler but return NULL on failure.
+
void * mi_new_n(size_t count, size_t size) noexcept(false)
like mi_mallocn(), but when out of memory, use std::get_new_handler and raise std::bad_alloc exceptio...
+
void * mi_new_nothrow(size_t n)
like mi_malloc, but when out of memory, use std::get_new_handler but return NULL on failure.
+
void * mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false)
like mi_malloc_aligned(), but when out of memory, use std::get_new_handler and raise std::bad_alloc e...
+
std::allocator implementation for mimalloc for use in STL containers.
Definition: mimalloc-doc.h:912
+
int mi_reserve_os_memory(size_t size, bool commit, bool allow_large)
Reserve OS memory for use by mimalloc.
+
size_t mi_usable_size(void *p)
Return the available bytes in a memory block.
+
void mi_thread_done(void)
Uninitialize mimalloc on a thread.
+
void * mi_zalloc_small(size_t size)
Allocate a zero initialized small object.
+
void() mi_error_fun(int err, void *arg)
Type of error callback functions.
Definition: mimalloc-doc.h:391
+
void() mi_deferred_free_fun(bool force, unsigned long long heartbeat, void *arg)
Type of deferred free functions.
Definition: mimalloc-doc.h:352
+
void mi_stats_print(void *out)
Deprecated.
+
int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs)
Reserve pages of huge OS pages (1GiB) evenly divided over numa_nodes nodes, but stops after at most timeout_msecs milliseconds.
+
void mi_register_deferred_free(mi_deferred_free_fun *deferred_free, void *arg)
Register a deferred free function.
+
void mi_stats_reset(void)
Reset statistics.
+
void mi_collect(bool force)
Eagerly free memory.
+
bool mi_manage_os_memory(void *start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node)
Manage a particular memory area for use by mimalloc.
+
void mi_stats_print_out(mi_output_fun *out, void *arg)
Print the main statistics.
+
bool mi_is_in_heap_region(const void *p)
Is a pointer part of our heap?
+
void * mi_malloc_small(size_t size)
Allocate a small object.
+
int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs)
Reserve pages of huge OS pages (1GiB) at a specific numa_node, but stops after at most timeout_msecs milliseconds.
+
void mi_process_info(size_t *elapsed_msecs, size_t *user_msecs, size_t *system_msecs, size_t *current_rss, size_t *peak_rss, size_t *current_commit, size_t *peak_commit, size_t *page_faults)
Return process information (time and memory usage).
+
void mi_stats_merge(void)
Merge thread local statistics with the main statistics and reset.
+
void mi_register_error(mi_error_fun *errfun, void *arg)
Register an error callback function.
+
bool mi_is_redirected()
Is the C runtime malloc API redirected?
+
void mi_thread_stats_print_out(mi_output_fun *out, void *arg)
Print out heap statistics for this thread.
+
size_t mi_good_size(size_t size)
Return the used allocation size.
+
void() mi_output_fun(const char *msg, void *arg)
Type of output functions.
Definition: mimalloc-doc.h:376
+
void mi_register_output(mi_output_fun *out, void *arg)
Register an output function.
+
void mi_thread_init(void)
Initialize mimalloc on a thread.
+
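The registration entries above combine naturally; a small sketch (the callback names my_output and my_error are hypothetical) that routes mimalloc messages, errors, and statistics through user callbacks:

#include <mimalloc.h>
#include <stdio.h>

// Hypothetical output callback matching mi_output_fun.
static void my_output(const char* msg, void* arg) {
  (void)arg;
  fputs(msg, stderr);
}

// Hypothetical error callback matching mi_error_fun.
static void my_error(int err, void* arg) {
  (void)arg;
  fprintf(stderr, "mimalloc reported error %d\n", err);
}

int main(void) {
  mi_register_output(&my_output, NULL);
  mi_register_error(&my_error, NULL);
  mi_stats_print_out(&my_output, NULL);  // print the main statistics via the callback
  return 0;
}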
char * mi_heap_realpath(mi_heap_t *heap, const char *fname, char *resolved_name)
Resolve a file path name using a specific heap to allocate the result.
+
void * mi_heap_calloc_aligned_at(mi_heap_t *heap, size_t count, size_t size, size_t alignment, size_t offset)
+
char * mi_heap_strdup(mi_heap_t *heap, const char *s)
Duplicate a string in a specific heap.
+
void * mi_heap_malloc_aligned_at(mi_heap_t *heap, size_t size, size_t alignment, size_t offset)
+
void mi_heap_delete(mi_heap_t *heap)
Delete a previously allocated heap.
+
struct mi_heap_s mi_heap_t
Type of first-class heaps.
Definition: mimalloc-doc.h:554
+
void * mi_heap_zalloc_aligned_at(mi_heap_t *heap, size_t size, size_t alignment, size_t offset)
+
void * mi_heap_reallocf(mi_heap_t *heap, void *p, size_t newsize)
+
void * mi_heap_calloc_aligned(mi_heap_t *heap, size_t count, size_t size, size_t alignment)
+
mi_heap_t * mi_heap_get_backing()
Get the backing heap.
+
mi_heap_t * mi_heap_new()
Create a new heap that can be used for allocation.
+
void mi_heap_collect(mi_heap_t *heap, bool force)
Release outstanding resources in a specific heap.
+
void * mi_heap_mallocn(mi_heap_t *heap, size_t count, size_t size)
Allocate count elements in a specific heap.
+
mi_heap_t * mi_heap_get_default()
Get the default heap that is used for mi_malloc() et al.
+
char * mi_heap_strndup(mi_heap_t *heap, const char *s, size_t n)
Duplicate a string of at most length n in a specific heap.
+
void * mi_heap_zalloc(mi_heap_t *heap, size_t size)
Allocate zero-initialized in a specific heap.
+
void * mi_heap_malloc(mi_heap_t *heap, size_t size)
Allocate in a specific heap.
+
void mi_heap_destroy(mi_heap_t *heap)
Destroy a heap, freeing all its still allocated blocks.
+
void * mi_heap_malloc_small(mi_heap_t *heap, size_t size)
Allocate a small object in a specific heap.
+
void * mi_heap_zalloc_aligned(mi_heap_t *heap, size_t size, size_t alignment)
+
void * mi_heap_calloc(mi_heap_t *heap, size_t count, size_t size)
Allocate count zero-initialized elements in a specific heap.
+
void * mi_heap_realloc(mi_heap_t *heap, void *p, size_t newsize)
+
void * mi_heap_malloc_aligned(mi_heap_t *heap, size_t size, size_t alignment)
+
mi_heap_t * mi_heap_set_default(mi_heap_t *heap)
Set the default heap to use for mi_malloc() et al.
+
void * mi_heap_reallocn(mi_heap_t *heap, void *p, size_t count, size_t size)
+
void * mi_heap_realloc_aligned_at(mi_heap_t *heap, void *p, size_t newsize, size_t alignment, size_t offset)
+
void * mi_heap_realloc_aligned(mi_heap_t *heap, void *p, size_t newsize, size_t alignment)
+
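A minimal sketch of the first-class-heap lifecycle using only the heap entries above (error checks elided):

#include <mimalloc.h>

int main(void) {
  mi_heap_t* heap = mi_heap_new();              // create a first-class heap
  char* s   = mi_heap_strdup(heap, "mimalloc"); // allocate inside it
  void* buf = mi_heap_zalloc(heap, 128);
  (void)s; (void)buf;
  mi_heap_destroy(heap);  // destroy the heap, freeing all its still-allocated blocks
  return 0;
}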
char * mi_realpath(const char *fname, char *resolved_name)
Resolve a file path name.
+
void * mi_mallocn(size_t count, size_t size)
Allocate count elements of size bytes.
+
void * mi_recalloc(void *p, size_t count, size_t size)
Re-allocate memory to count elements of size bytes, with extra memory initialized to zero.
+
void * mi_malloc(size_t size)
Allocate size bytes.
+
void * mi_reallocn(void *p, size_t count, size_t size)
Re-allocate memory to count elements of size bytes.
+
void * mi_calloc(size_t count, size_t size)
Allocate zero-initialized count elements of size bytes.
+
char * mi_strndup(const char *s, size_t n)
Allocate and duplicate a string up to n bytes.
+
void * mi_expand(void *p, size_t newsize)
Try to re-allocate memory to newsize bytes in place.
+
char * mi_strdup(const char *s)
Allocate and duplicate a string.
+
void * mi_realloc(void *p, size_t newsize)
Re-allocate memory to newsize bytes.
+
void mi_free(void *p)
Free previously allocated memory.
+
void * mi_zalloc(size_t size)
Allocate zero-initialized size bytes.
+
void * mi_reallocf(void *p, size_t newsize)
Re-allocate memory to newsize bytes, freeing p if the re-allocation fails.
+
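A short sketch of the basic allocation entries above (error handling elided for brevity):

#include <mimalloc.h>

int main(void) {
  int* a = (int*)mi_mallocn(10, sizeof(int));  // 10 elements of sizeof(int) bytes
  a = (int*)mi_reallocn(a, 20, sizeof(int));   // grow to 20 elements
  char* s = mi_strndup("mimalloc", 4);         // duplicates "mima"
  mi_free(s);
  mi_free(a);
  return 0;
}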
void mi_option_enable(mi_option_t option)
+
bool mi_option_is_enabled(mi_option_t option)
+
void mi_option_set_enabled_default(mi_option_t option, bool enable)
+
long mi_option_get(mi_option_t option)
+
void mi_option_set_default(mi_option_t option, long value)
+
void mi_option_set_enabled(mi_option_t option, bool enable)
+
void mi_option_disable(mi_option_t option)
+
void mi_option_set(mi_option_t option, long value)
+
mi_option_t
Runtime options.
Definition: mimalloc-doc.h:800
+
@ mi_option_show_stats
Print statistics to stderr when the program is done.
Definition: mimalloc-doc.h:803
+
@ mi_option_use_numa_nodes
Pretend there are at most N NUMA nodes.
Definition: mimalloc-doc.h:815
+
@ mi_option_reset_delay
Delay in milliseconds before resetting a page (100ms by default)
Definition: mimalloc-doc.h:814
+
@ mi_option_eager_commit_delay
Experimental.
Definition: mimalloc-doc.h:817
+
@ mi_option_eager_commit
Eagerly commit segments (4MiB) (enabled by default).
Definition: mimalloc-doc.h:806
+
@ mi_option_segment_cache
The number of segments per thread to keep cached.
Definition: mimalloc-doc.h:811
+
@ mi_option_eager_region_commit
Eagerly commit large (256MiB) memory regions (enabled by default, except on Windows)
Definition: mimalloc-doc.h:807
+
@ mi_option_large_os_pages
Use large OS pages (2MiB in size) if possible.
Definition: mimalloc-doc.h:808
+
@ mi_option_os_tag
OS tag to assign to mimalloc'd memory.
Definition: mimalloc-doc.h:818
+
@ _mi_option_last
Definition: mimalloc-doc.h:819
+
@ mi_option_verbose
Print verbose messages to stderr.
Definition: mimalloc-doc.h:804
+
@ mi_option_reserve_huge_os_pages_at
Reserve huge OS pages at node N.
Definition: mimalloc-doc.h:810
+
@ mi_option_reset_decommits
Experimental.
Definition: mimalloc-doc.h:816
+
@ mi_option_reserve_huge_os_pages
The number of huge OS pages (1GiB in size) to reserve at the start of the program.
Definition: mimalloc-doc.h:809
+
@ mi_option_page_reset
Reset page memory after mi_option_reset_delay milliseconds when it becomes free.
Definition: mimalloc-doc.h:812
+
@ mi_option_segment_reset
Experimental.
Definition: mimalloc-doc.h:813
+
@ mi_option_show_errors
Print error messages to stderr.
Definition: mimalloc-doc.h:802
+
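Options are typically read or adjusted early in main; a minimal sketch using only the option entries above:

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  mi_option_enable(mi_option_show_stats);      // print statistics when the program is done
  mi_option_set(mi_option_use_numa_nodes, 2);  // pretend there are at most 2 NUMA nodes
  if (mi_option_is_enabled(mi_option_verbose)) {
    printf("verbose messages are on (value: %ld)\n", mi_option_get(mi_option_verbose));
  }
  return 0;
}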
size_t mi_malloc_usable_size(const void *p)
+
void mi_free_aligned(void *p, size_t alignment)
+
void * mi_aligned_alloc(size_t alignment, size_t size)
+
size_t mi_malloc_size(const void *p)
+
void * mi_reallocarray(void *p, size_t count, size_t size)
Corresponds to reallocarray in FreeBSD.
+
void mi_cfree(void *p)
Just like free(), but also checks if the pointer p belongs to our heap.
+
void mi_free_size_aligned(void *p, size_t size, size_t alignment)
+
void * mi_valloc(size_t size)
+
int mi_reallocarr(void *p, size_t count, size_t size)
Corresponds to reallocarr in NetBSD.
+
void * mi_memalign(size_t alignment, size_t size)
+
int mi_posix_memalign(void **p, size_t alignment, size_t size)
+
int mi__posix_memalign(void **p, size_t alignment, size_t size)
+
void mi_free_size(void *p, size_t size)
+
void * mi_pvalloc(size_t size)
+
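A brief sketch of the POSIX-compatible entries above:

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  void* p = NULL;
  if (mi_posix_memalign(&p, 64, 200) == 0) {  // 200 bytes at 64-byte alignment
    printf("usable size: %zu\n", mi_malloc_usable_size(p));
    mi_free(p);
  }
  return 0;
}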
void * mi_heap_rezalloc_aligned(mi_heap_t *heap, void *p, size_t newsize, size_t alignment)
+
void * mi_recalloc_aligned(void *p, size_t newcount, size_t size, size_t alignment)
+
void * mi_heap_recalloc_aligned_at(mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
+
void * mi_recalloc_aligned_at(void *p, size_t newcount, size_t size, size_t alignment, size_t offset)
+
void * mi_heap_recalloc(mi_heap_t *heap, void *p, size_t newcount, size_t size)
+
void * mi_rezalloc(void *p, size_t newsize)
+
void * mi_heap_recalloc_aligned(mi_heap_t *heap, void *p, size_t newcount, size_t size, size_t alignment)
+
void * mi_heap_rezalloc_aligned_at(mi_heap_t *heap, void *p, size_t newsize, size_t alignment, size_t offset)
+
void * mi_rezalloc_aligned(void *p, size_t newsize, size_t alignment)
+
void * mi_heap_rezalloc(mi_heap_t *heap, void *p, size_t newsize)
+
void * mi_rezalloc_aligned_at(void *p, size_t newsize, size_t alignment, size_t offset)
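And a sketch of the zero-initialized re-allocation entries above, where newly added memory is zeroed (the direct re-assignment leaks on failure; real code would check the result):

#include <mimalloc.h>

int main(void) {
  int* a = (int*)mi_calloc(8, sizeof(int));   // 8 zero-initialized ints
  a = (int*)mi_recalloc(a, 16, sizeof(int));  // grow; the extra 8 ints are zeroed too
  void* b = mi_zalloc(32);
  b = mi_rezalloc(b, 64);                     // the extra 32 bytes are zero-initialized
  mi_free(b);
  mi_free(a);
  return 0;
}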
diff --git a/docs/modules.html b/docs/modules.html
index 9858d6a..0129057 100644
--- a/docs/modules.html
+++ b/docs/modules.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Modules
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('modules.html','');});
-
@@ -121,9 +117,7 @@ $(document).ready(function(){initNavTree('modules.html','');});
diff --git a/docs/navtree.css b/docs/navtree.css
index 046366c..a270571 100644
--- a/docs/navtree.css
+++ b/docs/navtree.css
@@ -67,7 +67,7 @@
#nav-tree {
padding: 0px 0px;
- background-color: #FAFAFF;
+ background-color: #FAFAFF;
font-size:14px;
overflow:auto;
}
@@ -143,4 +143,3 @@
#nav-tree { display: none; }
div.ui-resizable-handle { display: none; position: relative; }
}
-
diff --git a/docs/navtree.js b/docs/navtree.js
index 7ce2935..1e272d3 100644
--- a/docs/navtree.js
+++ b/docs/navtree.js
@@ -1,25 +1,26 @@
/*
- @licstart The following is the entire license notice for the
- JavaScript code in this file.
+ @licstart The following is the entire license notice for the JavaScript code in this file.
- Copyright (C) 1997-2017 by Dimitri van Heesch
+ The MIT License (MIT)
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+ Copyright (C) 1997-2020 by Dimitri van Heesch
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software
+ and associated documentation files (the "Software"), to deal in the Software without restriction,
+ including without limitation the rights to use, copy, modify, merge, publish, distribute,
+ sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ The above copyright notice and this permission notice shall be included in all copies or
+ substantial portions of the Software.
- @licend The above is the entire license notice
- for the JavaScript code in this file
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ @licend The above is the entire license notice for the JavaScript code in this file
*/
var navTreeSubIndices = new Array();
var arrowDown = '▼';
@@ -70,7 +71,6 @@ function localStorageSupported()
}
}
-
function storeLink(link)
{
if (!$("#nav-sync").hasClass('sync') && localStorageSupported()) {
@@ -102,14 +102,6 @@ function getScript(scriptName,func,show)
script.type = 'text/javascript';
script.onload = func;
script.src = scriptName+'.js';
- if ($.browser.msie && $.browser.version<=8) {
- // script.onload does not work with older versions of IE
- script.onreadystatechange = function() {
- if (script.readyState=='complete' || script.readyState=='loaded') {
- func(); if (show) showRoot();
- }
- }
- }
head.appendChild(script);
}
@@ -153,6 +145,7 @@ function gotoAnchor(anchor,aname,updateLocation)
var pos, docContent = $('#doc-content');
var ancParent = $(anchor.parent());
if (ancParent.hasClass('memItemLeft') ||
+ ancParent.hasClass('memtitle') ||
ancParent.hasClass('fieldname') ||
ancParent.hasClass('fieldtype') ||
ancParent.is(':header'))
@@ -265,7 +258,7 @@ function showRoot()
(function (){ // retry until we can scroll to the selected item
try {
var navtree=$('#nav-tree');
- navtree.scrollTo('#selected',0,{offset:-windowHeight/2});
+ navtree.scrollTo('#selected',100,{offset:-windowHeight/2});
} catch (err) {
setTimeout(arguments.callee, 0);
}
@@ -284,12 +277,8 @@ function expandNode(o, node, imm, showRoot)
} else {
if (!node.childrenVisited) {
getNode(o, node);
- } if (imm || ($.browser.msie && $.browser.version>8)) {
- // somehow slideDown jumps to the start of tree for IE9 :-(
- $(node.getChildrenUL()).show();
- } else {
- $(node.getChildrenUL()).slideDown("fast");
}
+ $(node.getChildrenUL()).slideDown("fast");
node.plus_img.innerHTML = arrowDown;
node.expanded = true;
}
@@ -319,7 +308,6 @@ function highlightAnchor()
} else {
glowEffect(anchor.next(),1000); // normal member
}
- gotoAnchor(anchor,aname,false);
}
function selectAndHighlight(hash,n)
@@ -481,6 +469,18 @@ function toggleSyncButton(relpath)
}
}
+var loadTriggered = false;
+var readyTriggered = false;
+var loadObject,loadToRoot,loadUrl,loadRelPath;
+
+$(window).on('load',function(){
+ if (readyTriggered) { // ready first
+ navTo(loadObject,loadToRoot,loadUrl,loadRelPath);
+ showRoot();
+ }
+ loadTriggered=true;
+});
+
function initNavTree(toroot,relpath)
{
var o = new Object();
@@ -511,10 +511,16 @@ function initNavTree(toroot,relpath)
navSync.click(function(){ toggleSyncButton(relpath); });
}
- $(window).load(function(){
+ if (loadTriggered) { // load before ready
navTo(o,toroot,hashUrl(),relpath);
showRoot();
- });
+ } else { // ready before load
+ loadObject = o;
+ loadToRoot = toroot;
+ loadUrl = hashUrl();
+ loadRelPath = relpath;
+ readyTriggered=true;
+ }
$(window).bind('hashchange', function(){
if (window.location.hash && window.location.hash.length>1){
diff --git a/docs/navtreedata.js b/docs/navtreedata.js
index e6dc7bc..8dd5a55 100644
--- a/docs/navtreedata.js
+++ b/docs/navtreedata.js
@@ -1,25 +1,26 @@
/*
-@ @licstart The following is the entire license notice for the
-JavaScript code in this file.
+ @licstart The following is the entire license notice for the JavaScript code in this file.
-Copyright (C) 1997-2017 by Dimitri van Heesch
+ The MIT License (MIT)
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
+ Copyright (C) 1997-2020 by Dimitri van Heesch
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software
+ and associated documentation files (the "Software"), to deal in the Software without restriction,
+ including without limitation the rights to use, copy, modify, merge, publish, distribute,
+ sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
-You should have received a copy of the GNU General Public License along
-with this program; if not, write to the Free Software Foundation, Inc.,
-51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ The above copyright notice and this permission notice shall be included in all copies or
+ substantial portions of the Software.
-@licend The above is the entire license notice
-for the JavaScript code in this file
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ @licend The above is the entire license notice for the JavaScript code in this file
*/
var NAVTREE =
[
diff --git a/docs/navtreeindex0.js b/docs/navtreeindex0.js
index 4a1e93f..7218170 100644
--- a/docs/navtreeindex0.js
+++ b/docs/navtreeindex0.js
@@ -8,14 +8,15 @@ var NAVTREEINDEX0 =
"functions.html":[6,2,0],
"functions_vars.html":[6,2,1],
"group__aligned.html":[5,2],
-"group__aligned.html#ga08647c4593f3b2eef24a919a73eba3a3":[5,2,1],
-"group__aligned.html#ga0cadbcf5b89a7b6fb171bc8df8734819":[5,2,6],
-"group__aligned.html#ga4028d1cf4aa4c87c880747044a8322ae":[5,2,4],
-"group__aligned.html#ga53dddb4724042a90315b94bc268fb4c9":[5,2,0],
-"group__aligned.html#ga5850da130c936bd77db039dcfbc8295d":[5,2,3],
-"group__aligned.html#ga5f8c2353766db522565e642fafd8a3f8":[5,2,7],
-"group__aligned.html#ga68930196751fa2cca9e1fd0d71bade56":[5,2,2],
-"group__aligned.html#gaf66a9ae6c6f08bd6be6fb6ea771faffb":[5,2,5],
+"group__aligned.html#ga08647c4593f3b2eef24a919a73eba3a3":[5,2,2],
+"group__aligned.html#ga0cadbcf5b89a7b6fb171bc8df8734819":[5,2,7],
+"group__aligned.html#ga4028d1cf4aa4c87c880747044a8322ae":[5,2,5],
+"group__aligned.html#ga53dddb4724042a90315b94bc268fb4c9":[5,2,1],
+"group__aligned.html#ga5850da130c936bd77db039dcfbc8295d":[5,2,4],
+"group__aligned.html#ga5f8c2353766db522565e642fafd8a3f8":[5,2,8],
+"group__aligned.html#ga68930196751fa2cca9e1fd0d71bade56":[5,2,3],
+"group__aligned.html#ga83c03016066b438f51a8095e9140be06":[5,2,0],
+"group__aligned.html#gaf66a9ae6c6f08bd6be6fb6ea771faffb":[5,2,6],
"group__analysis.html":[5,6],
"group__analysis.html#a332a6c14d736a99699d5453a1cb04b41":[5,6,0,0],
"group__analysis.html#ab47526df656d8837ec3e97f11b83f835":[5,6,0,2],
@@ -38,30 +39,32 @@ var NAVTREEINDEX0 =
"group__cpp.html#gaef2c2bdb4f70857902d3c8903ac095f3":[5,9,2],
"group__cpp.html#structmi__stl__allocator":[5,9,0],
"group__extended.html":[5,1],
-"group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee":[5,1,22],
-"group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf":[5,1,19],
+"group__extended.html#ga00ec3324b6b2591c7fe3677baa30a767":[5,1,16],
+"group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee":[5,1,24],
+"group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf":[5,1,21],
"group__extended.html#ga1ea64283508718d9d645c38efc2f4305":[5,1,0],
-"group__extended.html#ga220f29f40a44404b0061c15bc1c31152":[5,1,23],
+"group__extended.html#ga220f29f40a44404b0061c15bc1c31152":[5,1,25],
"group__extended.html#ga251d369cda3f1c2a955c555486ed90e5":[5,1,2],
"group__extended.html#ga299dae78d25ce112e384a98b7309c5be":[5,1,1],
-"group__extended.html#ga2d126e5c62d3badc35445e5d84166df2":[5,1,16],
-"group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50":[5,1,14],
-"group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece":[5,1,10],
-"group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99":[5,1,18],
+"group__extended.html#ga2d126e5c62d3badc35445e5d84166df2":[5,1,18],
+"group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50":[5,1,15],
+"group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece":[5,1,11],
+"group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99":[5,1,20],
"group__extended.html#ga421430e2226d7d468529cec457396756":[5,1,4],
-"group__extended.html#ga537f13b299ddf801e49a5a94fde02c79":[5,1,17],
+"group__extended.html#ga4c6486a1fdcd7a423b5f25fe4be8e0cf":[5,1,9],
+"group__extended.html#ga537f13b299ddf801e49a5a94fde02c79":[5,1,19],
"group__extended.html#ga5f071b10d4df1c3658e04e7fd67a94e6":[5,1,6],
"group__extended.html#ga7136c2e55cb22c98ecf95d08d6debb99":[5,1,8],
-"group__extended.html#ga7795a13d20087447281858d2c771cca1":[5,1,13],
-"group__extended.html#ga7d862c2affd5790381da14eb102a364d":[5,1,9],
-"group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1":[5,1,15],
-"group__extended.html#gaa1d55e0e894be240827e5d87ec3a1f45":[5,1,11],
+"group__extended.html#ga7795a13d20087447281858d2c771cca1":[5,1,14],
+"group__extended.html#ga7d862c2affd5790381da14eb102a364d":[5,1,10],
+"group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1":[5,1,17],
+"group__extended.html#gaa1d55e0e894be240827e5d87ec3a1f45":[5,1,12],
"group__extended.html#gaad25050b19f30cd79397b227e0157a3f":[5,1,7],
-"group__extended.html#gab1dac8476c46cb9eecab767eb40c1525":[5,1,21],
+"group__extended.html#gab1dac8476c46cb9eecab767eb40c1525":[5,1,23],
"group__extended.html#gac057927cd06c854b45fe7847e921bd47":[5,1,5],
"group__extended.html#gad823d23444a4b77a40f66bf075a98a0c":[5,1,3],
-"group__extended.html#gae5b17ff027cd2150b43a33040250cf3f":[5,1,12],
-"group__extended.html#gaf8e73efc2cbca9ebfdfb166983a04c17":[5,1,20],
+"group__extended.html#gae5b17ff027cd2150b43a33040250cf3f":[5,1,13],
+"group__extended.html#gaf8e73efc2cbca9ebfdfb166983a04c17":[5,1,22],
"group__heap.html":[5,3],
"group__heap.html#ga00e95ba1e01acac3cfd95bb7a357a6f0":[5,3,20],
"group__heap.html#ga08ca6419a5c057a4d965868998eef487":[5,3,3],
@@ -115,30 +118,32 @@ var NAVTREEINDEX0 =
"group__options.html#gaf84921c32375e25754dc2ee6a911fa60":[5,7,5],
"group__options.html#gafebf7ed116adb38ae5218bc3ce06884c":[5,7,0],
"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0957ef73b2550764b4840edf48422fda":[5,7,0,1],
-"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0ac33a18f6b659fcfaf44efb0bab1b74":[5,7,0,11],
-"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca154fe170131d5212cff57e22b99523c5":[5,7,0,10],
-"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c":[5,7,0,13],
+"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0ac33a18f6b659fcfaf44efb0bab1b74":[5,7,0,12],
+"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca154fe170131d5212cff57e22b99523c5":[5,7,0,11],
+"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c":[5,7,0,14],
"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca1e8de72c93da7ff22d91e1e27b52ac2b":[5,7,0,3],
-"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca2ecbe7ef32f5c84de3739aa4f0b805a1":[5,7,0,7],
+"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca2ecbe7ef32f5c84de3739aa4f0b805a1":[5,7,0,8],
"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca32ce97ece29f69e82579679cf8a307ad":[5,7,0,4],
"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4192d491200d0055df0554d4cf65054e":[5,7,0,5],
-"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4b74ae2a69e445de6c2361b73c1d14bf":[5,7,0,14],
-"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca5b4357b74be0d87568036c32eb1a2e4a":[5,7,0,15],
+"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4b74ae2a69e445de6c2361b73c1d14bf":[5,7,0,15],
+"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca5b4357b74be0d87568036c32eb1a2e4a":[5,7,0,16],
"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca7c8b7bf5281c581bad64f5daa6442777":[5,7,0,2],
-"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cac81ee965b130fa81238913a3c239d536":[5,7,0,12],
+"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caa13e7926d4339d2aa6fbf61d4473fd5c":[5,7,0,7],
+"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cac81ee965b130fa81238913a3c239d536":[5,7,0,13],
"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caca7ed041be3b0b9d0b82432c7bf41af2":[5,7,0,6],
-"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cada854dd272c66342f18a93ee254a2968":[5,7,0,8],
-"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafb121d30d87591850d5410ccc3a95c6d":[5,7,0,9],
+"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cada854dd272c66342f18a93ee254a2968":[5,7,0,9],
+"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafb121d30d87591850d5410ccc3a95c6d":[5,7,0,10],
"group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafbf4822e5c00732c5984b32a032837f0":[5,7,0,0],
"group__posix.html":[5,8],
"group__posix.html#ga06d07cf357bbac5c73ba5d0c0c421e17":[5,8,7],
"group__posix.html#ga0d28d5cf61e6bfbb18c63092939fe5c9":[5,8,3],
"group__posix.html#ga1326d2e4388630b5f81ca7206318b8e5":[5,8,1],
"group__posix.html#ga4531c9e775bb3ae12db57c1ba8a5d7de":[5,8,6],
-"group__posix.html#ga48fad8648a2f1dab9c87ea9448a52088":[5,8,11],
+"group__posix.html#ga48fad8648a2f1dab9c87ea9448a52088":[5,8,12],
"group__posix.html#ga705dc7a64bffacfeeb0141501a5c35d7":[5,8,2],
"group__posix.html#ga72e9d7ffb5fe94d69bc722c8506e27bc":[5,8,5],
-"group__posix.html#ga73baaf5951f5165ba0763d0c06b6a93b":[5,8,12],
+"group__posix.html#ga73baaf5951f5165ba0763d0c06b6a93b":[5,8,13],
+"group__posix.html#ga7e1934d60a3e697950eeb48e042bfad5":[5,8,11],
"group__posix.html#gaab7fa71ea93b96873f5d9883db57d40e":[5,8,8],
"group__posix.html#gacff84f226ba9feb2031b8992e5579447":[5,8,9],
"group__posix.html#gad5a69c8fea96aa2b7a7c818c2130090a":[5,8,0],
diff --git a/docs/overrides.html b/docs/overrides.html
index 2a6c51e..9c249a3 100644
--- a/docs/overrides.html
+++ b/docs/overrides.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Overriding Malloc
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('overrides.html','');});
-
@@ -110,7 +106,9 @@ $(document).ready(function(){initNavTree('overrides.html','');});
env LD_PRELOAD=/usr/lib/libmimalloc.so myprogram
-
You can set extra environment variables to check that mimalloc is running, like:
env MIMALLOC_VERBOSE=1 LD_PRELOAD=/usr/lib/libmimalloc.so myprogram
or run with the debug version to get detailed statistics:
env MIMALLOC_SHOW_STATS=1 LD_PRELOAD=/usr/lib/libmimalloc-debug.so myprogram
MacOS
+
You can set extra environment variables to check that mimalloc is running, like:
env MIMALLOC_VERBOSE=1 LD_PRELOAD=/usr/lib/libmimalloc.so myprogram
+
or run with the debug version to get detailed statistics:
env MIMALLOC_SHOW_STATS=1 LD_PRELOAD=/usr/lib/libmimalloc-debug.so myprogram
+
MacOS
On macOS we can also preload the mimalloc shared library so all calls to the standard malloc
interface are resolved to the mimalloc library.
env DYLD_FORCE_FLAT_NAMESPACE=1 DYLD_INSERT_LIBRARIES=/usr/lib/libmimalloc.dylib myprogram
@@ -120,22 +118,80 @@ $(document).ready(function(){initNavTree('overrides.html','');});
Windows
Overriding on Windows is robust and has the particular advantage of being able to redirect all malloc/free calls that go through the (dynamic) C runtime allocator, including those from other DLLs or libraries.
The overriding on Windows requires that you link your program explicitly with the mimalloc DLL and use the C-runtime library as a DLL (using the /MD
or /MDd
switch). Also, the mimalloc-redirect.dll
(or mimalloc-redirect32.dll
) must be available in the same folder as the main mimalloc-override.dll
at runtime (as it is a dependency). The redirection DLL ensures that all calls to the C runtime malloc API get redirected to mimalloc (in mimalloc-override.dll
).
-To ensure the mimalloc DLL is loaded at run-time it is easiest to insert some call to the mimalloc API in the main
function, like mi_version()
(or use the /INCLUDE:mi_version
switch on the linker). See the mimalloc-override-test
project for an example on how to use this. For best performance on Windows with C++, it is also recommended to also override the new
/delete
operations (by including mimalloc-new-delete.h
a single(!) source file in your project).
+To ensure the mimalloc DLL is loaded at run-time, it is easiest to insert some call to the mimalloc API in the main
function, like mi_version()
(or use the /INCLUDE:mi_version
switch on the linker). See the mimalloc-override-test
project for an example of how to use this. For best performance on Windows with C++, it is also recommended to override the new
/delete
operations (by including mimalloc-new-delete.h
in a single(!) source file in your project without linking to the mimalloc library).
The environment variable MIMALLOC_DISABLE_REDIRECT=1
can be used to disable dynamic overriding at run-time. Use MIMALLOC_VERBOSE=1
to check if mimalloc was successfully redirected.
(Note: in principle, it is possible to even patch existing executables without any recompilation if they are linked with the dynamic C runtime (ucrtbase.dll
) – just put the mimalloc-override.dll
into the import table (and put mimalloc-redirect.dll
in the same folder). Such patching can be done, for example, with CFF Explorer.)
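For instance, a minimal sketch of the mi_version() approach mentioned above:

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  // Any call into the mimalloc API keeps the override DLL as a load-time dependency.
  printf("mimalloc version: %d\n", mi_version());
  // ... rest of the program ...
  return 0;
}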
Static override
On Unix systems, you can also statically link with mimalloc to override the standard malloc interface. The recommended way is to link the final program with the mimalloc single object file (mimalloc-override.o
). We use an object file instead of a library file as linkers give preference to that over archives to resolve symbols. To ensure that the standard malloc interface resolves to the mimalloc library, link it as the first object file. For example:
-gcc -o myprogram mimalloc-override.o myfile1.c ...
List of Overrides:
+gcc -o myprogram mimalloc-override.o myfile1.c ...
+
List of Overrides:
The specific functions that get redirected to the mimalloc library are:
-
void * malloc(size_t size);
void * calloc(size_t size, size_t n);
void * realloc(void * p, size_t newsize);
void free(void * p);
void operator delete (void * p);
void operator delete [](void * p);
void * operator new (std::size_t n) noexcept(false );
void * operator new [](std::size_t n) noexcept(false );
void * operator new ( std::size_t n, std::align_val_t align) noexcept(false );
void * operator new []( std::size_t n, std::align_val_t align) noexcept(false );
void * operator new ( std::size_t count, const std::nothrow_t& tag);
void * operator new []( std::size_t count, const std::nothrow_t& tag);
void * operator new ( std::size_t count, std::align_val_t al, const std::nothrow_t&);
void * operator new []( std::size_t count, std::align_val_t al, const std::nothrow_t&);
int posix_memalign(void ** p, size_t alignment, size_t size);
void * memalign(size_t alignment, size_t size);
void * aligned_alloc(size_t alignment, size_t size);
void * valloc(size_t size);
void * pvalloc(size_t size);
size_t malloc_usable_size(void *p);
void * reallocarray( void * p, size_t count, size_t size );
void * reallocf(void * p, size_t newsize);
void cfree(void * p);
void * _expand(void * p, size_t newsize);
size_t _msize(void * p);
void * _malloc_dbg(size_t size, int block_type, const char * fname, int line);
void * _realloc_dbg(void * p, size_t newsize, int block_type, const char * fname, int line);
void * _calloc_dbg(size_t count, size_t size, int block_type, const char * fname, int line);
void * _expand_dbg(void * p, size_t size, int block_type, const char * fname, int line);
size_t _msize_dbg(void * p, int block_type);
void _free_dbg(void * p, int block_type);
-
+
+
void * malloc(size_t size);
+
void * calloc(size_t size, size_t n);
+
void * realloc(void * p, size_t newsize);
+
void free(void * p);
+
+
void * aligned_alloc(size_t alignment, size_t size);
+
char * strdup(const char * s);
+
char * strndup(const char * s, size_t n);
+
char * realpath(const char * fname, char * resolved_name);
+
+
+
+
void operator delete (void * p);
+
void operator delete [](void * p);
+
+
void * operator new (std::size_t n) noexcept(false );
+
void * operator new [](std::size_t n) noexcept(false );
+
void * operator new ( std::size_t n, std::align_val_t align) noexcept(false );
+
void * operator new []( std::size_t n, std::align_val_t align) noexcept(false );
+
+
void * operator new ( std::size_t count, const std::nothrow_t& tag);
+
void * operator new []( std::size_t count, const std::nothrow_t& tag);
+
void * operator new ( std::size_t count, std::align_val_t al, const std::nothrow_t&);
+
void * operator new []( std::size_t count, std::align_val_t al, const std::nothrow_t&);
+
+
+
int posix_memalign(void ** p, size_t alignment, size_t size);
+
+
+
void * memalign(size_t alignment, size_t size);
+
void * valloc(size_t size);
+
void * pvalloc(size_t size);
+
size_t malloc_usable_size(void *p);
+
void * reallocf(void * p, size_t newsize);
+
+
+
void vfree(void * p);
+
size_t malloc_size(const void * p);
+
size_t malloc_good_size(size_t size);
+
+
+
void * reallocarray( void * p, size_t count, size_t size );
+
void * reallocf(void * p, size_t newsize);
+
void cfree(void * p);
+
+
+
int reallocarr(void * p, size_t count, size_t size);
+
+
+
void * _expand(void * p, size_t newsize);
+
size_t _msize(void * p);
+
+
void * _malloc_dbg(size_t size, int block_type, const char * fname, int line);
+
void * _realloc_dbg(void * p, size_t newsize, int block_type, const char * fname, int line);
+
void * _calloc_dbg(size_t count, size_t size, int block_type, const char * fname, int line);
+
void * _expand_dbg(void * p, size_t size, int block_type, const char * fname, int line);
+
size_t _msize_dbg(void * p, int block_type);
+
void _free_dbg(void * p, int block_type);
+
+
diff --git a/docs/pages.html b/docs/pages.html
index 8fcd6f0..60b7fc3 100644
--- a/docs/pages.html
+++ b/docs/pages.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Related Pages
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('pages.html','');});
-
@@ -116,9 +112,7 @@ $(document).ready(function(){initNavTree('pages.html','');});
diff --git a/docs/resize.js b/docs/resize.js
index 6617aee..e1ad0fe 100644
--- a/docs/resize.js
+++ b/docs/resize.js
@@ -1,25 +1,26 @@
/*
- @licstart The following is the entire license notice for the
- JavaScript code in this file.
+ @licstart The following is the entire license notice for the JavaScript code in this file.
- Copyright (C) 1997-2017 by Dimitri van Heesch
+ The MIT License (MIT)
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+ Copyright (C) 1997-2020 by Dimitri van Heesch
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software
+ and associated documentation files (the "Software"), to deal in the Software without restriction,
+ including without limitation the rights to use, copy, modify, merge, publish, distribute,
+ sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ The above copyright notice and this permission notice shall be included in all copies or
+ substantial portions of the Software.
- @licend The above is the entire license notice
- for the JavaScript code in this file
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ @licend The above is the entire license notice for the JavaScript code in this file
*/
function initResizable()
{
@@ -91,6 +92,9 @@ function initResizable()
}
collapsedWidth=width;
}
+ if (location.hash.slice(1)) {
+ (document.getElementById(location.hash.slice(1))||document.body).scrollIntoView();
+ }
}
function collapseExpand()
@@ -131,6 +135,6 @@ function initResizable()
var _preventDefault = function(evt) { evt.preventDefault(); };
$("#splitbar").bind("dragstart", _preventDefault).bind("selectstart", _preventDefault);
$(".ui-resizable-handle").dblclick(collapseExpand);
- $(window).load(resizeHeight);
+ $(window).on('load',resizeHeight);
}
/* @license-end */
diff --git a/docs/search/all_0.html b/docs/search/all_0.html
index 5330204..1ec5b2d 100644
--- a/docs/search/all_0.html
+++ b/docs/search/all_0.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/all_0.js b/docs/search/all_0.js
index 7054b6d..cd7bb41 100644
--- a/docs/search/all_0.js
+++ b/docs/search/all_0.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['_5fmi_5foption_5flast',['_mi_option_last',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca5b4357b74be0d87568036c32eb1a2e4a',1,'mimalloc-doc.h']]]
+ ['_5fmi_5foption_5flast_0',['_mi_option_last',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca5b4357b74be0d87568036c32eb1a2e4a',1,'mimalloc-doc.h']]]
];
diff --git a/docs/search/all_1.html b/docs/search/all_1.html
index 2f46793..9f80e90 100644
--- a/docs/search/all_1.html
+++ b/docs/search/all_1.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/all_1.js b/docs/search/all_1.js
index bbb4b54..7f1097c 100644
--- a/docs/search/all_1.js
+++ b/docs/search/all_1.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['aligned_20allocation',['Aligned Allocation',['../group__aligned.html',1,'']]]
+ ['aligned_20allocation_1',['Aligned Allocation',['../group__aligned.html',1,'']]]
];
diff --git a/docs/search/all_2.html b/docs/search/all_2.html
index 4c33d85..02cfffc 100644
--- a/docs/search/all_2.html
+++ b/docs/search/all_2.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/all_2.js b/docs/search/all_2.js
index 829288c..00576d7 100644
--- a/docs/search/all_2.js
+++ b/docs/search/all_2.js
@@ -1,7 +1,7 @@
var searchData=
[
- ['block_5fsize',['block_size',['../group__analysis.html#a332a6c14d736a99699d5453a1cb04b41',1,'mi_heap_area_t']]],
- ['blocks',['blocks',['../group__analysis.html#ae0085e6e1cf059a4eb7767e30e9991b8',1,'mi_heap_area_t']]],
- ['building',['Building',['../build.html',1,'']]],
- ['basic_20allocation',['Basic Allocation',['../group__malloc.html',1,'']]]
+ ['basic_20allocation_2',['Basic Allocation',['../group__malloc.html',1,'']]],
+ ['block_5fsize_3',['block_size',['../group__analysis.html#a332a6c14d736a99699d5453a1cb04b41',1,'mi_heap_area_t']]],
+ ['blocks_4',['blocks',['../group__analysis.html#ae0085e6e1cf059a4eb7767e30e9991b8',1,'mi_heap_area_t']]],
+ ['building_5',['Building',['../build.html',1,'']]]
];
diff --git a/docs/search/all_3.html b/docs/search/all_3.html
index b634070..39767b8 100644
--- a/docs/search/all_3.html
+++ b/docs/search/all_3.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/all_3.js b/docs/search/all_3.js
index 2e08411..9a029ee 100644
--- a/docs/search/all_3.js
+++ b/docs/search/all_3.js
@@ -1,5 +1,5 @@
var searchData=
[
- ['committed',['committed',['../group__analysis.html#ab47526df656d8837ec3e97f11b83f835',1,'mi_heap_area_t']]],
- ['c_2b_2b_20wrappers',['C++ wrappers',['../group__cpp.html',1,'']]]
+ ['c_2b_2b_20wrappers_6',['C++ wrappers',['../group__cpp.html',1,'']]],
+ ['committed_7',['committed',['../group__analysis.html#ab47526df656d8837ec3e97f11b83f835',1,'mi_heap_area_t']]]
];
diff --git a/docs/search/all_4.html b/docs/search/all_4.html
index dd062ae..fc40463 100644
--- a/docs/search/all_4.html
+++ b/docs/search/all_4.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/all_4.js b/docs/search/all_4.js
index 059f44c..5dc5128 100644
--- a/docs/search/all_4.js
+++ b/docs/search/all_4.js
@@ -1,5 +1,5 @@
var searchData=
[
- ['environment_20options',['Environment Options',['../environment.html',1,'']]],
- ['extended_20functions',['Extended Functions',['../group__extended.html',1,'']]]
+ ['environment_20options_8',['Environment Options',['../environment.html',1,'']]],
+ ['extended_20functions_9',['Extended Functions',['../group__extended.html',1,'']]]
];
diff --git a/docs/search/all_5.html b/docs/search/all_5.html
index f0780fd..9dd9344 100644
--- a/docs/search/all_5.html
+++ b/docs/search/all_5.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/all_5.js b/docs/search/all_5.js
index e7e4093..7441d85 100644
--- a/docs/search/all_5.js
+++ b/docs/search/all_5.js
@@ -1,5 +1,5 @@
var searchData=
[
- ['heap_20introspection',['Heap Introspection',['../group__analysis.html',1,'']]],
- ['heap_20allocation',['Heap Allocation',['../group__heap.html',1,'']]]
+ ['heap_20allocation_10',['Heap Allocation',['../group__heap.html',1,'']]],
+ ['heap_20introspection_11',['Heap Introspection',['../group__analysis.html',1,'']]]
];
diff --git a/docs/search/all_6.html b/docs/search/all_6.html
index 39b0f55..f1e516d 100644
--- a/docs/search/all_6.html
+++ b/docs/search/all_6.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/all_6.js b/docs/search/all_6.js
index 491883f..6d32b7b 100644
--- a/docs/search/all_6.js
+++ b/docs/search/all_6.js
@@ -1,148 +1,153 @@
var searchData=
[
- ['mi_5f_5fposix_5fmemalign',['mi__posix_memalign',['../group__posix.html#gad5a69c8fea96aa2b7a7c818c2130090a',1,'mimalloc-doc.h']]],
- ['mi_5faligned_5falloc',['mi_aligned_alloc',['../group__posix.html#ga1326d2e4388630b5f81ca7206318b8e5',1,'mimalloc-doc.h']]],
- ['mi_5fblock_5fvisit_5ffun',['mi_block_visit_fun',['../group__analysis.html#gadfa01e2900f0e5d515ad5506b26f6d65',1,'mimalloc-doc.h']]],
- ['mi_5fcalloc',['mi_calloc',['../group__malloc.html#ga97fedb4f7107c592fd7f0f0a8949a57d',1,'mimalloc-doc.h']]],
- ['mi_5fcalloc_5faligned',['mi_calloc_aligned',['../group__aligned.html#ga53dddb4724042a90315b94bc268fb4c9',1,'mimalloc-doc.h']]],
- ['mi_5fcalloc_5faligned_5fat',['mi_calloc_aligned_at',['../group__aligned.html#ga08647c4593f3b2eef24a919a73eba3a3',1,'mimalloc-doc.h']]],
- ['mi_5fcalloc_5ftp',['mi_calloc_tp',['../group__typed.html#gae80c47c9d4cab10961fff1a8ac98fc07',1,'mimalloc-doc.h']]],
- ['mi_5fcfree',['mi_cfree',['../group__posix.html#ga705dc7a64bffacfeeb0141501a5c35d7',1,'mimalloc-doc.h']]],
- ['mi_5fcheck_5fowned',['mi_check_owned',['../group__analysis.html#ga628c237489c2679af84a4d0d143b3dd5',1,'mimalloc-doc.h']]],
- ['mi_5fcollect',['mi_collect',['../group__extended.html#ga421430e2226d7d468529cec457396756',1,'mimalloc-doc.h']]],
- ['mi_5fdeferred_5ffree_5ffun',['mi_deferred_free_fun',['../group__extended.html#ga299dae78d25ce112e384a98b7309c5be',1,'mimalloc-doc.h']]],
- ['mi_5ferror_5ffun',['mi_error_fun',['../group__extended.html#ga251d369cda3f1c2a955c555486ed90e5',1,'mimalloc-doc.h']]],
- ['mi_5fexpand',['mi_expand',['../group__malloc.html#gaaee66a1d483c3e28f585525fb96707e4',1,'mimalloc-doc.h']]],
- ['mi_5ffree',['mi_free',['../group__malloc.html#gaf2c7b89c327d1f60f59e68b9ea644d95',1,'mimalloc-doc.h']]],
- ['mi_5ffree_5faligned',['mi_free_aligned',['../group__posix.html#ga0d28d5cf61e6bfbb18c63092939fe5c9',1,'mimalloc-doc.h']]],
- ['mi_5ffree_5fsize',['mi_free_size',['../group__posix.html#gae01389eedab8d67341ff52e2aad80ebb',1,'mimalloc-doc.h']]],
- ['mi_5ffree_5fsize_5faligned',['mi_free_size_aligned',['../group__posix.html#ga72e9d7ffb5fe94d69bc722c8506e27bc',1,'mimalloc-doc.h']]],
- ['mi_5fgood_5fsize',['mi_good_size',['../group__extended.html#gac057927cd06c854b45fe7847e921bd47',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5farea_5ft',['mi_heap_area_t',['../group__analysis.html#structmi__heap__area__t',1,'']]],
- ['mi_5fheap_5fcalloc',['mi_heap_calloc',['../group__heap.html#gaa6702b3c48e9e53e50e81b36f5011d55',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fcalloc_5faligned',['mi_heap_calloc_aligned',['../group__heap.html#ga4af03a6e2b93fae77424d93f889705c3',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fcalloc_5faligned_5fat',['mi_heap_calloc_aligned_at',['../group__heap.html#ga08ca6419a5c057a4d965868998eef487',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fcalloc_5ftp',['mi_heap_calloc_tp',['../group__typed.html#ga4e5d1f1707c90e5f55e023ac5f45fe74',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fcheck_5fowned',['mi_heap_check_owned',['../group__analysis.html#ga0d67c1789faaa15ff366c024fcaf6377',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fcollect',['mi_heap_collect',['../group__heap.html#ga7922f7495cde30b1984d0e6072419298',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fcontains_5fblock',['mi_heap_contains_block',['../group__analysis.html#gaa862aa8ed8d57d84cae41fc1022d71af',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fdelete',['mi_heap_delete',['../group__heap.html#ga2ab1af8d438819b55319c7ef51d1e409',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fdestroy',['mi_heap_destroy',['../group__heap.html#ga9f9c0844edb9717f4feacd79116b8e0d',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fget_5fbacking',['mi_heap_get_backing',['../group__heap.html#ga5d03fbe062ffcf38f0f417fd968357fc',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fget_5fdefault',['mi_heap_get_default',['../group__heap.html#ga8db4cbb87314a989a9a187464d6b5e05',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fmalloc',['mi_heap_malloc',['../group__heap.html#ga9cbed01e42c0647907295de92c3fa296',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fmalloc_5faligned',['mi_heap_malloc_aligned',['../group__heap.html#gab5b87e1805306f70df38789fcfcf6653',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fmalloc_5faligned_5fat',['mi_heap_malloc_aligned_at',['../group__heap.html#ga23acd7680fb0976dde3783254c6c874b',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fmalloc_5fsmall',['mi_heap_malloc_small',['../group__heap.html#gaa1a1c7a1f4da6826b5a25b70ef878368',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fmalloc_5ftp',['mi_heap_malloc_tp',['../group__typed.html#ga653bcb24ac495bc19940ecd6898f9cd7',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fmallocn',['mi_heap_mallocn',['../group__heap.html#ga851da6c43fe0b71c1376cee8aef90db0',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fmallocn_5ftp',['mi_heap_mallocn_tp',['../group__typed.html#ga6b75cb9c4b9c647661d0924552dc6e83',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fnew',['mi_heap_new',['../group__heap.html#ga766f672ba56f2fbfeb9d9dbb0b7f6b11',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frealloc',['mi_heap_realloc',['../group__heap.html#gaaef3395f66be48f37bdc8322509c5d81',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frealloc_5faligned',['mi_heap_realloc_aligned',['../group__heap.html#gafc603b696bd14cae6da28658f950d98c',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frealloc_5faligned_5fat',['mi_heap_realloc_aligned_at',['../group__heap.html#gaf96c788a1bf553fe2d371de9365e047c',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5freallocf',['mi_heap_reallocf',['../group__heap.html#ga4a21070eb4e7cce018133c8d5f4b0527',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5freallocn',['mi_heap_reallocn',['../group__heap.html#gac74e94ad9b0c9b57c1c4d88b8825b7a8',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5freallocn_5ftp',['mi_heap_reallocn_tp',['../group__typed.html#gaf213d5422ec35e7f6caad827c79bc948',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frealpath',['mi_heap_realpath',['../group__heap.html#ga00e95ba1e01acac3cfd95bb7a357a6f0',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frecalloc',['mi_heap_recalloc',['../group__zeroinit.html#ga8648c5fbb22a80f0262859099f06dfbd',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frecalloc_5faligned',['mi_heap_recalloc_aligned',['../group__zeroinit.html#ga9f3f999396c8f77ca5e80e7b40ac29e3',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frecalloc_5faligned_5fat',['mi_heap_recalloc_aligned_at',['../group__zeroinit.html#ga496452c96f1de8c500be9fddf52edaf7',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frecalloc_5ftp',['mi_heap_recalloc_tp',['../group__typed.html#ga3e50a1600958fcaf1a7f3560c9174f9e',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frezalloc',['mi_heap_rezalloc',['../group__zeroinit.html#gacfad83f14eb5d6a42a497a898e19fc76',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frezalloc_5faligned',['mi_heap_rezalloc_aligned',['../group__zeroinit.html#ga375fa8a611c51905e592d5d467c49664',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frezalloc_5faligned_5fat',['mi_heap_rezalloc_aligned_at',['../group__zeroinit.html#gac90da54fa7e5d10bdc97ce0b51dce2eb',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fset_5fdefault',['mi_heap_set_default',['../group__heap.html#gab8631ec88c8d26641b68b5d25dcd4422',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fstrdup',['mi_heap_strdup',['../group__heap.html#ga139d6b09dbf50c3c2523d0f4d1cfdeb5',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fstrndup',['mi_heap_strndup',['../group__heap.html#ga8e3dbd46650dd26573cf307a2c8f1f5a',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5ft',['mi_heap_t',['../group__heap.html#ga34a47cde5a5b38c29f1aa3c5e76943c2',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fvisit_5fblocks',['mi_heap_visit_blocks',['../group__analysis.html#ga70c46687dc6e9dc98b232b02646f8bed',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fzalloc',['mi_heap_zalloc',['../group__heap.html#ga903104592c8ed53417a3762da6241133',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fzalloc_5faligned',['mi_heap_zalloc_aligned',['../group__heap.html#gaa450a59c6c7ae5fdbd1c2b80a8329ef0',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fzalloc_5faligned_5fat',['mi_heap_zalloc_aligned_at',['../group__heap.html#ga45fb43a62776fbebbdf1edd99b527954',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fzalloc_5ftp',['mi_heap_zalloc_tp',['../group__typed.html#gad6e87e86e994aa14416ae9b5d4c188fe',1,'mimalloc-doc.h']]],
- ['mi_5fis_5fin_5fheap_5fregion',['mi_is_in_heap_region',['../group__extended.html#ga5f071b10d4df1c3658e04e7fd67a94e6',1,'mimalloc-doc.h']]],
- ['mi_5fis_5fredirected',['mi_is_redirected',['../group__extended.html#gaad25050b19f30cd79397b227e0157a3f',1,'mimalloc-doc.h']]],
- ['mi_5fmalloc',['mi_malloc',['../group__malloc.html#ga3406e8b168bc74c8637b11571a6da83a',1,'mimalloc-doc.h']]],
- ['mi_5fmalloc_5faligned',['mi_malloc_aligned',['../group__aligned.html#ga68930196751fa2cca9e1fd0d71bade56',1,'mimalloc-doc.h']]],
- ['mi_5fmalloc_5faligned_5fat',['mi_malloc_aligned_at',['../group__aligned.html#ga5850da130c936bd77db039dcfbc8295d',1,'mimalloc-doc.h']]],
- ['mi_5fmalloc_5fsize',['mi_malloc_size',['../group__posix.html#ga4531c9e775bb3ae12db57c1ba8a5d7de',1,'mimalloc-doc.h']]],
- ['mi_5fmalloc_5fsmall',['mi_malloc_small',['../group__extended.html#ga7136c2e55cb22c98ecf95d08d6debb99',1,'mimalloc-doc.h']]],
- ['mi_5fmalloc_5ftp',['mi_malloc_tp',['../group__typed.html#ga0619a62c5fd886f1016030abe91f0557',1,'mimalloc-doc.h']]],
- ['mi_5fmalloc_5fusable_5fsize',['mi_malloc_usable_size',['../group__posix.html#ga06d07cf357bbac5c73ba5d0c0c421e17',1,'mimalloc-doc.h']]],
- ['mi_5fmallocn',['mi_mallocn',['../group__malloc.html#ga0b05e2bf0f73e7401ae08597ff782ac6',1,'mimalloc-doc.h']]],
- ['mi_5fmallocn_5ftp',['mi_mallocn_tp',['../group__typed.html#gae5cb6e0fafc9f23169c5622e077afe8b',1,'mimalloc-doc.h']]],
- ['mi_5fmemalign',['mi_memalign',['../group__posix.html#gaab7fa71ea93b96873f5d9883db57d40e',1,'mimalloc-doc.h']]],
- ['mi_5fnew',['mi_new',['../group__cpp.html#gaad048a9fce3d02c5909cd05c6ec24545',1,'mimalloc-doc.h']]],
- ['mi_5fnew_5faligned',['mi_new_aligned',['../group__cpp.html#gaef2c2bdb4f70857902d3c8903ac095f3',1,'mimalloc-doc.h']]],
- ['mi_5fnew_5faligned_5fnothrow',['mi_new_aligned_nothrow',['../group__cpp.html#gab5e29558926d934c3f1cae8c815f942c',1,'mimalloc-doc.h']]],
- ['mi_5fnew_5fn',['mi_new_n',['../group__cpp.html#gae7bc4f56cd57ed3359060ff4f38bda81',1,'mimalloc-doc.h']]],
- ['mi_5fnew_5fnothrow',['mi_new_nothrow',['../group__cpp.html#gaeaded64eda71ed6b1d569d3e723abc4a',1,'mimalloc-doc.h']]],
- ['mi_5fnew_5frealloc',['mi_new_realloc',['../group__cpp.html#gaab78a32f55149e9fbf432d5288e38e1e',1,'mimalloc-doc.h']]],
- ['mi_5fnew_5freallocn',['mi_new_reallocn',['../group__cpp.html#ga756f4b2bc6a7ecd0a90baea8e90c7907',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fdisable',['mi_option_disable',['../group__options.html#gaebf6ff707a2e688ebb1a2296ca564054',1,'mimalloc-doc.h']]],
- ['mi_5foption_5feager_5fcommit',['mi_option_eager_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca1e8de72c93da7ff22d91e1e27b52ac2b',1,'mimalloc-doc.h']]],
- ['mi_5foption_5feager_5fcommit_5fdelay',['mi_option_eager_commit_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c',1,'mimalloc-doc.h']]],
- ['mi_5foption_5feager_5fregion_5fcommit',['mi_option_eager_region_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca32ce97ece29f69e82579679cf8a307ad',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fenable',['mi_option_enable',['../group__options.html#ga04180ae41b0d601421dd62ced40ca050',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fget',['mi_option_get',['../group__options.html#ga7e8af195cc81d3fa64ccf2662caa565a',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fis_5fenabled',['mi_option_is_enabled',['../group__options.html#ga459ad98f18b3fc9275474807fe0ca188',1,'mimalloc-doc.h']]],
- ['mi_5foption_5flarge_5fos_5fpages',['mi_option_large_os_pages',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4192d491200d0055df0554d4cf65054e',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fos_5ftag',['mi_option_os_tag',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4b74ae2a69e445de6c2361b73c1d14bf',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fpage_5freset',['mi_option_page_reset',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cada854dd272c66342f18a93ee254a2968',1,'mimalloc-doc.h']]],
- ['mi_5foption_5freserve_5fhuge_5fos_5fpages',['mi_option_reserve_huge_os_pages',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caca7ed041be3b0b9d0b82432c7bf41af2',1,'mimalloc-doc.h']]],
- ['mi_5foption_5freset_5fdecommits',['mi_option_reset_decommits',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cac81ee965b130fa81238913a3c239d536',1,'mimalloc-doc.h']]],
- ['mi_5foption_5freset_5fdelay',['mi_option_reset_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca154fe170131d5212cff57e22b99523c5',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fsegment_5fcache',['mi_option_segment_cache',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca2ecbe7ef32f5c84de3739aa4f0b805a1',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fsegment_5freset',['mi_option_segment_reset',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafb121d30d87591850d5410ccc3a95c6d',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fset',['mi_option_set',['../group__options.html#gaf84921c32375e25754dc2ee6a911fa60',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fset_5fdefault',['mi_option_set_default',['../group__options.html#ga7ef623e440e6e5545cb08c94e71e4b90',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fset_5fenabled',['mi_option_set_enabled',['../group__options.html#ga9a13d05fcb77489cb06d4d017ebd8bed',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fset_5fenabled_5fdefault',['mi_option_set_enabled_default',['../group__options.html#ga65518b69ec5d32336b50e07f74b3f629',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fshow_5ferrors',['mi_option_show_errors',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafbf4822e5c00732c5984b32a032837f0',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fshow_5fstats',['mi_option_show_stats',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0957ef73b2550764b4840edf48422fda',1,'mimalloc-doc.h']]],
- ['mi_5foption_5ft',['mi_option_t',['../group__options.html#gafebf7ed116adb38ae5218bc3ce06884c',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fuse_5fnuma_5fnodes',['mi_option_use_numa_nodes',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0ac33a18f6b659fcfaf44efb0bab1b74',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fverbose',['mi_option_verbose',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca7c8b7bf5281c581bad64f5daa6442777',1,'mimalloc-doc.h']]],
- ['mi_5foutput_5ffun',['mi_output_fun',['../group__extended.html#gad823d23444a4b77a40f66bf075a98a0c',1,'mimalloc-doc.h']]],
- ['mi_5fposix_5fmemalign',['mi_posix_memalign',['../group__posix.html#gacff84f226ba9feb2031b8992e5579447',1,'mimalloc-doc.h']]],
- ['mi_5fprocess_5finfo',['mi_process_info',['../group__extended.html#ga7d862c2affd5790381da14eb102a364d',1,'mimalloc-doc.h']]],
- ['mi_5fpvalloc',['mi_pvalloc',['../group__posix.html#gaeb325c39b887d3b90d85d1eb1712fb1e',1,'mimalloc-doc.h']]],
- ['mi_5frealloc',['mi_realloc',['../group__malloc.html#gaf11eb497da57bdfb2de65eb191c69db6',1,'mimalloc-doc.h']]],
- ['mi_5frealloc_5faligned',['mi_realloc_aligned',['../group__aligned.html#ga4028d1cf4aa4c87c880747044a8322ae',1,'mimalloc-doc.h']]],
- ['mi_5frealloc_5faligned_5fat',['mi_realloc_aligned_at',['../group__aligned.html#gaf66a9ae6c6f08bd6be6fb6ea771faffb',1,'mimalloc-doc.h']]],
- ['mi_5freallocarray',['mi_reallocarray',['../group__posix.html#ga48fad8648a2f1dab9c87ea9448a52088',1,'mimalloc-doc.h']]],
- ['mi_5freallocf',['mi_reallocf',['../group__malloc.html#gafe68ac7c5e24a65cd55c9d6b152211a0',1,'mimalloc-doc.h']]],
- ['mi_5freallocn',['mi_reallocn',['../group__malloc.html#ga61d57b4144ba24fba5c1e9b956d13853',1,'mimalloc-doc.h']]],
- ['mi_5freallocn_5ftp',['mi_reallocn_tp',['../group__typed.html#ga1158b49a55dfa81f58a4426a7578f523',1,'mimalloc-doc.h']]],
- ['mi_5frealpath',['mi_realpath',['../group__malloc.html#ga08cec32dd5bbe7da91c78d19f1b5bebe',1,'mimalloc-doc.h']]],
- ['mi_5frecalloc',['mi_recalloc',['../group__malloc.html#ga23a0fbb452b5dce8e31fab1a1958cacc',1,'mimalloc-doc.h']]],
- ['mi_5frecalloc_5faligned',['mi_recalloc_aligned',['../group__zeroinit.html#ga3e7e5c291acf1c7fd7ffd9914a9f945f',1,'mimalloc-doc.h']]],
- ['mi_5frecalloc_5faligned_5fat',['mi_recalloc_aligned_at',['../group__zeroinit.html#ga4ff5e92ad73585418a072c9d059e5cf9',1,'mimalloc-doc.h']]],
- ['mi_5fregister_5fdeferred_5ffree',['mi_register_deferred_free',['../group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece',1,'mimalloc-doc.h']]],
- ['mi_5fregister_5ferror',['mi_register_error',['../group__extended.html#gaa1d55e0e894be240827e5d87ec3a1f45',1,'mimalloc-doc.h']]],
- ['mi_5fregister_5foutput',['mi_register_output',['../group__extended.html#gae5b17ff027cd2150b43a33040250cf3f',1,'mimalloc-doc.h']]],
- ['mi_5freserve_5fhuge_5fos_5fpages_5fat',['mi_reserve_huge_os_pages_at',['../group__extended.html#ga7795a13d20087447281858d2c771cca1',1,'mimalloc-doc.h']]],
- ['mi_5freserve_5fhuge_5fos_5fpages_5finterleave',['mi_reserve_huge_os_pages_interleave',['../group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50',1,'mimalloc-doc.h']]],
- ['mi_5frezalloc',['mi_rezalloc',['../group__zeroinit.html#ga8c292e142110229a2980b37ab036dbc6',1,'mimalloc-doc.h']]],
- ['mi_5frezalloc_5faligned',['mi_rezalloc_aligned',['../group__zeroinit.html#gacd71a7bce96aab38ae6de17af2eb2cf0',1,'mimalloc-doc.h']]],
- ['mi_5frezalloc_5faligned_5fat',['mi_rezalloc_aligned_at',['../group__zeroinit.html#gae8b358c417e61d5307da002702b0a8e1',1,'mimalloc-doc.h']]],
- ['mi_5fsmall_5fsize_5fmax',['MI_SMALL_SIZE_MAX',['../group__extended.html#ga1ea64283508718d9d645c38efc2f4305',1,'mimalloc-doc.h']]],
- ['mi_5fstats_5fmerge',['mi_stats_merge',['../group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1',1,'mimalloc-doc.h']]],
- ['mi_5fstats_5fprint',['mi_stats_print',['../group__extended.html#ga2d126e5c62d3badc35445e5d84166df2',1,'mimalloc-doc.h']]],
- ['mi_5fstats_5fprint_5fout',['mi_stats_print_out',['../group__extended.html#ga537f13b299ddf801e49a5a94fde02c79',1,'mimalloc-doc.h']]],
- ['mi_5fstats_5freset',['mi_stats_reset',['../group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99',1,'mimalloc-doc.h']]],
- ['mi_5fstl_5fallocator',['mi_stl_allocator',['../group__cpp.html#structmi__stl__allocator',1,'']]],
- ['mi_5fstrdup',['mi_strdup',['../group__malloc.html#gac7cffe13f1f458ed16789488bf92b9b2',1,'mimalloc-doc.h']]],
- ['mi_5fstrndup',['mi_strndup',['../group__malloc.html#gaaabf971c2571891433477e2d21a35266',1,'mimalloc-doc.h']]],
- ['mi_5fthread_5fdone',['mi_thread_done',['../group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf',1,'mimalloc-doc.h']]],
- ['mi_5fthread_5finit',['mi_thread_init',['../group__extended.html#gaf8e73efc2cbca9ebfdfb166983a04c17',1,'mimalloc-doc.h']]],
- ['mi_5fthread_5fstats_5fprint_5fout',['mi_thread_stats_print_out',['../group__extended.html#gab1dac8476c46cb9eecab767eb40c1525',1,'mimalloc-doc.h']]],
- ['mi_5fusable_5fsize',['mi_usable_size',['../group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee',1,'mimalloc-doc.h']]],
- ['mi_5fvalloc',['mi_valloc',['../group__posix.html#ga73baaf5951f5165ba0763d0c06b6a93b',1,'mimalloc-doc.h']]],
- ['mi_5fzalloc',['mi_zalloc',['../group__malloc.html#gafdd9d8bb2986e668ba9884f28af38000',1,'mimalloc-doc.h']]],
- ['mi_5fzalloc_5faligned',['mi_zalloc_aligned',['../group__aligned.html#ga0cadbcf5b89a7b6fb171bc8df8734819',1,'mimalloc-doc.h']]],
- ['mi_5fzalloc_5faligned_5fat',['mi_zalloc_aligned_at',['../group__aligned.html#ga5f8c2353766db522565e642fafd8a3f8',1,'mimalloc-doc.h']]],
- ['mi_5fzalloc_5fsmall',['mi_zalloc_small',['../group__extended.html#ga220f29f40a44404b0061c15bc1c31152',1,'mimalloc-doc.h']]],
- ['mi_5fzalloc_5ftp',['mi_zalloc_tp',['../group__typed.html#gac77a61bdaf680a803785fe307820b48c',1,'mimalloc-doc.h']]]
+ ['mi_5f_5fposix_5fmemalign_12',['mi__posix_memalign',['../group__posix.html#gad5a69c8fea96aa2b7a7c818c2130090a',1,'mimalloc-doc.h']]],
+ ['mi_5faligned_5falloc_13',['mi_aligned_alloc',['../group__posix.html#ga1326d2e4388630b5f81ca7206318b8e5',1,'mimalloc-doc.h']]],
+ ['mi_5falignment_5fmax_14',['MI_ALIGNMENT_MAX',['../group__aligned.html#ga83c03016066b438f51a8095e9140be06',1,'mimalloc-doc.h']]],
+ ['mi_5fblock_5fvisit_5ffun_15',['mi_block_visit_fun',['../group__analysis.html#gadfa01e2900f0e5d515ad5506b26f6d65',1,'mimalloc-doc.h']]],
+ ['mi_5fcalloc_16',['mi_calloc',['../group__malloc.html#ga97fedb4f7107c592fd7f0f0a8949a57d',1,'mimalloc-doc.h']]],
+ ['mi_5fcalloc_5faligned_17',['mi_calloc_aligned',['../group__aligned.html#ga53dddb4724042a90315b94bc268fb4c9',1,'mimalloc-doc.h']]],
+ ['mi_5fcalloc_5faligned_5fat_18',['mi_calloc_aligned_at',['../group__aligned.html#ga08647c4593f3b2eef24a919a73eba3a3',1,'mimalloc-doc.h']]],
+ ['mi_5fcalloc_5ftp_19',['mi_calloc_tp',['../group__typed.html#gae80c47c9d4cab10961fff1a8ac98fc07',1,'mimalloc-doc.h']]],
+ ['mi_5fcfree_20',['mi_cfree',['../group__posix.html#ga705dc7a64bffacfeeb0141501a5c35d7',1,'mimalloc-doc.h']]],
+ ['mi_5fcheck_5fowned_21',['mi_check_owned',['../group__analysis.html#ga628c237489c2679af84a4d0d143b3dd5',1,'mimalloc-doc.h']]],
+ ['mi_5fcollect_22',['mi_collect',['../group__extended.html#ga421430e2226d7d468529cec457396756',1,'mimalloc-doc.h']]],
+ ['mi_5fdeferred_5ffree_5ffun_23',['mi_deferred_free_fun',['../group__extended.html#ga299dae78d25ce112e384a98b7309c5be',1,'mimalloc-doc.h']]],
+ ['mi_5ferror_5ffun_24',['mi_error_fun',['../group__extended.html#ga251d369cda3f1c2a955c555486ed90e5',1,'mimalloc-doc.h']]],
+ ['mi_5fexpand_25',['mi_expand',['../group__malloc.html#gaaee66a1d483c3e28f585525fb96707e4',1,'mimalloc-doc.h']]],
+ ['mi_5ffree_26',['mi_free',['../group__malloc.html#gaf2c7b89c327d1f60f59e68b9ea644d95',1,'mimalloc-doc.h']]],
+ ['mi_5ffree_5faligned_27',['mi_free_aligned',['../group__posix.html#ga0d28d5cf61e6bfbb18c63092939fe5c9',1,'mimalloc-doc.h']]],
+ ['mi_5ffree_5fsize_28',['mi_free_size',['../group__posix.html#gae01389eedab8d67341ff52e2aad80ebb',1,'mimalloc-doc.h']]],
+ ['mi_5ffree_5fsize_5faligned_29',['mi_free_size_aligned',['../group__posix.html#ga72e9d7ffb5fe94d69bc722c8506e27bc',1,'mimalloc-doc.h']]],
+ ['mi_5fgood_5fsize_30',['mi_good_size',['../group__extended.html#gac057927cd06c854b45fe7847e921bd47',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5farea_5ft_31',['mi_heap_area_t',['../group__analysis.html#structmi__heap__area__t',1,'']]],
+ ['mi_5fheap_5fcalloc_32',['mi_heap_calloc',['../group__heap.html#gaa6702b3c48e9e53e50e81b36f5011d55',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fcalloc_5faligned_33',['mi_heap_calloc_aligned',['../group__heap.html#ga4af03a6e2b93fae77424d93f889705c3',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fcalloc_5faligned_5fat_34',['mi_heap_calloc_aligned_at',['../group__heap.html#ga08ca6419a5c057a4d965868998eef487',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fcalloc_5ftp_35',['mi_heap_calloc_tp',['../group__typed.html#ga4e5d1f1707c90e5f55e023ac5f45fe74',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fcheck_5fowned_36',['mi_heap_check_owned',['../group__analysis.html#ga0d67c1789faaa15ff366c024fcaf6377',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fcollect_37',['mi_heap_collect',['../group__heap.html#ga7922f7495cde30b1984d0e6072419298',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fcontains_5fblock_38',['mi_heap_contains_block',['../group__analysis.html#gaa862aa8ed8d57d84cae41fc1022d71af',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fdelete_39',['mi_heap_delete',['../group__heap.html#ga2ab1af8d438819b55319c7ef51d1e409',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fdestroy_40',['mi_heap_destroy',['../group__heap.html#ga9f9c0844edb9717f4feacd79116b8e0d',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fget_5fbacking_41',['mi_heap_get_backing',['../group__heap.html#ga5d03fbe062ffcf38f0f417fd968357fc',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fget_5fdefault_42',['mi_heap_get_default',['../group__heap.html#ga8db4cbb87314a989a9a187464d6b5e05',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fmalloc_43',['mi_heap_malloc',['../group__heap.html#ga9cbed01e42c0647907295de92c3fa296',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fmalloc_5faligned_44',['mi_heap_malloc_aligned',['../group__heap.html#gab5b87e1805306f70df38789fcfcf6653',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fmalloc_5faligned_5fat_45',['mi_heap_malloc_aligned_at',['../group__heap.html#ga23acd7680fb0976dde3783254c6c874b',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fmalloc_5fsmall_46',['mi_heap_malloc_small',['../group__heap.html#gaa1a1c7a1f4da6826b5a25b70ef878368',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fmalloc_5ftp_47',['mi_heap_malloc_tp',['../group__typed.html#ga653bcb24ac495bc19940ecd6898f9cd7',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fmallocn_48',['mi_heap_mallocn',['../group__heap.html#ga851da6c43fe0b71c1376cee8aef90db0',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fmallocn_5ftp_49',['mi_heap_mallocn_tp',['../group__typed.html#ga6b75cb9c4b9c647661d0924552dc6e83',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fnew_50',['mi_heap_new',['../group__heap.html#ga766f672ba56f2fbfeb9d9dbb0b7f6b11',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frealloc_51',['mi_heap_realloc',['../group__heap.html#gaaef3395f66be48f37bdc8322509c5d81',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frealloc_5faligned_52',['mi_heap_realloc_aligned',['../group__heap.html#gafc603b696bd14cae6da28658f950d98c',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frealloc_5faligned_5fat_53',['mi_heap_realloc_aligned_at',['../group__heap.html#gaf96c788a1bf553fe2d371de9365e047c',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5freallocf_54',['mi_heap_reallocf',['../group__heap.html#ga4a21070eb4e7cce018133c8d5f4b0527',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5freallocn_55',['mi_heap_reallocn',['../group__heap.html#gac74e94ad9b0c9b57c1c4d88b8825b7a8',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5freallocn_5ftp_56',['mi_heap_reallocn_tp',['../group__typed.html#gaf213d5422ec35e7f6caad827c79bc948',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frealpath_57',['mi_heap_realpath',['../group__heap.html#ga00e95ba1e01acac3cfd95bb7a357a6f0',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frecalloc_58',['mi_heap_recalloc',['../group__zeroinit.html#ga8648c5fbb22a80f0262859099f06dfbd',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frecalloc_5faligned_59',['mi_heap_recalloc_aligned',['../group__zeroinit.html#ga9f3f999396c8f77ca5e80e7b40ac29e3',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frecalloc_5faligned_5fat_60',['mi_heap_recalloc_aligned_at',['../group__zeroinit.html#ga496452c96f1de8c500be9fddf52edaf7',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frecalloc_5ftp_61',['mi_heap_recalloc_tp',['../group__typed.html#ga3e50a1600958fcaf1a7f3560c9174f9e',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frezalloc_62',['mi_heap_rezalloc',['../group__zeroinit.html#gacfad83f14eb5d6a42a497a898e19fc76',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frezalloc_5faligned_63',['mi_heap_rezalloc_aligned',['../group__zeroinit.html#ga375fa8a611c51905e592d5d467c49664',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frezalloc_5faligned_5fat_64',['mi_heap_rezalloc_aligned_at',['../group__zeroinit.html#gac90da54fa7e5d10bdc97ce0b51dce2eb',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fset_5fdefault_65',['mi_heap_set_default',['../group__heap.html#gab8631ec88c8d26641b68b5d25dcd4422',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fstrdup_66',['mi_heap_strdup',['../group__heap.html#ga139d6b09dbf50c3c2523d0f4d1cfdeb5',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fstrndup_67',['mi_heap_strndup',['../group__heap.html#ga8e3dbd46650dd26573cf307a2c8f1f5a',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5ft_68',['mi_heap_t',['../group__heap.html#ga34a47cde5a5b38c29f1aa3c5e76943c2',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fvisit_5fblocks_69',['mi_heap_visit_blocks',['../group__analysis.html#ga70c46687dc6e9dc98b232b02646f8bed',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fzalloc_70',['mi_heap_zalloc',['../group__heap.html#ga903104592c8ed53417a3762da6241133',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fzalloc_5faligned_71',['mi_heap_zalloc_aligned',['../group__heap.html#gaa450a59c6c7ae5fdbd1c2b80a8329ef0',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fzalloc_5faligned_5fat_72',['mi_heap_zalloc_aligned_at',['../group__heap.html#ga45fb43a62776fbebbdf1edd99b527954',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fzalloc_5ftp_73',['mi_heap_zalloc_tp',['../group__typed.html#gad6e87e86e994aa14416ae9b5d4c188fe',1,'mimalloc-doc.h']]],
+ ['mi_5fis_5fin_5fheap_5fregion_74',['mi_is_in_heap_region',['../group__extended.html#ga5f071b10d4df1c3658e04e7fd67a94e6',1,'mimalloc-doc.h']]],
+ ['mi_5fis_5fredirected_75',['mi_is_redirected',['../group__extended.html#gaad25050b19f30cd79397b227e0157a3f',1,'mimalloc-doc.h']]],
+ ['mi_5fmalloc_76',['mi_malloc',['../group__malloc.html#ga3406e8b168bc74c8637b11571a6da83a',1,'mimalloc-doc.h']]],
+ ['mi_5fmalloc_5faligned_77',['mi_malloc_aligned',['../group__aligned.html#ga68930196751fa2cca9e1fd0d71bade56',1,'mimalloc-doc.h']]],
+ ['mi_5fmalloc_5faligned_5fat_78',['mi_malloc_aligned_at',['../group__aligned.html#ga5850da130c936bd77db039dcfbc8295d',1,'mimalloc-doc.h']]],
+ ['mi_5fmalloc_5fsize_79',['mi_malloc_size',['../group__posix.html#ga4531c9e775bb3ae12db57c1ba8a5d7de',1,'mimalloc-doc.h']]],
+ ['mi_5fmalloc_5fsmall_80',['mi_malloc_small',['../group__extended.html#ga7136c2e55cb22c98ecf95d08d6debb99',1,'mimalloc-doc.h']]],
+ ['mi_5fmalloc_5ftp_81',['mi_malloc_tp',['../group__typed.html#ga0619a62c5fd886f1016030abe91f0557',1,'mimalloc-doc.h']]],
+ ['mi_5fmalloc_5fusable_5fsize_82',['mi_malloc_usable_size',['../group__posix.html#ga06d07cf357bbac5c73ba5d0c0c421e17',1,'mimalloc-doc.h']]],
+ ['mi_5fmallocn_83',['mi_mallocn',['../group__malloc.html#ga0b05e2bf0f73e7401ae08597ff782ac6',1,'mimalloc-doc.h']]],
+ ['mi_5fmallocn_5ftp_84',['mi_mallocn_tp',['../group__typed.html#gae5cb6e0fafc9f23169c5622e077afe8b',1,'mimalloc-doc.h']]],
+ ['mi_5fmanage_5fos_5fmemory_85',['mi_manage_os_memory',['../group__extended.html#ga4c6486a1fdcd7a423b5f25fe4be8e0cf',1,'mimalloc-doc.h']]],
+ ['mi_5fmemalign_86',['mi_memalign',['../group__posix.html#gaab7fa71ea93b96873f5d9883db57d40e',1,'mimalloc-doc.h']]],
+ ['mi_5fnew_87',['mi_new',['../group__cpp.html#gaad048a9fce3d02c5909cd05c6ec24545',1,'mimalloc-doc.h']]],
+ ['mi_5fnew_5faligned_88',['mi_new_aligned',['../group__cpp.html#gaef2c2bdb4f70857902d3c8903ac095f3',1,'mimalloc-doc.h']]],
+ ['mi_5fnew_5faligned_5fnothrow_89',['mi_new_aligned_nothrow',['../group__cpp.html#gab5e29558926d934c3f1cae8c815f942c',1,'mimalloc-doc.h']]],
+ ['mi_5fnew_5fn_90',['mi_new_n',['../group__cpp.html#gae7bc4f56cd57ed3359060ff4f38bda81',1,'mimalloc-doc.h']]],
+ ['mi_5fnew_5fnothrow_91',['mi_new_nothrow',['../group__cpp.html#gaeaded64eda71ed6b1d569d3e723abc4a',1,'mimalloc-doc.h']]],
+ ['mi_5fnew_5frealloc_92',['mi_new_realloc',['../group__cpp.html#gaab78a32f55149e9fbf432d5288e38e1e',1,'mimalloc-doc.h']]],
+ ['mi_5fnew_5freallocn_93',['mi_new_reallocn',['../group__cpp.html#ga756f4b2bc6a7ecd0a90baea8e90c7907',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fdisable_94',['mi_option_disable',['../group__options.html#gaebf6ff707a2e688ebb1a2296ca564054',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5feager_5fcommit_95',['mi_option_eager_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca1e8de72c93da7ff22d91e1e27b52ac2b',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5feager_5fcommit_5fdelay_96',['mi_option_eager_commit_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5feager_5fregion_5fcommit_97',['mi_option_eager_region_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca32ce97ece29f69e82579679cf8a307ad',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fenable_98',['mi_option_enable',['../group__options.html#ga04180ae41b0d601421dd62ced40ca050',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fget_99',['mi_option_get',['../group__options.html#ga7e8af195cc81d3fa64ccf2662caa565a',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fis_5fenabled_100',['mi_option_is_enabled',['../group__options.html#ga459ad98f18b3fc9275474807fe0ca188',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5flarge_5fos_5fpages_101',['mi_option_large_os_pages',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4192d491200d0055df0554d4cf65054e',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fos_5ftag_102',['mi_option_os_tag',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4b74ae2a69e445de6c2361b73c1d14bf',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fpage_5freset_103',['mi_option_page_reset',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cada854dd272c66342f18a93ee254a2968',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5freserve_5fhuge_5fos_5fpages_104',['mi_option_reserve_huge_os_pages',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caca7ed041be3b0b9d0b82432c7bf41af2',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5freserve_5fhuge_5fos_5fpages_5fat_105',['mi_option_reserve_huge_os_pages_at',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caa13e7926d4339d2aa6fbf61d4473fd5c',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5freset_5fdecommits_106',['mi_option_reset_decommits',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cac81ee965b130fa81238913a3c239d536',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5freset_5fdelay_107',['mi_option_reset_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca154fe170131d5212cff57e22b99523c5',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fsegment_5fcache_108',['mi_option_segment_cache',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca2ecbe7ef32f5c84de3739aa4f0b805a1',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fsegment_5freset_109',['mi_option_segment_reset',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafb121d30d87591850d5410ccc3a95c6d',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fset_110',['mi_option_set',['../group__options.html#gaf84921c32375e25754dc2ee6a911fa60',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fset_5fdefault_111',['mi_option_set_default',['../group__options.html#ga7ef623e440e6e5545cb08c94e71e4b90',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fset_5fenabled_112',['mi_option_set_enabled',['../group__options.html#ga9a13d05fcb77489cb06d4d017ebd8bed',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fset_5fenabled_5fdefault_113',['mi_option_set_enabled_default',['../group__options.html#ga65518b69ec5d32336b50e07f74b3f629',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fshow_5ferrors_114',['mi_option_show_errors',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafbf4822e5c00732c5984b32a032837f0',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fshow_5fstats_115',['mi_option_show_stats',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0957ef73b2550764b4840edf48422fda',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5ft_116',['mi_option_t',['../group__options.html#gafebf7ed116adb38ae5218bc3ce06884c',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fuse_5fnuma_5fnodes_117',['mi_option_use_numa_nodes',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0ac33a18f6b659fcfaf44efb0bab1b74',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fverbose_118',['mi_option_verbose',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca7c8b7bf5281c581bad64f5daa6442777',1,'mimalloc-doc.h']]],
+ ['mi_5foutput_5ffun_119',['mi_output_fun',['../group__extended.html#gad823d23444a4b77a40f66bf075a98a0c',1,'mimalloc-doc.h']]],
+ ['mi_5fposix_5fmemalign_120',['mi_posix_memalign',['../group__posix.html#gacff84f226ba9feb2031b8992e5579447',1,'mimalloc-doc.h']]],
+ ['mi_5fprocess_5finfo_121',['mi_process_info',['../group__extended.html#ga7d862c2affd5790381da14eb102a364d',1,'mimalloc-doc.h']]],
+ ['mi_5fpvalloc_122',['mi_pvalloc',['../group__posix.html#gaeb325c39b887d3b90d85d1eb1712fb1e',1,'mimalloc-doc.h']]],
+ ['mi_5frealloc_123',['mi_realloc',['../group__malloc.html#gaf11eb497da57bdfb2de65eb191c69db6',1,'mimalloc-doc.h']]],
+ ['mi_5frealloc_5faligned_124',['mi_realloc_aligned',['../group__aligned.html#ga4028d1cf4aa4c87c880747044a8322ae',1,'mimalloc-doc.h']]],
+ ['mi_5frealloc_5faligned_5fat_125',['mi_realloc_aligned_at',['../group__aligned.html#gaf66a9ae6c6f08bd6be6fb6ea771faffb',1,'mimalloc-doc.h']]],
+ ['mi_5freallocarr_126',['mi_reallocarr',['../group__posix.html#ga7e1934d60a3e697950eeb48e042bfad5',1,'mimalloc-doc.h']]],
+ ['mi_5freallocarray_127',['mi_reallocarray',['../group__posix.html#ga48fad8648a2f1dab9c87ea9448a52088',1,'mimalloc-doc.h']]],
+ ['mi_5freallocf_128',['mi_reallocf',['../group__malloc.html#gafe68ac7c5e24a65cd55c9d6b152211a0',1,'mimalloc-doc.h']]],
+ ['mi_5freallocn_129',['mi_reallocn',['../group__malloc.html#ga61d57b4144ba24fba5c1e9b956d13853',1,'mimalloc-doc.h']]],
+ ['mi_5freallocn_5ftp_130',['mi_reallocn_tp',['../group__typed.html#ga1158b49a55dfa81f58a4426a7578f523',1,'mimalloc-doc.h']]],
+ ['mi_5frealpath_131',['mi_realpath',['../group__malloc.html#ga08cec32dd5bbe7da91c78d19f1b5bebe',1,'mimalloc-doc.h']]],
+ ['mi_5frecalloc_132',['mi_recalloc',['../group__malloc.html#ga23a0fbb452b5dce8e31fab1a1958cacc',1,'mimalloc-doc.h']]],
+ ['mi_5frecalloc_5faligned_133',['mi_recalloc_aligned',['../group__zeroinit.html#ga3e7e5c291acf1c7fd7ffd9914a9f945f',1,'mimalloc-doc.h']]],
+ ['mi_5frecalloc_5faligned_5fat_134',['mi_recalloc_aligned_at',['../group__zeroinit.html#ga4ff5e92ad73585418a072c9d059e5cf9',1,'mimalloc-doc.h']]],
+ ['mi_5fregister_5fdeferred_5ffree_135',['mi_register_deferred_free',['../group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece',1,'mimalloc-doc.h']]],
+ ['mi_5fregister_5ferror_136',['mi_register_error',['../group__extended.html#gaa1d55e0e894be240827e5d87ec3a1f45',1,'mimalloc-doc.h']]],
+ ['mi_5fregister_5foutput_137',['mi_register_output',['../group__extended.html#gae5b17ff027cd2150b43a33040250cf3f',1,'mimalloc-doc.h']]],
+ ['mi_5freserve_5fhuge_5fos_5fpages_5fat_138',['mi_reserve_huge_os_pages_at',['../group__extended.html#ga7795a13d20087447281858d2c771cca1',1,'mimalloc-doc.h']]],
+ ['mi_5freserve_5fhuge_5fos_5fpages_5finterleave_139',['mi_reserve_huge_os_pages_interleave',['../group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50',1,'mimalloc-doc.h']]],
+ ['mi_5freserve_5fos_5fmemory_140',['mi_reserve_os_memory',['../group__extended.html#ga00ec3324b6b2591c7fe3677baa30a767',1,'mimalloc-doc.h']]],
+ ['mi_5frezalloc_141',['mi_rezalloc',['../group__zeroinit.html#ga8c292e142110229a2980b37ab036dbc6',1,'mimalloc-doc.h']]],
+ ['mi_5frezalloc_5faligned_142',['mi_rezalloc_aligned',['../group__zeroinit.html#gacd71a7bce96aab38ae6de17af2eb2cf0',1,'mimalloc-doc.h']]],
+ ['mi_5frezalloc_5faligned_5fat_143',['mi_rezalloc_aligned_at',['../group__zeroinit.html#gae8b358c417e61d5307da002702b0a8e1',1,'mimalloc-doc.h']]],
+ ['mi_5fsmall_5fsize_5fmax_144',['MI_SMALL_SIZE_MAX',['../group__extended.html#ga1ea64283508718d9d645c38efc2f4305',1,'mimalloc-doc.h']]],
+ ['mi_5fstats_5fmerge_145',['mi_stats_merge',['../group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1',1,'mimalloc-doc.h']]],
+ ['mi_5fstats_5fprint_146',['mi_stats_print',['../group__extended.html#ga2d126e5c62d3badc35445e5d84166df2',1,'mimalloc-doc.h']]],
+ ['mi_5fstats_5fprint_5fout_147',['mi_stats_print_out',['../group__extended.html#ga537f13b299ddf801e49a5a94fde02c79',1,'mimalloc-doc.h']]],
+ ['mi_5fstats_5freset_148',['mi_stats_reset',['../group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99',1,'mimalloc-doc.h']]],
+ ['mi_5fstl_5fallocator_149',['mi_stl_allocator',['../group__cpp.html#structmi__stl__allocator',1,'']]],
+ ['mi_5fstrdup_150',['mi_strdup',['../group__malloc.html#gac7cffe13f1f458ed16789488bf92b9b2',1,'mimalloc-doc.h']]],
+ ['mi_5fstrndup_151',['mi_strndup',['../group__malloc.html#gaaabf971c2571891433477e2d21a35266',1,'mimalloc-doc.h']]],
+ ['mi_5fthread_5fdone_152',['mi_thread_done',['../group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf',1,'mimalloc-doc.h']]],
+ ['mi_5fthread_5finit_153',['mi_thread_init',['../group__extended.html#gaf8e73efc2cbca9ebfdfb166983a04c17',1,'mimalloc-doc.h']]],
+ ['mi_5fthread_5fstats_5fprint_5fout_154',['mi_thread_stats_print_out',['../group__extended.html#gab1dac8476c46cb9eecab767eb40c1525',1,'mimalloc-doc.h']]],
+ ['mi_5fusable_5fsize_155',['mi_usable_size',['../group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee',1,'mimalloc-doc.h']]],
+ ['mi_5fvalloc_156',['mi_valloc',['../group__posix.html#ga73baaf5951f5165ba0763d0c06b6a93b',1,'mimalloc-doc.h']]],
+ ['mi_5fzalloc_157',['mi_zalloc',['../group__malloc.html#gafdd9d8bb2986e668ba9884f28af38000',1,'mimalloc-doc.h']]],
+ ['mi_5fzalloc_5faligned_158',['mi_zalloc_aligned',['../group__aligned.html#ga0cadbcf5b89a7b6fb171bc8df8734819',1,'mimalloc-doc.h']]],
+ ['mi_5fzalloc_5faligned_5fat_159',['mi_zalloc_aligned_at',['../group__aligned.html#ga5f8c2353766db522565e642fafd8a3f8',1,'mimalloc-doc.h']]],
+ ['mi_5fzalloc_5fsmall_160',['mi_zalloc_small',['../group__extended.html#ga220f29f40a44404b0061c15bc1c31152',1,'mimalloc-doc.h']]],
+ ['mi_5fzalloc_5ftp_161',['mi_zalloc_tp',['../group__typed.html#gac77a61bdaf680a803785fe307820b48c',1,'mimalloc-doc.h']]]
];
diff --git a/docs/search/all_7.html b/docs/search/all_7.html
index 9cd0196..8ddbf6c 100644
--- a/docs/search/all_7.html
+++ b/docs/search/all_7.html
[hunks omitted: markup-only changes to this generated Doxygen search page]
diff --git a/docs/search/all_7.js b/docs/search/all_7.js
index df03c8d..8f296aa 100644
--- a/docs/search/all_7.js
+++ b/docs/search/all_7.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['overriding_20malloc',['Overriding Malloc',['../overrides.html',1,'']]]
+ ['overriding_20malloc_162',['Overriding Malloc',['../overrides.html',1,'']]]
];
diff --git a/docs/search/all_8.html b/docs/search/all_8.html
index 1e8fb9c..83c55ae 100644
--- a/docs/search/all_8.html
+++ b/docs/search/all_8.html
[hunks omitted: markup-only changes to this generated Doxygen search page]
diff --git a/docs/search/all_8.js b/docs/search/all_8.js
index 0651bcc..a9caa77 100644
--- a/docs/search/all_8.js
+++ b/docs/search/all_8.js
@@ -1,5 +1,5 @@
var searchData=
[
- ['performance',['Performance',['../bench.html',1,'']]],
- ['posix',['Posix',['../group__posix.html',1,'']]]
+ ['performance_163',['Performance',['../bench.html',1,'']]],
+ ['posix_164',['Posix',['../group__posix.html',1,'']]]
];
diff --git a/docs/search/all_9.html b/docs/search/all_9.html
index 27df366..1e263c1 100644
--- a/docs/search/all_9.html
+++ b/docs/search/all_9.html
[hunks omitted: markup-only changes to this generated Doxygen search page]
diff --git a/docs/search/all_9.js b/docs/search/all_9.js
index cd78624..f6b4ba3 100644
--- a/docs/search/all_9.js
+++ b/docs/search/all_9.js
@@ -1,5 +1,5 @@
var searchData=
[
- ['runtime_20options',['Runtime Options',['../group__options.html',1,'']]],
- ['reserved',['reserved',['../group__analysis.html#ae848a3e6840414891035423948ca0383',1,'mi_heap_area_t']]]
+ ['reserved_165',['reserved',['../group__analysis.html#ae848a3e6840414891035423948ca0383',1,'mi_heap_area_t']]],
+ ['runtime_20options_166',['Runtime Options',['../group__options.html',1,'']]]
];
diff --git a/docs/search/all_a.html b/docs/search/all_a.html
index 63f9254..3a6cac1 100644
--- a/docs/search/all_a.html
+++ b/docs/search/all_a.html
[hunks omitted: markup-only changes to this generated Doxygen search page]
diff --git a/docs/search/all_a.js b/docs/search/all_a.js
index 647887f..699b545 100644
--- a/docs/search/all_a.js
+++ b/docs/search/all_a.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['typed_20macros',['Typed Macros',['../group__typed.html',1,'']]]
+ ['typed_20macros_167',['Typed Macros',['../group__typed.html',1,'']]]
];
diff --git a/docs/search/all_b.html b/docs/search/all_b.html
index 44ae3e4..130deb4 100644
--- a/docs/search/all_b.html
+++ b/docs/search/all_b.html
[hunks omitted: markup-only changes to this generated Doxygen search page]
diff --git a/docs/search/all_b.js b/docs/search/all_b.js
index 2bc3fb6..73a2671 100644
--- a/docs/search/all_b.js
+++ b/docs/search/all_b.js
@@ -1,5 +1,5 @@
var searchData=
[
- ['used',['used',['../group__analysis.html#ab820302c5cd0df133eb8e51650a008b4',1,'mi_heap_area_t']]],
- ['using_20the_20library',['Using the library',['../using.html',1,'']]]
+ ['used_168',['used',['../group__analysis.html#ab820302c5cd0df133eb8e51650a008b4',1,'mi_heap_area_t']]],
+ ['using_20the_20library_169',['Using the library',['../using.html',1,'']]]
];
diff --git a/docs/search/all_c.html b/docs/search/all_c.html
index 3de1586..3dd5af0 100644
--- a/docs/search/all_c.html
+++ b/docs/search/all_c.html
[hunks omitted: markup-only changes to this generated Doxygen search page]
diff --git a/docs/search/all_c.js b/docs/search/all_c.js
index 2b9b4ce..192fb1c 100644
--- a/docs/search/all_c.js
+++ b/docs/search/all_c.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['zero_20initialized_20re_2dallocation',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]]
+ ['zero_20initialized_20re_2dallocation_170',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]]
];
diff --git a/docs/search/classes_0.html b/docs/search/classes_0.html
index b3c6ec6..af8159e 100644
--- a/docs/search/classes_0.html
+++ b/docs/search/classes_0.html
[hunks omitted: markup-only changes to this generated Doxygen search page]
diff --git a/docs/search/classes_0.js b/docs/search/classes_0.js
index 0010dd9..e3770fb 100644
--- a/docs/search/classes_0.js
+++ b/docs/search/classes_0.js
@@ -1,5 +1,5 @@
var searchData=
[
- ['mi_5fheap_5farea_5ft',['mi_heap_area_t',['../group__analysis.html#structmi__heap__area__t',1,'']]],
- ['mi_5fstl_5fallocator',['mi_stl_allocator',['../group__cpp.html#structmi__stl__allocator',1,'']]]
+ ['mi_5fheap_5farea_5ft_171',['mi_heap_area_t',['../group__analysis.html#structmi__heap__area__t',1,'']]],
+ ['mi_5fstl_5fallocator_172',['mi_stl_allocator',['../group__cpp.html#structmi__stl__allocator',1,'']]]
];
diff --git a/docs/search/enums_0.html b/docs/search/enums_0.html
index 7040a9c..141fff5 100644
--- a/docs/search/enums_0.html
+++ b/docs/search/enums_0.html
[hunks omitted: markup-only changes to this generated Doxygen search page]
diff --git a/docs/search/enums_0.js b/docs/search/enums_0.js
index f0c1ba5..6f1f383 100644
--- a/docs/search/enums_0.js
+++ b/docs/search/enums_0.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['mi_5foption_5ft',['mi_option_t',['../group__options.html#gafebf7ed116adb38ae5218bc3ce06884c',1,'mimalloc-doc.h']]]
+ ['mi_5foption_5ft_296',['mi_option_t',['../group__options.html#gafebf7ed116adb38ae5218bc3ce06884c',1,'mimalloc-doc.h']]]
];
diff --git a/docs/search/enumvalues_0.html b/docs/search/enumvalues_0.html
index 78895c7..0d131d9 100644
--- a/docs/search/enumvalues_0.html
+++ b/docs/search/enumvalues_0.html
[hunks omitted: markup-only changes to this generated Doxygen search page]
diff --git a/docs/search/enumvalues_0.js b/docs/search/enumvalues_0.js
index 7054b6d..1aca63b 100644
--- a/docs/search/enumvalues_0.js
+++ b/docs/search/enumvalues_0.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['_5fmi_5foption_5flast',['_mi_option_last',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca5b4357b74be0d87568036c32eb1a2e4a',1,'mimalloc-doc.h']]]
+ ['_5fmi_5foption_5flast_297',['_mi_option_last',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca5b4357b74be0d87568036c32eb1a2e4a',1,'mimalloc-doc.h']]]
];
diff --git a/docs/search/enumvalues_1.html b/docs/search/enumvalues_1.html
index 9b02a4b..cd9187a 100644
--- a/docs/search/enumvalues_1.html
+++ b/docs/search/enumvalues_1.html
[hunks omitted: markup-only changes to this generated Doxygen search page]
diff --git a/docs/search/enumvalues_1.js b/docs/search/enumvalues_1.js
index 3b71270..bd525bb 100644
--- a/docs/search/enumvalues_1.js
+++ b/docs/search/enumvalues_1.js
@@ -1,18 +1,19 @@
var searchData=
[
- ['mi_5foption_5feager_5fcommit',['mi_option_eager_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca1e8de72c93da7ff22d91e1e27b52ac2b',1,'mimalloc-doc.h']]],
- ['mi_5foption_5feager_5fcommit_5fdelay',['mi_option_eager_commit_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c',1,'mimalloc-doc.h']]],
- ['mi_5foption_5feager_5fregion_5fcommit',['mi_option_eager_region_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca32ce97ece29f69e82579679cf8a307ad',1,'mimalloc-doc.h']]],
- ['mi_5foption_5flarge_5fos_5fpages',['mi_option_large_os_pages',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4192d491200d0055df0554d4cf65054e',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fos_5ftag',['mi_option_os_tag',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4b74ae2a69e445de6c2361b73c1d14bf',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fpage_5freset',['mi_option_page_reset',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cada854dd272c66342f18a93ee254a2968',1,'mimalloc-doc.h']]],
- ['mi_5foption_5freserve_5fhuge_5fos_5fpages',['mi_option_reserve_huge_os_pages',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caca7ed041be3b0b9d0b82432c7bf41af2',1,'mimalloc-doc.h']]],
- ['mi_5foption_5freset_5fdecommits',['mi_option_reset_decommits',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cac81ee965b130fa81238913a3c239d536',1,'mimalloc-doc.h']]],
- ['mi_5foption_5freset_5fdelay',['mi_option_reset_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca154fe170131d5212cff57e22b99523c5',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fsegment_5fcache',['mi_option_segment_cache',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca2ecbe7ef32f5c84de3739aa4f0b805a1',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fsegment_5freset',['mi_option_segment_reset',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafb121d30d87591850d5410ccc3a95c6d',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fshow_5ferrors',['mi_option_show_errors',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafbf4822e5c00732c5984b32a032837f0',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fshow_5fstats',['mi_option_show_stats',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0957ef73b2550764b4840edf48422fda',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fuse_5fnuma_5fnodes',['mi_option_use_numa_nodes',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0ac33a18f6b659fcfaf44efb0bab1b74',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fverbose',['mi_option_verbose',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca7c8b7bf5281c581bad64f5daa6442777',1,'mimalloc-doc.h']]]
+ ['mi_5foption_5feager_5fcommit_298',['mi_option_eager_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca1e8de72c93da7ff22d91e1e27b52ac2b',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5feager_5fcommit_5fdelay_299',['mi_option_eager_commit_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca17a190c25be381142d87e0468c4c068c',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5feager_5fregion_5fcommit_300',['mi_option_eager_region_commit',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca32ce97ece29f69e82579679cf8a307ad',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5flarge_5fos_5fpages_301',['mi_option_large_os_pages',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4192d491200d0055df0554d4cf65054e',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fos_5ftag_302',['mi_option_os_tag',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca4b74ae2a69e445de6c2361b73c1d14bf',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fpage_5freset_303',['mi_option_page_reset',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cada854dd272c66342f18a93ee254a2968',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5freserve_5fhuge_5fos_5fpages_304',['mi_option_reserve_huge_os_pages',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caca7ed041be3b0b9d0b82432c7bf41af2',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5freserve_5fhuge_5fos_5fpages_5fat_305',['mi_option_reserve_huge_os_pages_at',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884caa13e7926d4339d2aa6fbf61d4473fd5c',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5freset_5fdecommits_306',['mi_option_reset_decommits',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cac81ee965b130fa81238913a3c239d536',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5freset_5fdelay_307',['mi_option_reset_delay',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca154fe170131d5212cff57e22b99523c5',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fsegment_5fcache_308',['mi_option_segment_cache',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca2ecbe7ef32f5c84de3739aa4f0b805a1',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fsegment_5freset_309',['mi_option_segment_reset',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafb121d30d87591850d5410ccc3a95c6d',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fshow_5ferrors_310',['mi_option_show_errors',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884cafbf4822e5c00732c5984b32a032837f0',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fshow_5fstats_311',['mi_option_show_stats',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0957ef73b2550764b4840edf48422fda',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fuse_5fnuma_5fnodes_312',['mi_option_use_numa_nodes',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca0ac33a18f6b659fcfaf44efb0bab1b74',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fverbose_313',['mi_option_verbose',['../group__options.html#ggafebf7ed116adb38ae5218bc3ce06884ca7c8b7bf5281c581bad64f5daa6442777',1,'mimalloc-doc.h']]]
];
diff --git a/docs/search/functions_0.html b/docs/search/functions_0.html
index bc73761..eb4c501 100644
--- a/docs/search/functions_0.html
+++ b/docs/search/functions_0.html
[hunks omitted: markup-only changes to this generated Doxygen search page]
diff --git a/docs/search/functions_0.js b/docs/search/functions_0.js
index b188b27..b44917a 100644
--- a/docs/search/functions_0.js
+++ b/docs/search/functions_0.js
@@ -1,113 +1,116 @@
var searchData=
[
- ['mi_5f_5fposix_5fmemalign',['mi__posix_memalign',['../group__posix.html#gad5a69c8fea96aa2b7a7c818c2130090a',1,'mimalloc-doc.h']]],
- ['mi_5faligned_5falloc',['mi_aligned_alloc',['../group__posix.html#ga1326d2e4388630b5f81ca7206318b8e5',1,'mimalloc-doc.h']]],
- ['mi_5fcalloc',['mi_calloc',['../group__malloc.html#ga97fedb4f7107c592fd7f0f0a8949a57d',1,'mimalloc-doc.h']]],
- ['mi_5fcalloc_5faligned',['mi_calloc_aligned',['../group__aligned.html#ga53dddb4724042a90315b94bc268fb4c9',1,'mimalloc-doc.h']]],
- ['mi_5fcalloc_5faligned_5fat',['mi_calloc_aligned_at',['../group__aligned.html#ga08647c4593f3b2eef24a919a73eba3a3',1,'mimalloc-doc.h']]],
- ['mi_5fcfree',['mi_cfree',['../group__posix.html#ga705dc7a64bffacfeeb0141501a5c35d7',1,'mimalloc-doc.h']]],
- ['mi_5fcheck_5fowned',['mi_check_owned',['../group__analysis.html#ga628c237489c2679af84a4d0d143b3dd5',1,'mimalloc-doc.h']]],
- ['mi_5fcollect',['mi_collect',['../group__extended.html#ga421430e2226d7d468529cec457396756',1,'mimalloc-doc.h']]],
- ['mi_5fexpand',['mi_expand',['../group__malloc.html#gaaee66a1d483c3e28f585525fb96707e4',1,'mimalloc-doc.h']]],
- ['mi_5ffree',['mi_free',['../group__malloc.html#gaf2c7b89c327d1f60f59e68b9ea644d95',1,'mimalloc-doc.h']]],
- ['mi_5ffree_5faligned',['mi_free_aligned',['../group__posix.html#ga0d28d5cf61e6bfbb18c63092939fe5c9',1,'mimalloc-doc.h']]],
- ['mi_5ffree_5fsize',['mi_free_size',['../group__posix.html#gae01389eedab8d67341ff52e2aad80ebb',1,'mimalloc-doc.h']]],
- ['mi_5ffree_5fsize_5faligned',['mi_free_size_aligned',['../group__posix.html#ga72e9d7ffb5fe94d69bc722c8506e27bc',1,'mimalloc-doc.h']]],
- ['mi_5fgood_5fsize',['mi_good_size',['../group__extended.html#gac057927cd06c854b45fe7847e921bd47',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fcalloc',['mi_heap_calloc',['../group__heap.html#gaa6702b3c48e9e53e50e81b36f5011d55',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fcalloc_5faligned',['mi_heap_calloc_aligned',['../group__heap.html#ga4af03a6e2b93fae77424d93f889705c3',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fcalloc_5faligned_5fat',['mi_heap_calloc_aligned_at',['../group__heap.html#ga08ca6419a5c057a4d965868998eef487',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fcheck_5fowned',['mi_heap_check_owned',['../group__analysis.html#ga0d67c1789faaa15ff366c024fcaf6377',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fcollect',['mi_heap_collect',['../group__heap.html#ga7922f7495cde30b1984d0e6072419298',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fcontains_5fblock',['mi_heap_contains_block',['../group__analysis.html#gaa862aa8ed8d57d84cae41fc1022d71af',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fdelete',['mi_heap_delete',['../group__heap.html#ga2ab1af8d438819b55319c7ef51d1e409',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fdestroy',['mi_heap_destroy',['../group__heap.html#ga9f9c0844edb9717f4feacd79116b8e0d',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fget_5fbacking',['mi_heap_get_backing',['../group__heap.html#ga5d03fbe062ffcf38f0f417fd968357fc',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fget_5fdefault',['mi_heap_get_default',['../group__heap.html#ga8db4cbb87314a989a9a187464d6b5e05',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fmalloc',['mi_heap_malloc',['../group__heap.html#ga9cbed01e42c0647907295de92c3fa296',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fmalloc_5faligned',['mi_heap_malloc_aligned',['../group__heap.html#gab5b87e1805306f70df38789fcfcf6653',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fmalloc_5faligned_5fat',['mi_heap_malloc_aligned_at',['../group__heap.html#ga23acd7680fb0976dde3783254c6c874b',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fmalloc_5fsmall',['mi_heap_malloc_small',['../group__heap.html#gaa1a1c7a1f4da6826b5a25b70ef878368',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fmallocn',['mi_heap_mallocn',['../group__heap.html#ga851da6c43fe0b71c1376cee8aef90db0',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fnew',['mi_heap_new',['../group__heap.html#ga766f672ba56f2fbfeb9d9dbb0b7f6b11',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frealloc',['mi_heap_realloc',['../group__heap.html#gaaef3395f66be48f37bdc8322509c5d81',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frealloc_5faligned',['mi_heap_realloc_aligned',['../group__heap.html#gafc603b696bd14cae6da28658f950d98c',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frealloc_5faligned_5fat',['mi_heap_realloc_aligned_at',['../group__heap.html#gaf96c788a1bf553fe2d371de9365e047c',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5freallocf',['mi_heap_reallocf',['../group__heap.html#ga4a21070eb4e7cce018133c8d5f4b0527',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5freallocn',['mi_heap_reallocn',['../group__heap.html#gac74e94ad9b0c9b57c1c4d88b8825b7a8',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frealpath',['mi_heap_realpath',['../group__heap.html#ga00e95ba1e01acac3cfd95bb7a357a6f0',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frecalloc',['mi_heap_recalloc',['../group__zeroinit.html#ga8648c5fbb22a80f0262859099f06dfbd',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frecalloc_5faligned',['mi_heap_recalloc_aligned',['../group__zeroinit.html#ga9f3f999396c8f77ca5e80e7b40ac29e3',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frecalloc_5faligned_5fat',['mi_heap_recalloc_aligned_at',['../group__zeroinit.html#ga496452c96f1de8c500be9fddf52edaf7',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frezalloc',['mi_heap_rezalloc',['../group__zeroinit.html#gacfad83f14eb5d6a42a497a898e19fc76',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frezalloc_5faligned',['mi_heap_rezalloc_aligned',['../group__zeroinit.html#ga375fa8a611c51905e592d5d467c49664',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5frezalloc_5faligned_5fat',['mi_heap_rezalloc_aligned_at',['../group__zeroinit.html#gac90da54fa7e5d10bdc97ce0b51dce2eb',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fset_5fdefault',['mi_heap_set_default',['../group__heap.html#gab8631ec88c8d26641b68b5d25dcd4422',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fstrdup',['mi_heap_strdup',['../group__heap.html#ga139d6b09dbf50c3c2523d0f4d1cfdeb5',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fstrndup',['mi_heap_strndup',['../group__heap.html#ga8e3dbd46650dd26573cf307a2c8f1f5a',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fvisit_5fblocks',['mi_heap_visit_blocks',['../group__analysis.html#ga70c46687dc6e9dc98b232b02646f8bed',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fzalloc',['mi_heap_zalloc',['../group__heap.html#ga903104592c8ed53417a3762da6241133',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fzalloc_5faligned',['mi_heap_zalloc_aligned',['../group__heap.html#gaa450a59c6c7ae5fdbd1c2b80a8329ef0',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5fzalloc_5faligned_5fat',['mi_heap_zalloc_aligned_at',['../group__heap.html#ga45fb43a62776fbebbdf1edd99b527954',1,'mimalloc-doc.h']]],
- ['mi_5fis_5fin_5fheap_5fregion',['mi_is_in_heap_region',['../group__extended.html#ga5f071b10d4df1c3658e04e7fd67a94e6',1,'mimalloc-doc.h']]],
- ['mi_5fis_5fredirected',['mi_is_redirected',['../group__extended.html#gaad25050b19f30cd79397b227e0157a3f',1,'mimalloc-doc.h']]],
- ['mi_5fmalloc',['mi_malloc',['../group__malloc.html#ga3406e8b168bc74c8637b11571a6da83a',1,'mimalloc-doc.h']]],
- ['mi_5fmalloc_5faligned',['mi_malloc_aligned',['../group__aligned.html#ga68930196751fa2cca9e1fd0d71bade56',1,'mimalloc-doc.h']]],
- ['mi_5fmalloc_5faligned_5fat',['mi_malloc_aligned_at',['../group__aligned.html#ga5850da130c936bd77db039dcfbc8295d',1,'mimalloc-doc.h']]],
- ['mi_5fmalloc_5fsize',['mi_malloc_size',['../group__posix.html#ga4531c9e775bb3ae12db57c1ba8a5d7de',1,'mimalloc-doc.h']]],
- ['mi_5fmalloc_5fsmall',['mi_malloc_small',['../group__extended.html#ga7136c2e55cb22c98ecf95d08d6debb99',1,'mimalloc-doc.h']]],
- ['mi_5fmalloc_5fusable_5fsize',['mi_malloc_usable_size',['../group__posix.html#ga06d07cf357bbac5c73ba5d0c0c421e17',1,'mimalloc-doc.h']]],
- ['mi_5fmallocn',['mi_mallocn',['../group__malloc.html#ga0b05e2bf0f73e7401ae08597ff782ac6',1,'mimalloc-doc.h']]],
- ['mi_5fmemalign',['mi_memalign',['../group__posix.html#gaab7fa71ea93b96873f5d9883db57d40e',1,'mimalloc-doc.h']]],
- ['mi_5fnew',['mi_new',['../group__cpp.html#gaad048a9fce3d02c5909cd05c6ec24545',1,'mimalloc-doc.h']]],
- ['mi_5fnew_5faligned',['mi_new_aligned',['../group__cpp.html#gaef2c2bdb4f70857902d3c8903ac095f3',1,'mimalloc-doc.h']]],
- ['mi_5fnew_5faligned_5fnothrow',['mi_new_aligned_nothrow',['../group__cpp.html#gab5e29558926d934c3f1cae8c815f942c',1,'mimalloc-doc.h']]],
- ['mi_5fnew_5fn',['mi_new_n',['../group__cpp.html#gae7bc4f56cd57ed3359060ff4f38bda81',1,'mimalloc-doc.h']]],
- ['mi_5fnew_5fnothrow',['mi_new_nothrow',['../group__cpp.html#gaeaded64eda71ed6b1d569d3e723abc4a',1,'mimalloc-doc.h']]],
- ['mi_5fnew_5frealloc',['mi_new_realloc',['../group__cpp.html#gaab78a32f55149e9fbf432d5288e38e1e',1,'mimalloc-doc.h']]],
- ['mi_5fnew_5freallocn',['mi_new_reallocn',['../group__cpp.html#ga756f4b2bc6a7ecd0a90baea8e90c7907',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fdisable',['mi_option_disable',['../group__options.html#gaebf6ff707a2e688ebb1a2296ca564054',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fenable',['mi_option_enable',['../group__options.html#ga04180ae41b0d601421dd62ced40ca050',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fget',['mi_option_get',['../group__options.html#ga7e8af195cc81d3fa64ccf2662caa565a',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fis_5fenabled',['mi_option_is_enabled',['../group__options.html#ga459ad98f18b3fc9275474807fe0ca188',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fset',['mi_option_set',['../group__options.html#gaf84921c32375e25754dc2ee6a911fa60',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fset_5fdefault',['mi_option_set_default',['../group__options.html#ga7ef623e440e6e5545cb08c94e71e4b90',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fset_5fenabled',['mi_option_set_enabled',['../group__options.html#ga9a13d05fcb77489cb06d4d017ebd8bed',1,'mimalloc-doc.h']]],
- ['mi_5foption_5fset_5fenabled_5fdefault',['mi_option_set_enabled_default',['../group__options.html#ga65518b69ec5d32336b50e07f74b3f629',1,'mimalloc-doc.h']]],
- ['mi_5fposix_5fmemalign',['mi_posix_memalign',['../group__posix.html#gacff84f226ba9feb2031b8992e5579447',1,'mimalloc-doc.h']]],
- ['mi_5fprocess_5finfo',['mi_process_info',['../group__extended.html#ga7d862c2affd5790381da14eb102a364d',1,'mimalloc-doc.h']]],
- ['mi_5fpvalloc',['mi_pvalloc',['../group__posix.html#gaeb325c39b887d3b90d85d1eb1712fb1e',1,'mimalloc-doc.h']]],
- ['mi_5frealloc',['mi_realloc',['../group__malloc.html#gaf11eb497da57bdfb2de65eb191c69db6',1,'mimalloc-doc.h']]],
- ['mi_5frealloc_5faligned',['mi_realloc_aligned',['../group__aligned.html#ga4028d1cf4aa4c87c880747044a8322ae',1,'mimalloc-doc.h']]],
- ['mi_5frealloc_5faligned_5fat',['mi_realloc_aligned_at',['../group__aligned.html#gaf66a9ae6c6f08bd6be6fb6ea771faffb',1,'mimalloc-doc.h']]],
- ['mi_5freallocarray',['mi_reallocarray',['../group__posix.html#ga48fad8648a2f1dab9c87ea9448a52088',1,'mimalloc-doc.h']]],
- ['mi_5freallocf',['mi_reallocf',['../group__malloc.html#gafe68ac7c5e24a65cd55c9d6b152211a0',1,'mimalloc-doc.h']]],
- ['mi_5freallocn',['mi_reallocn',['../group__malloc.html#ga61d57b4144ba24fba5c1e9b956d13853',1,'mimalloc-doc.h']]],
- ['mi_5frealpath',['mi_realpath',['../group__malloc.html#ga08cec32dd5bbe7da91c78d19f1b5bebe',1,'mimalloc-doc.h']]],
- ['mi_5frecalloc',['mi_recalloc',['../group__malloc.html#ga23a0fbb452b5dce8e31fab1a1958cacc',1,'mimalloc-doc.h']]],
- ['mi_5frecalloc_5faligned',['mi_recalloc_aligned',['../group__zeroinit.html#ga3e7e5c291acf1c7fd7ffd9914a9f945f',1,'mimalloc-doc.h']]],
- ['mi_5frecalloc_5faligned_5fat',['mi_recalloc_aligned_at',['../group__zeroinit.html#ga4ff5e92ad73585418a072c9d059e5cf9',1,'mimalloc-doc.h']]],
- ['mi_5fregister_5fdeferred_5ffree',['mi_register_deferred_free',['../group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece',1,'mimalloc-doc.h']]],
- ['mi_5fregister_5ferror',['mi_register_error',['../group__extended.html#gaa1d55e0e894be240827e5d87ec3a1f45',1,'mimalloc-doc.h']]],
- ['mi_5fregister_5foutput',['mi_register_output',['../group__extended.html#gae5b17ff027cd2150b43a33040250cf3f',1,'mimalloc-doc.h']]],
- ['mi_5freserve_5fhuge_5fos_5fpages_5fat',['mi_reserve_huge_os_pages_at',['../group__extended.html#ga7795a13d20087447281858d2c771cca1',1,'mimalloc-doc.h']]],
- ['mi_5freserve_5fhuge_5fos_5fpages_5finterleave',['mi_reserve_huge_os_pages_interleave',['../group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50',1,'mimalloc-doc.h']]],
- ['mi_5frezalloc',['mi_rezalloc',['../group__zeroinit.html#ga8c292e142110229a2980b37ab036dbc6',1,'mimalloc-doc.h']]],
- ['mi_5frezalloc_5faligned',['mi_rezalloc_aligned',['../group__zeroinit.html#gacd71a7bce96aab38ae6de17af2eb2cf0',1,'mimalloc-doc.h']]],
- ['mi_5frezalloc_5faligned_5fat',['mi_rezalloc_aligned_at',['../group__zeroinit.html#gae8b358c417e61d5307da002702b0a8e1',1,'mimalloc-doc.h']]],
- ['mi_5fstats_5fmerge',['mi_stats_merge',['../group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1',1,'mimalloc-doc.h']]],
- ['mi_5fstats_5fprint',['mi_stats_print',['../group__extended.html#ga2d126e5c62d3badc35445e5d84166df2',1,'mimalloc-doc.h']]],
- ['mi_5fstats_5fprint_5fout',['mi_stats_print_out',['../group__extended.html#ga537f13b299ddf801e49a5a94fde02c79',1,'mimalloc-doc.h']]],
- ['mi_5fstats_5freset',['mi_stats_reset',['../group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99',1,'mimalloc-doc.h']]],
- ['mi_5fstrdup',['mi_strdup',['../group__malloc.html#gac7cffe13f1f458ed16789488bf92b9b2',1,'mimalloc-doc.h']]],
- ['mi_5fstrndup',['mi_strndup',['../group__malloc.html#gaaabf971c2571891433477e2d21a35266',1,'mimalloc-doc.h']]],
- ['mi_5fthread_5fdone',['mi_thread_done',['../group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf',1,'mimalloc-doc.h']]],
- ['mi_5fthread_5finit',['mi_thread_init',['../group__extended.html#gaf8e73efc2cbca9ebfdfb166983a04c17',1,'mimalloc-doc.h']]],
- ['mi_5fthread_5fstats_5fprint_5fout',['mi_thread_stats_print_out',['../group__extended.html#gab1dac8476c46cb9eecab767eb40c1525',1,'mimalloc-doc.h']]],
- ['mi_5fusable_5fsize',['mi_usable_size',['../group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee',1,'mimalloc-doc.h']]],
- ['mi_5fvalloc',['mi_valloc',['../group__posix.html#ga73baaf5951f5165ba0763d0c06b6a93b',1,'mimalloc-doc.h']]],
- ['mi_5fzalloc',['mi_zalloc',['../group__malloc.html#gafdd9d8bb2986e668ba9884f28af38000',1,'mimalloc-doc.h']]],
- ['mi_5fzalloc_5faligned',['mi_zalloc_aligned',['../group__aligned.html#ga0cadbcf5b89a7b6fb171bc8df8734819',1,'mimalloc-doc.h']]],
- ['mi_5fzalloc_5faligned_5fat',['mi_zalloc_aligned_at',['../group__aligned.html#ga5f8c2353766db522565e642fafd8a3f8',1,'mimalloc-doc.h']]],
- ['mi_5fzalloc_5fsmall',['mi_zalloc_small',['../group__extended.html#ga220f29f40a44404b0061c15bc1c31152',1,'mimalloc-doc.h']]]
+ ['mi_5f_5fposix_5fmemalign_173',['mi__posix_memalign',['../group__posix.html#gad5a69c8fea96aa2b7a7c818c2130090a',1,'mimalloc-doc.h']]],
+ ['mi_5faligned_5falloc_174',['mi_aligned_alloc',['../group__posix.html#ga1326d2e4388630b5f81ca7206318b8e5',1,'mimalloc-doc.h']]],
+ ['mi_5fcalloc_175',['mi_calloc',['../group__malloc.html#ga97fedb4f7107c592fd7f0f0a8949a57d',1,'mimalloc-doc.h']]],
+ ['mi_5fcalloc_5faligned_176',['mi_calloc_aligned',['../group__aligned.html#ga53dddb4724042a90315b94bc268fb4c9',1,'mimalloc-doc.h']]],
+ ['mi_5fcalloc_5faligned_5fat_177',['mi_calloc_aligned_at',['../group__aligned.html#ga08647c4593f3b2eef24a919a73eba3a3',1,'mimalloc-doc.h']]],
+ ['mi_5fcfree_178',['mi_cfree',['../group__posix.html#ga705dc7a64bffacfeeb0141501a5c35d7',1,'mimalloc-doc.h']]],
+ ['mi_5fcheck_5fowned_179',['mi_check_owned',['../group__analysis.html#ga628c237489c2679af84a4d0d143b3dd5',1,'mimalloc-doc.h']]],
+ ['mi_5fcollect_180',['mi_collect',['../group__extended.html#ga421430e2226d7d468529cec457396756',1,'mimalloc-doc.h']]],
+ ['mi_5fexpand_181',['mi_expand',['../group__malloc.html#gaaee66a1d483c3e28f585525fb96707e4',1,'mimalloc-doc.h']]],
+ ['mi_5ffree_182',['mi_free',['../group__malloc.html#gaf2c7b89c327d1f60f59e68b9ea644d95',1,'mimalloc-doc.h']]],
+ ['mi_5ffree_5faligned_183',['mi_free_aligned',['../group__posix.html#ga0d28d5cf61e6bfbb18c63092939fe5c9',1,'mimalloc-doc.h']]],
+ ['mi_5ffree_5fsize_184',['mi_free_size',['../group__posix.html#gae01389eedab8d67341ff52e2aad80ebb',1,'mimalloc-doc.h']]],
+ ['mi_5ffree_5fsize_5faligned_185',['mi_free_size_aligned',['../group__posix.html#ga72e9d7ffb5fe94d69bc722c8506e27bc',1,'mimalloc-doc.h']]],
+ ['mi_5fgood_5fsize_186',['mi_good_size',['../group__extended.html#gac057927cd06c854b45fe7847e921bd47',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fcalloc_187',['mi_heap_calloc',['../group__heap.html#gaa6702b3c48e9e53e50e81b36f5011d55',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fcalloc_5faligned_188',['mi_heap_calloc_aligned',['../group__heap.html#ga4af03a6e2b93fae77424d93f889705c3',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fcalloc_5faligned_5fat_189',['mi_heap_calloc_aligned_at',['../group__heap.html#ga08ca6419a5c057a4d965868998eef487',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fcheck_5fowned_190',['mi_heap_check_owned',['../group__analysis.html#ga0d67c1789faaa15ff366c024fcaf6377',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fcollect_191',['mi_heap_collect',['../group__heap.html#ga7922f7495cde30b1984d0e6072419298',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fcontains_5fblock_192',['mi_heap_contains_block',['../group__analysis.html#gaa862aa8ed8d57d84cae41fc1022d71af',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fdelete_193',['mi_heap_delete',['../group__heap.html#ga2ab1af8d438819b55319c7ef51d1e409',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fdestroy_194',['mi_heap_destroy',['../group__heap.html#ga9f9c0844edb9717f4feacd79116b8e0d',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fget_5fbacking_195',['mi_heap_get_backing',['../group__heap.html#ga5d03fbe062ffcf38f0f417fd968357fc',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fget_5fdefault_196',['mi_heap_get_default',['../group__heap.html#ga8db4cbb87314a989a9a187464d6b5e05',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fmalloc_197',['mi_heap_malloc',['../group__heap.html#ga9cbed01e42c0647907295de92c3fa296',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fmalloc_5faligned_198',['mi_heap_malloc_aligned',['../group__heap.html#gab5b87e1805306f70df38789fcfcf6653',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fmalloc_5faligned_5fat_199',['mi_heap_malloc_aligned_at',['../group__heap.html#ga23acd7680fb0976dde3783254c6c874b',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fmalloc_5fsmall_200',['mi_heap_malloc_small',['../group__heap.html#gaa1a1c7a1f4da6826b5a25b70ef878368',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fmallocn_201',['mi_heap_mallocn',['../group__heap.html#ga851da6c43fe0b71c1376cee8aef90db0',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fnew_202',['mi_heap_new',['../group__heap.html#ga766f672ba56f2fbfeb9d9dbb0b7f6b11',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frealloc_203',['mi_heap_realloc',['../group__heap.html#gaaef3395f66be48f37bdc8322509c5d81',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frealloc_5faligned_204',['mi_heap_realloc_aligned',['../group__heap.html#gafc603b696bd14cae6da28658f950d98c',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frealloc_5faligned_5fat_205',['mi_heap_realloc_aligned_at',['../group__heap.html#gaf96c788a1bf553fe2d371de9365e047c',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5freallocf_206',['mi_heap_reallocf',['../group__heap.html#ga4a21070eb4e7cce018133c8d5f4b0527',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5freallocn_207',['mi_heap_reallocn',['../group__heap.html#gac74e94ad9b0c9b57c1c4d88b8825b7a8',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frealpath_208',['mi_heap_realpath',['../group__heap.html#ga00e95ba1e01acac3cfd95bb7a357a6f0',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frecalloc_209',['mi_heap_recalloc',['../group__zeroinit.html#ga8648c5fbb22a80f0262859099f06dfbd',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frecalloc_5faligned_210',['mi_heap_recalloc_aligned',['../group__zeroinit.html#ga9f3f999396c8f77ca5e80e7b40ac29e3',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frecalloc_5faligned_5fat_211',['mi_heap_recalloc_aligned_at',['../group__zeroinit.html#ga496452c96f1de8c500be9fddf52edaf7',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frezalloc_212',['mi_heap_rezalloc',['../group__zeroinit.html#gacfad83f14eb5d6a42a497a898e19fc76',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frezalloc_5faligned_213',['mi_heap_rezalloc_aligned',['../group__zeroinit.html#ga375fa8a611c51905e592d5d467c49664',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5frezalloc_5faligned_5fat_214',['mi_heap_rezalloc_aligned_at',['../group__zeroinit.html#gac90da54fa7e5d10bdc97ce0b51dce2eb',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fset_5fdefault_215',['mi_heap_set_default',['../group__heap.html#gab8631ec88c8d26641b68b5d25dcd4422',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fstrdup_216',['mi_heap_strdup',['../group__heap.html#ga139d6b09dbf50c3c2523d0f4d1cfdeb5',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fstrndup_217',['mi_heap_strndup',['../group__heap.html#ga8e3dbd46650dd26573cf307a2c8f1f5a',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fvisit_5fblocks_218',['mi_heap_visit_blocks',['../group__analysis.html#ga70c46687dc6e9dc98b232b02646f8bed',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fzalloc_219',['mi_heap_zalloc',['../group__heap.html#ga903104592c8ed53417a3762da6241133',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fzalloc_5faligned_220',['mi_heap_zalloc_aligned',['../group__heap.html#gaa450a59c6c7ae5fdbd1c2b80a8329ef0',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5fzalloc_5faligned_5fat_221',['mi_heap_zalloc_aligned_at',['../group__heap.html#ga45fb43a62776fbebbdf1edd99b527954',1,'mimalloc-doc.h']]],
+ ['mi_5fis_5fin_5fheap_5fregion_222',['mi_is_in_heap_region',['../group__extended.html#ga5f071b10d4df1c3658e04e7fd67a94e6',1,'mimalloc-doc.h']]],
+ ['mi_5fis_5fredirected_223',['mi_is_redirected',['../group__extended.html#gaad25050b19f30cd79397b227e0157a3f',1,'mimalloc-doc.h']]],
+ ['mi_5fmalloc_224',['mi_malloc',['../group__malloc.html#ga3406e8b168bc74c8637b11571a6da83a',1,'mimalloc-doc.h']]],
+ ['mi_5fmalloc_5faligned_225',['mi_malloc_aligned',['../group__aligned.html#ga68930196751fa2cca9e1fd0d71bade56',1,'mimalloc-doc.h']]],
+ ['mi_5fmalloc_5faligned_5fat_226',['mi_malloc_aligned_at',['../group__aligned.html#ga5850da130c936bd77db039dcfbc8295d',1,'mimalloc-doc.h']]],
+ ['mi_5fmalloc_5fsize_227',['mi_malloc_size',['../group__posix.html#ga4531c9e775bb3ae12db57c1ba8a5d7de',1,'mimalloc-doc.h']]],
+ ['mi_5fmalloc_5fsmall_228',['mi_malloc_small',['../group__extended.html#ga7136c2e55cb22c98ecf95d08d6debb99',1,'mimalloc-doc.h']]],
+ ['mi_5fmalloc_5fusable_5fsize_229',['mi_malloc_usable_size',['../group__posix.html#ga06d07cf357bbac5c73ba5d0c0c421e17',1,'mimalloc-doc.h']]],
+ ['mi_5fmallocn_230',['mi_mallocn',['../group__malloc.html#ga0b05e2bf0f73e7401ae08597ff782ac6',1,'mimalloc-doc.h']]],
+ ['mi_5fmanage_5fos_5fmemory_231',['mi_manage_os_memory',['../group__extended.html#ga4c6486a1fdcd7a423b5f25fe4be8e0cf',1,'mimalloc-doc.h']]],
+ ['mi_5fmemalign_232',['mi_memalign',['../group__posix.html#gaab7fa71ea93b96873f5d9883db57d40e',1,'mimalloc-doc.h']]],
+ ['mi_5fnew_233',['mi_new',['../group__cpp.html#gaad048a9fce3d02c5909cd05c6ec24545',1,'mimalloc-doc.h']]],
+ ['mi_5fnew_5faligned_234',['mi_new_aligned',['../group__cpp.html#gaef2c2bdb4f70857902d3c8903ac095f3',1,'mimalloc-doc.h']]],
+ ['mi_5fnew_5faligned_5fnothrow_235',['mi_new_aligned_nothrow',['../group__cpp.html#gab5e29558926d934c3f1cae8c815f942c',1,'mimalloc-doc.h']]],
+ ['mi_5fnew_5fn_236',['mi_new_n',['../group__cpp.html#gae7bc4f56cd57ed3359060ff4f38bda81',1,'mimalloc-doc.h']]],
+ ['mi_5fnew_5fnothrow_237',['mi_new_nothrow',['../group__cpp.html#gaeaded64eda71ed6b1d569d3e723abc4a',1,'mimalloc-doc.h']]],
+ ['mi_5fnew_5frealloc_238',['mi_new_realloc',['../group__cpp.html#gaab78a32f55149e9fbf432d5288e38e1e',1,'mimalloc-doc.h']]],
+ ['mi_5fnew_5freallocn_239',['mi_new_reallocn',['../group__cpp.html#ga756f4b2bc6a7ecd0a90baea8e90c7907',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fdisable_240',['mi_option_disable',['../group__options.html#gaebf6ff707a2e688ebb1a2296ca564054',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fenable_241',['mi_option_enable',['../group__options.html#ga04180ae41b0d601421dd62ced40ca050',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fget_242',['mi_option_get',['../group__options.html#ga7e8af195cc81d3fa64ccf2662caa565a',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fis_5fenabled_243',['mi_option_is_enabled',['../group__options.html#ga459ad98f18b3fc9275474807fe0ca188',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fset_244',['mi_option_set',['../group__options.html#gaf84921c32375e25754dc2ee6a911fa60',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fset_5fdefault_245',['mi_option_set_default',['../group__options.html#ga7ef623e440e6e5545cb08c94e71e4b90',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fset_5fenabled_246',['mi_option_set_enabled',['../group__options.html#ga9a13d05fcb77489cb06d4d017ebd8bed',1,'mimalloc-doc.h']]],
+ ['mi_5foption_5fset_5fenabled_5fdefault_247',['mi_option_set_enabled_default',['../group__options.html#ga65518b69ec5d32336b50e07f74b3f629',1,'mimalloc-doc.h']]],
+ ['mi_5fposix_5fmemalign_248',['mi_posix_memalign',['../group__posix.html#gacff84f226ba9feb2031b8992e5579447',1,'mimalloc-doc.h']]],
+ ['mi_5fprocess_5finfo_249',['mi_process_info',['../group__extended.html#ga7d862c2affd5790381da14eb102a364d',1,'mimalloc-doc.h']]],
+ ['mi_5fpvalloc_250',['mi_pvalloc',['../group__posix.html#gaeb325c39b887d3b90d85d1eb1712fb1e',1,'mimalloc-doc.h']]],
+ ['mi_5frealloc_251',['mi_realloc',['../group__malloc.html#gaf11eb497da57bdfb2de65eb191c69db6',1,'mimalloc-doc.h']]],
+ ['mi_5frealloc_5faligned_252',['mi_realloc_aligned',['../group__aligned.html#ga4028d1cf4aa4c87c880747044a8322ae',1,'mimalloc-doc.h']]],
+ ['mi_5frealloc_5faligned_5fat_253',['mi_realloc_aligned_at',['../group__aligned.html#gaf66a9ae6c6f08bd6be6fb6ea771faffb',1,'mimalloc-doc.h']]],
+ ['mi_5freallocarr_254',['mi_reallocarr',['../group__posix.html#ga7e1934d60a3e697950eeb48e042bfad5',1,'mimalloc-doc.h']]],
+ ['mi_5freallocarray_255',['mi_reallocarray',['../group__posix.html#ga48fad8648a2f1dab9c87ea9448a52088',1,'mimalloc-doc.h']]],
+ ['mi_5freallocf_256',['mi_reallocf',['../group__malloc.html#gafe68ac7c5e24a65cd55c9d6b152211a0',1,'mimalloc-doc.h']]],
+ ['mi_5freallocn_257',['mi_reallocn',['../group__malloc.html#ga61d57b4144ba24fba5c1e9b956d13853',1,'mimalloc-doc.h']]],
+ ['mi_5frealpath_258',['mi_realpath',['../group__malloc.html#ga08cec32dd5bbe7da91c78d19f1b5bebe',1,'mimalloc-doc.h']]],
+ ['mi_5frecalloc_259',['mi_recalloc',['../group__malloc.html#ga23a0fbb452b5dce8e31fab1a1958cacc',1,'mimalloc-doc.h']]],
+ ['mi_5frecalloc_5faligned_260',['mi_recalloc_aligned',['../group__zeroinit.html#ga3e7e5c291acf1c7fd7ffd9914a9f945f',1,'mimalloc-doc.h']]],
+ ['mi_5frecalloc_5faligned_5fat_261',['mi_recalloc_aligned_at',['../group__zeroinit.html#ga4ff5e92ad73585418a072c9d059e5cf9',1,'mimalloc-doc.h']]],
+ ['mi_5fregister_5fdeferred_5ffree_262',['mi_register_deferred_free',['../group__extended.html#ga3460a6ca91af97be4058f523d3cb8ece',1,'mimalloc-doc.h']]],
+ ['mi_5fregister_5ferror_263',['mi_register_error',['../group__extended.html#gaa1d55e0e894be240827e5d87ec3a1f45',1,'mimalloc-doc.h']]],
+ ['mi_5fregister_5foutput_264',['mi_register_output',['../group__extended.html#gae5b17ff027cd2150b43a33040250cf3f',1,'mimalloc-doc.h']]],
+ ['mi_5freserve_5fhuge_5fos_5fpages_5fat_265',['mi_reserve_huge_os_pages_at',['../group__extended.html#ga7795a13d20087447281858d2c771cca1',1,'mimalloc-doc.h']]],
+ ['mi_5freserve_5fhuge_5fos_5fpages_5finterleave_266',['mi_reserve_huge_os_pages_interleave',['../group__extended.html#ga3132f521fb756fc0e8ec0b74fb58df50',1,'mimalloc-doc.h']]],
+ ['mi_5freserve_5fos_5fmemory_267',['mi_reserve_os_memory',['../group__extended.html#ga00ec3324b6b2591c7fe3677baa30a767',1,'mimalloc-doc.h']]],
+ ['mi_5frezalloc_268',['mi_rezalloc',['../group__zeroinit.html#ga8c292e142110229a2980b37ab036dbc6',1,'mimalloc-doc.h']]],
+ ['mi_5frezalloc_5faligned_269',['mi_rezalloc_aligned',['../group__zeroinit.html#gacd71a7bce96aab38ae6de17af2eb2cf0',1,'mimalloc-doc.h']]],
+ ['mi_5frezalloc_5faligned_5fat_270',['mi_rezalloc_aligned_at',['../group__zeroinit.html#gae8b358c417e61d5307da002702b0a8e1',1,'mimalloc-doc.h']]],
+ ['mi_5fstats_5fmerge_271',['mi_stats_merge',['../group__extended.html#ga854b1de8cb067c7316286c28b2fcd3d1',1,'mimalloc-doc.h']]],
+ ['mi_5fstats_5fprint_272',['mi_stats_print',['../group__extended.html#ga2d126e5c62d3badc35445e5d84166df2',1,'mimalloc-doc.h']]],
+ ['mi_5fstats_5fprint_5fout_273',['mi_stats_print_out',['../group__extended.html#ga537f13b299ddf801e49a5a94fde02c79',1,'mimalloc-doc.h']]],
+ ['mi_5fstats_5freset_274',['mi_stats_reset',['../group__extended.html#ga3bb8468b8cfcc6e2a61d98aee85c5f99',1,'mimalloc-doc.h']]],
+ ['mi_5fstrdup_275',['mi_strdup',['../group__malloc.html#gac7cffe13f1f458ed16789488bf92b9b2',1,'mimalloc-doc.h']]],
+ ['mi_5fstrndup_276',['mi_strndup',['../group__malloc.html#gaaabf971c2571891433477e2d21a35266',1,'mimalloc-doc.h']]],
+ ['mi_5fthread_5fdone_277',['mi_thread_done',['../group__extended.html#ga0ae4581e85453456a0d658b2b98bf7bf',1,'mimalloc-doc.h']]],
+ ['mi_5fthread_5finit_278',['mi_thread_init',['../group__extended.html#gaf8e73efc2cbca9ebfdfb166983a04c17',1,'mimalloc-doc.h']]],
+ ['mi_5fthread_5fstats_5fprint_5fout_279',['mi_thread_stats_print_out',['../group__extended.html#gab1dac8476c46cb9eecab767eb40c1525',1,'mimalloc-doc.h']]],
+ ['mi_5fusable_5fsize_280',['mi_usable_size',['../group__extended.html#ga089c859d9eddc5f9b4bd946cd53cebee',1,'mimalloc-doc.h']]],
+ ['mi_5fvalloc_281',['mi_valloc',['../group__posix.html#ga73baaf5951f5165ba0763d0c06b6a93b',1,'mimalloc-doc.h']]],
+ ['mi_5fzalloc_282',['mi_zalloc',['../group__malloc.html#gafdd9d8bb2986e668ba9884f28af38000',1,'mimalloc-doc.h']]],
+ ['mi_5fzalloc_5faligned_283',['mi_zalloc_aligned',['../group__aligned.html#ga0cadbcf5b89a7b6fb171bc8df8734819',1,'mimalloc-doc.h']]],
+ ['mi_5fzalloc_5faligned_5fat_284',['mi_zalloc_aligned_at',['../group__aligned.html#ga5f8c2353766db522565e642fafd8a3f8',1,'mimalloc-doc.h']]],
+ ['mi_5fzalloc_5fsmall_285',['mi_zalloc_small',['../group__extended.html#ga220f29f40a44404b0061c15bc1c31152',1,'mimalloc-doc.h']]]
];
diff --git a/docs/search/groups_0.html b/docs/search/groups_0.html
index 194bb7b..c600b49 100644
--- a/docs/search/groups_0.html
+++ b/docs/search/groups_0.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/groups_0.js b/docs/search/groups_0.js
index bbb4b54..0ed99b8 100644
--- a/docs/search/groups_0.js
+++ b/docs/search/groups_0.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['aligned_20allocation',['Aligned Allocation',['../group__aligned.html',1,'']]]
+ ['aligned_20allocation_314',['Aligned Allocation',['../group__aligned.html',1,'']]]
];
diff --git a/docs/search/groups_1.html b/docs/search/groups_1.html
index ed9b5c6..2eb3550 100644
--- a/docs/search/groups_1.html
+++ b/docs/search/groups_1.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/groups_1.js b/docs/search/groups_1.js
index b258fac..f27c584 100644
--- a/docs/search/groups_1.js
+++ b/docs/search/groups_1.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['basic_20allocation',['Basic Allocation',['../group__malloc.html',1,'']]]
+ ['basic_20allocation_315',['Basic Allocation',['../group__malloc.html',1,'']]]
];
diff --git a/docs/search/groups_2.html b/docs/search/groups_2.html
index 17d4e06..12f4af7 100644
--- a/docs/search/groups_2.html
+++ b/docs/search/groups_2.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/groups_2.js b/docs/search/groups_2.js
index 2918576..6da64b6 100644
--- a/docs/search/groups_2.js
+++ b/docs/search/groups_2.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['c_2b_2b_20wrappers',['C++ wrappers',['../group__cpp.html',1,'']]]
+ ['c_2b_2b_20wrappers_316',['C++ wrappers',['../group__cpp.html',1,'']]]
];
diff --git a/docs/search/groups_3.html b/docs/search/groups_3.html
index 7d4a624..5e235b5 100644
--- a/docs/search/groups_3.html
+++ b/docs/search/groups_3.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/groups_3.js b/docs/search/groups_3.js
index 68c73db..cdfbe64 100644
--- a/docs/search/groups_3.js
+++ b/docs/search/groups_3.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['extended_20functions',['Extended Functions',['../group__extended.html',1,'']]]
+ ['extended_20functions_317',['Extended Functions',['../group__extended.html',1,'']]]
];
diff --git a/docs/search/groups_4.html b/docs/search/groups_4.html
index 5e5ae2a..99405e1 100644
--- a/docs/search/groups_4.html
+++ b/docs/search/groups_4.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/groups_4.js b/docs/search/groups_4.js
index e7e4093..687f1ea 100644
--- a/docs/search/groups_4.js
+++ b/docs/search/groups_4.js
@@ -1,5 +1,5 @@
var searchData=
[
- ['heap_20introspection',['Heap Introspection',['../group__analysis.html',1,'']]],
- ['heap_20allocation',['Heap Allocation',['../group__heap.html',1,'']]]
+ ['heap_20allocation_318',['Heap Allocation',['../group__heap.html',1,'']]],
+ ['heap_20introspection_319',['Heap Introspection',['../group__analysis.html',1,'']]]
];
diff --git a/docs/search/groups_5.html b/docs/search/groups_5.html
index fbd1460..583f5f5 100644
--- a/docs/search/groups_5.html
+++ b/docs/search/groups_5.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/groups_5.js b/docs/search/groups_5.js
index 4f00568..43c8b1f 100644
--- a/docs/search/groups_5.js
+++ b/docs/search/groups_5.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['posix',['Posix',['../group__posix.html',1,'']]]
+ ['posix_320',['Posix',['../group__posix.html',1,'']]]
];
diff --git a/docs/search/groups_6.html b/docs/search/groups_6.html
index 277d80e..df6a310 100644
--- a/docs/search/groups_6.html
+++ b/docs/search/groups_6.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/groups_6.js b/docs/search/groups_6.js
index 2533cb9..3463187 100644
--- a/docs/search/groups_6.js
+++ b/docs/search/groups_6.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['runtime_20options',['Runtime Options',['../group__options.html',1,'']]]
+ ['runtime_20options_321',['Runtime Options',['../group__options.html',1,'']]]
];
diff --git a/docs/search/groups_7.html b/docs/search/groups_7.html
index 6a24e7c..8964e05 100644
--- a/docs/search/groups_7.html
+++ b/docs/search/groups_7.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/groups_7.js b/docs/search/groups_7.js
index 647887f..aa150e9 100644
--- a/docs/search/groups_7.js
+++ b/docs/search/groups_7.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['typed_20macros',['Typed Macros',['../group__typed.html',1,'']]]
+ ['typed_20macros_322',['Typed Macros',['../group__typed.html',1,'']]]
];
diff --git a/docs/search/groups_8.html b/docs/search/groups_8.html
index 81ac950..7987ca3 100644
--- a/docs/search/groups_8.html
+++ b/docs/search/groups_8.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/groups_8.js b/docs/search/groups_8.js
index 2b9b4ce..f9c29fe 100644
--- a/docs/search/groups_8.js
+++ b/docs/search/groups_8.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['zero_20initialized_20re_2dallocation',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]]
+ ['zero_20initialized_20re_2dallocation_323',['Zero initialized re-allocation',['../group__zeroinit.html',1,'']]]
];
diff --git a/docs/search/nomatches.html b/docs/search/nomatches.html
index 4377320..2b9360b 100644
--- a/docs/search/nomatches.html
+++ b/docs/search/nomatches.html
@@ -1,5 +1,6 @@
-
+
+
diff --git a/docs/search/pages_0.html b/docs/search/pages_0.html
index 3d06b05..8517b48 100644
--- a/docs/search/pages_0.html
+++ b/docs/search/pages_0.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/pages_0.js b/docs/search/pages_0.js
index 33f3d05..07922da 100644
--- a/docs/search/pages_0.js
+++ b/docs/search/pages_0.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['building',['Building',['../build.html',1,'']]]
+ ['building_324',['Building',['../build.html',1,'']]]
];
diff --git a/docs/search/pages_1.html b/docs/search/pages_1.html
index 06f1e40..a0fb679 100644
--- a/docs/search/pages_1.html
+++ b/docs/search/pages_1.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/pages_1.js b/docs/search/pages_1.js
index 0f2757c..6433dae 100644
--- a/docs/search/pages_1.js
+++ b/docs/search/pages_1.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['environment_20options',['Environment Options',['../environment.html',1,'']]]
+ ['environment_20options_325',['Environment Options',['../environment.html',1,'']]]
];
diff --git a/docs/search/pages_2.html b/docs/search/pages_2.html
index 703f781..084edfd 100644
--- a/docs/search/pages_2.html
+++ b/docs/search/pages_2.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/pages_2.js b/docs/search/pages_2.js
index df03c8d..7577377 100644
--- a/docs/search/pages_2.js
+++ b/docs/search/pages_2.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['overriding_20malloc',['Overriding Malloc',['../overrides.html',1,'']]]
+ ['overriding_20malloc_326',['Overriding Malloc',['../overrides.html',1,'']]]
];
diff --git a/docs/search/pages_3.html b/docs/search/pages_3.html
index 299228a..c0b45b0 100644
--- a/docs/search/pages_3.html
+++ b/docs/search/pages_3.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/pages_3.js b/docs/search/pages_3.js
index d745403..d62a3cf 100644
--- a/docs/search/pages_3.js
+++ b/docs/search/pages_3.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['performance',['Performance',['../bench.html',1,'']]]
+ ['performance_327',['Performance',['../bench.html',1,'']]]
];
diff --git a/docs/search/pages_4.html b/docs/search/pages_4.html
index 021d277..0f05c2e 100644
--- a/docs/search/pages_4.html
+++ b/docs/search/pages_4.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/pages_4.js b/docs/search/pages_4.js
index b47682a..4e4e64d 100644
--- a/docs/search/pages_4.js
+++ b/docs/search/pages_4.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['using_20the_20library',['Using the library',['../using.html',1,'']]]
+ ['using_20the_20library_328',['Using the library',['../using.html',1,'']]]
];
diff --git a/docs/search/search.css b/docs/search/search.css
index 10bd4b5..d30e027 100644
--- a/docs/search/search.css
+++ b/docs/search/search.css
@@ -131,7 +131,7 @@ span.SelectionMark {
a.SelectItem {
display: block;
outline-style: none;
- color: #000000;
+ color: #000000;
text-decoration: none;
padding-left: 6px;
padding-right: 12px;
@@ -139,7 +139,7 @@ a.SelectItem {
a.SelectItem:focus,
a.SelectItem:active {
- color: #000000;
+ color: #000000;
outline-style: none;
text-decoration: none;
}
@@ -173,7 +173,7 @@ iframe#MSearchResults {
#SRIndex {
- clear:both;
+ clear:both;
padding-bottom: 15px;
}
@@ -192,7 +192,7 @@ body.SRPage {
}
.SRChildren {
- padding-left: 3ex; padding-bottom: .5em
+ padding-left: 3ex; padding-bottom: .5em
}
.SRPage .SRChildren {
@@ -200,7 +200,7 @@ body.SRPage {
}
.SRSymbol {
- font-weight: bold;
+ font-weight: bold;
color: #121414;
font-family: Arial, Verdana, sans-serif;
text-decoration: none;
@@ -209,7 +209,7 @@ body.SRPage {
a.SRScope {
display: block;
- color: #121414;
+ color: #121414;
font-family: Arial, Verdana, sans-serif;
text-decoration: none;
outline: none;
@@ -270,4 +270,3 @@ DIV.searchresults {
.searchpages {
margin-top: 10px;
}
-
diff --git a/docs/search/search.js b/docs/search/search.js
index a554ab9..fb226f7 100644
--- a/docs/search/search.js
+++ b/docs/search/search.js
@@ -1,25 +1,26 @@
/*
- @licstart The following is the entire license notice for the
- JavaScript code in this file.
+ @licstart The following is the entire license notice for the JavaScript code in this file.
- Copyright (C) 1997-2017 by Dimitri van Heesch
+ The MIT License (MIT)
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+ Copyright (C) 1997-2020 by Dimitri van Heesch
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software
+ and associated documentation files (the "Software"), to deal in the Software without restriction,
+ including without limitation the rights to use, copy, modify, merge, publish, distribute,
+ sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ The above copyright notice and this permission notice shall be included in all copies or
+ substantial portions of the Software.
- @licend The above is the entire license notice
- for the JavaScript code in this file
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ @licend The above is the entire license notice for the JavaScript code in this file
*/
function convertToId(search)
{
@@ -79,9 +80,10 @@ function getYPos(item)
storing this instance. Is needed to be able to set timeouts.
resultPath - path to use for external files
*/
-function SearchBox(name, resultsPath, inFrame, label)
+function SearchBox(name, resultsPath, inFrame, label, extension)
{
if (!name || !resultsPath) { alert("Missing parameters to SearchBox."); }
+ if (!extension || extension == "") { extension = ".html"; }
// ---------- Instance variables
this.name = name;
@@ -96,6 +98,7 @@ function SearchBox(name, resultsPath, inFrame, label)
this.searchActive = false;
this.insideFrame = inFrame;
this.searchLabel = label;
+ this.extension = extension;
// ----------- DOM Elements
@@ -200,10 +203,9 @@ function SearchBox(name, resultsPath, inFrame, label)
}
return;
}
- else if (window.frames.MSearchResults.searchResults)
+ else
{
- var elem = window.frames.MSearchResults.searchResults.NavNext(0);
- if (elem) elem.focus();
+ window.frames.MSearchResults.postMessage("take_focus", "*");
}
}
else if (e.keyCode==27) // Escape out of the search field
@@ -347,13 +349,13 @@ function SearchBox(name, resultsPath, inFrame, label)
if (idx!=-1)
{
var hexCode=idx.toString(16);
- resultsPage = this.resultsPath + '/' + indexSectionNames[this.searchIndex] + '_' + hexCode + '.html';
+ resultsPage = this.resultsPath + '/' + indexSectionNames[this.searchIndex] + '_' + hexCode + this.extension;
resultsPageWithSearch = resultsPage+'?'+escape(searchValue);
hasResultsPage = true;
}
else // nothing available for this search term
{
- resultsPage = this.resultsPath + '/nomatches.html';
+ resultsPage = this.resultsPath + '/nomatches' + this.extension;
resultsPageWithSearch = resultsPage;
hasResultsPage = false;
}
@@ -364,7 +366,7 @@ function SearchBox(name, resultsPath, inFrame, label)
if (domPopupSearchResultsWindow.style.display!='block')
{
var domSearchBox = this.DOMSearchBox();
- this.DOMSearchClose().style.display = 'inline';
+ this.DOMSearchClose().style.display = 'inline-block';
if (this.insideFrame)
{
var domPopupSearchResults = this.DOMPopupSearchResults();
@@ -439,12 +441,12 @@ function SearchResults(name)
while (element && element!=parentElement)
{
- if (element.nodeName == 'DIV' && element.className == 'SRChildren')
+ if (element.nodeName.toLowerCase() == 'div' && element.className == 'SRChildren')
{
return element;
}
- if (element.nodeName == 'DIV' && element.hasChildNodes())
+ if (element.nodeName.toLowerCase() == 'div' && element.hasChildNodes())
{
element = element.firstChild;
}
diff --git a/docs/search/searchdata.js b/docs/search/searchdata.js
index dd31068..c4c02fa 100644
--- a/docs/search/searchdata.js
+++ b/docs/search/searchdata.js
@@ -36,4 +36,3 @@ var indexSectionLabels =
7: "Modules",
8: "Pages"
};
-
diff --git a/docs/search/typedefs_0.html b/docs/search/typedefs_0.html
index 3848b20..a4684c4 100644
--- a/docs/search/typedefs_0.html
+++ b/docs/search/typedefs_0.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/typedefs_0.js b/docs/search/typedefs_0.js
index 44a0a6c..ac67dcd 100644
--- a/docs/search/typedefs_0.js
+++ b/docs/search/typedefs_0.js
@@ -1,8 +1,8 @@
var searchData=
[
- ['mi_5fblock_5fvisit_5ffun',['mi_block_visit_fun',['../group__analysis.html#gadfa01e2900f0e5d515ad5506b26f6d65',1,'mimalloc-doc.h']]],
- ['mi_5fdeferred_5ffree_5ffun',['mi_deferred_free_fun',['../group__extended.html#ga299dae78d25ce112e384a98b7309c5be',1,'mimalloc-doc.h']]],
- ['mi_5ferror_5ffun',['mi_error_fun',['../group__extended.html#ga251d369cda3f1c2a955c555486ed90e5',1,'mimalloc-doc.h']]],
- ['mi_5fheap_5ft',['mi_heap_t',['../group__heap.html#ga34a47cde5a5b38c29f1aa3c5e76943c2',1,'mimalloc-doc.h']]],
- ['mi_5foutput_5ffun',['mi_output_fun',['../group__extended.html#gad823d23444a4b77a40f66bf075a98a0c',1,'mimalloc-doc.h']]]
+ ['mi_5fblock_5fvisit_5ffun_291',['mi_block_visit_fun',['../group__analysis.html#gadfa01e2900f0e5d515ad5506b26f6d65',1,'mimalloc-doc.h']]],
+ ['mi_5fdeferred_5ffree_5ffun_292',['mi_deferred_free_fun',['../group__extended.html#ga299dae78d25ce112e384a98b7309c5be',1,'mimalloc-doc.h']]],
+ ['mi_5ferror_5ffun_293',['mi_error_fun',['../group__extended.html#ga251d369cda3f1c2a955c555486ed90e5',1,'mimalloc-doc.h']]],
+ ['mi_5fheap_5ft_294',['mi_heap_t',['../group__heap.html#ga34a47cde5a5b38c29f1aa3c5e76943c2',1,'mimalloc-doc.h']]],
+ ['mi_5foutput_5ffun_295',['mi_output_fun',['../group__extended.html#gad823d23444a4b77a40f66bf075a98a0c',1,'mimalloc-doc.h']]]
];
diff --git a/docs/search/variables_0.html b/docs/search/variables_0.html
index 12104bc..1e477c0 100644
--- a/docs/search/variables_0.html
+++ b/docs/search/variables_0.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/variables_0.js b/docs/search/variables_0.js
index 330c800..4c0d772 100644
--- a/docs/search/variables_0.js
+++ b/docs/search/variables_0.js
@@ -1,5 +1,5 @@
var searchData=
[
- ['block_5fsize',['block_size',['../group__analysis.html#a332a6c14d736a99699d5453a1cb04b41',1,'mi_heap_area_t']]],
- ['blocks',['blocks',['../group__analysis.html#ae0085e6e1cf059a4eb7767e30e9991b8',1,'mi_heap_area_t']]]
+ ['block_5fsize_286',['block_size',['../group__analysis.html#a332a6c14d736a99699d5453a1cb04b41',1,'mi_heap_area_t']]],
+ ['blocks_287',['blocks',['../group__analysis.html#ae0085e6e1cf059a4eb7767e30e9991b8',1,'mi_heap_area_t']]]
];
diff --git a/docs/search/variables_1.html b/docs/search/variables_1.html
index b784017..ea73d9a 100644
--- a/docs/search/variables_1.html
+++ b/docs/search/variables_1.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/variables_1.js b/docs/search/variables_1.js
index af76e9c..449fd61 100644
--- a/docs/search/variables_1.js
+++ b/docs/search/variables_1.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['committed',['committed',['../group__analysis.html#ab47526df656d8837ec3e97f11b83f835',1,'mi_heap_area_t']]]
+ ['committed_288',['committed',['../group__analysis.html#ab47526df656d8837ec3e97f11b83f835',1,'mi_heap_area_t']]]
];
diff --git a/docs/search/variables_2.html b/docs/search/variables_2.html
index 0cb98d3..0580462 100644
--- a/docs/search/variables_2.html
+++ b/docs/search/variables_2.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/variables_2.js b/docs/search/variables_2.js
index 304ad43..713c68e 100644
--- a/docs/search/variables_2.js
+++ b/docs/search/variables_2.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['reserved',['reserved',['../group__analysis.html#ae848a3e6840414891035423948ca0383',1,'mi_heap_area_t']]]
+ ['reserved_289',['reserved',['../group__analysis.html#ae848a3e6840414891035423948ca0383',1,'mi_heap_area_t']]]
];
diff --git a/docs/search/variables_3.html b/docs/search/variables_3.html
index 1e83bf5..0d69e76 100644
--- a/docs/search/variables_3.html
+++ b/docs/search/variables_3.html
@@ -1,7 +1,8 @@
-
+
+
-
+
@@ -10,21 +11,27 @@
Loading...
-
+
Searching...
No Matches
-
+
diff --git a/docs/search/variables_3.js b/docs/search/variables_3.js
index c889d4f..08ec3ba 100644
--- a/docs/search/variables_3.js
+++ b/docs/search/variables_3.js
@@ -1,4 +1,4 @@
var searchData=
[
- ['used',['used',['../group__analysis.html#ab820302c5cd0df133eb8e51650a008b4',1,'mi_heap_area_t']]]
+ ['used_290',['used',['../group__analysis.html#ab820302c5cd0df133eb8e51650a008b4',1,'mi_heap_area_t']]]
];
diff --git a/docs/tabs.css b/docs/tabs.css
index e1f2e0b..a87eade 100644
--- a/docs/tabs.css
+++ b/docs/tabs.css
@@ -58,4 +58,3 @@
color: #fff;
text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0);
}
-
diff --git a/docs/using.html b/docs/using.html
index 047e35e..eea12cf 100644
--- a/docs/using.html
+++ b/docs/using.html
@@ -3,7 +3,7 @@
-
+
mi-malloc: Using the library
@@ -13,10 +13,6 @@
-
@@ -37,21 +33,21 @@
mi-malloc
- 1.6
+ 1.7/2.0
@@ -60,10 +56,10 @@
-
+
@@ -73,13 +69,13 @@ var searchBox = new SearchBox("searchBox", "search",false,'Search');
-
@@ -92,7 +88,7 @@ $(document).ready(function(){initNavTree('using.html','');});
-
@@ -103,21 +99,56 @@ $(document).ready(function(){initNavTree('using.html','');});
Build
-
The preferred usage is including <mimalloc.h>, linking with the shared or static library, and using the mi_malloc API exclusively for allocation. For example:
gcc -o myprogram -lmimalloc myfile.c
mimalloc uses only safe OS calls (mmap and VirtualAlloc) and can co-exist with other allocators linked to the same program. If you use cmake, you can simply use:
find_package(mimalloc 1.0 REQUIRED)
in your CMakeLists.txt to find a locally installed mimalloc. Then use either:
target_link_libraries(myapp PUBLIC mimalloc)
to link with the shared (dynamic) library, or:
target_link_libraries(myapp PUBLIC mimalloc-static)
to link with the static library. See test\CMakeLists.txt for an example.
+
The preferred usage is including <mimalloc.h>, linking with the shared or static library, and using the mi_malloc API exclusively for allocation. For example:
gcc -o myprogram -lmimalloc myfile.c
mimalloc uses only safe OS calls (mmap and VirtualAlloc) and can co-exist with other allocators linked to the same program. If you use cmake, you can simply use:
find_package(mimalloc 1.0 REQUIRED)
in your CMakeLists.txt to find a locally installed mimalloc. Then use either:
target_link_libraries(myapp PUBLIC mimalloc)
to link with the shared (dynamic) library, or:
target_link_libraries(myapp PUBLIC mimalloc-static)
to link with the static library. See test\CMakeLists.txt for an example.
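As a minimal sketch of that model (the file name example.cpp and the allocation sizes are illustrative, not from the docs), a program calls the mi_ API directly and links as shown above:
#include <mimalloc.h>
#include <cstdio>

int main() {
  void* p = mi_malloc(64);                               // allocate 64 bytes from mimalloc
  std::printf("usable size: %zu\n", mi_usable_size(p));  // may exceed 64: mimalloc rounds up to a size class
  p = mi_realloc(p, 256);                                // grow the block
  mi_free(p);                                            // return it to mimalloc
  return 0;
}
All four functions appear in the function index earlier in this patch.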
C++
-
For best performance in C++ programs, it is also recommended to override the global new and delete operators. For convenience, mimalloc provides mimalloc-new-delete.h which does this for you – just include it in a single(!) source file in your project.
In C++, mimalloc also provides the mi_stl_allocator struct which implements the std::allocator interface. For example:
std::vector<some_struct, mi_stl_allocator<some_struct>> vec;
vec.push_back(some_struct());
Statistics
-
You can pass environment variables to print verbose messages (MIMALLOC_VERBOSE=1) and statistics (MIMALLOC_SHOW_STATS=1) (in the debug version):
> env MIMALLOC_SHOW_STATS=1 ./cfrac 175451865205073170563711388363
175451865205073170563711388363 = 374456281610909315237213 * 468551
heap stats: peak total freed unit
normal 2: 16.4 kb 17.5 mb 17.5 mb 16 b ok
normal 3: 16.3 kb 15.2 mb 15.2 mb 24 b ok
normal 4: 64 b 4.6 kb 4.6 kb 32 b ok
normal 5: 80 b 118.4 kb 118.4 kb 40 b ok
normal 6: 48 b 48 b 48 b 48 b ok
normal 17: 960 b 960 b 960 b 320 b ok
heap stats: peak total freed unit
normal: 33.9 kb 32.8 mb 32.8 mb 1 b ok
huge: 0 b 0 b 0 b 1 b ok
total: 33.9 kb 32.8 mb 32.8 mb 1 b ok
malloc requested: 32.8 mb
committed: 58.2 kb 58.2 kb 58.2 kb 1 b ok
reserved: 2.0 mb 2.0 mb 2.0 mb 1 b ok
reset: 0 b 0 b 0 b 1 b ok
segments: 1 1 1
-abandoned: 0
pages: 6 6 6
-abandoned: 0
mmaps: 3
mmap fast: 0
mmap slow: 1
threads: 0
elapsed: 2.022s
process: user: 1.781s, system: 0.016s, faults: 756, reclaims: 0, rss: 2.7 mb
The above model of using the mi_-prefixed API is not always possible, though, in existing programs that already use the standard malloc interface; another option is to override the standard malloc interface completely and redirect all calls to the mimalloc library instead.
+
For best performance in C++ programs, it is also recommended to override the global new and delete operators. For convenience, mimalloc provides mimalloc-new-delete.h which does this for you – just include it in a single(!) source file in your project without linking to the mimalloc library.
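A minimal sketch of that setup, assuming a hypothetical main.cpp as the single source file that pulls in the header:
// main.cpp -- include mimalloc-new-delete.h in exactly one translation unit
#include <mimalloc-new-delete.h>  // defines the global operator new/delete in terms of mimalloc
#include <string>

int main() {
  std::string* s = new std::string("allocated via mimalloc");  // served by the overridden operator new
  delete s;                                                    // released through the overridden operator delete
  return 0;
}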
In C++, mimalloc also provides the mi_stl_allocator struct which implements the std::allocator interface. For example:
std::vector<some_struct, mi_stl_allocator<some_struct>> vec;
vec.push_back(some_struct());
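A self-contained version of that fragment (some_struct is the placeholder type used in the text above):
#include <mimalloc.h>  // declares mi_stl_allocator when compiled as C++
#include <vector>

struct some_struct { int value = 0; };

int main() {
  // The allocator template argument routes all of the vector's
  // allocations through mimalloc instead of the default allocator.
  std::vector<some_struct, mi_stl_allocator<some_struct>> vec;
  vec.push_back(some_struct{});
  return 0;
}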
Statistics
+
You can pass environment variables to print verbose messages (MIMALLOC_VERBOSE=1) and statistics (MIMALLOC_SHOW_STATS=1) (in the debug version):
> env MIMALLOC_SHOW_STATS=1 ./cfrac 175451865205073170563711388363
175451865205073170563711388363 = 374456281610909315237213 * 468551
heap stats: peak total freed unit
normal 2: 16.4 kb 17.5 mb 17.5 mb 16 b ok
normal 3: 16.3 kb 15.2 mb 15.2 mb 24 b ok
normal 4: 64 b 4.6 kb 4.6 kb 32 b ok
normal 5: 80 b 118.4 kb 118.4 kb 40 b ok
normal 6: 48 b 48 b 48 b 48 b ok
normal 17: 960 b 960 b 960 b 320 b ok
heap stats: peak total freed unit
normal: 33.9 kb 32.8 mb 32.8 mb 1 b ok
huge: 0 b 0 b 0 b 1 b ok
total: 33.9 kb 32.8 mb 32.8 mb 1 b ok
malloc requested: 32.8 mb
committed: 58.2 kb 58.2 kb 58.2 kb 1 b ok
reserved: 2.0 mb 2.0 mb 2.0 mb 1 b ok
reset: 0 b 0 b 0 b 1 b ok
segments: 1 1 1
-abandoned: 0
pages: 6 6 6
-abandoned: 0
mmaps: 3
mmap fast: 0
mmap slow: 1
threads: 0
elapsed: 2.022s
process: user: 1.781s, system: 0.016s, faults: 756, reclaims: 0, rss: 2.7 mb
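The same statistics can also be requested programmatically through the extended API; a minimal sketch (the NULL argument to mi_stats_print selects the default output):
#include <mimalloc.h>

int main() {
  void* p = mi_malloc(1024);
  mi_free(p);
  mi_stats_print(NULL);  // print accumulated allocation statistics before exit
  return 0;
}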
The above model of using the mi_-prefixed API is not always possible, though, in existing programs that already use the standard malloc interface; another option is to override the standard malloc interface completely and redirect all calls to the mimalloc library instead.
See Overriding Malloc for more info.
-
-
+
+
diff --git a/ide/vs2017/mimalloc-override-test.vcxproj b/ide/vs2017/mimalloc-override-test.vcxproj
deleted file mode 100644
index faaa00e..0000000
--- a/ide/vs2017/mimalloc-override-test.vcxproj
+++ /dev/null
@@ -1,190 +0,0 @@
-
-
-
-
- Debug
- Win32
-
-
- Release
- Win32
-
-
- Debug
- x64
-
-
- Release
- x64
-
-
-
- 15.0
- {FEF7868F-750E-4C21-A04D-22707CC66879}
- mimalloc-override-test
- 10.0.17134.0
- mimalloc-override-test
-
-
-
- Application
- true
- v141
-
-
- Application
- false
- v141
- true
-
-
- Application
- true
- v141
-
-
- Application
- false
- v141
- true
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
-
-
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
-
-
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
-
-
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
-
-
-
- Level3
- Disabled
- true
- true
- ..\..\include
- MultiThreadedDebugDLL
- false
- Default
- false
-
-
- Console
- kernel32.lib;%(AdditionalDependencies)
-
-
-
-
-
-
-
-
-
- Level3
- Disabled
- true
- true
- ..\..\include
- MultiThreadedDebugDLL
- Sync
- Default
- false
-
-
- Console
-
-
- kernel32.lib;%(AdditionalDependencies)
-
-
-
-
-
-
-
-
-
- Level3
- MaxSpeed
- true
- true
- true
- true
- ..\..\include
- _MBCS;%(PreprocessorDefinitions);NDEBUG
- MultiThreadedDLL
-
-
- true
- true
- Console
- kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)
-
-
-
-
-
-
-
-
- Level3
- MaxSpeed
- true
- true
- true
- true
- ..\..\include
- _MBCS;%(PreprocessorDefinitions);NDEBUG
- MultiThreadedDLL
-
-
- true
- true
- Console
-
-
- kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)
-
-
-
-
-
-
-
-
- {abb5eae7-b3e6-432e-b636-333449892ea7}
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/ide/vs2017/mimalloc-override-test.vcxproj.filters b/ide/vs2017/mimalloc-override-test.vcxproj.filters
deleted file mode 100644
index eb5e70b..0000000
--- a/ide/vs2017/mimalloc-override-test.vcxproj.filters
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
-
-
- {4FC737F1-C7A5-4376-A066-2A32D752A2FF}
- cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx
-
-
- {93995380-89BD-4b04-88EB-625FBE52EBFB}
- h;hh;hpp;hxx;hm;inl;inc;ipp;xsd
-
-
- {67DA6AB6-F800-4c08-8B7A-83BB121AAD01}
- rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms
-
-
-
-
- Source Files
-
-
-
\ No newline at end of file
diff --git a/ide/vs2017/mimalloc-override.vcxproj b/ide/vs2017/mimalloc-override.vcxproj
deleted file mode 100644
index 990d6ca..0000000
--- a/ide/vs2017/mimalloc-override.vcxproj
+++ /dev/null
@@ -1,254 +0,0 @@
-
-
-
-
- Debug
- Win32
-
-
- Release
- Win32
-
-
- Debug
- x64
-
-
- Release
- x64
-
-
-
- 15.0
- {ABB5EAE7-B3E6-432E-B636-333449892EA7}
- mimalloc-override
- 10.0.17134.0
- mimalloc-override
-
-
-
- DynamicLibrary
- true
- v141
-
-
- DynamicLibrary
- false
- v141
-
-
- DynamicLibrary
- true
- v141
-
-
- DynamicLibrary
- false
- v141
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
- .dll
- mimalloc-override
-
-
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
- .dll
- mimalloc-override
-
-
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
- .dll
- mimalloc-override
-
-
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
- .dll
- mimalloc-override
-
-
-
- Level3
- Disabled
- true
- true
- ../../include
- _CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);
- MultiThreadedDebugDLL
- false
- Default
-
-
- $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies)
-
-
-
-
- Default
- false
-
-
- COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect32.dll $(OutputPath)
-
-
- Copy mimalloc-redirect32.dll to the output directory
-
-
-
-
- Level3
- Disabled
- true
- true
- ../../include
- _CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);
- MultiThreadedDebugDLL
- false
- Default
-
-
- $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;bcrypt.lib;%(AdditionalDependencies)
-
-
-
-
- Default
- false
-
-
- COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect.dll $(OutputPath)
-
-
- copy mimalloc-redirect.dll to the output directory
-
-
-
-
- Level3
- MaxSpeed
- true
- true
- true
- ../../include
- _CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG
- AssemblyAndSourceCode
- $(IntDir)
- false
- MultiThreadedDLL
- Default
- false
-
-
- true
- true
- $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies)
-
-
- Default
- false
-
-
- COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect32.dll $(OutputPath)
-
-
- Copy mimalloc-redirect32.dll to the output directory
-
-
-
-
- Level3
- MaxSpeed
- true
- true
- true
- ../../include
- _CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG
- AssemblyAndSourceCode
- $(IntDir)
- false
- MultiThreadedDLL
- Default
- false
-
-
- true
- true
- $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;bcrypt.lib;%(AdditionalDependencies)
-
-
- Default
- false
-
-
- COPY /Y $(ProjectDir)..\..\bin\mimalloc-redirect.dll $(OutputPath)
-
-
- copy mimalloc-redirect.dll to the output directory
-
-
-
-
-
-
-
-
-
-
-
-
- false
- false
- false
- false
-
-
- true
- true
- true
- true
-
-
-
-
-
-
-
-
-
-
- true
- true
- true
- true
-
-
-
-
-
-
-
-
-
-
diff --git a/ide/vs2017/mimalloc-override.vcxproj.filters b/ide/vs2017/mimalloc-override.vcxproj.filters
deleted file mode 100644
index 0265265..0000000
--- a/ide/vs2017/mimalloc-override.vcxproj.filters
+++ /dev/null
@@ -1,80 +0,0 @@
-
-
-
-
- {4FC737F1-C7A5-4376-A066-2A32D752A2FF}
- cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx
-
-
- {93995380-89BD-4b04-88EB-625FBE52EBFB}
- h;hh;hpp;hxx;hm;inl;inc;ipp;xsd
-
-
-
-
- Header Files
-
-
- Header Files
-
-
- Header Files
-
-
- Header Files
-
-
- Header Files
-
-
- Header Files
-
-
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
-
diff --git a/ide/vs2017/mimalloc-test-stress.vcxproj b/ide/vs2017/mimalloc-test-stress.vcxproj
deleted file mode 100644
index b8267d0..0000000
--- a/ide/vs2017/mimalloc-test-stress.vcxproj
+++ /dev/null
@@ -1,159 +0,0 @@
-
-
-
-
- Debug
- Win32
-
-
- Release
- Win32
-
-
- Debug
- x64
-
-
- Release
- x64
-
-
-
- 15.0
- {FEF7958F-750E-4C21-A04D-22707CC66878}
- mimalloc-test-stress
- 10.0.17134.0
- mimalloc-test-stress
-
-
-
- Application
- true
- v141
-
-
- Application
- false
- v141
- true
-
-
- Application
- true
- v141
-
-
- Application
- false
- v141
- true
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
-
-
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
-
-
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
-
-
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
-
-
-
- Level3
- Disabled
- true
- true
- ..\..\include
-
-
- Console
-
-
-
-
- Level3
- Disabled
- true
- true
- ..\..\include
-
-
- Console
-
-
-
-
- Level3
- MaxSpeed
- true
- true
- true
- true
- ..\..\include
- %(PreprocessorDefinitions);NDEBUG
-
-
- true
- true
- Console
-
-
-
-
- Level3
- MaxSpeed
- true
- true
- true
- true
- ..\..\include
- %(PreprocessorDefinitions);NDEBUG
-
-
- true
- true
- Console
-
-
-
-
- false
- false
- false
- false
-
-
-
-
- {abb5eae7-b3e6-432e-b636-333449892ea6}
-
-
-
-
-
-
\ No newline at end of file
diff --git a/ide/vs2017/mimalloc-test-stress.vcxproj.filters b/ide/vs2017/mimalloc-test-stress.vcxproj.filters
deleted file mode 100644
index 7c5239e..0000000
--- a/ide/vs2017/mimalloc-test-stress.vcxproj.filters
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
-
-
- {4FC737F1-C7A5-4376-A066-2A32D752A2FF}
- cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx
-
-
- {93995380-89BD-4b04-88EB-625FBE52EBFB}
- h;hh;hpp;hxx;hm;inl;inc;ipp;xsd
-
-
- {67DA6AB6-F800-4c08-8B7A-83BB121AAD01}
- rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms
-
-
-
-
- Source Files
-
-
-
\ No newline at end of file
diff --git a/ide/vs2017/mimalloc-test.vcxproj b/ide/vs2017/mimalloc-test.vcxproj
deleted file mode 100644
index 27c7bb6..0000000
--- a/ide/vs2017/mimalloc-test.vcxproj
+++ /dev/null
@@ -1,158 +0,0 @@
-
-
-
-
- Debug
- Win32
-
-
- Release
- Win32
-
-
- Debug
- x64
-
-
- Release
- x64
-
-
-
- 15.0
- {FEF7858F-750E-4C21-A04D-22707CC66878}
- mimalloctest
- 10.0.17134.0
- mimalloc-test
-
-
-
- Application
- true
- v141
-
-
- Application
- false
- v141
- true
-
-
- Application
- true
- v141
-
-
- Application
- false
- v141
- true
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
-
-
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
-
-
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
-
-
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
-
-
-
- Level3
- Disabled
- true
- true
- ..\..\include
- stdcpp17
-
-
- Console
-
-
-
-
- Level3
- Disabled
- true
- true
- ..\..\include
- stdcpp17
-
-
- Console
-
-
-
-
- Level3
- MaxSpeed
- true
- true
- true
- true
- ..\..\include
- _MBCS;%(PreprocessorDefinitions);NDEBUG
- stdcpp17
-
-
- true
- true
- Console
-
-
-
-
- Level3
- MaxSpeed
- true
- true
- true
- true
- ..\..\include
- _MBCS;%(PreprocessorDefinitions);NDEBUG
- stdcpp17
-
-
- true
- true
- Console
-
-
-
-
- {abb5eae7-b3e6-432e-b636-333449892ea6}
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/ide/vs2017/mimalloc-test.vcxproj.filters b/ide/vs2017/mimalloc-test.vcxproj.filters
deleted file mode 100644
index fca75e1..0000000
--- a/ide/vs2017/mimalloc-test.vcxproj.filters
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
-
-
- {4FC737F1-C7A5-4376-A066-2A32D752A2FF}
- cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx
-
-
- {93995380-89BD-4b04-88EB-625FBE52EBFB}
- h;hh;hpp;hxx;hm;inl;inc;ipp;xsd
-
-
- {67DA6AB6-F800-4c08-8B7A-83BB121AAD01}
- rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms
-
-
-
-
- Source Files
-
-
-
\ No newline at end of file
diff --git a/ide/vs2017/mimalloc.sln b/ide/vs2017/mimalloc.sln
deleted file mode 100644
index aeab6b8..0000000
Binary files a/ide/vs2017/mimalloc.sln and /dev/null differ
diff --git a/ide/vs2017/mimalloc.vcxproj b/ide/vs2017/mimalloc.vcxproj
deleted file mode 100644
index 1ff1af9..0000000
--- a/ide/vs2017/mimalloc.vcxproj
+++ /dev/null
@@ -1,260 +0,0 @@
-
-
-
-
- Debug
- Win32
-
-
- Release
- Win32
-
-
- Debug
- x64
-
-
- Release
- x64
-
-
-
- 15.0
- {ABB5EAE7-B3E6-432E-B636-333449892EA6}
- mimalloc
- 10.0.17134.0
- mimalloc
-
-
-
- StaticLibrary
- true
- v141
-
-
- StaticLibrary
- false
- v141
- true
-
-
- StaticLibrary
- true
- v141
-
-
- StaticLibrary
- false
- v141
- true
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
- .lib
- mimalloc-static
-
-
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
- .lib
- mimalloc-static
-
-
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
- .lib
- mimalloc-static
-
-
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
- .lib
- mimalloc-static
-
-
- false
-
-
- false
-
-
- false
-
-
- false
-
-
-
- Level3
- Disabled
- true
- true
- ../../include
- _CRT_SECURE_NO_WARNINGS;MI_DEBUG=3;%(PreprocessorDefinitions);
- CompileAsC
- false
- stdcpp17
-
-
-
-
-
-
-
-
-
-
- Level4
- Disabled
- true
- true
- ../../include
- _CRT_SECURE_NO_WARNINGS;MI_DEBUG=3;%(PreprocessorDefinitions);
- CompileAsC
- false
- stdcpp17
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- Level3
- MaxSpeed
- true
- true
- ../../include
- _CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions);NDEBUG
- AssemblyAndSourceCode
- $(IntDir)
- false
- false
- Default
- CompileAsC
- true
-
-
- true
- true
-
-
-
-
-
-
-
-
-
-
- Level4
- MaxSpeed
- true
- true
- ../../include
- _CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions);NDEBUG
- AssemblyAndSourceCode
- $(IntDir)
- false
- false
- Default
- CompileAsC
- true
-
-
- true
- true
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- false
- false
- false
- false
-
-
- true
- true
- true
- true
-
-
- true
- true
- true
- true
-
-
-
-
-
-
-
-
-
- true
- true
- true
- true
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/ide/vs2017/mimalloc.vcxproj.filters b/ide/vs2017/mimalloc.vcxproj.filters
deleted file mode 100644
index 4366051..0000000
--- a/ide/vs2017/mimalloc.vcxproj.filters
+++ /dev/null
@@ -1,83 +0,0 @@
-
-
-
-
- {4FC737F1-C7A5-4376-A066-2A32D752A2FF}
- cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx
-
-
- {93995380-89BD-4b04-88EB-625FBE52EBFB}
- h;hh;hpp;hxx;hm;inl;inc;ipp;xsd
-
-
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
- Source Files
-
-
-
-
- Header Files
-
-
- Header Files
-
-
- Header Files
-
-
- Header Files
-
-
- Header Files
-
-
- Header Files
-
-
-
diff --git a/ide/vs2019/mimalloc-override-test.vcxproj b/ide/vs2019/mimalloc-override-test.vcxproj
deleted file mode 100644
index 7a9202f..0000000
--- a/ide/vs2019/mimalloc-override-test.vcxproj
+++ /dev/null
@@ -1,190 +0,0 @@
-
-
-
-
- Debug
- Win32
-
-
- Release
- Win32
-
-
- Debug
- x64
-
-
- Release
- x64
-
-
-
- 15.0
- {FEF7868F-750E-4C21-A04D-22707CC66879}
- mimalloc-override-test
- 10.0
- mimalloc-override-test
-
-
-
- Application
- true
- v142
-
-
- Application
- false
- v142
- true
-
-
- Application
- true
- v142
-
-
- Application
- false
- v142
- true
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
-
-
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
-
-
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
-
-
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\
- $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\
-
-
-
- Level3
- Disabled
- true
- true
- ..\..\include
- MultiThreadedDebugDLL
- Sync
- Default
- false
-
-
- Console
- kernel32.lib;%(AdditionalDependencies)
-
-
-
-
-
-
-
-
-
- Level3
- Disabled
- true
- true
- ..\..\include
- MultiThreadedDebugDLL
- Sync
- Default
- false
-
-
- Console
-
-
- kernel32.lib;%(AdditionalDependencies)
-
-
-
-
-
-
-
-
-
- Level3
- MaxSpeed
- true
- true
- true
- true
- ..\..\include
- _MBCS;%(PreprocessorDefinitions);NDEBUG
- MultiThreadedDLL
-
-
- true
- true
- Console
- kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)
-
-
-
-
-
-
-
-
- Level3
- MaxSpeed
- true
- true
- true
- true
- ..\..\include
- _MBCS;%(PreprocessorDefinitions);NDEBUG
- MultiThreadedDLL
-
-
- true
- true
- Console
-
-
- kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)
-
-
-
-
-
-
-
-
-
-
-
- {abb5eae7-b3e6-432e-b636-333449892ea7}
-
-
-
-
-
-
\ No newline at end of file
diff --git a/ide/vs2019/mimalloc-override.vcxproj b/ide/vs2019/mimalloc-override.vcxproj
deleted file mode 100644
index a0e79fb..0000000
--- a/ide/vs2019/mimalloc-override.vcxproj
+++ /dev/null
@@ -1,257 +0,0 @@
- [deleted mimalloc-override.vcxproj XML elided (markup stripped in extraction): VS2019 DynamicLibrary project, GUID {ABB5EAE7-B3E6-432E-B636-333449892EA7}, v142 toolset, Debug/Release x Win32/x64 configurations, defines MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE, links $(ProjectDir)\..\..\bin\mimalloc-redirect(32).lib, with post-build COPY steps for mimalloc-redirect(32).dll]
\ No newline at end of file
diff --git a/ide/vs2019/mimalloc-override.vcxproj.filters b/ide/vs2019/mimalloc-override.vcxproj.filters
deleted file mode 100644
index 8e36f50..0000000
--- a/ide/vs2019/mimalloc-override.vcxproj.filters
+++ /dev/null
@@ -1,81 +0,0 @@
- [deleted mimalloc-override.vcxproj.filters XML elided (markup stripped): Source Files/Header Files filter assignments and the two filter GUIDs {f1fccf27-17b9-42dd-ba51-6070baff85c6} and {39cb7e38-69d0-43fb-8406-6a0f7cefc3b4}]
\ No newline at end of file
diff --git a/ide/vs2019/mimalloc-test-api.vcxproj b/ide/vs2019/mimalloc-test-api.vcxproj
deleted file mode 100644
index 812a9cb..0000000
--- a/ide/vs2019/mimalloc-test-api.vcxproj
+++ /dev/null
@@ -1,155 +0,0 @@
- [deleted mimalloc-test-api.vcxproj XML elided (markup stripped): VS2019 Application project, GUID {FFF7958F-750E-4C21-A04D-22707CC66878}, v142 toolset, Debug/Release x Win32/x64 configurations, ..\..\include on the include path, and a project reference to {abb5eae7-b3e6-432e-b636-333449892ea6}]
diff --git a/ide/vs2019/mimalloc-test-stress.vcxproj b/ide/vs2019/mimalloc-test-stress.vcxproj
deleted file mode 100644
index afbb666..0000000
--- a/ide/vs2019/mimalloc-test-stress.vcxproj
+++ /dev/null
@@ -1,159 +0,0 @@
- [deleted mimalloc-test-stress.vcxproj XML elided (markup stripped): VS2019 Application project, GUID {FEF7958F-750E-4C21-A04D-22707CC66878}, v142 toolset, Debug/Release x Win32/x64 configurations, and a project reference to {abb5eae7-b3e6-432e-b636-333449892ea7}]
\ No newline at end of file
diff --git a/ide/vs2019/mimalloc-test.vcxproj b/ide/vs2019/mimalloc-test.vcxproj
deleted file mode 100644
index 13af6ab..0000000
--- a/ide/vs2019/mimalloc-test.vcxproj
+++ /dev/null
@@ -1,158 +0,0 @@
- [deleted mimalloc-test.vcxproj XML elided (markup stripped): VS2019 Application project "mimalloctest", GUID {FEF7858F-750E-4C21-A04D-22707CC66878}, v142 toolset, stdcpp17 language standard, Debug/Release x Win32/x64 configurations, and a project reference to {abb5eae7-b3e6-432e-b636-333449892ea6}]
\ No newline at end of file
diff --git a/ide/vs2019/mimalloc.sln b/ide/vs2019/mimalloc.sln
deleted file mode 100644
index fcb938a..0000000
Binary files a/ide/vs2019/mimalloc.sln and /dev/null differ
diff --git a/ide/vs2019/mimalloc.vcxproj b/ide/vs2019/mimalloc.vcxproj
deleted file mode 100644
index e18db0c..0000000
--- a/ide/vs2019/mimalloc.vcxproj
+++ /dev/null
@@ -1,253 +0,0 @@
- [deleted mimalloc.vcxproj XML elided (markup stripped): VS2019 StaticLibrary project, GUID {ABB5EAE7-B3E6-432E-B636-333449892EA6}, v142 toolset, output name mimalloc-static.lib, MI_DEBUG=3 and CompileAsCpp in Debug, Debug/Release x Win32/x64 configurations]
\ No newline at end of file
diff --git a/ide/vs2019/mimalloc.vcxproj.filters b/ide/vs2019/mimalloc.vcxproj.filters
deleted file mode 100644
index 4704fb2..0000000
--- a/ide/vs2019/mimalloc.vcxproj.filters
+++ /dev/null
@@ -1,84 +0,0 @@
- [deleted mimalloc.vcxproj.filters XML elided (markup stripped): Source Files/Header Files filter assignments and the two filter GUIDs {2b556b10-f559-4b2d-896e-142652adbf0c} and {852a14ae-6dde-4e95-8077-ca705e97e5af}]
diff --git a/include/mimalloc-new-delete.h b/include/mimalloc-new-delete.h
index fded0c0..c16f4a6 100644
--- a/include/mimalloc-new-delete.h
+++ b/include/mimalloc-new-delete.h
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018,2019 Microsoft Research, Daan Leijen
+Copyright (c) 2018-2020 Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -22,14 +22,26 @@ terms of the MIT license. A copy of the license can be found in the file
#include <new>
#include <mimalloc.h>
+ #if defined(_MSC_VER) && defined(_Ret_notnull_) && defined(_Post_writable_byte_size_)
+ // stay consistent with VCRT definitions
+ #define mi_decl_new(n) mi_decl_nodiscard mi_decl_restrict _Ret_notnull_ _Post_writable_byte_size_(n)
+ #define mi_decl_new_nothrow(n) mi_decl_nodiscard mi_decl_restrict _Ret_maybenull_ _Success_(return != NULL) _Post_writable_byte_size_(n)
+ #else
+ #define mi_decl_new(n) mi_decl_nodiscard mi_decl_restrict
+ #define mi_decl_new_nothrow(n) mi_decl_nodiscard mi_decl_restrict
+ #endif
+
void operator delete(void* p) noexcept { mi_free(p); };
void operator delete[](void* p) noexcept { mi_free(p); };
- void* operator new(std::size_t n) noexcept(false) { return mi_new(n); }
- void* operator new[](std::size_t n) noexcept(false) { return mi_new(n); }
+ void operator delete (void* p, const std::nothrow_t&) noexcept { mi_free(p); }
+ void operator delete[](void* p, const std::nothrow_t&) noexcept { mi_free(p); }
- void* operator new (std::size_t n, const std::nothrow_t& tag) noexcept { (void)(tag); return mi_new_nothrow(n); }
- void* operator new[](std::size_t n, const std::nothrow_t& tag) noexcept { (void)(tag); return mi_new_nothrow(n); }
+ mi_decl_new(n) void* operator new(std::size_t n) noexcept(false) { return mi_new(n); }
+ mi_decl_new(n) void* operator new[](std::size_t n) noexcept(false) { return mi_new(n); }
+
+ mi_decl_new_nothrow(n) void* operator new (std::size_t n, const std::nothrow_t& tag) noexcept { (void)(tag); return mi_new_nothrow(n); }
+ mi_decl_new_nothrow(n) void* operator new[](std::size_t n, const std::nothrow_t& tag) noexcept { (void)(tag); return mi_new_nothrow(n); }
#if (__cplusplus >= 201402L || _MSC_VER >= 1916)
void operator delete (void* p, std::size_t n) noexcept { mi_free_size(p,n); };
@@ -41,9 +53,11 @@ terms of the MIT license. A copy of the license can be found in the file
void operator delete[](void* p, std::align_val_t al) noexcept { mi_free_aligned(p, static_cast<size_t>(al)); }
void operator delete (void* p, std::size_t n, std::align_val_t al) noexcept { mi_free_size_aligned(p, n, static_cast<size_t>(al)); };
void operator delete[](void* p, std::size_t n, std::align_val_t al) noexcept { mi_free_size_aligned(p, n, static_cast<size_t>(al)); };
+ void operator delete (void* p, std::align_val_t al, const std::nothrow_t&) noexcept { mi_free_aligned(p, static_cast<size_t>(al)); }
+ void operator delete[](void* p, std::align_val_t al, const std::nothrow_t&) noexcept { mi_free_aligned(p, static_cast<size_t>(al)); }
- void* operator new( std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast<size_t>(al)); }
- void* operator new[]( std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast<size_t>(al)); }
+ void* operator new (std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast<size_t>(al)); }
+ void* operator new[](std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast<size_t>(al)); }
void* operator new (std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_new_aligned_nothrow(n, static_cast<size_t>(al)); }
void* operator new[](std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_new_aligned_nothrow(n, static_cast<size_t>(al)); }
#endif
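
A minimal sketch (not part of the diff) of how this header is meant to be consumed -- in exactly one translation unit, since it defines the global operators rather than merely declaring them; the file name is hypothetical:

// app.cpp -- include in ONE source file only, compiled with -Imimalloc/include
#include <mimalloc-new-delete.h>
#include <vector>

int main() {
  std::vector<int> v(1000);  // buffer obtained via the overridden ::operator new
  v[0] = 42;
  return 0;                  // freed via the overridden ::operator delete
}
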
diff --git a/include/mimalloc-override.h b/include/mimalloc-override.h
index 201fb8b..c63b0b9 100644
--- a/include/mimalloc-override.h
+++ b/include/mimalloc-override.h
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018,2019 Microsoft Research, Daan Leijen
+Copyright (c) 2018-2020 Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -24,7 +24,7 @@ not accidentally mix pointers from different allocators).
#define free(p) mi_free(p)
#define strdup(s) mi_strdup(s)
-#define strndup(s) mi_strndup(s)
+#define strndup(s,n) mi_strndup(s,n)
#define realpath(f,n) mi_realpath(f,n)
// Microsoft extensions
@@ -33,7 +33,7 @@ not accidentally mix pointers from different allocators).
#define _recalloc(p,n,c) mi_recalloc(p,n,c)
#define _strdup(s) mi_strdup(s)
-#define _strndup(s) mi_strndup(s)
+#define _strndup(s,n) mi_strndup(s,n)
#define _wcsdup(s) (wchar_t*)mi_wcsdup((const unsigned short*)(s))
#define _mbsdup(s) mi_mbsdup(s)
#define _dupenv_s(b,n,v) mi_dupenv_s(b,n,v)
@@ -48,6 +48,7 @@ not accidentally mix pointers from different allocators).
#define valloc(n) mi_valloc(n)
#define pvalloc(n) mi_pvalloc(n)
#define reallocarray(p,s,n) mi_reallocarray(p,s,n)
+#define reallocarr(p,s,n) mi_reallocarr(p,s,n)
#define memalign(a,n) mi_memalign(a,n)
#define aligned_alloc(a,n) mi_aligned_alloc(a,n)
#define posix_memalign(p,a,n) mi_posix_memalign(p,a,n)
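
A sketch (not part of the diff) of the macro-based override in use; note it only redirects calls in sources that actually include this header, unlike the dynamic override:

// main.c (hypothetical) -- compile with -Imimalloc/include, link with libmimalloc
#include <mimalloc.h>           // declares mi_strndup/mi_free
#include <mimalloc-override.h>  // include last so the macros rewrite the calls below

int main(void) {
  char* s = strndup("hello world", 5);  // expands to mi_strndup("hello world", 5)
  free(s);                              // expands to mi_free(s)
  return 0;
}
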
diff --git a/include/mimalloc.h b/include/mimalloc.h
index 2574200..f77c2ea 100644
--- a/include/mimalloc.h
+++ b/include/mimalloc.h
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MIMALLOC_H
#define MIMALLOC_H
-#define MI_MALLOC_VERSION 167 // major + 2 digits minor
+#define MI_MALLOC_VERSION 212 // major + 2 digits minor
// ------------------------------------------------------
// Compiler specific attributes
@@ -26,8 +26,10 @@ terms of the MIT license. A copy of the license can be found in the file
#if defined(__cplusplus) && (__cplusplus >= 201703)
#define mi_decl_nodiscard [[nodiscard]]
-#elif (__GNUC__ >= 4) || defined(__clang__) // includes clang, icc, and clang-cl
+#elif (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__) // includes clang, icc, and clang-cl
#define mi_decl_nodiscard __attribute__((warn_unused_result))
+#elif defined(_HAS_NODISCARD)
+ #define mi_decl_nodiscard _NODISCARD
#elif (_MSC_VER >= 1700)
#define mi_decl_nodiscard _Check_return_
#else
@@ -58,14 +60,12 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_attr_alloc_size2(s1,s2)
#define mi_attr_alloc_align(p)
#elif defined(__GNUC__) // includes clang and icc
- #define mi_cdecl // leads to warnings... __attribute__((cdecl))
- #if !defined(MI_SHARED_LIB)
- #define mi_decl_export
- #elif defined(MI_SHARED_LIB_EXPORT)
+ #if defined(MI_SHARED_LIB) && defined(MI_SHARED_LIB_EXPORT)
#define mi_decl_export __attribute__((visibility("default")))
#else
- #define mi_decl_export
+ #define mi_decl_export
#endif
+ #define mi_cdecl // leads to warnings... __attribute__((cdecl))
#define mi_decl_restrict
#define mi_attr_malloc __attribute__((malloc))
#if (defined(__clang_major__) && (__clang_major__ < 4)) || (__GNUC__ < 5)
@@ -97,6 +97,7 @@ terms of the MIT license. A copy of the license can be found in the file
#include <stddef.h>   // size_t
#include <stdbool.h>  // bool
+#include <stdint.h>   // INTPTR_MAX
#ifdef __cplusplus
extern "C" {
@@ -159,6 +160,10 @@ mi_decl_export void mi_thread_init(void) mi_attr_noexcept;
mi_decl_export void mi_thread_done(void) mi_attr_noexcept;
mi_decl_export void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept;
+mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs,
+ size_t* current_rss, size_t* peak_rss,
+ size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept;
+
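For illustration (not part of the diff), a small usage sketch; passing NULL for statistics the caller does not need is assumed to be tolerated, as with the other statistics routines:

#include <mimalloc.h>
#include <stdio.h>

static void print_rss(void) {
  size_t current_rss = 0, peak_rss = 0;
  // NULL for the fields we do not care about (assumed to be allowed)
  mi_process_info(NULL, NULL, NULL, &current_rss, &peak_rss, NULL, NULL, NULL);
  printf("rss: %zu bytes (peak: %zu)\n", current_rss, peak_rss);
}
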
// -------------------------------------------------------------------------------------
// Aligned allocation
// Note that `alignment` always follows `size` for consistency with unaligned
@@ -251,8 +256,9 @@ typedef struct mi_heap_area_s {
void* blocks; // start of the area containing heap blocks
size_t reserved; // bytes reserved for this area (virtual)
size_t committed; // current available bytes for this area
- size_t used; // bytes in use by allocated blocks
+ size_t used; // number of allocated blocks
size_t block_size; // size in bytes of each block
+ size_t full_block_size; // size in bytes of a full block including padding and metadata.
} mi_heap_area_t;
typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg);
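
This visitor type pairs with mi_heap_visit_blocks (declared elsewhere in the header); a sketch of a callback that sums area usage via the new full_block_size field, assuming the callback is invoked once per area with block == NULL when individual blocks are not visited:

#include <mimalloc.h>

static bool sum_area_bytes(const mi_heap_t* heap, const mi_heap_area_t* area,
                           void* block, size_t block_size, void* arg) {
  (void)heap; (void)block_size;
  if (block == NULL) {                                    // once per area
    *(size_t*)arg += area->used * area->full_block_size;  // `used` is now a block count
  }
  return true;  // continue visiting
}
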
@@ -266,6 +272,23 @@ mi_decl_nodiscard mi_decl_export bool mi_is_redirected(void) mi_attr_noexcept;
mi_decl_export int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept;
mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept;
+mi_decl_export int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept;
+mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept;
+
+mi_decl_export void mi_debug_show_arenas(void) mi_attr_noexcept;
+
+// Experimental: heaps associated with specific memory arenas
+typedef int mi_arena_id_t;
+mi_decl_export void* mi_arena_area(mi_arena_id_t arena_id, size_t* size);
+mi_decl_export int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
+mi_decl_export int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
+mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
+
+#if MI_MALLOC_VERSION >= 182
+// Create a heap that only allocates in the specified arena
+mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id);
+#endif
+
// deprecated
mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;
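
A sketch (not part of the diff) of the intended flow for the exclusive-arena API, guarded the same way the header guards mi_heap_new_in_arena:

#include <mimalloc.h>

void use_private_arena(void) {
  mi_arena_id_t arena_id;
  // reserve 64 MiB of committed OS memory as an exclusive arena
  if (mi_reserve_os_memory_ex(64*1024*1024, true /*commit*/, false /*allow_large*/,
                              true /*exclusive*/, &arena_id) != 0) return;
#if MI_MALLOC_VERSION >= 182
  mi_heap_t* heap = mi_heap_new_in_arena(arena_id);  // heap allocates only in this arena
  void* p = mi_heap_malloc(heap, 128);
  mi_free(p);
  mi_heap_delete(heap);
#endif
}
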
@@ -290,30 +313,45 @@ mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size
// ------------------------------------------------------
-// Options, all `false` by default
+// Options
// ------------------------------------------------------
typedef enum mi_option_e {
// stable options
- mi_option_show_errors,
- mi_option_show_stats,
- mi_option_verbose,
- // the following options are experimental
- mi_option_eager_commit,
- mi_option_eager_region_commit,
- mi_option_reset_decommits,
- mi_option_large_os_pages, // implies eager commit
- mi_option_reserve_huge_os_pages,
- mi_option_segment_cache,
- mi_option_page_reset,
- mi_option_abandoned_page_reset,
- mi_option_segment_reset,
- mi_option_eager_commit_delay,
- mi_option_reset_delay,
- mi_option_use_numa_nodes,
- mi_option_os_tag,
- mi_option_max_errors,
- _mi_option_last
+ mi_option_show_errors, // print error messages
+ mi_option_show_stats, // print statistics on termination
+ mi_option_verbose, // print verbose messages
+ // the following options are experimental (see src/options.h)
+ mi_option_eager_commit, // eager commit segments? (after `eager_commit_delay` segments) (=1)
+ mi_option_arena_eager_commit, // eager commit arenas? Use 2 to enable just on overcommit systems (=2)
+ mi_option_purge_decommits, // should a memory purge decommit (or only reset) (=1)
+ mi_option_allow_large_os_pages, // allow large (2MiB) OS pages, implies eager commit
+ mi_option_reserve_huge_os_pages, // reserve N huge OS pages (1GiB/page) at startup
+ mi_option_reserve_huge_os_pages_at, // reserve huge OS pages at a specific NUMA node
+ mi_option_reserve_os_memory, // reserve specified amount of OS memory in an arena at startup
+ mi_option_deprecated_segment_cache,
+ mi_option_deprecated_page_reset,
+ mi_option_abandoned_page_purge, // immediately purge delayed purges on thread termination
+ mi_option_deprecated_segment_reset,
+ mi_option_eager_commit_delay,
+ mi_option_purge_delay, // memory purging is delayed by N milliseconds; use 0 for immediate purging or -1 for no purging at all.
+ mi_option_use_numa_nodes, // 0 = use all available numa nodes, otherwise use at most N nodes.
+ mi_option_limit_os_alloc, // 1 = do not use OS memory for allocation (but only programmatically reserved arenas)
+ mi_option_os_tag, // tag used for OS logging (macOS only for now)
+ mi_option_max_errors, // issue at most N error messages
+ mi_option_max_warnings, // issue at most N warning messages
+ mi_option_max_segment_reclaim,
+ mi_option_destroy_on_exit, // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe.
+ mi_option_arena_reserve, // initial memory size in KiB for arena reservation (1GiB on 64-bit)
+ mi_option_arena_purge_mult,
+ mi_option_purge_extend_delay,
+ _mi_option_last,
+ // legacy option names
+ mi_option_large_os_pages = mi_option_allow_large_os_pages,
+ mi_option_eager_region_commit = mi_option_arena_eager_commit,
+ mi_option_reset_decommits = mi_option_purge_decommits,
+ mi_option_reset_delay = mi_option_purge_delay,
+ mi_option_abandoned_page_reset = mi_option_abandoned_page_purge
} mi_option_t;
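
Options can also be set through the corresponding MIMALLOC_ environment variables (e.g. MIMALLOC_SHOW_STATS=1); a short sketch of programmatic use, including one of the legacy aliases above:

#include <mimalloc.h>

void configure(void) {
  mi_option_set(mi_option_purge_delay, 100);  // purge unused memory after 100ms
  mi_option_enable(mi_option_show_stats);     // print statistics on termination
  mi_option_set(mi_option_reset_delay, 100);  // legacy name; same option as purge_delay
}
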
@@ -323,7 +361,9 @@ mi_decl_export void mi_option_disable(mi_option_t option);
mi_decl_export void mi_option_set_enabled(mi_option_t option, bool enable);
mi_decl_export void mi_option_set_enabled_default(mi_option_t option, bool enable);
-mi_decl_nodiscard mi_decl_export long mi_option_get(mi_option_t option);
+mi_decl_nodiscard mi_decl_export long mi_option_get(mi_option_t option);
+mi_decl_nodiscard mi_decl_export long mi_option_get_clamp(mi_option_t option, long min, long max);
+mi_decl_nodiscard mi_decl_export size_t mi_option_get_size(mi_option_t option);
mi_decl_export void mi_option_set(mi_option_t option, long value);
mi_decl_export void mi_option_set_default(mi_option_t option, long value);
@@ -337,6 +377,7 @@ mi_decl_export void mi_option_set_default(mi_option_t option, long value);
mi_decl_export void mi_cfree(void* p) mi_attr_noexcept;
mi_decl_export void* mi__expand(void* p, size_t newsize) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_malloc_size(const void* p) mi_attr_noexcept;
+mi_decl_nodiscard mi_decl_export size_t mi_malloc_good_size(size_t size) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept;
mi_decl_export int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept;
@@ -346,6 +387,7 @@ mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_pvalloc(size_t size)
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1);
mi_decl_nodiscard mi_decl_export void* mi_reallocarray(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);
+mi_decl_nodiscard mi_decl_export int mi_reallocarr(void* p, size_t count, size_t size) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept;
@@ -368,6 +410,9 @@ mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_n(size_t count, s
mi_decl_nodiscard mi_decl_export void* mi_new_realloc(void* p, size_t newsize) mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, size_t size) mi_attr_alloc_size2(2, 3);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) mi_attr_malloc mi_attr_alloc_size(2);
+mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(2, 3);
+
#ifdef __cplusplus
}
#endif
@@ -378,13 +423,14 @@ mi_decl_nodiscard mi_decl_export void* mi_new_reallocn(void* p, size_t newcount,
// ---------------------------------------------------------------------------------------------
#ifdef __cplusplus
+#include <cstddef>      // std::size_t
#include <cstdint>      // PTRDIFF_MAX
#if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11
#include <type_traits>  // std::true_type
#include <utility>      // std::forward
#endif
-template<class T> struct mi_stl_allocator {
+template<class T> struct _mi_stl_allocator_common {
typedef T value_type;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
@@ -392,6 +438,27 @@ template<class T> struct mi_stl_allocator {
typedef value_type const& const_reference;
typedef value_type* pointer;
typedef value_type const* const_pointer;
+
+ #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11
+ using propagate_on_container_copy_assignment = std::true_type;
+ using propagate_on_container_move_assignment = std::true_type;
+ using propagate_on_container_swap = std::true_type;
+ template <class U, class ...Args> void construct(U* p, Args&& ...args) { ::new(p) U(std::forward<Args>(args)...); }
+ template <class U> void destroy(U* p) mi_attr_noexcept { p->~U(); }
+ #else
+ void construct(pointer p, value_type const& val) { ::new(p) value_type(val); }
+ void destroy(pointer p) { p->~value_type(); }
+ #endif
+
+ size_type max_size() const mi_attr_noexcept { return (PTRDIFF_MAX/sizeof(value_type)); }
+ pointer address(reference x) const { return &x; }
+ const_pointer address(const_reference x) const { return &x; }
+};
+
+template<class T> struct mi_stl_allocator : public _mi_stl_allocator_common<T> {
+ using typename _mi_stl_allocator_common<T>::size_type;
+ using typename _mi_stl_allocator_common<T>::value_type;
+ using typename _mi_stl_allocator_common<T>::pointer;
template <class U> struct rebind { typedef mi_stl_allocator<U> other; };
mi_stl_allocator() mi_attr_noexcept = default;
@@ -408,24 +475,91 @@ template<class T> struct mi_stl_allocator {
#endif
#if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11
- using propagate_on_container_copy_assignment = std::true_type;
- using propagate_on_container_move_assignment = std::true_type;
- using propagate_on_container_swap = std::true_type;
- using is_always_equal = std::true_type;
- template <class U, class ...Args> void construct(U* p, Args&& ...args) { ::new(p) U(std::forward<Args>(args)...); }
- template <class U> void destroy(U* p) mi_attr_noexcept { p->~U(); }
- #else
- void construct(pointer p, value_type const& val) { ::new(p) value_type(val); }
- void destroy(pointer p) { p->~value_type(); }
+ using is_always_equal = std::true_type;
#endif
-
- size_type max_size() const mi_attr_noexcept { return (PTRDIFF_MAX/sizeof(value_type)); }
- pointer address(reference x) const { return &x; }
- const_pointer address(const_reference x) const { return &x; }
};
template<class T1,class T2> bool operator==(const mi_stl_allocator<T1>& , const mi_stl_allocator<T2>& ) mi_attr_noexcept { return true; }
template<class T1,class T2> bool operator!=(const mi_stl_allocator<T1>& , const mi_stl_allocator<T2>& ) mi_attr_noexcept { return false; }
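
With the equality operators in place the allocator works as a drop-in for standard containers; for example (not part of the diff):

#include <mimalloc.h>
#include <vector>

int main() {
  std::vector<int, mi_stl_allocator<int>> v;
  v.push_back(1);  // element storage comes from mimalloc
  return 0;
}
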
+
+
+#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900) // C++11
+#define MI_HAS_HEAP_STL_ALLOCATOR 1
+
+#include <memory>      // std::shared_ptr
+
+// Common base class for STL allocators in a specific heap
+template<class T, bool _mi_destroy> struct _mi_heap_stl_allocator_common : public _mi_stl_allocator_common<T> {
+ using typename _mi_stl_allocator_common<T>::size_type;
+ using typename _mi_stl_allocator_common<T>::value_type;
+ using typename _mi_stl_allocator_common<T>::pointer;
+
+ _mi_heap_stl_allocator_common(mi_heap_t* hp) : heap(hp) { } /* will not delete nor destroy the passed in heap */
+
+ #if (__cplusplus >= 201703L) // C++17
+ mi_decl_nodiscard T* allocate(size_type count) { return static_cast<T*>(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(T))); }
+ mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); }
+ #else
+ mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast<pointer>(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(value_type))); }
+ #endif
+
+ #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11
+ using is_always_equal = std::false_type;
+ #endif
+
+ void collect(bool force) { mi_heap_collect(this->heap.get(), force); }
+ template<class U> bool is_equal(const _mi_heap_stl_allocator_common<U, _mi_destroy>& x) const { return (this->heap == x.heap); }
+
+protected:
+ std::shared_ptr<mi_heap_t> heap;
+ template<class U, bool D> friend struct _mi_heap_stl_allocator_common;
+
+ _mi_heap_stl_allocator_common() {
+ mi_heap_t* hp = mi_heap_new();
+ this->heap.reset(hp, (_mi_destroy ? &heap_destroy : &heap_delete)); /* calls heap_delete/destroy when the refcount drops to zero */
+ }
+ _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common& x) mi_attr_noexcept : heap(x.heap) { }
+ template<class U> _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common<U, _mi_destroy>& x) mi_attr_noexcept : heap(x.heap) { }
+
+private:
+ static void heap_delete(mi_heap_t* hp) { if (hp != NULL) { mi_heap_delete(hp); } }
+ static void heap_destroy(mi_heap_t* hp) { if (hp != NULL) { mi_heap_destroy(hp); } }
+};
+
+// STL allocator allocation in a specific heap
+template<class T> struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common<T, false> {
+ using typename _mi_heap_stl_allocator_common<T, false>::size_type;
+ mi_heap_stl_allocator() : _mi_heap_stl_allocator_common<T, false>() { } // creates fresh heap that is deleted when the destructor is called
+ mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, false>(hp) { } // no delete nor destroy on the passed in heap
+ template<class U> mi_heap_stl_allocator(const mi_heap_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, false>(x) { }
+
+ mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; }
+ void deallocate(T* p, size_type) { mi_free(p); }
+ template<class U> struct rebind { typedef mi_heap_stl_allocator<U> other; };
+};
+
+template<class T1, class T2> bool operator==(const mi_heap_stl_allocator<T1>& x, const mi_heap_stl_allocator<T2>& y) mi_attr_noexcept { return (x.is_equal(y)); }
+template<class T1, class T2> bool operator!=(const mi_heap_stl_allocator<T1>& x, const mi_heap_stl_allocator<T2>& y) mi_attr_noexcept { return (!x.is_equal(y)); }
+
+
+// STL allocator allocation in a specific heap, where `free` does nothing and
+// the heap is destroyed in one go on destruction -- use with care!
+template<class T> struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common<T, true> {
+ using typename _mi_heap_stl_allocator_common<T, true>::size_type;
+ mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common<T, true>() { } // creates fresh heap that is destroyed when the destructor is called
+ mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, true>(hp) { } // no delete nor destroy on the passed in heap
+ template<class U> mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, true>(x) { }
+
+ mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; }
+ void deallocate(T*, size_type) { /* do nothing as we destroy the heap on destruct. */ }
+ template<class U> struct rebind { typedef mi_heap_destroy_stl_allocator<U> other; };
+};
+
+template<class T1, class T2> bool operator==(const mi_heap_destroy_stl_allocator<T1>& x, const mi_heap_destroy_stl_allocator<T2>& y) mi_attr_noexcept { return (x.is_equal(y)); }
+template<class T1, class T2> bool operator!=(const mi_heap_destroy_stl_allocator<T1>& x, const mi_heap_destroy_stl_allocator<T2>& y) mi_attr_noexcept { return (!x.is_equal(y)); }
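
A sketch (not part of the diff) contrasting the two heap-backed allocators: the first frees per element and deletes its private heap when the last copy goes away; the second never frees individual blocks and destroys the whole heap in one go:

#include <mimalloc.h>
#include <vector>

void heap_allocator_examples() {
  {
    std::vector<int, mi_heap_stl_allocator<int>> v;  // fresh private heap
    v.push_back(1);                                  // freed per element on destruction
  }                                                  // heap deleted when refcount hits 0
  {
    std::vector<int, mi_heap_destroy_stl_allocator<int>> w;  // arena-style
    w.push_back(2);                                  // deallocate() is a no-op
  }                                                  // entire heap destroyed at once
}
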
+
+#endif // C++11
+
#endif // __cplusplus
#endif
diff --git a/include/mimalloc-atomic.h b/include/mimalloc/atomic.h
similarity index 79%
rename from include/mimalloc-atomic.h
rename to include/mimalloc/atomic.h
index b650607..fe418fa 100644
--- a/include/mimalloc-atomic.h
+++ b/include/mimalloc/atomic.h
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018,2020 Microsoft Research, Daan Leijen
+Copyright (c) 2018-2023 Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -11,9 +11,9 @@ terms of the MIT license. A copy of the license can be found in the file
// --------------------------------------------------------------------------------------------
// Atomics
// We need to be portable between C, C++, and MSVC.
-// We base the primitives on the C/C++ atomics and create a mimimal wrapper for MSVC in C compilation mode.
-// This is why we try to use only `uintptr_t` and `*` as atomic types.
-// To gain better insight in the range of used atomics, we use explicitly named memory order operations
+// We base the primitives on the C/C++ atomics and create a minimal wrapper for MSVC in C compilation mode.
+// This is why we try to use only `uintptr_t` and `*` as atomic types.
+// To gain better insight in the range of used atomics, we use explicitly named memory order operations
// instead of passing the memory order as a parameter.
// -----------------------------------------------------------------------------------------------
@@ -23,10 +23,15 @@ terms of the MIT license. A copy of the license can be found in the file
#define _Atomic(tp) std::atomic<tp>
#define mi_atomic(name) std::atomic_##name
#define mi_memory_order(name) std::memory_order_##name
+#if !defined(ATOMIC_VAR_INIT) || (__cplusplus >= 202002L) // c++20, see issue #571
+ #define MI_ATOMIC_VAR_INIT(x) x
+#else
+ #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x)
+#endif
#elif defined(_MSC_VER)
// Use MSVC C wrapper for C11 atomics
#define _Atomic(tp) tp
-#define ATOMIC_VAR_INIT(x) x
+#define MI_ATOMIC_VAR_INIT(x) x
#define mi_atomic(name) mi_atomic_##name
#define mi_memory_order(name) mi_memory_order_##name
#else
@@ -34,6 +39,11 @@ terms of the MIT license. A copy of the license can be found in the file
#include <stdatomic.h>
#define mi_atomic(name) atomic_##name
#define mi_memory_order(name) memory_order_##name
+#if !defined(ATOMIC_VAR_INIT) || (__STDC_VERSION__ >= 201710L) // c17, see issue #735
+ #define MI_ATOMIC_VAR_INIT(x) x
+#else
+ #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x)
+#endif
#endif
// Various defines for all used memory orders in mimalloc
@@ -107,18 +117,20 @@ static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) {
}
// Used by timers
-#define mi_atomic_loadi64_acquire(p) mi_atomic(load_explicit)(p,mi_memory_order(acquire))
-#define mi_atomic_loadi64_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed))
-#define mi_atomic_storei64_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release))
-#define mi_atomic_storei64_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed))
+#define mi_atomic_loadi64_acquire(p) mi_atomic(load_explicit)(p,mi_memory_order(acquire))
+#define mi_atomic_loadi64_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed))
+#define mi_atomic_storei64_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release))
+#define mi_atomic_storei64_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed))
+#define mi_atomic_casi64_strong_acq_rel(p,e,d) mi_atomic_cas_strong_acq_rel(p,e,d)
+#define mi_atomic_addi64_acq_rel(p,i) mi_atomic_add_acq_rel(p,i)
#elif defined(_MSC_VER)
// MSVC C compilation wrapper that uses Interlocked operations to model C11 atomics.
#define WIN32_LEAN_AND_MEAN
-#include <Windows.h>
+#include <windows.h>
#include <intrin.h>
#ifdef _WIN64
typedef LONG64 msc_intptr_t;
@@ -173,7 +185,7 @@ static inline uintptr_t mi_atomic_exchange_explicit(_Atomic(uintptr_t)*p, uintpt
}
static inline void mi_atomic_thread_fence(mi_memory_order mo) {
(void)(mo);
- _Atomic(uintptr_t)x = 0;
+ _Atomic(uintptr_t) x = 0;
mi_atomic_exchange_explicit(&x, 1, mo);
}
static inline uintptr_t mi_atomic_load_explicit(_Atomic(uintptr_t) const* p, mi_memory_order mo) {
@@ -239,6 +251,21 @@ static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t
} while (current < x && _InterlockedCompareExchange64(p, x, current) != current);
}
+static inline void mi_atomic_addi64_acq_rel(volatile _Atomic(int64_t*)p, int64_t i) {
+ mi_atomic_addi64_relaxed(p, i);
+}
+
+static inline bool mi_atomic_casi64_strong_acq_rel(volatile _Atomic(int64_t*)p, int64_t* exp, int64_t des) {
+ int64_t read = _InterlockedCompareExchange64(p, des, *exp);
+ if (read == *exp) {
+ return true;
+ }
+ else {
+ *exp = read;
+ return false;
+ }
+}
+
// The pointer macros cast to `uintptr_t`.
#define mi_atomic_load_ptr_acquire(tp,p) (tp*)mi_atomic_load_acquire((_Atomic(uintptr_t)*)(p))
#define mi_atomic_load_ptr_relaxed(tp,p) (tp*)mi_atomic_load_relaxed((_Atomic(uintptr_t)*)(p))
@@ -269,7 +296,27 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub) {
return (intptr_t)mi_atomic_addi(p, -sub);
}
-// Yield
+typedef _Atomic(uintptr_t) mi_atomic_once_t;
+
+// Returns true only on the first invocation
+static inline bool mi_atomic_once( mi_atomic_once_t* once ) {
+ if (mi_atomic_load_relaxed(once) != 0) return false; // quick test
+ uintptr_t expected = 0;
+ return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1); // try to set to 1
+}
+
+typedef _Atomic(uintptr_t) mi_atomic_guard_t;
+
+// Allows only one thread to execute at a time
+#define mi_atomic_guard(guard) \
+ uintptr_t _mi_guard_expected = 0; \
+ for(bool _mi_guard_once = true; \
+ _mi_guard_once && mi_atomic_cas_strong_acq_rel(guard,&_mi_guard_expected,(uintptr_t)1); \
+ (mi_atomic_store_release(guard,(uintptr_t)0), _mi_guard_once = false) )
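
A usage sketch (not part of the diff; the function and variable names are hypothetical). Note that mi_atomic_guard skips the body when another thread holds the guard -- it is a try-lock-like construct, not a mutex:

static mi_atomic_once_t  init_once;    // zero-initialized
static mi_atomic_guard_t stats_guard;

static void tick_hypothetical(void) {
  if (mi_atomic_once(&init_once)) {
    // runs in exactly one thread, exactly once over the program's lifetime
  }
  mi_atomic_guard(&stats_guard) {
    // at most one thread at a time executes this; concurrent callers skip it
  }
}
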
+
+
+
+// Yield
#if defined(__cplusplus)
#include <thread>
static inline void mi_atomic_yield(void) {
@@ -281,17 +328,47 @@ static inline void mi_atomic_yield(void) {
static inline void mi_atomic_yield(void) {
YieldProcessor();
}
+#elif defined(__SSE2__)
+#include <emmintrin.h>
+static inline void mi_atomic_yield(void) {
+ _mm_pause();
+}
#elif (defined(__GNUC__) || defined(__clang__)) && \
- (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))
+ (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__armel__) || defined(__ARMEL__) || \
+ defined(__aarch64__) || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__)) || defined(__POWERPC__)
#if defined(__x86_64__) || defined(__i386__)
static inline void mi_atomic_yield(void) {
__asm__ volatile ("pause" ::: "memory");
}
-#elif defined(__arm__) || defined(__aarch64__)
+#elif defined(__aarch64__)
static inline void mi_atomic_yield(void) {
- __asm__ volatile("yield");
+ __asm__ volatile("wfe");
+}
+#elif (defined(__arm__) && __ARM_ARCH__ >= 7)
+static inline void mi_atomic_yield(void) {
+ __asm__ volatile("yield" ::: "memory");
+}
+#elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__)
+#ifdef __APPLE__
+static inline void mi_atomic_yield(void) {
+ __asm__ volatile ("or r27,r27,r27" ::: "memory");
+}
+#else
+static inline void mi_atomic_yield(void) {
+ __asm__ __volatile__ ("or 27,27,27" ::: "memory");
}
#endif
+#elif defined(__armel__) || defined(__ARMEL__)
+static inline void mi_atomic_yield(void) {
+ __asm__ volatile ("nop" ::: "memory");
+}
+#endif
+#elif defined(__sun)
+// Fallback for other archs
+#include <synch.h>
+static inline void mi_atomic_yield(void) {
+ smt_pause();
+}
#elif defined(__wasi__)
#include <sched.h>
static inline void mi_atomic_yield(void) {
@@ -305,4 +382,4 @@ static inline void mi_atomic_yield(void) {
#endif
-#endif // __MIMALLOC_ATOMIC_H
\ No newline at end of file
+#endif // __MIMALLOC_ATOMIC_H
diff --git a/include/mimalloc-internal.h b/include/mimalloc/internal.h
similarity index 50%
rename from include/mimalloc-internal.h
rename to include/mimalloc/internal.h
index d57f65c..00d2626 100644
--- a/include/mimalloc-internal.h
+++ b/include/mimalloc/internal.h
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -8,7 +8,14 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MIMALLOC_INTERNAL_H
#define MIMALLOC_INTERNAL_H
-#include "mimalloc-types.h"
+
+// --------------------------------------------------------------------------
+// This file contains the internal APIs of mimalloc and various utility
+// functions and macros.
+// --------------------------------------------------------------------------
+
+#include "mimalloc/types.h"
+#include "mimalloc/track.h"
#if (MI_DEBUG>0)
#define mi_trace_message(...) _mi_trace_message(__VA_ARGS__)
@@ -19,10 +26,11 @@ terms of the MIT license. A copy of the license can be found in the file
#define MI_CACHE_LINE 64
#if defined(_MSC_VER)
#pragma warning(disable:4127) // suppress constant conditional warning (due to MI_SECURE paths)
+#pragma warning(disable:26812) // unscoped enum warning
#define mi_decl_noinline __declspec(noinline)
#define mi_decl_thread __declspec(thread)
#define mi_decl_cache_align __declspec(align(MI_CACHE_LINE))
-#elif (defined(__GNUC__) && (__GNUC__>=3)) // includes clang and icc
+#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__) // includes clang and icc
#define mi_decl_noinline __attribute__((noinline))
#define mi_decl_thread __thread
#define mi_decl_cache_align __attribute__((aligned(MI_CACHE_LINE)))
@@ -32,6 +40,21 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_decl_cache_align
#endif
+#if defined(__EMSCRIPTEN__) && !defined(__wasi__)
+#define __wasi__
+#endif
+
+#if defined(__cplusplus)
+#define mi_decl_externc extern "C"
+#else
+#define mi_decl_externc
+#endif
+
+// pthreads
+#if !defined(_WIN32) && !defined(__wasi__)
+#define MI_USE_PTHREADS
+#include <pthread.h>
+#endif
// "options.c"
void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message);
@@ -44,61 +67,99 @@ void _mi_error_message(int err, const char* fmt, ...);
// random.c
void _mi_random_init(mi_random_ctx_t* ctx);
+void _mi_random_init_weak(mi_random_ctx_t* ctx);
+void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx);
void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx);
uintptr_t _mi_random_next(mi_random_ctx_t* ctx);
uintptr_t _mi_heap_random_next(mi_heap_t* heap);
-uintptr_t _os_random_weak(uintptr_t extra_seed);
+uintptr_t _mi_os_random_weak(uintptr_t extra_seed);
static inline uintptr_t _mi_random_shuffle(uintptr_t x);
// init.c
-extern mi_stats_t _mi_stats_main;
-extern const mi_page_t _mi_page_empty;
+extern mi_decl_cache_align mi_stats_t _mi_stats_main;
+extern mi_decl_cache_align const mi_page_t _mi_page_empty;
bool _mi_is_main_thread(void);
-bool _mi_preloading(); // true while the C runtime is not ready
+size_t _mi_current_thread_count(void);
+bool _mi_preloading(void); // true while the C runtime is not initialized yet
+mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
+mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
+void _mi_thread_done(mi_heap_t* heap);
+void _mi_thread_data_collect(void);
// os.c
+void _mi_os_init(void); // called from process init
+void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
+void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
+void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats);
+
size_t _mi_os_page_size(void);
-void _mi_os_init(void); // called from process init
-void* _mi_os_alloc(size_t size, mi_stats_t* stats); // to allocate thread local data
-void _mi_os_free(void* p, size_t size, mi_stats_t* stats); // to free thread local data
size_t _mi_os_good_alloc_size(size_t size);
+bool _mi_os_has_overcommit(void);
+bool _mi_os_has_virtual_reserve(void);
-// memory.c
-void* _mi_mem_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* large, bool* is_zero, size_t* id, mi_os_tld_t* tld);
-void _mi_mem_free(void* p, size_t size, size_t id, bool fully_committed, bool any_reset, mi_os_tld_t* tld);
+bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats);
+bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats);
+bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
+bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
+bool _mi_os_protect(void* addr, size_t size);
+bool _mi_os_unprotect(void* addr, size_t size);
+bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats);
+bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats);
-bool _mi_mem_reset(void* p, size_t size, mi_os_tld_t* tld);
-bool _mi_mem_unreset(void* p, size_t size, bool* is_zero, mi_os_tld_t* tld);
-bool _mi_mem_commit(void* p, size_t size, bool* is_zero, mi_os_tld_t* tld);
-bool _mi_mem_protect(void* addr, size_t size);
-bool _mi_mem_unprotect(void* addr, size_t size);
+void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats);
+void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* tld_stats);
-void _mi_mem_collect(mi_os_tld_t* tld);
+void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
+bool _mi_os_use_large_page(size_t size, size_t alignment);
+size_t _mi_os_large_page_size(void);
+
+void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid);
+
+// arena.c
+mi_arena_id_t _mi_arena_id_none(void);
+void _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid, mi_stats_t* stats);
+void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
+bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
+bool _mi_arena_contains(const void* p);
+void _mi_arena_collect(bool force_purge, mi_stats_t* stats);
+void _mi_arena_unsafe_destroy_all(mi_stats_t* stats);
+
+// "segment-map.c"
+void _mi_segment_map_allocated_at(const mi_segment_t* segment);
+void _mi_segment_map_freed_at(const mi_segment_t* segment);
// "segment.c"
-mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_wsize, mi_segments_tld_t* tld, mi_os_tld_t* os_tld);
+mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld);
void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
-uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t block_size, size_t* page_size, size_t* pre_size); // page start for any page
-void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
-
+bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld);
void _mi_segment_thread_collect(mi_segments_tld_t* tld);
+
+#if MI_HUGE_PAGE_ABANDON
+void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
+#else
+void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
+#endif
+
+uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size); // page start for any page
void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
void _mi_abandoned_await_readers(void);
-
-
+void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld);
// "page.c"
-void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc;
+void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
-void _mi_page_retire(mi_page_t* page); // free the page if there are no other pages with many free blocks
+void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks
void _mi_page_unfull(mi_page_t* page);
void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page
void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread...
-void _mi_heap_delayed_free(mi_heap_t* heap);
+void _mi_heap_delayed_free_all(mi_heap_t* heap);
+bool _mi_heap_delayed_free_partial(mi_heap_t* heap);
void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
+bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
void _mi_deferred_free(mi_heap_t* heap, bool force);
@@ -107,27 +168,38 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback fro
size_t _mi_bin_size(uint8_t bin); // for stats
uint8_t _mi_bin(size_t size); // for stats
-uint8_t _mi_bsr(uintptr_t x); // bit-scan-right, used on BSD in "os.c"
// "heap.c"
void _mi_heap_destroy_pages(mi_heap_t* heap);
void _mi_heap_collect_abandon(mi_heap_t* heap);
void _mi_heap_set_default_direct(mi_heap_t* heap);
+bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
+void _mi_heap_unsafe_destroy_all(void);
// "stats.c"
void _mi_stats_done(mi_stats_t* stats);
-
mi_msecs_t _mi_clock_now(void);
mi_msecs_t _mi_clock_end(mi_msecs_t start);
mi_msecs_t _mi_clock_start(void);
// "alloc.c"
-void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_malloc_generic`
-void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero);
-void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero);
+void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept; // called from `_mi_malloc_generic`
+void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
+void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned`
+void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept;
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
bool _mi_free_delayed_block(mi_block_t* block);
-void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size);
+void _mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept; // for runtime integration
+void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size);
+
+// option.c, c primitives
+char _mi_toupper(char c);
+int _mi_strnicmp(const char* s, const char* t, size_t n);
+void _mi_strlcpy(char* dest, const char* src, size_t dest_size);
+void _mi_strlcat(char* dest, const char* src, size_t dest_size);
+size_t _mi_strlen(const char* s);
+size_t _mi_strnlen(const char* s, size_t max_len);
+
#if MI_DEBUG>1
bool _mi_page_is_valid(mi_page_t* page);
@@ -139,8 +211,11 @@ bool _mi_page_is_valid(mi_page_t* page);
// ------------------------------------------------------
#if defined(__GNUC__) || defined(__clang__)
-#define mi_unlikely(x) __builtin_expect((x),0)
-#define mi_likely(x) __builtin_expect((x),1)
+#define mi_unlikely(x) (__builtin_expect(!!(x),false))
+#define mi_likely(x) (__builtin_expect(!!(x),true))
+#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
+#define mi_unlikely(x) (x) [[unlikely]]
+#define mi_likely(x) (x) [[likely]]
#else
#define mi_unlikely(x) (x)
#define mi_likely(x) (x)
@@ -178,11 +253,11 @@ bool _mi_page_is_valid(mi_page_t* page);
/* -----------------------------------------------------------
Inlined definitions
----------------------------------------------------------- */
-#define UNUSED(x) (void)(x)
+#define MI_UNUSED(x) (void)(x)
#if (MI_DEBUG>0)
-#define UNUSED_RELEASE(x)
+#define MI_UNUSED_RELEASE(x)
#else
-#define UNUSED_RELEASE(x) UNUSED(x)
+#define MI_UNUSED_RELEASE(x) MI_UNUSED(x)
#endif
#define MI_INIT4(x) x(),x(),x(),x()
@@ -194,11 +269,21 @@ bool _mi_page_is_valid(mi_page_t* page);
#define MI_INIT256(x) MI_INIT128(x),MI_INIT128(x)
+#include <string.h>   // memset
+// initialize a local variable to zero; use memset as compilers optimize constant sized memset's
+#define _mi_memzero_var(x) memset(&x,0,sizeof(x))
+
// Is `x` a power of two? (0 is considered a power of two)
static inline bool _mi_is_power_of_two(uintptr_t x) {
return ((x & (x - 1)) == 0);
}
+// Is a pointer aligned?
+static inline bool _mi_is_aligned(void* p, size_t alignment) {
+ mi_assert_internal(alignment != 0);
+ return (((uintptr_t)p % alignment) == 0);
+}
+
// Align upwards
static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) {
mi_assert_internal(alignment != 0);
@@ -211,6 +296,18 @@ static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) {
}
}
+// Align downwards
+static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
+ mi_assert_internal(alignment != 0);
+ uintptr_t mask = alignment - 1;
+ if ((alignment & mask) == 0) { // power of two?
+ return (sz & ~mask);
+ }
+ else {
+ return ((sz / alignment) * alignment);
+ }
+}
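
Worked values (assuming the code above), showing the power-of-two mask fast path and the division fallback:

// _mi_align_up(13, 8)     == (13 + 7) & ~7    == 16   (mask fast path)
// _mi_align_down(13, 8)   ==  13      & ~7    ==  8
// _mi_align_down(100, 24) == (100 / 24) * 24  == 96   (division fallback)
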
+
// Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`.
static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) {
mi_assert_internal(divider != 0);
@@ -218,13 +315,14 @@ static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) {
}
// Is memory zero initialized?
-static inline bool mi_mem_is_zero(void* p, size_t size) {
+static inline bool mi_mem_is_zero(const void* p, size_t size) {
for (size_t i = 0; i < size; i++) {
if (((uint8_t*)p)[i] != 0) return false;
}
return true;
}
+
// Align a byte size to a size in _machine words_,
// i.e. byte size == `wsize*sizeof(void*)`.
static inline size_t _mi_wsize_from_size(size_t size) {
@@ -232,32 +330,27 @@ static inline size_t _mi_wsize_from_size(size_t size) {
return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t);
}
-// Does malloc satisfy the alignment constraints already?
-static inline bool mi_malloc_satisfies_alignment(size_t alignment, size_t size) {
- return (alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2)));
-}
-
// Overflow detecting multiply
-#if __has_builtin(__builtin_umul_overflow) || __GNUC__ >= 5
+#if __has_builtin(__builtin_umul_overflow) || (defined(__GNUC__) && (__GNUC__ >= 5))
#include <limits.h> // UINT_MAX, ULONG_MAX
#if defined(_CLOCK_T) // for Illumos
#undef _CLOCK_T
#endif
static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
- #if (SIZE_MAX == UINT_MAX)
- return __builtin_umul_overflow(count, size, total);
- #elif (SIZE_MAX == ULONG_MAX)
- return __builtin_umull_overflow(count, size, total);
+ #if (SIZE_MAX == ULONG_MAX)
+ return __builtin_umull_overflow(count, size, (unsigned long *)total);
+ #elif (SIZE_MAX == UINT_MAX)
+ return __builtin_umul_overflow(count, size, (unsigned int *)total);
#else
- return __builtin_umulll_overflow(count, size, total);
+ return __builtin_umulll_overflow(count, size, (unsigned long long *)total);
#endif
}
#else /* __builtin_umul_overflow is unavailable */
static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
#define MI_MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t))) // sqrt(SIZE_MAX)
*total = count * size;
- return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW)
- && size > 0 && (SIZE_MAX / size) < count);
+ // note: gcc/clang optimize this to directly check the overflow flag
+ return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW) && size > 0 && (SIZE_MAX / size) < count);
}
#endif
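
The fallback works because `MI_MUL_NO_OVERFLOW` is 2^(bits/2), the square root of `SIZE_MAX+1`: if both operands are below it, the product cannot wrap, so the relatively expensive division check only runs for large inputs. A standalone sketch of the same check (illustrative names, not part of the patch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool sketch_mul_overflow(size_t count, size_t size, size_t* total) {
      const size_t limit = (size_t)1 << (4 * sizeof(size_t));  // sqrt(SIZE_MAX+1)
      *total = count * size;                                   // may wrap; checked below
      return ((size >= limit || count >= limit) && size > 0 && (SIZE_MAX / size) < count);
    }

    int main(void) {
      size_t total;
      printf("%d\n", sketch_mul_overflow(SIZE_MAX / 2, 3, &total)); // 1 (overflow)
      printf("%d\n", sketch_mul_overflow(1000, 1000, &total));      // 0 (fits)
      return 0;
    }
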
@@ -267,8 +360,10 @@ static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* tot
*total = size;
return false;
}
- else if (mi_unlikely(mi_mul_overflow(count, size, total))) {
+ else if mi_unlikely(mi_mul_overflow(count, size, total)) {
+ #if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "allocation request is too large (%zu * %zu bytes)\n", count, size);
+ #endif
*total = SIZE_MAX;
return true;
}
@@ -276,87 +371,11 @@ static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* tot
}
-/* ----------------------------------------------------------------------------------------
-The thread local default heap: `_mi_get_default_heap` returns the thread local heap.
-On most platforms (Windows, Linux, FreeBSD, NetBSD, etc), this just returns a
-__thread local variable (`_mi_heap_default`). With the initial-exec TLS model this ensures
-that the storage will always be available (allocated on the thread stacks).
-On some platforms though we cannot use that when overriding `malloc` since the underlying
-TLS implementation (or the loader) will call itself `malloc` on a first access and recurse.
-We try to circumvent this in an efficient way:
-- macOSX : we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). On OSX, the
- loader itself calls `malloc` even before the modules are initialized.
-- OpenBSD: we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS).
-- DragonFly: not yet working.
+/*----------------------------------------------------------------------------------------
+ Heap functions
------------------------------------------------------------------------------------------- */
extern const mi_heap_t _mi_heap_empty; // read-only empty heap, initial value of the thread local default heap
-extern bool _mi_process_is_initialized;
-#if defined (MI_TLS_RECURSE_GUARD_HOST)
-#define MI_MAIN_VIS __declspec(dllexport)
-#else
-#define MI_MAIN_VIS
-#endif
-MI_MAIN_VIS mi_heap_t *_mi_heap_main_get(void); // statically allocated main backing heap
-
-
-#if defined(MI_MALLOC_OVERRIDE)
-#if defined(__MACH__) // OSX
-#define MI_TLS_SLOT 89 // seems unused?
-// other possible unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89)
-// see
-#elif defined(__OpenBSD__)
-// use end bytes of a name; goes wrong if anyone uses names > 23 characters (ptrhread specifies 16)
-// see
-#define MI_TLS_PTHREAD_SLOT_OFS (6*sizeof(int) + 4*sizeof(void*) + 24)
-#elif defined(__DragonFly__)
-#warning "mimalloc is not working correctly on DragonFly yet."
-#define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?)
-#endif
-#endif
-
-#if defined(MI_TLS_SLOT)
-static inline void* mi_tls_slot(size_t slot) mi_attr_noexcept; // forward declaration
-#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
-#include <pthread.h>
-static inline mi_heap_t** mi_tls_pthread_heap_slot(void) {
- pthread_t self = pthread_self();
- #if defined(__DragonFly__)
- if (self==NULL) {
- static mi_heap_t* pheap_main = _mi_heap_main_get();
- return &pheap_main;
- }
- #endif
- return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS);
-}
-#elif defined(MI_TLS_PTHREAD)
-#include <pthread.h>
-extern pthread_key_t _mi_heap_default_key;
-#else
-extern mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate from
-#endif
-
-static inline mi_heap_t* mi_get_default_heap(void) {
-#if defined(MI_TLS_SLOT)
- mi_heap_t* heap = (mi_heap_t*)mi_tls_slot(MI_TLS_SLOT);
- return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
-#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
- mi_heap_t* heap = *mi_tls_pthread_heap_slot();
- return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
-#elif defined(MI_TLS_PTHREAD)
- mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? _mi_heap_main_get() : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key));
- return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
-#else
- #if defined(MI_TLS_RECURSE_GUARD)
- if (mi_unlikely(!_mi_process_is_initialized)) return _mi_heap_main_get();
- #endif
- return _mi_heap_default;
-#endif
-}
-
-static inline bool mi_heap_is_default(const mi_heap_t* heap) {
- return (heap == mi_get_default_heap());
-}
static inline bool mi_heap_is_backing(const mi_heap_t* heap) {
return (heap->tld->heap_backing == heap);
@@ -384,46 +403,57 @@ static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t si
return heap->pages_free_direct[idx];
}
-// Get the page belonging to a certain size class
-static inline mi_page_t* _mi_get_free_small_page(size_t size) {
- return _mi_heap_get_free_small_page(mi_get_default_heap(), size);
+// Segment that contains the pointer
+// Large aligned blocks may be aligned at N*MI_SEGMENT_SIZE (inside a huge segment > MI_SEGMENT_SIZE),
+// and we need align "down" to the segment info which is `MI_SEGMENT_SIZE` bytes before it;
+// therefore we align one byte before `p`.
+static inline mi_segment_t* _mi_ptr_segment(const void* p) {
+ mi_assert_internal(p != NULL);
+ return (mi_segment_t*)(((uintptr_t)p - 1) & ~MI_SEGMENT_MASK);
}
-// Segment that contains the pointer
-static inline mi_segment_t* _mi_ptr_segment(const void* p) {
- // mi_assert_internal(p != NULL);
- return (mi_segment_t*)((uintptr_t)p & ~MI_SEGMENT_MASK);
+static inline mi_page_t* mi_slice_to_page(mi_slice_t* s) {
+ mi_assert_internal(s->slice_offset== 0 && s->slice_count > 0);
+ return (mi_page_t*)(s);
+}
+
+static inline mi_slice_t* mi_page_to_slice(mi_page_t* p) {
+ mi_assert_internal(p->slice_offset== 0 && p->slice_count > 0);
+ return (mi_slice_t*)(p);
}
// Segment belonging to a page
static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) {
- mi_segment_t* segment = _mi_ptr_segment(page);
- mi_assert_internal(segment == NULL || page == &segment->pages[page->segment_idx]);
+ mi_segment_t* segment = _mi_ptr_segment(page);
+ mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_entries));
return segment;
}
-// used internally
-static inline uintptr_t _mi_segment_page_idx_of(const mi_segment_t* segment, const void* p) {
- // if (segment->page_size > MI_SEGMENT_SIZE) return &segment->pages[0]; // huge pages
- ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
- mi_assert_internal(diff >= 0 && (size_t)diff < MI_SEGMENT_SIZE);
- uintptr_t idx = (uintptr_t)diff >> segment->page_shift;
- mi_assert_internal(idx < segment->capacity);
- mi_assert_internal(segment->page_kind <= MI_PAGE_MEDIUM || idx == 0);
- return idx;
+static inline mi_slice_t* mi_slice_first(const mi_slice_t* slice) {
+ mi_slice_t* start = (mi_slice_t*)((uint8_t*)slice - slice->slice_offset);
+ mi_assert_internal(start >= _mi_ptr_segment(slice)->slices);
+ mi_assert_internal(start->slice_offset == 0);
+ mi_assert_internal(start + start->slice_count > slice);
+ return start;
}
-// Get the page containing the pointer
+// Get the page containing the pointer (performance critical as it is called in mi_free)
static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const void* p) {
- uintptr_t idx = _mi_segment_page_idx_of(segment, p);
- return &((mi_segment_t*)segment)->pages[idx];
+ mi_assert_internal(p > (void*)segment);
+ ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
+ mi_assert_internal(diff > 0 && diff <= (ptrdiff_t)MI_SEGMENT_SIZE);
+ size_t idx = (size_t)diff >> MI_SEGMENT_SLICE_SHIFT;
+ mi_assert_internal(idx <= segment->slice_entries);
+ mi_slice_t* slice0 = (mi_slice_t*)&segment->slices[idx];
+ mi_slice_t* slice = mi_slice_first(slice0); // adjust to the block that holds the page data
+ mi_assert_internal(slice->slice_offset == 0);
+ mi_assert_internal(slice >= segment->slices && slice < segment->slices + segment->slice_entries);
+ return mi_slice_to_page(slice);
}
// Quick page start for initialized pages
static inline uint8_t* _mi_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) {
- const size_t bsize = page->xblock_size;
- mi_assert_internal(bsize > 0 && (bsize%sizeof(void*)) == 0);
- return _mi_segment_page_start(segment, page, bsize, page_size, NULL);
+ return _mi_segment_page_start(segment, page, page_size);
}
// Get the page containing the pointer
@@ -431,26 +461,38 @@ static inline mi_page_t* _mi_ptr_page(void* p) {
return _mi_segment_page_of(_mi_ptr_segment(p), p);
}
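
Because segments are `MI_SEGMENT_SIZE`-aligned, the free path finds the owning segment with a single mask, indexes a slice by shifting the offset, and then follows `slice_offset` back to the slice that actually holds the page metadata. A worked sketch of the first two steps, assuming the 64-bit defaults from `types.h` below (32MiB segments, 64KiB slices); the constants and pointer value are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define SEGMENT_SHIFT 25   // 32MiB segments (illustrative 64-bit default)
    #define SLICE_SHIFT   16   // 64KiB slices   (illustrative 64-bit default)
    #define SEGMENT_MASK  (((uintptr_t)1 << SEGMENT_SHIFT) - 1)

    int main(void) {
      uintptr_t p = 0x7f0002012345u;               // some interior pointer
      uintptr_t segment = (p - 1) & ~SEGMENT_MASK; // align down, one byte before p
      size_t idx = (size_t)(p - segment) >> SLICE_SHIFT;
      printf("segment at %#lx, slice index %zu\n", (unsigned long)segment, idx);
      return 0;
    }
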
-// Get the block size of a page (special cased for huge objects)
+// Get the block size of a page (special case for huge objects)
static inline size_t mi_page_block_size(const mi_page_t* page) {
const size_t bsize = page->xblock_size;
mi_assert_internal(bsize > 0);
- if (mi_likely(bsize < MI_HUGE_BLOCK_SIZE)) {
+ if mi_likely(bsize < MI_HUGE_BLOCK_SIZE) {
return bsize;
}
else {
size_t psize;
- _mi_segment_page_start(_mi_page_segment(page), page, bsize, &psize, NULL);
+ _mi_segment_page_start(_mi_page_segment(page), page, &psize);
return psize;
}
}
+static inline bool mi_page_is_huge(const mi_page_t* page) {
+ return (_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
+}
+
// Get the usable block size of a page without fixed padding.
// This may still include internal padding due to alignment and rounding up size classes.
static inline size_t mi_page_usable_block_size(const mi_page_t* page) {
return mi_page_block_size(page) - MI_PADDING_SIZE;
}
+// size of a segment
+static inline size_t mi_segment_size(mi_segment_t* segment) {
+ return segment->segment_slices * MI_SEGMENT_SLICE_SIZE;
+}
+
+static inline uint8_t* mi_segment_end(mi_segment_t* segment) {
+ return (uint8_t*)segment + mi_segment_size(segment);
+}
// Thread free access
static inline mi_block_t* mi_page_thread_free(const mi_page_t* page) {
@@ -570,12 +612,13 @@ static inline bool mi_is_in_same_segment(const void* p, const void* q) {
}
static inline bool mi_is_in_same_page(const void* p, const void* q) {
- mi_segment_t* segmentp = _mi_ptr_segment(p);
- mi_segment_t* segmentq = _mi_ptr_segment(q);
- if (segmentp != segmentq) return false;
- uintptr_t idxp = _mi_segment_page_idx_of(segmentp, p);
- uintptr_t idxq = _mi_segment_page_idx_of(segmentq, q);
- return (idxp == idxq);
+ mi_segment_t* segment = _mi_ptr_segment(p);
+ if (_mi_ptr_segment(q) != segment) return false;
+ // assume q may be invalid // return (_mi_segment_page_of(segment, p) == _mi_segment_page_of(segment, q));
+ mi_page_t* page = _mi_segment_page_of(segment, p);
+ size_t psize;
+ uint8_t* start = _mi_segment_page_start(segment, page, &psize);
+ return (start <= (uint8_t*)q && (uint8_t*)q < start + psize);
}
static inline uintptr_t mi_rotl(uintptr_t x, uintptr_t shift) {
@@ -589,30 +632,36 @@ static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) {
static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) {
void* p = (void*)(mi_rotr(x - keys[0], keys[0]) ^ keys[1]);
- return (mi_unlikely(p==null) ? NULL : p);
+ return (p==null ? NULL : p);
}
static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const uintptr_t* keys) {
- uintptr_t x = (uintptr_t)(mi_unlikely(p==NULL) ? null : p);
+ uintptr_t x = (uintptr_t)(p==NULL ? null : p);
return mi_rotl(x ^ keys[1], keys[0]) + keys[0];
}
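
With `MI_ENCODE_FREELIST` the next pointers are xor'ed and rotated with two per-page random keys, so stray writes or forged values decode to wild pointers that the same-page check in `mi_block_next` then rejects. A standalone round-trip sketch of the same rotate-xor-add scheme (the keys and pointer are 64-bit illustrative values):

    #include <assert.h>
    #include <stdint.h>

    #define BITS (8 * sizeof(uintptr_t))
    static uintptr_t rotl(uintptr_t x, uintptr_t s) { s %= BITS; return (x << s) | (x >> ((BITS - s) % BITS)); }
    static uintptr_t rotr(uintptr_t x, uintptr_t s) { s %= BITS; return (x >> s) | (x << ((BITS - s) % BITS)); }

    static uintptr_t encode(uintptr_t p, const uintptr_t k[2]) { return rotl(p ^ k[1], k[0]) + k[0]; }
    static uintptr_t decode(uintptr_t x, const uintptr_t k[2]) { return rotr(x - k[0], k[0]) ^ k[1]; }

    int main(void) {
      const uintptr_t keys[2] = { 0x9e3779b97f4a7c15u, 0xd1b54a32d192ed03u };
      uintptr_t p = (uintptr_t)0x7f00deadbeefu;
      assert(decode(encode(p, keys), keys) == p);  // exact round trip
      assert(encode(p, keys) != p);                // stored form is scrambled
      return 0;
    }
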
static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, const uintptr_t* keys ) {
+ mi_track_mem_defined(block,sizeof(mi_block_t));
+ mi_block_t* next;
#ifdef MI_ENCODE_FREELIST
- return (mi_block_t*)mi_ptr_decode(null, block->next, keys);
+ next = (mi_block_t*)mi_ptr_decode(null, block->next, keys);
#else
- UNUSED(keys); UNUSED(null);
- return (mi_block_t*)block->next;
+ MI_UNUSED(keys); MI_UNUSED(null);
+ next = (mi_block_t*)block->next;
#endif
+ mi_track_mem_noaccess(block,sizeof(mi_block_t));
+ return next;
}
static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, const uintptr_t* keys) {
+ mi_track_mem_undefined(block,sizeof(mi_block_t));
#ifdef MI_ENCODE_FREELIST
block->next = mi_ptr_encode(null, next, keys);
#else
- UNUSED(keys); UNUSED(null);
+ MI_UNUSED(keys); MI_UNUSED(null);
block->next = (mi_encoded_t)next;
#endif
+ mi_track_mem_noaccess(block,sizeof(mi_block_t));
}
static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) {
@@ -620,13 +669,13 @@ static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t*
mi_block_t* next = mi_block_nextx(page,block,page->keys);
// check for free list corruption: is `next` at least in the same page?
// TODO: check if `next` is `page->block_size` aligned?
- if (mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next))) {
+ if mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next)) {
_mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
next = NULL;
}
return next;
#else
- UNUSED(page);
+ MI_UNUSED(page);
return mi_block_nextx(page,block,NULL);
#endif
}
@@ -635,11 +684,80 @@ static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, c
#ifdef MI_ENCODE_FREELIST
mi_block_set_nextx(page,block,next, page->keys);
#else
- UNUSED(page);
+ MI_UNUSED(page);
mi_block_set_nextx(page,block,next,NULL);
#endif
}
+
+// -------------------------------------------------------------------
+// commit mask
+// -------------------------------------------------------------------
+
+static inline void mi_commit_mask_create_empty(mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ cm->mask[i] = 0;
+ }
+}
+
+static inline void mi_commit_mask_create_full(mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ cm->mask[i] = ~((size_t)0);
+ }
+}
+
+static inline bool mi_commit_mask_is_empty(const mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ if (cm->mask[i] != 0) return false;
+ }
+ return true;
+}
+
+static inline bool mi_commit_mask_is_full(const mi_commit_mask_t* cm) {
+ for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
+ if (cm->mask[i] != ~((size_t)0)) return false;
+ }
+ return true;
+}
+
+// defined in `segment.c`:
+size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total);
+size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx);
+
+#define mi_commit_mask_foreach(cm,idx,count) \
+ idx = 0; \
+ while ((count = _mi_commit_mask_next_run(cm,&idx)) > 0) {
+
+#define mi_commit_mask_foreach_end() \
+ idx += count; \
+ }
+
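
The foreach pair expands to a loop over maximal runs of set bits, so commit and decommit calls can be batched per contiguous range rather than issued per chunk. A self-contained sketch of the same idiom over a single 64-bit mask (the real `_mi_commit_mask_next_run` in `segment.c` works over the multi-word mask):

    #include <stdint.h>
    #include <stdio.h>

    // find the next run of set bits at or after *idx; returns its length (0 when done)
    static size_t next_run(uint64_t mask, size_t* idx) {
      while (*idx < 64 && ((mask >> *idx) & 1) == 0) { (*idx)++; }      // skip zeros
      size_t count = 0;
      while (*idx + count < 64 && ((mask >> (*idx + count)) & 1) != 0) { count++; }
      return count;
    }

    int main(void) {
      uint64_t mask = 0x0F0Fu;   // two runs: [0,4) and [8,12)
      size_t idx = 0, count;
      while ((count = next_run(mask, &idx)) > 0) {
        printf("run at %zu, length %zu\n", idx, count);
        idx += count;            // same advance as mi_commit_mask_foreach_end
      }
      return 0;
    }
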
+
+
+/* -----------------------------------------------------------
+ memory id's
+----------------------------------------------------------- */
+
+static inline mi_memid_t _mi_memid_create(mi_memkind_t memkind) {
+ mi_memid_t memid;
+ _mi_memzero_var(memid);
+ memid.memkind = memkind;
+ return memid;
+}
+
+static inline mi_memid_t _mi_memid_none(void) {
+ return _mi_memid_create(MI_MEM_NONE);
+}
+
+static inline mi_memid_t _mi_memid_create_os(bool committed, bool is_zero, bool is_large) {
+ mi_memid_t memid = _mi_memid_create(MI_MEM_OS);
+ memid.initially_committed = committed;
+ memid.initially_zero = is_zero;
+ memid.is_pinned = is_large;
+ return memid;
+}
+
+
// -------------------------------------------------------------------
// Fast "random" shuffle
// -------------------------------------------------------------------
@@ -671,82 +789,189 @@ static inline uintptr_t _mi_random_shuffle(uintptr_t x) {
int _mi_os_numa_node_get(mi_os_tld_t* tld);
size_t _mi_os_numa_node_count_get(void);
-extern size_t _mi_numa_node_count;
+extern _Atomic(size_t) _mi_numa_node_count;
static inline int _mi_os_numa_node(mi_os_tld_t* tld) {
- if (mi_likely(_mi_numa_node_count == 1)) return 0;
+ if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
else return _mi_os_numa_node_get(tld);
}
static inline size_t _mi_os_numa_node_count(void) {
- if (mi_likely(_mi_numa_node_count>0)) return _mi_numa_node_count;
+ const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count);
+ if mi_likely(count > 0) { return count; }
else return _mi_os_numa_node_count_get();
}
-// -------------------------------------------------------------------
-// Getting the thread id should be performant as it is called in the
-// fast path of `_mi_free` and we specialize for various platforms.
-// -------------------------------------------------------------------
-#if defined(_WIN32)
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-static inline uintptr_t _mi_thread_id(void) mi_attr_noexcept {
- // Windows: works on Intel and ARM in both 32- and 64-bit
- return (uintptr_t)NtCurrentTeb();
-}
-#elif defined(__GNUC__) && \
- (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))
+// -----------------------------------------------------------------------
+// Count bits: trailing or leading zeros (with MI_INTPTR_BITS on all zero)
+// -----------------------------------------------------------------------
-// TLS register on x86 is in the FS or GS register, see: https://akkadia.org/drepper/tls.pdf
-static inline void* mi_tls_slot(size_t slot) mi_attr_noexcept {
- void* res;
- const size_t ofs = (slot*sizeof(void*));
-#if defined(__i386__)
- __asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // 32-bit always uses GS
-#elif defined(__MACH__) && defined(__x86_64__)
- __asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 macOSX uses GS
-#elif defined(__x86_64__)
- __asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 Linux, BSD uses FS
-#elif defined(__arm__)
- void** tcb; UNUSED(ofs);
- __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
- res = tcb[slot];
-#elif defined(__aarch64__)
- void** tcb; UNUSED(ofs);
- __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
- res = tcb[slot];
+#if defined(__GNUC__)
+
+#include <limits.h> // LONG_MAX
+#define MI_HAVE_FAST_BITSCAN
+static inline size_t mi_clz(uintptr_t x) {
+ if (x==0) return MI_INTPTR_BITS;
+#if (INTPTR_MAX == LONG_MAX)
+ return __builtin_clzl(x);
+#else
+ return __builtin_clzll(x);
#endif
- return res;
}
-
-// setting is only used on macOSX for now
-static inline void mi_tls_slot_set(size_t slot, void* value) mi_attr_noexcept {
- const size_t ofs = (slot*sizeof(void*));
-#if defined(__i386__)
- __asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // 32-bit always uses GS
-#elif defined(__MACH__) && defined(__x86_64__)
- __asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 macOSX uses GS
-#elif defined(__x86_64__)
- __asm__("movq %1,%%fs:%1" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 Linux, BSD uses FS
-#elif defined(__arm__)
- void** tcb; UNUSED(ofs);
- __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
- tcb[slot] = value;
-#elif defined(__aarch64__)
- void** tcb; UNUSED(ofs);
- __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
- tcb[slot] = value;
+static inline size_t mi_ctz(uintptr_t x) {
+ if (x==0) return MI_INTPTR_BITS;
+#if (INTPTR_MAX == LONG_MAX)
+ return __builtin_ctzl(x);
+#else
+ return __builtin_ctzll(x);
#endif
}
-static inline uintptr_t _mi_thread_id(void) mi_attr_noexcept {
- // in all our targets, slot 0 is the pointer to the thread control block
- return (uintptr_t)mi_tls_slot(0);
+#elif defined(_MSC_VER)
+
+#include <limits.h> // LONG_MAX
+#include <intrin.h> // BitScanReverse64
+#define MI_HAVE_FAST_BITSCAN
+static inline size_t mi_clz(uintptr_t x) {
+ if (x==0) return MI_INTPTR_BITS;
+ unsigned long idx;
+#if (INTPTR_MAX == LONG_MAX)
+ _BitScanReverse(&idx, x);
+#else
+ _BitScanReverse64(&idx, x);
+#endif
+ return ((MI_INTPTR_BITS - 1) - idx);
+}
+static inline size_t mi_ctz(uintptr_t x) {
+ if (x==0) return MI_INTPTR_BITS;
+ unsigned long idx;
+#if (INTPTR_MAX == LONG_MAX)
+ _BitScanForward(&idx, x);
+#else
+ _BitScanForward64(&idx, x);
+#endif
+ return idx;
+}
+
+#else
+static inline size_t mi_ctz32(uint32_t x) {
+ // de Bruijn multiplication, see
+ static const unsigned char debruijn[32] = {
+ 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
+ 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
+ };
+ if (x==0) return 32;
+ return debruijn[((x & -(int32_t)x) * 0x077CB531UL) >> 27];
+}
+static inline size_t mi_clz32(uint32_t x) {
+ // de Bruijn multiplication, see
+ static const uint8_t debruijn[32] = {
+ 31, 22, 30, 21, 18, 10, 29, 2, 20, 17, 15, 13, 9, 6, 28, 1,
+ 23, 19, 11, 3, 16, 14, 7, 24, 12, 4, 8, 25, 5, 26, 27, 0
+ };
+ if (x==0) return 32;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+ return debruijn[(uint32_t)(x * 0x07C4ACDDUL) >> 27];
+}
+
+static inline size_t mi_clz(uintptr_t x) {
+ if (x==0) return MI_INTPTR_BITS;
+#if (MI_INTPTR_BITS <= 32)
+ return mi_clz32((uint32_t)x);
+#else
+ size_t count = mi_clz32((uint32_t)(x >> 32));
+ if (count < 32) return count;
+ return (32 + mi_clz32((uint32_t)x));
+#endif
+}
+static inline size_t mi_ctz(uintptr_t x) {
+ if (x==0) return MI_INTPTR_BITS;
+#if (MI_INTPTR_BITS <= 32)
+ return mi_ctz32((uint32_t)x);
+#else
+ size_t count = mi_ctz32((uint32_t)x);
+ if (count < 32) return count;
+ return (32 + mi_ctz32((uint32_t)(x>>32)));
+#endif
+}
+
+#endif
+
+// "bit scan reverse": Return index of the highest bit (or MI_INTPTR_BITS if `x` is zero)
+static inline size_t mi_bsr(uintptr_t x) {
+ return (x==0 ? MI_INTPTR_BITS : MI_INTPTR_BITS - 1 - mi_clz(x));
+}
+
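
The portable fallback uses the classic de Bruijn trick: `x & -x` isolates the lowest set bit, and multiplying that power of two by a de Bruijn constant shifts a unique 5-bit pattern into the top bits, which the lookup table maps back to a bit index. A standalone check that the constant used above really is a perfect hash over all 32 bit positions:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      uint32_t x = 0x50;                         // lowest set bit is bit 4
      uint32_t lsb = x & (uint32_t)(-(int32_t)x);
      assert(lsb == 0x10);
      // every bit position 0..31 yields a distinct top-5-bit value:
      uint32_t seen = 0;
      for (int i = 0; i < 32; i++) {
        uint32_t h = (uint32_t)((1u << i) * 0x077CB531u) >> 27;
        seen |= (1u << h);
      }
      assert(seen == 0xFFFFFFFFu);               // all 32 table slots hit exactly once
      return 0;
    }
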
+
+// ---------------------------------------------------------------------------------
+// Provide our own `_mi_memcpy` for potential performance optimizations.
+//
+// For now, only on Windows with MSVC/clang-cl do we optimize to `rep movsb` if
+// we happen to run on x86/x64 CPUs that have "fast short rep movsb" (FSRM) support
+// (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017)). See also issue #201 and PR #253.
+// ---------------------------------------------------------------------------------
+
+#if !MI_TRACK_ENABLED && defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
+#include <intrin.h>  // __movsb, __stosb
+extern bool _mi_cpu_has_fsrm;
+static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
+ if (_mi_cpu_has_fsrm) {
+ __movsb((unsigned char*)dst, (const unsigned char*)src, n);
+ }
+ else {
+ memcpy(dst, src, n);
+ }
+}
+static inline void _mi_memzero(void* dst, size_t n) {
+ if (_mi_cpu_has_fsrm) {
+ __stosb((unsigned char*)dst, 0, n);
+ }
+ else {
+ memset(dst, 0, n);
+ }
}
#else
-// otherwise use standard C
-static inline uintptr_t _mi_thread_id(void) mi_attr_noexcept {
- return (uintptr_t)&_mi_heap_default;
+static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
+ memcpy(dst, src, n);
+}
+static inline void _mi_memzero(void* dst, size_t n) {
+ memset(dst, 0, n);
+}
+#endif
+
+// -------------------------------------------------------------------------------
+// The `_mi_memcpy_aligned` can be used if the pointers are machine-word aligned
+// This is used for example in `mi_realloc`.
+// -------------------------------------------------------------------------------
+
+#if (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__)
+// On GCC/CLang we provide a hint that the pointers are word aligned.
+static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
+ mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
+ void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
+ const void* asrc = __builtin_assume_aligned(src, MI_INTPTR_SIZE);
+ _mi_memcpy(adst, asrc, n);
+}
+
+static inline void _mi_memzero_aligned(void* dst, size_t n) {
+ mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
+ void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
+ _mi_memzero(adst, n);
+}
+#else
+// Default fallback on `_mi_memcpy`
+static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
+ mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
+ _mi_memcpy(dst, src, n);
+}
+
+static inline void _mi_memzero_aligned(void* dst, size_t n) {
+ mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
+ _mi_memzero(dst, n);
}
#endif
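
The `__builtin_assume_aligned` hint is what lets GCC/Clang skip the unaligned prologue when inlining the copy. A minimal standalone illustration of the pattern (not the mimalloc code itself; the alignment promise is on the caller, and violating it is undefined behavior):

    #include <string.h>

    static void copy_word_aligned(void* dst, const void* src, size_t n) {
    #if defined(__GNUC__) || defined(__clang__)
      dst = __builtin_assume_aligned(dst, sizeof(void*));  // promise: word aligned
      src = __builtin_assume_aligned(src, sizeof(void*));
    #endif
      memcpy(dst, src, n);
    }

    int main(void) {
      long a[4] = {1, 2, 3, 4}, b[4];
      copy_word_aligned(b, a, sizeof(a));
      return (int)b[3] - 4;  // 0 on success
    }
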
diff --git a/include/mimalloc/prim.h b/include/mimalloc/prim.h
new file mode 100644
index 0000000..9e56069
--- /dev/null
+++ b/include/mimalloc/prim.h
@@ -0,0 +1,323 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#pragma once
+#ifndef MIMALLOC_PRIM_H
+#define MIMALLOC_PRIM_H
+
+
+// --------------------------------------------------------------------------
+// This file specifies the primitive portability API.
+// Each OS/host needs to implement these primitives, see `src/prim`
+// for implementations on Windows, macOS, WASI, and Linux/Unix.
+//
+// note: on all primitive functions, we always have result parameters != NULL, and:
+// addr != NULL and page aligned
+// size > 0 and page aligned
+// return value is an error code: an int where 0 is success.
+// --------------------------------------------------------------------------
+
+// OS memory configuration
+typedef struct mi_os_mem_config_s {
+ size_t page_size; // 4KiB
+ size_t large_page_size; // 2MiB
+ size_t alloc_granularity; // smallest allocation size (on Windows 64KiB)
+ bool has_overcommit; // can we reserve more memory than can be actually committed?
+ bool must_free_whole; // must allocated blocks be freed as a whole (false for mmap, true for VirtualAlloc)
+ bool has_virtual_reserve; // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory)
+} mi_os_mem_config_t;
+
+// Initialize
+void _mi_prim_mem_init( mi_os_mem_config_t* config );
+
+// Free OS memory
+int _mi_prim_free(void* addr, size_t size );
+
+// Allocate OS memory. Return NULL on error.
+// The `try_alignment` is just a hint and the returned pointer does not have to be aligned.
+// If `commit` is false, the virtual memory range only needs to be reserved (with no access)
+// which will later be committed explicitly using `_mi_prim_commit`.
+// `is_zero` is set to true if the memory was zero initialized (as on most OS's)
+// pre: !commit => !allow_large
+// try_alignment >= _mi_os_page_size() and a power of 2
+int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr);
+
+// Commit memory. Returns error code or 0 on success.
+// For example, on Linux this would make the memory PROT_READ|PROT_WRITE.
+// `is_zero` is set to true if the memory was zero initialized (e.g. on Windows)
+int _mi_prim_commit(void* addr, size_t size, bool* is_zero);
+
+// Decommit memory. Returns error code or 0 on success. The `needs_recommit` result is true
+// if the memory would need to be re-committed. For example, on Windows this is always true,
+// but on Linux we could use MADV_DONTNEED to decommit which does not need a recommit.
+// pre: needs_recommit != NULL
+int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit);
+
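
As a hedged sketch of what a Unix implementation of this primitive could look like (an assumption for illustration, not the shipped `src/prim/unix` code): on Linux, `madvise(MADV_DONTNEED)` keeps the mapping and its access rights, and reclaimed pages read back as zero, so no explicit recommit is needed:

    #include <sys/mman.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <errno.h>

    // illustrative only: decommit by letting the kernel reclaim the pages
    static int sketch_prim_decommit(void* addr, size_t size, bool* needs_recommit) {
      *needs_recommit = false;  // mapping and protection stay intact
      return (madvise(addr, size, MADV_DONTNEED) == 0 ? 0 : errno);
    }
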
+// Reset memory. The range keeps being accessible but the content might be reset.
+// Returns error code or 0 on success.
+int _mi_prim_reset(void* addr, size_t size);
+
+// Protect memory. Returns error code or 0 on success.
+int _mi_prim_protect(void* addr, size_t size, bool protect);
+
+// Allocate huge (1GiB) pages possibly associated with a NUMA node.
+// `is_zero` is set to true if the memory was zero initialized (as on most OS's)
+// pre: size > 0 and a multiple of 1GiB.
+// numa_node is either negative (don't care), or a numa node number.
+int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr);
+
+// Return the current NUMA node
+size_t _mi_prim_numa_node(void);
+
+// Return the number of logical NUMA nodes
+size_t _mi_prim_numa_node_count(void);
+
+// Clock ticks
+mi_msecs_t _mi_prim_clock_now(void);
+
+// Return process information (only for statistics)
+typedef struct mi_process_info_s {
+ mi_msecs_t elapsed;
+ mi_msecs_t utime;
+ mi_msecs_t stime;
+ size_t current_rss;
+ size_t peak_rss;
+ size_t current_commit;
+ size_t peak_commit;
+ size_t page_faults;
+} mi_process_info_t;
+
+void _mi_prim_process_info(mi_process_info_t* pinfo);
+
+// Default stderr output. (only for warnings etc. with verbose enabled)
+// msg != NULL && _mi_strlen(msg) > 0
+void _mi_prim_out_stderr( const char* msg );
+
+// Get an environment variable. (only for options)
+// name != NULL, result != NULL, result_size >= 64
+bool _mi_prim_getenv(const char* name, char* result, size_t result_size);
+
+
+// Fill a buffer with strong randomness; return `false` on error or if
+// there is no strong randomization available.
+bool _mi_prim_random_buf(void* buf, size_t buf_len);
+
+// Called on the first thread start, and should ensure `_mi_thread_done` is called on thread termination.
+void _mi_prim_thread_init_auto_done(void);
+
+// Called on process exit and may take action to clean up resources associated with the thread auto done.
+void _mi_prim_thread_done_auto_done(void);
+
+// Called when the default heap for a thread changes
+void _mi_prim_thread_associate_default_heap(mi_heap_t* heap);
+
+
+//-------------------------------------------------------------------
+// Thread id: `_mi_prim_thread_id()`
+//
+// Getting the thread id should be performant as it is called in the
+// fast path of `_mi_free` and we specialize for various platforms as
+// inlined definitions. Regular code should call `init.c:_mi_thread_id()`.
+// We only require _mi_prim_thread_id() to return a unique id
+// for each thread (unequal to zero).
+//-------------------------------------------------------------------
+
+// defined in `init.c`; do not use these directly
+extern mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate from
+extern bool _mi_process_is_initialized; // has mi_process_init been called?
+
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept;
+
+#if defined(_WIN32)
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
+ // Windows: works on Intel and ARM in both 32- and 64-bit
+ return (uintptr_t)NtCurrentTeb();
+}
+
+// We use assembly for a fast thread id on the main platforms. The TLS layout depends on
+// both the OS and libc implementation so we use specific tests for each main platform.
+// If you test on another platform and it works please send a PR :-)
+// see also https://akkadia.org/drepper/tls.pdf for more info on the TLS register.
+#elif defined(__GNUC__) && ( \
+ (defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \
+ || (defined(__APPLE__) && (defined(__x86_64__) || defined(__aarch64__))) \
+ || (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \
+ || (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
+ || (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
+ )
+
+static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept {
+ void* res;
+ const size_t ofs = (slot*sizeof(void*));
+ #if defined(__i386__)
+ __asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86 32-bit always uses GS
+ #elif defined(__APPLE__) && defined(__x86_64__)
+ __asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 macOSX uses GS
+ #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
+ __asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x32 ABI
+ #elif defined(__x86_64__)
+ __asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 Linux, BSD uses FS
+ #elif defined(__arm__)
+ void** tcb; MI_UNUSED(ofs);
+ __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
+ res = tcb[slot];
+ #elif defined(__aarch64__)
+ void** tcb; MI_UNUSED(ofs);
+ #if defined(__APPLE__) // M1, issue #343
+ __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
+ #else
+ __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
+ #endif
+ res = tcb[slot];
+ #endif
+ return res;
+}
+
+// setting a tls slot is only used on macOS for now
+static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexcept {
+ const size_t ofs = (slot*sizeof(void*));
+ #if defined(__i386__)
+ __asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // 32-bit always uses GS
+ #elif defined(__APPLE__) && defined(__x86_64__)
+ __asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 macOS uses GS
+ #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
+ __asm__("movl %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x32 ABI
+ #elif defined(__x86_64__)
+ __asm__("movq %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 Linux, BSD uses FS
+ #elif defined(__arm__)
+ void** tcb; MI_UNUSED(ofs);
+ __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
+ tcb[slot] = value;
+ #elif defined(__aarch64__)
+ void** tcb; MI_UNUSED(ofs);
+ #if defined(__APPLE__) // M1, issue #343
+ __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
+ #else
+ __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
+ #endif
+ tcb[slot] = value;
+ #endif
+}
+
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
+ #if defined(__BIONIC__)
+ // issue #384, #495: on the Bionic libc (Android), slot 1 is the thread id
+ // see: https://github.com/aosp-mirror/platform_bionic/blob/c44b1d0676ded732df4b3b21c5f798eacae93228/libc/platform/bionic/tls_defines.h#L86
+ return (uintptr_t)mi_prim_tls_slot(1);
+ #else
+ // in all our other targets, slot 0 is the thread id
+ // glibc: https://sourceware.org/git/?p=glibc.git;a=blob_plain;f=sysdeps/x86_64/nptl/tls.h
+ // apple: https://github.com/apple/darwin-xnu/blob/main/libsyscall/os/tsd.h#L36
+ return (uintptr_t)mi_prim_tls_slot(0);
+ #endif
+}
+
+#else
+
+// otherwise use portable C, taking the address of a thread local variable (this is still very fast on most platforms).
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
+ return (uintptr_t)&_mi_heap_default;
+}
+
+#endif
+
+
+
+/* ----------------------------------------------------------------------------------------
+The thread local default heap: `_mi_prim_get_default_heap()`
+This is inlined here as it is on the fast path for allocation functions.
+
+On most platforms (Windows, Linux, FreeBSD, NetBSD, etc), this just returns a
+__thread local variable (`_mi_heap_default`). With the initial-exec TLS model this ensures
+that the storage will always be available (allocated on the thread stacks).
+
+On some platforms though we cannot use that when overriding `malloc` since the underlying
+TLS implementation (or the loader) will call itself `malloc` on a first access and recurse.
+We try to circumvent this in an efficient way:
+- macOS  : we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). On macOS, the
+  loader itself calls `malloc` even before the modules are initialized.
+- OpenBSD: we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS).
+- DragonFly: defaults are working but seem slow compared to FreeBSD (see PR #323)
+------------------------------------------------------------------------------------------- */
+
+static inline mi_heap_t* mi_prim_get_default_heap(void);
+
+#if defined(MI_MALLOC_OVERRIDE)
+#if defined(__APPLE__) // macOS
+ #define MI_TLS_SLOT 89 // seems unused?
+ // #define MI_TLS_RECURSE_GUARD 1
+ // other possible unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89)
+ // see
+#elif defined(__OpenBSD__)
+ // use end bytes of a name; goes wrong if anyone uses names > 23 characters (pthread specifies 16)
+ // see
+ #define MI_TLS_PTHREAD_SLOT_OFS (6*sizeof(int) + 4*sizeof(void*) + 24)
+ // #elif defined(__DragonFly__)
+ // #warning "mimalloc is not working correctly on DragonFly yet."
+ // #define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?)
+#elif defined(__ANDROID__)
+ // See issue #381
+ #define MI_TLS_PTHREAD
+#endif
+#endif
+
+
+#if defined(MI_TLS_SLOT)
+
+static inline mi_heap_t* mi_prim_get_default_heap(void) {
+ mi_heap_t* heap = (mi_heap_t*)mi_prim_tls_slot(MI_TLS_SLOT);
+ if mi_unlikely(heap == NULL) {
+ #ifdef __GNUC__
+ __asm(""); // prevent conditional load of the address of _mi_heap_empty
+ #endif
+ heap = (mi_heap_t*)&_mi_heap_empty;
+ }
+ return heap;
+}
+
+#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
+
+static inline mi_heap_t** mi_prim_tls_pthread_heap_slot(void) {
+ pthread_t self = pthread_self();
+ #if defined(__DragonFly__)
+ if (self==NULL) return NULL;
+ #endif
+ return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS);
+}
+
+static inline mi_heap_t* mi_prim_get_default_heap(void) {
+ mi_heap_t** pheap = mi_prim_tls_pthread_heap_slot();
+ if mi_unlikely(pheap == NULL) return _mi_heap_main_get();
+ mi_heap_t* heap = *pheap;
+ if mi_unlikely(heap == NULL) return (mi_heap_t*)&_mi_heap_empty;
+ return heap;
+}
+
+#elif defined(MI_TLS_PTHREAD)
+
+extern pthread_key_t _mi_heap_default_key;
+static inline mi_heap_t* mi_prim_get_default_heap(void) {
+ mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? _mi_heap_main_get() : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key));
+ return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
+}
+
+#else // default using a thread local variable; used on most platforms.
+
+static inline mi_heap_t* mi_prim_get_default_heap(void) {
+ #if defined(MI_TLS_RECURSE_GUARD)
+ if (mi_unlikely(!_mi_process_is_initialized)) return _mi_heap_main_get();
+ #endif
+ return _mi_heap_default;
+}
+
+#endif // mi_prim_get_default_heap()
+
+
+
+#endif // MIMALLOC_PRIM_H
diff --git a/include/mimalloc/track.h b/include/mimalloc/track.h
new file mode 100644
index 0000000..9545f75
--- /dev/null
+++ b/include/mimalloc/track.h
@@ -0,0 +1,147 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#pragma once
+#ifndef MIMALLOC_TRACK_H
+#define MIMALLOC_TRACK_H
+
+/* ------------------------------------------------------------------------------------------------------
+Track memory ranges with macros for tools like Valgrind, address sanitizer, or other memory checkers.
+These can be defined for tracking allocation:
+
+ #define mi_track_malloc_size(p,reqsize,size,zero)
+ #define mi_track_free_size(p,_size)
+
+The macros are set up such that the size passed to `mi_track_free_size`
+always matches the size of `mi_track_malloc_size`. (currently, `size == mi_usable_size(p)`).
+The `reqsize` is what the user requested, and `size >= reqsize`.
+The `size` is either byte precise (and `size==reqsize`) if `MI_PADDING` is enabled,
+or otherwise it is the usable block size which may be larger than the original request.
+Use `_mi_block_size_of(void* p)` to get the full block size that was allocated (including padding etc).
+The `zero` parameter is `true` if the allocated block is zero initialized.
+
+Optional:
+
+ #define mi_track_align(p,alignedp,offset,size)
+ #define mi_track_resize(p,oldsize,newsize)
+ #define mi_track_init()
+
+The `mi_track_align` is called right after a `mi_track_malloc` for aligned pointers in a block.
+The corresponding `mi_track_free` still uses the block start pointer and original size (corresponding to the `mi_track_malloc`).
+The `mi_track_resize` is currently unused but could be called on reallocations within a block.
+`mi_track_init` is called at program start.
+
+The following macros are for tools like asan and valgrind to track whether memory is
+defined, undefined, or not accessible at all:
+
+ #define mi_track_mem_defined(p,size)
+ #define mi_track_mem_undefined(p,size)
+ #define mi_track_mem_noaccess(p,size)
+
+-------------------------------------------------------------------------------------------------------*/
+
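
A build could also add its own backend alongside the tools below, for example as an additional `#elif` branch that defines the two required macros. A hypothetical logging backend, purely for illustration (these definitions are not among the built-in Valgrind/asan/ETW tools):

    #include <stdio.h>

    // hypothetical: trace every allocation and free to stderr
    #define mi_track_malloc_size(p,reqsize,size,zero) \
      fprintf(stderr, "alloc %p: %zu bytes (requested %zu, zero=%d)\n", \
              (void*)(p), (size_t)(size), (size_t)(reqsize), (int)(zero))
    #define mi_track_free_size(p,size) \
      fprintf(stderr, "free  %p: %zu bytes\n", (void*)(p), (size_t)(size))
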
+#if MI_TRACK_VALGRIND
+// valgrind tool
+
+#define MI_TRACK_ENABLED 1
+#define MI_TRACK_HEAP_DESTROY 1 // track free of individual blocks on heap_destroy
+#define MI_TRACK_TOOL "valgrind"
+
+#include <valgrind/valgrind.h>
+#include <valgrind/memcheck.h>
+
+#define mi_track_malloc_size(p,reqsize,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero)
+#define mi_track_free_size(p,_size) VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/)
+#define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/)
+#define mi_track_mem_defined(p,size) VALGRIND_MAKE_MEM_DEFINED(p,size)
+#define mi_track_mem_undefined(p,size) VALGRIND_MAKE_MEM_UNDEFINED(p,size)
+#define mi_track_mem_noaccess(p,size) VALGRIND_MAKE_MEM_NOACCESS(p,size)
+
+#elif MI_TRACK_ASAN
+// address sanitizer
+
+#define MI_TRACK_ENABLED 1
+#define MI_TRACK_HEAP_DESTROY 0
+#define MI_TRACK_TOOL "asan"
+
+#include <sanitizer/asan_interface.h>
+
+#define mi_track_malloc_size(p,reqsize,size,zero) ASAN_UNPOISON_MEMORY_REGION(p,size)
+#define mi_track_free_size(p,size) ASAN_POISON_MEMORY_REGION(p,size)
+#define mi_track_mem_defined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size)
+#define mi_track_mem_undefined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size)
+#define mi_track_mem_noaccess(p,size) ASAN_POISON_MEMORY_REGION(p,size)
+
+#elif MI_TRACK_ETW
+// windows event tracing
+
+#define MI_TRACK_ENABLED 1
+#define MI_TRACK_HEAP_DESTROY 1
+#define MI_TRACK_TOOL "ETW"
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include "../src/prim/windows/etw.h"
+
+#define mi_track_init() EventRegistermicrosoft_windows_mimalloc();
+#define mi_track_malloc_size(p,reqsize,size,zero) EventWriteETW_MI_ALLOC((UINT64)(p), size)
+#define mi_track_free_size(p,size) EventWriteETW_MI_FREE((UINT64)(p), size)
+
+#else
+// no tracking
+
+#define MI_TRACK_ENABLED 0
+#define MI_TRACK_HEAP_DESTROY 0
+#define MI_TRACK_TOOL "none"
+
+#define mi_track_malloc_size(p,reqsize,size,zero)
+#define mi_track_free_size(p,_size)
+
+#endif
+
+// -------------------
+// Utility definitions
+
+#ifndef mi_track_resize
+#define mi_track_resize(p,oldsize,newsize) mi_track_free_size(p,oldsize); mi_track_malloc(p,newsize,false)
+#endif
+
+#ifndef mi_track_align
+#define mi_track_align(p,alignedp,offset,size) mi_track_mem_noaccess(p,offset)
+#endif
+
+#ifndef mi_track_init
+#define mi_track_init()
+#endif
+
+#ifndef mi_track_mem_defined
+#define mi_track_mem_defined(p,size)
+#endif
+
+#ifndef mi_track_mem_undefined
+#define mi_track_mem_undefined(p,size)
+#endif
+
+#ifndef mi_track_mem_noaccess
+#define mi_track_mem_noaccess(p,size)
+#endif
+
+
+#if MI_PADDING
+#define mi_track_malloc(p,reqsize,zero) \
+ if ((p)!=NULL) { \
+ mi_assert_internal(mi_usable_size(p)==(reqsize)); \
+ mi_track_malloc_size(p,reqsize,reqsize,zero); \
+ }
+#else
+#define mi_track_malloc(p,reqsize,zero) \
+ if ((p)!=NULL) { \
+ mi_assert_internal(mi_usable_size(p)>=(reqsize)); \
+ mi_track_malloc_size(p,reqsize,mi_usable_size(p),zero); \
+ }
+#endif
+
+#endif
diff --git a/include/mimalloc-types.h b/include/mimalloc/types.h
similarity index 56%
rename from include/mimalloc-types.h
rename to include/mimalloc/types.h
index d9bd14e..2005238 100644
--- a/include/mimalloc-types.h
+++ b/include/mimalloc/types.h
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -8,16 +8,27 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MIMALLOC_TYPES_H
#define MIMALLOC_TYPES_H
+// --------------------------------------------------------------------------
+// This file contains the main type definitions for mimalloc:
+// mi_heap_t : all data for a thread-local heap, contains
+// lists of all managed heap pages.
+// mi_segment_t : a larger chunk of memory (32MiB) from where pages
+// are allocated.
+// mi_page_t : a mimalloc page (usually 64KiB or 512KiB) from
+// where objects are allocated.
+// --------------------------------------------------------------------------
+
+
#include <stddef.h> // ptrdiff_t
#include <stdint.h> // uintptr_t, uint16_t, etc
-#include <mimalloc-atomic.h> // _Atomic
+#include "mimalloc/atomic.h" // _Atomic
#ifdef _MSC_VER
#pragma warning(disable:4214) // bitfield is not int
-#endif
+#endif
// Minimal alignment necessary. On most platforms 16 bytes are needed
-// due to SSE registers for example. This must be at least `MI_INTPTR_SIZE`
+// due to SSE registers for example. This must be at least `sizeof(void*)`
#ifndef MI_MAX_ALIGN_SIZE
#define MI_MAX_ALIGN_SIZE 16 // sizeof(max_align_t)
#endif
@@ -29,6 +40,11 @@ terms of the MIT license. A copy of the license can be found in the file
// Define NDEBUG in the release version to disable assertions.
// #define NDEBUG
+// Define MI_TRACK_ to enable tracking support
+// #define MI_TRACK_VALGRIND 1
+// #define MI_TRACK_ASAN 1
+// #define MI_TRACK_ETW 1
+
// Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance).
// #define MI_STAT 1
@@ -55,18 +71,31 @@ terms of the MIT license. A copy of the license can be found in the file
#endif
// Reserve extra padding at the end of each block to be more resilient against heap block overflows.
-// The padding can detect byte-precise buffer overflow on free.
-#if !defined(MI_PADDING) && (MI_DEBUG>=1)
+// The padding can detect buffer overflow on free.
+#if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || (MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_TRACK_ETW))
#define MI_PADDING 1
#endif
+// Check padding bytes; allows byte-precise buffer overflow detection
+#if !defined(MI_PADDING_CHECK) && MI_PADDING && (MI_SECURE>=3 || MI_DEBUG>=1)
+#define MI_PADDING_CHECK 1
+#endif
+
// Encoded free lists allow detection of corrupted free lists
// and can detect buffer overflows, modify after free, and double `free`s.
-#if (MI_SECURE>=3 || MI_DEBUG>=1 || MI_PADDING > 0)
+#if (MI_SECURE>=3 || MI_DEBUG>=1)
#define MI_ENCODE_FREELIST 1
#endif
+
+// We used to abandon huge pages, and to eagerly deallocate them if freed from another
+// thread, but that makes it impossible to visit them during a heap walk or include them
+// in a `mi_heap_destroy`. We therefore instead reset/decommit the huge blocks if freed
+// from another thread so most memory is available until it gets properly freed by the
+// owning thread.
+// #define MI_HUGE_PAGE_ABANDON 1
+
+
// ------------------------------------------------------
// Platform specific values
// ------------------------------------------------------
@@ -83,20 +112,43 @@ terms of the MIT license. A copy of the license can be found in the file
// or otherwise one might define an intptr_t type that is larger than a pointer...
// ------------------------------------------------------
-#if INTPTR_MAX == 9223372036854775807LL
+#if INTPTR_MAX > INT64_MAX
+# define MI_INTPTR_SHIFT (4) // assume 128-bit (as on arm CHERI for example)
+#elif INTPTR_MAX == INT64_MAX
# define MI_INTPTR_SHIFT (3)
-#elif INTPTR_MAX == 2147483647LL
+#elif INTPTR_MAX == INT32_MAX
# define MI_INTPTR_SHIFT (2)
#else
-#error platform must be 32 or 64 bits
+#error platform pointers must be 32, 64, or 128 bits
+#endif
+
+#if SIZE_MAX == UINT64_MAX
+# define MI_SIZE_SHIFT (3)
+typedef int64_t mi_ssize_t;
+#elif SIZE_MAX == UINT32_MAX
+# define MI_SIZE_SHIFT (2)
+typedef int32_t mi_ssize_t;
+#else
+#error platform objects must be 32 or 64 bits
+#endif
+
+#if (SIZE_MAX/2) > LONG_MAX
+# define MI_ZU(x) x##ULL
+# define MI_ZI(x) x##LL
+#else
+# define MI_ZU(x) x##UL
+# define MI_ZI(x) x##L
#endif
+#define MI_INTPTR_SIZE  (1<<MI_INTPTR_SHIFT)
+#define MI_INTPTR_BITS  (MI_INTPTR_SIZE*8)
+
+#define MI_SIZE_SIZE  (1<<MI_SIZE_SHIFT)
+#define MI_SIZE_BITS  (MI_SIZE_SIZE*8)
+
+#define MI_KiB  (MI_ZU(1024))
+#define MI_MiB  (MI_KiB*MI_KiB)
+#define MI_GiB  (MI_MiB*MI_KiB)
+
+// Main tuning parameters for segment and page sizes
+// Sizes for 64-bit (usually divide by two for 32-bit)
+#define MI_SEGMENT_SLICE_SHIFT (13 + MI_INTPTR_SHIFT) // 64KiB (32KiB on 32-bit)
+
+#if MI_INTPTR_SIZE > 4
+#define MI_SEGMENT_SHIFT ( 9 + MI_SEGMENT_SLICE_SHIFT) // 32MiB
+#else
+#define MI_SEGMENT_SHIFT ( 7 + MI_SEGMENT_SLICE_SHIFT) // 4MiB on 32-bit
+#endif
+
+#define MI_SMALL_PAGE_SHIFT (MI_SEGMENT_SLICE_SHIFT) // 64KiB
+#define MI_MEDIUM_PAGE_SHIFT ( 3 + MI_SMALL_PAGE_SHIFT) // 512KiB
+
// Derived constants
-#define MI_SEGMENT_SIZE (1UL<<MI_SEGMENT_SHIFT)
+#define MI_SEGMENT_SIZE        (MI_ZU(1)<<MI_SEGMENT_SHIFT)
+#define MI_SEGMENT_ALIGN       MI_SEGMENT_SIZE
+#define MI_SEGMENT_MASK        ((uintptr_t)(MI_SEGMENT_ALIGN - 1))
+#define MI_SEGMENT_SLICE_SIZE  (MI_ZU(1)<<MI_SEGMENT_SLICE_SHIFT)
+#define MI_SLICES_PER_SEGMENT  (MI_SEGMENT_SIZE / MI_SEGMENT_SLICE_SIZE) // 1024
-#if (MI_LARGE_OBJ_WSIZE_MAX >= 655360)
-#error "define more bins"
+#if (MI_MEDIUM_OBJ_WSIZE_MAX >= 655360)
+#error "mimalloc internal: define more bins"
#endif
+// Maximum slice offset (255 on 64-bit, with 16MiB max alignment and 64KiB slices)
+#define MI_MAX_SLICE_OFFSET ((MI_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)
+
// Used as a special value to encode block sizes in 32 bits.
-#define MI_HUGE_BLOCK_SIZE ((uint32_t)MI_HUGE_OBJ_SIZE_MAX)
+#define MI_HUGE_BLOCK_SIZE ((uint32_t)(2*MI_GiB))
+
+// blocks up to this size are always allocated aligned
+#define MI_MAX_ALIGN_GUARANTEE (8*MI_MAX_ALIGN_SIZE)
+
+// Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments
+#define MI_ALIGNMENT_MAX (MI_SEGMENT_SIZE >> 1)
+
+
+// ------------------------------------------------------
+// Mimalloc pages contain allocated blocks
+// ------------------------------------------------------
// The free lists use encoded next fields
// (Only actually encodes when MI_ENCODED_FREELIST is defined.)
-typedef uintptr_t mi_encoded_t;
+typedef uintptr_t mi_encoded_t;
+
+// thread id's
+typedef size_t mi_threadid_t;
// free lists contain blocks
typedef struct mi_block_s {
@@ -201,88 +273,176 @@ typedef uintptr_t mi_thread_free_t;
// We don't count `freed` (as |free|) but use `used` to reduce
// the number of memory accesses in the `mi_page_all_free` function(s).
//
-// Notes:
+// Notes:
// - Access is optimized for `mi_free` and `mi_page_alloc` (in `alloc.c`)
// - Using `uint16_t` does not seem to slow things down
// - The size is 8 words on 64-bit which helps the page index calculations
-// (and 10 words on 32-bit, and encoded free lists add 2 words. Sizes 10
+// (and 10 words on 32-bit, and encoded free lists add 2 words. Sizes 10
// and 12 are still good for address calculation)
-// - To limit the structure size, the `xblock_size` is 32-bits only; for
+// - To limit the structure size, the `xblock_size` is 32-bits only; for
// blocks > MI_HUGE_BLOCK_SIZE the size is determined from the segment page size
// - `thread_free` uses the bottom bits as a delayed-free flags to optimize
// concurrent frees where only the first concurrent free adds to the owning
// heap `thread_delayed_free` list (see `alloc.c:mi_free_block_mt`).
// The invariant is that no-delayed-free is only set if there is
-// at least one block that will be added, or as already been added, to
+// at least one block that will be added, or as already been added, to
// the owning heap `thread_delayed_free` list. This guarantees that pages
// will be freed correctly even if only other threads free blocks.
typedef struct mi_page_s {
// "owned" by the segment
- uint8_t segment_idx; // index in the segment `pages` array, `page == &segment->pages[page->segment_idx]`
- uint8_t segment_in_use:1; // `true` if the segment allocated this page
- uint8_t is_reset:1; // `true` if the page memory was reset
- uint8_t is_committed:1; // `true` if the page virtual memory is committed
- uint8_t is_zero_init:1; // `true` if the page was zero initialized
+ uint32_t slice_count; // slices in this page (0 if not a page)
+ uint32_t slice_offset; // distance from the actual page data slice (0 if a page)
+ uint8_t is_committed : 1; // `true` if the page virtual memory is committed
+ uint8_t is_zero_init : 1; // `true` if the page was initially zero initialized
// layout like this to optimize access in `mi_malloc` and `mi_free`
uint16_t capacity; // number of blocks committed, must be the first field, see `segment.c:page_clear`
uint16_t reserved; // number of blocks reserved in memory
mi_page_flags_t flags; // `in_full` and `has_aligned` flags (8 bits)
- uint8_t is_zero:1; // `true` if the blocks in the free list are zero initialized
- uint8_t retire_expire:7; // expiration count for retired blocks
+ uint8_t free_is_zero : 1; // `true` if the blocks in the free list are zero initialized
+ uint8_t retire_expire : 7; // expiration count for retired blocks
mi_block_t* free; // list of available free blocks (`malloc` allocates from this list)
- #ifdef MI_ENCODE_FREELIST
- uintptr_t keys[2]; // two random keys to encode the free lists (see `_mi_block_next`)
- #endif
uint32_t used; // number of blocks in use (including blocks in `local_free` and `thread_free`)
- uint32_t xblock_size; // size available in each block (always `>0`)
-
+ uint32_t xblock_size; // size available in each block (always `>0`)
mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`)
+
+ #if (MI_ENCODE_FREELIST || MI_PADDING)
+ uintptr_t keys[2]; // two random keys to encode the free lists (see `_mi_block_next`) or padding canary
+ #endif
+
_Atomic(mi_thread_free_t) xthread_free; // list of deferred free blocks freed by other threads
_Atomic(uintptr_t) xheap;
-
+
struct mi_page_s* next; // next page owned by this thread with the same `block_size`
struct mi_page_s* prev; // previous page owned by this thread with the same `block_size`
+
+ // 64-bit 9 words, 32-bit 12 words, (+2 for secure)
+ #if MI_INTPTR_SIZE==8
+ uintptr_t padding[1];
+ #endif
} mi_page_t;
+// ------------------------------------------------------
+// Mimalloc segments contain mimalloc pages
+// ------------------------------------------------------
+
typedef enum mi_page_kind_e {
- MI_PAGE_SMALL, // small blocks go into 64kb pages inside a segment
- MI_PAGE_MEDIUM, // medium blocks go into 512kb pages inside a segment
- MI_PAGE_LARGE, // larger blocks go into a single page spanning a whole segment
- MI_PAGE_HUGE // huge blocks (>512kb) are put into a single page in a segment of the exact size (but still 2mb aligned)
+ MI_PAGE_SMALL, // small blocks go into 64KiB pages inside a segment
+ MI_PAGE_MEDIUM, // medium blocks go into medium pages inside a segment
+ MI_PAGE_LARGE, // larger blocks go into a page of just one block
+ MI_PAGE_HUGE, // huge blocks (> 16 MiB) are put into a single page in a single segment.
} mi_page_kind_t;
-// Segments are large allocated memory blocks (2mb on 64 bit) from
+typedef enum mi_segment_kind_e {
+ MI_SEGMENT_NORMAL, // MI_SEGMENT_SIZE size with pages inside.
+ MI_SEGMENT_HUGE, // > MI_LARGE_SIZE_MAX segment with just one huge page inside.
+} mi_segment_kind_t;
+
+// ------------------------------------------------------
+// A segment holds a commit mask where a bit is set if
+// the corresponding MI_COMMIT_SIZE area is committed.
+// The MI_COMMIT_SIZE must be a multiple of the slice
+// size. If it is equal we have the most fine-grained
+// decommit (but setting it higher can be more efficient).
+// The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will
+// be committed in one go which can be set higher than
+// MI_COMMIT_SIZE for efficiency (while the decommit mask
+// is still tracked in fine-grained MI_COMMIT_SIZE chunks)
+// ------------------------------------------------------
+
+#define MI_MINIMAL_COMMIT_SIZE (1*MI_SEGMENT_SLICE_SIZE)
+#define MI_COMMIT_SIZE (MI_SEGMENT_SLICE_SIZE) // 64KiB
+#define MI_COMMIT_MASK_BITS (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
+#define MI_COMMIT_MASK_FIELD_BITS MI_SIZE_BITS
+#define MI_COMMIT_MASK_FIELD_COUNT (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS)
+
+#if (MI_COMMIT_MASK_BITS != (MI_COMMIT_MASK_FIELD_COUNT * MI_COMMIT_MASK_FIELD_BITS))
+#error "the segment size must be exactly divisible by the (commit size * size_t bits)"
+#endif
+
+typedef struct mi_commit_mask_s {
+ size_t mask[MI_COMMIT_MASK_FIELD_COUNT];
+} mi_commit_mask_t;
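// A minimal sketch (not part of this patch) of how such a mask is addressed:
// set the bits for the MI_COMMIT_SIZE chunks that cover `size` bytes at byte
// `offset` inside a segment, following the field layout defined above.
static inline void mi_commit_mask_set_range_sketch(mi_commit_mask_t* cm, size_t offset, size_t size) {
  const size_t start = offset / MI_COMMIT_SIZE;
  const size_t end   = (offset + size + MI_COMMIT_SIZE - 1) / MI_COMMIT_SIZE;  // round up
  for (size_t i = start; i < end; i++) {
    cm->mask[i / MI_COMMIT_MASK_FIELD_BITS] |= ((size_t)1 << (i % MI_COMMIT_MASK_FIELD_BITS));
  }
}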
+
+typedef mi_page_t mi_slice_t;
+typedef int64_t mi_msecs_t;
+
+
+// Memory can reside in an arena, be allocated directly from the OS, or be statically allocated. The memid keeps track of this.
+typedef enum mi_memkind_e {
+ MI_MEM_NONE, // not allocated
+ MI_MEM_EXTERNAL, // not owned by mimalloc but provided externally (via `mi_manage_os_memory` for example)
+ MI_MEM_STATIC, // allocated in a static area and should not be freed (for arena meta data for example)
+ MI_MEM_OS, // allocated from the OS
+ MI_MEM_OS_HUGE, // allocated as huge os pages
+ MI_MEM_OS_REMAP, // allocated in a remappable area (i.e. using `mremap`)
+ MI_MEM_ARENA // allocated from an arena (the usual case)
+} mi_memkind_t;
+
+static inline bool mi_memkind_is_os(mi_memkind_t memkind) {
+ return (memkind >= MI_MEM_OS && memkind <= MI_MEM_OS_REMAP);
+}
+
+typedef struct mi_memid_os_info {
+ void* base; // actual base address of the block (used for offset aligned allocations)
+ size_t alignment; // alignment at allocation
+} mi_memid_os_info_t;
+
+typedef struct mi_memid_arena_info {
+ size_t block_index; // index in the arena
+ mi_arena_id_t id; // arena id (>= 1)
+ bool is_exclusive; // the arena can only be used for specific arena allocations
+} mi_memid_arena_info_t;
+
+typedef struct mi_memid_s {
+ union {
+ mi_memid_os_info_t os; // only used for MI_MEM_OS
+ mi_memid_arena_info_t arena; // only used for MI_MEM_ARENA
+ } mem;
+ bool is_pinned; // `true` if we cannot decommit/reset/protect in this memory (e.g. when allocated using large OS pages)
+ bool initially_committed;// `true` if the memory was originally allocated as committed
+ bool initially_zero; // `true` if the memory was originally zero initialized
+ mi_memkind_t memkind;
+} mi_memid_t;
+
+
+// Segments are large allocated memory blocks (8MiB on 64-bit) from
// the OS. Inside segments we allocate fixed size _pages_ that
// contain blocks.
typedef struct mi_segment_s {
- // memory fields
- size_t memid; // id for the os-level memory manager
- bool mem_is_fixed; // `true` if we cannot decommit/reset/protect in this memory (i.e. when allocated using large OS pages)
- bool mem_is_committed; // `true` if the whole segment is eagerly committed
+ // constant fields
+ mi_memid_t memid; // memory id for arena allocation
+ bool allow_decommit; // can memory in this segment be decommitted?
+ bool allow_purge; // can memory in this segment be purged (reset or decommitted)?
+ size_t segment_size; // size in bytes (can differ from `MI_SEGMENT_SIZE` for huge segments)
// segment fields
+ mi_msecs_t purge_expire; // time at which the delayed purge of the `purge_mask` chunks expires
+ mi_commit_mask_t purge_mask; // commit chunks that are candidates for purging
+ mi_commit_mask_t commit_mask; // commit chunks that are currently committed
+
_Atomic(struct mi_segment_s*) abandoned_next;
- struct mi_segment_s* next; // must be the first segment field after abandoned_next -- see `segment.c:segment_init`
- struct mi_segment_s* prev;
- size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
- size_t abandoned_visits; // count how often this segment is visited in the abandoned list (to force reclaim it it is too long)
+ // from here is zero initialized
+ struct mi_segment_s* next; // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)
+
+ size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
+ size_t abandoned_visits; // count how often this segment is visited in the abandoned list (to force reclaim if it is too long)
+ size_t used; // count of pages in use
+ uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
- size_t used; // count of pages in use (`used <= capacity`)
- size_t capacity; // count of available pages (`#free + used`)
- size_t segment_size; // for huge pages this may be different from `MI_SEGMENT_SIZE`
- size_t segment_info_size;// space we are using from the first page for segment meta-data and possible guard pages.
- uintptr_t cookie; // verify addresses in secure mode: `_mi_ptr_cookie(segment) == segment->cookie`
+ size_t segment_slices; // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
+ size_t segment_info_slices; // initial slices used for segment info and possible guard pages
// layout like this to optimize access in `mi_free`
- size_t page_shift; // `1 << page_shift` == the page sizes == `page->block_size * page->reserved` (unless the first page, then `-segment_info_size`).
- _Atomic(uintptr_t) thread_id; // unique id of the thread owning this segment
- mi_page_kind_t page_kind; // kind of pages: small, large, or huge
- mi_page_t pages[1]; // up to `MI_SMALL_PAGES_PER_SEGMENT` pages
+ mi_segment_kind_t kind;
+ size_t slice_entries; // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT`
+ _Atomic(mi_threadid_t) thread_id; // unique id of the thread owning this segment
+
+ mi_slice_t slices[MI_SLICES_PER_SEGMENT+1]; // one more for huge blocks with large alignment
} mi_segment_t;
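// Sketch (not part of this patch): because segments are MI_SEGMENT_SIZE
// aligned, the owning segment of a heap pointer can be found by masking
// the low bits (ignoring the special case for huge aligned blocks).
static inline mi_segment_t* mi_ptr_segment_sketch(const void* p) {
  return (mi_segment_t*)((uintptr_t)p & ~((uintptr_t)MI_SEGMENT_SIZE - 1));
}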
@@ -316,10 +476,11 @@ typedef struct mi_random_cxt_s {
uint32_t input[16];
uint32_t output[16];
int output_available;
+ bool weak; // `true` if the initial seed is weak (e.g. no OS entropy was available yet)
} mi_random_ctx_t;
-// In debug mode there is a padding stucture at the end of the blocks to check for buffer overflows
+// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows
#if (MI_PADDING)
typedef struct mi_padding_s {
uint32_t canary; // encoded block value to check validity of the padding (in case of overflow)
@@ -341,7 +502,8 @@ struct mi_heap_s {
mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin")
_Atomic(mi_block_t*) thread_delayed_free;
- uintptr_t thread_id; // thread this heap belongs too
+ mi_threadid_t thread_id; // thread this heap belongs to
+ mi_arena_id_t arena_id; // arena id if the heap belongs to a specific arena (or 0)
uintptr_t cookie; // random cookie to verify pointers (see `_mi_ptr_cookie`)
uintptr_t keys[2]; // two random keys used to encode the `thread_delayed_free` list
mi_random_ctx_t random; // random number context used for secure allocation
@@ -358,9 +520,15 @@ struct mi_heap_s {
// Debug
// ------------------------------------------------------
+#if !defined(MI_DEBUG_UNINIT)
#define MI_DEBUG_UNINIT (0xD0)
+#endif
+#if !defined(MI_DEBUG_FREED)
#define MI_DEBUG_FREED (0xDF)
+#endif
+#if !defined(MI_DEBUG_PADDING)
#define MI_DEBUG_PADDING (0xDE)
+#endif
#if (MI_DEBUG)
// use our own assertion to print without memory allocation
@@ -412,23 +580,28 @@ typedef struct mi_stats_s {
mi_stat_count_t reserved;
mi_stat_count_t committed;
mi_stat_count_t reset;
+ mi_stat_count_t purged;
mi_stat_count_t page_committed;
mi_stat_count_t segments_abandoned;
mi_stat_count_t pages_abandoned;
mi_stat_count_t threads;
+ mi_stat_count_t normal;
mi_stat_count_t huge;
- mi_stat_count_t giant;
+ mi_stat_count_t large;
mi_stat_count_t malloc;
mi_stat_count_t segments_cache;
mi_stat_counter_t pages_extended;
mi_stat_counter_t mmap_calls;
mi_stat_counter_t commit_calls;
+ mi_stat_counter_t reset_calls;
+ mi_stat_counter_t purge_calls;
mi_stat_counter_t page_no_retire;
mi_stat_counter_t searches;
+ mi_stat_counter_t normal_count;
mi_stat_counter_t huge_count;
- mi_stat_counter_t giant_count;
+ mi_stat_counter_t large_count;
#if MI_STAT>1
- mi_stat_count_t normal[MI_BIN_HUGE+1];
+ mi_stat_count_t normal_bins[MI_BIN_HUGE+1];
#endif
} mi_stats_t;
@@ -447,6 +620,7 @@ void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);
#define mi_stat_counter_increase(stat,amount) (void)0
#endif
+#define mi_heap_stat_counter_increase(heap,stat,amount) mi_stat_counter_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_increase(heap,stat,amount) mi_stat_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_decrease(heap,stat,amount) mi_stat_decrease( (heap)->tld->stats.stat, amount)
@@ -454,13 +628,15 @@ void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);
// Thread Local data
// ------------------------------------------------------
-typedef int64_t mi_msecs_t;
+// A "span" is is an available range of slices. The span queues keep
+// track of slice spans of at most the given `slice_count` (but more than the previous size class).
+typedef struct mi_span_queue_s {
+ mi_slice_t* first;
+ mi_slice_t* last;
+ size_t slice_count;
+} mi_span_queue_t;
-// Queue of segments
-typedef struct mi_segment_queue_s {
- mi_segment_t* first;
- mi_segment_t* last;
-} mi_segment_queue_t;
+#define MI_SEGMENT_BIN_MAX (35) // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT)
// OS thread local data
typedef struct mi_os_tld_s {
@@ -468,18 +644,14 @@ typedef struct mi_os_tld_s {
mi_stats_t* stats; // points to tld stats
} mi_os_tld_t;
+
// Segments thread local data
typedef struct mi_segments_tld_s {
- mi_segment_queue_t small_free; // queue of segments with free small pages
- mi_segment_queue_t medium_free; // queue of segments with free medium pages
- mi_page_queue_t pages_reset; // queue of freed pages that can be reset
+ mi_span_queue_t spans[MI_SEGMENT_BIN_MAX+1]; // free slice spans inside segments
size_t count; // current number of segments
size_t peak_count; // peak number of segments
size_t current_size; // current size of all segments
size_t peak_size; // peak size of all segments
- size_t cache_count; // number of segments in the cache
- size_t cache_size; // total size of all segments in the cache
- mi_segment_t* cache; // (small) cache of segments
mi_stats_t* stats; // points to tld stats
mi_os_tld_t* os; // points to os stats
} mi_segments_tld_t;
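// Sketch (not part of this patch): a free span of `slice_count` slices lives in
// the queue of its size-class bin, so lookup is a bin computation plus an index
// (`mi_segment_bin` is the bin-mapping helper assumed by the define above).
static inline mi_span_queue_t* mi_span_queue_for_sketch(size_t slice_count, mi_segments_tld_t* tld) {
  const size_t bin = mi_segment_bin(slice_count);  // 0 <= bin <= MI_SEGMENT_BIN_MAX
  return &tld->spans[bin];
}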
diff --git a/mimalloc.pc.in b/mimalloc.pc.in
new file mode 100644
index 0000000..36da203
--- /dev/null
+++ b/mimalloc.pc.in
@@ -0,0 +1,11 @@
+prefix=@CMAKE_INSTALL_PREFIX@
+libdir=@libdir_for_pc_file@
+includedir=@includedir_for_pc_file@
+
+Name: @PROJECT_NAME@
+Description: A compact general purpose allocator with excellent performance
+Version: @PACKAGE_VERSION@
+URL: https://github.com/microsoft/mimalloc/
+Libs: -L${libdir} -lmimalloc
+Libs.private: @pc_libraries@
+Cflags: -I${includedir}
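With this `pkg-config` template installed as `mimalloc.pc`, a consumer can compile against the library without hard-coding paths; an illustrative invocation:

```
> cc myprogram.c $(pkg-config --cflags --libs mimalloc) -o myprogram
```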
diff --git a/readme.md b/readme.md
index 9419a65..85d3563 100644
--- a/readme.md
+++ b/readme.md
@@ -9,22 +9,26 @@
mimalloc (pronounced "me-malloc")
is a general purpose allocator with excellent [performance](#performance) characteristics.
-Initially developed by Daan Leijen for the run-time systems of the
-[Koka](https://github.com/koka-lang/koka) and [Lean](https://github.com/leanprover/lean) languages.
-Latest release:`v1.6.7` (2020-09-24).
+Initially developed by Daan Leijen for the runtime systems of the
+[Koka](https://koka-lang.github.io) and [Lean](https://github.com/leanprover/lean) languages.
-It is a drop-in replacement for `malloc` and can be used in other programs
+Latest release tag: `v2.1.2` (2023-04-24).
+Latest stable tag: `v1.8.2` (2023-04-24).
+
+mimalloc is a drop-in replacement for `malloc` and can be used in other programs
without code changes, for example, on dynamically linked ELF-based systems (Linux, BSD, etc.) you can use it as:
```
-> LD_PRELOAD=/usr/bin/libmimalloc.so myprogram
+> LD_PRELOAD=/usr/lib/libmimalloc.so myprogram
```
-It also has an easy way to override the default allocator in [Windows](#override_on_windows). Notable aspects of the design include:
+It also includes a robust way to override the default allocator in [Windows](#override_on_windows). Notable aspects of the design include:
- __small and consistent__: the library is about 8k LOC using simple and
consistent data structures. This makes it very suitable
to integrate and adapt in other projects. For runtime systems it
provides hooks for a monotonic _heartbeat_ and deferred freeing (for
bounded worst-case times with reference counting).
+ Partly due to its simplicity, mimalloc has been ported to many systems (Windows, macOS,
+ Linux, WASM, various BSDs, Haiku, MUSL, etc.) and has excellent support for dynamic overriding.
- __free list sharding__: instead of one big free list (per size class) we have
many smaller lists per "mimalloc page" which reduces fragmentation and
increases locality --
@@ -34,13 +38,13 @@ It also has an easy way to override the default allocator in [Windows](#override
per mimalloc page, but for each page we have multiple free lists. In particular, there
is one list for thread-local `free` operations, and another one for concurrent `free`
operations. Free-ing from another thread can now be a single CAS without needing
- sophisticated coordination between threads. Since there will be
+ sophisticated coordination between threads. Since there will be
thousands of separate free lists, contention is naturally distributed over the heap,
and the chance of contending on a single location will be low -- this is quite
similar to randomized algorithms like skip lists where adding
a random oracle removes the need for a more complex algorithm.
-- __eager page reset__: when a "page" becomes empty (with increased chance
- due to free list sharding) the memory is marked to the OS as unused ("reset" or "purged")
+- __eager page purging__: when a "page" becomes empty (with increased chance
+ due to free list sharding) the memory is marked to the OS as unused (reset or decommitted)
reducing (real) memory pressure and fragmentation, especially in long running
programs.
- __secure__: _mimalloc_ can be built in secure mode, adding guard pages,
@@ -48,75 +52,96 @@ It also has an easy way to override the default allocator in [Windows](#override
heap vulnerabilities. The performance penalty is usually around 10% on average
over our benchmarks.
- __first-class heaps__: efficiently create and use multiple heaps to allocate across different regions.
- A heap can be destroyed at once instead of deallocating each object separately.
+ A heap can be destroyed at once instead of deallocating each object separately (a short example follows this list).
- __bounded__: it does not suffer from _blowup_ \[1\], has bounded worst-case allocation
- times (_wcat_), bounded space overhead (~0.2% meta-data, with at most 12.5% waste in allocation sizes),
- and has no internal points of contention using only atomic operations.
+ times (_wcat_) (up to OS primitives), bounded space overhead (~0.2% meta-data, with low
+ internal fragmentation), and has no internal points of contention using only atomic operations.
- __fast__: In our benchmarks (see [below](#performance)),
_mimalloc_ outperforms other leading allocators (_jemalloc_, _tcmalloc_, _Hoard_, etc),
- and usually uses less memory (up to 25% more in the worst case). A nice property
- is that it does consistently well over a wide range of benchmarks. There is also good huge OS page
- support for larger server programs.
+ and often uses less memory. A nice property is that it does consistently well over a wide range
+ of benchmarks. There is also good huge OS page support for larger server programs.
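A minimal sketch of the first-class heap API mentioned above (using the public `mi_heap_` functions; error handling omitted):

```
#include <mimalloc.h>

void process_request(void) {
  mi_heap_t* heap = mi_heap_new();     // a fresh heap, owned by the current thread
  void* p = mi_heap_malloc(heap, 128);
  void* q = mi_heap_zalloc(heap, 256);
  // ... use p and q ...
  mi_heap_destroy(heap);               // frees p, q, and every other block in the heap at once
}
```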
The [documentation](https://microsoft.github.io/mimalloc) gives a full overview of the API.
-You can read more on the design of _mimalloc_ in the [technical report](https://www.microsoft.com/en-us/research/publication/mimalloc-free-list-sharding-in-action) which also has detailed benchmark results.
+You can read more on the design of _mimalloc_ in the [technical report](https://www.microsoft.com/en-us/research/publication/mimalloc-free-list-sharding-in-action) which also has detailed benchmark results.
-Enjoy!
+Enjoy!
### Branches
-* `master`: latest stable release.
-* `dev`: latest development branch.
-* `dev-slice`: experimental branch with a different way of managing mimalloc pages that tends
- to use less memory than regular mimalloc with similar performance. Give it a try and please
- report any significant performance improvement or degradation.
+* `master`: latest stable release (based on `dev-slice`).
+* `dev`: development branch for mimalloc v1. Use this branch for submitting PR's.
+* `dev-slice`: development branch for mimalloc v2. This branch is downstream of `dev`.
### Releases
-* 2020-09-24, `v1.6.7`: stable release 1.6: using standard C atomics, passing tsan testing, improved
- handling of failing to commit on Windows, add [`mi_process_info`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc.h#L156) api call.
-* 2020-08-06, `v1.6.4`: stable release 1.6: improved error recovery in low-memory situations,
- support for IllumOS and Haiku, NUMA support for Vista/XP, improved NUMA detection for AMD Ryzen, ubsan support.
-* 2020-05-05, `v1.6.3`: stable release 1.6: improved behavior in out-of-memory situations, improved malloc zones on macOS,
- build PIC static libraries by default, add option to abort on out-of-memory, line buffered statistics.
-* 2020-04-20, `v1.6.2`: stable release 1.6: fix compilation on Android, MingW, Raspberry, and Conda,
- stability fix for Windows 7, fix multiple mimalloc instances in one executable, fix `strnlen` overload,
- fix aligned debug padding.
-* 2020-02-17, `v1.6.1`: stable release 1.6: minor updates (build with clang-cl, fix alignment issue for small objects).
-* 2020-02-09, `v1.6.0`: stable release 1.6: fixed potential memory leak, improved overriding
- and thread local support on FreeBSD, NetBSD, DragonFly, and macOSX. New byte-precise
- heap block overflow detection in debug mode (besides the double-free detection and free-list
- corruption detection). Add `nodiscard` attribute to most allocation functions.
- Enable `MIMALLOC_PAGE_RESET` by default. New reclamation strategy for abandoned heap pages
- for better memory footprint.
-* 2020-02-09, `v1.5.0`: stable release 1.5: improved free performance, small bug fixes.
-* 2020-01-22, `v1.4.0`: stable release 1.4: improved performance for delayed OS page reset,
-more eager concurrent free, addition of STL allocator, fixed potential memory leak.
-* 2020-01-15, `v1.3.0`: stable release 1.3: bug fixes, improved randomness and [stronger
-free list encoding](https://github.com/microsoft/mimalloc/blob/783e3377f79ee82af43a0793910a9f2d01ac7863/include/mimalloc-internal.h#L396) in secure mode.
-* 2019-12-22, `v1.2.2`: stable release 1.2: minor updates.
-* 2019-11-22, `v1.2.0`: stable release 1.2: bug fixes, improved secure mode (free list corruption checks, double free mitigation). Improved dynamic overriding on Windows.
-* 2019-10-07, `v1.1.0`: stable release 1.1.
-* 2019-09-01, `v1.0.8`: pre-release 8: more robust windows dynamic overriding, initial huge page support.
-* 2019-08-10, `v1.0.6`: pre-release 6: various performance improvements.
+Note: the `v2.x` version has a new algorithm for managing internal mimalloc pages that tends to reduce memory usage
+ and fragmentation compared to mimalloc `v1.x` (especially for large workloads). It should otherwise have similar performance
+ (see [below](#performance)); please report if you observe any significant performance regression.
+
+* 2023-04-24, `v1.8.2`, `v2.1.2`: Fixes build issues on FreeBSD, musl, and C17 (UE 5.1.1). Reduced code size/complexity
+ by removing regions and segment caches and only using arenas with improved memory purging -- this may improve memory
+ usage as well for larger services. Renamed options for consistency. Improved Valgrind and ASAN checking.
+
+* 2023-04-03, `v1.8.1`, `v2.1.1`: Fixes build issues on some platforms.
+
+* 2023-03-29, `v1.8.0`, `v2.1.0`: Improved support for dynamic overriding on Windows 11. Improved tracing precision
+ with [asan](#asan) and [Valgrind](#valgrind), and added Windows event tracing [ETW](#ETW) (contributed by Xinglong He). Created an OS
+ abstraction layer to make it easier to port and separate platform dependent code (in `src/prim`). Fixed C++ STL compilation on older Microsoft C++ compilers, and various small bug fixes.
+
+* 2022-12-23, `v1.7.9`, `v2.0.9`: Supports building with [asan](#asan) and improved [Valgrind](#valgrind) support.
+ Support arbitrary large alignments (in particular for `std::pmr` pools).
+ Added C++ STL allocators attached to a specific heap (thanks @vmarkovtsev).
+ Heap walks now visit all objects (including huge objects). Support Windows nano server containers (by Johannes Schindelin, @dscho). Various small bug fixes.
+
+* 2022-11-03, `v1.7.7`, `v2.0.7`: Initial support for [Valgrind](#valgrind) for leak testing and heap block overflow
+ detection. Initial
+ support for attaching heaps to a specific memory area (only in v2). Fix `realloc` behavior for zero size blocks, remove restriction to integral multiple of the alignment in `alloc_align`, improved aligned allocation performance, reduced contention with many threads on few processors (thank you @dposluns!), vs2022 support, support `pkg-config`.
+
+* 2022-04-14, `v1.7.6`, `v2.0.6`: fix fallback path for aligned OS allocation on Windows, improve Windows aligned allocation
+ even when compiling with older SDK's, fix dynamic overriding on macOS Monterey, fix MSVC C++ dynamic overriding, fix
+ warnings under Clang 14, improve performance if many OS threads are created and destroyed, fix statistics for large object
+ allocations, using MIMALLOC_VERBOSE=1 has no maximum on the number of error messages, various small fixes.
+
+* 2022-02-14, `v1.7.5`, `v2.0.5` (alpha): fix malloc override on
+ Windows 11, fix compilation with musl, potentially reduced
+ committed memory, add `bin/minject` for Windows,
+ improved wasm support, faster aligned allocation,
+ various small fixes.
+
+* [Older release notes](#older-release-notes)
Special thanks to:
+* [David Carlier](https://devnexen.blogspot.com/) (@devnexen) for his many contributions, and making
+ mimalloc work better on many less common operating systems, like Haiku, Dragonfly, etc.
* Mary Feofanova (@mary3000), Evgeniy Moiseenko, and Manuel Pöter (@mpoeter) for making mimalloc TSAN checkable, and finding
memory model bugs using the [genMC] model checker.
* Weipeng Liu (@pongba), Zhuowei Li, Junhua Wang, and Jakub Szymanski, for their early support of mimalloc and deployment
at large scale services, leading to many improvements in the mimalloc algorithms for large workloads.
-* Jason Gibson (@jasongibson) for exhaustive testing on large scale workloads and server environments, and finding complex bugs
+* Jason Gibson (@jasongibson) for exhaustive testing on large scale workloads and server environments, and finding complex bugs
in (early versions of) `mimalloc`.
-* Manuel Pöter (@mpoeter) and Sam Gross (@colesbury) for finding an ABA concurrency issue in abandoned segment reclamation.
+* Manuel Pöter (@mpoeter) and Sam Gross (@colesbury) for finding an ABA concurrency issue in abandoned segment reclamation. Sam also created the [no GIL](https://github.com/colesbury/nogil) Python fork which
+ uses mimalloc internally.
+
[genMC]: https://plv.mpi-sws.org/genmc/
+### Usage
+
+mimalloc is used in various large scale low-latency services and programs, for example:
+
+ (logos/links of services using mimalloc omitted)
# Building
## Windows
-Open `ide/vs2019/mimalloc.sln` in Visual Studio 2019 and build (or `ide/vs2017/mimalloc.sln`).
+Open `ide/vs2019/mimalloc.sln` in Visual Studio 2019 and build.
The `mimalloc` project builds a static library (in `out/msvc-x64`), while the
`mimalloc-override` project builds a DLL for overriding malloc
in the entire program.
@@ -165,6 +190,11 @@ Notes:
2. Install CCMake: `sudo apt-get install cmake-curses-gui`
+## Single source
+
+You can also directly build the single `src/static.c` file as part of your project without
+needing `cmake` at all. Make sure to also add the mimalloc `include` directory to the include path.
+
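An illustrative compile line for this single-source build (any C compiler; paths relative to the mimalloc repository):

```
> cc -c -O2 -Iinclude src/static.c -o mimalloc.o
```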
# Using the library
@@ -239,47 +269,55 @@ completely and redirect all calls to the _mimalloc_ library instead .
## Environment Options
-You can set further options either programmatically (using [`mi_option_set`](https://microsoft.github.io/mimalloc/group__options.html)),
-or via environment variables:
+You can set further options either programmatically (using [`mi_option_set`](https://microsoft.github.io/mimalloc/group__options.html)), or via environment variables:
- `MIMALLOC_SHOW_STATS=1`: show statistics when the program terminates.
- `MIMALLOC_VERBOSE=1`: show verbose messages.
- `MIMALLOC_SHOW_ERRORS=1`: show error and warning messages.
-- `MIMALLOC_PAGE_RESET=0`: by default, mimalloc will reset (or purge) OS pages that are not in use, to signal to the OS
- that the underlying physical memory can be reused. This can reduce memory fragmentation in long running (server)
- programs. By setting it to `0` this will no longer be done which can improve performance for batch-like programs.
- As an alternative, the `MIMALLOC_RESET_DELAY=` can be set higher (100ms by default) to make the page
- reset occur less frequently instead of turning it off completely.
+
+Advanced options:
+
+- `MIMALLOC_PURGE_DELAY=N`: the delay in `N` milliseconds (by default `10`) after which mimalloc will purge
+ OS pages that are not in use. This signals to the OS that the underlying physical memory can be reused which
+ can reduce memory fragmentation, especially in long running (server) programs. Setting `N` to `0` purges immediately when
+ a page becomes unused, which can improve memory usage but also decreases performance. Setting `N` to a higher
+ value like `100` can improve performance (sometimes by a lot) at the cost of potentially using more memory at times.
+ Setting it to `-1` disables purging completely (an example invocation follows this list).
+- `MIMALLOC_ARENA_EAGER_COMMIT=1`: turns on eager commit for the large arenas (usually 1GiB) from which mimalloc
+ allocates segments and pages. This is by default
+ only enabled on overcommit systems (e.g. Linux) but enabling it explicitly on other systems (like Windows or macOS)
+ may improve performance. Note that eager commit only increases the committed memory but not the actual peak resident set
+ (rss) so it is generally ok to enable this.
+
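For example, to purge immediately while verifying through statistics (illustrative invocation; programmatically the matching option for `mi_option_set` is expected to be `mi_option_purge_delay`):

```
> MIMALLOC_PURGE_DELAY=0 MIMALLOC_SHOW_STATS=1 myprogram
```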
+Further options for large workloads and services:
+
- `MIMALLOC_USE_NUMA_NODES=N`: pretend there are at most `N` NUMA nodes. If not set, the actual NUMA nodes are detected
at runtime. Setting `N` to 1 may avoid problems in some virtual environments. Also, setting it to a lower number than
the actual NUMA nodes is fine and will only cause threads to potentially allocate more memory across actual NUMA
nodes (but this can happen in any case as NUMA local allocation is always a best effort but not guaranteed).
-- `MIMALLOC_LARGE_OS_PAGES=1`: use large OS pages (2MiB) when available; for some workloads this can significantly
+- `MIMALLOC_ALLOW_LARGE_OS_PAGES=1`: use large OS pages (2MiB) when available; for some workloads this can significantly
improve performance. Use `MIMALLOC_VERBOSE` to check if the large OS pages are enabled -- usually one needs
to explicitly allow large OS pages (as on [Windows][windows-huge] and [Linux][linux-huge]). However, sometimes
the OS is very slow to reserve contiguous physical memory for large OS pages so use with care on systems that
- can have fragmented memory (for that reason, we generally recommend to use `MIMALLOC_RESERVE_HUGE_OS_PAGES` instead whenever possible).
-
-- `MIMALLOC_RESERVE_HUGE_OS_PAGES=N`: where N is the number of 1GiB _huge_ OS pages. This reserves the huge pages at
+ can have fragmented memory (for that reason, we generally recommend to use `MIMALLOC_RESERVE_HUGE_OS_PAGES` instead whenever possible).
+- `MIMALLOC_RESERVE_HUGE_OS_PAGES=N`: where `N` is the number of 1GiB _huge_ OS pages. This reserves the huge pages at
startup and sometimes this can give a large (latency) performance improvement on big workloads.
- Usually it is better to not use
- `MIMALLOC_LARGE_OS_PAGES` in combination with this setting. Just like large OS pages, use with care as reserving
+ Usually it is better to not use `MIMALLOC_ALLOW_LARGE_OS_PAGES=1` in combination with this setting. Just like large
+ OS pages, use with care as reserving
contiguous physical memory can take a long time when memory is fragmented (but reserving the huge pages is done at
startup only once).
Note that we usually need to explicitly enable huge OS pages (as on [Windows][windows-huge] and [Linux][linux-huge]).
With huge OS pages, it may be beneficial to set the setting
`MIMALLOC_EAGER_COMMIT_DELAY=N` (`N` is 1 by default) to delay the initial `N` segments (of 4MiB)
of a thread to not allocate in the huge OS pages; this prevents threads that are short lived
- and allocate just a little to take up space in the huge OS page area (which cannot be reset).
+ and allocate just a little to take up space in the huge OS page area (which cannot be purged).
+ The huge pages are usually allocated evenly among NUMA nodes.
+ We can use `MIMALLOC_RESERVE_HUGE_OS_PAGES_AT=N` where `N` is the NUMA node (starting at 0) to allocate all
+ the huge pages at a specific NUMA node instead (see the example below).
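For instance, reserving two 1GiB huge pages on NUMA node 0 at startup might look like this (illustrative):

```
> MIMALLOC_RESERVE_HUGE_OS_PAGES=2 MIMALLOC_RESERVE_HUGE_OS_PAGES_AT=0 myprogram
```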
Use caution when using `fork` in combination with either large or huge OS pages: on a fork, the OS uses copy-on-write
for all pages in the original process including the huge OS pages. When any memory is now written in that area, the
-OS will copy the entire 1GiB huge page (or 2MiB large page) which can cause the memory usage to grow in big increments.
+OS will copy the entire 1GiB huge page (or 2MiB large page) which can cause the memory usage to grow in large increments.
[linux-huge]: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/5/html/tuning_and_optimizing_red_hat_enterprise_linux_for_oracle_9i_and_10g_databases/sect-oracle_9i_and_10g_tuning_guide-large_memory_optimization_big_pages_and_huge_pages-configuring_huge_pages_in_red_hat_enterprise_linux_4_or_5
[windows-huge]: https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/enable-the-lock-pages-in-memory-option-windows?view=sql-server-2017
@@ -311,15 +349,15 @@ When _mimalloc_ is built using debug mode, various checks are done at runtime to
- Corrupted free-lists and some forms of use-after-free are detected.
-# Overriding Malloc
+# Overriding Standard Malloc
-Overriding the standard `malloc` can be done either _dynamically_ or _statically_.
+Overriding the standard `malloc` (and `new`) can be done either _dynamically_ or _statically_.
## Dynamic override
This is the recommended way to override the standard malloc interface.
-### Override on Linux, BSD
+### Dynamic Override on Linux, BSD
On these ELF-based systems we preload the mimalloc shared
library so all calls to the standard `malloc` interface are
@@ -338,21 +376,20 @@ or run with the debug version to get detailed statistics:
> env MIMALLOC_SHOW_STATS=1 LD_PRELOAD=/usr/lib/libmimalloc-debug.so myprogram
```
-### Override on MacOS
+### Dynamic Override on MacOS
On macOS we can also preload the mimalloc shared
library so all calls to the standard `malloc` interface are
resolved to the _mimalloc_ library.
```
-> env DYLD_FORCE_FLAT_NAMESPACE=1 DYLD_INSERT_LIBRARIES=/usr/lib/libmimalloc.dylib myprogram
+> env DYLD_INSERT_LIBRARIES=/usr/lib/libmimalloc.dylib myprogram
```
Note that certain security restrictions may apply when doing this from
the [shell](https://stackoverflow.com/questions/43941322/dyld-insert-libraries-ignored-when-calling-application-through-bash).
-(Note: macOS support for dynamic overriding is recent, please report any issues.)
-### Override on Windows
+### Dynamic Override on Windows
Overriding on Windows is robust and has the
particular advantage of being able to redirect all malloc/free calls that go through
@@ -360,7 +397,7 @@ the (dynamic) C runtime allocator, including those from other DLL's or libraries
The overriding on Windows requires that you link your program explicitly with
the mimalloc DLL and use the C-runtime library as a DLL (using the `/MD` or `/MDd` switch).
-Also, the `mimalloc-redirect.dll` (or `mimalloc-redirect32.dll`) must be available
+Also, the `mimalloc-redirect.dll` (or `mimalloc-redirect32.dll`) must be put
in the same folder as the main `mimalloc-override.dll` at runtime (as it is a dependency).
The redirection DLL ensures that all calls to the C runtime malloc API get redirected to
mimalloc (in `mimalloc-override.dll`).
@@ -385,13 +422,13 @@ Such patching can be done for example with [CFF Explorer](https://ntcore.com/?pa
On Unix-like systems, you can also statically link with _mimalloc_ to override the standard
malloc interface. The recommended way is to link the final program with the
-_mimalloc_ single object file (`mimalloc-override.o`). We use
+_mimalloc_ single object file (`mimalloc.o`). We use
an object file instead of a library file as linkers give preference to
that over archives to resolve symbols. To ensure that the standard
malloc interface resolves to the _mimalloc_ library, link it as the first
object file. For example:
```
-> gcc -o myprogram mimalloc-override.o myfile1.c ...
+> gcc -o myprogram mimalloc.o myfile1.c ...
```
Another way to override statically that works on all platforms, is to
@@ -401,9 +438,99 @@ This is provided by [`mimalloc-override.h`](https://github.com/microsoft/mimallo
under your control or otherwise mixing of pointers from different heaps may occur!
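A sketch of this include-based static override for a single translation unit (the header remaps the standard names to their `mi_` equivalents):

```
#include <mimalloc-override.h>  // remaps malloc, free, realloc, etc. to mi_ functions
#include <stdio.h>

int main(void) {
  char* buf = (char*)malloc(32);  // resolves to mi_malloc
  snprintf(buf, 32, "hello");
  printf("%s\n", buf);
  free(buf);                      // resolves to mi_free
  return 0;
}
```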
+## Tools
+
+Generally, we recommend using the standard allocator with memory tracking tools, but mimalloc
+can also be built to support the [address sanitizer][asan] or the excellent [Valgrind] tool.
+Moreover, it can be built to support Windows event tracing ([ETW]).
+This has a small performance overhead but does allow detecting memory leaks and byte-precise
+buffer overflows directly on final executables. See also the `test/test-wrong.c` file to test with various tools.
+
+### Valgrind
+
+To build with [valgrind] support, use the `MI_TRACK_VALGRIND=ON` cmake option:
+
+```
+> cmake ../.. -DMI_TRACK_VALGRIND=ON
+```
+
+This can also be combined with secure mode or debug mode.
+You can then run your programs directly under valgrind:
+
+```
+> valgrind
+```
+
+If you rely on overriding `malloc`/`free` by mimalloc (instead of using the `mi_malloc`/`mi_free` API directly),
+you also need to tell `valgrind` to not intercept those calls itself, and use:
+
+```
+> MIMALLOC_SHOW_STATS=1 valgrind --soname-synonyms=somalloc=*mimalloc* --
+```
+
+By setting the `MIMALLOC_SHOW_STATS` environment variable you can check that mimalloc is indeed
+used and not the standard allocator. Even though the [Valgrind option][valgrind-soname]
+is called `--soname-synonyms`, this also
+works when overriding with a static library or object file. Unfortunately, it is not possible to
+dynamically override mimalloc using `LD_PRELOAD` together with `valgrind`.
+See also the `test/test-wrong.c` file to test with `valgrind`.
+
+Valgrind support is in its initial development -- please report any issues.
+
+[Valgrind]: https://valgrind.org/
+[valgrind-soname]: https://valgrind.org/docs/manual/manual-core.html#opt.soname-synonyms
+
+### ASAN
+
+To build with the address sanitizer, use the `-DMI_TRACK_ASAN=ON` cmake option:
+
+```
+> cmake ../.. -DMI_TRACK_ASAN=ON
+```
+
+This can also be combined with secure mode or debug mode.
+You can then run your programs as:
+
+```
+> ASAN_OPTIONS=verbosity=1
+```
+
+When you link a program with an address sanitizer build of mimalloc, you should
+generally compile that program too with the address sanitizer enabled.
+For example, assuming you build mimalloc in `out/debug`:
+
+```
+clang -g -o test-wrong -Iinclude test/test-wrong.c out/debug/libmimalloc-asan-debug.a -lpthread -fsanitize=address -fsanitize-recover=address
+```
+
+Since the address sanitizer redirects the standard allocation functions, on some platforms (macOS for example)
+it is required to compile mimalloc with `-DMI_OVERRIDE=OFF`.
+Address sanitizer support is in its initial development -- please report any issues.
+
+[asan]: https://github.com/google/sanitizers/wiki/AddressSanitizer
+
+### ETW
+
+Event tracing for Windows ([ETW]) provides a high performance way to capture all allocations through
+mimalloc and analyze them later. To build with ETW support, use the `-DMI_TRACK_ETW=ON` cmake option.
+
+You can then capture an allocation trace using the Windows performance recorder (WPR), using the
+`src/prim/windows/etw-mimalloc.wprp` profile. In an admin prompt, you can use:
+```
+> wpr -start src\prim\windows\etw-mimalloc.wprp -filemode
+>
+> wpr -stop .etl
+```
+and then open `.etl` in the Windows Performance Analyzer (WPA), or
+use a tool like [TraceControl] that is specialized for analyzing mimalloc traces.
+
+[ETW]: https://learn.microsoft.com/en-us/windows-hardware/test/wpt/event-tracing-for-windows
+[TraceControl]: https://github.com/xinglonghe/TraceControl
+
+
# Performance
-Last update: 2020-01-20
+Last update: 2021-01-30
We tested _mimalloc_ against many other top allocators over a wide
range of benchmarks, ranging from various real world programs to
@@ -420,10 +547,15 @@ suffering from (too much) underperformance in less common situations.
As always, interpret these results with care since some benchmarks test synthetic
or uncommon situations that may never apply to your workloads. For example, most
-allocators do not do well on `xmalloc-testN` but that includes the best
+allocators do not do well on `xmalloc-testN` but that includes even the best
industrial allocators like _jemalloc_ and _tcmalloc_ that are used in some of
the world's largest systems (like Chrome or FreeBSD).
+Also, the benchmarks here do not measure the behaviour on very large and long-running server workloads,
+or worst-case latencies of allocation. Much work has gone into `mimalloc` to work well on such
+workloads (for example, to reduce virtual memory fragmentation on long-running services)
+but such optimizations are not always reflected in the current benchmark suite.
+
We show here only an overview -- for
more specific details and further benchmarks we refer to the
[technical report](https://www.microsoft.com/en-us/research/publication/mimalloc-free-list-sharding-in-action).
@@ -431,27 +563,28 @@ The benchmark suite is automated and available separately
as [mimalloc-bench](https://github.com/daanx/mimalloc-bench).
-## Benchmark Results on 36-core Intel
+## Benchmark Results on a 16-core AMD 5950x (Zen3)
-Testing on a big Amazon EC2 compute instance
-([c5.18xlarge](https://aws.amazon.com/ec2/instance-types/#Compute_Optimized))
-consisting of a 72 processor Intel Xeon at 3GHz
-with 144GiB ECC memory, running Ubuntu 18.04.1 with glibc 2.27 and GCC 7.4.0.
-The measured allocators are _mimalloc_ (xmi, tag:v1.4.0, page reset enabled)
-and its secure build as _smi_,
-Google's [_tcmalloc_](https://github.com/gperftools/gperftools) (tc, tag:gperftools-2.7) used in Chrome,
-Facebook's [_jemalloc_](https://github.com/jemalloc/jemalloc) (je, tag:5.2.1) by Jason Evans used in Firefox and FreeBSD,
-the Intel thread building blocks [allocator](https://github.com/intel/tbb) (tbb, tag:2020),
-[rpmalloc](https://github.com/mjansson/rpmalloc) (rp,tag:1.4.0) by Mattias Jansson,
-the original scalable [_Hoard_](https://github.com/emeryberger/Hoard) (tag:3.13) allocator by Emery Berger \[1],
-the memory compacting [_Mesh_](https://github.com/plasma-umass/Mesh) (git:51222e7) allocator by
+Testing on the 16-core AMD 5950x processor at 3.4GHz (4.9GHz boost),
+with 32GiB memory at 3600MHz, running Ubuntu 20.04 with glibc 2.31 and GCC 9.3.0.
+
+We measure three versions of _mimalloc_: the main version `mi` (tag:v1.7.0),
+the new v2.0 beta version as `xmi` (tag:v2.0.0), and the main version in secure mode as `smi` (tag:v1.7.0).
+
+The other allocators are
+Google's [_tcmalloc_](https://github.com/gperftools/gperftools) (`tc`, tag:gperftools-2.8.1) used in Chrome,
+Facebook's [_jemalloc_](https://github.com/jemalloc/jemalloc) (`je`, tag:5.2.1) by Jason Evans used in Firefox and FreeBSD,
+the Intel thread building blocks [allocator](https://github.com/intel/tbb) (`tbb`, tag:v2020.3),
+[rpmalloc](https://github.com/mjansson/rpmalloc) (`rp`,tag:1.4.1) by Mattias Jansson,
+the original scalable [_Hoard_](https://github.com/emeryberger/Hoard) (git:d880f72) allocator by Emery Berger \[1],
+the memory compacting [_Mesh_](https://github.com/plasma-umass/Mesh) (git:67ff31a) allocator by
Bobby Powers _et al_ \[8],
-and finally the default system allocator (glibc, 2.27) (based on _PtMalloc2_).
+and finally the default system allocator (`glibc`, 2.31) (based on _PtMalloc2_).
+ (benchmark result figures omitted)
-Any benchmarks ending in `N` run on all processors in parallel.
+Any benchmarks ending in `N` run on all 32 logical cores in parallel.
Results are averaged over 10 runs and reported relative
to mimalloc (where 1.2 means it took 1.2× longer to run).
The legend also contains the _overall relative score_ between the
@@ -466,18 +599,17 @@ _jemalloc_.
The _leanN_ program is interesting as a large realistic and
concurrent workload of the [Lean](https://github.com/leanprover/lean)
-theorem prover compiling its own standard library, and there is a 7%
+theorem prover compiling its own standard library, and there is a 13%
speedup over _tcmalloc_. This is
quite significant: if Lean spends 20% of its time in the
-allocator that means that _mimalloc_ is 1.3× faster than _tcmalloc_
+allocator that means that _mimalloc_ is 1.6× faster than _tcmalloc_
here. (This is surprising as that is not measured in a pure
allocation benchmark like _alloc-test_. We conjecture that we see this
outsized improvement here because _mimalloc_ has better locality in
the allocation which improves performance for the *other* computations
in a program as well).
-The single threaded _redis_ benchmark again show that most allocators do well on such workloads where _tcmalloc_
-did best this time.
+The single-threaded _redis_ benchmark again shows that most allocators do well on such workloads.
The _larsonN_ server benchmark by Larson and Krishnan \[2] allocates and frees between threads. They observed this
behavior (which they call _bleeding_) in actual server applications, and the benchmark simulates this.
@@ -501,14 +633,12 @@ The _alloc-test_, by
[OLogN Technologies AG](http://ithare.com/testing-memory-allocators-ptmalloc2-tcmalloc-hoard-jemalloc-while-trying-to-simulate-real-world-loads/), is a very allocation intensive benchmark doing millions of
allocations in various size classes. The test is scaled such that when an
allocator performs almost identically on _alloc-test1_ as _alloc-testN_ it
-means that it scales linearly. Here, _tcmalloc_, and
-_Hoard_ seem to scale less well and do more than 10% worse on the multi-core version. Even the best industrial
-allocators (_tcmalloc_, _jemalloc_, and _tbb_) are more than 10% slower as _mimalloc_ here.
+means that it scales linearly.
The _sh6bench_ and _sh8bench_ benchmarks are
developed by [MicroQuill](http://www.microquill.com/) as part of SmartHeap.
In _sh6bench_ _mimalloc_ does much
-better than the others (more than 1.5× faster than _jemalloc_).
+better than the others (more than 2.5× faster than _jemalloc_).
We cannot explain this well but believe it is
caused in part by the "reverse" free-ing pattern in _sh6bench_.
The _sh8bench_ is a variation with object migration
@@ -518,7 +648,7 @@ The _xmalloc-testN_ benchmark by Lever and Boreham \[5] and Christian Eder, simu
some threads only allocate, and others only free -- they observed this pattern in
larger server applications. Here we see that
the _mimalloc_ technique of having non-contended sharded thread free
-lists pays off as it outperforms others by a very large margin. Only _rpmalloc_ and _tbb_ also scale well on this benchmark.
+lists pays off as it outperforms others by a very large margin. Only _rpmalloc_, _tbb_, and _glibc_ also scale well on this benchmark.
The _cache-scratch_ benchmark by Emery Berger \[1], and introduced with
the Hoard allocator to test for _passive-false_ sharing of cache lines.
@@ -532,16 +662,20 @@ cache line sharing completely, while _Hoard_ and _glibc_ seem to mitigate
the effects. Kukanov and Voss \[7] describe in detail
how the design of _tbb_ avoids the false cache line sharing.
-## On 24-core AMD Epyc
-For completeness, here are the results on a
-[r5a.12xlarge](https://aws.amazon.com/ec2/instance-types/#Memory_Optimized) instance
-having a 48 processor AMD Epyc 7000 at 2.5GHz with 384GiB of memory.
-The results are similar to the Intel results but it is interesting to
+## On a 36-core Intel Xeon
+
+For completeness, here are the results on a big Amazon
+[c5.18xlarge](https://aws.amazon.com/ec2/instance-types/#Compute_Optimized) instance
+consisting of a 2×18-core Intel Xeon (Cascade Lake) at 3.4GHz (boost 3.5GHz)
+with 144GiB ECC memory, running Ubuntu 20.04 with glibc 2.31, GCC 9.3.0, and
+Clang 10.0.0. This time, the mimalloc allocators (mi, xmi, and smi) were
+compiled with the Clang compiler instead of GCC.
+The results are similar to the AMD results but it is interesting to
see the differences in the _larsonN_, _mstressN_, and _xmalloc-testN_ benchmarks.
+ (benchmark result figures omitted)
## Peak Working Set
@@ -549,14 +683,59 @@ see the differences in the _larsonN_, _mstressN_, and _xmalloc-testN_ benchmarks
The following figure shows the peak working set (rss) of the allocators
on the benchmarks (on the c5.18xlarge instance).
+ (peak working set figures omitted)
Note that the _xmalloc-testN_ memory usage should be disregarded as it
allocates more the faster the program runs. Similarly, memory usage of
-_mstressN_, _rptestN_ and _sh8bench_ can vary depending on scheduling and
-speed. Nevertheless, even though _mimalloc_ is fast on these benchmarks we
-believe the memory usage is too high and hope to improve.
+_larsonN_, _mstressN_, _rptestN_ and _sh8bench_ can vary depending on scheduling and
+speed. Nevertheless, we hope to improve the memory usage on _mstressN_
+and _rptestN_ (just as _cfrac_, _larsonN_ and _sh8bench_ have a small working set which skews the results).
+
+
# References
@@ -596,7 +775,6 @@ believe the memory usage is too high and hope to improve.
In Proceedings of the 2019 ACM SIGPLAN International Symposium on Memory Management, 122–135. ACM. 2019.
-->
-
# Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a
@@ -606,3 +784,44 @@ the rights to use your contribution. For details, visit https://cla.microsoft.co
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
+
+
+# Older Release Notes
+
+* 2021-11-14, `v1.7.3`, `v2.0.3` (beta): improved WASM support, improved macOS support and performance (including
+ M1), improved performance for v2 for large objects, Python integration improvements, more standard
+ installation directories, various small fixes.
+* 2021-06-17, `v1.7.2`, `v2.0.2` (beta): support M1, better installation layout on Linux, fix
+ thread_id on Android, prefer 2-6TiB area for aligned allocation to work better on pre-Windows 8, various small fixes.
+* 2021-04-06, `v1.7.1`, `v2.0.1` (beta): fix bug in arena allocation for huge pages, improved aslr on large allocations, initial M1 support (still experimental).
+* 2021-01-31, `v2.0.0`: beta release 2.0: new slice algorithm for managing internal mimalloc pages.
+* 2021-01-31, `v1.7.0`: stable release 1.7: support explicit user provided memory regions, more precise statistics,
+ improve macOS overriding, initial support for Apple M1, improved DragonFly support, faster memcpy on Windows, various small fixes.
+
+* 2020-09-24, `v1.6.7`: stable release 1.6: using standard C atomics, passing tsan testing, improved
+ handling of failing to commit on Windows, add [`mi_process_info`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc.h#L156) api call.
+* 2020-08-06, `v1.6.4`: stable release 1.6: improved error recovery in low-memory situations,
+ support for IllumOS and Haiku, NUMA support for Vista/XP, improved NUMA detection for AMD Ryzen, ubsan support.
+* 2020-05-05, `v1.6.3`: stable release 1.6: improved behavior in out-of-memory situations, improved malloc zones on macOS,
+ build PIC static libraries by default, add option to abort on out-of-memory, line buffered statistics.
+* 2020-04-20, `v1.6.2`: stable release 1.6: fix compilation on Android, MingW, Raspberry, and Conda,
+ stability fix for Windows 7, fix multiple mimalloc instances in one executable, fix `strnlen` overload,
+ fix aligned debug padding.
+* 2020-02-17, `v1.6.1`: stable release 1.6: minor updates (build with clang-cl, fix alignment issue for small objects).
+* 2020-02-09, `v1.6.0`: stable release 1.6: fixed potential memory leak, improved overriding
+ and thread local support on FreeBSD, NetBSD, DragonFly, and macOSX. New byte-precise
+ heap block overflow detection in debug mode (besides the double-free detection and free-list
+ corruption detection). Add `nodiscard` attribute to most allocation functions.
+ Enable `MIMALLOC_PAGE_RESET` by default. New reclamation strategy for abandoned heap pages
+ for better memory footprint.
+* 2020-02-09, `v1.5.0`: stable release 1.5: improved free performance, small bug fixes.
+* 2020-01-22, `v1.4.0`: stable release 1.4: improved performance for delayed OS page reset,
+more eager concurrent free, addition of STL allocator, fixed potential memory leak.
+* 2020-01-15, `v1.3.0`: stable release 1.3: bug fixes, improved randomness and [stronger
+free list encoding](https://github.com/microsoft/mimalloc/blob/783e3377f79ee82af43a0793910a9f2d01ac7863/include/mimalloc-internal.h#L396) in secure mode.
+
+* 2019-12-22, `v1.2.2`: stable release 1.2: minor updates.
+* 2019-11-22, `v1.2.0`: stable release 1.2: bug fixes, improved secure mode (free list corruption checks, double free mitigation). Improved dynamic overriding on Windows.
+* 2019-10-07, `v1.1.0`: stable release 1.1.
+* 2019-09-01, `v1.0.8`: pre-release 8: more robust Windows dynamic overriding, initial huge page support.
+* 2019-08-10, `v1.0.6`: pre-release 6: various performance improvements.
diff --git a/src/alloc-aligned.c b/src/alloc-aligned.c
index ca16d36..1cd809f 100644
--- a/src/alloc-aligned.c
+++ b/src/alloc-aligned.c
@@ -1,118 +1,218 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
-#include "mimalloc-internal.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/prim.h" // mi_prim_get_default_heap
-#include <string.h> // memset, memcpy
+#include <string.h> // memset
// ------------------------------------------------------
// Aligned Allocation
// ------------------------------------------------------
-static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept {
- // note: we don't require `size > offset`, we just guarantee that
- // the address at offset is aligned regardless of the allocated size.
- mi_assert(alignment > 0);
- if (mi_unlikely(size > PTRDIFF_MAX)) return NULL; // we don't allocate more than PTRDIFF_MAX (see )
- if (mi_unlikely(alignment==0 || !_mi_is_power_of_two(alignment))) return NULL; // require power-of-two (see )
- const uintptr_t align_mask = alignment-1; // for any x, `(x & align_mask) == (x % alignment)`
-
- // try if there is a small block available with just the right alignment
+// Fallback primitive aligned allocation -- split out for better codegen
+static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
+{
+ mi_assert_internal(size <= PTRDIFF_MAX);
+ mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment));
+
+ const uintptr_t align_mask = alignment - 1; // for any x, `(x & align_mask) == (x % alignment)`
const size_t padsize = size + MI_PADDING_SIZE;
- if (mi_likely(padsize <= MI_SMALL_SIZE_MAX)) {
- mi_page_t* page = _mi_heap_get_free_small_page(heap,padsize);
- const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0;
- if (mi_likely(page->free != NULL && is_aligned))
- {
- #if MI_STAT>1
- mi_heap_stat_increase( heap, malloc, size);
- #endif
- void* p = _mi_page_malloc(heap,page,padsize); // TODO: inline _mi_page_malloc
- mi_assert_internal(p != NULL);
- mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
- if (zero) _mi_block_zero_init(page,p,size);
- return p;
- }
- }
// use regular allocation if it is guaranteed to fit the alignment constraints
- if (offset==0 && alignment<=padsize && padsize<=MI_MEDIUM_OBJ_SIZE_MAX && (padsize&align_mask)==0) {
+ if (offset==0 && alignment<=padsize && padsize<=MI_MAX_ALIGN_GUARANTEE && (padsize&align_mask)==0) {
void* p = _mi_heap_malloc_zero(heap, size, zero);
mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
return p;
}
-
- // otherwise over-allocate
- void* p = _mi_heap_malloc_zero(heap, size + alignment - 1, zero);
- if (p == NULL) return NULL;
+
+ void* p;
+ size_t oversize;
+ if mi_unlikely(alignment > MI_ALIGNMENT_MAX) {
+ // use OS allocation for very large alignment and allocate inside a huge page (dedicated segment with 1 page)
+ // This can support alignments >= MI_SEGMENT_SIZE by ensuring the object can be aligned at a point in the
+ // first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down)
+ if mi_unlikely(offset != 0) {
+ // todo: cannot support offset alignment for very large alignments yet
+ #if MI_DEBUG > 0
+ _mi_error_message(EOVERFLOW, "aligned allocation with a very large alignment cannot be used with an alignment offset (size %zu, alignment %zu, offset %zu)\n", size, alignment, offset);
+ #endif
+ return NULL;
+ }
+ oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
+ p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment); // the page block size should be large enough to align in the single huge page block
+ // zero afterwards as only the area from the aligned_p may be committed!
+ if (p == NULL) return NULL;
+ }
+ else {
+ // otherwise over-allocate
+ oversize = size + alignment - 1;
+ p = _mi_heap_malloc_zero(heap, oversize, zero);
+ if (p == NULL) return NULL;
+ }
// .. and align within the allocation
- uintptr_t adjust = alignment - (((uintptr_t)p + offset) & align_mask);
- mi_assert_internal(adjust <= alignment);
- void* aligned_p = (adjust == alignment ? p : (void*)((uintptr_t)p + adjust));
- if (aligned_p != p) mi_page_set_has_aligned(_mi_ptr_page(p), true);
+ const uintptr_t poffset = ((uintptr_t)p + offset) & align_mask;
+ const uintptr_t adjust = (poffset == 0 ? 0 : alignment - poffset);
+ mi_assert_internal(adjust < alignment);
+ void* aligned_p = (void*)((uintptr_t)p + adjust);
+ if (aligned_p != p) {
+ mi_page_t* page = _mi_ptr_page(p);
+ mi_page_set_has_aligned(page, true);
+ _mi_padding_shrink(page, (mi_block_t*)p, adjust + size);
+ }
+ // todo: expand padding if overallocated ?
+
+ mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
+ mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
- mi_assert_internal( p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p),_mi_ptr_page(aligned_p),aligned_p) );
+ mi_assert_internal(mi_usable_size(aligned_p)>=size);
+ mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);
+
+ // now zero the block if needed
+ if (alignment > MI_ALIGNMENT_MAX) {
+ // for the tracker, on huge aligned allocations only the memory from aligned_p onwards is defined
+ mi_track_mem_undefined(aligned_p, size);
+ if (zero) {
+ _mi_memzero_aligned(aligned_p, mi_usable_size(aligned_p));
+ }
+ }
+
+ if (p != aligned_p) {
+ mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p));
+ }
return aligned_p;
}
+// Primitive aligned allocation
+static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
+{
+ // note: we don't require `size > offset`, we just guarantee that the address at offset is aligned regardless of the allocated size.
+ if mi_unlikely(alignment == 0 || !_mi_is_power_of_two(alignment)) { // require power-of-two (see <https://en.cppreference.com/w/c/memory/aligned_alloc>)
+ #if MI_DEBUG > 0
+ _mi_error_message(EOVERFLOW, "aligned allocation requires the alignment to be a power-of-two (size %zu, alignment %zu)\n", size, alignment);
+ #endif
+ return NULL;
+ }
-mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+ if mi_unlikely(size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
+ #if MI_DEBUG > 0
+ _mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);
+ #endif
+ return NULL;
+ }
+ const uintptr_t align_mask = alignment-1; // for any x, `(x & align_mask) == (x % alignment)`
+ const size_t padsize = size + MI_PADDING_SIZE; // note: cannot overflow due to earlier size > PTRDIFF_MAX check
+
+ // try first if there happens to be a small block available with just the right alignment
+ if mi_likely(padsize <= MI_SMALL_SIZE_MAX && alignment <= padsize) {
+ mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize);
+ const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0;
+ if mi_likely(page->free != NULL && is_aligned)
+ {
+ #if MI_STAT>1
+ mi_heap_stat_increase(heap, malloc, size);
+ #endif
+ void* p = _mi_page_malloc(heap, page, padsize, zero); // TODO: inline _mi_page_malloc
+ mi_assert_internal(p != NULL);
+ mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
+ mi_track_malloc(p,size,zero);
+ return p;
+ }
+ }
+ // fallback
+ return mi_heap_malloc_zero_aligned_at_fallback(heap, size, alignment, offset, zero);
+}
+
+
+// ------------------------------------------------------
+// Optimized mi_heap_malloc_aligned / mi_malloc_aligned
+// ------------------------------------------------------
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, false);
}
-mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
- return mi_heap_malloc_aligned_at(heap, size, alignment, 0);
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
+ if mi_unlikely(alignment == 0 || !_mi_is_power_of_two(alignment)) return NULL;
+ #if !MI_PADDING
+ // without padding, any small sized allocation is naturally aligned (see also `_mi_segment_page_start`)
+ if mi_likely(_mi_is_power_of_two(size) && size >= alignment && size <= MI_SMALL_SIZE_MAX)
+ #else
+ // with padding, we can only guarantee this for fixed alignments
+ if mi_likely((alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2)))
+ && size <= MI_SMALL_SIZE_MAX)
+ #endif
+ {
+ // fast path for common alignment and size
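+ // (e.g. pointer-sized alignment, or -- assuming the typical MI_MAX_ALIGN_SIZE of 16 -- 16-byte alignment with size > 8)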
+ return mi_heap_malloc_small(heap, size);
+ }
+ else {
+ return mi_heap_malloc_aligned_at(heap, size, alignment, 0);
+ }
}
-mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+// ensure a definition is emitted
+#if defined(__cplusplus)
+static void* _mi_heap_malloc_aligned = (void*)&mi_heap_malloc_aligned;
+#endif
+
+// ------------------------------------------------------
+// Aligned Allocation
+// ------------------------------------------------------
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, true);
}
-mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_zalloc_aligned_at(heap, size, alignment, 0);
}
-mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_zalloc_aligned_at(heap, total, alignment, offset);
}
-mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_calloc_aligned_at(heap,count,size,alignment,0);
}
-mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
- return mi_heap_malloc_aligned_at(mi_get_default_heap(), size, alignment, offset);
+mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+ return mi_heap_malloc_aligned_at(mi_prim_get_default_heap(), size, alignment, offset);
}
-mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
- return mi_heap_malloc_aligned(mi_get_default_heap(), size, alignment);
+mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
+ return mi_heap_malloc_aligned(mi_prim_get_default_heap(), size, alignment);
}
-mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
- return mi_heap_zalloc_aligned_at(mi_get_default_heap(), size, alignment, offset);
+mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+ return mi_heap_zalloc_aligned_at(mi_prim_get_default_heap(), size, alignment, offset);
}
-mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
- return mi_heap_zalloc_aligned(mi_get_default_heap(), size, alignment);
+mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
+ return mi_heap_zalloc_aligned(mi_prim_get_default_heap(), size, alignment);
}
-mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
- return mi_heap_calloc_aligned_at(mi_get_default_heap(), count, size, alignment, offset);
+mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+ return mi_heap_calloc_aligned_at(mi_prim_get_default_heap(), count, size, alignment, offset);
}
-mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept {
- return mi_heap_calloc_aligned(mi_get_default_heap(), count, size, alignment);
+mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept {
+ return mi_heap_calloc_aligned(mi_prim_get_default_heap(), count, size, alignment);
}
+// ------------------------------------------------------
+// Aligned re-allocation
+// ------------------------------------------------------
+
static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset, bool zero) mi_attr_noexcept {
mi_assert(alignment > 0);
if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero);
@@ -123,21 +223,15 @@ static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t ne
return p; // reallocation still fits, is aligned and not more than 50% waste
}
else {
+ // note: we don't zero-allocate upfront so we only zero-initialize the expanded part
void* newp = mi_heap_malloc_aligned_at(heap,newsize,alignment,offset);
if (newp != NULL) {
if (zero && newsize > size) {
- const mi_page_t* page = _mi_ptr_page(newp);
- if (page->is_zero) {
- // already zero initialized
- mi_assert_expensive(mi_mem_is_zero(newp,newsize));
- }
- else {
- // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
- size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
- memset((uint8_t*)newp + start, 0, newsize - start);
- }
+ // also set last word in the previous allocation to zero to ensure any padding is zero-initialized
+ size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
+ _mi_memzero((uint8_t*)newp + start, newsize - start);
}
- memcpy(newp, p, (newsize > size ? size : newsize));
+ _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
mi_free(p); // only free if successful
}
return newp;
@@ -151,55 +245,54 @@ static void* mi_heap_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsi
return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,zero);
}
-void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,false);
}
-void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,false);
}
-void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, true);
}
-void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, true);
}
-void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(newcount, size, &total)) return NULL;
return mi_heap_rezalloc_aligned_at(heap, p, total, alignment, offset);
}
-void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(newcount, size, &total)) return NULL;
return mi_heap_rezalloc_aligned(heap, p, total, alignment);
}
-void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
- return mi_heap_realloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
+mi_decl_nodiscard void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
+ return mi_heap_realloc_aligned_at(mi_prim_get_default_heap(), p, newsize, alignment, offset);
}
-void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
- return mi_heap_realloc_aligned(mi_get_default_heap(), p, newsize, alignment);
+mi_decl_nodiscard void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
+ return mi_heap_realloc_aligned(mi_prim_get_default_heap(), p, newsize, alignment);
}
-void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
- return mi_heap_rezalloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
+mi_decl_nodiscard void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
+ return mi_heap_rezalloc_aligned_at(mi_prim_get_default_heap(), p, newsize, alignment, offset);
}
-void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
- return mi_heap_rezalloc_aligned(mi_get_default_heap(), p, newsize, alignment);
+mi_decl_nodiscard void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
+ return mi_heap_rezalloc_aligned(mi_prim_get_default_heap(), p, newsize, alignment);
}
-void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
- return mi_heap_recalloc_aligned_at(mi_get_default_heap(), p, newcount, size, alignment, offset);
+mi_decl_nodiscard void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
+ return mi_heap_recalloc_aligned_at(mi_prim_get_default_heap(), p, newcount, size, alignment, offset);
}
-void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
- return mi_heap_recalloc_aligned(mi_get_default_heap(), p, newcount, size, alignment);
+mi_decl_nodiscard void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
+ return mi_heap_recalloc_aligned(mi_prim_get_default_heap(), p, newcount, size, alignment);
}
-
diff --git a/src/alloc-override.c b/src/alloc-override.c
new file mode 100644
index 0000000..873065d
--- /dev/null
+++ b/src/alloc-override.c
@@ -0,0 +1,297 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+#if !defined(MI_IN_ALLOC_C)
+#error "this file should be included from 'alloc.c' (so aliases can work)"
+#endif
+
+#if defined(MI_MALLOC_OVERRIDE) && defined(_WIN32) && !(defined(MI_SHARED_LIB) && defined(_DLL))
+#error "It is only possible to override "malloc" on Windows when building as a DLL (and linking the C runtime as a DLL)"
+#endif
+
+#if defined(MI_MALLOC_OVERRIDE) && !(defined(_WIN32))
+
+#if defined(__APPLE__)
+#include <AvailabilityMacros.h>
+mi_decl_externc void vfree(void* p);
+mi_decl_externc size_t malloc_size(const void* p);
+mi_decl_externc size_t malloc_good_size(size_t size);
+#endif
+
+// helper definition for C override of C++ new
+typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t;
+
+// ------------------------------------------------------
+// Override system malloc
+// ------------------------------------------------------
+
+#if (defined(__GNUC__) || defined(__clang__)) && !defined(__APPLE__) && !MI_TRACK_ENABLED
+ // gcc, clang: use aliasing to alias the exported function to one of our `mi_` functions
+ #if (defined(__GNUC__) && __GNUC__ >= 9)
+ #pragma GCC diagnostic ignored "-Wattributes" // or we get warnings that nodiscard is ignored on a forward
+ #define MI_FORWARD(fun) __attribute__((alias(#fun), used, visibility("default"), copy(fun)));
+ #else
+ #define MI_FORWARD(fun) __attribute__((alias(#fun), used, visibility("default")));
+ #endif
+ #define MI_FORWARD1(fun,x) MI_FORWARD(fun)
+ #define MI_FORWARD2(fun,x,y) MI_FORWARD(fun)
+ #define MI_FORWARD3(fun,x,y,z) MI_FORWARD(fun)
+ #define MI_FORWARD0(fun,x) MI_FORWARD(fun)
+ #define MI_FORWARD02(fun,x,y) MI_FORWARD(fun)
+#else
+ // otherwise use forwarding by calling our `mi_` function
+ #define MI_FORWARD1(fun,x) { return fun(x); }
+ #define MI_FORWARD2(fun,x,y) { return fun(x,y); }
+ #define MI_FORWARD3(fun,x,y,z) { return fun(x,y,z); }
+ #define MI_FORWARD0(fun,x) { fun(x); }
+ #define MI_FORWARD02(fun,x,y) { fun(x,y); }
+#endif
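+
+// For example, `void* malloc(size_t size) MI_FORWARD1(mi_malloc, size)` either emits an
+// alias of `malloc` to `mi_malloc` (gcc/clang) or the forwarding body `{ return mi_malloc(size); }`.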
+
+
+#if defined(__APPLE__) && defined(MI_SHARED_LIB_EXPORT) && defined(MI_OSX_INTERPOSE)
+ // define MI_OSX_IS_INTERPOSED as we should not provide forwarding definitions for
+ // functions that are interposed (or the interposing does not work)
+ #define MI_OSX_IS_INTERPOSED
+
+ mi_decl_externc size_t mi_malloc_size_checked(void *p) {
+ if (!mi_is_in_heap_region(p)) return 0;
+ return mi_usable_size(p);
+ }
+
+ // use interposing so `DYLD_INSERT_LIBRARIES` works without `DYLD_FORCE_FLAT_NAMESPACE=1`
+ // See: <https://books.google.com/books?id=K8vUkpOXhN4C&pg=PA73>
+ struct mi_interpose_s {
+ const void* replacement;
+ const void* target;
+ };
+ #define MI_INTERPOSE_FUN(oldfun,newfun) { (const void*)&newfun, (const void*)&oldfun }
+ #define MI_INTERPOSE_MI(fun) MI_INTERPOSE_FUN(fun,mi_##fun)
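+ // (e.g. MI_INTERPOSE_MI(malloc) expands to { (const void*)&mi_malloc, (const void*)&malloc })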
+
+ __attribute__((used)) static struct mi_interpose_s _mi_interposes[] __attribute__((section("__DATA, __interpose"))) =
+ {
+ MI_INTERPOSE_MI(malloc),
+ MI_INTERPOSE_MI(calloc),
+ MI_INTERPOSE_MI(realloc),
+ MI_INTERPOSE_MI(strdup),
+ MI_INTERPOSE_MI(strndup),
+ MI_INTERPOSE_MI(realpath),
+ MI_INTERPOSE_MI(posix_memalign),
+ MI_INTERPOSE_MI(reallocf),
+ MI_INTERPOSE_MI(valloc),
+ MI_INTERPOSE_FUN(malloc_size,mi_malloc_size_checked),
+ MI_INTERPOSE_MI(malloc_good_size),
+ #if defined(MAC_OS_X_VERSION_10_15) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_15
+ MI_INTERPOSE_MI(aligned_alloc),
+ #endif
+ #ifdef MI_OSX_ZONE
+ // we interpose malloc_default_zone in alloc-override-osx.c so we can use mi_free safely
+ MI_INTERPOSE_MI(free),
+ MI_INTERPOSE_FUN(vfree,mi_free),
+ #else
+ // sometimes code allocates from default zone but deallocates using plain free :-( (like NxHashResizeToCapacity)
+ MI_INTERPOSE_FUN(free,mi_cfree), // use safe free that checks if pointers are from us
+ MI_INTERPOSE_FUN(vfree,mi_cfree),
+ #endif
+ };
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+ void _ZdlPv(void* p); // delete
+ void _ZdaPv(void* p); // delete[]
+ void _ZdlPvm(void* p, size_t n); // delete (sized)
+ void _ZdaPvm(void* p, size_t n); // delete[] (sized)
+ void* _Znwm(size_t n); // new
+ void* _Znam(size_t n); // new[]
+ void* _ZnwmRKSt9nothrow_t(size_t n, mi_nothrow_t tag); // new nothrow
+ void* _ZnamRKSt9nothrow_t(size_t n, mi_nothrow_t tag); // new[] nothrow
+ #ifdef __cplusplus
+ }
+ #endif
+ __attribute__((used)) static struct mi_interpose_s _mi_cxx_interposes[] __attribute__((section("__DATA, __interpose"))) =
+ {
+ MI_INTERPOSE_FUN(_ZdlPv,mi_free),
+ MI_INTERPOSE_FUN(_ZdaPv,mi_free),
+ MI_INTERPOSE_FUN(_ZdlPvm,mi_free_size),
+ MI_INTERPOSE_FUN(_ZdaPvm,mi_free_size),
+ MI_INTERPOSE_FUN(_Znwm,mi_new),
+ MI_INTERPOSE_FUN(_Znam,mi_new),
+ MI_INTERPOSE_FUN(_ZnwmRKSt9nothrow_t,mi_new_nothrow),
+ MI_INTERPOSE_FUN(_ZnamRKSt9nothrow_t,mi_new_nothrow),
+ };
+
+#elif defined(_MSC_VER)
+ // cannot override malloc unless using a dll.
+ // we just override new/delete which does work in a static library.
+#else
+ // On all other systems forward to our API
+ mi_decl_export void* malloc(size_t size) MI_FORWARD1(mi_malloc, size)
+ mi_decl_export void* calloc(size_t size, size_t n) MI_FORWARD2(mi_calloc, size, n)
+ mi_decl_export void* realloc(void* p, size_t newsize) MI_FORWARD2(mi_realloc, p, newsize)
+ mi_decl_export void free(void* p) MI_FORWARD0(mi_free, p)
+#endif
+
+#if (defined(__GNUC__) || defined(__clang__)) && !defined(__APPLE__)
+#pragma GCC visibility push(default)
+#endif
+
+// ------------------------------------------------------
+// Override new/delete
+// This is not really necessary as they usually call
+// malloc/free anyway, but it improves performance.
+// ------------------------------------------------------
+#ifdef __cplusplus
+ // ------------------------------------------------------
+ // With a C++ compiler we override the new/delete operators.
+ // see <https://en.cppreference.com/w/cpp/memory/new/operator_new>
+ // ------------------------------------------------------
+ #include <new>
+
+ #ifndef MI_OSX_IS_INTERPOSED
+ void operator delete(void* p) noexcept MI_FORWARD0(mi_free,p)
+ void operator delete[](void* p) noexcept MI_FORWARD0(mi_free,p)
+
+ void* operator new(std::size_t n) noexcept(false) MI_FORWARD1(mi_new,n)
+ void* operator new[](std::size_t n) noexcept(false) MI_FORWARD1(mi_new,n)
+
+ void* operator new (std::size_t n, const std::nothrow_t& tag) noexcept { MI_UNUSED(tag); return mi_new_nothrow(n); }
+ void* operator new[](std::size_t n, const std::nothrow_t& tag) noexcept { MI_UNUSED(tag); return mi_new_nothrow(n); }
+
+ #if (__cplusplus >= 201402L || _MSC_VER >= 1916)
+ void operator delete (void* p, std::size_t n) noexcept MI_FORWARD02(mi_free_size,p,n)
+ void operator delete[](void* p, std::size_t n) noexcept MI_FORWARD02(mi_free_size,p,n)
+ #endif
+ #endif
+
+ #if (__cplusplus > 201402L && defined(__cpp_aligned_new)) && (!defined(__GNUC__) || (__GNUC__ > 5))
+ void operator delete (void* p, std::align_val_t al) noexcept { mi_free_aligned(p, static_cast<size_t>(al)); }
+ void operator delete[](void* p, std::align_val_t al) noexcept { mi_free_aligned(p, static_cast<size_t>(al)); }
+ void operator delete (void* p, std::size_t n, std::align_val_t al) noexcept { mi_free_size_aligned(p, n, static_cast<size_t>(al)); }
+ void operator delete[](void* p, std::size_t n, std::align_val_t al) noexcept { mi_free_size_aligned(p, n, static_cast<size_t>(al)); }
+ void operator delete (void* p, std::align_val_t al, const std::nothrow_t&) noexcept { mi_free_aligned(p, static_cast<size_t>(al)); }
+ void operator delete[](void* p, std::align_val_t al, const std::nothrow_t&) noexcept { mi_free_aligned(p, static_cast<size_t>(al)); }
+
+ void* operator new( std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast<size_t>(al)); }
+ void* operator new[]( std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast<size_t>(al)); }
+ void* operator new (std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_new_aligned_nothrow(n, static_cast<size_t>(al)); }
+ void* operator new[](std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_new_aligned_nothrow(n, static_cast<size_t>(al)); }
+ #endif
+
+#elif (defined(__GNUC__) || defined(__clang__))
+ // ------------------------------------------------------
+ // Override by defining the mangled C++ names of the operators (as
+ // used by GCC and Clang).
+ // See <https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling>
+ // ------------------------------------------------------
+
+ void _ZdlPv(void* p) MI_FORWARD0(mi_free,p) // delete
+ void _ZdaPv(void* p) MI_FORWARD0(mi_free,p) // delete[]
+ void _ZdlPvm(void* p, size_t n) MI_FORWARD02(mi_free_size,p,n)
+ void _ZdaPvm(void* p, size_t n) MI_FORWARD02(mi_free_size,p,n)
+ void _ZdlPvSt11align_val_t(void* p, size_t al) { mi_free_aligned(p,al); }
+ void _ZdaPvSt11align_val_t(void* p, size_t al) { mi_free_aligned(p,al); }
+ void _ZdlPvmSt11align_val_t(void* p, size_t n, size_t al) { mi_free_size_aligned(p,n,al); }
+ void _ZdaPvmSt11align_val_t(void* p, size_t n, size_t al) { mi_free_size_aligned(p,n,al); }
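+ // (Itanium C++ ABI mangling: _Zdl/_Zda = operator delete/delete[], Pv = a void* parameter,
+ // m = size_t (unsigned long), St11align_val_t = std::align_val_t)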
+
+ #if (MI_INTPTR_SIZE==8)
+ void* _Znwm(size_t n) MI_FORWARD1(mi_new,n) // new 64-bit
+ void* _Znam(size_t n) MI_FORWARD1(mi_new,n) // new[] 64-bit
+ void* _ZnwmRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); }
+ void* _ZnamRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); }
+ void* _ZnwmSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al)
+ void* _ZnamSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al)
+ void* _ZnwmSt11align_val_tRKSt9nothrow_t(size_t n, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_aligned_nothrow(n,al); }
+ void* _ZnamSt11align_val_tRKSt9nothrow_t(size_t n, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_aligned_nothrow(n,al); }
+ #elif (MI_INTPTR_SIZE==4)
+ void* _Znwj(size_t n) MI_FORWARD1(mi_new,n) // new 32-bit
+ void* _Znaj(size_t n) MI_FORWARD1(mi_new,n) // new[] 32-bit
+ void* _ZnwjRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); }
+ void* _ZnajRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); }
+ void* _ZnwjSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al)
+ void* _ZnajSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al)
+ void* _ZnwjSt11align_val_tRKSt9nothrow_t(size_t n, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_aligned_nothrow(n,al); }
+ void* _ZnajSt11align_val_tRKSt9nothrow_t(size_t n, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_aligned_nothrow(n,al); }
+ #else
+ #error "define overloads for new/delete for this platform (just for performance, can be skipped)"
+ #endif
+#endif // __cplusplus
+
+// ------------------------------------------------------
+// Further Posix & Unix functions definitions
+// ------------------------------------------------------
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef MI_OSX_IS_INTERPOSED
+ // Forward Posix/Unix calls as well
+ void* reallocf(void* p, size_t newsize) MI_FORWARD2(mi_reallocf,p,newsize)
+ size_t malloc_size(const void* p) MI_FORWARD1(mi_usable_size,p)
+ #if !defined(__ANDROID__) && !defined(__FreeBSD__)
+ size_t malloc_usable_size(void *p) MI_FORWARD1(mi_usable_size,p)
+ #else
+ size_t malloc_usable_size(const void *p) MI_FORWARD1(mi_usable_size,p)
+ #endif
+
+ // No forwarding here due to aliasing/name mangling issues
+ void* valloc(size_t size) { return mi_valloc(size); }
+ void vfree(void* p) { mi_free(p); }
+ size_t malloc_good_size(size_t size) { return mi_malloc_good_size(size); }
+ int posix_memalign(void** p, size_t alignment, size_t size) { return mi_posix_memalign(p, alignment, size); }
+
+ // `aligned_alloc` is only available when __USE_ISOC11 is defined.
+ // Note: it seems __USE_ISOC11 is not defined in musl (and perhaps other libc's) so we only check
+ // for it if using glibc.
+ // Note: Conda has a custom glibc where `aligned_alloc` is declared `static inline` and we cannot
+ // override it, but both _ISOC11_SOURCE and __USE_ISOC11 are undefined in Conda GCC7 or GCC9.
+ // Fortunately, in the case where `aligned_alloc` is declared as `static inline` it
+ // uses internally `memalign`, `posix_memalign`, or `_aligned_malloc` so we can avoid overriding it ourselves.
+ #if !defined(__GLIBC__) || __USE_ISOC11
+ void* aligned_alloc(size_t alignment, size_t size) { return mi_aligned_alloc(alignment, size); }
+ #endif
+#endif
+
+// no forwarding here due to aliasing/name mangling issues
+void cfree(void* p) { mi_free(p); }
+void* pvalloc(size_t size) { return mi_pvalloc(size); }
+void* reallocarray(void* p, size_t count, size_t size) { return mi_reallocarray(p, count, size); }
+int reallocarr(void* p, size_t count, size_t size) { return mi_reallocarr(p, count, size); }
+void* memalign(size_t alignment, size_t size) { return mi_memalign(alignment, size); }
+void* _aligned_malloc(size_t alignment, size_t size) { return mi_aligned_alloc(alignment, size); }
+
+#if defined(__wasi__)
+ // forward __libc interface (see PR #667)
+ void* __libc_malloc(size_t size) MI_FORWARD1(mi_malloc, size)
+ void* __libc_calloc(size_t count, size_t size) MI_FORWARD2(mi_calloc, count, size)
+ void* __libc_realloc(void* p, size_t size) MI_FORWARD2(mi_realloc, p, size)
+ void __libc_free(void* p) MI_FORWARD0(mi_free, p)
+ void* __libc_memalign(size_t alignment, size_t size) { return mi_memalign(alignment, size); }
+
+#elif defined(__GLIBC__) && defined(__linux__)
+ // forward __libc interface (needed for glibc-based Linux distributions)
+ void* __libc_malloc(size_t size) MI_FORWARD1(mi_malloc,size)
+ void* __libc_calloc(size_t count, size_t size) MI_FORWARD2(mi_calloc,count,size)
+ void* __libc_realloc(void* p, size_t size) MI_FORWARD2(mi_realloc,p,size)
+ void __libc_free(void* p) MI_FORWARD0(mi_free,p)
+ void __libc_cfree(void* p) MI_FORWARD0(mi_free,p)
+
+ void* __libc_valloc(size_t size) { return mi_valloc(size); }
+ void* __libc_pvalloc(size_t size) { return mi_pvalloc(size); }
+ void* __libc_memalign(size_t alignment, size_t size) { return mi_memalign(alignment,size); }
+ int __posix_memalign(void** p, size_t alignment, size_t size) { return mi_posix_memalign(p,alignment,size); }
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#if (defined(__GNUC__) || defined(__clang__)) && !defined(__APPLE__)
+#pragma GCC visibility pop
+#endif
+
+#endif // MI_MALLOC_OVERRIDE && !_WIN32
diff --git a/src/alloc-posix.c b/src/alloc-posix.c
index 1ba1509..225752f 100644
--- a/src/alloc-posix.c
+++ b/src/alloc-posix.c
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018,2019, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -10,14 +10,14 @@ terms of the MIT license. A copy of the license can be found in the file
// for convenience and used when overriding these functions.
// ------------------------------------------------------------------------
#include "mimalloc.h"
-#include "mimalloc-internal.h"
+#include "mimalloc/internal.h"
// ------------------------------------------------------
// Posix & Unix functions definitions
// ------------------------------------------------------
#include <errno.h>
-#include <string.h> // memcpy
+#include <string.h> // memset
#include <stdlib.h> // getenv
#ifdef _MSC_VER
@@ -32,14 +32,20 @@ terms of the MIT license. A copy of the license can be found in the file
#endif
-size_t mi_malloc_size(const void* p) mi_attr_noexcept {
+mi_decl_nodiscard size_t mi_malloc_size(const void* p) mi_attr_noexcept {
+ // if (!mi_is_in_heap_region(p)) return 0;
return mi_usable_size(p);
}
-size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept {
+mi_decl_nodiscard size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept {
+ // if (!mi_is_in_heap_region(p)) return 0;
return mi_usable_size(p);
}
+mi_decl_nodiscard size_t mi_malloc_good_size(size_t size) mi_attr_noexcept {
+ return mi_good_size(size);
+}
+
void mi_cfree(void* p) mi_attr_noexcept {
if (mi_is_in_heap_region(p)) {
mi_free(p);
@@ -50,65 +56,87 @@ int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept
// Note: The spec dictates we should not modify `*p` on an error. (issue#27)
//
if (p == NULL) return EINVAL;
- if (alignment % sizeof(void*) != 0) return EINVAL; // natural alignment
- if (!_mi_is_power_of_two(alignment)) return EINVAL; // not a power of 2
- void* q = (mi_malloc_satisfies_alignment(alignment, size) ? mi_malloc(size) : mi_malloc_aligned(size, alignment));
+ if ((alignment % sizeof(void*)) != 0) return EINVAL; // natural alignment
+ // it is also required that alignment is a power of 2 and > 0; this is checked in `mi_malloc_aligned`
+ if (alignment==0 || !_mi_is_power_of_two(alignment)) return EINVAL; // not a power of 2
+ void* q = mi_malloc_aligned(size, alignment);
if (q==NULL && size != 0) return ENOMEM;
mi_assert_internal(((uintptr_t)q % alignment) == 0);
*p = q;
return 0;
}
-mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept {
- void* p = (mi_malloc_satisfies_alignment(alignment,size) ? mi_malloc(size) : mi_malloc_aligned(size, alignment));
+mi_decl_nodiscard mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept {
+ void* p = mi_malloc_aligned(size, alignment);
mi_assert_internal(((uintptr_t)p % alignment) == 0);
return p;
}
-mi_decl_restrict void* mi_valloc(size_t size) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_valloc(size_t size) mi_attr_noexcept {
return mi_memalign( _mi_os_page_size(), size );
}
-mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcept {
size_t psize = _mi_os_page_size();
if (size >= SIZE_MAX - psize) return NULL; // overflow
size_t asize = _mi_align_up(size, psize);
return mi_malloc_aligned(asize, psize);
}
-mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept {
- if (alignment==0 || !_mi_is_power_of_two(alignment)) return NULL;
- if ((size&(alignment-1)) != 0) return NULL; // C11 requires integral multiple, see <https://en.cppreference.com/w/c/memory/aligned_alloc>
- void* p = (mi_malloc_satisfies_alignment(alignment, size) ? mi_malloc(size) : mi_malloc_aligned(size, alignment));
+mi_decl_nodiscard mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept {
+ // C11 requires the size to be an integral multiple of the alignment, see <https://en.cppreference.com/w/c/memory/aligned_alloc>.
+ // unfortunately, it turns out quite a few programs pass a size that is not an integral multiple, so we skip this check.
+ /* if mi_unlikely((size & (alignment - 1)) != 0) { // C11 requires alignment>0 && integral multiple, see <https://en.cppreference.com/w/c/memory/aligned_alloc>
+ #if MI_DEBUG > 0
+ _mi_error_message(EOVERFLOW, "(mi_)aligned_alloc requires the size to be an integral multiple of the alignment (size %zu, alignment %zu)\n", size, alignment);
+ #endif
+ return NULL;
+ }
+ */
+ // C11 also requires alignment to be a power-of-two (and > 0) which is checked in mi_malloc_aligned
+ void* p = mi_malloc_aligned(size, alignment);
mi_assert_internal(((uintptr_t)p % alignment) == 0);
return p;
}
-void* mi_reallocarray( void* p, size_t count, size_t size ) mi_attr_noexcept { // BSD
+mi_decl_nodiscard void* mi_reallocarray( void* p, size_t count, size_t size ) mi_attr_noexcept { // BSD
void* newp = mi_reallocn(p,count,size);
- if (newp==NULL) errno = ENOMEM;
+ if (newp==NULL) { errno = ENOMEM; }
return newp;
}
+mi_decl_nodiscard int mi_reallocarr( void* p, size_t count, size_t size ) mi_attr_noexcept { // NetBSD
+ mi_assert(p != NULL);
+ if (p == NULL) {
+ errno = EINVAL;
+ return EINVAL;
+ }
+ void** op = (void**)p;
+ void* newp = mi_reallocarray(*op, count, size);
+ if mi_unlikely(newp == NULL) { return errno; }
+ *op = newp;
+ return 0;
+}
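+// (usage sketch: `int* a = NULL; if (mi_reallocarr(&a, n, sizeof(int)) == 0) { /* a now holds n ints */ }`)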
+
void* mi__expand(void* p, size_t newsize) mi_attr_noexcept { // Microsoft
void* res = mi_expand(p, newsize);
- if (res == NULL) errno = ENOMEM;
+ if (res == NULL) { errno = ENOMEM; }
return res;
}
-mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept {
if (s==NULL) return NULL;
size_t len;
for(len = 0; s[len] != 0; len++) { }
size_t size = (len+1)*sizeof(unsigned short);
unsigned short* p = (unsigned short*)mi_malloc(size);
if (p != NULL) {
- memcpy(p,s,size);
+ _mi_memcpy(p,s,size);
}
return p;
}
-mi_decl_restrict unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept {
return (unsigned char*)mi_strdup((const char*)s);
}
@@ -122,7 +150,7 @@ int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept {
else {
*buf = mi_strdup(p);
if (*buf==NULL) return ENOMEM;
- if (size != NULL) *size = strlen(p);
+ if (size != NULL) *size = _mi_strlen(p);
}
return 0;
}
@@ -148,10 +176,10 @@ int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name)
#endif
}
-void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { // Microsoft
+mi_decl_nodiscard void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { // Microsoft
return mi_recalloc_aligned_at(p, newcount, size, alignment, offset);
}
-void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept { // Microsoft
+mi_decl_nodiscard void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept { // Microsoft
return mi_recalloc_aligned(p, newcount, size, alignment);
}
diff --git a/src/alloc.c b/src/alloc.c
index f31d160..ffc1747 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -1,19 +1,24 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
-#include "AuroraEnvironment.h"
+#ifndef _DEFAULT_SOURCE
+#define _DEFAULT_SOURCE // for realpath() on Linux
+#endif
#include "mimalloc.h"
-#include "mimalloc-internal.h"
-#include "mimalloc-atomic.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h" // _mi_prim_thread_id()
+#include <string.h> // memset, strlen (for mi_strdup)
+#include <stdlib.h> // malloc, abort
-#include <string.h> // memset, memcpy, strlen
-#include <stdlib.h> // malloc, exit
-
+#define MI_IN_ALLOC_C
+#include "alloc-override.c"
+#undef MI_IN_ALLOC_C
// ------------------------------------------------------
// Allocation
@@ -21,134 +26,167 @@ terms of the MIT license. A copy of the license can be found in the file
// Fast allocation in a page: just pop from the free list.
// Fall back to generic allocation only if the list is empty.
-extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept {
+extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept {
mi_assert_internal(page->xblock_size==0||mi_page_block_size(page) >= size);
- mi_block_t* block = page->free;
- if (mi_unlikely(block == NULL)) {
- return _mi_malloc_generic(heap, size);
+ mi_block_t* const block = page->free;
+ if mi_unlikely(block == NULL) {
+ return _mi_malloc_generic(heap, size, zero, 0);
}
mi_assert_internal(block != NULL && _mi_ptr_page(block) == page);
// pop from the free list
- page->free = mi_block_next(page, block);
page->used++;
+ page->free = mi_block_next(page, block);
mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page);
-#if (MI_DEBUG>0)
- if (!page->is_zero) { memset(block, MI_DEBUG_UNINIT, size); }
+ #if MI_DEBUG>3
+ if (page->free_is_zero) {
+ mi_assert_expensive(mi_mem_is_zero(block+1,size - sizeof(*block)));
+ }
+ #endif
+
+ // allow use of the block internally
+ // note: when tracking we need to avoid ever touching the MI_PADDING since
+ // that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc/track.h`)
+ mi_track_mem_undefined(block, mi_page_usable_block_size(page));
+
+ // zero the block? note: we need to zero the full block size (issue #63)
+ if mi_unlikely(zero) {
+ mi_assert_internal(page->xblock_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic)
+ mi_assert_internal(page->xblock_size >= MI_PADDING_SIZE);
+ if (page->free_is_zero) {
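+ // the free-list pop only wrote the first word (the next pointer); the rest of the block is still zero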
+ block->next = 0;
+ mi_track_mem_defined(block, page->xblock_size - MI_PADDING_SIZE);
+ }
+ else {
+ _mi_memzero_aligned(block, page->xblock_size - MI_PADDING_SIZE);
+ }
+ }
+
+#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
+ if (!zero && !mi_page_is_huge(page)) {
+ memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page));
+ }
#elif (MI_SECURE!=0)
- block->next = 0; // don't leak internal data
+ if (!zero) { block->next = 0; } // don't leak internal data
#endif
-#if (MI_STAT>1)
+
+#if (MI_STAT>0)
const size_t bsize = mi_page_usable_block_size(page);
- if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+ if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
+ mi_heap_stat_increase(heap, normal, bsize);
+ mi_heap_stat_counter_increase(heap, normal_count, 1);
+#if (MI_STAT>1)
const size_t bin = _mi_bin(bsize);
- mi_heap_stat_increase(heap, normal[bin], 1);
+ mi_heap_stat_increase(heap, normal_bins[bin], 1);
+#endif
}
#endif
-#if (MI_PADDING > 0) && defined(MI_ENCODE_FREELIST)
+
+#if MI_PADDING // && !MI_TRACK_ENABLED
mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
+ #if (MI_DEBUG>=2)
mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
+ #endif
+ mi_track_mem_defined(padding,sizeof(mi_padding_t)); // note: re-enable since mi_page_usable_block_size may set noaccess
padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
padding->delta = (uint32_t)(delta);
- uint8_t* fill = (uint8_t*)padding - delta;
- const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes
- for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
+ #if MI_PADDING_CHECK
+ if (!mi_page_is_huge(page)) {
+ uint8_t* fill = (uint8_t*)padding - delta;
+ const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes
+ for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
+ }
+ #endif
#endif
+
return block;
}
-// allocate a small block
-extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
- mi_assert(heap!=NULL);
- mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
+static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
+ mi_assert(heap != NULL);
+ #if MI_DEBUG
+ const uintptr_t tid = _mi_thread_id();
+ mi_assert(heap->thread_id == 0 || heap->thread_id == tid); // heaps are thread local
+ #endif
mi_assert(size <= MI_SMALL_SIZE_MAX);
#if (MI_PADDING)
- if (size == 0) {
- size = sizeof(void*);
- }
+ if (size == 0) { size = sizeof(void*); }
#endif
- mi_page_t* page = _mi_heap_get_free_small_page(heap,size + MI_PADDING_SIZE);
- void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE);
- mi_assert_internal(p==NULL || mi_usable_size(p) >= size);
+ mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
+ void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
+ mi_track_malloc(p,size,zero);
#if MI_STAT>1
if (p != NULL) {
- if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
+ if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
}
#endif
+ #if MI_DEBUG>3
+ if (p != NULL && zero) {
+ mi_assert_expensive(mi_mem_is_zero(p, size));
+ }
+ #endif
return p;
}
-extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
- return mi_heap_malloc_small(mi_get_default_heap(), size);
+// allocate a small block
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+ return mi_heap_malloc_small_zero(heap, size, false);
+}
+
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
+ return mi_heap_malloc_small(mi_prim_get_default_heap(), size);
}
// The main allocation function
-extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
- if (mi_likely(size <= MI_SMALL_SIZE_MAX)) {
- return mi_heap_malloc_small(heap, size);
+extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept {
+ if mi_likely(size <= MI_SMALL_SIZE_MAX) {
+ mi_assert_internal(huge_alignment == 0);
+ return mi_heap_malloc_small_zero(heap, size, zero);
}
else {
mi_assert(heap!=NULL);
- mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
- void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE); // note: size can overflow but it is detected in malloc_generic
- mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
+ mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
+ void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment); // note: size can overflow but it is detected in malloc_generic
+ mi_track_malloc(p,size,zero);
#if MI_STAT>1
if (p != NULL) {
- if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
+ if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
}
#endif
+ #if MI_DEBUG>3
+ if (p != NULL && zero) {
+ mi_assert_expensive(mi_mem_is_zero(p, size));
+ }
+ #endif
return p;
}
}
-extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
- return mi_heap_malloc(mi_get_default_heap(), size);
+extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept {
+ return _mi_heap_malloc_zero_ex(heap, size, zero, 0);
}
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+ return _mi_heap_malloc_zero(heap, size, false);
+}
-void _mi_block_zero_init(const mi_page_t* page, void* p, size_t size) {
- // note: we need to initialize the whole usable block size to zero, not just the requested size,
- // or the recalloc/rezalloc functions cannot safely expand in place (see issue #63)
- UNUSED(size);
- mi_assert_internal(p != NULL);
- mi_assert_internal(mi_usable_size(p) >= size); // size can be zero
- mi_assert_internal(_mi_ptr_page(p)==page);
- if (page->is_zero && size > sizeof(mi_block_t)) {
- // already zero initialized memory
- ((mi_block_t*)p)->next = 0; // clear the free list pointer
- mi_assert_expensive(mi_mem_is_zero(p, mi_usable_size(p)));
- }
- else {
- // otherwise memset
- memset(p, 0, mi_usable_size(p));
- }
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
+ return mi_heap_malloc(mi_prim_get_default_heap(), size);
}
// zero initialized small block
-mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
- void* p = mi_malloc_small(size);
- if (p != NULL) {
- _mi_block_zero_init(_mi_ptr_page(p), p, size); // todo: can we avoid getting the page again?
- }
- return p;
+mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
+ return mi_heap_malloc_small_zero(mi_prim_get_default_heap(), size, true);
}
-void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) {
- void* p = mi_heap_malloc(heap,size);
- if (zero && p != NULL) {
- _mi_block_zero_init(_mi_ptr_page(p),p,size); // todo: can we avoid getting the page again?
- }
- return p;
-}
-
-extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
return _mi_heap_malloc_zero(heap, size, true);
}
-mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
- return mi_heap_zalloc(mi_get_default_heap(),size);
+mi_decl_nodiscard mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
+ return mi_heap_zalloc(mi_prim_get_default_heap(),size);
}
@@ -180,21 +218,24 @@ static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, con
return false;
}
+#define mi_track_page(page,access) { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); mi_track_mem_##access( pstart, psize); }
+
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
+ bool is_double_free = false;
mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field
if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 && // quick check: aligned pointer?
(n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL?
{
// Suspicious: the decoded value in the block is in the same page (or NULL) -- maybe a double free?
// (continue in separate function to improve code generation)
- return mi_check_is_double_freex(page, block);
+ is_double_free = mi_check_is_double_freex(page, block);
}
- return false;
+ return is_double_free;
}
#else
static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) {
- UNUSED(page);
- UNUSED(block);
+ MI_UNUSED(page);
+ MI_UNUSED(block);
return false;
}
#endif
@@ -203,12 +244,19 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block
// Check for heap block overflow by setting up padding at the end of the block
// ---------------------------------------------------------------------------
-#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST)
+#if MI_PADDING // && !MI_TRACK_ENABLED
static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
*bsize = mi_page_usable_block_size(page);
const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
+ mi_track_mem_defined(padding,sizeof(mi_padding_t));
*delta = padding->delta;
- return ((uint32_t)mi_ptr_encode(page,block,page->keys) == padding->canary && *delta <= *bsize);
+ uint32_t canary = padding->canary;
+ uintptr_t keys[2];
+ keys[0] = page->keys[0];
+ keys[1] = page->keys[1];
+ bool ok = ((uint32_t)mi_ptr_encode(page,block,keys) == canary && *delta <= *bsize);
+ mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
+ return ok;
}
// Return the exact usable size of a block.
@@ -220,38 +268,11 @@ static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* bl
return (ok ? bsize - delta : 0);
}
-static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
- size_t bsize;
- size_t delta;
- bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
- *size = *wrong = bsize;
- if (!ok) return false;
- mi_assert_internal(bsize >= delta);
- *size = bsize - delta;
- uint8_t* fill = (uint8_t*)block + bsize - delta;
- const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes
- for (size_t i = 0; i < maxpad; i++) {
- if (fill[i] != MI_DEBUG_PADDING) {
- *wrong = bsize - delta + i;
- return false;
- }
- }
- return true;
-}
-
-static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
- size_t size;
- size_t wrong;
- if (!mi_verify_padding(page,block,&size,&wrong)) {
- _mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
- }
-}
-
// When a non-thread-local block is freed, it becomes part of the thread delayed free
// list that is freed later by the owning heap. If the exact usable size is too small to
// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
// so it will later not trigger an overflow error in `mi_free_block`.
-static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
+void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
size_t bsize;
size_t delta;
bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
@@ -262,47 +283,150 @@ static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, co
size_t new_delta = (bsize - min_size);
mi_assert_internal(new_delta < bsize);
mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
+ mi_track_mem_defined(padding,sizeof(mi_padding_t));
padding->delta = (uint32_t)new_delta;
+ mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
}
#else
-static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
- UNUSED(page);
- UNUSED(block);
-}
-
static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
- UNUSED(block);
+ MI_UNUSED(block);
return mi_page_usable_block_size(page);
}
-static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
- UNUSED(page);
- UNUSED(block);
- UNUSED(min_size);
+void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
+ MI_UNUSED(page);
+ MI_UNUSED(block);
+ MI_UNUSED(min_size);
}
#endif
+#if MI_PADDING && MI_PADDING_CHECK
+
+static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
+ size_t bsize;
+ size_t delta;
+ bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
+ *size = *wrong = bsize;
+ if (!ok) return false;
+ mi_assert_internal(bsize >= delta);
+ *size = bsize - delta;
+ if (!mi_page_is_huge(page)) {
+ uint8_t* fill = (uint8_t*)block + bsize - delta;
+ const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes
+ mi_track_mem_defined(fill, maxpad);
+ for (size_t i = 0; i < maxpad; i++) {
+ if (fill[i] != MI_DEBUG_PADDING) {
+ *wrong = bsize - delta + i;
+ ok = false;
+ break;
+ }
+ }
+ mi_track_mem_noaccess(fill, maxpad);
+ }
+ return ok;
+}
+
+static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
+ size_t size;
+ size_t wrong;
+ if (!mi_verify_padding(page,block,&size,&wrong)) {
+ _mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong );
+ }
+}
+
+#else
+
+static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
+ MI_UNUSED(page);
+ MI_UNUSED(block);
+}
+
+#endif
+
+// only maintain stats for smaller objects if requested
+#if (MI_STAT>0)
+static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
+ #if (MI_STAT < 2)
+ MI_UNUSED(block);
+ #endif
+ mi_heap_t* const heap = mi_heap_get_default();
+ const size_t bsize = mi_page_usable_block_size(page);
+ #if (MI_STAT>1)
+ const size_t usize = mi_page_usable_size_of(page, block);
+ mi_heap_stat_decrease(heap, malloc, usize);
+ #endif
+ if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) {
+ mi_heap_stat_decrease(heap, normal, bsize);
+ #if (MI_STAT > 1)
+ mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1);
+ #endif
+ }
+ else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+ mi_heap_stat_decrease(heap, large, bsize);
+ }
+ else {
+ mi_heap_stat_decrease(heap, huge, bsize);
+ }
+}
+#else
+static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) {
+ MI_UNUSED(page); MI_UNUSED(block);
+}
+#endif
+
+#if MI_HUGE_PAGE_ABANDON
+#if (MI_STAT>0)
+// maintain stats for huge objects
+static void mi_stat_huge_free(const mi_page_t* page) {
+ mi_heap_t* const heap = mi_heap_get_default();
+ const size_t bsize = mi_page_block_size(page); // to match stats in `page.c:mi_page_huge_alloc`
+ if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+ mi_heap_stat_decrease(heap, large, bsize);
+ }
+ else {
+ mi_heap_stat_decrease(heap, huge, bsize);
+ }
+}
+#else
+static void mi_stat_huge_free(const mi_page_t* page) {
+ MI_UNUSED(page);
+}
+#endif
+#endif
+
// ------------------------------------------------------
// Free
// ------------------------------------------------------
-// multi-threaded free
+// multi-threaded free (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON)
static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* block)
{
// The padding check may access the non-thread-owned page for the key values.
// that is safe as these are constant and the page won't be freed (as the block is not freed yet).
mi_check_padding(page, block);
- mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
- #if (MI_DEBUG!=0)
- memset(block, MI_DEBUG_FREED, mi_usable_size(block));
- #endif
-
+ _mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
+
// huge page segments are always abandoned and can be freed immediately
- mi_segment_t* const segment = _mi_page_segment(page);
- if (segment->page_kind==MI_PAGE_HUGE) {
+ mi_segment_t* segment = _mi_page_segment(page);
+ if (segment->kind == MI_SEGMENT_HUGE) {
+ #if MI_HUGE_PAGE_ABANDON
+ // huge page segments are always abandoned and can be freed immediately
+ mi_stat_huge_free(page);
_mi_segment_huge_page_free(segment, page, block);
return;
+ #else
+ // huge pages are special as they occupy the entire segment
+ // as these are large we reset the memory occupied by the page so it is available to other threads
+ // (as the owning thread needs to actually free the memory later).
+ _mi_segment_huge_page_reset(segment, page, block);
+ #endif
}
+
+ #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN // note: when tracking, cannot use mi_usable_size with multi-threading
+ if (segment->kind != MI_SEGMENT_HUGE) { // not for huge segments as we just reset the content
+ memset(block, MI_DEBUG_FREED, mi_usable_size(block));
+ }
+ #endif
// Try to put the block on either the page-local thread free list, or the heap delayed free list.
mi_thread_free_t tfreex;
@@ -310,7 +434,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
do {
use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
- if (mi_unlikely(use_delayed)) {
+ if mi_unlikely(use_delayed) {
// unlikely: this only happens on the first concurrent free in a page that is in the full list
tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING);
}
@@ -321,7 +445,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
}
} while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
- if (mi_unlikely(use_delayed)) {
+ if mi_unlikely(use_delayed) {
// racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page);
mi_assert_internal(heap != NULL);
@@ -343,25 +467,27 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
}
}
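// The do/while in _mi_free_block_mt above is the classic lock-free push: read
// the head, link the new block, and retry the release-CAS until no other
// thread raced us. A self-contained sketch of the same pattern on a plain
// pointer head (the real mi_thread_free_t additionally packs a 2-bit
// delayed-free state into the word):

#include <stdatomic.h>

typedef struct node_s { struct node_s* next; } node_t;

static void atomic_push(_Atomic(node_t*)* head, node_t* n) {
  node_t* old = atomic_load_explicit(head, memory_order_relaxed);
  do {
    n->next = old;  // link to the current head before publishing
  } while (!atomic_compare_exchange_weak_explicit(
             head, &old, n, memory_order_release, memory_order_relaxed));
}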
-
// regular free
static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block)
{
// and push it on the free list
- if (mi_likely(local)) {
+ //const size_t bsize = mi_page_block_size(page);
+ if mi_likely(local) {
// owning thread can free a block directly
- if (mi_unlikely(mi_check_is_double_free(page, block))) return;
+ if mi_unlikely(mi_check_is_double_free(page, block)) return;
mi_check_padding(page, block);
- #if (MI_DEBUG!=0)
- memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
+ #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
+ if (!mi_page_is_huge(page)) { // huge page content may be already decommitted
+ memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
+ }
#endif
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
page->used--;
- if (mi_unlikely(mi_page_all_free(page))) {
+ if mi_unlikely(mi_page_all_free(page)) {
_mi_page_retire(page);
}
- else if (mi_unlikely(mi_page_is_in_full(page))) {
+ else if mi_unlikely(mi_page_is_in_full(page)) {
_mi_page_unfull(page);
}
}
@@ -380,85 +506,94 @@ mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* p
}
-static void mi_decl_noinline mi_free_generic(const mi_segment_t* segment, bool local, void* p) {
- mi_page_t* const page = _mi_segment_page_of(segment, p);
+void mi_decl_noinline _mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept {
mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
- _mi_free_block(page, local, block);
+ mi_stat_free(page, block); // stat_free may access the padding
+ mi_track_free_size(block, mi_page_usable_size_of(page,block));
+ _mi_free_block(page, is_local, block);
}
// Get the segment data belonging to a pointer
// This is just a single `and` in assembly but does further checks in debug mode
// (and secure mode) if this was a valid pointer.
-static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* msg)
+static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* msg)
{
- UNUSED(msg);
+ MI_UNUSED(msg);
+ mi_assert(p != NULL);
+
#if (MI_DEBUG>0)
- if (mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0)) {
+ if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) {
_mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p);
return NULL;
}
#endif
mi_segment_t* const segment = _mi_ptr_segment(p);
- if (mi_unlikely(segment == NULL)) return NULL; // checks also for (p==NULL)
+ mi_assert_internal(segment != NULL);
#if (MI_DEBUG>0)
- if (mi_unlikely(!mi_is_in_heap_region(p))) {
- _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
- "(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
- if (mi_likely(_mi_ptr_cookie(segment) == segment->cookie)) {
- _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
+ if mi_unlikely(!mi_is_in_heap_region(p)) {
+ #if (MI_INTPTR_SIZE == 8 && defined(__linux__))
+ if (((uintptr_t)p >> 40) != 0x7F) { // linux tends to align large blocks above 0x7F000000000 (issue #640)
+ #else
+ {
+ #endif
+ _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n"
+ "(this may still be a valid very large allocation (over 64MiB))\n", msg, p);
+ if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) {
+ _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p);
+ }
}
}
#endif
#if (MI_DEBUG>0 || MI_SECURE>=4)
- if (mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie)) {
- _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", p);
+ if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) {
+ _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p);
+ return NULL;
}
#endif
+
return segment;
}
-
// Free a block
+// fast path written carefully to prevent register spilling on the stack
void mi_free(void* p) mi_attr_noexcept
{
- const mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
- if (mi_unlikely(segment == NULL)) return;
+ if mi_unlikely(p == NULL) return;
+ mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
+ const bool is_local = (_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id));
+ mi_page_t* const page = _mi_segment_page_of(segment, p);
- const uintptr_t tid = _mi_thread_id();
- mi_page_t* const page = _mi_segment_page_of(segment, p);
- mi_block_t* const block = (mi_block_t*)p;
-
-#if (MI_STAT>1)
- mi_heap_t* const heap = mi_heap_get_default();
- const size_t bsize = mi_page_usable_block_size(page);
- mi_heap_stat_decrease(heap, malloc, bsize);
- if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { // huge page stats are accounted for in `_mi_page_retire`
- mi_heap_stat_decrease(heap, normal[_mi_bin(bsize)], 1);
- }
-#endif
-
- if (mi_likely(tid == segment->thread_id && page->flags.full_aligned == 0)) { // the thread id matches and it is not a full page, nor has aligned blocks
- // local, and not full or aligned
- if (mi_unlikely(mi_check_is_double_free(page,block))) return;
- mi_check_padding(page, block);
- #if (MI_DEBUG!=0)
- memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
- #endif
- mi_block_set_next(page, block, page->local_free);
- page->local_free = block;
- if (mi_unlikely(--page->used == 0)) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
- _mi_page_retire(page);
+ if mi_likely(is_local) { // thread-local free?
+ if mi_likely(page->flags.full_aligned == 0) // and it is not a full page (full pages need to move from the full bin), nor has aligned blocks (aligned blocks need to be unaligned)
+ {
+ mi_block_t* const block = (mi_block_t*)p;
+ if mi_unlikely(mi_check_is_double_free(page, block)) return;
+ mi_check_padding(page, block);
+ mi_stat_free(page, block);
+ #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
+ memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
+ #endif
+ mi_track_free_size(p, mi_page_usable_size_of(page,block)); // faster than mi_usable_size as we already know the page and that p is unaligned
+ mi_block_set_next(page, block, page->local_free);
+ page->local_free = block;
+ if mi_unlikely(--page->used == 0) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
+ _mi_page_retire(page);
+ }
+ }
+ else {
+ // page is full or contains (inner) aligned blocks; use generic path
+ _mi_free_generic(segment, page, true, p);
}
}
else {
- // non-local, aligned blocks, or a full page; use the more generic path
- // note: recalc page in generic to improve code generation
- mi_free_generic(segment, tid == segment->thread_id, p);
+ // not thread-local; use generic path
+ _mi_free_generic(segment, page, false, p);
}
}
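// Usage sketch: mi_free accepts NULL and pointers from any mi_malloc variant;
// only thread-local frees on non-full, non-aligned pages take the fast path
// above, everything else funnels through _mi_free_generic.

#include <mimalloc.h>

int main(void) {
  void* p = mi_malloc(64);
  mi_free(p);       // owning thread: fast path
  mi_free(NULL);    // explicitly a no-op
  return 0;
}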
+// return true if successful
bool _mi_free_delayed_block(mi_block_t* block) {
// get segment and page
const mi_segment_t* const segment = _mi_ptr_segment(block);
@@ -471,7 +606,9 @@ bool _mi_free_delayed_block(mi_block_t* block) {
// some blocks may end up in the page `thread_free` list with no blocks in the
// heap `thread_delayed_free` list which may cause the page to be never freed!
// (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`)
- _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* dont overwrite never delayed */);
+ if (!_mi_page_try_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* don't overwrite never-delayed */)) {
+ return false;
+ }
// collect all other non-local frees to ensure up-to-date `used` count
_mi_page_free_collect(page, false);
@@ -482,119 +619,127 @@ bool _mi_free_delayed_block(mi_block_t* block) {
}
// Bytes available in a block
-static size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept {
- const mi_segment_t* const segment = mi_checked_ptr_segment(p,msg);
- if (segment==NULL) return 0;
+mi_decl_noinline static size_t mi_page_usable_aligned_size_of(const mi_segment_t* segment, const mi_page_t* page, const void* p) mi_attr_noexcept {
+ const mi_block_t* block = _mi_page_ptr_unalign(segment, page, p);
+ const size_t size = mi_page_usable_size_of(page, block);
+ const ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)block;
+ mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
+ return (size - adjust);
+}
+
+static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept {
+ if (p == NULL) return 0;
+ const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg);
const mi_page_t* const page = _mi_segment_page_of(segment, p);
- const mi_block_t* block = (const mi_block_t*)p;
- if (mi_unlikely(mi_page_has_aligned(page))) {
- block = _mi_page_ptr_unalign(segment, page, p);
- size_t size = mi_page_usable_size_of(page, block);
- ptrdiff_t const adjust = (uint8_t*)p - (uint8_t*)block;
- mi_assert_internal(adjust >= 0 && (size_t)adjust <= size);
- return (size - adjust);
- }
- else {
+ if mi_likely(!mi_page_has_aligned(page)) {
+ const mi_block_t* block = (const mi_block_t*)p;
return mi_page_usable_size_of(page, block);
}
+ else {
+ // split out to separate routine for improved code generation
+ return mi_page_usable_aligned_size_of(segment, page, p);
+ }
}
-size_t mi_usable_size(const void* p) mi_attr_noexcept {
+mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept {
return _mi_usable_size(p, "mi_usable_size");
}
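// Note that the usable size can exceed the requested size because blocks are
// rounded up to a size class; writing up to mi_usable_size(p) bytes is valid.

#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  void* p = mi_malloc(100);
  printf("requested 100, usable %zu\n", mi_usable_size(p));  // prints >= 100
  mi_free(p);
  return 0;
}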
-// ------------------------------------------------------
-// ensure explicit external inline definitions are emitted!
-// ------------------------------------------------------
-
-#ifdef __cplusplus
-void* _mi_externs[] = {
- (void*)&_mi_page_malloc,
- (void*)&mi_malloc,
- (void*)&mi_malloc_small,
- (void*)&mi_heap_malloc,
- (void*)&mi_heap_zalloc,
- (void*)&mi_heap_malloc_small
-};
-#endif
-
-
// ------------------------------------------------------
// Allocation extensions
// ------------------------------------------------------
void mi_free_size(void* p, size_t size) mi_attr_noexcept {
- UNUSED_RELEASE(size);
+ MI_UNUSED_RELEASE(size);
mi_assert(p == NULL || size <= _mi_usable_size(p,"mi_free_size"));
mi_free(p);
}
void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept {
- UNUSED_RELEASE(alignment);
+ MI_UNUSED_RELEASE(alignment);
mi_assert(((uintptr_t)p % alignment) == 0);
mi_free_size(p,size);
}
void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept {
- UNUSED_RELEASE(alignment);
+ MI_UNUSED_RELEASE(alignment);
mi_assert(((uintptr_t)p % alignment) == 0);
mi_free(p);
}
-extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count,size,&total)) return NULL;
return mi_heap_zalloc(heap,total);
}
-mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
- return mi_heap_calloc(mi_get_default_heap(),count,size);
+mi_decl_nodiscard mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
+ return mi_heap_calloc(mi_prim_get_default_heap(),count,size);
}
// Uninitialized `calloc`
-extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_malloc(heap, total);
}
-mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
- return mi_heap_mallocn(mi_get_default_heap(),count,size);
+mi_decl_nodiscard mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
+ return mi_heap_mallocn(mi_prim_get_default_heap(),count,size);
}
-// Expand in place or fail
+// Expand (or shrink) in place (or fail)
void* mi_expand(void* p, size_t newsize) mi_attr_noexcept {
+ #if MI_PADDING
+ // we do not shrink/expand with padding enabled
+ MI_UNUSED(p); MI_UNUSED(newsize);
+ return NULL;
+ #else
if (p == NULL) return NULL;
- size_t size = _mi_usable_size(p,"mi_expand");
+ const size_t size = _mi_usable_size(p,"mi_expand");
if (newsize > size) return NULL;
return p; // it fits
+ #endif
}
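// mi_expand never moves or frees the block: it returns p only when newsize
// still fits the existing allocation (and, per the #if above, always NULL
// when MI_PADDING is enabled). A sketch of the contract:

#include <mimalloc.h>

void example(void) {
  void* p = mi_malloc(100);
  void* q = mi_expand(p, 80);   // fits in place: q == p (or NULL with padding)
  if (q == NULL) {
    // did not fit (or a padding build); p is untouched and still valid
  }
  mi_free(p);
}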
-void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) {
- if (p == NULL) return _mi_heap_malloc_zero(heap,newsize,zero);
- size_t size = _mi_usable_size(p,"mi_realloc");
- if (newsize <= size && newsize >= (size / 2)) {
+void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept {
+ // if p == NULL then behave as malloc.
+ // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)).
+ // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
+ const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0)
+ if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
+ mi_assert_internal(p!=NULL);
+ // todo: do not track as the usable size is still the same in the free; adjust potential padding?
+ // mi_track_resize(p,size,newsize)
+ // if (newsize < size) { mi_track_mem_noaccess((uint8_t*)p + newsize, size - newsize); }
return p; // reallocation still fits and not more than 50% waste
}
void* newp = mi_heap_malloc(heap,newsize);
- if (mi_likely(newp != NULL)) {
+ if mi_likely(newp != NULL) {
if (zero && newsize > size) {
// also set last word in the previous allocation to zero to ensure any padding is zero-initialized
- size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
- memset((uint8_t*)newp + start, 0, newsize - start);
+ const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
+ _mi_memzero((uint8_t*)newp + start, newsize - start);
+ }
+ else if (newsize == 0) {
+ ((uint8_t*)newp)[0] = 0; // workaround for applications that expect zero-sized reallocation to be zero initialized (issue #725)
+ }
+ if mi_likely(p != NULL) {
+ const size_t copysize = (newsize > size ? size : newsize);
+ mi_track_mem_defined(p,copysize); // _mi_usable_size may be too large for byte-precise memory tracking
+ _mi_memcpy(newp, p, copysize);
+ mi_free(p); // only free the original pointer if successful
}
- memcpy(newp, p, (newsize > size ? size : newsize));
- mi_free(p); // only free if successful
}
return newp;
}
-void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
return _mi_heap_realloc_zero(heap, p, newsize, false);
}
-void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_realloc(heap, p, total);
@@ -602,42 +747,42 @@ void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_a
// Reallocate but free `p` on errors
-void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
void* newp = mi_heap_realloc(heap, p, newsize);
if (newp==NULL && p!=NULL) mi_free(p);
return newp;
}
-void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept {
return _mi_heap_realloc_zero(heap, p, newsize, true);
}
-void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
+mi_decl_nodiscard void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept {
size_t total;
if (mi_count_size_overflow(count, size, &total)) return NULL;
return mi_heap_rezalloc(heap, p, total);
}
-void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
- return mi_heap_realloc(mi_get_default_heap(),p,newsize);
+mi_decl_nodiscard void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
+ return mi_heap_realloc(mi_prim_get_default_heap(),p,newsize);
}
-void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
- return mi_heap_reallocn(mi_get_default_heap(),p,count,size);
+mi_decl_nodiscard void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
+ return mi_heap_reallocn(mi_prim_get_default_heap(),p,count,size);
}
// Reallocate but free `p` on errors
-void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
- return mi_heap_reallocf(mi_get_default_heap(),p,newsize);
+mi_decl_nodiscard void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
+ return mi_heap_reallocf(mi_prim_get_default_heap(),p,newsize);
}
-void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
- return mi_heap_rezalloc(mi_get_default_heap(), p, newsize);
+mi_decl_nodiscard void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
+ return mi_heap_rezalloc(mi_prim_get_default_heap(), p, newsize);
}
-void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
- return mi_heap_recalloc(mi_get_default_heap(), p, count, size);
+mi_decl_nodiscard void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
+ return mi_heap_recalloc(mi_prim_get_default_heap(), p, count, size);
}
@@ -647,33 +792,35 @@ void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
// ------------------------------------------------------
// `strdup` using mi_malloc
-mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept {
if (s == NULL) return NULL;
size_t n = strlen(s);
char* t = (char*)mi_heap_malloc(heap,n+1);
- if (t != NULL) memcpy(t, s, n + 1);
+ if (t == NULL) return NULL;
+ _mi_memcpy(t, s, n);
+ t[n] = 0;
return t;
}
-mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept {
- return mi_heap_strdup(mi_get_default_heap(), s);
+mi_decl_nodiscard mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept {
+ return mi_heap_strdup(mi_prim_get_default_heap(), s);
}
// `strndup` using mi_malloc
-mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept {
if (s == NULL) return NULL;
const char* end = (const char*)memchr(s, 0, n); // find end of string in the first `n` characters (returns NULL if not found)
const size_t m = (end != NULL ? (size_t)(end - s) : n); // `m` is the minimum of `n` or the end-of-string
mi_assert_internal(m <= n);
char* t = (char*)mi_heap_malloc(heap, m+1);
if (t == NULL) return NULL;
- memcpy(t, s, m);
+ _mi_memcpy(t, s, m);
t[m] = 0;
return t;
}
-mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
- return mi_heap_strndup(mi_get_default_heap(),s,n);
+mi_decl_nodiscard mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
+ return mi_heap_strndup(mi_prim_get_default_heap(),s,n);
}
#ifndef __wasi__
@@ -682,8 +829,8 @@ mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
#ifndef PATH_MAX
#define PATH_MAX MAX_PATH
#endif
-#include <windows.h>
-mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
+#include <windows.h>
+mi_decl_nodiscard mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
// todo: use GetFullPathNameW to allow longer file names
char buf[PATH_MAX];
DWORD res = GetFullPathNameA(fname, PATH_MAX, (resolved_name == NULL ? buf : resolved_name), NULL);
@@ -701,8 +848,9 @@ mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char
}
}
#else
+/*
#include <unistd.h>  // pathconf
-static size_t mi_path_max() {
+static size_t mi_path_max(void) {
static size_t path_max = 0;
if (path_max <= 0) {
long m = pathconf("/",_PC_PATH_MAX);
@@ -712,25 +860,36 @@ static size_t mi_path_max() {
}
return path_max;
}
-
+*/
char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept {
if (resolved_name != NULL) {
return realpath(fname,resolved_name);
}
else {
- size_t n = mi_path_max();
+ char* rname = realpath(fname, NULL);
+ if (rname == NULL) return NULL;
+ char* result = mi_heap_strdup(heap, rname);
+ free(rname); // use regular free! (which may be redirected to our free but that's ok)
+ return result;
+ }
+ /*
+ const size_t n = mi_path_max();
char* buf = (char*)mi_malloc(n+1);
- if (buf==NULL) return NULL;
+ if (buf == NULL) {
+ errno = ENOMEM;
+ return NULL;
+ }
char* rname = realpath(fname,buf);
char* result = mi_heap_strndup(heap,rname,n); // ok if `rname==NULL`
mi_free(buf);
return result;
}
+ */
}
#endif
-mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
- return mi_heap_realpath(mi_get_default_heap(),fname,resolved_name);
+mi_decl_nodiscard mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
+ return mi_heap_realpath(mi_prim_get_default_heap(),fname,resolved_name);
}
#endif
@@ -746,9 +905,17 @@ but we call `exit` instead (i.e. not returning).
#ifdef __cplusplus
#include <new>
static bool mi_try_new_handler(bool nothrow) {
- std::new_handler h = std::get_new_handler();
+ #if defined(_MSC_VER) || (__cplusplus >= 201103L)
+ std::new_handler h = std::get_new_handler();
+ #else
+ std::new_handler h = std::set_new_handler(NULL); // pre-C++11: read the handler by swapping it out
+ std::set_new_handler(h);                         // and restore it right away
+ #endif
if (h==NULL) {
- if (!nothrow) throw std::bad_alloc();
+ _mi_error_message(ENOMEM, "out of memory in 'new'");
+ if (!nothrow) {
+ throw std::bad_alloc();
+ }
return false;
}
else {
@@ -757,13 +924,13 @@ static bool mi_try_new_handler(bool nothrow) {
}
}
#else
-typedef void (*std_new_handler_t)();
+typedef void (*std_new_handler_t)(void);
-#if (defined(__GNUC__) || defined(__clang__))
-std_new_handler_t __attribute((weak)) _ZSt15get_new_handlerv() {
+#if (defined(__GNUC__) || (defined(__clang__) && !defined(_MSC_VER))) // exclude clang-cl, see issue #631
+std_new_handler_t __attribute__((weak)) _ZSt15get_new_handlerv(void) {
return NULL;
}
-static std_new_handler_t mi_get_new_handler() {
+static std_new_handler_t mi_get_new_handler(void) {
return _ZSt15get_new_handlerv();
}
#else
@@ -776,7 +943,10 @@ static std_new_handler_t mi_get_new_handler() {
static bool mi_try_new_handler(bool nothrow) {
std_new_handler_t h = mi_get_new_handler();
if (h==NULL) {
- if (!nothrow) exit(ENOMEM); // cannot throw in plain C, use exit as we are out of memory anyway.
+ _mi_error_message(ENOMEM, "out of memory in 'new'");
+ if (!nothrow) {
+ abort(); // cannot throw in plain C, use abort
+ }
return false;
}
else {
@@ -786,27 +956,53 @@ static bool mi_try_new_handler(bool nothrow) {
}
#endif
-static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow ) {
+mi_decl_export mi_decl_noinline void* mi_heap_try_new(mi_heap_t* heap, size_t size, bool nothrow ) {
void* p = NULL;
while(p == NULL && mi_try_new_handler(nothrow)) {
- p = mi_malloc(size);
+ p = mi_heap_malloc(heap,size);
}
return p;
}
-mi_decl_restrict void* mi_new(size_t size) {
- void* p = mi_malloc(size);
- if (mi_unlikely(p == NULL)) return mi_try_new(size,false);
+static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow) {
+ return mi_heap_try_new(mi_prim_get_default_heap(), size, nothrow);
+}
+
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) {
+ void* p = mi_heap_malloc(heap,size);
+ if mi_unlikely(p == NULL) return mi_heap_try_new(heap, size, false);
return p;
}
-mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) {
+ return mi_heap_alloc_new(mi_prim_get_default_heap(), size);
+}
+
+
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) {
+ size_t total;
+ if mi_unlikely(mi_count_size_overflow(count, size, &total)) {
+ mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
+ return NULL;
+ }
+ else {
+ return mi_heap_alloc_new(heap,total);
+ }
+}
+
+mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
+ return mi_heap_alloc_new_n(mi_prim_get_default_heap(), count, size);
+}
+
+
+mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept {
void* p = mi_malloc(size);
- if (mi_unlikely(p == NULL)) return mi_try_new(size, true);
+ if mi_unlikely(p == NULL) return mi_try_new(size, true);
return p;
}
-mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
+mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
void* p;
do {
p = mi_malloc_aligned(size, alignment);
@@ -815,7 +1011,7 @@ mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) {
return p;
}
-mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept {
+mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept {
void* p;
do {
p = mi_malloc_aligned(size, alignment);
@@ -824,18 +1020,7 @@ mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_
return p;
}
-mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
- size_t total;
- if (mi_unlikely(mi_count_size_overflow(count, size, &total))) {
- mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
- return NULL;
- }
- else {
- return mi_new(total);
- }
-}
-
-void* mi_new_realloc(void* p, size_t newsize) {
+mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) {
void* q;
do {
q = mi_realloc(p, newsize);
@@ -843,9 +1028,9 @@ void* mi_new_realloc(void* p, size_t newsize) {
return q;
}
-void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
+mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
size_t total;
- if (mi_unlikely(mi_count_size_overflow(newcount, size, &total))) {
+ if mi_unlikely(mi_count_size_overflow(newcount, size, &total)) {
mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
return NULL;
}
@@ -853,3 +1038,23 @@ void* mi_new_reallocn(void* p, size_t newcount, size_t size) {
return mi_new_realloc(p, total);
}
}
+
+// ------------------------------------------------------
+// ensure explicit external inline definitions are emitted!
+// ------------------------------------------------------
+
+#ifdef __cplusplus
+void* _mi_externs[] = {
+ (void*)&_mi_page_malloc,
+ (void*)&_mi_heap_malloc_zero,
+ (void*)&_mi_heap_malloc_zero_ex,
+ (void*)&mi_malloc,
+ (void*)&mi_malloc_small,
+ (void*)&mi_zalloc_small,
+ (void*)&mi_heap_malloc,
+ (void*)&mi_heap_zalloc,
+ (void*)&mi_heap_malloc_small,
+ // (void*)&mi_heap_alloc_new,
+ // (void*)&mi_heap_alloc_new_n
+};
+#endif
diff --git a/src/arena.c b/src/arena.c
index 73a7e70..a04a04c 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2019, Microsoft Research, Daan Leijen
+Copyright (c) 2019-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -7,113 +7,207 @@ terms of the MIT license. A copy of the license can be found in the file
/* ----------------------------------------------------------------------------
"Arenas" are fixed area's of OS memory from which we can allocate
-large blocks (>= MI_ARENA_BLOCK_SIZE, 32MiB).
+large blocks (>= MI_ARENA_MIN_BLOCK_SIZE, 4MiB).
In contrast to the rest of mimalloc, the arenas are shared between
threads and need to be accessed using atomic operations.
-Currently arenas are only used to for huge OS page (1GiB) reservations,
-otherwise it delegates to direct allocation from the OS.
-In the future, we can expose an API to manually add more kinds of arenas
-which is sometimes needed for embedded devices or shared memory for example.
-(We can also employ this with WASI or `sbrk` systems to reserve large arenas
- on demand and be able to reuse them efficiently).
+Arenas are used for huge OS page (1GiB) reservations or for reserving
+OS memory upfront, which can improve performance or is sometimes needed
+on embedded devices. We can also employ this with WASI or `sbrk` systems
+to reserve large arenas upfront and be able to reuse the memory more effectively.
-The arena allocation needs to be thread safe and we use an atomic
-bitmap to allocate. The current implementation of the bitmap can
-only do this within a field (`uintptr_t`) so we can allocate at most
-blocks of 2GiB (64*32MiB) and no object can cross the boundary. This
-can lead to fragmentation but fortunately most objects will be regions
-of 256MiB in practice.
+The arena allocation needs to be thread safe and we use an atomic bitmap to allocate.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
-#include "mimalloc-internal.h"
-#include "mimalloc-atomic.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
#include <string.h>  // memset
+#include <errno.h>   // ENOMEM
-#include "bitmap.inc.c" // atomic bitmap
-
-
-// os.c
-void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_os_tld_t* tld);
-void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* stats);
-void _mi_os_free(void* p, size_t size, mi_stats_t* stats);
-
-void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize);
-void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats);
-
-bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
+#include "bitmap.h" // atomic bitmap
/* -----------------------------------------------------------
Arena allocation
----------------------------------------------------------- */
-#define MI_SEGMENT_ALIGN MI_SEGMENT_SIZE
-#define MI_ARENA_BLOCK_SIZE (8*MI_SEGMENT_ALIGN) // 32MiB
-#define MI_ARENA_MAX_OBJ_SIZE (MI_BITMAP_FIELD_BITS * MI_ARENA_BLOCK_SIZE) // 2GiB
-#define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2) // 16MiB
-#define MI_MAX_ARENAS (64) // not more than 256 (since we use 8 bits in the memid)
+// Block info: bit 0 contains the `in_use` bit, the upper bits the
+// size in count of arena blocks.
+typedef uintptr_t mi_block_info_t;
+#define MI_ARENA_BLOCK_SIZE (MI_SEGMENT_SIZE) // 64MiB (must be at least MI_SEGMENT_ALIGN)
+#define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2) // 32MiB
+#define MI_MAX_ARENAS (112) // not more than 126 (since we use 7 bits in the memid and an arena index + 1)
// A memory arena descriptor
typedef struct mi_arena_s {
+ mi_arena_id_t id; // arena id; 0 for non-specific
+ mi_memid_t memid; // memid of the memory area
_Atomic(uint8_t*) start; // the start of the memory area
size_t block_count; // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`)
size_t field_count; // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`)
+ size_t meta_size; // size of the arena structure itself (including its bitmaps)
+ mi_memid_t meta_memid; // memid of the arena structure itself (OS or static allocation)
int numa_node; // associated NUMA node
- bool is_zero_init; // is the arena zero initialized?
- bool is_committed; // is the memory committed
- bool is_large; // large OS page allocated
- _Atomic(uintptr_t) search_idx; // optimization to start the search for free blocks
+ bool exclusive; // only allow allocations if specifically for this arena
+ bool is_large; // memory area consists of large- or huge OS pages (always committed)
+ _Atomic(size_t) search_idx; // optimization to start the search for free blocks
+ _Atomic(mi_msecs_t) purge_expire; // expiration time when blocks should be decommitted from `blocks_purge`.
mi_bitmap_field_t* blocks_dirty; // are the blocks potentially non-zero?
- mi_bitmap_field_t* blocks_committed; // if `!is_committed`, are the blocks committed?
- mi_bitmap_field_t blocks_inuse[1]; // in-place bitmap of in-use blocks (of size `field_count`)
+ mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted)
+ mi_bitmap_field_t* blocks_purge; // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted)
+ mi_bitmap_field_t blocks_inuse[1]; // in-place bitmap of in-use blocks (of size `field_count`)
} mi_arena_t;
// The available arenas
static mi_decl_cache_align _Atomic(mi_arena_t*) mi_arenas[MI_MAX_ARENAS];
-static mi_decl_cache_align _Atomic(uintptr_t) mi_arena_count; // = 0
+static mi_decl_cache_align _Atomic(size_t) mi_arena_count; // = 0
+//static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept;
+
/* -----------------------------------------------------------
- Arena allocations get a memory id where the lower 8 bits are
- the arena index +1, and the upper bits the block index.
+ Arena id's
+ id = arena_index + 1
----------------------------------------------------------- */
-// Use `0` as a special id for direct OS allocated memory.
-#define MI_MEMID_OS 0
-
-static size_t mi_arena_id_create(size_t arena_index, mi_bitmap_index_t bitmap_index) {
- mi_assert_internal(arena_index < 0xFE);
- mi_assert_internal(((bitmap_index << 8) >> 8) == bitmap_index); // no overflow?
- return ((bitmap_index << 8) | ((arena_index+1) & 0xFF));
+static size_t mi_arena_id_index(mi_arena_id_t id) {
+ return (size_t)(id <= 0 ? MI_MAX_ARENAS : id - 1);
}
-static void mi_arena_id_indices(size_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
- mi_assert_internal(memid != MI_MEMID_OS);
- *arena_index = (memid & 0xFF) - 1;
- *bitmap_index = (memid >> 8);
+static mi_arena_id_t mi_arena_id_create(size_t arena_index) {
+ mi_assert_internal(arena_index < MI_MAX_ARENAS);
+ return (int)arena_index + 1;
}
+mi_arena_id_t _mi_arena_id_none(void) {
+ return 0;
+}
+
+static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclusive, mi_arena_id_t req_arena_id) {
+ return ((!arena_is_exclusive && req_arena_id == _mi_arena_id_none()) ||
+ (arena_id == req_arena_id));
+}
+
+bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id) {
+ if (memid.memkind == MI_MEM_ARENA) {
+ return mi_arena_id_is_suitable(memid.mem.arena.id, memid.mem.arena.is_exclusive, request_arena_id);
+ }
+ else {
+ return mi_arena_id_is_suitable(0, false, request_arena_id);
+ }
+}
+
+bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) {
+ return (memid.memkind == MI_MEM_OS);
+}
+
+/* -----------------------------------------------------------
+ Arena allocations get a (currently) 16-bit memory id where the
+ lower 8 bits are the arena id, and the upper bits the block index.
+----------------------------------------------------------- */
+
static size_t mi_block_count_of_size(size_t size) {
return _mi_divide_up(size, MI_ARENA_BLOCK_SIZE);
}
+static size_t mi_arena_block_size(size_t bcount) {
+ return (bcount * MI_ARENA_BLOCK_SIZE);
+}
+
+static size_t mi_arena_size(mi_arena_t* arena) {
+ return mi_arena_block_size(arena->block_count);
+}
+
+static mi_memid_t mi_memid_create_arena(mi_arena_id_t id, bool is_exclusive, mi_bitmap_index_t bitmap_index) {
+ mi_memid_t memid = _mi_memid_create(MI_MEM_ARENA);
+ memid.mem.arena.id = id;
+ memid.mem.arena.block_index = bitmap_index;
+ memid.mem.arena.is_exclusive = is_exclusive;
+ return memid;
+}
+
+static bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) {
+ mi_assert_internal(memid.memkind == MI_MEM_ARENA);
+ *arena_index = mi_arena_id_index(memid.mem.arena.id);
+ *bitmap_index = memid.mem.arena.block_index;
+ return memid.mem.arena.is_exclusive;
+}
+
+
+
+/* -----------------------------------------------------------
+ Special static area for mimalloc internal structures
+ to avoid OS calls (for example, for the arena metadata)
+----------------------------------------------------------- */
+
+#define MI_ARENA_STATIC_MAX (MI_INTPTR_SIZE*MI_KiB) // 8 KiB on 64-bit
+
+static uint8_t mi_arena_static[MI_ARENA_STATIC_MAX];
+static _Atomic(size_t) mi_arena_static_top;
+
+static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* memid) {
+ *memid = _mi_memid_none();
+ if (size == 0 || size > MI_ARENA_STATIC_MAX) return NULL;
+ if ((mi_atomic_load_relaxed(&mi_arena_static_top) + size) > MI_ARENA_STATIC_MAX) return NULL;
+
+ // try to claim space
+ if (alignment == 0) { alignment = 1; }
+ const size_t oversize = size + alignment - 1;
+ if (oversize > MI_ARENA_STATIC_MAX) return NULL;
+ const size_t oldtop = mi_atomic_add_acq_rel(&mi_arena_static_top, oversize);
+ size_t top = oldtop + oversize;
+ if (top > MI_ARENA_STATIC_MAX) {
+ // try to roll back, ok if this fails
+ mi_atomic_cas_strong_acq_rel(&mi_arena_static_top, &top, oldtop);
+ return NULL;
+ }
+
+ // success
+ *memid = _mi_memid_create(MI_MEM_STATIC);
+ const size_t start = _mi_align_up(oldtop, alignment);
+ uint8_t* const p = &mi_arena_static[start];
+ _mi_memzero(p, size);
+ return p;
+}
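// mi_arena_static_zalloc above is a lock-free bump allocator: atomically
// advance the top, and roll back on overflow (it is harmless if the rollback
// CAS loses a race). A standalone sketch with illustrative names; `align`
// must be a power of two:

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

#define POOL_MAX 4096
static uint8_t pool[POOL_MAX];
static _Atomic size_t pool_top;

static void* pool_alloc(size_t size, size_t align) {
  const size_t oversize = size + align - 1;        // slack for worst-case alignment
  size_t old = atomic_fetch_add(&pool_top, oversize);
  size_t top = old + oversize;
  if (top > POOL_MAX) {
    atomic_compare_exchange_strong(&pool_top, &top, old);  // try to roll back
    return NULL;
  }
  const size_t start = (old + align - 1) & ~(align - 1);   // align within our claim
  return &pool[start];
}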
+
+static void* mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) {
+ *memid = _mi_memid_none();
+
+ // try static
+ void* p = mi_arena_static_zalloc(size, MI_ALIGNMENT_MAX, memid);
+ if (p != NULL) return p;
+
+ // or fall back to the OS
+ return _mi_os_alloc(size, memid, stats);
+}
+
+static void mi_arena_meta_free(void* p, mi_memid_t memid, size_t size, mi_stats_t* stats) {
+ if (mi_memkind_is_os(memid.memkind)) {
+ _mi_os_free(p, size, memid, stats);
+ }
+ else {
+ mi_assert(memid.memkind == MI_MEM_STATIC);
+ }
+}
+
+static void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex) {
+ return (arena->start + mi_arena_block_size(mi_bitmap_index_bit(bindex)));
+}
+
+
/* -----------------------------------------------------------
Thread safe allocation in an arena
----------------------------------------------------------- */
-static bool mi_arena_alloc(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx)
+
+// claim the `blocks_inuse` bits
+static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx)
{
- const size_t fcount = arena->field_count;
- size_t idx = mi_atomic_load_acquire(&arena->search_idx); // start from last search
- for (size_t visited = 0; visited < fcount; visited++, idx++) {
- if (idx >= fcount) idx = 0; // wrap around
- // try to atomically claim a range of bits
- if (mi_bitmap_try_find_claim_field(arena->blocks_inuse, idx, blocks, bitmap_idx)) {
- mi_atomic_store_release(&arena->search_idx, idx); // start search from here next time
- return true;
- }
- }
+ size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter
+ if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx)) {
+ mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around
+ return true;
+ }
return false;
}
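// At its core, claiming `blocks` bits boils down to a mask-and-CAS on a
// bitmap word. A single-field sketch (the real bitmap code also searches for
// a free range and can span field boundaries); assumes 0 < k and
// idx + k <= bits per word:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

static bool try_claim_bits(_Atomic size_t* field, size_t idx, size_t k) {
  const size_t ones = (k >= sizeof(size_t)*8 ? ~(size_t)0 : (((size_t)1 << k) - 1));
  const size_t mask = ones << idx;
  size_t old = atomic_load_explicit(field, memory_order_relaxed);
  do {
    if ((old & mask) != 0) return false;   // some bit in the range is taken
  } while (!atomic_compare_exchange_weak_explicit(
             field, &old, old | mask, memory_order_acquire, memory_order_relaxed));
  return true;
}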
@@ -122,114 +216,412 @@ static bool mi_arena_alloc(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t*
Arena Allocation
----------------------------------------------------------- */
-static void* mi_arena_alloc_from(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
- bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount,
+ bool commit, mi_memid_t* memid, mi_os_tld_t* tld)
{
- mi_bitmap_index_t bitmap_index;
- if (!mi_arena_alloc(arena, needed_bcount, &bitmap_index)) return NULL;
+ MI_UNUSED(arena_index);
+ mi_assert_internal(mi_arena_id_index(arena->id) == arena_index);
- // claimed it! set the dirty bits (todo: no need for an atomic op here?)
- void* p = arena->start + (mi_bitmap_index_bit(bitmap_index)*MI_ARENA_BLOCK_SIZE);
- *memid = mi_arena_id_create(arena_index, bitmap_index);
- *is_zero = mi_bitmap_claim(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
- *large = arena->is_large;
- if (arena->is_committed) {
- // always committed
- *commit = true;
+ mi_bitmap_index_t bitmap_index;
+ if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index)) return NULL;
+
+ // claimed it!
+ void* p = mi_arena_block_start(arena, bitmap_index);
+ *memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index);
+ memid->is_pinned = arena->memid.is_pinned;
+
+ // none of the claimed blocks should be scheduled for a decommit
+ if (arena->blocks_purge != NULL) {
+ // this is thread safe as a potential purge only decommits parts that are not yet claimed as used (in `blocks_inuse`).
+ _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, needed_bcount, bitmap_index);
}
- else if (*commit) {
- // arena not committed as a whole, but commit requested: ensure commit now
+
+ // set the dirty bits (todo: no need for an atomic op here?)
+ if (arena->memid.initially_zero && arena->blocks_dirty != NULL) {
+ memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
+ }
+
+ // set commit state
+ if (arena->blocks_committed == NULL) {
+ // always committed
+ memid->initially_committed = true;
+ }
+ else if (commit) {
+ // commit requested, but the range may not be committed as a whole: ensure it is committed now
+ memid->initially_committed = true;
bool any_uncommitted;
- mi_bitmap_claim(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted);
+ _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted);
if (any_uncommitted) {
- bool commit_zero;
- _mi_os_commit(p, needed_bcount * MI_ARENA_BLOCK_SIZE, &commit_zero, tld->stats);
- if (commit_zero) *is_zero = true;
+ bool commit_zero = false;
+ if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero, tld->stats)) {
+ memid->initially_committed = false;
+ }
+ else {
+ if (commit_zero) { memid->initially_zero = true; }
+ }
}
}
else {
// no need to commit, but check if already fully committed
- *commit = mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
+ memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
}
+
return p;
}
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment,
- bool* commit, bool* large, bool* is_zero,
- size_t* memid, mi_os_tld_t* tld)
+// allocate in a specific arena
+static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment,
+ bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
{
- mi_assert_internal(commit != NULL && large != NULL && is_zero != NULL && memid != NULL && tld != NULL);
- mi_assert_internal(size > 0);
- *memid = MI_MEMID_OS;
- *is_zero = false;
+ MI_UNUSED_RELEASE(alignment);
+ mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
+ const size_t bcount = mi_block_count_of_size(size);
+ const size_t arena_index = mi_arena_id_index(arena_id);
+ mi_assert_internal(arena_index < mi_atomic_load_relaxed(&mi_arena_count));
+ mi_assert_internal(size <= mi_arena_block_size(bcount));
+
+ // Check arena suitability
+ mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]);
+ if (arena == NULL) return NULL;
+ if (!allow_large && arena->is_large) return NULL;
+ if (!mi_arena_id_is_suitable(arena->id, arena->exclusive, req_arena_id)) return NULL;
+ if (req_arena_id == _mi_arena_id_none()) { // if not specific, check numa affinity
+ const bool numa_suitable = (numa_node < 0 || arena->numa_node < 0 || arena->numa_node == numa_node);
+ if (match_numa_node) { if (!numa_suitable) return NULL; }
+ else { if (numa_suitable) return NULL; }
+ }
- // try to allocate in an arena if the alignment is small enough
- // and the object is not too large or too small.
- if (alignment <= MI_SEGMENT_ALIGN &&
- size <= MI_ARENA_MAX_OBJ_SIZE &&
- size >= MI_ARENA_MIN_OBJ_SIZE)
- {
- const size_t bcount = mi_block_count_of_size(size);
- const int numa_node = _mi_os_numa_node(tld); // current numa node
+ // try to allocate
+ void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid, tld);
+ mi_assert_internal(p == NULL || _mi_is_aligned(p, alignment));
+ return p;
+}
- mi_assert_internal(size <= bcount*MI_ARENA_BLOCK_SIZE);
+
+// allocate from an arena with fallback to the OS
+static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment,
+ bool commit, bool allow_large,
+ mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld )
+{
+ MI_UNUSED(alignment);
+ mi_assert_internal(alignment <= MI_SEGMENT_ALIGN);
+ const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
+ if mi_likely(max_arena == 0) return NULL;
+
+ if (req_arena_id != _mi_arena_id_none()) {
+ // try a specific arena if requested
+ if (mi_arena_id_index(req_arena_id) < max_arena) {
+ void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+ if (p != NULL) return p;
+ }
+ }
+ else {
// try numa affine allocation
- for (size_t i = 0; i < MI_MAX_ARENAS; i++) {
- mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
- if (arena==NULL) break; // end reached
- if ((arena->numa_node<0 || arena->numa_node==numa_node) && // numa local?
- (*large || !arena->is_large)) // large OS pages allowed, or arena is not large OS pages
- {
- void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_zero, memid, tld);
- mi_assert_internal((uintptr_t)p % alignment == 0);
+ for (size_t i = 0; i < max_arena; i++) {
+ void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+ if (p != NULL) return p;
+ }
+
+ // try from another numa node instead..
+ if (numa_node >= 0) { // if numa_node was < 0 (no specific affinity requested), all arenas have been tried already
+ for (size_t i = 0; i < max_arena; i++) {
+ void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
if (p != NULL) return p;
}
}
- // try from another numa node instead..
- for (size_t i = 0; i < MI_MAX_ARENAS; i++) {
- mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
- if (arena==NULL) break; // end reached
- if ((arena->numa_node>=0 && arena->numa_node!=numa_node) && // not numa local!
- (*large || !arena->is_large)) // large OS pages allowed, or arena is not large OS pages
- {
- void* p = mi_arena_alloc_from(arena, i, bcount, commit, large, is_zero, memid, tld);
- mi_assert_internal((uintptr_t)p % alignment == 0);
+ }
+ return NULL;
+}
+
+// try to reserve a fresh arena space
+static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t req_arena_id, mi_arena_id_t *arena_id)
+{
+ if (_mi_preloading()) return false; // use the OS only while preloading
+ if (req_arena_id != _mi_arena_id_none()) return false;
+
+ const size_t arena_count = mi_atomic_load_acquire(&mi_arena_count);
+ if (arena_count > (MI_MAX_ARENAS - 4)) return false;
+
+ size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve);
+ if (arena_reserve == 0) return false;
+
+ if (!_mi_os_has_virtual_reserve()) {
+ arena_reserve = arena_reserve/4; // be conservative if virtual reserve is not supported (for some embedded systems for example)
+ }
+ arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE);
+ if (arena_count >= 8 && arena_count <= 128) {
+ arena_reserve = ((size_t)1<<(arena_count/8)) * arena_reserve; // scale up the arena sizes exponentially
+ }
+ if (arena_reserve < req_size) return false; // should be able to at least handle the current allocation size
+
+ // commit eagerly?
+ bool arena_commit = false;
+ if (mi_option_get(mi_option_arena_eager_commit) == 2) { arena_commit = _mi_os_has_overcommit(); }
+ else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; }
+
+ return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive */, arena_id) == 0);
+}
+
+
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large,
+ mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
+{
+ mi_assert_internal(memid != NULL && tld != NULL);
+ mi_assert_internal(size > 0);
+ *memid = _mi_memid_none();
+
+ const int numa_node = _mi_os_numa_node(tld); // current numa node
+
+ // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data)
+ if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) {
+ void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
+ if (p != NULL) return p;
+
+ // otherwise, first try to eagerly reserve a new arena
+ if (req_arena_id == _mi_arena_id_none()) {
+ mi_arena_id_t arena_id = 0;
+ if (mi_arena_reserve(size, allow_large, req_arena_id, &arena_id)) {
+ // and try allocate in there
+ mi_assert_internal(req_arena_id == _mi_arena_id_none());
+ p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld);
if (p != NULL) return p;
}
}
}
+ // if we cannot use OS allocation, return NULL
+ if (mi_option_is_enabled(mi_option_limit_os_alloc) || req_arena_id != _mi_arena_id_none()) {
+ errno = ENOMEM;
+ return NULL;
+ }
+
// finally, fall back to the OS
- *is_zero = true;
- *memid = MI_MEMID_OS;
- return _mi_os_alloc_aligned(size, alignment, *commit, large, tld);
+ if (align_offset > 0) {
+ return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid, tld->stats);
+ }
+ else {
+ return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats);
+ }
}
-void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_zero, size_t* memid, mi_os_tld_t* tld)
+void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld)
{
- return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, commit, large, is_zero, memid, tld);
+ return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid, tld);
}
+
+void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) {
+ if (size != NULL) *size = 0;
+ size_t arena_index = mi_arena_id_index(arena_id);
+ if (arena_index >= MI_MAX_ARENAS) return NULL;
+ mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]);
+ if (arena == NULL) return NULL;
+ if (size != NULL) { *size = mi_arena_block_size(arena->block_count); }
+ return arena->start;
+}
+
+
+/* -----------------------------------------------------------
+ Arena purge
+----------------------------------------------------------- */
+
+static long mi_arena_purge_delay(void) {
+ // <0 = no purging allowed, 0=immediate purging, >0=milli-second delay
+ return (mi_option_get(mi_option_purge_delay) * mi_option_get(mi_option_arena_purge_mult));
+}
+
+// reset or decommit in an arena and update the committed/decommit bitmaps
+// assumes we own the area (i.e. blocks_in_use is claimed by us)
+static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
+ mi_assert_internal(arena->blocks_committed != NULL);
+ mi_assert_internal(arena->blocks_purge != NULL);
+ mi_assert_internal(!arena->memid.is_pinned);
+ const size_t size = mi_arena_block_size(blocks);
+ void* const p = mi_arena_block_start(arena, bitmap_idx);
+ bool needs_recommit;
+ if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) {
+ // all blocks are committed, we can purge freely
+ needs_recommit = _mi_os_purge(p, size, stats);
+ }
+ else {
+ // some blocks are not committed -- this can happen when a partially committed block is freed
+ // in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge
+ // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory),
+ // and also undo the decommit stats (as it was already adjusted)
+ mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
+ needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, stats);
+ _mi_stat_increase(&stats->committed, size);
+ }
+
+ // clear the purged blocks
+ _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx);
+ // update committed bitmap
+ if (needs_recommit) {
+ _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
+ }
+}
+
+// Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls.
+// Note: assumes we (still) own the area as we may purge immediately
+static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) {
+ mi_assert_internal(arena->blocks_purge != NULL);
+ const long delay = mi_arena_purge_delay();
+ if (delay < 0) return; // is purging allowed at all?
+
+ if (_mi_preloading() || delay == 0) {
+ // decommit directly
+ mi_arena_purge(arena, bitmap_idx, blocks, stats);
+ }
+ else {
+ // schedule decommit
+ mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
+ if (expire != 0) {
+ mi_atomic_addi64_acq_rel(&arena->purge_expire, delay/10); // add smallish extra delay
+ }
+ else {
+ mi_atomic_storei64_release(&arena->purge_expire, _mi_clock_now() + delay);
+ }
+ _mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL);
+ }
+}
+
+// purge a range of blocks
+// return true if the full range was purged.
+// assumes we own the area (i.e. blocks_in_use is claimed by us)
+static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge, mi_stats_t* stats) {
+ const size_t endidx = startidx + bitlen;
+ size_t bitidx = startidx;
+ bool all_purged = false;
+ while (bitidx < endidx) {
+ // count consecutive ones in the purge mask
+ size_t count = 0;
+ while (bitidx + count < endidx && (purge & ((size_t)1 << (bitidx + count))) != 0) {
+ count++;
+ }
+ if (count > 0) {
+ // found range to be purged
+ const mi_bitmap_index_t range_idx = mi_bitmap_index_create(idx, bitidx);
+ mi_arena_purge(arena, range_idx, count, stats);
+ if (count == bitlen) {
+ all_purged = true;
+ }
+ }
+ bitidx += (count+1); // +1 to skip the zero bit (or end)
+ }
+ return all_purged;
+}
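+
+// Worked example (illustrative): with purge mask bits `0b0110'1100`, `startidx = 2`
+// and `bitlen = 6`, the scan purges the two runs of consecutive ones separately:
+//
+//   bitidx = 2: count = 2 -> mi_arena_purge of blocks 2..3
+//   bitidx = 5: count = 2 -> mi_arena_purge of blocks 5..6
+//
+// `all_purged` remains false as neither run covered the full `bitlen`.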
+
+// returns true if anything was purged
+static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats)
+{
+ if (arena->memid.is_pinned || arena->blocks_purge == NULL) return false;
+ mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire);
+ if (expire == 0) return false;
+ if (!force && expire > now) return false;
+
+ // reset expire (if not already set concurrently)
+ mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, 0);
+
+ // potential purges scheduled, walk through the bitmap
+ bool any_purged = false;
+ bool full_purge = true;
+ for (size_t i = 0; i < arena->field_count; i++) {
+ size_t purge = mi_atomic_load_relaxed(&arena->blocks_purge[i]);
+ if (purge != 0) {
+ size_t bitidx = 0;
+ while (bitidx < MI_BITMAP_FIELD_BITS) {
+        // find a consecutive range of ones in the purge mask
+ size_t bitlen = 0;
+ while (bitidx + bitlen < MI_BITMAP_FIELD_BITS && (purge & ((size_t)1 << (bitidx + bitlen))) != 0) {
+ bitlen++;
+ }
+ // try to claim the longest range of corresponding in_use bits
+ const mi_bitmap_index_t bitmap_index = mi_bitmap_index_create(i, bitidx);
+ while( bitlen > 0 ) {
+ if (_mi_bitmap_try_claim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index)) {
+ break;
+ }
+ bitlen--;
+ }
+        // `bitlen` is now the actual number of bits claimed in `blocks_inuse`
+ if (bitlen > 0) {
+ // read purge again now that we have the in_use bits
+ purge = mi_atomic_load_acquire(&arena->blocks_purge[i]);
+ if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge, stats)) {
+ full_purge = false;
+ }
+ any_purged = true;
+ // release the claimed `in_use` bits again
+ _mi_bitmap_unclaim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index);
+ }
+ bitidx += (bitlen+1); // +1 to skip the zero (or end)
+ } // while bitidx
+ } // purge != 0
+ }
+ // if not fully purged, make sure to purge again in the future
+ if (!full_purge) {
+ const long delay = mi_arena_purge_delay();
+ mi_msecs_t expected = 0;
+ mi_atomic_casi64_strong_acq_rel(&arena->purge_expire,&expected,_mi_clock_now() + delay);
+ }
+ return any_purged;
+}
+
+static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) {
+ if (_mi_preloading() || mi_arena_purge_delay() <= 0) return; // nothing will be scheduled
+
+ const size_t max_arena = mi_atomic_load_acquire(&mi_arena_count);
+ if (max_arena == 0) return;
+
+ // allow only one thread to purge at a time
+ static mi_atomic_guard_t purge_guard;
+ mi_atomic_guard(&purge_guard)
+ {
+ mi_msecs_t now = _mi_clock_now();
+ size_t max_purge_count = (visit_all ? max_arena : 1);
+ for (size_t i = 0; i < max_arena; i++) {
+ mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
+ if (arena != NULL) {
+ if (mi_arena_try_purge(arena, now, force, stats)) {
+ if (max_purge_count <= 1) break;
+ max_purge_count--;
+ }
+ }
+ }
+ }
+}
+
+
/* -----------------------------------------------------------
Arena free
----------------------------------------------------------- */
-void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_stats_t* stats) {
+void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid, mi_stats_t* stats) {
mi_assert_internal(size > 0 && stats != NULL);
+ mi_assert_internal(committed_size <= size);
if (p==NULL) return;
if (size==0) return;
- if (memid == MI_MEMID_OS) {
+ const bool all_committed = (committed_size == size);
+
+ if (mi_memkind_is_os(memid.memkind)) {
// was a direct OS allocation, pass through
- _mi_os_free_ex(p, size, all_committed, stats);
+ if (!all_committed && committed_size > 0) {
+ // if partially committed, adjust the committed stats (as `_mi_os_free` will increase decommit by the full size)
+ _mi_stat_decrease(&stats->committed, committed_size);
+ }
+ _mi_os_free(p, size, memid, stats);
}
- else {
+ else if (memid.memkind == MI_MEM_ARENA) {
// allocated in an arena
size_t arena_idx;
size_t bitmap_idx;
- mi_arena_id_indices(memid, &arena_idx, &bitmap_idx);
+ mi_arena_memid_indices(memid, &arena_idx, &bitmap_idx);
mi_assert_internal(arena_idx < MI_MAX_ARENAS);
- mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t,&mi_arenas[arena_idx]);
+ mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t,&mi_arenas[arena_idx]);
mi_assert_internal(arena != NULL);
+ const size_t blocks = mi_block_count_of_size(size);
+
+ // checks
if (arena == NULL) {
_mi_error_message(EINVAL, "trying to free from non-existent arena: %p, size %zu, memid: 0x%zx\n", p, size, memid);
return;
@@ -239,85 +631,271 @@ void _mi_arena_free(void* p, size_t size, size_t memid, bool all_committed, mi_s
_mi_error_message(EINVAL, "trying to free from non-existent arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid);
return;
}
- const size_t blocks = mi_block_count_of_size(size);
- bool ones = mi_bitmap_unclaim(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
- if (!ones) {
- _mi_error_message(EAGAIN, "trying to free an already freed block: %p, size %zu\n", p, size);
+
+ // need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.)
+ mi_track_mem_undefined(p,size);
+
+ // potentially decommit
+ if (arena->memid.is_pinned || arena->blocks_committed == NULL) {
+ mi_assert_internal(all_committed);
+ }
+ else {
+ mi_assert_internal(arena->blocks_committed != NULL);
+ mi_assert_internal(arena->blocks_purge != NULL);
+
+ if (!all_committed) {
+ // mark the entire range as no longer committed (so we recommit the full range when re-using)
+ _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
+ mi_track_mem_noaccess(p,size);
+ if (committed_size > 0) {
+        // if partially committed, adjust the committed stats (as it will be recommitted when re-used)
+        // in the delayed purge, we then must not count a decommit if the range is not marked as committed.
+ _mi_stat_decrease(&stats->committed, committed_size);
+ }
+      // note: if not all committed, the purge may reset/decommit a range that contains
+      // already decommitted parts. Since a purge consistently uses either reset or decommit,
+      // this is fine (we never reset already decommitted parts).
+ }
+ // (delay) purge the entire range
+ mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats);
+ }
+
+ // and make it available to others again
+ bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx);
+ if (!all_inuse) {
+ _mi_error_message(EAGAIN, "trying to free an already freed arena block: %p, size %zu\n", p, size);
return;
};
}
+ else {
+ // arena was none, external, or static; nothing to do
+ mi_assert_internal(memid.memkind < MI_MEM_OS);
+ }
+
+ // purge expired decommits
+ mi_arenas_try_purge(false, false, stats);
}
+// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
+// for dynamic libraries that are unloaded and need to release all their allocated memory.
+static void mi_arenas_unsafe_destroy(void) {
+ const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
+ size_t new_max_arena = 0;
+ for (size_t i = 0; i < max_arena; i++) {
+ mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
+ if (arena != NULL) {
+ if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) {
+ mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL);
+ _mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main);
+ }
+ else {
+ new_max_arena = i;
+ }
+ mi_arena_meta_free(arena, arena->meta_memid, arena->meta_size, &_mi_stats_main);
+ }
+ }
+
+ // try to lower the max arena.
+ size_t expected = max_arena;
+ mi_atomic_cas_strong_acq_rel(&mi_arena_count, &expected, new_max_arena);
+}
+
+// Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired
+void _mi_arena_collect(bool force_purge, mi_stats_t* stats) {
+ mi_arenas_try_purge(force_purge, true /* visit all */, stats);
+}
+
+// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit`
+// for dynamic libraries that are unloaded and need to release all their allocated memory.
+void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) {
+ mi_arenas_unsafe_destroy();
+ _mi_arena_collect(true /* force purge */, stats); // purge non-owned arenas
+}
+
+// Is a pointer inside any of our arenas?
+bool _mi_arena_contains(const void* p) {
+ const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count);
+ for (size_t i = 0; i < max_arena; i++) {
+ mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]);
+ if (arena != NULL && arena->start <= (const uint8_t*)p && arena->start + mi_arena_block_size(arena->block_count) > (const uint8_t*)p) {
+ return true;
+ }
+ }
+ return false;
+}
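+
+// Usage sketch (illustrative only):
+//
+//   void* p = mi_malloc(100);
+//   if (_mi_arena_contains(p)) {
+//     // `p` lies within OS memory managed by one of our arenas
+//   }
+//   mi_free(p);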
+
+
/* -----------------------------------------------------------
Add an arena.
----------------------------------------------------------- */
-static bool mi_arena_add(mi_arena_t* arena) {
+static bool mi_arena_add(mi_arena_t* arena, mi_arena_id_t* arena_id) {
mi_assert_internal(arena != NULL);
mi_assert_internal((uintptr_t)mi_atomic_load_ptr_relaxed(uint8_t,&arena->start) % MI_SEGMENT_ALIGN == 0);
mi_assert_internal(arena->block_count > 0);
+ if (arena_id != NULL) { *arena_id = -1; }
- uintptr_t i = mi_atomic_increment_acq_rel(&mi_arena_count);
+ size_t i = mi_atomic_increment_acq_rel(&mi_arena_count);
if (i >= MI_MAX_ARENAS) {
mi_atomic_decrement_acq_rel(&mi_arena_count);
return false;
}
+ arena->id = mi_arena_id_create(i);
mi_atomic_store_ptr_release(mi_arena_t,&mi_arenas[i], arena);
+ if (arena_id != NULL) { *arena_id = arena->id; }
return true;
}
+static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept
+{
+ if (arena_id != NULL) *arena_id = _mi_arena_id_none();
+ if (size < MI_ARENA_BLOCK_SIZE) return false;
+
+ if (is_large) {
+ mi_assert_internal(memid.initially_committed && memid.is_pinned);
+ }
+
+ const size_t bcount = size / MI_ARENA_BLOCK_SIZE;
+ const size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);
+ const size_t bitmaps = (memid.is_pinned ? 2 : 4);
+ const size_t asize = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t));
+ mi_memid_t meta_memid;
+ mi_arena_t* arena = (mi_arena_t*)mi_arena_meta_zalloc(asize, &meta_memid, &_mi_stats_main); // TODO: can we avoid allocating from the OS?
+ if (arena == NULL) return false;
+
+  // already zeroed due to os_alloc
+ // _mi_memzero(arena, asize);
+ arena->id = _mi_arena_id_none();
+ arena->memid = memid;
+ arena->exclusive = exclusive;
+ arena->meta_size = asize;
+ arena->meta_memid = meta_memid;
+ arena->block_count = bcount;
+ arena->field_count = fields;
+ arena->start = (uint8_t*)start;
+ arena->numa_node = numa_node; // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1)
+ arena->is_large = is_large;
+ arena->purge_expire = 0;
+ arena->search_idx = 0;
+ arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap
+ arena->blocks_committed = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[2*fields]); // just after dirty bitmap
+ arena->blocks_purge = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[3*fields]); // just after committed bitmap
+ // initialize committed bitmap?
+ if (arena->blocks_committed != NULL && arena->memid.initially_committed) {
+ memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t)); // cast to void* to avoid atomic warning
+ }
+
+ // and claim leftover blocks if needed (so we never allocate there)
+ ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount;
+ mi_assert_internal(post >= 0);
+ if (post > 0) {
+ // don't use leftover bits at the end
+ mi_bitmap_index_t postidx = mi_bitmap_index_create(fields - 1, MI_BITMAP_FIELD_BITS - post);
+ _mi_bitmap_claim(arena->blocks_inuse, fields, post, postidx, NULL);
+ }
+ return mi_arena_add(arena, arena_id);
+}
+
+bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
+ mi_memid_t memid = _mi_memid_create(MI_MEM_EXTERNAL);
+ memid.initially_committed = is_committed;
+ memid.initially_zero = is_zero;
+ memid.is_pinned = is_large;
+ return mi_manage_os_memory_ex2(start,size,is_large,numa_node,exclusive,memid, arena_id);
+}
+
+// Reserve a range of regular OS memory
+int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
+ if (arena_id != NULL) *arena_id = _mi_arena_id_none();
+ size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one block
+ mi_memid_t memid;
+ void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid, &_mi_stats_main);
+ if (start == NULL) return ENOMEM;
+ const bool is_large = memid.is_pinned; // todo: use separate is_large field?
+ if (!mi_manage_os_memory_ex2(start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) {
+ _mi_os_free_ex(start, size, commit, memid, &_mi_stats_main);
+ _mi_verbose_message("failed to reserve %zu k memory\n", _mi_divide_up(size, 1024));
+ return ENOMEM;
+ }
+ _mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size, 1024), is_large ? " (in large os pages)" : "");
+ return 0;
+}
+
+
+// Manage a range of regular OS memory
+bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept {
+ return mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero, numa_node, false /* exclusive? */, NULL);
+}
+
+// Reserve a range of regular OS memory
+int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept {
+ return mi_reserve_os_memory_ex(size, commit, allow_large, false, NULL);
+}
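+
+// Usage sketch (illustrative; `arena` and `heap` are hypothetical locals):
+//
+//   mi_arena_id_t arena;
+//   if (mi_reserve_os_memory_ex(64 * 1024 * 1024, true /* commit */,
+//                               false /* allow_large */, true /* exclusive */, &arena) == 0) {
+//     mi_heap_t* heap = mi_heap_new_in_arena(arena); // allocates only from this arena
+//     void* p = mi_heap_malloc(heap, 128);
+//     mi_free(p);
+//   }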
+
+
+/* -----------------------------------------------------------
+ Debugging
+----------------------------------------------------------- */
+
+static size_t mi_debug_show_bitmap(const char* prefix, mi_bitmap_field_t* fields, size_t field_count ) {
+ size_t inuse_count = 0;
+ for (size_t i = 0; i < field_count; i++) {
+ char buf[MI_BITMAP_FIELD_BITS + 1];
+ uintptr_t field = mi_atomic_load_relaxed(&fields[i]);
+ for (size_t bit = 0; bit < MI_BITMAP_FIELD_BITS; bit++) {
+ bool inuse = ((((uintptr_t)1 << bit) & field) != 0);
+ if (inuse) inuse_count++;
+ buf[MI_BITMAP_FIELD_BITS - 1 - bit] = (inuse ? 'x' : '.');
+ }
+ buf[MI_BITMAP_FIELD_BITS] = 0;
+ _mi_verbose_message("%s%s\n", prefix, buf);
+ }
+ return inuse_count;
+}
+
+void mi_debug_show_arenas(void) mi_attr_noexcept {
+ size_t max_arenas = mi_atomic_load_relaxed(&mi_arena_count);
+ for (size_t i = 0; i < max_arenas; i++) {
+ mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]);
+ if (arena == NULL) break;
+ size_t inuse_count = 0;
+ _mi_verbose_message("arena %zu: %zu blocks with %zu fields\n", i, arena->block_count, arena->field_count);
+ inuse_count += mi_debug_show_bitmap(" ", arena->blocks_inuse, arena->field_count);
+ _mi_verbose_message(" blocks in use ('x'): %zu\n", inuse_count);
+ }
+}
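+
+// Example output shape (illustrative; emitted via `_mi_verbose_message`):
+//
+//   arena 0: 128 blocks with 2 fields
+//     xxxx............................................................
+//     ................................................................
+//     blocks in use ('x'): 4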
+
/* -----------------------------------------------------------
Reserve a huge page arena.
----------------------------------------------------------- */
-#include <errno.h> // ENOMEM
-
// reserve at a specific numa node
-int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept {
+int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept {
+ if (arena_id != NULL) *arena_id = -1;
if (pages==0) return 0;
if (numa_node < -1) numa_node = -1;
if (numa_node >= 0) numa_node = numa_node % _mi_os_numa_node_count();
size_t hsize = 0;
size_t pages_reserved = 0;
- void* p = _mi_os_alloc_huge_os_pages(pages, numa_node, timeout_msecs, &pages_reserved, &hsize);
+ mi_memid_t memid;
+ void* p = _mi_os_alloc_huge_os_pages(pages, numa_node, timeout_msecs, &pages_reserved, &hsize, &memid);
if (p==NULL || pages_reserved==0) {
- _mi_warning_message("failed to reserve %zu gb huge pages\n", pages);
+ _mi_warning_message("failed to reserve %zu GiB huge pages\n", pages);
return ENOMEM;
}
- _mi_verbose_message("numa node %i: reserved %zu gb huge pages (of the %zu gb requested)\n", numa_node, pages_reserved, pages);
+ _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages);
- size_t bcount = mi_block_count_of_size(hsize);
- size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS);
- size_t asize = sizeof(mi_arena_t) + (2*fields*sizeof(mi_bitmap_field_t));
- mi_arena_t* arena = (mi_arena_t*)_mi_os_alloc(asize, &_mi_stats_main); // TODO: can we avoid allocating from the OS?
- if (arena == NULL) {
- _mi_os_free_huge_pages(p, hsize, &_mi_stats_main);
+ if (!mi_manage_os_memory_ex2(p, hsize, true, numa_node, exclusive, memid, arena_id)) {
+ _mi_os_free(p, hsize, memid, &_mi_stats_main);
return ENOMEM;
}
- arena->block_count = bcount;
- arena->field_count = fields;
- arena->start = (uint8_t*)p;
- arena->numa_node = numa_node; // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1)
- arena->is_large = true;
- arena->is_zero_init = true;
- arena->is_committed = true;
- arena->search_idx = 0;
- arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap
- arena->blocks_committed = NULL;
- // the bitmaps are already zero initialized due to os_alloc
- // just claim leftover blocks if needed
- ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount;
- mi_assert_internal(post >= 0);
- if (post > 0) {
- // don't use leftover bits at the end
- mi_bitmap_index_t postidx = mi_bitmap_index_create(fields - 1, MI_BITMAP_FIELD_BITS - post);
- mi_bitmap_claim(arena->blocks_inuse, fields, post, postidx, NULL);
- }
-
- mi_arena_add(arena);
return 0;
}
+int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept {
+ return mi_reserve_huge_os_pages_at_ex(pages, numa_node, timeout_msecs, false, NULL);
+}
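+
+// Usage sketch (illustrative): reserve 4 one-GiB huge pages on NUMA node 0 in an
+// exclusive arena (with a 10 second timeout) and allocate from it:
+//
+//   mi_arena_id_t arena;
+//   if (mi_reserve_huge_os_pages_at_ex(4, 0, 10000, true /* exclusive */, &arena) == 0) {
+//     mi_heap_t* heap = mi_heap_new_in_arena(arena);
+//     void* p = mi_heap_malloc(heap, 1024);
+//     mi_free(p);
+//   }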
// reserve huge pages evenly among the given number of numa nodes (or use the available ones as detected)
int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept {
@@ -348,10 +926,11 @@ int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t
}
int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept {
- UNUSED(max_secs);
+ MI_UNUSED(max_secs);
_mi_warning_message("mi_reserve_huge_os_pages is deprecated: use mi_reserve_huge_os_pages_interleave/at instead\n");
if (pages_reserved != NULL) *pages_reserved = 0;
int err = mi_reserve_huge_os_pages_interleave(pages, 0, (size_t)(max_secs * 1000.0));
if (err==0 && pages_reserved!=NULL) *pages_reserved = pages;
return err;
}
+
diff --git a/src/bitmap.c b/src/bitmap.c
new file mode 100644
index 0000000..a13dbe1
--- /dev/null
+++ b/src/bitmap.c
@@ -0,0 +1,432 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2019-2023 Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+/* ----------------------------------------------------------------------------
+Concurrent bitmap that can set/reset sequences of bits atomically,
+represented as an array of fields where each field is a machine word (`size_t`)
+
+There are two APIs; the standard one cannot have sequences that cross
+between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS).
+
+The `_across` postfixed functions do allow sequences that can cross over
+between the fields. (This is used in arena allocation)
+---------------------------------------------------------------------------- */
+
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "bitmap.h"
+
+/* -----------------------------------------------------------
+ Bitmap definition
+----------------------------------------------------------- */
+
+// The bit mask for a given number of blocks at a specified bit index.
+static inline size_t mi_bitmap_mask_(size_t count, size_t bitidx) {
+ mi_assert_internal(count + bitidx <= MI_BITMAP_FIELD_BITS);
+ mi_assert_internal(count > 0);
+ if (count >= MI_BITMAP_FIELD_BITS) return MI_BITMAP_FIELD_FULL;
+ if (count == 0) return 0;
+ return ((((size_t)1 << count) - 1) << bitidx);
+}
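+
+// Example: mi_bitmap_mask_(3, 5) == ((1<<3)-1) << 5 == 0xE0, i.e. bits 5..7 set.
+// The early return for `count >= MI_BITMAP_FIELD_BITS` avoids the undefined
+// behavior of shifting a `size_t` by the full word width.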
+
+
+/* -----------------------------------------------------------
+ Claim a bit sequence atomically
+----------------------------------------------------------- */
+
+// Try to atomically claim a sequence of `count` bits in a single
+// field at `idx` in `bitmap`. Returns `true` on success.
+inline bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx)
+{
+ mi_assert_internal(bitmap_idx != NULL);
+ mi_assert_internal(count <= MI_BITMAP_FIELD_BITS);
+ mi_assert_internal(count > 0);
+ mi_bitmap_field_t* field = &bitmap[idx];
+ size_t map = mi_atomic_load_relaxed(field);
+ if (map==MI_BITMAP_FIELD_FULL) return false; // short cut
+
+ // search for 0-bit sequence of length count
+ const size_t mask = mi_bitmap_mask_(count, 0);
+ const size_t bitidx_max = MI_BITMAP_FIELD_BITS - count;
+
+#ifdef MI_HAVE_FAST_BITSCAN
+ size_t bitidx = mi_ctz(~map); // quickly find the first zero bit if possible
+#else
+ size_t bitidx = 0; // otherwise start at 0
+#endif
+ size_t m = (mask << bitidx); // invariant: m == mask shifted by bitidx
+
+ // scan linearly for a free range of zero bits
+ while (bitidx <= bitidx_max) {
+ const size_t mapm = (map & m);
+ if (mapm == 0) { // are the mask bits free at bitidx?
+ mi_assert_internal((m >> bitidx) == mask); // no overflow?
+ const size_t newmap = (map | m);
+ mi_assert_internal((newmap^map) >> bitidx == mask);
+ if (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)) { // TODO: use weak cas here?
+        // no success; another thread claimed concurrently, keep going (with updated `map`)
+ continue;
+ }
+ else {
+ // success, we claimed the bits!
+ *bitmap_idx = mi_bitmap_index_create(idx, bitidx);
+ return true;
+ }
+ }
+ else {
+ // on to the next bit range
+#ifdef MI_HAVE_FAST_BITSCAN
+ mi_assert_internal(mapm != 0);
+ const size_t shift = (count == 1 ? 1 : (MI_INTPTR_BITS - mi_clz(mapm) - bitidx));
+ mi_assert_internal(shift > 0 && shift <= count);
+#else
+ const size_t shift = 1;
+#endif
+ bitidx += shift;
+ m <<= shift;
+ }
+ }
+ // no bits found
+ return false;
+}
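+
+// Scan sketch (illustrative, using an 8-bit field for brevity): searching
+// `map = 0b0001'0011` for `count = 2` zero bits:
+//
+//   bitidx = 2 (first zero via ctz), m = 0b0000'1100, (map & m) == 0
+//   -> CAS claims bits 2..3 and returns index (idx, 2)
+//
+// On a failed CAS, `map` is reloaded and the same `bitidx` is retried.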
+
+// Find `count` bits of 0 and set them to 1 atomically; returns `true` on success.
+// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
+// `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields.
+bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx) {
+ size_t idx = start_field_idx;
+ for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) {
+ if (idx >= bitmap_fields) { idx = 0; } // wrap
+ if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fulfilled
+bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields,
+ const size_t start_field_idx, const size_t count,
+ mi_bitmap_pred_fun_t pred_fun, void* pred_arg,
+ mi_bitmap_index_t* bitmap_idx) {
+ size_t idx = start_field_idx;
+ for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) {
+ if (idx >= bitmap_fields) idx = 0; // wrap
+ if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) {
+ if (pred_fun == NULL || pred_fun(*bitmap_idx, pred_arg)) {
+ return true;
+ }
+ // predicate returned false, unclaim and look further
+ _mi_bitmap_unclaim(bitmap, bitmap_fields, count, *bitmap_idx);
+ }
+ }
+ return false;
+}
+
+// Set `count` bits at `bitmap_idx` to 0 atomically
+// Returns `true` if all `count` bits were 1 previously.
+bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
+ const size_t idx = mi_bitmap_index_field(bitmap_idx);
+ const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
+ const size_t mask = mi_bitmap_mask_(count, bitidx);
+ mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
+ // mi_assert_internal((bitmap[idx] & mask) == mask);
+ const size_t prev = mi_atomic_and_acq_rel(&bitmap[idx], ~mask);
+ return ((prev & mask) == mask);
+}
+
+
+// Set `count` bits at `bitmap_idx` to 1 atomically
+// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
+bool _mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero) {
+ const size_t idx = mi_bitmap_index_field(bitmap_idx);
+ const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
+ const size_t mask = mi_bitmap_mask_(count, bitidx);
+ mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
+ //mi_assert_internal(any_zero != NULL || (bitmap[idx] & mask) == 0);
+ size_t prev = mi_atomic_or_acq_rel(&bitmap[idx], mask);
+ if (any_zero != NULL) { *any_zero = ((prev & mask) != mask); }
+ return ((prev & mask) == 0);
+}
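+
+// Usage sketch (illustrative; `bitmap` and `fields` are hypothetical):
+//
+//   mi_bitmap_index_t bidx = mi_bitmap_index_create(0, 8);
+//   bool were_zero = _mi_bitmap_claim(bitmap, fields, 4, bidx, NULL); // set bits 8..11
+//   bool were_one  = _mi_bitmap_unclaim(bitmap, fields, 4, bidx);     // clear them again
+//   // both are true if no other thread touched bits 8..11 in between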
+
+// Returns `true` if all `count` bits were 1. `any_ones` is `true` if there was at least one bit set to one.
+static bool mi_bitmap_is_claimedx(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_ones) {
+ const size_t idx = mi_bitmap_index_field(bitmap_idx);
+ const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
+ const size_t mask = mi_bitmap_mask_(count, bitidx);
+ mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
+ const size_t field = mi_atomic_load_relaxed(&bitmap[idx]);
+ if (any_ones != NULL) { *any_ones = ((field & mask) != 0); }
+ return ((field & mask) == mask);
+}
+
+// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
+// Returns `true` on success, i.e. when all `count` bits were 0 previously.
+bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
+ const size_t idx = mi_bitmap_index_field(bitmap_idx);
+ const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
+ const size_t mask = mi_bitmap_mask_(count, bitidx);
+ mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields);
+ size_t expected = mi_atomic_load_relaxed(&bitmap[idx]);
+ do {
+ if ((expected & mask) != 0) return false;
+ }
+ while (!mi_atomic_cas_strong_acq_rel(&bitmap[idx], &expected, expected | mask));
+ mi_assert_internal((expected & mask) == 0);
+ return true;
+}
+
+
+bool _mi_bitmap_is_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
+ return mi_bitmap_is_claimedx(bitmap, bitmap_fields, count, bitmap_idx, NULL);
+}
+
+bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
+ bool any_ones;
+ mi_bitmap_is_claimedx(bitmap, bitmap_fields, count, bitmap_idx, &any_ones);
+ return any_ones;
+}
+
+
+//--------------------------------------------------------------------------
+// the `_across` functions work on bitmaps where sequences can cross over
+// between the fields. This is used in arena allocation
+//--------------------------------------------------------------------------
+
+// Try to atomically claim a sequence of `count` bits starting from the field
+// at `idx` in `bitmap` and crossing into subsequent fields. Returns `true` on success.
+// Only needs to consider crossing into the next fields (see `mi_bitmap_try_find_from_claim_across`)
+static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx)
+{
+ mi_assert_internal(bitmap_idx != NULL);
+
+ // check initial trailing zeros
+ mi_bitmap_field_t* field = &bitmap[idx];
+ size_t map = mi_atomic_load_relaxed(field);
+ const size_t initial = mi_clz(map); // count of initial zeros starting at idx
+ mi_assert_internal(initial <= MI_BITMAP_FIELD_BITS);
+ if (initial == 0) return false;
+ if (initial >= count) return _mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx); // no need to cross fields (this case won't happen for us)
+ if (_mi_divide_up(count - initial, MI_BITMAP_FIELD_BITS) >= (bitmap_fields - idx)) return false; // not enough entries
+
+ // scan ahead
+ size_t found = initial;
+ size_t mask = 0; // mask bits for the final field
+ while(found < count) {
+ field++;
+ map = mi_atomic_load_relaxed(field);
+ const size_t mask_bits = (found + MI_BITMAP_FIELD_BITS <= count ? MI_BITMAP_FIELD_BITS : (count - found));
+ mi_assert_internal(mask_bits > 0 && mask_bits <= MI_BITMAP_FIELD_BITS);
+ mask = mi_bitmap_mask_(mask_bits, 0);
+ if ((map & mask) != 0) return false; // some part is already claimed
+ found += mask_bits;
+ }
+ mi_assert_internal(field < &bitmap[bitmap_fields]);
+
+  // we found a range of contiguous zeros up to the final field; `mask` now holds the claim mask for the final field
+ // now try to claim the range atomically
+ mi_bitmap_field_t* const final_field = field;
+ const size_t final_mask = mask;
+ mi_bitmap_field_t* const initial_field = &bitmap[idx];
+ const size_t initial_idx = MI_BITMAP_FIELD_BITS - initial;
+ const size_t initial_mask = mi_bitmap_mask_(initial, initial_idx);
+
+ // initial field
+ size_t newmap;
+ field = initial_field;
+ map = mi_atomic_load_relaxed(field);
+ do {
+ newmap = (map | initial_mask);
+ if ((map & initial_mask) != 0) { goto rollback; };
+ } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));
+
+ // intermediate fields
+ while (++field < final_field) {
+ newmap = MI_BITMAP_FIELD_FULL;
+ map = 0;
+ if (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)) { goto rollback; }
+ }
+
+ // final field
+ mi_assert_internal(field == final_field);
+ map = mi_atomic_load_relaxed(field);
+ do {
+ newmap = (map | final_mask);
+ if ((map & final_mask) != 0) { goto rollback; }
+ } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));
+
+ // claimed!
+ *bitmap_idx = mi_bitmap_index_create(idx, initial_idx);
+ return true;
+
+rollback:
+ // roll back intermediate fields
+ // (we just failed to claim `field` so decrement first)
+ while (--field > initial_field) {
+ newmap = 0;
+ map = MI_BITMAP_FIELD_FULL;
+ mi_assert_internal(mi_atomic_load_relaxed(field) == map);
+ mi_atomic_store_release(field, newmap);
+ }
+ if (field == initial_field) { // (if we failed on the initial field, `field + 1 == initial_field`)
+ map = mi_atomic_load_relaxed(field);
+ do {
+ mi_assert_internal((map & initial_mask) == initial_mask);
+ newmap = (map & ~initial_mask);
+ } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap));
+ }
+ // retry? (we make a recursive call instead of goto to be able to use const declarations)
+ if (retries <= 2) {
+ return mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, retries+1, bitmap_idx);
+ }
+ else {
+ return false;
+ }
+}
+
+
+// Find `count` bits of zeros and set them to 1 atomically; returns `true` on success.
+// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
+bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx) {
+ mi_assert_internal(count > 0);
+ if (count <= 2) {
+ // we don't bother with crossover fields for small counts
+ return _mi_bitmap_try_find_from_claim(bitmap, bitmap_fields, start_field_idx, count, bitmap_idx);
+ }
+
+ // visit the fields
+ size_t idx = start_field_idx;
+ for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) {
+ if (idx >= bitmap_fields) { idx = 0; } // wrap
+ // first try to claim inside a field
+ if (count <= MI_BITMAP_FIELD_BITS) {
+ if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) {
+ return true;
+ }
+ }
+ // if that fails, then try to claim across fields
+ if (mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, 0, bitmap_idx)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Helper for masks across fields; returns the mid count, post_mask may be 0
+static size_t mi_bitmap_mask_across(mi_bitmap_index_t bitmap_idx, size_t bitmap_fields, size_t count, size_t* pre_mask, size_t* mid_mask, size_t* post_mask) {
+ MI_UNUSED(bitmap_fields);
+ const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
+ if mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS) {
+ *pre_mask = mi_bitmap_mask_(count, bitidx);
+ *mid_mask = 0;
+ *post_mask = 0;
+ mi_assert_internal(mi_bitmap_index_field(bitmap_idx) < bitmap_fields);
+ return 0;
+ }
+ else {
+ const size_t pre_bits = MI_BITMAP_FIELD_BITS - bitidx;
+ mi_assert_internal(pre_bits < count);
+ *pre_mask = mi_bitmap_mask_(pre_bits, bitidx);
+ count -= pre_bits;
+ const size_t mid_count = (count / MI_BITMAP_FIELD_BITS);
+ *mid_mask = MI_BITMAP_FIELD_FULL;
+ count %= MI_BITMAP_FIELD_BITS;
+ *post_mask = (count==0 ? 0 : mi_bitmap_mask_(count, 0));
+ mi_assert_internal(mi_bitmap_index_field(bitmap_idx) + mid_count + (count==0 ? 0 : 1) < bitmap_fields);
+ return mid_count;
+ }
+}
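+
+// Worked example (64-bit fields): for `bitidx = 60` and `count = 70`:
+//   pre_mask  covers bits 60..63 of the first field  (pre_bits = 4)
+//   mid_count = (70 - 4) / 64 = 1 full middle field   (mid_mask = MI_BITMAP_FIELD_FULL)
+//   post_mask covers bits 0..1 of the final field     ((70 - 4) % 64 = 2)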
+
+// Set `count` bits at `bitmap_idx` to 0 atomically
+// Returns `true` if all `count` bits were 1 previously.
+bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
+ size_t idx = mi_bitmap_index_field(bitmap_idx);
+ size_t pre_mask;
+ size_t mid_mask;
+ size_t post_mask;
+ size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
+ bool all_one = true;
+ mi_bitmap_field_t* field = &bitmap[idx];
+ size_t prev = mi_atomic_and_acq_rel(field++, ~pre_mask); // clear first part
+ if ((prev & pre_mask) != pre_mask) all_one = false;
+ while(mid_count-- > 0) {
+ prev = mi_atomic_and_acq_rel(field++, ~mid_mask); // clear mid part
+ if ((prev & mid_mask) != mid_mask) all_one = false;
+ }
+ if (post_mask!=0) {
+ prev = mi_atomic_and_acq_rel(field, ~post_mask); // clear end part
+ if ((prev & post_mask) != post_mask) all_one = false;
+ }
+ return all_one;
+}
+
+// Set `count` bits at `bitmap_idx` to 1 atomically
+// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
+bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero) {
+ size_t idx = mi_bitmap_index_field(bitmap_idx);
+ size_t pre_mask;
+ size_t mid_mask;
+ size_t post_mask;
+ size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
+ bool all_zero = true;
+ bool any_zero = false;
+ _Atomic(size_t)*field = &bitmap[idx];
+ size_t prev = mi_atomic_or_acq_rel(field++, pre_mask);
+ if ((prev & pre_mask) != 0) all_zero = false;
+ if ((prev & pre_mask) != pre_mask) any_zero = true;
+ while (mid_count-- > 0) {
+ prev = mi_atomic_or_acq_rel(field++, mid_mask);
+ if ((prev & mid_mask) != 0) all_zero = false;
+ if ((prev & mid_mask) != mid_mask) any_zero = true;
+ }
+ if (post_mask!=0) {
+ prev = mi_atomic_or_acq_rel(field, post_mask);
+ if ((prev & post_mask) != 0) all_zero = false;
+ if ((prev & post_mask) != post_mask) any_zero = true;
+ }
+ if (pany_zero != NULL) { *pany_zero = any_zero; }
+ return all_zero;
+}
+
+
+// Returns `true` if all `count` bits were 1.
+// `any_ones` is `true` if there was at least one bit set to one.
+static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_ones) {
+ size_t idx = mi_bitmap_index_field(bitmap_idx);
+ size_t pre_mask;
+ size_t mid_mask;
+ size_t post_mask;
+ size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask);
+ bool all_ones = true;
+ bool any_ones = false;
+ mi_bitmap_field_t* field = &bitmap[idx];
+ size_t prev = mi_atomic_load_relaxed(field++);
+ if ((prev & pre_mask) != pre_mask) all_ones = false;
+ if ((prev & pre_mask) != 0) any_ones = true;
+ while (mid_count-- > 0) {
+ prev = mi_atomic_load_relaxed(field++);
+ if ((prev & mid_mask) != mid_mask) all_ones = false;
+ if ((prev & mid_mask) != 0) any_ones = true;
+ }
+ if (post_mask!=0) {
+ prev = mi_atomic_load_relaxed(field);
+ if ((prev & post_mask) != post_mask) all_ones = false;
+ if ((prev & post_mask) != 0) any_ones = true;
+ }
+ if (pany_ones != NULL) { *pany_ones = any_ones; }
+ return all_ones;
+}
+
+bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
+ return mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, NULL);
+}
+
+bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
+ bool any_ones;
+ mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, &any_ones);
+ return any_ones;
+}
diff --git a/src/bitmap.h b/src/bitmap.h
new file mode 100644
index 0000000..0a765c7
--- /dev/null
+++ b/src/bitmap.h
@@ -0,0 +1,115 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2019-2023 Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+/* ----------------------------------------------------------------------------
+Concurrent bitmap that can set/reset sequences of bits atomically,
+represented as an array of fields where each field is a machine word (`size_t`)
+
+There are two APIs; the standard one cannot have sequences that cross
+between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS).
+(This is used in region allocation)
+
+The `_across` postfixed functions do allow sequences that can cross over
+between the fields. (This is used in arena allocation)
+---------------------------------------------------------------------------- */
+#pragma once
+#ifndef MI_BITMAP_H
+#define MI_BITMAP_H
+
+/* -----------------------------------------------------------
+ Bitmap definition
+----------------------------------------------------------- */
+
+#define MI_BITMAP_FIELD_BITS (8*MI_SIZE_SIZE)
+#define MI_BITMAP_FIELD_FULL (~((size_t)0)) // all bits set
+
+// An atomic bitmap of `size_t` fields
+typedef _Atomic(size_t) mi_bitmap_field_t;
+typedef mi_bitmap_field_t* mi_bitmap_t;
+
+// A bitmap index is the index of the bit in a bitmap.
+typedef size_t mi_bitmap_index_t;
+
+// Create a bit index.
+static inline mi_bitmap_index_t mi_bitmap_index_create(size_t idx, size_t bitidx) {
+ mi_assert_internal(bitidx < MI_BITMAP_FIELD_BITS);
+ return (idx*MI_BITMAP_FIELD_BITS) + bitidx;
+}
+
+// Create a bit index.
+static inline mi_bitmap_index_t mi_bitmap_index_create_from_bit(size_t full_bitidx) {
+ return mi_bitmap_index_create(full_bitidx / MI_BITMAP_FIELD_BITS, full_bitidx % MI_BITMAP_FIELD_BITS);
+}
+
+// Get the field index from a bit index.
+static inline size_t mi_bitmap_index_field(mi_bitmap_index_t bitmap_idx) {
+ return (bitmap_idx / MI_BITMAP_FIELD_BITS);
+}
+
+// Get the bit index in a bitmap field
+static inline size_t mi_bitmap_index_bit_in_field(mi_bitmap_index_t bitmap_idx) {
+ return (bitmap_idx % MI_BITMAP_FIELD_BITS);
+}
+
+// Get the full bit index
+static inline size_t mi_bitmap_index_bit(mi_bitmap_index_t bitmap_idx) {
+ return bitmap_idx;
+}
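+
+// Example: mi_bitmap_index_create(2, 5) == 2*MI_BITMAP_FIELD_BITS + 5 (bit 133 on
+// 64-bit); mi_bitmap_index_field and mi_bitmap_index_bit_in_field recover (2, 5).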
+
+/* -----------------------------------------------------------
+ Claim a bit sequence atomically
+----------------------------------------------------------- */
+
+// Try to atomically claim a sequence of `count` bits in a single
+// field at `idx` in `bitmap`. Returns `true` on success.
+bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx);
+
+// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
+// For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields.
+bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx);
+
+// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fulfilled
+typedef bool (mi_cdecl *mi_bitmap_pred_fun_t)(mi_bitmap_index_t bitmap_idx, void* pred_arg);
+bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_pred_fun_t pred_fun, void* pred_arg, mi_bitmap_index_t* bitmap_idx);
+
+// Set `count` bits at `bitmap_idx` to 0 atomically
+// Returns `true` if all `count` bits were 1 previously.
+bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
+
+// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically.
+// Returns `true` on success, i.e. when all `count` bits were 0 previously.
+bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
+
+// Set `count` bits at `bitmap_idx` to 1 atomically
+// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
+bool _mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero);
+
+bool _mi_bitmap_is_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
+bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
+
+
+//--------------------------------------------------------------------------
+// the `_across` functions work on bitmaps where sequences can cross over
+// between the fields. This is used in arena allocation
+//--------------------------------------------------------------------------
+
+// Find `count` bits of zeros and set them to 1 atomically; returns `true` on success.
+// Starts at idx, and wraps around to search in all `bitmap_fields` fields.
+bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx);
+
+// Set `count` bits at `bitmap_idx` to 0 atomically
+// Returns `true` if all `count` bits were 1 previously.
+bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
+
+// Set `count` bits at `bitmap_idx` to 1 atomically
+// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
+bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero);
+
+bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
+bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx);
+
+#endif
diff --git a/src/bitmap.inc.c b/src/bitmap.inc.c
deleted file mode 100644
index 2d6df46..0000000
--- a/src/bitmap.inc.c
+++ /dev/null
@@ -1,247 +0,0 @@
-/* ----------------------------------------------------------------------------
-Copyright (c) 2019, Microsoft Research, Daan Leijen
-This is free software; you can redistribute it and/or modify it under the
-terms of the MIT license. A copy of the license can be found in the file
-"LICENSE" at the root of this distribution.
------------------------------------------------------------------------------*/
-
-/* ----------------------------------------------------------------------------
-This file is meant to be included in other files for efficiency.
-It implements a bitmap that can set/reset sequences of bits atomically
-and is used to concurrently claim memory ranges.
-
-A bitmap is an array of fields where each field is a machine word (`uintptr_t`)
-
-A current limitation is that the bit sequences cannot cross fields
-and that the sequence must be smaller or equal to the bits in a field.
----------------------------------------------------------------------------- */
-#pragma once
-#ifndef MI_BITMAP_C
-#define MI_BITMAP_C
-
-#include "mimalloc.h"
-#include "mimalloc-internal.h"
-
-/* -----------------------------------------------------------
- Bitmap definition
------------------------------------------------------------ */
-
-#define MI_BITMAP_FIELD_BITS (8*MI_INTPTR_SIZE)
-#define MI_BITMAP_FIELD_FULL (~((uintptr_t)0)) // all bits set
-
-// An atomic bitmap of `uintptr_t` fields
-typedef _Atomic(uintptr_t) mi_bitmap_field_t;
-typedef mi_bitmap_field_t* mi_bitmap_t;
-
-// A bitmap index is the index of the bit in a bitmap.
-typedef size_t mi_bitmap_index_t;
-
-// Create a bit index.
-static inline mi_bitmap_index_t mi_bitmap_index_create(size_t idx, size_t bitidx) {
- mi_assert_internal(bitidx < MI_BITMAP_FIELD_BITS);
- return (idx*MI_BITMAP_FIELD_BITS) + bitidx;
-}
-
-// Get the field index from a bit index.
-static inline size_t mi_bitmap_index_field(mi_bitmap_index_t bitmap_idx) {
- return (bitmap_idx / MI_BITMAP_FIELD_BITS);
-}
-
-// Get the bit index in a bitmap field
-static inline size_t mi_bitmap_index_bit_in_field(mi_bitmap_index_t bitmap_idx) {
- return (bitmap_idx % MI_BITMAP_FIELD_BITS);
-}
-
-// Get the full bit index
-static inline size_t mi_bitmap_index_bit(mi_bitmap_index_t bitmap_idx) {
- return bitmap_idx;
-}
-
-
-// The bit mask for a given number of blocks at a specified bit index.
-static inline uintptr_t mi_bitmap_mask_(size_t count, size_t bitidx) {
- mi_assert_internal(count + bitidx <= MI_BITMAP_FIELD_BITS);
- if (count == MI_BITMAP_FIELD_BITS) return MI_BITMAP_FIELD_FULL;
- return ((((uintptr_t)1 << count) - 1) << bitidx);
-}
-
-
-/* -----------------------------------------------------------
- Use bit scan forward/reverse to quickly find the first zero bit if it is available
------------------------------------------------------------ */
-#if defined(_MSC_VER)
-#define MI_HAVE_BITSCAN
-#include <intrin.h>
-#ifndef MI_64
-#if MI_INTPTR_SIZE==8
-#define MI_64(f) f##64
-#else
-#define MI_64(f) f
-#endif
-#endif
-
-static inline size_t mi_bsf(uintptr_t x) {
- if (x==0) return 8*MI_INTPTR_SIZE;
- DWORD idx;
- MI_64(_BitScanForward)(&idx, x);
- return idx;
-}
-static inline size_t mi_bsr(uintptr_t x) {
- if (x==0) return 8*MI_INTPTR_SIZE;
- DWORD idx;
- MI_64(_BitScanReverse)(&idx, x);
- return idx;
-}
-#elif defined(__GNUC__) || defined(__clang__)
-#include <limits.h> // LONG_MAX
-#define MI_HAVE_BITSCAN
-#if (INTPTR_MAX == LONG_MAX)
-# define MI_L(x) x##l
-#else
-# define MI_L(x) x##ll
-#endif
-static inline size_t mi_bsf(uintptr_t x) {
- return (x==0 ? 8*MI_INTPTR_SIZE : MI_L(__builtin_ctz)(x));
-}
-static inline size_t mi_bsr(uintptr_t x) {
- return (x==0 ? 8*MI_INTPTR_SIZE : (8*MI_INTPTR_SIZE - 1) - MI_L(__builtin_clz)(x));
-}
-#endif
-
-/* -----------------------------------------------------------
- Claim a bit sequence atomically
------------------------------------------------------------ */
-
-// Try to atomically claim a sequence of `count` bits at in `idx`
-// in the bitmap field. Returns `true` on success.
-static inline bool mi_bitmap_try_claim_field(mi_bitmap_t bitmap, size_t bitmap_fields, const size_t count, mi_bitmap_index_t bitmap_idx) {
- const size_t idx = mi_bitmap_index_field(bitmap_idx);
- const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
- const uintptr_t mask = mi_bitmap_mask_(count, bitidx);
- mi_assert_internal(bitmap_fields > idx); UNUSED(bitmap_fields);
- mi_assert_internal(bitidx + count <= MI_BITMAP_FIELD_BITS);
-
- uintptr_t field = mi_atomic_load_relaxed(&bitmap[idx]);
- if ((field & mask) == 0) { // free?
- if (mi_atomic_cas_strong_acq_rel(&bitmap[idx], &field, (field|mask))) {
- // claimed!
- return true;
- }
- }
- return false;
-}
-
-
-// Try to atomically claim a sequence of `count` bits in a single
-// field at `idx` in `bitmap`. Returns `true` on success.
-static inline bool mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx)
-{
- mi_assert_internal(bitmap_idx != NULL);
- _Atomic(uintptr_t)* field = &bitmap[idx];
- uintptr_t map = mi_atomic_load_relaxed(field);
- if (map==MI_BITMAP_FIELD_FULL) return false; // short cut
-
- // search for 0-bit sequence of length count
- const uintptr_t mask = mi_bitmap_mask_(count, 0);
- const size_t bitidx_max = MI_BITMAP_FIELD_BITS - count;
-
-#ifdef MI_HAVE_BITSCAN
- size_t bitidx = mi_bsf(~map); // quickly find the first zero bit if possible
-#else
- size_t bitidx = 0; // otherwise start at 0
-#endif
- uintptr_t m = (mask << bitidx); // invariant: m == mask shifted by bitidx
-
- // scan linearly for a free range of zero bits
- while (bitidx <= bitidx_max) {
- if ((map & m) == 0) { // are the mask bits free at bitidx?
- mi_assert_internal((m >> bitidx) == mask); // no overflow?
- const uintptr_t newmap = map | m;
- mi_assert_internal((newmap^map) >> bitidx == mask);
- if (!mi_atomic_cas_weak_acq_rel(field, &map, newmap)) { // TODO: use strong cas here?
- // no success, another thread claimed concurrently.. keep going (with updated `map`)
- continue;
- }
- else {
- // success, we claimed the bits!
- *bitmap_idx = mi_bitmap_index_create(idx, bitidx);
- return true;
- }
- }
- else {
- // on to the next bit range
-#ifdef MI_HAVE_BITSCAN
- const size_t shift = (count == 1 ? 1 : mi_bsr(map & m) - bitidx + 1);
- mi_assert_internal(shift > 0 && shift <= count);
-#else
- const size_t shift = 1;
-#endif
- bitidx += shift;
- m <<= shift;
- }
- }
- // no bits found
- return false;
-}
-
-
-// Find `count` bits of 0 and set them to 1 atomically; returns `true` on success.
-// For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never span fields.
-static inline bool mi_bitmap_try_find_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t* bitmap_idx) {
- for (size_t idx = 0; idx < bitmap_fields; idx++) {
- if (mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) {
- return true;
- }
- }
- return false;
-}
-
-// Set `count` bits at `bitmap_idx` to 0 atomically
-// Returns `true` if all `count` bits were 1 previously.
-static inline bool mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
- const size_t idx = mi_bitmap_index_field(bitmap_idx);
- const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
- const uintptr_t mask = mi_bitmap_mask_(count, bitidx);
- mi_assert_internal(bitmap_fields > idx); UNUSED(bitmap_fields);
- // mi_assert_internal((bitmap[idx] & mask) == mask);
- uintptr_t prev = mi_atomic_and_acq_rel(&bitmap[idx], ~mask);
- return ((prev & mask) == mask);
-}
-
-
-// Set `count` bits at `bitmap_idx` to 1 atomically
-// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit.
-static inline bool mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero) {
- const size_t idx = mi_bitmap_index_field(bitmap_idx);
- const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
- const uintptr_t mask = mi_bitmap_mask_(count, bitidx);
- mi_assert_internal(bitmap_fields > idx); UNUSED(bitmap_fields);
- //mi_assert_internal(any_zero != NULL || (bitmap[idx] & mask) == 0);
- uintptr_t prev = mi_atomic_or_acq_rel(&bitmap[idx], mask);
- if (any_zero != NULL) *any_zero = ((prev & mask) != mask);
- return ((prev & mask) == 0);
-}
-
-// Returns `true` if all `count` bits were 1. `any_ones` is `true` if there was at least one bit set to one.
-static inline bool mi_bitmap_is_claimedx(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_ones) {
- const size_t idx = mi_bitmap_index_field(bitmap_idx);
- const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx);
- const uintptr_t mask = mi_bitmap_mask_(count, bitidx);
- mi_assert_internal(bitmap_fields > idx); UNUSED(bitmap_fields);
- uintptr_t field = mi_atomic_load_relaxed(&bitmap[idx]);
- if (any_ones != NULL) *any_ones = ((field & mask) != 0);
- return ((field & mask) == mask);
-}
-
-static inline bool mi_bitmap_is_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
- return mi_bitmap_is_claimedx(bitmap, bitmap_fields, count, bitmap_idx, NULL);
-}
-
-static inline bool mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) {
- bool any_ones;
- mi_bitmap_is_claimedx(bitmap, bitmap_fields, count, bitmap_idx, &any_ones);
- return any_ones;
-}
-
-
-#endif
diff --git a/src/heap.c b/src/heap.c
index b1079e1..58520dd 100644
--- a/src/heap.c
+++ b/src/heap.c
@@ -1,13 +1,14 @@
/*----------------------------------------------------------------------------
-Copyright (c) 2018, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
-#include "mimalloc-internal.h"
-#include "mimalloc-atomic.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h" // mi_prim_get_default_heap
 #include <string.h>  // memset, memcpy
@@ -30,15 +31,18 @@ static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void
// visit all pages
#if MI_DEBUG>1
size_t total = heap->page_count;
- #endif
size_t count = 0;
+ #endif
+
for (size_t i = 0; i <= MI_BIN_FULL; i++) {
mi_page_queue_t* pq = &heap->pages[i];
mi_page_t* page = pq->first;
while(page != NULL) {
mi_page_t* next = page->next; // save next in case the page gets removed from the queue
mi_assert_internal(mi_page_heap(page) == heap);
+ #if MI_DEBUG>1
count++;
+ #endif
if (!fn(heap, pq, page, arg1, arg2)) return false;
page = next; // and continue
}
@@ -50,9 +54,9 @@ static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void
#if MI_DEBUG>=2
static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
- UNUSED(arg1);
- UNUSED(arg2);
- UNUSED(pq);
+ MI_UNUSED(arg1);
+ MI_UNUSED(arg2);
+ MI_UNUSED(pq);
mi_assert_internal(mi_page_heap(page) == heap);
mi_segment_t* segment = _mi_page_segment(page);
mi_assert_internal(segment->thread_id == heap->thread_id);
@@ -86,13 +90,13 @@ typedef enum mi_collect_e {
static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg_collect, void* arg2 ) {
- UNUSED(arg2);
- UNUSED(heap);
+ MI_UNUSED(arg2);
+ MI_UNUSED(heap);
mi_assert_internal(mi_heap_page_is_valid(heap, pq, page, NULL, NULL));
mi_collect_t collect = *((mi_collect_t*)arg_collect);
_mi_page_free_collect(page, collect >= MI_FORCE);
if (mi_page_all_free(page)) {
- // no more used blocks, free the page.
+ // no more used blocks, free the page.
// note: this will free retired pages as well.
_mi_page_free(page, pq, collect >= MI_FORCE);
}
@@ -104,57 +108,65 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t
}
static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
- UNUSED(arg1);
- UNUSED(arg2);
- UNUSED(heap);
- UNUSED(pq);
+ MI_UNUSED(arg1);
+ MI_UNUSED(arg2);
+ MI_UNUSED(heap);
+ MI_UNUSED(pq);
_mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
return true; // don't break
}
static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
{
- if (!mi_heap_is_initialized(heap)) return;
- _mi_deferred_free(heap, collect >= MI_FORCE);
+ if (heap==NULL || !mi_heap_is_initialized(heap)) return;
+
+ const bool force = collect >= MI_FORCE;
+ _mi_deferred_free(heap, force);
// note: never reclaim on collect but leave it to threads that need storage to reclaim
- if (
- #ifdef NDEBUG
+ const bool force_main =
+ #ifdef NDEBUG
collect == MI_FORCE
- #else
+ #else
collect >= MI_FORCE
- #endif
- && _mi_is_main_thread() && mi_heap_is_backing(heap) && !heap->no_reclaim)
- {
+ #endif
+ && _mi_is_main_thread() && mi_heap_is_backing(heap) && !heap->no_reclaim;
+
+ if (force_main) {
// the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
// if all memory is freed by now, all segments should be freed.
_mi_abandoned_reclaim_all(heap, &heap->tld->segments);
}
-
+
// if abandoning, mark all pages to no longer add to delayed_free
if (collect == MI_ABANDON) {
mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL);
}
- // free thread delayed blocks.
+ // free all current thread delayed blocks.
// (if abandoning, after this there are no more thread-delayed references into the pages.)
- _mi_heap_delayed_free(heap);
+ _mi_heap_delayed_free_all(heap);
// collect retired pages
- _mi_heap_collect_retired(heap, collect >= MI_FORCE);
+ _mi_heap_collect_retired(heap, force);
// collect all pages owned by this thread
mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);
mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL );
- // collect segment caches
- if (collect >= MI_FORCE) {
+ // collect abandoned segments (in particular, purge expired parts of segments in the abandoned segment list)
+ // note: forced purge can be quite expensive if many threads are created/destroyed so we do not force on abandonment
+ _mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments);
+
+ // collect segment local caches
+ if (force) {
_mi_segment_thread_collect(&heap->tld->segments);
}
// collect regions on program-exit (or shared library unload)
- if (collect >= MI_FORCE && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
- _mi_mem_collect(&heap->tld->os);
+ if (force && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
+ _mi_thread_data_collect(); // collect thread data cache
+ _mi_arena_collect(true /* force purge */, &heap->tld->stats);
}
}
@@ -167,7 +179,7 @@ void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept {
}
void mi_collect(bool force) mi_attr_noexcept {
- mi_heap_collect(mi_get_default_heap(), force);
+ mi_heap_collect(mi_prim_get_default_heap(), force);
}
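
For orientation, a minimal sketch of how an application reaches this collection path through the public API (entry points as declared in `mimalloc.h`; the forced/lazy distinction mirrors `mi_heap_collect_ex` above):

```c
#include <mimalloc.h>

int main(void) {
  void* p = mi_malloc(64);   // allocate from the thread-local default heap
  mi_free(p);
  mi_collect(false);         // lazy: free retired pages and thread-delayed blocks
  mi_collect(true);          // forced: additionally purge segment caches and, on
                             // the main thread, reclaim abandoned segments
  return 0;
}
```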
@@ -177,9 +189,14 @@ void mi_collect(bool force) mi_attr_noexcept {
mi_heap_t* mi_heap_get_default(void) {
mi_thread_init();
- return mi_get_default_heap();
+ return mi_prim_get_default_heap();
}
+static bool mi_heap_is_default(const mi_heap_t* heap) {
+ return (heap == mi_prim_get_default_heap());
+}
+
+
mi_heap_t* mi_heap_get_backing(void) {
mi_heap_t* heap = mi_heap_get_default();
mi_assert_internal(heap!=NULL);
@@ -189,15 +206,16 @@ mi_heap_t* mi_heap_get_backing(void) {
return bheap;
}
-mi_heap_t* mi_heap_new(void) {
+mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
mi_heap_t* bheap = mi_heap_get_backing();
mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t); // todo: OS allocate in secure mode?
- if (heap==NULL) return NULL;
- memcpy(heap, &_mi_heap_empty, sizeof(mi_heap_t));
+ if (heap == NULL) return NULL;
+ _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
heap->tld = bheap->tld;
heap->thread_id = _mi_thread_id();
+ heap->arena_id = arena_id;
_mi_random_split(&bheap->random, &heap->random);
- heap->cookie = _mi_heap_random_next(heap) | 1;
+ heap->cookie = _mi_heap_random_next(heap) | 1;
heap->keys[0] = _mi_heap_random_next(heap);
heap->keys[1] = _mi_heap_random_next(heap);
heap->no_reclaim = true; // don't reclaim abandoned pages or otherwise destroy is unsafe
@@ -207,19 +225,25 @@ mi_heap_t* mi_heap_new(void) {
return heap;
}
+mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
+ return mi_heap_new_in_arena(_mi_arena_id_none());
+}
+
+bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) {
+ return _mi_arena_memid_is_suitable(memid, heap->arena_id);
+}
+
uintptr_t _mi_heap_random_next(mi_heap_t* heap) {
return _mi_random_next(&heap->random);
}
// zero out the page queues
static void mi_heap_reset_pages(mi_heap_t* heap) {
+ mi_assert_internal(heap != NULL);
mi_assert_internal(mi_heap_is_initialized(heap));
// TODO: copy full empty heap instead?
memset(&heap->pages_free_direct, 0, sizeof(heap->pages_free_direct));
-#ifdef MI_MEDIUM_DIRECT
- memset(&heap->pages_free_medium, 0, sizeof(heap->pages_free_medium));
-#endif
- memcpy(&heap->pages, &_mi_heap_empty.pages, sizeof(heap->pages));
+ _mi_memcpy_aligned(&heap->pages, &_mi_heap_empty.pages, sizeof(heap->pages));
heap->thread_delayed_free = NULL;
heap->page_count = 0;
}
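
A sketch of how the new `mi_heap_new_in_arena` entry point is meant to be used. It assumes the companion `mi_reserve_os_memory_ex` API (which can return an exclusive `mi_arena_id_t`); the size and flags below are illustrative only:

```c
#include <mimalloc.h>

// Pin a heap to a 64 MiB exclusive arena so its allocations come from that arena.
void arena_heap_example(void) {
  mi_arena_id_t arena_id;
  if (mi_reserve_os_memory_ex(64 * 1024 * 1024, true /* commit */,
                              true /* allow large pages */,
                              true /* exclusive */, &arena_id) != 0) return;
  mi_heap_t* heap = mi_heap_new_in_arena(arena_id);
  if (heap == NULL) return;
  void* p = mi_heap_malloc(heap, 128);  // satisfied from the reserved arena
  mi_free(p);
  mi_heap_delete(heap);                 // remaining pages migrate to the backing heap
}
```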
@@ -228,6 +252,7 @@ static void mi_heap_reset_pages(mi_heap_t* heap) {
static void mi_heap_free(mi_heap_t* heap) {
mi_assert(heap != NULL);
mi_assert_internal(mi_heap_is_initialized(heap));
+ if (heap==NULL || !mi_heap_is_initialized(heap)) return;
if (mi_heap_is_backing(heap)) return; // don't free the backing heap
// reset default
@@ -238,7 +263,7 @@ static void mi_heap_free(mi_heap_t* heap) {
// remove ourselves from the thread local heaps list
// linear search but we expect the number of heaps to be relatively small
mi_heap_t* prev = NULL;
- mi_heap_t* curr = heap->tld->heaps;
+ mi_heap_t* curr = heap->tld->heaps;
while (curr != heap && curr != NULL) {
prev = curr;
curr = curr->next;
@@ -260,29 +285,32 @@ static void mi_heap_free(mi_heap_t* heap) {
----------------------------------------------------------- */
static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
- UNUSED(arg1);
- UNUSED(arg2);
- UNUSED(heap);
- UNUSED(pq);
+ MI_UNUSED(arg1);
+ MI_UNUSED(arg2);
+ MI_UNUSED(heap);
+ MI_UNUSED(pq);
// ensure no more thread_delayed_free will be added
_mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false);
// stats
const size_t bsize = mi_page_block_size(page);
- if (bsize > MI_LARGE_OBJ_SIZE_MAX) {
- if (bsize > MI_HUGE_OBJ_SIZE_MAX) {
- _mi_stat_decrease(&heap->tld->stats.giant, bsize);
+ if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) {
+ if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+ mi_heap_stat_decrease(heap, large, bsize);
}
else {
- _mi_stat_decrease(&heap->tld->stats.huge, bsize);
+ mi_heap_stat_decrease(heap, huge, bsize);
}
}
-#if (MI_STAT>1)
+#if (MI_STAT)
_mi_page_free_collect(page, false); // update used count
const size_t inuse = page->used;
if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
- mi_heap_stat_decrease(heap, normal[_mi_bin(bsize)], inuse);
+ mi_heap_stat_decrease(heap, normal, bsize * inuse);
+#if (MI_STAT>1)
+ mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], inuse);
+#endif
}
mi_heap_stat_decrease(heap, malloc, bsize * inuse); // todo: off for aligned blocks...
#endif
@@ -305,38 +333,64 @@ void _mi_heap_destroy_pages(mi_heap_t* heap) {
mi_heap_reset_pages(heap);
}
+#if MI_TRACK_HEAP_DESTROY
+static bool mi_cdecl mi_heap_track_block_free(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) {
+ MI_UNUSED(heap); MI_UNUSED(area); MI_UNUSED(arg); MI_UNUSED(block_size);
+ mi_track_free_size(block,mi_usable_size(block));
+ return true;
+}
+#endif
+
void mi_heap_destroy(mi_heap_t* heap) {
mi_assert(heap != NULL);
mi_assert(mi_heap_is_initialized(heap));
mi_assert(heap->no_reclaim);
mi_assert_expensive(mi_heap_is_valid(heap));
- if (!mi_heap_is_initialized(heap)) return;
+ if (heap==NULL || !mi_heap_is_initialized(heap)) return;
if (!heap->no_reclaim) {
// don't free in case it may contain reclaimed pages
mi_heap_delete(heap);
}
else {
+ // track all blocks as freed
+ #if MI_TRACK_HEAP_DESTROY
+ mi_heap_visit_blocks(heap, true, mi_heap_track_block_free, NULL);
+ #endif
// free all pages
_mi_heap_destroy_pages(heap);
mi_heap_free(heap);
}
}
-
+// forcefully destroy all heaps in the current thread
+void _mi_heap_unsafe_destroy_all(void) {
+ mi_heap_t* bheap = mi_heap_get_backing();
+ mi_heap_t* curr = bheap->tld->heaps;
+ while (curr != NULL) {
+ mi_heap_t* next = curr->next;
+ if (curr->no_reclaim) {
+ mi_heap_destroy(curr);
+ }
+ else {
+ _mi_heap_destroy_pages(curr);
+ }
+ curr = next;
+ }
+}
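
The two tear-down paths in this file differ in a way worth illustrating: `mi_heap_delete` migrates live pages to the backing heap (outstanding pointers stay valid), while `mi_heap_destroy` frees every block at once and is only safe for heaps that never reclaimed abandoned pages. A minimal sketch:

```c
#include <mimalloc.h>

void teardown_example(void) {
  // Safe delete: blocks survive, absorbed by the backing heap.
  mi_heap_t* h1 = mi_heap_new();
  void* keep = mi_heap_malloc(h1, 32);
  mi_heap_delete(h1);
  mi_free(keep);         // still valid after mi_heap_delete

  // Destroy: all blocks are released in one sweep; pointers become dangling.
  mi_heap_t* h2 = mi_heap_new();
  void* gone = mi_heap_malloc(h2, 32);
  mi_heap_destroy(h2);   // 'gone' must not be dereferenced or freed afterwards
  (void)gone;
}
```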
/* -----------------------------------------------------------
Safe Heap delete
----------------------------------------------------------- */
-// Tranfer the pages from one heap to the other
+// Transfer the pages from one heap to the other
static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
mi_assert_internal(heap!=NULL);
if (from==NULL || from->page_count == 0) return;
// reduce the size of the delayed frees
- _mi_heap_delayed_free(from);
-
- // transfer all pages by appending the queues; this will set a new heap field
+ _mi_heap_delayed_free_partial(from);
+
+ // transfer all pages by appending the queues; this will set a new heap field
// so threads may do delayed frees in either heap for a while.
// note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state
// so after this only the new heap will get delayed frees
@@ -349,15 +403,17 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
}
mi_assert_internal(from->page_count == 0);
- // and do outstanding delayed frees in the `from` heap
+ // and do outstanding delayed frees in the `from` heap
// note: be careful here as the `heap` field in all those pages no longer point to `from`,
- // turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls a
+ // turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls
// the regular `_mi_free_delayed_block` which is safe.
- _mi_heap_delayed_free(from);
- mi_assert_internal(from->thread_delayed_free == NULL);
+ _mi_heap_delayed_free_all(from);
+ #if !defined(_MSC_VER) || (_MSC_VER > 1900) // somehow the following line gives an error in VS2015, issue #353
+ mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_block_t,&from->thread_delayed_free) == NULL);
+ #endif
// and reset the `from` heap
- mi_heap_reset_pages(from);
+ mi_heap_reset_pages(from);
}
// Safe delete a heap without freeing any still allocated blocks in that heap.
@@ -366,7 +422,7 @@ void mi_heap_delete(mi_heap_t* heap)
mi_assert(heap != NULL);
mi_assert(mi_heap_is_initialized(heap));
mi_assert_expensive(mi_heap_is_valid(heap));
- if (!mi_heap_is_initialized(heap)) return;
+ if (heap==NULL || !mi_heap_is_initialized(heap)) return;
if (!mi_heap_is_backing(heap)) {
// transfer still used pages to the backing heap
@@ -381,10 +437,11 @@ void mi_heap_delete(mi_heap_t* heap)
}
mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
+ mi_assert(heap != NULL);
mi_assert(mi_heap_is_initialized(heap));
- if (!mi_heap_is_initialized(heap)) return NULL;
+ if (heap==NULL || !mi_heap_is_initialized(heap)) return NULL;
mi_assert_expensive(mi_heap_is_valid(heap));
- mi_heap_t* old = mi_get_default_heap();
+ mi_heap_t* old = mi_prim_get_default_heap();
_mi_heap_set_default_direct(heap);
return old;
}
@@ -402,20 +459,20 @@ static mi_heap_t* mi_heap_of_block(const void* p) {
mi_segment_t* segment = _mi_ptr_segment(p);
bool valid = (_mi_ptr_cookie(segment) == segment->cookie);
mi_assert_internal(valid);
- if (mi_unlikely(!valid)) return NULL;
+ if mi_unlikely(!valid) return NULL;
return mi_page_heap(_mi_segment_page_of(segment,p));
}
bool mi_heap_contains_block(mi_heap_t* heap, const void* p) {
mi_assert(heap != NULL);
- if (!mi_heap_is_initialized(heap)) return false;
+ if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
return (heap == mi_heap_of_block(p));
}
static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* p, void* vfound) {
- UNUSED(heap);
- UNUSED(pq);
+ MI_UNUSED(heap);
+ MI_UNUSED(pq);
bool* found = (bool*)vfound;
mi_segment_t* segment = _mi_page_segment(page);
void* start = _mi_page_start(segment, page, NULL);
@@ -426,7 +483,7 @@ static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa
bool mi_heap_check_owned(mi_heap_t* heap, const void* p) {
mi_assert(heap != NULL);
- if (!mi_heap_is_initialized(heap)) return false;
+ if (heap==NULL || !mi_heap_is_initialized(heap)) return false;
if (((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) return false; // only aligned pointers
bool found = false;
mi_heap_visit_pages(heap, &mi_heap_page_check_owned, (void*)p, &found);
@@ -434,7 +491,7 @@ bool mi_heap_check_owned(mi_heap_t* heap, const void* p) {
}
bool mi_check_owned(const void* p) {
- return mi_heap_check_owned(mi_get_default_heap(), p);
+ return mi_heap_check_owned(mi_prim_get_default_heap(), p);
}
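
Both ownership checks do a linear page walk, so they are intended for debugging rather than hot paths. A small usage sketch (note that `mi_check_owned` tests against the *default* heap only):

```c
#include <mimalloc.h>
#include <assert.h>

void owned_example(void) {
  mi_heap_t* heap = mi_heap_new();
  void* p = mi_heap_malloc(heap, 64);
  assert(mi_heap_check_owned(heap, p));  // allocated in 'heap'
  assert(!mi_check_owned(p));            // not owned by the default heap
  mi_heap_destroy(heap);
}
```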
/* -----------------------------------------------------------
@@ -462,13 +519,14 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v
if (page->used == 0) return true;
const size_t bsize = mi_page_block_size(page);
+ const size_t ubsize = mi_page_usable_block_size(page); // without padding
size_t psize;
uint8_t* pstart = _mi_page_start(_mi_page_segment(page), page, &psize);
if (page->capacity == 1) {
// optimize page with one block
mi_assert_internal(page->used == 1 && page->free == NULL);
- return visitor(mi_page_heap(page), area, pstart, bsize, arg);
+ return visitor(mi_page_heap(page), area, pstart, ubsize, arg);
}
// create a bitmap of free blocks.
@@ -476,9 +534,13 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v
uintptr_t free_map[MI_MAX_BLOCKS / sizeof(uintptr_t)];
memset(free_map, 0, sizeof(free_map));
+ #if MI_DEBUG>1
size_t free_count = 0;
+ #endif
for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) {
+ #if MI_DEBUG>1
free_count++;
+ #endif
mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize));
size_t offset = (uint8_t*)block - pstart;
mi_assert_internal(offset % bsize == 0);
@@ -491,7 +553,9 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v
mi_assert_internal(page->capacity == (free_count + page->used));
// walk through all blocks skipping the free ones
+ #if MI_DEBUG>1
size_t used_count = 0;
+ #endif
for (size_t i = 0; i < page->capacity; i++) {
size_t bitidx = (i / sizeof(uintptr_t));
size_t bit = i - (bitidx * sizeof(uintptr_t));
@@ -500,9 +564,11 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v
i += (sizeof(uintptr_t) - 1); // skip a run of free blocks
}
else if ((m & ((uintptr_t)1 << bit)) == 0) {
+ #if MI_DEBUG>1
used_count++;
+ #endif
uint8_t* block = pstart + (i * bsize);
- if (!visitor(mi_page_heap(page), area, block, bsize, arg)) return false;
+ if (!visitor(mi_page_heap(page), area, block, ubsize, arg)) return false;
}
}
mi_assert_internal(page->used == used_count);
@@ -513,17 +579,19 @@ typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_
static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* vfun, void* arg) {
- UNUSED(heap);
- UNUSED(pq);
+ MI_UNUSED(heap);
+ MI_UNUSED(pq);
mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun;
mi_heap_area_ex_t xarea;
const size_t bsize = mi_page_block_size(page);
+ const size_t ubsize = mi_page_usable_block_size(page);
xarea.page = page;
xarea.area.reserved = page->reserved * bsize;
xarea.area.committed = page->capacity * bsize;
xarea.area.blocks = _mi_page_start(_mi_page_segment(page), page, NULL);
- xarea.area.used = page->used;
- xarea.area.block_size = bsize;
+ xarea.area.used = page->used; // number of blocks in use (#553)
+ xarea.area.block_size = ubsize;
+ xarea.area.full_block_size = bsize;
return fun(heap, &xarea, arg);
}
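
The hunks above route both the usable size (`block_size`) and the padded size (the `full_block_size` field introduced here) to visitors. A sketch of a block visitor against the public `mi_heap_visit_blocks` interface; the visitor is first invoked once per area with `block == NULL`:

```c
#include <mimalloc.h>
#include <stdio.h>

static bool count_blocks(const mi_heap_t* heap, const mi_heap_area_t* area,
                         void* block, size_t block_size, void* arg) {
  (void)heap;
  if (block == NULL) return true;  // per-area callback, no specific block
  size_t* total = (size_t*)arg;
  *total += block_size;            // usable size, padding excluded
  printf("block %p: usable %zu (full %zu)\n",
         block, block_size, area->full_block_size);
  return true;                     // returning false stops the visit
}

void visit_example(mi_heap_t* heap) {
  size_t total = 0;
  mi_heap_visit_blocks(heap, true /* visit individual blocks */, &count_blocks, &total);
}
```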
diff --git a/src/init.c b/src/init.c
index de24f40..b1db14c 100644
--- a/src/init.c
+++ b/src/init.c
@@ -1,37 +1,43 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
-#include "mimalloc-internal.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/prim.h"
#include <string.h> // memcpy, memset
#include <stdlib.h> // atexit
+
// Empty page used to initialize the small free pages array
const mi_page_t _mi_page_empty = {
- 0, false, false, false, false,
+ 0, false, false, false,
0, // capacity
0, // reserved capacity
{ 0 }, // flags
false, // is_zero
0, // retire_expire
NULL, // free
- #if MI_ENCODE_FREELIST
- { 0, 0 },
- #endif
0, // used
0, // xblock_size
NULL, // local_free
- ATOMIC_VAR_INIT(0), // xthread_free
- ATOMIC_VAR_INIT(0), // xheap
+ #if (MI_PADDING || MI_ENCODE_FREELIST)
+ { 0, 0 },
+ #endif
+ MI_ATOMIC_VAR_INIT(0), // xthread_free
+ MI_ATOMIC_VAR_INIT(0), // xheap
NULL, NULL
+ #if MI_INTPTR_SIZE==8
+ , { 0 } // padding
+ #endif
};
#define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty)
+#if (MI_SMALL_WSIZE_MAX==128)
#if (MI_PADDING>0) && (MI_INTPTR_SIZE >= 8)
#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
#elif (MI_PADDING>0)
@@ -39,7 +45,9 @@ const mi_page_t _mi_page_empty = {
#else
#define MI_SMALL_PAGES_EMPTY { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY() }
#endif
-
+#else
+#error "define right initialization sizes corresponding to MI_SMALL_WSIZE_MAX"
+#endif
// Empty page queues for every bin
#define QNULL(sz) { NULL, NULL, (sz)*sizeof(uintptr_t) }
@@ -54,8 +62,8 @@ const mi_page_t _mi_page_empty = {
QNULL( 10240), QNULL( 12288), QNULL( 14336), QNULL( 16384), QNULL( 20480), QNULL( 24576), QNULL( 28672), QNULL( 32768), /* 56 */ \
QNULL( 40960), QNULL( 49152), QNULL( 57344), QNULL( 65536), QNULL( 81920), QNULL( 98304), QNULL(114688), QNULL(131072), /* 64 */ \
QNULL(163840), QNULL(196608), QNULL(229376), QNULL(262144), QNULL(327680), QNULL(393216), QNULL(458752), QNULL(524288), /* 72 */ \
- QNULL(MI_LARGE_OBJ_WSIZE_MAX + 1 /* 655360, Huge queue */), \
- QNULL(MI_LARGE_OBJ_WSIZE_MAX + 2) /* Full queue */ }
+ QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 1 /* 655360, Huge queue */), \
+ QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 2) /* Full queue */ }
#define MI_STAT_COUNT_NULL() {0,0,0,0}
@@ -73,11 +81,24 @@ const mi_page_t _mi_page_empty = {
MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
+ MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \
MI_STAT_COUNT_NULL(), \
- { 0, 0 }, { 0, 0 }, { 0, 0 }, \
- { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } \
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } \
MI_STAT_COUNT_END_NULL()
+
+// Empty slice span queues for every bin
+#define SQNULL(sz) { NULL, NULL, sz }
+#define MI_SEGMENT_SPAN_QUEUES_EMPTY \
+ { SQNULL(1), \
+ SQNULL( 1), SQNULL( 2), SQNULL( 3), SQNULL( 4), SQNULL( 5), SQNULL( 6), SQNULL( 7), SQNULL( 10), /* 8 */ \
+ SQNULL( 12), SQNULL( 14), SQNULL( 16), SQNULL( 20), SQNULL( 24), SQNULL( 28), SQNULL( 32), SQNULL( 40), /* 16 */ \
+ SQNULL( 48), SQNULL( 56), SQNULL( 64), SQNULL( 80), SQNULL( 96), SQNULL( 112), SQNULL( 128), SQNULL( 160), /* 24 */ \
+ SQNULL( 192), SQNULL( 224), SQNULL( 256), SQNULL( 320), SQNULL( 384), SQNULL( 448), SQNULL( 512), SQNULL( 640), /* 32 */ \
+ SQNULL( 768), SQNULL( 896), SQNULL( 1024) /* 35 */ }
+
+
// --------------------------------------------------------
// Statically allocate an empty heap as the initial
// thread local value for the default heap,
@@ -87,21 +108,38 @@ const mi_page_t _mi_page_empty = {
// may lead to allocation itself on some platforms)
// --------------------------------------------------------
-const mi_heap_t _mi_heap_empty = {
+mi_decl_cache_align const mi_heap_t _mi_heap_empty = {
NULL,
MI_SMALL_PAGES_EMPTY,
MI_PAGE_QUEUES_EMPTY,
- ATOMIC_VAR_INIT(NULL),
+ MI_ATOMIC_VAR_INIT(NULL),
0, // tid
0, // cookie
+ 0, // arena id
{ 0, 0 }, // keys
- { {0}, {0}, 0 },
+ { {0}, {0}, 0, true }, // random
0, // page count
MI_BIN_FULL, 0, // page retired min/max
NULL, // next
false
};
+#define tld_empty_stats ((mi_stats_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,stats)))
+#define tld_empty_os ((mi_os_tld_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,os)))
+
+mi_decl_cache_align static const mi_tld_t tld_empty = {
+ 0,
+ false,
+ NULL, NULL,
+ { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, tld_empty_stats, tld_empty_os }, // segments
+ { 0, tld_empty_stats }, // os
+ { MI_STATS_NULL } // stats
+};
+
+mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
+ return _mi_prim_thread_id();
+}
+
// the thread-local default heap for allocation
mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty;
@@ -109,11 +147,8 @@ extern mi_heap_t _mi_heap_main;
static mi_tld_t tld_main = {
0, false,
- &_mi_heap_main, &_mi_heap_main,
- { { NULL, NULL }, {NULL ,NULL}, {NULL ,NULL, 0},
- 0, 0, 0, 0, 0, 0, NULL,
- &tld_main.stats, &tld_main.os
- }, // segments
+ &_mi_heap_main, & _mi_heap_main,
+ { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, &tld_main.stats, &tld_main.os }, // segments
{ 0, &tld_main.stats }, // os
{ MI_STATS_NULL } // stats
};
@@ -122,11 +157,12 @@ mi_heap_t _mi_heap_main = {
&tld_main,
MI_SMALL_PAGES_EMPTY,
MI_PAGE_QUEUES_EMPTY,
- ATOMIC_VAR_INIT(NULL),
+ MI_ATOMIC_VAR_INIT(NULL),
0, // thread id
0, // initial cookie
+ 0, // arena id
{ 0, 0 }, // the key of the main heap can be fixed (unlike page keys that need to be secure!)
- { {0x846ca68b}, {0}, 0 }, // random
+ { {0x846ca68b}, {0}, 0, true }, // random
0, // page count
MI_BIN_FULL, 0, // page retired min/max
NULL, // next heap
@@ -141,19 +177,23 @@ mi_stats_t _mi_stats_main = { MI_STATS_NULL };
static void mi_heap_main_init(void) {
if (_mi_heap_main.cookie == 0) {
_mi_heap_main.thread_id = _mi_thread_id();
- _mi_heap_main.cookie = _os_random_weak((uintptr_t)&mi_heap_main_init);
- _mi_random_init(&_mi_heap_main.random);
+ _mi_heap_main.cookie = 1;
+ #if defined(_WIN32) && !defined(MI_SHARED_LIB)
+ _mi_random_init_weak(&_mi_heap_main.random); // prevent allocation failure during bcrypt dll initialization with static linking
+ #else
+ _mi_random_init(&_mi_heap_main.random);
+ #endif
+ _mi_heap_main.cookie = _mi_heap_random_next(&_mi_heap_main);
_mi_heap_main.keys[0] = _mi_heap_random_next(&_mi_heap_main);
_mi_heap_main.keys[1] = _mi_heap_random_next(&_mi_heap_main);
}
}
-#if defined(MI_TLS_RECURSE_GUARD_HOST) || !defined(MI_TLS_RECURSE_GUARD)
mi_heap_t* _mi_heap_main_get(void) {
mi_heap_main_init();
return &_mi_heap_main;
}
-#endif
+
/* -----------------------------------------------------------
Initialization and freeing of the thread local heaps
@@ -163,34 +203,104 @@ mi_heap_t* _mi_heap_main_get(void) {
typedef struct mi_thread_data_s {
mi_heap_t heap; // must come first due to cast in `_mi_heap_done`
mi_tld_t tld;
+ mi_memid_t memid;
} mi_thread_data_t;
+
+// Thread meta-data is allocated directly from the OS. For
+// some programs that do not use thread pools and allocate and
+// destroy many OS threads, this may cause too much overhead
+// per thread, so we maintain a small cache of recently freed metadata.
+
+#define TD_CACHE_SIZE (16)
+static _Atomic(mi_thread_data_t*) td_cache[TD_CACHE_SIZE];
+
+static mi_thread_data_t* mi_thread_data_zalloc(void) {
+ // try to find thread metadata in the cache
+ bool is_zero = false;
+ mi_thread_data_t* td = NULL;
+ for (int i = 0; i < TD_CACHE_SIZE; i++) {
+ td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
+ if (td != NULL) {
+ // found cached allocation, try to use it
+ td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL);
+ if (td != NULL) {
+ break;
+ }
+ }
+ }
+
+ // if that fails, allocate as meta data
+ if (td == NULL) {
+ mi_memid_t memid;
+ td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main);
+ if (td == NULL) {
+ // if this fails, try once more. (issue #257)
+ td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main);
+ if (td == NULL) {
+ // really out of memory
+ _mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t));
+ }
+ }
+ if (td != NULL) {
+ td->memid = memid;
+ is_zero = memid.initially_zero;
+ }
+ }
+
+ if (td != NULL && !is_zero) {
+ _mi_memzero_aligned(td, sizeof(*td));
+ }
+ return td;
+}
+
+static void mi_thread_data_free( mi_thread_data_t* tdfree ) {
+ // try to add the thread metadata to the cache
+ for (int i = 0; i < TD_CACHE_SIZE; i++) {
+ mi_thread_data_t* td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
+ if (td == NULL) {
+ mi_thread_data_t* expected = NULL;
+ if (mi_atomic_cas_ptr_weak_acq_rel(mi_thread_data_t, &td_cache[i], &expected, tdfree)) {
+ return;
+ }
+ }
+ }
+ // if that fails, just free it directly
+ _mi_os_free(tdfree, sizeof(mi_thread_data_t), tdfree->memid, &_mi_stats_main);
+}
+
+void _mi_thread_data_collect(void) {
+ // free all thread metadata from the cache
+ for (int i = 0; i < TD_CACHE_SIZE; i++) {
+ mi_thread_data_t* td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]);
+ if (td != NULL) {
+ td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL);
+ if (td != NULL) {
+ _mi_os_free(td, sizeof(mi_thread_data_t), td->memid, &_mi_stats_main);
+ }
+ }
+ }
+}
+
// Initialize the thread local default heap, called from `mi_thread_init`
static bool _mi_heap_init(void) {
- if (mi_heap_is_initialized(mi_get_default_heap())) return true;
+ if (mi_heap_is_initialized(mi_prim_get_default_heap())) return true;
if (_mi_is_main_thread()) {
// mi_assert_internal(_mi_heap_main.thread_id != 0); // can happen on freeBSD where alloc is called before any initialization
// the main heap is statically allocated
mi_heap_main_init();
_mi_heap_set_default_direct(&_mi_heap_main);
- //mi_assert_internal(_mi_heap_default->tld->heap_backing == mi_get_default_heap());
+ //mi_assert_internal(_mi_heap_default->tld->heap_backing == mi_prim_get_default_heap());
}
else {
// use `_mi_os_alloc` to allocate directly from the OS
- mi_thread_data_t* td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &_mi_stats_main); // Todo: more efficient allocation?
- if (td == NULL) {
- // if this fails, try once more. (issue #257)
- td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &_mi_stats_main);
- if (td == NULL) {
- // really out of memory
- _mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t));
- return false;
- }
- }
- // OS allocated so already zero initialized
+ mi_thread_data_t* td = mi_thread_data_zalloc();
+ if (td == NULL) return false;
+
mi_tld_t* tld = &td->tld;
mi_heap_t* heap = &td->heap;
- memcpy(heap, &_mi_heap_empty, sizeof(*heap));
+ _mi_memcpy_aligned(tld, &tld_empty, sizeof(*tld));
+ _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(*heap));
heap->thread_id = _mi_thread_id();
_mi_random_init(&heap->random);
heap->cookie = _mi_heap_random_next(heap) | 1;
@@ -202,7 +312,7 @@ static bool _mi_heap_init(void) {
tld->segments.stats = &tld->stats;
tld->segments.os = &tld->os;
tld->os.stats = &tld->stats;
- _mi_heap_set_default_direct(heap);
+ _mi_heap_set_default_direct(heap);
}
return false;
}
@@ -235,23 +345,26 @@ static bool _mi_heap_done(mi_heap_t* heap) {
if (heap != &_mi_heap_main) {
_mi_heap_collect_abandon(heap);
}
-
+
// merge stats
- _mi_stats_done(&heap->tld->stats);
+ _mi_stats_done(&heap->tld->stats);
// free if not the main thread
if (heap != &_mi_heap_main) {
- mi_assert_internal(heap->tld->segments.count == 0 || heap->thread_id != _mi_thread_id());
- _mi_os_free(heap, sizeof(mi_thread_data_t), &_mi_stats_main);
+ // the following assertion does not always hold for huge segments as those are always treated
+ // as abandoned: one may allocate it in one thread, but deallocate in another, in which case
+ // the count can be too large or negative. todo: perhaps not count huge segments? see issue #363
+ // mi_assert_internal(heap->tld->segments.count == 0 || heap->thread_id != _mi_thread_id());
+ mi_thread_data_free((mi_thread_data_t*)heap);
}
-#if 0
- // never free the main thread even in debug mode; if a dll is linked statically with mimalloc,
- // there may still be delete/free calls after the mi_fls_done is called. Issue #207
else {
+ #if 0
+ // never free the main thread even in debug mode; if a dll is linked statically with mimalloc,
+ // there may still be delete/free calls after the mi_fls_done is called. Issue #207
_mi_heap_destroy_pages(heap);
mi_assert_internal(heap->tld->heap_backing == &_mi_heap_main);
+ #endif
}
-#endif
return false;
}
@@ -273,57 +386,12 @@ static bool _mi_heap_done(mi_heap_t* heap) {
// to set up the thread local keys.
// --------------------------------------------------------
-static void _mi_thread_done(mi_heap_t* default_heap);
-
-#ifdef __wasi__
-// no pthreads in the WebAssembly Standard Interface
-#elif !defined(_WIN32)
-#define MI_USE_PTHREADS
-#endif
-
-#if defined(_WIN32) && defined(MI_SHARED_LIB)
- // nothing to do as it is done in DllMain
-#elif defined(_WIN32) && !defined(MI_SHARED_LIB)
- // use thread local storage keys to detect thread ending
- #include <windows.h>
- #include <fibersapi.h>
- #if (_WIN32_WINNT < 0x600) // before Windows Vista
- WINBASEAPI DWORD WINAPI TlsAlloc( _In_opt_ PFLS_CALLBACK_FUNCTION lpCallback );
- WINBASEAPI PVOID WINAPI TlsGetValue( _In_ DWORD dwFlsIndex );
- WINBASEAPI BOOL WINAPI TlsSetValue( _In_ DWORD dwFlsIndex, _In_opt_ PVOID lpFlsData );
- WINBASEAPI BOOL WINAPI TlsFree(_In_ DWORD dwFlsIndex);
- #endif
- static DWORD mi_fls_key = (DWORD)(-1);
- static void NTAPI mi_fls_done(PVOID value) {
- if (value!=NULL) _mi_thread_done((mi_heap_t*)value);
- }
-#elif defined(MI_USE_PTHREADS)
- // use pthread local storage keys to detect thread ending
- // (and used with MI_TLS_PTHREADS for the default heap)
- #include <pthread.h>
- pthread_key_t _mi_heap_default_key = (pthread_key_t)(-1);
- static void mi_pthread_done(void* value) {
- if (value!=NULL) _mi_thread_done((mi_heap_t*)value);
- }
-#elif defined(__wasi__)
-// no pthreads in the WebAssembly Standard Interface
-#else
- #pragma message("define a way to call mi_thread_done when a thread is done")
-#endif
-
// Set up handlers so `mi_thread_done` is called automatically
static void mi_process_setup_auto_thread_done(void) {
static bool tls_initialized = false; // fine if it races
if (tls_initialized) return;
tls_initialized = true;
- #if defined(_WIN32) && defined(MI_SHARED_LIB)
- // nothing to do as it is done in DllMain
- #elif defined(_WIN32) && !defined(MI_SHARED_LIB)
- mi_fls_key = TlsAlloc(&mi_fls_done);
- #elif defined(MI_USE_PTHREADS)
- mi_assert_internal(_mi_heap_default_key == (pthread_key_t)(-1));
- pthread_key_create(&_mi_heap_default_key, &mi_pthread_done);
- #endif
+ _mi_prim_thread_init_auto_done();
_mi_heap_set_default_direct(&_mi_heap_main);
}
@@ -332,31 +400,52 @@ bool _mi_is_main_thread(void) {
return (_mi_heap_main.thread_id==0 || _mi_heap_main.thread_id == _mi_thread_id());
}
+static _Atomic(size_t) thread_count = MI_ATOMIC_VAR_INIT(1);
+
+size_t _mi_current_thread_count(void) {
+ return mi_atomic_load_relaxed(&thread_count);
+}
+
// This is called from the `mi_malloc_generic`
void mi_thread_init(void) mi_attr_noexcept
{
// ensure our process has started already
mi_process_init();
-
+
// initialize the thread local default heap
// (this will call `_mi_heap_set_default_direct` and thus set the
// fiber/pthread key to a non-zero value, ensuring `_mi_thread_done` is called)
if (_mi_heap_init()) return; // returns true if already initialized
_mi_stat_increase(&_mi_stats_main.threads, 1);
+ mi_atomic_increment_relaxed(&thread_count);
//_mi_verbose_message("thread init: 0x%zx\n", _mi_thread_id());
}
void mi_thread_done(void) mi_attr_noexcept {
- _mi_thread_done(mi_get_default_heap());
+ _mi_thread_done(NULL);
}
-static void _mi_thread_done(mi_heap_t* heap) {
- _mi_stat_decrease(&_mi_stats_main.threads, 1);
+void _mi_thread_done(mi_heap_t* heap)
+{
+ // calling with NULL implies using the default heap
+ if (heap == NULL) {
+ heap = mi_prim_get_default_heap();
+ if (heap == NULL) return;
+ }
+ // prevent re-entrancy through heap_done/heap_set_default_direct (issue #699)
+ if (!mi_heap_is_initialized(heap)) {
+ return;
+ }
+
+ // adjust stats
+ mi_atomic_decrement_relaxed(&thread_count);
+ _mi_stat_decrease(&_mi_stats_main.threads, 1);
+
// check thread-id as on Windows shutdown with FLS the main (exit) thread may call this on thread-local heaps...
if (heap->thread_id != _mi_thread_id()) return;
-
+
// abandon the thread local heap
if (_mi_heap_done(heap)) return; // returns true if already ran
}
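
When threads are not covered by the registered TLS destructors (or eager cleanup is wanted), this path can be driven explicitly. A pthreads sketch; `worker` is a hypothetical thread function:

```c
#include <mimalloc.h>
#include <pthread.h>

static void* worker(void* arg) {
  (void)arg;
  void* p = mi_malloc(256);  // first allocation triggers mi_thread_init() implicitly
  mi_free(p);
  mi_thread_done();          // eagerly abandon this thread's heap; otherwise the
                             // auto-registered thread-exit hook does it
  return NULL;
}

void spawn_example(void) {
  pthread_t t;
  if (pthread_create(&t, NULL, &worker, NULL) == 0) {
    pthread_join(t, NULL);
  }
}
```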
@@ -364,7 +453,7 @@ static void _mi_thread_done(mi_heap_t* heap) {
void _mi_heap_set_default_direct(mi_heap_t* heap) {
mi_assert_internal(heap != NULL);
#if defined(MI_TLS_SLOT)
- mi_tls_slot_set(MI_TLS_SLOT,heap);
+ mi_prim_tls_slot_set(MI_TLS_SLOT,heap);
#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
*mi_tls_pthread_heap_slot() = heap;
#elif defined(MI_TLS_PTHREAD)
@@ -375,38 +464,29 @@ void _mi_heap_set_default_direct(mi_heap_t* heap) {
// ensure the default heap is passed to `_mi_thread_done`
// setting to a non-NULL value also ensures `mi_thread_done` is called.
- #if defined(_WIN32) && defined(MI_SHARED_LIB)
- // nothing to do as it is done in DllMain
- #elif defined(_WIN32) && !defined(MI_SHARED_LIB)
- mi_assert_internal(mi_fls_key != 0);
- TlsSetValue(mi_fls_key, heap);
- #elif defined(MI_USE_PTHREADS)
- if (_mi_heap_default_key != (pthread_key_t)(-1)) { // can happen during recursive invocation on freeBSD
- pthread_setspecific(_mi_heap_default_key, heap);
- }
- #endif
+ _mi_prim_thread_associate_default_heap(heap);
}
// --------------------------------------------------------
// Run functions on process init/done, and thread init/done
// --------------------------------------------------------
-static void mi_process_done(void);
+static void mi_cdecl mi_process_done(void);
static bool os_preloading = true; // true until this module is initialized
static bool mi_redirected = false; // true if malloc redirects to mi_malloc
// Returns true if this module has not been initialized; Don't use C runtime routines until it returns false.
-bool _mi_preloading(void) {
+bool mi_decl_noinline _mi_preloading(void) {
return os_preloading;
}
-bool mi_is_redirected(void) mi_attr_noexcept {
+mi_decl_nodiscard bool mi_is_redirected(void) mi_attr_noexcept {
return mi_redirected;
}
// Communicate with the redirection module on Windows
-#if defined(_WIN32) && defined(MI_SHARED_LIB)
+#if defined(_WIN32) && defined(MI_SHARED_LIB) && !defined(MI_WIN_NOREDIRECT)
#ifdef __cplusplus
extern "C" {
#endif
@@ -422,8 +502,8 @@ mi_decl_export void _mi_redirect_entry(DWORD reason) {
mi_thread_done();
}
}
-__declspec(dllimport) bool mi_allocator_init(const char** message);
-__declspec(dllimport) void mi_allocator_done(void);
+__declspec(dllimport) bool mi_cdecl mi_allocator_init(const char** message);
+__declspec(dllimport) void mi_cdecl mi_allocator_done(void);
#ifdef __cplusplus
}
#endif
@@ -440,15 +520,18 @@ static void mi_allocator_done(void) {
// Called once by the process loader
static void mi_process_load(void) {
mi_heap_main_init();
- #if defined(MI_TLS_RECURSE_GUARD)
+ #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD)
volatile mi_heap_t* dummy = _mi_heap_default; // access TLS to allocate it before setting tls_initialized to true;
- UNUSED(dummy);
+ if (dummy == NULL) return; // use dummy or otherwise the access may get optimized away (issue #697)
#endif
os_preloading = false;
+ mi_assert_internal(_mi_is_main_thread());
+ #if !(defined(_WIN32) && defined(MI_SHARED_LIB)) // use Dll process detach (see below) instead of atexit (issue #521)
atexit(&mi_process_done);
+ #endif
_mi_options_init();
+ mi_process_setup_auto_thread_done();
mi_process_init();
- //mi_stats_reset();-
if (mi_redirected) _mi_verbose_message("malloc is redirected.\n");
// show message from the redirector (if present)
@@ -457,33 +540,81 @@ static void mi_process_load(void) {
if (msg != NULL && (mi_option_is_enabled(mi_option_verbose) || mi_option_is_enabled(mi_option_show_errors))) {
_mi_fputs(NULL,NULL,NULL,msg);
}
+
+ // reseed random
+ _mi_random_reinit_if_weak(&_mi_heap_main.random);
}
+#if defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
+#include <intrin.h>
+mi_decl_cache_align bool _mi_cpu_has_fsrm = false;
+
+static void mi_detect_cpu_features(void) {
+ // FSRM for fast rep movsb support (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017))
+ int32_t cpu_info[4];
+ __cpuid(cpu_info, 7);
+ _mi_cpu_has_fsrm = ((cpu_info[3] & (1 << 4)) != 0); // bit 4 of EDX : see
+}
+#else
+static void mi_detect_cpu_features(void) {
+ // nothing
+}
+#endif
+
// Initialize the process; called by thread_init or the process loader
void mi_process_init(void) mi_attr_noexcept {
// ensure we are called once
- if (_mi_process_is_initialized) return;
+ static mi_atomic_once_t process_init;
+ #if _MSC_VER < 1920
+ mi_heap_main_init(); // vs2017 can dynamically re-initialize _mi_heap_main
+ #endif
+ if (!mi_atomic_once(&process_init)) return;
_mi_process_is_initialized = true;
+ _mi_verbose_message("process init: 0x%zx\n", _mi_thread_id());
mi_process_setup_auto_thread_done();
- _mi_verbose_message("process init: 0x%zx\n", _mi_thread_id());
+ mi_detect_cpu_features();
_mi_os_init();
mi_heap_main_init();
- #if (MI_DEBUG)
+ #if MI_DEBUG
_mi_verbose_message("debug level : %d\n", MI_DEBUG);
#endif
_mi_verbose_message("secure level: %d\n", MI_SECURE);
+ _mi_verbose_message("mem tracking: %s\n", MI_TRACK_TOOL);
+ #if MI_TSAN
+ _mi_verbose_message("thread santizer enabled\n");
+ #endif
mi_thread_init();
+
+ #if defined(_WIN32)
+ // On Windows, when building as a static lib the FLS cleanup happens too early for the main thread.
+ // To avoid this, set the FLS value for the main thread to NULL so the FLS cleanup
+ // will not call _mi_thread_done on the (still executing) main thread. See issue #508.
+ _mi_prim_thread_associate_default_heap(NULL);
+ #endif
+
mi_stats_reset(); // only call stat reset *after* thread init (or the heap tld == NULL)
+ mi_track_init();
if (mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
- size_t pages = mi_option_get(mi_option_reserve_huge_os_pages);
- mi_reserve_huge_os_pages_interleave(pages, 0, pages*500);
+ size_t pages = mi_option_get_clamp(mi_option_reserve_huge_os_pages, 0, 128*1024);
+ long reserve_at = mi_option_get(mi_option_reserve_huge_os_pages_at);
+ if (reserve_at != -1) {
+ mi_reserve_huge_os_pages_at(pages, reserve_at, pages*500);
+ } else {
+ mi_reserve_huge_os_pages_interleave(pages, 0, pages*500);
+ }
+ }
+ if (mi_option_is_enabled(mi_option_reserve_os_memory)) {
+ long ksize = mi_option_get(mi_option_reserve_os_memory);
+ if (ksize > 0) {
+ mi_reserve_os_memory((size_t)ksize*MI_KiB, true /* commit? */, true /* allow large pages? */);
+ }
}
}
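
The reservation branch above is also reachable directly through the public API; the `MIMALLOC_RESERVE_HUGE_OS_PAGES(_AT)` environment variables map onto the same options. A sketch (page counts and timeouts are illustrative, matching the `pages*500` ms budget used above):

```c
#include <mimalloc.h>

void reserve_example(void) {
  // Reserve 4 one-GiB huge pages interleaved over all NUMA nodes,
  // allowing ~500 ms per page.
  mi_reserve_huge_os_pages_interleave(4, 0 /* all numa nodes */, 4 * 500);

  // Or pin 2 huge pages to NUMA node 0 (the reserve_huge_os_pages_at path).
  mi_reserve_huge_os_pages_at(2, 0 /* numa node */, 2 * 500);
}
```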
// Called when the process is done (through `atexit`)
-static void mi_process_done(void) {
+static void mi_cdecl mi_process_done(void) {
// only shutdown if we were initialized
if (!_mi_process_is_initialized) return;
// ensure we are called once
@@ -491,22 +622,31 @@ static void mi_process_done(void) {
if (process_done) return;
process_done = true;
- #if defined(_WIN32) && !defined(MI_SHARED_LIB)
- TlsSetValue(mi_fls_key, NULL); // don't call main-thread callback
- TlsFree(mi_fls_key); // call thread-done on all threads to prevent dangling callback pointer if statically linked with a DLL; Issue #208
- #endif
+ // release any thread specific resources and ensure _mi_thread_done is called on all but the main thread
+ _mi_prim_thread_done_auto_done();
- #if (MI_DEBUG != 0) || !defined(MI_SHARED_LIB)
- // free all memory if possible on process exit. This is not needed for a stand-alone process
- // but should be done if mimalloc is statically linked into another shared library which
- // is repeatedly loaded/unloaded, see issue #281.
- mi_collect(true /* force */ );
+ #ifndef MI_SKIP_COLLECT_ON_EXIT
+ #if (MI_DEBUG || !defined(MI_SHARED_LIB))
+ // free all memory if possible on process exit. This is not needed for a stand-alone process
+ // but should be done if mimalloc is statically linked into another shared library which
+ // is repeatedly loaded/unloaded, see issue #281.
+ mi_collect(true /* force */ );
+ #endif
#endif
+ // Forcefully release all retained memory; this can be dangerous in general if overriding regular malloc/free
+ // since after process_done there might still be other code running that calls `free` (like atexit routines,
+ // or C-runtime termination code).
+ if (mi_option_is_enabled(mi_option_destroy_on_exit)) {
+ mi_collect(true /* force */);
+ _mi_heap_unsafe_destroy_all(); // forcefully release all memory held by all heaps (of this thread only!)
+ _mi_arena_unsafe_destroy_all(& _mi_heap_main_get()->tld->stats);
+ }
+
if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) {
mi_stats_print(NULL);
}
- mi_allocator_done();
+ mi_allocator_done();
_mi_verbose_message("process done: 0x%zx\n", _mi_heap_main.thread_id);
os_preloading = true; // don't call the C runtime anymore
}
@@ -516,17 +656,40 @@ static void mi_process_done(void) {
#if defined(_WIN32) && defined(MI_SHARED_LIB)
// Windows DLL: easy to hook into process_init and thread_done
__declspec(dllexport) BOOL WINAPI DllMain(HINSTANCE inst, DWORD reason, LPVOID reserved) {
- UNUSED(reserved);
- UNUSED(inst);
+ MI_UNUSED(reserved);
+ MI_UNUSED(inst);
if (reason==DLL_PROCESS_ATTACH) {
mi_process_load();
}
+ else if (reason==DLL_PROCESS_DETACH) {
+ mi_process_done();
+ }
else if (reason==DLL_THREAD_DETACH) {
- if (!mi_is_redirected()) mi_thread_done();
+ if (!mi_is_redirected()) {
+ mi_thread_done();
+ }
}
return TRUE;
}
+#elif defined(_MSC_VER)
+ // MSVC: use data section magic for static libraries
+ // See
+ static int _mi_process_init(void) {
+ mi_process_load();
+ return 0;
+ }
+ typedef int(*_mi_crt_callback_t)(void);
+ #if defined(_M_X64) || defined(_M_ARM64)
+ __pragma(comment(linker, "/include:" "_mi_msvc_initu"))
+ #pragma section(".CRT$XIU", long, read)
+ #else
+ __pragma(comment(linker, "/include:" "__mi_msvc_initu"))
+ #endif
+ #pragma data_seg(".CRT$XIU")
+ mi_decl_externc _mi_crt_callback_t _mi_msvc_initu[] = { &_mi_process_init };
+ #pragma data_seg()
+
#elif defined(__cplusplus)
// C++: use static initialization to detect process start
static bool _mi_process_init(void) {
@@ -541,24 +704,6 @@ static void mi_process_done(void) {
mi_process_load();
}
-#elif defined(_MSC_VER)
- // MSVC: use data section magic for static libraries
- // See
- static int _mi_process_init(void) {
- mi_process_load();
- return 0;
- }
- typedef int(*_crt_cb)(void);
- #ifdef _M_X64
- __pragma(comment(linker, "/include:" "_mi_msvc_initu"))
- #pragma section(".CRT$XIU", long, read)
- #else
- __pragma(comment(linker, "/include:" "__mi_msvc_initu"))
- #endif
- #pragma data_seg(".CRT$XIU")
- _crt_cb _mi_msvc_initu[] = { &_mi_process_init };
- #pragma data_seg()
-
#else
#pragma message("define a way to call mi_process_load on your platform")
#endif
diff --git a/src/options.c b/src/options.c
index 9da3a9b..345b560 100644
--- a/src/options.c
+++ b/src/options.c
@@ -1,35 +1,28 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
-#include "mimalloc-internal.h"
-#include "mimalloc-atomic.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h" // mi_prim_out_stderr
-#include <stdio.h>
-#include <stdlib.h> // strtol
-#include <string.h> // strncpy, strncat, strlen, strstr
-#include <ctype.h> // toupper
+#include <stdio.h> // FILE
+#include <stdlib.h> // abort
#include <stdarg.h>
-#ifdef _MSC_VER
-#pragma warning(disable:4996) // strncpy, strncat
-#endif
+static long mi_max_error_count = 16; // stop outputting errors after this (use < 0 for no limit)
+static long mi_max_warning_count = 16; // stop outputting warnings after this (use < 0 for no limit)
-static uintptr_t mi_max_error_count = 16; // stop outputting errors after this
-
-static void mi_add_stderr_output();
+static void mi_add_stderr_output(void);
int mi_version(void) mi_attr_noexcept {
return MI_MALLOC_VERSION;
}
-#ifdef _WIN32
-#include <conio.h> // _cputs
-#endif
// --------------------------------------------------------
// Options
@@ -48,46 +41,55 @@ typedef struct mi_option_desc_s {
mi_init_t init; // is it initialized yet? (from the environment)
mi_option_t option; // for debugging: the option index should match the option
const char* name; // option name without `mimalloc_` prefix
+ const char* legacy_name; // potential legacy option name
} mi_option_desc_t;
-#define MI_OPTION(opt) mi_option_##opt, #opt
-#define MI_OPTION_DESC(opt) {0, UNINIT, MI_OPTION(opt) }
+#define MI_OPTION(opt) mi_option_##opt, #opt, NULL
+#define MI_OPTION_LEGACY(opt,legacy) mi_option_##opt, #opt, #legacy
static mi_option_desc_t options[_mi_option_last] =
{
// stable options
-#if MI_DEBUG || defined(MI_SHOW_ERRORS)
+ #if MI_DEBUG || defined(MI_SHOW_ERRORS)
{ 1, UNINIT, MI_OPTION(show_errors) },
-#else
+ #else
{ 0, UNINIT, MI_OPTION(show_errors) },
-#endif
+ #endif
{ 0, UNINIT, MI_OPTION(show_stats) },
{ 0, UNINIT, MI_OPTION(verbose) },
// the following options are experimental and not all combinations make sense.
- { 1, UNINIT, MI_OPTION(eager_commit) }, // commit per segment directly (4MiB) (but see also `eager_commit_delay`)
- #if defined(_WIN32) || (MI_INTPTR_SIZE <= 4) // and other OS's without overcommit?
- { 0, UNINIT, MI_OPTION(eager_region_commit) },
- { 1, UNINIT, MI_OPTION(reset_decommits) }, // reset decommits memory
- #else
- { 1, UNINIT, MI_OPTION(eager_region_commit) },
- { 0, UNINIT, MI_OPTION(reset_decommits) }, // reset uses MADV_FREE/MADV_DONTNEED
- #endif
- { 0, UNINIT, MI_OPTION(large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
- { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) },
- { 0, UNINIT, MI_OPTION(segment_cache) }, // cache N segments per thread
- { 1, UNINIT, MI_OPTION(page_reset) }, // reset page memory on free
- { 0, UNINIT, MI_OPTION(abandoned_page_reset) },// reset free page memory when a thread terminates
- { 0, UNINIT, MI_OPTION(segment_reset) }, // reset segment memory on free (needs eager commit)
+ { 1, UNINIT, MI_OPTION(eager_commit) }, // commit per segment directly (4MiB) (but see also `eager_commit_delay`)
+ { 2, UNINIT, MI_OPTION_LEGACY(arena_eager_commit,eager_region_commit) }, // eager commit arena's? 2 is used to enable this only on an OS that has overcommit (i.e. linux)
+ { 1, UNINIT, MI_OPTION_LEGACY(purge_decommits,reset_decommits) }, // purge decommits memory (instead of reset) (note: on linux this uses MADV_DONTNEED for decommit)
+ { 0, UNINIT, MI_OPTION_LEGACY(allow_large_os_pages,large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's
+ { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) }, // per 1GiB huge pages
+ {-1, UNINIT, MI_OPTION(reserve_huge_os_pages_at) }, // reserve huge pages at node N
+ { 0, UNINIT, MI_OPTION(reserve_os_memory) },
+ { 0, UNINIT, MI_OPTION(deprecated_segment_cache) }, // cache N segments per thread
+ { 0, UNINIT, MI_OPTION(deprecated_page_reset) }, // reset page memory on free
+ { 0, UNINIT, MI_OPTION_LEGACY(abandoned_page_purge,abandoned_page_reset) }, // reset free page memory when a thread terminates
+ { 0, UNINIT, MI_OPTION(deprecated_segment_reset) }, // reset segment memory on free (needs eager commit)
#if defined(__NetBSD__)
- { 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed
+ { 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed
#else
- { 1, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
+ { 1, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed (but per page in the segment on demand)
#endif
- { 100, UNINIT, MI_OPTION(reset_delay) }, // reset delay in milli-seconds
- { 0, UNINIT, MI_OPTION(use_numa_nodes) }, // 0 = use available numa nodes, otherwise use at most N nodes.
- { 100, UNINIT, MI_OPTION(os_tag) }, // only apple specific for now but might serve more or less related purpose
- { 16, UNINIT, MI_OPTION(max_errors) } // maximum errors that are output
+ { 10, UNINIT, MI_OPTION_LEGACY(purge_delay,reset_delay) }, // purge delay in milli-seconds
+ { 0, UNINIT, MI_OPTION(use_numa_nodes) }, // 0 = use available numa nodes, otherwise use at most N nodes.
+ { 0, UNINIT, MI_OPTION(limit_os_alloc) }, // 1 = do not use OS memory for allocation (but only reserved arenas)
+ { 100, UNINIT, MI_OPTION(os_tag) }, // only apple specific for now but might serve more or less related purpose
+ { 16, UNINIT, MI_OPTION(max_errors) }, // maximum errors that are output
+ { 16, UNINIT, MI_OPTION(max_warnings) }, // maximum warnings that are output
+ { 8, UNINIT, MI_OPTION(max_segment_reclaim)}, // max. number of segment reclaims from the abandoned segments per try.
+ { 0, UNINIT, MI_OPTION(destroy_on_exit)}, // release all OS memory on process exit; careful with dangling pointer or after-exit frees!
+ #if (MI_INTPTR_SIZE>4)
+ { 1024L * 1024L, UNINIT, MI_OPTION(arena_reserve) }, // reserve memory N KiB at a time
+ #else
+ { 128L * 1024L, UNINIT, MI_OPTION(arena_reserve) },
+ #endif
+ { 10, UNINIT, MI_OPTION(arena_purge_mult) }, // purge delay multiplier for arena's
+ { 1, UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) },
};
static void mi_option_init(mi_option_desc_t* desc);
@@ -98,27 +100,42 @@ void _mi_options_init(void) {
mi_add_stderr_output(); // now it safe to use stderr for output
for(int i = 0; i < _mi_option_last; i++ ) {
mi_option_t option = (mi_option_t)i;
- long l = mi_option_get(option); UNUSED(l); // initialize
- if (option != mi_option_verbose) {
+ long l = mi_option_get(option); MI_UNUSED(l); // initialize
+ // if (option != mi_option_verbose)
+ {
mi_option_desc_t* desc = &options[option];
_mi_verbose_message("option '%s': %ld\n", desc->name, desc->value);
}
}
mi_max_error_count = mi_option_get(mi_option_max_errors);
+ mi_max_warning_count = mi_option_get(mi_option_max_warnings);
}
-long mi_option_get(mi_option_t option) {
+mi_decl_nodiscard long mi_option_get(mi_option_t option) {
mi_assert(option >= 0 && option < _mi_option_last);
+ if (option < 0 || option >= _mi_option_last) return 0;
mi_option_desc_t* desc = &options[option];
mi_assert(desc->option == option); // index should match the option
- if (mi_unlikely(desc->init == UNINIT)) {
+ if mi_unlikely(desc->init == UNINIT) {
mi_option_init(desc);
}
return desc->value;
}
+mi_decl_nodiscard long mi_option_get_clamp(mi_option_t option, long min, long max) {
+ long x = mi_option_get(option);
+ return (x < min ? min : (x > max ? max : x));
+}
+
+mi_decl_nodiscard size_t mi_option_get_size(mi_option_t option) {
+ mi_assert_internal(option == mi_option_reserve_os_memory || option == mi_option_arena_reserve);
+ long x = mi_option_get(option);
+ return (x < 0 ? 0 : (size_t)x * MI_KiB);
+}
+
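A sketch of the accessors at work, including the clamping and KiB scaling added above; each option can also be set through its `MIMALLOC_`-prefixed environment variable (e.g. `MIMALLOC_SHOW_STATS=1`):

```c
#include <mimalloc.h>

void options_example(void) {
  mi_option_set_default(mi_option_show_stats, 1); // only applies if not already set
  mi_option_enable(mi_option_verbose);

  long pages  = mi_option_get_clamp(mi_option_reserve_huge_os_pages, 0, 128 * 1024);
  size_t resv = mi_option_get_size(mi_option_reserve_os_memory); // KiB scaled to bytes
  (void)pages; (void)resv;
}
```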
void mi_option_set(mi_option_t option, long value) {
mi_assert(option >= 0 && option < _mi_option_last);
+ if (option < 0 || option >= _mi_option_last) return;
mi_option_desc_t* desc = &options[option];
mi_assert(desc->option == option); // index should match the option
desc->value = value;
@@ -127,13 +144,14 @@ void mi_option_set(mi_option_t option, long value) {
void mi_option_set_default(mi_option_t option, long value) {
mi_assert(option >= 0 && option < _mi_option_last);
+ if (option < 0 || option >= _mi_option_last) return;
mi_option_desc_t* desc = &options[option];
if (desc->init != INITIALIZED) {
desc->value = value;
}
}
-bool mi_option_is_enabled(mi_option_t option) {
+mi_decl_nodiscard bool mi_option_is_enabled(mi_option_t option) {
return (mi_option_get(option) != 0);
}
@@ -153,16 +171,11 @@ void mi_option_disable(mi_option_t option) {
mi_option_set_enabled(option,false);
}
-
-static void mi_out_stderr(const char* msg, void* arg) {
- UNUSED(arg);
- #ifdef _WIN32
- // on windows with redirection, the C runtime cannot handle locale dependent output
- // after the main thread closes so we use direct console output.
- if (!_mi_preloading()) { _cputs(msg); }
- #else
- fputs(msg, stderr);
- #endif
+static void mi_cdecl mi_out_stderr(const char* msg, void* arg) {
+ MI_UNUSED(arg);
+ if (msg != NULL && msg[0] != 0) {
+ _mi_prim_out_stderr(msg);
+ }
}
// Since an output function can be registered earliest in the `main`
@@ -170,25 +183,25 @@ static void mi_out_stderr(const char* msg, void* arg) {
// an output function is registered it is called immediately with
// the output up to that point.
#ifndef MI_MAX_DELAY_OUTPUT
-#define MI_MAX_DELAY_OUTPUT ((uintptr_t)(32*1024))
+#define MI_MAX_DELAY_OUTPUT ((size_t)(32*1024))
#endif
static char out_buf[MI_MAX_DELAY_OUTPUT+1];
-static _Atomic(uintptr_t) out_len;
+static _Atomic(size_t) out_len;
-static void mi_out_buf(const char* msg, void* arg) {
- UNUSED(arg);
+static void mi_cdecl mi_out_buf(const char* msg, void* arg) {
+ MI_UNUSED(arg);
if (msg==NULL) return;
if (mi_atomic_load_relaxed(&out_len)>=MI_MAX_DELAY_OUTPUT) return;
- size_t n = strlen(msg);
+ size_t n = _mi_strlen(msg);
if (n==0) return;
// claim space
- uintptr_t start = mi_atomic_add_acq_rel(&out_len, n);
+ size_t start = mi_atomic_add_acq_rel(&out_len, n);
if (start >= MI_MAX_DELAY_OUTPUT) return;
// check bound
if (start+n >= MI_MAX_DELAY_OUTPUT) {
n = MI_MAX_DELAY_OUTPUT-start-1;
}
- memcpy(&out_buf[start], msg, n);
+ _mi_memcpy(&out_buf[start], msg, n);
}
static void mi_out_buf_flush(mi_output_fun* out, bool no_more_buf, void* arg) {
@@ -207,7 +220,7 @@ static void mi_out_buf_flush(mi_output_fun* out, bool no_more_buf, void* arg) {
// Once this module is loaded, switch to this routine
// which outputs to stderr and the delayed output buffer.
-static void mi_out_buf_stderr(const char* msg, void* arg) {
+static void mi_cdecl mi_out_buf_stderr(const char* msg, void* arg) {
mi_out_stderr(msg,arg);
mi_out_buf(msg,arg);
}
@@ -236,7 +249,7 @@ void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept {
}
// add stderr to the delayed output after the module is loaded
-static void mi_add_stderr_output() {
+static void mi_add_stderr_output(void) {
mi_assert_internal(mi_out_default == NULL);
mi_out_buf_flush(&mi_out_stderr, false, NULL); // flush current contents to stderr
mi_out_default = &mi_out_buf_stderr; // and add stderr to the delayed output
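
Anything logged before an output function is registered sits in the delay buffer (up to `MI_MAX_DELAY_OUTPUT` bytes) and is replayed on registration. A sketch of redirecting mimalloc's messages to a log file:

```c
#include <mimalloc.h>
#include <stdio.h>

static void log_to_file(const char* msg, void* arg) {
  fputs(msg, (FILE*)arg);  // 'msg' is a zero-terminated chunk, not necessarily a full line
}

void output_example(void) {
  FILE* f = fopen("mimalloc.log", "w");
  if (f == NULL) return;
  mi_register_output(&log_to_file, f);  // buffered startup output is flushed here
}
```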
@@ -245,30 +258,46 @@ static void mi_add_stderr_output() {
// --------------------------------------------------------
// Messages, all end up calling `_mi_fputs`.
// --------------------------------------------------------
-static _Atomic(uintptr_t) error_count; // = 0; // when MAX_ERROR_COUNT stop emitting errors and warnings
+static _Atomic(size_t) error_count; // = 0; // when >= max_error_count stop emitting errors
+static _Atomic(size_t) warning_count; // = 0; // when >= max_warning_count stop emitting warnings
// When overriding malloc, we may recurse into mi_vfprintf if an allocation
// inside the C runtime causes another message.
+// In some cases (like on macOS) the loader already allocates, which calls
+// into mimalloc; if we then access thread locals (like `recurse`)
+// this may crash as the access may call _tlv_bootstrap that tries to
+// (recursively) invoke malloc again to allocate space for the thread local
+// variables on demand. This is why we use a _mi_preloading test on such
+// platforms. However, the C code generator may move the initial thread local
+// address load before the `if`, and we therefore split it out into a separate function.
static mi_decl_thread bool recurse = false;
-static bool mi_recurse_enter(void) {
- #ifdef MI_TLS_RECURSE_GUARD
- if (_mi_preloading()) return true;
- #endif
+static mi_decl_noinline bool mi_recurse_enter_prim(void) {
if (recurse) return false;
recurse = true;
return true;
}
-static void mi_recurse_exit(void) {
- #ifdef MI_TLS_RECURSE_GUARD
- if (_mi_preloading()) return;
- #endif
+static mi_decl_noinline void mi_recurse_exit_prim(void) {
recurse = false;
}
+static bool mi_recurse_enter(void) {
+ #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD)
+ if (_mi_preloading()) return false;
+ #endif
+ return mi_recurse_enter_prim();
+}
+
+static void mi_recurse_exit(void) {
+ #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD)
+ if (_mi_preloading()) return;
+ #endif
+ mi_recurse_exit_prim();
+}
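+// A minimal usage sketch (illustrative, not from the sources): any message
+// routine that may itself allocate brackets its work with the enter/exit
+// pair above, so a re-entrant message is silently dropped instead of
+// recursing. A hypothetical caller would look like:
+//
+//   static void my_emit(const char* msg) {
+//     if (!mi_recurse_enter()) return;     // nested call? drop the message
+//     emit_may_allocate(msg);              // may re-enter mimalloc
+//     mi_recurse_exit();
+//   }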
+
void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message) {
- if (out==NULL || (FILE*)out==stdout || (FILE*)out==stderr) { // TODO: use mi_out_stderr for stderr?
+ if (out==NULL || (void*)out==(void*)stdout || (void*)out==(void*)stderr) { // TODO: use mi_out_stderr for stderr?
if (!mi_recurse_enter()) return;
out = mi_out_get_default(&arg);
if (prefix != NULL) out(prefix, arg);
@@ -299,11 +328,22 @@ void _mi_fprintf( mi_output_fun* out, void* arg, const char* fmt, ... ) {
va_end(args);
}
+static void mi_vfprintf_thread(mi_output_fun* out, void* arg, const char* prefix, const char* fmt, va_list args) {
+ if (prefix != NULL && _mi_strnlen(prefix,33) <= 32 && !_mi_is_main_thread()) {
+ char tprefix[64];
+ snprintf(tprefix, sizeof(tprefix), "%sthread 0x%llx: ", prefix, (unsigned long long)_mi_thread_id());
+ mi_vfprintf(out, arg, tprefix, fmt, args);
+ }
+ else {
+ mi_vfprintf(out, arg, prefix, fmt, args);
+ }
+}
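+// For illustration (hypothetical call, assumed output): invoking
+//
+//   _mi_warning_message("out of memory\n");
+//
+// from a non-main thread would render roughly as
+//   mimalloc: warning: thread 0x7f3a804c0700: out of memory
+// while on the main thread the plain "mimalloc: warning: " prefix is kept.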
+
void _mi_trace_message(const char* fmt, ...) {
if (mi_option_get(mi_option_verbose) <= 1) return; // only with verbose level 2 or higher
va_list args;
va_start(args, fmt);
- mi_vfprintf(NULL, NULL, "mimalloc: ", fmt, args);
+ mi_vfprintf_thread(NULL, NULL, "mimalloc: ", fmt, args);
va_end(args);
}
@@ -316,17 +356,21 @@ void _mi_verbose_message(const char* fmt, ...) {
}
static void mi_show_error_message(const char* fmt, va_list args) {
- if (!mi_option_is_enabled(mi_option_show_errors) && !mi_option_is_enabled(mi_option_verbose)) return;
- if (mi_atomic_increment_acq_rel(&error_count) > mi_max_error_count) return;
- mi_vfprintf(NULL, NULL, "mimalloc: error: ", fmt, args);
+ if (!mi_option_is_enabled(mi_option_verbose)) {
+ if (!mi_option_is_enabled(mi_option_show_errors)) return;
+ if (mi_max_error_count >= 0 && (long)mi_atomic_increment_acq_rel(&error_count) > mi_max_error_count) return;
+ }
+ mi_vfprintf_thread(NULL, NULL, "mimalloc: error: ", fmt, args);
}
void _mi_warning_message(const char* fmt, ...) {
- if (!mi_option_is_enabled(mi_option_show_errors) && !mi_option_is_enabled(mi_option_verbose)) return;
- if (mi_atomic_increment_acq_rel(&error_count) > mi_max_error_count) return;
+ if (!mi_option_is_enabled(mi_option_verbose)) {
+ if (!mi_option_is_enabled(mi_option_show_errors)) return;
+ if (mi_max_warning_count >= 0 && (long)mi_atomic_increment_acq_rel(&warning_count) > mi_max_warning_count) return;
+ }
va_list args;
va_start(args,fmt);
- mi_vfprintf(NULL, NULL, "mimalloc: warning: ", fmt, args);
+ mi_vfprintf_thread(NULL, NULL, "mimalloc: warning: ", fmt, args);
va_end(args);
}
@@ -346,8 +390,8 @@ static mi_error_fun* volatile mi_error_handler; // = NULL
static _Atomic(void*) mi_error_arg; // = NULL
static void mi_error_default(int err) {
- UNUSED(err);
-#if (MI_DEBUG>0)
+ MI_UNUSED(err);
+#if (MI_DEBUG>0)
if (err==EFAULT) {
#ifdef _MSC_VER
__debugbreak();
@@ -390,109 +434,97 @@ void _mi_error_message(int err, const char* fmt, ...) {
// --------------------------------------------------------
// Initialize options by checking the environment
// --------------------------------------------------------
-
-static void mi_strlcpy(char* dest, const char* src, size_t dest_size) {
- dest[0] = 0;
- strncpy(dest, src, dest_size - 1);
- dest[dest_size - 1] = 0;
+char _mi_toupper(char c) {
+ if (c >= 'a' && c <= 'z') return (c - 'a' + 'A');
+ else return c;
}
-static void mi_strlcat(char* dest, const char* src, size_t dest_size) {
- strncat(dest, src, dest_size - 1);
- dest[dest_size - 1] = 0;
-}
-
-static inline int mi_strnicmp(const char* s, const char* t, size_t n) {
- if (n==0) return 0;
+int _mi_strnicmp(const char* s, const char* t, size_t n) {
+ if (n == 0) return 0;
for (; *s != 0 && *t != 0 && n > 0; s++, t++, n--) {
- if (toupper(*s) != toupper(*t)) break;
+ if (_mi_toupper(*s) != _mi_toupper(*t)) break;
}
- return (n==0 ? 0 : *s - *t);
+ return (n == 0 ? 0 : *s - *t);
}
-#if defined _WIN32
-// On Windows use GetEnvironmentVariable instead of getenv to work
-// reliably even when this is invoked before the C runtime is initialized.
-// i.e. when `_mi_preloading() == true`.
-// Note: on windows, environment names are not case sensitive.
-#include <windows.h>
-static bool mi_getenv(const char* name, char* result, size_t result_size) {
- result[0] = 0;
- size_t len = GetEnvironmentVariableA(name, result, (DWORD)result_size);
- return (len > 0 && len < result_size);
-}
-#elif !defined(MI_USE_ENVIRON) || (MI_USE_ENVIRON!=0)
-// On POSIX systems use `environ` to access environment variables
-// even before the C runtime is initialized.
-#if defined(__APPLE__) && defined(__has_include) && __has_include(<crt_externs.h>)
-#include <crt_externs.h>
-static char** mi_get_environ(void) {
- return (*_NSGetEnviron());
-}
-#else
-extern char** environ;
-static char** mi_get_environ(void) {
- return environ;
-}
-#endif
-static bool mi_getenv(const char* name, char* result, size_t result_size) {
- if (name==NULL) return false;
- const size_t len = strlen(name);
- if (len == 0) return false;
- char** env = mi_get_environ();
- if (env == NULL) return false;
- // compare up to 256 entries
- for (int i = 0; i < 256 && env[i] != NULL; i++) {
- const char* s = env[i];
- if (mi_strnicmp(name, s, len) == 0 && s[len] == '=') { // case insensitive
- // found it
- mi_strlcpy(result, s + len + 1, result_size);
- return true;
- }
+void _mi_strlcpy(char* dest, const char* src, size_t dest_size) {
+ if (dest==NULL || src==NULL || dest_size == 0) return;
+ // copy until end of src, or when dest is (almost) full
+ while (*src != 0 && dest_size > 1) {
+ *dest++ = *src++;
+ dest_size--;
}
+ // always zero terminate
+ *dest = 0;
+}
+
+void _mi_strlcat(char* dest, const char* src, size_t dest_size) {
+ if (dest==NULL || src==NULL || dest_size == 0) return;
+ // find end of string in the dest buffer
+ while (*dest != 0 && dest_size > 1) {
+ dest++;
+ dest_size--;
+ }
+ // and catenate
+ _mi_strlcpy(dest, src, dest_size);
+}
+
+size_t _mi_strlen(const char* s) {
+ if (s==NULL) return 0;
+ size_t len = 0;
+ while(s[len] != 0) { len++; }
+ return len;
+}
+
+size_t _mi_strnlen(const char* s, size_t max_len) {
+ if (s==NULL) return 0;
+ size_t len = 0;
+ while(s[len] != 0 && len < max_len) { len++; }
+ return len;
+}
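+// A small usage sketch (illustrative only) of the helpers above; note that
+// `_mi_strlcpy`/`_mi_strlcat` always zero-terminate and silently truncate,
+// so the destination is safe to print regardless of the source length:
+//
+//   char buf[8];
+//   _mi_strlcpy(buf, "mim", sizeof(buf));     // buf == "mim"
+//   _mi_strlcat(buf, "alloc!", sizeof(buf));  // buf == "mimallo" (truncated)
+//   size_t n = _mi_strlen(buf);               // n == 7
+//   size_t m = _mi_strnlen(buf, 3);           // m == 3 (bounded scan)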
+
+#ifdef MI_NO_GETENV
+static bool mi_getenv(const char* name, char* result, size_t result_size) {
+ MI_UNUSED(name);
+ MI_UNUSED(result);
+ MI_UNUSED(result_size);
return false;
}
-#else
-// fallback: use standard C `getenv` but this cannot be used while initializing the C runtime
+#else
static bool mi_getenv(const char* name, char* result, size_t result_size) {
- // cannot call getenv() when still initializing the C runtime.
- if (_mi_preloading()) return false;
- const char* s = getenv(name);
- if (s == NULL) {
- // we check the upper case name too.
- char buf[64+1];
- size_t len = strlen(name);
- if (len >= sizeof(buf)) len = sizeof(buf) - 1;
- for (size_t i = 0; i < len; i++) {
- buf[i] = toupper(name[i]);
- }
- buf[len] = 0;
- s = getenv(buf);
- }
- if (s != NULL && strlen(s) < result_size) {
- mi_strlcpy(result, s, result_size);
- return true;
- }
- else {
- return false;
- }
+ if (name==NULL || result == NULL || result_size < 64) return false;
+ return _mi_prim_getenv(name,result,result_size);
}
#endif
-static void mi_option_init(mi_option_desc_t* desc) {
+// TODO: implement ourselves to reduce dependencies on the C runtime
+#include <stdlib.h> // strtol
+#include <string.h> // strstr
+
+
+static void mi_option_init(mi_option_desc_t* desc) {
// Read option value from the environment
+ char s[64 + 1];
char buf[64+1];
- mi_strlcpy(buf, "mimalloc_", sizeof(buf));
- mi_strlcat(buf, desc->name, sizeof(buf));
- char s[64+1];
- if (mi_getenv(buf, s, sizeof(s))) {
- size_t len = strlen(s);
- if (len >= sizeof(buf)) len = sizeof(buf) - 1;
+ _mi_strlcpy(buf, "mimalloc_", sizeof(buf));
+ _mi_strlcat(buf, desc->name, sizeof(buf));
+ bool found = mi_getenv(buf, s, sizeof(s));
+ if (!found && desc->legacy_name != NULL) {
+ _mi_strlcpy(buf, "mimalloc_", sizeof(buf));
+ _mi_strlcat(buf, desc->legacy_name, sizeof(buf));
+ found = mi_getenv(buf, s, sizeof(s));
+ if (found) {
+ _mi_warning_message("environment option \"mimalloc_%s\" is deprecated -- use \"mimalloc_%s\" instead.\n", desc->legacy_name, desc->name);
+ }
+ }
+
+ if (found) {
+ size_t len = _mi_strnlen(s, sizeof(buf) - 1);
for (size_t i = 0; i < len; i++) {
- buf[i] = (char)toupper(s[i]);
+ buf[i] = _mi_toupper(s[i]);
}
buf[len] = 0;
- if (buf[0]==0 || strstr("1;TRUE;YES;ON", buf) != NULL) {
+ if (buf[0] == 0 || strstr("1;TRUE;YES;ON", buf) != NULL) {
desc->value = 1;
desc->init = INITIALIZED;
}
@@ -503,13 +535,32 @@ static void mi_option_init(mi_option_desc_t* desc) {
else {
char* end = buf;
long value = strtol(buf, &end, 10);
+ if (desc->option == mi_option_reserve_os_memory || desc->option == mi_option_arena_reserve) {
+ // this option is interpreted in KiB to prevent overflow of `long`
+ if (*end == 'K') { end++; }
+ else if (*end == 'M') { value *= MI_KiB; end++; }
+ else if (*end == 'G') { value *= MI_MiB; end++; }
+ else { value = (value + MI_KiB - 1) / MI_KiB; }
+ if (end[0] == 'I' && end[1] == 'B') { end += 2; }
+ else if (*end == 'B') { end++; }
+ }
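+ // Worked example (illustrative): with MIMALLOC_RESERVE_OS_MEMORY=1GiB the
+ // uppercased string parses as value==1 with `end` at 'G', so value becomes
+ // 1*MI_MiB == 1048576 (in KiB) and the trailing "IB" is consumed; a bare
+ // "8000" (no suffix) is rounded up to (8000 + 1023)/1024 == 8 KiB.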
if (*end == 0) {
desc->value = value;
desc->init = INITIALIZED;
}
else {
- _mi_warning_message("environment option mimalloc_%s has an invalid value: %s\n", desc->name, buf);
+ // set `init` first to avoid recursion through _mi_warning_message on mimalloc_verbose.
desc->init = DEFAULTED;
+ if (desc->option == mi_option_verbose && desc->value == 0) {
+ // if the 'mimalloc_verbose' env var has a bogus value we'd never know
+ // (since the value defaults to 'off') so in that case briefly enable verbose
+ desc->value = 1;
+ _mi_warning_message("environment option mimalloc_%s has an invalid value.\n", desc->name);
+ desc->value = 0;
+ }
+ else {
+ _mi_warning_message("environment option mimalloc_%s has an invalid value.\n", desc->name);
+ }
}
}
mi_assert_internal(desc->init != UNINIT);
diff --git a/src/os.c b/src/os.c
index 3bac953..b4f02ba 100644
--- a/src/os.c
+++ b/src/os.c
@@ -1,584 +1,218 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
-#include "AuroraEnvironment.h"
-
-#ifndef _DEFAULT_SOURCE
-#define _DEFAULT_SOURCE // ensure mmap flags are defined
-#endif
-
-#if defined(__sun)
-// illumos provides new mman.h api when any of these are defined
-// otherwise the old api based on caddr_t which predates the void pointers one.
-// stock solaris provides only the former, chose to atomically to discard those
-// flags only here rather than project wide tough.
-#undef _XOPEN_SOURCE
-#undef _POSIX_C_SOURCE
-#endif
#include "mimalloc.h"
-#include "mimalloc-internal.h"
-#include "mimalloc-atomic.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h"
-#if defined(_AURORA_MEM_STRICT)
-#include <stdlib.h> // std allocation
-#endif
-
-#include <string.h> // strerror
-
-#ifdef _MSC_VER
-#pragma warning(disable:4996) // strerror
-#endif
-
-
-#if defined(_WIN32)
-#include <windows.h>
-#elif defined(__wasi__)
-// stdlib.h is all we need, and has already been included in mimalloc.h
-#else
-#include <sys/mman.h> // mmap
-#include <unistd.h> // sysconf
-#if defined(__linux__)
-#include <features.h>
-#if defined(__GLIBC__)
-#include <linux/mman.h> // linux mmap flags
-#else
-#include <sys/mman.h>
-#endif
-#endif
-#if defined(__APPLE__)
-#include <TargetConditionals.h>
-#if !TARGET_IOS_IPHONE && !TARGET_IOS_SIMULATOR
-#include <mach/vm_statistics.h>
-#endif
-#endif
-#if defined(__HAIKU__)
-#define madvise posix_madvise
-#define MADV_DONTNEED POSIX_MADV_DONTNEED
-#endif
-#endif
/* -----------------------------------------------------------
Initialization.
On windows initializes support for aligned allocation and
large OS pages (if MIMALLOC_LARGE_OS_PAGES is true).
----------------------------------------------------------- */
-bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
+
+static mi_os_mem_config_t mi_os_mem_config = {
+ 4096, // page size
+ 0, // large page size (usually 2MiB)
+ 4096, // allocation granularity
+ true, // has overcommit? (if true we use MAP_NORESERVE on mmap systems)
+ false, // must free whole? (on mmap systems we can free anywhere in a mapped range, but on Windows we must free the entire span)
+ true // has virtual reserve? (if true we can reserve virtual address space without using commit or physical memory)
+};
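+// For illustration (assumed values; the field names follow the comment
+// order above and are filled in by each platform's _mi_prim_mem_init):
+// a typical Linux primitive would end up with
+//
+//   { 4096, 2*MI_MiB, 4096, true /*overcommit*/,
+//     false /*must_free_whole*/, true /*virtual reserve*/ }
+//
+// while Windows typically reports a 64KiB allocation granularity and
+// must-free-whole behavior, since VirtualFree can only release whole
+// VirtualAlloc'd regions.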
+
+bool _mi_os_has_overcommit(void) {
+ return mi_os_mem_config.has_overcommit;
+}
+
+bool _mi_os_has_virtual_reserve(void) {
+ return mi_os_mem_config.has_virtual_reserve;
+}
+
+
+// OS (small) page size
+size_t _mi_os_page_size(void) {
+ return mi_os_mem_config.page_size;
+}
+
+// if large OS pages are supported (2 or 4MiB), then return the size, otherwise return the small page size (4KiB)
+size_t _mi_os_large_page_size(void) {
+ return (mi_os_mem_config.large_page_size != 0 ? mi_os_mem_config.large_page_size : _mi_os_page_size());
+}
+
+bool _mi_os_use_large_page(size_t size, size_t alignment) {
+ // if we have access, check the size and alignment requirements
+ if (mi_os_mem_config.large_page_size == 0 || !mi_option_is_enabled(mi_option_allow_large_os_pages)) return false;
+ return ((size % mi_os_mem_config.large_page_size) == 0 && (alignment % mi_os_mem_config.large_page_size) == 0);
+}
+
+// round to a good OS allocation size (bounded by max 12.5% waste)
+size_t _mi_os_good_alloc_size(size_t size) {
+ size_t align_size;
+ if (size < 512*MI_KiB) align_size = _mi_os_page_size();
+ else if (size < 2*MI_MiB) align_size = 64*MI_KiB;
+ else if (size < 8*MI_MiB) align_size = 256*MI_KiB;
+ else if (size < 32*MI_MiB) align_size = 1*MI_MiB;
+ else align_size = 4*MI_MiB;
+ if mi_unlikely(size >= (SIZE_MAX - align_size)) return size; // possible overflow?
+ return _mi_align_up(size, align_size);
+}
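+// Worked example (illustrative): the waste bound follows from each alignment
+// being at most 1/8th of its bracket's lower size limit, e.g.
+//
+//   _mi_os_good_alloc_size(100*MI_KiB) == 100*MI_KiB  (already page aligned)
+//   _mi_os_good_alloc_size(513*MI_KiB) == 576*MI_KiB  (rounded to 64KiB)
+//   _mi_os_good_alloc_size(33*MI_MiB)  == 36*MI_MiB   (rounded to 4MiB)
+//
+// worst case: just over 512KiB rounds up by <64KiB, i.e. <12.5% waste.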
+
+void _mi_os_init(void) {
+ _mi_prim_mem_init(&mi_os_mem_config);
+}
+
+
+/* -----------------------------------------------------------
+ Util
+-------------------------------------------------------------- */
+bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
+bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats);
static void* mi_align_up_ptr(void* p, size_t alignment) {
return (void*)_mi_align_up((uintptr_t)p, alignment);
}
-static uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
- return (sz / alignment) * alignment;
-}
-
static void* mi_align_down_ptr(void* p, size_t alignment) {
return (void*)_mi_align_down((uintptr_t)p, alignment);
}
-// page size (initialized properly in `os_init`)
-static size_t os_page_size = 4096;
-
-// minimal allocation granularity
-static size_t os_alloc_granularity = 4096;
-
-// if non-zero, use large page allocation
-static size_t large_os_page_size = 0;
-
-// OS (small) page size
-size_t _mi_os_page_size() {
- return os_page_size;
-}
-
-// if large OS pages are supported (2 or 4MiB), then return the size, otherwise return the small page size (4KiB)
-size_t _mi_os_large_page_size() {
- return (large_os_page_size != 0 ? large_os_page_size : _mi_os_page_size());
-}
-
-static bool use_large_os_page(size_t size, size_t alignment) {
- // if we have access, check the size and alignment requirements
- if (large_os_page_size == 0 || !mi_option_is_enabled(mi_option_large_os_pages)) return false;
- return ((size % large_os_page_size) == 0 && (alignment % large_os_page_size) == 0);
-}
-
-// round to a good OS allocation size (bounded by max 12.5% waste)
-size_t _mi_os_good_alloc_size(size_t size) {
- size_t align_size;
- if (size < 512*KiB) align_size = _mi_os_page_size();
- else if (size < 2*MiB) align_size = 64*KiB;
- else if (size < 8*MiB) align_size = 256*KiB;
- else if (size < 32*MiB) align_size = 1*MiB;
- else align_size = 4*MiB;
- if (size >= (SIZE_MAX - align_size)) return size; // possible overflow?
- return _mi_align_up(size, align_size);
-}
-
-#if defined(_WIN32)
-// We use VirtualAlloc2 for aligned allocation, but it is only supported on Windows 10 and Windows Server 2016.
-// So, we need to look it up dynamically to run on older systems. (use __stdcall for 32-bit compatibility)
-// NtAllocateVirtualAllocEx is used for huge OS page allocation (1GiB)
-//
-// We hide MEM_EXTENDED_PARAMETER to compile with older SDK's.
-#include <winternl.h>
-typedef PVOID (__stdcall *PVirtualAlloc2)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, /* MEM_EXTENDED_PARAMETER* */ void*, ULONG);
-typedef NTSTATUS (__stdcall *PNtAllocateVirtualMemoryEx)(HANDLE, PVOID*, SIZE_T*, ULONG, ULONG, /* MEM_EXTENDED_PARAMETER* */ PVOID, ULONG);
-static PVirtualAlloc2 pVirtualAlloc2 = NULL;
-static PNtAllocateVirtualMemoryEx pNtAllocateVirtualMemoryEx = NULL;
-
-// Similarly, GetNumaProcessorNodeEx is only supported since Windows 7
-#if (_WIN32_WINNT < 0x601) // before Win7
-typedef struct _PROCESSOR_NUMBER { WORD Group; BYTE Number; BYTE Reserved; } PROCESSOR_NUMBER, *PPROCESSOR_NUMBER;
-#endif
-typedef VOID (__stdcall *PGetCurrentProcessorNumberEx)(PPROCESSOR_NUMBER ProcNumber);
-typedef BOOL (__stdcall *PGetNumaProcessorNodeEx)(PPROCESSOR_NUMBER Processor, PUSHORT NodeNumber);
-typedef BOOL (__stdcall* PGetNumaNodeProcessorMaskEx)(USHORT Node, PGROUP_AFFINITY ProcessorMask);
-static PGetCurrentProcessorNumberEx pGetCurrentProcessorNumberEx = NULL;
-static PGetNumaProcessorNodeEx pGetNumaProcessorNodeEx = NULL;
-static PGetNumaNodeProcessorMaskEx pGetNumaNodeProcessorMaskEx = NULL;
-static DWORD(__stdcall *pGetCurrentProcessorNumber)() = NULL;
-static DWORD(__stdcall* pGetLargePageMinimum)() = NULL;
-static BOOL(__stdcall* pGetNumaHighestNodeNumber)(PULONG HighestNodeNumber) = NULL;
-static BOOL(__stdcall* pGetNumaProcessorNode)(UCHAR Processor, PUCHAR NodeNumber) = NULL;
-static BOOL(__stdcall* pGetNumaNodeProcessorMask)(UCHAR Processor, PULONGLONG ProcessorMask) = NULL;
-static BOOL(__stdcall* pLookupPrivilegeValueW)(LPCWSTR lpSystemName,
- LPCWSTR lpName,
- PLUID lpLuid) = NULL;
-static BOOL(__stdcall* pAdjustTokenPrivileges)(HANDLE TokenHandle,
- BOOL DisableAllPrivileges,
- PTOKEN_PRIVILEGES NewState,
- DWORD BufferLength,
- PTOKEN_PRIVILEGES PreviousState,
- PDWORD ReturnLength) = NULL;
-static BOOL(__stdcall* pOpenProcessToken)(
- HANDLE ProcessHandle,
- DWORD DesiredAccess,
- PHANDLE TokenHandle) = NULL;
-
-static bool mi_win_enable_large_os_pages()
-{
- if (large_os_page_size > 0) return true;
- if (!pGetLargePageMinimum) return false;
-
- // Try to see if large OS pages are supported
- // To use large pages on Windows, we first need access permission
- // Set "Lock pages in memory" permission in the group policy editor
- //
- unsigned long err = 0;
- HANDLE token = NULL;
- BOOL ok = pOpenProcessToken &&
- pOpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token);
- if (ok) {
- TOKEN_PRIVILEGES tp;
- ok = pLookupPrivilegeValueW &&
- pLookupPrivilegeValueW(NULL, L"SeLockMemoryPrivilege", &tp.Privileges[0].Luid);
- if (ok) {
- tp.PrivilegeCount = 1;
- tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
- ok = pAdjustTokenPrivileges &&
- pAdjustTokenPrivileges(token, FALSE, &tp, 0, (PTOKEN_PRIVILEGES)NULL, 0);
- if (ok) {
- err = GetLastError();
- ok = (err == ERROR_SUCCESS);
- if (ok) {
- large_os_page_size = pGetLargePageMinimum();
- }
- }
- }
- CloseHandle(token);
- }
- if (!ok) {
- if (err == 0) err = GetLastError();
- _mi_warning_message("cannot enable large OS page support, error %lu\n", err);
- }
- return (ok!=0);
-}
-
-void _mi_os_init(void) {
- // get the page size
- SYSTEM_INFO si;
- GetSystemInfo(&si);
- if (si.dwPageSize > 0) os_page_size = si.dwPageSize;
- if (si.dwAllocationGranularity > 0) os_alloc_granularity = si.dwAllocationGranularity;
- // get the VirtualAlloc2 function
- HINSTANCE hDll;
- hDll = LoadLibrary(TEXT("kernelbase.dll"));
- if (hDll != NULL) {
- // use VirtualAlloc2FromApp if possible as it is available to Windows store apps
- pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2FromApp");
- if (pVirtualAlloc2==NULL) pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2");
- }
- // NtAllocateVirtualMemoryEx is used for huge page allocation
- hDll = LoadLibrary(TEXT("ntdll.dll"));
- if (hDll != NULL) {
- pNtAllocateVirtualMemoryEx = (PNtAllocateVirtualMemoryEx)(void (*)(void))GetProcAddress(hDll, "NtAllocateVirtualMemoryEx");
- }
- // Try to use Win7+ numa API
- hDll = LoadLibrary(TEXT("kernel32.dll"));
- if (hDll != NULL) {
- pGetCurrentProcessorNumberEx = (PGetCurrentProcessorNumberEx)(void (*)(void))GetProcAddress(hDll, "GetCurrentProcessorNumberEx");
- pGetNumaProcessorNodeEx = (PGetNumaProcessorNodeEx)(void (*)(void))GetProcAddress(hDll, "GetNumaProcessorNodeEx");
- pGetNumaNodeProcessorMaskEx = (PGetNumaNodeProcessorMaskEx)(void (*)(void))GetProcAddress(hDll, "GetNumaNodeProcessorMaskEx");
- pGetCurrentProcessorNumber = (DWORD(__stdcall*)())GetProcAddress(hDll, "GetCurrentProcessorNumber");
- pGetLargePageMinimum = (DWORD(__cdecl*)())GetProcAddress(hDll, "GetLargePageMinimum");
- pGetNumaHighestNodeNumber = (BOOL(__stdcall*)(PULONG))GetProcAddress(hDll, "GetNumaHighestNodeNumber");
- pGetNumaProcessorNode = (BOOL(__stdcall*)(UCHAR, PUCHAR))GetProcAddress(hDll, "GetNumaProcessorNode");
- pGetNumaNodeProcessorMask = (BOOL(__stdcall*)(UCHAR, PULONGLONG))GetProcAddress(hDll, "GetNumaNodeProcessorMask");
- }
-
- hDll = LoadLibrary(TEXT("Advapi32.dll"));
- if (hDll != NULL) {
- pLookupPrivilegeValueW = (BOOL(__stdcall*)(LPCWSTR,
- LPCWSTR,
- PLUID))GetProcAddress(hDll, "LookupPrivilegeValueW");
-
- pAdjustTokenPrivileges = (BOOL(__stdcall*)(HANDLE,
- BOOL,
- PTOKEN_PRIVILEGES,
- DWORD,
- PTOKEN_PRIVILEGES,
- PDWORD))GetProcAddress(hDll, "AdjustTokenPrivileges");
-
- pOpenProcessToken = (BOOL(__stdcall*)(
- HANDLE ,
- DWORD,
- PHANDLE))GetProcAddress(hDll, "OpenProcessToken");
- }
- if (mi_option_is_enabled(mi_option_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
- mi_win_enable_large_os_pages();
- }
-}
-#elif defined(__wasi__)
-void _mi_os_init() {
- os_page_size = 0x10000; // WebAssembly has a fixed page size: 64KB
- os_alloc_granularity = 16;
-}
-#else
-void _mi_os_init() {
- // get the page size
- long result = sysconf(_SC_PAGESIZE);
- if (result > 0) {
- os_page_size = (size_t)result;
- os_alloc_granularity = os_page_size;
- }
- large_os_page_size = 2*MiB; // TODO: can we query the OS for this?
-}
-#endif
-
/* -----------------------------------------------------------
- Raw allocation on Windows (VirtualAlloc) and Unix (mmap).
------------------------------------------------------------ */
-
-static bool mi_os_mem_free(void* addr, size_t size, bool was_committed, mi_stats_t* stats)
-{
- if (addr == NULL || size == 0) return true; // || _mi_os_is_huge_reserved(addr)
- bool err = false;
-#if defined(_AURORA_MEM_STRICT)
- free(addr);
- err = 0;
-#elif defined(_WIN32)
- err = (VirtualFree(addr, 0, MEM_RELEASE) == 0);
-#elif defined(__wasi__)
- err = 0; // WebAssembly's heap cannot be shrunk
-#else
- err = (munmap(addr, size) == -1);
-#endif
- if (was_committed) _mi_stat_decrease(&stats->committed, size);
- _mi_stat_decrease(&stats->reserved, size);
- if (err) {
- _mi_warning_message("munmap failed: %s, addr 0x%8li, size %lu\n", strerror(errno), (size_t)addr, size);
- return false;
- }
- else {
- return true;
- }
-}
-
-static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size);
-
-#if defined(_AURORA_MEM_STRICT)
-
-#elif defined(_WIN32)
-static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment, DWORD flags) {
-#if (MI_INTPTR_SIZE >= 8)
- // on 64-bit systems, try to use the virtual address area after 4TiB for 4MiB aligned allocations
- void* hint;
- if (addr == NULL && (hint = mi_os_get_aligned_hint(try_alignment,size)) != NULL) {
- void* p = VirtualAlloc(hint, size, flags, PAGE_READWRITE);
- if (p != NULL) return p;
- DWORD err = GetLastError();
- if (err != ERROR_INVALID_ADDRESS && // If linked with multiple instances, we may have tried to allocate at an already allocated area (#210)
- err != ERROR_INVALID_PARAMETER) { // Windows7 instability (#230)
- return NULL;
- }
- // fall through
- }
-#endif
-#if defined(MEM_EXTENDED_PARAMETER_TYPE_BITS)
- // on modern Windows try to use VirtualAlloc2 for aligned allocation
- if (try_alignment > 0 && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) {
- MEM_ADDRESS_REQUIREMENTS reqs = { 0, 0, 0 };
- reqs.Alignment = try_alignment;
- MEM_EXTENDED_PARAMETER param = { {0, 0}, {0} };
- param.Type = MemExtendedParameterAddressRequirements;
- param.Pointer = &reqs;
- return (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, &param, 1);
- }
-#endif
- // last resort
- return VirtualAlloc(addr, size, flags, PAGE_READWRITE);
-}
-
-static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only, bool allow_large, bool* is_large) {
- mi_assert_internal(!(large_only && !allow_large));
- static _Atomic(uintptr_t) large_page_try_ok; // = 0;
- void* p = NULL;
- if ((large_only || use_large_os_page(size, try_alignment))
- && allow_large && (flags&MEM_COMMIT)!=0 && (flags&MEM_RESERVE)!=0) {
- uintptr_t try_ok = mi_atomic_load_acquire(&large_page_try_ok);
- if (!large_only && try_ok > 0) {
- // if a large page allocation fails, it seems the calls to VirtualAlloc get very expensive.
- // therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times.
- mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1);
- }
- else {
- // large OS pages must always reserve and commit.
- *is_large = true;
- p = mi_win_virtual_allocx(addr, size, try_alignment, flags | MEM_LARGE_PAGES);
- if (large_only) return p;
- // fall back to non-large page allocation on error (`p == NULL`).
- if (p == NULL) {
- mi_atomic_store_release(&large_page_try_ok,10UL); // on error, don't try again for the next N allocations
- }
- }
- }
- if (p == NULL) {
- *is_large = ((flags&MEM_LARGE_PAGES) != 0);
- p = mi_win_virtual_allocx(addr, size, try_alignment, flags);
- }
- if (p == NULL) {
- _mi_warning_message("unable to allocate OS memory (%zu bytes, error code: %i, address: %p, large only: %d, allow large: %d)\n", size, GetLastError(), addr, large_only, allow_large);
- }
- return p;
-}
-
-#elif defined(__wasi__)
-static void* mi_wasm_heap_grow(size_t size, size_t try_alignment) {
- uintptr_t base = __builtin_wasm_memory_size(0) * _mi_os_page_size();
- uintptr_t aligned_base = _mi_align_up(base, (uintptr_t) try_alignment);
- size_t alloc_size = _mi_align_up( aligned_base - base + size, _mi_os_page_size());
- mi_assert(alloc_size >= size && (alloc_size % _mi_os_page_size()) == 0);
- if (alloc_size < size) return NULL;
- if (__builtin_wasm_memory_grow(0, alloc_size / _mi_os_page_size()) == SIZE_MAX) {
- errno = ENOMEM;
- return NULL;
- }
- return (void*)aligned_base;
-}
-#else
-#define MI_OS_USE_MMAP
-static void* mi_unix_mmapx(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
- void* p = NULL;
- #if (MI_INTPTR_SIZE >= 8) && !defined(MAP_ALIGNED)
- // on 64-bit systems, use the virtual address area after 4TiB for 4MiB aligned allocations
- void* hint;
- if (addr == NULL && (hint = mi_os_get_aligned_hint(try_alignment, size)) != NULL) {
- p = mmap(hint,size,protect_flags,flags,fd,0);
- if (p==MAP_FAILED) p = NULL; // fall back to regular mmap
- }
- #else
- UNUSED(try_alignment);
- UNUSED(mi_os_get_aligned_hint);
- #endif
- if (p==NULL) {
- p = mmap(addr,size,protect_flags,flags,fd,0);
- if (p==MAP_FAILED) p = NULL;
- }
- return p;
-}
-
-static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int protect_flags, bool large_only, bool allow_large, bool* is_large) {
- void* p = NULL;
- #if !defined(MAP_ANONYMOUS)
- #define MAP_ANONYMOUS MAP_ANON
- #endif
- #if !defined(MAP_NORESERVE)
- #define MAP_NORESERVE 0
- #endif
- int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
- int fd = -1;
- #if defined(MAP_ALIGNED) // BSD
- if (try_alignment > 0) {
- size_t n = _mi_bsr(try_alignment);
- if (((size_t)1 << n) == try_alignment && n >= 12 && n <= 30) { // alignment is a power of 2 and 4096 <= alignment <= 1GiB
- flags |= MAP_ALIGNED(n);
- }
- }
- #endif
- #if defined(PROT_MAX)
- protect_flags |= PROT_MAX(PROT_READ | PROT_WRITE); // BSD
- #endif
- #if defined(VM_MAKE_TAG)
- // macOS: tracking anonymous page with a specific ID. (All up to 98 are taken officially but LLVM sanitizers had taken 99)
- int os_tag = (int)mi_option_get(mi_option_os_tag);
- if (os_tag < 100 || os_tag > 255) os_tag = 100;
- fd = VM_MAKE_TAG(os_tag);
- #endif
- if ((large_only || use_large_os_page(size, try_alignment)) && allow_large) {
- static _Atomic(uintptr_t) large_page_try_ok; // = 0;
- uintptr_t try_ok = mi_atomic_load_acquire(&large_page_try_ok);
- if (!large_only && try_ok > 0) {
- // If the OS is not configured for large OS pages, or the user does not have
- // enough permission, the `mmap` will always fail (but it might also fail for other reasons).
- // Therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times
- // to avoid too many failing calls to mmap.
- mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1);
- }
- else {
- int lflags = flags & ~MAP_NORESERVE; // using NORESERVE on huge pages seems to fail on Linux
- int lfd = fd;
- #ifdef MAP_ALIGNED_SUPER
- lflags |= MAP_ALIGNED_SUPER;
- #endif
- #ifdef MAP_HUGETLB
- lflags |= MAP_HUGETLB;
- #endif
- #ifdef MAP_HUGE_1GB
- static bool mi_huge_pages_available = true;
- if ((size % GiB) == 0 && mi_huge_pages_available) {
- lflags |= MAP_HUGE_1GB;
- }
- else
- #endif
- {
- #ifdef MAP_HUGE_2MB
- lflags |= MAP_HUGE_2MB;
- #endif
- }
- #ifdef VM_FLAGS_SUPERPAGE_SIZE_2MB
- lfd |= VM_FLAGS_SUPERPAGE_SIZE_2MB;
- #endif
- if (large_only || lflags != flags) {
- // try large OS page allocation
- *is_large = true;
- p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, lflags, lfd);
- #ifdef MAP_HUGE_1GB
- if (p == NULL && (lflags & MAP_HUGE_1GB) != 0) {
- mi_huge_pages_available = false; // don't try huge 1GiB pages again
- _mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) pages instead (error %i)\n", errno);
- lflags = ((lflags & ~MAP_HUGE_1GB) | MAP_HUGE_2MB);
- p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, lflags, lfd);
- }
- #endif
- if (large_only) return p;
- if (p == NULL) {
- mi_atomic_store_release(&large_page_try_ok, 10UL); // on error, don't try again for the next N allocations
- }
- }
- }
- }
- if (p == NULL) {
- *is_large = false;
- p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, flags, fd);
- #if defined(MADV_HUGEPAGE)
- // Many Linux systems don't allow MAP_HUGETLB but they support instead
- // transparent huge pages (THP). It is not required to call `madvise` with MADV_HUGE
- // though since properly aligned allocations will already use large pages if available
- // in that case -- in particular for our large regions (in `memory.c`).
- // However, some systems only allow THP if called with explicit `madvise`, so
- // when large OS pages are enabled for mimalloc, we call `madvise` anyway.
- if (allow_large && use_large_os_page(size, try_alignment)) {
- if (madvise(p, size, MADV_HUGEPAGE) == 0) {
- *is_large = true; // possibly
- };
- }
- #endif
- #if defined(__sun)
- if (allow_large && use_large_os_page(size, try_alignment)) {
- struct memcntl_mha cmd = {0};
- cmd.mha_pagesize = large_os_page_size;
- cmd.mha_cmd = MHA_MAPSIZE_VA;
- if (memcntl(p, size, MC_HAT_ADVISE, (caddr_t)&cmd, 0, 0) == 0) {
- *is_large = true;
- }
- }
- #endif
- }
- if (p == NULL) {
- _mi_warning_message("unable to allocate OS memory (%zu bytes, error code: %i, address: %p, large only: %d, allow large: %d)\n", size, errno, addr, large_only, allow_large);
- }
- return p;
-}
-#endif
+ aligned hinting
+-------------------------------------------------------------- */
// On 64-bit systems, we can do efficient aligned allocation by using
-// the 4TiB to 30TiB area to allocate them.
-#if (MI_INTPTR_SIZE >= 8) && (defined(_WIN32) || (defined(MI_OS_USE_MMAP) && !defined(MAP_ALIGNED)))
-static mi_decl_cache_align _Atomic(uintptr_t) aligned_base;
+// the 2TiB to 30TiB area to allocate those.
+#if (MI_INTPTR_SIZE >= 8)
+static mi_decl_cache_align _Atomic(uintptr_t) aligned_base;
+
+// Return a MI_SEGMENT_SIZE aligned address that is probably available.
+// If this returns NULL, the OS will determine the address but on some OS's that may not be
+// properly aligned which can be more costly as it needs to be adjusted afterwards.
+// For a size > 1GiB this always returns NULL in order to guarantee good ASLR randomization;
+// (otherwise an initial large allocation of say 2TiB has a 50% chance to include (known) addresses
+// in the middle of the 2TiB - 6TiB address range (see issue #372))
+
+#define MI_HINT_BASE ((uintptr_t)2 << 40) // 2TiB start
+#define MI_HINT_AREA ((uintptr_t)4 << 40) // up to 6TiB (since before win8 there is "only" 8TiB available to processes)
+#define MI_HINT_MAX ((uintptr_t)30 << 40) // wrap after 30TiB (area after 32TiB is used for huge OS pages)
+
+void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size)
+{
+ if (try_alignment <= 1 || try_alignment > MI_SEGMENT_SIZE) return NULL;
+ size = _mi_align_up(size, MI_SEGMENT_SIZE);
+ if (size > 1*MI_GiB) return NULL; // guarantee the chance of fixed valid address is at most 1/(MI_HINT_AREA / 1<<30) = 1/4096.
+ #if (MI_SECURE>0)
+ size += MI_SEGMENT_SIZE; // put in `MI_SEGMENT_SIZE` virtual gaps between hinted blocks; this splits VLA's but increases guarded areas.
+ #endif
-// Return a 4MiB aligned address that is probably available
-static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
- if (try_alignment == 0 || try_alignment > MI_SEGMENT_SIZE) return NULL;
- if ((size%MI_SEGMENT_SIZE) != 0) return NULL;
uintptr_t hint = mi_atomic_add_acq_rel(&aligned_base, size);
- if (hint == 0 || hint > ((intptr_t)30<<40)) { // try to wrap around after 30TiB (area after 32TiB is used for huge OS pages)
- uintptr_t init = ((uintptr_t)4 << 40); // start at 4TiB area
- #if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of aligned allocations unless in debug mode
- uintptr_t r = _mi_heap_random_next(mi_get_default_heap());
- init = init + (MI_SEGMENT_SIZE * ((r>>17) & 0xFFFFF)); // (randomly 20 bits)*4MiB == 0 to 4TiB
+ if (hint == 0 || hint > MI_HINT_MAX) { // wrap or initialize
+ uintptr_t init = MI_HINT_BASE;
+ #if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of aligned allocations unless in debug mode
+ uintptr_t r = _mi_heap_random_next(mi_prim_get_default_heap());
+ init = init + ((MI_SEGMENT_SIZE * ((r>>17) & 0xFFFFF)) % MI_HINT_AREA); // (randomly 20 bits)*4MiB == 0 to 4TiB
#endif
uintptr_t expected = hint + size;
mi_atomic_cas_strong_acq_rel(&aligned_base, &expected, init);
- hint = mi_atomic_add_acq_rel(&aligned_base, size); // this may still give 0 or > 30TiB but that is ok, it is a hint after all
+ hint = mi_atomic_add_acq_rel(&aligned_base, size); // this may still give 0 or > MI_HINT_MAX but that is ok, it is a hint after all
}
if (hint%try_alignment != 0) return NULL;
return (void*)hint;
}
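+// Worked example (illustrative, assuming MI_SEGMENT_SIZE == 4MiB as in the
+// comment above): successive 4MiB requests bump `aligned_base` from the
+// (possibly randomized) start, e.g.
+//
+//   0x20000000000 (2TiB), 0x20000400000, 0x20000800000, ...
+//
+// once the running hint passes MI_HINT_MAX (30TiB) a CAS re-seeds it back
+// into the 2TiB..6TiB window, and any hint not divisible by `try_alignment`
+// is discarded (NULL) so the caller falls back to an OS-chosen address.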
#else
-static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
- UNUSED(try_alignment); UNUSED(size);
+void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
+ MI_UNUSED(try_alignment); MI_UNUSED(size);
return NULL;
}
#endif
-// Primitive allocation from the OS.
-// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
-static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, mi_stats_t* stats) {
- mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
- if (size == 0) return NULL;
- if (!commit) allow_large = false;
+/* -----------------------------------------------------------
+ Free memory
+-------------------------------------------------------------- */
- void* p = NULL;
- /*
- if (commit && allow_large) {
- p = _mi_os_try_alloc_from_huge_reserved(size, try_alignment);
- if (p != NULL) {
- *is_large = true;
- return p;
+static void mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats);
+
+static void mi_os_prim_free(void* addr, size_t size, bool still_committed, mi_stats_t* tld_stats) {
+ MI_UNUSED(tld_stats);
+ mi_assert_internal((size % _mi_os_page_size()) == 0);
+ if (addr == NULL || size == 0) return; // || _mi_os_is_huge_reserved(addr)
+ int err = _mi_prim_free(addr, size);
+ if (err != 0) {
+ _mi_warning_message("unable to free OS memory (error: %d (0x%x), size: 0x%zx bytes, address: %p)\n", err, err, size, addr);
+ }
+ mi_stats_t* stats = &_mi_stats_main;
+ if (still_committed) { _mi_stat_decrease(&stats->committed, size); }
+ _mi_stat_decrease(&stats->reserved, size);
+}
+
+void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* tld_stats) {
+ if (mi_memkind_is_os(memid.memkind)) {
+ size_t csize = _mi_os_good_alloc_size(size);
+ void* base = addr;
+ // different base? (due to alignment)
+ if (memid.mem.os.base != NULL) {
+ mi_assert(memid.mem.os.base <= addr);
+ mi_assert((uint8_t*)memid.mem.os.base + memid.mem.os.alignment >= (uint8_t*)addr);
+ base = memid.mem.os.base;
+ csize += ((uint8_t*)addr - (uint8_t*)memid.mem.os.base);
+ }
+ // free it
+ if (memid.memkind == MI_MEM_OS_HUGE) {
+ mi_assert(memid.is_pinned);
+ mi_os_free_huge_os_pages(base, csize, tld_stats);
+ }
+ else {
+ mi_os_prim_free(base, csize, still_committed, tld_stats);
}
}
- */
- #if defined(_AURORA_MEM_STRICT)
- return aligned_alloc(try_alignment, size);
- #elif defined(_WIN32)
- int flags = MEM_RESERVE;
- if (commit) flags |= MEM_COMMIT;
- p = mi_win_virtual_alloc(NULL, size, try_alignment, flags, false, allow_large, is_large);
- #elif defined(__wasi__)
- *is_large = false;
- p = mi_wasm_heap_grow(size, try_alignment);
- #else
- int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
- p = mi_unix_mmap(NULL, size, try_alignment, protect_flags, false, allow_large, is_large);
- #endif
+ else {
+ // nothing to do
+ mi_assert(memid.memkind < MI_MEM_OS);
+ }
+}
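+// Worked example (illustrative, hypothetical addresses): if an aligned
+// allocation over-allocated and returned p == base + extra, the memid
+// remembers `base`, so a later free of (p, size) releases the whole region:
+//
+//   base  = 0x7f0000000000, p = 0x7f0000003000   (extra == 0x3000)
+//   csize = _mi_os_good_alloc_size(size) + 0x3000, freed from `base`.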
+
+void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* tld_stats) {
+ _mi_os_free_ex(p, size, true, memid, tld_stats);
+}
+
+
+/* -----------------------------------------------------------
+ Primitive allocation from the OS.
+-------------------------------------------------------------- */
+
+// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
+static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, mi_stats_t* stats) {
+ mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
+ mi_assert_internal(is_zero != NULL);
+ mi_assert_internal(is_large != NULL);
+ if (size == 0) return NULL;
+ if (!commit) { allow_large = false; }
+ if (try_alignment == 0) { try_alignment = 1; } // avoid 0 to ensure there will be no divide by zero when aligning
+
+ *is_zero = false;
+ void* p = NULL;
+ int err = _mi_prim_alloc(size, try_alignment, commit, allow_large, is_large, is_zero, &p);
+ if (err != 0) {
+ _mi_warning_message("unable to allocate OS memory (error: %d (0x%x), size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, size, try_alignment, commit, allow_large);
+ }
mi_stat_counter_increase(stats->mmap_calls, 1);
if (p != NULL) {
_mi_stat_increase(&stats->reserved, size);
- if (commit) { _mi_stat_increase(&stats->committed, size); }
+ if (commit) {
+ _mi_stat_increase(&stats->committed, size);
+ // seems needed for asan (or `mimalloc-test-api` fails)
+ #ifdef MI_TRACK_ASAN
+ if (*is_zero) { mi_track_mem_defined(p,size); }
+ else { mi_track_mem_undefined(p,size); }
+ #endif
+ }
}
return p;
}
@@ -586,119 +220,150 @@ static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, boo
// Primitive aligned allocation from the OS.
// This function guarantees the allocated memory is aligned.
-static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, mi_stats_t* stats) {
+static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** base, mi_stats_t* stats) {
mi_assert_internal(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0));
mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
+ mi_assert_internal(is_large != NULL);
+ mi_assert_internal(is_zero != NULL);
+ mi_assert_internal(base != NULL);
if (!commit) allow_large = false;
if (!(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0))) return NULL;
size = _mi_align_up(size, _mi_os_page_size());
// try first with a hint (this will be aligned directly on Win 10+ or BSD)
- void* p = mi_os_mem_alloc(size, alignment, commit, allow_large, is_large, stats);
+ void* p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero, stats);
if (p == NULL) return NULL;
- // if not aligned, free it, overallocate, and unmap around it
- if (((uintptr_t)p % alignment != 0)) {
- mi_os_mem_free(p, size, commit, stats);
+ // aligned already?
+ if (((uintptr_t)p % alignment) == 0) {
+ *base = p;
+ }
+ else {
+ // if not aligned, free it, overallocate, and unmap around it
+ _mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit);
+ mi_os_prim_free(p, size, commit, stats);
if (size >= (SIZE_MAX - alignment)) return NULL; // overflow
- size_t over_size = size + alignment;
+ const size_t over_size = size + alignment;
-#if defined(_WIN32) && !defined(_AURORA_MEM_STRICT)
- // over-allocate and than re-allocate exactly at an aligned address in there.
- // this may fail due to threads allocating at the same time so we
- // retry this at most 3 times before giving up.
- // (we can not decommit around the overallocation on Windows, because we can only
- // free the original pointer, not one pointing inside the area)
- int flags = MEM_RESERVE;
- if (commit) flags |= MEM_COMMIT;
- for (int tries = 0; tries < 3; tries++) {
- // over-allocate to determine a virtual memory range
- p = mi_os_mem_alloc(over_size, alignment, commit, false, is_large, stats);
- if (p == NULL) return NULL; // error
- if (((uintptr_t)p % alignment) == 0) {
- // if p happens to be aligned, just decommit the left-over area
- _mi_os_decommit((uint8_t*)p + size, over_size - size, stats);
- break;
- }
- else {
- // otherwise free and allocate at an aligned address in there
- mi_os_mem_free(p, over_size, commit, stats);
- void* aligned_p = mi_align_up_ptr(p, alignment);
- p = mi_win_virtual_alloc(aligned_p, size, alignment, flags, false, allow_large, is_large);
- if (p == aligned_p) break; // success!
- if (p != NULL) { // should not happen?
- mi_os_mem_free(p, size, commit, stats);
- p = NULL;
- }
+ if (mi_os_mem_config.must_free_whole) { // win32 VirtualAlloc cannot free parts of an allocated block
+ // over-allocate uncommitted (virtual) memory
+ p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero, stats);
+ if (p == NULL) return NULL;
+
+ // set p to the aligned part in the full region
+ // note: this is dangerous on Windows as VirtualFree needs the actual base pointer
+ // this is handled though by having the `base` field in the memid's
+ *base = p; // remember the base
+ p = mi_align_up_ptr(p, alignment);
+
+ // explicitly commit only the aligned part
+ if (commit) {
+ _mi_os_commit(p, size, NULL, stats);
}
}
-#else
- // overallocate...
- p = mi_os_mem_alloc(over_size, alignment, commit, false, is_large, stats);
- if (p == NULL) return NULL;
- // and selectively unmap parts around the over-allocated area.
- void* aligned_p = mi_align_up_ptr(p, alignment);
- size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p;
- size_t mid_size = _mi_align_up(size, _mi_os_page_size());
- size_t post_size = over_size - pre_size - mid_size;
- mi_assert_internal(pre_size < over_size && post_size < over_size && mid_size >= size);
- if (pre_size > 0) mi_os_mem_free(p, pre_size, commit, stats);
- if (post_size > 0) mi_os_mem_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats);
- // we can return the aligned pointer on `mmap` systems
- p = aligned_p;
-#endif
+ else { // mmap can free inside an allocation
+ // overallocate...
+ p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero, stats);
+ if (p == NULL) return NULL;
+
+ // and selectively unmap parts around the over-allocated area. (noop on sbrk)
+ void* aligned_p = mi_align_up_ptr(p, alignment);
+ size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p;
+ size_t mid_size = _mi_align_up(size, _mi_os_page_size());
+ size_t post_size = over_size - pre_size - mid_size;
+ mi_assert_internal(pre_size < over_size && post_size < over_size && mid_size >= size);
+ if (pre_size > 0) { mi_os_prim_free(p, pre_size, commit, stats); }
+ if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats); }
+ // we can return the aligned pointer on `mmap` (and sbrk) systems
+ p = aligned_p;
+ *base = aligned_p; // since we freed the pre part, `*base == p`.
+ }
}
- mi_assert_internal(p == NULL || (p != NULL && ((uintptr_t)p % alignment) == 0));
+ mi_assert_internal(p == NULL || (p != NULL && *base != NULL && ((uintptr_t)p % alignment) == 0));
+ return p;
+}
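+// A worked sketch (illustrative) of the mmap-style path above: asking for
+// size=64KiB at alignment=64KiB may return p=0x...1000; we then over-allocate
+// over_size=128KiB, compute aligned_p=align_up(p,64KiB), and unmap the
+// pre_size bytes before aligned_p and the post_size bytes after
+// aligned_p+mid_size, keeping exactly the aligned middle. On Windows
+// (must_free_whole) the whole reservation is kept instead and only the
+// aligned part is committed.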
+
+
+/* -----------------------------------------------------------
+ OS API: alloc and alloc_aligned
+----------------------------------------------------------- */
+
+void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* tld_stats) {
+ MI_UNUSED(tld_stats);
+ *memid = _mi_memid_none();
+ mi_stats_t* stats = &_mi_stats_main;
+ if (size == 0) return NULL;
+ size = _mi_os_good_alloc_size(size);
+ bool os_is_large = false;
+ bool os_is_zero = false;
+ void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero, stats);
+ if (p != NULL) {
+ *memid = _mi_memid_create_os(true, os_is_zero, os_is_large);
+ }
+ return p;
+}
+
+void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* tld_stats)
+{
+ MI_UNUSED(&_mi_os_get_aligned_hint); // suppress unused warnings
+ MI_UNUSED(tld_stats);
+ *memid = _mi_memid_none();
+ if (size == 0) return NULL;
+ size = _mi_os_good_alloc_size(size);
+ alignment = _mi_align_up(alignment, _mi_os_page_size());
+
+ bool os_is_large = false;
+ bool os_is_zero = false;
+ void* os_base = NULL;
+ void* p = mi_os_prim_alloc_aligned(size, alignment, commit, allow_large, &os_is_large, &os_is_zero, &os_base, &_mi_stats_main /*tld->stats*/ );
+ if (p != NULL) {
+ *memid = _mi_memid_create_os(commit, os_is_zero, os_is_large);
+ memid->mem.os.base = os_base;
+ memid->mem.os.alignment = alignment;
+ }
return p;
}
/* -----------------------------------------------------------
- OS API: alloc, free, alloc_aligned
+ OS aligned allocation with an offset. This is used
+ for large alignments > MI_ALIGNMENT_MAX. We use a large mimalloc
+ page where the object can be aligned at an offset from the start of the segment.
+ As we may need to overallocate, we need to free such pointers using `mi_free_aligned`
+ to use the actual start of the memory region.
----------------------------------------------------------- */
-void* _mi_os_alloc(size_t size, mi_stats_t* tld_stats) {
- UNUSED(tld_stats);
- mi_stats_t* stats = &_mi_stats_main;
- if (size == 0) return NULL;
- size = _mi_os_good_alloc_size(size);
- bool is_large = false;
- return mi_os_mem_alloc(size, 0, true, false, &is_large, stats);
-}
-
-void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* tld_stats) {
- UNUSED(tld_stats);
- mi_stats_t* stats = &_mi_stats_main;
- if (size == 0 || p == NULL) return;
- size = _mi_os_good_alloc_size(size);
- mi_os_mem_free(p, size, was_committed, stats);
-}
-
-void _mi_os_free(void* p, size_t size, mi_stats_t* stats) {
- _mi_os_free_ex(p, size, true, stats);
-}
-
-void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_os_tld_t* tld)
-{
- UNUSED(tld);
- if (size == 0) return NULL;
- size = _mi_os_good_alloc_size(size);
- alignment = _mi_align_up(alignment, _mi_os_page_size());
- bool allow_large = false;
- if (large != NULL) {
- allow_large = *large;
- *large = false;
+void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* tld_stats) {
+ mi_assert(offset <= MI_SEGMENT_SIZE);
+ mi_assert(offset <= size);
+ mi_assert((alignment % _mi_os_page_size()) == 0);
+ *memid = _mi_memid_none();
+ if (offset > MI_SEGMENT_SIZE) return NULL;
+ if (offset == 0) {
+ // regular aligned allocation
+ return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld_stats);
+ }
+ else {
+ // overallocate to align at an offset
+ const size_t extra = _mi_align_up(offset, alignment) - offset;
+ const size_t oversize = size + extra;
+ void* const start = _mi_os_alloc_aligned(oversize, alignment, commit, allow_large, memid, tld_stats);
+ if (start == NULL) return NULL;
+
+ void* const p = (uint8_t*)start + extra;
+ mi_assert(_mi_is_aligned((uint8_t*)p + offset, alignment));
+ // decommit the overallocation at the start
+ if (commit && extra > _mi_os_page_size()) {
+ _mi_os_decommit(start, extra, tld_stats);
+ }
+ return p;
}
- return mi_os_mem_alloc_aligned(size, alignment, commit, allow_large, (large!=NULL?large:&allow_large), &_mi_stats_main /*tld->stats*/ );
}
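+// Worked example (illustrative): for size=1MiB, alignment=64KiB, and
+// offset=8KiB we get extra = align_up(8KiB,64KiB) - 8KiB = 56KiB, so
+// oversize = 1MiB + 56KiB; the returned p = start + 56KiB then satisfies
+// (p + 8KiB) % 64KiB == 0, and the 56KiB head is decommitted again since
+// extra exceeds the OS page size.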
-
-
/* -----------------------------------------------------------
OS memory API: reset, commit, decommit, protect, unprotect.
----------------------------------------------------------- */
-
// OS page align within a given area, either conservative (pages inside the area only),
// or not (straddling pages outside the area is possible)
static void* mi_os_page_align_areax(bool conservative, void* addr, size_t size, size_t* newsize) {
@@ -723,173 +388,117 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t*
return mi_os_page_align_areax(true, addr, size, newsize);
}
-static void mi_mprotect_hint(int err) {
-#if defined(MI_OS_USE_MMAP) && (MI_SECURE>=2) // guard page around every mimalloc page
- if (err == ENOMEM) {
- _mi_warning_message("the previous warning may have been caused by a low memory map limit.\n"
- " On Linux this is controlled by the vm.max_map_count. For example:\n"
- " > sudo sysctl -w vm.max_map_count=262144\n");
+bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) {
+ MI_UNUSED(tld_stats);
+ mi_stats_t* stats = &_mi_stats_main;
+ if (is_zero != NULL) { *is_zero = false; }
+ _mi_stat_increase(&stats->committed, size); // use size for precise commit vs. decommit
+ _mi_stat_counter_increase(&stats->commit_calls, 1);
+
+ // page align range
+ size_t csize;
+ void* start = mi_os_page_align_areax(false /* conservative? */, addr, size, &csize);
+ if (csize == 0) return true;
+
+ // commit
+ bool os_is_zero = false;
+ int err = _mi_prim_commit(start, csize, &os_is_zero);
+ if (err != 0) {
+ _mi_warning_message("cannot commit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
+ return false;
}
-#else
- UNUSED(err);
-#endif
+ if (os_is_zero && is_zero != NULL) {
+ *is_zero = true;
+ mi_assert_expensive(mi_mem_is_zero(start, csize));
+ }
+ // note: the following seems required for asan (otherwise `mimalloc-test-stress` fails)
+ #ifdef MI_TRACK_ASAN
+ if (os_is_zero) { mi_track_mem_defined(start,csize); }
+ else { mi_track_mem_undefined(start,csize); }
+ #endif
+ return true;
}
-// Commit/Decommit memory.
-// Usually commit is aligned liberal, while decommit is aligned conservative.
-// (but not for the reset version where we want commit to be conservative as well)
-static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservative, bool* is_zero, mi_stats_t* stats) {
- // page align in the range, commit liberally, decommit conservative
- if (is_zero != NULL) { *is_zero = false; }
- size_t csize;
- void* start = mi_os_page_align_areax(conservative, addr, size, &csize);
- if (csize == 0) return true; // || _mi_os_is_huge_reserved(addr))
- int err = 0;
- if (commit) {
- _mi_stat_increase(&stats->committed, size); // use size for precise commit vs. decommit
- _mi_stat_counter_increase(&stats->commit_calls, 1);
- }
- else {
- _mi_stat_decrease(&stats->committed, size);
- }
+static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, mi_stats_t* tld_stats) {
+ MI_UNUSED(tld_stats);
+ mi_stats_t* stats = &_mi_stats_main;
+ mi_assert_internal(needs_recommit!=NULL);
+ _mi_stat_decrease(&stats->committed, size);
- #if defined(_AURORA_MEM_STRICT)
- // optional low level control. we can't control the MMIO from the CRT
- #elif defined(_WIN32)
- if (commit) {
- // if the memory was already committed, the call succeeds but it is not zero'd
- // *is_zero = true;
- void* p = VirtualAlloc(start, csize, MEM_COMMIT, PAGE_READWRITE);
- err = (p == start ? 0 : GetLastError());
- }
- else {
- BOOL ok = VirtualFree(start, csize, MEM_DECOMMIT);
- err = (ok ? 0 : GetLastError());
- }
- #elif defined(__wasi__)
- // WebAssembly guests can't control memory protection
- #elif defined(MAP_FIXED)
- if (!commit) {
- // use mmap with MAP_FIXED to discard the existing memory (and reduce commit charge)
- void* p = mmap(start, csize, PROT_NONE, (MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE), -1, 0);
- if (p != start) { err = errno; }
- }
- else {
- // for commit, just change the protection
- err = mprotect(start, csize, (PROT_READ | PROT_WRITE));
- if (err != 0) { err = errno; }
- }
- #else
- err = mprotect(start, csize, (commit ? (PROT_READ | PROT_WRITE) : PROT_NONE));
- if (err != 0) { err = errno; }
- #endif
+ // page align
+ size_t csize;
+ void* start = mi_os_page_align_area_conservative(addr, size, &csize);
+ if (csize == 0) return true;
+
+ // decommit
+ *needs_recommit = true;
+ int err = _mi_prim_decommit(start,csize,needs_recommit);
if (err != 0) {
- _mi_warning_message("%s error: start: %p, csize: 0x%x, err: %i\n", commit ? "commit" : "decommit", start, csize, err);
- mi_mprotect_hint(err);
+ _mi_warning_message("cannot decommit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
}
mi_assert_internal(err == 0);
return (err == 0);
}
-bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) {
- UNUSED(tld_stats);
- mi_stats_t* stats = &_mi_stats_main;
- return mi_os_commitx(addr, size, true, false /* liberal */, is_zero, stats);
-}
-
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) {
- UNUSED(tld_stats);
- mi_stats_t* stats = &_mi_stats_main;
- bool is_zero;
- return mi_os_commitx(addr, size, false, true /* conservative */, &is_zero, stats);
+ bool needs_recommit;
+ return mi_os_decommit_ex(addr, size, &needs_recommit, tld_stats);
}
-static bool mi_os_commit_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) {
- return mi_os_commitx(addr, size, true, true /* conservative */, is_zero, stats);
-}
// Signal to the OS that the address range is no longer in use
// but may be used later again. This will release physical memory
// pages and reduce swapping while keeping the memory committed.
// We page align to a conservative area inside the range to reset.
-static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats) {
+bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
// page align conservatively within the range
size_t csize;
void* start = mi_os_page_align_area_conservative(addr, size, &csize);
if (csize == 0) return true; // || _mi_os_is_huge_reserved(addr)
- if (reset) _mi_stat_increase(&stats->reset, csize);
- else _mi_stat_decrease(&stats->reset, csize);
- if (!reset) return true; // nothing to do on unreset!
+ _mi_stat_increase(&stats->reset, csize);
+ _mi_stat_counter_increase(&stats->reset_calls, 1);
- #if (MI_DEBUG>1)
- if (MI_SECURE==0) {
- memset(start, 0, csize); // pretend it is eagerly reset
- }
+ #if (MI_DEBUG>1) && !MI_SECURE && !MI_TRACK_ENABLED // && !MI_TSAN
+ memset(start, 0, csize); // pretend it is eagerly reset
#endif
- #if defined(_AURORA_MEM_STRICT)
- // optional low level control. we can't control the MMIO from the CRT
- #elif defined(_WIN32)
- // Testing shows that for us (on `malloc-large`) MEM_RESET is 2x faster than DiscardVirtualMemory
- void* p = VirtualAlloc(start, csize, MEM_RESET, PAGE_READWRITE);
- mi_assert_internal(p == start);
- #if 1
- if (p == start && start != NULL) {
- VirtualUnlock(start,csize); // VirtualUnlock after MEM_RESET removes the memory from the working set
- }
- #endif
- if (p != start) return false;
-#else
-#if defined(MADV_FREE)
- static _Atomic(uintptr_t) advice = ATOMIC_VAR_INIT(MADV_FREE);
- int err = madvise(start, csize, (int)mi_atomic_load_relaxed(&advice));
- if (err != 0 && errno == EINVAL && advice == MADV_FREE) {
- // if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on
- mi_atomic_store_release(&advice, (uintptr_t)MADV_DONTNEED);
- err = madvise(start, csize, MADV_DONTNEED);
- }
-#elif defined(__wasi__)
- int err = 0;
-#else
- int err = madvise(start, csize, MADV_DONTNEED);
-#endif
+ int err = _mi_prim_reset(start, csize);
if (err != 0) {
- _mi_warning_message("madvise reset error: start: %p, csize: 0x%x, errno: %i\n", start, csize, errno);
+ _mi_warning_message("cannot reset OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
}
- //mi_assert(err == 0);
- if (err != 0) return false;
-#endif
- return true;
+ return (err == 0);
}
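
// Sketch of the reset contract (it is what the MI_DEBUG memset above
// simulates): after a reset the pages stay committed and accessible, but
// their contents become undefined -- the OS may lazily replace them with
// zero pages. Assuming `p` points at committed memory:
//
//   ((uint8_t*)p)[0] = 42;
//   _mi_os_reset(p, size, stats);
//   uint8_t v = ((uint8_t*)p)[0];  // no fault, but v may read 42 *or* 0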
-// Signal to the OS that the address range is no longer in use
-// but may be used later again. This will release physical memory
-// pages and reduce swapping while keeping the memory committed.
-// We page align to a conservative area inside the range to reset.
-bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats) {
- UNUSED(tld_stats);
- mi_stats_t* stats = &_mi_stats_main;
- if (mi_option_is_enabled(mi_option_reset_decommits)) {
- return _mi_os_decommit(addr, size, stats);
+
+// Either resets or decommits memory; returns true if the memory needs
+// to be recommitted before it is re-used later on.
+bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats)
+{
+ if (mi_option_get(mi_option_purge_delay) < 0) return false; // is purging allowed?
+ _mi_stat_counter_increase(&stats->purge_calls, 1);
+ _mi_stat_increase(&stats->purged, size);
+
+ if (mi_option_is_enabled(mi_option_purge_decommits) && // should decommit?
+ !_mi_preloading()) // don't decommit during preloading (unsafe)
+ {
+ bool needs_recommit = true;
+ mi_os_decommit_ex(p, size, &needs_recommit, stats);
+ return needs_recommit;
}
else {
- return mi_os_resetx(addr, size, true, stats);
+ if (allow_reset) { // this may not be allowed if the range is not fully committed
+ _mi_os_reset(p, size, stats);
+ }
+ return false; // needs no recommit
}
}
-bool _mi_os_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) {
- UNUSED(tld_stats);
- mi_stats_t* stats = &_mi_stats_main;
- if (mi_option_is_enabled(mi_option_reset_decommits)) {
- return mi_os_commit_unreset(addr, size, is_zero, stats); // re-commit it (conservatively!)
- }
- else {
- *is_zero = false;
- return mi_os_resetx(addr, size, false, stats);
- }
+// Either resets or decommits memory; returns true if the memory needs
+// to be recommitted before it is re-used later on.
+bool _mi_os_purge(void* p, size_t size, mi_stats_t * stats) {
+ return _mi_os_purge_ex(p, size, true, stats);
}
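
// Illustrative caller-side sketch (names as in this file; `stats` is any
// valid mi_stats_t*, and `_mi_os_commit` is the commit wrapper whose old
// form is removed above). The boolean result distinguishes the two paths:
//
//   bool needs_recommit = _mi_os_purge(p, size, stats);
//   if (needs_recommit) {
//     bool is_zero;
//     _mi_os_commit(p, size, &is_zero, stats);  // decommit path: recommit first
//   }
//   // otherwise the reset path was taken: memory is still committed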
-
// Protect a region in memory to be not accessible.
static bool mi_os_protectx(void* addr, size_t size, bool protect) {
// page align conservatively within the range
@@ -901,22 +510,9 @@ static bool mi_os_protectx(void* addr, size_t size, bool protect) {
_mi_warning_message("cannot mprotect memory allocated in huge OS pages\n");
}
*/
- int err = 0;
-#if defined(_AURORA_MEM_STRICT)
- // optional low level control. we can't control the MMIO from the CRT
-#elif defined(_WIN32)
- DWORD oldprotect = 0;
- BOOL ok = VirtualProtect(start, csize, protect ? PAGE_NOACCESS : PAGE_READWRITE, &oldprotect);
- err = (ok ? 0 : GetLastError());
-#elif defined(__wasi__)
- err = 0;
-#else
- err = mprotect(start, csize, protect ? PROT_NONE : (PROT_READ | PROT_WRITE));
- if (err != 0) { err = errno; }
-#endif
+ int err = _mi_prim_protect(start,csize,protect);
if (err != 0) {
- _mi_warning_message("mprotect error: start: %p, csize: 0x%x, err: %i\n", start, csize, err);
- mi_mprotect_hint(err);
+ _mi_warning_message("cannot %s OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", (protect ? "protect" : "unprotect"), err, err, start, csize);
}
return (err == 0);
}
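
// Typical use is a guard page in secure mode (a sketch using the public
// wrappers below; `guard` is a page-aligned address inside a segment):
//
//   _mi_os_protect(guard, _mi_os_page_size());    // any touch now faults
//   // ... the page borders client memory ...
//   _mi_os_unprotect(guard, _mi_os_page_size());  // make it usable again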
@@ -931,121 +527,12 @@ bool _mi_os_unprotect(void* addr, size_t size) {
-bool _mi_os_shrink(void* p, size_t oldsize, size_t newsize, mi_stats_t* stats) {
- // page align conservatively within the range
- mi_assert_internal(oldsize > newsize && p != NULL);
- if (oldsize < newsize || p == NULL) return false;
- if (oldsize == newsize) return true;
-
- // oldsize and newsize should be page aligned or we cannot shrink precisely
- void* addr = (uint8_t*)p + newsize;
- size_t size = 0;
- void* start = mi_os_page_align_area_conservative(addr, oldsize - newsize, &size);
- if (size == 0 || start != addr) return false;
-
-#if defined(_WIN32) && !defined(_AURORA_MEM_STRICT)
- // we cannot shrink on windows, but we can decommit
- return _mi_os_decommit(start, size, stats);
-#else
- return mi_os_mem_free(start, size, true, stats);
-#endif
-}
-
-
/* ----------------------------------------------------------------------------
Support for allocating huge OS pages (1GiB) that are reserved up-front
and possibly associated with a specific NUMA node. (use `numa_node>=0`)
-----------------------------------------------------------------------------*/
-#define MI_HUGE_OS_PAGE_SIZE (GiB)
+#define MI_HUGE_OS_PAGE_SIZE (MI_GiB)
-#if defined(_WIN32) && (MI_INTPTR_SIZE >= 8)
-static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node)
-{
- mi_assert_internal(size%GiB == 0);
- mi_assert_internal(addr != NULL);
- const DWORD flags = MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE;
-
- mi_win_enable_large_os_pages();
-
- #if defined(MEM_EXTENDED_PARAMETER_TYPE_BITS)
- MEM_EXTENDED_PARAMETER params[3] = { {{0,0},{0}},{{0,0},{0}},{{0,0},{0}} };
- // on modern Windows try to use NtAllocateVirtualMemoryEx for 1GiB huge pages
- static bool mi_huge_pages_available = true;
- if (pNtAllocateVirtualMemoryEx != NULL && mi_huge_pages_available) {
- #ifndef MEM_EXTENDED_PARAMETER_NONPAGED_HUGE
- #define MEM_EXTENDED_PARAMETER_NONPAGED_HUGE (0x10)
- #endif
- params[0].Type = 5; // == MemExtendedParameterAttributeFlags;
- params[0].ULong64 = MEM_EXTENDED_PARAMETER_NONPAGED_HUGE;
- ULONG param_count = 1;
- if (numa_node >= 0) {
- param_count++;
- params[1].Type = MemExtendedParameterNumaNode;
- params[1].ULong = (unsigned)numa_node;
- }
- SIZE_T psize = size;
- void* base = addr;
- NTSTATUS err = (*pNtAllocateVirtualMemoryEx)(GetCurrentProcess(), &base, &psize, flags, PAGE_READWRITE, params, param_count);
- if (err == 0 && base != NULL) {
- return base;
- }
- else {
- // fall back to regular large pages
- mi_huge_pages_available = false; // don't try further huge pages
- _mi_warning_message("unable to allocate using huge (1gb) pages, trying large (2mb) pages instead (status 0x%lx)\n", err);
- }
- }
- // on modern Windows try to use VirtualAlloc2 for NUMA-aware large OS page allocation
- if (pVirtualAlloc2 != NULL && numa_node >= 0) {
- params[0].Type = MemExtendedParameterNumaNode;
- params[0].ULong = (unsigned)numa_node;
- return (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, params, 1);
- }
- #else
- UNUSED(numa_node);
- #endif
- // otherwise use regular virtual alloc on older windows
- return VirtualAlloc(addr, size, flags, PAGE_READWRITE);
-}
-
-#elif defined(MI_OS_USE_MMAP) && (MI_INTPTR_SIZE >= 8) && !defined(__HAIKU__)
-#include <sys/syscall.h>
-#ifndef MPOL_PREFERRED
-#define MPOL_PREFERRED 1
-#endif
-#if defined(SYS_mbind)
-static long mi_os_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) {
- return syscall(SYS_mbind, start, len, mode, nmask, maxnode, flags);
-}
-#else
-static long mi_os_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) {
- UNUSED(start); UNUSED(len); UNUSED(mode); UNUSED(nmask); UNUSED(maxnode); UNUSED(flags);
- return 0;
-}
-#endif
-static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node) {
- mi_assert_internal(size%GiB == 0);
- bool is_large = true;
- void* p = mi_unix_mmap(addr, size, MI_SEGMENT_SIZE, PROT_READ | PROT_WRITE, true, true, &is_large);
- if (p == NULL) return NULL;
- if (numa_node >= 0 && numa_node < 8*MI_INTPTR_SIZE) { // at most 64 nodes
- uintptr_t numa_mask = (1UL << numa_node);
- // TODO: does `mbind` work correctly for huge OS pages? should we
- // use `set_mempolicy` before calling mmap instead?
- // see:
- long err = mi_os_mbind(p, size, MPOL_PREFERRED, &numa_mask, 8*MI_INTPTR_SIZE, 0);
- if (err != 0) {
- _mi_warning_message("failed to bind huge (1gb) pages to numa node %d: %s\n", numa_node, strerror(errno));
- }
- }
- return p;
-}
-#else
-static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node) {
- UNUSED(addr); UNUSED(size); UNUSED(numa_node);
- return NULL;
-}
-#endif
#if (MI_INTPTR_SIZE >= 8)
// To ensure proper alignment, use our own area for huge OS pages
@@ -1064,10 +551,10 @@ static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) {
if (start == 0) {
// Initialize the start address after the 32TiB area
start = ((uintptr_t)32 << 40); // 32TiB virtual start address
-#if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of huge pages unless in debug mode
- uintptr_t r = _mi_heap_random_next(mi_get_default_heap());
+ #if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of huge pages unless in debug mode
+ uintptr_t r = _mi_heap_random_next(mi_prim_get_default_heap());
start = start + ((uintptr_t)MI_HUGE_OS_PAGE_SIZE * ((r>>17) & 0x0FFF)); // (randomly 12bits)*1GiB == between 0 and 4TiB
-#endif
+ #endif
}
end = start + size;
mi_assert_internal(end % MI_SEGMENT_SIZE == 0);
@@ -1078,14 +565,15 @@ static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) {
}
#else
static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) {
- UNUSED(pages);
+ MI_UNUSED(pages);
if (total_size != NULL) *total_size = 0;
return NULL;
}
#endif
// Allocate MI_SEGMENT_SIZE aligned huge pages
-void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_msecs, size_t* pages_reserved, size_t* psize) {
+void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_msecs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid) {
+ *memid = _mi_memid_none();
if (psize != NULL) *psize = 0;
if (pages_reserved != NULL) *pages_reserved = 0;
size_t size = 0;
@@ -1096,23 +584,32 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse
// We allocate one page at a time to be able to abort if it takes too long
// or to at least allocate as many as are available on the system.
mi_msecs_t start_t = _mi_clock_start();
- size_t page;
- for (page = 0; page < pages; page++) {
+ size_t page = 0;
+ bool all_zero = true;
+ while (page < pages) {
// allocate a page
+ bool is_zero = false;
void* addr = start + (page * MI_HUGE_OS_PAGE_SIZE);
- void* p = mi_os_alloc_huge_os_pagesx(addr, MI_HUGE_OS_PAGE_SIZE, numa_node);
+ void* p = NULL;
+ int err = _mi_prim_alloc_huge_os_pages(addr, MI_HUGE_OS_PAGE_SIZE, numa_node, &is_zero, &p);
+ if (!is_zero) { all_zero = false; }
+ if (err != 0) {
+ _mi_warning_message("unable to allocate huge OS page (error: %d (0x%x), address: %p, size: %zx bytes)\n", err, err, addr, MI_HUGE_OS_PAGE_SIZE);
+ break;
+ }
// Did we succeed at a contiguous address?
if (p != addr) {
// no success, issue a warning and break
if (p != NULL) {
- _mi_warning_message("could not allocate contiguous huge page %zu at %p\n", page, addr);
- _mi_os_free(p, MI_HUGE_OS_PAGE_SIZE, &_mi_stats_main);
+ _mi_warning_message("could not allocate contiguous huge OS page %zu at %p\n", page, addr);
+ mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, true, &_mi_stats_main);
}
break;
}
// success, record it
+ page++; // increase before timeout check (see issue #711)
_mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE);
_mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE);
@@ -1126,153 +623,67 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse
}
}
if (elapsed > max_msecs) {
- _mi_warning_message("huge page allocation timed out\n");
+ _mi_warning_message("huge OS page allocation timed out (after allocating %zu page(s))\n", page);
break;
}
}
}
mi_assert_internal(page*MI_HUGE_OS_PAGE_SIZE <= size);
- if (pages_reserved != NULL) *pages_reserved = page;
- if (psize != NULL) *psize = page * MI_HUGE_OS_PAGE_SIZE;
+ if (pages_reserved != NULL) { *pages_reserved = page; }
+ if (psize != NULL) { *psize = page * MI_HUGE_OS_PAGE_SIZE; }
+ if (page != 0) {
+ mi_assert(start != NULL);
+ *memid = _mi_memid_create_os(true /* is committed */, all_zero, true /* is_large */);
+ memid->memkind = MI_MEM_OS_HUGE;
+ mi_assert(memid->is_pinned);
+ #ifdef MI_TRACK_ASAN
+ if (all_zero) { mi_track_mem_defined(start,size); }
+ #endif
+ }
return (page == 0 ? NULL : start);
}
// free every huge page in a range individually (as we allocated per page)
// note: needed with VirtualAlloc but could potentially be done in one go on mmap'd systems.
-void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats) {
+static void mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats) {
if (p==NULL || size==0) return;
uint8_t* base = (uint8_t*)p;
while (size >= MI_HUGE_OS_PAGE_SIZE) {
- _mi_os_free(base, MI_HUGE_OS_PAGE_SIZE, stats);
+ mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, true, stats);
size -= MI_HUGE_OS_PAGE_SIZE;
+ base += MI_HUGE_OS_PAGE_SIZE;
}
}
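
// E.g. a 3GiB range allocated as three contiguous 1GiB pages above is
// returned with three separate `mi_os_prim_free` calls, mirroring the
// per-page allocation loop in `_mi_os_alloc_huge_os_pages`.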
/* ----------------------------------------------------------------------------
Support NUMA aware allocation
-----------------------------------------------------------------------------*/
-#ifdef _WIN32
-#if defined(AURORA_IS_64BIT)
-DWORD __declspec(dllexport) GetCurrentProcessorNumberXP(void)
-{
- return pGetCurrentProcessorNumber();
-}
-#else
-DWORD __declspec(dllexport) __declspec(naked) GetCurrentProcessorNumberXP(void)
-{
- __asm {
- mov eax, 1
- cpuid
- shr ebx, 24
- mov eax, ebx
- ret
- }
-}
-#endif
-
-static size_t mi_os_numa_nodex() {
- USHORT numa_node = 0;
- if (pGetCurrentProcessorNumberEx != NULL && pGetNumaProcessorNodeEx != NULL) {
- // Extended API is supported
- PROCESSOR_NUMBER pnum;
- (*pGetCurrentProcessorNumberEx)(&pnum);
- USHORT nnode = 0;
- BOOL ok = (*pGetNumaProcessorNodeEx)(&pnum, &nnode);
- if (ok) numa_node = nnode;
- }
- else
- {
- // Vista or earlier, use older API that is limited to 64 processors. Issue #277
- DWORD pnum = GetCurrentProcessorNumberXP();
- UCHAR nnode = 0;
- BOOL ok = pGetNumaProcessorNode &&
- pGetNumaProcessorNode((UCHAR)pnum, &nnode);
- if (ok) numa_node = nnode;
- }
- return numa_node;
-}
-
-static size_t mi_os_numa_node_countx(void) {
- ULONG numa_max = 0;
- if (pGetNumaHighestNodeNumber) {
- pGetNumaHighestNodeNumber(&numa_max);
- }
- // find the highest node number that has actual processors assigned to it. Issue #282
- while(numa_max > 0) {
- if (pGetNumaNodeProcessorMaskEx != NULL) {
- // Extended API is supported
- GROUP_AFFINITY affinity;
- if ((*pGetNumaNodeProcessorMaskEx)((USHORT)numa_max, &affinity)) {
- if (affinity.Mask != 0) break; // found the maximum non-empty node
- }
- }
- else {
- // Vista or earlier, use older API that is limited to 64 processors.
- ULONGLONG mask;
- if (pGetNumaNodeProcessorMask &&
- pGetNumaNodeProcessorMask((UCHAR)numa_max, &mask)) {
- if (mask != 0) break; // found the maximum non-empty node
- };
- }
- // max node was invalid or had no processor assigned, try again
- numa_max--;
- }
- return ((size_t)numa_max + 1);
-}
-#elif defined(__linux__)
-#include <sys/syscall.h> // getcpu
-#include <unistd.h> // access
-
-static size_t mi_os_numa_nodex(void) {
-#ifdef SYS_getcpu
- unsigned long node = 0;
- unsigned long ncpu = 0;
- long err = syscall(SYS_getcpu, &ncpu, &node, NULL);
- if (err != 0) return 0;
- return node;
-#else
- return 0;
-#endif
-}
-static size_t mi_os_numa_node_countx(void) {
- char buf[128];
- unsigned node = 0;
- for(node = 0; node < 256; node++) {
- // enumerate node entries -- todo: is there a more efficient way to do this? (but ensure there is no allocation)
- snprintf(buf, 127, "/sys/devices/system/node/node%u", node + 1);
- if (access(buf,R_OK) != 0) break;
- }
- return (node+1);
-}
-#else
-static size_t mi_os_numa_nodex(void) {
- return 0;
-}
-static size_t mi_os_numa_node_countx(void) {
- return 1;
-}
-#endif
-
-size_t _mi_numa_node_count = 0; // cache the node count
+_Atomic(size_t) _mi_numa_node_count; // = 0 // cache the node count
size_t _mi_os_numa_node_count_get(void) {
- if (mi_unlikely(_mi_numa_node_count <= 0)) {
+ size_t count = mi_atomic_load_acquire(&_mi_numa_node_count);
+ if (count <= 0) {
long ncount = mi_option_get(mi_option_use_numa_nodes); // given explicitly?
- if (ncount <= 0) ncount = (long)mi_os_numa_node_countx(); // or detect dynamically
- _mi_numa_node_count = (size_t)(ncount <= 0 ? 1 : ncount);
- _mi_verbose_message("using %zd numa regions\n", _mi_numa_node_count);
+ if (ncount > 0) {
+ count = (size_t)ncount;
+ }
+ else {
+ count = _mi_prim_numa_node_count(); // or detect dynamically
+ if (count == 0) count = 1;
+ }
+ mi_atomic_store_release(&_mi_numa_node_count, count); // save it
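+ // note: two threads may race here and both recompute the count;
+ // they store the same value, so the unconditional store is benign.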
+ _mi_verbose_message("using %zd numa regions\n", count);
}
- mi_assert_internal(_mi_numa_node_count >= 1);
- return _mi_numa_node_count;
+ return count;
}
int _mi_os_numa_node_get(mi_os_tld_t* tld) {
- UNUSED(tld);
+ MI_UNUSED(tld);
size_t numa_count = _mi_os_numa_node_count();
if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0
// never more than the node count and >= 0
- size_t numa_node = mi_os_numa_nodex();
+ size_t numa_node = _mi_prim_numa_node();
if (numa_node >= numa_count) { numa_node = numa_node % numa_count; }
return (int)numa_node;
}
diff --git a/src/page-queue.c b/src/page-queue.c
index 37719e0..cb54b37 100644
--- a/src/page-queue.c
+++ b/src/page-queue.c
@@ -1,5 +1,5 @@
/*----------------------------------------------------------------------------
-Copyright (c) 2018, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -34,70 +34,26 @@ terms of the MIT license. A copy of the license can be found in the file
static inline bool mi_page_queue_is_huge(const mi_page_queue_t* pq) {
- return (pq->block_size == (MI_LARGE_OBJ_SIZE_MAX+sizeof(uintptr_t)));
+ return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+sizeof(uintptr_t)));
}
static inline bool mi_page_queue_is_full(const mi_page_queue_t* pq) {
- return (pq->block_size == (MI_LARGE_OBJ_SIZE_MAX+(2*sizeof(uintptr_t))));
+ return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+(2*sizeof(uintptr_t))));
}
static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) {
- return (pq->block_size > MI_LARGE_OBJ_SIZE_MAX);
+ return (pq->block_size > MI_MEDIUM_OBJ_SIZE_MAX);
}
/* -----------------------------------------------------------
Bins
----------------------------------------------------------- */
-// Bit scan reverse: return the index of the highest bit.
-static inline uint8_t mi_bsr32(uint32_t x);
-
-#if defined(_MSC_VER)
-#include <intrin.h>
-static inline uint8_t mi_bsr32(uint32_t x) {
- uint32_t idx;
- _BitScanReverse((DWORD*)&idx, x);
- return (uint8_t)idx;
-}
-#elif defined(__GNUC__) || defined(__clang__)
-static inline uint8_t mi_bsr32(uint32_t x) {
- return (31 - __builtin_clz(x));
-}
-#else
-static inline uint8_t mi_bsr32(uint32_t x) {
- // de Bruijn multiplication, see
- static const uint8_t debruijn[32] = {
- 31, 0, 22, 1, 28, 23, 18, 2, 29, 26, 24, 10, 19, 7, 3, 12,
- 30, 21, 27, 17, 25, 9, 6, 11, 20, 16, 8, 5, 15, 4, 14, 13,
- };
- x |= x >> 1;
- x |= x >> 2;
- x |= x >> 4;
- x |= x >> 8;
- x |= x >> 16;
- x++;
- return debruijn[(x*0x076be629) >> 27];
-}
-#endif
-
-// Bit scan reverse: return the index of the highest bit.
-uint8_t _mi_bsr(uintptr_t x) {
- if (x == 0) return 0;
-#if MI_INTPTR_SIZE==8
- uint32_t hi = (x >> 32);
- return (hi == 0 ? mi_bsr32((uint32_t)x) : 32 + mi_bsr32(hi));
-#elif MI_INTPTR_SIZE==4
- return mi_bsr32(x);
-#else
-# error "define bsr for non-32 or 64-bit platforms"
-#endif
-}
-
// Return the bin for a given field size.
// Returns MI_BIN_HUGE if the size is too large.
// We use `wsize` for the size in "machine word sizes",
// i.e. byte size == `wsize*sizeof(void*)`.
-extern inline uint8_t _mi_bin(size_t size) {
+static inline uint8_t mi_bin(size_t size) {
size_t wsize = _mi_wsize_from_size(size);
uint8_t bin;
if (wsize <= 1) {
@@ -116,16 +72,16 @@ extern inline uint8_t _mi_bin(size_t size) {
bin = (uint8_t)wsize;
}
#endif
- else if (wsize > MI_LARGE_OBJ_WSIZE_MAX) {
+ else if (wsize > MI_MEDIUM_OBJ_WSIZE_MAX) {
bin = MI_BIN_HUGE;
}
else {
- #if defined(MI_ALIGN4W)
+ #if defined(MI_ALIGN4W)
if (wsize <= 16) { wsize = (wsize+3)&~3; } // round to 4x word sizes
#endif
wsize--;
// find the highest bit
- uint8_t b = mi_bsr32((uint32_t)wsize);
+ uint8_t b = (uint8_t)mi_bsr(wsize); // note: wsize != 0
// and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation).
// - adjust with 3 because we do not round the first 8 sizes
// which each get an exact bin
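// worked example (a sketch, 64-bit): size = 200 bytes gives wsize = 25;
// after `wsize--`, 24 has highest bit 4 and its top 3 bits (0b110)
// select the bin, so wsize 25..28 (sizes 193..224) share one bin of
// width 4 words = 32 bytes -- at most ~12.5% internal waste.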
@@ -142,14 +98,18 @@ extern inline uint8_t _mi_bin(size_t size) {
Queue of pages with free blocks
----------------------------------------------------------- */
+uint8_t _mi_bin(size_t size) {
+ return mi_bin(size);
+}
+
size_t _mi_bin_size(uint8_t bin) {
return _mi_heap_empty.pages[bin].block_size;
}
// Good size for allocation
size_t mi_good_size(size_t size) mi_attr_noexcept {
- if (size <= MI_LARGE_OBJ_SIZE_MAX) {
- return _mi_bin_size(_mi_bin(size));
+ if (size <= MI_MEDIUM_OBJ_SIZE_MAX) {
+ return _mi_bin_size(mi_bin(size));
}
else {
return _mi_align_up(size,_mi_os_page_size());
@@ -178,7 +138,7 @@ static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t*
#endif
static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
- uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : _mi_bin(page->xblock_size));
+ uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size));
mi_heap_t* heap = mi_page_heap(page);
mi_assert_internal(heap != NULL && bin <= MI_BIN_FULL);
mi_page_queue_t* pq = &heap->pages[bin];
@@ -188,7 +148,7 @@ static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) {
}
static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) {
- uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : _mi_bin(page->xblock_size));
+ uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : mi_bin(page->xblock_size));
mi_assert_internal(bin <= MI_BIN_FULL);
mi_page_queue_t* pq = &heap->pages[bin];
mi_assert_internal(mi_page_is_in_full(page) || page->xblock_size == pq->block_size);
@@ -221,9 +181,9 @@ static inline void mi_heap_queue_first_update(mi_heap_t* heap, const mi_page_que
}
else {
// find previous size; due to minimal alignment, up to 3 previous bins may need to be skipped
- uint8_t bin = _mi_bin(size);
+ uint8_t bin = mi_bin(size);
const mi_page_queue_t* prev = pq - 1;
- while( bin == _mi_bin(prev->block_size) && prev > &heap->pages[0]) {
+ while( bin == mi_bin(prev->block_size) && prev > &heap->pages[0]) {
prev--;
}
start = 1 + _mi_wsize_from_size(prev->block_size);
@@ -246,8 +206,9 @@ static bool mi_page_queue_is_empty(mi_page_queue_t* queue) {
static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
mi_assert_internal(page != NULL);
mi_assert_expensive(mi_page_queue_contains(queue, page));
- mi_assert_internal(page->xblock_size == queue->block_size || (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
+ mi_assert_internal(page->xblock_size == queue->block_size || (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) || (mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
mi_heap_t* heap = mi_page_heap(page);
+
if (page->prev != NULL) page->prev->next = page->next;
if (page->next != NULL) page->next->prev = page->prev;
if (page == queue->last) queue->last = page->prev;
@@ -268,9 +229,11 @@ static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) {
static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) {
mi_assert_internal(mi_page_heap(page) == heap);
mi_assert_internal(!mi_page_queue_contains(queue, page));
- mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
+ #if MI_HUGE_PAGE_ABANDON
+ mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
+ #endif
mi_assert_internal(page->xblock_size == queue->block_size ||
- (page->xblock_size > MI_LARGE_OBJ_SIZE_MAX && mi_page_queue_is_huge(queue)) ||
+ (page->xblock_size > MI_MEDIUM_OBJ_SIZE_MAX) ||
(mi_page_is_in_full(page) && mi_page_queue_is_full(queue)));
mi_page_set_in_full(page, mi_page_queue_is_full(queue));
@@ -296,6 +259,7 @@ static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* fro
mi_assert_internal(page != NULL);
mi_assert_expensive(mi_page_queue_contains(from, page));
mi_assert_expensive(!mi_page_queue_contains(to, page));
+
mi_assert_internal((page->xblock_size == to->block_size && page->xblock_size == from->block_size) ||
(page->xblock_size == to->block_size && mi_page_queue_is_full(from)) ||
(page->xblock_size == from->block_size && mi_page_queue_is_full(to)) ||
@@ -341,7 +305,7 @@ size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue
for (mi_page_t* page = append->first; page != NULL; page = page->next) {
// inline `mi_page_set_heap` to avoid wrong assertion during absorption;
// in this case it is ok to be delayed freeing since both "to" and "from" heap are still alive.
- mi_atomic_store_release(&page->xheap, (uintptr_t)heap);
+ mi_atomic_store_release(&page->xheap, (uintptr_t)heap);
// set the flag to delayed free (not overriding NEVER_DELAYED_FREE) which has as a
// side effect that it spins until any DELAYED_FREEING is finished. This ensures
// that after appending only the new heap will be used for delayed free operations.
diff --git a/src/page.c b/src/page.c
index cd96bb9..8ac0a71 100644
--- a/src/page.c
+++ b/src/page.c
@@ -1,5 +1,5 @@
/*----------------------------------------------------------------------------
-Copyright (c) 2018, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -7,13 +7,13 @@ terms of the MIT license. A copy of the license can be found in the file
/* -----------------------------------------------------------
The core of the allocator. Every segment contains
- pages of a {certain block size. The main function
+ pages of a certain block size. The main function
exported is `mi_malloc_generic`.
----------------------------------------------------------- */
#include "mimalloc.h"
-#include "mimalloc-internal.h"
-#include "mimalloc-atomic.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
/* -----------------------------------------------------------
Definition of page queues for each block size
@@ -30,7 +30,7 @@ terms of the MIT license. A copy of the license can be found in the file
// Index a block in a page
static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_start, size_t block_size, size_t i) {
- UNUSED(page);
+ MI_UNUSED(page);
mi_assert_internal(page != NULL);
mi_assert_internal(i <= page->reserved);
return (mi_block_t*)((uint8_t*)page_start + (i * block_size));
@@ -66,6 +66,14 @@ static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) {
if (p < start || p >= end) return false;
p = mi_block_next(page, p);
}
+#if MI_DEBUG>3 // generally too expensive to check this
+ if (page->free_is_zero) {
+ const size_t ubsize = mi_page_usable_block_size(page);
+ for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page, block)) {
+ mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t)));
+ }
+ }
+#endif
return true;
}
@@ -74,27 +82,30 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
mi_assert_internal(page->used <= page->capacity);
mi_assert_internal(page->capacity <= page->reserved);
- const size_t bsize = mi_page_block_size(page);
mi_segment_t* segment = _mi_page_segment(page);
uint8_t* start = _mi_page_start(segment,page,NULL);
- mi_assert_internal(start == _mi_segment_page_start(segment,page,bsize,NULL,NULL));
+ mi_assert_internal(start == _mi_segment_page_start(segment,page,NULL));
+ //const size_t bsize = mi_page_block_size(page);
//mi_assert_internal(start + page->capacity*page->block_size == page->top);
mi_assert_internal(mi_page_list_is_valid(page,page->free));
mi_assert_internal(mi_page_list_is_valid(page,page->local_free));
#if MI_DEBUG>3 // generally too expensive to check this
- if (page->flags.is_zero) {
- for(mi_block_t* block = page->free; block != NULL; mi_block_next(page,block)) {
- mi_assert_expensive(mi_mem_is_zero(block + 1, page->block_size - sizeof(mi_block_t)));
+ if (page->free_is_zero) {
+ const size_t ubsize = mi_page_usable_block_size(page);
+ for(mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) {
+ mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t)));
}
}
#endif
+ #if !MI_TRACK_ENABLED && !MI_TSAN
mi_block_t* tfree = mi_page_thread_free(page);
mi_assert_internal(mi_page_list_is_valid(page, tfree));
//size_t tfree_count = mi_page_list_count(page, tfree);
//mi_assert_internal(tfree_count <= page->thread_freed + 1);
+ #endif
size_t free_count = mi_page_list_count(page, page->free) + mi_page_list_count(page, page->local_free);
mi_assert_internal(page->used + free_count == page->capacity);
@@ -102,6 +113,8 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
return true;
}
+extern bool _mi_process_is_initialized; // has mi_process_init been called?
+
bool _mi_page_is_valid(mi_page_t* page) {
mi_assert_internal(mi_page_is_valid_init(page));
#if MI_SECURE
@@ -109,11 +122,15 @@ bool _mi_page_is_valid(mi_page_t* page) {
#endif
if (mi_page_heap(page)!=NULL) {
mi_segment_t* segment = _mi_page_segment(page);
- mi_assert_internal(!_mi_process_is_initialized || segment->thread_id == mi_page_heap(page)->thread_id || segment->thread_id==0);
- if (segment->page_kind != MI_PAGE_HUGE) {
+
+ mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == mi_page_heap(page)->thread_id);
+ #if MI_HUGE_PAGE_ABANDON
+ if (segment->kind != MI_SEGMENT_HUGE)
+ #endif
+ {
mi_page_queue_t* pq = mi_page_queue_of(page);
mi_assert_internal(mi_page_queue_contains(pq, page));
- mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_LARGE_OBJ_SIZE_MAX || mi_page_is_in_full(page));
+ mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page));
mi_assert_internal(mi_heap_contains_queue(mi_page_heap(page),pq));
}
}
@@ -122,14 +139,23 @@ bool _mi_page_is_valid(mi_page_t* page) {
#endif
void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) {
+ while (!_mi_page_try_use_delayed_free(page, delay, override_never)) {
+ mi_atomic_yield();
+ }
+}
+
+bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) {
mi_thread_free_t tfreex;
mi_delayed_t old_delay;
- mi_thread_free_t tfree;
+ mi_thread_free_t tfree;
+ size_t yield_count = 0;
do {
tfree = mi_atomic_load_acquire(&page->xthread_free); // note: must acquire as we can break/repeat this loop and not do a CAS;
tfreex = mi_tf_set_delayed(tfree, delay);
old_delay = mi_tf_delayed(tfree);
- if (mi_unlikely(old_delay == MI_DELAYED_FREEING)) {
+ if mi_unlikely(old_delay == MI_DELAYED_FREEING) {
+ if (yield_count >= 4) return false; // give up after 4 tries
+ yield_count++;
mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done.
// tfree = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE); // will cause CAS to busy fail
}
@@ -141,6 +167,8 @@ void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool overrid
}
} while ((old_delay == MI_DELAYED_FREEING) ||
!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
+
+ return true; // success
}
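
// Usage note: the try-variant bounds the wait (it gives up after 4 yields)
// so callers that must not block, like `_mi_heap_delayed_free_partial`
// below, can skip a contended page; `_mi_page_use_delayed_free` above
// simply retries with `mi_atomic_yield()` until it succeeds.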
/* -----------------------------------------------------------
@@ -197,11 +225,11 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
// and the local free list
if (page->local_free != NULL) {
- if (mi_likely(page->free == NULL)) {
+ if mi_likely(page->free == NULL) {
// usual case
page->free = page->local_free;
page->local_free = NULL;
- page->is_zero = false;
+ page->free_is_zero = false;
}
else if (force) {
// append -- only on shutdown (force) as this is a linear operation
@@ -213,7 +241,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
mi_block_set_next(page, tail, page->free);
page->free = page->local_free;
page->local_free = NULL;
- page->is_zero = false;
+ page->free_is_zero = false;
}
}
@@ -229,10 +257,13 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
// called from segments when reclaiming abandoned pages
void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
mi_assert_expensive(mi_page_is_valid_init(page));
+
mi_assert_internal(mi_page_heap(page) == heap);
mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE);
- mi_assert_internal(_mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
- mi_assert_internal(!page->is_reset);
+ #if MI_HUGE_PAGE_ABANDON
+ mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
+ #endif
+
// TODO: push on full queue immediately if it is full?
mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page));
mi_page_queue_push(heap, pq, page);
@@ -240,19 +271,26 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
}
// allocate a fresh page from a segment
-static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size) {
- mi_assert_internal(pq==NULL||mi_heap_contains_queue(heap, pq));
- mi_assert_internal(pq==NULL||block_size == pq->block_size);
- mi_page_t* page = _mi_segment_page_alloc(heap, block_size, &heap->tld->segments, &heap->tld->os);
+static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size, size_t page_alignment) {
+ #if !MI_HUGE_PAGE_ABANDON
+ mi_assert_internal(pq != NULL);
+ mi_assert_internal(mi_heap_contains_queue(heap, pq));
+ mi_assert_internal(page_alignment > 0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || block_size == pq->block_size);
+ #endif
+ mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments, &heap->tld->os);
if (page == NULL) {
// this may be out-of-memory, or an abandoned page was reclaimed (and in our queue)
return NULL;
}
+ mi_assert_internal(page_alignment >0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
+ mi_assert_internal(pq!=NULL || page->xblock_size != 0);
+ mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size);
// a fresh page was found, initialize it
- mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE);
- mi_page_init(heap, page, block_size, heap->tld);
- _mi_stat_increase(&heap->tld->stats.pages, 1);
- if (pq!=NULL) mi_page_queue_push(heap, pq, page); // huge pages use pq==NULL
+ const size_t full_block_size = ((pq == NULL || mi_page_queue_is_huge(pq)) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc
+ mi_assert_internal(full_block_size >= block_size);
+ mi_page_init(heap, page, full_block_size, heap->tld);
+ mi_heap_stat_increase(heap, pages, 1);
+ if (pq != NULL) { mi_page_queue_push(heap, pq, page); }
mi_assert_expensive(_mi_page_is_valid(page));
return page;
}
@@ -260,7 +298,7 @@ static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size
// Get a fresh page to use
static mi_page_t* mi_page_fresh(mi_heap_t* heap, mi_page_queue_t* pq) {
mi_assert_internal(mi_heap_contains_queue(heap, pq));
- mi_page_t* page = mi_page_fresh_alloc(heap, pq, pq->block_size);
+ mi_page_t* page = mi_page_fresh_alloc(heap, pq, pq->block_size, 0);
if (page==NULL) return NULL;
mi_assert_internal(pq->block_size==mi_page_block_size(page));
mi_assert_internal(pq==mi_page_queue(heap, mi_page_block_size(page)));
@@ -271,10 +309,18 @@ static mi_page_t* mi_page_fresh(mi_heap_t* heap, mi_page_queue_t* pq) {
Do any delayed frees
(put there by other threads if they deallocated in a full page)
----------------------------------------------------------- */
-void _mi_heap_delayed_free(mi_heap_t* heap) {
+void _mi_heap_delayed_free_all(mi_heap_t* heap) {
+ while (!_mi_heap_delayed_free_partial(heap)) {
+ mi_atomic_yield();
+ }
+}
+
+// returns true if all delayed frees were processed
+bool _mi_heap_delayed_free_partial(mi_heap_t* heap) {
// take over the list (note: no atomic exchange since it is often NULL)
mi_block_t* block = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
while (block != NULL && !mi_atomic_cas_ptr_weak_acq_rel(mi_block_t, &heap->thread_delayed_free, &block, NULL)) { /* nothing */ };
+ bool all_freed = true;
// and free them all
while(block != NULL) {
@@ -282,7 +328,9 @@ void _mi_heap_delayed_free(mi_heap_t* heap) {
// use internal free instead of regular one to keep stats etc correct
if (!_mi_free_delayed_block(block)) {
// we might already start delayed freeing while another thread has not yet
- // reset the delayed_freeing flag; in that case delay it further by reinserting.
+ // reset the delayed_freeing flag; in that case delay it further by reinserting the current block
+ // into the delayed free list
+ all_freed = false;
mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free);
do {
mi_block_set_nextx(heap, block, dfree, heap->keys);
@@ -290,6 +338,7 @@ void _mi_heap_delayed_free(mi_heap_t* heap) {
}
block = next;
}
+ return all_freed;
}
/* -----------------------------------------------------------
@@ -342,7 +391,7 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE);
mi_page_set_heap(page, NULL);
-#if MI_DEBUG>1
+#if (MI_DEBUG>1) && !MI_TRACK_ENABLED
// check there are no references left..
for (mi_block_t* block = (mi_block_t*)pheap->thread_delayed_free; block != NULL; block = mi_block_nextx(pheap, block, pheap->keys)) {
mi_assert_internal(_mi_ptr_page(block) != page);
@@ -366,9 +415,11 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
// no more aligned blocks in here
mi_page_set_has_aligned(page, false);
+ mi_heap_t* heap = mi_page_heap(page);
+
// remove from the page list
// (no need to do _mi_heap_delayed_free first as all blocks are already free)
- mi_segments_tld_t* segments_tld = &mi_page_heap(page)->tld->segments;
+ mi_segments_tld_t* segments_tld = &heap->tld->segments;
mi_page_queue_remove(pq, page);
// and free it
@@ -376,8 +427,9 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
_mi_segment_page_free(page, force, segments_tld);
}
-#define MI_MAX_RETIRE_SIZE MI_LARGE_OBJ_SIZE_MAX
-#define MI_RETIRE_CYCLES (8)
+// Retire parameters
+#define MI_MAX_RETIRE_SIZE (MI_MEDIUM_OBJ_SIZE_MAX)
+#define MI_RETIRE_CYCLES (16)
// Retire a page with no more used blocks
// Important to not retire too quickly though as new
@@ -385,11 +437,11 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
// Note: called from `mi_free` and benchmarks often
// trigger this due to freeing everything and then
// allocating again so be careful when changing this.
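// Mechanism sketch: rather than freeing at once, the page is stamped with
// a `retire_expire` count (see below) and `_mi_heap_collect_retired`
// counts it down on later allocations, freeing the page only if it is
// still completely unused when the count reaches zero.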
-void _mi_page_retire(mi_page_t* page) {
+void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
mi_assert_internal(page != NULL);
mi_assert_expensive(_mi_page_is_valid(page));
mi_assert_internal(mi_page_all_free(page));
-
+
mi_page_set_has_aligned(page, false);
// don't retire too often..
@@ -399,10 +451,10 @@ void _mi_page_retire(mi_page_t* page) {
// how to check this efficiently though...
// for now, we don't retire if it is the only page left of this size class.
mi_page_queue_t* pq = mi_page_queue_of(page);
- if (mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_is_in_full(page))) {
+ if mi_likely(page->xblock_size <= MI_MAX_RETIRE_SIZE && !mi_page_queue_is_special(pq)) { // not too large && not full or huge queue?
if (pq->last==page && pq->first==page) { // the only page in the queue?
mi_stat_counter_increase(_mi_stats_main.page_no_retire,1);
- page->retire_expire = (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
+ page->retire_expire = 1 + (page->xblock_size <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4);
mi_heap_t* heap = mi_page_heap(page);
mi_assert_internal(pq >= heap->pages);
const size_t index = pq - heap->pages;
@@ -413,7 +465,6 @@ void _mi_page_retire(mi_page_t* page) {
return; // don't free after all
}
}
-
_mi_page_free(page, pq, false);
}
@@ -458,7 +509,7 @@ void _mi_heap_collect_retired(mi_heap_t* heap, bool force) {
#define MI_MIN_SLICES (2)
static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) {
- UNUSED(stats);
+ MI_UNUSED(stats);
#if (MI_SECURE<=2)
mi_assert_internal(page->free == NULL);
mi_assert_internal(page->local_free == NULL);
@@ -516,7 +567,7 @@ static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* co
static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats)
{
- UNUSED(stats);
+ MI_UNUSED(stats);
#if (MI_SECURE <= 2)
mi_assert_internal(page->free == NULL);
mi_assert_internal(page->local_free == NULL);
@@ -548,7 +599,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
#if (MI_SECURE>0)
#define MI_MIN_EXTEND (8*MI_SECURE) // extend at least by this many
#else
-#define MI_MIN_EXTEND (1)
+#define MI_MIN_EXTEND (4)
#endif
// Extend the capacity (up to reserved) by initializing a free list
@@ -557,6 +608,7 @@ static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, co
// allocations but this did not speed up any benchmark (due to an
// extra test in malloc? or cache effects?)
static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) {
+ MI_UNUSED(tld);
mi_assert_expensive(mi_page_is_valid_init(page));
#if (MI_SECURE<=2)
mi_assert(page->free == NULL);
@@ -566,20 +618,22 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld)
if (page->capacity >= page->reserved) return;
size_t page_size;
- //uint8_t* page_start =
_mi_page_start(_mi_page_segment(page), page, &page_size);
mi_stat_counter_increase(tld->stats.pages_extended, 1);
// calculate the extend count
const size_t bsize = (page->xblock_size < MI_HUGE_BLOCK_SIZE ? page->xblock_size : page_size);
size_t extend = page->reserved - page->capacity;
+ mi_assert_internal(extend > 0);
+
size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/(uint32_t)bsize);
- if (max_extend < MI_MIN_EXTEND) max_extend = MI_MIN_EXTEND;
+ if (max_extend < MI_MIN_EXTEND) { max_extend = MI_MIN_EXTEND; }
+ mi_assert_internal(max_extend > 0);
if (extend > max_extend) {
// ensure we don't touch memory beyond the page to reduce page commit.
// the `lean` benchmark tests this. Going from 1 to 8 increases rss by 50%.
- extend = (max_extend==0 ? 1 : max_extend);
+ extend = max_extend;
}
mi_assert_internal(extend > 0 && extend + page->capacity <= page->reserved);
@@ -595,11 +649,6 @@ static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld)
// enable the new free list
page->capacity += (uint16_t)extend;
mi_stat_increase(tld->stats.page_committed, extend * bsize);
-
- // extension into zero initialized memory preserves the zero'd free list
- if (!page->is_zero_init) {
- page->is_zero = false;
- }
mi_assert_expensive(mi_page_is_valid_init(page));
}
@@ -611,17 +660,29 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
mi_assert_internal(block_size > 0);
// set fields
mi_page_set_heap(page, heap);
+ page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE); // initialize before _mi_segment_page_start
size_t page_size;
- _mi_segment_page_start(segment, page, block_size, &page_size, NULL);
- page->xblock_size = (block_size < MI_HUGE_BLOCK_SIZE ? (uint32_t)block_size : MI_HUGE_BLOCK_SIZE);
+ const void* page_start = _mi_segment_page_start(segment, page, &page_size);
+ MI_UNUSED(page_start);
+ mi_track_mem_noaccess(page_start,page_size);
+ mi_assert_internal(mi_page_block_size(page) <= page_size);
+ mi_assert_internal(page_size <= page->slice_count*MI_SEGMENT_SLICE_SIZE);
mi_assert_internal(page_size / block_size < (1L<<16));
page->reserved = (uint16_t)(page_size / block_size);
- #ifdef MI_ENCODE_FREELIST
+ mi_assert_internal(page->reserved > 0);
+ #if (MI_PADDING || MI_ENCODE_FREELIST)
page->keys[0] = _mi_heap_random_next(heap);
page->keys[1] = _mi_heap_random_next(heap);
#endif
- page->is_zero = page->is_zero_init;
-
+ page->free_is_zero = page->is_zero_init;
+ #if MI_DEBUG>2
+ if (page->is_zero_init) {
+ mi_track_mem_defined(page_start, page_size);
+ mi_assert_expensive(mi_mem_is_zero(page_start, page_size));
+ }
+ #endif
+
+ mi_assert_internal(page->is_committed);
mi_assert_internal(page->capacity == 0);
mi_assert_internal(page->free == NULL);
mi_assert_internal(page->used == 0);
@@ -630,7 +691,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
mi_assert_internal(page->prev == NULL);
mi_assert_internal(page->retire_expire == 0);
mi_assert_internal(!mi_page_has_aligned(page));
- #if (MI_ENCODE_FREELIST)
+ #if (MI_PADDING || MI_ENCODE_FREELIST)
mi_assert_internal(page->keys[0] != 0);
mi_assert_internal(page->keys[1] != 0);
#endif
@@ -650,12 +711,16 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
{
// search through the pages in "next fit" order
+ #if MI_STAT
size_t count = 0;
+ #endif
mi_page_t* page = pq->first;
while (page != NULL)
{
mi_page_t* next = page->next; // remember next
+ #if MI_STAT
count++;
+ #endif
// 0. collect freed blocks by us and other threads
_mi_page_free_collect(page, false);
@@ -680,14 +745,14 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
page = next;
} // for each page
- mi_stat_counter_increase(heap->tld->stats.searches, count);
+ mi_heap_stat_counter_increase(heap, searches, count);
if (page == NULL) {
- _mi_heap_collect_retired(heap, false); // perhaps make a page available
+ _mi_heap_collect_retired(heap, false); // perhaps make a page available?
page = mi_page_fresh(heap, pq);
if (page == NULL && first_try) {
// out-of-memory _or_ an abandoned page with free blocks was reclaimed, try once again
- page = mi_page_queue_find_free_ex(heap, pq, false);
+ page = mi_page_queue_find_free_ex(heap, pq, false);
}
}
else {
@@ -705,14 +770,17 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
mi_page_queue_t* pq = mi_page_queue(heap,size);
mi_page_t* page = pq->first;
if (page != NULL) {
- if ((MI_SECURE >= 3) && page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) {
- // in secure mode, we extend half the time to increase randomness
+ #if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness
+ if (page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) {
mi_page_extend_free(heap, page, heap->tld);
mi_assert_internal(mi_page_immediate_available(page));
}
- else {
+ else
+ #endif
+ {
_mi_page_free_collect(page,false);
}
+
if (mi_page_immediate_available(page)) {
page->retire_expire = 0;
return page; // fast path
@@ -751,30 +819,46 @@ void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noex
General allocation
----------------------------------------------------------- */
-// A huge page is allocated directly without being in a queue.
+// Large and huge page allocation.
+// Huge pages are allocated directly without being in a queue.
// Because huge pages contain just one block, and the segment contains
// just that page, we always treat them as abandoned and any thread
// that frees the block can free the whole page and segment directly.
-static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
+// Huge pages are also used if the requested alignment is very large (> MI_ALIGNMENT_MAX).
+static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) {
size_t block_size = _mi_os_good_alloc_size(size);
- mi_assert_internal(_mi_bin(block_size) == MI_BIN_HUGE);
- mi_page_t* page = mi_page_fresh_alloc(heap,NULL,block_size);
+ mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0);
+ bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX || page_alignment > 0);
+ #if MI_HUGE_PAGE_ABANDON
+ mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size));
+ #else
+ mi_page_queue_t* pq = mi_page_queue(heap, is_huge ? MI_HUGE_BLOCK_SIZE : block_size); // not block_size as that can be low if the page_alignment > 0
+ mi_assert_internal(!is_huge || mi_page_queue_is_huge(pq));
+ #endif
+ mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment);
if (page != NULL) {
- const size_t bsize = mi_page_block_size(page); // note: not `mi_page_usable_block_size` as `size` includes padding already
- mi_assert_internal(bsize >= size);
mi_assert_internal(mi_page_immediate_available(page));
- mi_assert_internal(_mi_page_segment(page)->page_kind==MI_PAGE_HUGE);
- mi_assert_internal(_mi_page_segment(page)->used==1);
- mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
- mi_page_set_heap(page, NULL);
-
- if (bsize > MI_HUGE_OBJ_SIZE_MAX) {
- _mi_stat_increase(&heap->tld->stats.giant, bsize);
- _mi_stat_counter_increase(&heap->tld->stats.giant_count, 1);
+
+ if (is_huge) {
+ mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
+ mi_assert_internal(_mi_page_segment(page)->used==1);
+ #if MI_HUGE_PAGE_ABANDON
+ mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue
+ mi_page_set_heap(page, NULL);
+ #endif
}
else {
- _mi_stat_increase(&heap->tld->stats.huge, bsize);
- _mi_stat_counter_increase(&heap->tld->stats.huge_count, 1);
+ mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE);
+ }
+
+ const size_t bsize = mi_page_usable_block_size(page); // note: not `mi_page_block_size` to account for padding
+ if (bsize <= MI_LARGE_OBJ_SIZE_MAX) {
+ mi_heap_stat_increase(heap, large, bsize);
+ mi_heap_stat_counter_increase(heap, large_count, 1);
+ }
+ else {
+ mi_heap_stat_increase(heap, huge, bsize);
+ mi_heap_stat_counter_increase(heap, huge_count, 1);
}
}
return page;
@@ -783,54 +867,57 @@ static mi_page_t* mi_huge_page_alloc(mi_heap_t* heap, size_t size) {
// Allocate a page
// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
-static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size) mi_attr_noexcept {
+static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignment) mi_attr_noexcept {
// huge allocation?
const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
- if (mi_unlikely(req_size > (MI_LARGE_OBJ_SIZE_MAX - MI_PADDING_SIZE) )) {
- if (mi_unlikely(req_size > PTRDIFF_MAX)) { // we don't allocate more than PTRDIFF_MAX (see )
+ if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) {
+ if mi_unlikely(req_size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see )
_mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size);
return NULL;
}
else {
- return mi_huge_page_alloc(heap,size);
+ return mi_large_huge_page_alloc(heap,size,huge_alignment);
}
}
else {
// otherwise find a page with free blocks in our size segregated queues
- mi_assert_internal(size >= MI_PADDING_SIZE);
+ #if MI_PADDING
+ mi_assert_internal(size >= MI_PADDING_SIZE);
+ #endif
return mi_find_free_page(heap, size);
}
}
// Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed.
// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed.
-void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
+// The `huge_alignment` is normally 0 but is set to a multiple of MI_SEGMENT_SIZE for
+// very large requested alignments in which case we use a huge segment.
+void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept
{
mi_assert_internal(heap != NULL);
// initialize if necessary
- if (mi_unlikely(!mi_heap_is_initialized(heap))) {
- mi_thread_init(); // calls `_mi_heap_init` in turn
- heap = mi_get_default_heap();
- if (mi_unlikely(!mi_heap_is_initialized(heap))) { return NULL; }
+ if mi_unlikely(!mi_heap_is_initialized(heap)) {
+ heap = mi_heap_get_default(); // calls mi_thread_init
+ if mi_unlikely(!mi_heap_is_initialized(heap)) { return NULL; }
}
mi_assert_internal(mi_heap_is_initialized(heap));
// call potential deferred free routines
_mi_deferred_free(heap, false);
- // free delayed frees from other threads
- _mi_heap_delayed_free(heap);
+ // free delayed frees from other threads (but skip contended ones)
+ _mi_heap_delayed_free_partial(heap);
// find (or allocate) a page of the right size
- mi_page_t* page = mi_find_page(heap, size);
- if (mi_unlikely(page == NULL)) { // first time out of memory, try to collect and retry the allocation once more
+ mi_page_t* page = mi_find_page(heap, size, huge_alignment);
+ if mi_unlikely(page == NULL) { // first time out of memory, try to collect and retry the allocation once more
mi_heap_collect(heap, true /* force */);
- page = mi_find_page(heap, size);
+ page = mi_find_page(heap, size, huge_alignment);
}
- if (mi_unlikely(page == NULL)) { // out of memory
- const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
+ if mi_unlikely(page == NULL) { // out of memory
+ const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size`
_mi_error_message(ENOMEM, "unable to allocate memory (%zu bytes)\n", req_size);
return NULL;
}
@@ -838,6 +925,15 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size) mi_attr_noexcept
mi_assert_internal(mi_page_immediate_available(page));
mi_assert_internal(mi_page_block_size(page) >= size);
- // and try again, this time succeeding! (i.e. this should never recurse)
- return _mi_page_malloc(heap, page, size);
+ // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc)
+ if mi_unlikely(zero && page->xblock_size == 0) {
+ // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case.
+ void* p = _mi_page_malloc(heap, page, size, false);
+ mi_assert_internal(p != NULL);
+ _mi_memzero_aligned(p, mi_page_usable_block_size(page));
+ return p;
+ }
+ else {
+ return _mi_page_malloc(heap, page, size, zero);
+ }
}
diff --git a/src/prim/osx/alloc-override-zone.c b/src/prim/osx/alloc-override-zone.c
new file mode 100644
index 0000000..0e0a99d
--- /dev/null
+++ b/src/prim/osx/alloc-override-zone.c
@@ -0,0 +1,458 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+
+#if defined(MI_MALLOC_OVERRIDE)
+
+#if !defined(__APPLE__)
+#error "this file should only be included on macOS"
+#endif
+
+/* ------------------------------------------------------
+ Override system malloc on macOS
+ This is done through the malloc zone interface.
+ It seems most robust in combination with interposing,
+ since otherwise we may get zone errors, as there could
+ be allocations done by the time we take over the
+ zone.
+------------------------------------------------------ */
+
+#include <AvailabilityMacros.h>
+#include <malloc/malloc.h>
+#include <string.h> // memset
+#include <stdio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6)
+// only available from OSX 10.6
+extern malloc_zone_t* malloc_default_purgeable_zone(void) __attribute__((weak_import));
+#endif
+
+/* ------------------------------------------------------
+ malloc zone members
+------------------------------------------------------ */
+
+static size_t zone_size(malloc_zone_t* zone, const void* p) {
+ MI_UNUSED(zone);
+ if (!mi_is_in_heap_region(p)){ return 0; } // not our pointer, bail out
+ return mi_usable_size(p);
+}
+
+static void* zone_malloc(malloc_zone_t* zone, size_t size) {
+ MI_UNUSED(zone);
+ return mi_malloc(size);
+}
+
+static void* zone_calloc(malloc_zone_t* zone, size_t count, size_t size) {
+ MI_UNUSED(zone);
+ return mi_calloc(count, size);
+}
+
+static void* zone_valloc(malloc_zone_t* zone, size_t size) {
+ MI_UNUSED(zone);
+ return mi_malloc_aligned(size, _mi_os_page_size());
+}
+
+static void zone_free(malloc_zone_t* zone, void* p) {
+ MI_UNUSED(zone);
+ mi_cfree(p);
+}
+
+static void* zone_realloc(malloc_zone_t* zone, void* p, size_t newsize) {
+ MI_UNUSED(zone);
+ return mi_realloc(p, newsize);
+}
+
+static void* zone_memalign(malloc_zone_t* zone, size_t alignment, size_t size) {
+ MI_UNUSED(zone);
+ return mi_malloc_aligned(size,alignment);
+}
+
+static void zone_destroy(malloc_zone_t* zone) {
+ MI_UNUSED(zone);
+ // todo: ignore for now?
+}
+
+static unsigned zone_batch_malloc(malloc_zone_t* zone, size_t size, void** ps, unsigned count) {
+ size_t i;
+ for (i = 0; i < count; i++) {
+ ps[i] = zone_malloc(zone, size);
+ if (ps[i] == NULL) break;
+ }
+ return i;
+}
+
+static void zone_batch_free(malloc_zone_t* zone, void** ps, unsigned count) {
+ for(size_t i = 0; i < count; i++) {
+ zone_free(zone, ps[i]);
+ ps[i] = NULL;
+ }
+}
+
+static size_t zone_pressure_relief(malloc_zone_t* zone, size_t size) {
+ MI_UNUSED(zone); MI_UNUSED(size);
+ mi_collect(false);
+ return 0;
+}
+
+static void zone_free_definite_size(malloc_zone_t* zone, void* p, size_t size) {
+ MI_UNUSED(size);
+ zone_free(zone,p);
+}
+
+static boolean_t zone_claimed_address(malloc_zone_t* zone, void* p) {
+ MI_UNUSED(zone);
+ return mi_is_in_heap_region(p);
+}
+
+
+/* ------------------------------------------------------
+ Introspection members
+------------------------------------------------------ */
+
+static kern_return_t intro_enumerator(task_t task, void* p,
+ unsigned type_mask, vm_address_t zone_address,
+ memory_reader_t reader,
+ vm_range_recorder_t recorder)
+{
+ // todo: enumerate all memory
+ MI_UNUSED(task); MI_UNUSED(p); MI_UNUSED(type_mask); MI_UNUSED(zone_address);
+ MI_UNUSED(reader); MI_UNUSED(recorder);
+ return KERN_SUCCESS;
+}
+
+static size_t intro_good_size(malloc_zone_t* zone, size_t size) {
+ MI_UNUSED(zone);
+ return mi_good_size(size);
+}
+
+static boolean_t intro_check(malloc_zone_t* zone) {
+ MI_UNUSED(zone);
+ return true;
+}
+
+static void intro_print(malloc_zone_t* zone, boolean_t verbose) {
+ MI_UNUSED(zone); MI_UNUSED(verbose);
+ mi_stats_print(NULL);
+}
+
+static void intro_log(malloc_zone_t* zone, void* p) {
+ MI_UNUSED(zone); MI_UNUSED(p);
+ // todo?
+}
+
+static void intro_force_lock(malloc_zone_t* zone) {
+ MI_UNUSED(zone);
+ // todo?
+}
+
+static void intro_force_unlock(malloc_zone_t* zone) {
+ MI_UNUSED(zone);
+ // todo?
+}
+
+static void intro_statistics(malloc_zone_t* zone, malloc_statistics_t* stats) {
+ MI_UNUSED(zone);
+ // todo...
+ stats->blocks_in_use = 0;
+ stats->size_in_use = 0;
+ stats->max_size_in_use = 0;
+ stats->size_allocated = 0;
+}
+
+static boolean_t intro_zone_locked(malloc_zone_t* zone) {
+ MI_UNUSED(zone);
+ return false;
+}
+
+
+/* ------------------------------------------------------
+ At process start, override the default allocator
+------------------------------------------------------ */
+
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
+#endif
+
+#if defined(__clang__)
+#pragma clang diagnostic ignored "-Wc99-extensions"
+#endif
+
+static malloc_introspection_t mi_introspect = {
+ .enumerator = &intro_enumerator,
+ .good_size = &intro_good_size,
+ .check = &intro_check,
+ .print = &intro_print,
+ .log = &intro_log,
+ .force_lock = &intro_force_lock,
+ .force_unlock = &intro_force_unlock,
+#if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6) && !defined(__ppc__)
+ .statistics = &intro_statistics,
+ .zone_locked = &intro_zone_locked,
+#endif
+};
+
+static malloc_zone_t mi_malloc_zone = {
+ // note: even with designators, the order is important for C++ compilation
+ //.reserved1 = NULL,
+ //.reserved2 = NULL,
+ .size = &zone_size,
+ .malloc = &zone_malloc,
+ .calloc = &zone_calloc,
+ .valloc = &zone_valloc,
+ .free = &zone_free,
+ .realloc = &zone_realloc,
+ .destroy = &zone_destroy,
+ .zone_name = "mimalloc",
+ .batch_malloc = &zone_batch_malloc,
+ .batch_free = &zone_batch_free,
+ .introspect = &mi_introspect,
+#if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6) && !defined(__ppc__)
+ #if defined(MAC_OS_X_VERSION_10_14) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_14)
+ .version = 10,
+ #else
+ .version = 9,
+ #endif
+ // switch to version 9+ on OSX 10.6 to support memalign.
+ .memalign = &zone_memalign,
+ .free_definite_size = &zone_free_definite_size,
+ .pressure_relief = &zone_pressure_relief,
+ #if defined(MAC_OS_X_VERSION_10_14) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_14)
+ .claimed_address = &zone_claimed_address,
+ #endif
+#else
+ .version = 4,
+#endif
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#if defined(MI_OSX_INTERPOSE) && defined(MI_SHARED_LIB_EXPORT)
+
+// ------------------------------------------------------
+// Override malloc_xxx and malloc_zone_xxx APIs to use only
+// our mimalloc zone. Since even the loader uses malloc
+// on macOS, this ensures that all allocations go through
+// mimalloc (as all calls are interposed).
+// The main `malloc`, `free`, etc. calls are interposed in `alloc-override.c`;
+// here, we also override macOS-specific APIs like
+// `malloc_zone_calloc` (see the `malloc_zone_malloc(3)` man page).
+// ------------------------------------------------------
+
+static inline malloc_zone_t* mi_get_default_zone(void)
+{
+ static bool init;
+ if mi_unlikely(!init) {
+ init = true;
+ malloc_zone_register(&mi_malloc_zone); // by calling register we avoid a zone error on free
+ }
+ return &mi_malloc_zone;
+}
+
+mi_decl_externc int malloc_jumpstart(uintptr_t cookie);
+mi_decl_externc void _malloc_fork_prepare(void);
+mi_decl_externc void _malloc_fork_parent(void);
+mi_decl_externc void _malloc_fork_child(void);
+
+
+static malloc_zone_t* mi_malloc_create_zone(vm_size_t size, unsigned flags) {
+ MI_UNUSED(size); MI_UNUSED(flags);
+ return mi_get_default_zone();
+}
+
+static malloc_zone_t* mi_malloc_default_zone (void) {
+ return mi_get_default_zone();
+}
+
+static malloc_zone_t* mi_malloc_default_purgeable_zone(void) {
+ return mi_get_default_zone();
+}
+
+static void mi_malloc_destroy_zone(malloc_zone_t* zone) {
+ MI_UNUSED(zone);
+ // nothing.
+}
+
+static kern_return_t mi_malloc_get_all_zones (task_t task, memory_reader_t mr, vm_address_t** addresses, unsigned* count) {
+ MI_UNUSED(task); MI_UNUSED(mr);
+ if (addresses != NULL) *addresses = NULL;
+ if (count != NULL) *count = 0;
+ return KERN_SUCCESS;
+}
+
+static const char* mi_malloc_get_zone_name(malloc_zone_t* zone) {
+ return (zone == NULL ? mi_malloc_zone.zone_name : zone->zone_name);
+}
+
+static void mi_malloc_set_zone_name(malloc_zone_t* zone, const char* name) {
+ MI_UNUSED(zone); MI_UNUSED(name);
+}
+
+static int mi_malloc_jumpstart(uintptr_t cookie) {
+ MI_UNUSED(cookie);
+ return 1; // or 0 for no error?
+}
+
+static void mi__malloc_fork_prepare(void) {
+ // nothing
+}
+static void mi__malloc_fork_parent(void) {
+ // nothing
+}
+static void mi__malloc_fork_child(void) {
+ // nothing
+}
+
+static void mi_malloc_printf(const char* fmt, ...) {
+ MI_UNUSED(fmt);
+}
+
+static bool zone_check(malloc_zone_t* zone) {
+ MI_UNUSED(zone);
+ return true;
+}
+
+static malloc_zone_t* zone_from_ptr(const void* p) {
+ MI_UNUSED(p);
+ return mi_get_default_zone();
+}
+
+static void zone_log(malloc_zone_t* zone, void* p) {
+ MI_UNUSED(zone); MI_UNUSED(p);
+}
+
+static void zone_print(malloc_zone_t* zone, bool b) {
+ MI_UNUSED(zone); MI_UNUSED(b);
+}
+
+static void zone_print_ptr_info(void* p) {
+ MI_UNUSED(p);
+}
+
+static void zone_register(malloc_zone_t* zone) {
+ MI_UNUSED(zone);
+}
+
+static void zone_unregister(malloc_zone_t* zone) {
+ MI_UNUSED(zone);
+}
+
+// use interposing so `DYLD_INSERT_LIBRARIES` works without `DYLD_FORCE_FLAT_NAMESPACE=1`
+struct mi_interpose_s {
+ const void* replacement;
+ const void* target;
+};
+#define MI_INTERPOSE_FUN(oldfun,newfun) { (const void*)&newfun, (const void*)&oldfun }
+#define MI_INTERPOSE_MI(fun) MI_INTERPOSE_FUN(fun,mi_##fun)
+#define MI_INTERPOSE_ZONE(fun) MI_INTERPOSE_FUN(malloc_##fun,fun)
+__attribute__((used)) static const struct mi_interpose_s _mi_zone_interposes[] __attribute__((section("__DATA, __interpose"))) =
+{
+
+ MI_INTERPOSE_MI(malloc_create_zone),
+ MI_INTERPOSE_MI(malloc_default_purgeable_zone),
+ MI_INTERPOSE_MI(malloc_default_zone),
+ MI_INTERPOSE_MI(malloc_destroy_zone),
+ MI_INTERPOSE_MI(malloc_get_all_zones),
+ MI_INTERPOSE_MI(malloc_get_zone_name),
+ MI_INTERPOSE_MI(malloc_jumpstart),
+ MI_INTERPOSE_MI(malloc_printf),
+ MI_INTERPOSE_MI(malloc_set_zone_name),
+ MI_INTERPOSE_MI(_malloc_fork_child),
+ MI_INTERPOSE_MI(_malloc_fork_parent),
+ MI_INTERPOSE_MI(_malloc_fork_prepare),
+
+ MI_INTERPOSE_ZONE(zone_batch_free),
+ MI_INTERPOSE_ZONE(zone_batch_malloc),
+ MI_INTERPOSE_ZONE(zone_calloc),
+ MI_INTERPOSE_ZONE(zone_check),
+ MI_INTERPOSE_ZONE(zone_free),
+ MI_INTERPOSE_ZONE(zone_from_ptr),
+ MI_INTERPOSE_ZONE(zone_log),
+ MI_INTERPOSE_ZONE(zone_malloc),
+ MI_INTERPOSE_ZONE(zone_memalign),
+ MI_INTERPOSE_ZONE(zone_print),
+ MI_INTERPOSE_ZONE(zone_print_ptr_info),
+ MI_INTERPOSE_ZONE(zone_realloc),
+ MI_INTERPOSE_ZONE(zone_register),
+ MI_INTERPOSE_ZONE(zone_unregister),
+ MI_INTERPOSE_ZONE(zone_valloc)
+};
+
+
+#else
+
+// ------------------------------------------------------
+// Hook into the zone APIs without interposing.
+// This is the official way of adding an allocator but
+// it seems less robust than using interpose.
+// ------------------------------------------------------
+
+static inline malloc_zone_t* mi_get_default_zone(void)
+{
+ // The first returned zone is the real default
+ malloc_zone_t** zones = NULL;
+ unsigned count = 0;
+ kern_return_t ret = malloc_get_all_zones(0, NULL, (vm_address_t**)&zones, &count);
+ if (ret == KERN_SUCCESS && count > 0) {
+ return zones[0];
+ }
+ else {
+ // fallback
+ return malloc_default_zone();
+ }
+}
+
+#if defined(__clang__)
+__attribute__((constructor(0)))
+#else
+__attribute__((constructor)) // seems not supported by g++-11 on the M1
+#endif
+static void _mi_macos_override_malloc(void) {
+ malloc_zone_t* purgeable_zone = NULL;
+
+ #if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6)
+ // force the purgeable zone to exist to avoid strange bugs
+ if (malloc_default_purgeable_zone) {
+ purgeable_zone = malloc_default_purgeable_zone();
+ }
+ #endif
+
+ // Register our zone.
+ // thomcc: I think this is still needed to put us in the zone list.
+ malloc_zone_register(&mi_malloc_zone);
+ // Unregister the default zone; this makes our zone the new default,
+ // as it was the last one registered.
+ malloc_zone_t *default_zone = mi_get_default_zone();
+ // thomcc: Unsure if the next test is *always* false or just false in the
+ // cases I've tried. I'm also unsure if the code inside is needed at all.
+ if (default_zone != &mi_malloc_zone) {
+ malloc_zone_unregister(default_zone);
+
+ // Reregister the default zone so free and realloc in that zone keep working.
+ malloc_zone_register(default_zone);
+ }
+
+ // Unregister and re-register the purgeable_zone to avoid bugs if it occurs
+ // earlier in the zone list than the default zone.
+ if (purgeable_zone != NULL) {
+ malloc_zone_unregister(purgeable_zone);
+ malloc_zone_register(purgeable_zone);
+ }
+
+}
+#endif // MI_OSX_INTERPOSE
+
+#endif // MI_MALLOC_OVERRIDE
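
The `__DATA,__interpose` mechanism used above can be shown standalone. A minimal sketch, assuming clang on macOS and a toy replacement function; dyld rebinds each `target` to its `replacement` when the library is loaded via `DYLD_INSERT_LIBRARIES`:

```c
// toy_interpose.c -- build: clang -dynamiclib toy_interpose.c -o libtoy.dylib
// run:   DYLD_INSERT_LIBRARIES=./libtoy.dylib ./some_program
#include <stdlib.h>

static void* my_malloc(size_t size) {
  return calloc(1, size);  // toy replacement: always zero-initialize
}

// One entry per override: { replacement, target }, placed in __DATA,__interpose.
__attribute__((used)) static const struct { const void* replacement; const void* target; }
  toy_interposers[] __attribute__((section("__DATA,__interpose"))) = {
  { (const void*)&my_malloc, (const void*)&malloc },
};
```

The zone overrides above follow the same pattern via `MI_INTERPOSE_FUN`, pairing e.g. `mi_malloc_default_zone` with `malloc_default_zone`.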
diff --git a/src/prim/osx/prim.c b/src/prim/osx/prim.c
new file mode 100644
index 0000000..8a2f4e8
--- /dev/null
+++ b/src/prim/osx/prim.c
@@ -0,0 +1,9 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+// We use unix/prim.c with the mmap API on macOS
+#include "../unix/prim.c"
diff --git a/src/prim/prim.c b/src/prim/prim.c
new file mode 100644
index 0000000..9a597d8
--- /dev/null
+++ b/src/prim/prim.c
@@ -0,0 +1,24 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+// Select the implementation of the primitives
+// depending on the OS.
+
+#if defined(_WIN32)
+#include "windows/prim.c" // VirtualAlloc (Windows)
+
+#elif defined(__APPLE__)
+#include "osx/prim.c" // macOSX (actually defers to mmap in unix/prim.c)
+
+#elif defined(__wasi__)
+#define MI_USE_SBRK
+#include "wasi/prim.c" // memory-grow or sbrk (Wasm)
+
+#else
+#include "unix/prim.c" // mmap() (Linux, macOSX, BSD, Illumnos, Haiku, DragonFly, etc.)
+
+#endif
diff --git a/src/prim/readme.md b/src/prim/readme.md
new file mode 100644
index 0000000..380dd3a
--- /dev/null
+++ b/src/prim/readme.md
@@ -0,0 +1,9 @@
+## Portability Primitives
+
+This is the portability layer where all primitives needed from the OS are defined.
+
+- `include/mimalloc/prim.h`: primitive portability API definition.
+- `prim.c`: Selects one of `unix/prim.c`, `wasi/prim.c`, or `windows/prim.c` depending on the host platform
+ (and on macOS, `osx/prim.c` defers to `unix/prim.c`).
+
+Note: this is still a work in progress; there may be places in the sources that still depend on OS `#ifdef`s.
\ No newline at end of file
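
To make the shape of the portability API concrete, here is a hedged skeleton of a hypothetical new port, reusing three of the `_mi_prim_*` signatures that appear in `unix/prim.c` below (the field values are placeholder assumptions, not recommended defaults):

```c
// skeleton-prim.c -- hypothetical starting point for a new platform port.
#include <errno.h>
#include "mimalloc.h"
#include "mimalloc/internal.h"
#include "mimalloc/prim.h"

void _mi_prim_mem_init(mi_os_mem_config_t* config) {
  config->page_size = 4096;        // assumption: 4 KiB pages
  config->alloc_granularity = 4096;
  config->has_overcommit = false;  // conservative defaults
  config->must_free_whole = true;
}

int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit,
                   bool allow_large, bool* is_large, bool* is_zero, void** addr) {
  MI_UNUSED(size); MI_UNUSED(try_alignment); MI_UNUSED(commit); MI_UNUSED(allow_large);
  *is_large = false; *is_zero = false; *addr = NULL;
  return ENOMEM;  // no backing memory yet in this skeleton
}

int _mi_prim_free(void* addr, size_t size) {
  MI_UNUSED(addr); MI_UNUSED(size);
  return 0;  // 0 means success; otherwise return an errno-style error code
}
```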
diff --git a/src/prim/unix/prim.c b/src/prim/unix/prim.c
new file mode 100644
index 0000000..314281f
--- /dev/null
+++ b/src/prim/unix/prim.c
@@ -0,0 +1,859 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+// This file is included in `src/prim/prim.c`
+
+#ifndef _DEFAULT_SOURCE
+#define _DEFAULT_SOURCE // ensure mmap flags and syscall are defined
+#endif
+
+#if defined(__sun)
+// illumos provides the new mman.h API when any of these are defined;
+// otherwise it uses the old caddr_t-based API that predates the void-pointer one.
+// Stock Solaris provides only the former, so we choose to discard these
+// flags only here rather than project-wide.
+#undef _XOPEN_SOURCE
+#undef _POSIX_C_SOURCE
+#endif
+
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h"
+
+#include <sys/mman.h>  // mmap
+#include <unistd.h>    // sysconf
+
+#if defined(__linux__)
+ #include <features.h>
+ #include <fcntl.h>
+ #if defined(__GLIBC__)
+ #include <linux/mman.h> // linux mmap flags
+ #else
+ #include <sys/mman.h>
+ #endif
+#elif defined(__APPLE__)
+ #include <TargetConditionals.h>
+ #if !TARGET_IOS_IPHONE && !TARGET_IOS_SIMULATOR
+ #include <mach/vm_statistics.h>
+ #endif
+#elif defined(__FreeBSD__) || defined(__DragonFly__)
+ #include <sys/param.h>
+ #if __FreeBSD_version >= 1200000
+ #include <sys/cpuset.h>
+ #include <sys/domainset.h>
+ #endif
+ #include <sys/sysctl.h>
+#endif
+
+#if !defined(__HAIKU__) && !defined(__APPLE__) && !defined(__CYGWIN__)
+ #define MI_HAS_SYSCALL_H
+ #include <sys/syscall.h>
+#endif
+
+//------------------------------------------------------------------------------------
+// Use syscalls for some primitives to allow for libraries that override open/read/close etc.
+// and do allocation themselves; using syscalls prevents recursion when mimalloc is
+// still initializing (issue #713)
+//------------------------------------------------------------------------------------
+
+#if defined(MI_HAS_SYSCALL_H) && defined(SYS_open) && defined(SYS_close) && defined(SYS_read) && defined(SYS_access)
+
+static int mi_prim_open(const char* fpath, int open_flags) {
+ return syscall(SYS_open,fpath,open_flags,0);
+}
+static ssize_t mi_prim_read(int fd, void* buf, size_t bufsize) {
+ return syscall(SYS_read,fd,buf,bufsize);
+}
+static int mi_prim_close(int fd) {
+ return syscall(SYS_close,fd);
+}
+static int mi_prim_access(const char *fpath, int mode) {
+ return syscall(SYS_access,fpath,mode);
+}
+
+#elif !defined(__APPLE__) // avoid unused warnings
+
+static int mi_prim_open(const char* fpath, int open_flags) {
+ return open(fpath,open_flags);
+}
+static ssize_t mi_prim_read(int fd, void* buf, size_t bufsize) {
+ return read(fd,buf,bufsize);
+}
+static int mi_prim_close(int fd) {
+ return close(fd);
+}
+static int mi_prim_access(const char *fpath, int mode) {
+ return access(fpath,mode);
+}
+
+#endif
+
+
+
+//---------------------------------------------
+// init
+//---------------------------------------------
+
+static bool unix_detect_overcommit(void) {
+ bool os_overcommit = true;
+#if defined(__linux__)
+ int fd = mi_prim_open("/proc/sys/vm/overcommit_memory", O_RDONLY);
+ if (fd >= 0) {
+ char buf[32];
+ ssize_t nread = mi_prim_read(fd, &buf, sizeof(buf));
+ mi_prim_close(fd);
+ // <https://www.kernel.org/doc/Documentation/vm/overcommit-accounting>
+ // 0: heuristic overcommit, 1: always overcommit, 2: never overcommit (ignore NORESERVE)
+ if (nread >= 1) {
+ os_overcommit = (buf[0] == '0' || buf[0] == '1');
+ }
+ }
+#elif defined(__FreeBSD__)
+ int val = 0;
+ size_t olen = sizeof(val);
+ if (sysctlbyname("vm.overcommit", &val, &olen, NULL, 0) == 0) {
+ os_overcommit = (val != 0);
+ }
+#else
+ // default: overcommit is true
+#endif
+ return os_overcommit;
+}
+
+void _mi_prim_mem_init( mi_os_mem_config_t* config ) {
+ long psize = sysconf(_SC_PAGESIZE);
+ if (psize > 0) {
+ config->page_size = (size_t)psize;
+ config->alloc_granularity = (size_t)psize;
+ }
+ config->large_page_size = 2*MI_MiB; // TODO: can we query the OS for this?
+ config->has_overcommit = unix_detect_overcommit();
+ config->must_free_whole = false; // mmap can free in parts
+ config->has_virtual_reserve = true; // todo: check if this is true for NetBSD (for anonymous mmap with PROT_NONE)
+}
+
+
+//---------------------------------------------
+// free
+//---------------------------------------------
+
+int _mi_prim_free(void* addr, size_t size ) {
+ bool err = (munmap(addr, size) == -1);
+ return (err ? errno : 0);
+}
+
+
+//---------------------------------------------
+// mmap
+//---------------------------------------------
+
+static int unix_madvise(void* addr, size_t size, int advice) {
+ #if defined(__sun)
+ return madvise((caddr_t)addr, size, advice); // Solaris needs cast (issue #520)
+ #else
+ return madvise(addr, size, advice);
+ #endif
+}
+
+static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
+ MI_UNUSED(try_alignment);
+ void* p = NULL;
+ #if defined(MAP_ALIGNED) // BSD
+ if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) {
+ size_t n = mi_bsr(try_alignment);
+ if (((size_t)1 << n) == try_alignment && n >= 12 && n <= 30) { // alignment is a power of 2 and 4096 <= alignment <= 1GiB
+ p = mmap(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd, 0);
+ if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
+ int err = errno;
+ _mi_warning_message("unable to directly request aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, addr);
+ }
+ if (p!=MAP_FAILED) return p;
+ // fall back to regular mmap
+ }
+ }
+ #elif defined(MAP_ALIGN) // Solaris
+ if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) {
+ p = mmap((void*)try_alignment, size, protect_flags, flags | MAP_ALIGN, fd, 0); // addr parameter is the required alignment
+ if (p!=MAP_FAILED) return p;
+ // fall back to regular mmap
+ }
+ #endif
+ #if (MI_INTPTR_SIZE >= 8) && !defined(MAP_ALIGNED)
+ // on 64-bit systems, use the virtual address area after 2TiB for 4MiB aligned allocations
+ if (addr == NULL) {
+ void* hint = _mi_os_get_aligned_hint(try_alignment, size);
+ if (hint != NULL) {
+ p = mmap(hint, size, protect_flags, flags, fd, 0);
+ if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
+ #if MI_TRACK_ENABLED // asan sometimes does not instrument errno correctly?
+ int err = 0;
+ #else
+ int err = errno;
+ #endif
+ _mi_warning_message("unable to directly request hinted aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, hint);
+ }
+ if (p!=MAP_FAILED) return p;
+ // fall back to regular mmap
+ }
+ }
+ #endif
+ // regular mmap
+ p = mmap(addr, size, protect_flags, flags, fd, 0);
+ if (p!=MAP_FAILED) return p;
+ // failed to allocate
+ return NULL;
+}
+
+static int unix_mmap_fd(void) {
+ #if defined(VM_MAKE_TAG)
+ // macOS: tag anonymous pages with a specific ID (all IDs up to 98 are officially taken, and the LLVM sanitizers took 99)
+ int os_tag = (int)mi_option_get(mi_option_os_tag);
+ if (os_tag < 100 || os_tag > 255) { os_tag = 100; }
+ return VM_MAKE_TAG(os_tag);
+ #else
+ return -1;
+ #endif
+}
+
+static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protect_flags, bool large_only, bool allow_large, bool* is_large) {
+ #if !defined(MAP_ANONYMOUS)
+ #define MAP_ANONYMOUS MAP_ANON
+ #endif
+ #if !defined(MAP_NORESERVE)
+ #define MAP_NORESERVE 0
+ #endif
+ void* p = NULL;
+ const int fd = unix_mmap_fd();
+ int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+ if (_mi_os_has_overcommit()) {
+ flags |= MAP_NORESERVE;
+ }
+ #if defined(PROT_MAX)
+ protect_flags |= PROT_MAX(PROT_READ | PROT_WRITE); // BSD
+ #endif
+ // huge page allocation
+ if ((large_only || _mi_os_use_large_page(size, try_alignment)) && allow_large) {
+ static _Atomic(size_t) large_page_try_ok; // = 0;
+ size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok);
+ if (!large_only && try_ok > 0) {
+ // If the OS is not configured for large OS pages, or the user does not have
+ // enough permission, the `mmap` will always fail (but it might also fail for other reasons).
+ // Therefore, once a large page allocation fails, we don't try again for
+ // the next `large_page_try_ok` attempts, to avoid too many failing calls to mmap.
+ mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1);
+ }
+ else {
+ int lflags = flags & ~MAP_NORESERVE; // using NORESERVE on huge pages seems to fail on Linux
+ int lfd = fd;
+ #ifdef MAP_ALIGNED_SUPER
+ lflags |= MAP_ALIGNED_SUPER;
+ #endif
+ #ifdef MAP_HUGETLB
+ lflags |= MAP_HUGETLB;
+ #endif
+ #ifdef MAP_HUGE_1GB
+ static bool mi_huge_pages_available = true;
+ if ((size % MI_GiB) == 0 && mi_huge_pages_available) {
+ lflags |= MAP_HUGE_1GB;
+ }
+ else
+ #endif
+ {
+ #ifdef MAP_HUGE_2MB
+ lflags |= MAP_HUGE_2MB;
+ #endif
+ }
+ #ifdef VM_FLAGS_SUPERPAGE_SIZE_2MB
+ lfd |= VM_FLAGS_SUPERPAGE_SIZE_2MB;
+ #endif
+ if (large_only || lflags != flags) {
+ // try large OS page allocation
+ *is_large = true;
+ p = unix_mmap_prim(addr, size, try_alignment, protect_flags, lflags, lfd);
+ #ifdef MAP_HUGE_1GB
+ if (p == NULL && (lflags & MAP_HUGE_1GB) != 0) {
+ mi_huge_pages_available = false; // don't try huge 1GiB pages again
+ _mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) pages instead (errno: %i)\n", errno);
+ lflags = ((lflags & ~MAP_HUGE_1GB) | MAP_HUGE_2MB);
+ p = unix_mmap_prim(addr, size, try_alignment, protect_flags, lflags, lfd);
+ }
+ #endif
+ if (large_only) return p;
+ if (p == NULL) {
+ mi_atomic_store_release(&large_page_try_ok, (size_t)8); // on error, don't try again for the next N allocations
+ }
+ }
+ }
+ }
+ // regular allocation
+ if (p == NULL) {
+ *is_large = false;
+ p = unix_mmap_prim(addr, size, try_alignment, protect_flags, flags, fd);
+ if (p != NULL) {
+ #if defined(MADV_HUGEPAGE)
+ // Many Linux systems don't allow MAP_HUGETLB but instead support
+ // transparent huge pages (THP). Generally it is not required to call `madvise`
+ // with MADV_HUGEPAGE, since properly aligned allocations will already use
+ // large pages if available -- in particular for our large regions (in `memory.c`).
+ // However, some systems only allow THP if called with an explicit `madvise`, so
+ // when large OS pages are enabled for mimalloc, we call `madvise` anyway.
+ if (allow_large && _mi_os_use_large_page(size, try_alignment)) {
+ if (unix_madvise(p, size, MADV_HUGEPAGE) == 0) {
+ *is_large = true; // possibly
+ }
+ }
+ #elif defined(__sun)
+ if (allow_large && _mi_os_use_large_page(size, try_alignment)) {
+ struct memcntl_mha cmd = {0};
+ cmd.mha_pagesize = _mi_os_large_page_size(); // `large_os_page_size` is not defined in this file
+ cmd.mha_cmd = MHA_MAPSIZE_VA;
+ if (memcntl((caddr_t)p, size, MC_HAT_ADVISE, (caddr_t)&cmd, 0, 0) == 0) {
+ *is_large = true;
+ }
+ }
+ #endif
+ }
+ }
+ return p;
+}
+
+// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
+int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) {
+ mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
+ mi_assert_internal(commit || !allow_large);
+ mi_assert_internal(try_alignment > 0);
+
+ *is_zero = true;
+ int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
+ *addr = unix_mmap(NULL, size, try_alignment, protect_flags, false, allow_large, is_large);
+ return (*addr != NULL ? 0 : errno);
+}
+
+
+//---------------------------------------------
+// Commit/Reset
+//---------------------------------------------
+
+static void unix_mprotect_hint(int err) {
+ #if defined(__linux__) && (MI_SECURE>=2) // guard page around every mimalloc page
+ if (err == ENOMEM) {
+ _mi_warning_message("The next warning may be caused by a low memory map limit.\n"
+ " On Linux this is controlled by the vm.max_map_count -- maybe increase it?\n"
+ " For example: sudo sysctl -w vm.max_map_count=262144\n");
+ }
+ #else
+ MI_UNUSED(err);
+ #endif
+}
+
+int _mi_prim_commit(void* start, size_t size, bool* is_zero) {
+ // commit: ensure we can access the area
+ // note: one might think *is_zero could be true, since the memory came
+ // either from mmap PROT_NONE or from a decommit with MADV_DONTNEED; but
+ // we sometimes call commit on a range that is still partially committed,
+ // and `mprotect` does not zero the range.
+ *is_zero = false;
+ int err = mprotect(start, size, (PROT_READ | PROT_WRITE));
+ if (err != 0) {
+ err = errno;
+ unix_mprotect_hint(err);
+ }
+ return err;
+}
+
+int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) {
+ int err = 0;
+ // decommit: use MADV_DONTNEED as it decreases rss immediately (unlike MADV_FREE)
+ err = unix_madvise(start, size, MADV_DONTNEED);
+ #if !MI_DEBUG && !MI_SECURE
+ *needs_recommit = false;
+ #else
+ *needs_recommit = true;
+ mprotect(start, size, PROT_NONE);
+ #endif
+ /*
+ // decommit: use mmap with MAP_FIXED and PROT_NONE to discard the existing memory (and reduce rss)
+ *needs_recommit = true;
+ const int fd = unix_mmap_fd();
+ void* p = mmap(start, size, PROT_NONE, (MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE), fd, 0);
+ if (p != start) { err = errno; }
+ */
+ return err;
+}
+
+int _mi_prim_reset(void* start, size_t size) {
+ // We try to use `MADV_FREE` as that is the fastest. A drawback is that it
+ // will not reduce the `rss` stats in tools like `top`, even though the memory is
+ // available to other processes. With the default `MIMALLOC_PURGE_DECOMMITS=1`,
+ // `MADV_DONTNEED` is used instead by default.
+ #if defined(MADV_FREE)
+ static _Atomic(size_t) advice = MI_ATOMIC_VAR_INIT(MADV_FREE);
+ int oadvice = (int)mi_atomic_load_relaxed(&advice);
+ int err;
+ while ((err = unix_madvise(start, size, oadvice)) != 0 && errno == EAGAIN) { errno = 0; };
+ if (err != 0 && errno == EINVAL && oadvice == MADV_FREE) {
+ // if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on
+ mi_atomic_store_release(&advice, (size_t)MADV_DONTNEED);
+ err = unix_madvise(start, size, MADV_DONTNEED);
+ }
+ #else
+ int err = unix_madvise(start, size, MADV_DONTNEED);
+ #endif
+ return err;
+}
+
+int _mi_prim_protect(void* start, size_t size, bool protect) {
+ int err = mprotect(start, size, protect ? PROT_NONE : (PROT_READ | PROT_WRITE));
+ if (err != 0) { err = errno; }
+ unix_mprotect_hint(err);
+ return err;
+}
+
+
+
+//---------------------------------------------
+// Huge page allocation
+//---------------------------------------------
+
+#if (MI_INTPTR_SIZE >= 8) && !defined(__HAIKU__) && !defined(__CYGWIN__)
+
+#ifndef MPOL_PREFERRED
+#define MPOL_PREFERRED 1
+#endif
+
+#if defined(MI_HAS_SYSCALL_H) && defined(SYS_mbind)
+static long mi_prim_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) {
+ return syscall(SYS_mbind, start, len, mode, nmask, maxnode, flags);
+}
+#else
+static long mi_prim_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) {
+ MI_UNUSED(start); MI_UNUSED(len); MI_UNUSED(mode); MI_UNUSED(nmask); MI_UNUSED(maxnode); MI_UNUSED(flags);
+ return 0;
+}
+#endif
+
+int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) {
+ bool is_large = true;
+ *is_zero = true;
+ *addr = unix_mmap(hint_addr, size, MI_SEGMENT_SIZE, PROT_READ | PROT_WRITE, true, true, &is_large);
+ if (*addr != NULL && numa_node >= 0 && numa_node < 8*MI_INTPTR_SIZE) { // at most 64 nodes
+ unsigned long numa_mask = (1UL << numa_node);
+ // TODO: does `mbind` work correctly for huge OS pages? should we
+ // use `set_mempolicy` before calling mmap instead?
+ long err = mi_prim_mbind(*addr, size, MPOL_PREFERRED, &numa_mask, 8*MI_INTPTR_SIZE, 0);
+ if (err != 0) {
+ err = errno;
+ _mi_warning_message("failed to bind huge (1GiB) pages to numa node %d (error: %d (0x%x))\n", numa_node, err, err);
+ }
+ }
+ return (*addr != NULL ? 0 : errno);
+}
+
+#else
+
+int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) {
+ MI_UNUSED(hint_addr); MI_UNUSED(size); MI_UNUSED(numa_node);
+ *is_zero = false;
+ *addr = NULL;
+ return ENOMEM;
+}
+
+#endif
+
+//---------------------------------------------
+// NUMA nodes
+//---------------------------------------------
+
+#if defined(__linux__)
+
+#include <stdio.h> // snprintf
+
+size_t _mi_prim_numa_node(void) {
+ #if defined(MI_HAS_SYSCALL_H) && defined(SYS_getcpu)
+ unsigned long node = 0;
+ unsigned long ncpu = 0;
+ long err = syscall(SYS_getcpu, &ncpu, &node, NULL);
+ if (err != 0) return 0;
+ return node;
+ #else
+ return 0;
+ #endif
+}
+
+size_t _mi_prim_numa_node_count(void) {
+ char buf[128];
+ unsigned node = 0;
+ for(node = 0; node < 256; node++) {
+ // enumerate node entries -- todo: is there a more efficient way to do this? (but ensure there is no allocation)
+ snprintf(buf, 127, "/sys/devices/system/node/node%u", node + 1);
+ if (mi_prim_access(buf,R_OK) != 0) break;
+ }
+ return (node+1);
+}
+
+#elif defined(__FreeBSD__) && __FreeBSD_version >= 1200000
+
+size_t _mi_prim_numa_node(void) {
+ domainset_t dom;
+ size_t node;
+ int policy;
+ if (cpuset_getdomain(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, sizeof(dom), &dom, &policy) == -1) return 0ul;
+ for (node = 0; node < MAXMEMDOM; node++) {
+ if (DOMAINSET_ISSET(node, &dom)) return node;
+ }
+ return 0ul;
+}
+
+size_t _mi_prim_numa_node_count(void) {
+ size_t ndomains = 0;
+ size_t len = sizeof(ndomains);
+ if (sysctlbyname("vm.ndomains", &ndomains, &len, NULL, 0) == -1) return 0ul;
+ return ndomains;
+}
+
+#elif defined(__DragonFly__)
+
+size_t _mi_prim_numa_node(void) {
+ // TODO: DragonFly does not seem to provide any userland means to get this information.
+ return 0ul;
+}
+
+size_t _mi_prim_numa_node_count(void) {
+ size_t ncpus = 0, nvirtcoresperphys = 0;
+ size_t len = sizeof(size_t);
+ if (sysctlbyname("hw.ncpu", &ncpus, &len, NULL, 0) == -1) return 0ul;
+ if (sysctlbyname("hw.cpu_topology_ht_ids", &nvirtcoresperphys, &len, NULL, 0) == -1) return 0ul;
+ return nvirtcoresperphys * ncpus;
+}
+
+#else
+
+size_t _mi_prim_numa_node(void) {
+ return 0;
+}
+
+size_t _mi_prim_numa_node_count(void) {
+ return 1;
+}
+
+#endif
+
+// ----------------------------------------------------------------
+// Clock
+// ----------------------------------------------------------------
+
+#include <time.h>
+
+#if defined(CLOCK_REALTIME) || defined(CLOCK_MONOTONIC)
+
+mi_msecs_t _mi_prim_clock_now(void) {
+ struct timespec t;
+ #ifdef CLOCK_MONOTONIC
+ clock_gettime(CLOCK_MONOTONIC, &t);
+ #else
+ clock_gettime(CLOCK_REALTIME, &t);
+ #endif
+ return ((mi_msecs_t)t.tv_sec * 1000) + ((mi_msecs_t)t.tv_nsec / 1000000);
+}
+
+#else
+
+// low resolution timer
+mi_msecs_t _mi_prim_clock_now(void) {
+ #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0)
+ return (mi_msecs_t)clock();
+ #elif (CLOCKS_PER_SEC < 1000)
+ return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC);
+ #else
+ return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000);
+ #endif
+}
+
+#endif
+
+
+
+
+//----------------------------------------------------------------
+// Process info
+//----------------------------------------------------------------
+
+#if defined(__unix__) || defined(__unix) || defined(unix) || defined(__APPLE__) || defined(__HAIKU__)
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/resource.h>
+
+#if defined(__APPLE__)
+#include <mach/mach.h>
+#endif
+
+#if defined(__HAIKU__)
+#include <kernel/OS.h>
+#endif
+
+static mi_msecs_t timeval_secs(const struct timeval* tv) {
+ return ((mi_msecs_t)tv->tv_sec * 1000L) + ((mi_msecs_t)tv->tv_usec / 1000L);
+}
+
+void _mi_prim_process_info(mi_process_info_t* pinfo)
+{
+ struct rusage rusage;
+ getrusage(RUSAGE_SELF, &rusage);
+ pinfo->utime = timeval_secs(&rusage.ru_utime);
+ pinfo->stime = timeval_secs(&rusage.ru_stime);
+#if !defined(__HAIKU__)
+ pinfo->page_faults = rusage.ru_majflt;
+#endif
+#if defined(__HAIKU__)
+ // Haiku does not have (yet?) a way to
+ // get these stats per process
+ thread_info tid;
+ area_info mem;
+ ssize_t c;
+ get_thread_info(find_thread(0), &tid);
+ while (get_next_area_info(tid.team, &c, &mem) == B_OK) {
+ pinfo->peak_rss += mem.ram_size;
+ }
+ pinfo->page_faults = 0;
+#elif defined(__APPLE__)
+ pinfo->peak_rss = rusage.ru_maxrss; // macos reports in bytes
+ #ifdef MACH_TASK_BASIC_INFO
+ struct mach_task_basic_info info;
+ mach_msg_type_number_t infoCount = MACH_TASK_BASIC_INFO_COUNT;
+ if (task_info(mach_task_self(), MACH_TASK_BASIC_INFO, (task_info_t)&info, &infoCount) == KERN_SUCCESS) {
+ pinfo->current_rss = (size_t)info.resident_size;
+ }
+ #else
+ struct task_basic_info info;
+ mach_msg_type_number_t infoCount = TASK_BASIC_INFO_COUNT;
+ if (task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&info, &infoCount) == KERN_SUCCESS) {
+ pinfo->current_rss = (size_t)info.resident_size;
+ }
+ #endif
+#else
+ pinfo->peak_rss = rusage.ru_maxrss * 1024; // Linux/BSD report in KiB
+#endif
+ // use defaults for commit
+}
+
+#else
+
+#ifndef __wasi__
+// WebAssembly instances are not processes
+#pragma message("define a way to get process info")
+#endif
+
+void _mi_prim_process_info(mi_process_info_t* pinfo)
+{
+ // use defaults
+ MI_UNUSED(pinfo);
+}
+
+#endif
+
+
+//----------------------------------------------------------------
+// Output
+//----------------------------------------------------------------
+
+void _mi_prim_out_stderr( const char* msg ) {
+ fputs(msg,stderr);
+}
+
+
+//----------------------------------------------------------------
+// Environment
+//----------------------------------------------------------------
+
+#if !defined(MI_USE_ENVIRON) || (MI_USE_ENVIRON!=0)
+// On POSIX systems, use `environ` to access environment variables
+// even before the C runtime is initialized.
+#if defined(__APPLE__) && defined(__has_include) && __has_include(<crt_externs.h>)
+#include <crt_externs.h>
+static char** mi_get_environ(void) {
+ return (*_NSGetEnviron());
+}
+#else
+extern char** environ;
+static char** mi_get_environ(void) {
+ return environ;
+}
+#endif
+bool _mi_prim_getenv(const char* name, char* result, size_t result_size) {
+ if (name==NULL) return false;
+ const size_t len = _mi_strlen(name);
+ if (len == 0) return false;
+ char** env = mi_get_environ();
+ if (env == NULL) return false;
+ // compare up to 10000 entries
+ for (int i = 0; i < 10000 && env[i] != NULL; i++) {
+ const char* s = env[i];
+ if (_mi_strnicmp(name, s, len) == 0 && s[len] == '=') { // case insensitive
+ // found it
+ _mi_strlcpy(result, s + len + 1, result_size);
+ return true;
+ }
+ }
+ return false;
+}
+#else
+// fallback: use standard C `getenv` but this cannot be used while initializing the C runtime
+bool _mi_prim_getenv(const char* name, char* result, size_t result_size) {
+ // cannot call getenv() when still initializing the C runtime.
+ if (_mi_preloading()) return false;
+ const char* s = getenv(name);
+ if (s == NULL) {
+ // we check the upper case name too.
+ char buf[64+1];
+ size_t len = _mi_strnlen(name,sizeof(buf)-1);
+ for (size_t i = 0; i < len; i++) {
+ buf[i] = _mi_toupper(name[i]);
+ }
+ buf[len] = 0;
+ s = getenv(buf);
+ }
+ if (s == NULL || _mi_strnlen(s,result_size) >= result_size) return false;
+ _mi_strlcpy(result, s, result_size);
+ return true;
+}
+#endif // !MI_USE_ENVIRON
+
+
+//----------------------------------------------------------------
+// Random
+//----------------------------------------------------------------
+
+#if defined(__APPLE__)
+
+#include <AvailabilityMacros.h>