diff --git a/include/runtime/arena.h b/include/runtime/arena.h
index 1c32a0fc5..536b3b325 100644
--- a/include/runtime/arena.h
+++ b/include/runtime/arena.h
@@ -10,7 +10,53 @@ extern "C" {
 
 // An arena can be used to allocate objects that can then be deallocated all at
 // once.
-struct arena {
+class arena {
+public:
+  arena(char id)
+      : allocation_semispace_id(id) { }
+  void *kore_arena_alloc(size_t requested);
+
+  // Returns the address of the first byte that belongs in the given arena.
+  // Returns 0 if nothing has ever been allocated in that arena.
+  char *arena_start_ptr() const;
+
+  // Returns a pointer to a location holding the address of the last allocated
+  // byte in the given arena plus 1.
+  // This address is 0 if nothing has ever been allocated in that arena.
+  char **arena_end_ptr();
+
+  // Returns the total number of allocatable bytes currently in the arena in its
+  // active semispace.
+  size_t arena_size() const;
+
+  // Clears the current allocation space by setting its start back to its first
+  // block. It is used during garbage collection to effectively collect all of the
+  // arena.
+  void arena_clear();
+
+  // Resizes the last allocation as long as the resize does not require a new
+  // block allocation.
+  // Returns the address of the byte following the last newly allocated byte when
+  // the resize succeeds, returns 0 otherwise.
+  void *arena_resize_last_alloc(ssize_t increase);
+
+  // Returns the given arena's current collection semispace ID.
+  // Each arena has 2 semispace IDs, one equal to the arena ID and the other equal
+  // to the 1's complement of the arena ID. At any time one of these semispaces
+  // is used for allocation and the other is used for collection.
+  char get_arena_collection_semispace_id() const;
+
+  // Exchanges the current allocation and collection semispaces and clears the new
+  // current allocation semispace by setting its start back to its first block.
+  // It is used before garbage collection.
+  void arena_swap_and_clear();
+
+private:
+  void fresh_block();
+
+  // Helper function for `kore_arena_alloc`. Do not call directly.
+  void *do_alloc_slow(size_t requested);
+
   char *first_block;
   char *block;
   char *block_start;
@@ -29,78 +75,43 @@ using memory_block_header = struct {
 
 // Macro to define a new arena with the given ID. Supports IDs ranging from 0 to
 // 127.
-#define REGISTER_ARENA(name, id)                                               \
-  static struct arena name = {.allocation_semispace_id = (id)}
+#define REGISTER_ARENA(name, id) static thread_local arena name(id)
 
 #define MEM_BLOCK_START(ptr)                                                   \
   ((char *)(((uintptr_t)(ptr)-1) & ~(BLOCK_SIZE - 1)))
 
+#ifdef __MACH__
+//
+// thread_local disabled for Apple
+//
 extern bool time_for_collection;
+#else
+extern thread_local bool time_for_collection;
+#endif
 
 size_t get_gc_threshold();
 
-// Resets the given arena.
-void arena_reset(struct arena *);
-
-// Returns the given arena's current allocation semispace ID.
-// Each arena has 2 semispace IDs one equal to the arena ID and the other equal
-// to the 1's complement of the arena ID. At any time one of these semispaces
-// is used for allocation and the other is used for collection.
-char get_arena_allocation_semispace_id(const struct arena *);
-
-// Returns the given arena's current collection semispace ID.
-// See above for details.
-char get_arena_collection_semispace_id(const struct arena *);
-
 // Returns the ID of the semispace where the given address was allocated.
 // The behavior is undefined if called with an address that has not been
 // allocated within an arena.
 char get_arena_semispace_id_of_object(void *);
 
-// helper function for `kore_arena_alloc`. Do not call directly.
-void *do_alloc_slow(size_t, struct arena *);
-
 // Allocates the requested number of bytes as a contiguous region and returns a
 // pointer to the first allocated byte.
 // If called with requested size greater than the maximun single allocation
 // size, the space is allocated in a general (not garbage collected pool).
-inline void *kore_arena_alloc(struct arena *arena, size_t requested) {
-  if (arena->block + requested > arena->block_end) {
-    return do_alloc_slow(requested, arena);
+inline void *arena::kore_arena_alloc(size_t requested) {
+  if (block + requested > block_end) {
+    return do_alloc_slow(requested);
   }
-  void *result = arena->block;
-  arena->block += requested;
+  void *result = block;
+  block += requested;
   MEM_LOG(
       "Allocation at %p (size %zd), next alloc at %p (if it fits)\n", result,
-      requested, arena->block);
+      requested, block);
   return result;
 }
 
-// Resizes the last allocation as long as the resize does not require a new
-// block allocation.
-// Returns the address of the byte following the last newlly allocated byte when
-// the resize succeeds, returns 0 otherwise.
-void *arena_resize_last_alloc(struct arena *, ssize_t);
-
-// Exchanges the current allocation and collection semispaces and clears the new
-// current allocation semispace by setting its start back to its first block.
-// It is used before garbage collection.
-void arena_swap_and_clear(struct arena *);
-
-// Clears the current allocation space by setting its start back to its first
-// block. It is used during garbage collection to effectively collect all of the
-// arena.
-void arena_clear(struct arena *);
-
-// Returns the address of the first byte that belongs in the given arena.
-// Returns 0 if nothing has been allocated ever in that arena.
-char *arena_start_ptr(const struct arena *);
-
-// Returns a pointer to a location holding the address of last allocated
-// byte in the given arena plus 1.
-// This address is 0 if nothing has been allocated ever in that arena.
-char **arena_end_ptr(struct arena *);
-
 // Given a starting pointer to an address allocated in an arena and a size in
 // bytes, this function returns a pointer to an address allocated in the
 // same arena after size bytes from the starting pointer.
@@ -119,10 +130,6 @@ char *move_ptr(char *, size_t, char const *);
 // different arenas.
 ssize_t ptr_diff(char *, char *);
 
-// return the total number of allocatable bytes currently in the arena in its
-// active semispace.
-size_t arena_size(const struct arena *);
-
 // Deallocates all the memory allocated for registered arenas.
 void free_all_memory(void);
 }
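The inline fast path above is a plain bump allocator: an allocation that fits the current block just advances `block`. The standalone sketch below (not part of the patch; `bump_arena`, `kBlockSize`, and `attach` are illustrative stand-ins, and the slow path is elided) shows the behavior the class implements.

#include <cstddef>
#include <cstdio>

namespace demo {
constexpr std::size_t kBlockSize = 1 << 20; // stand-in for BLOCK_SIZE (1 MiB)

struct bump_arena {
  char *block = nullptr;     // next free byte (mirrors arena::block)
  char *block_end = nullptr; // one past the last usable byte

  void attach(char *mem, std::size_t len) {
    block = mem;
    block_end = mem + len;
  }

  // The fast path: an allocation that fits just bumps `block`.
  void *alloc(std::size_t requested) {
    if (block + requested > block_end) {
      return nullptr; // the real code calls do_alloc_slow() here
    }
    void *result = block;
    block += requested;
    return result;
  }
};
} // namespace demo

int main() {
  static char backing[demo::kBlockSize];
  demo::bump_arena a;
  a.attach(backing, sizeof backing);
  char *p = static_cast<char *>(a.alloc(64));
  char *q = static_cast<char *>(a.alloc(64));
  std::printf("contiguous: %d\n", q == p + 64); // prints 1
  return 0;
}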
diff --git a/include/runtime/collect.h b/include/runtime/collect.h
index a4ef7d2b0..1d448fcd4 100644
--- a/include/runtime/collect.h
+++ b/include/runtime/collect.h
@@ -26,8 +26,8 @@ using set_node = set::iterator::node_t;
 using set_impl = set::iterator::tree_t;
 
 extern "C" {
-extern size_t numBytesLiveAtCollection[1 << AGE_WIDTH];
-extern bool collect_old;
+extern thread_local size_t numBytesLiveAtCollection[1 << AGE_WIDTH];
+extern thread_local bool collect_old;
 size_t get_size(uint64_t, uint16_t);
 void migrate_static_roots(void);
 void migrate(block **block_ptr);
diff --git a/include/runtime/header.h b/include/runtime/header.h
index 000ec7cd2..d82cae004 100644
--- a/include/runtime/header.h
+++ b/include/runtime/header.h
@@ -47,8 +47,14 @@ size_t hash_k(block *);
 void k_hash(block *, void *);
 bool hash_enter(void);
 void hash_exit(void);
-
+#ifdef __MACH__
+//
+// thread_local disabled for Apple
+//
 extern bool gc_enabled;
+#else
+extern thread_local bool gc_enabled;
+#endif
 }
 
 class k_elem {
diff --git a/lib/codegen/CreateTerm.cpp b/lib/codegen/CreateTerm.cpp
index 13f95b686..60117f53e 100644
--- a/lib/codegen/CreateTerm.cpp
+++ b/lib/codegen/CreateTerm.cpp
@@ -782,10 +782,25 @@ llvm::Value *create_term::disable_gc() {
   llvm::Constant *global
       = module_->getOrInsertGlobal("gc_enabled", llvm::Type::getInt1Ty(ctx_));
   auto *global_var = llvm::cast<llvm::GlobalVariable>(global);
+#ifdef __MACH__
+  //
+  // thread_local disabled for Apple
+  //
+  /*
+  global_var->setThreadLocal(true);
+  llvm::IRBuilder<> b(current_block_);
+  auto *global_var_address = b.CreateThreadLocalAddress(global_var);
+  */
+  auto *global_var_address = global_var;
+#else
+  global_var->setThreadLocal(true);
+  auto *global_var_address = global_var;
+#endif
   auto *old_val = new llvm::LoadInst(
-      llvm::Type::getInt1Ty(ctx_), global_var, "was_enabled", current_block_);
+      llvm::Type::getInt1Ty(ctx_), global_var_address, "was_enabled",
+      current_block_);
   new llvm::StoreInst(
-      llvm::ConstantInt::getFalse(ctx_), global_var, current_block_);
+      llvm::ConstantInt::getFalse(ctx_), global_var_address, current_block_);
   return old_val;
 }
 
@@ -793,7 +808,21 @@ void create_term::enable_gc(llvm::Value *was_enabled) {
   llvm::Constant *global
       = module_->getOrInsertGlobal("gc_enabled", llvm::Type::getInt1Ty(ctx_));
   auto *global_var = llvm::cast<llvm::GlobalVariable>(global);
-  new llvm::StoreInst(was_enabled, global_var, current_block_);
+#ifdef __MACH__
+  //
+  // thread_local disabled for Apple
+  //
+  /*
+  global_var->setThreadLocal(true);
+  llvm::IRBuilder<> b(current_block_);
+  auto *global_var_address = b.CreateThreadLocalAddress(global_var);
+  */
+  auto *global_var_address = global_var;
+#else
+  global_var->setThreadLocal(true);
+  auto *global_var_address = global_var;
+#endif
+  new llvm::StoreInst(was_enabled, global_var_address, current_block_);
 }
 
 // We use tailcc calling convention for apply_rule_* and eval_* functions to
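At the source level, the IR emitted by disable_gc()/enable_gc() behaves like the save/restore pair below. This is a sketch only; the `_demo` names are stand-ins for the runtime's `gc_enabled`, which the patch declares thread_local everywhere except on __MACH__.

#include <cstdio>

thread_local bool gc_enabled_demo = true; // stand-in for gc_enabled

bool disable_gc_demo() {
  bool was_enabled = gc_enabled_demo; // the LoadInst named "was_enabled"
  gc_enabled_demo = false;            // StoreInst of constant false
  return was_enabled;
}

void enable_gc_demo(bool was_enabled) {
  gc_enabled_demo = was_enabled; // StoreInst of the saved value
}

int main() {
  bool saved = disable_gc_demo();
  // ... allocation-heavy work that must not trigger a collection ...
  enable_gc_demo(saved);
  std::printf("restored: %d\n", gc_enabled_demo); // prints 1
  return 0;
}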
diff --git a/lib/codegen/Decision.cpp b/lib/codegen/Decision.cpp
index 14f8c7964..97a5e6c68 100644
--- a/lib/codegen/Decision.cpp
+++ b/lib/codegen/Decision.cpp
@@ -5,6 +5,7 @@
 #include "kllvm/codegen/ProofEvent.h"
 #include "kllvm/codegen/Util.h"
 
+#include "llvm/IR/IRBuilder.h"
 #include <llvm/ADT/APInt.h>
 #include <llvm/IR/BasicBlock.h>
 #include <llvm/IR/Constants.h>
@@ -1012,9 +1013,25 @@ std::pair<std::vector<llvm::Value *>, llvm::BasicBlock *> step_function_header(
   auto *collection = module->getOrInsertGlobal(
       "time_for_collection", llvm::Type::getInt1Ty(module->getContext()));
+
+#ifdef __MACH__
+  //
+  // thread_local disabled for Apple
+  //
+  /*
+  llvm::cast<llvm::GlobalVariable>(collection)->setThreadLocal(true);
+  llvm::IRBuilder<> b(check_collect);
+  auto *collection_address = b.CreateThreadLocalAddress(collection);
+  */
+  auto *collection_address = collection;
+#else
+  llvm::cast<llvm::GlobalVariable>(collection)->setThreadLocal(true);
+  auto *collection_address = collection;
+#endif
+
   auto *is_collection = new llvm::LoadInst(
-      llvm::Type::getInt1Ty(module->getContext()), collection, "is_collection",
-      check_collect);
+      llvm::Type::getInt1Ty(module->getContext()), collection_address,
+      "is_collection", check_collect);
   set_debug_loc(is_collection);
   auto *collect = llvm::BasicBlock::Create(
       module->getContext(), "isCollect", block->getParent());
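The header emitted by step_function_header() polls time_for_collection at the top of every step function. A rough C++ equivalent of the emitted control flow (sketch only; `_demo` names are stand-ins, and the real collector entry point is not shown):

#include <cstdio>

thread_local bool time_for_collection_demo = false; // stand-in

void kore_collect_demo() { // stand-in for the collector entry point
  time_for_collection_demo = false;
  std::printf("collected\n");
}

// Load the flag, branch to the "isCollect" block when set, then continue
// with rule evaluation in the merge block.
void step_demo() {
  if (time_for_collection_demo) { // the LoadInst named "is_collection"
    kore_collect_demo();
  }
  // ... rule application continues here ...
}

int main() {
  step_demo(); // quiet: flag not set
  time_for_collection_demo = true;
  step_demo(); // prints "collected"
  return 0;
}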
diff --git a/runtime/alloc/arena.cpp b/runtime/alloc/arena.cpp
index dd0f6b2e6..8cfbe22fa 100644
--- a/runtime/alloc/arena.cpp
+++ b/runtime/alloc/arena.cpp
@@ -3,6 +3,8 @@
 #include <cstdio>
 #include <cstdlib>
 #include <cstring>
+#include <memory>
+#include <sys/mman.h>
 
 #include "runtime/alloc.h"
 #include "runtime/arena.h"
@@ -17,29 +19,9 @@ mem_block_header(void *ptr) {
       ((uintptr_t)(ptr)-1) & ~(BLOCK_SIZE - 1));
 }
 
-__attribute__((always_inline)) void arena_reset(struct arena *arena) {
-  char id = arena->allocation_semispace_id;
-  if (id < 0) {
-    id = ~arena->allocation_semispace_id;
-  }
-  arena->first_block = nullptr;
-  arena->block = nullptr;
-  arena->block_start = nullptr;
-  arena->block_end = nullptr;
-  arena->first_collection_block = nullptr;
-  arena->num_blocks = 0;
-  arena->num_collection_blocks = 0;
-  arena->allocation_semispace_id = id;
-}
-
 __attribute__((always_inline)) char
-get_arena_allocation_semispace_id(const struct arena *arena) {
-  return arena->allocation_semispace_id;
-}
-
-__attribute__((always_inline)) char
-get_arena_collection_semispace_id(const struct arena *arena) {
-  return ~arena->allocation_semispace_id;
+arena::get_arena_collection_semispace_id() const {
+  return ~allocation_semispace_id;
 }
 
 __attribute__((always_inline)) char
@@ -47,137 +29,169 @@ get_arena_semispace_id_of_object(void *ptr) {
   return mem_block_header(ptr)->semispace;
 }
 
-static void *first_superblock_ptr = nullptr;
-static void *superblock_ptr = nullptr;
-static char **next_superblock_ptr = nullptr;
-static unsigned blocks_left = 0;
+//
+// We will reserve enough address space for 1 million 1MB blocks. This may need increasing on a > 1TB server.
+//
+size_t const HYPERBLOCK_SIZE = (size_t)BLOCK_SIZE * 1024 * 1024;
+static thread_local void *hyperblock_ptr = nullptr; // only needed for munmap()
 
 static void *megabyte_malloc() {
-  if (blocks_left == 0) {
-    blocks_left = 15;
-    if (int result
-        = posix_memalign(&superblock_ptr, BLOCK_SIZE, BLOCK_SIZE * 15)) {
-      errno = result;
-      perror("posix_memalign");
-    }
-    if (!first_superblock_ptr) {
-      first_superblock_ptr = superblock_ptr;
-    }
-    if (next_superblock_ptr) {
-      *next_superblock_ptr = (char *)superblock_ptr;
+  //
+  // Returns a pointer to a BLOCK_SIZE chunk of memory with BLOCK_SIZE alignment.
+  //
+  static thread_local char *currentblock_ptr
+      = nullptr; // char* rather than void* to permit pointer arithmetic
+  if (currentblock_ptr) {
+    //
+    // We expect a page fault due to not being able to map physical memory to this block, or the
+    // process to be killed by the OOM killer, long before we run off the end of our address space.
+    //
+    currentblock_ptr += BLOCK_SIZE;
+  } else {
+    //
+    // First call - need to reserve the address space.
+    //
+    size_t request = HYPERBLOCK_SIZE;
+    void *addr = mmap(
+        nullptr, // let OS choose the address
+        request, // Linux and macOS both allow up to 64TB
+        PROT_READ | PROT_WRITE, // read, write but not execute
+        MAP_ANONYMOUS | MAP_PRIVATE
+            | MAP_NORESERVE, // allocate address space only
+        -1, // no file backing
+        0); // no offset
+    if (addr == MAP_FAILED) {
+      perror("mmap()");
+      abort();
     }
-    auto *hdr = (memory_block_header *)superblock_ptr;
-    next_superblock_ptr = &hdr->next_superblock;
-    hdr->next_superblock = nullptr;
+    hyperblock_ptr = addr;
+    //
+    // We ask for one block worth of address space less than we allocated so alignment will always succeed.
+    // We don't worry about unused address space either side of our aligned address space because there will be no
+    // memory mapped to it.
+    //
+    currentblock_ptr = reinterpret_cast<char *>(
+        std::align(BLOCK_SIZE, HYPERBLOCK_SIZE - BLOCK_SIZE, addr, request));
   }
-  blocks_left--;
-  void *result = superblock_ptr;
-  superblock_ptr = (char *)superblock_ptr + BLOCK_SIZE;
-  return result;
+  return currentblock_ptr;
+}
+
+void free_all_memory() {
+  //
+  // Frees all memory that was demand paged into this address range.
+  //
+  munmap(hyperblock_ptr, HYPERBLOCK_SIZE);
 }
 
+#ifdef __MACH__
+//
+// thread_local disabled for Apple
+//
 bool time_for_collection;
+#else
+thread_local bool time_for_collection;
+#endif
 
-static void fresh_block(struct arena *arena) {
+void arena::fresh_block() {
   char *next_block = nullptr;
-  if (arena->block_start == nullptr) {
+  if (block_start == nullptr) {
     next_block = (char *)megabyte_malloc();
-    arena->first_block = next_block;
+    first_block = next_block;
     auto *next_header = (memory_block_header *)next_block;
     next_header->next_block = nullptr;
-    next_header->semispace = arena->allocation_semispace_id;
-    arena->num_blocks++;
+    next_header->semispace = allocation_semispace_id;
+    num_blocks++;
   } else {
-    next_block = *(char **)arena->block_start;
-    if (arena->block != arena->block_end) {
-      if (arena->block_end - arena->block == 8) {
-        *(uint64_t *)arena->block
-            = NOT_YOUNG_OBJECT_BIT; // 8 bit sentinel value
+    next_block = *(char **)block_start;
+    if (block != block_end) {
+      if (block_end - block == 8) {
+        *(uint64_t *)block = NOT_YOUNG_OBJECT_BIT; // 8-bit sentinel value
       } else {
-        *(uint64_t *)arena->block = arena->block_end - arena->block
-                                    - 8; // 16-bit or more sentinel value
+        *(uint64_t *)block
+            = block_end - block - 8; // 16-bit or more sentinel value
      }
    }
    if (!next_block) {
      MEM_LOG(
          "Allocating new block for the first time in arena %d\n",
-          arena->allocation_semispace_id);
+          allocation_semispace_id);
      next_block = (char *)megabyte_malloc();
-      *(char **)arena->block_start = next_block;
+      *(char **)block_start = next_block;
      auto *next_header = (memory_block_header *)next_block;
      next_header->next_block = nullptr;
-      next_header->semispace = arena->allocation_semispace_id;
-      arena->num_blocks++;
+      next_header->semispace = allocation_semispace_id;
+      num_blocks++;
      time_for_collection = true;
    }
  }
-  if (!*(char **)next_block && arena->num_blocks >= get_gc_threshold()) {
+  if (!*(char **)next_block && num_blocks >= get_gc_threshold()) {
    time_for_collection = true;
  }
-  arena->block = next_block + sizeof(memory_block_header);
-  arena->block_start = next_block;
-  arena->block_end = next_block + BLOCK_SIZE;
+  block = next_block + sizeof(memory_block_header);
+  block_start = next_block;
+  block_end = next_block + BLOCK_SIZE;
   MEM_LOG(
-      "New block at %p (remaining %zd)\n", arena->block,
+      "New block at %p (remaining %zd)\n", block,
       BLOCK_SIZE - sizeof(memory_block_header));
 }
 
+#ifdef __MACH__
+//
+// thread_local disabled for Apple
+//
 bool gc_enabled = true;
+#else
+thread_local bool gc_enabled = true;
+#endif
 
-__attribute__((noinline)) void *
-do_alloc_slow(size_t requested, struct arena *arena) {
+__attribute__((noinline)) void *arena::do_alloc_slow(size_t requested) {
   MEM_LOG(
-      "Block at %p too small, %zd remaining but %zd needed\n", arena->block,
-      arena->block_end - arena->block, requested);
+      "Block at %p too small, %zd remaining but %zd needed\n", block,
+      block_end - block, requested);
   if (requested > BLOCK_SIZE - sizeof(memory_block_header)) {
     return malloc(requested);
   }
-  fresh_block(arena);
-  void *result = arena->block;
-  arena->block += requested;
+  fresh_block();
+  void *result = block;
+  block += requested;
   MEM_LOG(
       "Allocation at %p (size %zd), next alloc at %p (if it fits)\n", result,
-      requested, arena->block);
+      requested, block);
   return result;
 }
 
 __attribute__((always_inline)) void *
-arena_resize_last_alloc(struct arena *arena, ssize_t increase) {
-  if (arena->block + increase <= arena->block_end) {
-    arena->block += increase;
-    return arena->block;
+arena::arena_resize_last_alloc(ssize_t increase) {
+  if (block + increase <= block_end) {
+    block += increase;
+    return block;
   }
   return nullptr;
 }
 
-__attribute__((always_inline)) void arena_swap_and_clear(struct arena *arena) {
-  char *tmp = arena->first_block;
-  arena->first_block = arena->first_collection_block;
-  arena->first_collection_block = tmp;
-  size_t tmp2 = arena->num_blocks;
-  arena->num_blocks = arena->num_collection_blocks;
-  arena->num_collection_blocks = tmp2;
-  arena->allocation_semispace_id = ~arena->allocation_semispace_id;
-  arena_clear(arena);
+__attribute__((always_inline)) void arena::arena_swap_and_clear() {
+  char *tmp = first_block;
+  first_block = first_collection_block;
+  first_collection_block = tmp;
+  size_t tmp2 = num_blocks;
+  num_blocks = num_collection_blocks;
+  num_collection_blocks = tmp2;
+  allocation_semispace_id = ~allocation_semispace_id;
+  arena_clear();
 }
 
-__attribute__((always_inline)) void arena_clear(struct arena *arena) {
-  arena->block = arena->first_block
-                     ? arena->first_block + sizeof(memory_block_header)
-                     : nullptr;
-  arena->block_start = arena->first_block;
-  arena->block_end
-      = arena->first_block ? arena->first_block + BLOCK_SIZE : nullptr;
+__attribute__((always_inline)) void arena::arena_clear() {
+  block = first_block ? first_block + sizeof(memory_block_header) : nullptr;
+  block_start = first_block;
+  block_end = first_block ? first_block + BLOCK_SIZE : nullptr;
 }
 
-__attribute__((always_inline)) char *
-arena_start_ptr(const struct arena *arena) {
-  return arena->first_block ? arena->first_block + sizeof(memory_block_header)
-                            : nullptr;
+__attribute__((always_inline)) char *arena::arena_start_ptr() const {
+  return first_block ? first_block + sizeof(memory_block_header) : nullptr;
 }
 
-__attribute__((always_inline)) char **arena_end_ptr(struct arena *arena) {
-  return &arena->block;
+__attribute__((always_inline)) char **arena::arena_end_ptr() {
+  return &block;
 }
 
 char *move_ptr(char *ptr, size_t size, char const *arena_end_ptr) {
@@ -223,22 +237,8 @@ ssize_t ptr_diff(char *ptr1, char *ptr2) {
   return -ptr_diff(ptr2, ptr1);
 }
 
-size_t arena_size(const struct arena *arena) {
-  return (arena->num_blocks > arena->num_collection_blocks
-              ? arena->num_blocks
-              : arena->num_collection_blocks)
+size_t arena::arena_size() const {
+  return (num_blocks > num_collection_blocks ? num_blocks
+                                             : num_collection_blocks)
          * (BLOCK_SIZE - sizeof(memory_block_header));
 }
-
-void free_all_memory() {
-  auto *superblock = (memory_block_header *)first_superblock_ptr;
-  while (superblock) {
-    auto *next_superblock = (memory_block_header *)superblock->next_superblock;
-    free(superblock);
-    superblock = next_superblock;
-  }
-  first_superblock_ptr = nullptr;
-  superblock_ptr = nullptr;
-  next_superblock_ptr = nullptr;
-  blocks_left = 0;
-}
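The reservation trick in megabyte_malloc() above is worth seeing in isolation: reserve a huge range of address space up front with MAP_NORESERVE, align the cursor once with std::align, then hand out blocks by bumping a pointer; physical pages are only mapped when touched. A minimal self-contained sketch, with the constants scaled down so it runs anywhere (kBlockSize/kHyperblockSize are demo stand-ins for BLOCK_SIZE/HYPERBLOCK_SIZE):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <memory>
#include <sys/mman.h>

constexpr std::size_t kBlockSize = std::size_t{1} << 20; // 1 MiB, as upstream
constexpr std::size_t kHyperblockSize = kBlockSize * 64; // scaled-down reservation

int main() {
  std::size_t request = kHyperblockSize;
  void *addr = mmap(
      nullptr, request, PROT_READ | PROT_WRITE,
      MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
  if (addr == MAP_FAILED) {
    perror("mmap()");
    return 1;
  }
  void *base = addr; // keep the original mapping address for munmap()
  // Ask to place one block less than was reserved so alignment always
  // succeeds; std::align advances `addr` to the first kBlockSize boundary.
  char *block = static_cast<char *>(
      std::align(kBlockSize, kHyperblockSize - kBlockSize, addr, request));
  block[0] = 42; // touching a page maps it; untouched blocks stay virtual
  std::printf(
      "first block at %p, aligned: %d\n", static_cast<void *>(block),
      reinterpret_cast<std::uintptr_t>(block) % kBlockSize == 0);
  munmap(base, kHyperblockSize);
  return 0;
}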
diff --git a/runtime/alloc/register_gc_roots_enum.cpp b/runtime/alloc/register_gc_roots_enum.cpp
index bbb4b2269..2c1a3165a 100644
--- a/runtime/alloc/register_gc_roots_enum.cpp
+++ b/runtime/alloc/register_gc_roots_enum.cpp
@@ -3,7 +3,7 @@
 #include "runtime/collect.h"
 #include "runtime/header.h"
 
-std::vector<BlockEnumerator> block_enumerators;
+thread_local std::vector<BlockEnumerator> block_enumerators;
 
 void register_gc_roots_enumerator(BlockEnumerator f) {
   block_enumerators.push_back(f);
diff --git a/runtime/arithmetic/int.cpp b/runtime/arithmetic/int.cpp
index fcb4feec6..e76333fb6 100644
--- a/runtime/arithmetic/int.cpp
+++ b/runtime/arithmetic/int.cpp
@@ -373,8 +373,8 @@ void int_hash(mpz_t i, void *hasher) {
   }
 }
 
-gmp_randstate_t kllvm_rand_state;
-bool kllvm_rand_state_initialized = false;
+thread_local gmp_randstate_t kllvm_rand_state;
+thread_local bool kllvm_rand_state_initialized = false;
 
 SortK hook_INT_srand(SortInt seed) {
   if (!kllvm_rand_state_initialized) {
diff --git a/runtime/collect/collect.cpp b/runtime/collect/collect.cpp
index 31b8c4b77..b519bc15b 100644
--- a/runtime/collect/collect.cpp
+++ b/runtime/collect/collect.cpp
@@ -16,15 +16,15 @@ char **old_alloc_ptr(void);
 char *youngspace_ptr(void);
 char *oldspace_ptr(void);
 
-static bool is_gc = false;
-bool collect_old = false;
+static thread_local bool is_gc = false;
+thread_local bool collect_old = false;
 #ifndef GC_DBG
-static uint8_t num_collection_only_young = 0;
+static thread_local uint8_t num_collection_only_young = 0;
 #else
-static char *last_alloc_ptr;
+static thread_local char *last_alloc_ptr;
 #endif
 
-size_t numBytesLiveAtCollection[1 << AGE_WIDTH];
+thread_local size_t numBytesLiveAtCollection[1 << AGE_WIDTH];
 
 bool during_gc() {
   return is_gc;
diff --git a/runtime/collect/migrate_static_roots.cpp b/runtime/collect/migrate_static_roots.cpp
index d162f0bb8..3474e83ee 100644
--- a/runtime/collect/migrate_static_roots.cpp
+++ b/runtime/collect/migrate_static_roots.cpp
@@ -2,10 +2,10 @@
 
 #include "runtime/collect.h"
 
-extern std::vector<BlockEnumerator> block_enumerators;
+extern thread_local std::vector<BlockEnumerator> block_enumerators;
 
-extern gmp_randstate_t kllvm_rand_state;
-extern bool kllvm_rand_state_initialized;
+extern thread_local gmp_randstate_t kllvm_rand_state;
+extern thread_local bool kllvm_rand_state_initialized;
 
 extern "C" {
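All of the GC bookkeeping converted above shares one property: with thread_local, every thread observes an independent, independently initialized copy of the variable. A minimal demonstration (collect_old_demo is an illustrative stand-in for collect_old):

#include <cstdio>
#include <thread>

thread_local bool collect_old_demo = false; // stand-in for collect_old

int main() {
  collect_old_demo = true; // set in the main thread only
  std::thread worker([] {
    // A new thread starts from the initializer, not the main thread's value.
    std::printf("worker sees %d\n", collect_old_demo); // prints 0
  });
  worker.join();
  std::printf("main sees %d\n", collect_old_demo); // still 1
  return 0;
}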
diff --git a/runtime/lto/alloc.cpp b/runtime/lto/alloc.cpp
index 86fa11dfc..0cd79a3f8 100644
--- a/runtime/lto/alloc.cpp
+++ b/runtime/lto/alloc.cpp
@@ -16,47 +16,42 @@ REGISTER_ARENA(youngspace, YOUNGSPACE_ID);
 REGISTER_ARENA(oldspace, OLDSPACE_ID);
 REGISTER_ARENA(alwaysgcspace, ALWAYSGCSPACE_ID);
 
 char *youngspace_ptr() {
-  return arena_start_ptr(&youngspace);
+  return youngspace.arena_start_ptr();
 }
 
 char *oldspace_ptr() {
-  return arena_start_ptr(&oldspace);
+  return oldspace.arena_start_ptr();
 }
 
 char **young_alloc_ptr() {
-  return arena_end_ptr(&youngspace);
+  return youngspace.arena_end_ptr();
 }
 
 char **old_alloc_ptr() {
-  return arena_end_ptr(&oldspace);
+  return oldspace.arena_end_ptr();
 }
 
 char youngspace_collection_id() {
-  return get_arena_collection_semispace_id(&youngspace);
+  return youngspace.get_arena_collection_semispace_id();
 }
 
 char oldspace_collection_id() {
-  return get_arena_collection_semispace_id(&oldspace);
+  return oldspace.get_arena_collection_semispace_id();
 }
 
 size_t youngspace_size(void) {
-  return arena_size(&youngspace);
-}
-
-bool youngspace_almost_full(size_t threshold) {
-  char *next_block = *(char **)youngspace.block_start;
-  return !next_block;
+  return youngspace.arena_size();
 }
 
 void kore_alloc_swap(bool swap_old) {
-  arena_swap_and_clear(&youngspace);
+  youngspace.arena_swap_and_clear();
   if (swap_old) {
-    arena_swap_and_clear(&oldspace);
+    oldspace.arena_swap_and_clear();
   }
 }
 
 void kore_clear() {
-  arena_clear(&alwaysgcspace);
+  alwaysgcspace.arena_clear();
 }
 
 void set_kore_memory_functions_for_gmp() {
@@ -64,25 +59,25 @@ void set_kore_memory_functions_for_gmp() {
 }
 
 __attribute__((always_inline)) void *kore_alloc(size_t requested) {
-  return kore_arena_alloc(&youngspace, requested);
+  return youngspace.kore_arena_alloc(requested);
 }
 
 __attribute__((always_inline)) void *kore_alloc_token(size_t requested) {
   size_t size = (requested + 7) & ~7;
-  return kore_arena_alloc(&youngspace, size < 16 ? 16 : size);
+  return youngspace.kore_arena_alloc(size < 16 ? 16 : size);
 }
 
 __attribute__((always_inline)) void *kore_alloc_old(size_t requested) {
-  return kore_arena_alloc(&oldspace, requested);
+  return oldspace.kore_arena_alloc(requested);
 }
 
 __attribute__((always_inline)) void *kore_alloc_token_old(size_t requested) {
   size_t size = (requested + 7) & ~7;
-  return kore_arena_alloc(&oldspace, size < 16 ? 16 : size);
+  return oldspace.kore_arena_alloc(size < 16 ? 16 : size);
 }
 
 __attribute__((always_inline)) void *kore_alloc_always_gc(size_t requested) {
-  return kore_arena_alloc(&alwaysgcspace, requested);
+  return alwaysgcspace.kore_arena_alloc(requested);
 }
 
 void *
@@ -90,7 +85,7 @@ kore_resize_last_alloc(void *oldptr, size_t newrequest, size_t last_size) {
   newrequest = (newrequest + 7) & ~7;
   last_size = (last_size + 7) & ~7;
 
-  if (oldptr != *arena_end_ptr(&youngspace) - last_size) {
+  if (oldptr != *(youngspace.arena_end_ptr()) - last_size) {
     MEM_LOG(
         "May only reallocate last allocation. Tried to reallocate %p to %zd\n",
         oldptr, newrequest);
@@ -98,7 +93,7 @@ kore_resize_last_alloc(void *oldptr, size_t newrequest, size_t last_size) {
   }
 
   ssize_t increase = newrequest - last_size;
-  if (arena_resize_last_alloc(&youngspace, increase)) {
+  if (youngspace.arena_resize_last_alloc(increase)) {
     return oldptr;
   }
 
@@ -159,8 +154,8 @@ static inline void *kore_alloc_collection(kllvm::sort_category cat) {
   void *mem
       = kore_alloc(sizeof(blockheader) + sizeof(collection) + sizeof(uint64_t));
   auto *hdr = (blockheader *)mem;
-  static std::string name = get_raw_symbol_name(cat) + "{}";
-  static blockheader hdr_val
+  static thread_local std::string name = get_raw_symbol_name(cat) + "{}";
+  static thread_local blockheader hdr_val
       = get_block_header_for_symbol(get_tag_for_symbol_name(name.c_str()));
   *hdr = hdr_val;
   auto *offset = (uint64_t *)(hdr + 1);
diff --git a/unittests/runtime-collections/lists.cpp b/unittests/runtime-collections/lists.cpp
index d4d2a20d9..1aa13a4e0 100644
--- a/unittests/runtime-collections/lists.cpp
+++ b/unittests/runtime-collections/lists.cpp
@@ -62,7 +62,15 @@ block D1 = {{1}};
 block *DUMMY1 = &D1;
 }
 
+#ifdef __MACH__
+//
+// thread_local disabled for Apple
+//
 bool gc_enabled;
+#else
+thread_local bool gc_enabled;
+#endif
+
 size_t get_gc_threshold() {
   return SIZE_MAX;
 }
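The token allocators in runtime/lto/alloc.cpp above round each request up to a multiple of 8 bytes and enforce a 16-byte floor. A quick standalone check of that arithmetic (token_size_demo is an illustrative copy of the expression used in kore_alloc_token):

#include <cstddef>
#include <cstdio>

// Same computation as kore_alloc_token: round up to 8, minimum 16.
std::size_t token_size_demo(std::size_t requested) {
  std::size_t size = (requested + 7) & ~7; // round up to a multiple of 8
  return size < 16 ? 16 : size;            // enforce the 16-byte minimum
}

int main() {
  std::printf(
      "%zu %zu %zu %zu\n",
      token_size_demo(1),   // 16
      token_size_demo(16),  // 16
      token_size_demo(17),  // 24
      token_size_demo(32)); // 32
  return 0;
}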