make hyperblock size depend on physical memory
stevenmeker committed Dec 16, 2024
1 parent 3c2819a commit d8eb2e9
Showing 2 changed files with 41 additions and 9 deletions.
10 changes: 7 additions & 3 deletions include/runtime/arena.h
@@ -11,7 +11,7 @@

extern "C" {

-size_t const HYPERBLOCK_SIZE = (size_t)BLOCK_SIZE * 1024 * 1024;
+size_t const hyperblock_size = (size_t)BLOCK_SIZE * 1024 * 1024;

// An arena can be used to allocate objects that can then be deallocated all at
// once.
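
For scale: the old constant fixed the hyperblock at BLOCK_SIZE * 1024 * 1024 bytes regardless of the machine. If BLOCK_SIZE is 2^20 bytes (an assumption for illustration; the real value is defined elsewhere in the runtime), that is 2^40 bytes = 1 TiB of reserved address space per semispace. After this commit, the size instead tracks physical memory rounded up to a power of two, so a machine with 24 GiB of RAM reserves a 32 GiB hyperblock.
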
@@ -117,6 +117,10 @@ class arena {
= nullptr; // pointer to start of collection address space
size_t num_collection_blocks
= 0; // notional number of BLOCK_SIZE blocks in collection semispace
+  //
+  // hyperblock_size is set to a non-zero value just once, at runtime, by any thread.
+  //
+  static size_t hyperblock_size;
};

inline char arena::get_arena_semispace_id_of_object(void *ptr) {
@@ -128,7 +132,7 @@ inline char arena::get_arena_semispace_id_of_object(void *ptr) {
// Set the low bits to 1 to get the address of the last byte in the hyperblock.
//
uintptr_t end_address
-      = reinterpret_cast<uintptr_t>(ptr) | (HYPERBLOCK_SIZE - 1);
+      = reinterpret_cast<uintptr_t>(ptr) | (hyperblock_size - 1);
return *reinterpret_cast<char *>(end_address);
}
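
The id lookup above relies on hyperblock_size being a power of two: for any pointer into a block aligned to its own size, OR-ing in the low bits yields the address of the block's last byte. A minimal standalone sketch of the same trick (block size and id byte are hypothetical; this is not the arena code itself):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

int main() {
  // Hypothetical block size for the demo: 1 MiB, allocated aligned to its own size.
  size_t const block_size = 1 << 20;
  char *base = static_cast<char *>(std::aligned_alloc(block_size, block_size));
  if (base == nullptr)
    return 1;

  // Tag the last byte of the block with a semispace id, as the arena does.
  base[block_size - 1] = 'A';

  // Any interior pointer recovers the id: setting the low bits to 1 yields
  // the address of the block's last byte.
  char *interior = base + 12345;
  uintptr_t end_address
      = reinterpret_cast<uintptr_t>(interior) | (block_size - 1);
  std::printf("id = %c\n", *reinterpret_cast<char *>(end_address)); // id = A

  std::free(base);
  return 0;
}

Because the arena's hyperblocks are carved out of mmap()ed space aligned to hyperblock_size, the same OR works for any object the arena hands out.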

@@ -159,7 +163,7 @@ inline void *arena::kore_arena_alloc(size_t requested) {
// We move the tripwire to 1 past the end of our hyperblock so that we have
// a well defined comparison that will always be false until the next arena swap.
//
-  tripwire = current_addr_ptr + HYPERBLOCK_SIZE;
+  tripwire = current_addr_ptr + hyperblock_size;
}
void *result = allocation_ptr;
allocation_ptr += requested;
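
The hunk above only shows the refill of the tripwire; the comparison that trips it sits in the collapsed lines. A minimal model of the bump-allocate-plus-tripwire flow, under the assumption that the elided check compares allocation_ptr against tripwire (all state and sizes hypothetical; not the real allocator):

#include <cstddef>
#include <cstdio>

static size_t hyperblock_size_ = 0;
static bool time_for_collection = false;
static char *current_addr_ptr = nullptr; // start of the active hyperblock
static char *allocation_ptr = nullptr;   // bump pointer
static char *tripwire = nullptr;         // crossing it schedules a collection

static void *arena_alloc_sketch(size_t requested) {
  if (allocation_ptr + requested >= tripwire) {
    time_for_collection = true;
    // Park the tripwire one past the end of the hyperblock so this test
    // stays false until the next semispace swap resets it.
    tripwire = current_addr_ptr + hyperblock_size_;
  }
  void *result = allocation_ptr;
  allocation_ptr += requested;
  return result;
}

int main() {
  static char space[1 << 16];           // stand-in hyperblock
  hyperblock_size_ = sizeof(space);
  current_addr_ptr = allocation_ptr = space;
  tripwire = space + 1024;              // pretend BLOCK_SIZE threshold
  arena_alloc_sketch(2048);             // crosses the tripwire once
  std::printf("collection requested: %d\n", time_for_collection); // prints 1
  return 0;
}
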
40 changes: 34 additions & 6 deletions runtime/alloc/arena.cpp
@@ -23,11 +23,39 @@ thread_local bool time_for_collection = false;
thread_local bool gc_enabled = true;
#endif

+size_t arena::hyperblock_size = 0;

void arena::initialize_semispace() {
+  if (hyperblock_size == 0) {
+    //
+    // No thread has initialized hyperblock_size at this moment, so we will.
+    // We get the size of physical memory from the OS as the number of physical pages * page size.
+    //
+    size_t pages = sysconf(_SC_PHYS_PAGES);
+    size_t page_size = sysconf(_SC_PAGE_SIZE);
+    size_t v = pages * page_size;
+    //
+    // We require hyperblock_size to be a power of 2 for bit twiddling, so we
+    // round up to the next power of 2.
+    //
+    --v;
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    v |= v >> 32;
+    v++;
+    //
+    // Writing to a class static variable might be a race with another thread.
+    //
+    hyperblock_size = v;
+  }
//
// Current semispace is uninitialized so mmap() a big chunk of address space.
//
-  size_t request = 2 * HYPERBLOCK_SIZE;
+  size_t request = 2 * hyperblock_size;
void *addr = mmap(
nullptr, // let OS choose the address
request, // Linux and MacOS both allow up to 64TB
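
The bit-smearing sequence in this hunk is the standard round-up-to-next-power-of-two trick for 64-bit integers: after v-1, each shift-and-OR copies the highest set bit into every lower position, and v+1 then lands on the next power of two. A self-contained sketch with worked values (function name hypothetical; assumes a 64-bit size_t, as the v >> 32 shift in the diff already does):

#include <cstdio>
#include <cstdlib>

// Round v up to the next power of two (v > 0), as in the diff above.
static size_t round_up_pow2(size_t v) {
  --v;          // so an exact power of two maps to itself
  v |= v >> 1;  // smear the highest set bit downward...
  v |= v >> 2;
  v |= v >> 4;
  v |= v >> 8;
  v |= v >> 16;
  v |= v >> 32; // ...until all 64 bits below it are set
  return v + 1;
}

int main() {
  std::printf("%zu\n", round_up_pow2(24UL << 30)); // 24 GiB -> 34359738368 (32 GiB)
  std::printf("%zu\n", round_up_pow2(16UL << 30)); // 16 GiB -> 17179869184 (unchanged)
  return 0;
}
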
@@ -46,12 +74,12 @@ void arena::initialize_semispace() {
auto *start_block = reinterpret_cast<char *>(addr);
auto *end_block = start_block + request;
//
-// We allocated 2 * HYPERBLOCK_SIZE worth of address space but we're only going to use 1, aligned on a
-// HYPERBLOCK_SIZE boundry. This is so we can get end of the hyperblock by setting the low bits of any
+// We allocated 2 * hyperblock_size worth of address space but we're only going to use 1, aligned on a
+// hyperblock_size boundary. This is so we can get the end of the hyperblock by setting the low bits of any
// address within the space to 1.
//
current_addr_ptr = reinterpret_cast<char *>(
-      std::align(HYPERBLOCK_SIZE, HYPERBLOCK_SIZE, addr, request));
+      std::align(hyperblock_size, hyperblock_size, addr, request));
//
// Release any unused address space at the start of the mmap()ed block.
//
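
The reserve-then-trim pattern in this function works because any 2 * hyperblock_size range of addresses must contain a hyperblock_size-aligned span of hyperblock_size bytes. A standalone sketch of the same pattern (sizes hypothetical; assumes a POSIX system with MAP_ANONYMOUS):

#include <sys/mman.h>
#include <cstddef>
#include <cstdio>
#include <memory>

int main() {
  size_t const size = 1UL << 30; // hypothetical 1 GiB hyperblock
  size_t request = 2 * size;     // over-reserve so an aligned span must exist
  void *addr = mmap(nullptr, request, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (addr == MAP_FAILED)
    return 1;

  char *start = static_cast<char *>(addr);
  char *end = start + request;

  // std::align advances its pointer argument to the first size-aligned
  // address with size bytes still available, shrinking request to match.
  void *cursor = addr;
  auto *aligned = static_cast<char *>(std::align(size, size, cursor, request));

  // Return the unused slop at both ends to the OS, as the diff does.
  if (size_t front_slop = aligned - start)
    munmap(start, front_slop);
  if (size_t back_slop = end - (aligned + size))
    munmap(aligned + size, back_slop);

  std::printf("aligned hyperblock at %p\n", static_cast<void *>(aligned));
  munmap(aligned, size);
  return 0;
}
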
@@ -61,15 +89,15 @@ void arena::initialize_semispace() {
//
// Release any unused address space at the end of the mmap()ed block.
//
-  auto *end_aligned = current_addr_ptr + HYPERBLOCK_SIZE;
+  auto *end_aligned = current_addr_ptr + hyperblock_size;
if (size_t back_slop = end_block - end_aligned) {
munmap(end_aligned, back_slop);
}
//
// We put a semispace id in the last byte of the hyperblock so we can identify which semispace an address
// belongs to by setting the low bits to 1 to access this id.
//
-  current_addr_ptr[HYPERBLOCK_SIZE - 1] = allocation_semispace_id;
+  current_addr_ptr[hyperblock_size - 1] = allocation_semispace_id;
allocation_ptr = current_addr_ptr;
//
// We set the tripwire for this space so we trigger a garbage collection when we pass BLOCK_SIZE of memory
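
The first hunk's comment notes that writing hyperblock_size from several threads "might be a race": every thread stores the same value, but concurrent unsynchronized writes to the same object are still a data race under the C++ memory model. A race-free alternative (a sketch of one conventional option, not what this commit does) is std::call_once:

#include <cstddef>
#include <mutex>
#include <unistd.h>

// Hypothetical stand-ins for the class static and its initializer.
static size_t hyperblock_size_sketch = 0;
static std::once_flag hyperblock_once;

static void init_hyperblock_size() {
  // The lambda runs exactly once, even if many threads race to get here;
  // call_once also publishes the store to every later caller.
  std::call_once(hyperblock_once, [] {
    size_t v = (size_t)sysconf(_SC_PHYS_PAGES) * (size_t)sysconf(_SC_PAGE_SIZE);
    --v; // round up to the next power of 2, as in the diff
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    hyperblock_size_sketch = v + 1;
  });
}

The commit's version works in practice because every thread writes an identical value, but call_once makes the once-only intent explicit and avoids the formal undefined behavior.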
