Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

EE: Allow mapping main memory anywhere #11734

Open
wants to merge 12 commits into
base: master
Choose a base branch
from
1 change: 1 addition & 0 deletions common/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -149,6 +149,7 @@ elseif(APPLE)
Darwin/DarwinThreads.cpp
Darwin/DarwinMisc.cpp
Darwin/DarwinMisc.h
Linux/LnxHostSys.cpp
)
target_compile_options(common PRIVATE -fobjc-arc)
target_link_options(common PRIVATE -fobjc-link-runtime)
Expand Down
200 changes: 0 additions & 200 deletions common/Darwin/DarwinMisc.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -16,16 +16,10 @@
#include <cstring>
#include <cstdlib>
#include <optional>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <time.h>
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/mach_time.h>
#include <mach/mach_vm.h>
#include <mach/task.h>
#include <mach/vm_map.h>
#include <mutex>
#include <IOKit/pwr_mgt/IOPMLib.h>

Expand Down Expand Up @@ -201,200 +195,6 @@ size_t HostSys::GetRuntimeCacheLineSize()
return static_cast<size_t>(std::max<s64>(sysctlbyname_T<s64>("hw.cachelinesize").value_or(0), 0));
}

// Translates a PageProtectionMode into the equivalent Mach VM protection bits.
// Executable pages are always made readable as well, since code must be fetchable.
static __ri vm_prot_t MachProt(const PageProtectionMode& mode)
{
	vm_prot_t prot = 0;
	if (mode.CanRead())
		prot |= VM_PROT_READ;
	if (mode.CanWrite())
		prot |= VM_PROT_WRITE;
	if (mode.CanExecute())
		prot |= VM_PROT_EXECUTE | VM_PROT_READ;
	return prot;
}

// Maps a block of anonymous memory, optionally at a fixed address.
// Returns nullptr on failure, or if the requested mode has no access bits set.
// On Apple Silicon, executable requests go through mmap(MAP_JIT) and cannot be
// placed at a fixed address; all other requests use mach_vm_allocate().
void* HostSys::Mmap(void* base, size_t size, const PageProtectionMode& mode)
{
	pxAssertMsg((size & (__pagesize - 1)) == 0, "Size is page aligned");
	if (mode.IsNone())
		return nullptr;

#ifdef __aarch64__
	// We can't allocate executable memory with mach_vm_allocate() on Apple Silicon.
	// Instead, we need to use MAP_JIT with mmap(), which does not support fixed mappings.
	if (mode.CanExecute())
	{
		// Fixed-address executable mappings are unsupported here; fail cleanly.
		if (base)
			return nullptr;

		const u32 mmap_prot = mode.CanWrite() ? (PROT_READ | PROT_WRITE | PROT_EXEC) : (PROT_READ | PROT_EXEC);
		const u32 flags = MAP_PRIVATE | MAP_ANON | MAP_JIT;
		void* const res = mmap(nullptr, size, mmap_prot, flags, -1, 0);
		return (res == MAP_FAILED) ? nullptr : res;
	}
#endif

	// When base is null, let the kernel choose the address (VM_FLAGS_ANYWHERE);
	// mach_vm_allocate() writes the chosen address back through &base.
	kern_return_t ret = mach_vm_allocate(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&base), size,
		base ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE);
	if (ret != KERN_SUCCESS)
	{
		DEV_LOG("mach_vm_allocate() returned {}", ret);
		return nullptr;
	}

	// Allocation always starts with default protection; apply the requested mode
	// afterwards, and roll the allocation back if that fails.
	ret = mach_vm_protect(mach_task_self(), reinterpret_cast<mach_vm_address_t>(base), size, false, MachProt(mode));
	if (ret != KERN_SUCCESS)
	{
		DEV_LOG("mach_vm_protect() returned {}", ret);
		mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(base), size);
		return nullptr;
	}

	return base;
}

// Releases a region previously obtained from HostSys::Mmap(). A null base is a no-op.
void HostSys::Munmap(void* base, size_t size)
{
	if (base)
		mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(base), size);
}

void HostSys::MemProtect(void* baseaddr, size_t size, const PageProtectionMode& mode)
{
pxAssertMsg((size & (__pagesize - 1)) == 0, "Size is page aligned");

kern_return_t res = mach_vm_protect(mach_task_self(), reinterpret_cast<mach_vm_address_t>(baseaddr), size, false,
MachProt(mode));
if (res != KERN_SUCCESS) [[unlikely]]
{
ERROR_LOG("mach_vm_protect() failed: {}", res);
pxFailRel("mach_vm_protect() failed");
}
}

// Returns an empty string: Mach memory entries are anonymous port rights,
// so no name is ever needed on this platform.
std::string HostSys::GetFileMappingName(const char* prefix)
{
	return std::string();
}

void* HostSys::CreateSharedMemory(const char* name, size_t size)
{
mach_vm_size_t vm_size = size;
mach_port_t port;
const kern_return_t res = mach_make_memory_entry_64(
mach_task_self(), &vm_size, 0, MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE, &port, MACH_PORT_NULL);
if (res != KERN_SUCCESS)
{
ERROR_LOG("mach_make_memory_entry_64() failed: {}", res);
return nullptr;
}

return reinterpret_cast<void*>(static_cast<uintptr_t>(port));
}

void HostSys::DestroySharedMemory(void* ptr)
{
mach_port_deallocate(mach_task_self(), static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(ptr)));
}

void* HostSys::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, const PageProtectionMode& mode)
{
mach_vm_address_t ptr = reinterpret_cast<mach_vm_address_t>(baseaddr);
const kern_return_t res = mach_vm_map(mach_task_self(), &ptr, size, 0, baseaddr ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE,
static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(handle)), offset, FALSE,
MachProt(mode), VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
if (res != KERN_SUCCESS)
{
ERROR_LOG("mach_vm_map() failed: {}", res);
return nullptr;
}

return reinterpret_cast<void*>(ptr);
}

void HostSys::UnmapSharedMemory(void* baseaddr, size_t size)
{
const kern_return_t res = mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(baseaddr), size);
if (res != KERN_SUCCESS)
pxFailRel("Failed to unmap shared memory");
}

#ifdef _M_ARM64

// Flushes the instruction cache for freshly written code (required on ARM64,
// where I-cache and D-cache are not coherent).
void HostSys::FlushInstructionCache(void* address, u32 size)
{
	char* const begin = reinterpret_cast<char*>(address);
	char* const end = begin + size;
	__builtin___clear_cache(begin, end);
}

#endif

// Takes ownership of an already-reserved address range; the range is created
// by Create() and released by the destructor.
SharedMemoryMappingArea::SharedMemoryMappingArea(u8* base_ptr, size_t size, size_t num_pages)
	: m_base_ptr(base_ptr)
	, m_size(size)
	, m_num_pages(num_pages)
{
}

// Releases the reserved address range. All mappings must have been unmapped
// first; tearing down the area with live mappings is a programming error.
SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
	pxAssertRel(m_num_mappings == 0, "No mappings left");

	const kern_return_t kr =
		mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(m_base_ptr), m_size);
	if (kr != KERN_SUCCESS)
		pxFailRel("Failed to release shared memory area");
}


// Reserves a contiguous, inaccessible (VM_PROT_NONE) address range of the given
// size. Individual pages are later populated via Map()/Unmap(). Returns an
// empty pointer on failure.
std::unique_ptr<SharedMemoryMappingArea> SharedMemoryMappingArea::Create(size_t size)
{
	pxAssertRel(Common::IsAlignedPow2(size, __pagesize), "Size is page aligned");

	// Must be zero-initialized: with VM_FLAGS_ANYWHERE, mach_vm_map() reads the
	// incoming value as a placement hint, so an indeterminate value here was UB
	// and could skew where the kernel places the reservation.
	mach_vm_address_t alloc = 0;
	const kern_return_t res =
		mach_vm_map(mach_task_self(), &alloc, size, 0, VM_FLAGS_ANYWHERE,
			MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_NONE);
	if (res != KERN_SUCCESS)
	{
		ERROR_LOG("mach_vm_map() failed: {}", res);
		return {};
	}

	return std::unique_ptr<SharedMemoryMappingArea>(new SharedMemoryMappingArea(reinterpret_cast<u8*>(alloc), size, size / __pagesize));
}

// Maps a window of a shared memory object over part of the reserved range.
// map_base must lie inside this area. Returns the mapped address, or nullptr on failure.
u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size, const PageProtectionMode& mode)
{
	pxAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

	const mach_port_t entry_port = static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(file_handle));

	// VM_FLAGS_OVERWRITE replaces the placeholder reservation at map_base in one step.
	const kern_return_t kr = mach_vm_map(mach_task_self(),
		reinterpret_cast<mach_vm_address_t*>(&map_base), map_size, 0, VM_FLAGS_OVERWRITE,
		entry_port, file_offset, false, MachProt(mode), VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
	if (kr != KERN_SUCCESS) [[unlikely]]
	{
		ERROR_LOG("mach_vm_map() failed: {}", kr);
		return nullptr;
	}

	m_num_mappings++;
	return static_cast<u8*>(map_base);
}

bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
{
pxAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

const kern_return_t res =
mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&map_base), map_size, 0, VM_FLAGS_OVERWRITE,
MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_NONE);
if (res != KERN_SUCCESS) [[unlikely]]
{
ERROR_LOG("mach_vm_map() failed: {}", res);
return false;
}

m_num_mappings--;
return true;
}

#ifdef _M_ARM64

static thread_local int s_code_write_depth = 0;
Expand Down
11 changes: 1 addition & 10 deletions common/HostSys.h
Original file line number Diff line number Diff line change
Expand Up @@ -90,20 +90,11 @@ static __fi PageProtectionMode PageAccess_Any()
// --------------------------------------------------------------------------------------
namespace HostSys
{
// Maps a block of memory for use as a recompiled code buffer.
// Returns NULL on allocation failure.
extern void* Mmap(void* base, size_t size, const PageProtectionMode& mode);

// Unmaps a block allocated by SysMmap
extern void Munmap(void* base, size_t size);

extern void MemProtect(void* baseaddr, size_t size, const PageProtectionMode& mode);

extern std::string GetFileMappingName(const char* prefix);
extern void* CreateSharedMemory(const char* name, size_t size);
extern void DestroySharedMemory(void* ptr);
extern void* MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, const PageProtectionMode& mode);
extern void UnmapSharedMemory(void* baseaddr, size_t size);

/// JIT write protect for Apple Silicon. Needs to be called prior to writing to any RWX pages.
#if !defined(__APPLE__) || !defined(_M_ARM64)
Expand Down Expand Up @@ -146,7 +137,7 @@ namespace PageFaultHandler
class SharedMemoryMappingArea
{
public:
static std::unique_ptr<SharedMemoryMappingArea> Create(size_t size);
static std::unique_ptr<SharedMemoryMappingArea> Create(size_t size, bool jit = false);

~SharedMemoryMappingArea();

Expand Down
Loading