diff --git a/Cargo.lock b/Cargo.lock index 1ecb718..227e43e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -154,6 +154,12 @@ dependencies = [ "scopeguard", ] +[[package]] +name = "lock_free_buddy_allocator" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b8256be05fb9612e0276693020bed985954c9e5c621f4ec6edec822af6b13b" + [[package]] name = "log" version = "0.4.22" @@ -310,10 +316,12 @@ dependencies = [ "bitflags", "cc", "lock_api", + "lock_free_buddy_allocator", "memory_addr", "multiboot2", "portable", "spin", + "talc", "thiserror", ] @@ -360,6 +368,15 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "talc" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fcad3be1cfe36eb7d716a04791eba36a197da9d9b6ea1e28e64ac569da3701d" +dependencies = [ + "lock_api", +] + [[package]] name = "thiserror" version = "2.0.9" diff --git a/Cargo.toml b/Cargo.toml index c5fba66..d53a750 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,13 +17,15 @@ panic ="abort" [dependencies] thiserror = { version = "2.0.9", default-features = false } +lock_api = "0.4.12" spin = "0.9.8" bitfield-struct = "0.10.0" bitflags = "2.6.0" memory_addr = "0.3.1" -lock_api = "0.4.12" +lock_free_buddy_allocator = "0.1.0" +talc = "4.4.2" [target.'cfg(any(target_arch = "x86_64", target_arch = "x86"))'.dependencies] multiboot2 = { version = "0.23.1", default-features = false } diff --git a/README.md b/README.md index 792752f..5cef7e4 100644 --- a/README.md +++ b/README.md @@ -9,3 +9,8 @@ Then, run the build script by issuing `./build.sh` | x86 (i486) | Works | | | x86 (i386) | Unsupported | TLB flushing | | x86_64 | TODO | | + +## Help!!! +Here are some things you could help with: +- Find a way to make freeing in the ZonedBuddy page allocator checked (you can free a random page right now and it won't even panic) +- Find a way to also check freeing in NestedPageTable diff --git a/a.out b/a.out index f340bd2..fb0980d 100644 Binary files a/a.out and b/a.out differ diff --git a/bochsrc b/bochsrc new file mode 100644 index 0000000..b5b46a1 --- /dev/null +++ b/bochsrc @@ -0,0 +1,3 @@ +ata0-slave: type=cdrom, path=bin/os.iso, status=inserted +boot: cdrom +magic_break: enabled=1 diff --git a/build.sh b/build.sh index b317c45..f00f7ba 100755 --- a/build.sh +++ b/build.sh @@ -2,8 +2,9 @@ # If this is not your path, kindly change it export CROSS_CC="${CROSS_CC:-$HOME/opt/cross/bin/i686-elf-gcc}" -export ARCH=x86/x32 -export QEMU_SYSTEM=i386 +export ARCH="${ARCH:-x86/x32}" +export QEMU_SYSTEM="${QEMU_SYSTEM:-i386}" +export EMULATOR="${EMULATOR:-qemu}" # Colors if command -v tput &> /dev/null; then @@ -27,7 +28,7 @@ fi # Utils build() { if !
cargo build; then - return -1 + return 1 fi KERNEL="target/target/debug/satan" @@ -44,12 +45,20 @@ build() { } run() { - qemu-system-$QEMU_SYSTEM -d guest_errors -no-reboot -cdrom bin/os.iso + if [ $EMULATOR = "bochs" ]; then + bochs -q + else + qemu-system-$QEMU_SYSTEM -d guest_errors -no-reboot -cdrom bin/os.iso + fi } debug() { - qemu-system-$QEMU_SYSTEM -d guest_errors -no-reboot -cdrom bin/os.iso -s -S & - rust-gdb target/target/debug/satan -x gdbinit + if [ $EMULATOR = "bochs" ]; then + bochs -q + else + qemu-system-$QEMU_SYSTEM -d guest_errors -no-reboot -cdrom bin/os.iso -s -S & + rust-gdb target/target/debug/satan -x gdbinit + fi } clean() { diff --git a/flake.nix b/flake.nix index c8ee1cf..c50f8bf 100644 --- a/flake.nix +++ b/flake.nix @@ -18,7 +18,7 @@ { devShell = pkgs.mkShell { buildInputs = with pkgs; [ - qemu + qemu bochs libisoburn mtools pkgs-crosssystem.buildPackages.grub2 pkgs-crosssystem.buildPackages.gcc diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 0543d26..20713ce 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "nightly-2024-12-11" +channel = "nightly" components = ["rust-src", "rust-analyzer"] diff --git a/src/arch/mod.rs b/src/arch/mod.rs index 408395e..1f1ca74 100644 --- a/src/arch/mod.rs +++ b/src/arch/mod.rs @@ -2,4 +2,18 @@ /// x86 and x86_64 architectures pub mod x86; #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] -pub use x86::*; +pub use x86 as current; + +// * Stub imports to make it easier to see required functions and types + +pub use current::{_panic, _print}; + +/// Instructions like cpuid +pub mod instructions { + pub use super::current::instructions::cpu_id; +} + +/// Interrupt handling +pub mod interrupts { + pub use super::current::interrupts::{disable, enable}; +} diff --git a/src/arch/x86/instructions.rs b/src/arch/x86/instructions.rs new file mode 100644 index 0000000..93874bd --- /dev/null +++ b/src/arch/x86/instructions.rs @@ -0,0 +1,4 @@ +/// Get a CPU identifier +pub fn cpu_id() -> usize { + 0 +} diff --git a/src/arch/x86/interrupts.rs b/src/arch/x86/interrupts.rs index 75d0856..ea25054 100644 --- a/src/arch/x86/interrupts.rs +++ b/src/arch/x86/interrupts.rs @@ -31,16 +31,16 @@ struct InterruptStackFrame { /// Central interrupt handler, all interrupts come here specifying an interrupt number fn interrupt_handler(interrupt: u8, error_code: usize) { + if interrupt == 0x20 { + // Timer + return; + } if interrupt == 0x0E { // Page fault crate::println!("Page fault!\nError code:\n{:#032b}", error_code); crate::println!(" ^ ^^IRUWP"); crate::println!(" SGX SSPK "); - return; - } - if interrupt == 0x20 { - // Timer - return; + panic!("Halt"); } if interrupt == 0x21 { // Keyboard @@ -48,7 +48,15 @@ fn interrupt_handler(interrupt: u8, error_code: usize) { crate::println!("Keyboard: {}", scancode); return; } - + if interrupt <= 0x08 { + panic!("Double fault!!!\nError code: {:#x}", error_code); + } + if interrupt <= 0x1F { + panic!( + "Unhandled exception: {:#x}\nError code: {:#x}", + interrupt, error_code + ); + } loop {} } diff --git a/src/arch/x86/mod.rs b/src/arch/x86/mod.rs index 6f631d6..756bb1c 100644 --- a/src/arch/x86/mod.rs +++ b/src/arch/x86/mod.rs @@ -1,21 +1,42 @@ #[cfg(target_arch = "x86")] core::arch::global_asm!(include_str!("x32/bootstrap.S"), options(att_syntax)); +/// Instructions like cpuid +pub mod instructions; + /// Early logging facilities pub mod early_logger; +pub use early_logger::{_panic, _print}; /// Interrupts and IDT pub mod interrupts; /// Paging 
implementation -/// I spent a lot of time here +/// I spent a lot of time here. +/// And I hate every single second of it. pub mod paging; +mod allocator { + const SIZE: usize = 0x1000; + static mut ARENA: [u8; SIZE] = [0; SIZE]; + + // TODO: Use system allocator on OOM + #[global_allocator] + static ALLOCATOR: talc::Talck, talc::ClaimOnOom> = talc::Talc::new(unsafe { + // if we're in a hosted environment, the Rust runtime may allocate before + // main() is called, so we need to initialize the arena automatically + talc::ClaimOnOom::new(talc::Span::from_slice(core::ptr::addr_of_mut!(ARENA))) + }) + .lock(); +} + /// Kernel setup function. First thing that is called /// after the assembly bootstrap sets up the GDT and higher-half address space #[no_mangle] pub extern "cdecl" fn ksetup(mb_magic: u32, mbi_ptr: u32) -> ! { - // loop {} + crate::println!("Hello, SATAN!"); + interrupts::setup(); + let boot_info = if mb_magic == multiboot2::MAGIC { let boot_info = unsafe { multiboot2::BootInformation::load(mbi_ptr as *const multiboot2::BootInformationHeader) @@ -34,8 +55,6 @@ pub extern "cdecl" fn ksetup(mb_magic: u32, mbi_ptr: u32) -> ! { ); }; - crate::println!("Hello, SATAN!"); - interrupts::setup(); paging::setup_paging(&boot_info); loop {} diff --git a/src/arch/x86/paging/address_space.rs b/src/arch/x86/paging/address_space.rs index a995297..d8caca5 100644 --- a/src/arch/x86/paging/address_space.rs +++ b/src/arch/x86/paging/address_space.rs @@ -1,104 +1,150 @@ -use super::*; +use memory_addr::{MemoryAddr, PhysAddr, VirtAddr}; + +use super::tmp_page; +use super::PageSize; +/// Physical page table entry types +mod entry { + pub(super) use super::super::{PTEFlags, PTEntry}; +} + +use crate::memory::address_space::nested_page_table::{NestedPageTable, NestedPageTableLevel}; +use crate::memory::address_space::AddressSpaceTrait; +use crate::memory::{MappingError, MappingResult, PageAllocatorTrait}; + +/// Interface page table entry types +mod if_entry { + pub(super) use crate::memory::address_space::nested_page_table::PageTableEntry; + pub(super) use crate::memory::MappingFlags; +} /// Address space struct -pub struct AddressSpace(pub PhysAddr); +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct AddressSpace(PageTableLevel); + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct PageTableLevel(PhysAddr, usize); impl AddressSpace { - /// Map the page table layer and get the page table entry associated with this address - fn get_entry( - layer: &::Layer, - vaddr: VirtAddr, - ) -> crate::sync::MappedLockGuard { - let page_table = tmp_page::map::(layer.0); + pub(super) fn from_paddr(addr: PhysAddr) -> Self { + #[cfg(target_arch = "x86")] + let top_level_bits = 22; + #[cfg(target_arch = "x86_64")] + let top_level_bits = 39; + Self(PageTableLevel(addr, top_level_bits)) + } +} + +impl PageTableLevel { + /// Map the page table level to the tmp page + /// and get the page table entry associated with this address + fn lock_entry(&self, vaddr: VirtAddr) -> crate::sync::MappedLockGuard { + let page_table = tmp_page::map::(self.0); - let mask = PAGE_TABLE_ENTRIES - 1; - let index = vaddr.as_usize() >> layer.1 & mask; + let mask = super::PAGE_TABLE_ENTRIES - 1; + let index = (vaddr.as_usize() >> self.1) & mask; crate::sync::MappedLockGuard::map(page_table, |page_table| &mut page_table[index]) } } -impl AddressSpaceTrait for AddressSpace { - type Layer = (PhysAddr, usize); +impl NestedPageTable for AddressSpace { + type PageSize = PageSize; + type Level = PageTableLevel; - fn
page_size(layer: &Self::Layer) -> usize { - 1 << layer.1 + fn top_level(&self) -> Self::Level { + self.0.clone() } +} - fn set_entry( - layer: Self::Layer, - vaddr: VirtAddr, - paddr: PhysAddr, - page_size: crate::arch::paging::PageSize, - flags: MappingFlags, - ) -> MappingResult<()> { - debug_assert_eq!(1usize << layer.1, page_size as usize); - let mut entry = Self::get_entry(&layer, vaddr); - if entry.flags().contains(PTEFlags::P) { - return Err(MappingError::MappingOver(entry.address())); +impl NestedPageTableLevel for PageTableLevel { + type PageSize = PageSize; + + fn region_size(&self) -> usize { + 1 << self.1 + } + + fn new_sublevel(&self, alloc: &impl PageAllocatorTrait) -> Option { + let addr = alloc.alloc(PageSize::Size4K)?; + let mut page_table = tmp_page::map::(addr); + + // Clear the page table + for index in 0..super::PAGE_TABLE_ENTRIES { + page_table[index] = entry::PTEntry::NULL; } - *entry = PTEntry::new_page(paddr, page_size, flags.into()); - flush_tlb(vaddr); - Ok(()) + + Some(PageTableLevel(addr, self.1 - super::PAGE_LEVEL_BITS)) } - fn unset_entry( - layer: Self::Layer, - vaddr: VirtAddr, - page_size: crate::arch::paging::PageSize, + fn free_sublevel( + &self, + sublevel: Self, + alloc: &impl PageAllocatorTrait, ) -> MappingResult<()> { - debug_assert_eq!(1usize << layer.1, page_size as usize); - let mut entry = Self::get_entry(&layer, vaddr); - if !entry.flags().contains(PTEFlags::P) { - return Err(MappingError::UnmappingNotMapped(vaddr)); - } - *entry = PTEntry::NULL; - flush_tlb(vaddr); + alloc.free(sublevel.0, PageSize::Size4K); Ok(()) } - fn next_layer(layer: Self::Layer, vaddr: VirtAddr, map: bool) -> MappingResult { - let mut entry = Self::get_entry(&layer, vaddr); - - if entry.flags().contains(PTEFlags::P | PTEFlags::PS) { - if map { - return Err(MappingError::MappingOver(entry.address())); - } else { - return Err(MappingError::UnmappingPartOfLargePage(entry.address())); - } + fn set_entry( + &self, + vaddr: VirtAddr, + new_entry: crate::memory::address_space::nested_page_table::PageTableEntry, + ) -> MappingResult<()> { + if matches!(new_entry, if_entry::PageTableEntry::Page(_, _)) { + debug_assert!(vaddr.is_aligned(1usize << self.1)); } - let entry = if !entry.flags().contains(PTEFlags::P) { - drop(entry); - if !map { - return Err(MappingError::UnmappingNotMapped(vaddr)); - } - - // Create a new page table - let page_table_addr = page_alloc::alloc_page(PageSize::Size4K as _); - let mut page_table = tmp_page::map::(page_table_addr); + let mut entry = self.lock_entry(vaddr); + if self.1 > 12 && entry.flags().contains(entry::PTEFlags::PS) { + return Err(MappingError::MappingOver(entry.address())); + } - // Clear the page table - for index in 0..PAGE_TABLE_ENTRIES { - page_table[index] = PTEntry::NULL; + *entry = match new_entry { + if_entry::PageTableEntry::Level(level) => entry::PTEntry::new_page_table(level.0), + if_entry::PageTableEntry::Page(paddr, flags) => { + entry::PTEntry::new_page(paddr, self.page_size().unwrap(), flags.into()) } + }; - drop(page_table); + // TODO: Check if this page table is currently active + super::flush_tlb(vaddr); + Ok(()) + } - // Set the entry to this page table - let mut entry = Self::get_entry(&layer, vaddr); - *entry = PTEntry::new_page_table(page_table_addr); - entry + fn get_entry(&self, vaddr: VirtAddr) -> MappingResult> { + let entry = self.lock_entry(vaddr); + if entry.flags().contains(entry::PTEFlags::P) + && self.1 > 12 + && !entry.flags().contains(entry::PTEFlags::PS) + { + 
Ok(if_entry::PageTableEntry::Level(PageTableLevel( + entry.address(), + self.1 - super::PAGE_LEVEL_BITS, + ))) } else { - entry - }; + Ok(if_entry::PageTableEntry::Page( + entry.address(), + entry.flags().into(), + )) + } + } +} - Ok((entry.address(), layer.1 - PAGE_LEVEL_BITS)) +impl AddressSpaceTrait for AddressSpace { + fn map_alloc( + &self, + vaddr: VirtAddr, + size: usize, + flags: if_entry::MappingFlags, + alloc: &impl PageAllocatorTrait, + ) -> MappingResult { + ::map_alloc(self, vaddr, size, flags, alloc) } - fn top_layer(&self) -> Self::Layer { - #[cfg(target_arch = "x86")] - return (self.0, 22); - #[cfg(target_arch = "x86_64")] - return (self.0, 39); + fn unmap_free( + &self, + vaddr: VirtAddr, + size: usize, + alloc: &impl PageAllocatorTrait, + ) -> MappingResult<()> { + ::unmap_free(self, vaddr, size, alloc) } } diff --git a/src/arch/x86/paging/mod.rs b/src/arch/x86/paging/mod.rs index 3b5d021..2db1115 100644 --- a/src/arch/x86/paging/mod.rs +++ b/src/arch/x86/paging/mod.rs @@ -1,33 +1,22 @@ -use crate::memory::*; +use memory_addr::{MemoryAddr, PhysAddr, VirtAddr}; /// Temporary page, space for it is allocated after the kernel in the kernel address space. /// Used to map page tables and manipulate their entries mod tmp_page; +mod page_size; +pub use page_size::PageSize; + /// Page table entry and its flags mod page_table_entry; use page_table_entry::{PTEFlags, PTEntry}; /// Address space implementation mod address_space; -use address_space::AddressSpace; - -/// Page allocator manages free pages -mod page_alloc; - -/// Page sizes possible to map -#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[repr(usize)] -pub enum PageSize { - #[default] - Size4K = 0x1000, - #[cfg(target_arch = "x86")] - Size4M = 0x400000, - #[cfg(target_arch = "x86_64")] - Size2M = 0x200000, - #[cfg(target_arch = "x86_64")] - Size1G = 0x40000000, -} +pub use address_space::AddressSpace; + +/// Use standard zone-based page allocator +pub type PageAllocator = crate::memory::page_allocator::ZonedBuddy<0x1000>; extern "C" { #[link_name = "kernel_top_level_page_table"] @@ -36,6 +25,8 @@ extern "C" { linker_symbol!
{ kernel_offset(KERNEL_OFFSET_SYMBOL) => "KERNEL_OFFSET"; + kernel_end(KERNEL_END) => "kernel_end"; + kernel_reserved_end(KERNEL_RESERVED_END) => "kernel_reserved_end"; } /// Convert a physical address in the kernel address space to virtual by adding the offset @@ -74,18 +65,67 @@ fn flush_tlb(address: VirtAddr) { /// Setup paging pub(super) fn setup_paging(boot_info: &multiboot2::BootInformation) { - // Enable PSE - unsafe { - core::arch::asm!( - "mov %cr4, {tmp}", - "or $0x10, {tmp}", - "mov {tmp}, %cr4", - tmp = out(reg) _, - options(att_syntax) - ); + let page_allocator = PageAllocator::new(); + let kernel_address_space = VirtAddr::from_usize(&raw const KERNEL_TOP_LEVEL_PAGE_TABLE as _); + let kernel_address_space = AddressSpace::from_paddr(kernel_virt2phys(kernel_address_space)); + + // Add zones to the page allocator + let memory_map_tag = boot_info + .memory_map_tag() + .expect("Memory map not available"); + for region in memory_map_tag.memory_areas() { + use multiboot2::MemoryAreaType; + let typ = MemoryAreaType::from(region.typ()); + if typ == MemoryAreaType::Available { + let kernel_physical_end = kernel_virt2phys(kernel_end()); + let start = PhysAddr::from_usize(region.start_address() as _); + let start = start.max(kernel_physical_end).align_up_4k(); + let end = PhysAddr::from_usize(region.end_address() as _); + if end <= start { + continue; + } + + if page_allocator + .add_zone(start.as_usize(), memory_addr::align_down_4k(end - start)) + .is_err() + { + crate::println!("Failed to add some memory zones"); + } + } } - page_alloc::setup_page_info_table(boot_info); + // TODO: Free boot info and bootstrap code + + // TEST + crate::println!("Total memory: {}", page_allocator.total_memory()); + use crate::memory::MappingFlags; + use crate::memory::{AddressSpaceTrait, PageSizeTrait}; + let test = 0xc0801000 as *mut u32; + let test = kernel_address_space + .map_alloc( + VirtAddr::from_mut_ptr_of(test), + PageSize::MIN as _, + MappingFlags::PRESENT | MappingFlags::READ | MappingFlags::WRITE, + &page_allocator, + ) + .unwrap() + .as_mut_ptr_of(); + crate::println!("Mapped!"); + crate::println!("Allocated memory: {}", page_allocator.allocated_memory()); + unsafe { + *test = 42; + }; + crate::println!("Wrote!"); + crate::println!("Testing page mapping: {}", unsafe { *test }); + kernel_address_space + .unmap_free(VirtAddr::from_mut_ptr_of(test), 4096, &page_allocator) + .unwrap(); + crate::println!( + "Allocated memory after freeing: {}", + page_allocator.allocated_memory() + ); + crate::println!("Testing page unmapping (You should see a page fault):"); + crate::println!("Huh? {}", unsafe { *test }); } macro_rules! linker_symbol { diff --git a/src/arch/x86/paging/page_alloc.rs b/src/arch/x86/paging/page_alloc.rs deleted file mode 100644 index 6f930fd..0000000 --- a/src/arch/x86/paging/page_alloc.rs +++ /dev/null @@ -1,64 +0,0 @@ -use super::*; - -static mut EARLY_PAGE_ALLOC_ADDRESS: PhysAddr = PhysAddr::from_usize(0); - -linker_symbol! 
{ - kernel_early_alloc_start(KERNEL_EARLY_ALLOC_START) => "kernel_tmp_page_address"; -} - -pub(super) fn alloc_page(page_size: usize) -> PhysAddr { - unsafe { - let addr = EARLY_PAGE_ALLOC_ADDRESS.align_up(page_size); - EARLY_PAGE_ALLOC_ADDRESS = addr + page_size; - addr - } -} - -pub(super) fn free_page(address: PhysAddr, page_size: usize) { - todo!() -} - -pub(super) fn setup_page_info_table(boot_info: &multiboot2::BootInformation) { - unsafe { - EARLY_PAGE_ALLOC_ADDRESS = kernel_virt2phys(kernel_early_alloc_start()); - } - - let kernel_address_space = VirtAddr::from_usize(&raw const KERNEL_TOP_LEVEL_PAGE_TABLE as _); - let kernel_address_space = AddressSpace(kernel_virt2phys(kernel_address_space)); - - let test_r = 0xc03ff000 as *mut u32; - let test_w = 0xc03fe000 as *mut u32; - kernel_address_space - .map_page( - kernel_address_space.top_layer(), - VirtAddr::from_mut_ptr_of(test_r), - PhysAddr::from_usize(0x800000), - PageSize::Size4K, - MappingFlags::PRESENT | MappingFlags::READ, - ) - .unwrap(); - kernel_address_space - .map_page( - kernel_address_space.top_layer(), - VirtAddr::from_mut_ptr_of(test_w), - PhysAddr::from_usize(0x800000), - PageSize::Size4K, - MappingFlags::PRESENT | MappingFlags::WRITE, - ) - .unwrap(); - crate::println!("Mapped!"); - unsafe { - *test_w = 42; - }; - crate::println!("Wrote!"); - crate::println!("Testing page mapping: {}", unsafe { *test_r }); - kernel_address_space - .unmap_page( - kernel_address_space.top_layer(), - VirtAddr::from_mut_ptr_of(test_r), - PageSize::Size4K, - ) - .unwrap(); - crate::println!("Testing page unmapping (you should see a page fault)..."); - crate::println!("If you see this everything broke: {}", unsafe { *test_r }); -} diff --git a/src/arch/x86/paging/page_size.rs b/src/arch/x86/paging/page_size.rs new file mode 100644 index 0000000..9f10638 --- /dev/null +++ b/src/arch/x86/paging/page_size.rs @@ -0,0 +1,40 @@ +/// Page sizes possible to map +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[repr(usize)] +pub enum PageSize { + #[default] + Size4K = 0x1000, + #[cfg(target_arch = "x86")] + Size4M = 0x400000, + #[cfg(target_arch = "x86_64")] + Size2M = 0x200000, + #[cfg(target_arch = "x86_64")] + Size1G = 0x40000000, +} + +impl TryFrom for PageSize { + type Error = (); + + fn try_from(size: usize) -> Result { + match size { + 0x1000 => Ok(Self::Size4K), + #[cfg(target_arch = "x86")] + 0x400000 => Ok(Self::Size4M), + #[cfg(target_arch = "x86_64")] + 0x200000 => Ok(Self::Size2M), + #[cfg(target_arch = "x86_64")] + 0x40000000 => Ok(Self::Size1G), + _ => Err(()), + } + } +} + +impl From for usize { + fn from(value: PageSize) -> Self { + value as _ + } +} + +impl crate::memory::PageSizeTrait for PageSize { + const MIN: Self = Self::Size4K; +} diff --git a/src/arch/x86/paging/page_table_entry.rs b/src/arch/x86/paging/page_table_entry.rs index 710d762..af470b1 100644 --- a/src/arch/x86/paging/page_table_entry.rs +++ b/src/arch/x86/paging/page_table_entry.rs @@ -1,4 +1,5 @@ use super::*; +use crate::memory::MappingFlags; bitflags::bitflags! 
{ /// Page table entry flags (first byte from the right) @@ -48,6 +49,32 @@ impl From for PTEFlags { } } +impl From for MappingFlags { + fn from(value: PTEFlags) -> Self { + let mut flags = Self::empty(); + if value.contains(PTEFlags::P) { + flags |= Self::PRESENT; + } + if value.contains(PTEFlags::RW) { + flags |= Self::WRITE; + } + #[cfg(target_arch = "x86_64")] + if value.contains(todo!()) { + flags |= Self::EXECUTE; + } + if value.contains(PTEFlags::US) { + flags |= Self::USER; + } + if value.contains(PTEFlags::PCD) { + flags |= Self::UNCACHED; + } + if value.contains(PTEFlags::G) { + flags |= Self::GLOBAL; + } + flags + } +} + /// Page table entry #[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] diff --git a/src/arch/x86/paging/tmp_page.rs b/src/arch/x86/paging/tmp_page.rs index 6fd9949..89694c4 100644 --- a/src/arch/x86/paging/tmp_page.rs +++ b/src/arch/x86/paging/tmp_page.rs @@ -24,10 +24,13 @@ pub(super) fn map(addr: PhysAddr) -> crate::sync::MappedLockGuard { ); crate::sync::LockGuard::map(TMP_PAGE_MUTEX.lock(), |_| { + let entry = PTEntry::new_page(addr, PageSize::Size4K, PTEFlags::P | PTEFlags::RW); unsafe { - *TMP_PAGE_ENTRY = PTEntry::new_page(addr, PageSize::Size4K, PTEFlags::P | PTEFlags::RW); + if *TMP_PAGE_ENTRY != entry { + *TMP_PAGE_ENTRY = entry; + flush_tlb(address()); + } } - flush_tlb(address()); unsafe { &mut *address().as_mut_ptr_of() } }) } diff --git a/src/arch/x86/x32/bootstrap.S b/src/arch/x86/x32/bootstrap.S index 4726aee..c5cd2cc 100644 --- a/src/arch/x86/x32/bootstrap.S +++ b/src/arch/x86/x32/bootstrap.S @@ -68,7 +68,8 @@ kernel_top_level_page_table: .fill 1024, 4, 0 kernel_page_table_bootstrap: .fill 1024, 4, 0 -kernel_page_table1_higher_half: +kernel_page_tables_higher_half: + .fill 1024, 4, 0 .fill 1024, 4, 0 .global kernel_tmp_page_entry_address @@ -77,10 +78,10 @@ kernel_tmp_page_entry_address: .section .stack, "aw" bootstrap_stack: - .skip 16384 + .skip 0x4000 bootstrap_stack_top: tss_stack: - .skip 16384 + .skip 0x4000 tss_stack_top: .section .bootstrap, "ax" @@ -102,6 +103,7 @@ mmap: jb mmap ret +.extern kernel_bootstrap_end .extern kernel_start .extern data_start .extern kernel_end @@ -147,11 +149,35 @@ after_gdt: or $0b00000011, %eax mov %eax, kernel_top_level_page_table - KERNEL_OFFSET - # Map first page table (4MB) in the higher half of the address space - mov $kernel_page_table1_higher_half - KERNEL_OFFSET, %eax + # Map some more of lower pages + mov $0x400000, %esi + mov $kernel_top_level_page_table - KERNEL_OFFSET + 4, %edi + +map_lower.loop: + mov %esi, %eax + or $0b10000011, %eax + mov %eax, (%edi) + + add $0x400000, %esi + add $4, %edi + cmp $0x800000, %esi + ja map_lower.loop + + # Map first 2 page tables into the higher half of the address space + mov $kernel_page_tables_higher_half - KERNEL_OFFSET, %eax or $0b00000011, %eax - mov %eax, kernel_top_level_page_table - KERNEL_OFFSET + KERNEL_OFFSET / 0x400000 * 4 + mov $kernel_top_level_page_table - KERNEL_OFFSET + KERNEL_OFFSET / 0x400000 * 4, %ebx + mov %eax, (%ebx) + add $0x1000, %eax + add $4, %ebx + mov %eax, (%ebx) + + # Check if bootstrap fits + mov $0x400000, %eax + cmp $kernel_bootstrap_end, %eax + jb kernelBootstrapTooBig + mov $0, %esi # Start address mov $0x100000, %eax # End address mov $kernel_page_table_bootstrap - KERNEL_OFFSET, %edi # Page table address @@ -162,7 +188,7 @@ after_gdt: call mmap # Check if kernel fits - mov $KERNEL_OFFSET + 0x400000, %eax + mov $KERNEL_OFFSET + 0x800000, %eax cmp $kernel_reserved_end, %eax jb 
kernelTooBig @@ -171,7 +197,7 @@ after_gdt: # Compute offset into page table mov $kernel_start - KERNEL_OFFSET, %edi shr $10, %edi - add $kernel_page_table1_higher_half - KERNEL_OFFSET, %edi # Add to the page table address + add $kernel_page_tables_higher_half - KERNEL_OFFSET, %edi # Add to the page table address mov $0b100000001, %ebx # Flags call mmap mov $kernel_end - KERNEL_OFFSET, %eax # End address @@ -181,9 +207,14 @@ after_gdt: # Setup tmp page mov $kernel_tmp_page_address - KERNEL_OFFSET, %edi shr $10, %edi - add $kernel_page_table1_higher_half, %edi # Add to the page table address + add $kernel_page_tables_higher_half, %edi # Add to the page table address mov %edi, kernel_tmp_page_entry_address - KERNEL_OFFSET + # Enable PSE + mov %cr4, %eax + or $0x10, %eax + mov %eax, %cr4 + # Enable paging mov $kernel_top_level_page_table - KERNEL_OFFSET, %eax mov %eax, %cr3 @@ -217,5 +248,32 @@ kernelTooBig: movw $'i' | 0x0400, 0xb8018 movw $'g' | 0x0400, 0xb801a movw $'!' | 0x0400, 0xb801c + jmp kernelTooBig.loop +kernelBootstrapTooBig: + movw $'K' | 0x0400, 0xb8000 + movw $'e' | 0x0400, 0xb8002 + movw $'r' | 0x0400, 0xb8004 + movw $'n' | 0x0400, 0xb8006 + movw $'e' | 0x0400, 0xb8008 + movw $'l' | 0x0400, 0xb800a + movw $' ' | 0x0400, 0xb800c + movw $'b' | 0x0400, 0xb800e + movw $'o' | 0x0400, 0xb8010 + movw $'o' | 0x0400, 0xb8012 + movw $'t' | 0x0400, 0xb8014 + movw $'s' | 0x0400, 0xb8016 + movw $'t' | 0x0400, 0xb8018 + movw $'r' | 0x0400, 0xb801a + movw $'a' | 0x0400, 0xb801c + movw $'p' | 0x0400, 0xb801e + movw $' ' | 0x0400, 0xb8020 + movw $'t' | 0x0400, 0xb8022 + movw $'o' | 0x0400, 0xb8024 + movw $'o' | 0x0400, 0xb8026 + movw $' ' | 0x0400, 0xb8028 + movw $'b' | 0x0400, 0xb802a + movw $'i' | 0x0400, 0xb802c + movw $'g' | 0x0400, 0xb802e + movw $'!' | 0x0400, 0xb8030 kernelTooBig.loop: jmp kernelTooBig.loop diff --git a/src/arch/x86/x32/linker.ld b/src/arch/x86/x32/linker.ld index 15f1dd7..6e2a95c 100644 --- a/src/arch/x86/x32/linker.ld +++ b/src/arch/x86/x32/linker.ld @@ -11,6 +11,8 @@ SECTIONS { *(.bootstrap) } + kernel_bootstrap_end = .; + . += KERNEL_OFFSET; /* Read-only code */ @@ -23,6 +25,7 @@ SECTIONS { /* Read-only data, page aligned to allow use of the no-execute feature */ .rodata ALIGN (4K) : AT (ADDR (.rodata) - KERNEL_OFFSET) { *(.rodata .rodata.*) + *(.got .got.*) } /* Read-write data, page aligned for the .padata section */ @@ -40,6 +43,5 @@ SECTIONS { kernel_tmp_page_address = ALIGN(4K); /* Add a symbol that indicates the end address of the space reserved for kernel. */ - kernel_reserved_end = kernel_tmp_page_address + 4096; - + kernel_reserved_end = kernel_tmp_page_address + 4K; } diff --git a/src/arch/x86/x32/target.json b/src/arch/x86/x32/target.json index b856ce0..8d031fe 100644 --- a/src/arch/x86/x32/target.json +++ b/src/arch/x86/x32/target.json @@ -18,5 +18,5 @@ }, "panic-strategy": "abort", "disable-redzone": true, - "features": "-mmx,-sse,+soft-float" + "features": "-mmx,-sse" } diff --git a/src/log.rs b/src/log.rs index 8324469..ccae4ec 100644 --- a/src/log.rs +++ b/src/log.rs @@ -2,7 +2,7 @@ use core::panic::PanicInfo; #[panic_handler] fn panic(info: &PanicInfo) -> ! { - crate::arch::early_logger::_panic(format_args!("{}", info)); + crate::arch::_panic(format_args!("{}", info)); } #[macro_export] @@ -13,5 +13,5 @@ macro_rules! println { #[macro_export] macro_rules! 
print { - ($($arg:tt)*) => ($crate::arch::early_logger::_print(format_args!($($arg)*))); + ($($arg:tt)*) => ($crate::arch::_print(format_args!($($arg)*))); } diff --git a/src/main.rs b/src/main.rs index c5f2d2e..f55a22a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -4,11 +4,14 @@ any(target_arch = "x86_64", target_arch = "x86"), feature(abi_x86_interrupt) )] +#![feature(allocator_api)] + +extern crate alloc; /// Synchronization primitives pub mod sync; -/// Arch-specific things +/// Architecture implementations pub mod arch; /// Basic logging facilities, calling arch-specific early print and panic functions diff --git a/src/memory.rs b/src/memory.rs index df36a01..422fbfd 100644 --- a/src/memory.rs +++ b/src/memory.rs @@ -1,129 +1,55 @@ -use super::*; pub use memory_addr::{pa, va, va_range, MemoryAddr, PhysAddr, VirtAddr}; -bitflags::bitflags! { - /// Generic page table entry flags that indicate the corresponding mapped - /// memory region permissions and attributes. - #[derive(Clone, Copy, PartialEq)] - pub struct MappingFlags: usize { - /// Memory is present. If not, generate a page fault - const PRESENT = 1 << 0; - /// The memory is readable. - const READ = 1 << 1; - /// The memory is writable. - const WRITE = 1 << 2; - /// The memory is executable. - const EXECUTE = 1 << 3; - /// The memory is user accessible. - const USER = 1 << 4; - /// The memory is uncached. - const UNCACHED = 1 << 5; - /// The memory globally accessible, doesn't invalidate TLB. - const GLOBAL = 1 << 6; - } -} - -/// Kinds of errors if mapping failed -#[derive(Clone, Debug, thiserror::Error)] -pub enum MappingError { - /// Mapping over an already existing page - #[error("mapping over existing page at address {0:#x}")] - MappingOver(PhysAddr), - /// Mapping an unaligned address - #[error("mapping an unaligned address {0:#x}")] - UnalignedPhysicalAddress(PhysAddr), - /// Mapping to an unaligned address - #[error("mapping to an unaligned address {0:#x}")] - UnalignedVirtualAddress(VirtAddr), - /// Unmapping a page that wasn't mapped - #[error("unmapping a page that wasn't mapped (address {0:#x})")] - UnmappingNotMapped(VirtAddr), - /// Unmapping part of a large page - #[error("unmapping part of a large page at {0:#x}")] - UnmappingPartOfLargePage(PhysAddr), -} - -/// Result type for memory mapping operations -pub type MappingResult = Result; +/// Address space implementations +pub mod address_space; +pub use address_space::{AddressSpaceTrait, MappingError, MappingFlags, MappingResult}; -/// Trait to be implemented by an address space -pub trait AddressSpaceTrait { - /// Single page table - type Layer; +/// Different page allocator implementations +pub mod page_allocator; +pub use page_allocator::PageAllocatorTrait; - /// Page size of one page - fn page_size(layer: &Self::Layer) -> usize; - - /// Set an entry in the page table layer to map vaddr to paddr with size and flags - fn set_entry( - layer: Self::Layer, - vaddr: VirtAddr, - paddr: PhysAddr, - page_size: arch::paging::PageSize, - flags: MappingFlags, - ) -> MappingResult<()>; - - /// Unset an entry in the page table layer - fn unset_entry( - layer: Self::Layer, - vaddr: VirtAddr, - page_size: arch::paging::PageSize, - ) -> MappingResult<()>; +/// Page size trait, implement for an enum (or a struct) that could hold valid page sizes +pub trait PageSizeTrait: Copy + PartialEq + Eq + TryFrom + Into { + const MIN: Self; } - /// Get or create (only if map is true) a page table layer in this layer - /// that is associated with this virtual address.
map parameter indicates - /// if this call corresponds to mapping/unmapping operation - fn next_layer(layer: Self::Layer, vaddr: VirtAddr, map: bool) -> MappingResult; +/// Wrap a u64 in this struct to display it with size postfix (KiB, MiB, GiB, etc.) +pub struct FormatSize(pub u64); - /// Get top level page table layer for this address space - fn top_layer(&self) -> Self::Layer; +impl core::ops::Deref for FormatSize { + type Target = u64; - /// Map a single (possibly large/huge) page. - /// As a layer should take [`AddressSpaceTrait::top_layer`] - fn map_page( - &self, - layer: Self::Layer, - vaddr: VirtAddr, - paddr: PhysAddr, - page_size: arch::paging::PageSize, - flags: MappingFlags, - ) -> MappingResult<()> { - if !vaddr.is_aligned(page_size as usize) { - return Err(MappingError::UnalignedVirtualAddress(vaddr)); - } - if !paddr.is_aligned(page_size as usize) { - return Err(MappingError::UnalignedPhysicalAddress(paddr)); - } + fn deref(&self) -> &Self::Target { + &self.0 + } +} - if Self::page_size(&layer) == page_size as usize { - Self::set_entry(layer, vaddr, paddr, page_size, flags) - } else { - self.map_page( - Self::next_layer(layer, vaddr, true)?, - vaddr, - paddr, - page_size, - flags, - ) - } +impl core::ops::DerefMut for FormatSize { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 } +} - /// Unmap a single (possibly large/huge) page or a whole page table of the same size. - /// As a layer should take [`AddressSpaceTrait::top_layer`] - fn unmap_page( - &self, - layer: Self::Layer, - vaddr: VirtAddr, - page_size: arch::paging::PageSize, - ) -> MappingResult<()> { - if !vaddr.is_aligned(page_size as usize) { - return Err(MappingError::UnalignedVirtualAddress(vaddr)); +impl core::fmt::Display for FormatSize { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut value = self.0; + let mut order = 0; + let orders = ["B", "KiB", "MiB", "GiB", "TiB", "PiB"]; + while value >= 1 << 10 && order + 1 < orders.len() { + value >>= 10; + order += 1; } - if Self::page_size(&layer) == page_size as usize { - Self::unset_entry(layer, vaddr, page_size) + if value >= 10 { + write!(f, "{} {}", value, orders[order]) } else { - self.unmap_page(Self::next_layer(layer, vaddr, false)?, vaddr, page_size) + write!( + f, + "{}.{} {}", + value, + ((self.0 * 10) >> (order * 10)) % 10, + orders[order] + ) } } } diff --git a/src/memory/address_space.rs b/src/memory/address_space.rs new file mode 100644 index 0000000..3ae32fb --- /dev/null +++ b/src/memory/address_space.rs @@ -0,0 +1,79 @@ +use super::{PageAllocatorTrait, PageSizeTrait}; +use memory_addr::{PhysAddr, VirtAddr}; + +bitflags::bitflags! { + /// Generic page table entry flags that indicate the corresponding mapped + /// memory region permissions and attributes. + #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] + pub struct MappingFlags: usize { + /// Memory is present. If not, generate a page fault + const PRESENT = 1 << 0; + /// The memory is readable. + const READ = 1 << 1; + /// The memory is writable. + const WRITE = 1 << 2; + /// The memory is executable. + const EXECUTE = 1 << 3; + /// The memory is user accessible. + const USER = 1 << 4; + /// The memory is uncached. + const UNCACHED = 1 << 5; + /// The memory globally accessible, doesn't invalidate TLB. 
+ const GLOBAL = 1 << 6; + } +} + +/// Kinds of errors if mapping failed +#[derive(Clone, Debug, thiserror::Error)] +pub enum MappingError { + /// Mapping over an already existing page + #[error("mapping over existing page at address {0:#x}")] + MappingOver(PhysAddr), + /// Page allocation failed + #[error("page allocation failed")] + PageAllocationFailed, + + /// Mapping an unaligned address + #[error("mapping an unaligned address {0:#x}")] + UnalignedPhysicalAddress(PhysAddr), + /// Mapping to an unaligned address + #[error("mapping to an unaligned address {0:#x}")] + UnalignedVirtualAddress(VirtAddr), + /// Unmapping a page that wasn't mapped + #[error("unmapping a page that wasn't mapped (address {0:#x})")] + UnmappingNotMapped(VirtAddr), + /// Unmapping part of a large page + #[error("unmapping part of a large page at {0:#x}")] + UnmappingPartOfLargePage(PhysAddr), +} + +/// Result type for memory mapping operations +pub type MappingResult = Result; + +pub mod nested_page_table; + +/// Address space allows for control over accessible memory +pub trait AddressSpaceTrait { + // pub fn map(&mut self, vaddr: VirtAddr, paddr: PhysAddr, size: usize) -> MappingResult; + // pub fn unmap(&mut self, addr: VirtAddr, size: usize) -> MappingResult<()>; + + /// Allocate and map a region of memory into + /// the address space. On success returns + /// actual address region has been mapped to. + /// vaddr must be a valid hint + fn map_alloc( + &self, + vaddr: VirtAddr, + size: usize, + flags: MappingFlags, + alloc: &impl PageAllocatorTrait, + ) -> MappingResult; + + /// Unmap a region of memory from the address space and mark it as free + fn unmap_free( + &self, + vaddr: VirtAddr, + size: usize, + alloc: &impl PageAllocatorTrait, + ) -> MappingResult<()>; +} diff --git a/src/memory/address_space/nested_page_table.rs b/src/memory/address_space/nested_page_table.rs new file mode 100644 index 0000000..2760ae4 --- /dev/null +++ b/src/memory/address_space/nested_page_table.rs @@ -0,0 +1,196 @@ +use memory_addr::MemoryAddr; + +use super::{MappingError, MappingFlags, MappingResult}; +use super::{PageAllocatorTrait, PageSizeTrait}; +use super::{PhysAddr, VirtAddr}; + +/// Page table entry returned +pub enum PageTableEntry { + /// Page table entry maps to the next level page table + Level(Level), + /// Page table entry identity maps (regular or large/huge pages) + Page(PhysAddr, MappingFlags), +} + +impl PageTableEntry { + const NULL: Self = Self::Page(PhysAddr::from_usize(0), MappingFlags::empty()); + + /// Returns true if the page entry is mapped to somewhere (maybe even swapped), false + /// if it is free + pub fn mapped(&self) -> bool { + match self { + PageTableEntry::Level(_) => true, + PageTableEntry::Page(phys_addr, mapping_flags) => { + mapping_flags.contains(MappingFlags::PRESENT) || phys_addr.as_usize() != 0 + } + } + } +} + +/// A single level of a nested page table +/// (underlying type should be something like a pointer that's freely cloneable) +pub trait NestedPageTableLevel: Clone + Sized { + type PageSize: PageSizeTrait; + + /// Get the size of a page/page table of this layer, similar to page_size, but + /// returns the memory region that a sub-level page table manages if page can't + /// be mapped here + fn region_size(&self) -> usize; + + /// Get page size of this layer, if a page can be mapped here + fn page_size(&self) -> Option { + self.region_size().try_into().ok() + } + + /// Allocate a new page table level, that's gonna come after this one + fn new_sublevel(&self, alloc: &impl 
PageAllocatorTrait) -> Option; + + /// Free a page table level, that was part of this page table. All its sublevels + /// were already freed + fn free_sublevel( + &self, + sublevel: Self, + alloc: &impl PageAllocatorTrait, + ) -> MappingResult<()>; + + /// Set an entry in this level. vaddr might not be aligned if entry + /// is [`PageTableEntry::Level`] + fn set_entry(&self, vaddr: VirtAddr, entry: PageTableEntry) -> MappingResult<()>; + + /// Get an entry in this page table. vaddr might not be aligned + fn get_entry(&self, vaddr: VirtAddr) -> MappingResult>; + + /// Map a single (possibly large/huge) page. + fn map_page( + &self, + vaddr: VirtAddr, + paddr: PhysAddr, + page_size: Self::PageSize, + flags: MappingFlags, + alloc: &impl PageAllocatorTrait, + ) -> MappingResult<()> { + if self.page_size() == Some(page_size) { + self.set_entry(vaddr, PageTableEntry::Page(paddr, flags)) + } else { + let entry = self.get_entry(vaddr)?; + let next_level = match entry { + PageTableEntry::Page(addr, flags) => { + if flags.contains(MappingFlags::PRESENT) { + return Err(MappingError::MappingOver(addr)); + } + let level = self + .new_sublevel(alloc) + .ok_or(MappingError::PageAllocationFailed)?; + self.set_entry(vaddr, PageTableEntry::Level(level.clone()))?; + level + } + PageTableEntry::Level(level) => level, + }; + next_level.map_page(vaddr, paddr, page_size, flags, alloc) + } + } + + fn unmap_free( + &self, + vaddr: VirtAddr, + size: usize, + alloc: &impl PageAllocatorTrait, + ) -> MappingResult<()> { + let region_size = self.region_size(); + let start = vaddr.align_down(region_size); + let end = (vaddr + size).align_up(region_size); + for page in (start.as_usize()..end.as_usize()).step_by(region_size) { + let page = VirtAddr::from(page); + let entry = self.get_entry(page)?; + if page < vaddr || page + region_size > vaddr + size { + match entry { + PageTableEntry::Level(level) => { + if page < vaddr { + level.unmap_free(vaddr, page + region_size - vaddr, alloc)?; + } else { + level.unmap_free(page, vaddr + size - page, alloc)?; + } + let mut mapped = false; + for entry_addr in (page.as_usize()..page.as_usize() + region_size) + .step_by(level.region_size()) + { + let entry = level.get_entry(entry_addr.into())?; + if entry.mapped() { + mapped = true; + break; + } + } + if !mapped { + self.free_sublevel(level, alloc)?; + self.set_entry(page, PageTableEntry::NULL)?; + } + } + PageTableEntry::Page(paddr, flags) => { + if flags.contains(MappingFlags::PRESENT) { + return Err(MappingError::UnmappingPartOfLargePage(paddr)); + } + } + } + } else { + match entry { + PageTableEntry::Level(level) => { + level.unmap_free(page, region_size, alloc)?; + self.free_sublevel(level, alloc)?; + self.set_entry(page, PageTableEntry::NULL)?; + } + PageTableEntry::Page(paddr, flags) => { + if flags.contains(MappingFlags::PRESENT) { + alloc.free(paddr, self.page_size().unwrap()); + self.set_entry(page, PageTableEntry::NULL)?; + } + } + } + } + } + Ok(()) + } +} + +/// Implementation of [`super::AddressSpaceTrait`] for a nested page table +/// structure (x86 for example) +pub trait NestedPageTable { + /// Page size + type PageSize: PageSizeTrait; + + /// Single level of paging + type Level: NestedPageTableLevel; + + /// Get top level page table for this address space + fn top_level(&self) -> Self::Level; + + /// Implementation of [`super::AddressSpaceTrait::map_alloc`] + fn map_alloc( + &self, + vaddr: VirtAddr, + size: usize, + flags: MappingFlags, + alloc: &impl PageAllocatorTrait, + ) -> MappingResult { + // TODO:
Possibly bigger pages + for page in 0..size / Self::PageSize::MIN.into() { + self.top_level().map_page( + vaddr + page * Self::PageSize::MIN.into(), + alloc.alloc(Self::PageSize::MIN).unwrap(), + Self::PageSize::MIN, + flags, + alloc, + )?; + } + Ok(vaddr) + } + + /// Implementation of [`super::AddressSpaceTrait::unmap_free`] + fn unmap_free( + &self, + vaddr: VirtAddr, + size: usize, + alloc: &impl PageAllocatorTrait, + ) -> MappingResult<()> { + self.top_level().unmap_free(vaddr, size, alloc) + } +} diff --git a/src/memory/page_allocator.rs b/src/memory/page_allocator.rs new file mode 100644 index 0000000..69a7610 --- /dev/null +++ b/src/memory/page_allocator.rs @@ -0,0 +1,10 @@ +use super::PageSizeTrait; +use memory_addr::PhysAddr; + +pub mod zoned_buddy; +pub use zoned_buddy::ZonedBuddy; + +pub trait PageAllocatorTrait { + fn alloc(&self, size: PageSize) -> Option; + fn free(&self, allocation: PhysAddr, size: PageSize); +} diff --git a/src/memory/page_allocator/zoned_buddy.rs b/src/memory/page_allocator/zoned_buddy.rs new file mode 100644 index 0000000..2dfac3b --- /dev/null +++ b/src/memory/page_allocator/zoned_buddy.rs @@ -0,0 +1,130 @@ +use core::alloc::AllocError; +use core::sync::atomic::AtomicUsize; + +use super::{PageAllocatorTrait, PageSizeTrait, PhysAddr}; +use crate::sync::RwLock; + +struct CpuId; +impl lock_free_buddy_allocator::cpuid::Cpu for CpuId { + fn current_cpu() -> usize { + crate::arch::instructions::cpu_id() + } +} + +struct Zone { + start: usize, + size: usize, + allocated: AtomicUsize, + buddy: lock_free_buddy_allocator::buddy_alloc::BuddyAlloc< + 'static, + PAGE_SIZE, + CpuId, + alloc::alloc::Global, + >, +} + +/// Zone-based buddy allocator. Manages zones, +/// each zone having a separate binary buddy, +/// similar to how linux does this +pub struct ZonedBuddy { + zones: RwLock>>, +} + +impl ZonedBuddy { + pub const fn new() -> Self { + Self { + zones: RwLock::new(alloc::vec::Vec::new()), + } + } + + pub fn add_zone(&self, start: usize, size: usize) -> Result<(), AllocError> { + debug_assert!( + start % BLOCK_SIZE == 0, + "zone is not aligned ({:#x})", + start + ); + debug_assert!(size % BLOCK_SIZE == 0, "size is not aligned ({:#x})", size); + + if !size.is_power_of_two() { + let mut start = start; + for bit in 0..usize::BITS { + let size_p2 = 1 << bit; + if size & size_p2 != 0 { + self.add_zone(start, size_p2)?; + start += size_p2; + } + } + } else { + self.zones.write().push(Zone { + start, + size, + allocated: AtomicUsize::new(0), + buddy: lock_free_buddy_allocator::buddy_alloc::BuddyAlloc::new( + start, + size / BLOCK_SIZE, + &alloc::alloc::Global, + ) + .ok_or(AllocError)?, + }); + } + Ok(()) + } + + pub fn alloc(&self, size: usize) -> Option { + let blocks = size / BLOCK_SIZE; + for zone in self.zones.read().iter() { + if let Some(addr) = zone.buddy.alloc(blocks) { + zone.allocated + .fetch_add(size, core::sync::atomic::Ordering::SeqCst); + return Some(PhysAddr::from_usize(addr)); + } + } + None + } + + pub fn free(&self, allocation: PhysAddr, size: usize) { + let start = allocation.as_usize(); + let blocks = size / BLOCK_SIZE; + for zone in self.zones.read().iter() { + if start >= zone.start && start + size <= zone.start + zone.size { + zone.buddy.free(allocation.as_usize(), blocks); + zone.allocated + .fetch_sub(size, core::sync::atomic::Ordering::SeqCst); + } + } + } + + /// Returns total amount of memory managed by the allocator. 
+ /// To get free space, use [`Self::total_memory`] - [`Self::allocated_memory`] + pub fn total_memory(&self) -> usize { + self.zones + .read() + .iter() + .fold(0, |acc, zone| acc + zone.size) + } + + /// Returns the amount of allocated memory + pub fn allocated_memory(&self) -> usize { + self.zones.read().iter().fold(0, |acc, zone| { + acc + zone.allocated.load(core::sync::atomic::Ordering::SeqCst) + }) + } +} + +impl Default for ZonedBuddy { + fn default() -> Self { + Self::new() + } +} + +impl PageAllocatorTrait + for ZonedBuddy +{ + fn alloc(&self, size: PageSize) -> Option { + self.alloc(size.into()) + } + + fn free(&self, allocation: PhysAddr, size: PageSize) { + self.free(allocation, size.into()) + } +} diff --git a/src/sync.rs b/src/sync.rs index e17dbec..2b60b91 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -1,6 +1,18 @@ pub use spin::lock_api::{Mutex, MutexGuard}; pub type MappedMutexGuard<'a, T, U> = lock_api::MappedMutexGuard<'a, spin::Mutex, U>; +pub use spin::lock_api::{RwLock, RwLockReadGuard, RwLockWriteGuard}; +pub type MappedRwLockReadGuard<'a, T, U> = lock_api::MappedRwLockReadGuard<'a, spin::RwLock, U>; +pub type MappedRwLockWriteGuard<'a, T, U> = + lock_api::MappedRwLockWriteGuard<'a, spin::RwLock, U>; + pub type Lock = Mutex<()>; pub type LockGuard = MutexGuard<'static, ()>; pub type MappedLockGuard = MappedMutexGuard<'static, (), T>; + +pub fn lock_nb(mutex: &spin::Mutex) -> spin::MutexGuard { + match mutex.try_lock() { + Some(guard) => guard, + None => panic!("Tried to lock a locked mutex!"), + } +}
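
A note on the new `FormatSize` helper in `src/memory.rs`: the test output in `setup_paging` still prints raw byte counts, so here is a minimal usage sketch. It assumes the wrapper is reachable as `crate::memory::FormatSize` and that the `alloc` crate (already pulled in by `main.rs`) is available; the expected strings are derived from the `Display` impl in this patch, which keeps one fractional digit only when the value in the chosen unit is below 10.

```rust
use crate::memory::FormatSize;

// Expected output of the Display impl (sketch, not part of the patch):
assert_eq!(alloc::format!("{}", FormatSize(900)), "900 B");
assert_eq!(alloc::format!("{}", FormatSize(1536)), "1.5 KiB");      // 1.5 * 1024
assert_eq!(alloc::format!("{}", FormatSize(0x80_0000)), "8.0 MiB"); // exactly 8 MiB

// e.g. in setup_paging's test output:
crate::println!("Total memory: {}", FormatSize(page_allocator.total_memory() as u64));
```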
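Similarly, a small sketch of the `ZonedBuddy` API added in `src/memory/page_allocator/zoned_buddy.rs`, mirroring what `setup_paging` does with the multiboot memory map. The const block-size parameter is assumed to match the `PageAllocator` alias in `src/arch/x86/paging/mod.rs` (`ZonedBuddy<0x1000>`), and the zone address and length are made up for illustration.

```rust
use memory_addr::PhysAddr;
use crate::memory::page_allocator::ZonedBuddy;

// Hypothetical free region at 16 MiB that is 0x500000 bytes long.
// add_zone splits the non-power-of-two size into power-of-two sub-zones
// (0x100000 at 0x1000000, then 0x400000 at 0x1100000), each with its own buddy.
let buddy: ZonedBuddy<0x1000> = ZonedBuddy::new();
buddy.add_zone(0x100_0000, 0x50_0000).expect("failed to add zone");

// Allocate and free one 4 KiB page; the allocated counter is tracked per zone.
let page: PhysAddr = buddy.alloc(0x1000).expect("out of memory");
assert_eq!(buddy.allocated_memory(), 0x1000);
buddy.free(page, 0x1000);
assert_eq!(buddy.allocated_memory(), 0);
```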