From 360dc9bea5be9d1874f4653d2d90078437724b5c Mon Sep 17 00:00:00 2001 From: InfiniteCoder Date: Sun, 12 Jan 2025 21:22:18 +0300 Subject: [PATCH 1/9] Early alloc --- src/arch/mod.rs | 12 +- src/arch/x86/mod.rs | 4 +- src/arch/x86/paging/address_space.rs | 3 +- src/arch/x86/paging/early_page_alloc.rs | 137 ++++++++++++++++++++ src/arch/x86/paging/mod.rs | 9 +- src/arch/x86/paging/page_alloc.rs | 64 ---------- src/arch/x86/paging/page_table_entry.rs | 1 + src/log.rs | 4 +- src/memory.rs | 162 ++++++++---------------- src/memory/address_space.rs | 129 +++++++++++++++++++ 10 files changed, 342 insertions(+), 183 deletions(-) create mode 100644 src/arch/x86/paging/early_page_alloc.rs delete mode 100644 src/arch/x86/paging/page_alloc.rs create mode 100644 src/memory/address_space.rs diff --git a/src/arch/mod.rs b/src/arch/mod.rs index 408395e..ac9a999 100644 --- a/src/arch/mod.rs +++ b/src/arch/mod.rs @@ -2,4 +2,14 @@ /// x86 and x86_64 architectures pub mod x86; #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] -pub use x86::*; +pub use x86 as current; + +pub use current::{_panic, _print}; + +pub mod interrupts { + pub use super::current::interrupts::{disable, enable}; +} + +pub mod paging { + pub use super::current::paging::{early_alloc_page, AddressSpace, PageSize}; +} diff --git a/src/arch/x86/mod.rs b/src/arch/x86/mod.rs index 6f631d6..9d7207d 100644 --- a/src/arch/x86/mod.rs +++ b/src/arch/x86/mod.rs @@ -3,12 +3,14 @@ core::arch::global_asm!(include_str!("x32/bootstrap.S"), options(att_syntax)); /// Early logging facilities pub mod early_logger; +pub use early_logger::{_panic, _print}; /// Interrupts and IDT pub mod interrupts; /// Paging implementation -/// I spent a lot of time here +/// I spent a lot of time here. +/// And I hate every single second of it. pub mod paging; /// Kernel setup function. First thing that is called diff --git a/src/arch/x86/paging/address_space.rs b/src/arch/x86/paging/address_space.rs index a995297..bd9f0c7 100644 --- a/src/arch/x86/paging/address_space.rs +++ b/src/arch/x86/paging/address_space.rs @@ -1,4 +1,5 @@ use super::*; +use crate::memory::*; /// Address space struct pub struct AddressSpace(pub PhysAddr); @@ -74,7 +75,7 @@ impl AddressSpaceTrait for AddressSpace { } // Create a new page table - let page_table_addr = page_alloc::alloc_page(PageSize::Size4K as _); + let page_table_addr = crate::memory::alloc_page(PageSize::Size4K as _); let mut page_table = tmp_page::map::(page_table_addr); // Clear the page table diff --git a/src/arch/x86/paging/early_page_alloc.rs b/src/arch/x86/paging/early_page_alloc.rs new file mode 100644 index 0000000..006a003 --- /dev/null +++ b/src/arch/x86/paging/early_page_alloc.rs @@ -0,0 +1,137 @@ +use super::*; +use crate::memory::AddressSpaceTrait; + +linker_symbol! 
{ + kernel_early_alloc_start(KERNEL_EARLY_ALLOC_START) => "kernel_tmp_page_address"; + kernel_reserved_end(KERNEL_RESERVED_END) => "kernel_reserved_end"; +} + +// * Allocation +struct EarlyPageAllocator { + alloc_start: PhysAddr, + boot_info: &'static multiboot2::BootInformation<'static>, +} + +impl EarlyPageAllocator { + fn is_page_free(&self, addr: PhysAddr, page_size: PageSize) -> bool { + let start = addr.as_usize(); + let end = addr.as_usize() + page_size as usize; + + // * Check for overlap + // With kernel + if addr < kernel_virt2phys(kernel_reserved_end()) { + return false; + } + // With boot info structure + if start < self.boot_info.end_address() && end > self.boot_info.start_address() { + return false; + } + + // * Check for memory region validity + for region in self.boot_info.memory_map_tag().unwrap().memory_areas() { + if start < region.end_address() as usize && end > region.start_address() as usize { + use multiboot2::MemoryAreaType; + match MemoryAreaType::from(region.typ()) { + MemoryAreaType::Available => (), + _ => return false, + } + } + } + true + } +} + +static mut EARLY_ALLOCATOR: Option = None; + +pub fn early_alloc_page(page_size: PageSize) -> PhysAddr { + let Some(allocator) = (unsafe { EARLY_ALLOCATOR.as_mut() }) else { + panic!("Early allocator not available") + }; + let mut addr = allocator.alloc_start.align_up(page_size as usize); + while !allocator.is_page_free(addr, page_size) { + addr += page_size as usize; + } + allocator.alloc_start = addr + page_size as usize; + addr +} + +/// Setup page info table, responsible for page allocation +pub(super) fn setup_page_info_table(boot_info: &multiboot2::BootInformation) { + let kernel_address_space = VirtAddr::from_usize(&raw const KERNEL_TOP_LEVEL_PAGE_TABLE as _); + let kernel_address_space = AddressSpace(kernel_virt2phys(kernel_address_space)); + unsafe { + EARLY_ALLOCATOR = Some(EarlyPageAllocator { + alloc_start: kernel_virt2phys(kernel_reserved_end()), + boot_info: core::mem::transmute(boot_info), + }); + } + + // Get the address limit (last usable physical address) + let mut address_limit = 0; + let memory_map_tag = boot_info + .memory_map_tag() + .expect("Memory map not available"); + for region in memory_map_tag.memory_areas() { + use multiboot2::MemoryAreaType; + let typ = MemoryAreaType::from(region.typ()); + if typ == MemoryAreaType::Available { + address_limit = address_limit.max(region.end_address()); + } + } + + if address_limit > (usize::MAX as u64) { + panic!( + "Kernel address size can't handle {} of memory", + crate::memory::FormatSize(address_limit) + ); + } + + let page_info_table_entries = (address_limit / memory_addr::PAGE_SIZE_4K as u64) as usize; + let page_info_table_size = + page_info_table_entries * core::mem::size_of::(); + + /// Allocate and map page info table + unsafe { + // EARLY_PAGE_ALLOC_ADDRESS = kernel_virt2phys(kernel_reserved_end()); + // PAGE_INFO_TABLE = core::slice::from_raw_parts_mut( + // kernel_reserved_end().as_mut_ptr_of(), + // page_info_table_entries, + // ); + } + + let test_r = 0xc0400000 as *mut u32; + let test_w = 0xc03ff000 as *mut u32; + kernel_address_space + .map_page( + kernel_address_space.top_layer(), + VirtAddr::from_mut_ptr_of(test_r), + PhysAddr::from_usize(0x800000), + PageSize::Size4K, + crate::memory::MappingFlags::PRESENT | crate::memory::MappingFlags::READ, + ) + .unwrap(); + kernel_address_space + .map_page( + kernel_address_space.top_layer(), + VirtAddr::from_mut_ptr_of(test_w), + PhysAddr::from_usize(0x800000), + PageSize::Size4K, + 
crate::memory::MappingFlags::PRESENT | crate::memory::MappingFlags::WRITE, + ) + .unwrap(); + crate::println!("Mapped!"); + unsafe { + *test_w = 42; + }; + crate::println!("Wrote!"); + crate::println!("Testing page mapping: {}", unsafe { *test_r }); + kernel_address_space + .unmap_page( + kernel_address_space.top_layer(), + VirtAddr::from_mut_ptr_of(test_r), + PageSize::Size4K, + ) + .unwrap(); + crate::println!("Testing page unmapping (you should see a page fault)..."); + crate::println!("If you see this everything broke: {}", unsafe { *test_r }); +} diff --git a/src/arch/x86/paging/mod.rs b/src/arch/x86/paging/mod.rs index 3b5d021..3d29498 100644 --- a/src/arch/x86/paging/mod.rs +++ b/src/arch/x86/paging/mod.rs @@ -1,4 +1,4 @@ -use crate::memory::*; +pub use memory_addr::{pa, va, va_range, MemoryAddr, PhysAddr, VirtAddr}; /// Temproary page, space for it is allocated after the kernel in the kernel address space. /// Used to map page tables and manipulate their entries @@ -10,10 +10,11 @@ use page_table_entry::{PTEFlags, PTEntry}; /// Address space implementation mod address_space; -use address_space::AddressSpace; +pub use address_space::AddressSpace; /// Page allocator manages free pages -mod page_alloc; +mod early_page_alloc; +pub use early_page_alloc::early_alloc_page; /// Page sizes possible to map #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] @@ -85,7 +86,7 @@ pub(super) fn setup_paging(boot_info: &multiboot2::BootInformation) { ); } - page_alloc::setup_page_info_table(boot_info); + early_page_alloc::setup_page_info_table(boot_info); } macro_rules! linker_symbol { diff --git a/src/arch/x86/paging/page_alloc.rs b/src/arch/x86/paging/page_alloc.rs deleted file mode 100644 index 6f930fd..0000000 --- a/src/arch/x86/paging/page_alloc.rs +++ /dev/null @@ -1,64 +0,0 @@ -use super::*; - -static mut EARLY_PAGE_ALLOC_ADDRESS: PhysAddr = PhysAddr::from_usize(0); - -linker_symbol! 
{ - kernel_early_alloc_start(KERNEL_EARLY_ALLOC_START) => "kernel_tmp_page_address"; -} - -pub(super) fn alloc_page(page_size: usize) -> PhysAddr { - unsafe { - let addr = EARLY_PAGE_ALLOC_ADDRESS.align_up(page_size); - EARLY_PAGE_ALLOC_ADDRESS = addr + page_size; - addr - } -} - -pub(super) fn free_page(address: PhysAddr, page_size: usize) { - todo!() -} - -pub(super) fn setup_page_info_table(boot_info: &multiboot2::BootInformation) { - unsafe { - EARLY_PAGE_ALLOC_ADDRESS = kernel_virt2phys(kernel_early_alloc_start()); - } - - let kernel_address_space = VirtAddr::from_usize(&raw const KERNEL_TOP_LEVEL_PAGE_TABLE as _); - let kernel_address_space = AddressSpace(kernel_virt2phys(kernel_address_space)); - - let test_r = 0xc03ff000 as *mut u32; - let test_w = 0xc03fe000 as *mut u32; - kernel_address_space - .map_page( - kernel_address_space.top_layer(), - VirtAddr::from_mut_ptr_of(test_r), - PhysAddr::from_usize(0x800000), - PageSize::Size4K, - MappingFlags::PRESENT | MappingFlags::READ, - ) - .unwrap(); - kernel_address_space - .map_page( - kernel_address_space.top_layer(), - VirtAddr::from_mut_ptr_of(test_w), - PhysAddr::from_usize(0x800000), - PageSize::Size4K, - MappingFlags::PRESENT | MappingFlags::WRITE, - ) - .unwrap(); - crate::println!("Mapped!"); - unsafe { - *test_w = 42; - }; - crate::println!("Wrote!"); - crate::println!("Testing page mapping: {}", unsafe { *test_r }); - kernel_address_space - .unmap_page( - kernel_address_space.top_layer(), - VirtAddr::from_mut_ptr_of(test_r), - PageSize::Size4K, - ) - .unwrap(); - crate::println!("Testing page unmapping (you should see a page fault)..."); - crate::println!("If you see this everything broke: {}", unsafe { *test_r }); -} diff --git a/src/arch/x86/paging/page_table_entry.rs b/src/arch/x86/paging/page_table_entry.rs index 710d762..258bc91 100644 --- a/src/arch/x86/paging/page_table_entry.rs +++ b/src/arch/x86/paging/page_table_entry.rs @@ -1,4 +1,5 @@ use super::*; +use crate::memory::MappingFlags; bitflags::bitflags! { /// Page table entry flags (first byte from the right) diff --git a/src/log.rs b/src/log.rs index 8324469..ccae4ec 100644 --- a/src/log.rs +++ b/src/log.rs @@ -2,7 +2,7 @@ use core::panic::PanicInfo; #[panic_handler] fn panic(info: &PanicInfo) -> ! { - crate::arch::early_logger::_panic(format_args!("{}", info)); + crate::arch::_panic(format_args!("{}", info)); } #[macro_export] @@ -13,5 +13,5 @@ macro_rules! println { #[macro_export] macro_rules! print { - ($($arg:tt)*) => ($crate::arch::early_logger::_print(format_args!($($arg)*))); + ($($arg:tt)*) => ($crate::arch::_print(format_args!($($arg)*))); } diff --git a/src/memory.rs b/src/memory.rs index df36a01..2502295 100644 --- a/src/memory.rs +++ b/src/memory.rs @@ -1,129 +1,71 @@ -use super::*; pub use memory_addr::{pa, va, va_range, MemoryAddr, PhysAddr, VirtAddr}; -bitflags::bitflags! { - /// Generic page table entry flags that indicate the corresponding mapped - /// memory region permissions and attributes. - #[derive(Clone, Copy, PartialEq)] - pub struct MappingFlags: usize { - /// Memory is present. If not, generate a page fault - const PRESENT = 1 << 0; - /// The memory is readable. - const READ = 1 << 1; - /// The memory is writable. - const WRITE = 1 << 2; - /// The memory is executable. - const EXECUTE = 1 << 3; - /// The memory is user accessible. - const USER = 1 << 4; - /// The memory is uncached. - const UNCACHED = 1 << 5; - /// The memory globally accessible, doesn't invalidate TLB. 
- const GLOBAL = 1 << 6; - } -} +pub mod address_space; +pub use address_space::{AddressSpaceTrait, MappingError, MappingFlags, MappingResult}; -/// Kinds of errors if mapping failed -#[derive(Clone, Debug, thiserror::Error)] -pub enum MappingError { - /// Mapping over an already existing page - #[error("mapping over existing page at address {0:#x}")] - MappingOver(PhysAddr), - /// Mapping an unaligned address - #[error("mapping an unaligned address {0:#x}")] - UnalignedPhysicalAddress(PhysAddr), - /// Mapping to an unaligned address - #[error("mapping to an unaligned address {0:#x}")] - UnalignedVirtualAddress(VirtAddr), - /// Unmapping a page that wasn't mapped - #[error("unmapping a page that wasn't mapped (address {0:#x})")] - UnmappingNotMapped(VirtAddr), - /// Unmapping part of a large page - #[error("unmapping part of a large page at {0:#x}")] - UnmappingPartOfLargePage(PhysAddr), +/// Kernel page info entry +pub struct PageInfo { + uses: core::sync::atomic::AtomicU32, } -/// Result type for memory mapping operations -pub type MappingResult = Result; - -/// Trait to be implemented by an address space -pub trait AddressSpaceTrait { - /// Single page table - type Layer; +static mut PAGE_INFO_TABLE: &[PageInfo] = &[]; - /// Page size of one page - fn page_size(layer: &Self::Layer) -> usize; - - /// Set an entry in the page table layer to map vaddr to paddr with size and flags - fn set_entry( - layer: Self::Layer, - vaddr: VirtAddr, - paddr: PhysAddr, - page_size: arch::paging::PageSize, - flags: MappingFlags, - ) -> MappingResult<()>; +/// Allocate page of size page_size aligned to page_size +pub fn alloc_page(page_size: crate::arch::paging::PageSize) -> PhysAddr { + let page_info_table = unsafe { PAGE_INFO_TABLE }; + if page_info_table.is_empty() { + crate::arch::paging::early_alloc_page(page_size) + } else { + todo!() + } +} - /// Unset an entry in the page table layer - fn unset_entry( - layer: Self::Layer, - vaddr: VirtAddr, - page_size: arch::paging::PageSize, - ) -> MappingResult<()>; +/// Free page allocated with [alloc_page] +pub fn free_page(addr: PhysAddr, page_size: crate::arch::paging::PageSize) { + let page_info_table = unsafe { PAGE_INFO_TABLE }; + if page_info_table.is_empty() { + panic!("Can't free page without page info table"); + } + todo!() +} - /// Get or create (only if map is true) a page table layer in this layer - /// that is associated with this virtual address. map parameter indicates - /// if this call corresponds to mapping/unmapping operation - fn next_layer(layer: Self::Layer, vaddr: VirtAddr, map: bool) -> MappingResult; +/// Wrap a u64 in this struct to display it with size postfix (KiB, MiB, GiB, etc.) +pub struct FormatSize(pub u64); - /// Get top level page table layer for this address space - fn top_layer(&self) -> Self::Layer; +impl core::ops::Deref for FormatSize { + type Target = u64; - /// Map a single (possibly large/huge) page. 
- /// As a layer should take [`AddressSpaceTrait::top_layer`] - fn map_page( - &self, - layer: Self::Layer, - vaddr: VirtAddr, - paddr: PhysAddr, - page_size: arch::paging::PageSize, - flags: MappingFlags, - ) -> MappingResult<()> { - if !vaddr.is_aligned(page_size as usize) { - return Err(MappingError::UnalignedVirtualAddress(vaddr)); - } - if !paddr.is_aligned(page_size as usize) { - return Err(MappingError::UnalignedPhysicalAddress(paddr)); - } + fn deref(&self) -> &Self::Target { + &self.0 + } +} - if Self::page_size(&layer) == page_size as usize { - Self::set_entry(layer, vaddr, paddr, page_size, flags) - } else { - self.map_page( - Self::next_layer(layer, vaddr, true)?, - vaddr, - paddr, - page_size, - flags, - ) - } +impl core::ops::DerefMut for FormatSize { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 } +} - /// Unmap a single (possibly large/huge) page or a whole page table of the same size. - /// As a layer should take [`AddressSpaceTrait::top_layer`] - fn unmap_page( - &self, - layer: Self::Layer, - vaddr: VirtAddr, - page_size: arch::paging::PageSize, - ) -> MappingResult<()> { - if !vaddr.is_aligned(page_size as usize) { - return Err(MappingError::UnalignedVirtualAddress(vaddr)); +impl core::fmt::Display for FormatSize { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let mut value = self.0; + let mut order = 0; + let orders = ["B", "KiB", "MiB", "GiB", "TiB", "PiB"]; + while value >= 1 << 10 && order + 1 < orders.len() { + value >>= 10; + order += 1; } - if Self::page_size(&layer) == page_size as usize { - Self::unset_entry(layer, vaddr, page_size) + if value >= 10 { + write!(f, "{} {}", value, orders[order]) } else { - self.unmap_page(Self::next_layer(layer, vaddr, false)?, vaddr, page_size) + write!( + f, + "{}.{} {}", + value, + (self.0 * 10 >> order * 10) % 10, + orders[order] + ) } } } diff --git a/src/memory/address_space.rs b/src/memory/address_space.rs new file mode 100644 index 0000000..8e8037d --- /dev/null +++ b/src/memory/address_space.rs @@ -0,0 +1,129 @@ +use memory_addr::{pa, va, va_range, MemoryAddr, PhysAddr, VirtAddr}; + +bitflags::bitflags! { + /// Generic page table entry flags that indicate the corresponding mapped + /// memory region permissions and attributes. + #[derive(Clone, Copy, PartialEq)] + pub struct MappingFlags: usize { + /// Memory is present. If not, generate a page fault + const PRESENT = 1 << 0; + /// The memory is readable. + const READ = 1 << 1; + /// The memory is writable. + const WRITE = 1 << 2; + /// The memory is executable. + const EXECUTE = 1 << 3; + /// The memory is user accessible. + const USER = 1 << 4; + /// The memory is uncached. + const UNCACHED = 1 << 5; + /// The memory globally accessible, doesn't invalidate TLB. 
+ const GLOBAL = 1 << 6; + } +} + +/// Kinds of errors if mapping failed +#[derive(Clone, Debug, thiserror::Error)] +pub enum MappingError { + /// Mapping over an already existing page + #[error("mapping over existing page at address {0:#x}")] + MappingOver(PhysAddr), + /// Mapping an unaligned address + #[error("mapping an unaligned address {0:#x}")] + UnalignedPhysicalAddress(PhysAddr), + /// Mapping to an unaligned address + #[error("mapping to an unaligned address {0:#x}")] + UnalignedVirtualAddress(VirtAddr), + /// Unmapping a page that wasn't mapped + #[error("unmapping a page that wasn't mapped (address {0:#x})")] + UnmappingNotMapped(VirtAddr), + /// Unmapping part of a large page + #[error("unmapping part of a large page at {0:#x}")] + UnmappingPartOfLargePage(PhysAddr), +} + +/// Result type for memory mapping operations +pub type MappingResult = Result; + +/// Trait to be implemented by an address space +pub trait AddressSpaceTrait { + // * Mapping + /// Single page table + type Layer; + + /// Page size of one page + fn page_size(layer: &Self::Layer) -> usize; + + /// Set an entry in the page table layer to map vaddr to paddr with size and flags + fn set_entry( + layer: Self::Layer, + vaddr: VirtAddr, + paddr: PhysAddr, + page_size: crate::arch::paging::PageSize, + flags: MappingFlags, + ) -> MappingResult<()>; + + /// Unset an entry in the page table layer + fn unset_entry( + layer: Self::Layer, + vaddr: VirtAddr, + page_size: crate::arch::paging::PageSize, + ) -> MappingResult<()>; + + /// Get or create (only if map is true) a page table layer in this layer + /// that is associated with this virtual address. map parameter indicates + /// if this call corresponds to mapping/unmapping operation + fn next_layer(layer: Self::Layer, vaddr: VirtAddr, map: bool) -> MappingResult; + + /// Get top level page table layer for this address space + fn top_layer(&self) -> Self::Layer; + + /// Map a single (possibly large/huge) page. + /// As a layer should take [`AddressSpaceTrait::top_layer`] + fn map_page( + &self, + layer: Self::Layer, + vaddr: VirtAddr, + paddr: PhysAddr, + page_size: crate::arch::paging::PageSize, + flags: MappingFlags, + ) -> MappingResult<()> { + if !vaddr.is_aligned(page_size as usize) { + return Err(MappingError::UnalignedVirtualAddress(vaddr)); + } + if !paddr.is_aligned(page_size as usize) { + return Err(MappingError::UnalignedPhysicalAddress(paddr)); + } + + if Self::page_size(&layer) == page_size as usize { + Self::set_entry(layer, vaddr, paddr, page_size, flags) + } else { + self.map_page( + Self::next_layer(layer, vaddr, true)?, + vaddr, + paddr, + page_size, + flags, + ) + } + } + + /// Unmap a single (possibly large/huge) page or a whole page table of the same size. 
+ /// As a layer should take [`AddressSpaceTrait::top_layer`] + fn unmap_page( + &self, + layer: Self::Layer, + vaddr: VirtAddr, + page_size: crate::arch::paging::PageSize, + ) -> MappingResult<()> { + if !vaddr.is_aligned(page_size as usize) { + return Err(MappingError::UnalignedVirtualAddress(vaddr)); + } + + if Self::page_size(&layer) == page_size as usize { + Self::unset_entry(layer, vaddr, page_size) + } else { + self.unmap_page(Self::next_layer(layer, vaddr, false)?, vaddr, page_size) + } + } +} From cbed4e934dccb0d16f8cf84f734fd9ca9680c7e6 Mon Sep 17 00:00:00 2001 From: InfiniteCoder Date: Mon, 13 Jan 2025 19:47:08 +0300 Subject: [PATCH 2/9] BASIC page allocator --- src/arch/x86/paging/address_space.rs | 2 +- src/arch/x86/paging/early_page_alloc.rs | 73 +++++++++++++++---------- src/arch/x86/paging/mod.rs | 6 ++ src/memory.rs | 47 ++++++++++++++-- src/memory/address_space.rs | 41 ++++++++++++++ src/sync.rs | 7 +++ 6 files changed, 142 insertions(+), 34 deletions(-) diff --git a/src/arch/x86/paging/address_space.rs b/src/arch/x86/paging/address_space.rs index bd9f0c7..6efe61b 100644 --- a/src/arch/x86/paging/address_space.rs +++ b/src/arch/x86/paging/address_space.rs @@ -58,7 +58,7 @@ impl AddressSpaceTrait for AddressSpace { } fn next_layer(layer: Self::Layer, vaddr: VirtAddr, map: bool) -> MappingResult { - let mut entry = Self::get_entry(&layer, vaddr); + let entry = Self::get_entry(&layer, vaddr); if entry.flags().contains(PTEFlags::P | PTEFlags::PS) { if map { diff --git a/src/arch/x86/paging/early_page_alloc.rs b/src/arch/x86/paging/early_page_alloc.rs index 006a003..8f711df 100644 --- a/src/arch/x86/paging/early_page_alloc.rs +++ b/src/arch/x86/paging/early_page_alloc.rs @@ -1,8 +1,7 @@ use super::*; -use crate::memory::AddressSpaceTrait; +use crate::memory::{AddressSpaceTrait, MappingFlags}; linker_symbol! { - kernel_early_alloc_start(KERNEL_EARLY_ALLOC_START) => "kernel_tmp_page_address"; kernel_reserved_end(KERNEL_RESERVED_END) => "kernel_reserved_end"; } @@ -13,18 +12,19 @@ struct EarlyPageAllocator { } impl EarlyPageAllocator { - fn is_page_free(&self, addr: PhysAddr, page_size: PageSize) -> bool { + /// Get next possibly free region of memory to try to fit page. 
Returns none if page fits + fn next_possibly_free(&self, addr: PhysAddr, page_size: PageSize) -> Option { let start = addr.as_usize(); let end = addr.as_usize() + page_size as usize; // * Check for overlap // With kernel if addr < kernel_virt2phys(kernel_reserved_end()) { - return false; + return Some(kernel_virt2phys(kernel_reserved_end())); } // With boot info structure if start < self.boot_info.end_address() && end > self.boot_info.start_address() { - return false; + return Some(self.boot_info.end_address().into()); } // * Check for memory region validity @@ -33,23 +33,24 @@ impl EarlyPageAllocator { use multiboot2::MemoryAreaType; match MemoryAreaType::from(region.typ()) { MemoryAreaType::Available => (), - _ => return false, + _ => return Some((region.end_address() as usize).into()), } } } - true + None } } -static mut EARLY_ALLOCATOR: Option = None; +static EARLY_ALLOCATOR: spin::Mutex> = spin::Mutex::new(None); pub fn early_alloc_page(page_size: PageSize) -> PhysAddr { - let Some(allocator) = (unsafe { EARLY_ALLOCATOR.as_mut() }) else { + let mut allocator = crate::sync::lock_nb(&EARLY_ALLOCATOR); + let Some(allocator) = allocator.as_mut() else { panic!("Early allocator not available") }; let mut addr = allocator.alloc_start.align_up(page_size as usize); - while !allocator.is_page_free(addr, page_size) { - addr += page_size as usize; + while let Some(next) = allocator.next_possibly_free(addr, page_size) { + addr = next.align_up(page_size as usize); } allocator.alloc_start = addr + page_size as usize; addr @@ -59,12 +60,14 @@ pub fn early_alloc_page(page_size: PageSize) -> PhysAddr { pub(super) fn setup_page_info_table(boot_info: &multiboot2::BootInformation) { let kernel_address_space = VirtAddr::from_usize(&raw const KERNEL_TOP_LEVEL_PAGE_TABLE as _); let kernel_address_space = AddressSpace(kernel_virt2phys(kernel_address_space)); - unsafe { - EARLY_ALLOCATOR = Some(EarlyPageAllocator { - alloc_start: kernel_virt2phys(kernel_reserved_end()), - boot_info: core::mem::transmute(boot_info), - }); - } + *crate::sync::lock_nb(&EARLY_ALLOCATOR) = Some(EarlyPageAllocator { + alloc_start: kernel_virt2phys(kernel_reserved_end()), + boot_info: unsafe { + core::mem::transmute::<&multiboot2::BootInformation, &multiboot2::BootInformation>( + boot_info, + ) + }, + }); // Get the address limit (last usable physical address) let mut address_limit = 0; @@ -90,24 +93,38 @@ pub(super) fn setup_page_info_table(boot_info: &multiboot2::BootInformation) { let page_info_table_size = page_info_table_entries * core::mem::size_of::(); - /// Allocate and map page info table - unsafe { - // EARLY_PAGE_ALLOC_ADDRESS = kernel_virt2phys(kernel_reserved_end()); - // PAGE_INFO_TABLE = core::slice::from_raw_parts_mut( - // kernel_reserved_end().as_mut_ptr_of(), - // page_info_table_entries, - // ); + // Allocate and map page info table + let page_info_table_address = kernel_reserved_end().align_up_4k(); + let page_info_table_address = kernel_address_space + .map_alloc( + page_info_table_address, + memory_addr::align_up_4k(page_info_table_size), + MappingFlags::PRESENT | MappingFlags::READ | MappingFlags::WRITE | MappingFlags::GLOBAL, + ) + .unwrap(); + + let page_info_table: &[crate::memory::PageInfo] = unsafe { + core::slice::from_raw_parts(page_info_table_address.as_ptr_of(), page_info_table_entries) + }; + + // Initialize page info table + for entry in page_info_table { + entry.reset(); } - let test_r = 0xc0400000 as *mut u32; - let test_w = 0xc03ff000 as *mut u32; + 
*crate::memory::PAGE_INFO_TABLE.try_write().unwrap() = page_info_table; + crate::sync::lock_nb(&EARLY_ALLOCATOR).take(); + + // TEST + let test_r = 0xc0801000 as *mut u32; + let test_w = 0xc0800000 as *mut u32; kernel_address_space .map_page( kernel_address_space.top_layer(), VirtAddr::from_mut_ptr_of(test_r), PhysAddr::from_usize(0x800000), PageSize::Size4K, - crate::memory::MappingFlags::PRESENT | crate::memory::MappingFlags::READ, + MappingFlags::PRESENT | MappingFlags::READ, ) .unwrap(); kernel_address_space @@ -116,7 +133,7 @@ pub(super) fn setup_page_info_table(boot_info: &multiboot2::BootInformation) { VirtAddr::from_mut_ptr_of(test_w), PhysAddr::from_usize(0x800000), PageSize::Size4K, - crate::memory::MappingFlags::PRESENT | crate::memory::MappingFlags::WRITE, + MappingFlags::PRESENT | MappingFlags::WRITE, ) .unwrap(); crate::println!("Mapped!"); diff --git a/src/arch/x86/paging/mod.rs b/src/arch/x86/paging/mod.rs index 3d29498..283acd8 100644 --- a/src/arch/x86/paging/mod.rs +++ b/src/arch/x86/paging/mod.rs @@ -30,6 +30,12 @@ pub enum PageSize { Size1G = 0x40000000, } +impl PageSize { + pub fn min() -> Self { + Self::Size4K + } +} + extern "C" { #[link_name = "kernel_top_level_page_table"] static mut KERNEL_TOP_LEVEL_PAGE_TABLE: PageTable; diff --git a/src/memory.rs b/src/memory.rs index 2502295..88d48c5 100644 --- a/src/memory.rs +++ b/src/memory.rs @@ -1,3 +1,4 @@ +use core::sync::atomic::Ordering; pub use memory_addr::{pa, va, va_range, MemoryAddr, PhysAddr, VirtAddr}; pub mod address_space; @@ -5,24 +6,60 @@ pub use address_space::{AddressSpaceTrait, MappingError, MappingFlags, MappingRe /// Kernel page info entry pub struct PageInfo { - uses: core::sync::atomic::AtomicU32, + pub uses: core::sync::atomic::AtomicU32, } -static mut PAGE_INFO_TABLE: &[PageInfo] = &[]; +impl PageInfo { + /// Reset page info to an unused page + pub fn reset(&self) { + self.uses.store(0, Ordering::SeqCst); + } + + // /// Add one to uses count + // pub fn r#use(&self) { + // self.uses.fetch_add(1, Ordering::Relaxed); + // } + + // /// Unuse this page, pass in it's address. 
Will free if rc goes to zero + // pub fn r#unuse(&self, addr: PhysAddr) { + // self.uses.fetch_sub(1, Ordering::Relaxed); + // } + + pub fn acquire(&self) -> bool { + self.uses + .compare_exchange(0, 1, Ordering::AcqRel, Ordering::Relaxed) + .is_ok() + } +} + +pub static PAGE_INFO_TABLE: spin::RwLock<&[PageInfo]> = spin::RwLock::new(&[]); + +/// Access the page info table +pub fn page_info_table() -> spin::RwLockReadGuard<'static, &'static [PageInfo]> { + match PAGE_INFO_TABLE.try_read() { + Some(guard) => guard, + None => panic!("Tried to lock a locked mutex!"), + } +} /// Allocate page of size page_size aligned to page_size pub fn alloc_page(page_size: crate::arch::paging::PageSize) -> PhysAddr { - let page_info_table = unsafe { PAGE_INFO_TABLE }; + let page_info_table = page_info_table(); if page_info_table.is_empty() { crate::arch::paging::early_alloc_page(page_size) } else { + for (index, page_info) in page_info_table.iter().enumerate() { + if page_info.acquire() { + return PhysAddr::from_usize(index * crate::arch::paging::PageSize::min() as usize); + } + } todo!() } } /// Free page allocated with [alloc_page] pub fn free_page(addr: PhysAddr, page_size: crate::arch::paging::PageSize) { - let page_info_table = unsafe { PAGE_INFO_TABLE }; + let page_info_table = page_info_table(); if page_info_table.is_empty() { panic!("Can't free page without page info table"); } @@ -63,7 +100,7 @@ impl core::fmt::Display for FormatSize { f, "{}.{} {}", value, - (self.0 * 10 >> order * 10) % 10, + ((self.0 * 10) >> (order * 10)) % 10, orders[order] ) } diff --git a/src/memory/address_space.rs b/src/memory/address_space.rs index 8e8037d..3070a25 100644 --- a/src/memory/address_space.rs +++ b/src/memory/address_space.rs @@ -126,4 +126,45 @@ pub trait AddressSpaceTrait { self.unmap_page(Self::next_layer(layer, vaddr, false)?, vaddr, page_size) } } + + /// Allocate and map a region of memory into + /// the address space. On success returns + /// actual address region has been mapped to. + /// vaddr must be a valid hint + fn map_alloc( + &self, + vaddr: VirtAddr, + size: usize, + flags: MappingFlags, + ) -> MappingResult { + // TODO: Bigger pages + let min_page = crate::arch::paging::PageSize::min(); + debug_assert!(vaddr.is_aligned(min_page as usize)); + debug_assert!(size % min_page as usize == 0); + for page in 0..size / min_page as usize { + self.map_page( + self.top_layer(), + vaddr + page * min_page as usize, + crate::memory::alloc_page(min_page), + min_page, + flags, + )?; + } + Ok(vaddr) + } + + /// Allocate and map a region of memory into + /// the address space. On success returns + /// actual address region has been mapped to. + /// vaddr must be a valid hint. 
Value is uninit + fn map_alloc_ty( + &self, + vaddr: VirtAddr, + size: usize, + flags: MappingFlags, + ) -> MappingResult<&mut core::mem::MaybeUninit> { + Ok(unsafe { &mut *self.map_alloc(vaddr, size, flags)?.as_mut_ptr_of() }) + } + + // TODO: Unmap } diff --git a/src/sync.rs b/src/sync.rs index e17dbec..602d65e 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -4,3 +4,10 @@ pub type MappedMutexGuard<'a, T, U> = lock_api::MappedMutexGuard<'a, spin::Mutex pub type Lock = Mutex<()>; pub type LockGuard = MutexGuard<'static, ()>; pub type MappedLockGuard = MappedMutexGuard<'static, (), T>; + +pub fn lock_nb(mutex: &spin::Mutex) -> spin::MutexGuard { + match mutex.try_lock() { + Some(guard) => guard, + None => panic!("Tried to lock a locked mutex!"), + } +} From fc6cdb6b7e2c30c7b9e94bf9231d0cd160d1fd26 Mon Sep 17 00:00:00 2001 From: InfiniteCoder Date: Mon, 13 Jan 2025 20:23:53 +0300 Subject: [PATCH 3/9] Mark used pages as used --- src/arch/x86/paging/early_page_alloc.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/arch/x86/paging/early_page_alloc.rs b/src/arch/x86/paging/early_page_alloc.rs index 8f711df..1a0efad 100644 --- a/src/arch/x86/paging/early_page_alloc.rs +++ b/src/arch/x86/paging/early_page_alloc.rs @@ -108,12 +108,25 @@ pub(super) fn setup_page_info_table(boot_info: &multiboot2::BootInformation) { }; // Initialize page info table - for entry in page_info_table { + let mut early_alloc = crate::sync::lock_nb(&EARLY_ALLOCATOR); + for (index, entry) in page_info_table.iter().enumerate() { entry.reset(); + let addr = PhysAddr::from_usize(index * PageSize::min() as usize); + if addr < early_alloc.as_ref().unwrap().alloc_start { + entry.acquire(); + } else if early_alloc + .as_ref() + .unwrap() + .next_possibly_free(addr, PageSize::min()) + .is_some() + { + entry.acquire(); + } } *crate::memory::PAGE_INFO_TABLE.try_write().unwrap() = page_info_table; - crate::sync::lock_nb(&EARLY_ALLOCATOR).take(); + early_alloc.take(); + drop(early_alloc); // TEST let test_r = 0xc0801000 as *mut u32; From b8aa7d874cdde7fefbf6b91d7e1c779b3fbddf34 Mon Sep 17 00:00:00 2001 From: InfiniteCoder Date: Tue, 14 Jan 2025 21:13:52 +0300 Subject: [PATCH 4/9] Drop --- src/arch/x86/paging/address_space.rs | 21 ++++++++++ src/arch/x86/paging/early_page_alloc.rs | 53 +++++++++++-------------- src/arch/x86/paging/mod.rs | 13 ++++++ src/memory.rs | 11 ++++- src/memory/address_space.rs | 21 ++++------ src/sync.rs | 5 +++ 6 files changed, 79 insertions(+), 45 deletions(-) diff --git a/src/arch/x86/paging/address_space.rs b/src/arch/x86/paging/address_space.rs index 6efe61b..d2c761a 100644 --- a/src/arch/x86/paging/address_space.rs +++ b/src/arch/x86/paging/address_space.rs @@ -102,4 +102,25 @@ impl AddressSpaceTrait for AddressSpace { #[cfg(target_arch = "x86_64")] return (self.0, 39); } + + /// Decrement reference count of all pages related to this one + fn free_page(&self, layer: &Self::Layer, vaddr: VirtAddr) -> MappingResult<()> { + let mut entry = Self::get_entry(&layer, vaddr); + if !entry.flags().contains(PTEFlags::P) { + return Ok(()); + } + + if !entry.flags().contains(PTEFlags::PS) && page_info(entry.address()).uses() { + for page in 0..Self::page_size(layer) / PageSize::min() as usize { + // + } + } + free_page( + entry.address(), + PageSize::from_usize(Self::page_size(layer)).unwrap(), + ); + *entry = PTEntry::NULL; + flush_tlb(vaddr); + Ok(()) + } } diff --git a/src/arch/x86/paging/early_page_alloc.rs b/src/arch/x86/paging/early_page_alloc.rs index 1a0efad..823f58a 
100644 --- a/src/arch/x86/paging/early_page_alloc.rs +++ b/src/arch/x86/paging/early_page_alloc.rs @@ -108,6 +108,7 @@ pub(super) fn setup_page_info_table(boot_info: &multiboot2::BootInformation) { }; // Initialize page info table + // TODO: Speed up let mut early_alloc = crate::sync::lock_nb(&EARLY_ALLOCATOR); for (index, entry) in page_info_table.iter().enumerate() { entry.reset(); @@ -128,40 +129,32 @@ pub(super) fn setup_page_info_table(boot_info: &multiboot2::BootInformation) { early_alloc.take(); drop(early_alloc); + // TODO: Free boot info and bootstrap code + // TEST - let test_r = 0xc0801000 as *mut u32; - let test_w = 0xc0800000 as *mut u32; - kernel_address_space - .map_page( - kernel_address_space.top_layer(), - VirtAddr::from_mut_ptr_of(test_r), - PhysAddr::from_usize(0x800000), - PageSize::Size4K, - MappingFlags::PRESENT | MappingFlags::READ, - ) - .unwrap(); - kernel_address_space - .map_page( - kernel_address_space.top_layer(), - VirtAddr::from_mut_ptr_of(test_w), - PhysAddr::from_usize(0x800000), - PageSize::Size4K, - MappingFlags::PRESENT | MappingFlags::WRITE, + let test = 0xc0801000 as *mut u32; + let test = kernel_address_space + .map_alloc( + VirtAddr::from_mut_ptr_of(test), + PageSize::min() as _, + MappingFlags::PRESENT | MappingFlags::READ | MappingFlags::WRITE, ) - .unwrap(); + .unwrap() + .as_mut_ptr_of(); crate::println!("Mapped!"); unsafe { - *test_w = 42; + *test = 42; }; crate::println!("Wrote!"); - crate::println!("Testing page mapping: {}", unsafe { *test_r }); - kernel_address_space - .unmap_page( - kernel_address_space.top_layer(), - VirtAddr::from_mut_ptr_of(test_r), - PageSize::Size4K, - ) - .unwrap(); - crate::println!("Testing page unmapping (you should see a page fault)..."); - crate::println!("If you see this everything broke: {}", unsafe { *test_r }); + crate::println!("Testing page mapping: {}", unsafe { *test }); + kernel_address_space.unmap_free(VirtAddr::from_mut_ptr_of(test), PageSize::min() as _); + // kernel_address_space + // .unmap_page( + // kernel_address_space.top_layer(), + // VirtAddr::from_mut_ptr_of(test_r), + // PageSize::Size4K, + // ) + // .unwrap(); + // crate::println!("Testing page unmapping (you should see a page fault)..."); + // crate::println!("If you see this everything broke: {}", unsafe { *test_r }); } diff --git a/src/arch/x86/paging/mod.rs b/src/arch/x86/paging/mod.rs index 283acd8..192ae6d 100644 --- a/src/arch/x86/paging/mod.rs +++ b/src/arch/x86/paging/mod.rs @@ -31,6 +31,19 @@ pub enum PageSize { } impl PageSize { + pub fn from_usize(size: usize) -> Option { + match size { + 0x1000 => Some(Self::Size4K), + #[cfg(target_arch = "x86")] + 0x400000 => Some(Self::Size4M), + #[cfg(target_arch = "x86_64")] + 0x200000 => Some(Self::Size2M), + #[cfg(target_arch = "x86_64")] + 0x40000000 => Some(Self::Size1G), + _ => None, + } + } + pub fn min() -> Self { Self::Size4K } diff --git a/src/memory.rs b/src/memory.rs index 88d48c5..14a1f77 100644 --- a/src/memory.rs +++ b/src/memory.rs @@ -32,16 +32,23 @@ impl PageInfo { } } -pub static PAGE_INFO_TABLE: spin::RwLock<&[PageInfo]> = spin::RwLock::new(&[]); +pub static PAGE_INFO_TABLE: crate::sync::RwLock<&[PageInfo]> = crate::sync::RwLock::new(&[]); /// Access the page info table -pub fn page_info_table() -> spin::RwLockReadGuard<'static, &'static [PageInfo]> { +pub fn page_info_table() -> crate::sync::RwLockReadGuard<'static, &'static [PageInfo]> { match PAGE_INFO_TABLE.try_read() { Some(guard) => guard, None => panic!("Tried to lock a locked mutex!"), } } +/// Access page info 
from the table for a specific page +pub fn page_info(page: PhysAddr) -> crate::sync::MappedRwLockReadGuard<'static, &'static PageInfo> { + crate::sync::RwLockReadGuard::map(page_info_table(), |page_info_table| { + &page_info_table[page.as_usize() / crate::arch::paging::PageSize::min() as usize] + }) +} + /// Allocate page of size page_size aligned to page_size pub fn alloc_page(page_size: crate::arch::paging::PageSize) -> PhysAddr { let page_info_table = page_info_table(); diff --git a/src/memory/address_space.rs b/src/memory/address_space.rs index 3070a25..699aca9 100644 --- a/src/memory/address_space.rs +++ b/src/memory/address_space.rs @@ -80,6 +80,7 @@ pub trait AddressSpaceTrait { /// Map a single (possibly large/huge) page. /// As a layer should take [`AddressSpaceTrait::top_layer`] + /// DOES NOT increment reference count of the target page fn map_page( &self, layer: Self::Layer, @@ -108,8 +109,12 @@ pub trait AddressSpaceTrait { } } + /// Decrement reference count of all pages related to this one + fn free_page(&self, layer: &Self::Layer, vaddr: VirtAddr) -> MappingResult<()>; + /// Unmap a single (possibly large/huge) page or a whole page table of the same size. /// As a layer should take [`AddressSpaceTrait::top_layer`] + /// DOES decrement reference count of the target page fn unmap_page( &self, layer: Self::Layer, @@ -121,6 +126,7 @@ pub trait AddressSpaceTrait { } if Self::page_size(&layer) == page_size as usize { + // Self::unset_entry(layer, vaddr, page_size) } else { self.unmap_page(Self::next_layer(layer, vaddr, false)?, vaddr, page_size) @@ -153,18 +159,7 @@ pub trait AddressSpaceTrait { Ok(vaddr) } - /// Allocate and map a region of memory into - /// the address space. On success returns - /// actual address region has been mapped to. - /// vaddr must be a valid hint. 
Value is uninit - fn map_alloc_ty( - &self, - vaddr: VirtAddr, - size: usize, - flags: MappingFlags, - ) -> MappingResult<&mut core::mem::MaybeUninit> { - Ok(unsafe { &mut *self.map_alloc(vaddr, size, flags)?.as_mut_ptr_of() }) + fn unmap_free(&self, vaddr: VirtAddr, size: usize) -> MappingResult<()> { + todo!() } - - // TODO: Unmap } diff --git a/src/sync.rs b/src/sync.rs index 602d65e..2b60b91 100644 --- a/src/sync.rs +++ b/src/sync.rs @@ -1,6 +1,11 @@ pub use spin::lock_api::{Mutex, MutexGuard}; pub type MappedMutexGuard<'a, T, U> = lock_api::MappedMutexGuard<'a, spin::Mutex, U>; +pub use spin::lock_api::{RwLock, RwLockReadGuard, RwLockWriteGuard}; +pub type MappedRwLockReadGuard<'a, T, U> = lock_api::MappedRwLockReadGuard<'a, spin::RwLock, U>; +pub type MappedRwLockWriteGuard<'a, T, U> = + lock_api::MappedRwLockWriteGuard<'a, spin::RwLock, U>; + pub type Lock = Mutex<()>; pub type LockGuard = MutexGuard<'static, ()>; pub type MappedLockGuard = MappedMutexGuard<'static, (), T>; From 23982d8f752806deeebf5fc387cd4fa608e9025e Mon Sep 17 00:00:00 2001 From: InfiniteCoder Date: Sun, 26 Jan 2025 18:32:27 +0300 Subject: [PATCH 5/9] Rewrite paging a bit more --- Cargo.lock | 17 ++ Cargo.toml | 4 +- src/arch/mod.rs | 12 +- src/arch/x86/instructions.rs | 4 + src/arch/x86/mod.rs | 16 + src/arch/x86/paging/address_space.rs | 280 ++++++++++++------ src/arch/x86/paging/mod.rs | 96 +++--- src/arch/x86/paging/page_size.rs | 40 +++ src/arch/x86/paging/page_table_entry.rs | 26 ++ src/arch/x86/paging/tmp_page.rs | 7 +- src/main.rs | 5 +- src/memory.rs | 74 +---- src/memory/address_space.rs | 123 +------- src/memory/address_space/nested_page_table.rs | 136 +++++++++ src/memory/page_allocator.rs | 10 + src/memory/page_allocator/zoned_buddy.rs | 99 +++++++ 16 files changed, 634 insertions(+), 315 deletions(-) create mode 100644 src/arch/x86/instructions.rs create mode 100644 src/arch/x86/paging/page_size.rs create mode 100644 src/memory/address_space/nested_page_table.rs create mode 100644 src/memory/page_allocator.rs create mode 100644 src/memory/page_allocator/zoned_buddy.rs diff --git a/Cargo.lock b/Cargo.lock index 1ecb718..227e43e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -154,6 +154,12 @@ dependencies = [ "scopeguard", ] +[[package]] +name = "lock_free_buddy_allocator" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b8256be05fb9612e0276693020bed985954c9e5c621f4ec6edec822af6b13b" + [[package]] name = "log" version = "0.4.22" @@ -310,10 +316,12 @@ dependencies = [ "bitflags", "cc", "lock_api", + "lock_free_buddy_allocator", "memory_addr", "multiboot2", "portable", "spin", + "talc", "thiserror", ] @@ -360,6 +368,15 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "talc" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fcad3be1cfe36eb7d716a04791eba36a197da9d9b6ea1e28e64ac569da3701d" +dependencies = [ + "lock_api", +] + [[package]] name = "thiserror" version = "2.0.9" diff --git a/Cargo.toml b/Cargo.toml index c5fba66..d53a750 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,13 +17,15 @@ panic ="abort" [dependencies] thiserror = { version = "2.0.9", default-features = false } +lock_api = "0.4.12" spin = "0.9.8" bitfield-struct = "0.10.0" bitflags = "2.6.0" memory_addr = "0.3.1" -lock_api = "0.4.12" +lock_free_buddy_allocator = "0.1.0" +talc = "4.4.2" [target.'cfg(any(target_arch = "x86_64", target_arch = "x86"))'.dependencies] multiboot2 = { version = "0.23.1", default-features = false } diff 
--git a/src/arch/mod.rs b/src/arch/mod.rs index ac9a999..1f1ca74 100644 --- a/src/arch/mod.rs +++ b/src/arch/mod.rs @@ -4,12 +4,16 @@ pub mod x86; #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] pub use x86 as current; +// * Stub imports to make it easier to see required functions and types + pub use current::{_panic, _print}; -pub mod interrupts { - pub use super::current::interrupts::{disable, enable}; +/// Instructions like cpuid +pub mod instructions { + pub use super::current::instructions::cpu_id; } -pub mod paging { - pub use super::current::paging::{early_alloc_page, AddressSpace, PageSize}; +/// Interrupt handling +pub mod interrupts { + pub use super::current::interrupts::{disable, enable}; } diff --git a/src/arch/x86/instructions.rs b/src/arch/x86/instructions.rs new file mode 100644 index 0000000..93874bd --- /dev/null +++ b/src/arch/x86/instructions.rs @@ -0,0 +1,4 @@ +/// Get a CPU identifier +pub fn cpu_id() -> usize { + 0 +} diff --git a/src/arch/x86/mod.rs b/src/arch/x86/mod.rs index 9d7207d..c5239cc 100644 --- a/src/arch/x86/mod.rs +++ b/src/arch/x86/mod.rs @@ -1,6 +1,9 @@ #[cfg(target_arch = "x86")] core::arch::global_asm!(include_str!("x32/bootstrap.S"), options(att_syntax)); +/// Instructions like cpuid +pub mod instructions; + /// Early logging facilities pub mod early_logger; pub use early_logger::{_panic, _print}; @@ -13,6 +16,19 @@ pub mod interrupts; /// And I hate every single second of it. pub mod paging; +mod allocator { + const SIZE: usize = 0x1000; + static mut ARENA: [u8; SIZE] = [0; SIZE]; + + #[global_allocator] + static ALLOCATOR: talc::Talck, talc::ClaimOnOom> = talc::Talc::new(unsafe { + // if we're in a hosted environment, the Rust runtime may allocate before + // main() is called, so we need to initialize the arena automatically + talc::ClaimOnOom::new(talc::Span::from_slice(core::ptr::addr_of_mut!(ARENA))) + }) + .lock(); +} + /// Kernel setup function. 
First thing that is called /// after assembly bootstrap setus up GDT and higher-half address space #[no_mangle] diff --git a/src/arch/x86/paging/address_space.rs b/src/arch/x86/paging/address_space.rs index d2c761a..26739c2 100644 --- a/src/arch/x86/paging/address_space.rs +++ b/src/arch/x86/paging/address_space.rs @@ -1,126 +1,216 @@ -use super::*; -use crate::memory::*; +use memory_addr::{MemoryAddr, PhysAddr, VirtAddr}; + +use super::tmp_page; +use super::PageSize; +/// Physical page table entry types +mod entry { + pub(super) use super::super::{PTEFlags, PTEntry}; +} + +use crate::memory::address_space::nested_page_table::{NestedPageTable, NestedPageTableLevel}; +use crate::memory::address_space::AddressSpaceTrait; +use crate::memory::{MappingError, MappingResult, PageAllocatorTrait}; + +/// Interface page table entry types +mod if_entry { + pub(super) use crate::memory::address_space::nested_page_table::PageTableEntry; + pub(super) use crate::memory::MappingFlags; +} /// Address space struct -pub struct AddressSpace(pub PhysAddr); +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct AddressSpace(PageTableLevel); + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct PageTableLevel(PhysAddr, usize); impl AddressSpace { - /// Map the page table layer and get the page table entry associated with this address - fn get_entry( - layer: &::Layer, - vaddr: VirtAddr, - ) -> crate::sync::MappedLockGuard { - let page_table = tmp_page::map::(layer.0); + pub(super) fn from_paddr(addr: PhysAddr) -> Self { + #[cfg(target_arch = "x86")] + let top_level_bits = 22; + #[cfg(target_arch = "x86_64")] + let top_level_bits = 39; + Self(PageTableLevel(addr, top_level_bits)) + } +} + +impl PageTableLevel { + /// Map the page table level to tmp page + /// and get the page table entry associated with this address + fn lock_entry(&self, vaddr: VirtAddr) -> crate::sync::MappedLockGuard { + let page_table = tmp_page::map::(self.0); - let mask = PAGE_TABLE_ENTRIES - 1; - let index = vaddr.as_usize() >> layer.1 & mask; + let mask = super::PAGE_TABLE_ENTRIES - 1; + let index = vaddr.as_usize() >> self.1 & mask; crate::sync::MappedLockGuard::map(page_table, |page_table| &mut page_table[index]) } } -impl AddressSpaceTrait for AddressSpace { - type Layer = (PhysAddr, usize); +impl NestedPageTable for AddressSpace { + type PageSize = PageSize; + type Level = PageTableLevel; - fn page_size(layer: &Self::Layer) -> usize { - 1 << layer.1 + fn top_level(&self) -> Self::Level { + self.0.clone() } - fn set_entry( - layer: Self::Layer, - vaddr: VirtAddr, - paddr: PhysAddr, - page_size: crate::arch::paging::PageSize, - flags: MappingFlags, - ) -> MappingResult<()> { - debug_assert_eq!(1usize << layer.1, page_size as usize); - let mut entry = Self::get_entry(&layer, vaddr); - if entry.flags().contains(PTEFlags::P) { - return Err(MappingError::MappingOver(entry.address())); + // fn unset_entry(layer: Self::Layer, vaddr: VirtAddr, page_size: PageSize) -> MappingResult<()> { + // debug_assert_eq!(1usize << layer.1, page_size as usize); + // let mut entry = Self::get_entry(&layer, vaddr); + // if !entry.flags().contains(PTEFlags::P) { + // return Err(MappingError::UnmappingNotMapped(vaddr)); + // } + // *entry = PTEntry::NULL; + // flush_tlb(vaddr); + // Ok(()) + // } + + // fn next_layer(layer: Self::Layer, vaddr: VirtAddr, map: bool) -> MappingResult { + // let entry = Self::get_entry(&layer, vaddr); + + // if entry.flags().contains(PTEFlags::P | PTEFlags::PS) { + // if map { + // return 
Err(MappingError::MappingOver(entry.address())); + // } else { + // return Err(MappingError::UnmappingPartOfLargePage(entry.address())); + // } + // } + + // let entry = if !entry.flags().contains(PTEFlags::P) { + // drop(entry); + // if !map { + // return Err(MappingError::UnmappingNotMapped(vaddr)); + // } + + // // Create a new page table + // let page_table_addr = crate::memory::PAGE_ALLOCATOR + // .alloc(PageSize::min()) + // .unwrap(); + // let mut page_table = tmp_page::map::(page_table_addr); + + // // Clear the page table + // for index in 0..PAGE_TABLE_ENTRIES { + // page_table[index] = PTEntry::NULL; + // } + + // drop(page_table); + + // // Set the entry to this page table + // let mut entry = Self::get_entry(&layer, vaddr); + // *entry = PTEntry::new_page_table(page_table_addr); + // entry + // } else { + // entry + // }; + + // Ok((entry.address(), layer.1 - PAGE_LEVEL_BITS)) + // } + + // fn top_layer(&self) -> Self::Layer { + // #[cfg(target_arch = "x86")] + // return (self.0, 22); + // #[cfg(target_arch = "x86_64")] + // return (self.0, 39); + // } + + // /// Decrement reference count of all pages related to this one + // fn free_page(&self, layer: &Self::Layer, vaddr: VirtAddr) -> MappingResult<()> { + // let mut entry = Self::get_entry(&layer, vaddr); + // if !entry.flags().contains(PTEFlags::P) { + // return Ok(()); + // } + + // // if !entry.flags().contains(PTEFlags::PS) && page_info(entry.address()).uses() { + // // for page in 0..Self::page_size(layer) / PageSize::min() as usize { + // // // + // // } + // // } + // // free_page( + // // entry.address(), + // // PageSize::from_usize(Self::page_size(layer)).unwrap(), + // // ); + // *entry = PTEntry::NULL; + // flush_tlb(vaddr); + // Ok(()) + // } +} + +impl NestedPageTableLevel for PageTableLevel { + type PageSize = PageSize; + + fn page_size(&self) -> Option { + PageSize::try_from(1 << self.1).ok() + } + + fn new_sublevel(&self, alloc: &impl PageAllocatorTrait) -> Option { + let addr = alloc.alloc(PageSize::Size4K)?; + let mut page_table = tmp_page::map::(addr); + + // Clear the page table + for index in 0..super::PAGE_TABLE_ENTRIES { + page_table[index] = entry::PTEntry::NULL; } - *entry = PTEntry::new_page(paddr, page_size, flags.into()); - flush_tlb(vaddr); - Ok(()) + + Some(PageTableLevel(addr, self.1 - super::PAGE_LEVEL_BITS)) } - fn unset_entry( - layer: Self::Layer, + fn set_entry( + &self, vaddr: VirtAddr, - page_size: crate::arch::paging::PageSize, + new_entry: crate::memory::address_space::nested_page_table::PageTableEntry, ) -> MappingResult<()> { - debug_assert_eq!(1usize << layer.1, page_size as usize); - let mut entry = Self::get_entry(&layer, vaddr); - if !entry.flags().contains(PTEFlags::P) { - return Err(MappingError::UnmappingNotMapped(vaddr)); + if matches!(new_entry, if_entry::PageTableEntry::Page(_, _)) { + debug_assert!(vaddr.is_aligned(1usize << self.1)); } - *entry = PTEntry::NULL; - flush_tlb(vaddr); - Ok(()) - } - fn next_layer(layer: Self::Layer, vaddr: VirtAddr, map: bool) -> MappingResult { - let entry = Self::get_entry(&layer, vaddr); - - if entry.flags().contains(PTEFlags::P | PTEFlags::PS) { - if map { - return Err(MappingError::MappingOver(entry.address())); - } else { - return Err(MappingError::UnmappingPartOfLargePage(entry.address())); - } + let mut entry = self.lock_entry(vaddr); + if self.1 > 12 && entry.flags().contains(entry::PTEFlags::PS) { + return Err(MappingError::MappingOver(entry.address())); } - let entry = if !entry.flags().contains(PTEFlags::P) { - drop(entry); - if 
!map { - return Err(MappingError::UnmappingNotMapped(vaddr)); + *entry = match new_entry { + if_entry::PageTableEntry::Level(level) => entry::PTEntry::new_page_table(level.0), + if_entry::PageTableEntry::Page(paddr, flags) => { + entry::PTEntry::new_page(paddr, self.page_size().unwrap(), flags.into()) } - - // Create a new page table - let page_table_addr = crate::memory::alloc_page(PageSize::Size4K as _); - let mut page_table = tmp_page::map::(page_table_addr); - - // Clear the page table - for index in 0..PAGE_TABLE_ENTRIES { - page_table[index] = PTEntry::NULL; - } - - drop(page_table); - - // Set the entry to this page table - let mut entry = Self::get_entry(&layer, vaddr); - *entry = PTEntry::new_page_table(page_table_addr); - entry - } else { - entry }; - Ok((entry.address(), layer.1 - PAGE_LEVEL_BITS)) + // TODO: Check if this page table is currently active + super::flush_tlb(vaddr); + Ok(()) } - fn top_layer(&self) -> Self::Layer { - #[cfg(target_arch = "x86")] - return (self.0, 22); - #[cfg(target_arch = "x86_64")] - return (self.0, 39); + fn get_entry(&self, vaddr: VirtAddr) -> MappingResult> { + let entry = self.lock_entry(vaddr); + if entry.flags().contains(entry::PTEFlags::P) + && self.1 > 12 + && !entry.flags().contains(entry::PTEFlags::PS) + { + Ok(if_entry::PageTableEntry::Level(PageTableLevel( + entry.address(), + self.1 - super::PAGE_LEVEL_BITS, + ))) + } else { + Ok(if_entry::PageTableEntry::Page( + entry.address(), + entry.flags().into(), + )) + } } +} - /// Decrement reference count of all pages related to this one - fn free_page(&self, layer: &Self::Layer, vaddr: VirtAddr) -> MappingResult<()> { - let mut entry = Self::get_entry(&layer, vaddr); - if !entry.flags().contains(PTEFlags::P) { - return Ok(()); - } +impl AddressSpaceTrait for AddressSpace { + fn map_alloc( + &self, + vaddr: VirtAddr, + size: usize, + flags: if_entry::MappingFlags, + alloc: &impl crate::memory::PageAllocatorTrait, + ) -> MappingResult { + ::map_alloc(self, vaddr, size, flags, alloc) + } - if !entry.flags().contains(PTEFlags::PS) && page_info(entry.address()).uses() { - for page in 0..Self::page_size(layer) / PageSize::min() as usize { - // - } - } - free_page( - entry.address(), - PageSize::from_usize(Self::page_size(layer)).unwrap(), - ); - *entry = PTEntry::NULL; - flush_tlb(vaddr); - Ok(()) + fn unmap_free(&self, vaddr: VirtAddr, size: usize) -> MappingResult<()> { + ::unmap_free(self, vaddr, size) } } diff --git a/src/arch/x86/paging/mod.rs b/src/arch/x86/paging/mod.rs index 192ae6d..a10c7d3 100644 --- a/src/arch/x86/paging/mod.rs +++ b/src/arch/x86/paging/mod.rs @@ -1,9 +1,12 @@ -pub use memory_addr::{pa, va, va_range, MemoryAddr, PhysAddr, VirtAddr}; +use memory_addr::{MemoryAddr, PhysAddr, VirtAddr}; /// Temproary page, space for it is allocated after the kernel in the kernel address space. 
/// Used to map page tables and manipulate their entries mod tmp_page; +mod page_size; +pub use page_size::PageSize; + /// Page table entry and it's flags mod page_table_entry; use page_table_entry::{PTEFlags, PTEntry}; @@ -12,42 +15,8 @@ use page_table_entry::{PTEFlags, PTEntry}; mod address_space; pub use address_space::AddressSpace; -/// Page allocator manages free pages -mod early_page_alloc; -pub use early_page_alloc::early_alloc_page; - -/// Page sizes possible to map -#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[repr(usize)] -pub enum PageSize { - #[default] - Size4K = 0x1000, - #[cfg(target_arch = "x86")] - Size4M = 0x400000, - #[cfg(target_arch = "x86_64")] - Size2M = 0x200000, - #[cfg(target_arch = "x86_64")] - Size1G = 0x40000000, -} - -impl PageSize { - pub fn from_usize(size: usize) -> Option { - match size { - 0x1000 => Some(Self::Size4K), - #[cfg(target_arch = "x86")] - 0x400000 => Some(Self::Size4M), - #[cfg(target_arch = "x86_64")] - 0x200000 => Some(Self::Size2M), - #[cfg(target_arch = "x86_64")] - 0x40000000 => Some(Self::Size1G), - _ => None, - } - } - - pub fn min() -> Self { - Self::Size4K - } -} +/// Use standard zone-based page allocator +pub type PageAllocator = crate::memory::page_allocator::ZonedBuddy<0x1000>; extern "C" { #[link_name = "kernel_top_level_page_table"] @@ -56,6 +25,7 @@ extern "C" { linker_symbol! { kernel_offset(KERNEL_OFFSET_SYMBOL) => "KERNEL_OFFSET"; + kernel_reserved_end(KERNEL_RESERVED_END) => "kernel_reserved_end"; } /// Convert a physical address in the kernel address space to virtual by adding the offset @@ -105,7 +75,57 @@ pub(super) fn setup_paging(boot_info: &multiboot2::BootInformation) { ); } - early_page_alloc::setup_page_info_table(boot_info); + let page_allocator = PageAllocator::new(); + let kernel_address_space = VirtAddr::from_usize(&raw const KERNEL_TOP_LEVEL_PAGE_TABLE as _); + let kernel_address_space = AddressSpace::from_paddr(kernel_virt2phys(kernel_address_space)); + + // TODO: Properly add zones and avoid adding kernel + page_allocator + .add_zone(kernel_virt2phys(kernel_reserved_end()).as_usize(), 0x16000) + .unwrap(); + + // Add zones to the page allocator + let memory_map_tag = boot_info + .memory_map_tag() + .expect("Memory map not available"); + for region in memory_map_tag.memory_areas() { + use multiboot2::MemoryAreaType; + let typ = MemoryAreaType::from(region.typ()); + if typ == MemoryAreaType::Available { + // if page_allocator + // .add_zone( + // region.start_address() as _, + // memory_addr::align_down_4k(region.size() as _), + // ) + // .is_err() + // { + // crate::println!("Failed to add some memory zones"); + // } + } + } + + // TODO: Free boot info and bootstrap code + + // TEST + use crate::memory::MappingFlags; + use crate::memory::{AddressSpaceTrait, PageSizeTrait}; + let test = 0xc0801000 as *mut u32; + let test = kernel_address_space + .map_alloc( + VirtAddr::from_mut_ptr_of(test), + PageSize::MIN as _, + MappingFlags::PRESENT | MappingFlags::READ | MappingFlags::WRITE, + &page_allocator, + ) + .unwrap() + .as_mut_ptr_of(); + crate::println!("Mapped!"); + unsafe { + *test = 42; + }; + crate::println!("Wrote!"); + crate::println!("Testing page mapping: {}", unsafe { *test }); + // kernel_address_space.unmap_free(VirtAddr::from_mut_ptr_of(test), PageSize::min() as _); } macro_rules! 
linker_symbol { diff --git a/src/arch/x86/paging/page_size.rs b/src/arch/x86/paging/page_size.rs new file mode 100644 index 0000000..9f10638 --- /dev/null +++ b/src/arch/x86/paging/page_size.rs @@ -0,0 +1,40 @@ +/// Page sizes possible to map +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[repr(usize)] +pub enum PageSize { + #[default] + Size4K = 0x1000, + #[cfg(target_arch = "x86")] + Size4M = 0x400000, + #[cfg(target_arch = "x86_64")] + Size2M = 0x200000, + #[cfg(target_arch = "x86_64")] + Size1G = 0x40000000, +} + +impl TryFrom for PageSize { + type Error = (); + + fn try_from(size: usize) -> Result { + match size { + 0x1000 => Ok(Self::Size4K), + #[cfg(target_arch = "x86")] + 0x400000 => Ok(Self::Size4M), + #[cfg(target_arch = "x86_64")] + 0x200000 => Ok(Self::Size2M), + #[cfg(target_arch = "x86_64")] + 0x40000000 => Ok(Self::Size1G), + _ => Err(()), + } + } +} + +impl From for usize { + fn from(value: PageSize) -> Self { + value as _ + } +} + +impl crate::memory::PageSizeTrait for PageSize { + const MIN: Self = Self::Size4K; +} diff --git a/src/arch/x86/paging/page_table_entry.rs b/src/arch/x86/paging/page_table_entry.rs index 258bc91..af470b1 100644 --- a/src/arch/x86/paging/page_table_entry.rs +++ b/src/arch/x86/paging/page_table_entry.rs @@ -49,6 +49,32 @@ impl From for PTEFlags { } } +impl From for MappingFlags { + fn from(value: PTEFlags) -> Self { + let mut flags = Self::empty(); + if value.contains(PTEFlags::P) { + flags |= Self::PRESENT; + } + if value.contains(PTEFlags::RW) { + flags |= Self::WRITE; + } + #[cfg(target_arch = "x86_64")] + if value.contains(todo!()) { + flags |= Self::EXECUTE; + } + if value.contains(PTEFlags::US) { + flags |= Self::USER; + } + if value.contains(PTEFlags::PCD) { + flags |= Self::UNCACHED; + } + if value.contains(PTEFlags::G) { + flags |= Self::GLOBAL; + } + flags + } +} + /// Page table entry #[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] diff --git a/src/arch/x86/paging/tmp_page.rs b/src/arch/x86/paging/tmp_page.rs index 6fd9949..89694c4 100644 --- a/src/arch/x86/paging/tmp_page.rs +++ b/src/arch/x86/paging/tmp_page.rs @@ -24,10 +24,13 @@ pub(super) fn map(addr: PhysAddr) -> crate::sync::MappedLockGuard { ); crate::sync::LockGuard::map(TMP_PAGE_MUTEX.lock(), |_| { + let entry = PTEntry::new_page(addr, PageSize::Size4K, PTEFlags::P | PTEFlags::RW); unsafe { - *TMP_PAGE_ENTRY = PTEntry::new_page(addr, PageSize::Size4K, PTEFlags::P | PTEFlags::RW); + if *TMP_PAGE_ENTRY != entry { + *TMP_PAGE_ENTRY = entry; + flush_tlb(address()); + } } - flush_tlb(address()); unsafe { &mut *address().as_mut_ptr_of() } }) } diff --git a/src/main.rs b/src/main.rs index c5f2d2e..f55a22a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -4,11 +4,14 @@ any(target_arch = "x86_64", target_arch = "x86"), feature(abi_x86_interrupt) )] +#![feature(allocator_api)] + +extern crate alloc; /// Synchronization primitives pub mod sync; -/// Arch-specific things +/// Architecture implementaitons pub mod arch; /// Basic logging facilities, calling arch-specific early print and panic functions diff --git a/src/memory.rs b/src/memory.rs index 14a1f77..422fbfd 100644 --- a/src/memory.rs +++ b/src/memory.rs @@ -1,76 +1,16 @@ -use core::sync::atomic::Ordering; pub use memory_addr::{pa, va, va_range, MemoryAddr, PhysAddr, VirtAddr}; +/// Address space implementations pub mod address_space; pub use address_space::{AddressSpaceTrait, MappingError, MappingFlags, MappingResult}; -/// Kernel page info entry -pub struct 
PageInfo { - pub uses: core::sync::atomic::AtomicU32, -} - -impl PageInfo { - /// Reset page info to an unused page - pub fn reset(&self) { - self.uses.store(0, Ordering::SeqCst); - } - - // /// Add one to uses count - // pub fn r#use(&self) { - // self.uses.fetch_add(1, Ordering::Relaxed); - // } - - // /// Unuse this page, pass in it's address. Will free if rc goes to zero - // pub fn r#unuse(&self, addr: PhysAddr) { - // self.uses.fetch_sub(1, Ordering::Relaxed); - // } +/// Different page allocator implementaitons +pub mod page_allocator; +pub use page_allocator::PageAllocatorTrait; - pub fn acquire(&self) -> bool { - self.uses - .compare_exchange(0, 1, Ordering::AcqRel, Ordering::Relaxed) - .is_ok() - } -} - -pub static PAGE_INFO_TABLE: crate::sync::RwLock<&[PageInfo]> = crate::sync::RwLock::new(&[]); - -/// Access the page info table -pub fn page_info_table() -> crate::sync::RwLockReadGuard<'static, &'static [PageInfo]> { - match PAGE_INFO_TABLE.try_read() { - Some(guard) => guard, - None => panic!("Tried to lock a locked mutex!"), - } -} - -/// Access page info from the table for a specific page -pub fn page_info(page: PhysAddr) -> crate::sync::MappedRwLockReadGuard<'static, &'static PageInfo> { - crate::sync::RwLockReadGuard::map(page_info_table(), |page_info_table| { - &page_info_table[page.as_usize() / crate::arch::paging::PageSize::min() as usize] - }) -} - -/// Allocate page of size page_size aligned to page_size -pub fn alloc_page(page_size: crate::arch::paging::PageSize) -> PhysAddr { - let page_info_table = page_info_table(); - if page_info_table.is_empty() { - crate::arch::paging::early_alloc_page(page_size) - } else { - for (index, page_info) in page_info_table.iter().enumerate() { - if page_info.acquire() { - return PhysAddr::from_usize(index * crate::arch::paging::PageSize::min() as usize); - } - } - todo!() - } -} - -/// Free page allocated with [alloc_page] -pub fn free_page(addr: PhysAddr, page_size: crate::arch::paging::PageSize) { - let page_info_table = page_info_table(); - if page_info_table.is_empty() { - panic!("Can't free page without page info table"); - } - todo!() +/// Page size trait, implement for an enum (or a struct) that could hold valid page sizes +pub trait PageSizeTrait: Copy + PartialEq + Eq + TryFrom + Into { + const MIN: Self; } /// Wrap a u64 in this struct to display it with size postfix (KiB, MiB, GiB, etc.) diff --git a/src/memory/address_space.rs b/src/memory/address_space.rs index 699aca9..8f89ac8 100644 --- a/src/memory/address_space.rs +++ b/src/memory/address_space.rs @@ -1,9 +1,10 @@ -use memory_addr::{pa, va, va_range, MemoryAddr, PhysAddr, VirtAddr}; +use super::{PageAllocatorTrait, PageSizeTrait}; +use memory_addr::{MemoryAddr, PhysAddr, VirtAddr}; bitflags::bitflags! { /// Generic page table entry flags that indicate the corresponding mapped /// memory region permissions and attributes. - #[derive(Clone, Copy, PartialEq)] + #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct MappingFlags: usize { /// Memory is present. 
If not, generate a page fault const PRESENT = 1 << 0; @@ -28,6 +29,10 @@ pub enum MappingError { /// Mapping over an already existing page #[error("mapping over existing page at address {0:#x}")] MappingOver(PhysAddr), + /// Page allocation failed + #[error("page allocation failed")] + PageAllocationFailed, + /// Mapping an unaligned address #[error("mapping an unaligned address {0:#x}")] UnalignedPhysicalAddress(PhysAddr), @@ -45,93 +50,12 @@ pub enum MappingError { /// Result type for memory mapping operations pub type MappingResult = Result; -/// Trait to be implemented by an address space -pub trait AddressSpaceTrait { - // * Mapping - /// Single page table - type Layer; - - /// Page size of one page - fn page_size(layer: &Self::Layer) -> usize; - - /// Set an entry in the page table layer to map vaddr to paddr with size and flags - fn set_entry( - layer: Self::Layer, - vaddr: VirtAddr, - paddr: PhysAddr, - page_size: crate::arch::paging::PageSize, - flags: MappingFlags, - ) -> MappingResult<()>; - - /// Unset an entry in the page table layer - fn unset_entry( - layer: Self::Layer, - vaddr: VirtAddr, - page_size: crate::arch::paging::PageSize, - ) -> MappingResult<()>; - - /// Get or create (only if map is true) a page table layer in this layer - /// that is associated with this virtual address. map parameter indicates - /// if this call corresponds to mapping/unmapping operation - fn next_layer(layer: Self::Layer, vaddr: VirtAddr, map: bool) -> MappingResult; - - /// Get top level page table layer for this address space - fn top_layer(&self) -> Self::Layer; - - /// Map a single (possibly large/huge) page. - /// As a layer should take [`AddressSpaceTrait::top_layer`] - /// DOES NOT increment reference count of the target page - fn map_page( - &self, - layer: Self::Layer, - vaddr: VirtAddr, - paddr: PhysAddr, - page_size: crate::arch::paging::PageSize, - flags: MappingFlags, - ) -> MappingResult<()> { - if !vaddr.is_aligned(page_size as usize) { - return Err(MappingError::UnalignedVirtualAddress(vaddr)); - } - if !paddr.is_aligned(page_size as usize) { - return Err(MappingError::UnalignedPhysicalAddress(paddr)); - } - - if Self::page_size(&layer) == page_size as usize { - Self::set_entry(layer, vaddr, paddr, page_size, flags) - } else { - self.map_page( - Self::next_layer(layer, vaddr, true)?, - vaddr, - paddr, - page_size, - flags, - ) - } - } +pub mod nested_page_table; - /// Decrement reference count of all pages related to this one - fn free_page(&self, layer: &Self::Layer, vaddr: VirtAddr) -> MappingResult<()>; - - /// Unmap a single (possibly large/huge) page or a whole page table of the same size. - /// As a layer should take [`AddressSpaceTrait::top_layer`] - /// DOES decrement reference count of the target page - fn unmap_page( - &self, - layer: Self::Layer, - vaddr: VirtAddr, - page_size: crate::arch::paging::PageSize, - ) -> MappingResult<()> { - if !vaddr.is_aligned(page_size as usize) { - return Err(MappingError::UnalignedVirtualAddress(vaddr)); - } - - if Self::page_size(&layer) == page_size as usize { - // - Self::unset_entry(layer, vaddr, page_size) - } else { - self.unmap_page(Self::next_layer(layer, vaddr, false)?, vaddr, page_size) - } - } +/// Address space allows for control over accessible memory +pub trait AddressSpaceTrait { + // pub fn map(&mut self, vaddr: VirtAddr, paddr: PhysAddr, size: usize) -> MappingResult; + // pub fn unmap(&mut self, addr: VirtAddr, size: usize) -> MappingResult<()>; /// Allocate and map a region of memory into /// the address space. 
On success returns @@ -142,24 +66,9 @@ pub trait AddressSpaceTrait { vaddr: VirtAddr, size: usize, flags: MappingFlags, - ) -> MappingResult { - // TODO: Bigger pages - let min_page = crate::arch::paging::PageSize::min(); - debug_assert!(vaddr.is_aligned(min_page as usize)); - debug_assert!(size % min_page as usize == 0); - for page in 0..size / min_page as usize { - self.map_page( - self.top_layer(), - vaddr + page * min_page as usize, - crate::memory::alloc_page(min_page), - min_page, - flags, - )?; - } - Ok(vaddr) - } + alloc: &impl PageAllocatorTrait, + ) -> MappingResult; - fn unmap_free(&self, vaddr: VirtAddr, size: usize) -> MappingResult<()> { - todo!() - } + /// TODO: Doc + fn unmap_free(&self, vaddr: VirtAddr, size: usize) -> MappingResult<()>; } diff --git a/src/memory/address_space/nested_page_table.rs b/src/memory/address_space/nested_page_table.rs new file mode 100644 index 0000000..36a5b49 --- /dev/null +++ b/src/memory/address_space/nested_page_table.rs @@ -0,0 +1,136 @@ +use super::{MappingError, MappingFlags, MappingResult}; +use super::{PageAllocatorTrait, PageSizeTrait}; +use super::{PhysAddr, VirtAddr}; + +/// Page table entry returned +pub enum PageTableEntry { + /// Page table entry maps to the next level page table + Level(Level), + /// Page table entry identity maps (regular or large/huge pages) + Page(PhysAddr, MappingFlags), +} + +/// A single level of a nested page table +/// (underlying type should be something like a pointer that's freely cloneable) +pub trait NestedPageTableLevel: Clone + Sized { + type PageSize: PageSizeTrait; + + /// Get page size of this layer, if a page can be mapped here + fn page_size(&self) -> Option; + + /// Allocate a new page table level, that's gonna come after this one + fn new_sublevel(&self, alloc: &impl PageAllocatorTrait) -> Option; + + /// Set an entry in this level. vaddr might not be aligned if entry + /// is [`PageTableEntry::Level`] + fn set_entry(&self, vaddr: VirtAddr, entry: PageTableEntry) -> MappingResult<()>; + + /// Get an entry in this page table. vaddr might not be aligned + fn get_entry(&self, vaddr: VirtAddr) -> MappingResult>; + + /// Map a single (possibly large/huge) page. 
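+    /// If this level's page size matches `page_size`, the entry is written here
+    /// directly; otherwise the walk recurses into the next level, allocating an
+    /// intermediate table from `alloc` when the slot is not mapped yet and
+    /// refusing to map over an already-present page.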
+ fn map_page( + &self, + vaddr: VirtAddr, + paddr: PhysAddr, + page_size: Self::PageSize, + flags: MappingFlags, + alloc: &impl PageAllocatorTrait, + ) -> MappingResult<()> { + if self.page_size() == Some(page_size) { + crate::println!( + "Mapping {vaddr:?} to {paddr:?} with flags {:#b}", + flags.bits() + ); + self.set_entry(vaddr, PageTableEntry::Page(paddr, flags)) + } else { + let entry = self.get_entry(vaddr)?; + let next_level = match entry { + PageTableEntry::Page(addr, flags) => { + crate::println!( + "Going into a page entry w/ addr {:?} and flags {:#x}", + addr, + flags.bits() + ); + if flags.contains(MappingFlags::PRESENT) { + return Err(MappingError::MappingOver(addr)); + } + let level = self + .new_sublevel(alloc) + .ok_or(MappingError::PageAllocationFailed)?; + self.set_entry(vaddr, PageTableEntry::Level(level.clone()))?; + level + } + PageTableEntry::Level(level) => { + // crate::println!( + // "Going into a level with page size {:?}", + // level.page_size().map(|v| v.into()) + // ); + level + } + }; + next_level.map_page(vaddr, paddr, page_size, flags, alloc) + } + } +} + +/// Implementation of [`super::AddressSpaceTrait`] for a nested page table +/// structure (x86 for example) +pub trait NestedPageTable { + /// Page size + type PageSize: PageSizeTrait; + + /// Single level of paging + type Level: NestedPageTableLevel; + + /// Get top level page table for this address space + fn top_level(&self) -> Self::Level; + + // /// Unmap a single (possibly large/huge) page or a whole page table of the same size. + // /// As a layer should take [`AddressSpaceTrait::top_layer`] + // /// DOES NOT FREE + // fn unmap_page( + // &self, + // layer: Self::Layer, + // vaddr: VirtAddr, + // page_size: PageSize, + // alloc: &impl PageAllocatorTrait, + // ) -> MappingResult<()> { + // if !vaddr.is_aligned(page_size.clone()) { + // return Err(MappingError::UnalignedVirtualAddress(vaddr)); + // } + + // if Self::page_size(&layer) == page_size.clone().into() { + // Self::set_entry(layer, vaddr, page_size) + // } else { + // self.unmap_page(Self::next_layer(layer, vaddr, None)?, vaddr, page_size) + // } + // } + + /// Implementation of [`super::AddressSpaceTrait::map_alloc`] + fn map_alloc( + &self, + vaddr: VirtAddr, + size: usize, + flags: MappingFlags, + alloc: &impl PageAllocatorTrait, + ) -> MappingResult { + // TODO: Possibly bigger pages + for page in 0..size / Self::PageSize::MIN.into() { + self.top_level().map_page( + vaddr + page * Self::PageSize::MIN.into(), + alloc.alloc(Self::PageSize::MIN).unwrap(), + Self::PageSize::MIN, + flags, + alloc, + )?; + } + Ok(vaddr) + } + + /// Implementation of [`super::AddressSpaceTrait::unmap_free`] + fn unmap_free(&self, vaddr: VirtAddr, size: usize) -> MappingResult<()> { + todo!(); + Ok(()) + } +} diff --git a/src/memory/page_allocator.rs b/src/memory/page_allocator.rs new file mode 100644 index 0000000..69a7610 --- /dev/null +++ b/src/memory/page_allocator.rs @@ -0,0 +1,10 @@ +use super::PageSizeTrait; +use memory_addr::PhysAddr; + +pub mod zoned_buddy; +pub use zoned_buddy::ZonedBuddy; + +pub trait PageAllocatorTrait { + fn alloc(&self, size: PageSize) -> Option; + fn free(&self, allocation: PhysAddr, size: PageSize); +} diff --git a/src/memory/page_allocator/zoned_buddy.rs b/src/memory/page_allocator/zoned_buddy.rs new file mode 100644 index 0000000..ab422d7 --- /dev/null +++ b/src/memory/page_allocator/zoned_buddy.rs @@ -0,0 +1,99 @@ +use super::{PageAllocatorTrait, PageSizeTrait, PhysAddr}; +use crate::sync::RwLock; + +struct CpuId; +impl 
lock_free_buddy_allocator::cpuid::Cpu for CpuId { + fn current_cpu() -> usize { + crate::arch::instructions::cpu_id() + } +} + +struct Zone { + start: usize, + size: usize, + buddy: lock_free_buddy_allocator::buddy_alloc::BuddyAlloc< + 'static, + PAGE_SIZE, + CpuId, + alloc::alloc::Global, + >, +} + +/// Zone-based buddy allocator. Manages zones, +/// each zone having a separate binary buddy, +/// similar to how linux does this +pub struct ZonedBuddy { + zones: RwLock>>, +} + +impl ZonedBuddy { + pub const fn new() -> Self { + Self { + zones: RwLock::new(alloc::vec::Vec::new()), + } + } + + pub fn add_zone(&self, start: usize, size: usize) -> Result<(), ()> { + debug_assert!( + start % BLOCK_SIZE == 0, + "zone is not aligned ({:#x})", + start + ); + debug_assert!(size % BLOCK_SIZE == 0, "size is not aligned ({:#x})", size); + + if !size.is_power_of_two() { + let mut start = start; + for bit in 0..usize::BITS { + let size_p2 = 1 << bit; + if size & size_p2 != 0 { + self.add_zone(start, size_p2)?; + start += size_p2; + } + } + } else { + self.zones.write().push(Zone { + start, + size, + buddy: lock_free_buddy_allocator::buddy_alloc::BuddyAlloc::new( + start, + size / BLOCK_SIZE, + &alloc::alloc::Global, + ) + .ok_or(())?, + }); + } + Ok(()) + } + + fn alloc(&self, size: usize) -> Option { + let blocks = size / BLOCK_SIZE; + for zone in self.zones.read().iter() { + if let Some(addr) = zone.buddy.alloc(blocks) { + return Some(PhysAddr::from_usize(addr)); + } + } + None + } + + fn free(&self, allocation: PhysAddr, size: usize) { + let start = allocation.as_usize(); + let blocks = size / BLOCK_SIZE; + for zone in self.zones.read().iter() { + if start > zone.start && start + size < zone.start + zone.size { + zone.buddy.free(allocation.as_usize(), blocks); + } + } + } +} + +impl PageAllocatorTrait + for ZonedBuddy +{ + fn alloc(&self, size: PageSize) -> Option { + self.alloc(size.into()) + } + + fn free(&self, allocation: PhysAddr, size: PageSize) { + self.free(allocation, size.into()) + } +} From 93bc4df15e73bcedbe3c4b9dacee08d7a2052fa4 Mon Sep 17 00:00:00 2001 From: InfiniteCoder Date: Sun, 26 Jan 2025 18:34:24 +0300 Subject: [PATCH 6/9] Cleanup --- src/arch/x86/paging/early_page_alloc.rs | 160 ------------------ src/memory/address_space/nested_page_table.rs | 17 +- 2 files changed, 1 insertion(+), 176 deletions(-) delete mode 100644 src/arch/x86/paging/early_page_alloc.rs diff --git a/src/arch/x86/paging/early_page_alloc.rs b/src/arch/x86/paging/early_page_alloc.rs deleted file mode 100644 index 823f58a..0000000 --- a/src/arch/x86/paging/early_page_alloc.rs +++ /dev/null @@ -1,160 +0,0 @@ -use super::*; -use crate::memory::{AddressSpaceTrait, MappingFlags}; - -linker_symbol! { - kernel_reserved_end(KERNEL_RESERVED_END) => "kernel_reserved_end"; -} - -// * Allocation -struct EarlyPageAllocator { - alloc_start: PhysAddr, - boot_info: &'static multiboot2::BootInformation<'static>, -} - -impl EarlyPageAllocator { - /// Get next possibly free region of memory to try to fit page. 
Returns none if page fits - fn next_possibly_free(&self, addr: PhysAddr, page_size: PageSize) -> Option { - let start = addr.as_usize(); - let end = addr.as_usize() + page_size as usize; - - // * Check for overlap - // With kernel - if addr < kernel_virt2phys(kernel_reserved_end()) { - return Some(kernel_virt2phys(kernel_reserved_end())); - } - // With boot info structure - if start < self.boot_info.end_address() && end > self.boot_info.start_address() { - return Some(self.boot_info.end_address().into()); - } - - // * Check for memory region validity - for region in self.boot_info.memory_map_tag().unwrap().memory_areas() { - if start < region.end_address() as usize && end > region.start_address() as usize { - use multiboot2::MemoryAreaType; - match MemoryAreaType::from(region.typ()) { - MemoryAreaType::Available => (), - _ => return Some((region.end_address() as usize).into()), - } - } - } - None - } -} - -static EARLY_ALLOCATOR: spin::Mutex> = spin::Mutex::new(None); - -pub fn early_alloc_page(page_size: PageSize) -> PhysAddr { - let mut allocator = crate::sync::lock_nb(&EARLY_ALLOCATOR); - let Some(allocator) = allocator.as_mut() else { - panic!("Early allocator not available") - }; - let mut addr = allocator.alloc_start.align_up(page_size as usize); - while let Some(next) = allocator.next_possibly_free(addr, page_size) { - addr = next.align_up(page_size as usize); - } - allocator.alloc_start = addr + page_size as usize; - addr -} - -/// Setup page info table, responsible for page allocation -pub(super) fn setup_page_info_table(boot_info: &multiboot2::BootInformation) { - let kernel_address_space = VirtAddr::from_usize(&raw const KERNEL_TOP_LEVEL_PAGE_TABLE as _); - let kernel_address_space = AddressSpace(kernel_virt2phys(kernel_address_space)); - *crate::sync::lock_nb(&EARLY_ALLOCATOR) = Some(EarlyPageAllocator { - alloc_start: kernel_virt2phys(kernel_reserved_end()), - boot_info: unsafe { - core::mem::transmute::<&multiboot2::BootInformation, &multiboot2::BootInformation>( - boot_info, - ) - }, - }); - - // Get the address limit (last usable physical address) - let mut address_limit = 0; - let memory_map_tag = boot_info - .memory_map_tag() - .expect("Memory map not available"); - for region in memory_map_tag.memory_areas() { - use multiboot2::MemoryAreaType; - let typ = MemoryAreaType::from(region.typ()); - if typ == MemoryAreaType::Available { - address_limit = address_limit.max(region.end_address()); - } - } - - if address_limit > (usize::MAX as u64) { - panic!( - "Kernel address size can't handle {} of memory", - crate::memory::FormatSize(address_limit) - ); - } - - let page_info_table_entries = (address_limit / memory_addr::PAGE_SIZE_4K as u64) as usize; - let page_info_table_size = - page_info_table_entries * core::mem::size_of::(); - - // Allocate and map page info table - let page_info_table_address = kernel_reserved_end().align_up_4k(); - let page_info_table_address = kernel_address_space - .map_alloc( - page_info_table_address, - memory_addr::align_up_4k(page_info_table_size), - MappingFlags::PRESENT | MappingFlags::READ | MappingFlags::WRITE | MappingFlags::GLOBAL, - ) - .unwrap(); - - let page_info_table: &[crate::memory::PageInfo] = unsafe { - core::slice::from_raw_parts(page_info_table_address.as_ptr_of(), page_info_table_entries) - }; - - // Initialize page info table - // TODO: Speed up - let mut early_alloc = crate::sync::lock_nb(&EARLY_ALLOCATOR); - for (index, entry) in page_info_table.iter().enumerate() { - entry.reset(); - let addr = PhysAddr::from_usize(index * 
PageSize::min() as usize); - if addr < early_alloc.as_ref().unwrap().alloc_start { - entry.acquire(); - } else if early_alloc - .as_ref() - .unwrap() - .next_possibly_free(addr, PageSize::min()) - .is_some() - { - entry.acquire(); - } - } - - *crate::memory::PAGE_INFO_TABLE.try_write().unwrap() = page_info_table; - early_alloc.take(); - drop(early_alloc); - - // TODO: Free boot info and bootstrap code - - // TEST - let test = 0xc0801000 as *mut u32; - let test = kernel_address_space - .map_alloc( - VirtAddr::from_mut_ptr_of(test), - PageSize::min() as _, - MappingFlags::PRESENT | MappingFlags::READ | MappingFlags::WRITE, - ) - .unwrap() - .as_mut_ptr_of(); - crate::println!("Mapped!"); - unsafe { - *test = 42; - }; - crate::println!("Wrote!"); - crate::println!("Testing page mapping: {}", unsafe { *test }); - kernel_address_space.unmap_free(VirtAddr::from_mut_ptr_of(test), PageSize::min() as _); - // kernel_address_space - // .unmap_page( - // kernel_address_space.top_layer(), - // VirtAddr::from_mut_ptr_of(test_r), - // PageSize::Size4K, - // ) - // .unwrap(); - // crate::println!("Testing page unmapping (you should see a page fault)..."); - // crate::println!("If you see this everything broke: {}", unsafe { *test_r }); -} diff --git a/src/memory/address_space/nested_page_table.rs b/src/memory/address_space/nested_page_table.rs index 36a5b49..cc8f4ad 100644 --- a/src/memory/address_space/nested_page_table.rs +++ b/src/memory/address_space/nested_page_table.rs @@ -38,20 +38,11 @@ pub trait NestedPageTableLevel: Clone + Sized { alloc: &impl PageAllocatorTrait, ) -> MappingResult<()> { if self.page_size() == Some(page_size) { - crate::println!( - "Mapping {vaddr:?} to {paddr:?} with flags {:#b}", - flags.bits() - ); self.set_entry(vaddr, PageTableEntry::Page(paddr, flags)) } else { let entry = self.get_entry(vaddr)?; let next_level = match entry { PageTableEntry::Page(addr, flags) => { - crate::println!( - "Going into a page entry w/ addr {:?} and flags {:#x}", - addr, - flags.bits() - ); if flags.contains(MappingFlags::PRESENT) { return Err(MappingError::MappingOver(addr)); } @@ -61,13 +52,7 @@ pub trait NestedPageTableLevel: Clone + Sized { self.set_entry(vaddr, PageTableEntry::Level(level.clone()))?; level } - PageTableEntry::Level(level) => { - // crate::println!( - // "Going into a level with page size {:?}", - // level.page_size().map(|v| v.into()) - // ); - level - } + PageTableEntry::Level(level) => level, }; next_level.map_page(vaddr, paddr, page_size, flags, alloc) } From 6f0c52fd8abdb45ed98d020a4500e85847094454 Mon Sep 17 00:00:00 2001 From: InfiniteCoder Date: Sun, 9 Feb 2025 17:27:37 +0300 Subject: [PATCH 7/9] Page unmapping --- src/arch/x86/paging/address_space.rs | 4 +- src/arch/x86/paging/mod.rs | 6 +- src/memory/address_space.rs | 4 +- src/memory/address_space/nested_page_table.rs | 56 ++++++++++++++++++- src/memory/page_allocator/zoned_buddy.rs | 12 +++- 5 files changed, 72 insertions(+), 10 deletions(-) diff --git a/src/arch/x86/paging/address_space.rs b/src/arch/x86/paging/address_space.rs index 26739c2..e425cb6 100644 --- a/src/arch/x86/paging/address_space.rs +++ b/src/arch/x86/paging/address_space.rs @@ -138,8 +138,8 @@ impl NestedPageTable for AddressSpace { impl NestedPageTableLevel for PageTableLevel { type PageSize = PageSize; - fn page_size(&self) -> Option { - PageSize::try_from(1 << self.1).ok() + fn region_size(&self) -> usize { + 1 << self.1 } fn new_sublevel(&self, alloc: &impl PageAllocatorTrait) -> Option { diff --git a/src/arch/x86/paging/mod.rs 
b/src/arch/x86/paging/mod.rs index a10c7d3..f4de066 100644 --- a/src/arch/x86/paging/mod.rs +++ b/src/arch/x86/paging/mod.rs @@ -125,7 +125,11 @@ pub(super) fn setup_paging(boot_info: &multiboot2::BootInformation) { }; crate::println!("Wrote!"); crate::println!("Testing page mapping: {}", unsafe { *test }); - // kernel_address_space.unmap_free(VirtAddr::from_mut_ptr_of(test), PageSize::min() as _); + kernel_address_space + .unmap_free(VirtAddr::from_mut_ptr_of(test), 4096) + .unwrap(); + crate::println!("Testing page unmapping (You should see a page fault):"); + crate::println!("Huh? {}", unsafe { *test }); } macro_rules! linker_symbol { diff --git a/src/memory/address_space.rs b/src/memory/address_space.rs index 8f89ac8..b09356d 100644 --- a/src/memory/address_space.rs +++ b/src/memory/address_space.rs @@ -1,5 +1,5 @@ use super::{PageAllocatorTrait, PageSizeTrait}; -use memory_addr::{MemoryAddr, PhysAddr, VirtAddr}; +use memory_addr::{PhysAddr, VirtAddr}; bitflags::bitflags! { /// Generic page table entry flags that indicate the corresponding mapped @@ -69,6 +69,6 @@ pub trait AddressSpaceTrait { alloc: &impl PageAllocatorTrait, ) -> MappingResult; - /// TODO: Doc + /// Unmap a region of memory from the address space and mark it as free fn unmap_free(&self, vaddr: VirtAddr, size: usize) -> MappingResult<()>; } diff --git a/src/memory/address_space/nested_page_table.rs b/src/memory/address_space/nested_page_table.rs index cc8f4ad..c6074a7 100644 --- a/src/memory/address_space/nested_page_table.rs +++ b/src/memory/address_space/nested_page_table.rs @@ -1,3 +1,5 @@ +use memory_addr::MemoryAddr; + use super::{MappingError, MappingFlags, MappingResult}; use super::{PageAllocatorTrait, PageSizeTrait}; use super::{PhysAddr, VirtAddr}; @@ -10,13 +12,24 @@ pub enum PageTableEntry { Page(PhysAddr, MappingFlags), } +impl PageTableEntry { + const NULL: Self = Self::Page(PhysAddr::from_usize(0), MappingFlags::empty()); +} + /// A single level of a nested page table /// (underlying type should be something like a pointer that's freely cloneable) pub trait NestedPageTableLevel: Clone + Sized { type PageSize: PageSizeTrait; + /// Get the size of a page/page table of this layer, similar to page_size, but + /// returns the memory region that a sub-level page table manages if page can't + /// be mapped here + fn region_size(&self) -> usize; + /// Get page size of this layer, if a page can be mapped here - fn page_size(&self) -> Option; + fn page_size(&self) -> Option { + self.region_size().try_into().ok() + } /// Allocate a new page table level, that's gonna come after this one fn new_sublevel(&self, alloc: &impl PageAllocatorTrait) -> Option; @@ -57,6 +70,44 @@ pub trait NestedPageTableLevel: Clone + Sized { next_level.map_page(vaddr, paddr, page_size, flags, alloc) } } + + fn unmap_free(&self, vaddr: VirtAddr, size: usize) -> MappingResult<()> { + let region_size = self.region_size(); + let start = vaddr.align_down(region_size); + let end = (vaddr + size).align_up(region_size); + for page in (start.as_usize()..end.as_usize()).step_by(region_size) { + let page = VirtAddr::from(page); + let entry = self.get_entry(page)?; + if page < vaddr || page + region_size > vaddr + size { + match entry { + PageTableEntry::Level(level) => { + if page < vaddr { + level.unmap_free(vaddr, page + region_size - vaddr)?; + } else { + level.unmap_free(page, vaddr + size - page)?; + } + } + PageTableEntry::Page(paddr, flags) => { + if flags.contains(MappingFlags::PRESENT) { + return 
Err(MappingError::UnmappingPartOfLargePage(paddr)); + } + } + } + } else { + match entry { + PageTableEntry::Level(level) => { + level.unmap_free(page, region_size)?; + } + PageTableEntry::Page(_, flags) => { + if flags.contains(MappingFlags::PRESENT) { + self.set_entry(page, PageTableEntry::NULL)?; + } + } + } + } + } + Ok(()) + } } /// Implementation of [`super::AddressSpaceTrait`] for a nested page table @@ -115,7 +166,6 @@ pub trait NestedPageTable { /// Implementation of [`super::AddressSpaceTrait::unmap_free`] fn unmap_free(&self, vaddr: VirtAddr, size: usize) -> MappingResult<()> { - todo!(); - Ok(()) + self.top_level().unmap_free(vaddr, size) } } diff --git a/src/memory/page_allocator/zoned_buddy.rs b/src/memory/page_allocator/zoned_buddy.rs index ab422d7..5755edc 100644 --- a/src/memory/page_allocator/zoned_buddy.rs +++ b/src/memory/page_allocator/zoned_buddy.rs @@ -1,3 +1,5 @@ +use core::alloc::AllocError; + use super::{PageAllocatorTrait, PageSizeTrait, PhysAddr}; use crate::sync::RwLock; @@ -33,7 +35,7 @@ impl ZonedBuddy { } } - pub fn add_zone(&self, start: usize, size: usize) -> Result<(), ()> { + pub fn add_zone(&self, start: usize, size: usize) -> Result<(), AllocError> { debug_assert!( start % BLOCK_SIZE == 0, "zone is not aligned ({:#x})", @@ -59,7 +61,7 @@ impl ZonedBuddy { size / BLOCK_SIZE, &alloc::alloc::Global, ) - .ok_or(())?, + .ok_or(AllocError)?, }); } Ok(()) @@ -86,6 +88,12 @@ impl ZonedBuddy { } } +impl Default for ZonedBuddy { + fn default() -> Self { + Self::new() + } +} + impl PageAllocatorTrait for ZonedBuddy { From 77a1d4816ee517699be0b736075a8747278b6327 Mon Sep 17 00:00:00 2001 From: InfiniteCoder Date: Mon, 17 Feb 2025 16:52:32 +0300 Subject: [PATCH 8/9] Page freeing!!! --- README.md | 5 + a.out | Bin 56784 -> 61248 bytes bochsrc | 3 + build.sh | 21 ++-- bx_enh_dbg.ini | 26 +++++ flake.nix | 2 +- rust-toolchain.toml | 2 +- src/arch/x86/interrupts.rs | 20 ++-- src/arch/x86/mod.rs | 6 +- src/arch/x86/paging/address_space.rs | 100 +++--------------- src/arch/x86/paging/mod.rs | 57 +++++----- src/arch/x86/x32/bootstrap.S | 76 +++++++++++-- src/arch/x86/x32/linker.ld | 6 +- src/arch/x86/x32/target.json | 2 +- src/memory/address_space.rs | 7 +- src/memory/address_space/nested_page_table.rs | 81 +++++++++----- src/memory/page_allocator/zoned_buddy.rs | 29 ++++- 17 files changed, 268 insertions(+), 175 deletions(-) create mode 100644 bochsrc create mode 100644 bx_enh_dbg.ini diff --git a/README.md b/README.md index 792752f..5cef7e4 100644 --- a/README.md +++ b/README.md @@ -9,3 +9,8 @@ Then, run the build script by issuing `./build.sh` | x86 (i486) | Works | | | x86 (i386) | Unsupported | TLB flushing | | x86_64 | TODO | | + +## Help!!! 
+Here are some things you could help with: +- Find a way to make freeing in ZonedBuddy page allocator checked (you can free a random page rigth now and it won't even panic) +- Find a way to also check freeing in NestedPageTable diff --git a/a.out b/a.out index f340bd2467415e1e6267c83ee00ea7ba42b12a26..fb0980d9e2ec31d5e3735301b5df5759f4e185d0 100644 GIT binary patch delta 938 zcmZ{hKWI}?6o=2tdudIZ($t2g7OhE1V;V)GL$E?g(m`+#GdPHYw!s=&uqjFQM4h@> zj)H@mAUcLD(ts_f2qKt=*8CT(ph??7p-yV!IqxQN5xm2>-}%n(y?c{a$gArg)Z1PY z>|}kF^AG$sTTYAI&78{vt!32=Hh^2tv}N@PKnSMT_TN0B06b+RX@fPUyh^2Uw$*AG z!8|3Paayz)d{yuaSVhB88a96Kp`x#uo1Z9%llmDYiWz+*ozkwoG!JEZA;~aIV3-kt z1e=2dvkapIOAJl|C&L(l5q1;A*?2nO?PPoeQ63#5m}fXn5M%HYxKwR>FZW#EH7PGp zf9FU~^6!w=oY8kf;2*!(_76=`m@q@$1|;ckl-K=^kH{8am;8R{2D{Kbf2XS$-f$>y zjQZKA-;DZ`tGSC$SP2iBM+MkX-)JaFTz!AJe>xsdB~yvnr3>-+^n45liVJCOO7k$)}u@iz1JR`MRf z#`{Z{Ejtd>|1C^KAC|mJ@+XAv;_nB( z+@XqHQ6hM<8ZddAxKIr$Gb`aCT(3?jGq}Yq`f5STNAZS7CQQ@ kEok2qYM&<6)SGXseissI1~2Z}i` z>}CXU!=T*B&mO1=YCt$3r9jCSlK&^mKHSgp`X2+s<|B{3FgePCv>*Y0FrS@43Qar$ zD!v3wJZrMxGjYi_pr`-?6UYrH;N)b-XX1=YCKm$99bob(n7jZcZ-dFtAac_4myCv! zUp#aTK1&9I57$-k^YsUHFI@G+$itj8KFHVknXD@OQB6I_kK!AW7h!rNUeW%TJ;VM{w U;TKTvJ2Os=TM%yg /dev/null; then @@ -27,7 +28,7 @@ fi # Utils build() { if ! cargo build; then - return -1 + return 1 fi KERNEL="target/target/debug/satan" @@ -44,12 +45,20 @@ build() { } run() { - qemu-system-$QEMU_SYSTEM -d guest_errors -no-reboot -cdrom bin/os.iso + if [ $EMULATOR = "bochs" ]; then + bochs -q + else + qemu-system-$QEMU_SYSTEM -d guest_errors -no-reboot -cdrom bin/os.iso + fi } debug() { - qemu-system-$QEMU_SYSTEM -d guest_errors -no-reboot -cdrom bin/os.iso -s -S & - rust-gdb target/target/debug/satan -x gdbinit + if [ $EMULATOR = "bochs" ]; then + bochs -q + else + qemu-system-$QEMU_SYSTEM -d guest_errors -no-reboot -cdrom bin/os.iso -s -S & + rust-gdb target/target/debug/satan -x gdbinit + fi } clean() { diff --git a/bx_enh_dbg.ini b/bx_enh_dbg.ini new file mode 100644 index 0000000..04d7997 --- /dev/null +++ b/bx_enh_dbg.ini @@ -0,0 +1,26 @@ +# bx_enh_dbg_ini +SeeReg[0] = TRUE +SeeReg[1] = TRUE +SeeReg[2] = TRUE +SeeReg[3] = TRUE +SeeReg[4] = FALSE +SeeReg[5] = FALSE +SeeReg[6] = FALSE +SeeReg[7] = FALSE +SingleCPU = FALSE +ShowIOWindows = TRUE +ShowButtons = TRUE +SeeRegColors = TRUE +ignoreNxtT = TRUE +ignSSDisasm = TRUE +UprCase = 0 +DumpInAsciiMode = 3 +isLittleEndian = TRUE +DefaultAsmLines = 512 +DumpWSIndex = 0 +DockOrder = 0x123 +ListWidthPix[0] = 180 +ListWidthPix[1] = 245 +ListWidthPix[2] = 280 +MainWindow = 0, 0, 709, 500 +FontName = Normal diff --git a/flake.nix b/flake.nix index c8ee1cf..c50f8bf 100644 --- a/flake.nix +++ b/flake.nix @@ -18,7 +18,7 @@ { devShell = pkgs.mkShell { buildInputs = with pkgs; [ - qemu + qemu bochs libisoburn mtools pkgs-crosssystem.buildPackages.grub2 pkgs-crosssystem.buildPackages.gcc diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 0543d26..e29af69 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "nightly-2024-12-11" +channel = "nightly-2025-02-17" components = ["rust-src", "rust-analyzer"] diff --git a/src/arch/x86/interrupts.rs b/src/arch/x86/interrupts.rs index 75d0856..ea25054 100644 --- a/src/arch/x86/interrupts.rs +++ b/src/arch/x86/interrupts.rs @@ -31,16 +31,16 @@ struct InterruptStackFrame { /// Central interrupt handler, all interrupts come here specifying an interrupt number fn interrupt_handler(interrupt: u8, error_code: usize) { + if interrupt == 0x20 { + // Timer + return; + } if interrupt == 0x0E { 
// Page fault crate::println!("Page fault!\nError code:\n{:#032b}", error_code); crate::println!(" ^ ^^IRUWP"); crate::println!(" SGX SSPK "); - return; - } - if interrupt == 0x20 { - // Timer - return; + panic!("Halt"); } if interrupt == 0x21 { // Keyboard @@ -48,7 +48,15 @@ fn interrupt_handler(interrupt: u8, error_code: usize) { crate::println!("Keyboard: {}", scancode); return; } - + if interrupt <= 0x08 { + panic!("Double fault!!!\nError code: {:#x}", error_code); + } + if interrupt <= 0x1F { + panic!( + "Unhandled exception: {:#x}\nError code: {:#x}", + interrupt, error_code + ); + } loop {} } diff --git a/src/arch/x86/mod.rs b/src/arch/x86/mod.rs index c5239cc..ed5c1b7 100644 --- a/src/arch/x86/mod.rs +++ b/src/arch/x86/mod.rs @@ -33,7 +33,9 @@ mod allocator { /// after assembly bootstrap setus up GDT and higher-half address space #[no_mangle] pub extern "cdecl" fn ksetup(mb_magic: u32, mbi_ptr: u32) -> ! { - // loop {} + crate::println!("Hello, SATAN!"); + interrupts::setup(); + let boot_info = if mb_magic == multiboot2::MAGIC { let boot_info = unsafe { multiboot2::BootInformation::load(mbi_ptr as *const multiboot2::BootInformationHeader) @@ -52,8 +54,6 @@ pub extern "cdecl" fn ksetup(mb_magic: u32, mbi_ptr: u32) -> ! { ); }; - crate::println!("Hello, SATAN!"); - interrupts::setup(); paging::setup_paging(&boot_info); loop {} diff --git a/src/arch/x86/paging/address_space.rs b/src/arch/x86/paging/address_space.rs index e425cb6..3f6b971 100644 --- a/src/arch/x86/paging/address_space.rs +++ b/src/arch/x86/paging/address_space.rs @@ -53,86 +53,6 @@ impl NestedPageTable for AddressSpace { fn top_level(&self) -> Self::Level { self.0.clone() } - - // fn unset_entry(layer: Self::Layer, vaddr: VirtAddr, page_size: PageSize) -> MappingResult<()> { - // debug_assert_eq!(1usize << layer.1, page_size as usize); - // let mut entry = Self::get_entry(&layer, vaddr); - // if !entry.flags().contains(PTEFlags::P) { - // return Err(MappingError::UnmappingNotMapped(vaddr)); - // } - // *entry = PTEntry::NULL; - // flush_tlb(vaddr); - // Ok(()) - // } - - // fn next_layer(layer: Self::Layer, vaddr: VirtAddr, map: bool) -> MappingResult { - // let entry = Self::get_entry(&layer, vaddr); - - // if entry.flags().contains(PTEFlags::P | PTEFlags::PS) { - // if map { - // return Err(MappingError::MappingOver(entry.address())); - // } else { - // return Err(MappingError::UnmappingPartOfLargePage(entry.address())); - // } - // } - - // let entry = if !entry.flags().contains(PTEFlags::P) { - // drop(entry); - // if !map { - // return Err(MappingError::UnmappingNotMapped(vaddr)); - // } - - // // Create a new page table - // let page_table_addr = crate::memory::PAGE_ALLOCATOR - // .alloc(PageSize::min()) - // .unwrap(); - // let mut page_table = tmp_page::map::(page_table_addr); - - // // Clear the page table - // for index in 0..PAGE_TABLE_ENTRIES { - // page_table[index] = PTEntry::NULL; - // } - - // drop(page_table); - - // // Set the entry to this page table - // let mut entry = Self::get_entry(&layer, vaddr); - // *entry = PTEntry::new_page_table(page_table_addr); - // entry - // } else { - // entry - // }; - - // Ok((entry.address(), layer.1 - PAGE_LEVEL_BITS)) - // } - - // fn top_layer(&self) -> Self::Layer { - // #[cfg(target_arch = "x86")] - // return (self.0, 22); - // #[cfg(target_arch = "x86_64")] - // return (self.0, 39); - // } - - // /// Decrement reference count of all pages related to this one - // fn free_page(&self, layer: &Self::Layer, vaddr: VirtAddr) -> MappingResult<()> { - // let mut 
entry = Self::get_entry(&layer, vaddr); - // if !entry.flags().contains(PTEFlags::P) { - // return Ok(()); - // } - - // // if !entry.flags().contains(PTEFlags::PS) && page_info(entry.address()).uses() { - // // for page in 0..Self::page_size(layer) / PageSize::min() as usize { - // // // - // // } - // // } - // // free_page( - // // entry.address(), - // // PageSize::from_usize(Self::page_size(layer)).unwrap(), - // // ); - // *entry = PTEntry::NULL; - // flush_tlb(vaddr); - // Ok(()) - // } } impl NestedPageTableLevel for PageTableLevel { @@ -154,6 +74,15 @@ impl NestedPageTableLevel for PageTableLevel { Some(PageTableLevel(addr, self.1 - super::PAGE_LEVEL_BITS)) } + fn free_sublevel( + &self, + sublevel: Self, + alloc: &impl PageAllocatorTrait, + ) -> MappingResult<()> { + alloc.free(sublevel.0, PageSize::Size4K); + Ok(()) + } + fn set_entry( &self, vaddr: VirtAddr, @@ -205,12 +134,17 @@ impl AddressSpaceTrait for AddressSpace { vaddr: VirtAddr, size: usize, flags: if_entry::MappingFlags, - alloc: &impl crate::memory::PageAllocatorTrait, + alloc: &impl PageAllocatorTrait, ) -> MappingResult { ::map_alloc(self, vaddr, size, flags, alloc) } - fn unmap_free(&self, vaddr: VirtAddr, size: usize) -> MappingResult<()> { - ::unmap_free(self, vaddr, size) + fn unmap_free( + &self, + vaddr: VirtAddr, + size: usize, + alloc: &impl PageAllocatorTrait, + ) -> MappingResult<()> { + ::unmap_free(self, vaddr, size, alloc) } } diff --git a/src/arch/x86/paging/mod.rs b/src/arch/x86/paging/mod.rs index f4de066..6e784ce 100644 --- a/src/arch/x86/paging/mod.rs +++ b/src/arch/x86/paging/mod.rs @@ -64,17 +64,6 @@ fn flush_tlb(address: VirtAddr) { /// Setup paging pub(super) fn setup_paging(boot_info: &multiboot2::BootInformation) { - // Enable PSE - unsafe { - core::arch::asm!( - "mov %cr4, {tmp}", - "or $0x10, {tmp}", - "mov {tmp}, %cr4", - tmp = out(reg) _, - options(att_syntax) - ); - } - let page_allocator = PageAllocator::new(); let kernel_address_space = VirtAddr::from_usize(&raw const KERNEL_TOP_LEVEL_PAGE_TABLE as _); let kernel_address_space = AddressSpace::from_paddr(kernel_virt2phys(kernel_address_space)); @@ -84,29 +73,30 @@ pub(super) fn setup_paging(boot_info: &multiboot2::BootInformation) { .add_zone(kernel_virt2phys(kernel_reserved_end()).as_usize(), 0x16000) .unwrap(); - // Add zones to the page allocator - let memory_map_tag = boot_info - .memory_map_tag() - .expect("Memory map not available"); - for region in memory_map_tag.memory_areas() { - use multiboot2::MemoryAreaType; - let typ = MemoryAreaType::from(region.typ()); - if typ == MemoryAreaType::Available { - // if page_allocator - // .add_zone( - // region.start_address() as _, - // memory_addr::align_down_4k(region.size() as _), - // ) - // .is_err() - // { - // crate::println!("Failed to add some memory zones"); - // } - } - } + // // Add zones to the page allocator + // let memory_map_tag = boot_info + // .memory_map_tag() + // .expect("Memory map not available"); + // for region in memory_map_tag.memory_areas() { + // use multiboot2::MemoryAreaType; + // let typ = MemoryAreaType::from(region.typ()); + // if typ == MemoryAreaType::Available { + // // if page_allocator + // // .add_zone( + // // region.start_address() as _, + // // memory_addr::align_down_4k(region.size() as _), + // // ) + // // .is_err() + // // { + // // crate::println!("Failed to add some memory zones"); + // // } + // } + // } // TODO: Free boot info and bootstrap code // TEST + crate::println!("Total memory: {}", page_allocator.total_memory()); use 
crate::memory::MappingFlags; use crate::memory::{AddressSpaceTrait, PageSizeTrait}; let test = 0xc0801000 as *mut u32; @@ -120,14 +110,19 @@ pub(super) fn setup_paging(boot_info: &multiboot2::BootInformation) { .unwrap() .as_mut_ptr_of(); crate::println!("Mapped!"); + crate::println!("Allocated memory: {}", page_allocator.allocated_memory()); unsafe { *test = 42; }; crate::println!("Wrote!"); crate::println!("Testing page mapping: {}", unsafe { *test }); kernel_address_space - .unmap_free(VirtAddr::from_mut_ptr_of(test), 4096) + .unmap_free(VirtAddr::from_mut_ptr_of(test), 4096, &page_allocator) .unwrap(); + crate::println!( + "Allocated memory after freeing: {}", + page_allocator.allocated_memory() + ); crate::println!("Testing page unmapping (You should see a page fault):"); crate::println!("Huh? {}", unsafe { *test }); } diff --git a/src/arch/x86/x32/bootstrap.S b/src/arch/x86/x32/bootstrap.S index 4726aee..c5cd2cc 100644 --- a/src/arch/x86/x32/bootstrap.S +++ b/src/arch/x86/x32/bootstrap.S @@ -68,7 +68,8 @@ kernel_top_level_page_table: .fill 1024, 4, 0 kernel_page_table_bootstrap: .fill 1024, 4, 0 -kernel_page_table1_higher_half: +kernel_page_tables_higher_half: + .fill 1024, 4, 0 .fill 1024, 4, 0 .global kernel_tmp_page_entry_address @@ -77,10 +78,10 @@ kernel_tmp_page_entry_address: .section .stack, "aw" bootstrap_stack: - .skip 16384 + .skip 0x4000 bootstrap_stack_top: tss_stack: - .skip 16384 + .skip 0x4000 tss_stack_top: .section .bootstrap, "ax" @@ -102,6 +103,7 @@ mmap: jb mmap ret +.extern kernel_bootstrap_end .extern kernel_start .extern data_start .extern kernel_end @@ -147,11 +149,35 @@ after_gdt: or $0b00000011, %eax mov %eax, kernel_top_level_page_table - KERNEL_OFFSET - # Map first page table (4MB) in the higher half of the address space - mov $kernel_page_table1_higher_half - KERNEL_OFFSET, %eax + # Map some more of lower pages + mov $0x400000, %esi + mov $kernel_top_level_page_table - KERNEL_OFFSET + 4, %edi + +map_lower.loop: + mov %esi, %eax + or $0b10000011, %eax + mov %eax, (%edi) + + add $0x400000, %esi + add $4, %edi + cmp $0x800000, %esi + ja map_lower.loop + + # Map first 2 page tables into the higher half of the address space + mov $kernel_page_tables_higher_half - KERNEL_OFFSET, %eax or $0b00000011, %eax - mov %eax, kernel_top_level_page_table - KERNEL_OFFSET + KERNEL_OFFSET / 0x400000 * 4 + mov $kernel_top_level_page_table - KERNEL_OFFSET + KERNEL_OFFSET / 0x400000 * 4, %ebx + mov %eax, (%ebx) + add $0x1000, %eax + add $4, %ebx + mov %eax, (%ebx) + + # Check if bootstrap fits + mov $0x400000, %eax + cmp $kernel_bootstrap_end, %eax + jb kernelBootstrapTooBig + mov $0, %esi # Start address mov $0x100000, %eax # End address mov $kernel_page_table_bootstrap - KERNEL_OFFSET, %edi # Page table address @@ -162,7 +188,7 @@ after_gdt: call mmap # Check if kernel fits - mov $KERNEL_OFFSET + 0x400000, %eax + mov $KERNEL_OFFSET + 0x800000, %eax cmp $kernel_reserved_end, %eax jb kernelTooBig @@ -171,7 +197,7 @@ after_gdt: # Compute offset into page table mov $kernel_start - KERNEL_OFFSET, %edi shr $10, %edi - add $kernel_page_table1_higher_half - KERNEL_OFFSET, %edi # Add to the page table address + add $kernel_page_tables_higher_half - KERNEL_OFFSET, %edi # Add to the page table address mov $0b100000001, %ebx # Flags call mmap mov $kernel_end - KERNEL_OFFSET, %eax # End address @@ -181,9 +207,14 @@ after_gdt: # Setup tmp page mov $kernel_tmp_page_address - KERNEL_OFFSET, %edi shr $10, %edi - add $kernel_page_table1_higher_half, %edi # Add to the page table address + add 
$kernel_page_tables_higher_half, %edi # Add to the page table address mov %edi, kernel_tmp_page_entry_address - KERNEL_OFFSET + # Enable PSE + mov %cr4, %eax + or $0x10, %eax + mov %eax, %cr4 + # Enable paging mov $kernel_top_level_page_table - KERNEL_OFFSET, %eax mov %eax, %cr3 @@ -217,5 +248,32 @@ kernelTooBig: movw $'i' | 0x0400, 0xb8018 movw $'g' | 0x0400, 0xb801a movw $'!' | 0x0400, 0xb801c + jmp kernelTooBig.loop +kernelBootstrapTooBig: + movw $'K' | 0x0400, 0xb8000 + movw $'e' | 0x0400, 0xb8002 + movw $'r' | 0x0400, 0xb8004 + movw $'n' | 0x0400, 0xb8006 + movw $'e' | 0x0400, 0xb8008 + movw $'l' | 0x0400, 0xb800a + movw $' ' | 0x0400, 0xb800c + movw $'b' | 0x0400, 0xb800e + movw $'o' | 0x0400, 0xb8010 + movw $'o' | 0x0400, 0xb8012 + movw $'t' | 0x0400, 0xb8014 + movw $'s' | 0x0400, 0xb8016 + movw $'t' | 0x0400, 0xb8018 + movw $'r' | 0x0400, 0xb801a + movw $'a' | 0x0400, 0xb801c + movw $'p' | 0x0400, 0xb801e + movw $' ' | 0x0400, 0xb8020 + movw $'t' | 0x0400, 0xb8022 + movw $'o' | 0x0400, 0xb8024 + movw $'o' | 0x0400, 0xb8026 + movw $' ' | 0x0400, 0xb8028 + movw $'b' | 0x0400, 0xb802a + movw $'i' | 0x0400, 0xb802c + movw $'g' | 0x0400, 0xb802e + movw $'!' | 0x0400, 0xb8030 kernelTooBig.loop: jmp kernelTooBig.loop diff --git a/src/arch/x86/x32/linker.ld b/src/arch/x86/x32/linker.ld index 15f1dd7..6e2a95c 100644 --- a/src/arch/x86/x32/linker.ld +++ b/src/arch/x86/x32/linker.ld @@ -11,6 +11,8 @@ SECTIONS { *(.bootstrap) } + kernel_bootstrap_end = .; + . += KERNEL_OFFSET; /* Read-only code */ @@ -23,6 +25,7 @@ SECTIONS { /* Read-only data, page aligned to allow use of the no-execute feature */ .rodata ALIGN (4K) : AT (ADDR (.rodata) - KERNEL_OFFSET) { *(.rodata .rodata.*) + *(.got .got.*) } /* Read-write data, page aligned for the .padata section */ @@ -40,6 +43,5 @@ SECTIONS { kernel_tmp_page_address = ALIGN(4K); /* Add a symbol that indicates the end address of the space reserved for kernel. 
*/ - kernel_reserved_end = kernel_tmp_page_address + 4096; - + kernel_reserved_end = kernel_tmp_page_address + 4K; } diff --git a/src/arch/x86/x32/target.json b/src/arch/x86/x32/target.json index b856ce0..8d031fe 100644 --- a/src/arch/x86/x32/target.json +++ b/src/arch/x86/x32/target.json @@ -18,5 +18,5 @@ }, "panic-strategy": "abort", "disable-redzone": true, - "features": "-mmx,-sse,+soft-float" + "features": "-mmx,-sse" } diff --git a/src/memory/address_space.rs b/src/memory/address_space.rs index b09356d..3ae32fb 100644 --- a/src/memory/address_space.rs +++ b/src/memory/address_space.rs @@ -70,5 +70,10 @@ pub trait AddressSpaceTrait { ) -> MappingResult; /// Unmap a region of memory from the address space and mark it as free - fn unmap_free(&self, vaddr: VirtAddr, size: usize) -> MappingResult<()>; + fn unmap_free( + &self, + vaddr: VirtAddr, + size: usize, + alloc: &impl PageAllocatorTrait, + ) -> MappingResult<()>; } diff --git a/src/memory/address_space/nested_page_table.rs b/src/memory/address_space/nested_page_table.rs index c6074a7..2760ae4 100644 --- a/src/memory/address_space/nested_page_table.rs +++ b/src/memory/address_space/nested_page_table.rs @@ -14,6 +14,17 @@ pub enum PageTableEntry { impl PageTableEntry { const NULL: Self = Self::Page(PhysAddr::from_usize(0), MappingFlags::empty()); + + /// Returns true if the page entry is mapped to somewhere (maybe even swapped), false + /// if it is free + pub fn mapped(&self) -> bool { + match self { + PageTableEntry::Level(_) => true, + PageTableEntry::Page(phys_addr, mapping_flags) => { + mapping_flags.contains(MappingFlags::PRESENT) || phys_addr.as_usize() != 0 + } + } + } } /// A single level of a nested page table @@ -34,6 +45,14 @@ pub trait NestedPageTableLevel: Clone + Sized { /// Allocate a new page table level, that's gonna come after this one fn new_sublevel(&self, alloc: &impl PageAllocatorTrait) -> Option; + /// Free a page table level, that was a part of this page table. All it's sublevels + /// were already freed + fn free_sublevel( + &self, + sublevel: Self, + alloc: &impl PageAllocatorTrait, + ) -> MappingResult<()>; + /// Set an entry in this level. 
vaddr might not be aligned if entry /// is [`PageTableEntry::Level`] fn set_entry(&self, vaddr: VirtAddr, entry: PageTableEntry) -> MappingResult<()>; @@ -71,7 +90,12 @@ pub trait NestedPageTableLevel: Clone + Sized { } } - fn unmap_free(&self, vaddr: VirtAddr, size: usize) -> MappingResult<()> { + fn unmap_free( + &self, + vaddr: VirtAddr, + size: usize, + alloc: &impl PageAllocatorTrait, + ) -> MappingResult<()> { let region_size = self.region_size(); let start = vaddr.align_down(region_size); let end = (vaddr + size).align_up(region_size); @@ -82,9 +106,23 @@ pub trait NestedPageTableLevel: Clone + Sized { match entry { PageTableEntry::Level(level) => { if page < vaddr { - level.unmap_free(vaddr, page + region_size - vaddr)?; + level.unmap_free(vaddr, page + region_size - vaddr, alloc)?; } else { - level.unmap_free(page, vaddr + size - page)?; + level.unmap_free(page, vaddr + size - page, alloc)?; + } + let mut mapped = false; + for entry_addr in (page.as_usize()..page.as_usize() + region_size) + .step_by(level.region_size()) + { + let entry = level.get_entry(entry_addr.into())?; + if entry.mapped() { + mapped = true; + break; + } + } + if !mapped { + self.free_sublevel(level, alloc)?; + self.set_entry(page, PageTableEntry::NULL)?; } } PageTableEntry::Page(paddr, flags) => { @@ -96,10 +134,13 @@ pub trait NestedPageTableLevel: Clone + Sized { } else { match entry { PageTableEntry::Level(level) => { - level.unmap_free(page, region_size)?; + level.unmap_free(page, region_size, alloc)?; + self.free_sublevel(level, alloc)?; + self.set_entry(page, PageTableEntry::NULL)?; } - PageTableEntry::Page(_, flags) => { + PageTableEntry::Page(paddr, flags) => { if flags.contains(MappingFlags::PRESENT) { + alloc.free(paddr, self.page_size().unwrap()); self.set_entry(page, PageTableEntry::NULL)?; } } @@ -122,27 +163,6 @@ pub trait NestedPageTable { /// Get top level page table for this address space fn top_level(&self) -> Self::Level; - // /// Unmap a single (possibly large/huge) page or a whole page table of the same size. 
- // /// As a layer should take [`AddressSpaceTrait::top_layer`] - // /// DOES NOT FREE - // fn unmap_page( - // &self, - // layer: Self::Layer, - // vaddr: VirtAddr, - // page_size: PageSize, - // alloc: &impl PageAllocatorTrait, - // ) -> MappingResult<()> { - // if !vaddr.is_aligned(page_size.clone()) { - // return Err(MappingError::UnalignedVirtualAddress(vaddr)); - // } - - // if Self::page_size(&layer) == page_size.clone().into() { - // Self::set_entry(layer, vaddr, page_size) - // } else { - // self.unmap_page(Self::next_layer(layer, vaddr, None)?, vaddr, page_size) - // } - // } - /// Implementation of [`super::AddressSpaceTrait::map_alloc`] fn map_alloc( &self, @@ -165,7 +185,12 @@ pub trait NestedPageTable { } /// Implementation of [`super::AddressSpaceTrait::unmap_free`] - fn unmap_free(&self, vaddr: VirtAddr, size: usize) -> MappingResult<()> { - self.top_level().unmap_free(vaddr, size) + fn unmap_free( + &self, + vaddr: VirtAddr, + size: usize, + alloc: &impl PageAllocatorTrait, + ) -> MappingResult<()> { + self.top_level().unmap_free(vaddr, size, alloc) } } diff --git a/src/memory/page_allocator/zoned_buddy.rs b/src/memory/page_allocator/zoned_buddy.rs index 5755edc..2dfac3b 100644 --- a/src/memory/page_allocator/zoned_buddy.rs +++ b/src/memory/page_allocator/zoned_buddy.rs @@ -1,4 +1,5 @@ use core::alloc::AllocError; +use core::sync::atomic::AtomicUsize; use super::{PageAllocatorTrait, PageSizeTrait, PhysAddr}; use crate::sync::RwLock; @@ -13,6 +14,7 @@ impl lock_free_buddy_allocator::cpuid::Cpu for CpuId { struct Zone { start: usize, size: usize, + allocated: AtomicUsize, buddy: lock_free_buddy_allocator::buddy_alloc::BuddyAlloc< 'static, PAGE_SIZE, @@ -56,6 +58,7 @@ impl ZonedBuddy { self.zones.write().push(Zone { start, size, + allocated: AtomicUsize::new(0), buddy: lock_free_buddy_allocator::buddy_alloc::BuddyAlloc::new( start, size / BLOCK_SIZE, @@ -67,25 +70,45 @@ impl ZonedBuddy { Ok(()) } - fn alloc(&self, size: usize) -> Option { + pub fn alloc(&self, size: usize) -> Option { let blocks = size / BLOCK_SIZE; for zone in self.zones.read().iter() { if let Some(addr) = zone.buddy.alloc(blocks) { + zone.allocated + .fetch_add(size, core::sync::atomic::Ordering::SeqCst); return Some(PhysAddr::from_usize(addr)); } } None } - fn free(&self, allocation: PhysAddr, size: usize) { + pub fn free(&self, allocation: PhysAddr, size: usize) { let start = allocation.as_usize(); let blocks = size / BLOCK_SIZE; for zone in self.zones.read().iter() { - if start > zone.start && start + size < zone.start + zone.size { + if start >= zone.start && start + size <= zone.start + zone.size { zone.buddy.free(allocation.as_usize(), blocks); + zone.allocated + .fetch_sub(size, core::sync::atomic::Ordering::SeqCst); } } } + + /// Returns total amount of memory managed by the allocator. 
+ /// To get free space, use [`Self::total_memory`] - [`Self::allocated_memory`] + pub fn total_memory(&self) -> usize { + self.zones + .read() + .iter() + .fold(0, |acc, zone| acc + zone.size) + } + + /// Returns the amount of allocated memory + pub fn allocated_memory(&self) -> usize { + self.zones.read().iter().fold(0, |acc, zone| { + acc + zone.allocated.load(core::sync::atomic::Ordering::SeqCst) + }) + } } impl Default for ZonedBuddy { From da0b1dba6b1a923916cf94e44c65fc1e865b6b22 Mon Sep 17 00:00:00 2001 From: InfiniteCoder Date: Thu, 20 Feb 2025 19:48:04 +0300 Subject: [PATCH 9/9] [PAGING] properly add available memory --- bx_enh_dbg.ini | 26 --------------- rust-toolchain.toml | 2 +- src/arch/x86/mod.rs | 1 + src/arch/x86/paging/address_space.rs | 2 +- src/arch/x86/paging/mod.rs | 47 ++++++++++++++-------------- 5 files changed, 27 insertions(+), 51 deletions(-) delete mode 100644 bx_enh_dbg.ini diff --git a/bx_enh_dbg.ini b/bx_enh_dbg.ini deleted file mode 100644 index 04d7997..0000000 --- a/bx_enh_dbg.ini +++ /dev/null @@ -1,26 +0,0 @@ -# bx_enh_dbg_ini -SeeReg[0] = TRUE -SeeReg[1] = TRUE -SeeReg[2] = TRUE -SeeReg[3] = TRUE -SeeReg[4] = FALSE -SeeReg[5] = FALSE -SeeReg[6] = FALSE -SeeReg[7] = FALSE -SingleCPU = FALSE -ShowIOWindows = TRUE -ShowButtons = TRUE -SeeRegColors = TRUE -ignoreNxtT = TRUE -ignSSDisasm = TRUE -UprCase = 0 -DumpInAsciiMode = 3 -isLittleEndian = TRUE -DefaultAsmLines = 512 -DumpWSIndex = 0 -DockOrder = 0x123 -ListWidthPix[0] = 180 -ListWidthPix[1] = 245 -ListWidthPix[2] = 280 -MainWindow = 0, 0, 709, 500 -FontName = Normal diff --git a/rust-toolchain.toml b/rust-toolchain.toml index e29af69..20713ce 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "nightly-2025-02-17" +channel = "nightly" components = ["rust-src", "rust-analyzer"] diff --git a/src/arch/x86/mod.rs b/src/arch/x86/mod.rs index ed5c1b7..756bb1c 100644 --- a/src/arch/x86/mod.rs +++ b/src/arch/x86/mod.rs @@ -20,6 +20,7 @@ mod allocator { const SIZE: usize = 0x1000; static mut ARENA: [u8; SIZE] = [0; SIZE]; + // TODO: Use system allocator on OOM #[global_allocator] static ALLOCATOR: talc::Talck, talc::ClaimOnOom> = talc::Talc::new(unsafe { // if we're in a hosted environment, the Rust runtime may allocate before diff --git a/src/arch/x86/paging/address_space.rs b/src/arch/x86/paging/address_space.rs index 3f6b971..d8caca5 100644 --- a/src/arch/x86/paging/address_space.rs +++ b/src/arch/x86/paging/address_space.rs @@ -41,7 +41,7 @@ impl PageTableLevel { let page_table = tmp_page::map::(self.0); let mask = super::PAGE_TABLE_ENTRIES - 1; - let index = vaddr.as_usize() >> self.1 & mask; + let index = (vaddr.as_usize() >> self.1) & mask; crate::sync::MappedLockGuard::map(page_table, |page_table| &mut page_table[index]) } } diff --git a/src/arch/x86/paging/mod.rs b/src/arch/x86/paging/mod.rs index 6e784ce..2db1115 100644 --- a/src/arch/x86/paging/mod.rs +++ b/src/arch/x86/paging/mod.rs @@ -25,6 +25,7 @@ extern "C" { linker_symbol! 
{ kernel_offset(KERNEL_OFFSET_SYMBOL) => "KERNEL_OFFSET"; + kernel_end(KERNEL_END) => "kernel_end"; kernel_reserved_end(KERNEL_RESERVED_END) => "kernel_reserved_end"; } @@ -68,30 +69,30 @@ pub(super) fn setup_paging(boot_info: &multiboot2::BootInformation) { let kernel_address_space = VirtAddr::from_usize(&raw const KERNEL_TOP_LEVEL_PAGE_TABLE as _); let kernel_address_space = AddressSpace::from_paddr(kernel_virt2phys(kernel_address_space)); - // TODO: Properly add zones and avoid adding kernel - page_allocator - .add_zone(kernel_virt2phys(kernel_reserved_end()).as_usize(), 0x16000) - .unwrap(); + // Add zones to the page allocator + let memory_map_tag = boot_info + .memory_map_tag() + .expect("Memory map not available"); + for region in memory_map_tag.memory_areas() { + use multiboot2::MemoryAreaType; + let typ = MemoryAreaType::from(region.typ()); + if typ == MemoryAreaType::Available { + let kernel_physical_end = kernel_virt2phys(kernel_end()); + let start = PhysAddr::from_usize(region.start_address() as _); + let start = start.max(kernel_physical_end).align_up_4k(); + let end = PhysAddr::from_usize(region.end_address() as _); + if end <= start { + continue; + } - // // Add zones to the page allocator - // let memory_map_tag = boot_info - // .memory_map_tag() - // .expect("Memory map not available"); - // for region in memory_map_tag.memory_areas() { - // use multiboot2::MemoryAreaType; - // let typ = MemoryAreaType::from(region.typ()); - // if typ == MemoryAreaType::Available { - // // if page_allocator - // // .add_zone( - // // region.start_address() as _, - // // memory_addr::align_down_4k(region.size() as _), - // // ) - // // .is_err() - // // { - // // crate::println!("Failed to add some memory zones"); - // // } - // } - // } + if page_allocator + .add_zone(start.as_usize(), memory_addr::align_down_4k(end - start)) + .is_err() + { + crate::println!("Failed to add some memory zones"); + } + } + } // TODO: Free boot info and bootstrap code
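// ---------------------------------------------------------------------------
// Editorial sketch, not part of the patch series: the README's "Help!!!" section
// above asks for a way to make freeing in the ZonedBuddy page allocator checked.
// One minimal option is a per-zone allocation bitmap; every name below
// (ZoneBitmap, mark, unmark_checked) is an illustrative assumption, not an API
// from these patches or from lock_free_buddy_allocator.

struct ZoneBitmap {
    start: usize,
    /// One bit per block of `block_size` bytes, set while the block is handed out.
    bits: crate::sync::RwLock<alloc::vec::Vec<u64>>,
}

impl ZoneBitmap {
    fn new(start: usize, blocks: usize) -> Self {
        let mut bits = alloc::vec::Vec::new();
        bits.resize((blocks + 63) / 64, 0u64);
        Self {
            start,
            bits: crate::sync::RwLock::new(bits),
        }
    }

    /// Record that `blocks` blocks starting at `addr` were just allocated.
    fn mark(&self, addr: usize, blocks: usize, block_size: usize) {
        let mut bits = self.bits.write();
        for block in 0..blocks {
            let index = (addr - self.start) / block_size + block;
            bits[index / 64] |= 1u64 << (index % 64);
        }
    }

    /// Clear the bits for a freed range. Returns false and leaves the bitmap
    /// untouched if any block in the range was never allocated, so the caller
    /// can reject (or panic on) a bogus free instead of corrupting the buddy.
    fn unmark_checked(&self, addr: usize, blocks: usize, block_size: usize) -> bool {
        let mut bits = self.bits.write();
        for block in 0..blocks {
            let index = (addr - self.start) / block_size + block;
            if (bits[index / 64] & (1u64 << (index % 64))) == 0 {
                return false;
            }
        }
        for block in 0..blocks {
            let index = (addr - self.start) / block_size + block;
            bits[index / 64] &= !(1u64 << (index % 64));
        }
        true
    }
}

// In this sketch, ZonedBuddy::alloc would call mark() right after a successful
// buddy allocation, and free() would forward to buddy.free() only when
// unmark_checked() returns true.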