Commit Diff


commit - 2935cd12356e32581616d26dedee999d49dd98ba
commit + a9f11a50ab3f672b1b1a7799b89ef63e2eff0e26
blob - /dev/null
blob + ca61b3ef15323214ebc6f289099fccefd86a0c2c (mode 644)
--- /dev/null
+++ src/alloc.rs
@@ -0,0 +1,138 @@
+// vim: set tw=79 cc=80 ts=4 sw=4 sts=4 et :
+//
+// Copyright (c) 2025-2026 Murilo Ijanc' <murilo@ijanc.org>
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+
+use core::alloc::{GlobalAlloc, Layout};
+use core::ptr::null_mut;
+use core::sync::atomic::{AtomicUsize, Ordering};
+
+use crate::serial;
+
+// Kernel heap: 16 MiB at a fixed virtual address
+// NOTE(review): assumes this range does not collide with any other kernel
+// mapping — confirm against the kernel's virtual memory layout.
+pub const HEAP_START: usize = 0xFFFF_FFFF_C000_0000;
+pub const HEAP_SIZE: usize = 16 * 1024 * 1024; // 16 MiB
+// Exclusive upper bound of the heap range; allocations must end at or
+// below this address.
+pub const HEAP_END: usize = HEAP_START + HEAP_SIZE;
+
+/// Bump allocator: fast, simple, no reuse until all freed.
+///
+/// `next` is the bump pointer (address of the next free byte); `count`
+/// tracks outstanding allocations so the whole heap can be reset once
+/// everything has been freed.
+pub struct BumpAllocator {
+    // Next free address, always within [HEAP_START, HEAP_END].
+    next: AtomicUsize,
+    // Number of live (not yet deallocated) allocations.
+    count: AtomicUsize,
+}
+
+impl BumpAllocator {
+    /// Create an allocator whose bump pointer starts at the bottom of
+    /// the kernel heap, with zero outstanding allocations. `const` so it
+    /// can initialize a `static` (e.g. the `#[global_allocator]`).
+    pub const fn new() -> Self {
+        Self {
+            count: AtomicUsize::new(0),
+            next: AtomicUsize::new(HEAP_START),
+        }
+    }
+}
+
+unsafe impl GlobalAlloc for BumpAllocator {
+    /// Bump-allocate `layout.size()` bytes at `layout.align()` alignment.
+    /// Returns null when the heap is exhausted or the address arithmetic
+    /// would overflow.
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+        // Count this allocation as live *before* publishing the new bump
+        // pointer. With the old order (CAS first, fetch_add after), a
+        // concurrent dealloc could observe count == 0 between the two
+        // steps, reset `next` to HEAP_START, and hand the same memory
+        // out twice.
+        self.count.fetch_add(1, Ordering::SeqCst);
+
+        loop {
+            let current = self.next.load(Ordering::Relaxed);
+            let align = layout.align();
+
+            // Align up with checked arithmetic: a pathological size or
+            // alignment must fail cleanly instead of wrapping around the
+            // address space. Layout guarantees `align` is a nonzero
+            // power of two, so `align - 1` cannot underflow.
+            let aligned = match current.checked_add(align - 1) {
+                Some(v) => v & !(align - 1),
+                None => {
+                    self.count.fetch_sub(1, Ordering::SeqCst);
+                    return null_mut();
+                }
+            };
+            let end = match aligned.checked_add(layout.size()) {
+                Some(v) => v,
+                None => {
+                    self.count.fetch_sub(1, Ordering::SeqCst);
+                    return null_mut();
+                }
+            };
+
+            if end > HEAP_END {
+                // Out of heap: undo the provisional live count.
+                self.count.fetch_sub(1, Ordering::SeqCst);
+                return null_mut();
+            }
+
+            // CAS loop for lock-free bump allocation
+            if self
+                .next
+                .compare_exchange_weak(
+                    current,
+                    end,
+                    Ordering::SeqCst,
+                    Ordering::Relaxed,
+                )
+                .is_ok()
+            {
+                return aligned as *mut u8;
+            }
+        }
+    }
+
+    /// Individual frees reclaim nothing; only when the *last* live
+    /// allocation is freed is the bump pointer reset to HEAP_START.
+    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
+        let prev = self.count.fetch_sub(1, Ordering::SeqCst);
+        if prev == 1 {
+            // All allocations freed, reset the bump pointer
+            self.next.store(HEAP_START, Ordering::SeqCst);
+        }
+    }
+}
+
+/// Registered as Rust's global allocator; backs heap types once
+/// `init_heap` has mapped the heap range (it must run first — see its
+/// doc comment).
+#[global_allocator]
+static ALLOCATOR: BumpAllocator = BumpAllocator::new();
+
+/// Map the kernel heap pages using the Limine memory map.
+/// Must be called before any heap allocation.
+///
+/// `_hhdm_offset` is unused here (the paging module receives the HHDM
+/// offset via its own `init`); it is kept for interface compatibility.
+/// `page_alloc` supplies physical 4 KiB frames for both the heap pages
+/// and any intermediate page tables `map_page` needs.
+pub fn init_heap(
+    _hhdm_offset: u64,
+    page_alloc: &mut dyn FnMut() -> Option<u64>,
+) {
+    use crate::paging;
+
+    let pages_needed = HEAP_SIZE / 4096;
+    let mut mapped = 0;
+
+    for i in 0..pages_needed {
+        let vaddr = (HEAP_START + i * 4096) as u64;
+        let paddr = match page_alloc() {
+            Some(p) => p,
+            None => {
+                serial::print("heap: out of pages at ");
+                print_num(mapped);
+                serial::print("\n");
+                break;
+            }
+        };
+
+        // PTE flags: present + writable.
+        // map_page returns false when an intermediate table could not be
+        // allocated; stop instead of silently leaving a hole in the heap
+        // mapping (the old code ignored the return value).
+        if !paging::map_page(vaddr, paddr, 0x3, page_alloc) {
+            serial::print("heap: map_page failed at ");
+            print_num(mapped);
+            serial::print("\n");
+            break;
+        }
+        mapped += 1;
+    }
+
+    serial::print("heap: mapped ");
+    print_num(mapped);
+    serial::print(" pages (");
+    print_num(mapped * 4); // KiB
+    serial::print(" KiB)\n");
+}
+
+/// Write `val` to the serial port in decimal.
+fn print_num(val: usize) {
+    if val == 0 {
+        serial::putc(b'0');
+        return;
+    }
+    // 2^64 - 1 has 20 decimal digits, so 20 bytes always suffice.
+    let mut digits = [0u8; 20];
+    let mut remaining = val;
+    let mut len = 0;
+    while remaining > 0 {
+        digits[len] = b'0' + (remaining % 10) as u8;
+        remaining /= 10;
+        len += 1;
+    }
+    // Digits were produced least-significant first; emit them reversed.
+    for &d in digits[..len].iter().rev() {
+        serial::putc(d);
+    }
+}
blob - /dev/null
blob + 0a4e0f698056bc68fc82d17fd3ee9eb6a7b48122 (mode 644)
--- /dev/null
+++ src/paging.rs
@@ -0,0 +1,329 @@
+// vim: set tw=79 cc=80 ts=4 sw=4 sts=4 et :
+//
+// Copyright (c) 2025-2026 Murilo Ijanc' <murilo@ijanc.org>
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+
+use core::arch::asm;
+
+use crate::serial;
+
+// Page table entry flags
+const PTE_PRESENT: u64 = 1 << 0; // entry maps something
+const PTE_WRITABLE: u64 = 1 << 1; // writes permitted through this entry
+// Bits 12..51: physical frame address held by a 4 KiB-aligned entry.
+const PTE_ADDR_MASK: u64 = 0x000F_FFFF_FFFF_F000;
+
+// HHDM offset provided by Limine (set during init)
+// NOTE(review): plain `static mut`, no synchronization — safe only if
+// init() runs once, before any other paging call; confirm single-core
+// boot ordering.
+static mut HHDM_OFFSET: u64 = 0;
+
+/// Initialize paging module with the HHDM offset from Limine.
+///
+/// Must be called before any other function in this module: they all
+/// translate physical addresses through `HHDM_OFFSET` (via
+/// `phys_to_virt`).
+pub fn init(hhdm_offset: u64) {
+    unsafe {
+        // Single unsynchronized write; relies on being called during
+        // single-threaded boot before any concurrent reader exists.
+        HHDM_OFFSET = hhdm_offset;
+    }
+}
+
+/// Read CR3 and return the physical address of the PML4 (the low flag
+/// bits of CR3 are masked off with PTE_ADDR_MASK).
+fn read_cr3() -> u64 {
+    let raw: u64;
+    unsafe {
+        asm!("mov {}, cr3", out(reg) raw, options(nostack));
+    }
+    raw & PTE_ADDR_MASK
+}
+
+/// Convert a physical address to a virtual address via HHDM.
+fn phys_to_virt(phys: u64) -> *mut u64 {
+    // The unsafe is for reading the `static mut`; HHDM_OFFSET is written
+    // once in init() before this module is otherwise used.
+    let offset = unsafe { HHDM_OFFSET };
+    (phys + offset) as *mut u64
+}
+
+/// Walk the 4-level page table and resolve a virtual address
+/// to its physical address. Returns None if unmapped.
+///
+/// Handles 1 GiB (PDPT-level) and 2 MiB (PD-level) huge pages.
+/// Entries are loaded with `read_volatile`, matching the volatile
+/// writes in `map_page`/`unmap_page`, so the compiler cannot cache a
+/// page-table entry across a mapping change.
+pub fn virt_to_phys(vaddr: u64) -> Option<u64> {
+    // 9-bit index into each table level, plus the 12-bit page offset.
+    let pml4_index = (vaddr >> 39) & 0x1FF;
+    let pdpt_index = (vaddr >> 30) & 0x1FF;
+    let pd_index = (vaddr >> 21) & 0x1FF;
+    let pt_index = (vaddr >> 12) & 0x1FF;
+    let offset = vaddr & 0xFFF;
+
+    let pml4 = phys_to_virt(read_cr3());
+
+    // PML4 → PDPT
+    let pml4e = unsafe { pml4.add(pml4_index as usize).read_volatile() };
+    if pml4e & PTE_PRESENT == 0 {
+        return None;
+    }
+
+    // PDPT → PD
+    let pdpt = phys_to_virt(pml4e & PTE_ADDR_MASK);
+    let pdpte = unsafe { pdpt.add(pdpt_index as usize).read_volatile() };
+    if pdpte & PTE_PRESENT == 0 {
+        return None;
+    }
+    // 1 GiB huge page (PS, bit 7): bits 30..51 hold the frame base.
+    if pdpte & (1 << 7) != 0 {
+        let phys_base = pdpte & 0x000F_FFFF_C000_0000;
+        return Some(phys_base + (vaddr & 0x3FFF_FFFF));
+    }
+
+    // PD → PT
+    let pd = phys_to_virt(pdpte & PTE_ADDR_MASK);
+    let pde = unsafe { pd.add(pd_index as usize).read_volatile() };
+    if pde & PTE_PRESENT == 0 {
+        return None;
+    }
+    // 2 MiB huge page (PS, bit 7): bits 21..51 hold the frame base.
+    if pde & (1 << 7) != 0 {
+        let phys_base = pde & 0x000F_FFFF_FFE0_0000;
+        return Some(phys_base + (vaddr & 0x1FFFFF));
+    }
+
+    // PT → Page
+    let pt = phys_to_virt(pde & PTE_ADDR_MASK);
+    let pte = unsafe { pt.add(pt_index as usize).read_volatile() };
+    if pte & PTE_PRESENT == 0 {
+        return None;
+    }
+
+    Some((pte & PTE_ADDR_MASK) + offset)
+}
+
+/// Map a virtual page to a physical page.
+/// Allocates intermediate tables from `page_alloc` if needed.
+/// Returns `false` when an intermediate table could not be allocated.
+pub fn map_page(
+    vaddr: u64,
+    paddr: u64,
+    flags: u64,
+    page_alloc: &mut dyn FnMut() -> Option<u64>,
+) -> bool {
+    // 9-bit table indices for each paging level (PML4 down to PT).
+    let i4 = ((vaddr >> 39) & 0x1FF) as usize;
+    let i3 = ((vaddr >> 30) & 0x1FF) as usize;
+    let i2 = ((vaddr >> 21) & 0x1FF) as usize;
+    let i1 = ((vaddr >> 12) & 0x1FF) as usize;
+
+    let pml4 = phys_to_virt(read_cr3());
+
+    // Walk — creating missing levels on the way — PDPT, then PD, then PT.
+    let Some(pdpt) = ensure_table(pml4, i4, flags, page_alloc) else {
+        return false;
+    };
+    let Some(pd) = ensure_table(pdpt, i3, flags, page_alloc) else {
+        return false;
+    };
+    let Some(pt) = ensure_table(pd, i2, flags, page_alloc) else {
+        return false;
+    };
+
+    unsafe {
+        // Install the leaf PTE...
+        pt.add(i1)
+            .write_volatile((paddr & PTE_ADDR_MASK) | flags);
+        // ...and flush any stale TLB entry for this address.
+        asm!("invlpg [{}]", in(reg) vaddr, options(nostack));
+    }
+
+    true
+}
+
+/// Unmap a virtual page (clear the present bit in the PT entry).
+///
+/// Only 4 KiB mappings are torn down. If the address is covered by a
+/// 1 GiB or 2 MiB huge page (PS bit set), this returns without touching
+/// anything: descending further would misinterpret the huge-page frame
+/// address as a page-table pointer and zero an arbitrary word of memory.
+pub fn unmap_page(vaddr: u64) {
+    let pml4_index = (vaddr >> 39) & 0x1FF;
+    let pdpt_index = (vaddr >> 30) & 0x1FF;
+    let pd_index = (vaddr >> 21) & 0x1FF;
+    let pt_index = (vaddr >> 12) & 0x1FF;
+
+    let pml4 = phys_to_virt(read_cr3());
+
+    // Entries are read volatile, matching the volatile writes below.
+    let pml4e = unsafe { pml4.add(pml4_index as usize).read_volatile() };
+    if pml4e & PTE_PRESENT == 0 {
+        return;
+    }
+
+    let pdpt = phys_to_virt(pml4e & PTE_ADDR_MASK);
+    let pdpte = unsafe { pdpt.add(pdpt_index as usize).read_volatile() };
+    // Not present, or a 1 GiB huge page — nothing to unmap at 4 KiB
+    // granularity.
+    if pdpte & PTE_PRESENT == 0 || pdpte & (1 << 7) != 0 {
+        return;
+    }
+
+    let pd = phys_to_virt(pdpte & PTE_ADDR_MASK);
+    let pde = unsafe { pd.add(pd_index as usize).read_volatile() };
+    // Same guard for 2 MiB huge pages.
+    if pde & PTE_PRESENT == 0 || pde & (1 << 7) != 0 {
+        return;
+    }
+
+    let pt = phys_to_virt(pde & PTE_ADDR_MASK);
+
+    // Clear the entry
+    unsafe {
+        pt.add(pt_index as usize).write_volatile(0);
+    }
+
+    // Flush TLB
+    unsafe {
+        asm!("invlpg [{}]", in(reg) vaddr, options(nostack));
+    }
+}
+
+/// Ensure a next-level table exists at `table[index]`.
+/// If the entry is not present, allocate a new page, zero it,
+/// and install it. `flags` are propagated to intermediate
+/// entries (e.g. PTE_USER must be set at all levels).
+///
+/// Returns None if the entry is a huge-page mapping (PS bit set) — such
+/// an entry is a leaf, not a table, and must not be descended into —
+/// or if `page_alloc` is exhausted.
+fn ensure_table(
+    table: *mut u64,
+    index: usize,
+    flags: u64,
+    page_alloc: &mut dyn FnMut() -> Option<u64>,
+) -> Option<*mut u64> {
+    // Volatile read, matching the volatile writes used on entries.
+    let entry = unsafe { table.add(index).read_volatile() };
+    // Intermediate entries are always present + writable; bit 2 (user)
+    // is carried over from `flags` since it must be set at every level.
+    let intermediate_flags =
+        PTE_PRESENT | PTE_WRITABLE | (flags & (1 << 2));
+
+    if entry & PTE_PRESENT != 0 {
+        // A huge-page entry (PS, bit 7) maps memory directly; treating
+        // its frame address as a page-table pointer would corrupt that
+        // memory. Fail instead (the old code descended unconditionally).
+        if entry & (1 << 7) != 0 {
+            return None;
+        }
+        // Add flags (e.g. User bit) if not already set
+        if entry & intermediate_flags != intermediate_flags {
+            unsafe {
+                table
+                    .add(index)
+                    .write_volatile(entry | intermediate_flags);
+            }
+        }
+        return Some(phys_to_virt(entry & PTE_ADDR_MASK));
+    }
+
+    // Allocate a new page for the table
+    let new_page_phys = page_alloc()?;
+    let new_page_virt = phys_to_virt(new_page_phys);
+
+    // Zero the new table so every entry starts non-present
+    unsafe {
+        core::ptr::write_bytes(
+            new_page_virt as *mut u8, 0, 4096,
+        );
+    }
+
+    // Install in parent table
+    unsafe {
+        table.add(index).write_volatile(
+            new_page_phys | intermediate_flags,
+        );
+    }
+
+    Some(new_page_virt)
+}
+
+/// Print paging demo: resolve kernel address, map a new page,
+/// write to it, unmap it, and trigger a page fault.
+///
+/// Does not return normally: the final read of the unmapped page is a
+/// deliberate #PF, to be caught by the IDT handler.
+/// Assumes `usable_page` is the base of a run of free physical frames:
+/// the first frame is the test page, the following frames feed the
+/// intermediate-table allocator below. TODO(review): confirm the caller
+/// guarantees enough consecutive free frames.
+pub fn demo(usable_page: u64) {
+    // 1. Resolve a known virtual address (our own kmain)
+    let kmain_vaddr = crate::kmain as *const () as u64;
+    serial::print("virt_to_phys(kmain) = ");
+    match virt_to_phys(kmain_vaddr) {
+        Some(phys) => {
+            serial::print("0x");
+            print_hex(phys);
+            serial::print("\n");
+        }
+        None => serial::print("unmapped!\n"),
+    }
+
+    // 2. Map a test page at a known virtual address
+    let test_vaddr: u64 = 0xFFFF_8000_1000_0000;
+    let test_paddr = usable_page;
+
+    serial::print("mapping 0x");
+    print_hex(test_vaddr);
+    serial::print(" -> 0x");
+    print_hex(test_paddr);
+    serial::print("\n");
+
+    // Simple bump allocator for intermediate page tables
+    // (function-local `static mut`: this makes demo() non-reentrant —
+    // fine for a one-shot demo, not for general use)
+    static mut ALLOC_NEXT: u64 = 0;
+    unsafe {
+        ALLOC_NEXT = usable_page + 0x1000;
+    }
+
+    let ok = map_page(
+        test_vaddr,
+        test_paddr,
+        PTE_PRESENT | PTE_WRITABLE,
+        &mut || unsafe {
+            let page = ALLOC_NEXT;
+            ALLOC_NEXT += 0x1000;
+            Some(page)
+        },
+    );
+
+    if !ok {
+        serial::print("map_page failed!\n");
+        return;
+    }
+
+    // 3. Write and read through the new mapping
+    // (volatile so the round trip is not optimized away)
+    let ptr = test_vaddr as *mut u64;
+    unsafe {
+        ptr.write_volatile(0xDEAD_BEEF_CAFE_BABE);
+        let val = ptr.read_volatile();
+        serial::print("wrote and read: 0x");
+        print_hex(val);
+        serial::print("\n");
+    }
+
+    // 4. Verify via virt_to_phys
+    match virt_to_phys(test_vaddr) {
+        Some(phys) => {
+            serial::print("virt_to_phys(test) = 0x");
+            print_hex(phys);
+            serial::print("\n");
+        }
+        None => serial::print("virt_to_phys failed!\n"),
+    }
+
+    // 5. Unmap and trigger page fault
+    serial::print("unmapping test page...\n");
+    unmap_page(test_vaddr);
+
+    serial::print(
+        "accessing unmapped page (expect #PF)...\n",
+    );
+    unsafe {
+        // This will cause a page fault — our IDT handler
+        // will catch it and print the faulting address.
+        core::ptr::read_volatile(test_vaddr as *const u8);
+    }
+}
+
+/// Write `val` to the serial port as lowercase hex, with no "0x" prefix
+/// and no leading zeros (a single '0' is printed when val == 0).
+fn print_hex(val: u64) {
+    const HEX: &[u8; 16] = b"0123456789abcdef";
+    let mut emitting = false;
+    for shift in (0..16).rev().map(|i| i * 4) {
+        let nibble = ((val >> shift) & 0xF) as usize;
+        // Skip leading zero nibbles, but always print the last one.
+        if nibble != 0 || emitting || shift == 0 {
+            serial::putc(HEX[nibble]);
+            emitting = true;
+        }
+    }
+}
blob - /dev/null
blob + 706bb5cf2dd536c6cd8890d674baabce8697f94d (mode 644)
--- /dev/null
+++ src/pmm.rs
@@ -0,0 +1,124 @@
+// vim: set tw=79 cc=80 ts=4 sw=4 sts=4 et :
+//
+// Copyright (c) 2025-2026 Murilo Ijanc' <murilo@ijanc.org>
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+//
+
+//! Physical Memory Manager — simple bump allocator over the
+//! Limine memory map. Allocates 4 KiB aligned physical pages.
+
+use crate::serial;
+
+// Physical page frame size in bytes (4 KiB).
+const PAGE_SIZE: u64 = 4096;
+
+// Fixed-capacity table of usable physical memory regions and the number
+// of entries currently populated.
+// NOTE(review): `static mut` with no synchronization — confirm these are
+// only touched from single-threaded boot code.
+static mut REGIONS: [Region; 32] = [Region::EMPTY; 32];
+static mut REGION_COUNT: usize = 0;
+
+/// One contiguous run of usable physical memory.
+#[derive(Clone, Copy)]
+struct Region {
+    base: u64, // first byte of the region (page-aligned by init)
+    next: u64, // bump pointer: next page to hand out
+    end: u64,  // exclusive upper bound of the region
+}
+
+impl Region {
+    // All-zero placeholder used to initialize the static region table;
+    // zero-length (base == end), so alloc_page never hands pages from it.
+    const EMPTY: Self = Region {
+        base: 0,
+        next: 0,
+        end: 0,
+    };
+}
+
+/// Initialize the PMM from the Limine memory map.
+/// Only considers USABLE regions. Both edges of each region are aligned
+/// inward to page boundaries so only whole pages are ever handed out.
+/// If more than 32 usable regions exist, the extras are dropped with a
+/// warning (the old code dropped them silently).
+pub fn init(entries: &[&limine::memmap::Entry]) {
+    let mut count = 0;
+    let mut skipped = 0;
+    let mut total_pages: u64 = 0;
+
+    for entry in entries {
+        if entry.type_ != limine::memmap::MEMMAP_USABLE {
+            continue;
+        }
+        if count >= 32 {
+            // Region table full: remember the drop so it can be reported
+            // instead of losing usable memory without a trace.
+            skipped += 1;
+            continue;
+        }
+
+        // Align base up and end down to page boundaries
+        let base =
+            (entry.base + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
+        let end = (entry.base + entry.length) & !(PAGE_SIZE - 1);
+        if base >= end {
+            continue;
+        }
+
+        let pages = (end - base) / PAGE_SIZE;
+        total_pages += pages;
+
+        unsafe {
+            REGIONS[count] = Region {
+                base,
+                next: base,
+                end,
+            };
+        }
+        count += 1;
+    }
+
+    unsafe {
+        REGION_COUNT = count;
+    }
+
+    serial::print("pmm: ");
+    print_num(total_pages as usize);
+    serial::print(" pages (");
+    print_num((total_pages * 4) as usize);
+    serial::print(" KiB) in ");
+    print_num(count);
+    serial::print(" regions\n");
+    if skipped > 0 {
+        serial::print("pmm: warning: ");
+        print_num(skipped);
+        serial::print(" usable regions dropped (table full)\n");
+    }
+}
+
+/// Allocate a single 4 KiB physical page.
+/// Returns the physical address or None if exhausted.
+pub fn alloc_page() -> Option<u64> {
+    // The unsafe covers the `static mut` region table; accessed only
+    // from single-threaded boot code.
+    unsafe {
+        for region in REGIONS.iter_mut().take(REGION_COUNT) {
+            // Hand out the next whole page if one still fits.
+            if region.next + PAGE_SIZE <= region.end {
+                let addr = region.next;
+                region.next += PAGE_SIZE;
+                return Some(addr);
+            }
+        }
+    }
+    None
+}
+
+/// Write `val` to the serial port in decimal.
+fn print_num(val: usize) {
+    if val == 0 {
+        serial::putc(b'0');
+        return;
+    }
+    // 20 bytes cover the 20 decimal digits of 2^64 - 1.
+    let mut digits = [0u8; 20];
+    let mut remaining = val;
+    let mut len = 0;
+    while remaining > 0 {
+        digits[len] = b'0' + (remaining % 10) as u8;
+        remaining /= 10;
+        len += 1;
+    }
+    // Digits were built least-significant first; print in reverse.
+    for &d in digits[..len].iter().rev() {
+        serial::putc(d);
+    }
+}