From 4611b89a1d714948a32759d48c67085be6c3bd41 Mon Sep 17 00:00:00 2001 From: juls0730 <62722391+juls0730@users.noreply.github.com> Date: Mon, 13 May 2024 00:12:37 -0500 Subject: [PATCH] a lot of stuff, but mainly, a decent allocator --- Makefile | 14 +- README.md | 9 +- scripts/font.py | 2 + src/arch/x86_64/gdt.rs | 144 ++++ src/arch/x86_64/interrupts/apic.rs | 170 ++-- src/arch/x86_64/interrupts/exceptions.rs | 2 +- src/arch/x86_64/interrupts/mod.rs | 21 +- src/arch/x86_64/mod.rs | 1 + src/arch/x86_64/stack_trace.rs | 82 +- src/arch/x86_64/x86_64-unknown-none.json | 2 +- src/drivers/fs/devfs.rs | 1 + src/drivers/fs/fat.rs | 100 --- src/drivers/fs/initramfs/chunk_reader.rs | 142 ++++ src/drivers/fs/initramfs/compressors/mod.rs | 1 - src/drivers/fs/initramfs/mod.rs | 760 +++--------------- src/drivers/fs/initramfs/superblock.rs | 175 ++++ src/drivers/fs/vfs.rs | 184 ++--- src/drivers/serial.rs | 20 +- src/drivers/storage/ide.rs | 5 +- .../fs/initramfs/compressors => libs}/gzip.rs | 2 +- src/libs/math.rs | 89 -- src/libs/mod.rs | 2 +- src/libs/sync/mutex.rs | 26 +- src/main.rs | 100 ++- src/mem/allocator.rs | 332 +++----- src/mem/mod.rs | 116 +-- src/mem/pmm.rs | 42 +- 27 files changed, 1045 insertions(+), 1499 deletions(-) create mode 100644 src/arch/x86_64/gdt.rs create mode 100644 src/drivers/fs/initramfs/chunk_reader.rs delete mode 100644 src/drivers/fs/initramfs/compressors/mod.rs create mode 100644 src/drivers/fs/initramfs/superblock.rs rename src/{drivers/fs/initramfs/compressors => libs}/gzip.rs (99%) delete mode 100644 src/libs/math.rs diff --git a/Makefile b/Makefile index bdf09c9..f14195f 100644 --- a/Makefile +++ b/Makefile @@ -20,6 +20,7 @@ IMAGE_PATH = ${ARTIFACTS_PATH}/${IMAGE_NAME} CARGO_OPTS = --target=src/arch/${ARCH}/${ARCH}-unknown-none.json QEMU_OPTS += -m ${MEMORY} -drive id=hd0,format=raw,file=${IMAGE_PATH} LIMINE_BOOT_VARIATION = X64 +LIMINE_BRANCH = v7.x-binary ifeq (${MODE},release) CARGO_OPTS += --release @@ -54,7 +55,7 @@ ifneq (${UEFI},) endif endif -.PHONY: all check run-scripts prepare-bin-files copy-initramfs-files compile-initramfs copy-iso-files build-iso compile-bootloader compile-binaries ovmf clean run build line-count +.PHONY: all build all: build @@ -78,6 +79,7 @@ copy-initramfs-files: echo "Hello World from Initramfs" > ${INITRAMFS_PATH}/example.txt echo "Second file for testing" > ${INITRAMFS_PATH}/example2.txt mkdir -p ${INITRAMFS_PATH}/firstdir/seconddirbutlonger/ + mkdir ${INITRAMFS_PATH}/mnt/ echo "Nexted file reads!!" > ${INITRAMFS_PATH}/firstdir/seconddirbutlonger/yeah.txt compile-initramfs: copy-initramfs-files @@ -88,10 +90,7 @@ run-scripts: ifeq (${EXPORT_SYMBOLS},true) nm target/${ARCH}-unknown-none/${MODE}/CappuccinOS.elf > scripts/symbols.table @if [ ! -d "scripts/rustc_demangle" ]; then \ - echo "Cloning rustc_demangle.py into scripts/rustc_demangle/..."; \ git clone "https://github.com/juls0730/rustc_demangle.py" "scripts/rustc_demangle"; \ - else \ - echo "Folder scripts/rustc_demangle already exists. Skipping clone."; \ fi python scripts/demangle-symbols.py mv scripts/symbols.table ${INITRAMFS_PATH}/ @@ -100,7 +99,7 @@ endif python scripts/font.py mv scripts/font.psf ${INITRAMFS_PATH}/ - python scripts/initramfs-test.py 100 ${INITRAMFS_PATH}/ + #python scripts/initramfs-test.py 100 ${INITRAMFS_PATH}/ copy-iso-files: # Limine files @@ -154,10 +153,11 @@ endif sudo losetup -d `cat loopback_dev` rm -rf loopback_dev +# TODO: do something better for the directory checking maybe compile-bootloader: @if [ ! 
-d "limine" ]; then \ echo "Cloning Limine into limine/..."; \ - git clone https://github.com/limine-bootloader/limine.git --branch=v6.x-branch-binary --depth=1; \ + git clone https://github.com/limine-bootloader/limine.git --branch=${LIMINE_BRANCH} --depth=1; \ else \ echo "Folder limine already exists. Skipping clone."; \ fi @@ -175,7 +175,7 @@ ovmf-x86_64: ovmf ovmf-riscv64: ovmf mkdir -p ovmf/ovmf-riscv64 @if [ ! -d "ovmf/ovmf-riscv64/OVMF.fd" ]; then \ - cd ovmf/ovmf-riscv64 && curl -o OVMF.fd https://retrage.github.io/edk2-nightly/bin/RELEASERISCV64_VIRT_CODE.fd && dd if=/dev/zero of=OVMF.fd bs=1 count=0 seek=33554432; \ + cd ovmf/ovmf-riscv64 && curl -o OVMF.fd https://retrage.github.io/edk2-nightly/bin/RELEASERISCV64_VIRT_CODE.fd; \ fi ovmf-aarch64: diff --git a/README.md b/README.md index aaa5d0f..8430d8d 100644 --- a/README.md +++ b/README.md @@ -7,14 +7,14 @@ CappuccinOS is a small x86-64 operating system written from scratch in rust. Thi ## Features - [X] Serial output - [X] Hardware interrupts -- [X] PS/2 Keyboard support -- [X] ANSI color codes in console - [X] Heap allocation - [ ] Externalized kernel modules - [X] Initramfs - [X] Squashfs driver - [X] Programmatic reads - [X] Decompression +- [ ] PS/2 Keyboard support +- [ ] ANSI color codes in console - [ ] SMP - [ ] Use APIC instead of PIC - [ ] Pre-emptive multitasking @@ -31,7 +31,7 @@ CappuccinOS is a small x86-64 operating system written from scratch in rust. Thi - [ ] MMC/Nand device support - [ ] M.2 NVME device support - [ ] Basic shell - - [X] Basic I/O + - [ ] Basic I/O - [ ] Executing Programs from disk - [ ] Lua interpreter - [ ] Memory management @@ -114,12 +114,15 @@ Some Resources I used over the creation of CappuccinOS: - [OSDev wiki](https://wiki.osdev.org) - Wikipedia on various random things - [Squashfs Binary Format](https://dr-emann.github.io/squashfs/squashfs.html) +- [GRUB](https://www.gnu.org/software/grub/grub-download.html) Mainly for Squashfs things, even though I later learned it does things incorrectly And mostly for examples of how people did stuff I used these (projects made by people who might actually have a clue what they're doing): +- This is missing some entries somehow - [MOROS](https://github.com/vinc/moros) - [Felix](https://github.com/mrgian/felix) - [mOS](https://github.com/Moldytzu/mOS) - [rust_os](https://github.com/thepowersgang/rust_os/tree/master) +- [Lyre](https://github.com/Lyre-OS/klyre) ## License CappuccinOS is license under the MIT License. Feel free to modify and distribute in accordance with the license. 
\ No newline at end of file diff --git a/scripts/font.py b/scripts/font.py index afe7519..a96d1e9 100644 --- a/scripts/font.py +++ b/scripts/font.py @@ -4,6 +4,7 @@ def create_psf2_file(font_data, psf2_file_path): magic_bytes = b'\x72\xB5\x4A\x86' version = 0 header_size = 32 + # means theres a unicode table flags = 0x00000001 num_glyphs = len(font_data) height = 16 @@ -20,6 +21,7 @@ def create_psf2_file(font_data, psf2_file_path): psf2_file.write(psf2_file_content) if __name__ == "__main__": + # TODO: maybe dont just dump a bunch of hex in here idk font_data = [ [0x00, 0x00, 0x7E, 0x81, 0x99, 0xA5, 0x85, 0x89, 0x89, 0x81, 0x89, 0x7E, 0x00, 0x00, 0x00, 0x00], [0x00, 0x00, 0x7E, 0x81, 0xA5, 0x81, 0x81, 0xBD, 0x99, 0x81, 0x81, 0x7E, 0x00, 0x00, 0x00, 0x00], diff --git a/src/arch/x86_64/gdt.rs b/src/arch/x86_64/gdt.rs new file mode 100644 index 0000000..ae79b6b --- /dev/null +++ b/src/arch/x86_64/gdt.rs @@ -0,0 +1,144 @@ +#[derive(Default)] +#[repr(C)] +struct GDTDescriptor { + limit: u16, + base_low: u16, + base_mid: u8, + access: u8, + granularity: u8, + base_high: u8, +} + +#[derive(Default)] +#[repr(C)] +struct TSSDescriptor { + length: u16, + base_low: u16, + base_mid: u8, + flags1: u8, + flags2: u8, + base_high: u8, + base_upper: u32, + _reserved: u32, +} + +#[derive(Default)] +#[repr(C)] +struct GDT { + descriptors: [GDTDescriptor; 11], + tss: TSSDescriptor, +} + +#[repr(C, packed)] +struct GDTPtr { + limit: u16, + base: u64, +} + +static mut GDT: Option = None; +static mut GDTR: GDTPtr = GDTPtr { limit: 0, base: 0 }; + +pub fn gdt_init() { + unsafe { + GDT = Some(GDT::default()); + let gdt = GDT.as_mut().unwrap(); + + gdt.descriptors[0].limit = 0; + gdt.descriptors[0].base_low = 0; + gdt.descriptors[0].base_mid = 0; + gdt.descriptors[0].access = 0; + gdt.descriptors[0].granularity = 0; + gdt.descriptors[0].base_high = 0; + + gdt.descriptors[1].limit = 0xFFFF; + gdt.descriptors[1].base_low = 0; + gdt.descriptors[1].base_mid = 0; + gdt.descriptors[1].access = 0x9A; + gdt.descriptors[1].granularity = 0; + gdt.descriptors[1].base_high = 0; + + gdt.descriptors[2].limit = 0xFFFF; + gdt.descriptors[2].base_low = 0; + gdt.descriptors[2].base_mid = 0; + gdt.descriptors[2].access = 0x92; + gdt.descriptors[2].granularity = 0; + gdt.descriptors[2].base_high = 0; + + gdt.descriptors[3].limit = 0xFFFF; + gdt.descriptors[3].base_low = 0; + gdt.descriptors[3].base_mid = 0; + gdt.descriptors[3].access = 0x9A; + gdt.descriptors[3].granularity = 0xCF; + gdt.descriptors[3].base_high = 0; + + gdt.descriptors[4].limit = 0xFFFF; + gdt.descriptors[4].base_low = 0; + gdt.descriptors[4].base_mid = 0; + gdt.descriptors[4].access = 0x92; + gdt.descriptors[4].granularity = 0xCF; + gdt.descriptors[4].base_high = 0; + + gdt.descriptors[5].limit = 0; + gdt.descriptors[5].base_low = 0; + gdt.descriptors[5].base_mid = 0; + gdt.descriptors[5].access = 0x9A; + gdt.descriptors[5].granularity = 0x20; + gdt.descriptors[5].base_high = 0; + + gdt.descriptors[6].limit = 0; + gdt.descriptors[6].base_low = 0; + gdt.descriptors[6].base_mid = 0; + gdt.descriptors[6].access = 0x92; + gdt.descriptors[6].granularity = 0; + gdt.descriptors[6].base_high = 0; + + // descriptors[7] and descriptors[8] are already dummy entries for SYSENTER + + gdt.descriptors[9].limit = 0; + gdt.descriptors[9].base_low = 0; + gdt.descriptors[9].base_mid = 0; + gdt.descriptors[9].access = 0xFA; + gdt.descriptors[9].granularity = 0x20; + gdt.descriptors[9].base_high = 0; + + gdt.descriptors[10].limit = 0; + gdt.descriptors[10].base_low = 0; + 
gdt.descriptors[10].base_mid = 0; + gdt.descriptors[10].access = 0xF2; + gdt.descriptors[10].granularity = 0; + gdt.descriptors[10].base_high = 0; + + gdt.tss.length = 104; + gdt.tss.base_low = 0; + gdt.tss.base_mid = 0; + gdt.tss.flags1 = 0x89; + gdt.tss.flags2 = 0; + gdt.tss.base_high = 0; + gdt.tss.base_upper = 0; + gdt.tss._reserved = 0; + + GDTR.limit = core::mem::size_of::() as u16 - 1; + GDTR.base = gdt as *mut GDT as u64; + } + + gdt_reload(); +} + +pub fn gdt_reload() { + unsafe { + core::arch::asm!( + "lgdt [{}]", + "push 0x28", + "lea rax, [rip+0x3]", + "push rax", + "retfq", + "mov eax, 0x30", + "mov ds, eax", + "mov es, eax", + "mov fs, eax", + "mov gs, eax", + "mov ss, eax", + in(reg) core::ptr::addr_of!(GDTR) + ); + } +} diff --git a/src/arch/x86_64/interrupts/apic.rs b/src/arch/x86_64/interrupts/apic.rs index 4190a70..1c7ed7f 100644 --- a/src/arch/x86_64/interrupts/apic.rs +++ b/src/arch/x86_64/interrupts/apic.rs @@ -2,10 +2,7 @@ use crate::{drivers::acpi::SMP_REQUEST, hcf, libs::cell::OnceCell}; use alloc::{sync::Arc, vec::Vec}; -use super::super::{ - cpu_get_msr, cpu_set_msr, - io::{inb, outb}, -}; +use super::super::{cpu_get_msr, cpu_set_msr, io::outb}; #[repr(C, packed)] #[derive(Clone, Copy, Debug)] @@ -107,33 +104,41 @@ impl APIC { let ptr_end = unsafe { ptr.add(madt.header.length as usize - 44) }; while (ptr as usize) < (ptr_end as usize) { - match unsafe { *ptr } { + // ptr may or may bot be aligned, although I have had crashes related to this pointer being not aligned + // and tbh I dont really care about the performance impact of reading unaligned pointers right now + // TODO + match unsafe { core::ptr::read_unaligned(ptr) } { 0 => { if unsafe { *(ptr.add(4)) } & 1 != 0 { - cpus.push(unsafe { *ptr.add(2).cast::() }); + cpus.push(unsafe { core::ptr::read_unaligned(ptr.add(2).cast::()) }); } } 1 => unsafe { io_apic = Some(IOAPIC { - ioapic_id: *ptr.add(2), - _reserved: *ptr.add(3), - ptr: (*ptr.add(4).cast::()) as *mut u8, - global_interrupt_base: *ptr.add(8).cast::(), + ioapic_id: core::ptr::read_unaligned(ptr.add(2)), + _reserved: core::ptr::read_unaligned(ptr.add(3)), + ptr: (core::ptr::read_unaligned(ptr.add(4).cast::())) as *mut u8, + global_interrupt_base: core::ptr::read_unaligned(ptr.add(8).cast::()), }) }, 2 => unsafe { io_apic_source_override = Some(IOAPICSourceOverride { - bus_source: *ptr.add(2), - irq_source: *ptr.add(3), - global_system_interrupt: *ptr.add(4).cast::(), - flags: *ptr.add(8).cast::(), + bus_source: core::ptr::read_unaligned(ptr.add(2)), + irq_source: core::ptr::read_unaligned(ptr.add(3)), + global_system_interrupt: core::ptr::read_unaligned( + ptr.add(4).cast::(), + ), + flags: core::ptr::read_unaligned(ptr.add(8).cast::()), }) }, - 5 => lapic_ptr = unsafe { *(ptr.add(4).cast::()) } as *mut u8, + 5 => { + lapic_ptr = + unsafe { core::ptr::read_unaligned(ptr.add(4).cast::()) } as *mut u8 + } _ => {} } - ptr = unsafe { ptr.add((*ptr.add(1)) as usize) }; + ptr = unsafe { ptr.add(core::ptr::read_unaligned(ptr.add(1)) as usize) }; } if io_apic.is_none() || io_apic_source_override.is_none() { @@ -168,10 +173,6 @@ impl APIC { crate::println!("{number_of_inputs}"); - // // hopefully nothing important is on that page :shrug: - // // TODO: use the page allocator we wrote maybe - // unsafe { core::ptr::copy(test as *mut u8, 0x8000 as *mut u8, 4096) } - let smp_request = SMP_REQUEST.get_response().get_mut(); if smp_request.is_none() { @@ -189,82 +190,12 @@ impl APIC { cpu.goto_address = test; } - // for cpu_apic in apic.cpus.iter() { - // let 
lapic_id = cpu_apic.apic_id; - - // // TODO: If CPU is the BSP, do not intialize it - - // crate::log_info!("Initializing CPU {processor_id:<02}, please wait",); - - // match apic.bootstrap_processor(processor_id, 0x8000) { - // Err(_) => crate::log_error!("Failed to initialize CPU {processor_id:<02}!"), - // Ok(_) => crate::log_ok!("Successfully initialized CPU {processor_id:<02}!"), - // } - // } - // Set and enable keyboard interrupt apic.set_interrupt(0x01, 0x01); return Ok(apic); } - // pub fn bootstrap_processor(&self, processor_id: u8, startup_page: usize) -> Result<(), ()> { - // // Clear LAPIC errors - // self.write_lapic(0x280, 0); - // // Select Auxiliary Processor - // self.write_lapic( - // 0x310, - // (self.read_lapic(0x310) & 0x00FFFFFF) | (processor_id as u32) << 24, - // ); - // // send INIT Inter-Processor Interrupt - // self.write_lapic(0x300, (self.read_lapic(0x300) & 0x00FFFFFF) | 0x00C500); - - // // Wait for IPI delivery - // while self.read_lapic(0x300) & (1 << 12) != 0 { - // unsafe { - // core::arch::asm!("pause"); - // } - // } - - // // Select Auxiliary Processor - // self.write_lapic( - // 0x310, - // (self.read_lapic(0x310) & 0x00FFFFFF) | (processor_id as u32) << 24, - // ); - // // deassert - // self.write_lapic(0x300, (self.read_lapic(0x300) & 0x00FFFFFF) | 0x00C500); - - // // Wait for IPI delivery - // while self.read_lapic(0x300) & (1 << 12) != 0 { - // unsafe { - // core::arch::asm!("pause"); - // } - // } - - // msdelay(10); - - // for i in 0..2 { - // self.write_lapic(0x280, 0); - // self.write_lapic( - // 0x310, - // (self.read_lapic(0x310) & 0x00FFFFFF) | (processor_id as u32) << 24, - // ); - // self.write_lapic(0x300, (self.read_lapic(0x300) & 0xfff0f800) | 0x000608); - // if i == 0 { - // usdelay(200); - // } else { - // msdelay(1000); - // } - // while self.read_lapic(0x300) & (1 << 12) != 0 { - // unsafe { - // core::arch::asm!("pause"); - // } - // } - // } - - // return Ok(()); - // } - pub fn read_ioapic(&self, reg: u32) -> u32 { unsafe { core::ptr::write_volatile(self.io_apic.ptr.cast::(), reg & 0xff); @@ -336,40 +267,41 @@ fn disable_pic() { outb(PIC_DATA_SLAVE, 0xFF); } -pub fn usdelay(useconds: u16) { - let pit_count = ((useconds as u32 * 1193) / 1000) as u16; +// // TODO: last I remember these didnt work +// pub fn usdelay(useconds: u16) { +// let pit_count = ((useconds as u32 * 1193) / 1000) as u16; - pit_delay(pit_count); -} +// pit_delay(pit_count); +// } -pub fn msdelay(ms: u32) { - let mut total_count = ms * 1193; +// pub fn msdelay(ms: u32) { +// let mut total_count = ms * 1193; - while total_count > 0 { - let chunk_count = if total_count > u16::MAX as u32 { - u16::MAX - } else { - total_count as u16 - }; +// while total_count > 0 { +// let chunk_count = if total_count > u16::MAX as u32 { +// u16::MAX +// } else { +// total_count as u16 +// }; - pit_delay(chunk_count); +// pit_delay(chunk_count); - total_count -= chunk_count as u32; - } -} +// total_count -= chunk_count as u32; +// } +// } -pub fn pit_delay(count: u16) { - // Set PIT to mode 0 - outb(0x43, 0x30); - outb(0x40, (count & 0xFF) as u8); - outb(0x40, ((count & 0xFF00) >> 8) as u8); - loop { - // Tell PIT to give us a timer status - outb(0x43, 0xE2); - if ((inb(0x40) >> 7) & 0x01) != 0 { - break; - } - } -} +// pub fn pit_delay(count: u16) { +// // Set PIT to mode 0 +// outb(0x43, 0x30); +// outb(0x40, (count & 0xFF) as u8); +// outb(0x40, ((count & 0xFF00) >> 8) as u8); +// loop { +// // Tell PIT to give us a timer status +// outb(0x43, 0xE2); +// if ((inb(0x40) >> 7) & 
0x01) != 0 { +// break; +// } +// } +// } pub static APIC: OnceCell = OnceCell::new(); diff --git a/src/arch/x86_64/interrupts/exceptions.rs b/src/arch/x86_64/interrupts/exceptions.rs index 394c090..415bb97 100644 --- a/src/arch/x86_64/interrupts/exceptions.rs +++ b/src/arch/x86_64/interrupts/exceptions.rs @@ -107,7 +107,7 @@ exception_function!(0x0D, general_protection_fault); exception_function!(0x0E, page_fault); exception_function!(0xFF, generic_handler); -pub fn set_exceptions() { +pub fn exceptions_init() { for i in 0..32 { idt_set_gate(i, generic_handler as usize); } diff --git a/src/arch/x86_64/interrupts/mod.rs b/src/arch/x86_64/interrupts/mod.rs index 0ab15cf..722e0b4 100755 --- a/src/arch/x86_64/interrupts/mod.rs +++ b/src/arch/x86_64/interrupts/mod.rs @@ -1,11 +1,13 @@ pub mod apic; -mod exceptions; +pub mod exceptions; use crate::{ // arch::{apic, x86_common::pic::ChainedPics}, libs::sync::Mutex, }; +use self::apic::APIC; + #[repr(C, packed)] #[derive(Clone, Copy)] struct IdtEntry { @@ -73,7 +75,7 @@ pub fn idt_set_gate(num: u8, function_ptr: usize) { // If the interrupt with this number occurred with the "null" interrupt handler // We will need to tell the PIC that interrupt is over, this stops new interrupts // From never firing because "it was never finished" - // signal_end_of_interrupt(num); + // signal_end_of_interrupt(); } extern "x86-interrupt" fn null_interrupt_handler() { @@ -86,7 +88,7 @@ extern "x86-interrupt" fn timer_handler() { signal_end_of_interrupt(); } -fn idt_init() { +pub fn idt_init() { unsafe { let idt_size = core::mem::size_of::() * 256; { @@ -101,8 +103,6 @@ fn idt_init() { idt_set_gate(num, null_interrupt_handler as usize); } - exceptions::set_exceptions(); - idt_set_gate(InterruptIndex::Timer.as_u8(), timer_handler as usize); idt_set_gate(0x80, syscall as usize); @@ -114,7 +114,7 @@ fn idt_init() { } pub fn signal_end_of_interrupt() { - apic::APIC.end_of_interrupt(); + APIC.end_of_interrupt(); } #[naked] @@ -146,12 +146,11 @@ pub extern "C" fn syscall_handler(_rdi: u64, _rsi: u64, rdx: u64, rcx: u64) { crate::print!("{message}"); } -pub fn init() { - crate::drivers::acpi::init_acpi(); - - idt_init(); - +pub fn enable_interrupts() { unsafe { + #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] core::arch::asm!("sti"); + + // TODO: arm and riscv stuff } } diff --git a/src/arch/x86_64/mod.rs b/src/arch/x86_64/mod.rs index 30aa24b..c60039c 100644 --- a/src/arch/x86_64/mod.rs +++ b/src/arch/x86_64/mod.rs @@ -1,3 +1,4 @@ +pub mod gdt; pub mod interrupts; pub mod io; pub mod stack_trace; diff --git a/src/arch/x86_64/stack_trace.rs b/src/arch/x86_64/stack_trace.rs index 9bec98a..de3e8c6 100644 --- a/src/arch/x86_64/stack_trace.rs +++ b/src/arch/x86_64/stack_trace.rs @@ -1,5 +1,7 @@ use alloc::{borrow::ToOwned, string::String, vec::Vec}; +use crate::drivers::fs::vfs::vfs_open; + // use crate::drivers::fs::vfs::VfsFileSystem; #[repr(C)] @@ -44,56 +46,58 @@ pub fn print_stack_trace(max_frames: usize, rbp: u64) { } fn get_function_name(function_address: u64) -> Result<(String, u64), ()> { - return Err(()); + // TODO: dont rely on initramfs being mounted at / + let mut symbols_fd = vfs_open("/symbols.table")?; - // let symbols_fd = (*crate::drivers::fs::initramfs::INITRAMFS).open("/symbols.table")?; + let symbols_table_bytes = symbols_fd.ops.open( + 0, + crate::drivers::fs::vfs::UserCred { uid: 0, gid: 0 }, + symbols_fd.as_ptr(), + )?; + let symbols_table = core::str::from_utf8(&symbols_table_bytes).ok().ok_or(())?; - // let symbols_table_bytes = 
symbols_fd.read()?; - // let symbols_table = core::str::from_utf8(&symbols_table_bytes).ok().ok_or(())?; + let mut previous_symbol: Option<(&str, u64)> = None; - // let mut previous_symbol: Option<(&str, u64)> = None; + let symbols_table_lines: Vec<&str> = symbols_table.lines().collect(); - // let symbols_table_lines: Vec<&str> = symbols_table.lines().collect(); + for (i, line) in symbols_table_lines.iter().enumerate() { + let line_parts: Vec<&str> = line.splitn(2, ' ').collect(); - // for (i, line) in symbols_table_lines.iter().enumerate() { - // let line_parts: Vec<&str> = line.splitn(2, ' ').collect(); + if line_parts.len() < 2 { + continue; + } - // if line_parts.len() < 2 { - // continue; - // } + let (address, function_name) = ( + u64::from_str_radix(line_parts[0], 16).ok().ok_or(())?, + line_parts[1], + ); - // let (address, function_name) = ( - // u64::from_str_radix(line_parts[0], 16).ok().ok_or(())?, - // line_parts[1], - // ); + if address == function_address { + return Ok((function_name.to_owned(), 0)); + } - // if address == function_address { - // return Ok((function_name.to_owned(), 0)); - // } + if i == symbols_table_lines.len() - 1 { + return Ok((function_name.to_owned(), function_address - address)); + } - // if i == symbols_table_lines.len() - 1 { - // return Ok((function_name.to_owned(), function_address - address)); - // } + if i == 0 { + if function_address < address { + return Err(()); + } - // if i == 0 { - // if function_address < address { - // return Err(()); - // } + previous_symbol = Some((function_name, address)); + continue; + } - // previous_symbol = Some((function_name, address)); - // continue; - // } + if let Some(prev_symbol) = previous_symbol { + if function_address > prev_symbol.1 && function_address < address { + // function is previous symbol + return Ok((prev_symbol.0.to_owned(), address - prev_symbol.1)); + } + } - // if function_address > previous_symbol.unwrap().1 && function_address < address { - // // function is previous symbol - // return Ok(( - // previous_symbol.unwrap().0.to_owned(), - // address - previous_symbol.unwrap().1, - // )); - // } + previous_symbol = Some((function_name, address)); + } - // previous_symbol = Some((function_name, address)); - // } - - // unreachable!(); + unreachable!(); } diff --git a/src/arch/x86_64/x86_64-unknown-none.json b/src/arch/x86_64/x86_64-unknown-none.json index ea72956..74308ba 100644 --- a/src/arch/x86_64/x86_64-unknown-none.json +++ b/src/arch/x86_64/x86_64-unknown-none.json @@ -1,6 +1,6 @@ { "cpu": "x86-64", - "data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128", + "data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", "llvm-target": "x86_64-unknown-none", "target-endian": "little", "target-pointer-width": "64", diff --git a/src/drivers/fs/devfs.rs b/src/drivers/fs/devfs.rs index 1052ead..f1593ea 100644 --- a/src/drivers/fs/devfs.rs +++ b/src/drivers/fs/devfs.rs @@ -6,6 +6,7 @@ pub enum DeviceType { BlockDevice = 1, } +#[allow(unused)] pub struct Device { typ: DeviceType, block_size: usize, diff --git a/src/drivers/fs/fat.rs b/src/drivers/fs/fat.rs index 599fb67..66a7d7d 100755 --- a/src/drivers/fs/fat.rs +++ b/src/drivers/fs/fat.rs @@ -567,38 +567,6 @@ impl FsOps for FatFs { ) -> super::vfs::VNode { todo!("FAT VGET"); } - - // fn open(&self, path: &str) -> Result, ()> { - // let path_componenets: Vec<&str> = path.trim_start_matches('/').split('/').collect(); - // let mut current_cluster = match self.fat_type { - // FatType::Fat32(ebpb) => 
ebpb.root_dir_cluster as usize, - // _ => self.sector_to_cluster( - // self.bpb.reserved_sectors as usize - // + (self.bpb.fat_count as usize * self.sectors_per_fat), - // ), - // }; - - // for path in path_componenets { - // let file_entry: FileEntry = self.find_entry_in_directory(current_cluster, path)?; - - // if file_entry.attributes == FileEntryAttributes::Directory as u8 { - // current_cluster = (((file_entry.high_first_cluster_number as u32) << 16) - // | file_entry.low_first_cluster_number as u32) - // as usize; - // } else { - // return Ok(Box::new(FatFile { - // fat_fs: self, - // file_entry, - // })); - // } - // } - - // return Err(()); - // } - - // fn read_dir(&self, _path: &str) -> Result, ()> { - // unimplemented!(); - // } } enum File { @@ -859,77 +827,9 @@ impl<'a> VNodeOperations for File { } struct FatFile { - // fat_fs: &'a FatFs, file_entry: FileEntry, } -// impl<'a> VfsFile for FatFile<'a> { -// fn read(&self) -> Result, ()> { -// let mut file: Vec = Vec::with_capacity(self.file_entry.file_size as usize); -// let mut file_ptr_index = 0; - -// let mut cluster = ((self.file_entry.high_first_cluster_number as u32) << 16) -// | self.file_entry.low_first_cluster_number as u32; -// let cluster_size = self.fat_fs.cluster_size; - -// let mut copied_bytes = 0; - -// loop { -// let cluster_data = self.fat_fs.read_cluster(cluster as usize)?; - -// let remaining = self.file_entry.file_size as usize - copied_bytes; -// let to_copy = if remaining > cluster_size { -// cluster_size -// } else { -// remaining -// }; - -// unsafe { -// core::ptr::copy_nonoverlapping( -// cluster_data.as_ptr(), -// file.as_mut_ptr().add(file_ptr_index), -// to_copy, -// ); - -// file.set_len(file.len() + to_copy); -// } - -// file_ptr_index += cluster_size; - -// copied_bytes += to_copy; - -// cluster = self.fat_fs.get_next_cluster(cluster as usize); - -// match self.fat_fs.fat_type { -// FatType::Fat12(_) => { -// if cluster >= EOC_12 { -// break; -// } -// } -// FatType::Fat16(_) => { -// if cluster >= EOC_16 { -// break; -// } -// } -// FatType::Fat32(_) => { -// if cluster >= EOC_32 { -// break; -// } -// } -// } -// } - -// return Ok(Arc::from(file)); -// } -// } - struct FatDirectory { - // fat_fs: &'a FatFs, directory_cluster: usize, } - -// impl<'a> VfsDirectory for FatDirectory<'a> { -// fn list_files(&self) -> Result]>, ()> { -// unimplemented!(); -// } -// } diff --git a/src/drivers/fs/initramfs/chunk_reader.rs b/src/drivers/fs/initramfs/chunk_reader.rs new file mode 100644 index 0000000..37bea5d --- /dev/null +++ b/src/drivers/fs/initramfs/chunk_reader.rs @@ -0,0 +1,142 @@ +use alloc::borrow::Cow; +use alloc::vec::Vec; +use core::ops::Index; +use core::ops::{Range, RangeFrom}; + +const HEADER_SIZE: usize = 2; + +struct Chunk<'a> { + data: Cow<'a, [u8]>, +} + +impl Chunk<'_> { + fn header(&self) -> u16 { + u16::from_le_bytes(self.data[0..HEADER_SIZE].try_into().unwrap()) + } + + fn len(&self) -> usize { + self.header() as usize & 0x7FFF + } + + fn is_compressed(&self) -> bool { + self.header() & 0x8000 == 0 + } + + fn decompress(&mut self, decompressor: &dyn Fn(&[u8]) -> Result, ()>) { + if self.is_compressed() { + let decompressed_data = decompressor(&self.data[HEADER_SIZE..]).unwrap(); + + let header = decompressed_data.len() as u16 | 0x8000; + + let data = [header.to_le_bytes().to_vec(), decompressed_data].concat(); + + self.data = Cow::Owned(data); + } + } +} + +impl Index for Chunk<'_> { + type Output = u8; + + fn index(&self, index: usize) -> &Self::Output { + &self.data[index] + } +} 
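// --- Editorial sketch, not part of the patch: the Chunk accessors above decode a
// SquashFS metadata-block header, a little-endian u16 in which bit 0x8000 being SET
// marks the block as stored uncompressed and the low 15 bits give its stored size
// (this matches header()/len()/is_compressed() above). A self-contained illustration
// of that decoding, with a hypothetical helper name:
fn decode_metadata_header(raw: [u8; 2]) -> (usize, bool) {
    let header = u16::from_le_bytes(raw);
    let stored_len = (header & 0x7FFF) as usize; // low 15 bits: size of the stored data
    let is_compressed = header & 0x8000 == 0;    // bit 15 clear: payload is compressed
    (stored_len, is_compressed)
}
// e.g. decode_metadata_header([0x0C, 0x80]) == (12, false): a 12-byte block stored uncompressed.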
+ +impl Index> for Chunk<'_> { + type Output = [u8]; + + fn index(&self, index: Range) -> &Self::Output { + &self.data[index] + } +} + +impl Index> for Chunk<'_> { + type Output = [u8]; + + fn index(&self, index: RangeFrom) -> &Self::Output { + &self.data[index] + } +} + +pub struct ChunkReader<'a, F> { + chunks: Vec>, + decompressor: F, +} + +impl<'a, F: Fn(&[u8]) -> Result, ()>> ChunkReader<'a, F> { + pub fn new(data: &'a [u8], decompressor: F) -> Self { + let mut chunks: Vec> = Vec::new(); + + let mut offset = 0; + loop { + if offset == data.len() { + break; + } + + let length = + (u16::from_le_bytes(data[offset..offset + HEADER_SIZE].try_into().unwrap()) + & 0x7FFF) as usize + + HEADER_SIZE; + + chunks.push(Chunk { + data: Cow::Borrowed(&data[offset..offset + length]), + }); + + offset += length; + } + + Self { + chunks, + decompressor, + } + } + + pub fn get_slice(&mut self, mut chunk: u64, mut offset: u16, size: usize) -> Vec { + // handle cases where the chunks arent aligned to CHUNK_SIZE (they're compressed and are doing stupid things) + { + let mut chunk_idx = 0; + let mut total_length = 0; + + while total_length != chunk { + chunk_idx += 1; + total_length += (self.chunks[0].len() as usize + HEADER_SIZE) as u64; + } + + chunk = chunk_idx; + } + + let mut chunks_to_read = 1; + { + let mut available_bytes = { + self.chunks[chunk as usize].decompress(&self.decompressor); + self.chunks[chunk as usize][offset as usize..].len() + }; + + while available_bytes < size { + self.chunks[chunk as usize + chunks_to_read].decompress(&self.decompressor); + available_bytes += self.chunks[chunk as usize + chunks_to_read].len(); + chunks_to_read += 1; + } + } + + let mut data = Vec::new(); + + for i in chunk as usize..chunk as usize + chunks_to_read { + self.chunks[i].decompress(&self.decompressor); + + let block_start = offset as usize + HEADER_SIZE; + let mut block_end = self.chunks[i].len() + HEADER_SIZE; + + if (block_end - block_start) > size { + block_end = block_start + size; + } + + data.extend(self.chunks[i][block_start..block_end].into_iter()); + + offset = 0; + } + + data + } +} diff --git a/src/drivers/fs/initramfs/compressors/mod.rs b/src/drivers/fs/initramfs/compressors/mod.rs deleted file mode 100644 index abcab22..0000000 --- a/src/drivers/fs/initramfs/compressors/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod gzip; diff --git a/src/drivers/fs/initramfs/mod.rs b/src/drivers/fs/initramfs/mod.rs index be346fd..1f6e9c0 100755 --- a/src/drivers/fs/initramfs/mod.rs +++ b/src/drivers/fs/initramfs/mod.rs @@ -1,22 +1,11 @@ -pub mod compressors; +mod chunk_reader; +mod superblock; -use core::{ - fmt::{self, Debug}, - mem::MaybeUninit, - ops::{Index, Range, RangeFrom, RangeFull}, -}; +use core::{fmt::Debug, mem::MaybeUninit}; -use alloc::{borrow::Cow, boxed::Box, string::String, sync::Arc, vec::Vec}; +use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec}; use limine::ModuleRequest; -use crate::{ - libs::{ - cell::Cell, - math::{ceil, floor}, - }, - println, -}; - use super::vfs::{FsOps, VNode, VNodeOperations, VNodeType}; pub static MODULE_REQUEST: ModuleRequest = ModuleRequest::new(0); @@ -65,7 +54,6 @@ pub fn init() -> Squashfs<'static> { #[repr(u8)] #[derive(Clone, Copy)] enum Table { - // Metadata table Inode, Dir, Frag, @@ -74,153 +62,15 @@ enum Table { Xattr, } -const CHUNK_SIZE: usize = 8192; -const HEADER_SIZE: usize = 2; - -struct Chunk<'a> { - data: Cow<'a, [u8]>, -} - -impl Chunk<'_> { - fn header(&self) -> u16 { - 
u16::from_le_bytes(self.data[0..HEADER_SIZE].try_into().unwrap()) - } - - fn len(&self) -> usize { - self.header() as usize & 0x7FFF - } - - fn is_compressed(&self) -> bool { - self.header() & 0x8000 == 0 - } - - fn decompress(&mut self, decompressor: &dyn Fn(&[u8]) -> Result, ()>) { - if self.is_compressed() { - let decompressed_data = decompressor(&self.data[HEADER_SIZE..]).unwrap(); - - let header = decompressed_data.len() as u16 | 0x8000; - - let data = [header.to_le_bytes().to_vec(), decompressed_data].concat(); - - self.data = Cow::Owned(data); - } - } -} - -impl Index for Chunk<'_> { - type Output = u8; - - fn index(&self, index: usize) -> &Self::Output { - &self.data[index] - } -} - -impl Index> for Chunk<'_> { - type Output = [u8]; - - fn index(&self, index: Range) -> &Self::Output { - &self.data[index] - } -} - -impl Index> for Chunk<'_> { - type Output = [u8]; - - fn index(&self, index: RangeFrom) -> &Self::Output { - &self.data[index] - } -} - -struct ChunkReader<'a, F> { - chunks: Vec>, - decompressor: F, -} - -impl<'a, F: Fn(&[u8]) -> Result, ()>> ChunkReader<'a, F> { - fn new(data: &'a [u8], decompressor: F) -> Self { - let mut chunks: Vec> = Vec::new(); - - let mut offset = 0; - loop { - if offset == data.len() { - break; - } - - let length = - (u16::from_le_bytes(data[offset..offset + HEADER_SIZE].try_into().unwrap()) - & 0x7FFF) as usize - + HEADER_SIZE; - - chunks.push(Chunk { - data: Cow::Borrowed(&data[offset..offset + length]), - }); - - offset += length; - } - - Self { - chunks, - decompressor, - } - } - - pub fn get_slice(&mut self, mut chunk: u64, mut offset: u16, size: usize) -> Vec { - // handle cases where the chunks arent aligned to CHUNK_SIZE (they're compressed and are doing stupid things) - { - let mut chunk_idx = 0; - let mut total_length = 0; - - while total_length != chunk { - chunk_idx += 1; - total_length += (self.chunks[0].len() as usize + HEADER_SIZE) as u64; - } - - chunk = chunk_idx; - } - - let mut chunks_to_read = 1; - { - let mut available_bytes = { - self.chunks[chunk as usize].decompress(&self.decompressor); - self.chunks[chunk as usize][offset as usize..].len() - }; - - while available_bytes < size { - self.chunks[chunk as usize + chunks_to_read].decompress(&self.decompressor); - available_bytes += self.chunks[chunk as usize + chunks_to_read].len(); - chunks_to_read += 1; - } - } - - let mut data = Vec::new(); - - for i in chunk as usize..chunk as usize + chunks_to_read { - self.chunks[i].decompress(&self.decompressor); - - let block_start = offset as usize + HEADER_SIZE; - let mut block_end = self.chunks[i].len() + HEADER_SIZE; - - if (block_end - block_start) > size { - block_end = block_start + size; - } - - data.extend(self.chunks[i][block_start..block_end].into_iter()); - - offset = 0; - } - - data - } -} - #[repr(C)] // #[derive(Debug)] pub struct Squashfs<'a> { - pub superblock: SquashfsSuperblock, + pub superblock: superblock::SquashfsSuperblock, start: *mut u8, + decompressor: Box Result, ()>>, data_table: &'a [u8], - inode_table: Cell Result, ()>>>>, - directory_table: Cell Result, ()>>>>, + inode_table: chunk_reader::ChunkReader<'a, Box Result, ()>>>, + directory_table: chunk_reader::ChunkReader<'a, Box Result, ()>>>, fragment_table: Option<&'a [u8]>, export_table: Option<&'a [u8]>, id_table: &'a [u8], @@ -236,19 +86,22 @@ impl Squashfs<'_> { let squashfs_data: &[u8] = unsafe { core::slice::from_raw_parts(ptr, length) }; - let superblock = SquashfsSuperblock::new(squashfs_data)?; + let superblock = 
superblock::SquashfsSuperblock::new(squashfs_data)?; - let decompressor = match superblock.compressor { - SquashfsCompressionType::Gzip => Box::new(compressors::gzip::uncompress_data), + let decompressor = match superblock.compressor() { + superblock::SquashfsCompressionType::Gzip => { + Box::new(crate::libs::gzip::uncompress_data) + } compressor => panic!("Unsupported SquashFS decompressor {compressor:?}"), }; // The easy part with none of this metadata nonesense - let data_table = &squashfs_data - [core::mem::size_of::()..superblock.inode_table as usize]; + let data_table = &squashfs_data[core::mem::size_of::() + ..superblock.inode_table as usize]; let mut tables: Vec<(Table, u64)> = Vec::new(); + // todo: there's probably a better way to do this tables.push((Table::Inode, superblock.inode_table)); tables.push((Table::Dir, superblock.dir_table)); @@ -267,10 +120,10 @@ impl Squashfs<'_> { } let mut inode_table: MaybeUninit< - ChunkReader<'static, Box Result, ()>>>, + chunk_reader::ChunkReader<'static, Box Result, ()>>>, > = MaybeUninit::uninit(); let mut directory_table: MaybeUninit< - ChunkReader<'static, Box Result, ()>>>, + chunk_reader::ChunkReader<'static, Box Result, ()>>>, > = MaybeUninit::uninit(); let mut fragment_table = None; let mut export_table = None; @@ -286,12 +139,16 @@ impl Squashfs<'_> { match table { Table::Inode => { - inode_table = - MaybeUninit::new(ChunkReader::new(whole_table, decompressor.clone())); + inode_table = MaybeUninit::new(chunk_reader::ChunkReader::new( + whole_table, + decompressor.clone(), + )); } Table::Dir => { - directory_table = - MaybeUninit::new(ChunkReader::new(whole_table, decompressor.clone())); + directory_table = MaybeUninit::new(chunk_reader::ChunkReader::new( + whole_table, + decompressor.clone(), + )); } Table::Frag => { fragment_table = Some(whole_table); @@ -305,9 +162,10 @@ impl Squashfs<'_> { return Ok(Squashfs { superblock, start: ptr, + decompressor, data_table, - inode_table: Cell::new(unsafe { inode_table.assume_init() }), - directory_table: Cell::new(unsafe { directory_table.assume_init() }), + inode_table: unsafe { inode_table.assume_init() }, + directory_table: unsafe { directory_table.assume_init() }, fragment_table, export_table, id_table, @@ -329,11 +187,8 @@ impl Squashfs<'_> { fn read_inode(&mut self, inode: u64) -> Inode { let (inode_block, inode_offset) = self.get_inode_block_offset(inode); - // println!("inode block: {inode_block} inode offset: {inode_offset}"); - let file_type = InodeFileType::from(u16::from_le_bytes( self.inode_table - .get_mut() .get_slice(inode_block, inode_offset, 2) .try_into() .unwrap(), @@ -346,13 +201,9 @@ impl Squashfs<'_> { inode_type => unimplemented!("Inode type {inode_type:?}"), }; - let inode_bytes: &[u8] = - &self - .inode_table - .get_mut() - .get_slice(inode_block, inode_offset, inode_size); - - // println!("{inode_bytes:X?}"); + let inode_bytes: &[u8] = &self + .inode_table + .get_slice(inode_block, inode_offset, inode_size); Inode::from(inode_bytes) } @@ -368,8 +219,6 @@ impl Squashfs<'_> { _ => return Err(()), }; - println!("here"); - let dir_size = match dir { Inode::BasicDirectory(dir) => dir.file_size as usize, Inode::ExtendedDirectory(dir) => dir.file_size as usize, @@ -381,20 +230,10 @@ impl Squashfs<'_> { return Err(()); } - let (mut directory_block, mut directory_offset) = - self.get_inode_block_offset(dir_inode as u64); - - let directory_table_offset = - ((directory_block as usize / 8194) * 8192) + directory_offset as usize; - - // println!("here"); - - // println!("past 
here"); - - // println!("dir_size: {dir_size}"); + let (directory_block, directory_offset) = self.get_inode_block_offset(dir_inode as u64); let mut directory_table_header = { - let bytes: &[u8] = &self.directory_table.get_mut().get_slice( + let bytes: &[u8] = &self.directory_table.get_slice( directory_block, directory_offset, core::mem::size_of::(), @@ -406,44 +245,11 @@ impl Squashfs<'_> { let mut offset = core::mem::size_of::(); let mut i = 0; - println!("looking for {name}"); - loop { - println!( - "{directory_block} {directory_offset} {} {}", - directory_offset as usize + offset, - directory_table_header.start - ); - - // TODO: this is dumb, but it works - if self.directory_table.get().chunks[directory_block as usize / 8194].len() - - HEADER_SIZE - < directory_offset as usize + offset as usize - { - directory_block += 8194; - directory_offset = ((offset + directory_offset as usize) - - (self.directory_table.get().chunks[directory_block as usize / 8194].len() - - HEADER_SIZE)) as u16 - - HEADER_SIZE as u16; - offset = 0; - } - - println!( - "{directory_block} {directory_offset} {}", - directory_offset as usize + offset - ); - - // println!( - // "directory table offset: {}", - // directory_table_offset + offset - // ); - if i == directory_table_header.entry_count && offset != dir_size { - // println!("reading next dir"); - //read second table directory_table_header = { - let bytes: &[u8] = &self.directory_table.get_mut().get_slice( + let bytes: &[u8] = &self.directory_table.get_slice( directory_block, directory_offset + offset as u16, core::mem::size_of::(), @@ -452,23 +258,18 @@ impl Squashfs<'_> { DirectoryTableHeader::from(bytes) }; - // println!("{directory_table_header:?}"); - i = 0; offset += core::mem::size_of::(); - // todo!("read next table"); continue; } if offset >= dir_size { - println!("We have reached the end"); - break; } let name_size = u16::from_le_bytes( - self.directory_table.get_mut() + self.directory_table .get_slice( directory_block, directory_offset + (offset as u16 + 6), @@ -480,36 +281,15 @@ impl Squashfs<'_> { // the name is stored off-by-one + 1; - // println!( - // "{:X?}", - // &self.directory_table.get_slice( - // directory_block as usize, - // directory_offset as usize + offset - // ..directory_offset as usize + offset + (8 + name_size), - // ) - // ); - - let directory_entry = - DirectoryTableEntry::from_bytes(&self.directory_table.get_mut().get_slice( - directory_block, - directory_offset + offset as u16, - 8 + name_size, - )); - - println!("{}", directory_entry.name); - println!("{directory_entry:?} {offset} {}", 8 + name_size); + let directory_entry = DirectoryTableEntry::from_bytes(&self.directory_table.get_slice( + directory_block, + directory_offset + offset as u16, + 8 + name_size, + )); offset += 8 + name_size; - // println!("{offset}"); - if directory_entry.name == name { - println!( - "READING: {} {}", - ((directory_table_header.start as usize / (CHUNK_SIZE + HEADER_SIZE)) & 0xFFFF), - directory_entry.offset - ); - let directory_entry_inode = (directory_table_header.start as usize) << 16 | (directory_entry.offset as usize); @@ -546,9 +326,11 @@ impl Squashfs<'_> { let bytes = if metadata_block.0 { &table[2..] 
} else { table }; if table_is_compressed { - match self.superblock.compressor { - SquashfsCompressionType::Gzip => { - buffer.extend_from_slice(&compressors::gzip::uncompress_data(bytes).unwrap()); + match self.superblock.compressor() { + superblock::SquashfsCompressionType::Gzip => { + buffer.extend_from_slice( + &crate::libs::gzip::uncompress_data(bytes).unwrap_or(bytes.to_vec()), + ); } _ => { crate::println!("Unsupported compression type") @@ -563,14 +345,14 @@ impl Squashfs<'_> { } impl<'a> FsOps for Squashfs<'a> { - fn mount(&mut self, path: &str, data: &mut *mut u8, vfsp: *const super::vfs::Vfs) { + fn mount(&mut self, _path: &str, data: &mut *mut u8, _vfspp: *const super::vfs::Vfs) { // STUB // not recommended:tm: *data = core::ptr::addr_of!(*self) as *mut u8; } - fn unmount(&mut self, vfsp: *const super::vfs::Vfs) { + fn unmount(&mut self, _vfsp: *const super::vfs::Vfs) { // STUB } @@ -591,53 +373,27 @@ impl<'a> FsOps for Squashfs<'a> { } } - fn fid(&mut self, path: &str, vfsp: *const super::vfs::Vfs) -> Option { + fn fid(&mut self, _path: &str, _vfspp: *const super::vfs::Vfs) -> Option { todo!(); } - fn statfs(&mut self, vfsp: *const super::vfs::Vfs) -> super::vfs::StatFs { + fn statfs(&mut self, _vfsp: *const super::vfs::Vfs) -> super::vfs::StatFs { todo!(); } - fn sync(&mut self, vfsp: *const super::vfs::Vfs) { + fn sync(&mut self, _vfsp: *const super::vfs::Vfs) { todo!(); } - fn vget(&mut self, fid: super::vfs::FileId, vfsp: *const super::vfs::Vfs) -> super::vfs::VNode { + fn vget( + &mut self, + _fid: super::vfs::FileId, + _vfsp: *const super::vfs::Vfs, + ) -> super::vfs::VNode { todo!(); } } -// impl<'a> VfsFileSystem for Squashfs<'a> { -// fn open(&self, path: &str) -> Result, ()> { -// let path_components: Vec<&str> = path.trim_start_matches('/').split('/').collect(); -// let mut current_dir = self.read_root_dir(); - -// for (i, &part) in path_components.iter().enumerate() { -// let file = current_dir.find(part).ok_or(())?; - -// match file { -// Inode::BasicDirectory(dir) => { -// current_dir = dir; -// } -// Inode::BasicFile(file) => { -// if i < path_components.len() - 1 { -// return Err(()); -// } - -// return Ok(Box::new(file)); -// } -// } -// } - -// return Err(()); -// } - -// fn read_dir(&self, _path: &str) -> Result, ()> { -// unimplemented!() -// } -// } - #[derive(Clone, Copy, Debug)] enum Inode { BasicFile(BasicFileInode), @@ -663,7 +419,12 @@ impl From<&[u8]> for Inode { } impl VNodeOperations for Inode { - fn open(&mut self, f: u32, c: super::vfs::UserCred, vp: *const VNode) -> Result, ()> { + fn open( + &mut self, + _f: u32, + _c: super::vfs::UserCred, + vp: *const VNode, + ) -> Result, ()> { let squashfs = unsafe { (*(*vp).parent).data.cast::() }; match self { @@ -689,14 +450,7 @@ impl VNodeOperations for Inode { (*squashfs).fragment_table.unwrap(), ( false, - Some(false), // Some( - // !self - // .header - // .squashfs - // .superblock - // .features() - // .uncompressed_fragments, - // ), + Some(!(*squashfs).superblock.features().uncompressed_fragments), ), ); @@ -753,57 +507,60 @@ impl VNodeOperations for Inode { }, _ => panic!("Tried to open non-file"), } - - todo!() } - fn close(&mut self, f: u32, c: super::vfs::UserCred, vp: *const VNode) { + fn close(&mut self, _f: u32, _c: super::vfs::UserCred, _vp: *const VNode) { todo!() } fn rdwr( &mut self, - uiop: *const super::vfs::UIO, - direction: super::vfs::IODirection, - f: u32, - c: super::vfs::UserCred, - vp: *const VNode, + _uiop: *const super::vfs::UIO, + _direction: super::vfs::IODirection, + _f: 
u32, + _c: super::vfs::UserCred, + _vp: *const VNode, ) { todo!() } - fn ioctl(&mut self, com: u32, d: *mut u8, f: u32, c: super::vfs::UserCred, vp: *const VNode) { + fn ioctl( + &mut self, + _com: u32, + _d: *mut u8, + _f: u32, + _c: super::vfs::UserCred, + _vp: *const VNode, + ) { todo!() } - fn select(&mut self, w: super::vfs::IODirection, c: super::vfs::UserCred, vp: *const VNode) { + fn select(&mut self, _w: super::vfs::IODirection, _c: super::vfs::UserCred, _vp: *const VNode) { todo!() } - fn getattr(&mut self, c: super::vfs::UserCred, vp: *const VNode) -> super::vfs::VAttr { + fn getattr(&mut self, _c: super::vfs::UserCred, _vp: *const VNode) -> super::vfs::VAttr { todo!() } - fn setattr(&mut self, va: super::vfs::VAttr, c: super::vfs::UserCred, vp: *const VNode) { + fn setattr(&mut self, _va: super::vfs::VAttr, _c: super::vfs::UserCred, _vp: *const VNode) { todo!() } - fn access(&mut self, m: u32, c: super::vfs::UserCred, vp: *const VNode) { + fn access(&mut self, _m: u32, _c: super::vfs::UserCred, _vp: *const VNode) { todo!() } fn lookup( &mut self, nm: &str, - c: super::vfs::UserCred, + _c: super::vfs::UserCred, vp: *const VNode, ) -> Result { let squashfs = unsafe { (*(*vp).parent).data.cast::() }; match self { Inode::BasicDirectory(_) | Inode::ExtendedDirectory(_) => unsafe { - println!("Looking for {nm}"); - let inode = (*squashfs).find_entry_in_directory(*self, nm)?; let vnode_type = match inode { Inode::BasicDirectory(_) | Inode::ExtendedDirectory(_) => VNodeType::Directory, @@ -831,88 +588,93 @@ impl VNodeOperations for Inode { fn create( &mut self, - nm: &str, - va: super::vfs::VAttr, - e: u32, - m: u32, - c: super::vfs::UserCred, - vp: *const VNode, + _nm: &str, + _va: super::vfs::VAttr, + _e: u32, + _m: u32, + _c: super::vfs::UserCred, + _vp: *const VNode, ) -> Result { todo!() } fn link( &mut self, - target_dir: *mut super::vfs::VNode, - target_name: &str, - c: super::vfs::UserCred, - vp: *const VNode, + _target_dir: *mut super::vfs::VNode, + _target_name: &str, + _c: super::vfs::UserCred, + _vp: *const VNode, ) { todo!() } fn rename( &mut self, - nm: &str, - target_dir: *mut super::vfs::VNode, - target_name: &str, - c: super::vfs::UserCred, - vp: *const VNode, + _nm: &str, + _target_dir: *mut super::vfs::VNode, + _target_name: &str, + _c: super::vfs::UserCred, + _vp: *const VNode, ) { todo!() } fn mkdir( &mut self, - nm: &str, - va: super::vfs::VAttr, - c: super::vfs::UserCred, - vp: *const VNode, + _nm: &str, + _va: super::vfs::VAttr, + _c: super::vfs::UserCred, + _vp: *const VNode, ) -> Result { todo!() } - fn readdir(&mut self, uiop: *const super::vfs::UIO, c: super::vfs::UserCred, vp: *const VNode) { + fn readdir( + &mut self, + _uiop: *const super::vfs::UIO, + _c: super::vfs::UserCred, + _vp: *const VNode, + ) { todo!() } fn symlink( &mut self, - link_name: &str, - va: super::vfs::VAttr, - target_name: &str, - c: super::vfs::UserCred, - vp: *const VNode, + _link_name: &str, + _va: super::vfs::VAttr, + _target_name: &str, + _c: super::vfs::UserCred, + _vp: *const VNode, ) { todo!() } fn readlink( &mut self, - uiop: *const super::vfs::UIO, - c: super::vfs::UserCred, - vp: *const VNode, + _uiop: *const super::vfs::UIO, + _c: super::vfs::UserCred, + _vp: *const VNode, ) { todo!() } - fn fsync(&mut self, c: super::vfs::UserCred, vp: *const VNode) { + fn fsync(&mut self, _c: super::vfs::UserCred, _vp: *const VNode) { todo!() } - fn inactive(&mut self, c: super::vfs::UserCred, vp: *const VNode) { + fn inactive(&mut self, _c: super::vfs::UserCred, _vp: *const VNode) { 
todo!() } - fn bmap(&mut self, block_number: u32, bnp: (), vp: *const VNode) -> super::vfs::VNode { + fn bmap(&mut self, _block_number: u32, _bnp: (), _vp: *const VNode) -> super::vfs::VNode { todo!() } - fn strategy(&mut self, bp: (), vp: *const VNode) { + fn strategy(&mut self, _bp: (), _vp: *const VNode) { todo!() } - fn bread(&mut self, block_number: u32, vp: *const VNode) -> Arc<[u8]> { + fn bread(&mut self, _block_number: u32, _vp: *const VNode) -> Arc<[u8]> { todo!() } } @@ -940,11 +702,9 @@ inode_enum_try_into!(BasicFileInode, BasicFile); inode_enum_try_into!(BasicDirectoryInode, BasicDirectory); inode_enum_try_into!(ExtendedDirectoryInode, ExtendedDirectory); -// TODO: can we remove the dependence on squahsfs?? #[repr(C)] -#[derive(Clone, Copy)] +#[derive(Clone, Copy, Debug)] struct InodeHeader { - // squashfs: &'a Squashfs<'a>, file_type: InodeFileType, _reserved: [u16; 3], mtime: u32, @@ -967,17 +727,6 @@ impl InodeHeader { } } -impl Debug for InodeHeader { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("InodeHeader") - .field("file_type", &self.file_type) - .field("_reserved", &self._reserved) - .field("mtime", &self.mtime) - .field("inode_num", &self.inode_num) - .finish() - } -} - #[repr(C)] #[derive(Clone, Copy, Debug)] struct BasicDirectoryInode { @@ -1137,101 +886,6 @@ impl BasicFileInode { } } -// impl<'a> VfsFile for BasicFileInode<'a> { -// fn read(&self) -> Result, ()> { -// // TODO: is this really how you're supposed to do this? -// let mut block_data: Vec = Vec::with_capacity(self.file_size as usize); - -// let data_table: Vec; - -// let block_offset = if self.frag_idx == u32::MAX { -// data_table = self.header.squashfs.get_decompressed_table( -// self.header.squashfs.data_table, -// ( -// false, -// Some( -// !self -// .header -// .squashfs -// .superblock -// .features() -// .uncompressed_data_blocks, -// ), -// ), -// ); - -// self.block_offset as usize -// } else { -// // Tail end packing -// let fragment_table = self.header.squashfs.get_decompressed_table( -// self.header.squashfs.fragment_table.unwrap(), -// ( -// false, -// Some(false), // Some( -// // !self -// // .header -// // .squashfs -// // .superblock -// // .features() -// // .uncompressed_fragments, -// // ), -// ), -// ); - -// let fragment_pointer = (self.header.squashfs.start as u64 -// + u64::from_le_bytes( -// fragment_table[self.frag_idx as usize..(self.frag_idx + 8) as usize] -// .try_into() -// .unwrap(), -// )) as *mut u8; - -// // build array since fragment_pointer is not guaranteed to be 0x02 aligned -// // We add two since fragment_pointer points to the beginning of the fragment block, -// // Which is a metadata block, and we get the size, but that excludes the two header bytes, -// // And since we are building the array due to unaligned pointer shenanigans we need to -// // include the header bytes otherwise we are short by two bytes -// let fragment_block_size = unsafe { -// u16::from_le(core::ptr::read_unaligned(fragment_pointer as *mut u16)) & 0x7FFF -// } + 2; - -// let mut fragment_block_raw = Vec::new(); -// for i in 0..fragment_block_size as usize { -// fragment_block_raw -// .push(unsafe { core::ptr::read_unaligned(fragment_pointer.add(i)) }) -// } - -// let fragment_block = self -// .header -// .squashfs -// .get_decompressed_table(&fragment_block_raw, (true, None)); - -// let fragment_start = u64::from_le_bytes(fragment_block[0..8].try_into().unwrap()); -// let fragment_size = u32::from_le_bytes(fragment_block[8..12].try_into().unwrap()); -// let 
fragment_compressed = fragment_size & 1 << 24 == 0; -// let fragment_size = fragment_size & 0xFEFFFFFF; - -// let data_table_raw = unsafe { -// core::slice::from_raw_parts( -// (self.header.squashfs.start as u64 + fragment_start) as *mut u8, -// fragment_size as usize, -// ) -// .to_vec() -// }; - -// data_table = self -// .header -// .squashfs -// .get_decompressed_table(&data_table_raw, (false, Some(fragment_compressed))); - -// self.block_offset as usize -// }; - -// block_data.extend(&data_table[block_offset..(block_offset + self.file_size as usize)]); - -// return Ok(Arc::from(block_data)); -// } -// } - #[repr(C)] #[derive(Debug)] struct DirectoryTableHeader { @@ -1323,175 +977,3 @@ impl From for InodeFileType { } } } - -#[repr(u16)] -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -enum SquashfsCompressionType { - Gzip = 1, - Lzma = 2, - Lzo = 3, - Xz = 4, - Lz4 = 5, - Zstd = 6, -} - -#[repr(u16)] -enum SquashfsFlags { - UncompressedInodes = 0x0001, - UncompressedDataBlocks = 0x0002, - Reserved = 0x0004, - UncompressedFragments = 0x0008, - UnusedFragments = 0x0010, - FragmentsAlwaysPresent = 0x0020, - DeduplicatedData = 0x0040, - PresentNFSTable = 0x0080, - UncompressedXattrs = 0x0100, - NoXattrs = 0x0200, - PresentCompressorOptions = 0x0400, - UncompressedIDTable = 0x0800, -} - -#[allow(dead_code)] -#[derive(Debug)] -struct SquashfsFeatures { - uncompressed_inodes: bool, - uncompressed_data_blocks: bool, - _reserved: bool, - uncompressed_fragments: bool, - unused_fragments: bool, - fragments_always_present: bool, - deduplicated_data: bool, - nfs_table_present: bool, - uncompressed_xattrs: bool, - no_xattrs: bool, - compressor_options_present: bool, - uncompressed_id_table: bool, -} - -impl From for SquashfsCompressionType { - fn from(value: u16) -> Self { - match value { - 1 => Self::Gzip, - 2 => Self::Lzma, - 3 => Self::Lzo, - 4 => Self::Xz, - 5 => Self::Lz4, - 6 => Self::Zstd, - _ => panic!("Unexpected Squashfs compression type!"), - } - } -} - -#[repr(C, packed)] -#[derive(Clone, Copy, Debug)] -pub struct SquashfsSuperblock { - magic: u32, // 0x73717368 - inode_count: u32, // 0x02 - mod_time: u32, // varies - block_size: u32, // 0x20000 - frag_count: u32, // 0x01 - compressor: SquashfsCompressionType, // GZIP - block_log: u16, // 0x11 - flags: u16, // 0xC0 - id_count: u16, // 0x01 - ver_major: u16, // 0x04 - ver_minor: u16, // 0x00 - root_inode: u64, // - bytes_used: u64, // 0x0103 - id_table: u64, // 0x00FB - xattr_table: u64, // 0xFFFFFFFFFFFFFFFF - inode_table: u64, // 0x7B - dir_table: u64, // 0xA4 - frag_table: u64, // 0xD5 - export_table: u64, // 0xED -} - -impl SquashfsSuperblock { - fn new(bytes: &[u8]) -> Result { - let superblock = Self { - magic: u32::from_le_bytes(bytes[0..4].try_into().unwrap()), - inode_count: u32::from_le_bytes(bytes[4..8].try_into().unwrap()), - mod_time: u32::from_le_bytes(bytes[8..12].try_into().unwrap()), - block_size: u32::from_le_bytes(bytes[12..16].try_into().unwrap()), - frag_count: u32::from_le_bytes(bytes[16..20].try_into().unwrap()), - compressor: u16::from_le_bytes(bytes[20..22].try_into().unwrap()).into(), - block_log: u16::from_le_bytes(bytes[22..24].try_into().unwrap()), - flags: u16::from_le_bytes(bytes[24..26].try_into().unwrap()), - id_count: u16::from_le_bytes(bytes[26..28].try_into().unwrap()), - ver_major: u16::from_le_bytes(bytes[28..30].try_into().unwrap()), - ver_minor: u16::from_le_bytes(bytes[30..32].try_into().unwrap()), - root_inode: u64::from_le_bytes(bytes[32..40].try_into().unwrap()), - bytes_used: 
u64::from_le_bytes(bytes[40..48].try_into().unwrap()), - id_table: u64::from_le_bytes(bytes[48..56].try_into().unwrap()), - xattr_table: u64::from_le_bytes(bytes[56..64].try_into().unwrap()), - inode_table: u64::from_le_bytes(bytes[64..72].try_into().unwrap()), - dir_table: u64::from_le_bytes(bytes[72..80].try_into().unwrap()), - frag_table: u64::from_le_bytes(bytes[80..88].try_into().unwrap()), - export_table: u64::from_le_bytes(bytes[88..96].try_into().unwrap()), - }; - - if superblock.magic != 0x73717368 { - return Err(()); - } - - if superblock.ver_major != 4 || superblock.ver_minor != 0 { - return Err(()); - } - - if superblock.block_size > 1048576 { - return Err(()); - } - - if superblock.block_log > 20 { - return Err(()); - } - - if superblock.block_size != (1 << superblock.block_log) { - return Err(()); - } - - if superblock.block_size == 0 { - return Err(()); - } - - if ((superblock.block_size - 1) & superblock.block_size) != 0 { - return Err(()); - } - - return Ok(superblock); - } - - fn features(&self) -> SquashfsFeatures { - let uncompressed_inodes = (self.flags & SquashfsFlags::UncompressedInodes as u16) != 0; - let uncompressed_data_blocks = - (self.flags & SquashfsFlags::UncompressedDataBlocks as u16) != 0; - let _reserved = (self.flags & SquashfsFlags::Reserved as u16) != 0; - let uncompressed_fragments = - (self.flags & SquashfsFlags::UncompressedFragments as u16) != 0; - let unused_fragments = (self.flags & SquashfsFlags::UnusedFragments as u16) != 0; - let fragments_always_present = - (self.flags & SquashfsFlags::FragmentsAlwaysPresent as u16) != 0; - let deduplicated_data = (self.flags & SquashfsFlags::DeduplicatedData as u16) != 0; - let nfs_table_present = (self.flags & SquashfsFlags::PresentNFSTable as u16) != 0; - let uncompressed_xattrs = (self.flags & SquashfsFlags::UncompressedXattrs as u16) != 0; - let no_xattrs = (self.flags & SquashfsFlags::NoXattrs as u16) != 0; - let compressor_options_present = - (self.flags & SquashfsFlags::PresentCompressorOptions as u16) != 0; - let uncompressed_id_table = (self.flags & SquashfsFlags::UncompressedIDTable as u16) != 0; - - return SquashfsFeatures { - uncompressed_inodes, - uncompressed_data_blocks, - _reserved, - uncompressed_fragments, - unused_fragments, - fragments_always_present, - deduplicated_data, - nfs_table_present, - uncompressed_xattrs, - no_xattrs, - compressor_options_present, - uncompressed_id_table, - }; - } -} diff --git a/src/drivers/fs/initramfs/superblock.rs b/src/drivers/fs/initramfs/superblock.rs new file mode 100644 index 0000000..806fda4 --- /dev/null +++ b/src/drivers/fs/initramfs/superblock.rs @@ -0,0 +1,175 @@ +#[repr(u16)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum SquashfsCompressionType { + Gzip = 1, + Lzma = 2, + Lzo = 3, + Xz = 4, + Lz4 = 5, + Zstd = 6, +} + +impl From for SquashfsCompressionType { + fn from(value: u16) -> Self { + match value { + 1 => Self::Gzip, + 2 => Self::Lzma, + 3 => Self::Lzo, + 4 => Self::Xz, + 5 => Self::Lz4, + 6 => Self::Zstd, + _ => panic!("Unexpected Squashfs compression type!"), + } + } +} + +#[repr(u16)] +enum SquashfsFlags { + UncompressedInodes = 0x0001, + UncompressedDataBlocks = 0x0002, + Reserved = 0x0004, + UncompressedFragments = 0x0008, + UnusedFragments = 0x0010, + FragmentsAlwaysPresent = 0x0020, + DeduplicatedData = 0x0040, + PresentNFSTable = 0x0080, + UncompressedXattrs = 0x0100, + NoXattrs = 0x0200, + PresentCompressorOptions = 0x0400, + UncompressedIDTable = 0x0800, +} + +#[allow(dead_code)] +#[derive(Debug)] +pub struct 
SquashfsFeatures { + pub uncompressed_inodes: bool, + pub uncompressed_data_blocks: bool, + _reserved: bool, + pub uncompressed_fragments: bool, + pub unused_fragments: bool, + pub fragments_always_present: bool, + pub deduplicated_data: bool, + pub nfs_table_present: bool, + pub uncompressed_xattrs: bool, + pub no_xattrs: bool, + pub compressor_options_present: bool, + pub uncompressed_id_table: bool, +} + +#[repr(C, packed)] +#[derive(Clone, Copy, Debug)] +pub struct SquashfsSuperblock { + magic: u32, // 0x73717368 + inode_count: u32, // 0x02 + mod_time: u32, // varies + pub block_size: u32, // 0x20000 + frag_count: u32, // 0x01 + compressor: SquashfsCompressionType, // GZIP + block_log: u16, // 0x11 + flags: u16, // 0xC0 + id_count: u16, // 0x01 + ver_major: u16, // 0x04 + ver_minor: u16, // 0x00 + pub root_inode: u64, // + bytes_used: u64, // 0x0103 + pub id_table: u64, // 0x00FB + pub xattr_table: u64, // 0xFFFFFFFFFFFFFFFF + pub inode_table: u64, // 0x7B + pub dir_table: u64, // 0xA4 + pub frag_table: u64, // 0xD5 + pub export_table: u64, // 0xED +} + +impl SquashfsSuperblock { + pub fn new(bytes: &[u8]) -> Result { + let superblock = Self { + magic: u32::from_le_bytes(bytes[0..4].try_into().unwrap()), + inode_count: u32::from_le_bytes(bytes[4..8].try_into().unwrap()), + mod_time: u32::from_le_bytes(bytes[8..12].try_into().unwrap()), + block_size: u32::from_le_bytes(bytes[12..16].try_into().unwrap()), + frag_count: u32::from_le_bytes(bytes[16..20].try_into().unwrap()), + compressor: u16::from_le_bytes(bytes[20..22].try_into().unwrap()).into(), + block_log: u16::from_le_bytes(bytes[22..24].try_into().unwrap()), + flags: u16::from_le_bytes(bytes[24..26].try_into().unwrap()), + id_count: u16::from_le_bytes(bytes[26..28].try_into().unwrap()), + ver_major: u16::from_le_bytes(bytes[28..30].try_into().unwrap()), + ver_minor: u16::from_le_bytes(bytes[30..32].try_into().unwrap()), + root_inode: u64::from_le_bytes(bytes[32..40].try_into().unwrap()), + bytes_used: u64::from_le_bytes(bytes[40..48].try_into().unwrap()), + id_table: u64::from_le_bytes(bytes[48..56].try_into().unwrap()), + xattr_table: u64::from_le_bytes(bytes[56..64].try_into().unwrap()), + inode_table: u64::from_le_bytes(bytes[64..72].try_into().unwrap()), + dir_table: u64::from_le_bytes(bytes[72..80].try_into().unwrap()), + frag_table: u64::from_le_bytes(bytes[80..88].try_into().unwrap()), + export_table: u64::from_le_bytes(bytes[88..96].try_into().unwrap()), + }; + + if superblock.magic != 0x73717368 { + return Err(()); + } + + if superblock.ver_major != 4 || superblock.ver_minor != 0 { + return Err(()); + } + + if superblock.block_size > 1048576 { + return Err(()); + } + + if superblock.block_log > 20 { + return Err(()); + } + + if superblock.block_size != (1 << superblock.block_log) { + return Err(()); + } + + if superblock.block_size == 0 { + return Err(()); + } + + if ((superblock.block_size - 1) & superblock.block_size) != 0 { + return Err(()); + } + + return Ok(superblock); + } + + pub fn compressor(&self) -> SquashfsCompressionType { + self.compressor + } + + pub fn features(&self) -> SquashfsFeatures { + let uncompressed_inodes = (self.flags & SquashfsFlags::UncompressedInodes as u16) != 0; + let uncompressed_data_blocks = + (self.flags & SquashfsFlags::UncompressedDataBlocks as u16) != 0; + let _reserved = (self.flags & SquashfsFlags::Reserved as u16) != 0; + let uncompressed_fragments = + (self.flags & SquashfsFlags::UncompressedFragments as u16) != 0; + let unused_fragments = (self.flags & 
SquashfsFlags::UnusedFragments as u16) != 0; + let fragments_always_present = + (self.flags & SquashfsFlags::FragmentsAlwaysPresent as u16) != 0; + let deduplicated_data = (self.flags & SquashfsFlags::DeduplicatedData as u16) != 0; + let nfs_table_present = (self.flags & SquashfsFlags::PresentNFSTable as u16) != 0; + let uncompressed_xattrs = (self.flags & SquashfsFlags::UncompressedXattrs as u16) != 0; + let no_xattrs = (self.flags & SquashfsFlags::NoXattrs as u16) != 0; + let compressor_options_present = + (self.flags & SquashfsFlags::PresentCompressorOptions as u16) != 0; + let uncompressed_id_table = (self.flags & SquashfsFlags::UncompressedIDTable as u16) != 0; + + return SquashfsFeatures { + uncompressed_inodes, + uncompressed_data_blocks, + _reserved, + uncompressed_fragments, + unused_fragments, + fragments_always_present, + deduplicated_data, + nfs_table_present, + uncompressed_xattrs, + no_xattrs, + compressor_options_present, + uncompressed_id_table, + }; + } +} diff --git a/src/drivers/fs/vfs.rs b/src/drivers/fs/vfs.rs index 9dec810..c421df7 100755 --- a/src/drivers/fs/vfs.rs +++ b/src/drivers/fs/vfs.rs @@ -1,81 +1,23 @@ -// use alloc::{ -// boxed::Box, -// string::{String, ToString}, -// sync::Arc, -// vec::Vec, -// }; - -// use crate::{drivers::pci::PCI_DEVICES, libs::sync::Mutex}; - -// pub trait VfsFileSystem { -// fn open(&self, path: &str) -> Result, ()>; -// fn read_dir(&self, path: &str) -> Result, ()>; -// } - -// pub trait VfsFile { -// fn read(&self) -> Result, ()>; -// } - -// pub trait VfsDirectory { -// fn list_files(&self) -> Result]>, ()>; -// } - -// pub static VFS_INSTANCES: Mutex> = Mutex::new(Vec::new()); - -// pub struct Vfs { -// _identifier: String, -// file_system: Box, -// } - -// impl Vfs { -// pub fn new(file_system: Box, identifier: &str) -> Self { -// return Self { -// _identifier: identifier.to_string(), -// file_system, -// }; -// } - -// pub fn open(&self, path: &str) -> Result, ()> { -// return self.file_system.open(path); -// } - -// pub fn read_dir(&self, path: &str) -> Result, ()> { -// return self.file_system.read_dir(path); -// } -// } - -// pub fn init() { -// // TODO: Deduce which storage medium(s) we're using -// let pci_devices_lock = PCI_DEVICES.lock(); -// let mass_storage_devices = pci_devices_lock -// .iter() -// .filter(|&pci_device| pci_device.class_code == 0x01) -// .collect::>(); - -// for pci_device in mass_storage_devices { -// match pci_device.subclass_code { -// 0x01 => crate::drivers::storage::ide::init(), -// _ => {} -// } -// } -// } - use core::fmt::Debug; use alloc::{ - alloc::{alloc, handle_alloc_error}, + alloc::{alloc, dealloc}, boxed::Box, sync::Arc, vec::Vec, }; use crate::{ - log_info, - mem::{ALLOCATOR, PHYSICAL_MEMORY_MANAGER}, + log_info, log_ok, + mem::{ + // ALLOCATOR, + PHYSICAL_MEMORY_MANAGER, + }, }; static mut ROOT_VFS: Vfs = Vfs::null(); +#[allow(unused)] pub struct Vfs { next: Option<*mut Vfs>, ops: Option>, @@ -121,11 +63,13 @@ pub trait FsOps { fn vget(&mut self, fid: FileId, vfsp: *const Vfs) -> VNode; } +#[allow(unused)] pub struct FileId { len: u16, data: u8, } +#[allow(unused)] pub struct StatFs { typ: u32, block_size: u32, @@ -192,11 +136,13 @@ pub enum IODirection { Write, } +#[allow(unused)] pub struct IoVec { iov_base: *mut u8, iov_len: usize, } +#[allow(unused)] pub struct UIO { iov: *mut IoVec, iov_count: u32, @@ -260,6 +206,7 @@ pub trait VNodeOperations { fn bread(&mut self, block_number: u32, vp: *const VNode) -> Arc<[u8]>; } +#[allow(unused)] pub struct VAttr { typ: VNode, mode: u16, @@ 
-281,11 +228,9 @@ pub struct VAttr { pub fn add_vfs(mount_point: &str, fs_ops: Box) -> Result<(), ()> { let layout = alloc::alloc::Layout::new::(); - // TODO: investigate why on earth this gives me an allocation error - // let vfs = unsafe { alloc(layout).cast::() }; - let vfs = PHYSICAL_MEMORY_MANAGER.alloc(1).unwrap().cast::(); + let vfs_ptr = unsafe { alloc(layout).cast::() }; - let vfs = unsafe { &mut *vfs }; + let vfs = unsafe { &mut *vfs_ptr }; (*vfs) = Vfs::null(); (*vfs).ops = Some(fs_ops); @@ -308,58 +253,67 @@ pub fn add_vfs(mount_point: &str, fs_ops: Box) -> Result<(), ()> { } unsafe { ROOT_VFS.next = Some(vfs.as_mut_ptr()) }; + } else { + if unsafe { ROOT_VFS.next.is_none() } { + unsafe { dealloc(vfs_ptr.cast::(), layout) }; + return Err(()); + } - return Ok(()); + let target_vfs = unsafe { ROOT_VFS.next.unwrap() }; + + let mut cur_vnode = unsafe { (*target_vfs).ops.as_mut().unwrap().root(target_vfs) }; + + let parts = mount_point.split('/').collect::>(); + + for part in parts { + if part.is_empty() { + continue; + } + + // TODO: dont just lookup everything as the root user + if let Ok(vnode) = + cur_vnode + .ops + .lookup(part, UserCred { uid: 0, gid: 0 }, cur_vnode.as_ptr()) + { + cur_vnode = vnode; + } else { + unsafe { dealloc(vfs_ptr.cast::(), layout) }; + return Err(()); + } + } + + if cur_vnode.vfs_mounted_here.is_some() { + unsafe { dealloc(vfs_ptr.cast::(), layout) }; + return Err(()); + } + + { + let vfsp = vfs.as_ptr(); + + (*vfs) + .ops + .as_mut() + .unwrap() + .mount(mount_point, &mut vfs.data, vfsp); + } + + cur_vnode.vfs_mounted_here = Some(vfs.as_mut_ptr()); } + log_ok!("Added vfs at {mount_point}"); + + return Ok(()); +} + +pub fn vfs_open(path: &str) -> Result { if unsafe { ROOT_VFS.next.is_none() } { return Err(()); } - let target_vfs = unsafe { ROOT_VFS.next.unwrap() }; - - let binding = unsafe { &mut (*target_vfs).ops }; - let mut cur_vnode = binding.as_mut().unwrap().root(target_vfs); - - let parts = mount_point.split('/').collect::>(); - - for part in parts { - // TODO: dont just lookup everything as the root user - if let Ok(vnode) = - cur_vnode - .ops - .lookup(part, UserCred { uid: 0, gid: 0 }, cur_vnode.as_ptr()) - { - cur_vnode = vnode; - } else { - return Err(()); - } - } - - if cur_vnode.vfs_mounted_here.is_some() { - return Err(()); - } - - { - let vfsp = vfs.as_ptr(); - - (*vfs) - .ops - .as_mut() - .unwrap() - .mount(mount_point, &mut vfs.data, vfsp); - } - - cur_vnode.vfs_mounted_here = Some(vfs.as_mut_ptr()); - - return Err(()); -} - -pub fn vfs_open(path: &str) -> Result { let parts = path.split('/').collect::>(); let target_vfs = unsafe { ROOT_VFS.next.unwrap() }; - let binding = unsafe { &mut (*target_vfs).ops }; - let mut cur_vnode = binding.as_mut().unwrap().root(target_vfs); + let mut cur_vnode = unsafe { (*target_vfs).ops.as_mut().unwrap().root(target_vfs) }; for part in parts { if part.is_empty() { @@ -371,7 +325,11 @@ pub fn vfs_open(path: &str) -> Result { .ops .lookup(part, UserCred { uid: 0, gid: 0 }, cur_vnode.as_ptr()) { - cur_vnode = vnode; + if let Some(vfs) = vnode.vfs_mounted_here { + cur_vnode = unsafe { (*vfs).ops.as_mut().unwrap().root(vfs) } + } else { + cur_vnode = vnode; + } } else { return Err(()); } diff --git a/src/drivers/serial.rs b/src/drivers/serial.rs index 5a1a615..e07beed 100644 --- a/src/drivers/serial.rs +++ b/src/drivers/serial.rs @@ -19,6 +19,12 @@ pub static POISONED: AtomicBool = AtomicBool::new(false); // PORT + 4: Modem control register #[cfg(target_arch = "x86_64")] pub fn init_serial() -> u8 { + 
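// Probe for the UART before programming it: PORT + 7 is the 16550 scratch
// register, so a byte written there should read back unchanged. If it does
// not, no serial port exists at this I/O address and init_serial bails out
// early, before the loopback test further down.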
outb(PORT + 7, 0x42); + if inb(PORT + 7) != 0x42 { + // serial port does not exist + return 1; + } + outb(PORT + 1, 0x00); outb(PORT + 3, 0x80); outb(PORT, 0x03); @@ -31,14 +37,12 @@ pub fn init_serial() -> u8 { // Check if serial is faulty if inb(PORT) != 0xAE { - crate::log_error!("Serial Driver failed to initialize"); POISONED.store(true, core::sync::atomic::Ordering::Relaxed); return 1; } // Set serial in normal operation mode outb(PORT + 4, 0x0F); - crate::log_ok!("Serial Driver successfully initialized"); return 0; } @@ -52,21 +56,21 @@ pub fn write_string(string: &str) { #[cfg(not(target_arch = "x86_64"))] { for &ch in string.as_bytes() { - write_serial(ch as char); + write_serial(ch); } } } -#[cfg(not(target_arch = "x86_64"))] -pub fn init_serial() -> u8 { - return 0; -} - #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] fn is_transmit_empty() -> bool { return inb((PORT + 5) & 0x20) == 0; } +#[cfg(not(target_arch = "x86_64"))] +pub fn init_serial() -> u8 { + return 0; +} + #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub fn write_serial(character: u8) { while is_transmit_empty() {} diff --git a/src/drivers/storage/ide.rs b/src/drivers/storage/ide.rs index cce6667..454eb79 100755 --- a/src/drivers/storage/ide.rs +++ b/src/drivers/storage/ide.rs @@ -1,6 +1,6 @@ use core::mem::size_of; -use alloc::{boxed::Box, format, sync::Arc, vec::Vec}; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; use crate::{ arch::io::{inb, insw, inw, outb, outsw}, @@ -690,7 +690,8 @@ fn ide_initialize(bar0: u32, bar1: u32, _bar2: u32, _bar3: u32, _bar4: u32) { let fat_fs = fat_fs.unwrap(); - add_vfs("/", Box::new(fat_fs)); + // TODO + let _ = add_vfs("/", Box::new(fat_fs)); // let vfs = crate::drivers::fs::vfs::Vfs::new( // Box::new(fat_fs), diff --git a/src/drivers/fs/initramfs/compressors/gzip.rs b/src/libs/gzip.rs similarity index 99% rename from src/drivers/fs/initramfs/compressors/gzip.rs rename to src/libs/gzip.rs index 7aacace..05064df 100644 --- a/src/drivers/fs/initramfs/compressors/gzip.rs +++ b/src/libs/gzip.rs @@ -1,4 +1,4 @@ -use alloc::{sync::Arc, vec::Vec}; +use alloc::vec::Vec; use crate::libs::sync::Mutex; diff --git a/src/libs/math.rs b/src/libs/math.rs deleted file mode 100644 index 717e494..0000000 --- a/src/libs/math.rs +++ /dev/null @@ -1,89 +0,0 @@ -pub fn abs(x: f64) -> f64 { - return f64::from_bits(x.to_bits() & (u64::MAX / 2)); -} - -const TOINT: f64 = 1. / f64::EPSILON; - -pub fn floor(x: f64) -> f64 { - #[cfg(all( - any(target_arch = "x86", target_arch = "x86_64"), - not(target_feature = "sse2") - ))] - { - if abs(x).to_bits() < 4503599627370496.0_f64.to_bits() { - let truncated = x as i64 as f64; - if truncated > x { - return truncated - 1.0; - } else { - return truncated; - } - } else { - return x; - } - } - - let ui = x.to_bits(); - let e = ((ui >> 52) & 0x7FF) as i32; - - if (e >= 0x3FF + 52) || (x == 0.) { - return x; - } - - let y = if (ui >> 63) != 0 { - x - TOINT + TOINT - x - } else { - x + TOINT + TOINT - x - }; - - if e < 0x3FF { - return if (ui >> 63) != 0 { -1. } else { 0. }; - } - - if y > 0. 
{ - return x + y - 1.; - } else { - return x + y; - } -} - -pub fn ceil(x: f64) -> f64 { - #[cfg(all( - any(target_arch = "x86", target_arch = "x86_64"), - not(target_feature = "sse2") - ))] - { - if abs(x).to_bits() < 4503599627370496.0_f64.to_bits() { - let truncated = x as i64 as f64; - if truncated < x { - return truncated + 1.0; - } else { - return truncated; - } - } else { - return x; - } - } - - let u: u64 = x.to_bits(); - let e: i64 = (u >> 52 & 0x7ff) as i64; - - if e >= 0x3ff + 52 || x == 0. { - return x; - } - - let y = if (u >> 63) != 0 { - x - TOINT + TOINT - x - } else { - x + TOINT - TOINT - x - }; - - if e < 0x3ff { - return if (u >> 63) != 0 { -0. } else { 1. }; - } - - if y < 0. { - return x + y + 1.; - } else { - return x + y; - } -} diff --git a/src/libs/mod.rs b/src/libs/mod.rs index 8a99071..bf3c2eb 100644 --- a/src/libs/mod.rs +++ b/src/libs/mod.rs @@ -1,4 +1,4 @@ pub mod cell; -pub mod math; +pub mod gzip; pub mod sync; pub mod uuid; diff --git a/src/libs/sync/mutex.rs b/src/libs/sync/mutex.rs index d725796..5284b8d 100755 --- a/src/libs/sync/mutex.rs +++ b/src/libs/sync/mutex.rs @@ -4,12 +4,12 @@ use core::{ sync::atomic::{AtomicBool, Ordering}, }; -pub struct Mutex { +pub struct Mutex { locked: AtomicBool, data: UnsafeCell, } -unsafe impl Sync for Mutex {} +unsafe impl Sync for Mutex {} impl Mutex { #[inline] @@ -21,10 +21,11 @@ impl Mutex { } pub fn lock(&self) -> MutexGuard<'_, T> { - // if self.locked.load(Ordering::Acquire) == true { - // unsafe { core::arch::asm!("out dx, al", in("dx") 0x3f8, in("al") 'S' as u8) }; - // } - while self.locked.swap(true, Ordering::Acquire) { + while self + .locked + .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) + .is_err() + { // spin lock } return MutexGuard { mutex: self }; @@ -44,29 +45,26 @@ impl core::fmt::Debug for Mutex { } } -pub struct MutexGuard<'a, T: ?Sized> { +pub struct MutexGuard<'a, T> { mutex: &'a Mutex, } -impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> { +impl<'a, T> Deref for MutexGuard<'a, T> { type Target = T; fn deref(&self) -> &Self::Target { - // unsafe { core::arch::asm!("out dx, al", in("dx") 0x3f8, in("al") 'D' as u8) }; - unsafe { &*self.mutex.data.get() } } } -impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> { +impl<'a, T> DerefMut for MutexGuard<'a, T> { fn deref_mut(&mut self) -> &mut T { - // unsafe { core::arch::asm!("out dx, al", in("dx") 0x3f8, in("al") 'M' as u8) }; unsafe { &mut *self.mutex.data.get() } } } -impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> { +impl<'a, T> Drop for MutexGuard<'a, T> { fn drop(&mut self) { - self.mutex.locked.store(false, Ordering::Release); + self.mutex.locked.store(false, Ordering::SeqCst); } } diff --git a/src/main.rs b/src/main.rs index e7eb8a6..b0075b7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,14 +1,11 @@ -#![feature(abi_x86_interrupt, naked_functions)] -// Unforunately, this doesnt actually work with rust-analyzer, so if you want the annoying -// Error about "unnecessary returns" to go away, see https://github.com/rust-lang/rust-analyzer/issues/16542 -// And if that issue ever gets closed, and you're reading this, feel free to remove this comment +#![feature(abi_x86_interrupt, naked_functions, const_mut_refs)] #![allow(clippy::needless_return)] #![no_std] #![no_main] use core::ffi::CStr; -use alloc::{format, vec::Vec}; +use alloc::vec::Vec; use limine::KernelFileRequest; use crate::drivers::fs::{ @@ -27,15 +24,20 @@ pub static KERNEL_REQUEST: KernelFileRequest = KernelFileRequest::new(0); #[no_mangle] pub extern "C" fn 
_start() -> ! { - #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] - arch::interrupts::init(); - drivers::serial::init_serial(); + arch::gdt::gdt_init(); + arch::interrupts::idt_init(); + arch::interrupts::exceptions::exceptions_init(); + arch::interrupts::enable_interrupts(); + // TODO: memory stuff + mem::pmm::pmm_init(); + mem::init_allocator(); + drivers::acpi::init_acpi(); - // let squashfs = initramfs::init(); - - // crate::println!("{:?}", squashfs.superblock); + kmain() +} +pub fn kmain() -> ! { let _ = drivers::fs::vfs::add_vfs("/", alloc::boxed::Box::new(initramfs::init())); #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] @@ -57,36 +59,14 @@ pub extern "C" fn _start() -> ! { // let file = vfs_open("/example.txt").unwrap(); crate::println!( "{:X?}", - core::str::from_utf8( - &file - .ops - .open(0, UserCred { uid: 0, gid: 0 }, file.as_ptr()) - .unwrap() - ) - .unwrap() + &file + .ops + .open(0, UserCred { uid: 0, gid: 0 }, file.as_ptr()) + .unwrap() ); - let fb = drivers::video::get_framebuffer().unwrap(); - let length = (fb.height * fb.width) * (fb.bpp / 8); - let pages = length / crate::mem::pmm::PAGE_SIZE; - let buffer = unsafe { - core::slice::from_raw_parts_mut( - crate::mem::PHYSICAL_MEMORY_MANAGER - .alloc(pages) - .expect("Could not allocate color buffer") as *mut u32, - length, - ) - }; - - for y in 0..fb.height { - let r = ((y as f32) / ((fb.height - 1) as f32)) * 200.0; - for x in 0..fb.width { - let g = ((x as f32) / ((fb.width - 1) as f32)) * 200.0; - buffer[y * fb.width + x] = ((r as u32) << 16) | ((g as u32) << 8) | 175; - } - } - - fb.blit_screen(buffer, None); + // as a sign that we didnt panic + draw_gradient(); // loop { // let ch = read_serial(); @@ -109,6 +89,42 @@ pub extern "C" fn _start() -> ! { hcf(); } +fn draw_gradient() { + let fb = drivers::video::get_framebuffer().unwrap(); + let length = (fb.height * fb.width) * (fb.bpp / 8); + let pages = length / crate::mem::pmm::PAGE_SIZE; + + let buffer_ptr = crate::mem::PHYSICAL_MEMORY_MANAGER.alloc(pages); + + if buffer_ptr.is_null() { + panic!("Failed to allocate screen buffer") + } + + let buffer = unsafe { + core::slice::from_raw_parts_mut( + crate::mem::PHYSICAL_MEMORY_MANAGER + .alloc(pages) + .cast::(), + length, + ) + }; + + for y in 0..fb.height { + for x in 0..fb.width { + let r = (255 * x) / (fb.width - 1); + let g = (255 * y) / (fb.height - 1); + let b = 255 - r; + + let pixel = ((r as u32) << 16) | ((g as u32) << 8) | (b as u32); + buffer[((y * fb.pitch) / (fb.bpp / 8)) + x] = pixel + } + } + + fb.blit_screen(buffer, None); + + crate::mem::PHYSICAL_MEMORY_MANAGER.dealloc(buffer_ptr, pages); +} + #[macro_export] macro_rules! println { () => ($crate::print!("\n")); @@ -118,7 +134,6 @@ macro_rules! println { #[macro_export] macro_rules! print { ($($arg:tt)*) => ( - $crate::drivers::serial::write_string(&alloc::format!($($arg)*).replace('\n', "\n\r")) ) } @@ -160,6 +175,7 @@ impl KernelFeatures { } } +// TODO: Do this vastly differently pub static KERNEL_FEATURES: libs::cell::LazyCell = libs::cell::LazyCell::new(parse_kernel_cmdline); @@ -207,9 +223,7 @@ fn parse_kernel_cmdline() -> KernelFeatures { #[panic_handler] fn panic(info: &core::panic::PanicInfo) -> ! 
{ - let msg = &format!("{info}\n").replace('\n', "\n\r"); - - drivers::serial::write_string(msg); + crate::println!("{info}"); #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] { diff --git a/src/mem/allocator.rs b/src/mem/allocator.rs index e97e67e..ad3be1b 100644 --- a/src/mem/allocator.rs +++ b/src/mem/allocator.rs @@ -1,274 +1,158 @@ -// Original code from: https://github.com/DrChat/buddyalloc/blob/master/src/heap.rs -// But I made it ~~much worse~~ *better* by making it GlobalAlloc compatible -// By using A custom Mutex implementation (which also sucks), -// I was able to remove all the mut's In the original code. +use core::{ + alloc::{GlobalAlloc, Layout}, + ptr::NonNull, +}; -// TODO: Replace this with a slab allocator that can take advantage of the page frame allocator +use crate::{libs::sync::Mutex, mem::pmm::PAGE_SIZE}; -use core::alloc::{GlobalAlloc, Layout}; -use core::cmp::{max, min}; -use core::ptr; -use core::sync::atomic::Ordering::SeqCst; -use core::sync::atomic::{AtomicPtr, AtomicU8, AtomicUsize}; +use super::align_up; -use crate::libs::sync::Mutex; +#[derive(Debug)] +struct MemNode { + next: Option>, + size: usize, +} -const fn log2(num: usize) -> u8 { - let mut temp = num; - let mut result = 0; - - temp >>= 1; - - while temp != 0 { - result += 1; - temp >>= 1; +impl MemNode { + const fn new(size: usize) -> Self { + Self { next: None, size } } - return result; -} + pub fn addr(&self) -> usize { + self as *const Self as usize + } -const MIN_HEAP_ALIGN: usize = 4096; -const HEAP_BLOCKS: usize = 16; + pub fn end_addr(&self) -> usize { + self.addr() + self.len() + } -pub struct FreeBlock { - next: *mut FreeBlock, -} - -impl FreeBlock { - #[inline] - const fn new(next: *mut FreeBlock) -> Self { - Self { next } + pub fn len(&self) -> usize { + self.size } } -pub struct BuddyAllocator { - pub heap_start: AtomicPtr, - heap_size: AtomicUsize, - free_lists: Mutex<[*mut FreeBlock; HEAP_BLOCKS]>, - min_block_size: AtomicUsize, - min_block_size_log2: AtomicU8, +pub struct LinkedListAllocator { + head: MemNode, } -impl BuddyAllocator { - pub const fn new_unchecked(heap_start: *mut u8, heap_size: usize) -> Self { - let min_block_size_raw = heap_size >> (HEAP_BLOCKS - 1); - let min_block_size = AtomicUsize::new(min_block_size_raw); - let mut free_lists_buf: [*mut FreeBlock; HEAP_BLOCKS] = [ptr::null_mut(); HEAP_BLOCKS]; - - free_lists_buf[HEAP_BLOCKS - 1] = heap_start as *mut FreeBlock; - - let free_lists: Mutex<[*mut FreeBlock; HEAP_BLOCKS]> = Mutex::new(free_lists_buf); - - let heap_start = AtomicPtr::new(heap_start); - let heap_size = AtomicUsize::new(heap_size); +unsafe impl Sync for LinkedListAllocator {} +impl LinkedListAllocator { + pub const fn new() -> Self { Self { - heap_start, - heap_size, - free_lists, - min_block_size, - min_block_size_log2: AtomicU8::new(log2(min_block_size_raw)), + head: MemNode::new(0), } } - fn allocation_size(&self, mut size: usize, align: usize) -> Option { - if !align.is_power_of_two() { - return None; - } - - if align > MIN_HEAP_ALIGN { - return None; - } - - if align > size { - size = align; - } - - size = max(size, self.min_block_size.load(SeqCst)); - - size = size.next_power_of_two(); - - if size > self.heap_size.load(SeqCst) { - return None; - } - - return Some(size); - } - - fn allocation_order(&self, size: usize, align: usize) -> Option { - return self - .allocation_size(size, align) - .map(|s| (log2(s) - self.min_block_size_log2.load(SeqCst)) as usize); - } - - #[inline] - fn order_size(&self, order: usize) -> usize { - return 1 << 
(self.min_block_size_log2.load(SeqCst) as usize + order); - } - - fn free_list_pop(&self, order: usize) -> Option<*mut u8> { - let mut free_lists_lock = self.free_lists.lock(); - - let candidate = (*free_lists_lock)[order]; - - if candidate.is_null() { - return None; - } - - if order != free_lists_lock.len() - 1 { - (*free_lists_lock)[order] = unsafe { (*candidate).next }; - } else { - (*free_lists_lock)[order] = ptr::null_mut(); - } - - return Some(candidate as *mut u8); - } - - fn free_list_insert(&self, order: usize, block: *mut u8) { - let mut free_lists_lock = self.free_lists.lock(); - let free_block_ptr = block as *mut FreeBlock; - - unsafe { *free_block_ptr = FreeBlock::new((*free_lists_lock)[order]) }; - - (*free_lists_lock)[order] = free_block_ptr; - } - - fn free_list_remove(&self, order: usize, block: *mut u8) -> bool { - let block_ptr = block as *mut FreeBlock; - - let mut checking: &mut *mut FreeBlock = &mut (*self.free_lists.lock())[order]; - + pub fn init(&mut self, pages: usize) { unsafe { - while !(*checking).is_null() { - if *checking == block_ptr { - *checking = (*(*checking)).next; - return true; - } - - checking = &mut ((*(*checking)).next); - } - } - return false; - } - - fn split_free_block(&self, block: *mut u8, mut order: usize, order_needed: usize) { - let mut size_to_split = self.order_size(order); - - while order > order_needed { - size_to_split >>= 1; - order -= 1; - - let split = unsafe { block.add(size_to_split) }; - self.free_list_insert(order, split); + self.add_free_region( + super::PHYSICAL_MEMORY_MANAGER.alloc(pages), + PAGE_SIZE * pages, + ); } } - fn buddy(&self, order: usize, block: *mut u8) -> Option<*mut u8> { - assert!(block >= self.heap_start.load(SeqCst)); + unsafe fn add_free_region(&mut self, addr: *mut u8, size: usize) { + assert_eq!( + align_up(addr as usize, core::mem::align_of::()), + addr as usize + ); + assert!(size >= core::mem::size_of::()); - let relative = unsafe { block.offset_from(self.heap_start.load(SeqCst)) } as usize; - let size = self.order_size(order); - if size >= self.heap_size.load(SeqCst) { - return None; - } else { - return Some(unsafe { self.heap_start.load(SeqCst).add(relative ^ size) }); - } + let mut node = MemNode::new(size); + node.next = self.head.next.take(); + + addr.cast::().write(node); + self.head.next = Some(NonNull::new_unchecked(addr.cast::())); } - pub fn get_total_mem(&self) -> usize { - return self.heap_size.load(SeqCst); - } + fn alloc_from_node(node: &MemNode, layout: Layout) -> *mut u8 { + let start = align_up(node.addr() as usize, layout.align()); + let end = start + layout.size(); - pub fn get_free_mem(&self) -> usize { - let free_lists_lock = self.free_lists.lock(); - let mut free_mem = 0; - - unsafe { - for order in 0..free_lists_lock.len() { - let mut block = (*free_lists_lock)[order]; - - while !block.is_null() { - free_mem += self.order_size(order); - block = (*block).next; - } - } + if end > node.end_addr() as usize { + // aligned address goes outside the bounds of the node + return core::ptr::null_mut(); } - return free_mem; - } - - pub fn get_used_mem(&self) -> usize { - return self.get_total_mem() - self.get_free_mem(); - } -} - -unsafe impl GlobalAlloc for BuddyAllocator { - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - if let Some(order_needed) = self.allocation_order(layout.size(), layout.align()) { - let free_lists_len = { self.free_lists.lock().len() }; - - for order in order_needed..free_lists_len { - if let Some(block) = self.free_list_pop(order) { - if order > order_needed { 
- self.split_free_block(block, order, order_needed); - } - - return block; - } - } + let extra = node.end_addr() as usize - end; + if extra > 0 && extra < core::mem::size_of::() { + // Node size minus allocation size is less than the minimum size needed for a node, + // thus, if we let the allocation to happen in this node, we lose track of the extra memory + // lost by this allocation + return core::ptr::null_mut(); } - return ptr::null_mut(); + return start as *mut u8; } - unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - let initial_order = self - .allocation_order(layout.size(), layout.align()) - .expect("Tried to dispose of invalid block"); + unsafe fn find_region(&mut self, layout: Layout) -> Option> { + let mut current_node = &mut self.head; - let mut block = ptr; - let free_lists_len = { self.free_lists.lock().len() }; + while let Some(node) = current_node.next.as_mut() { + let node = node.as_mut(); - for order in initial_order..free_lists_len { - if let Some(buddy) = self.buddy(order, block) { - if self.free_list_remove(order, block) { - block = min(block, buddy); - continue; - } + if Self::alloc_from_node(node, layout).is_null() { + current_node = current_node.next.as_mut().unwrap().as_mut(); + continue; } - self.free_list_insert(order, block); - return; + // `node` is suitable for this allocation + let next = node.next.take(); + let ret = Some(current_node.next.take().unwrap()); + current_node.next = next; + return ret; } + + return None; } -} -#[no_mangle] -pub extern "C" fn malloc(size: usize) -> *mut u8 { - let layout = alloc::alloc::Layout::from_size_align(size, 2); + fn size_align(layout: Layout) -> Layout { + let layout = layout + .align_to(core::mem::align_of::()) + .expect("Failed to align allocation") + .pad_to_align(); + + let size = layout.size().max(core::mem::size_of::()); + return Layout::from_size_align(size, layout.align()).expect("Failed to create layout"); + } + + unsafe fn inner_alloc(&mut self, layout: Layout) -> *mut u8 { + let layout = Self::size_align(layout); + + if let Some(region) = self.find_region(layout) { + // immutable pointers are a government conspiracy anyways + let end = (region.as_ref().addr() + layout.size()) as *mut u8; + let extra = region.as_ref().end_addr() - end as usize; + + if extra > 0 { + self.add_free_region(end, extra) + } + + return region.as_ref().addr() as *mut u8; + } - if layout.is_err() { return core::ptr::null_mut(); } - unsafe { - return alloc::alloc::alloc(layout.unwrap()); - }; + unsafe fn inner_dealloc(&mut self, ptr: *mut u8, layout: Layout) { + let layout = Self::size_align(layout); + + self.add_free_region(ptr, layout.size()); + } } -#[no_mangle] -pub extern "C" fn free(ptr: *mut u8, size: usize) { - if ptr.is_null() { - return; +unsafe impl GlobalAlloc for Mutex { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + let mut allocator = self.lock(); + + allocator.inner_alloc(layout) } - let layout = alloc::alloc::Layout::from_size_align(size, 2); + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + let mut allocator = self.lock(); - if layout.is_err() { - return; + allocator.inner_dealloc(ptr, layout); } - - unsafe { - alloc::alloc::dealloc(ptr, layout.unwrap()); - }; } diff --git a/src/mem/mod.rs b/src/mem/mod.rs index adbb516..08487dd 100755 --- a/src/mem/mod.rs +++ b/src/mem/mod.rs @@ -1,65 +1,24 @@ pub mod allocator; pub mod pmm; -use core::alloc::GlobalAlloc; +use crate::libs::{cell::OnceCell, sync::Mutex}; -use limine::{MemmapEntry, NonNullPtr}; - -use crate::libs::{cell::LazyCell, 
sync::Mutex}; - -use self::{allocator::BuddyAllocator, pmm::PhysicalMemoryManager}; +use self::{allocator::LinkedListAllocator, pmm::PhysicalMemoryManager}; static MEMMAP_REQUEST: limine::MemmapRequest = limine::MemmapRequest::new(0); static HHDM_REQUEST: limine::HhdmRequest = limine::HhdmRequest::new(0); -pub static MEMMAP: LazyCell]>> = LazyCell::new(|| { - let memmap_request = MEMMAP_REQUEST - .get_response() - .get_mut() - .expect("Failed to get Memory map!"); +pub static PHYSICAL_MEMORY_MANAGER: OnceCell = OnceCell::new(); - return Mutex::new(memmap_request.memmap_mut()); -}); - -pub static HHDM_OFFSET: LazyCell = LazyCell::new(|| { - let hhdm = HHDM_REQUEST - .get_response() - .get() - .expect("Failed to get Higher Half Direct Map!"); - - return hhdm.offset as usize; -}); - -pub static PHYSICAL_MEMORY_MANAGER: LazyCell = - LazyCell::new(PhysicalMemoryManager::new); - -pub struct Allocator { - pub inner: LazyCell, +pub fn align_up(addr: usize, align: usize) -> usize { + let offset = (addr as *const u8).align_offset(align); + addr + offset } -unsafe impl GlobalAlloc for Allocator { - unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 { - self.inner.alloc(layout) - } - - unsafe fn dealloc(&self, ptr: *mut u8, layout: core::alloc::Layout) { - self.inner.dealloc(ptr, layout) - } -} - -const HEAP_PAGES: usize = 4096; -const HEAP_SIZE: usize = HEAP_PAGES * 1024; +const HEAP_PAGES: usize = 1024; // 4 MiB heap #[global_allocator] -pub static ALLOCATOR: Allocator = Allocator { - inner: LazyCell::new(|| { - let heap_start = PHYSICAL_MEMORY_MANAGER - .alloc(HEAP_PAGES) - .expect("Failed to allocate heap!"); - - BuddyAllocator::new_unchecked(heap_start, HEAP_SIZE) - }), -}; +pub static ALLOCATOR: Mutex = Mutex::new(LinkedListAllocator::new()); pub fn log_memory_map() { let memmap_request = MEMMAP_REQUEST.get_response().get_mut(); @@ -82,20 +41,43 @@ pub fn log_memory_map() { } } -pub struct Label { - size: usize, - text_label: &'static str, +pub fn init_allocator() { + let mut allocator_lock = ALLOCATOR.lock(); + allocator_lock.init(HEAP_PAGES); + + drop(allocator_lock); + + crate::println!( + "{} of memory available", + PHYSICAL_MEMORY_MANAGER.total_memory().label_bytes() + ) +} + +pub enum Label { + BYTE(usize), + KIB(usize), + MIB(usize), + GIB(usize), } impl core::fmt::Display for Label { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - return write!(f, "{}{}", self.size, self.text_label); + match self { + Label::BYTE(count) => { + write!(f, "{count} Byte(s)") + } + Label::KIB(count) => { + write!(f, "{count} KiB(s)") + } + Label::MIB(count) => { + write!(f, "{count} MiB(s)") + } + Label::GIB(count) => { + write!(f, "{count} GiB(s)") + } + } } } - -// Hacky solution to avoid allocation, but keep the names -static BYTE_LABELS: (&str, &str, &str, &str) = ("GiB", "MiB", "KiB", "Bytes"); - pub trait LabelBytes { fn label_bytes(&self) -> Label; } @@ -105,25 +87,13 @@ impl LabelBytes for usize { let bytes = *self; if bytes >> 30 > 0 { - return Label { - size: bytes >> 30, - text_label: BYTE_LABELS.0, - }; + return Label::GIB(bytes >> 30); } else if bytes >> 20 > 0 { - return Label { - size: bytes >> 20, - text_label: BYTE_LABELS.1, - }; + return Label::MIB(bytes >> 20); } else if bytes >> 10 > 0 { - return Label { - size: bytes >> 10, - text_label: BYTE_LABELS.2, - }; + return Label::KIB(bytes >> 10); } else { - return Label { - size: bytes, - text_label: BYTE_LABELS.3, - }; + return Label::BYTE(bytes); } } } diff --git a/src/mem/pmm.rs b/src/mem/pmm.rs index 
202ef2e..7220f91 100644 --- a/src/mem/pmm.rs +++ b/src/mem/pmm.rs @@ -2,6 +2,8 @@ use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; +use super::{HHDM_REQUEST, MEMMAP_REQUEST}; + pub const PAGE_SIZE: usize = 4096; #[derive(Debug)] @@ -13,6 +15,10 @@ pub struct PhysicalMemoryManager { used_pages: AtomicUsize, } +pub fn pmm_init() { + super::PHYSICAL_MEMORY_MANAGER.set(PhysicalMemoryManager::new()); +} + impl PhysicalMemoryManager { pub fn new() -> Self { let pmm = Self { @@ -23,11 +29,22 @@ impl PhysicalMemoryManager { used_pages: AtomicUsize::new(0), }; - let hhdm_offset = *super::HHDM_OFFSET; + let hhdm_req = HHDM_REQUEST + .get_response() + .get() + .expect("Failed to get Higher Half Direct Map!"); + + let hhdm_offset = hhdm_req.offset as usize; + + let memmap = MEMMAP_REQUEST + .get_response() + .get_mut() + .expect("Failed to get Memory map!") + .memmap_mut(); let mut highest_addr: usize = 0; - for entry in super::MEMMAP.lock().iter() { + for entry in memmap.iter() { if entry.typ == limine::MemoryMapEntryType::Usable { pmm.usable_pages .fetch_add(entry.len as usize / PAGE_SIZE, Ordering::SeqCst); @@ -42,7 +59,7 @@ impl PhysicalMemoryManager { let bitmap_size = ((pmm.highest_page_idx.load(Ordering::SeqCst) / 8) + PAGE_SIZE - 1) & !(PAGE_SIZE - 1); - for entry in super::MEMMAP.lock().iter_mut() { + for entry in memmap.iter_mut() { if entry.typ != limine::MemoryMapEntryType::Usable { continue; } @@ -63,7 +80,7 @@ impl PhysicalMemoryManager { } } - for entry in super::MEMMAP.lock().iter() { + for entry in memmap.iter() { if entry.typ != limine::MemoryMapEntryType::Usable { continue; } @@ -99,7 +116,7 @@ impl PhysicalMemoryManager { return core::ptr::null_mut(); } - pub fn alloc_nozero(&self, pages: usize) -> Result<*mut u8, ()> { + pub fn alloc_nozero(&self, pages: usize) -> *mut u8 { // Attempt to allocate n pages with a search limit of the amount of usable pages let mut page_addr = self.inner_alloc(pages, self.highest_page_idx.load(Ordering::SeqCst)); @@ -111,22 +128,27 @@ impl PhysicalMemoryManager { // If page_addr is still null, we have ran out of usable memory if page_addr.is_null() { - return Err(()); + return core::ptr::null_mut(); } } self.used_pages.fetch_add(pages, Ordering::SeqCst); - return Ok(page_addr); + return page_addr; } - pub fn alloc(&self, pages: usize) -> Result<*mut u8, ()> { - let ret = self.alloc_nozero(pages)?; + pub fn alloc(&self, pages: usize) -> *mut u8 { + let ret = self.alloc_nozero(pages); + + if ret.is_null() { + return ret; + } + unsafe { core::ptr::write_bytes(ret, 0x00, pages * PAGE_SIZE); }; - return Ok(ret); + return ret; } pub fn dealloc(&self, addr: *mut u8, pages: usize) {
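        // Presumably the inverse of alloc_nozero: clear the page-bitmap bits
        // covering `addr .. addr + pages * PAGE_SIZE` and decrement
        // `used_pages`, so the freed frames can be handed out again later.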