a lot of stuff, but mainly, a decent allocator
This commit is contained in:
14
Makefile
14
Makefile
@@ -20,6 +20,7 @@ IMAGE_PATH = ${ARTIFACTS_PATH}/${IMAGE_NAME}
|
||||
CARGO_OPTS = --target=src/arch/${ARCH}/${ARCH}-unknown-none.json
|
||||
QEMU_OPTS += -m ${MEMORY} -drive id=hd0,format=raw,file=${IMAGE_PATH}
|
||||
LIMINE_BOOT_VARIATION = X64
|
||||
LIMINE_BRANCH = v7.x-binary
|
||||
|
||||
ifeq (${MODE},release)
|
||||
CARGO_OPTS += --release
|
||||
@@ -54,7 +55,7 @@ ifneq (${UEFI},)
|
||||
endif
|
||||
endif
|
||||
|
||||
.PHONY: all check run-scripts prepare-bin-files copy-initramfs-files compile-initramfs copy-iso-files build-iso compile-bootloader compile-binaries ovmf clean run build line-count
|
||||
.PHONY: all build
|
||||
|
||||
all: build
|
||||
|
||||
@@ -78,6 +79,7 @@ copy-initramfs-files:
|
||||
echo "Hello World from Initramfs" > ${INITRAMFS_PATH}/example.txt
|
||||
echo "Second file for testing" > ${INITRAMFS_PATH}/example2.txt
|
||||
mkdir -p ${INITRAMFS_PATH}/firstdir/seconddirbutlonger/
|
||||
mkdir ${INITRAMFS_PATH}/mnt/
|
||||
echo "Nexted file reads!!" > ${INITRAMFS_PATH}/firstdir/seconddirbutlonger/yeah.txt
|
||||
|
||||
compile-initramfs: copy-initramfs-files
|
||||
@@ -88,10 +90,7 @@ run-scripts:
|
||||
ifeq (${EXPORT_SYMBOLS},true)
|
||||
nm target/${ARCH}-unknown-none/${MODE}/CappuccinOS.elf > scripts/symbols.table
|
||||
@if [ ! -d "scripts/rustc_demangle" ]; then \
|
||||
echo "Cloning rustc_demangle.py into scripts/rustc_demangle/..."; \
|
||||
git clone "https://github.com/juls0730/rustc_demangle.py" "scripts/rustc_demangle"; \
|
||||
else \
|
||||
echo "Folder scripts/rustc_demangle already exists. Skipping clone."; \
|
||||
fi
|
||||
python scripts/demangle-symbols.py
|
||||
mv scripts/symbols.table ${INITRAMFS_PATH}/
|
||||
@@ -100,7 +99,7 @@ endif
|
||||
python scripts/font.py
|
||||
mv scripts/font.psf ${INITRAMFS_PATH}/
|
||||
|
||||
python scripts/initramfs-test.py 100 ${INITRAMFS_PATH}/
|
||||
#python scripts/initramfs-test.py 100 ${INITRAMFS_PATH}/
|
||||
|
||||
copy-iso-files:
|
||||
# Limine files
|
||||
@@ -154,10 +153,11 @@ endif
|
||||
sudo losetup -d `cat loopback_dev`
|
||||
rm -rf loopback_dev
|
||||
|
||||
# TODO: do something better for the directory checking maybe
|
||||
compile-bootloader:
|
||||
@if [ ! -d "limine" ]; then \
|
||||
echo "Cloning Limine into limine/..."; \
|
||||
git clone https://github.com/limine-bootloader/limine.git --branch=v6.x-branch-binary --depth=1; \
|
||||
git clone https://github.com/limine-bootloader/limine.git --branch=${LIMINE_BRANCH} --depth=1; \
|
||||
else \
|
||||
echo "Folder limine already exists. Skipping clone."; \
|
||||
fi
|
||||
@@ -175,7 +175,7 @@ ovmf-x86_64: ovmf
|
||||
ovmf-riscv64: ovmf
|
||||
mkdir -p ovmf/ovmf-riscv64
|
||||
@if [ ! -d "ovmf/ovmf-riscv64/OVMF.fd" ]; then \
|
||||
cd ovmf/ovmf-riscv64 && curl -o OVMF.fd https://retrage.github.io/edk2-nightly/bin/RELEASERISCV64_VIRT_CODE.fd && dd if=/dev/zero of=OVMF.fd bs=1 count=0 seek=33554432; \
|
||||
cd ovmf/ovmf-riscv64 && curl -o OVMF.fd https://retrage.github.io/edk2-nightly/bin/RELEASERISCV64_VIRT_CODE.fd; \
|
||||
fi
|
||||
|
||||
ovmf-aarch64:
|
||||
|
||||
@@ -7,14 +7,14 @@ CappuccinOS is a small x86-64 operating system written from scratch in rust. Thi
|
||||
## Features
|
||||
- [X] Serial output
|
||||
- [X] Hardware interrupts
|
||||
- [X] PS/2 Keyboard support
|
||||
- [X] ANSI color codes in console
|
||||
- [X] Heap allocation
|
||||
- [ ] Externalized kernel modules
|
||||
- [X] Initramfs
|
||||
- [X] Squashfs driver
|
||||
- [X] Programmatic reads
|
||||
- [X] Decompression
|
||||
- [ ] PS/2 Keyboard support
|
||||
- [ ] ANSI color codes in console
|
||||
- [ ] SMP
|
||||
- [ ] Use APIC instead of PIC
|
||||
- [ ] Pre-emptive multitasking
|
||||
@@ -31,7 +31,7 @@ CappuccinOS is a small x86-64 operating system written from scratch in rust. Thi
|
||||
- [ ] MMC/Nand device support
|
||||
- [ ] M.2 NVME device support
|
||||
- [ ] Basic shell
|
||||
- [X] Basic I/O
|
||||
- [ ] Basic I/O
|
||||
- [ ] Executing Programs from disk
|
||||
- [ ] Lua interpreter
|
||||
- [ ] Memory management
|
||||
@@ -114,12 +114,15 @@ Some Resources I used over the creation of CappuccinOS:
|
||||
- [OSDev wiki](https://wiki.osdev.org)
|
||||
- Wikipedia on various random things
|
||||
- [Squashfs Binary Format](https://dr-emann.github.io/squashfs/squashfs.html)
|
||||
- [GRUB](https://www.gnu.org/software/grub/grub-download.html) Mainly for Squashfs things, even though I later learned it does things incorrectly
|
||||
|
||||
And mostly for examples of how people did stuff I used these (projects made by people who might actually have a clue what they're doing):
|
||||
- This is missing some entries somehow
|
||||
- [MOROS](https://github.com/vinc/moros)
|
||||
- [Felix](https://github.com/mrgian/felix)
|
||||
- [mOS](https://github.com/Moldytzu/mOS)
|
||||
- [rust_os](https://github.com/thepowersgang/rust_os/tree/master)
|
||||
- [Lyre](https://github.com/Lyre-OS/klyre)
|
||||
|
||||
## License
|
||||
CappuccinOS is license under the MIT License. Feel free to modify and distribute in accordance with the license.
|
||||
@@ -4,6 +4,7 @@ def create_psf2_file(font_data, psf2_file_path):
|
||||
magic_bytes = b'\x72\xB5\x4A\x86'
|
||||
version = 0
|
||||
header_size = 32
|
||||
# means theres a unicode table
|
||||
flags = 0x00000001
|
||||
num_glyphs = len(font_data)
|
||||
height = 16
|
||||
@@ -20,6 +21,7 @@ def create_psf2_file(font_data, psf2_file_path):
|
||||
psf2_file.write(psf2_file_content)
|
||||
|
||||
if __name__ == "__main__":
|
||||
# TODO: maybe dont just dump a bunch of hex in here idk
|
||||
font_data = [
|
||||
[0x00, 0x00, 0x7E, 0x81, 0x99, 0xA5, 0x85, 0x89, 0x89, 0x81, 0x89, 0x7E, 0x00, 0x00, 0x00, 0x00],
|
||||
[0x00, 0x00, 0x7E, 0x81, 0xA5, 0x81, 0x81, 0xBD, 0x99, 0x81, 0x81, 0x7E, 0x00, 0x00, 0x00, 0x00],
|
||||
|
||||
144
src/arch/x86_64/gdt.rs
Normal file
144
src/arch/x86_64/gdt.rs
Normal file
@@ -0,0 +1,144 @@
|
||||
#[derive(Default)]
|
||||
#[repr(C)]
|
||||
struct GDTDescriptor {
|
||||
limit: u16,
|
||||
base_low: u16,
|
||||
base_mid: u8,
|
||||
access: u8,
|
||||
granularity: u8,
|
||||
base_high: u8,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
#[repr(C)]
|
||||
struct TSSDescriptor {
|
||||
length: u16,
|
||||
base_low: u16,
|
||||
base_mid: u8,
|
||||
flags1: u8,
|
||||
flags2: u8,
|
||||
base_high: u8,
|
||||
base_upper: u32,
|
||||
_reserved: u32,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
#[repr(C)]
|
||||
struct GDT {
|
||||
descriptors: [GDTDescriptor; 11],
|
||||
tss: TSSDescriptor,
|
||||
}
|
||||
|
||||
#[repr(C, packed)]
|
||||
struct GDTPtr {
|
||||
limit: u16,
|
||||
base: u64,
|
||||
}
|
||||
|
||||
static mut GDT: Option<GDT> = None;
|
||||
static mut GDTR: GDTPtr = GDTPtr { limit: 0, base: 0 };
|
||||
|
||||
pub fn gdt_init() {
|
||||
unsafe {
|
||||
GDT = Some(GDT::default());
|
||||
let gdt = GDT.as_mut().unwrap();
|
||||
|
||||
gdt.descriptors[0].limit = 0;
|
||||
gdt.descriptors[0].base_low = 0;
|
||||
gdt.descriptors[0].base_mid = 0;
|
||||
gdt.descriptors[0].access = 0;
|
||||
gdt.descriptors[0].granularity = 0;
|
||||
gdt.descriptors[0].base_high = 0;
|
||||
|
||||
gdt.descriptors[1].limit = 0xFFFF;
|
||||
gdt.descriptors[1].base_low = 0;
|
||||
gdt.descriptors[1].base_mid = 0;
|
||||
gdt.descriptors[1].access = 0x9A;
|
||||
gdt.descriptors[1].granularity = 0;
|
||||
gdt.descriptors[1].base_high = 0;
|
||||
|
||||
gdt.descriptors[2].limit = 0xFFFF;
|
||||
gdt.descriptors[2].base_low = 0;
|
||||
gdt.descriptors[2].base_mid = 0;
|
||||
gdt.descriptors[2].access = 0x92;
|
||||
gdt.descriptors[2].granularity = 0;
|
||||
gdt.descriptors[2].base_high = 0;
|
||||
|
||||
gdt.descriptors[3].limit = 0xFFFF;
|
||||
gdt.descriptors[3].base_low = 0;
|
||||
gdt.descriptors[3].base_mid = 0;
|
||||
gdt.descriptors[3].access = 0x9A;
|
||||
gdt.descriptors[3].granularity = 0xCF;
|
||||
gdt.descriptors[3].base_high = 0;
|
||||
|
||||
gdt.descriptors[4].limit = 0xFFFF;
|
||||
gdt.descriptors[4].base_low = 0;
|
||||
gdt.descriptors[4].base_mid = 0;
|
||||
gdt.descriptors[4].access = 0x92;
|
||||
gdt.descriptors[4].granularity = 0xCF;
|
||||
gdt.descriptors[4].base_high = 0;
|
||||
|
||||
gdt.descriptors[5].limit = 0;
|
||||
gdt.descriptors[5].base_low = 0;
|
||||
gdt.descriptors[5].base_mid = 0;
|
||||
gdt.descriptors[5].access = 0x9A;
|
||||
gdt.descriptors[5].granularity = 0x20;
|
||||
gdt.descriptors[5].base_high = 0;
|
||||
|
||||
gdt.descriptors[6].limit = 0;
|
||||
gdt.descriptors[6].base_low = 0;
|
||||
gdt.descriptors[6].base_mid = 0;
|
||||
gdt.descriptors[6].access = 0x92;
|
||||
gdt.descriptors[6].granularity = 0;
|
||||
gdt.descriptors[6].base_high = 0;
|
||||
|
||||
// descriptors[7] and descriptors[8] are already dummy entries for SYSENTER
|
||||
|
||||
gdt.descriptors[9].limit = 0;
|
||||
gdt.descriptors[9].base_low = 0;
|
||||
gdt.descriptors[9].base_mid = 0;
|
||||
gdt.descriptors[9].access = 0xFA;
|
||||
gdt.descriptors[9].granularity = 0x20;
|
||||
gdt.descriptors[9].base_high = 0;
|
||||
|
||||
gdt.descriptors[10].limit = 0;
|
||||
gdt.descriptors[10].base_low = 0;
|
||||
gdt.descriptors[10].base_mid = 0;
|
||||
gdt.descriptors[10].access = 0xF2;
|
||||
gdt.descriptors[10].granularity = 0;
|
||||
gdt.descriptors[10].base_high = 0;
|
||||
|
||||
gdt.tss.length = 104;
|
||||
gdt.tss.base_low = 0;
|
||||
gdt.tss.base_mid = 0;
|
||||
gdt.tss.flags1 = 0x89;
|
||||
gdt.tss.flags2 = 0;
|
||||
gdt.tss.base_high = 0;
|
||||
gdt.tss.base_upper = 0;
|
||||
gdt.tss._reserved = 0;
|
||||
|
||||
GDTR.limit = core::mem::size_of::<GDT>() as u16 - 1;
|
||||
GDTR.base = gdt as *mut GDT as u64;
|
||||
}
|
||||
|
||||
gdt_reload();
|
||||
}
|
||||
|
||||
pub fn gdt_reload() {
|
||||
unsafe {
|
||||
core::arch::asm!(
|
||||
"lgdt [{}]",
|
||||
"push 0x28",
|
||||
"lea rax, [rip+0x3]",
|
||||
"push rax",
|
||||
"retfq",
|
||||
"mov eax, 0x30",
|
||||
"mov ds, eax",
|
||||
"mov es, eax",
|
||||
"mov fs, eax",
|
||||
"mov gs, eax",
|
||||
"mov ss, eax",
|
||||
in(reg) core::ptr::addr_of!(GDTR)
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -2,10 +2,7 @@ use crate::{drivers::acpi::SMP_REQUEST, hcf, libs::cell::OnceCell};
|
||||
|
||||
use alloc::{sync::Arc, vec::Vec};
|
||||
|
||||
use super::super::{
|
||||
cpu_get_msr, cpu_set_msr,
|
||||
io::{inb, outb},
|
||||
};
|
||||
use super::super::{cpu_get_msr, cpu_set_msr, io::outb};
|
||||
|
||||
#[repr(C, packed)]
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
@@ -107,33 +104,41 @@ impl APIC {
|
||||
let ptr_end = unsafe { ptr.add(madt.header.length as usize - 44) };
|
||||
|
||||
while (ptr as usize) < (ptr_end as usize) {
|
||||
match unsafe { *ptr } {
|
||||
// ptr may or may bot be aligned, although I have had crashes related to this pointer being not aligned
|
||||
// and tbh I dont really care about the performance impact of reading unaligned pointers right now
|
||||
// TODO
|
||||
match unsafe { core::ptr::read_unaligned(ptr) } {
|
||||
0 => {
|
||||
if unsafe { *(ptr.add(4)) } & 1 != 0 {
|
||||
cpus.push(unsafe { *ptr.add(2).cast::<LAPIC>() });
|
||||
cpus.push(unsafe { core::ptr::read_unaligned(ptr.add(2).cast::<LAPIC>()) });
|
||||
}
|
||||
}
|
||||
1 => unsafe {
|
||||
io_apic = Some(IOAPIC {
|
||||
ioapic_id: *ptr.add(2),
|
||||
_reserved: *ptr.add(3),
|
||||
ptr: (*ptr.add(4).cast::<u32>()) as *mut u8,
|
||||
global_interrupt_base: *ptr.add(8).cast::<u32>(),
|
||||
ioapic_id: core::ptr::read_unaligned(ptr.add(2)),
|
||||
_reserved: core::ptr::read_unaligned(ptr.add(3)),
|
||||
ptr: (core::ptr::read_unaligned(ptr.add(4).cast::<u32>())) as *mut u8,
|
||||
global_interrupt_base: core::ptr::read_unaligned(ptr.add(8).cast::<u32>()),
|
||||
})
|
||||
},
|
||||
2 => unsafe {
|
||||
io_apic_source_override = Some(IOAPICSourceOverride {
|
||||
bus_source: *ptr.add(2),
|
||||
irq_source: *ptr.add(3),
|
||||
global_system_interrupt: *ptr.add(4).cast::<u32>(),
|
||||
flags: *ptr.add(8).cast::<u16>(),
|
||||
bus_source: core::ptr::read_unaligned(ptr.add(2)),
|
||||
irq_source: core::ptr::read_unaligned(ptr.add(3)),
|
||||
global_system_interrupt: core::ptr::read_unaligned(
|
||||
ptr.add(4).cast::<u32>(),
|
||||
),
|
||||
flags: core::ptr::read_unaligned(ptr.add(8).cast::<u16>()),
|
||||
})
|
||||
},
|
||||
5 => lapic_ptr = unsafe { *(ptr.add(4).cast::<u64>()) } as *mut u8,
|
||||
5 => {
|
||||
lapic_ptr =
|
||||
unsafe { core::ptr::read_unaligned(ptr.add(4).cast::<u64>()) } as *mut u8
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
||||
ptr = unsafe { ptr.add((*ptr.add(1)) as usize) };
|
||||
ptr = unsafe { ptr.add(core::ptr::read_unaligned(ptr.add(1)) as usize) };
|
||||
}
|
||||
|
||||
if io_apic.is_none() || io_apic_source_override.is_none() {
|
||||
@@ -168,10 +173,6 @@ impl APIC {
|
||||
|
||||
crate::println!("{number_of_inputs}");
|
||||
|
||||
// // hopefully nothing important is on that page :shrug:
|
||||
// // TODO: use the page allocator we wrote maybe
|
||||
// unsafe { core::ptr::copy(test as *mut u8, 0x8000 as *mut u8, 4096) }
|
||||
|
||||
let smp_request = SMP_REQUEST.get_response().get_mut();
|
||||
|
||||
if smp_request.is_none() {
|
||||
@@ -189,82 +190,12 @@ impl APIC {
|
||||
cpu.goto_address = test;
|
||||
}
|
||||
|
||||
// for cpu_apic in apic.cpus.iter() {
|
||||
// let lapic_id = cpu_apic.apic_id;
|
||||
|
||||
// // TODO: If CPU is the BSP, do not intialize it
|
||||
|
||||
// crate::log_info!("Initializing CPU {processor_id:<02}, please wait",);
|
||||
|
||||
// match apic.bootstrap_processor(processor_id, 0x8000) {
|
||||
// Err(_) => crate::log_error!("Failed to initialize CPU {processor_id:<02}!"),
|
||||
// Ok(_) => crate::log_ok!("Successfully initialized CPU {processor_id:<02}!"),
|
||||
// }
|
||||
// }
|
||||
|
||||
// Set and enable keyboard interrupt
|
||||
apic.set_interrupt(0x01, 0x01);
|
||||
|
||||
return Ok(apic);
|
||||
}
|
||||
|
||||
// pub fn bootstrap_processor(&self, processor_id: u8, startup_page: usize) -> Result<(), ()> {
|
||||
// // Clear LAPIC errors
|
||||
// self.write_lapic(0x280, 0);
|
||||
// // Select Auxiliary Processor
|
||||
// self.write_lapic(
|
||||
// 0x310,
|
||||
// (self.read_lapic(0x310) & 0x00FFFFFF) | (processor_id as u32) << 24,
|
||||
// );
|
||||
// // send INIT Inter-Processor Interrupt
|
||||
// self.write_lapic(0x300, (self.read_lapic(0x300) & 0x00FFFFFF) | 0x00C500);
|
||||
|
||||
// // Wait for IPI delivery
|
||||
// while self.read_lapic(0x300) & (1 << 12) != 0 {
|
||||
// unsafe {
|
||||
// core::arch::asm!("pause");
|
||||
// }
|
||||
// }
|
||||
|
||||
// // Select Auxiliary Processor
|
||||
// self.write_lapic(
|
||||
// 0x310,
|
||||
// (self.read_lapic(0x310) & 0x00FFFFFF) | (processor_id as u32) << 24,
|
||||
// );
|
||||
// // deassert
|
||||
// self.write_lapic(0x300, (self.read_lapic(0x300) & 0x00FFFFFF) | 0x00C500);
|
||||
|
||||
// // Wait for IPI delivery
|
||||
// while self.read_lapic(0x300) & (1 << 12) != 0 {
|
||||
// unsafe {
|
||||
// core::arch::asm!("pause");
|
||||
// }
|
||||
// }
|
||||
|
||||
// msdelay(10);
|
||||
|
||||
// for i in 0..2 {
|
||||
// self.write_lapic(0x280, 0);
|
||||
// self.write_lapic(
|
||||
// 0x310,
|
||||
// (self.read_lapic(0x310) & 0x00FFFFFF) | (processor_id as u32) << 24,
|
||||
// );
|
||||
// self.write_lapic(0x300, (self.read_lapic(0x300) & 0xfff0f800) | 0x000608);
|
||||
// if i == 0 {
|
||||
// usdelay(200);
|
||||
// } else {
|
||||
// msdelay(1000);
|
||||
// }
|
||||
// while self.read_lapic(0x300) & (1 << 12) != 0 {
|
||||
// unsafe {
|
||||
// core::arch::asm!("pause");
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
// return Ok(());
|
||||
// }
|
||||
|
||||
pub fn read_ioapic(&self, reg: u32) -> u32 {
|
||||
unsafe {
|
||||
core::ptr::write_volatile(self.io_apic.ptr.cast::<u32>(), reg & 0xff);
|
||||
@@ -336,40 +267,41 @@ fn disable_pic() {
|
||||
outb(PIC_DATA_SLAVE, 0xFF);
|
||||
}
|
||||
|
||||
pub fn usdelay(useconds: u16) {
|
||||
let pit_count = ((useconds as u32 * 1193) / 1000) as u16;
|
||||
// // TODO: last I remember these didnt work
|
||||
// pub fn usdelay(useconds: u16) {
|
||||
// let pit_count = ((useconds as u32 * 1193) / 1000) as u16;
|
||||
|
||||
pit_delay(pit_count);
|
||||
}
|
||||
// pit_delay(pit_count);
|
||||
// }
|
||||
|
||||
pub fn msdelay(ms: u32) {
|
||||
let mut total_count = ms * 1193;
|
||||
// pub fn msdelay(ms: u32) {
|
||||
// let mut total_count = ms * 1193;
|
||||
|
||||
while total_count > 0 {
|
||||
let chunk_count = if total_count > u16::MAX as u32 {
|
||||
u16::MAX
|
||||
} else {
|
||||
total_count as u16
|
||||
};
|
||||
// while total_count > 0 {
|
||||
// let chunk_count = if total_count > u16::MAX as u32 {
|
||||
// u16::MAX
|
||||
// } else {
|
||||
// total_count as u16
|
||||
// };
|
||||
|
||||
pit_delay(chunk_count);
|
||||
// pit_delay(chunk_count);
|
||||
|
||||
total_count -= chunk_count as u32;
|
||||
}
|
||||
}
|
||||
// total_count -= chunk_count as u32;
|
||||
// }
|
||||
// }
|
||||
|
||||
pub fn pit_delay(count: u16) {
|
||||
// Set PIT to mode 0
|
||||
outb(0x43, 0x30);
|
||||
outb(0x40, (count & 0xFF) as u8);
|
||||
outb(0x40, ((count & 0xFF00) >> 8) as u8);
|
||||
loop {
|
||||
// Tell PIT to give us a timer status
|
||||
outb(0x43, 0xE2);
|
||||
if ((inb(0x40) >> 7) & 0x01) != 0 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
// pub fn pit_delay(count: u16) {
|
||||
// // Set PIT to mode 0
|
||||
// outb(0x43, 0x30);
|
||||
// outb(0x40, (count & 0xFF) as u8);
|
||||
// outb(0x40, ((count & 0xFF00) >> 8) as u8);
|
||||
// loop {
|
||||
// // Tell PIT to give us a timer status
|
||||
// outb(0x43, 0xE2);
|
||||
// if ((inb(0x40) >> 7) & 0x01) != 0 {
|
||||
// break;
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
pub static APIC: OnceCell<APIC> = OnceCell::new();
|
||||
|
||||
@@ -107,7 +107,7 @@ exception_function!(0x0D, general_protection_fault);
|
||||
exception_function!(0x0E, page_fault);
|
||||
exception_function!(0xFF, generic_handler);
|
||||
|
||||
pub fn set_exceptions() {
|
||||
pub fn exceptions_init() {
|
||||
for i in 0..32 {
|
||||
idt_set_gate(i, generic_handler as usize);
|
||||
}
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
pub mod apic;
|
||||
mod exceptions;
|
||||
pub mod exceptions;
|
||||
|
||||
use crate::{
|
||||
// arch::{apic, x86_common::pic::ChainedPics},
|
||||
libs::sync::Mutex,
|
||||
};
|
||||
|
||||
use self::apic::APIC;
|
||||
|
||||
#[repr(C, packed)]
|
||||
#[derive(Clone, Copy)]
|
||||
struct IdtEntry {
|
||||
@@ -73,7 +75,7 @@ pub fn idt_set_gate(num: u8, function_ptr: usize) {
|
||||
// If the interrupt with this number occurred with the "null" interrupt handler
|
||||
// We will need to tell the PIC that interrupt is over, this stops new interrupts
|
||||
// From never firing because "it was never finished"
|
||||
// signal_end_of_interrupt(num);
|
||||
// signal_end_of_interrupt();
|
||||
}
|
||||
|
||||
extern "x86-interrupt" fn null_interrupt_handler() {
|
||||
@@ -86,7 +88,7 @@ extern "x86-interrupt" fn timer_handler() {
|
||||
signal_end_of_interrupt();
|
||||
}
|
||||
|
||||
fn idt_init() {
|
||||
pub fn idt_init() {
|
||||
unsafe {
|
||||
let idt_size = core::mem::size_of::<IdtEntry>() * 256;
|
||||
{
|
||||
@@ -101,8 +103,6 @@ fn idt_init() {
|
||||
idt_set_gate(num, null_interrupt_handler as usize);
|
||||
}
|
||||
|
||||
exceptions::set_exceptions();
|
||||
|
||||
idt_set_gate(InterruptIndex::Timer.as_u8(), timer_handler as usize);
|
||||
idt_set_gate(0x80, syscall as usize);
|
||||
|
||||
@@ -114,7 +114,7 @@ fn idt_init() {
|
||||
}
|
||||
|
||||
pub fn signal_end_of_interrupt() {
|
||||
apic::APIC.end_of_interrupt();
|
||||
APIC.end_of_interrupt();
|
||||
}
|
||||
|
||||
#[naked]
|
||||
@@ -146,12 +146,11 @@ pub extern "C" fn syscall_handler(_rdi: u64, _rsi: u64, rdx: u64, rcx: u64) {
|
||||
crate::print!("{message}");
|
||||
}
|
||||
|
||||
pub fn init() {
|
||||
crate::drivers::acpi::init_acpi();
|
||||
|
||||
idt_init();
|
||||
|
||||
pub fn enable_interrupts() {
|
||||
unsafe {
|
||||
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
|
||||
core::arch::asm!("sti");
|
||||
|
||||
// TODO: arm and riscv stuff
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
pub mod gdt;
|
||||
pub mod interrupts;
|
||||
pub mod io;
|
||||
pub mod stack_trace;
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
use alloc::{borrow::ToOwned, string::String, vec::Vec};
|
||||
|
||||
use crate::drivers::fs::vfs::vfs_open;
|
||||
|
||||
// use crate::drivers::fs::vfs::VfsFileSystem;
|
||||
|
||||
#[repr(C)]
|
||||
@@ -44,56 +46,58 @@ pub fn print_stack_trace(max_frames: usize, rbp: u64) {
|
||||
}
|
||||
|
||||
fn get_function_name(function_address: u64) -> Result<(String, u64), ()> {
|
||||
return Err(());
|
||||
// TODO: dont rely on initramfs being mounted at /
|
||||
let mut symbols_fd = vfs_open("/symbols.table")?;
|
||||
|
||||
// let symbols_fd = (*crate::drivers::fs::initramfs::INITRAMFS).open("/symbols.table")?;
|
||||
let symbols_table_bytes = symbols_fd.ops.open(
|
||||
0,
|
||||
crate::drivers::fs::vfs::UserCred { uid: 0, gid: 0 },
|
||||
symbols_fd.as_ptr(),
|
||||
)?;
|
||||
let symbols_table = core::str::from_utf8(&symbols_table_bytes).ok().ok_or(())?;
|
||||
|
||||
// let symbols_table_bytes = symbols_fd.read()?;
|
||||
// let symbols_table = core::str::from_utf8(&symbols_table_bytes).ok().ok_or(())?;
|
||||
let mut previous_symbol: Option<(&str, u64)> = None;
|
||||
|
||||
// let mut previous_symbol: Option<(&str, u64)> = None;
|
||||
let symbols_table_lines: Vec<&str> = symbols_table.lines().collect();
|
||||
|
||||
// let symbols_table_lines: Vec<&str> = symbols_table.lines().collect();
|
||||
for (i, line) in symbols_table_lines.iter().enumerate() {
|
||||
let line_parts: Vec<&str> = line.splitn(2, ' ').collect();
|
||||
|
||||
// for (i, line) in symbols_table_lines.iter().enumerate() {
|
||||
// let line_parts: Vec<&str> = line.splitn(2, ' ').collect();
|
||||
|
||||
// if line_parts.len() < 2 {
|
||||
// continue;
|
||||
// }
|
||||
|
||||
// let (address, function_name) = (
|
||||
// u64::from_str_radix(line_parts[0], 16).ok().ok_or(())?,
|
||||
// line_parts[1],
|
||||
// );
|
||||
|
||||
// if address == function_address {
|
||||
// return Ok((function_name.to_owned(), 0));
|
||||
// }
|
||||
|
||||
// if i == symbols_table_lines.len() - 1 {
|
||||
// return Ok((function_name.to_owned(), function_address - address));
|
||||
// }
|
||||
|
||||
// if i == 0 {
|
||||
// if function_address < address {
|
||||
// return Err(());
|
||||
// }
|
||||
|
||||
// previous_symbol = Some((function_name, address));
|
||||
// continue;
|
||||
// }
|
||||
|
||||
// if function_address > previous_symbol.unwrap().1 && function_address < address {
|
||||
// // function is previous symbol
|
||||
// return Ok((
|
||||
// previous_symbol.unwrap().0.to_owned(),
|
||||
// address - previous_symbol.unwrap().1,
|
||||
// ));
|
||||
// }
|
||||
|
||||
// previous_symbol = Some((function_name, address));
|
||||
// }
|
||||
|
||||
// unreachable!();
|
||||
if line_parts.len() < 2 {
|
||||
continue;
|
||||
}
|
||||
|
||||
let (address, function_name) = (
|
||||
u64::from_str_radix(line_parts[0], 16).ok().ok_or(())?,
|
||||
line_parts[1],
|
||||
);
|
||||
|
||||
if address == function_address {
|
||||
return Ok((function_name.to_owned(), 0));
|
||||
}
|
||||
|
||||
if i == symbols_table_lines.len() - 1 {
|
||||
return Ok((function_name.to_owned(), function_address - address));
|
||||
}
|
||||
|
||||
if i == 0 {
|
||||
if function_address < address {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
previous_symbol = Some((function_name, address));
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(prev_symbol) = previous_symbol {
|
||||
if function_address > prev_symbol.1 && function_address < address {
|
||||
// function is previous symbol
|
||||
return Ok((prev_symbol.0.to_owned(), address - prev_symbol.1));
|
||||
}
|
||||
}
|
||||
|
||||
previous_symbol = Some((function_name, address));
|
||||
}
|
||||
|
||||
unreachable!();
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"cpu": "x86-64",
|
||||
"data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128",
|
||||
"data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128",
|
||||
"llvm-target": "x86_64-unknown-none",
|
||||
"target-endian": "little",
|
||||
"target-pointer-width": "64",
|
||||
|
||||
@@ -6,6 +6,7 @@ pub enum DeviceType {
|
||||
BlockDevice = 1,
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
pub struct Device {
|
||||
typ: DeviceType,
|
||||
block_size: usize,
|
||||
|
||||
@@ -567,38 +567,6 @@ impl FsOps for FatFs {
|
||||
) -> super::vfs::VNode {
|
||||
todo!("FAT VGET");
|
||||
}
|
||||
|
||||
// fn open(&self, path: &str) -> Result<Box<dyn VfsFile + '_>, ()> {
|
||||
// let path_componenets: Vec<&str> = path.trim_start_matches('/').split('/').collect();
|
||||
// let mut current_cluster = match self.fat_type {
|
||||
// FatType::Fat32(ebpb) => ebpb.root_dir_cluster as usize,
|
||||
// _ => self.sector_to_cluster(
|
||||
// self.bpb.reserved_sectors as usize
|
||||
// + (self.bpb.fat_count as usize * self.sectors_per_fat),
|
||||
// ),
|
||||
// };
|
||||
|
||||
// for path in path_componenets {
|
||||
// let file_entry: FileEntry = self.find_entry_in_directory(current_cluster, path)?;
|
||||
|
||||
// if file_entry.attributes == FileEntryAttributes::Directory as u8 {
|
||||
// current_cluster = (((file_entry.high_first_cluster_number as u32) << 16)
|
||||
// | file_entry.low_first_cluster_number as u32)
|
||||
// as usize;
|
||||
// } else {
|
||||
// return Ok(Box::new(FatFile {
|
||||
// fat_fs: self,
|
||||
// file_entry,
|
||||
// }));
|
||||
// }
|
||||
// }
|
||||
|
||||
// return Err(());
|
||||
// }
|
||||
|
||||
// fn read_dir(&self, _path: &str) -> Result<Box<dyn VfsDirectory>, ()> {
|
||||
// unimplemented!();
|
||||
// }
|
||||
}
|
||||
|
||||
enum File {
|
||||
@@ -859,77 +827,9 @@ impl<'a> VNodeOperations for File {
|
||||
}
|
||||
|
||||
struct FatFile {
|
||||
// fat_fs: &'a FatFs,
|
||||
file_entry: FileEntry,
|
||||
}
|
||||
|
||||
// impl<'a> VfsFile for FatFile<'a> {
|
||||
// fn read(&self) -> Result<Arc<[u8]>, ()> {
|
||||
// let mut file: Vec<u8> = Vec::with_capacity(self.file_entry.file_size as usize);
|
||||
// let mut file_ptr_index = 0;
|
||||
|
||||
// let mut cluster = ((self.file_entry.high_first_cluster_number as u32) << 16)
|
||||
// | self.file_entry.low_first_cluster_number as u32;
|
||||
// let cluster_size = self.fat_fs.cluster_size;
|
||||
|
||||
// let mut copied_bytes = 0;
|
||||
|
||||
// loop {
|
||||
// let cluster_data = self.fat_fs.read_cluster(cluster as usize)?;
|
||||
|
||||
// let remaining = self.file_entry.file_size as usize - copied_bytes;
|
||||
// let to_copy = if remaining > cluster_size {
|
||||
// cluster_size
|
||||
// } else {
|
||||
// remaining
|
||||
// };
|
||||
|
||||
// unsafe {
|
||||
// core::ptr::copy_nonoverlapping(
|
||||
// cluster_data.as_ptr(),
|
||||
// file.as_mut_ptr().add(file_ptr_index),
|
||||
// to_copy,
|
||||
// );
|
||||
|
||||
// file.set_len(file.len() + to_copy);
|
||||
// }
|
||||
|
||||
// file_ptr_index += cluster_size;
|
||||
|
||||
// copied_bytes += to_copy;
|
||||
|
||||
// cluster = self.fat_fs.get_next_cluster(cluster as usize);
|
||||
|
||||
// match self.fat_fs.fat_type {
|
||||
// FatType::Fat12(_) => {
|
||||
// if cluster >= EOC_12 {
|
||||
// break;
|
||||
// }
|
||||
// }
|
||||
// FatType::Fat16(_) => {
|
||||
// if cluster >= EOC_16 {
|
||||
// break;
|
||||
// }
|
||||
// }
|
||||
// FatType::Fat32(_) => {
|
||||
// if cluster >= EOC_32 {
|
||||
// break;
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
// return Ok(Arc::from(file));
|
||||
// }
|
||||
// }
|
||||
|
||||
struct FatDirectory {
|
||||
// fat_fs: &'a FatFs,
|
||||
directory_cluster: usize,
|
||||
}
|
||||
|
||||
// impl<'a> VfsDirectory for FatDirectory<'a> {
|
||||
// fn list_files(&self) -> Result<Arc<[Box<dyn VfsFile>]>, ()> {
|
||||
// unimplemented!();
|
||||
// }
|
||||
// }
|
||||
|
||||
142
src/drivers/fs/initramfs/chunk_reader.rs
Normal file
142
src/drivers/fs/initramfs/chunk_reader.rs
Normal file
@@ -0,0 +1,142 @@
|
||||
use alloc::borrow::Cow;
|
||||
use alloc::vec::Vec;
|
||||
use core::ops::Index;
|
||||
use core::ops::{Range, RangeFrom};
|
||||
|
||||
const HEADER_SIZE: usize = 2;
|
||||
|
||||
struct Chunk<'a> {
|
||||
data: Cow<'a, [u8]>,
|
||||
}
|
||||
|
||||
impl Chunk<'_> {
|
||||
fn header(&self) -> u16 {
|
||||
u16::from_le_bytes(self.data[0..HEADER_SIZE].try_into().unwrap())
|
||||
}
|
||||
|
||||
fn len(&self) -> usize {
|
||||
self.header() as usize & 0x7FFF
|
||||
}
|
||||
|
||||
fn is_compressed(&self) -> bool {
|
||||
self.header() & 0x8000 == 0
|
||||
}
|
||||
|
||||
fn decompress(&mut self, decompressor: &dyn Fn(&[u8]) -> Result<Vec<u8>, ()>) {
|
||||
if self.is_compressed() {
|
||||
let decompressed_data = decompressor(&self.data[HEADER_SIZE..]).unwrap();
|
||||
|
||||
let header = decompressed_data.len() as u16 | 0x8000;
|
||||
|
||||
let data = [header.to_le_bytes().to_vec(), decompressed_data].concat();
|
||||
|
||||
self.data = Cow::Owned(data);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Index<usize> for Chunk<'_> {
|
||||
type Output = u8;
|
||||
|
||||
fn index(&self, index: usize) -> &Self::Output {
|
||||
&self.data[index]
|
||||
}
|
||||
}
|
||||
|
||||
impl Index<Range<usize>> for Chunk<'_> {
|
||||
type Output = [u8];
|
||||
|
||||
fn index(&self, index: Range<usize>) -> &Self::Output {
|
||||
&self.data[index]
|
||||
}
|
||||
}
|
||||
|
||||
impl Index<RangeFrom<usize>> for Chunk<'_> {
|
||||
type Output = [u8];
|
||||
|
||||
fn index(&self, index: RangeFrom<usize>) -> &Self::Output {
|
||||
&self.data[index]
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ChunkReader<'a, F> {
|
||||
chunks: Vec<Chunk<'a>>,
|
||||
decompressor: F,
|
||||
}
|
||||
|
||||
impl<'a, F: Fn(&[u8]) -> Result<Vec<u8>, ()>> ChunkReader<'a, F> {
|
||||
pub fn new(data: &'a [u8], decompressor: F) -> Self {
|
||||
let mut chunks: Vec<Chunk<'_>> = Vec::new();
|
||||
|
||||
let mut offset = 0;
|
||||
loop {
|
||||
if offset == data.len() {
|
||||
break;
|
||||
}
|
||||
|
||||
let length =
|
||||
(u16::from_le_bytes(data[offset..offset + HEADER_SIZE].try_into().unwrap())
|
||||
& 0x7FFF) as usize
|
||||
+ HEADER_SIZE;
|
||||
|
||||
chunks.push(Chunk {
|
||||
data: Cow::Borrowed(&data[offset..offset + length]),
|
||||
});
|
||||
|
||||
offset += length;
|
||||
}
|
||||
|
||||
Self {
|
||||
chunks,
|
||||
decompressor,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_slice(&mut self, mut chunk: u64, mut offset: u16, size: usize) -> Vec<u8> {
|
||||
// handle cases where the chunks arent aligned to CHUNK_SIZE (they're compressed and are doing stupid things)
|
||||
{
|
||||
let mut chunk_idx = 0;
|
||||
let mut total_length = 0;
|
||||
|
||||
while total_length != chunk {
|
||||
chunk_idx += 1;
|
||||
total_length += (self.chunks[0].len() as usize + HEADER_SIZE) as u64;
|
||||
}
|
||||
|
||||
chunk = chunk_idx;
|
||||
}
|
||||
|
||||
let mut chunks_to_read = 1;
|
||||
{
|
||||
let mut available_bytes = {
|
||||
self.chunks[chunk as usize].decompress(&self.decompressor);
|
||||
self.chunks[chunk as usize][offset as usize..].len()
|
||||
};
|
||||
|
||||
while available_bytes < size {
|
||||
self.chunks[chunk as usize + chunks_to_read].decompress(&self.decompressor);
|
||||
available_bytes += self.chunks[chunk as usize + chunks_to_read].len();
|
||||
chunks_to_read += 1;
|
||||
}
|
||||
}
|
||||
|
||||
let mut data = Vec::new();
|
||||
|
||||
for i in chunk as usize..chunk as usize + chunks_to_read {
|
||||
self.chunks[i].decompress(&self.decompressor);
|
||||
|
||||
let block_start = offset as usize + HEADER_SIZE;
|
||||
let mut block_end = self.chunks[i].len() + HEADER_SIZE;
|
||||
|
||||
if (block_end - block_start) > size {
|
||||
block_end = block_start + size;
|
||||
}
|
||||
|
||||
data.extend(self.chunks[i][block_start..block_end].into_iter());
|
||||
|
||||
offset = 0;
|
||||
}
|
||||
|
||||
data
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
pub mod gzip;
|
||||
File diff suppressed because it is too large
Load Diff
175
src/drivers/fs/initramfs/superblock.rs
Normal file
175
src/drivers/fs/initramfs/superblock.rs
Normal file
@@ -0,0 +1,175 @@
|
||||
/// Compression algorithms a squashfs image may use, with each variant's
/// discriminant equal to the id stored in the superblock's 16-bit
/// `compressor` field.
#[repr(u16)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum SquashfsCompressionType {
    Gzip = 1,
    Lzma = 2,
    Lzo = 3,
    Xz = 4,
    Lz4 = 5,
    Zstd = 6,
}

impl From<u16> for SquashfsCompressionType {
    /// Decodes the on-disk compressor id (1..=6).
    ///
    /// # Panics
    /// Panics on any id outside that range, since `From` cannot fail.
    fn from(value: u16) -> Self {
        use SquashfsCompressionType::*;

        match value {
            1 => Gzip,
            2 => Lzma,
            3 => Lzo,
            4 => Xz,
            5 => Lz4,
            6 => Zstd,
            _ => panic!("Unexpected Squashfs compression type!"),
        }
    }
}
|
||||
|
||||
/// Feature-flag bits of the superblock's 16-bit `flags` word; each variant
/// is exactly one bit, written here as a shift to make the bit position
/// explicit.
#[repr(u16)]
enum SquashfsFlags {
    UncompressedInodes = 1 << 0,
    UncompressedDataBlocks = 1 << 1,
    // Bit 2 is reserved by the format.
    Reserved = 1 << 2,
    UncompressedFragments = 1 << 3,
    UnusedFragments = 1 << 4,
    FragmentsAlwaysPresent = 1 << 5,
    DeduplicatedData = 1 << 6,
    PresentNFSTable = 1 << 7,
    UncompressedXattrs = 1 << 8,
    NoXattrs = 1 << 9,
    PresentCompressorOptions = 1 << 10,
    UncompressedIDTable = 1 << 11,
}
|
||||
|
||||
/// Decoded, one-`bool`-per-bit view of the superblock's `flags` word
/// (see `SquashfsFlags` for the bit assignments).
/// Field order is kept in bit order; the derived `Debug` output depends on it.
#[allow(dead_code)]
#[derive(Debug)]
pub struct SquashfsFeatures {
    pub uncompressed_inodes: bool,
    pub uncompressed_data_blocks: bool,
    // Bit 0x0004 is reserved in the format; kept so the decode covers every bit.
    _reserved: bool,
    pub uncompressed_fragments: bool,
    pub unused_fragments: bool,
    pub fragments_always_present: bool,
    pub deduplicated_data: bool,
    pub nfs_table_present: bool,
    pub uncompressed_xattrs: bool,
    pub no_xattrs: bool,
    pub compressor_options_present: bool,
    pub uncompressed_id_table: bool,
}
|
||||
|
||||
/// On-disk squashfs v4.0 superblock: 96 bytes of little-endian fields
/// (5 x u32 + 6 x u16 + 8 x u64).
///
/// `#[repr(C, packed)]` pins field order and removes padding so the struct
/// mirrors the on-disk layout exactly — do not reorder fields.
/// The trailing comments record values observed in a sample image.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug)]
pub struct SquashfsSuperblock {
    magic: u32, // 0x73717368
    inode_count: u32, // 0x02
    mod_time: u32, // varies
    pub block_size: u32, // 0x20000
    frag_count: u32, // 0x01
    compressor: SquashfsCompressionType, // GZIP
    block_log: u16, // 0x11
    flags: u16, // 0xC0
    id_count: u16, // 0x01
    ver_major: u16, // 0x04
    ver_minor: u16, // 0x00
    pub root_inode: u64, //
    bytes_used: u64, // 0x0103
    pub id_table: u64, // 0x00FB
    pub xattr_table: u64, // 0xFFFFFFFFFFFFFFFF
    pub inode_table: u64, // 0x7B
    pub dir_table: u64, // 0xA4
    pub frag_table: u64, // 0xD5
    pub export_table: u64, // 0xED
}
|
||||
|
||||
impl SquashfsSuperblock {
|
||||
pub fn new(bytes: &[u8]) -> Result<Self, ()> {
|
||||
let superblock = Self {
|
||||
magic: u32::from_le_bytes(bytes[0..4].try_into().unwrap()),
|
||||
inode_count: u32::from_le_bytes(bytes[4..8].try_into().unwrap()),
|
||||
mod_time: u32::from_le_bytes(bytes[8..12].try_into().unwrap()),
|
||||
block_size: u32::from_le_bytes(bytes[12..16].try_into().unwrap()),
|
||||
frag_count: u32::from_le_bytes(bytes[16..20].try_into().unwrap()),
|
||||
compressor: u16::from_le_bytes(bytes[20..22].try_into().unwrap()).into(),
|
||||
block_log: u16::from_le_bytes(bytes[22..24].try_into().unwrap()),
|
||||
flags: u16::from_le_bytes(bytes[24..26].try_into().unwrap()),
|
||||
id_count: u16::from_le_bytes(bytes[26..28].try_into().unwrap()),
|
||||
ver_major: u16::from_le_bytes(bytes[28..30].try_into().unwrap()),
|
||||
ver_minor: u16::from_le_bytes(bytes[30..32].try_into().unwrap()),
|
||||
root_inode: u64::from_le_bytes(bytes[32..40].try_into().unwrap()),
|
||||
bytes_used: u64::from_le_bytes(bytes[40..48].try_into().unwrap()),
|
||||
id_table: u64::from_le_bytes(bytes[48..56].try_into().unwrap()),
|
||||
xattr_table: u64::from_le_bytes(bytes[56..64].try_into().unwrap()),
|
||||
inode_table: u64::from_le_bytes(bytes[64..72].try_into().unwrap()),
|
||||
dir_table: u64::from_le_bytes(bytes[72..80].try_into().unwrap()),
|
||||
frag_table: u64::from_le_bytes(bytes[80..88].try_into().unwrap()),
|
||||
export_table: u64::from_le_bytes(bytes[88..96].try_into().unwrap()),
|
||||
};
|
||||
|
||||
if superblock.magic != 0x73717368 {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
if superblock.ver_major != 4 || superblock.ver_minor != 0 {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
if superblock.block_size > 1048576 {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
if superblock.block_log > 20 {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
if superblock.block_size != (1 << superblock.block_log) {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
if superblock.block_size == 0 {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
if ((superblock.block_size - 1) & superblock.block_size) != 0 {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
return Ok(superblock);
|
||||
}
|
||||
|
||||
pub fn compressor(&self) -> SquashfsCompressionType {
|
||||
self.compressor
|
||||
}
|
||||
|
||||
pub fn features(&self) -> SquashfsFeatures {
|
||||
let uncompressed_inodes = (self.flags & SquashfsFlags::UncompressedInodes as u16) != 0;
|
||||
let uncompressed_data_blocks =
|
||||
(self.flags & SquashfsFlags::UncompressedDataBlocks as u16) != 0;
|
||||
let _reserved = (self.flags & SquashfsFlags::Reserved as u16) != 0;
|
||||
let uncompressed_fragments =
|
||||
(self.flags & SquashfsFlags::UncompressedFragments as u16) != 0;
|
||||
let unused_fragments = (self.flags & SquashfsFlags::UnusedFragments as u16) != 0;
|
||||
let fragments_always_present =
|
||||
(self.flags & SquashfsFlags::FragmentsAlwaysPresent as u16) != 0;
|
||||
let deduplicated_data = (self.flags & SquashfsFlags::DeduplicatedData as u16) != 0;
|
||||
let nfs_table_present = (self.flags & SquashfsFlags::PresentNFSTable as u16) != 0;
|
||||
let uncompressed_xattrs = (self.flags & SquashfsFlags::UncompressedXattrs as u16) != 0;
|
||||
let no_xattrs = (self.flags & SquashfsFlags::NoXattrs as u16) != 0;
|
||||
let compressor_options_present =
|
||||
(self.flags & SquashfsFlags::PresentCompressorOptions as u16) != 0;
|
||||
let uncompressed_id_table = (self.flags & SquashfsFlags::UncompressedIDTable as u16) != 0;
|
||||
|
||||
return SquashfsFeatures {
|
||||
uncompressed_inodes,
|
||||
uncompressed_data_blocks,
|
||||
_reserved,
|
||||
uncompressed_fragments,
|
||||
unused_fragments,
|
||||
fragments_always_present,
|
||||
deduplicated_data,
|
||||
nfs_table_present,
|
||||
uncompressed_xattrs,
|
||||
no_xattrs,
|
||||
compressor_options_present,
|
||||
uncompressed_id_table,
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -1,81 +1,23 @@
|
||||
// use alloc::{
|
||||
// boxed::Box,
|
||||
// string::{String, ToString},
|
||||
// sync::Arc,
|
||||
// vec::Vec,
|
||||
// };
|
||||
|
||||
// use crate::{drivers::pci::PCI_DEVICES, libs::sync::Mutex};
|
||||
|
||||
// pub trait VfsFileSystem {
|
||||
// fn open(&self, path: &str) -> Result<Box<dyn VfsFile + '_>, ()>;
|
||||
// fn read_dir(&self, path: &str) -> Result<Box<dyn VfsDirectory>, ()>;
|
||||
// }
|
||||
|
||||
// pub trait VfsFile {
|
||||
// fn read(&self) -> Result<Arc<[u8]>, ()>;
|
||||
// }
|
||||
|
||||
// pub trait VfsDirectory {
|
||||
// fn list_files(&self) -> Result<Arc<[Box<dyn VfsFile>]>, ()>;
|
||||
// }
|
||||
|
||||
// pub static VFS_INSTANCES: Mutex<Vec<Vfs>> = Mutex::new(Vec::new());
|
||||
|
||||
// pub struct Vfs {
|
||||
// _identifier: String,
|
||||
// file_system: Box<dyn VfsFileSystem>,
|
||||
// }
|
||||
|
||||
// impl Vfs {
|
||||
// pub fn new(file_system: Box<dyn VfsFileSystem>, identifier: &str) -> Self {
|
||||
// return Self {
|
||||
// _identifier: identifier.to_string(),
|
||||
// file_system,
|
||||
// };
|
||||
// }
|
||||
|
||||
// pub fn open(&self, path: &str) -> Result<Box<dyn VfsFile + '_>, ()> {
|
||||
// return self.file_system.open(path);
|
||||
// }
|
||||
|
||||
// pub fn read_dir(&self, path: &str) -> Result<Box<dyn VfsDirectory>, ()> {
|
||||
// return self.file_system.read_dir(path);
|
||||
// }
|
||||
// }
|
||||
|
||||
// pub fn init() {
|
||||
// // TODO: Deduce which storage medium(s) we're using
|
||||
// let pci_devices_lock = PCI_DEVICES.lock();
|
||||
// let mass_storage_devices = pci_devices_lock
|
||||
// .iter()
|
||||
// .filter(|&pci_device| pci_device.class_code == 0x01)
|
||||
// .collect::<Vec<_>>();
|
||||
|
||||
// for pci_device in mass_storage_devices {
|
||||
// match pci_device.subclass_code {
|
||||
// 0x01 => crate::drivers::storage::ide::init(),
|
||||
// _ => {}
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
use core::fmt::Debug;
|
||||
|
||||
use alloc::{
|
||||
alloc::{alloc, handle_alloc_error},
|
||||
alloc::{alloc, dealloc},
|
||||
boxed::Box,
|
||||
sync::Arc,
|
||||
vec::Vec,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
log_info,
|
||||
mem::{ALLOCATOR, PHYSICAL_MEMORY_MANAGER},
|
||||
log_info, log_ok,
|
||||
mem::{
|
||||
// ALLOCATOR,
|
||||
PHYSICAL_MEMORY_MANAGER,
|
||||
},
|
||||
};
|
||||
|
||||
static mut ROOT_VFS: Vfs = Vfs::null();
|
||||
|
||||
#[allow(unused)]
|
||||
pub struct Vfs {
|
||||
next: Option<*mut Vfs>,
|
||||
ops: Option<Box<dyn FsOps>>,
|
||||
@@ -121,11 +63,13 @@ pub trait FsOps {
|
||||
fn vget(&mut self, fid: FileId, vfsp: *const Vfs) -> VNode;
|
||||
}
|
||||
|
||||
/// Opaque file identifier handed to `FsOps::vget` to re-materialize a vnode.
#[allow(unused)]
pub struct FileId {
    // Number of significant bytes of identifier data.
    len: u16,
    // NOTE(review): a single u8 cannot hold `len` bytes of id; presumably this
    // is the first byte of a trailing variable-length buffer — confirm the
    // intended layout before use.
    data: u8,
}
|
||||
|
||||
#[allow(unused)]
|
||||
pub struct StatFs {
|
||||
typ: u32,
|
||||
block_size: u32,
|
||||
@@ -192,11 +136,13 @@ pub enum IODirection {
|
||||
Write,
|
||||
}
|
||||
|
||||
/// Scatter/gather element (analogous to POSIX `iovec`) referenced by `UIO`
/// for vectored reads/writes.
#[allow(unused)]
pub struct IoVec {
    // Start of the buffer this element describes.
    iov_base: *mut u8,
    // Length of that buffer in bytes.
    iov_len: usize,
}
|
||||
|
||||
#[allow(unused)]
|
||||
pub struct UIO {
|
||||
iov: *mut IoVec,
|
||||
iov_count: u32,
|
||||
@@ -260,6 +206,7 @@ pub trait VNodeOperations {
|
||||
fn bread(&mut self, block_number: u32, vp: *const VNode) -> Arc<[u8]>;
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
pub struct VAttr {
|
||||
typ: VNode,
|
||||
mode: u16,
|
||||
@@ -281,11 +228,9 @@ pub struct VAttr {
|
||||
|
||||
pub fn add_vfs(mount_point: &str, fs_ops: Box<dyn FsOps>) -> Result<(), ()> {
|
||||
let layout = alloc::alloc::Layout::new::<Vfs>();
|
||||
// TODO: investigate why on earth this gives me an allocation error
|
||||
// let vfs = unsafe { alloc(layout).cast::<Vfs>() };
|
||||
let vfs = PHYSICAL_MEMORY_MANAGER.alloc(1).unwrap().cast::<Vfs>();
|
||||
let vfs_ptr = unsafe { alloc(layout).cast::<Vfs>() };
|
||||
|
||||
let vfs = unsafe { &mut *vfs };
|
||||
let vfs = unsafe { &mut *vfs_ptr };
|
||||
|
||||
(*vfs) = Vfs::null();
|
||||
(*vfs).ops = Some(fs_ops);
|
||||
@@ -308,22 +253,23 @@ pub fn add_vfs(mount_point: &str, fs_ops: Box<dyn FsOps>) -> Result<(), ()> {
|
||||
}
|
||||
|
||||
unsafe { ROOT_VFS.next = Some(vfs.as_mut_ptr()) };
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
} else {
|
||||
if unsafe { ROOT_VFS.next.is_none() } {
|
||||
unsafe { dealloc(vfs_ptr.cast::<u8>(), layout) };
|
||||
return Err(());
|
||||
}
|
||||
|
||||
let target_vfs = unsafe { ROOT_VFS.next.unwrap() };
|
||||
|
||||
let binding = unsafe { &mut (*target_vfs).ops };
|
||||
let mut cur_vnode = binding.as_mut().unwrap().root(target_vfs);
|
||||
let mut cur_vnode = unsafe { (*target_vfs).ops.as_mut().unwrap().root(target_vfs) };
|
||||
|
||||
let parts = mount_point.split('/').collect::<Vec<&str>>();
|
||||
|
||||
for part in parts {
|
||||
if part.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
// TODO: dont just lookup everything as the root user
|
||||
if let Ok(vnode) =
|
||||
cur_vnode
|
||||
@@ -332,11 +278,13 @@ pub fn add_vfs(mount_point: &str, fs_ops: Box<dyn FsOps>) -> Result<(), ()> {
|
||||
{
|
||||
cur_vnode = vnode;
|
||||
} else {
|
||||
unsafe { dealloc(vfs_ptr.cast::<u8>(), layout) };
|
||||
return Err(());
|
||||
}
|
||||
}
|
||||
|
||||
if cur_vnode.vfs_mounted_here.is_some() {
|
||||
unsafe { dealloc(vfs_ptr.cast::<u8>(), layout) };
|
||||
return Err(());
|
||||
}
|
||||
|
||||
@@ -351,15 +299,21 @@ pub fn add_vfs(mount_point: &str, fs_ops: Box<dyn FsOps>) -> Result<(), ()> {
|
||||
}
|
||||
|
||||
cur_vnode.vfs_mounted_here = Some(vfs.as_mut_ptr());
|
||||
}
|
||||
|
||||
return Err(());
|
||||
log_ok!("Added vfs at {mount_point}");
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
pub fn vfs_open(path: &str) -> Result<VNode, ()> {
|
||||
if unsafe { ROOT_VFS.next.is_none() } {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
let parts = path.split('/').collect::<Vec<&str>>();
|
||||
let target_vfs = unsafe { ROOT_VFS.next.unwrap() };
|
||||
let binding = unsafe { &mut (*target_vfs).ops };
|
||||
let mut cur_vnode = binding.as_mut().unwrap().root(target_vfs);
|
||||
let mut cur_vnode = unsafe { (*target_vfs).ops.as_mut().unwrap().root(target_vfs) };
|
||||
|
||||
for part in parts {
|
||||
if part.is_empty() {
|
||||
@@ -371,7 +325,11 @@ pub fn vfs_open(path: &str) -> Result<VNode, ()> {
|
||||
.ops
|
||||
.lookup(part, UserCred { uid: 0, gid: 0 }, cur_vnode.as_ptr())
|
||||
{
|
||||
if let Some(vfs) = vnode.vfs_mounted_here {
|
||||
cur_vnode = unsafe { (*vfs).ops.as_mut().unwrap().root(vfs) }
|
||||
} else {
|
||||
cur_vnode = vnode;
|
||||
}
|
||||
} else {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
@@ -19,6 +19,12 @@ pub static POISONED: AtomicBool = AtomicBool::new(false);
|
||||
// PORT + 4: Modem control register
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
pub fn init_serial() -> u8 {
|
||||
outb(PORT + 7, 0x42);
|
||||
if inb(PORT + 7) != 0x42 {
|
||||
// serial port does not exist
|
||||
return 1;
|
||||
}
|
||||
|
||||
outb(PORT + 1, 0x00);
|
||||
outb(PORT + 3, 0x80);
|
||||
outb(PORT, 0x03);
|
||||
@@ -31,14 +37,12 @@ pub fn init_serial() -> u8 {
|
||||
|
||||
// Check if serial is faulty
|
||||
if inb(PORT) != 0xAE {
|
||||
crate::log_error!("Serial Driver failed to initialize");
|
||||
POISONED.store(true, core::sync::atomic::Ordering::Relaxed);
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Set serial in normal operation mode
|
||||
outb(PORT + 4, 0x0F);
|
||||
crate::log_ok!("Serial Driver successfully initialized");
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -52,21 +56,21 @@ pub fn write_string(string: &str) {
|
||||
#[cfg(not(target_arch = "x86_64"))]
|
||||
{
|
||||
for &ch in string.as_bytes() {
|
||||
write_serial(ch as char);
|
||||
write_serial(ch);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(target_arch = "x86_64"))]
|
||||
pub fn init_serial() -> u8 {
|
||||
return 0;
|
||||
}
|
||||
|
||||
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
|
||||
fn is_transmit_empty() -> bool {
|
||||
return inb((PORT + 5) & 0x20) == 0;
|
||||
}
|
||||
|
||||
// Serial output is not implemented on non-x86_64 targets yet; report success
// (0) without touching any hardware so callers can proceed unconditionally.
#[cfg(not(target_arch = "x86_64"))]
pub fn init_serial() -> u8 {
    return 0;
}
|
||||
|
||||
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
|
||||
pub fn write_serial(character: u8) {
|
||||
while is_transmit_empty() {}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use core::mem::size_of;
|
||||
|
||||
use alloc::{boxed::Box, format, sync::Arc, vec::Vec};
|
||||
use alloc::{boxed::Box, sync::Arc, vec::Vec};
|
||||
|
||||
use crate::{
|
||||
arch::io::{inb, insw, inw, outb, outsw},
|
||||
@@ -690,7 +690,8 @@ fn ide_initialize(bar0: u32, bar1: u32, _bar2: u32, _bar3: u32, _bar4: u32) {
|
||||
|
||||
let fat_fs = fat_fs.unwrap();
|
||||
|
||||
add_vfs("/", Box::new(fat_fs));
|
||||
// TODO
|
||||
let _ = add_vfs("/", Box::new(fat_fs));
|
||||
|
||||
// let vfs = crate::drivers::fs::vfs::Vfs::new(
|
||||
// Box::new(fat_fs),
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use alloc::{sync::Arc, vec::Vec};
|
||||
use alloc::vec::Vec;
|
||||
|
||||
use crate::libs::sync::Mutex;
|
||||
|
||||
@@ -1,89 +0,0 @@
|
||||
/// Absolute value of `x`, computed branch-free by clearing the IEEE-754 sign
/// bit of the raw representation (also normalizes `-0.0` to `+0.0`).
pub fn abs(x: f64) -> f64 {
    const SIGN_BIT: u64 = 1 << 63;
    f64::from_bits(x.to_bits() & !SIGN_BIT)
}
|
||||
|
||||
/// 2^52. Adding then subtracting this from an f64 with magnitude below 2^52
/// rounds it to the nearest integer (the classic "toint" trick from musl).
const TOINT: f64 = 1. / f64::EPSILON;

/// Largest integral f64 <= `x` (software floor, musl-style).
pub fn floor(x: f64) -> f64 {
    // On x86 without SSE2 the math below runs through the 80-bit x87 FPU,
    // which breaks the TOINT rounding trick, so use integer truncation.
    #[cfg(all(
        any(target_arch = "x86", target_arch = "x86_64"),
        not(target_feature = "sse2")
    ))]
    {
        if abs(x).to_bits() < 4503599627370496.0_f64.to_bits() {
            // |x| < 2^52, so it fits in an i64; `as` truncates toward zero.
            let truncated = x as i64 as f64;
            if truncated > x {
                return truncated - 1.0;
            } else {
                return truncated;
            }
        } else {
            // Magnitudes >= 2^52 are already integral.
            return x;
        }
    }

    let ui = x.to_bits();
    let e = ((ui >> 52) & 0x7FF) as i32;

    // Exponent >= 52 means x is already integral (or inf/NaN); zero is its
    // own floor.
    if (e >= 0x3FF + 52) || (x == 0.) {
        return x;
    }

    // y = round_to_nearest(x) - x. The sign-dependent ordering keeps the
    // intermediate additions exact.
    //
    // Bug fix: the positive branch previously computed
    // `x + TOINT + TOINT - x`, adding 2^52 TWICE instead of adding and then
    // subtracting it, which produced astronomically wrong results for any
    // positive non-integral x (e.g. floor(1.5) ~ 2^53). The sibling `ceil`
    // below has the correct `+ TOINT - TOINT` form.
    let y = if (ui >> 63) != 0 {
        x - TOINT + TOINT - x
    } else {
        x + TOINT - TOINT - x
    };

    // |x| < 1: floor is -1 for negative x, 0 for positive x.
    if e < 0x3FF {
        return if (ui >> 63) != 0 { -1. } else { 0. };
    }

    // If rounding went up past x, step back down one.
    if y > 0. {
        return x + y - 1.;
    } else {
        return x + y;
    }
}
|
||||
|
||||
/// Smallest integral f64 >= `x` (software ceil, musl-style).
pub fn ceil(x: f64) -> f64 {
    // x87 fallback: without SSE2 the 80-bit intermediates break the rounding
    // trick, so truncate via i64 and adjust upward when needed.
    #[cfg(all(
        any(target_arch = "x86", target_arch = "x86_64"),
        not(target_feature = "sse2")
    ))]
    {
        if abs(x).to_bits() < 4503599627370496.0_f64.to_bits() {
            let truncated = x as i64 as f64;
            return if truncated < x { truncated + 1.0 } else { truncated };
        }
        // Magnitudes >= 2^52 are already integral.
        return x;
    }

    // 2^52: adding then subtracting it rounds to the nearest integer.
    const TOINT: f64 = 1. / f64::EPSILON;

    let bits: u64 = x.to_bits();
    let exp: i64 = ((bits >> 52) & 0x7ff) as i64;

    // Already integral (exponent >= 52, incl. inf/NaN) or zero.
    if exp >= 0x3ff + 52 || x == 0. {
        return x;
    }

    let negative = (bits >> 63) != 0;

    // y = round_to_nearest(x) - x, ordered so intermediates stay exact.
    let y = if negative {
        x - TOINT + TOINT - x
    } else {
        x + TOINT + -TOINT - x
    };

    // |x| < 1: ceil is -0 for negative x, 1 for positive x.
    if exp < 0x3ff {
        return if negative { -0. } else { 1. };
    }

    // If rounding went down past x, step up one.
    if y < 0. {
        return x + y + 1.;
    }
    return x + y;
}
|
||||
@@ -1,4 +1,4 @@
|
||||
pub mod cell;
|
||||
pub mod math;
|
||||
pub mod gzip;
|
||||
pub mod sync;
|
||||
pub mod uuid;
|
||||
|
||||
@@ -4,12 +4,12 @@ use core::{
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
};
|
||||
|
||||
pub struct Mutex<T: ?Sized> {
|
||||
pub struct Mutex<T> {
|
||||
locked: AtomicBool,
|
||||
data: UnsafeCell<T>,
|
||||
}
|
||||
|
||||
unsafe impl<T: ?Sized> Sync for Mutex<T> {}
|
||||
unsafe impl<T> Sync for Mutex<T> {}
|
||||
|
||||
impl<T> Mutex<T> {
|
||||
#[inline]
|
||||
@@ -21,10 +21,11 @@ impl<T> Mutex<T> {
|
||||
}
|
||||
|
||||
pub fn lock(&self) -> MutexGuard<'_, T> {
|
||||
// if self.locked.load(Ordering::Acquire) == true {
|
||||
// unsafe { core::arch::asm!("out dx, al", in("dx") 0x3f8, in("al") 'S' as u8) };
|
||||
// }
|
||||
while self.locked.swap(true, Ordering::Acquire) {
|
||||
while self
|
||||
.locked
|
||||
.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
|
||||
.is_err()
|
||||
{
|
||||
// spin lock
|
||||
}
|
||||
return MutexGuard { mutex: self };
|
||||
@@ -44,29 +45,26 @@ impl<T> core::fmt::Debug for Mutex<T> {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct MutexGuard<'a, T: ?Sized> {
|
||||
pub struct MutexGuard<'a, T> {
|
||||
mutex: &'a Mutex<T>,
|
||||
}
|
||||
|
||||
impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> {
|
||||
impl<'a, T> Deref for MutexGuard<'a, T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
// unsafe { core::arch::asm!("out dx, al", in("dx") 0x3f8, in("al") 'D' as u8) };
|
||||
|
||||
unsafe { &*self.mutex.data.get() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> {
|
||||
impl<'a, T> DerefMut for MutexGuard<'a, T> {
|
||||
fn deref_mut(&mut self) -> &mut T {
|
||||
// unsafe { core::arch::asm!("out dx, al", in("dx") 0x3f8, in("al") 'M' as u8) };
|
||||
unsafe { &mut *self.mutex.data.get() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> {
|
||||
impl<'a, T> Drop for MutexGuard<'a, T> {
|
||||
fn drop(&mut self) {
|
||||
self.mutex.locked.store(false, Ordering::Release);
|
||||
self.mutex.locked.store(false, Ordering::SeqCst);
|
||||
}
|
||||
}
|
||||
|
||||
92
src/main.rs
92
src/main.rs
@@ -1,14 +1,11 @@
|
||||
#![feature(abi_x86_interrupt, naked_functions)]
|
||||
// Unforunately, this doesnt actually work with rust-analyzer, so if you want the annoying
|
||||
// Error about "unnecessary returns" to go away, see https://github.com/rust-lang/rust-analyzer/issues/16542
|
||||
// And if that issue ever gets closed, and you're reading this, feel free to remove this comment
|
||||
#![feature(abi_x86_interrupt, naked_functions, const_mut_refs)]
|
||||
#![allow(clippy::needless_return)]
|
||||
#![no_std]
|
||||
#![no_main]
|
||||
|
||||
use core::ffi::CStr;
|
||||
|
||||
use alloc::{format, vec::Vec};
|
||||
use alloc::vec::Vec;
|
||||
use limine::KernelFileRequest;
|
||||
|
||||
use crate::drivers::fs::{
|
||||
@@ -27,15 +24,20 @@ pub static KERNEL_REQUEST: KernelFileRequest = KernelFileRequest::new(0);
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn _start() -> ! {
|
||||
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
|
||||
arch::interrupts::init();
|
||||
|
||||
drivers::serial::init_serial();
|
||||
arch::gdt::gdt_init();
|
||||
arch::interrupts::idt_init();
|
||||
arch::interrupts::exceptions::exceptions_init();
|
||||
arch::interrupts::enable_interrupts();
|
||||
// TODO: memory stuff
|
||||
mem::pmm::pmm_init();
|
||||
mem::init_allocator();
|
||||
drivers::acpi::init_acpi();
|
||||
|
||||
// let squashfs = initramfs::init();
|
||||
|
||||
// crate::println!("{:?}", squashfs.superblock);
|
||||
kmain()
|
||||
}
|
||||
|
||||
pub fn kmain() -> ! {
|
||||
let _ = drivers::fs::vfs::add_vfs("/", alloc::boxed::Box::new(initramfs::init()));
|
||||
|
||||
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
|
||||
@@ -57,36 +59,14 @@ pub extern "C" fn _start() -> ! {
|
||||
// let file = vfs_open("/example.txt").unwrap();
|
||||
crate::println!(
|
||||
"{:X?}",
|
||||
core::str::from_utf8(
|
||||
&file
|
||||
.ops
|
||||
.open(0, UserCred { uid: 0, gid: 0 }, file.as_ptr())
|
||||
.unwrap()
|
||||
)
|
||||
.unwrap()
|
||||
);
|
||||
|
||||
let fb = drivers::video::get_framebuffer().unwrap();
|
||||
let length = (fb.height * fb.width) * (fb.bpp / 8);
|
||||
let pages = length / crate::mem::pmm::PAGE_SIZE;
|
||||
let buffer = unsafe {
|
||||
core::slice::from_raw_parts_mut(
|
||||
crate::mem::PHYSICAL_MEMORY_MANAGER
|
||||
.alloc(pages)
|
||||
.expect("Could not allocate color buffer") as *mut u32,
|
||||
length,
|
||||
)
|
||||
};
|
||||
|
||||
for y in 0..fb.height {
|
||||
let r = ((y as f32) / ((fb.height - 1) as f32)) * 200.0;
|
||||
for x in 0..fb.width {
|
||||
let g = ((x as f32) / ((fb.width - 1) as f32)) * 200.0;
|
||||
buffer[y * fb.width + x] = ((r as u32) << 16) | ((g as u32) << 8) | 175;
|
||||
}
|
||||
}
|
||||
|
||||
fb.blit_screen(buffer, None);
|
||||
// as a sign that we didnt panic
|
||||
draw_gradient();
|
||||
|
||||
// loop {
|
||||
// let ch = read_serial();
|
||||
@@ -109,6 +89,42 @@ pub extern "C" fn _start() -> ! {
|
||||
hcf();
|
||||
}
|
||||
|
||||
fn draw_gradient() {
|
||||
let fb = drivers::video::get_framebuffer().unwrap();
|
||||
let length = (fb.height * fb.width) * (fb.bpp / 8);
|
||||
let pages = length / crate::mem::pmm::PAGE_SIZE;
|
||||
|
||||
let buffer_ptr = crate::mem::PHYSICAL_MEMORY_MANAGER.alloc(pages);
|
||||
|
||||
if buffer_ptr.is_null() {
|
||||
panic!("Failed to allocate screen buffer")
|
||||
}
|
||||
|
||||
let buffer = unsafe {
|
||||
core::slice::from_raw_parts_mut(
|
||||
crate::mem::PHYSICAL_MEMORY_MANAGER
|
||||
.alloc(pages)
|
||||
.cast::<u32>(),
|
||||
length,
|
||||
)
|
||||
};
|
||||
|
||||
for y in 0..fb.height {
|
||||
for x in 0..fb.width {
|
||||
let r = (255 * x) / (fb.width - 1);
|
||||
let g = (255 * y) / (fb.height - 1);
|
||||
let b = 255 - r;
|
||||
|
||||
let pixel = ((r as u32) << 16) | ((g as u32) << 8) | (b as u32);
|
||||
buffer[((y * fb.pitch) / (fb.bpp / 8)) + x] = pixel
|
||||
}
|
||||
}
|
||||
|
||||
fb.blit_screen(buffer, None);
|
||||
|
||||
crate::mem::PHYSICAL_MEMORY_MANAGER.dealloc(buffer_ptr, pages);
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! println {
|
||||
() => ($crate::print!("\n"));
|
||||
@@ -118,7 +134,6 @@ macro_rules! println {
|
||||
#[macro_export]
|
||||
macro_rules! print {
|
||||
($($arg:tt)*) => (
|
||||
|
||||
$crate::drivers::serial::write_string(&alloc::format!($($arg)*).replace('\n', "\n\r"))
|
||||
)
|
||||
}
|
||||
@@ -160,6 +175,7 @@ impl KernelFeatures {
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Do this vastly differently
|
||||
pub static KERNEL_FEATURES: libs::cell::LazyCell<KernelFeatures> =
|
||||
libs::cell::LazyCell::new(parse_kernel_cmdline);
|
||||
|
||||
@@ -207,9 +223,7 @@ fn parse_kernel_cmdline() -> KernelFeatures {
|
||||
|
||||
#[panic_handler]
|
||||
fn panic(info: &core::panic::PanicInfo) -> ! {
|
||||
let msg = &format!("{info}\n").replace('\n', "\n\r");
|
||||
|
||||
drivers::serial::write_string(msg);
|
||||
crate::println!("{info}");
|
||||
|
||||
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
|
||||
{
|
||||
|
||||
@@ -1,274 +1,158 @@
|
||||
// Original code from: https://github.com/DrChat/buddyalloc/blob/master/src/heap.rs
|
||||
// But I made it ~~much worse~~ *better* by making it GlobalAlloc compatible
|
||||
// By using A custom Mutex implementation (which also sucks),
|
||||
// I was able to remove all the mut's In the original code.
|
||||
use core::{
|
||||
alloc::{GlobalAlloc, Layout},
|
||||
ptr::NonNull,
|
||||
};
|
||||
|
||||
// TODO: Replace this with a slab allocator that can take advantage of the page frame allocator
|
||||
use crate::{libs::sync::Mutex, mem::pmm::PAGE_SIZE};
|
||||
|
||||
use core::alloc::{GlobalAlloc, Layout};
|
||||
use core::cmp::{max, min};
|
||||
use core::ptr;
|
||||
use core::sync::atomic::Ordering::SeqCst;
|
||||
use core::sync::atomic::{AtomicPtr, AtomicU8, AtomicUsize};
|
||||
use super::align_up;
|
||||
|
||||
use crate::libs::sync::Mutex;
|
||||
|
||||
const fn log2(num: usize) -> u8 {
|
||||
let mut temp = num;
|
||||
let mut result = 0;
|
||||
|
||||
temp >>= 1;
|
||||
|
||||
while temp != 0 {
|
||||
result += 1;
|
||||
temp >>= 1;
|
||||
#[derive(Debug)]
|
||||
struct MemNode {
|
||||
next: Option<NonNull<Self>>,
|
||||
size: usize,
|
||||
}
|
||||
|
||||
return result;
|
||||
impl MemNode {
|
||||
const fn new(size: usize) -> Self {
|
||||
Self { next: None, size }
|
||||
}
|
||||
|
||||
const MIN_HEAP_ALIGN: usize = 4096;
|
||||
const HEAP_BLOCKS: usize = 16;
|
||||
|
||||
pub struct FreeBlock {
|
||||
next: *mut FreeBlock,
|
||||
pub fn addr(&self) -> usize {
|
||||
self as *const Self as usize
|
||||
}
|
||||
|
||||
impl FreeBlock {
|
||||
#[inline]
|
||||
const fn new(next: *mut FreeBlock) -> Self {
|
||||
Self { next }
|
||||
pub fn end_addr(&self) -> usize {
|
||||
self.addr() + self.len()
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.size
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BuddyAllocator {
|
||||
pub heap_start: AtomicPtr<u8>,
|
||||
heap_size: AtomicUsize,
|
||||
free_lists: Mutex<[*mut FreeBlock; HEAP_BLOCKS]>,
|
||||
min_block_size: AtomicUsize,
|
||||
min_block_size_log2: AtomicU8,
|
||||
pub struct LinkedListAllocator {
|
||||
head: MemNode,
|
||||
}
|
||||
|
||||
impl BuddyAllocator {
|
||||
pub const fn new_unchecked(heap_start: *mut u8, heap_size: usize) -> Self {
|
||||
let min_block_size_raw = heap_size >> (HEAP_BLOCKS - 1);
|
||||
let min_block_size = AtomicUsize::new(min_block_size_raw);
|
||||
let mut free_lists_buf: [*mut FreeBlock; HEAP_BLOCKS] = [ptr::null_mut(); HEAP_BLOCKS];
|
||||
|
||||
free_lists_buf[HEAP_BLOCKS - 1] = heap_start as *mut FreeBlock;
|
||||
|
||||
let free_lists: Mutex<[*mut FreeBlock; HEAP_BLOCKS]> = Mutex::new(free_lists_buf);
|
||||
|
||||
let heap_start = AtomicPtr::new(heap_start);
|
||||
let heap_size = AtomicUsize::new(heap_size);
|
||||
unsafe impl Sync for LinkedListAllocator {}
|
||||
|
||||
impl LinkedListAllocator {
|
||||
pub const fn new() -> Self {
|
||||
Self {
|
||||
heap_start,
|
||||
heap_size,
|
||||
free_lists,
|
||||
min_block_size,
|
||||
min_block_size_log2: AtomicU8::new(log2(min_block_size_raw)),
|
||||
head: MemNode::new(0),
|
||||
}
|
||||
}
|
||||
|
||||
fn allocation_size(&self, mut size: usize, align: usize) -> Option<usize> {
|
||||
if !align.is_power_of_two() {
|
||||
return None;
|
||||
}
|
||||
|
||||
if align > MIN_HEAP_ALIGN {
|
||||
return None;
|
||||
}
|
||||
|
||||
if align > size {
|
||||
size = align;
|
||||
}
|
||||
|
||||
size = max(size, self.min_block_size.load(SeqCst));
|
||||
|
||||
size = size.next_power_of_two();
|
||||
|
||||
if size > self.heap_size.load(SeqCst) {
|
||||
return None;
|
||||
}
|
||||
|
||||
return Some(size);
|
||||
}
|
||||
|
||||
fn allocation_order(&self, size: usize, align: usize) -> Option<usize> {
|
||||
return self
|
||||
.allocation_size(size, align)
|
||||
.map(|s| (log2(s) - self.min_block_size_log2.load(SeqCst)) as usize);
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn order_size(&self, order: usize) -> usize {
|
||||
return 1 << (self.min_block_size_log2.load(SeqCst) as usize + order);
|
||||
}
|
||||
|
||||
fn free_list_pop(&self, order: usize) -> Option<*mut u8> {
|
||||
let mut free_lists_lock = self.free_lists.lock();
|
||||
|
||||
let candidate = (*free_lists_lock)[order];
|
||||
|
||||
if candidate.is_null() {
|
||||
return None;
|
||||
}
|
||||
|
||||
if order != free_lists_lock.len() - 1 {
|
||||
(*free_lists_lock)[order] = unsafe { (*candidate).next };
|
||||
} else {
|
||||
(*free_lists_lock)[order] = ptr::null_mut();
|
||||
}
|
||||
|
||||
return Some(candidate as *mut u8);
|
||||
}
|
||||
|
||||
fn free_list_insert(&self, order: usize, block: *mut u8) {
|
||||
let mut free_lists_lock = self.free_lists.lock();
|
||||
let free_block_ptr = block as *mut FreeBlock;
|
||||
|
||||
unsafe { *free_block_ptr = FreeBlock::new((*free_lists_lock)[order]) };
|
||||
|
||||
(*free_lists_lock)[order] = free_block_ptr;
|
||||
}
|
||||
|
||||
fn free_list_remove(&self, order: usize, block: *mut u8) -> bool {
|
||||
let block_ptr = block as *mut FreeBlock;
|
||||
|
||||
let mut checking: &mut *mut FreeBlock = &mut (*self.free_lists.lock())[order];
|
||||
|
||||
pub fn init(&mut self, pages: usize) {
|
||||
unsafe {
|
||||
while !(*checking).is_null() {
|
||||
if *checking == block_ptr {
|
||||
*checking = (*(*checking)).next;
|
||||
return true;
|
||||
}
|
||||
|
||||
checking = &mut ((*(*checking)).next);
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
fn split_free_block(&self, block: *mut u8, mut order: usize, order_needed: usize) {
|
||||
let mut size_to_split = self.order_size(order);
|
||||
|
||||
while order > order_needed {
|
||||
size_to_split >>= 1;
|
||||
order -= 1;
|
||||
|
||||
let split = unsafe { block.add(size_to_split) };
|
||||
self.free_list_insert(order, split);
|
||||
self.add_free_region(
|
||||
super::PHYSICAL_MEMORY_MANAGER.alloc(pages),
|
||||
PAGE_SIZE * pages,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn buddy(&self, order: usize, block: *mut u8) -> Option<*mut u8> {
|
||||
assert!(block >= self.heap_start.load(SeqCst));
|
||||
unsafe fn add_free_region(&mut self, addr: *mut u8, size: usize) {
|
||||
assert_eq!(
|
||||
align_up(addr as usize, core::mem::align_of::<MemNode>()),
|
||||
addr as usize
|
||||
);
|
||||
assert!(size >= core::mem::size_of::<MemNode>());
|
||||
|
||||
let relative = unsafe { block.offset_from(self.heap_start.load(SeqCst)) } as usize;
|
||||
let size = self.order_size(order);
|
||||
if size >= self.heap_size.load(SeqCst) {
|
||||
return None;
|
||||
} else {
|
||||
return Some(unsafe { self.heap_start.load(SeqCst).add(relative ^ size) });
|
||||
}
|
||||
let mut node = MemNode::new(size);
|
||||
node.next = self.head.next.take();
|
||||
|
||||
addr.cast::<MemNode>().write(node);
|
||||
self.head.next = Some(NonNull::new_unchecked(addr.cast::<MemNode>()));
|
||||
}
|
||||
|
||||
pub fn get_total_mem(&self) -> usize {
|
||||
return self.heap_size.load(SeqCst);
|
||||
}
|
||||
fn alloc_from_node(node: &MemNode, layout: Layout) -> *mut u8 {
|
||||
let start = align_up(node.addr() as usize, layout.align());
|
||||
let end = start + layout.size();
|
||||
|
||||
pub fn get_free_mem(&self) -> usize {
|
||||
let free_lists_lock = self.free_lists.lock();
|
||||
let mut free_mem = 0;
|
||||
|
||||
unsafe {
|
||||
for order in 0..free_lists_lock.len() {
|
||||
let mut block = (*free_lists_lock)[order];
|
||||
|
||||
while !block.is_null() {
|
||||
free_mem += self.order_size(order);
|
||||
block = (*block).next;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return free_mem;
|
||||
}
|
||||
|
||||
pub fn get_used_mem(&self) -> usize {
|
||||
return self.get_total_mem() - self.get_free_mem();
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl GlobalAlloc for BuddyAllocator {
|
||||
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
|
||||
if let Some(order_needed) = self.allocation_order(layout.size(), layout.align()) {
|
||||
let free_lists_len = { self.free_lists.lock().len() };
|
||||
|
||||
for order in order_needed..free_lists_len {
|
||||
if let Some(block) = self.free_list_pop(order) {
|
||||
if order > order_needed {
|
||||
self.split_free_block(block, order, order_needed);
|
||||
}
|
||||
|
||||
return block;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ptr::null_mut();
|
||||
}
|
||||
|
||||
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
|
||||
let initial_order = self
|
||||
.allocation_order(layout.size(), layout.align())
|
||||
.expect("Tried to dispose of invalid block");
|
||||
|
||||
let mut block = ptr;
|
||||
let free_lists_len = { self.free_lists.lock().len() };
|
||||
|
||||
for order in initial_order..free_lists_len {
|
||||
if let Some(buddy) = self.buddy(order, block) {
|
||||
if self.free_list_remove(order, block) {
|
||||
block = min(block, buddy);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
self.free_list_insert(order, block);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn malloc(size: usize) -> *mut u8 {
|
||||
let layout = alloc::alloc::Layout::from_size_align(size, 2);
|
||||
|
||||
if layout.is_err() {
|
||||
if end > node.end_addr() as usize {
|
||||
// aligned address goes outside the bounds of the node
|
||||
return core::ptr::null_mut();
|
||||
}
|
||||
|
||||
unsafe {
|
||||
return alloc::alloc::alloc(layout.unwrap());
|
||||
};
|
||||
let extra = node.end_addr() as usize - end;
|
||||
if extra > 0 && extra < core::mem::size_of::<MemNode>() {
|
||||
// Node size minus allocation size is less than the minimum size needed for a node,
|
||||
// thus, if we let the allocation to happen in this node, we lose track of the extra memory
|
||||
// lost by this allocation
|
||||
return core::ptr::null_mut();
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn free(ptr: *mut u8, size: usize) {
|
||||
if ptr.is_null() {
|
||||
return;
|
||||
return start as *mut u8;
|
||||
}
|
||||
|
||||
let layout = alloc::alloc::Layout::from_size_align(size, 2);
|
||||
unsafe fn find_region(&mut self, layout: Layout) -> Option<NonNull<MemNode>> {
|
||||
let mut current_node = &mut self.head;
|
||||
|
||||
if layout.is_err() {
|
||||
return;
|
||||
while let Some(node) = current_node.next.as_mut() {
|
||||
let node = node.as_mut();
|
||||
|
||||
if Self::alloc_from_node(node, layout).is_null() {
|
||||
current_node = current_node.next.as_mut().unwrap().as_mut();
|
||||
continue;
|
||||
}
|
||||
|
||||
unsafe {
|
||||
alloc::alloc::dealloc(ptr, layout.unwrap());
|
||||
};
|
||||
// `node` is suitable for this allocation
|
||||
let next = node.next.take();
|
||||
let ret = Some(current_node.next.take().unwrap());
|
||||
current_node.next = next;
|
||||
return ret;
|
||||
}
|
||||
|
||||
return None;
|
||||
}
|
||||
|
||||
fn size_align(layout: Layout) -> Layout {
|
||||
let layout = layout
|
||||
.align_to(core::mem::align_of::<MemNode>())
|
||||
.expect("Failed to align allocation")
|
||||
.pad_to_align();
|
||||
|
||||
let size = layout.size().max(core::mem::size_of::<MemNode>());
|
||||
return Layout::from_size_align(size, layout.align()).expect("Failed to create layout");
|
||||
}
|
||||
|
||||
unsafe fn inner_alloc(&mut self, layout: Layout) -> *mut u8 {
|
||||
let layout = Self::size_align(layout);
|
||||
|
||||
if let Some(region) = self.find_region(layout) {
|
||||
// immutable pointers are a government conspiracy anyways
|
||||
let end = (region.as_ref().addr() + layout.size()) as *mut u8;
|
||||
let extra = region.as_ref().end_addr() - end as usize;
|
||||
|
||||
if extra > 0 {
|
||||
self.add_free_region(end, extra)
|
||||
}
|
||||
|
||||
return region.as_ref().addr() as *mut u8;
|
||||
}
|
||||
|
||||
return core::ptr::null_mut();
|
||||
}
|
||||
|
||||
unsafe fn inner_dealloc(&mut self, ptr: *mut u8, layout: Layout) {
|
||||
let layout = Self::size_align(layout);
|
||||
|
||||
self.add_free_region(ptr, layout.size());
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl GlobalAlloc for Mutex<LinkedListAllocator> {
|
||||
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
|
||||
let mut allocator = self.lock();
|
||||
|
||||
allocator.inner_alloc(layout)
|
||||
}
|
||||
|
||||
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
|
||||
let mut allocator = self.lock();
|
||||
|
||||
allocator.inner_dealloc(ptr, layout);
|
||||
}
|
||||
}
|
||||
|
||||
116
src/mem/mod.rs
116
src/mem/mod.rs
@@ -1,65 +1,24 @@
|
||||
pub mod allocator;
|
||||
pub mod pmm;
|
||||
|
||||
use core::alloc::GlobalAlloc;
|
||||
use crate::libs::{cell::OnceCell, sync::Mutex};
|
||||
|
||||
use limine::{MemmapEntry, NonNullPtr};
|
||||
|
||||
use crate::libs::{cell::LazyCell, sync::Mutex};
|
||||
|
||||
use self::{allocator::BuddyAllocator, pmm::PhysicalMemoryManager};
|
||||
use self::{allocator::LinkedListAllocator, pmm::PhysicalMemoryManager};
|
||||
|
||||
static MEMMAP_REQUEST: limine::MemmapRequest = limine::MemmapRequest::new(0);
|
||||
static HHDM_REQUEST: limine::HhdmRequest = limine::HhdmRequest::new(0);
|
||||
|
||||
pub static MEMMAP: LazyCell<Mutex<&mut [NonNullPtr<MemmapEntry>]>> = LazyCell::new(|| {
|
||||
let memmap_request = MEMMAP_REQUEST
|
||||
.get_response()
|
||||
.get_mut()
|
||||
.expect("Failed to get Memory map!");
|
||||
pub static PHYSICAL_MEMORY_MANAGER: OnceCell<PhysicalMemoryManager> = OnceCell::new();
|
||||
|
||||
return Mutex::new(memmap_request.memmap_mut());
|
||||
});
|
||||
|
||||
pub static HHDM_OFFSET: LazyCell<usize> = LazyCell::new(|| {
|
||||
let hhdm = HHDM_REQUEST
|
||||
.get_response()
|
||||
.get()
|
||||
.expect("Failed to get Higher Half Direct Map!");
|
||||
|
||||
return hhdm.offset as usize;
|
||||
});
|
||||
|
||||
pub static PHYSICAL_MEMORY_MANAGER: LazyCell<PhysicalMemoryManager> =
|
||||
LazyCell::new(PhysicalMemoryManager::new);
|
||||
|
||||
pub struct Allocator {
|
||||
pub inner: LazyCell<BuddyAllocator>,
|
||||
/// Rounds `addr` up to the next multiple of `align` (returns `addr`
/// unchanged when it is already aligned).
pub fn align_up(addr: usize, align: usize) -> usize {
    // align_offset on a byte pointer is the distance to the next aligned
    // address, 0 if already aligned.
    addr + (addr as *const u8).align_offset(align)
}
|
||||
|
||||
// Global allocator shim: forwards every request to the inner allocator.
// NOTE(review): `inner` is a LazyCell, so the first call presumably runs
// the initializer and derefs to the BuddyAllocator — confirm LazyCell's
// deref semantics against its definition.
unsafe impl GlobalAlloc for Allocator {
    unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 {
        self.inner.alloc(layout)
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: core::alloc::Layout) {
        self.inner.dealloc(ptr, layout)
    }
}
|
||||
|
||||
const HEAP_PAGES: usize = 4096;
|
||||
const HEAP_SIZE: usize = HEAP_PAGES * 1024;
|
||||
const HEAP_PAGES: usize = 1024; // 4 MiB heap
|
||||
|
||||
#[global_allocator]
|
||||
pub static ALLOCATOR: Allocator = Allocator {
|
||||
inner: LazyCell::new(|| {
|
||||
let heap_start = PHYSICAL_MEMORY_MANAGER
|
||||
.alloc(HEAP_PAGES)
|
||||
.expect("Failed to allocate heap!");
|
||||
|
||||
BuddyAllocator::new_unchecked(heap_start, HEAP_SIZE)
|
||||
}),
|
||||
};
|
||||
pub static ALLOCATOR: Mutex<LinkedListAllocator> = Mutex::new(LinkedListAllocator::new());
|
||||
|
||||
pub fn log_memory_map() {
|
||||
let memmap_request = MEMMAP_REQUEST.get_response().get_mut();
|
||||
@@ -82,20 +41,43 @@ pub fn log_memory_map() {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Label {
|
||||
size: usize,
|
||||
text_label: &'static str,
|
||||
pub fn init_allocator() {
|
||||
let mut allocator_lock = ALLOCATOR.lock();
|
||||
allocator_lock.init(HEAP_PAGES);
|
||||
|
||||
drop(allocator_lock);
|
||||
|
||||
crate::println!(
|
||||
"{} of memory available",
|
||||
PHYSICAL_MEMORY_MANAGER.total_memory().label_bytes()
|
||||
)
|
||||
}
|
||||
|
||||
/// A human-readable byte-size bucket. Each variant carries the count in
/// that unit (e.g. `Label::MIB(3)` renders as "3 MiB(s)" via `Display`).
pub enum Label {
    // Plain bytes (< 1 KiB).
    BYTE(usize),
    // Kibibytes (1 KiB = 1024 bytes).
    KIB(usize),
    // Mebibytes.
    MIB(usize),
    // Gibibytes.
    GIB(usize),
}
|
||||
|
||||
impl core::fmt::Display for Label {
|
||||
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
|
||||
return write!(f, "{}{}", self.size, self.text_label);
|
||||
match self {
|
||||
Label::BYTE(count) => {
|
||||
write!(f, "{count} Byte(s)")
|
||||
}
|
||||
Label::KIB(count) => {
|
||||
write!(f, "{count} KiB(s)")
|
||||
}
|
||||
Label::MIB(count) => {
|
||||
write!(f, "{count} MiB(s)")
|
||||
}
|
||||
Label::GIB(count) => {
|
||||
write!(f, "{count} GiB(s)")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Hacky solution to avoid allocation, but keep the names
|
||||
static BYTE_LABELS: (&str, &str, &str, &str) = ("GiB", "MiB", "KiB", "Bytes");
|
||||
|
||||
pub trait LabelBytes {
|
||||
fn label_bytes(&self) -> Label;
|
||||
}
|
||||
@@ -105,25 +87,13 @@ impl LabelBytes for usize {
|
||||
let bytes = *self;
|
||||
|
||||
if bytes >> 30 > 0 {
|
||||
return Label {
|
||||
size: bytes >> 30,
|
||||
text_label: BYTE_LABELS.0,
|
||||
};
|
||||
return Label::GIB(bytes >> 30);
|
||||
} else if bytes >> 20 > 0 {
|
||||
return Label {
|
||||
size: bytes >> 20,
|
||||
text_label: BYTE_LABELS.1,
|
||||
};
|
||||
return Label::MIB(bytes >> 20);
|
||||
} else if bytes >> 10 > 0 {
|
||||
return Label {
|
||||
size: bytes >> 10,
|
||||
text_label: BYTE_LABELS.2,
|
||||
};
|
||||
return Label::KIB(bytes >> 10);
|
||||
} else {
|
||||
return Label {
|
||||
size: bytes,
|
||||
text_label: BYTE_LABELS.3,
|
||||
};
|
||||
return Label::BYTE(bytes);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
|
||||
use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
|
||||
|
||||
use super::{HHDM_REQUEST, MEMMAP_REQUEST};
|
||||
|
||||
pub const PAGE_SIZE: usize = 4096;
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -13,6 +15,10 @@ pub struct PhysicalMemoryManager {
|
||||
used_pages: AtomicUsize,
|
||||
}
|
||||
|
||||
/// Constructs the physical memory manager and publishes it in the
/// module-level `PHYSICAL_MEMORY_MANAGER` cell. Must run before anything
/// tries to allocate physical pages.
pub fn pmm_init() {
    super::PHYSICAL_MEMORY_MANAGER.set(PhysicalMemoryManager::new());
}
|
||||
|
||||
impl PhysicalMemoryManager {
|
||||
pub fn new() -> Self {
|
||||
let pmm = Self {
|
||||
@@ -23,11 +29,22 @@ impl PhysicalMemoryManager {
|
||||
used_pages: AtomicUsize::new(0),
|
||||
};
|
||||
|
||||
let hhdm_offset = *super::HHDM_OFFSET;
|
||||
let hhdm_req = HHDM_REQUEST
|
||||
.get_response()
|
||||
.get()
|
||||
.expect("Failed to get Higher Half Direct Map!");
|
||||
|
||||
let hhdm_offset = hhdm_req.offset as usize;
|
||||
|
||||
let memmap = MEMMAP_REQUEST
|
||||
.get_response()
|
||||
.get_mut()
|
||||
.expect("Failed to get Memory map!")
|
||||
.memmap_mut();
|
||||
|
||||
let mut highest_addr: usize = 0;
|
||||
|
||||
for entry in super::MEMMAP.lock().iter() {
|
||||
for entry in memmap.iter() {
|
||||
if entry.typ == limine::MemoryMapEntryType::Usable {
|
||||
pmm.usable_pages
|
||||
.fetch_add(entry.len as usize / PAGE_SIZE, Ordering::SeqCst);
|
||||
@@ -42,7 +59,7 @@ impl PhysicalMemoryManager {
|
||||
let bitmap_size =
|
||||
((pmm.highest_page_idx.load(Ordering::SeqCst) / 8) + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
|
||||
|
||||
for entry in super::MEMMAP.lock().iter_mut() {
|
||||
for entry in memmap.iter_mut() {
|
||||
if entry.typ != limine::MemoryMapEntryType::Usable {
|
||||
continue;
|
||||
}
|
||||
@@ -63,7 +80,7 @@ impl PhysicalMemoryManager {
|
||||
}
|
||||
}
|
||||
|
||||
for entry in super::MEMMAP.lock().iter() {
|
||||
for entry in memmap.iter() {
|
||||
if entry.typ != limine::MemoryMapEntryType::Usable {
|
||||
continue;
|
||||
}
|
||||
@@ -99,7 +116,7 @@ impl PhysicalMemoryManager {
|
||||
return core::ptr::null_mut();
|
||||
}
|
||||
|
||||
pub fn alloc_nozero(&self, pages: usize) -> Result<*mut u8, ()> {
|
||||
pub fn alloc_nozero(&self, pages: usize) -> *mut u8 {
|
||||
// Attempt to allocate n pages with a search limit of the amount of usable pages
|
||||
let mut page_addr = self.inner_alloc(pages, self.highest_page_idx.load(Ordering::SeqCst));
|
||||
|
||||
@@ -111,22 +128,27 @@ impl PhysicalMemoryManager {
|
||||
|
||||
// If page_addr is still null, we have ran out of usable memory
|
||||
if page_addr.is_null() {
|
||||
return Err(());
|
||||
return core::ptr::null_mut();
|
||||
}
|
||||
}
|
||||
|
||||
self.used_pages.fetch_add(pages, Ordering::SeqCst);
|
||||
|
||||
return Ok(page_addr);
|
||||
return page_addr;
|
||||
}
|
||||
|
||||
pub fn alloc(&self, pages: usize) -> *mut u8 {
|
||||
let ret = self.alloc_nozero(pages);
|
||||
|
||||
if ret.is_null() {
|
||||
return ret;
|
||||
}
|
||||
|
||||
pub fn alloc(&self, pages: usize) -> Result<*mut u8, ()> {
|
||||
let ret = self.alloc_nozero(pages)?;
|
||||
unsafe {
|
||||
core::ptr::write_bytes(ret, 0x00, pages * PAGE_SIZE);
|
||||
};
|
||||
|
||||
return Ok(ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
pub fn dealloc(&self, addr: *mut u8, pages: usize) {
|
||||
|
||||
Reference in New Issue
Block a user