better pointers and a broken vmm
@@ -8,3 +8,9 @@ build-std = [
 [build]
 target = "./src/arch/x86_64/x86_64-unknown-none.json"
 rustflags = ["-Cforce-frame-pointers=yes"]
+
+# use this to reduce the binary size; I've seen these reduce the kernel by 60 KiB
+# you could use opt-level = "z" and save another 50 KiB, but IMO speed is much more valuable than size
+[profile.release]
+strip = true
+lto = true
.github/workflows/loc.yml (vendored, 37 lines deleted)
@@ -1,37 +0,0 @@
-name: Count LOC
-
-on:
-  push:
-    branches: ["trunk"]
-
-jobs:
-  count-loc:
-    runs-on: ubuntu-latest
-    outputs:
-      loc: ${{ steps.loc.outputs.loc }}
-    steps:
-      - name: Check out code
-        uses: actions/checkout@v3
-
-      - name: Install cloc
-        run: sudo apt-get install cloc
-
-      - id: loc
-        run: |
-          LOC_VALUE=$(make line-count | tail -n 1)
-          echo "loc=${LOC_VALUE}" >> "$GITHUB_OUTPUT"
-        shell: bash
-  make-badge:
-    runs-on: ubuntu-latest
-    needs: count-loc
-    steps:
-      - env:
-          LOC: ${{ needs.count-loc.outputs.loc }}
-        uses: Schneegans/dynamic-badges-action@v1.7.0
-        with:
-          auth: ${{ secrets.GIST_SECRET }}
-          gistID: c16f26c4c5ab7f613fe758c913f9e71f
-          filename: cappuccinos-loc.json
-          label: Lines Of Code
-          message: ${{ env.LOC }}
-          color: blue
.gitignore (vendored, 4 lines changed)
@@ -4,9 +4,7 @@
 # Bochs
 bx_enh_dbg.ini
 bochsout.txt

 # limine
 limine/

-# rewrite stuff
-src.bak
README.md (12 lines changed)
@@ -1,8 +1,16 @@
 # CappuccinOS

+<!--
+Use Tokei instead of a custom loc count; tokei and my custom loc count seem to disagree by 30-100 lines, but I suspect tokei to be more accurate than cloc
+[](https://github.com/juls0730/CappuccinOS).
+-->

-CappuccinOS is a small x86-64 operating system written from scratch in rust. This README will guide you through the process of building and running CappuccinOS.
+CappuccinOS is a small _next generation_ x86-64 operating system written from scratch in Rust. This README will guide you through the process of building and running CappuccinOS.

+> [!WARNING]
+> This project is in early development. Things will change.

 ## Features
@@ -79,7 +87,7 @@ Install the dependencies:

 <details>
 <summary>Ubuntu</summary>

-Python should be installed by default, and if it's not, make an issue or a PR and I'll fix it
+# Python should be installed by default, and if it's not, make an issue or a PR and I'll fix it

 sudo apt install gdisk dosfstools squashfs-tools
 # Optionally
@@ -24,7 +24,7 @@ pub fn inw(port: u16) -> u16 {
 ///
 /// This function panics if the supplied buffer's size is smaller than `count`.
 #[inline(always)]
-pub unsafe fn insw(port: u16, buffer: *mut u16, count: usize) {
+pub unsafe fn insw(port: u16, buffer: VirtualPtr<u16>, count: usize) {
     return;
 }

@@ -34,7 +34,7 @@ pub unsafe fn insw(port: u16, buffer: *mut u16, count: usize) {
 ///
 /// This function panics if the supplied buffer's size is smaller than `count`.
 #[inline(always)]
-pub unsafe fn outsb(port: u16, buffer: *const u8, count: usize) {
+pub unsafe fn outsb(port: u16, buffer: VirtualPtr<u8>, count: usize) {
     return;
 }

@@ -44,7 +44,7 @@ pub unsafe fn outsb(port: u16, buffer: *const u8, count: usize) {
 ///
 /// This function panics if the supplied buffer's size is smaller than `count`.
 #[inline(always)]
-pub unsafe fn outsw(port: u16, buffer: *mut u16, count: usize) {
+pub unsafe fn outsw(port: u16, buffer: VirtualPtr<u16>, count: usize) {
     return;
 }
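
To make the new signatures concrete: callers now wrap a raw buffer pointer before handing it to the port-IO helpers. A minimal sketch, assuming the VirtualPtr API from src/mem/mod.rs below (the port number is illustrative):

    // Read one 512-byte ATA sector (256 u16 words) from the primary bus data port.
    let mut sector = [0u16; 256];
    let buf: VirtualPtr<u16> = VirtualPtr::from(sector.as_mut_ptr());
    unsafe { insw(0x1F0, buf, sector.len()) };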

@@ -1,4 +1,4 @@
-use crate::{drivers::acpi::SMP_REQUEST, hcf, libs::cell::OnceCell, mem::HHDM_OFFSET, LogLevel};
+use crate::{hcf, libs::cell::OnceCell, mem::VirtualPtr, LogLevel};

 use alloc::{sync::Arc, vec::Vec};

@@ -46,7 +46,7 @@ pub struct LAPIC {
 pub struct IOAPIC {
     pub ioapic_id: u8,
     _reserved: u8,
-    pub ptr: *mut u8,
+    pub ptr: VirtualPtr<u8>,
     pub global_interrupt_base: u32,
 }

@@ -62,11 +62,11 @@ pub struct IOAPICSourceOverride {
 #[derive(Debug)]
 pub struct APIC {
     pub io_apic: IOAPIC,
-    local_apic: *mut u8,
+    local_apic: VirtualPtr<u8>,
     pub cpus: Arc<[LAPIC]>,
 }

-unsafe extern "C" fn test<'a>(cpu: &'a limine::smp::Cpu) -> ! {
+unsafe extern "C" fn test(cpu: &limine::smp::Cpu) -> ! {
     crate::log!(LogLevel::Debug, "hey from CPU {:<02}", cpu.id);

     hcf();
@@ -100,51 +100,56 @@ impl APIC {
             core::ptr::addr_of!(madt)
         );

-        let mut lapic_ptr = (madt.inner.local_apic_address as usize + *HHDM_OFFSET) as *mut u8;
+        let hhdm_offset = crate::libs::limine::get_hhdm_offset();
+
+        let mut lapic_ptr: VirtualPtr<u8> =
+            VirtualPtr::from(madt.inner.local_apic_address as usize + hhdm_offset);
         let mut io_apic = None;
         let mut io_apic_source_override = None;

-        let mut ptr = madt.extra.unwrap().as_ptr();
-        let ptr_end = unsafe { ptr.add(madt.header.length as usize - 44) };
+        // constant pointers are a lie anyways
+        let mut ptr: VirtualPtr<u8> = VirtualPtr::from(madt.extra.unwrap().as_ptr());
+        let ptr_end: VirtualPtr<u8> = unsafe { ptr.add(madt.header.length as usize - 44) };

-        while (ptr as usize) < (ptr_end as usize) {
+        while (ptr.addr()) < (ptr_end.addr()) {
             // ptr may or may not be aligned, although I have had crashes related to this pointer not being aligned,
             // and tbh I don't really care about the performance impact of reading unaligned pointers right now
             // TODO
-            match unsafe { core::ptr::read_unaligned(ptr) } {
+            match unsafe { ptr.read_unaligned() } {
                 0 => {
-                    if unsafe { *(ptr.add(4)) } & 1 != 0 {
-                        cpus.push(unsafe { core::ptr::read_unaligned(ptr.add(2).cast::<LAPIC>()) });
+                    if unsafe { ptr.add(4).read() } & 1 != 0 {
+                        cpus.push(unsafe { ptr.add(2).cast::<LAPIC>().read_unaligned() });
                     }
                 }
                 1 => unsafe {
                     io_apic = Some(IOAPIC {
-                        ioapic_id: core::ptr::read_unaligned(ptr.add(2)),
-                        _reserved: core::ptr::read_unaligned(ptr.add(3)),
-                        ptr: (core::ptr::read_unaligned(ptr.add(4).cast::<u32>()) as usize
-                            + *HHDM_OFFSET) as *mut u8,
-                        global_interrupt_base: core::ptr::read_unaligned(ptr.add(8).cast::<u32>()),
+                        ioapic_id: ptr.add(2).read_unaligned(),
+                        _reserved: ptr.add(3).read_unaligned(),
+                        ptr: VirtualPtr::from(
+                            ptr.add(4).cast::<u32>().read_unaligned() as usize + hhdm_offset,
+                        ),
+                        global_interrupt_base: ptr.add(8).cast::<u32>().read_unaligned(),
                     })
                 },
                 2 => unsafe {
                     io_apic_source_override = Some(IOAPICSourceOverride {
-                        bus_source: core::ptr::read_unaligned(ptr.add(2)),
-                        irq_source: core::ptr::read_unaligned(ptr.add(3)),
-                        global_system_interrupt: core::ptr::read_unaligned(
-                            ptr.add(4).cast::<u32>(),
-                        ),
-                        flags: core::ptr::read_unaligned(ptr.add(8).cast::<u16>()),
+                        bus_source: ptr.add(2).read_unaligned(),
+                        irq_source: ptr.add(3).read_unaligned(),
+                        global_system_interrupt: ptr.add(4).cast::<u32>().read_unaligned(),
+                        flags: ptr.add(8).cast::<u16>().read_unaligned(),
                     })
                 },
                 5 => {
-                    lapic_ptr = (unsafe { core::ptr::read_unaligned(ptr.add(4).cast::<u64>()) }
-                        as usize
-                        + *HHDM_OFFSET) as *mut u8
+                    lapic_ptr = unsafe {
+                        VirtualPtr::from(
+                            ptr.add(4).cast::<u64>().read_unaligned() as usize + hhdm_offset,
+                        )
+                    }
                 }
                 _ => {}
             }

-            ptr = unsafe { ptr.add(core::ptr::read_unaligned(ptr.add(1)) as usize) };
+            ptr = unsafe { ptr.add(ptr.add(1).read_unaligned() as usize) };
         }

         if io_apic.is_none() || io_apic_source_override.is_none() {
@@ -180,7 +185,7 @@ impl APIC {

         // crate::println!("{number_of_inputs}");

-        let smp_request = unsafe { SMP_REQUEST.get_response_mut() };
+        let smp_request = crate::libs::limine::get_smp();

         if smp_request.is_none() {
             panic!("Failed to get smp from limine!");
@@ -205,27 +210,32 @@ impl APIC {

     pub fn read_ioapic(&self, reg: u32) -> u32 {
         unsafe {
-            core::ptr::write_volatile(self.io_apic.ptr.cast::<u32>(), reg & 0xff);
-            return core::ptr::read_volatile(self.io_apic.ptr.cast::<u32>().add(4));
+            let ptr = self.io_apic.ptr;
+            ptr.cast::<u32>().as_raw_ptr().write_volatile(reg & 0xFF);
+            return ptr.cast::<u32>().add(4).read_volatile();
         }
     }

     pub fn write_ioapic(&self, reg: u32, value: u32) {
         unsafe {
-            core::ptr::write_volatile(self.io_apic.ptr.cast::<u32>(), reg & 0xff);
-            core::ptr::write_volatile(self.io_apic.ptr.cast::<u32>().add(4), value);
+            let ptr = self.io_apic.ptr;
+            ptr.cast::<u32>().write_volatile(reg & 0xFF);
+            ptr.cast::<u32>().add(4).write_volatile(value);
         }
     }

     pub fn read_lapic(&self, reg: u32) -> u32 {
         unsafe {
-            return *self.local_apic.add(reg as usize).cast::<u32>();
+            return self.local_apic.add(reg as usize).cast::<u32>().read();
         }
     }

     pub fn write_lapic(&self, reg: u32, value: u32) {
         unsafe {
-            core::ptr::write_volatile(self.local_apic.add(reg as usize).cast::<u32>(), value);
+            self.local_apic
+                .add(reg as usize)
+                .cast::<u32>()
+                .write_volatile(value);
         }
     }
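
read_ioapic and write_ioapic go through the IOAPIC's index/data register pair: the register index is written to IOREGSEL at the base, then the value moves through IOWIN at offset 0x10 (four u32s past the base, hence the add(4)). A usage sketch, assuming an initialized APIC value:

    // IOAPICVER (register 0x01): bits 16..24 hold the index of the last
    // redirection entry, so the entry count is that field plus one.
    let ver = apic.read_ioapic(0x01);
    let redirection_entries = ((ver >> 16) & 0xFF) + 1;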

@@ -1,7 +1,7 @@
 use core::sync::atomic::{AtomicU8, Ordering};

 use super::idt_set_gate;
-use crate::{hcf, log, LogLevel};
+use crate::{hcf, log, mem::VirtualPtr, LogLevel};

 #[repr(C)]
 #[derive(Clone, Copy, Debug)]
@@ -37,7 +37,7 @@ struct Registers {
 static FAULTED: AtomicU8 = AtomicU8::new(0);

 extern "C" fn exception_handler(registers: u64) {
-    let registers = unsafe { *(registers as *const Registers) };
+    let registers = unsafe { VirtualPtr::<Registers>::from(registers as usize).read() };

     match FAULTED.fetch_add(1, Ordering::SeqCst) {
         0 => {}
@@ -67,9 +67,10 @@ extern "C" fn exception_handler(registers: u64) {
         }
         0x0E => {
             log!(LogLevel::Fatal, "PAGE FAULT!");
         }
         0xFF => {
             log!(LogLevel::Fatal, "EXCEPTION!");
+            log!(
+                LogLevel::Debug,
+                "HINT: Find the last pointer you touched and make sure it's in virtual memory"
+            );
         }
         _ => {
             log!(LogLevel::Fatal, "EXCEPTION!");
@@ -82,7 +83,7 @@ extern "C" fn exception_handler(registers: u64) {
 }

 fn print_registers(registers: &Registers) {
-    log!(LogLevel::Info, "{:-^width$}", " REGISTERS ", width = 98);
+    log!(LogLevel::Info, "{:─^width$}", " REGISTERS ", width = 98);

     log!(
         LogLevel::Info,

@@ -1,7 +1,7 @@
 pub mod apic;
 pub mod exceptions;

-use crate::LogLevel;
+use crate::{mem::VirtualPtr, LogLevel};

 use self::apic::APIC;

@@ -130,10 +130,10 @@ pub extern "C" fn syscall() {
 }

 pub extern "C" fn syscall_handler(_rdi: u64, _rsi: u64, rdx: u64, rcx: u64) {
-    let buf = rdx as *const u8; // Treat as pointer to u8 (byte array)
+    let buf: VirtualPtr<u8> = unsafe { VirtualPtr::from(rdx as usize) }; // Treat as pointer to u8 (byte array)
     let count = rcx as usize;

-    let slice = unsafe { core::slice::from_raw_parts(buf, count) };
+    let slice = unsafe { core::slice::from_raw_parts(buf.as_raw_ptr(), count) };
     let message = core::str::from_utf8(slice).unwrap();
     crate::print!("{message}");
 }
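
Spelled out, the handler treats rdx as a byte-buffer address and rcx as its length. A direct-call sketch with illustrative values (the real path goes through the syscall entry stub, not a plain function call):

    let bytes = b"hello";
    syscall_handler(
        0,                     // _rdi: unused here
        0,                     // _rsi: unused here
        bytes.as_ptr() as u64, // rdx: buffer pointer
        bytes.len() as u64,    // rcx: byte count
    ); // prints "hello"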

@@ -1,5 +1,7 @@
 use core::arch::asm;

+use crate::mem::VirtualPtr;
+
 #[inline(always)]
 pub fn outb(port: u16, value: u8) {
     unsafe {
@@ -60,7 +62,9 @@ pub fn inw(port: u16) -> u16 {
 ///
 /// This function panics if the supplied buffer's size is smaller than `count`.
 #[inline(always)]
-pub unsafe fn insw(port: u16, buffer: *mut u16, count: usize) {
+pub unsafe fn insw(port: u16, buffer: VirtualPtr<u16>, count: usize) {
+    let buffer = buffer.as_raw_ptr();
+
     asm!("cld",
         "rep insw",
         in("dx") port,
@@ -75,7 +79,9 @@ pub unsafe fn insw(port: u16, buffer: *mut u16, count: usize) {
 ///
 /// This function panics if the supplied buffer's size is smaller than `count`.
 #[inline(always)]
-pub unsafe fn outsb(port: u16, buffer: *const u8, count: usize) {
+pub unsafe fn outsb(port: u16, buffer: VirtualPtr<u8>, count: usize) {
+    let buffer = buffer.as_raw_ptr();
+
     asm!("cld",
         "rep outsb",
         in("dx") port,
@@ -90,7 +96,9 @@ pub unsafe fn outsb(port: u16, buffer: *const u8, count: usize) {
 ///
 /// This function panics if the supplied buffer's size is smaller than `count`.
 #[inline(always)]
-pub unsafe fn outsw(port: u16, buffer: *const u16, count: usize) {
+pub unsafe fn outsw(port: u16, buffer: VirtualPtr<u16>, count: usize) {
+    let buffer = buffer.as_raw_ptr();
+
     asm!("cld",
         "rep outsw",
         in("dx") port,

@@ -3,7 +3,7 @@ use alloc::{borrow::ToOwned, string::String, vec::Vec};
 use crate::{
     drivers::fs::vfs::{vfs_open, UserCred},
     log,
-    mem::HHDM_OFFSET,
+    mem::VirtualPtr,
     LogLevel,
 };

@@ -12,15 +12,15 @@ use crate::{
 #[repr(C)]
 #[derive(Clone, Copy, Debug)]
 struct StackFrame {
-    back: *const StackFrame,
+    back: usize,
     rip: usize,
 }

 pub fn print_stack_trace(max_frames: usize, rbp: u64) {
-    let mut stackframe = rbp as *const StackFrame;
+    let mut stackframe: VirtualPtr<StackFrame> = VirtualPtr::from(rbp as usize);
     let mut frames_processed = 0;

-    log!(LogLevel::Info, "{:-^width$}", " Stack Trace ", width = 98);
+    log!(LogLevel::Info, "{:─^width$}", " Stack Trace ", width = 98);
     for _ in 0..max_frames {
         frames_processed += 1;

@@ -28,12 +28,10 @@ pub fn print_stack_trace(max_frames: usize, rbp: u64) {
             break;
         }

-        let instruction_ptr = unsafe { (*stackframe).rip };
+        let instruction_ptr = unsafe { (stackframe.read()).rip };

-        if instruction_ptr < *HHDM_OFFSET {
-            unsafe {
-                stackframe = (*stackframe).back;
-            };
+        if instruction_ptr < crate::libs::limine::get_hhdm_offset() {
+            stackframe = unsafe { VirtualPtr::from((stackframe.read()).back) };
             continue;
         }

@@ -47,9 +45,7 @@ pub fn print_stack_trace(max_frames: usize, rbp: u64) {

         log!(LogLevel::Info, "{:#X} {address_info}", instruction_ptr);

-        unsafe {
-            stackframe = (*stackframe).back;
-        };
+        stackframe = unsafe { VirtualPtr::from((stackframe.read()).back) };
     }

     if frames_processed == max_frames && !stackframe.is_null() {
@@ -63,7 +59,7 @@ fn get_function_name(function_address: u64) -> Result<(String, u64), ()> {

     let symbols_table_bytes = symbols_fd
         .open(0, UserCred { uid: 0, gid: 0 })
-        .read(0, 0, 0)?;
+        .read_all(0, 0)?;
     let symbols_table = core::str::from_utf8(&symbols_table_bytes).ok().ok_or(())?;

     let mut previous_symbol: Option<(&str, u64)> = None;
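
The walk only works because .cargo/config.toml passes -Cforce-frame-pointers=yes: every prologue then does push rbp; mov rbp, rsp, so the memory at rbp matches StackFrame. A sketch of reading the current frame by hand (x86-64 only, assuming the VirtualPtr API from src/mem/mod.rs):

    let rbp: u64;
    unsafe { core::arch::asm!("mov {}, rbp", out(reg) rbp) };
    // [rbp] = caller's saved rbp (StackFrame.back), [rbp + 8] = return address (StackFrame.rip),
    // so `back` forms a linked list of frames that ends at a null rbp.
    let frame = unsafe { VirtualPtr::<StackFrame>::from(rbp as usize).read() };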

@@ -1,20 +1,15 @@
 use core::ops::Add;

 use alloc::vec::Vec;
-use limine::request::RsdpRequest;
-use limine::request::SmpRequest;

+use crate::libs::limine::get_hhdm_offset;
+use crate::mem::{PhysicalPtr, VirtualPtr};
 use crate::LogLevel;
 use crate::{
     arch::io::{inw, outb},
     libs::cell::OnceCell,
-    mem::HHDM_OFFSET,
 };

-#[used]
-#[link_section = ".requests"]
-pub static mut SMP_REQUEST: SmpRequest = SmpRequest::new();
-
 #[repr(C, packed)]
 #[derive(Clone, Copy, Debug)]
 pub struct SDTHeader {
@@ -38,13 +33,19 @@ pub struct SDT<'a, T> {
 }

 impl<'a, T> SDT<'a, T> {
-    unsafe fn new(mut ptr: *const u8) -> Self {
-        if (ptr as usize) < *HHDM_OFFSET {
-            ptr = ptr.add(*HHDM_OFFSET);
-        }
+    unsafe fn new(ptr: PhysicalPtr<u8>) -> Self {
+        // let hhdm_offset = get_hhdm_offset();

-        let length = core::ptr::read_unaligned(ptr.add(4).cast::<u32>());
-        let data = core::slice::from_raw_parts(ptr, length as usize);
+        // if (ptr as usize) < hhdm_offset {
+        //     ptr = ptr.add(hhdm_offset);
+        // }
+
+        let ptr = ptr.to_higher_half();
+
+        // let length = core::ptr::read_unaligned(ptr.add(4).cast::<u32>());
+        // let data = core::slice::from_raw_parts(ptr, length as usize);
+        let length = ptr.add(4).cast::<u32>().read_unaligned();
+        let data = core::slice::from_raw_parts(ptr.as_raw_ptr(), length as usize);

         crate::log!(LogLevel::Trace, "SDT at: {ptr:p}");
@@ -124,25 +125,27 @@ impl<'a> RootSDT<'a> {
         return (self.header().length as usize - core::mem::size_of::<SDTHeader>()) / ptr_size;
     }

-    unsafe fn get(&self, idx: usize) -> *const u8 {
+    unsafe fn get(&self, idx: usize) -> VirtualPtr<u8> {
         let mut offset = 0;

         let root_ptr = match self {
             RootSDT::RSDT(rsdt) => {
-                let ptrs = (rsdt.inner.pointers as usize).add(*HHDM_OFFSET) as *const u8;
+                let ptrs: VirtualPtr<u8> =
+                    VirtualPtr::from((rsdt.inner.pointers as usize).add(get_hhdm_offset()));
                 assert!(!ptrs.is_null());
                 ptrs.add(offset)
             }
             RootSDT::XSDT(xsdt) => {
-                let ptrs = (xsdt.inner.pointers as usize).add(*HHDM_OFFSET) as *const u8;
+                let ptrs: VirtualPtr<u8> =
+                    VirtualPtr::from((xsdt.inner.pointers as usize).add(get_hhdm_offset()));
                 assert!(!ptrs.is_null());
                 ptrs.add(offset)
             }
         };

         for _ in 0..idx {
-            let header: &SDTHeader = &*root_ptr.add(offset).cast::<SDTHeader>();
-            offset += header.length as usize;
+            let header: VirtualPtr<SDTHeader> = root_ptr.add(offset).cast::<SDTHeader>();
+            offset += header.as_ref().unwrap().length as usize;
         }

         return root_ptr.add(offset);
@@ -157,30 +160,28 @@ struct ACPI<'a> {

 static ACPI: OnceCell<ACPI> = OnceCell::new();

-static RSDP_REQ: RsdpRequest = RsdpRequest::new();
-
 fn resolve_acpi() {
-    let rsdp_ptr = RSDP_REQ.get_response();
+    let rsdp_ptr = crate::libs::limine::get_rdsp_ptr();
     if rsdp_ptr.is_none() {
         panic!("RSDP not found!");
     }

-    let rsdp = unsafe { &*rsdp_ptr.unwrap().address().cast::<RSDP>() };
+    let rsdp = unsafe { &*rsdp_ptr.unwrap().cast::<RSDP>() };

     // TODO: validate RSDT
     let root_sdt = {
         if rsdp.revision == 0 {
-            RootSDT::RSDT(unsafe { SDT::new(rsdp.rsdt_addr as *mut u8) })
+            RootSDT::RSDT(unsafe { SDT::new(PhysicalPtr::from(rsdp.rsdt_addr as usize)) })
         } else {
-            let xsdt = unsafe { &*rsdp_ptr.unwrap().address().cast::<XSDP>() };
-            RootSDT::XSDT(unsafe { SDT::new(xsdt.xsdt_addr as *mut u8) })
+            let xsdt = unsafe { &*rsdp_ptr.unwrap().cast::<XSDP>() };
+            RootSDT::XSDT(unsafe { SDT::new(PhysicalPtr::from(xsdt.xsdt_addr as usize)) })
         }
     };

     let tables: Vec<[u8; 4]> = (0..root_sdt.len())
         .map(|i| {
             let sdt_ptr = unsafe { root_sdt.get(i) };
-            let signature = unsafe { core::slice::from_raw_parts(sdt_ptr, 4) };
+            let signature = unsafe { core::slice::from_raw_parts(sdt_ptr.as_raw_ptr(), 4) };
             signature.try_into().unwrap()
         })
         .collect();
@@ -264,7 +265,6 @@ struct FADT {
     x_gpe1_block: GenericAddressStructure,
 }

-#[no_mangle]
 pub fn init_acpi() {
     resolve_acpi();

@@ -298,7 +298,7 @@ pub fn find_table<T>(table_name: &str) -> Option<SDT<T>> {
         if table == table_name.as_bytes() {
             let ptr = unsafe { ACPI.root_sdt.get(i) };

-            let table = unsafe { SDT::new(ptr) };
+            let table = unsafe { SDT::new(ptr.to_lower_half()) };
             return Some(table);
         }
     }
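
For context, find_table is how the rest of the kernel reaches these tables; the APIC code above locates the MADT by its four-byte signature. A sketch (the MADT type name is assumed from apic.rs, which reads madt.inner, madt.header, and madt.extra):

    // "APIC" is the MADT's signature in the RSDT/XSDT entry headers.
    let madt = crate::drivers::acpi::find_table::<MADT>("APIC").expect("MADT not found");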
@@ -373,11 +373,9 @@ impl FatFs {
                 if filename != name {
                     continue;
                 }
-            } else {
-                if name.to_uppercase() != formatted_short_filename {
-                    continue;
-                }
+            } else if name.to_uppercase() != formatted_short_filename {
+                continue;
             }

             return Ok(file_entry);
         }
@@ -599,7 +597,7 @@ impl VNodeOperations for File {
         loop {
             let cluster_data = unsafe { (*fat_fs).read_cluster(cluster as usize)? };

-            let remaining = count as usize - copied_bytes;
+            let remaining = count - copied_bytes;
             let to_copy = if remaining > cluster_size {
                 cluster_size - offset
             } else {

@@ -4,38 +4,11 @@ mod superblock;
 use core::{fmt::Debug, mem::MaybeUninit, ptr::NonNull};

 use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec};
-use limine::request::ModuleRequest;

 use super::vfs::{FsOps, VNode, VNodeOperations, VNodeType};

-#[used]
-#[link_section = ".requests"]
-pub static MODULE_REQUEST: ModuleRequest = ModuleRequest::new();
-
 pub fn init() -> Squashfs<'static> {
-    // TODO: Put the module request stuff in another file?
-    if MODULE_REQUEST.get_response().is_none() {
-        panic!("Module request in none!");
-    }
-    let module_response = MODULE_REQUEST.get_response().unwrap();
-
-    let mut initramfs = None;
-
-    let module_name = "initramfs.img";
-
-    for module in module_response.modules() {
-        let path = core::str::from_utf8(module.path());
-        if path.is_err() {
-            continue;
-        }
-
-        if !path.unwrap().contains(module_name) {
-            continue;
-        }
-
-        initramfs = Some(module);
-    }
-    // End TODO
+    let initramfs = crate::libs::limine::get_module("initramfs.img");

     if initramfs.is_none() {
         panic!("Initramfs was not found!");
@@ -501,7 +474,7 @@ impl VNodeOperations for Inode {
                 file.block_offset as usize
             } + offset;

-            block_data.extend(&data_table[block_offset..(block_offset + count as usize)]);
+            block_data.extend(&data_table[block_offset..(block_offset + count)]);

             return Ok(Arc::from(block_data));
         },

@@ -223,12 +223,14 @@ impl File {
         unsafe { self.descriptor.as_mut() }
     }

-    pub fn read(&mut self, mut count: usize, offset: usize, f: u32) -> Result<Arc<[u8]>, ()> {
-        if count == 0 {
-            count = self.len() - offset;
-        }
-
-        return self.get_node().read(count, offset, f);
+    pub fn read(&mut self, count: usize, offset: usize, f: u32) -> Result<Arc<[u8]>, ()> {
+        return self.get_node().read(count, offset, f);
+    }
+
+    pub fn read_all(&mut self, offset: usize, f: u32) -> Result<Arc<[u8]>, ()> {
+        let count = self.len() - offset;
+
+        return self.read(count, offset, f);
     }

     pub fn write(&mut self, offset: usize, buf: &[u8], f: u32) {
@@ -279,6 +281,18 @@ impl TreeNode {
     pub fn lookup(&mut self, name: &str) -> Result<&mut Self, ()> {
         let parent = Some(self.as_ptr());

+        if name == ".." {
+            if let Some(mut parent_node) = self.parent {
+                return Ok(unsafe { parent_node.as_mut() });
+            } else {
+                return Err(());
+            }
+        }
+
+        if name == "." {
+            return Ok(self);
+        }
+
         if !self.children.contains_key(name) {
             let vnode: VNode;
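
The call-site difference, sketched with an illustrative path (read takes count, offset, flags; read_all derives count from the file length instead of overloading count == 0):

    let mut fd = vfs_open("/mnt/boot/limine/limine.conf")
        .unwrap()
        .open(0, UserCred { uid: 0, gid: 0 });
    let whole = fd.read_all(0, 0);     // everything from offset 0
    let first_ten = fd.read(10, 0, 0); // exactly 10 bytes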

@@ -2,6 +2,7 @@ use core::sync::atomic::AtomicBool;

 #[cfg(target_arch = "x86_64")]
 use crate::arch::io::{inb, outb, outsb};
+use crate::mem::VirtualPtr;

 // COM1
 pub static PORT: u16 = 0x3f8;
@@ -51,7 +52,7 @@ pub fn write_string(string: &str) {
     {
         while is_transmit_empty() {}

-        unsafe { outsb(PORT, string.as_ptr(), string.len()) }
+        unsafe { outsb(PORT, VirtualPtr::from(string.as_ptr()), string.len()) }
     }
     #[cfg(not(target_arch = "x86_64"))]
     {

@@ -1,7 +1,9 @@
 use core::mem::size_of;

+use alloc::vec;
 use alloc::{boxed::Box, sync::Arc, vec::Vec};

+use crate::mem::VirtualPtr;
 use crate::{
     arch::io::{inb, insw, inw, outb, outsw},
     drivers::{
@@ -297,8 +299,7 @@ impl ATABus {
         sector: u64,
         sector_count: usize,
     ) -> Result<Arc<[u8]>, ()> {
-        let mut buffer: Vec<u8> = Vec::with_capacity(ATA_SECTOR_SIZE * sector_count);
-        buffer.resize(buffer.capacity(), 0);
+        let mut buffer: Vec<u8> = vec![0; ATA_SECTOR_SIZE * sector_count];

         self.ide_access(
             drive,
@@ -434,14 +435,14 @@ impl ATABus {
             ATADriveDirection::Read => unsafe {
                 insw(
                     self.io_bar + ATADriveDataRegister::Data as u16,
-                    (buffer.as_mut_ptr().cast::<u16>()).add(buffer_offset),
+                    VirtualPtr::from((buffer.as_mut_ptr().cast::<u16>()).add(buffer_offset)),
                     ATA_SECTOR_SIZE / size_of::<u16>(),
                 );
             },
             ATADriveDirection::Write => unsafe {
                 outsw(
                     self.io_bar + ATADriveDataRegister::Data as u16,
-                    (buffer.as_mut_ptr().cast::<u16>()).add(buffer_offset),
+                    VirtualPtr::from((buffer.as_mut_ptr().cast::<u16>()).add(buffer_offset)),
                     ATA_SECTOR_SIZE / size_of::<u16>(),
                 )
             },
@@ -507,8 +508,8 @@ impl ATADrive {
         return unsafe { *(sectors.cast::<u32>()) } as u64;
     }

-    pub fn as_ptr(&self) -> *const ATADrive {
-        return core::ptr::addr_of!(*self);
+    pub fn as_ptr(&self) -> VirtualPtr<ATADrive> {
+        return unsafe { VirtualPtr::new(core::ptr::addr_of!(*self) as *mut ATADrive) };
     }
 }

@@ -673,7 +674,7 @@ fn ide_initialize(bar0: u32, bar1: u32, _bar2: u32, _bar3: u32, _bar4: u32) {
                 attributes,
                 partition_name,
             },
-            drive.as_ptr(),
+            drive.as_ptr().as_raw_ptr(),
         )));
     }

@@ -1,6 +1,4 @@
-use limine::{framebuffer, request::FramebufferRequest};
-
-use crate::libs::cell::OnceCell;
+use crate::{libs::cell::OnceCell, mem::VirtualPtr};

 #[derive(Clone, Copy, Debug)]
 pub struct Framebuffer {
@@ -8,12 +6,18 @@ pub struct Framebuffer {
     pub height: usize,
     pub bpp: usize,
     pub pitch: usize,
-    pub pointer: *mut u8,
+    pub pointer: VirtualPtr<u8>,
 }

 impl Framebuffer {
     #[inline]
-    const fn new(bpp: usize, pitch: usize, ptr: *mut u8, width: usize, height: usize) -> Self {
+    const fn new(
+        bpp: usize,
+        pitch: usize,
+        ptr: VirtualPtr<u8>,
+        width: usize,
+        height: usize,
+    ) -> Self {
         return Self {
             width,
             height,
@@ -32,7 +36,7 @@ impl Framebuffer {
         let pixel_offset = (y * self.pitch as u32 + (x * (self.bpp / 8) as u32)) as isize;

         unsafe {
-            *(self.pointer.offset(pixel_offset).cast::<u32>()) = color;
+            self.pointer.offset(pixel_offset).cast::<u32>().write(color);
         }
     }

@@ -54,14 +58,14 @@ impl Framebuffer {
         unsafe {
             core::ptr::copy_nonoverlapping(
                 buffer.as_ptr(),
-                self.pointer.cast::<u32>(),
+                self.pointer.cast::<u32>().as_raw_ptr(),
                 buffer.len(),
             );

             if let Some(mirror_buffer) = mirror_buffer {
                 core::ptr::copy_nonoverlapping(
                     buffer.as_ptr(),
-                    mirror_buffer.pointer.cast::<u32>(),
+                    mirror_buffer.pointer.cast::<u32>().as_raw_ptr(),
                     buffer.len(),
                 );
             }
@@ -69,28 +73,18 @@ impl Framebuffer {
     }
 }

-#[used]
-#[link_section = ".requests"]
-pub static FRAMEBUFFER_REQUEST: FramebufferRequest = FramebufferRequest::new();
 pub static FRAMEBUFFER: OnceCell<Option<Framebuffer>> = OnceCell::new();

 pub fn get_framebuffer() -> Option<Framebuffer> {
     *FRAMEBUFFER.get_or_set(|| {
-        let framebuffer_response = crate::drivers::video::FRAMEBUFFER_REQUEST.get_response()?;
-        let framebuffer = framebuffer_response.framebuffers().next();
-
-        if framebuffer.is_none() {
-            return None;
-        }
-
-        let framebuffer_response = framebuffer.as_ref().unwrap();
+        let limine_frambuffer = crate::libs::limine::get_framebuffer()?;

         let framebuffer = Framebuffer::new(
-            framebuffer_response.bpp() as usize,
-            framebuffer_response.pitch() as usize,
-            framebuffer_response.addr(),
-            framebuffer_response.width() as usize,
-            framebuffer_response.height() as usize,
+            limine_frambuffer.bpp() as usize,
+            limine_frambuffer.pitch() as usize,
+            unsafe { VirtualPtr::new(limine_frambuffer.addr()) },
+            limine_frambuffer.width() as usize,
+            limine_frambuffer.height() as usize,
         );

         return Some(framebuffer);
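
The set_pixel offset math, worked through once with illustrative values: at 32 bpp with a 7680-byte pitch, pixel (10, 2) starts at byte 2 * 7680 + 10 * 4:

    let (bpp, pitch) = (32u32, 7680u32);
    let (x, y) = (10u32, 2u32);
    let pixel_offset = (y * pitch + x * (bpp / 8)) as isize;
    assert_eq!(pixel_offset, 15_400);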

@@ -46,7 +46,7 @@ impl<T> OnceCell<T> {
         }
     }

-    fn get_unchecked(&self) -> &T {
+    pub fn get_unchecked(&self) -> &T {
         match self.state.get() {
             OnceCellState::Initialized(data) => data,
             _ => panic!("Attempted to access uninitialized data!"),

@@ -1,3 +1,4 @@
+use alloc::vec;
 use alloc::vec::Vec;

 #[derive(Debug)]
@@ -114,8 +115,7 @@ struct HuffRing {

 impl HuffRing {
     fn new() -> Self {
-        let mut data = Vec::with_capacity(32 * 1024);
-        data.resize(data.capacity(), 0);
+        let data = vec![0; 32 * 1024];

         return Self { pointer: 0, data };
     }
@@ -395,10 +395,10 @@ fn build_huffman(lengths: &[u8], size: usize, out: &mut Huff) {
         out.counts[i] = 0;
     }

-    for i in 0..size {
-        assert!(lengths[i] <= 15);
+    for &length in lengths.iter().take(size) {
+        assert!(length <= 15);

-        out.counts[lengths[i] as usize] += 1;
+        out.counts[length as usize] += 1;
     }

     out.counts[0] = 0;
src/libs/limine.rs (new file, 130 lines)
@@ -0,0 +1,130 @@
+use limine::{
+    file::File,
+    framebuffer::Framebuffer,
+    paging::Mode,
+    request::{
+        FramebufferRequest, KernelAddressRequest, KernelFileRequest, ModuleRequest, RsdpRequest,
+        SmpRequest,
+    },
+    response::{KernelAddressResponse, KernelFileResponse, SmpResponse},
+    BaseRevision,
+};
+
+use super::cell::OnceCell;
+
+// Be sure to mark all limine requests with #[used], otherwise they may be removed by the compiler.
+#[used]
+// The .requests section allows limine to find the requests faster and more safely.
+#[link_section = ".requests"]
+static BASE_REVISION: BaseRevision = BaseRevision::new();
+
+#[used]
+#[link_section = ".requests"]
+static KERNEL_REQUEST: KernelFileRequest = KernelFileRequest::new();
+
+#[used]
+#[link_section = ".requests"]
+static KERNEL_ADDRESS_REQUEST: KernelAddressRequest = KernelAddressRequest::new();
+
+#[used]
+#[link_section = ".requests"]
+static mut SMP_REQUEST: SmpRequest = SmpRequest::new();
+
+#[used]
+#[link_section = ".requests"]
+static RSDP_REQ: RsdpRequest = RsdpRequest::new();
+
+#[used]
+#[link_section = ".requests"]
+static FRAMEBUFFER_REQUEST: FramebufferRequest = FramebufferRequest::new();
+
+#[used]
+#[link_section = ".requests"]
+static MODULE_REQUEST: ModuleRequest = ModuleRequest::new();
+
+#[used]
+#[link_section = ".requests"]
+static mut MEMMAP_REQUEST: limine::request::MemoryMapRequest =
+    limine::request::MemoryMapRequest::new();
+
+#[used]
+#[link_section = ".requests"]
+static HHDM_REQUEST: limine::request::HhdmRequest = limine::request::HhdmRequest::new();
+
+#[used]
+#[link_section = ".requests"]
+static PAGING_REQUEST: limine::request::PagingModeRequest =
+    limine::request::PagingModeRequest::new();
+
+pub fn get_module<'a>(module_name: &str) -> Option<&'a File> {
+    if MODULE_REQUEST.get_response().is_none() {
+        panic!("Module request in none!");
+    }
+    let module_response = MODULE_REQUEST.get_response().unwrap();
+
+    let mut file = None;
+
+    for &module in module_response.modules() {
+        let path = core::str::from_utf8(module.path());
+        if path.is_err() {
+            continue;
+        }
+
+        if !path.unwrap().contains(module_name) {
+            continue;
+        }
+
+        file = Some(module);
+    }
+
+    return file;
+}
+
+pub fn get_rdsp_ptr() -> Option<*const ()> {
+    return Some(RSDP_REQ.get_response()?.address());
+}
+
+pub fn get_smp<'a>() -> Option<&'a mut SmpResponse> {
+    return unsafe { SMP_REQUEST.get_response_mut() };
+}
+
+pub fn get_framebuffer<'a>() -> Option<Framebuffer<'a>> {
+    return FRAMEBUFFER_REQUEST.get_response()?.framebuffers().next();
+}
+
+pub static HHDM_OFFSET: OnceCell<usize> = OnceCell::new();
+
+pub fn get_hhdm_offset() -> usize {
+    if let Err(()) = HHDM_OFFSET.get() {
+        HHDM_OFFSET.set(
+            HHDM_REQUEST
+                .get_response()
+                .expect("Failed to get HHDM!")
+                .offset() as usize,
+        );
+    }
+
+    // Note: this clones the usize
+    return *HHDM_OFFSET.get_unchecked();
+}
+
+pub fn get_memmap<'a>() -> &'a mut [&'a mut limine::memory_map::Entry] {
+    return unsafe {
+        MEMMAP_REQUEST
+            .get_response_mut()
+            .expect("Failed to get Memory map!")
+            .entries_mut()
+    };
+}
+
+pub fn get_kernel_address<'a>() -> &'a KernelAddressResponse {
+    return KERNEL_ADDRESS_REQUEST.get_response().unwrap();
+}
+
+pub fn get_kernel_file<'a>() -> Option<&'a KernelFileResponse> {
+    return KERNEL_REQUEST.get_response();
+}
+
+pub fn get_paging_level() -> Mode {
+    return PAGING_REQUEST.get_response().unwrap().mode();
+}
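
Everything that previously owned its own request static now calls into this module instead; a sketch of the new call pattern from elsewhere in the tree:

    let hhdm_offset = crate::libs::limine::get_hhdm_offset();
    if let Some(initramfs) = crate::libs::limine::get_module("initramfs.img") {
        // the module's address and size come from the bootloader response
    }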

@@ -1,4 +1,5 @@
 pub mod cell;
 pub mod gzip;
+pub mod limine;
 pub mod sync;
 pub mod uuid;
src/main.rs (55 lines changed)
@@ -6,8 +6,8 @@
 use core::arch::x86_64::__cpuid;

 use alloc::vec::Vec;
-use limine::{request::KernelFileRequest, BaseRevision};
-use mem::{LabelBytes, HHDM_OFFSET, PHYSICAL_MEMORY_MANAGER};
+use libs::limine::{get_hhdm_offset, get_kernel_file};
+use mem::{pmm::total_memory, LabelBytes};

 use crate::drivers::fs::{
     initramfs,
@@ -26,16 +26,6 @@ pub static BUILD_ID: &str = "__BUILD_ID__";

 pub static LOG_LEVEL: u8 = if cfg!(debug_assertions) { 1 } else { 2 };

-// Be sure to mark all limine requests with #[used], otherwise they may be removed by the compiler.
-#[used]
-// The .requests section allows limine to find the requests faster and more safely.
-#[link_section = ".requests"]
-static BASE_REVISION: BaseRevision = BaseRevision::new();
-
-#[used]
-#[link_section = ".requests"]
-pub static KERNEL_REQUEST: KernelFileRequest = KernelFileRequest::new();
-
 #[no_mangle]
 pub extern "C" fn _start() -> ! {
     drivers::serial::init_serial();
@@ -46,6 +36,7 @@ pub extern "C" fn _start() -> ! {
     // TODO: memory stuff
     mem::pmm::pmm_init();
     mem::init_allocator();
+    mem::vmm::vmm_init();
     drivers::acpi::init_acpi();

     parse_kernel_cmdline();
@@ -66,10 +57,11 @@ pub fn kmain() -> ! {
         vfs_open("/firstdir/seconddirbutlonger/yeah.txt")
             .unwrap()
            .open(0, UserCred { uid: 0, gid: 0 })
-            .read(0, 0, 0)
+            .read_all(0, 0)
     );

     drivers::storage::ide::init();

     let limine_dir = vfs_open("/mnt/boot/limine").unwrap();

     crate::println!(
@@ -78,7 +70,7 @@ pub fn kmain() -> ! {
         .lookup("limine.conf")
         .unwrap()
         .open(0, UserCred { uid: 0, gid: 0 })
-        .read(0, 0, 0)
+        .read_all(0, 0)
     );

     let root_dir = vfs_open("/").unwrap();
@@ -95,7 +87,7 @@ pub fn kmain() -> ! {
         .lookup("limine.conf")
         .unwrap()
         .open(0, UserCred { uid: 0, gid: 0 })
-        .read(0, 10, 0)
+        .read_all(10, 0)
     );

     let _ = drivers::fs::vfs::del_vfs("/mnt");
@@ -108,7 +100,7 @@ pub fn kmain() -> ! {
         .lookup("limine.conf")
         .unwrap()
         .open(0, UserCred { uid: 0, gid: 0 })
-        .read(0, 0, 0)
+        .read_all(0, 0)
     );

     // let file = vfs_open("/example.txt").unwrap();
@@ -140,16 +132,18 @@ pub fn kmain() -> ! {
 fn draw_gradient() {
     let fb = drivers::video::get_framebuffer().unwrap();
     let length = (fb.height * fb.width) * (fb.bpp / 8);
-    let pages = length / crate::mem::pmm::PAGE_SIZE;
+    let pages = length / crate::mem::PAGE_SIZE;

-    let buffer_ptr =
-        (crate::mem::PHYSICAL_MEMORY_MANAGER.alloc(pages) as usize + *HHDM_OFFSET) as *mut u8;
+    let hhdm_offset = get_hhdm_offset();
+
+    let buffer_ptr = crate::mem::pmm::pmm_alloc(pages).to_higher_half();

     if buffer_ptr.is_null() {
         panic!("Failed to allocate screen buffer")
     }

-    let buffer = unsafe { core::slice::from_raw_parts_mut(buffer_ptr.cast::<u32>(), length) };
+    let buffer =
+        unsafe { core::slice::from_raw_parts_mut(buffer_ptr.cast::<u32>().as_raw_ptr(), length) };

     for y in 0..fb.height {
         for x in 0..fb.width {
@@ -164,8 +158,7 @@ fn draw_gradient() {

     fb.blit_screen(buffer, None);

-    crate::mem::PHYSICAL_MEMORY_MANAGER
-        .dealloc((buffer_ptr as usize - *HHDM_OFFSET) as *mut u8, pages);
+    crate::mem::pmm::pmm_dealloc(unsafe { buffer_ptr.to_lower_half() }, pages);
 }

 fn print_boot_info() {
@@ -175,10 +168,7 @@ fn print_boot_info() {
     crate::println!("║╚╗║╬╚╗║╬║║╬║║║║║═╣║═╣║║║║║║║║║╠══║");
     crate::println!("╚═╝╚══╝║╔╝║╔╝╚═╝╚═╝╚═╝╚╝╚╩═╝╚═╝╚══╝");
     crate::println!("───────╚╝─╚╝ ©juls0730 {BUILD_ID}");
-    crate::println!(
-        "{} of memory available",
-        PHYSICAL_MEMORY_MANAGER.total_memory().label_bytes()
-    );
+    crate::println!("{} of memory available", total_memory().label_bytes());
     crate::println!(
         "The kernel was built in {} mode",
         if cfg!(debug_assertions) {
@@ -187,13 +177,16 @@ fn print_boot_info() {
             "release"
         }
     );
-    if unsafe { __cpuid(0x80000000).eax } >= 0x80000004 {
-        let processor_brand = get_processor_brand();
+    if let Some(processor_brand) = get_processor_brand() {
         crate::println!("Detected CPU: {processor_brand}");
     }
 }

-fn get_processor_brand() -> alloc::string::String {
+fn get_processor_brand() -> Option<alloc::string::String> {
+    if unsafe { __cpuid(0x80000000).eax } >= 0x80000004 {
+        return None;
+    }
+
     let mut brand_buf = [0u8; 48];

     let mut offset = 0;
@@ -215,7 +208,7 @@ fn get_processor_brand() -> alloc::string::String {
         brand.push(char as char);
     }

-    brand
+    return Some(brand);
 }

 #[macro_export]
@@ -290,7 +283,7 @@ fn parse_kernel_cmdline() {
         log_level: crate::LOG_LEVEL,
     };

-    let kernel_file_response = KERNEL_REQUEST.get_response();
+    let kernel_file_response = get_kernel_file();
     if kernel_file_response.is_none() {
         KERNEL_FEATURES.set(kernel_features);
         return;
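
The buffer sizing in draw_gradient, worked through with illustrative numbers: a 1024x768 framebuffer at 32 bpp needs 1024 * 768 * 4 bytes, which is exactly 768 4-KiB pages:

    let (width, height, bpp) = (1024usize, 768usize, 32usize);
    let length = (height * width) * (bpp / 8);
    let pages = length / 4096; // crate::mem::PAGE_SIZE
    assert_eq!(pages, 768);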

@@ -3,9 +3,9 @@ use core::{
     ptr::NonNull,
 };

-use crate::{libs::sync::Mutex, mem::pmm::PAGE_SIZE};
+use crate::libs::sync::Mutex;

-use super::{align_up, HHDM_OFFSET};
+use super::{align_up, pmm::pmm_alloc, VirtualPtr, PAGE_SIZE};

 #[derive(Debug)]
 struct MemNode {
@@ -46,26 +46,21 @@ impl LinkedListAllocator {

     pub fn init(&mut self, pages: usize) {
         unsafe {
-            self.add_free_region(
-                super::PHYSICAL_MEMORY_MANAGER
-                    .alloc(pages)
-                    .add(*HHDM_OFFSET),
-                PAGE_SIZE * pages,
-            );
+            self.add_free_region(pmm_alloc(1).to_higher_half(), PAGE_SIZE * pages);
         }
     }

-    unsafe fn add_free_region(&mut self, addr: *mut u8, size: usize) {
+    unsafe fn add_free_region(&mut self, ptr: VirtualPtr<u8>, size: usize) {
         assert_eq!(
-            align_up(addr as usize, core::mem::align_of::<MemNode>()),
-            addr as usize
+            align_up(ptr.addr(), core::mem::align_of::<MemNode>()),
+            ptr.addr()
         );
         assert!(size >= core::mem::size_of::<MemNode>());

         let mut target_node = &mut self.head;

         while let Some(mut next_node) = target_node.next {
-            if next_node.as_ref().addr() > addr as usize {
+            if next_node.as_ref().addr() > ptr.addr() {
                 break;
             }

@@ -75,8 +70,8 @@ impl LinkedListAllocator {
         let mut node = MemNode::new(size);
         node.next = target_node.next.take();

-        addr.cast::<MemNode>().write(node);
-        target_node.next = Some(NonNull::new_unchecked(addr.cast::<MemNode>()));
+        ptr.cast::<MemNode>().write(node);
+        target_node.next = Some(NonNull::new_unchecked(ptr.cast::<MemNode>().as_raw_ptr()));
     }

     unsafe fn coalesce_memory(&mut self) {
@@ -96,13 +91,13 @@ impl LinkedListAllocator {
         }
     }

-    fn alloc_from_node(node: &MemNode, layout: Layout) -> *mut u8 {
+    fn alloc_from_node(node: &MemNode, layout: Layout) -> VirtualPtr<u8> {
         let start = align_up(node.addr(), layout.align());
         let end = start + layout.size();

         if end > node.end_addr() {
             // aligned address goes outside the bounds of the node
-            return core::ptr::null_mut();
+            return VirtualPtr::null_mut();
         }

         let extra = node.end_addr() - end;
@@ -110,10 +105,10 @@ impl LinkedListAllocator {
             // Node size minus allocation size is less than the minimum size needed for a node;
             // thus, if we let the allocation happen in this node, we lose track of the extra memory
             // lost by this allocation
-            return core::ptr::null_mut();
+            return VirtualPtr::null_mut();
         }

-        return start as *mut u8;
+        return VirtualPtr::from(start);
     }

     unsafe fn find_region(&mut self, layout: Layout) -> Option<NonNull<MemNode>> {
@@ -172,25 +167,25 @@ impl LinkedListAllocator {
         return Layout::from_size_align(size, layout.align()).expect("Failed to create layout");
     }

-    unsafe fn inner_alloc(&mut self, layout: Layout) -> *mut u8 {
+    unsafe fn inner_alloc(&mut self, layout: Layout) -> VirtualPtr<u8> {
         let layout = Self::size_align(layout);

         if let Some(region) = self.find_region(layout) {
             // immutable pointers are a government conspiracy anyways
-            let end = (region.as_ref().addr() + layout.size()) as *mut u8;
-            let extra = region.as_ref().end_addr() - end as usize;
+            let end = VirtualPtr::from(region.as_ref().addr() + layout.size());
+            let extra = region.as_ref().end_addr() - end.addr();

             if extra > 0 {
                 self.add_free_region(end, extra)
             }

-            return region.as_ref().addr() as *mut u8;
+            return VirtualPtr::from(region.as_ref().addr());
         }

-        return core::ptr::null_mut();
+        return VirtualPtr::null_mut();
     }

-    unsafe fn inner_dealloc(&mut self, ptr: *mut u8, layout: Layout) {
+    unsafe fn inner_dealloc(&mut self, ptr: VirtualPtr<u8>, layout: Layout) {
         let layout = Self::size_align(layout);

         self.add_free_region(ptr, layout.size());
@@ -202,12 +197,12 @@ unsafe impl GlobalAlloc for Mutex<LinkedListAllocator> {
     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
         let mut allocator = self.lock();

-        allocator.inner_alloc(layout)
+        allocator.inner_alloc(layout).as_raw_ptr()
     }

     unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
         let mut allocator = self.lock();

-        allocator.inner_dealloc(ptr, layout);
+        allocator.inner_dealloc(VirtualPtr::new(ptr), layout);
     }
 }
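
The two assertions in add_free_region encode the free-list invariant: a freed region stores its own MemNode header in-place, so it must be MemNode-aligned and at least MemNode-sized. A sketch with an illustrative address (MemNode is private to allocator.rs):

    let (addr, size) = (0x1000usize, 64usize); // hypothetical freed region
    assert_eq!(align_up(addr, core::mem::align_of::<MemNode>()), addr);
    assert!(size >= core::mem::size_of::<MemNode>());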
src/mem/mod.rs (288 lines changed)
@@ -1,28 +1,274 @@
 pub mod allocator;
 pub mod pmm;
+pub mod vmm;

-use crate::{
-    libs::{cell::OnceCell, sync::Mutex},
-    // LogLevel,
-};
-
-use self::{allocator::LinkedListAllocator, pmm::PhysicalMemoryManager};
-
-#[used]
-#[link_section = ".requests"]
-static mut MEMMAP_REQUEST: limine::request::MemoryMapRequest =
-    limine::request::MemoryMapRequest::new();
-
-#[used]
-#[link_section = ".requests"]
-static HHDM_REQUEST: limine::request::HhdmRequest = limine::request::HhdmRequest::new();
-
-pub static HHDM_OFFSET: OnceCell<usize> = OnceCell::new();
-
-pub static PHYSICAL_MEMORY_MANAGER: OnceCell<PhysicalMemoryManager> = OnceCell::new();
-
-pub fn align_up(addr: usize, align: usize) -> usize {
-    let offset = (addr as *const u8).align_offset(align);
-    addr + offset
-}
+use core::fmt::{write, Debug, Pointer};
+
+use crate::libs::{limine::get_hhdm_offset, sync::Mutex};
+
+use self::allocator::LinkedListAllocator;
+
+/// A PhysicalPtr is a pointer that uses a physical location in memory. These pointers are not readable or mutable, as we cannot guarantee we are viewing the correct section of memory or that this memory is mapped to that location at all, resulting in a Page Fault.
+// TODO: make this use only a usize or something instead of a ptr
+#[repr(transparent)]
+#[derive(Clone, Copy)]
+pub struct PhysicalPtr<T> {
+    inner: *mut T,
+}
+
+impl<T> PhysicalPtr<T> {
+    pub const fn new(ptr: *mut T) -> Self {
+        return Self { inner: ptr };
+    }
+
+    pub const fn null_mut() -> Self {
+        return Self {
+            inner: core::ptr::null_mut(),
+        };
+    }
+
+    pub fn as_raw_ptr(&self) -> *mut T {
+        return self.inner;
+    }
+
+    pub fn addr(&self) -> usize {
+        return self.inner as usize;
+    }
+
+    pub fn is_null(&self) -> bool {
+        return self.inner.is_null();
+    }
+
+    /// # Safety:
+    /// see core::ptr::mut_ptr.add()
+    pub unsafe fn add(&self, count: usize) -> Self {
+        return Self::new(self.inner.add(count));
+    }
+
+    /// # Safety:
+    /// see core::ptr::mut_ptr.sub()
+    pub unsafe fn sub(&self, count: usize) -> Self {
+        return Self::new(self.inner.sub(count));
+    }
+
+    /// # Safety:
+    /// see core::ptr::mut_ptr.offset()
+    pub unsafe fn offset(&self, count: isize) -> Self {
+        return Self::new(self.inner.offset(count));
+    }
+
+    pub const fn cast<U>(&self) -> PhysicalPtr<U> {
+        return PhysicalPtr::new(self.inner.cast::<U>());
+    }
+
+    // torn if this should be unsafe or not
+    pub fn to_higher_half(&self) -> VirtualPtr<T> {
+        return unsafe {
+            VirtualPtr::new(self.cast::<u8>().add(get_hhdm_offset()).inner.cast::<T>())
+        };
+    }
+}
+
+impl<T> From<usize> for PhysicalPtr<T> {
+    fn from(addr: usize) -> Self {
+        PhysicalPtr {
+            inner: addr as *mut T,
+        }
+    }
+}
+
+impl<T> From<*mut T> for PhysicalPtr<T> {
+    fn from(ptr: *mut T) -> Self {
+        PhysicalPtr { inner: ptr }
+    }
+}
+
+// constant pointers are a lie anyways tbh
+impl<T> From<*const T> for PhysicalPtr<T> {
+    fn from(ptr: *const T) -> Self {
+        PhysicalPtr {
+            inner: ptr as *mut T,
+        }
+    }
+}
+
+impl<T> Debug for PhysicalPtr<T> {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        write(f, format_args!("PhysicalPtr({:p})", self.inner))
+    }
+}
+
+impl<T> Pointer for PhysicalPtr<T> {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        write(f, format_args!("PhysicalPtr({:p})", self.inner))
+    }
+}
+
+/// A VirtualPtr is a pointer that uses a virtual address. These pointers are readable and mutable, as they map to a physical address through paging.
+#[repr(transparent)]
+#[derive(Clone, Copy)]
+pub struct VirtualPtr<T> {
+    inner: *mut T,
+}
+
+impl<T> VirtualPtr<T> {
+    pub const fn new(ptr: *mut T) -> Self {
+        return Self { inner: ptr };
+    }
+
+    pub const fn null_mut() -> Self {
+        return Self {
+            inner: core::ptr::null_mut(),
+        };
+    }
+
+    pub fn addr(&self) -> usize {
+        return self.inner as usize;
+    }
+
+    pub fn as_raw_ptr(&self) -> *mut T {
+        return self.inner;
+    }
+
+    pub fn is_null(&self) -> bool {
+        return self.inner.is_null();
+    }
+
+    /// # Safety:
+    /// see core::ptr::mut_ptr.add()
+    pub unsafe fn add(&self, count: usize) -> Self {
+        return Self::new(self.inner.add(count));
+    }
+
+    /// # Safety:
+    /// see core::ptr::mut_ptr.sub()
+    pub unsafe fn sub(&self, count: usize) -> Self {
+        return Self::new(self.inner.sub(count));
+    }
+
+    /// # Safety:
+    /// see core::ptr::mut_ptr.offset()
+    pub unsafe fn offset(&self, count: isize) -> Self {
+        return Self::new(self.inner.offset(count));
+    }
+
+    pub const fn cast<U>(&self) -> VirtualPtr<U> {
+        return VirtualPtr::new(self.inner.cast::<U>());
+    }
+
+    /// # Safety:
+    /// see core::ptr::mut_ptr.write_bytes()
+    pub unsafe fn write_bytes(&self, val: u8, count: usize) {
+        self.inner.write_bytes(val, count);
+    }
+
+    /// # Safety:
+    /// Ensure the pointer is in the higher half
+    pub unsafe fn to_lower_half(&self) -> PhysicalPtr<T> {
+        return unsafe {
+            // be very careful with the math here
+            PhysicalPtr::new(self.cast::<u8>().sub(get_hhdm_offset()).inner.cast::<T>())
+        };
+    }
+
+    /// # Safety:
+    /// Ensure that the pointer is a valid virtual pointer and follows the same rules as ptr::read
+    pub const unsafe fn read(&self) -> T {
+        return self.inner.read();
+    }
+
+    /// # Safety:
+    /// Ensure that the pointer is a valid virtual pointer and follows the same rules as ptr::read_unaligned
+    pub const unsafe fn read_unaligned(&self) -> T {
+        return self.inner.read_unaligned();
+    }
+
+    /// # Safety:
+    /// Ensure that the pointer is a valid virtual pointer and follows the same rules as ptr::read_volatile
+    pub unsafe fn read_volatile(&self) -> T {
+        return self.inner.read_volatile();
+    }
+
+    /// # Safety:
+    /// Ensure that the pointer is a valid virtual pointer and follows the same rules as ptr::write
+    pub unsafe fn write(&self, val: T) {
+        self.inner.write(val);
+    }
+
+    /// # Safety:
+    /// Ensure that the pointer is a valid virtual pointer and follows the same rules as ptr::write_unaligned
+    pub unsafe fn write_unaligned(&self, val: T) {
+        self.inner.write_unaligned(val);
+    }
+
+    /// # Safety:
+    /// Ensure that the pointer is a valid virtual pointer and follows the same rules as ptr::write_volatile
+    pub unsafe fn write_volatile(&self, val: T) {
+        self.inner.write_volatile(val);
+    }
+
+    pub unsafe fn copy_to_nonoverlapping(&self, dest: VirtualPtr<T>, count: usize) {
+        self.inner.copy_to_nonoverlapping(dest.as_raw_ptr(), count)
+    }
+
+    pub unsafe fn copy_from_nonoverlapping(&self, src: VirtualPtr<T>, count: usize) {
+        self.inner.copy_from_nonoverlapping(src.as_raw_ptr(), count)
+    }
+
+    pub unsafe fn as_ref(&self) -> Option<&T> {
+        return self.inner.as_ref();
+    }
+
+    pub unsafe fn as_mut(&self) -> Option<&mut T> {
+        return self.inner.as_mut();
+    }
+}
+
+impl<T> From<usize> for VirtualPtr<T> {
+    fn from(addr: usize) -> Self {
+        VirtualPtr {
+            inner: addr as *mut T,
+        }
+    }
+}
+
+impl<T> From<*mut T> for VirtualPtr<T> {
+    fn from(ptr: *mut T) -> Self {
+        VirtualPtr { inner: ptr }
+    }
+}
+
+// constant pointers are a lie anyways tbh
+impl<T> From<*const T> for VirtualPtr<T> {
+    fn from(ptr: *const T) -> Self {
+        VirtualPtr {
+            inner: ptr as *mut T,
+        }
+    }
+}
+
+impl<T> Debug for VirtualPtr<T> {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        write(f, format_args!("VirtualPtr({:p})", self.inner))
+    }
+}
+
+impl<T> Pointer for VirtualPtr<T> {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        write(f, format_args!("VirtualPtr({:p})", self.inner))
+    }
+}
+
+pub const PAGE_SIZE: usize = 4096;
+
+pub fn align_up(val: usize, align: usize) -> usize {
+    assert!(align.is_power_of_two());
+    (val + align - 1) & !(align - 1)
+}
+
+pub fn align_down(val: usize, align: usize) -> usize {
+    assert!(align.is_power_of_two());
+    val & !(align - 1)
+}

 const HEAP_PAGES: usize = 1024; // 4 MiB heap
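
A round-trip sketch of the two pointer types and the new align helpers (the physical address is illustrative):

    let phys: PhysicalPtr<u8> = PhysicalPtr::from(0x7000usize);
    let virt: VirtualPtr<u8> = phys.to_higher_half();            // phys + HHDM offset
    let back: PhysicalPtr<u8> = unsafe { virt.to_lower_half() }; // virt - HHDM offset
    assert_eq!(back.addr(), phys.addr());

    assert_eq!(align_up(0x1001, 0x1000), 0x2000);
    assert_eq!(align_down(0x1FFF, 0x1000), 0x1000);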
@@ -30,7 +276,7 @@ const HEAP_PAGES: usize = 1024; // 4 MiB heap
|
||||
#[global_allocator]
|
||||
pub static ALLOCATOR: Mutex<LinkedListAllocator> = Mutex::new(LinkedListAllocator::new());
|
||||
|
||||
// TODO: Limine-rs 0.2.0 does NOT have debug implemented for a lot of it's types, so until that is fixed, either go without Type, or hack limine-rs locally
|
||||
// TODO: Limine-rs 0.2.0 does NOT have debug implemented for a lot of it's types, so until that is fixed, either go without Type, or hack limine-rs locally (tracking https://github.com/limine-bootloader/limine-rs/pull/30)
|
||||
// pub fn log_memory_map() {
|
||||
// let memmap_request = unsafe { MEMMAP_REQUEST.get_response_mut() };
|
||||
// if memmap_request.is_none() {
|
||||
@@ -133,13 +379,13 @@ impl LabelBytes for usize {
|
||||
|
||||
/// # Safety
|
||||
/// This will produce undefined behavior if dst is not valid for count writes
|
||||
pub unsafe fn memset32(dst: *mut u32, val: u32, count: usize) {
|
||||
pub unsafe fn memset32(dst: VirtualPtr<u32>, val: u32, count: usize) {
|
||||
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
|
||||
{
|
||||
let mut buf = dst;
|
||||
unsafe {
|
||||
while buf < dst.add(count) {
|
||||
core::ptr::write_volatile(buf, val);
|
||||
while buf.addr() < dst.add(count).addr() {
|
||||
buf.write_volatile(val);
|
||||
buf = buf.offset(1);
|
||||
}
|
||||
}
|
||||
@@ -148,6 +394,8 @@ pub unsafe fn memset32(dst: *mut u32, val: u32, count: usize) {
|
||||
|
||||
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
|
||||
{
|
||||
let dst = dst.as_raw_ptr();
|
||||
|
||||
core::arch::asm!(
|
||||
"rep stosd",
|
||||
inout("ecx") count => _,
|
||||
|

313
src/mem/pmm.rs
@@ -2,165 +2,33 @@

use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};

use super::{HHDM_OFFSET, HHDM_REQUEST, MEMMAP_REQUEST};
use crate::{
    libs::limine::{get_hhdm_offset, get_memmap},
    LogLevel,
};

pub const PAGE_SIZE: usize = 4096;
use super::{PhysicalPtr, VirtualPtr, PAGE_SIZE};

#[derive(Debug)]
pub struct PhysicalMemoryManager {
    bitmap: AtomicPtr<u8>,
    highest_page_idx: AtomicUsize,
    last_used_page_idx: AtomicUsize,
    usable_pages: AtomicUsize,
    used_pages: AtomicUsize,
struct PhysicalMemoryManager {
    pub bitmap: AtomicPtr<u8>,
    pub highest_page_idx: AtomicUsize,
    pub last_used_page_idx: AtomicUsize,
    pub usable_pages: AtomicUsize,
    pub used_pages: AtomicUsize,
}

pub fn pmm_init() {
    super::PHYSICAL_MEMORY_MANAGER.set(PhysicalMemoryManager::new());
}
static mut PHYSICAL_MEMORY_MANAGER: PhysicalMemoryManager = PhysicalMemoryManager::new();

impl PhysicalMemoryManager {
    pub fn new() -> Self {
        let pmm = Self {
    const fn new() -> Self {
        return Self {
            bitmap: AtomicPtr::new(core::ptr::null_mut()),
            highest_page_idx: AtomicUsize::new(0),
            last_used_page_idx: AtomicUsize::new(0),
            usable_pages: AtomicUsize::new(0),
            used_pages: AtomicUsize::new(0),
        };

        let hhdm_req = HHDM_REQUEST
            .get_response()
            .expect("Failed to get Higher Half Direct Map!");

        let hhdm_offset = hhdm_req.offset() as usize;

        HHDM_OFFSET.set(hhdm_offset);

        let memmap = unsafe {
            MEMMAP_REQUEST
                .get_response_mut()
                .expect("Failed to get Memory map!")
                .entries_mut()
        };

        let mut highest_addr: usize = 0;

        for entry in memmap.iter() {
            if entry.entry_type == limine::memory_map::EntryType::USABLE {
                pmm.usable_pages
                    .fetch_add(entry.length as usize / PAGE_SIZE, Ordering::SeqCst);
                if highest_addr < (entry.base + entry.length) as usize {
                    highest_addr = (entry.base + entry.length) as usize;
                }
            }
        }

        pmm.highest_page_idx
            .store(highest_addr / PAGE_SIZE, Ordering::SeqCst);
        let bitmap_size =
            ((pmm.highest_page_idx.load(Ordering::SeqCst) / 8) + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);

        for entry in memmap.iter_mut() {
            if entry.entry_type != limine::memory_map::EntryType::USABLE {
                continue;
            }

            if entry.length as usize >= bitmap_size {
                let ptr = (entry.base as usize + hhdm_offset) as *mut u8;
                pmm.bitmap.store(ptr, Ordering::SeqCst);

                unsafe {
                    // Set the bitmap to non-free
                    core::ptr::write_bytes(ptr, 0xFF, bitmap_size);
                };

                entry.length -= bitmap_size as u64;
                entry.base += bitmap_size as u64;

                break;
            }
        }

        for entry in memmap.iter() {
            if entry.entry_type != limine::memory_map::EntryType::USABLE {
                continue;
            }

            for i in 0..(entry.length as usize / PAGE_SIZE) {
                pmm.bitmap_reset((entry.base as usize + (i * PAGE_SIZE)) / PAGE_SIZE);
            }
        }

        return pmm;
    }

    fn inner_alloc(&self, pages: usize, limit: usize) -> *mut u8 {
        let mut p: usize = 0;

        while self.last_used_page_idx.load(Ordering::SeqCst) < limit {
            if self.bitmap_test(self.last_used_page_idx.fetch_add(1, Ordering::SeqCst)) {
                p = 0;
                continue;
            }

            p += 1;
            if p == pages {
                let page = self.last_used_page_idx.load(Ordering::SeqCst) - pages;
                for i in page..self.last_used_page_idx.load(Ordering::SeqCst) {
                    self.bitmap_set(i);
                }
                return (page * PAGE_SIZE) as *mut u8;
            }
        }

        // We have hit the search limit, but did not find any suitable memory regions starting from last_used_page_idx
        return core::ptr::null_mut();
    }

    pub fn alloc_nozero(&self, pages: usize) -> *mut u8 {
        // Attempt to allocate n pages with a search limit of the highest page index
        let mut page_addr = self.inner_alloc(pages, self.highest_page_idx.load(Ordering::SeqCst));

        if page_addr.is_null() {
            // If page_addr is null, then attempt to allocate n pages, but starting from
            // the beginning of the bitmap and with a limit of the old last_used_page_idx
            let last = self.last_used_page_idx.swap(0, Ordering::SeqCst);
            page_addr = self.inner_alloc(pages, last);

            // If page_addr is still null, we have run out of usable memory
            if page_addr.is_null() {
                return core::ptr::null_mut();
            }
        }

        self.used_pages.fetch_add(pages, Ordering::SeqCst);

        return page_addr;
    }

    pub fn alloc(&self, pages: usize) -> *mut u8 {
        let ret = self.alloc_nozero(pages);

        if ret.is_null() {
            return ret;
        }

        unsafe {
            core::ptr::write_bytes(ret.add(*HHDM_OFFSET), 0x00, pages * PAGE_SIZE);
        };

        return ret;
    }

    pub fn dealloc(&self, addr: *mut u8, pages: usize) {
        let page = addr as usize / PAGE_SIZE;

        for i in page..(page + pages) {
            self.bitmap_reset(i);
        }

        self.used_pages.fetch_sub(pages, Ordering::SeqCst);
    }

    #[inline(always)]
@@ -189,17 +57,154 @@ impl PhysicalMemoryManager {
            (*self.bitmap.load(Ordering::SeqCst).add(byte_index)) &= !(1 << bit_index);
        }
    }

    pub fn total_memory(&self) -> usize {
        return self.usable_pages.load(Ordering::SeqCst) * 4096;
    }

    pub fn usable_memory(&self) -> usize {
        return (self.usable_pages.load(Ordering::SeqCst) * 4096)
            - (self.used_pages.load(Ordering::SeqCst) * 4096);
pub fn pmm_init() {
    // we borrow the pointer because it is discouraged to make a mutable reference to a mutable static, and in Rust 2024 that will be a hard error
    let pmm = unsafe { &mut *core::ptr::addr_of_mut!(PHYSICAL_MEMORY_MANAGER) };

    let memmap = get_memmap();

    let mut highest_addr: usize = 0;

    for entry in memmap.iter() {
        if entry.entry_type == limine::memory_map::EntryType::USABLE {
            pmm.usable_pages
                .fetch_add(entry.length as usize / PAGE_SIZE, Ordering::SeqCst);
            if highest_addr < (entry.base + entry.length) as usize {
                highest_addr = (entry.base + entry.length) as usize;
            }
        }
    }

    pub fn used_memory(&self) -> usize {
        return self.used_pages.load(Ordering::SeqCst) * 4096;
    pmm.highest_page_idx
        .store(highest_addr / PAGE_SIZE, Ordering::SeqCst);
    let bitmap_size =
        ((pmm.highest_page_idx.load(Ordering::SeqCst) / 8) + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);

    for entry in memmap.iter_mut() {
        if entry.entry_type != limine::memory_map::EntryType::USABLE {
            continue;
        }

        if entry.length as usize >= bitmap_size {
            let ptr = VirtualPtr::from(entry.base as usize + get_hhdm_offset());
            pmm.bitmap.store(ptr.as_raw_ptr(), Ordering::SeqCst);

            unsafe {
                // Set the bitmap to non-free
                ptr.write_bytes(0xFF, bitmap_size);
            };

            entry.length -= bitmap_size as u64;
            entry.base += bitmap_size as u64;

            break;
        }
    }

    for entry in memmap.iter() {
        if entry.entry_type != limine::memory_map::EntryType::USABLE {
            continue;
        }

        for i in 0..(entry.length as usize / PAGE_SIZE) {
            pmm.bitmap_reset((entry.base as usize + (i * PAGE_SIZE)) / PAGE_SIZE);
        }
    }
}

fn get_pmm<'a>() -> &'a mut PhysicalMemoryManager {
    return unsafe { &mut *core::ptr::addr_of_mut!(PHYSICAL_MEMORY_MANAGER) };
}
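
pmm_init and pmm_inner_alloc below lean on three bitmap helpers (bitmap_test, bitmap_set, bitmap_reset) whose bodies fall outside this hunk. A minimal sketch of their likely shape, reconstructed from the bitmap_reset fragment in the earlier hunk (one bit per page, 1 = used; the byte_index/bit_index split is an assumption):

impl PhysicalMemoryManager {
    fn bitmap_test(&self, page_idx: usize) -> bool {
        // A page index splits into a byte offset and a bit within that byte.
        let (byte_index, bit_index) = (page_idx / 8, page_idx % 8);
        unsafe { (*self.bitmap.load(Ordering::SeqCst).add(byte_index)) & (1 << bit_index) != 0 }
    }

    fn bitmap_set(&self, page_idx: usize) {
        let (byte_index, bit_index) = (page_idx / 8, page_idx % 8);
        unsafe { (*self.bitmap.load(Ordering::SeqCst).add(byte_index)) |= 1 << bit_index };
    }
}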

fn pmm_inner_alloc(pages: usize, limit: usize) -> PhysicalPtr<u8> {
    let pmm = get_pmm();
    let mut p: usize = 0;

    while pmm.last_used_page_idx.load(Ordering::SeqCst) < limit {
        if pmm.bitmap_test(pmm.last_used_page_idx.fetch_add(1, Ordering::SeqCst)) {
            p = 0;
            continue;
        }

        p += 1;
        if p == pages {
            let page = pmm.last_used_page_idx.load(Ordering::SeqCst) - pages;
            for i in page..pmm.last_used_page_idx.load(Ordering::SeqCst) {
                pmm.bitmap_set(i);
            }
            return PhysicalPtr::from(page * PAGE_SIZE);
        }
    }

    // We have hit the search limit, but did not find any suitable memory regions starting from last_used_page_idx
    crate::log!(LogLevel::Fatal, "Out Of Memory!");
    return PhysicalPtr::null_mut();
}

pub fn pmm_alloc_nozero(pages: usize) -> PhysicalPtr<u8> {
    let pmm = get_pmm();

    // Attempt to allocate n pages with a search limit of the highest page index
    let mut page_addr = pmm_inner_alloc(pages, pmm.highest_page_idx.load(Ordering::SeqCst));

    if page_addr.is_null() {
        // If page_addr is null, then attempt to allocate n pages, but starting from
        // the beginning of the bitmap and with a limit of the old last_used_page_idx
        let last = pmm.last_used_page_idx.swap(0, Ordering::SeqCst);
        page_addr = pmm_inner_alloc(pages, last);

        // If page_addr is still null, we have run out of usable memory
        if page_addr.is_null() {
            return PhysicalPtr::null_mut();
        }
    }

    pmm.used_pages.fetch_add(pages, Ordering::SeqCst);

    return page_addr;
}

pub fn pmm_alloc(pages: usize) -> PhysicalPtr<u8> {
    let ret = pmm_alloc_nozero(pages);

    if ret.is_null() {
        return ret;
    }

    unsafe {
        ret.to_higher_half().write_bytes(0x00, pages * PAGE_SIZE);
    };

    return ret;
}

pub fn pmm_dealloc(ptr: PhysicalPtr<u8>, pages: usize) {
    let pmm = get_pmm();
    let page = ptr.addr() as usize / PAGE_SIZE;

    for i in page..(page + pages) {
        pmm.bitmap_reset(i);
    }

    pmm.used_pages.fetch_sub(pages, Ordering::SeqCst);
}

pub fn total_memory() -> usize {
    let pmm = get_pmm();
    return pmm.usable_pages.load(Ordering::SeqCst) * 4096;
}

pub fn usable_memory() -> usize {
    let pmm = get_pmm();

    return (pmm.usable_pages.load(Ordering::SeqCst) * 4096)
        - (pmm.used_pages.load(Ordering::SeqCst) * 4096);
}

pub fn used_memory() -> usize {
    let pmm = get_pmm();

    return pmm.used_pages.load(Ordering::SeqCst) * 4096;
}
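
Putting the new free-function API together (hypothetical caller, not part of the commit; it assumes pmm_init has already run):

// Grab one zeroed page, touch it through the higher-half map, then return it.
let phys = pmm_alloc(1);
assert!(!phys.is_null(), "out of physical memory");

unsafe {
    // Physical memory is only directly addressable through the HHDM.
    phys.to_higher_half().write(0xAA);
}

pmm_dealloc(phys, 1);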

488
src/mem/vmm.rs
Normal file
@@ -0,0 +1,488 @@
use core::arch::x86_64::__cpuid;

use limine::memory_map::EntryType;

use crate::{
    hcf,
    libs::{
        cell::OnceCell,
        limine::{get_hhdm_offset, get_kernel_address, get_memmap, get_paging_level},
    },
};

use super::{align_down, align_up, pmm::pmm_alloc, PhysicalPtr};

const PT_FLAG_VALID: u64 = 1 << 0;
const PT_FLAG_WRITE: u64 = 1 << 1;
const PT_FLAG_USER: u64 = 1 << 2;
const PT_FLAG_LARGE: u64 = 1 << 7;
const PT_FLAG_NX: u64 = 1 << 63;
const PT_PADDR_MASK: u64 = 0x000F_FFFF_FFFF_FFFF;

const PT_TABLE_FLAGS: u64 = PT_FLAG_VALID | PT_FLAG_WRITE | PT_FLAG_USER;

// I know it's literally 8 bytes, but... fight me
#[derive(Clone)]
#[repr(transparent)]
pub struct PageTableEntry(u64);

impl PageTableEntry {
    pub fn new(addr: u64, flags: u64) -> Self {
        Self(addr | flags)
    }

    pub fn addr(&self) -> u64 {
        self.0 & PT_PADDR_MASK
    }

    // TODO: probably a more elegant way to do this
    pub fn get_field(&self, field: Field) -> u64 {
        match field {
            Field::Present => (self.0 >> 0) & 1,
            Field::ReadWrite => (self.0 >> 1) & 1,
            Field::UserSupervisor => (self.0 >> 2) & 1,
            Field::WriteThrough => (self.0 >> 3) & 1,
            Field::CacheDisable => (self.0 >> 4) & 1,
            Field::Accessed => (self.0 >> 5) & 1,
            Field::Avl0 => (self.0 >> 6) & 1,
            Field::PageSize => (self.0 >> 7) & 1,
            Field::Avl1 => (self.0 >> 8) & 0xF,
            Field::Addr => (self.0 >> 12) & 0x000F_FFFF_FFFF_FFFF,
            Field::Nx => (self.0 >> 63) & 1,
        }
    }

    pub fn set_field(&mut self, field: Field, value: u64) {
        let mask = match field {
            Field::Present => 1 << 0,
            Field::ReadWrite => 1 << 1,
            Field::UserSupervisor => 1 << 2,
            Field::WriteThrough => 1 << 3,
            Field::CacheDisable => 1 << 4,
            Field::Accessed => 1 << 5,
            Field::Avl0 => 1 << 6,
            Field::PageSize => 1 << 7,
            Field::Avl1 => 0xF << 8,
            Field::Addr => 0x000F_FFFF_FFFF_FFFF << 12,
            Field::Nx => 1 << 63,
        };
        let shift = match field {
            Field::Present => 0,
            Field::ReadWrite => 1,
            Field::UserSupervisor => 2,
            Field::WriteThrough => 3,
            Field::CacheDisable => 4,
            Field::Accessed => 5,
            Field::Avl0 => 6,
            Field::PageSize => 7,
            Field::Avl1 => 8,
            Field::Addr => 12,
            Field::Nx => 63,
        };

        self.0 = (self.0 & !mask) | ((value << shift) & mask);
    }

    fn is_table(&self) -> bool {
        (self.0 & (PT_FLAG_VALID | PT_FLAG_LARGE)) == PT_FLAG_VALID
    }

    fn is_large(&self) -> bool {
        (self.0 & (PT_FLAG_VALID | PT_FLAG_LARGE)) == (PT_FLAG_VALID | PT_FLAG_LARGE)
    }

    fn vmm_flags(&self) -> u64 {
        self.0 & (PT_FLAG_WRITE | PT_FLAG_NX)
    }
}
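
The getter/setter pair makes bit manipulation on an entry explicit; for example (illustrative values only):

// Build a present, writable entry for the frame at physical 0x5000,
// then flip it to read-only via set_field.
let mut entry = PageTableEntry::new(0x5000, PT_FLAG_VALID | PT_FLAG_WRITE);
assert_eq!(entry.get_field(Field::Present), 1);
assert_eq!(entry.get_field(Field::ReadWrite), 1);

entry.set_field(Field::ReadWrite, 0);
assert_eq!(entry.get_field(Field::ReadWrite), 0);
assert_eq!(entry.get_field(Field::Addr), 0x5); // the frame number, 0x5000 >> 12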

#[derive(Debug, Clone, Copy)]
pub enum Field {
    Present,
    ReadWrite,
    UserSupervisor,
    WriteThrough,
    CacheDisable,
    Accessed,
    Avl0,
    PageSize,
    Avl1,
    Addr,
    Nx,
}

#[repr(align(0x1000))]
#[repr(C)]
pub struct PageDirectory {
    entries: [PageTableEntry; 512],
}

impl PageDirectory {
    pub fn get_mut_ptr(&mut self) -> *mut Self {
        return core::ptr::addr_of_mut!(*self);
    }
}

pub static PAGE_SIZES: [u64; 5] = [0x1000, 0x200000, 0x40000000, 0x8000000000, 0x1000000000000];

pub static mut KERNEL_PAGE_DIRECTORY: *mut PageDirectory = core::ptr::null_mut();

pub fn vmm_init() {
    let page_directory = unsafe {
        &mut *(pmm_alloc(1)
            .to_higher_half()
            .cast::<PageDirectory>()
            .as_raw_ptr())
    };

    unsafe { KERNEL_PAGE_DIRECTORY = page_directory as *mut PageDirectory };

    let mut i = 0;
    for entry in get_memmap() {
        if entry.entry_type != EntryType::KERNEL_AND_MODULES {
            continue;
        }

        let kernel_addr = get_kernel_address();

        let base = kernel_addr.physical_base();
        let length = entry.length;
        let top = base + length;

        let aligned_base = align_down(base as usize, 0x40000000);
        let aligned_top = align_up(top as usize, 0x40000000);
        let aligned_length = aligned_top - aligned_base;

        while i <= aligned_length {
            let page = aligned_base + i;

            crate::println!(
                "Mapping the kernel from {:X} to {:X}",
                page,
                kernel_addr.virtual_base()
            );

            vmm_map(
                page_directory,
                page + kernel_addr.virtual_base() as usize,
                page as usize,
                0x02,
                PageSize::Size1GiB,
            );
            i += 0x40000000;
        }
    }

    while i <= 0x100000000 {
        // vmm_map(page_directory, i, i, 0x03, PageSize::Size4KiB);
        vmm_map(
            page_directory,
            i + get_hhdm_offset(),
            i,
            0x02,
            PageSize::Size1GiB,
        );

        i += 0x40000000;
    }

    for entry in get_memmap() {
        if entry.entry_type == EntryType::RESERVED || entry.entry_type == EntryType::BAD_MEMORY {
            continue;
        }

        let mut base = entry.base;
        let length = entry.length;
        let top = base + length;

        if base < 0x100000000 {
            base = 0x100000000;
        }

        if base >= top {
            continue;
        }

        let aligned_base = align_down(base as usize, 0x40000000);
        let aligned_top = align_up(top as usize, 0x40000000);
        let aligned_length = aligned_top - aligned_base;

        i = 0;
        while i < aligned_length {
            let page = aligned_base + i;

            // vmm_map(
            //     page_directory,
            //     i + entry.base as usize,
            //     i + entry.base as usize,
            //     0x02,
            //     PageSize::Size4KiB,
            // );
            vmm_map(
                page_directory,
                page + get_hhdm_offset(),
                page,
                0x02,
                PageSize::Size1GiB,
            );

            i += 0x40000000;
        }
    }

    for entry in get_memmap() {
        if entry.entry_type != EntryType::FRAMEBUFFER {
            continue;
        }

        let base = entry.base;
        let length = entry.length;
        let top = base + length;

        let aligned_base = align_down(base as usize, 0x1000);
        let aligned_top = align_up(top as usize, 0x1000);
        let aligned_length = aligned_top - aligned_base;

        while i < aligned_length {
            let page = aligned_base + i;
            vmm_map(
                page_directory,
                page + get_hhdm_offset(),
                page,
                0x02 | 1 << 3,
                PageSize::Size4KiB,
            );

            i += 0x1000;
        }
    }

    unsafe { va_space_switch(page_directory) };
}
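
For reference, a one-off mapping outside vmm_init would look like this (hypothetical call, addresses made up; vmm_map ORs in the present bit itself):

// Map one writable 4 KiB page: virtual 0xFFFF_9000_0000_0000 -> physical 0xB8000.
let pd = get_kernel_pdpt();
vmm_map(pd, 0xFFFF_9000_0000_0000, 0xB8000, 0x02, PageSize::Size4KiB);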

pub fn get_kernel_pdpt() -> &'static mut PageDirectory {
    return unsafe { &mut *KERNEL_PAGE_DIRECTORY };
}

#[derive(Clone, Copy, Eq, PartialEq)]
pub enum PageSize {
    Size4KiB = 0,
    Size2MiB,
    Size1GiB,
}

pub fn vmm_map(
    page_directory: &mut PageDirectory,
    virtual_addr: usize,
    physical_addr: usize,
    mut flags: u64,
    page_size: PageSize,
) {
    let pml5_entry: usize = (virtual_addr & ((0x1ff as u64) << 48) as usize) >> 48;
    let pml4_entry: usize = (virtual_addr & ((0x1ff as u64) << 39) as usize) >> 39;
    let pml3_entry: usize = (virtual_addr & ((0x1ff as u64) << 30) as usize) >> 30;
    let pml2_entry: usize = (virtual_addr & ((0x1ff as u64) << 21) as usize) >> 21;
    let pml1_entry: usize = (virtual_addr & ((0x1ff as u64) << 12) as usize) >> 12;

    let (pml5, pml4, pml3, pml2, pml1): (
        &mut PageDirectory,
        &mut PageDirectory,
        &mut PageDirectory,
        &mut PageDirectory,
        &mut PageDirectory,
    );

    flags |= 0x01;

    match get_paging_level() {
        limine::paging::Mode::FIVE_LEVEL => {
            pml5 = page_directory;
            pml4 = unsafe {
                let ptr = get_next_level(pml5, virtual_addr, page_size, 4, pml5_entry);
                &mut *ptr.to_higher_half().as_raw_ptr()
            };
        }
        limine::paging::Mode::FOUR_LEVEL => {
            pml4 = page_directory;
        }
        _ => unreachable!(),
    }

    pml3 = unsafe {
        let ptr = get_next_level(pml4, virtual_addr, page_size, 3, pml4_entry);
        &mut *ptr.to_higher_half().as_raw_ptr()
    };

    if page_size == PageSize::Size1GiB {
        if is_1gib_page_supported() {
            pml3.entries[pml3_entry] = PageTableEntry(physical_addr as u64 | flags | PT_FLAG_LARGE);
        } else {
            let mut i = 0;
            while i < 0x40000000 {
                vmm_map(
                    page_directory,
                    virtual_addr + i,
                    physical_addr + i,
                    flags,
                    PageSize::Size2MiB,
                );

                i += 0x200000;
            }
        }

        return;
    }

    pml2 = unsafe {
        let ptr = get_next_level(pml3, virtual_addr, page_size, 2, pml3_entry);
        &mut *ptr.to_higher_half().as_raw_ptr()
    };

    if page_size == PageSize::Size2MiB {
        pml2.entries[pml2_entry] = PageTableEntry(physical_addr as u64 | flags | PT_FLAG_LARGE);
        return;
    }

    pml1 = unsafe {
        let ptr = get_next_level(pml2, virtual_addr, page_size, 1, pml2_entry);
        &mut *ptr.to_higher_half().as_raw_ptr()
    };

    if (flags & (1 << 12)) != 0 {
        flags &= !(1 << 12);
        flags |= 1 << 7;
    }

    pml1.entries[pml1_entry] = PageTableEntry(physical_addr as u64 | flags);
}
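
Each 9-bit slice of the virtual address selects one of the 512 entries at that level; a worked example (illustrative address):

// For virtual_addr = 0xFFFF_8000_0000_0000 (a typical HHDM base):
//   pml4_entry = (va >> 39) & 0x1ff = 256 (the first higher-half PML4 slot)
//   pml3_entry = (va >> 30) & 0x1ff = 0
//   pml2_entry = (va >> 21) & 0x1ff = 0
//   pml1_entry = (va >> 12) & 0x1ff = 0
let va: usize = 0xFFFF_8000_0000_0000;
assert_eq!((va >> 39) & 0x1ff, 256);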

fn get_next_level(
    page_directory: &mut PageDirectory,
    virtual_addr: usize,
    desired_size: PageSize,
    level: usize,
    entry: usize,
) -> PhysicalPtr<PageDirectory> {
    let ret: PhysicalPtr<PageDirectory>;

    if page_directory.entries[entry].is_table() {
        ret = PhysicalPtr::from(page_directory.entries[entry].addr() as usize);
    } else {
        if page_directory.entries[entry].is_large() {
            // We are replacing an existing large page with a smaller page

            if (level >= 3) || (level == 0) {
                panic!("Unexpected level!");
            }
            if desired_size as usize >= 3 {
                panic!("Unexpected page size!");
            }

            let old_page_size = PAGE_SIZES[level];
            let new_page_size = PAGE_SIZES[desired_size as usize];

            crate::println!("OLD {old_page_size:X} NEW {new_page_size:X}");

            // ((x) & (PT_FLAG_WRITE | PT_FLAG_NX))
            let old_flags = page_directory.entries[entry].vmm_flags();
            let old_phys = page_directory.entries[entry].addr();
            let old_virt = virtual_addr as u64 & !(old_page_size - 1);

            if (old_phys & (old_page_size - 1)) != 0 {
                panic!(
                    "Unexpected page table entry address! {:X} {:X}",
                    old_phys, old_page_size
                );
            }

            ret = pmm_alloc(1).cast::<PageDirectory>();
            page_directory.entries[entry] = PageTableEntry::new(ret.addr() as u64, PT_TABLE_FLAGS);

            let mut i: usize = 0;
            while i < old_page_size as usize {
                vmm_map(
                    page_directory,
                    (old_virt as usize) + i,
                    (old_phys as usize) + i,
                    old_flags,
                    desired_size,
                );

                i += new_page_size as usize;
            }
        } else {
            ret = pmm_alloc(1).cast::<PageDirectory>();
            page_directory.entries[entry] = PageTableEntry::new(ret.addr() as u64, PT_TABLE_FLAGS);
        }
    }

    return ret;
}

static IS_1GIB_SUPPORTED: OnceCell<bool> = OnceCell::new();

fn is_1gib_page_supported() -> bool {
    if let Err(()) = IS_1GIB_SUPPORTED.get() {
        // CPUID leaf 0x80000001: EDX bit 26 is the "pdpe1gb" flag, i.e. 1 GiB page support
        let cpuid = unsafe { __cpuid(0x80000001) };

        if (cpuid.edx & (1 << 26)) == (1 << 26) {
            IS_1GIB_SUPPORTED.set(true);
            crate::println!("1GiB is supported!");
        } else {
            IS_1GIB_SUPPORTED.set(false);
            crate::println!("1GiB is not supported!");
        }
    }

    return *IS_1GIB_SUPPORTED.get_unchecked();
}

/// Loads a new page directory and switches the Virtual Address Space
///
/// # Safety
///
/// If the memory space has not been remapped to the HHDM before switching, this will cause Undefined Behavior.
unsafe fn va_space_switch(page_directory: &mut PageDirectory) {
    let hhdm_offset = get_hhdm_offset();
    let kernel_virtual_base = get_kernel_address().virtual_base();

    // cast so we can do easy math
    let mut pd_ptr = page_directory.get_mut_ptr().cast::<u8>();

    if pd_ptr as usize > kernel_virtual_base as usize {
        pd_ptr = pd_ptr.sub(kernel_virtual_base as usize);
    } else if pd_ptr as usize > hhdm_offset {
        pd_ptr = pd_ptr.sub(hhdm_offset);
    }

    crate::println!("SWITCHING VA SPACE {pd_ptr:p}");
    crate::println!("HHDM_OFFSET: {hhdm_offset:#x}");
    crate::println!("KERNEL_VIRTUAL_BASE: {kernel_virtual_base:#x}");
    crate::println!("Page directory virtual address: {pd_ptr:p}");

    assert_eq!(
        pd_ptr as usize % 0x1000,
        0,
        "Page directory pointer is not aligned"
    );

    let mut cr3: u64 = 0;
    unsafe { core::arch::asm!("mov rax, cr3", out("rax") cr3) };

    crate::println!("{cr3:X}");

    // hcf();

    unsafe { core::arch::asm!("mov cr3, {0:r}", in(reg) pd_ptr) };
    // test(pd_ptr);

    crate::println!("waa");
}

// Alternative CR3 switch kept for debugging; see the commented-out `test(pd_ptr)` call above.
#[naked]
pub extern "C" fn test(ptr: *mut u8) {
    unsafe {
        core::arch::asm!("mov cr3, rdi", "ret", options(noreturn));
    }
}