small various changes

Zoe
2024-08-28 00:37:27 -05:00
parent f93484bf81
commit f5a78801a5
20 changed files with 330 additions and 325 deletions

View File

@@ -144,6 +144,32 @@ And mostly for examples of how people did stuff I used these (projects made by p
 - [mOS](https://github.com/Moldytzu/mOS)
 - [rust_os](https://github.com/thepowersgang/rust_os/tree/master)
 - [Lyre](https://github.com/Lyre-OS/klyre)
+- [Limine](https://github.com/limine-bootloader/limine), as my paging implementation is largely a Rust translation of Limine's:
+
+```
+Copyright (C) 2019-2024 mintsuki and contributors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+```
 
 ## License

View File

@@ -1,5 +1,5 @@
 {
-    "data-layout": "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128",
+    "data-layout": "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32",
     "llvm-target": "aarch64-unknown-none",
     "target-endian": "little",
     "target-pointer-width": "64",

View File

@@ -1,3 +1,5 @@
+use crate::mem::VirtualPtr;
+
 #[inline(always)]
 pub fn outb(port: u16, value: u8) {
     return;

View File

@@ -1 +1,2 @@
 pub mod io;
+pub mod paging;

View File

@@ -0,0 +1 @@
+
View File

@@ -1,3 +1,5 @@
+use crate::mem::VirtualPtr;
+
 #[inline(always)]
 pub fn outb(port: u16, value: u8) {
     return;

View File

@@ -1 +1,2 @@
 pub mod io;
+pub mod paging;

View File

@@ -0,0 +1 @@
+
View File

@@ -187,9 +187,7 @@ impl APIC {
         let smp_request = crate::libs::limine::get_smp();
 
-        if smp_request.is_none() {
-            panic!("Failed to get smp from limine!");
-        }
+        assert!(smp_request.is_some(), "Failed to get smp from limine!");
 
         let smp_request = smp_request.unwrap();
 
         let bsp_lapic_id = smp_request.bsp_lapic_id();
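
Most of this commit is one mechanical refactor, seen here first: a manual `is_none()` check that panicked becomes a one-line `assert!`. A minimal standalone sketch of the before/after (the `get_smp` stub below is hypothetical, standing in for `crate::libs::limine::get_smp`):

```rust
fn get_smp() -> Option<u32> { Some(0) } // hypothetical stand-in

fn before() -> u32 {
    let smp = get_smp();
    if smp.is_none() {
        panic!("Failed to get smp from limine!");
    }
    smp.unwrap()
}

fn after() -> u32 {
    let smp = get_smp();
    assert!(smp.is_some(), "Failed to get smp from limine!");
    // `expect` would fold the assert and the unwrap into one call:
    //     get_smp().expect("Failed to get smp from limine!")
    smp.unwrap()
}
```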

View File

@@ -130,7 +130,7 @@ pub extern "C" fn syscall() {
 }
 
 pub extern "C" fn syscall_handler(_rdi: u64, _rsi: u64, rdx: u64, rcx: u64) {
-    let buf: VirtualPtr<u8> = unsafe { VirtualPtr::from(rdx as usize) }; // Treat as pointer to u8 (byte array)
+    let buf: VirtualPtr<u8> = VirtualPtr::from(rdx as usize); // Treat as pointer to u8 (byte array)
     let count = rcx as usize;
 
     let slice = unsafe { core::slice::from_raw_parts(buf.as_raw_ptr(), count) };
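
The handler turns a raw `(pointer, length)` pair from registers into a byte slice. A hedged sketch of the same pattern without the kernel's `VirtualPtr` wrapper; note that `from_raw_parts` is undefined behavior unless the range has been validated first:

```rust
/// Sketch only: a real kernel must validate `addr..addr + len` against the
/// caller's address space before dereferencing.
unsafe fn user_bytes<'a>(addr: usize, len: usize) -> &'a [u8] {
    core::slice::from_raw_parts(addr as *const u8, len)
}
```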

View File

@@ -1,6 +1,7 @@
 pub mod gdt;
 pub mod interrupts;
 pub mod io;
+pub mod paging;
 pub mod stack_trace;
 
 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]

src/arch/x86_64/paging.rs (new file, 253 lines)
View File

@@ -0,0 +1,253 @@
+use core::arch::x86_64::__cpuid;
+
+use crate::{
+    libs::{
+        cell::OnceCell,
+        limine::{get_hhdm_offset, get_kernel_address, get_paging_level},
+    },
+    mem::vmm::get_next_level,
+    LogLevel,
+};
+
+const PT_FLAG_VALID: u64 = 1 << 0;
+const PT_FLAG_WRITE: u64 = 1 << 1;
+const PT_FLAG_USER: u64 = 1 << 2;
+const PT_FLAG_LARGE: u64 = 1 << 7;
+const PT_FLAG_NX: u64 = 1 << 63;
+const PT_PADDR_MASK: u64 = 0x0000_FFFF_FFFF_F000;
+
+pub const PT_TABLE_FLAGS: u64 = PT_FLAG_VALID | PT_FLAG_WRITE | PT_FLAG_USER;
+
+// I know it's literally 8 bytes, but... fight me
+#[derive(Clone)]
+#[repr(transparent)]
+pub struct PageTableEntry(u64);
+
+impl PageTableEntry {
+    pub fn new(addr: u64, flags: u64) -> Self {
+        Self(addr | flags)
+    }
+
+    pub fn addr(&self) -> u64 {
+        self.0 & PT_PADDR_MASK
+    }
+
+    // TODO: probably a more elegant way to do this
+    pub fn get_field(&self, field: Field) -> u64 {
+        match field {
+            Field::Present => (self.0 >> 0) & 1,
+            Field::ReadWrite => (self.0 >> 1) & 1,
+            Field::UserSupervisor => (self.0 >> 2) & 1,
+            Field::WriteThrough => (self.0 >> 3) & 1,
+            Field::CacheDisable => (self.0 >> 4) & 1,
+            Field::Accessed => (self.0 >> 5) & 1,
+            Field::Avl0 => (self.0 >> 6) & 1,
+            Field::PageSize => (self.0 >> 7) & 1,
+            Field::Avl1 => (self.0 >> 8) & 0xF,
+            Field::Addr => (self.0 >> 12) & 0x000F_FFFF_FFFF_FFFF,
+            Field::Nx => (self.0 >> 63) & 1,
+        }
+    }
+
+    pub fn set_field(&mut self, field: Field, value: u64) {
+        let mask = match field {
+            Field::Present => 1 << 0,
+            Field::ReadWrite => 1 << 1,
+            Field::UserSupervisor => 1 << 2,
+            Field::WriteThrough => 1 << 3,
+            Field::CacheDisable => 1 << 4,
+            Field::Accessed => 1 << 5,
+            Field::Avl0 => 1 << 6,
+            Field::PageSize => 1 << 7,
+            Field::Avl1 => 0xF << 8,
+            Field::Addr => 0x000F_FFFF_FFFF_FFFF << 12,
+            Field::Nx => 1 << 63,
+        };
+        let shift = match field {
+            Field::Present => 0,
+            Field::ReadWrite => 1,
+            Field::UserSupervisor => 2,
+            Field::WriteThrough => 3,
+            Field::CacheDisable => 4,
+            Field::Accessed => 5,
+            Field::Avl0 => 6,
+            Field::PageSize => 7,
+            Field::Avl1 => 8,
+            Field::Addr => 12,
+            Field::Nx => 63,
+        };
+        self.0 = (self.0 & !mask) | ((value << shift) & mask);
+    }
+
+    pub fn is_table(&self) -> bool {
+        (self.0 & (PT_FLAG_VALID | PT_FLAG_LARGE)) == PT_FLAG_VALID
+    }
+
+    pub fn is_large(&self) -> bool {
+        (self.0 & (PT_FLAG_VALID | PT_FLAG_LARGE)) == (PT_FLAG_VALID | PT_FLAG_LARGE)
+    }
+
+    pub fn vmm_flags(&self) -> u64 {
+        self.0 & (PT_FLAG_WRITE | PT_FLAG_NX)
+    }
+}
+
+#[derive(Debug, Clone, Copy)]
+pub enum Field {
+    Present,
+    ReadWrite,
+    UserSupervisor,
+    WriteThrough,
+    CacheDisable,
+    Accessed,
+    Avl0,
+    PageSize,
+    Avl1,
+    Addr,
+    Nx,
+}
+
+#[repr(align(0x1000))]
+#[repr(C)]
+pub struct PageDirectory {
+    pub entries: [PageTableEntry; 512],
+}
+
+impl PageDirectory {
+    pub fn get_mut_ptr(&mut self) -> *mut Self {
+        return core::ptr::addr_of_mut!(*self);
+    }
+}
+
+#[derive(Clone, Copy, Eq, PartialEq)]
+pub enum PageSize {
+    Size4KiB = 0,
+    Size2MiB,
+    Size1GiB,
+}
+pub fn vmm_map(
+    page_directory: &mut PageDirectory,
+    virtual_addr: usize,
+    physical_addr: usize,
+    mut flags: u64,
+    page_size: PageSize,
+) {
+    let pml5_entry: usize = (virtual_addr & ((0x1ff as u64) << 48) as usize) >> 48;
+    let pml4_entry: usize = (virtual_addr & ((0x1ff as u64) << 39) as usize) >> 39;
+    let pml3_entry: usize = (virtual_addr & ((0x1ff as u64) << 30) as usize) >> 30;
+    let pml2_entry: usize = (virtual_addr & ((0x1ff as u64) << 21) as usize) >> 21;
+    let pml1_entry: usize = (virtual_addr & ((0x1ff as u64) << 12) as usize) >> 12;
+
+    let (pml5, pml4, pml3, pml2, pml1): (
+        &mut PageDirectory,
+        &mut PageDirectory,
+        &mut PageDirectory,
+        &mut PageDirectory,
+        &mut PageDirectory,
+    );
+
+    flags |= 0x01;
+
+    match get_paging_level() {
+        limine::paging::Mode::FIVE_LEVEL => {
+            pml5 = page_directory;
+            pml4 = unsafe {
+                let ptr = get_next_level(pml5, virtual_addr, page_size, 4, pml5_entry);
+                &mut *ptr.to_higher_half().as_raw_ptr()
+            };
+        }
+        limine::paging::Mode::FOUR_LEVEL => {
+            pml4 = page_directory;
+        }
+        _ => unreachable!(),
+    }
+
+    pml3 = unsafe {
+        let ptr = get_next_level(pml4, virtual_addr, page_size, 3, pml4_entry);
+        &mut *ptr.to_higher_half().as_raw_ptr()
+    };
+
+    if page_size == PageSize::Size1GiB {
+        if is_1gib_page_supported() {
+            pml3.entries[pml3_entry] = PageTableEntry(physical_addr as u64 | flags | PT_FLAG_LARGE);
+        } else {
+            let mut i = 0;
+            while i < 0x40000000 {
+                vmm_map(
+                    page_directory,
+                    virtual_addr + i,
+                    physical_addr + i,
+                    flags,
+                    PageSize::Size2MiB,
+                );
+                i += 0x200000;
+            }
+        }
+        return;
+    }
+
+    pml2 = unsafe {
+        let ptr = get_next_level(pml3, virtual_addr, page_size, 2, pml3_entry);
+        &mut *ptr.to_higher_half().as_raw_ptr()
+    };
+
+    if page_size == PageSize::Size2MiB {
+        pml2.entries[pml2_entry] = PageTableEntry(physical_addr as u64 | flags | PT_FLAG_LARGE);
+        return;
+    }
+
+    pml1 = unsafe {
+        let ptr = get_next_level(pml2, virtual_addr, page_size, 1, pml2_entry);
+        &mut *ptr.to_higher_half().as_raw_ptr()
+    };
+
+    if (flags & (1 << 12)) != 0 {
+        flags &= !(1 << 12);
+        flags |= 1 << 7;
+    }
+
+    pml1.entries[pml1_entry] = PageTableEntry(physical_addr as u64 | flags);
+}
+static IS_1GIB_SUPPORTED: OnceCell<bool> = OnceCell::new();
+
+fn is_1gib_page_supported() -> bool {
+    if let Err(()) = IS_1GIB_SUPPORTED.get() {
+        let cpuid = unsafe { __cpuid(0x80000001) };
+
+        if (cpuid.edx & (1 << 26)) == (1 << 26) {
+            IS_1GIB_SUPPORTED.set(true);
+            crate::log!(LogLevel::Debug, "1GiB pages are supported!");
+        } else {
+            IS_1GIB_SUPPORTED.set(false);
+            crate::log!(LogLevel::Debug, "1GiB pages are not supported!");
+        }
+    }
+
+    return *IS_1GIB_SUPPORTED.get_unchecked();
+}
+
+/// Loads a new page directory and switches the virtual address space.
+///
+/// # Safety
+///
+/// If the memory space has not been remapped to the HHDM before switching, this will cause Undefined Behavior.
+pub unsafe fn va_space_switch(page_directory: &mut PageDirectory) {
+    let hhdm_offset = get_hhdm_offset();
+    let kernel_virtual_base = get_kernel_address().virtual_base();
+
+    // cast so we can do easy math
+    let mut pd_ptr = page_directory.get_mut_ptr().cast::<u8>();
+
+    if pd_ptr as usize > kernel_virtual_base as usize {
+        pd_ptr = pd_ptr.sub(kernel_virtual_base as usize);
+    } else if pd_ptr as usize > hhdm_offset {
+        pd_ptr = pd_ptr.sub(hhdm_offset);
+    }
+
+    unsafe { core::arch::asm!("mov cr3, {0:r}", in(reg) pd_ptr) };
+}
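
Taken together, a hedged usage sketch of the API this file adds (nothing below is from the commit; the import path and the `WRITE`/`NX` constants are assumptions mirroring the private `PT_FLAG_*` bits above):

```rust
// Hypothetical import path; the diff only shows that the module lives at
// crate::arch::paging.
use crate::arch::paging::{
    va_space_switch, vmm_map, Field, PageDirectory, PageSize, PageTableEntry, PT_TABLE_FLAGS,
};

const WRITE: u64 = 1 << 1; // mirrors the private PT_FLAG_WRITE
const NX: u64 = 1 << 63; // mirrors the private PT_FLAG_NX

fn demo(pd: &mut PageDirectory) {
    // Field accessors on a fresh entry.
    let mut e = PageTableEntry::new(0x1000, PT_TABLE_FLAGS);
    e.set_field(Field::Nx, 1);
    debug_assert_eq!(e.get_field(Field::Present), 1);
    debug_assert_eq!(e.addr(), 0x1000);

    // Map one writable, non-executable 4 KiB page; vmm_map ORs in the valid
    // bit itself (flags |= 0x01). The bit-12/bit-7 shuffle at the end of
    // vmm_map exists because PAT sits at bit 7 in 4 KiB PTEs but at bit 12
    // in 2 MiB / 1 GiB entries, so a caller's bit-12 PAT flag is relocated
    // on the 4 KiB path.
    vmm_map(pd, 0xffff_8000_0000_0000, 0x20_0000, WRITE | NX, PageSize::Size4KiB);

    // Safety: per the doc comment, the directory must already be reachable
    // through the HHDM before the CR3 switch.
    unsafe { va_space_switch(pd) };
}
```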

View File

@@ -162,9 +162,7 @@ static ACPI: OnceCell<ACPI> = OnceCell::new();
 fn resolve_acpi() {
     let rsdp_ptr = crate::libs::limine::get_rdsp_ptr();
 
-    if rsdp_ptr.is_none() {
-        panic!("RSDP not found!");
-    }
+    assert!(rsdp_ptr.is_some(), "RSDP not found!");
 
     let rsdp = unsafe { &*rsdp_ptr.unwrap().cast::<RSDP>() };

View File

@@ -10,16 +10,13 @@ use super::vfs::{FsOps, VNode, VNodeOperations, VNodeType};
 pub fn init() -> Squashfs<'static> {
     let initramfs = crate::libs::limine::get_module("initramfs.img");
 
-    if initramfs.is_none() {
-        panic!("Initramfs was not found!");
-    }
+    assert!(initramfs.is_some(), "initramfs was not found!");
 
     let initramfs = initramfs.unwrap();
 
     let squashfs = Squashfs::new(initramfs.addr());
 
-    if squashfs.is_err() {
-        panic!("Initramfs in corrupt!");
-    }
+    assert!(squashfs.is_ok(), "Initramfs is corrupt!");
 
     let squashfs = squashfs.unwrap();

View File

@@ -86,9 +86,7 @@ impl Vfs {
     }
 
     pub fn mount(&mut self, path: &str) {
-        if self.fs.is_none() {
-            panic!("FsOps is null");
-        }
+        assert!(self.fs.is_some(), "FsOps is null!");
 
         let vfsp = self.as_ptr();
@@ -100,9 +98,7 @@ impl Vfs {
     }
 
     pub fn unmount(&mut self) {
-        if self.fs.is_none() {
-            panic!("FsOps is null");
-        }
+        assert!(self.fs.is_some(), "FsOps is null!");
 
         let vfsp = self.as_ptr();
@@ -110,9 +106,7 @@ impl Vfs {
     }
 
     pub fn root(&mut self) -> VNode {
-        if self.fs.is_none() {
-            panic!("FsOps is null");
-        }
+        assert!(self.fs.is_some(), "FsOps is null!");
 
         let vfsp = self.as_ptr();
@@ -120,9 +114,7 @@ impl Vfs {
     }
 
     pub fn statfs(&mut self) -> StatFs {
-        if self.fs.is_none() {
-            panic!("FsOps is null");
-        }
+        assert!(self.fs.is_some(), "FsOps is null!");
 
         let vfsp = self.as_ptr();
@@ -130,9 +122,7 @@ impl Vfs {
     }
 
     pub fn sync(&mut self) {
-        if self.fs.is_none() {
-            panic!("FsOps is null");
-        }
+        assert!(self.fs.is_some(), "FsOps is null!");
 
         let vfsp = self.as_ptr();
@@ -140,9 +130,7 @@ impl Vfs {
     }
 
     pub fn fid(&mut self, path: &str) -> Option<FileId> {
-        if self.fs.is_none() {
-            panic!("FsOps is null");
-        }
+        assert!(self.fs.is_some(), "FsOps is null!");
 
         let vfsp = self.as_ptr();
@@ -150,9 +138,7 @@ impl Vfs {
     }
 
     pub fn vget(&mut self, fid: FileId) -> VNode {
-        if self.fs.is_none() {
-            panic!("FsOps is null");
-        }
+        assert!(self.fs.is_some(), "FsOps is null!");
 
         let vfsp = self.as_ptr();
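
Every method above repeats the same guard before touching `self.fs`. A hedged refactoring sketch, assuming `fs` is an `Option` of some `FsOps` handle (the field's exact type is not shown in this diff, so the shape below is a stand-in):

```rust
struct Vfs<F> {
    fs: Option<F>, // stand-in shape, not the repo's definition
}

impl<F> Vfs<F> {
    // Hypothetical helper: one guard instead of an assert per method.
    fn fs_mut(&mut self) -> &mut F {
        self.fs.as_mut().expect("FsOps is null!")
    }
}
```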

View File

@@ -588,15 +588,14 @@ fn ide_initialize(bar0: u32, bar1: u32, _bar2: u32, _bar3: u32, _bar4: u32) {
     let mbr_sector: MBR = (*drive.read(0, 1).expect("Failed to read first sector")).into();
 
-    if u16::from_le_bytes(mbr_sector.signature) != 0xAA55 {
-        panic!("MBR is corrupted!");
-    }
+    assert_eq!(u16::from_le_bytes(mbr_sector.signature), 0xAA55);
 
     let mbr_partitions = mbr_sector.partitions();
 
-    if mbr_partitions[0].partition_type != 0xEE {
-        panic!("MBR disks are unsupported")
-    }
+    assert_eq!(
+        mbr_partitions[0].partition_type, 0xEE,
+        "MBR disks are unsupported!"
+    );
 
     let gpt_sector = drive.read(1, 1).expect("Failed to read sector 2");
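
The two asserts encode the GPT convention: a valid boot signature plus a protective partition of type `0xEE`. A self-contained sketch of the same checks on a raw 512-byte sector (offsets come from the standard MBR layout, not from this repo):

```rust
fn is_protective_mbr(sector: &[u8; 512]) -> bool {
    // The 0xAA55 boot signature lives in the last two bytes, little-endian.
    let signature = u16::from_le_bytes([sector[510], sector[511]]);
    // The partition table starts at offset 446; each entry is 16 bytes with
    // its type byte at offset 4, so 0xEE here marks a GPT protective MBR.
    signature == 0xAA55 && sector[446 + 4] == 0xEE
}
```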

View File

@@ -144,9 +144,11 @@ impl InflateContext {
     pub fn get_bit(&mut self) -> bool {
         if self.bit_index == 8 {
             self.input_buf.remove(0);
 
-            if self.input_buf.is_empty() {
-                panic!("Not enough data! {:X?}", self.output_buf);
-            }
+            assert!(
+                !self.input_buf.is_empty(),
+                "Not enough data! {:X?}",
+                self.output_buf
+            );
 
             self.bit_index = 0;
         }
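
For context, `get_bit` is an LSB-first bit reader, which is how DEFLATE consumes its input stream. A hedged standalone sketch of the same idea using an index instead of the `remove(0)` queue (all names below are hypothetical):

```rust
struct BitReader<'a> {
    data: &'a [u8],
    pos: usize, // current byte
    bit: u8,    // next bit within that byte, 0..=8
}

impl BitReader<'_> {
    fn get_bit(&mut self) -> bool {
        if self.bit == 8 {
            self.pos += 1;
            assert!(self.pos < self.data.len(), "Not enough data!");
            self.bit = 0;
        }
        // LSB-first extraction, as DEFLATE requires.
        let b = (self.data[self.pos] >> self.bit) & 1;
        self.bit += 1;
        b != 0
    }
}
```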

View File

@@ -57,10 +57,7 @@ static PAGING_REQUEST: limine::request::PagingModeRequest =
     limine::request::PagingModeRequest::new();
 
 pub fn get_module<'a>(module_name: &str) -> Option<&'a File> {
-    if MODULE_REQUEST.get_response().is_none() {
-        panic!("Module request in none!");
-    }
-    let module_response = MODULE_REQUEST.get_response().unwrap();
+    let module_response = MODULE_REQUEST.get_response()?;
 
     let mut file = None;
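
`get_response()?` is the idiomatic replacement here: in a function returning `Option`, `?` propagates `None` to the caller instead of panicking, and it also drops the duplicated `get_response()` call. A tiny illustration of the same operator:

```rust
fn first_even(xs: Option<&[u32]>) -> Option<u32> {
    let xs = xs?; // early-returns None rather than panicking
    xs.iter().copied().find(|x| x % 2 == 0)
}
```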

View File

@@ -138,9 +138,7 @@ fn draw_gradient() {
     let buffer_ptr = crate::mem::pmm::pmm_alloc(pages).to_higher_half();
 
-    if buffer_ptr.is_null() {
-        panic!("Failed to allocate screen buffer")
-    }
+    assert!(!buffer_ptr.is_null(), "Failed to allocate screen buffer!");
 
     let buffer =
         unsafe { core::slice::from_raw_parts_mut(buffer_ptr.cast::<u32>().as_raw_ptr(), length) };

View File

@@ -1,124 +1,17 @@
-use core::arch::x86_64::__cpuid;
 use limine::memory_map::EntryType;
 
-use crate::libs::{
-    cell::OnceCell,
-    limine::{get_hhdm_offset, get_kernel_address, get_memmap, get_paging_level},
+use crate::{
+    arch::paging::{
+        va_space_switch, vmm_map, PageDirectory, PageSize, PageTableEntry, PT_TABLE_FLAGS,
+    },
+    libs::limine::{get_hhdm_offset, get_kernel_address, get_memmap},
 };
 
 use super::{align_down, align_up, pmm::pmm_alloc, PhysicalPtr};
 
-const PT_FLAG_VALID: u64 = 1 << 0;
-const PT_FLAG_WRITE: u64 = 1 << 1;
-const PT_FLAG_USER: u64 = 1 << 2;
-const PT_FLAG_LARGE: u64 = 1 << 7;
-const PT_FLAG_NX: u64 = 1 << 63;
-const PT_PADDR_MASK: u64 = 0x0000_FFFF_FFFF_F000;
-const PT_TABLE_FLAGS: u64 = PT_FLAG_VALID | PT_FLAG_WRITE | PT_FLAG_USER;
-
-// I know it's literally 8 bytes, but... fight me
-#[derive(Clone)]
-#[repr(transparent)]
-pub struct PageTableEntry(u64);
-
-impl PageTableEntry {
-    pub fn new(addr: u64, flags: u64) -> Self {
-        Self(addr | flags)
-    }
-
-    pub fn addr(&self) -> u64 {
-        self.0 & PT_PADDR_MASK
-    }
-
-    // TODO: probably a more elegant way to do this
-    pub fn get_field(&self, field: Field) -> u64 {
-        match field {
-            Field::Present => (self.0 >> 0) & 1,
-            Field::ReadWrite => (self.0 >> 1) & 1,
-            Field::UserSupervisor => (self.0 >> 2) & 1,
-            Field::WriteThrough => (self.0 >> 3) & 1,
-            Field::CacheDisable => (self.0 >> 4) & 1,
-            Field::Accessed => (self.0 >> 5) & 1,
-            Field::Avl0 => (self.0 >> 6) & 1,
-            Field::PageSize => (self.0 >> 7) & 1,
-            Field::Avl1 => (self.0 >> 8) & 0xF,
-            Field::Addr => (self.0 >> 12) & 0x000F_FFFF_FFFF_FFFF,
-            Field::Nx => (self.0 >> 63) & 1,
-        }
-    }
-
-    pub fn set_field(&mut self, field: Field, value: u64) {
-        let mask = match field {
-            Field::Present => 1 << 0,
-            Field::ReadWrite => 1 << 1,
-            Field::UserSupervisor => 1 << 2,
-            Field::WriteThrough => 1 << 3,
-            Field::CacheDisable => 1 << 4,
-            Field::Accessed => 1 << 5,
-            Field::Avl0 => 1 << 6,
-            Field::PageSize => 1 << 7,
-            Field::Avl1 => 0xF << 8,
-            Field::Addr => 0x000F_FFFF_FFFF_FFFF << 12,
-            Field::Nx => 1 << 63,
-        };
-        let shift = match field {
-            Field::Present => 0,
-            Field::ReadWrite => 1,
-            Field::UserSupervisor => 2,
-            Field::WriteThrough => 3,
-            Field::CacheDisable => 4,
-            Field::Accessed => 5,
-            Field::Avl0 => 6,
-            Field::PageSize => 7,
-            Field::Avl1 => 8,
-            Field::Addr => 12,
-            Field::Nx => 63,
-        };
-        self.0 = (self.0 & !mask) | ((value << shift) & mask);
-    }
-
-    fn is_table(&self) -> bool {
-        (self.0 & (PT_FLAG_VALID | PT_FLAG_LARGE)) == PT_FLAG_VALID
-    }
-
-    fn is_large(&self) -> bool {
-        (self.0 & (PT_FLAG_VALID | PT_FLAG_LARGE)) == (PT_FLAG_VALID | PT_FLAG_LARGE)
-    }
-
-    fn vmm_flags(&self) -> u64 {
-        self.0 & (PT_FLAG_WRITE | PT_FLAG_NX)
-    }
-}
-
-#[derive(Debug, Clone, Copy)]
-pub enum Field {
-    Present,
-    ReadWrite,
-    UserSupervisor,
-    WriteThrough,
-    CacheDisable,
-    Accessed,
-    Avl0,
-    PageSize,
-    Avl1,
-    Addr,
-    Nx,
-}
-
-#[repr(align(0x1000))]
-#[repr(C)]
-pub struct PageDirectory {
-    entries: [PageTableEntry; 512],
-}
-
-impl PageDirectory {
-    pub fn get_mut_ptr(&mut self) -> *mut Self {
-        return core::ptr::addr_of_mut!(*self);
-    }
-}
-
 const VMM_FLAG_WRITE: u64 = 1 << 1;
 const VMM_FLAG_NOEXEC: u64 = 1 << 63;
 const VMM_FLAG_FB: u64 = 1 << 3 | 1 << 12;
 
 pub static PAGE_SIZES: [u64; 5] = [0x1000, 0x200000, 0x40000000, 0x8000000000, 0x1000000000000];
@@ -140,7 +33,7 @@ pub fn vmm_init() {
             page_directory,
             i + get_hhdm_offset(),
             i,
-            PT_FLAG_WRITE,
+            VMM_FLAG_WRITE,
             PageSize::Size1GiB,
         );
@@ -223,100 +116,7 @@ pub fn get_kernel_pdpt() -> &'static mut PageDirectory {
     return unsafe { &mut *KENREL_PAGE_DIRECTORY };
 }
 
-#[derive(Clone, Copy, Eq, PartialEq)]
-pub enum PageSize {
-    Size4KiB = 0,
-    Size2MiB,
-    Size1GiB,
-}
-
-pub fn vmm_map(
-    page_directory: &mut PageDirectory,
-    virtual_addr: usize,
-    physical_addr: usize,
-    mut flags: u64,
-    page_size: PageSize,
-) {
-    let pml5_entry: usize = (virtual_addr & ((0x1ff as u64) << 48) as usize) >> 48;
-    let pml4_entry: usize = (virtual_addr & ((0x1ff as u64) << 39) as usize) >> 39;
-    let pml3_entry: usize = (virtual_addr & ((0x1ff as u64) << 30) as usize) >> 30;
-    let pml2_entry: usize = (virtual_addr & ((0x1ff as u64) << 21) as usize) >> 21;
-    let pml1_entry: usize = (virtual_addr & ((0x1ff as u64) << 12) as usize) >> 12;
-
-    let (pml5, pml4, pml3, pml2, pml1): (
-        &mut PageDirectory,
-        &mut PageDirectory,
-        &mut PageDirectory,
-        &mut PageDirectory,
-        &mut PageDirectory,
-    );
-
-    flags |= 0x01;
-
-    match get_paging_level() {
-        limine::paging::Mode::FIVE_LEVEL => {
-            pml5 = page_directory;
-            pml4 = unsafe {
-                let ptr = get_next_level(pml5, virtual_addr, page_size, 4, pml5_entry);
-                &mut *ptr.to_higher_half().as_raw_ptr()
-            };
-        }
-        limine::paging::Mode::FOUR_LEVEL => {
-            pml4 = page_directory;
-        }
-        _ => unreachable!(),
-    }
-
-    pml3 = unsafe {
-        let ptr = get_next_level(pml4, virtual_addr, page_size, 3, pml4_entry);
-        &mut *ptr.to_higher_half().as_raw_ptr()
-    };
-
-    if page_size == PageSize::Size1GiB {
-        if is_1gib_page_supported() {
-            pml3.entries[pml3_entry] = PageTableEntry(physical_addr as u64 | flags | PT_FLAG_LARGE);
-        } else {
-            let mut i = 0;
-            while i < 0x40000000 {
-                vmm_map(
-                    page_directory,
-                    virtual_addr + i,
-                    physical_addr + i,
-                    flags,
-                    PageSize::Size2MiB,
-                );
-                i += 0x200000;
-            }
-        }
-        return;
-    }
-
-    pml2 = unsafe {
-        let ptr = get_next_level(pml3, virtual_addr, page_size, 2, pml3_entry);
-        &mut *ptr.to_higher_half().as_raw_ptr()
-    };
-
-    if page_size == PageSize::Size2MiB {
-        pml2.entries[pml2_entry] = PageTableEntry(physical_addr as u64 | flags | PT_FLAG_LARGE);
-        return;
-    }
-
-    pml1 = unsafe {
-        let ptr = get_next_level(pml2, virtual_addr, page_size, 1, pml2_entry);
-        &mut *ptr.to_higher_half().as_raw_ptr()
-    };
-
-    if (flags & (1 << 12)) != 0 {
-        flags &= !(1 << 12);
-        flags |= 1 << 7;
-    }
-
-    pml1.entries[pml1_entry] = PageTableEntry(physical_addr as u64 | flags);
-}
-
-fn get_next_level(
+pub fn get_next_level(
     page_directory: &mut PageDirectory,
     virtual_addr: usize,
     desired_size: PageSize,
@@ -331,12 +131,7 @@ fn get_next_level(
     if page_directory.entries[entry].is_large() {
         // We are replacing an existing large page with a smaller page
-        if (level >= 3) || (level == 0) {
-            panic!("Unexpected level!");
-        }
-
-        if desired_size as usize >= 3 {
-            panic!("Unexpected page size!");
-        }
+        assert!(level <= 3 && level != 0, "Unexpected level!");
 
         let old_page_size = PAGE_SIZES[level];
         let new_page_size = PAGE_SIZES[desired_size as usize];
@@ -346,12 +141,11 @@ fn get_next_level(
         let old_phys = page_directory.entries[entry].addr();
         let old_virt = virtual_addr as u64 & !(old_page_size - 1);
 
-        if (old_phys & (old_page_size - 1)) != 0 {
-            panic!(
-                "Unexpected page table entry address! {:X} {:X}",
-                old_phys, old_page_size
-            );
-        }
+        assert_eq!(
+            old_phys & (old_page_size - 1),
+            0,
+            "Unexpected page table entry address!"
+        );
 
         ret = pmm_alloc(1).cast::<PageDirectory>();
         page_directory.entries[entry] = PageTableEntry::new(ret.addr() as u64, PT_TABLE_FLAGS);
@@ -376,55 +170,3 @@ fn get_next_level(
     return ret;
 }
-
-static IS_1GIB_SUPPORTED: OnceCell<bool> = OnceCell::new();
-
-fn is_1gib_page_supported() -> bool {
-    if let Err(()) = IS_1GIB_SUPPORTED.get() {
-        let cpuid = unsafe { __cpuid(0x80000001) };
-
-        if (cpuid.edx & (1 << 26)) == (1 << 26) {
-            IS_1GIB_SUPPORTED.set(true);
-            crate::println!("1GiB is supported!");
-        } else {
-            IS_1GIB_SUPPORTED.set(false);
-            crate::println!("1GiB is not supported!");
-        }
-    }
-
-    return *IS_1GIB_SUPPORTED.get_unchecked();
-}
-
-/// Loads a new page directory and switched the Virtual Address Space
-///
-/// # Safety
-///
-/// If the memory space has not been remapped to the HHDM before switching, this will cause Undefined Behavior.
-unsafe fn va_space_switch(page_directory: &mut PageDirectory) {
-    let hhdm_offset = get_hhdm_offset();
-    let kernel_virtual_base = get_kernel_address().virtual_base();
-
-    // cast so we can do easy math
-    let mut pd_ptr = page_directory.get_mut_ptr().cast::<u8>();
-
-    if pd_ptr as usize > kernel_virtual_base as usize {
-        pd_ptr = pd_ptr.sub(kernel_virtual_base as usize);
-    } else if pd_ptr as usize > hhdm_offset {
-        pd_ptr = pd_ptr.sub(hhdm_offset);
-    }
-
-    crate::println!("SWITCHING VA SPACE {pd_ptr:p}");
-    crate::println!("HHDM_OFFSET: {hhdm_offset:#x}");
-    crate::println!("KERNEL_VIRTUAL_BASE: {kernel_virtual_base:#x}");
-    crate::println!("Page directory virtual address: {pd_ptr:p}");
-
-    assert_eq!(
-        pd_ptr as usize % 0x1000,
-        0,
-        "Page directory pointer is not aligned"
-    );
-
-    unsafe { core::arch::asm!("mov cr3, {0:r}", in(reg) pd_ptr) };
-
-    crate::println!("waa");
-}
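
One relationship worth spelling out for `PAGE_SIZES`, which stays behind in vmm.rs: each level covers 512 times the previous one, because every x86-64 translation level indexes its table with 9 bits. A quick self-contained check (not repo code):

```rust
const PAGE_SIZES: [u64; 5] = [0x1000, 0x200000, 0x40000000, 0x8000000000, 0x1000000000000];

fn main() {
    // 4 KiB -> 2 MiB -> 1 GiB -> 512 GiB -> 256 TiB, a factor of 2^9 each step.
    for pair in PAGE_SIZES.windows(2) {
        assert_eq!(pair[1], pair[0] * 512);
    }
}
```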