fix ub in vfs.rs and offset all allocation addresses to hhdm
@@ -1,11 +1,12 @@
# CappuccinOS/scripts

This folder is responsible for holding all the scripts that are necessary for building CappuccinOS.

- **demangle-symbols.py**<br/>
This file takes in a symbols file generated by the `nm` program and outputs a symbol file with the symbol names demangled. It uses my library, [rustc_demangle.py](https://github.com/juls0730/rustc_demangle.py), which is a Python port of the Rust symbol demangling library [rustc-demangle](https://github.com/rust-lang/rustc-demangle).

- **font.py**<br/>
This file takes an array of u8 numbers and exports a PC Screen Font (2.0) file.

- **initramfs-test.py**
- **initramfs-test.py**<br/>
This file generates a large number of files in the initramfs directory, from huge files to small files nested in directories. It is intended to test the squashfs driver.
@@ -1,3 +1,7 @@
/* Tell the linker that we want an x86_64 ELF64 output file */
OUTPUT_FORMAT(elf64-x86-64)
OUTPUT_ARCH(i386:x86-64)

/* We want the symbol _start to be our entry point */
ENTRY(_start)

@@ -5,20 +9,31 @@ ENTRY(_start)
/* MMU permissions */
PHDRS
{
    text     PT_LOAD    FLAGS((1 << 0) | (1 << 2)) ; /* Execute + Read */
    rodata   PT_LOAD    FLAGS((1 << 2)) ;            /* Read only */
    data     PT_LOAD    FLAGS((1 << 1) | (1 << 2)) ; /* Write + Read */
    dynamic  PT_DYNAMIC FLAGS((1 << 1) | (1 << 2)) ; /* Dynamic PHDR for relocations */
    requests PT_LOAD    FLAGS((1 << 1) | (1 << 2)) ; /* Write + Read */
    text     PT_LOAD    FLAGS((1 << 0) | (1 << 2)) ; /* Execute + Read */
    rodata   PT_LOAD    FLAGS((1 << 2)) ;            /* Read only */
    data     PT_LOAD    FLAGS((1 << 1) | (1 << 2)) ; /* Write + Read */
    dynamic  PT_DYNAMIC FLAGS((1 << 1) | (1 << 2)) ; /* Dynamic PHDR for relocations */
}

SECTIONS
{
    /* We wanna be placed in the topmost 2GiB of the address space, for optimizations */
    /* We wanna be placed in the topmost 2GiB of the address space, for optimisations */
    /* and because that is what the Limine spec mandates. */
    /* Any address in this region will do, but often 0xffffffff80000000 is chosen as */
    /* that is the beginning of the region. */
    . = 0xffffffff80000000;

    /* Define a section to contain the Limine requests and assign it to its own PHDR */
    .requests : {
        KEEP(*(.requests_start_marker))
        KEEP(*(.requests))
        KEEP(*(.requests_end_marker))
    } :requests

    /* Move to the next memory page for .text */
    . += CONSTANT(MAXPAGESIZE);

    .text : {
        *(.text .text.*)
    } :text

@@ -42,13 +57,14 @@ SECTIONS
        *(.dynamic)
    } :data :dynamic

    /* NOTE: .bss needs to be the last thing mapped to :data, otherwise lots of */
    /* unnecessary zeros will be written to the binary. */
    /* If you need, for example, .init_array and .fini_array, those should be placed */
    /* above this. */
    .bss : {
        *(COMMON)
        *(.bss .bss.*)
        *(COMMON)
    } :data

    /* Discard .note.* and .eh_frame since they may cause issues on some hosts. */
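The new `.requests` output section gathers anything the kernel places into the `.requests_start_marker`, `.requests`, and `.requests_end_marker` input sections, and `KEEP` stops the linker from garbage-collecting those otherwise-unreferenced statics. As an illustration (the static below is a placeholder, not the limine crate's actual request type), Rust code opts a static into such a section like this:

```rust
// Hypothetical example: any static tagged this way ends up in the `.requests`
// input section that the linker script's KEEP(*(.requests)) collects.
#[link_section = ".requests"]
#[used]
static EXAMPLE_REQUEST: [u64; 2] = [0xCAFE_BABE, 0];
```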
@@ -16,7 +16,7 @@ pub fn print_stack_trace(max_frames: usize, rbp: u64) {
    crate::println!("Stack Trace:");
    for _frame in 0..max_frames {
        if stackframe.is_null() || unsafe { (*stackframe).back.is_null() } {
        if stackframe.is_null() || unsafe { core::ptr::read_unaligned(stackframe).back.is_null() } {
            break;
        }
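The dereference `(*stackframe).back` assumes the saved RBP holds a pointer properly aligned for the frame struct; switching to `read_unaligned` drops that assumption. A minimal sketch of the same frame-pointer walk, with an illustrative `StackFrame` layout rather than the kernel's exact type:

```rust
#[repr(C)]
struct StackFrame {
    back: *const StackFrame, // saved RBP of the caller
    rip: u64,                // return address pushed by `call`
}

pub fn print_stack_trace(max_frames: usize, rbp: u64) {
    let mut stackframe = rbp as *const StackFrame;

    for _frame in 0..max_frames {
        if stackframe.is_null() {
            break;
        }
        // `read_unaligned` copies the frame out byte-wise, so this stays sound
        // even if RBP does not hold a pointer aligned for `StackFrame`.
        let frame = unsafe { core::ptr::read_unaligned(stackframe) };
        if frame.back.is_null() {
            break;
        }
        // Resolve and print frame.rip here (e.g. against the demangled symbol table).
        stackframe = frame.back;
    }
}
```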
@@ -1,4 +1,5 @@
{
    "arch": "x86_64",
    "cpu": "x86-64",
    "data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128",
    "llvm-target": "x86_64-unknown-none",
@@ -7,7 +8,6 @@
    "target-c-int-width": "32",
    "features": "-mmx,-sse,+soft-float",
    "os": "CappuccinOS",
    "arch": "x86_64",
    "linker": "rust-lld",
    "linker-flavor": "ld.lld",
    "pre-link-args": {
@@ -432,10 +432,6 @@ impl FatFs {
    }

    fn cluster_to_sector(&self, cluster: usize) -> usize {
        crate::println!("bytes per sector: {}", unsafe {
            core::ptr::read_unaligned(core::ptr::addr_of!(self.bpb.bytes_per_sector))
        });

        let fat_size = self.sectors_per_fat;
        let root_dir_sectors = ((self.bpb.root_directory_count * 32)
            + (self.bpb.bytes_per_sector - 1))
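The BPB fields come from an on-disk, packed layout, which is why the (now removed) debug print went through `addr_of!` plus `read_unaligned` instead of taking a reference to the field. A minimal sketch of that pattern, with an illustrative packed struct (field offsets as in a real FAT BPB, but not the kernel's exact type):

```rust
#[repr(C, packed)]
struct BiosParameterBlock {
    jmp: [u8; 3],
    oem: [u8; 8],
    bytes_per_sector: u16, // byte offset 11, so never 2-byte aligned
}

fn bytes_per_sector(bpb: &BiosParameterBlock) -> u16 {
    // `&bpb.bytes_per_sector` would create a misaligned reference, which is UB.
    // `addr_of!` produces a raw pointer without creating a reference, and
    // `read_unaligned` copies the value out regardless of alignment.
    unsafe { core::ptr::read_unaligned(core::ptr::addr_of!(bpb.bytes_per_sector)) }
}
```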
@@ -1,22 +1,22 @@
use core::fmt::Debug;
use core::{fmt::Debug, ptr::NonNull};

use alloc::{
    // alloc::{alloc, dealloc},
    alloc::{alloc, dealloc, handle_alloc_error},
    boxed::Box,
    string::{String, ToString},
    sync::Arc,
    vec::Vec,
};

use crate::{log_info, log_ok, mem::PHYSICAL_MEMORY_MANAGER};
use crate::{log_info, log_ok};

static mut ROOT_VFS: Vfs = Vfs::null();

#[allow(unused)]
pub struct Vfs {
    mount_point: Option<String>,
    next: Option<*mut Vfs>,
    ops: Option<Box<dyn FsOps>>,
    next: Option<NonNull<Vfs>>,
    ops: Option<NonNull<dyn FsOps>>,
    // vnode_covered: Option<*const VNode>,
    flags: u32,
    block_size: u32,
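Swapping `*mut Vfs` and `Box<dyn FsOps>` for `NonNull` keeps the links pointer-sized inside `Option` (thanks to the non-null niche) and provides `as_ref`/`as_mut` helpers that make the safety contract explicit at each dereference. A tiny illustrative example of the pattern, not the kernel's types:

```rust
use core::ptr::NonNull;

struct Node {
    value: u32,
    next: Option<NonNull<Node>>, // same size as a bare pointer, but never null when Some
}

fn follow(node: &Node) -> Option<u32> {
    // The safety contract lives at the call site: the pointed-to node must still be live.
    node.next.map(|ptr| unsafe { ptr.as_ref().value })
}
```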
@@ -222,21 +222,31 @@ pub struct VAttr {
    used_blocks: u32,
}

unsafe fn find_mount_point(file_path: &str) -> Option<*mut Vfs> {
unsafe fn find_mount_point(file_path: &str) -> Option<NonNull<Vfs>> {
    // TODO: refactor
    let mut mount_point = ROOT_VFS.next;
    let mut mount_point: Option<NonNull<Vfs>> = None;
    let mut current = ROOT_VFS.next;

    while let Some(node) = current {
        if node
            .as_ref()
            .mount_point
            .as_ref()
            .expect("Null mount point")
            == "/"
            && mount_point.is_none()
        {
            mount_point = Some(node);
        }

        let mount_point_str = node
            .as_ref()
            .unwrap()
            .mount_point
            .as_ref()
            .expect("Null mount point");
        if file_path.starts_with(mount_point_str)
            && mount_point_str.len()
                > (mount_point.unwrap().as_ref().unwrap())
                > (mount_point.unwrap().as_ref())
                    .mount_point
                    .as_ref()
                    .unwrap()
@@ -244,7 +254,7 @@ unsafe fn find_mount_point(file_path: &str) -> Option<*mut Vfs> {
        {
            mount_point = Some(node);
        }
        current = unsafe { (*node).next };
        current = unsafe { (*node.as_ptr()).next };
    }

    mount_point
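`find_mount_point` effectively does a longest-prefix match: the root mount is the fallback, and any mount whose path prefixes the file path and is longer than the current candidate wins. A standalone sketch of the same idea over plain strings (hypothetical helper, not the kernel's API):

```rust
fn find_mount<'a>(mounts: &'a [&'a str], file_path: &str) -> Option<&'a str> {
    let mut best: Option<&'a str> = None;
    for &mount in mounts {
        let longer = match best {
            Some(current) => mount.len() > current.len(),
            None => true,
        };
        if file_path.starts_with(mount) && longer {
            best = Some(mount);
        }
    }
    best
}

// find_mount(&["/", "/mnt", "/mnt/boot"], "/mnt/boot/limine/limine.cfg") == Some("/mnt/boot")
```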
@@ -255,42 +265,66 @@ pub fn add_vfs(mut mount_point: &str, fs_ops: Box<dyn FsOps>) -> Result<(), ()>
        mount_point = mount_point.trim_end_matches('/');
    }

    // let layout = alloc::alloc::Layout::new::<Vfs>();
    /// # Safety
    /// Consumes vfs
    unsafe fn deallocate_vfs(vfs: NonNull<Vfs>) {
        let fs_ops_box = Box::from_raw(vfs.as_ref().ops.unwrap().as_ptr());
        drop(fs_ops_box);
        dealloc(
            vfs.as_ptr().cast::<u8>(),
            alloc::alloc::Layout::new::<Vfs>(),
        );
    }

    let layout = alloc::alloc::Layout::new::<Vfs>();
    // TODO: its fucking broken again
    let vfs_ptr = PHYSICAL_MEMORY_MANAGER.alloc(1).cast::<Vfs>();
    // let vfs_ptr = PHYSICAL_MEMORY_MANAGER.alloc(1).cast::<Vfs>();
    let vfs_ptr = unsafe { alloc(layout).cast::<Vfs>() };

    let vfs = unsafe { &mut *vfs_ptr };
    if vfs_ptr.is_null() {
        handle_alloc_error(layout)
    }

    (*vfs) = Vfs::null();
    (*vfs).ops = Some(fs_ops);
    (*vfs).mount_point = Some(mount_point.to_string());
    // Initialize the data so we can use the nonnull helpers
    unsafe {
        let mut vfs = Vfs::null();
        vfs.ops = Some(NonNull::new_unchecked(Box::into_raw(fs_ops)));
        vfs.mount_point = Some(mount_point.to_string());
        *vfs_ptr = vfs;
    };

    log_info!("Adding vfs at {mount_point} {vfs_ptr:p}");
    // Safety: We know vfs_ptr is not null
    let mut vfs_ptr = unsafe { NonNull::new_unchecked(vfs_ptr) };

    let vfs = unsafe { vfs_ptr.as_mut() };

    log_info!("Adding vfs at {mount_point}");

    // TODO: dont give / special treatment
    if mount_point == "/" {
        if unsafe { ROOT_VFS.next.is_some() } {
            // unsafe { dealloc(vfs_ptr.cast::<u8>(), layout) };
            PHYSICAL_MEMORY_MANAGER.dealloc(vfs_ptr.cast::<u8>(), 1);
            unsafe {
                deallocate_vfs(vfs_ptr);
            };
            // PHYSICAL_MEMORY_MANAGER.dealloc(vfs_ptr.cast::<u8>(), 1);
            return Err(());
        }

        {
            let vfsp = vfs.as_ptr();

            (*vfs)
                .ops
                .as_mut()
        unsafe {
            vfs.ops
                .unwrap()
                .mount(mount_point, &mut vfs.data, vfsp);
                .as_mut()
                .mount(mount_point, &mut vfs.data, vfs_ptr.as_ptr());
        }

        unsafe { ROOT_VFS.next = Some(vfs.as_mut_ptr()) };
        unsafe { ROOT_VFS.next = Some(vfs_ptr) };
    } else {
        // TODO: technically this allows you to mount file systems at nonexistent mount point
        if unsafe { ROOT_VFS.next.is_none() } {
            // unsafe { dealloc(vfs_ptr.cast::<u8>(), layout) };
            PHYSICAL_MEMORY_MANAGER.dealloc(vfs_ptr.cast::<u8>(), 1);
            unsafe {
                deallocate_vfs(vfs_ptr);
            };
            // PHYSICAL_MEMORY_MANAGER.dealloc(vfs_ptr.cast::<u8>(), 1);
            return Err(());
        }
@@ -299,71 +333,37 @@ pub fn add_vfs(mut mount_point: &str, fs_ops: Box<dyn FsOps>) -> Result<(), ()>
        let mut next_vfs = unsafe { ROOT_VFS.next };

        while let Some(target_vfs) = next_vfs {
            if unsafe { target_vfs.as_ref().unwrap().mount_point.as_ref().unwrap() == mount_point }
            {
                // unsafe { dealloc(vfs_ptr.cast::<u8>(), layout) };
                PHYSICAL_MEMORY_MANAGER.dealloc(vfs_ptr.cast::<u8>(), 1);
            if unsafe { target_vfs.as_ref().mount_point.as_ref().unwrap() == mount_point } {
                unsafe {
                    deallocate_vfs(vfs_ptr);
                };
                // PHYSICAL_MEMORY_MANAGER.dealloc(vfs_ptr.cast::<u8>(), 1);
                return Err(());
            }

            if unsafe { (*target_vfs).next }.is_none() {
            if unsafe { target_vfs.as_ref().next }.is_none() {
                break;
            }

            next_vfs = unsafe { (*target_vfs).next };
            next_vfs = unsafe { target_vfs.as_ref().next };
        }

        if next_vfs.is_none() {
            // unsafe { dealloc(vfs_ptr.cast::<u8>(), layout) };
            PHYSICAL_MEMORY_MANAGER.dealloc(vfs_ptr.cast::<u8>(), 1);
            unsafe {
                deallocate_vfs(vfs_ptr);
            };
            // PHYSICAL_MEMORY_MANAGER.dealloc(vfs_ptr.cast::<u8>(), 1);
            return Err(());
        }

        {
            let vfsp = vfs.as_ptr();

            (*vfs)
                .ops
                .as_mut()
        unsafe {
            vfs.ops
                .unwrap()
                .mount(mount_point, &mut vfs.data, vfsp);
                .as_mut()
                .mount(mount_point, &mut vfs.data, vfs_ptr.as_ptr());
        }

        unsafe { (*(next_vfs.unwrap())).next = Some(vfs) };

        // let mut cur_vnode = unsafe { (*target_vfs).ops.as_mut().unwrap().root(target_vfs) };

        // let parts = mount_point.split('/').collect::<Vec<&str>>();

        // for part in parts {
        //     if part.is_empty() {
        //         continue;
        //     }

        //     // TODO: dont just lookup everything as the root user
        //     if let Ok(vnode) =
        //         cur_vnode
        //             .ops
        //             .lookup(part, UserCred { uid: 0, gid: 0 }, cur_vnode.as_ptr())
        //     {
        //         cur_vnode = vnode;
        //     } else {
        //         unsafe { dealloc(vfs_ptr.cast::<u8>(), layout) };
        //         return Err(());
        //     }
        // }

        // if cur_vnode.vfs_mounted_here.is_some() {
        //     unsafe { dealloc(vfs_ptr.cast::<u8>(), layout) };
        //     return Err(());
        // }

        // {
        //     let vfsp = vfs.as_ptr();

        // }

        // cur_vnode.vfs_mounted_here = Some(vfs.as_mut_ptr());
        unsafe { (next_vfs.unwrap()).as_mut().next = Some(vfs_ptr) };
    }

    log_ok!("Added vfs at {mount_point}");
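Because `ops` is now a raw `NonNull<dyn FsOps>` rather than a `Box`, ownership has to be managed by hand: `Box::into_raw` leaks the box into the `Vfs`, and `deallocate_vfs` rebuilds the `Box` with `Box::from_raw` before freeing the `Vfs` allocation, so the `FsOps` destructor still runs on every error path. A minimal sketch of that round trip, with an illustrative trait and struct (not the kernel's `FsOps`/`Vfs`):

```rust
extern crate alloc;
use alloc::alloc::{alloc, dealloc, handle_alloc_error, Layout};
use alloc::boxed::Box;
use core::ptr::NonNull;

trait Ops { fn name(&self) -> &str; }

struct Holder {
    ops: Option<NonNull<dyn Ops>>,
}

fn make_holder(ops: Box<dyn Ops>) -> NonNull<Holder> {
    let layout = Layout::new::<Holder>();
    let ptr = unsafe { alloc(layout).cast::<Holder>() };
    if ptr.is_null() {
        handle_alloc_error(layout);
    }
    unsafe {
        // Leak the Box into the manually allocated Holder.
        ptr.write(Holder { ops: Some(NonNull::new_unchecked(Box::into_raw(ops))) });
        NonNull::new_unchecked(ptr)
    }
}

unsafe fn free_holder(holder: NonNull<Holder>) {
    // Rebuild the Box so the trait object's destructor runs...
    drop(Box::from_raw(holder.as_ref().ops.unwrap().as_ptr()));
    // ...then release the manually allocated Holder itself.
    dealloc(holder.as_ptr().cast::<u8>(), Layout::new::<Holder>());
}
```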
@@ -383,14 +383,16 @@ pub fn vfs_open(path: &str) -> Result<VNode, ()> {
    }

    let mut cur_vnode = unsafe {
        (*root_vfs.unwrap())
            .ops
            .as_mut()
        root_vfs
            .unwrap()
            .root(root_vfs.unwrap())
            .as_mut()
            .ops
            .unwrap()
            .as_mut()
            .root(root_vfs.unwrap().as_ptr())
    };

    let path = &path[unsafe { (*root_vfs.unwrap()).mount_point.as_ref().unwrap() }.len()..];
    let path = &path[unsafe { root_vfs.unwrap().as_ref().mount_point.as_ref().unwrap() }.len()..];

    let parts = path.split('/').collect::<Vec<&str>>();
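After the owning mount is found, the mount point prefix is stripped and the remainder is split into components for per-directory lookup. An illustrative helper for that step (hypothetical name, not the kernel's API):

```rust
fn path_components<'a>(path: &'a str, mount_point: &str) -> impl Iterator<Item = &'a str> {
    // Drop the mount point prefix, then yield the non-empty segments, so
    // "/mnt/boot/limine/limine.cfg" under "/mnt" becomes ["boot", "limine", "limine.cfg"].
    path[mount_point.len()..]
        .split('/')
        .filter(|part| !part.is_empty())
}
```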
src/main.rs
@@ -1,4 +1,4 @@
#![feature(abi_x86_interrupt, naked_functions, const_mut_refs)]
#![feature(allocator_api, abi_x86_interrupt, naked_functions, const_mut_refs)]
#![allow(clippy::needless_return)]
#![no_std]
#![no_main]
@@ -7,6 +7,7 @@ use core::ffi::CStr;
use alloc::vec::Vec;
use limine::KernelFileRequest;
use mem::HHDM_OFFSET;

use crate::drivers::fs::{
    initramfs,
@@ -45,6 +46,14 @@ pub fn kmain() -> ! {
    let mut file = vfs_open("/firstdir/seconddirbutlonger/yeah.txt").unwrap();

    crate::println!(
        "YEAH.TXT: {:X?}",
        &file
            .ops
            .open(0, UserCred { uid: 0, gid: 0 }, file.as_ptr())
            .unwrap()
    );

    drivers::storage::ide::init();

    let mut nested_file = vfs_open("/mnt/boot/limine/limine.cfg").unwrap();
@@ -57,13 +66,6 @@ pub fn kmain() -> ! {
    );

    // let file = vfs_open("/example.txt").unwrap();
    crate::println!(
        "YEAH.TXT: {:X?}",
        &file
            .ops
            .open(0, UserCred { uid: 0, gid: 0 }, file.as_ptr())
            .unwrap()
    );

    // as a sign that we didnt panic
    draw_gradient();
@@ -5,7 +5,7 @@ use core::{
use crate::{libs::sync::Mutex, mem::pmm::PAGE_SIZE};

use super::align_up;
use super::{align_up, HHDM_OFFSET};

#[derive(Debug)]
struct MemNode {
@@ -47,7 +47,9 @@ impl LinkedListAllocator {
    pub fn init(&mut self, pages: usize) {
        unsafe {
            self.add_free_region(
                super::PHYSICAL_MEMORY_MANAGER.alloc(pages),
                super::PHYSICAL_MEMORY_MANAGER
                    .alloc(pages)
                    .add(*HHDM_OFFSET),
                PAGE_SIZE * pages,
            );
        }
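The physical memory manager hands back physical frame addresses, but with paging enabled the kernel can only dereference virtual addresses; adding the bootloader's higher-half direct map (HHDM) offset converts one into the other, which is what `.add(*HHDM_OFFSET)` does above. A one-line sketch of the same conversion (illustrative helper, assuming the HHDM maps all physical memory at a fixed offset):

```rust
/// Translate a physical address into a kernel-usable pointer via the HHDM.
fn phys_to_virt(phys: usize, hhdm_offset: usize) -> *mut u8 {
    (phys + hhdm_offset) as *mut u8
}

// e.g. the heap region handed to add_free_region spans
// phys_to_virt(frame_addr, *HHDM_OFFSET) .. + pages * PAGE_SIZE.
```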
@@ -7,6 +7,7 @@ use self::{allocator::LinkedListAllocator, pmm::PhysicalMemoryManager};
static MEMMAP_REQUEST: limine::MemmapRequest = limine::MemmapRequest::new(0);
static HHDM_REQUEST: limine::HhdmRequest = limine::HhdmRequest::new(0);
pub static HHDM_OFFSET: OnceCell<usize> = OnceCell::new();

pub static PHYSICAL_MEMORY_MANAGER: OnceCell<PhysicalMemoryManager> = OnceCell::new();

@@ -15,7 +16,7 @@ pub fn align_up(addr: usize, align: usize) -> usize {
    addr + offset
}

const HEAP_PAGES: usize = 4096; // 8 MiB heap
const HEAP_PAGES: usize = 1024; // 4 MiB heap

#[global_allocator]
pub static ALLOCATOR: Mutex<LinkedListAllocator> = Mutex::new(LinkedListAllocator::new());
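For reference, the heap size follows directly from the page count: 1024 pages × 4096 bytes per page = 4 MiB, matching the new comment (4096 pages would actually be 16 MiB, not 8 MiB). A compile-time check along these lines could pin that down (illustrative, not part of the original source):

```rust
const PAGE_SIZE: usize = 4096;
const HEAP_PAGES: usize = 1024;
const HEAP_BYTES: usize = HEAP_PAGES * PAGE_SIZE;

// 1024 * 4096 = 4_194_304 bytes = 4 MiB
const _: () = assert!(HEAP_BYTES == 4 * 1024 * 1024);
```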
@@ -2,7 +2,7 @@
use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};

use super::{HHDM_REQUEST, MEMMAP_REQUEST};
use super::{HHDM_OFFSET, HHDM_REQUEST, MEMMAP_REQUEST};

pub const PAGE_SIZE: usize = 4096;

@@ -36,6 +36,8 @@ impl PhysicalMemoryManager {
    let hhdm_offset = hhdm_req.offset as usize;

    HHDM_OFFSET.set(hhdm_offset);

    let memmap = MEMMAP_REQUEST
        .get_response()
        .get_mut()