diff --git a/kernel/src/memory/mod.rs b/kernel/src/memory/mod.rs
index 89b7aeb..dba8c4d 100644
--- a/kernel/src/memory/mod.rs
+++ b/kernel/src/memory/mod.rs
@@ -22,24 +22,6 @@ use x86_64::{
     PhysAddr,
 };
 
-/// Creates an example mapping for the given page to frame `0xb8000`.
-pub fn create_example_mapping(
-    page: Page,
-    mapper: &mut OffsetPageTable,
-    frame_allocator: &mut impl FrameAllocator<Size4KiB>,
-) {
-    use x86_64::structures::paging::PageTableFlags as Flags;
-
-    let frame = PhysFrame::containing_address(PhysAddr::new(0xb8000));
-    let flags = Flags::PRESENT | Flags::WRITABLE;
-
-    let map_to_result = unsafe {
-        // FIXME: this is not safe, we do it only for testing
-        mapper.map_to(page, frame, flags, frame_allocator)
-    };
-    map_to_result.expect("map_to failed").flush();
-}
-
 use bootloader::bootinfo::{MemoryMap, MemoryRegionType};
 
 pub struct BootInfoFrameAllocator {
@@ -57,7 +39,7 @@ impl BootInfoFrameAllocator {
     fn usable_frames(&self) -> impl Iterator<Item = PhysFrame> {
         let regions = self.memory_map.iter();
-        let usable_regions = regions.filter(|r| r.region_type == MemoryRegionType::Usable);
+        let usable_regions = regions.filter(|r| r.region_type == MemoryRegionType::Usable); // `filter` takes a closure; `r` borrows each MemoryRegion yielded by the iterator
         let addr_ranges = usable_regions.map(|r| r.range.start_addr()..r.range.end_addr());
         let frame_addresses = addr_ranges.flat_map(|r| r.step_by(4096));
         frame_addresses.map(|addr| PhysFrame::containing_address(PhysAddr::new(addr)))
diff --git a/kernel/src/scheduler/mod.rs b/kernel/src/scheduler/mod.rs
index e69de29..9866dc8 100644
--- a/kernel/src/scheduler/mod.rs
+++ b/kernel/src/scheduler/mod.rs
@@ -0,0 +1,282 @@
+// Adding boilerplate scheduler functions
+
+use crate::gdt;
+use crate::mem;
+use crate::serial_println;
+use alloc::boxed::Box;
+use alloc::vec::Vec;
+use core::fmt::Display;
+use lazy_static::lazy_static;
+use spin::Mutex;
+
+#[derive(Debug, Clone)]
+pub struct Context {
+    pub rbp: u64,
+    pub rax: u64,
+    pub rbx: u64,
+    pub rcx: u64,
+    pub rdx: u64,
+    pub rsi: u64,
+    pub rdi: u64,
+    pub r8: u64,
+    pub r9: u64,
+    pub r10: u64,
+    pub r11: u64,
+    pub r12: u64,
+    pub r13: u64,
+    pub r14: u64,
+    pub r15: u64,
+    pub rip: u64,
+    pub cs: u64,
+    pub rflags: u64,
+    pub rsp: u64,
+    pub ss: u64,
+}
+
+#[inline(never)]
+pub unsafe fn jmp_to_usermode(code: mem::VirtAddr, stack_end: mem::VirtAddr) {
+    let (cs_idx, ds_idx) = gdt::set_usermode_segs();
+    x86_64::instructions::tlb::flush_all(); // flush the TLB after address-space switch
+    asm!("\
+        push rax   // stack segment
+        push rsi   // rsp
+        push 0x200 // rflags (only interrupt bit set)
+        push rdx   // code segment
+        push rdi   // ret to virtual addr
+        iretq",
+    in("rdi") code.addr(), in("rsi") stack_end.addr(), in("dx") cs_idx, in("ax") ds_idx);
+}
+
+pub struct Scheduler {
+    tasks: Mutex<Vec<Task>>,
+    cur_task: Mutex<Option<usize>>,
+}
+
+impl Scheduler {
+    pub fn new() -> Scheduler {
+        Scheduler {
+            tasks: Mutex::new(Vec::new()),
+            cur_task: Mutex::new(None), // so that next task is 0
+        }
+    }
+
+    pub unsafe fn schedule(&self, fn_addr: mem::VirtAddr) {
+
+    }
+
+    pub unsafe fn save_current_context(&self, ctxp: *const Context) {
+
+    }
+
+    pub unsafe fn run_next(&self) {
+        // Call to jump to usermode lies here
+    }
+}
+
+lazy_static! { + pub static ref SCHEDULER: Scheduler = Scheduler::new(); +} + +/* + +//This is the scheduler code I referred to create the above template. Only wrote stuff which I could understand and explain + +use crate::gdt; +use crate::mem; +use crate::serial_println; +use alloc::boxed::Box; +use alloc::vec::Vec; +use core::fmt::Display; +use lazy_static::lazy_static; +use spin::Mutex; + +#[derive(Debug, Clone)] +pub struct Context { + pub rbp: u64, + pub rax: u64, + pub rbx: u64, + pub rcx: u64, + pub rdx: u64, + pub rsi: u64, + pub rdi: u64, + pub r8: u64, + pub r9: u64, + pub r10: u64, + pub r11: u64, + pub r12: u64, + pub r13: u64, + pub r14: u64, + pub r15: u64, + pub rip: u64, + pub cs: u64, + pub rflags: u64, + pub rsp: u64, + pub ss: u64, +} + +#[inline(always)] +pub unsafe fn get_context() -> *const Context { + let ctxp: *const Context; + asm!("push r15; push r14; push r13; push r12; push r11; push r10; push r9;\ + push r8; push rdi; push rsi; push rdx; push rcx; push rbx; push rax; push rbp;\ + mov {}, rsp; sub rsp, 0x400;", + out(reg) ctxp); + ctxp +} + +#[inline(always)] +pub unsafe fn restore_context(ctxr: &Context) { + asm!("mov rsp, {};\ + pop rbp; pop rax; pop rbx; pop rcx; pop rdx; pop rsi; pop rdi; pop r8; pop r9;\ + pop r10; pop r11; pop r12; pop r13; pop r14; pop r15; iretq;", + in(reg) ctxr); +} + +#[inline(never)] +pub unsafe fn jmp_to_usermode(code: mem::VirtAddr, stack_end: mem::VirtAddr) { + let (cs_idx, ds_idx) = gdt::set_usermode_segs(); + x86_64::instructions::tlb::flush_all(); // flush the TLB after address-space switch + asm!("\ + push rax // stack segment + push rsi // rsp + push 0x200 // rflags (only interrupt bit set) + push rdx // code segment + push rdi // ret to virtual addr + iretq", + in("rdi") code.addr(), in("rsi") stack_end.addr(), in("dx") cs_idx, in("ax") ds_idx); +} + +#[derive(Clone, Debug)] +enum TaskState { + // a task's state can either be + SavedContext(Context), // a saved context + StartingInfo(mem::VirtAddr, 
mem::VirtAddr), // or a starting instruction and stack pointer +} + +struct Task { + state: TaskState, // the current state of the task + task_pt: Box, // the page table for this task + _stack_vec: Vec, // a vector to keep the task's stack space +} + +impl Task { + pub fn new( + exec_base: mem::VirtAddr, + stack_end: mem::VirtAddr, + _stack_vec: Vec, + task_pt: Box, + ) -> Task { + Task { + state: TaskState::StartingInfo(exec_base, stack_end), + _stack_vec, + task_pt, + } + } +} + +impl Display for Task { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + unsafe { + write!( + f, + "PT: {}, Context: {:x?}", + self.task_pt.phys_addr(), + self.state + ) + } + } +} + +pub struct Scheduler { + tasks: Mutex>, + cur_task: Mutex>, +} + +impl Scheduler { + pub fn new() -> Scheduler { + Scheduler { + tasks: Mutex::new(Vec::new()), + cur_task: Mutex::new(None), // so that next task is 0 + } + } + + pub unsafe fn schedule(&self, fn_addr: mem::VirtAddr) { + let userspace_fn_phys = fn_addr.to_phys().unwrap().0; // virtual address to physical + let page_phys_start = (userspace_fn_phys.addr() >> 12) << 12; // zero out page offset to get which page we should map + let fn_page_offset = userspace_fn_phys.addr() - page_phys_start; // offset of function from page start + let userspace_fn_virt_base = 0x400000; // target virtual address of page + let userspace_fn_virt = userspace_fn_virt_base + fn_page_offset; // target virtual address of function + serial_println!( + "Mapping {:x} to {:x}", + page_phys_start, + userspace_fn_virt_base + ); + let mut task_pt = mem::PageTable::new(); // copy over the kernel's page tables + task_pt.map_virt_to_phys( + mem::VirtAddr::new(userspace_fn_virt_base), + mem::PhysAddr::new(page_phys_start), + mem::BIT_PRESENT | mem::BIT_USER, + ); // map the program's code + task_pt.map_virt_to_phys( + mem::VirtAddr::new(userspace_fn_virt_base).offset(0x1000), + mem::PhysAddr::new(page_phys_start).offset(0x1000), + mem::BIT_PRESENT | 
mem::BIT_USER, + ); // also map another page to be sure we got the entire function in + let mut stack_space: Vec = Vec::with_capacity(0x1000); // allocate some memory to use for the stack + let stack_space_phys = mem::VirtAddr::new(stack_space.as_mut_ptr() as *const u8 as u64) + .to_phys() + .unwrap() + .0; + // take physical address of stack + task_pt.map_virt_to_phys( + mem::VirtAddr::new(0x800000), + stack_space_phys, + mem::BIT_PRESENT | mem::BIT_WRITABLE | mem::BIT_USER, + ); // map the stack memory to 0x800000 + let task = Task::new( + mem::VirtAddr::new(userspace_fn_virt), + mem::VirtAddr::new(0x801000), + stack_space, + task_pt, + ); // create task struct + self.tasks.lock().push(task); // push task struct to list of tasks + } + + pub unsafe fn save_current_context(&self, ctxp: *const Context) { + self.cur_task.lock().map(|cur_task_idx| { + // if there is a current task + let ctx = (*ctxp).clone(); + self.tasks.lock()[cur_task_idx].state = TaskState::SavedContext(ctx); + // replace its context with the given one + }); + } + + pub unsafe fn run_next(&self) { + let tasks_len = self.tasks.lock().len(); // how many tasks are available + if tasks_len > 0 { + let task_state = { + let mut cur_task_opt = self.cur_task.lock(); // lock the current task index + let cur_task = cur_task_opt.get_or_insert(0); // default to 0 + let next_task = (*cur_task + 1) % tasks_len; // next task index + *cur_task = next_task; + let task = &self.tasks.lock()[next_task]; // get the next task + serial_println!("Switching to task #{} ({})", next_task, task); + task.task_pt.enable(); // enable task's page table + task.state.clone() // clone task state information + }; // release held locks + match task_state { + TaskState::SavedContext(ctx) => { + restore_context(&ctx) // either restore the saved context + } + TaskState::StartingInfo(exec_base, stack_end) => { + jmp_to_usermode(exec_base, stack_end) // or initialize the task with the given instruction, stack pointers + } + } + } + } +} + 
+lazy_static! { + pub static ref SCHEDULER: Scheduler = Scheduler::new(); +} +*/ \ No newline at end of file diff --git a/kernel/src/userspace/mod.rs b/kernel/src/userspace/mod.rs new file mode 100644 index 0000000..33ef508 --- /dev/null +++ b/kernel/src/userspace/mod.rs @@ -0,0 +1,63 @@ +pub mod userspace; + +#[naked] //As we will be adding our own unsafe functions here executing our own assembly language we don't need Prologue and Epilogue for the function + +// To get into the kernel mode for performing the syscall execution, one must set the registers to the desired syscall number and its parameters and perform a syscall instruction + +// Breaking the process in three step (Returning execution to the user program): +// 1) Enable the page table that has this program's memory mapped to the correct virtual addresses +// 2) Setting the cs and ds registers to proper indexes in the GDT to indicate that we are currently in Ring3 or usermode and use the respective segment +// 3) Setting the registers for sysretq and iretq operations + +//Obviously you can remove the jmp statement from the program from infinitely looping + +pub unsafe fn userspace_prog_1() { + asm!("\ + start: + mov rax, 0xCA11 + mov rdi, 10 + mov rsi, 20 + mov rdx, 30 + mov r10, 40 + syscall + jmp start + ":::: "volatile", "intel"); +} + +//The syscall should automatically invoke handlers::process_syscall with the following registers set as rax - syscall address, and rdi rsi rdx r10 are just 4 registers for 4 arguments passed to the syscall. +//You can increase this by increasing the number of arguments the syscall can handle and setting the respective registers with the appropriate values in the userspace program. 
:)
+
+
+//Random user space program which does something (has miniloops) and calls syscalls
+//NOTE(review): renamed to userspace_prog_2 — this module already defines a
+//userspace_prog_1 above, and two items with the same name in one module do
+//not compile (duplicate definition, E0428).
+#[naked]
+pub unsafe fn userspace_prog_2() {
+    asm!("\
+        mov rbx, 0xf0000000
+        prog1start:
+        push 0x595ca11a     // keep the syscall number in the stack
+        mov rbp, 0x0        // distinct values for each register
+        mov rax, 0x1
+        mov rcx, 0x3
+        mov rdx, 0x4
+        mov rdi, 0x6
+        mov r8, 0x7
+        mov r9, 0x8
+        mov r10, 0x9
+        mov r11, 0x10
+        mov r12, 0x11
+        mov r13, 0x12
+        mov r14, 0x13
+        mov r15, 0x14
+        xor rax, rax
+        prog1loop:
+        inc rax
+        cmp rax, 0x4000000
+        jnz prog1loop       // loop for some milliseconds
+        pop rax             // pop syscall number from the stack
+        inc rbx             // increase loop counter
+        mov rdi, rsp        // first syscall arg is rsp
+        mov rsi, rbx        // second syscall arg is the loop counter
+        syscall             // perform the syscall!
+        jmp prog1start      // do it all over
+    ");
+}
\ No newline at end of file