Fix many issues relating to paging, split page fault into 2 vectors

This commit is contained in:
Ry 2022-09-15 17:17:29 -07:00
parent 3df3a98f3c
commit 8be5c65c73
4 changed files with 358 additions and 260 deletions

File diff suppressed because it is too large Load Diff

View File

@ -7,6 +7,7 @@ pub mod cpu;
pub mod keyboard; pub mod keyboard;
pub mod mouse; pub mod mouse;
pub mod disk; pub mod disk;
pub mod setjmp;
use audio::AudioChannel; use audio::AudioChannel;
use bus::Bus; use bus::Bus;
@ -158,6 +159,9 @@ fn main() {
builder.spawn({ builder.spawn({
move || { move || {
loop { loop {
if let Some(exception) = unsafe { cpu.setjmp() } {
cpu.interrupt(Interrupt::Exception(exception));
}
while !cpu.halted { while !cpu.halted {
if let Ok(exception) = exception_receiver.try_recv() { if let Ok(exception) = exception_receiver.try_recv() {
cpu.interrupt(Interrupt::Exception(exception)); cpu.interrupt(Interrupt::Exception(exception));

View File

@ -2,6 +2,7 @@
use crate::error; use crate::error;
use crate::cpu::Exception; use crate::cpu::Exception;
use crate::setjmp::{JumpEnv, longjmp};
use std::cell::UnsafeCell; use std::cell::UnsafeCell;
use std::collections::HashMap; use std::collections::HashMap;
@ -111,26 +112,39 @@ impl Memory {
let old_state = *self.mmu_enabled(); let old_state = *self.mmu_enabled();
*self.mmu_enabled() = false; *self.mmu_enabled() = false;
let directory_address = *self.paging_directory_address(); let directory_address = *self.paging_directory_address();
let directory = self.read_32(directory_address + (page_directory_index * 4)); let directory = self.read_opt_32(directory_address + (page_directory_index * 4));
let dir_present = directory & 0b1 != 0; match directory {
let dir_address = directory & 0xFFFFF000; Some(directory) => {
if dir_present { let dir_present = directory & 0b1 != 0;
let table = self.read_32(dir_address + (page_table_index * 4)); let dir_address = directory & 0xFFFFF000;
let table_present = table & 0b01 != 0; if dir_present {
let table_rw = table & 0b10 != 0; let table = self.read_opt_32(dir_address + (page_table_index * 4));
let table_address = table & 0xFFFFF000; match table {
Some(table) => {
let table_present = table & 0b01 != 0;
let table_rw = table & 0b10 != 0;
let table_address = table & 0xFFFFF000;
if table_present { if table_present {
let tlb_entry = MemoryPage { let tlb_entry = MemoryPage {
physical_address: table_address, physical_address: table_address,
present: table_present, present: table_present,
rw: table_rw, rw: table_rw,
}; };
self.tlb().entry((page_directory_index << 22) | (page_table_index << 12)).or_insert(tlb_entry); self.tlb().entry((page_directory_index << 22) | (page_table_index << 12)).or_insert(tlb_entry);
}
},
None => {}
}
}
*self.mmu_enabled() = old_state;
dir_present
},
None => {
*self.mmu_enabled() = old_state;
false
} }
} }
*self.mmu_enabled() = old_state;
dir_present
} }
pub fn virtual_to_physical(&mut self, virtual_address: u32) -> Option<(u32, bool)> { pub fn virtual_to_physical(&mut self, virtual_address: u32) -> Option<(u32, bool)> {
@ -170,47 +184,53 @@ impl Memory {
physical_address physical_address
} }
pub fn read_8(&mut self, mut address: u32) -> u8 { pub fn read_opt_8(&mut self, mut address: u32) -> Option<u8> {
if *self.mmu_enabled() { if *self.mmu_enabled() {
(address, _) = self.virtual_to_physical(address as u32).unwrap_or_else(|| { let address_maybe = self.virtual_to_physical(address as u32);
self.exception_sender().send(Exception::PageFault(address)).unwrap(); match address_maybe {
(0, false) Some(addr) => address = addr.0,
}); None => return None,
}
} }
let address = address as usize; let address = address as usize;
let result = if address >= MEMORY_ROM_START && address < MEMORY_ROM_START + MEMORY_ROM_SIZE { if address >= MEMORY_ROM_START && address < MEMORY_ROM_START + MEMORY_ROM_SIZE {
self.rom().get(address - MEMORY_ROM_START) self.rom().get(address - MEMORY_ROM_START).map(|value| *value)
} else { } else {
self.ram().get(address - MEMORY_RAM_START) self.ram().get(address - MEMORY_RAM_START).map(|value| *value)
};
match result {
Some(value) => {
*value
}
None => {
error(&format!("attempting to read from unmapped physical memory address: {:#010X}", address));
}
} }
} }
pub fn read_16(&mut self, address: u32) -> u16 { pub fn read_opt_16(&mut self, address: u32) -> Option<u16> {
(self.read_8(address) as u16) | Some(
(self.read_8(address + 1) as u16) << 8 (self.read_opt_8(address)? as u16) |
(self.read_opt_8(address + 1)? as u16) << 8
)
} }
pub fn read_32(&mut self, address: u32) -> u32 { pub fn read_opt_32(&mut self, address: u32) -> Option<u32> {
(self.read_8(address) as u32) | Some(
(self.read_8(address + 1) as u32) << 8 | (self.read_opt_8(address)? as u32) |
(self.read_8(address + 2) as u32) << 16 | (self.read_opt_8(address + 1)? as u32) << 8 |
(self.read_8(address + 3) as u32) << 24 (self.read_opt_8(address + 2)? as u32) << 16 |
(self.read_opt_8(address + 3)? as u32) << 24
)
}
pub fn read_8(&mut self, onfault: &JumpEnv<Exception>, address: u32) -> u8 {
self.read_opt_8(address).unwrap_or_else(|| unsafe { longjmp(onfault, Exception::PageFaultRead(address)) })
}
pub fn read_16(&mut self, onfault: &JumpEnv<Exception>, address: u32) -> u16 {
self.read_opt_16(address).unwrap_or_else(|| unsafe { longjmp(onfault, Exception::PageFaultRead(address)) })
}
pub fn read_32(&mut self, onfault: &JumpEnv<Exception>, address: u32) -> u32 {
self.read_opt_32(address).unwrap_or_else(|| unsafe { longjmp(onfault, Exception::PageFaultRead(address)) })
} }
pub fn write_8(&mut self, mut address: u32, byte: u8) { pub fn write_8(&mut self, mut address: u32, byte: u8) {
let mut writable = true; let mut writable = true;
if *self.mmu_enabled() { if *self.mmu_enabled() {
(address, writable) = self.virtual_to_physical(address as u32).unwrap_or_else(|| { (address, writable) = self.virtual_to_physical(address as u32).unwrap_or_else(|| {
self.exception_sender().send(Exception::PageFault(address)).unwrap(); self.exception_sender().send(Exception::PageFaultWrite(address)).unwrap();
(0, false) (0, false)
}); });
} }
@ -231,7 +251,7 @@ impl Memory {
} }
} }
} else { } else {
self.exception_sender().send(Exception::PageFault(address)).unwrap(); self.exception_sender().send(Exception::PageFaultWrite(address)).unwrap();
} }
} }
pub fn write_16(&mut self, address: u32, half: u16) { pub fn write_16(&mut self, address: u32, half: u16) {

56
src/setjmp.rs Normal file
View File

@ -0,0 +1,56 @@
// setjmp.rs
use std::os::raw::*;
mod internal {
    use std::cell::*;
    use std::os::raw::*;

    extern "C" {
        /// C `setjmp`: saves the calling environment into `env`. Returns 0 on
        /// the direct call and nonzero when re-entered via `longjmp`.
        // NOTE(review): calling setjmp through plain FFI (stable Rust cannot
        // express a `returns_twice` attribute) is formally undefined behavior;
        // it works in practice on mainstream platforms but keep it under review.
        pub fn setjmp(env: *mut c_void) -> c_int;
        /// C `longjmp`: restores the environment saved in `env`; never returns.
        pub fn longjmp(env: *mut c_void, status: c_int) -> !;
    }

    /// Backing storage for a C `jmp_buf`. 512 bytes comfortably oversizes the
    /// `jmp_buf` of every mainstream platform (glibc x86_64 uses 200 bytes).
    // NOTE(review): `[u8; 512]` itself has alignment 1 while a real `jmp_buf`
    // is an array of machine words; in practice the bytes land word-aligned
    // because they sit inside a heap-allocated `RefCell` (whose borrow flag is
    // an `isize`), but that is incidental — TODO: confirm or add explicit
    // alignment.
    pub type JumpBuf = [u8; 512];

    /// A saved jump target plus a slot that carries a value of type `T` from
    /// the `longjmp` site back to the `setjmp` site.
    pub struct JumpEnv<T> {
        // Boxed so the buffer keeps a stable heap address even if the
        // `JumpEnv` value is moved; `longjmp` must find the exact bytes that
        // `setjmp` wrote.
        buffer: Box<RefCell<JumpBuf>>,
        // Value handed across the non-local jump; `Cell` allows setting it
        // through a shared reference.
        value: Cell<Option<T>>,
    }

    impl<T> JumpEnv<T> {
        /// Creates an environment with a zeroed jump buffer and no value.
        pub fn new() -> Self {
            Self {
                buffer: Box::new(RefCell::new([0u8; 512])),
                value: Cell::default(),
            }
        }

        /// Mutably borrows the raw jump buffer for passing to the C calls.
        pub fn buffer(&self) -> RefMut<JumpBuf> {
            self.buffer.borrow_mut()
        }

        /// Stores the value to be delivered at the `setjmp` site.
        pub fn value_set(&self, value: T) {
            self.value.set(Some(value));
        }

        /// Takes the delivered value, leaving `None` behind.
        pub fn value_take(&self) -> Option<T> {
            self.value.take()
        }
    }

    impl<T> Default for JumpEnv<T> {
        /// Equivalent to [`JumpEnv::new`] (satisfies clippy's
        /// `new_without_default`).
        fn default() -> Self {
            Self::new()
        }
    }
}
pub use internal::JumpEnv;
/// Saves the current execution environment into `env`.
///
/// Returns `None` on the direct call; when control later re-enters here via
/// [`longjmp`], returns `Some` with the value the jumper stored.
///
/// # Safety
/// The caller's frame must still be live when the matching `longjmp` fires,
/// and any Rust frames jumped over will NOT run their destructors.
/// NOTE(review): FFI `setjmp` without a `returns_twice` attribute is formally
/// UB in Rust — confirm this is acceptable for the supported targets.
pub unsafe fn setjmp<T>(env: &JumpEnv<T>) -> Option<T> {
// The temporary `RefMut` from `env.buffer()` is dropped at the end of this
// statement on the direct return, and dropped AGAIN when the frame is
// re-entered via longjmp. NOTE(review): that second drop appears to cancel
// the borrow leaked at the longjmp call site, keeping the RefCell borrow flag
// balanced — verify this invariant before restructuring either function.
if internal::setjmp(env.buffer().as_mut_ptr() as *mut c_void) != 0 {
// Nonzero return: we arrived here via longjmp — deliver the carried value.
env.value_take()
} else {
// Zero return: direct call; environment saved, nothing to deliver.
None
}
}
/// Stores `value` into `env` and jumps back to the matching [`setjmp`] call,
/// which then returns `Some(value)`. This function never returns.
///
/// # Safety
/// `setjmp` must have been called on this `env` first and its stack frame
/// must still be live; destructors of frames between here and the `setjmp`
/// site are not run.
pub unsafe fn longjmp<T>(env: &JumpEnv<T>, value: T) -> ! {
// Park the payload where the setjmp site will pick it up after the jump.
env.value_set(value);
// The `RefMut` taken here is never dropped (longjmp does not return).
// NOTE(review): the mutable borrow it leaks appears to be balanced by the
// second drop of the RefMut temporary at the re-entered setjmp site —
// verify before restructuring.
internal::longjmp(env.buffer().as_mut_ptr() as *mut c_void, 1)
}