very incomplete memory rework

Brooke Vibber 2025-12-30 17:42:20 -08:00
commit aadedf4863


@@ -4,50 +4,57 @@ use std::vec::Vec;
 const PAGE_BITS: usize = 12;
 const PAGE_SIZE: usize = 2 << PAGE_BITS;
+const TABLE_BITS: i64 = 9;
+const TABLE_SIZE: i64 = 2 << TABLE_BITS;
+const SV39_PAGE_BITS: i64 = 9 + 9 + 9;
 
 // Actively used bits in our limited implementation:
-const BIT_V: u8 = 1;
-const BIT_R: u8 = 2;
-const BIT_W: u8 = 4;
-const BIT_X: u8 = 8;
+const BIT_V: i64 = 1;
+const BIT_R: i64 = 2;
+const BIT_W: i64 = 4;
+const BIT_X: i64 = 8;
 // (Not used) - user mode accessible
-const BIT_U: u8 = 16;
+const BIT_U: i64 = 16;
+
+/*
 // (Not used) - global mapping
-const BIT_G: u8 = 32;
+const BIT_G: i64 = 32;
 // (Not presently used) accessed -- set if we've touched this since last time A cleared
-const BIT_A: u8 = 64;
-// (Not presently used)dirty -- written since last time D cleared
+const BIT_A: i64 = 64;
+// (Not presently used) dirty -- written since last time D cleared
 // Note each core/hart has its own dirty state for local JIT
-const BIT_D: u8 = 128;
-// (Not used or room for it) reserved for supervisor
+const BIT_D: i64 = 128;
+// (Not used) reserved for supervisor
 const BITS_RSW_LO: i64 = 256;
 const BITS_RSW_HI: i64 = 512;
 const BITS_RSW: i64 = BITS_RSW_LO | BITS_RSW_HI;
+*/
 
 #[repr(C)]
 #[derive(Clone, Copy)]
 struct PageTableEntry {
-    flags: u8
+    flags: i64
 }
 
 impl PageTableEntry {
-    fn new(flags: u8) -> Self {
+    fn new(flags: i64) -> Self {
         return Self {
             flags
         }
     }
 
-    fn as_u8(&self) -> u8 {
+    fn as_i64(&self) -> i64 {
         return self.flags;
     }
 
+    fn is_leaf(&self) -> bool {
+        return self.flags & (BIT_V | BIT_R | BIT_W | BIT_X) == BIT_V;
+    }
+
     fn is_valid(&self) -> bool {
         return self.flags & BIT_V == BIT_V;
     }
@@ -64,6 +71,14 @@ impl PageTableEntry {
         return self.flags & (BIT_V | BIT_R | BIT_X) == (BIT_V | BIT_R | BIT_X);
     }
 
+    fn get_physical_page(&self) -> i64 {
+        return (self.flags << 10) >> 20;
+    }
+
+    fn get_physical(&self) -> i64 {
+        return self.get_physical_page() << PAGE_BITS;
+    }
+
 }
 
 type ExecutorFunc = fn(i64, &mut CoreState, &mut MachineState) -> i64;
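
Note on the new get_physical_page helper above: in the Sv39 PTE layout the low 10 bits are flag bits and bits 10 through 53 hold the physical page number, so shifting left by 10 and then right by 20 isolates the PPN. The standalone sketch below (not part of the commit) walks through the same trick; the names ppn_of and physical_base are invented for the example, and it casts to u64 so the right shift stays logical.

// Standalone illustration: extracting the PPN from an Sv39-style PTE held in
// an i64, assuming flags occupy bits 0..=9 and the PPN occupies bits 10..=53.
const PAGE_BITS: u32 = 12;

fn ppn_of(pte: i64) -> i64 {
    // Left-shift by 10 drops the reserved top bits (54..=63); the following
    // right-shift by 20 also drops the 10 flag bits, leaving the 44-bit PPN.
    ((pte as u64) << 10 >> 20) as i64
}

fn physical_base(pte: i64) -> i64 {
    // The physical address of the page is the PPN scaled by the page size.
    ppn_of(pte) << PAGE_BITS
}

fn main() {
    // A made-up PTE: PPN 0x1234 with V|R|X flags (1 | 2 | 8).
    let pte: i64 = (0x1234 << 10) | 0xb;
    assert_eq!(ppn_of(pte), 0x1234);
    assert_eq!(physical_base(pte), 0x1234 << 12);
    println!("ppn = {:#x}, base = {:#x}", ppn_of(pte), physical_base(pte));
}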
@@ -108,19 +123,19 @@ impl MachineState {
         return Self::new_with_state(memory, pages);
     }
 
-    fn get_page_table_entry(&self, page: usize) -> PageTableEntry {
-        if page < self.pages.len() {
-            return self.pages[page];
+    fn get_page_table_entry(&self, address: i64) -> PageTableEntry {
+        if (address as u64) < (self.memory.len() as u64) {
+            return PageTableEntry::new(self.ld_physical(address as usize));
         } else {
-            return PageTableEntry::new(0);
+            panic!("xxx");
         }
     }
 
-    fn set_page_table_entry(&mut self, page: usize, entry: PageTableEntry) {
-        if page < self.pages.len() {
-            self.pages[page] = entry;
+    fn set_page_table_entry(&mut self, address: i64, entry: PageTableEntry) {
+        if (address as u64) < (self.memory.len() as u64) {
+            self.sd_physical(address as usize, entry.as_i64());
         } else {
-            panic!("@fixme: handle attempts to expand address space");
+            panic!("xxx");
         }
     }
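
With this hunk, page-table entries live in guest physical memory and are reached through ld_physical/sd_physical rather than a host-side pages vector. As a rough illustration of that access pattern, here is a minimal sketch assuming a flat little-endian Vec<u8> memory; the helpers ld64_le and sd64_le are invented names, not the commit's ld_physical/sd_physical, whose definitions are outside this diff.

// Standalone illustration: reading and writing an 8-byte PTE at a byte
// address inside a flat little-endian guest memory.
fn ld64_le(memory: &[u8], address: usize) -> i64 {
    let mut bytes = [0u8; 8];
    bytes.copy_from_slice(&memory[address..address + 8]);
    i64::from_le_bytes(bytes)
}

fn sd64_le(memory: &mut [u8], address: usize, value: i64) {
    memory[address..address + 8].copy_from_slice(&value.to_le_bytes());
}

fn main() {
    let mut memory = vec![0u8; 4096];
    // Store a PTE (valid + readable = 0b11, PPN = 2) into a table at base
    // 0x100, entry index 3; each entry is 8 bytes, hence index << 3.
    let table_base = 0x100usize;
    let index = 3usize;
    let pte: i64 = (2 << 10) | 0b11;
    sd64_le(&mut memory, table_base + (index << 3), pte);
    assert_eq!(ld64_le(&memory, table_base + (index << 3)), pte);
}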
@@ -213,6 +228,7 @@ impl MachineState {
 struct CoreState {
     // Integer registers
     x: [i64; 32],
+    satp_ppn: i64,
 
     // Do we need pc? we're passing it around as active state
@@ -257,6 +273,7 @@ impl CoreState {
             0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0
         ],
+        satp_ppn: 0,
         f: [
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
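
The new satp_ppn field presumably caches the page-table base taken from the satp CSR. For reference, on RV64 satp packs MODE into bits 63:60 (8 selects Sv39), ASID into bits 59:44, and the root-table PPN into bits 43:0. A standalone sketch of that split, with satp_fields as an invented name:

// Standalone illustration: decoding an RV64 satp value into its fields.
fn satp_fields(satp: u64) -> (u64, u64, u64) {
    let mode = satp >> 60;                   // bits 63..=60
    let asid = (satp >> 44) & 0xffff;        // bits 59..=44
    let ppn = satp & ((1u64 << 44) - 1);     // bits 43..=0
    (mode, asid, ppn)
}

fn main() {
    // Made-up satp: Sv39 mode, ASID 1, root page table at PPN 0x80042.
    let satp = (8u64 << 60) | (1u64 << 44) | 0x80042;
    assert_eq!(satp_fields(satp), (8, 1, 0x80042));
}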
@@ -269,17 +286,53 @@ impl CoreState {
         }
     }
 
-    fn lb(&self, machine: &MachineState, address: i64) -> i64 {
-        let raw_page = (address as u64) >> PAGE_BITS;
-        if raw_page < machine.pages.len() as u64 {
-            let addr = address as usize;
-            let page = raw_page as usize;
-            let entry = machine.get_page_table_entry(page);
-            if entry.is_readable() {
-                return machine.lb_physical(address as usize);
-            }
-        }
-        panic!("@fixme: trap on read failure on page");
+    fn trap(&mut self, machine: &mut MachineState, pc: i64) -> i64 {
+        panic!("@fixme implement traps");
+    }
+
+    fn get_page_table_entry(&mut self, machine: &mut MachineState, page: i64) -> PageTableEntry {
+        return machine.get_page_table_entry(page)
+    }
+
+    fn dereference(&mut self, machine: &mut MachineState, address: i64) -> (PageTableEntry, usize) {
+        let raw = (address as u64) as i64;
+        let high = ((address as u64) >> 39) as i64;
+        let vpn2 = (((address as u64) << (64 - 39)) >> (64 - 9)) as i64;
+        let vpn1 = (((address as u64) << (64 - 39 + 9)) >> (64 - 9 - 9)) as i64;
+        let vpn0 = (((address as u64) << (64 - 39 + 9 + 9)) >> (64 - 9 - 9)) as i64;
+        if high != 0 {
+            panic!("@fixme: handle trap for kernel address space or invalid SV39 address");
+        }
+
+        let pte2 = machine.get_page_table_entry(self.satp_ppn + (vpn2 << 3));
+        if !pte2.is_leaf() {
+            let pte1 = machine.get_page_table_entry(pte2.get_physical() + (vpn1 << 3));
+            if !pte1.is_leaf() {
+                let pte0 = machine.get_page_table_entry(pte1.get_physical() + (vpn0 << 3));
+                if pte0.is_leaf() {
+                    // 4 KiB pages
+                    let shift = 64 - 12;
+                    return (pte0, (raw << shift >> shift) as usize);
+                } else {
+                    panic!("@fixme: trap on leaf node when we're out of levels");
+                }
+            } else {
+                // 2 MiB megapages
+                let shift = 64 - 12 - 9;
+                return (pte1, (raw << shift >> shift) as usize);
+            }
+        } else {
+            // 1 GiB gigapages
+            let shift = 64 - 12 - 9 - 9;
+            return (pte2, (raw << shift >> shift) as usize);
+        }
+        panic!("@fixme: handle unexpected non-leaf node");
+    }
+
+    fn lb(&mut self, machine: &mut MachineState, pc: i64, rd: usize, rs1: usize, imm: i32) -> i64 {
+        //...
+        panic!("@fixme");
     }
 
     fn lbu(&self, machine: &MachineState, address: i64) -> i64 {
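
The dereference walk above peels one 9-bit virtual page number off per level, keeping a 12-bit offset for 4 KiB pages (21 or 30 offset bits for megapages and gigapages). The standalone sketch below (not part of the commit) shows the Sv39 address split with plain shifts and masks plus a small worked example; sv39_split is an invented name.

// Standalone illustration: splitting an Sv39 virtual address into the three
// 9-bit VPN fields and the 12-bit page offset used by a three-level walk.
fn sv39_split(va: u64) -> (u64, u64, u64, u64) {
    let offset = va & 0xfff;          // bits 11..=0
    let vpn0 = (va >> 12) & 0x1ff;    // bits 20..=12
    let vpn1 = (va >> 21) & 0x1ff;    // bits 29..=21
    let vpn2 = (va >> 30) & 0x1ff;    // bits 38..=30
    (vpn2, vpn1, vpn0, offset)
}

fn main() {
    // Build a made-up Sv39 address from chosen field values.
    let va: u64 = (1 << 30) | (0x101 << 21) | (0x180 << 12) | 0xabc;
    assert_eq!(sv39_split(va), (1, 0x101, 0x180, 0xabc));
    // Each VPN indexes a 512-entry table of 8-byte PTEs, so the byte offset
    // of an entry within its table is vpn << 3.
    assert_eq!(sv39_split(va).0 << 3, 8);
}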