more compact page tables, just need 8 bits per 4 KiB
This commit is contained in:
parent e885d2633e
commit 1e46ee1009
1 changed file with 36 additions and 21 deletions
57 src/main.rs
@@ -1,45 +1,47 @@
 use std::collections::HashMap;
 use std::vec::Vec;
 
-const PAGE_SIZE: usize = 4096;
+const PAGE_BITS: usize = 12;
+const PAGE_SIZE: usize = 1 << PAGE_BITS;
 
 // Actively used bits in our limited implementation:
-const BIT_V: i64 = 1;
-const BIT_R: i64 = 2;
-const BIT_W: i64 = 4;
-const BIT_X: i64 = 8;
+const BIT_V: u8 = 1;
+const BIT_R: u8 = 2;
+const BIT_W: u8 = 4;
+const BIT_X: u8 = 8;
 
 // (Not used) - user mode accessible
-const BIT_U: i64 = 16;
+const BIT_U: u8 = 16;
 
 // (Not used) - global mapping
-const BIT_G: i64 = 32;
+const BIT_G: u8 = 32;
 
 // (Not presently used) accessed -- set if we've touched this since last time A cleared
-const BIT_A: i64 = 64;
+const BIT_A: u8 = 64;
 
 // (Will be used for JIT on writes to executable pages) dirty -- written since last time D cleared
-const BIT_D: i64 = 128;
+const BIT_D: u8 = 128;
 
 // (Not used)
 /*
 // (Not used) reserved for supervisor
 const BITS_RSW_LO: i64 = 256;
 const BITS_RSW_HI: i64 = 512;
 const BITS_RSW: i64 = BITS_RSW_LO | BITS_RSW_HI;
 
 */
 
 type ExecutorFunc = fn(i64, &mut CoreState, &mut MachineState) -> i64;
 
 #[repr(C)]
 struct MachineState {
     memory: Vec<u8>,
-    pages: Vec<u32>,
+    pages: Vec<u8>,
 }
 
 /**
  * Note that physical memory accessors can traverse page boundaries;
  * we lay out linear memory from 0 to +4 gigabytes and will allocate
  * as many page table entries as are needed to cover RAM. These eat
- * up an extra 4 bytes per 4 kilobytes of address space used, initially
+ * up an extra 1 byte per 4 kilobytes of address space used, initially
  * allocating enough for all physical memory initially allocated.
  *
  * This will be relatively space-inefficient for sparse address spaces
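With the entries narrowed to u8, all eight flag bits fit in one byte per 4 KiB page. A minimal standalone sketch of how such flags compose and test (it reuses the constant names from the diff; the is_writable helper is hypothetical, not part of this commit):

// Sketch: one byte of permission flags per page, mirroring the constants above.
const BIT_V: u8 = 1; // valid
const BIT_R: u8 = 2; // readable
const BIT_W: u8 = 4; // writable
const BIT_X: u8 = 8; // executable

// Hypothetical helper: a page is writable only if it is valid and has W set.
fn is_writable(entry: u8) -> bool {
    entry & (BIT_V | BIT_W) == (BIT_V | BIT_W)
}

fn main() {
    let rw = BIT_V | BIT_R | BIT_W; // valid, readable, writable
    let ro = BIT_V | BIT_R;         // valid, read-only
    assert!(is_writable(rw));
    assert!(!is_writable(ro));
    println!("rw = {rw:#010b}, ro = {ro:#010b}");
}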
@@ -50,11 +52,11 @@ struct MachineState {
  * page tables, even if running on different threads.
  */
 impl MachineState {
-    fn restore(memory: Vec<u8>, pages: Vec<u32>) -> Self {
-        if ((memory.len() >> 12) << 12) != memory.len() {
+    fn restore(memory: Vec<u8>, pages: Vec<u8>) -> Self {
+        if ((memory.len() >> PAGE_BITS) << PAGE_BITS) != memory.len() {
             panic!("memory size must be a multiple of 4096 bytes");
         }
-        if ((memory.len() >> 12)) != pages.len() {
+        if ((memory.len() >> PAGE_BITS)) != pages.len() {
             panic!("page data is wrong length for memory size");
         }
         return Self {
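The restore() checks above boil down to two invariants: the memory length is a whole number of pages, and there is exactly one page-table byte per page of memory. A small sketch of those relationships under the new one-byte entries (assumes PAGE_BITS = 12 as in the diff; not code from the commit):

// Sketch: size invariants enforced by restore() with one u8 entry per 4 KiB page.
const PAGE_BITS: usize = 12;
const PAGE_SIZE: usize = 1 << PAGE_BITS;

fn main() {
    let memory_size = 4 * PAGE_SIZE;                 // 16 KiB of simulated RAM
    let memory = vec![0u8; memory_size];
    let pages = vec![0u8; memory_size >> PAGE_BITS]; // 4 page-table bytes

    // memory length must be a multiple of the page size...
    assert_eq!((memory.len() >> PAGE_BITS) << PAGE_BITS, memory.len());
    // ...and the page table must have exactly one byte per page.
    assert_eq!(memory.len() >> PAGE_BITS, pages.len());

    println!("{} bytes of RAM -> {} page-table bytes", memory.len(), pages.len());
}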
@@ -65,16 +67,26 @@ impl MachineState {
 
     fn new(memory_size: usize) -> Self {
         let memory = vec![0u8; memory_size];
-        let pages= vec![0u32; memory_size >> 12];
+        let pages = vec![0u8; memory_size >> PAGE_BITS];
         return Self::restore(memory, pages);
     }
 
-    fn get_page_info(&mut self, address: usize) -> u32 {
-        return self.pages[address >> 12];
+    fn get_page_table_entry(&mut self, address: usize) -> u32 {
+        let page = address >> PAGE_BITS;
+        if page < self.pages.len() {
+            return self.pages[page] as u32;
+        } else {
+            return 0;
+        }
     }
 
-    fn set_page_info(&mut self, address: usize, value: u32) {
-        self.pages[address >> 12] = value;
+    fn set_page_table_entry(&mut self, address: usize, bits: u32) {
+        let page = address >> PAGE_BITS;
+        if page < self.pages.len() {
+            self.pages[page] = bits as u8;
+        } else {
+            panic!("@fixme: handle attempts to expand address space");
+        }
     }
 
     fn lb_physical(&mut self, address: usize) -> i64 {
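The accessors keep u32 in their signatures while storing only a byte, so callers are unchanged but anything above the low eight bits is dropped on store. A self-contained sketch of that pattern with the same page-index arithmetic (the standalone Pages struct here is illustrative only, not from the commit):

// Sketch: u8 storage behind u32 accessors, with bounds-checked page indexing.
const PAGE_BITS: usize = 12;

struct Pages {
    pages: Vec<u8>,
}

impl Pages {
    fn get_page_table_entry(&self, address: usize) -> u32 {
        let page = address >> PAGE_BITS;
        if page < self.pages.len() { self.pages[page] as u32 } else { 0 }
    }

    fn set_page_table_entry(&mut self, address: usize, bits: u32) {
        let page = address >> PAGE_BITS;
        if page < self.pages.len() {
            self.pages[page] = bits as u8; // bits above the low byte are discarded
        } else {
            panic!("page {page} out of range");
        }
    }
}

fn main() {
    let mut p = Pages { pages: vec![0u8; 4] }; // 4 pages = 16 KiB of address space
    p.set_page_table_entry(0x1234, 0b0111);    // address in page 1: V | R | W
    assert_eq!(p.get_page_table_entry(0x1000), 0b0111);
    assert_eq!(p.get_page_table_entry(0x10_0000), 0); // beyond the table reads as 0
    println!("ok");
}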
@@ -157,6 +169,9 @@ impl MachineState {
         self.memory[address + 7] = (value >> 56) as u8;
     }
 
+    fn store_buffer_physical(&mut self, address: usize, bytes: &[u8]) {
+        self.memory[address..address + bytes.len()].copy_from_slice(bytes);
+    }
 }
 
 #[repr(C)]
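Because store_buffer_physical copies into the flat memory Vec, a single write may cross a 4 KiB page boundary with no per-page handling, which is what the doc comment above means by accessors traversing page boundaries. A tiny standalone sketch of that copy (sizes chosen for illustration; not code from the commit):

// Sketch: a flat copy_from_slice write that straddles two 4 KiB pages.
fn main() {
    let mut memory = vec![0u8; 2 * 4096];
    let bytes = [0xAAu8; 8];
    let address = 4096 - 4; // last 4 bytes of page 0, first 4 bytes of page 1
    memory[address..address + bytes.len()].copy_from_slice(&bytes);
    assert_eq!(memory[4095], 0xAA);
    assert_eq!(memory[4096], 0xAA);
    println!("write crossed the page boundary as expected");
}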