almost got out-of-memory test working

Signed-off-by: Sean Cross <sean@xobs.io>
Sean Cross 2024-01-03 22:19:21 +08:00
parent 4b2e4b0548
commit abef5b7db3
10 changed files with 408 additions and 509 deletions

Cargo.lock (generated)

@@ -13,6 +13,14 @@ dependencies = [
  "scroll",
 ]
 
+[[package]]
+name = "jurubas"
+version = "0.1.0"
+dependencies = [
+ "goblin",
+ "riscv-cpu",
+]
+
 [[package]]
 name = "log"
 version = "0.4.20"
@@ -47,14 +55,6 @@ dependencies = [
 name = "riscv-cpu"
 version = "0.1.0"
 
-[[package]]
-name = "rouns"
-version = "0.1.0"
-dependencies = [
- "goblin",
- "riscv-cpu",
-]
-
 [[package]]
 name = "scroll"
 version = "0.11.0"

Cargo.toml

@@ -1,12 +1,10 @@
 workspace = { members = ["crates/riscv-cpu"] }
 
 [package]
-name = "rouns"
+name = "jurubas"
 version = "0.1.0"
 edition = "2021"
 
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
 [dependencies]
 riscv-cpu = { path = "crates/riscv-cpu" }
 goblin = { version = "0.7.1", features = [ "std", "elf32", "alloc" ]}

@@ -1,4 +1,4 @@
-use std::sync::{mpsc::Receiver, Arc, RwLock};
+use std::sync::{mpsc::Receiver, Arc, Mutex};
 
 pub use super::mmu::Memory;
 use super::mmu::{AddressingMode, Mmu};
@@ -54,10 +54,12 @@ pub const MIP_SEIP: u64 = 0x200;
 const MIP_STIP: u64 = 0x020;
 const MIP_SSIP: u64 = 0x002;
 
+pub type ResponseData = ([i64; 8], Option<(Vec<u8>, u64)>);
+
 pub enum TickResult {
     Ok,
     ExitThread(u64),
-    PauseEmulation(Receiver<([i64; 8], Option<(Vec<u8>, u64)>)>),
+    PauseEmulation(Receiver<ResponseData>),
     CpuTrap(Trap),
 }
@@ -74,9 +76,8 @@ pub struct Cpu {
     pc: u64,
     csr: [u64; CSR_CAPACITY],
     mmu: Mmu,
-    memory: Arc<RwLock<dyn Memory + Send + Sync>>,
-    reservation: u64, // @TODO: Should support multiple address reservations
-    is_reservation_set: bool,
+    memory: Arc<Mutex<dyn Memory + Send + Sync>>,
+    reservation: Option<u64>, // @TODO: Should support multiple address reservations
     _dump_flag: bool,
     // decode_cache: DecodeCache,
     unsigned_data_mask: u64,
@@ -127,7 +128,7 @@ pub enum TrapType {
     UserExternalInterrupt,
     SupervisorExternalInterrupt,
     MachineExternalInterrupt,
-    PauseEmulation(std::sync::mpsc::Receiver<([i64; 8], Option<(Vec<u8>, u64)>)>),
+    PauseEmulation(Receiver<ResponseData>),
 }
 
 fn _get_privilege_mode_name(mode: &PrivilegeMode) -> &'static str {
@@ -225,11 +226,11 @@ pub struct CpuBuilder {
     xlen: Xlen,
     pc: u64,
     sp: u64,
-    memory: Arc<RwLock<dyn Memory + Send + Sync>>,
+    memory: Arc<Mutex<dyn Memory + Send + Sync>>,
 }
 
 impl CpuBuilder {
-    pub fn new(memory: Arc<RwLock<dyn Memory + Send + Sync>>) -> Self {
+    pub fn new(memory: Arc<Mutex<dyn Memory + Send + Sync>>) -> Self {
         CpuBuilder {
             xlen: Xlen::Bit64,
             memory,
@@ -266,7 +267,7 @@ impl Cpu {
     ///
     /// # Arguments
     /// * `Terminal`
-    pub fn new(memory: Arc<RwLock<dyn Memory + Send + Sync>>) -> Self {
+    pub fn new(memory: Arc<Mutex<dyn Memory + Send + Sync>>) -> Self {
         Cpu {
             clock: 0,
             xlen: Xlen::Bit64,
@@ -277,8 +278,7 @@ impl Cpu {
             pc: 0,
             csr: [0; CSR_CAPACITY],
             mmu: Mmu::new(Xlen::Bit64, memory.clone()),
-            reservation: 0,
-            is_reservation_set: false,
+            reservation: None,
             _dump_flag: false,
             // decode_cache: DecodeCache::new(),
             unsigned_data_mask: 0xffffffffffffffff,
@@ -563,11 +563,6 @@ impl Cpu {
         }
     }
 
-    fn handle_exception(&mut self, exception: Trap, instruction_address: u64) {
-        println!("!!! Exception Trap !!!: {:x?}", exception);
-        self.handle_trap(exception, instruction_address, false);
-    }
-
     fn handle_trap(&mut self, trap: Trap, instruction_address: u64, is_interrupt: bool) -> bool {
         let current_privilege_encoding = get_privilege_encoding(&self.privilege_mode) as u64;
         let cause = get_trap_cause(&trap, &self.xlen);
@@ -2448,7 +2443,7 @@ const INSTRUCTIONS: [Instruction; INSTRUCTION_NUM] = [
             for (src, dest) in cpu.x[10..].iter().zip(args.iter_mut()) {
                 *dest = *src;
             }
-            match cpu.memory.write().unwrap().syscall(args) {
+            match cpu.memory.lock().unwrap().syscall(args) {
                 super::mmu::SyscallResult::Ok(result) => {
                     for (src, dest) in result.iter().zip(cpu.x[10..].iter_mut()) {
                         *dest = *src;
@@ -2938,14 +2933,11 @@ const INSTRUCTIONS: [Instruction; INSTRUCTION_NUM] = [
         operation: |cpu, word, _address| {
             let f = parse_format_r(word);
             // @TODO: Implement properly
-            cpu.x[f.rd] = match cpu.mmu.load_doubleword(cpu.x[f.rs1] as u64) {
-                Ok(data) => {
-                    cpu.is_reservation_set = true;
-                    cpu.reservation = cpu.x[f.rs1] as u64; // Is virtual address ok?
-                    data as i64
-                }
-                Err(e) => return Err(e),
-            };
+            let address = cpu.x[f.rs1] as u64;
+            cpu.x[f.rd] = cpu.mmu.load_doubleword(address)? as i64;
+            if cpu.mmu.reserve(address) {
+                cpu.reservation = Some(address);
+            }
             Ok(())
         },
         disassemble: dump_format_r,
@@ -2957,14 +2949,11 @@ const INSTRUCTIONS: [Instruction; INSTRUCTION_NUM] = [
         operation: |cpu, word, _address| {
             let f = parse_format_r(word);
             // @TODO: Implement properly
-            cpu.x[f.rd] = match cpu.mmu.load_word(cpu.x[f.rs1] as u64) {
-                Ok(data) => {
-                    cpu.is_reservation_set = true;
-                    cpu.reservation = cpu.x[f.rs1] as u64; // Is virtual address ok?
-                    data as i32 as i64
-                }
-                Err(e) => return Err(e),
-            };
+            let address = cpu.x[f.rs1] as u64;
+            cpu.x[f.rd] = cpu.mmu.load_word(address)? as i64;
+            if cpu.mmu.reserve(address) {
+                cpu.reservation = Some(address);
+            }
             Ok(())
         },
         disassemble: dump_format_r,
@@ -3223,19 +3212,14 @@ const INSTRUCTIONS: [Instruction; INSTRUCTION_NUM] = [
         operation: |cpu, word, _address| {
             let f = parse_format_r(word);
             // @TODO: Implement properly
-            cpu.x[f.rd] = match cpu.is_reservation_set && cpu.reservation == (cpu.x[f.rs1] as u64) {
-                true => match cpu
-                    .mmu
-                    .store_doubleword(cpu.x[f.rs1] as u64, cpu.x[f.rs2] as u64)
-                {
-                    Ok(()) => {
-                        cpu.is_reservation_set = false;
-                        0
-                    }
-                    Err(e) => return Err(e),
-                },
-                false => 1,
-            };
+            let address = cpu.x[f.rs1] as u64;
+            if Some(address) == cpu.reservation.take() {
+                cpu.mmu.store_doubleword(address, cpu.x[f.rs2] as u64)?;
+                cpu.mmu.clear_reservation(address);
+                cpu.x[f.rd] = 0;
+                return Ok(());
+            }
+            cpu.x[f.rd] = 1;
             Ok(())
         },
         disassemble: dump_format_r,
@@ -3247,16 +3231,14 @@ const INSTRUCTIONS: [Instruction; INSTRUCTION_NUM] = [
         operation: |cpu, word, _address| {
             let f = parse_format_r(word);
             // @TODO: Implement properly
-            cpu.x[f.rd] = match cpu.is_reservation_set && cpu.reservation == (cpu.x[f.rs1] as u64) {
-                true => match cpu.mmu.store_word(cpu.x[f.rs1] as u64, cpu.x[f.rs2] as u32) {
-                    Ok(()) => {
-                        cpu.is_reservation_set = false;
-                        0
-                    }
-                    Err(e) => return Err(e),
-                },
-                false => 1,
-            };
+            let address = cpu.x[f.rs1] as u64;
+            if Some(address) == cpu.reservation.take() {
+                cpu.mmu.clear_reservation(address);
+                cpu.mmu.store_word(address, cpu.x[f.rs2] as u32)?;
+                cpu.x[f.rd] = 0;
+                return Ok(());
+            }
+            cpu.x[f.rd] = 1;
             Ok(())
         },
         disassemble: dump_format_r,
@@ -4144,77 +4126,3 @@ mod test_cpu {
         assert_eq!(memory_base, cpu.read_pc());
     }
 }
-
-#[cfg(test)]
-mod test_decode_cache {
-    use super::*;
-
-    #[test]
-    fn initialize() {
-        let _cache = DecodeCache::new();
-    }
-
-    #[test]
-    fn insert() {
-        let mut cache = DecodeCache::new();
-        cache.insert(0, 0);
-    }
-
-    #[test]
-    fn get() {
-        let mut cache = DecodeCache::new();
-        cache.insert(1, 2);
-        // Cache hit test
-        match cache.get(1) {
-            Some(index) => assert_eq!(2, index),
-            None => panic!("Unexpected cache miss"),
-        };
-        // Cache miss test
-        match cache.get(2) {
-            Some(_index) => panic!("Unexpected cache hit"),
-            None => {}
-        };
-    }
-
-    #[test]
-    fn lru() {
-        let mut cache = DecodeCache::new();
-        cache.insert(0, 1);
-        match cache.get(0) {
-            Some(index) => assert_eq!(1, index),
-            None => panic!("Unexpected cache miss"),
-        };
-        for i in 1..DECODE_CACHE_ENTRY_NUM + 1 {
-            cache.insert(i as u32, i + 1);
-        }
-        // The oldest entry should have been removed because of the overflow
-        match cache.get(0) {
-            Some(_index) => panic!("Unexpected cache hit"),
-            None => {}
-        };
-        // With this .get(), the entry with the word "1" moves to the tail of the list
-        // and the entry with the word "2" becomes the oldest entry.
-        match cache.get(1) {
-            Some(index) => assert_eq!(2, index),
-            None => {}
-        };
-        // The oldest entry with the word "2" will be removed due to the overflow
-        cache.insert(
-            DECODE_CACHE_ENTRY_NUM as u32 + 1,
-            DECODE_CACHE_ENTRY_NUM + 2,
-        );
-        match cache.get(2) {
-            Some(_index) => panic!("Unexpected cache hit"),
-            None => {}
-        };
-    }
-}
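
The LR/SC rewrite above collapses the old `reservation`/`is_reservation_set` pair into a single `Option<u64>`: `reservation.take()` reads and clears the reservation in one step, so a store-conditional consumes the reservation whether it succeeds or fails. A minimal standalone sketch of that flow (the `MiniCpu` type and its `HashMap` memory are illustrative stand-ins, not the crate's API):

/// Simplified model of the LR/SC pair as rewritten above: the reservation
/// is an Option<u64>, and `take()` clears it whether or not the SC succeeds.
struct MiniCpu {
    reservation: Option<u64>,
    memory: std::collections::HashMap<u64, u64>,
}

impl MiniCpu {
    /// LR: load the value and remember the reserved address.
    fn load_reserved(&mut self, address: u64) -> u64 {
        self.reservation = Some(address);
        *self.memory.get(&address).unwrap_or(&0)
    }

    /// SC: succeeds (returns 0) only if the reservation still matches;
    /// any outcome consumes the reservation.
    fn store_conditional(&mut self, address: u64, value: u64) -> u64 {
        if Some(address) == self.reservation.take() {
            self.memory.insert(address, value);
            0
        } else {
            1
        }
    }
}

fn main() {
    let mut cpu = MiniCpu { reservation: None, memory: Default::default() };
    let old = cpu.load_reserved(0x8000_0000);
    assert_eq!(cpu.store_conditional(0x8000_0000, old + 1), 0); // succeeds
    assert_eq!(cpu.store_conditional(0x8000_0000, old + 2), 1); // reservation gone
}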

@@ -1,5 +1,7 @@
 pub mod cpu;
-pub mod memory;
 pub mod mmu;
 
+#[cfg(test)]
+pub mod memory;
+
 pub use cpu::{Cpu, CpuBuilder, Xlen};

@@ -1,6 +1,5 @@
 /// Emulates main memory.
 pub struct Memory {
-
     /// Memory content
     data: Vec<u64>,
 }

@@ -1,13 +1,13 @@
 use std::{
-    collections::HashMap,
-    sync::{Arc, RwLock},
+    sync::mpsc::Receiver,
+    sync::{Arc, Mutex},
 };
 
-use crate::cpu::{decode_privilege_mode, PrivilegeMode, Trap, TrapType, Xlen};
+use crate::cpu::{decode_privilege_mode, PrivilegeMode, ResponseData, Trap, TrapType, Xlen};
 
 pub enum SyscallResult {
     Ok([i64; 8]),
-    Defer(std::sync::mpsc::Receiver<([i64; 8], Option<(Vec<u8>, u64)>)>),
+    Defer(Receiver<ResponseData>),
 }
@@ -16,8 +16,8 @@ impl From<[i64; 8]> for SyscallResult {
     }
 }
 
-impl From<std::sync::mpsc::Receiver<([i64; 8], Option<(Vec<u8>, u64)>)>> for SyscallResult {
-    fn from(receiver: std::sync::mpsc::Receiver<([i64; 8], Option<(Vec<u8>, u64)>)>) -> Self {
+impl From<std::sync::mpsc::Receiver<ResponseData>> for SyscallResult {
+    fn from(receiver: std::sync::mpsc::Receiver<ResponseData>) -> Self {
         SyscallResult::Defer(receiver)
     }
 }
@@ -33,6 +33,9 @@ pub trait Memory {
     fn write_u64(&mut self, p_address: u64, value: u64);
     fn validate_address(&self, address: u64) -> bool;
     fn syscall(&mut self, args: [i64; 8]) -> SyscallResult;
+    fn translate(&self, v_address: u64) -> Option<u64>;
+    fn reserve(&mut self, p_address: u64) -> bool;
+    fn clear_reservation(&mut self, p_address: u64);
 }
 
 /// Emulates Memory Management Unit. It holds the Main memory and peripheral
@@ -46,29 +49,13 @@ pub struct Mmu {
     ppn: u64,
     addressing_mode: AddressingMode,
     privilege_mode: PrivilegeMode,
-    memory: Arc<RwLock<dyn Memory + Send + Sync>>,
+    memory: Arc<Mutex<dyn Memory + Send + Sync>>,
 
     // /// The size of main memory (if initialized)
     // memory_length: Option<NonZeroU64>,
     /// Address translation can be affected `mstatus` (MPRV, MPP in machine mode)
     /// then `Mmu` has copy of it.
     mstatus: u64,
-
-    /// Address translation page cache. Experimental feature.
-    /// The cache is cleared when translation mapping can be changed;
-    /// xlen, ppn, privilege_mode, or addressing_mode is updated.
-    /// Precisely it isn't good enough because page table entries
-    /// can be updated anytime with store instructions, of course
-    /// very depending on how pages are mapped tho.
-    /// But observing all page table entries is high cost so
-    /// ignoring so far. Then this cache optimization can cause a bug
-    /// due to unexpected (meaning not in page fault handler)
-    /// page table entry update. So this is experimental feature and
-    /// disabled by default. If you want to enable, use `enable_page_cache()`.
-    page_cache_enabled: bool,
-    fetch_page_cache: HashMap<u64, u64>,
-    load_page_cache: HashMap<u64, u64>,
-    store_page_cache: HashMap<u64, u64>,
 }
 
 #[derive(Debug)]
@@ -100,7 +87,7 @@ impl Mmu {
     ///
     /// # Arguments
     /// * `xlen`
-    pub fn new(xlen: Xlen, memory: Arc<RwLock<dyn Memory + Send + Sync>>) -> Self {
+    pub fn new(xlen: Xlen, memory: Arc<Mutex<dyn Memory + Send + Sync>>) -> Self {
         Mmu {
             // clock: 0,
             xlen,
@@ -109,10 +96,6 @@ impl Mmu {
             privilege_mode: PrivilegeMode::Machine,
             memory,
             mstatus: 0,
-            page_cache_enabled: false,
-            fetch_page_cache: HashMap::default(),
-            load_page_cache: HashMap::default(),
-            store_page_cache: HashMap::default(),
         }
     }
@@ -122,37 +105,6 @@ impl Mmu {
     /// * `xlen`
     pub fn update_xlen(&mut self, xlen: Xlen) {
         self.xlen = xlen;
-        self.clear_page_cache();
-    }
-
-    // /// Initializes Main memory. This method is expected to be called only once.
-    // ///
-    // /// # Arguments
-    // /// * `capacity`
-    // pub fn init_memory(&mut self, capacity: u64) {
-    //     assert!(self.memory_length.is_none());
-    //     self.memory_length = Some(NonZeroU64::new(capacity).unwrap());
-    //     self.memory.init(capacity);
-    // }
-
-    // pub fn memory_size(&self) -> u64 {
-    //     self.memory_length.unwrap().get()
-    // }
-
-    /// Enables or disables page cache optimization.
-    ///
-    /// # Arguments
-    /// * `enabled`
-    pub fn enable_page_cache(&mut self, enabled: bool) {
-        self.page_cache_enabled = enabled;
-        self.clear_page_cache();
-    }
-
-    /// Clears page cache entries
-    fn clear_page_cache(&mut self) {
-        self.fetch_page_cache.clear();
-        self.load_page_cache.clear();
-        self.store_page_cache.clear();
     }
 
     /// Runs one cycle of MMU and peripheral devices.
@@ -164,7 +116,6 @@ impl Mmu {
     /// * `new_addressing_mode`
     pub fn update_addressing_mode(&mut self, new_addressing_mode: AddressingMode) {
         self.addressing_mode = new_addressing_mode;
-        self.clear_page_cache();
     }
 
     /// Updates privilege mode
@@ -173,7 +124,6 @@ impl Mmu {
     /// * `mode`
     pub fn update_privilege_mode(&mut self, mode: PrivilegeMode) {
         self.privilege_mode = mode;
-        self.clear_page_cache();
    }
 
     /// Updates mstatus copy. `CPU` needs to call this method whenever
@@ -191,7 +141,6 @@ impl Mmu {
     /// * `ppn`
     pub fn update_ppn(&mut self, ppn: u64) {
         self.ppn = ppn;
-        self.clear_page_cache();
     }
 
     fn trim_to_xlen(&self, address: u64) -> u64 {
@@ -426,7 +375,7 @@ impl Mmu {
     /// * `p_address` Physical address
     pub(crate) fn load_raw(&self, p_address: u64) -> u8 {
         self.memory
-            .read()
+            .lock() // .read()
             .unwrap()
             .read_u8(self.trim_to_xlen(p_address))
     }
@@ -438,7 +387,7 @@ impl Mmu {
     /// * `p_address` Physical address
     fn load_halfword_raw(&self, p_address: u64) -> u16 {
         self.memory
-            .read()
+            .lock() // .read()
             .unwrap()
             .read_u16(self.trim_to_xlen(p_address))
     }
@@ -450,7 +399,7 @@ impl Mmu {
     /// * `p_address` Physical address
     pub fn load_word_raw(&self, p_address: u64) -> u32 {
         self.memory
-            .read()
+            .lock() // .read()
             .unwrap()
             .read_u32(self.trim_to_xlen(p_address))
     }
@@ -462,7 +411,7 @@ impl Mmu {
     /// * `p_address` Physical address
     fn load_doubleword_raw(&self, p_address: u64) -> u64 {
         self.memory
-            .read()
+            .lock() // .read()
             .unwrap()
             .read_u64(self.trim_to_xlen(p_address))
     }
@@ -475,7 +424,7 @@ impl Mmu {
     /// * `value` data written
     pub(crate) fn store_raw(&self, p_address: u64, value: u8) {
         self.memory
-            .write()
+            .lock() // .write()
             .unwrap()
             .write_u8(self.trim_to_xlen(p_address), value)
     }
@@ -488,7 +437,7 @@ impl Mmu {
     /// * `value` data written
     pub(crate) fn store_halfword_raw(&self, p_address: u64, value: u16) {
         self.memory
-            .write()
+            .lock() // .write()
             .unwrap()
             .write_u16(self.trim_to_xlen(p_address), value)
     }
@@ -501,7 +450,7 @@ impl Mmu {
     /// * `value` data written
     pub(crate) fn store_word_raw(&self, p_address: u64, value: u32) {
         self.memory
-            .write()
+            .lock() // .write()
             .unwrap()
             .write_u32(self.trim_to_xlen(p_address), value)
     }
@@ -514,7 +463,7 @@ impl Mmu {
     /// * `value` data written
     fn store_doubleword_raw(&self, p_address: u64, value: u64) {
         self.memory
-            .write()
+            .lock() // .write()
             .unwrap()
             .write_u64(self.trim_to_xlen(p_address), value)
     }
@@ -529,14 +478,38 @@ impl Mmu {
             .ok()
             .map(|p_address| {
                 self.memory
-                    .write()
+                    .lock() // .read()
                     .unwrap()
                     .validate_address(self.trim_to_xlen(p_address))
             })
     }
 
+    pub fn reserve(&mut self, p_address: u64) -> bool {
+        self.memory
+            .lock() // .write()
+            .unwrap()
+            .reserve(self.trim_to_xlen(p_address))
+    }
+
+    pub fn clear_reservation(&mut self, p_address: u64) {
+        self.memory
+            .lock() // .write()
+            .unwrap()
+            .clear_reservation(self.trim_to_xlen(p_address))
+    }
+
     fn translate_address(&self, v_address: u64, access_type: &MemoryAccessType) -> Result<u64, ()> {
-        self.translate_address_with_privilege_mode(v_address, access_type, self.privilege_mode)
+        if let AddressingMode::None = self.addressing_mode {
+            Ok(v_address)
+        } else {
+            // self.memory.lock() // .read().unwrap().translate(v_address).ok_or(())
+            let phys = self.translate_address_with_privilege_mode(
+                v_address,
+                access_type,
+                self.privilege_mode,
+            )?;
+            Ok(phys)
+        }
     }
 
     fn translate_address_with_privilege_mode(
@@ -546,18 +519,6 @@ impl Mmu {
         privilege_mode: PrivilegeMode,
     ) -> Result<u64, ()> {
         let address = self.trim_to_xlen(v_address);
-        let v_page = address & !0xfff;
-        if let Some(p_page) = match self.page_cache_enabled {
-            true => match access_type {
-                MemoryAccessType::Execute => self.fetch_page_cache.get(&v_page),
-                MemoryAccessType::Read => self.load_page_cache.get(&v_page),
-                MemoryAccessType::Write => self.store_page_cache.get(&v_page),
-                MemoryAccessType::DontCare => None,
-            },
-            false => None,
-        } {
-            return Ok(p_page | (address & 0xfff));
-        }
 
         match self.addressing_mode {
             AddressingMode::None => Ok(address),
@@ -618,24 +579,6 @@ impl Mmu {
                 panic!("AddressingMode SV48 is not supported yet.");
             }
         }
-
-        // if self.page_cache_enabled {
-        //     match p_address {
-        //         Ok(p_address) => {
-        //             let p_page = p_address & !0xfff;
-        //             match access_type {
-        //                 MemoryAccessType::Execute => self.fetch_page_cache.insert(v_page, p_page),
-        //                 MemoryAccessType::Read => self.load_page_cache.insert(v_page, p_page),
-        //                 MemoryAccessType::Write => self.store_page_cache.insert(v_page, p_page),
-        //                 MemoryAccessType::DontCare => None,
-        //             };
-        //             Ok(p_address)
-        //         }
-        //         Err(()) => Err(()),
-        //     }
-        // } else {
-        //     p_address
-        // }
     }
 
     fn traverse_page(
@@ -766,99 +709,3 @@ impl Mmu {
         Ok(p_address)
     }
 }
-
-// pub struct MemoryWrapper {
-//     memory: Memory,
-//     dram_base: u64,
-// }
-
-// impl MemoryWrapper {
-//     fn new(dram_base: u64) -> Self {
-//         MemoryWrapper {
-//             memory: Memory::new(),
-//             dram_base,
-//         }
-//     }
-
-//     fn init(&mut self, capacity: u64) {
-//         self.memory.init(capacity);
-//     }
-
-//     pub fn read_byte(&self, p_address: u64) -> u8 {
-//         debug_assert!(
-//             p_address >= self.dram_base,
-//             "Memory address must equals to or bigger than self.dram_base. {:X}",
-//             p_address
-//         );
-//         self.memory.read_byte(p_address - self.dram_base)
-//     }
-
-//     pub fn read_halfword(&self, p_address: u64) -> u16 {
-//         debug_assert!(
-//             p_address >= self.dram_base && p_address.wrapping_add(1) >= self.dram_base,
-//             "Memory address must equals to or bigger than self.dram_base. {:X}",
-//             p_address
-//         );
-//         self.memory.read_halfword(p_address - self.dram_base)
-//     }
-
-//     pub fn read_word(&self, p_address: u64) -> u32 {
-//         debug_assert!(
-//             p_address >= self.dram_base && p_address.wrapping_add(3) >= self.dram_base,
-//             "Memory address must equals to or bigger than self.dram_base. {:X}",
-//             p_address
-//         );
-//         self.memory.read_word(p_address - self.dram_base)
-//     }
-
-//     pub fn read_doubleword(&self, p_address: u64) -> u64 {
-//         debug_assert!(
-//             p_address >= self.dram_base && p_address.wrapping_add(7) >= self.dram_base,
-//             "Memory address must equals to or bigger than self.dram_base. {:X}",
-//             p_address
-//         );
-//         self.memory.read_doubleword(p_address - self.dram_base)
-//     }
-
-//     pub fn write_byte(&mut self, p_address: u64, value: u8) {
-//         debug_assert!(
-//             p_address >= self.dram_base,
-//             "Memory address must equals to or bigger than self.dram_base. {:X}",
-//             p_address
-//         );
-//         self.memory.write_byte(p_address - self.dram_base, value)
-//     }
-
-//     pub fn write_halfword(&mut self, p_address: u64, value: u16) {
-//         debug_assert!(
-//             p_address >= self.dram_base && p_address.wrapping_add(1) >= self.dram_base,
-//             "Memory address must equals to or bigger than self.dram_base. {:X}",
-//             p_address
-//         );
-//         self.memory
-//             .write_halfword(p_address - self.dram_base, value)
-//     }
-
-//     pub fn write_word(&mut self, p_address: u64, value: u32) {
-//         debug_assert!(
-//             p_address >= self.dram_base && p_address.wrapping_add(3) >= self.dram_base,
-//             "Memory address must equals to or bigger than self.dram_base. {:X}",
-//             p_address
-//         );
-//         self.memory.write_word(p_address - self.dram_base, value)
-//     }
-
-//     pub fn write_doubleword(&mut self, p_address: u64, value: u64) {
-//         debug_assert!(
-//             p_address >= self.dram_base && p_address.wrapping_add(7) >= self.dram_base,
-//             "Memory address must equals to or bigger than self.dram_base. {:X}",
-//             p_address
-//         );
-//         self.memory
-//             .write_doubleword(p_address - self.dram_base, value)
-//     }
-
-//     pub fn validate_address(&self, address: u64) -> bool {
-//         self.memory.validate_address(address - self.dram_base)
-//     }
-// }
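
The `Memory` trait now carries the reservation bookkeeping (`reserve`/`clear_reservation`), and the `Mmu` forwards to it after trimming the address to XLEN. A toy backend satisfying just those two methods might look like this (`ToyBacking` is an illustrative stand-in, not the emulator's `Memory` implementation):

use std::collections::HashSet;

/// Toy reservation bookkeeping in the shape the new trait methods expect:
/// `reserve` reports whether the address was newly reserved, and
/// `clear_reservation` forgets it again.
struct ToyBacking {
    reservations: HashSet<u64>,
}

impl ToyBacking {
    fn reserve(&mut self, p_address: u64) -> bool {
        // HashSet::insert returns true only if the address wasn't already present.
        self.reservations.insert(p_address)
    }

    fn clear_reservation(&mut self, p_address: u64) {
        self.reservations.remove(&p_address);
    }
}

fn main() {
    let mut mem = ToyBacking { reservations: HashSet::new() };
    assert!(mem.reserve(0x8000_0000));   // first reservation succeeds
    assert!(!mem.reserve(0x8000_0000));  // already held
    mem.clear_reservation(0x8000_0000);
    assert!(mem.reserve(0x8000_0000));   // free again after clearing
}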

@@ -8,11 +8,17 @@ use std::{
     sync::{
         atomic::{AtomicI64, Ordering},
         mpsc::{Receiver, Sender},
-        Arc, RwLock,
+        Arc, Mutex,
     },
 };
 
+use crate::xous::definitions::SyscallErrorNumber;
+
 const MEMORY_BASE: u32 = 0x8000_0000;
+const ALLOCATION_START: u32 = 0x4000_0000;
+const ALLOCATION_END: u32 = ALLOCATION_START + 5 * 1024 * 1024;
+const HEAP_START: u32 = 0xa000_0000;
+const HEAP_END: u32 = HEAP_START + 5 * 1024 * 1024;
 
 #[derive(Debug)]
 pub enum LoadError {
@@ -45,6 +51,7 @@ const MMUFLAG_ACCESSED: u32 = 0x40;
 const MMUFLAG_DIRTY: u32 = 0x80;
 
 impl std::error::Error for LoadError {}
+
+pub type ResponseData = ([i64; 8], Option<(Vec<u8>, u64)>);
 
 enum MemoryCommand {
     Exit,
@@ -59,30 +66,14 @@ enum MemoryCommand {
         u32,         /* argument 4 */
         Sender<i64>, /* Thread ID */
     ),
-    JoinThread(u32, Sender<([i64; 8], Option<(Vec<u8>, u64)>)>),
+    JoinThread(u32, Sender<ResponseData>),
 }
 
-struct Memory {
-    base: u32,
-    data: HashMap<usize, [u8; 4096]>,
-    allocated_pages: BTreeSet<usize>,
-    free_pages: BTreeSet<usize>,
-    heap_start: u32,
-    heap_size: u32,
-    // allocation_start: u32,
-    allocation_previous: u32,
-    l1_pt: u32,
-    satp: u32,
-    connections: HashMap<u32, Box<dyn services::Service + Send + Sync>>,
-    memory_cmd: Sender<MemoryCommand>,
-    turbo: bool,
-}
-
 struct Worker {
     cpu: riscv_cpu::Cpu,
     cmd: Sender<MemoryCommand>,
     tid: i64,
-    memory: Arc<RwLock<Memory>>,
+    memory: Arc<Mutex<Memory>>,
 }
 
 impl Worker {
@@ -90,7 +81,7 @@ impl Worker {
         cpu: riscv_cpu::Cpu,
         cmd: Sender<MemoryCommand>,
         tid: i64,
-        memory: Arc<RwLock<Memory>>,
+        memory: Arc<Mutex<Memory>>,
     ) -> Self {
         Self {
             cpu,
@@ -126,7 +117,7 @@ impl Worker {
                     return;
                 }
                 TickResult::CpuTrap(trap) => {
-                    self.memory.read().unwrap().print_mmu();
+                    self.memory.lock().unwrap().print_mmu();
                     // called `Result::unwrap()` on an `Err` value: "Valid bit is 0, or read is 0 and write is 1 at 40002fec: 000802e6"
                     println!(
                         "CPU trap at PC {:08x}, exiting thread {}: {:x?}",
@@ -149,18 +140,39 @@ struct WorkerHandle {
     joiner: std::thread::JoinHandle<()>,
 }
 
+struct Memory {
+    base: u32,
+    data: HashMap<usize, [u8; 4096]>,
+    allocated_pages: BTreeSet<usize>,
+    free_pages: BTreeSet<usize>,
+    heap_start: u32,
+    heap_size: u32,
+    // allocation_start: u32,
+    allocation_previous: u32,
+    l1_pt: u32,
+    satp: u32,
+    connections: HashMap<u32, Box<dyn services::Service + Send + Sync>>,
+    memory_cmd: Sender<MemoryCommand>,
+    translation_cache: HashMap<u32, u32>,
+    allocated_bytes: u32,
+    reservations: HashSet<u32>,
+}
+
 impl Memory {
     pub fn new(base: u32, size: usize) -> (Self, Receiver<MemoryCommand>) {
         let mut backing = HashMap::new();
         let mut free_pages = BTreeSet::new();
         let mut allocated_pages = BTreeSet::new();
+
+        // Populate the backing table as well as the list of free pages
         for phys in (base..(base + size as u32)).step_by(4096) {
             backing.insert(phys as usize, [0; 4096]);
             free_pages.insert(phys as usize);
         }
-        // Remove the l0 page table
+
+        // Allocate the l0 page table
         assert!(free_pages.remove(&(MEMORY_BASE as usize + 4096)));
         assert!(allocated_pages.insert(MEMORY_BASE as usize + 4096));
 
         let (memory_cmd, memory_cmd_rx) = std::sync::mpsc::channel();
         (
             Self {
@@ -170,79 +182,81 @@ impl Memory {
                 free_pages,
                 l1_pt: MEMORY_BASE + 4096,
                 satp: ((4096 + MEMORY_BASE) >> 12) | 0x8000_0000,
-                heap_start: 0x6000_0000,
+                heap_start: HEAP_START,
                 heap_size: 0,
-                allocation_previous: 0x4000_0000,
+                allocation_previous: ALLOCATION_START,
                 connections: HashMap::new(),
                 memory_cmd,
-                turbo: false,
+                translation_cache: HashMap::new(),
+                allocated_bytes: 4096,
+                reservations: HashSet::new(),
             },
             memory_cmd_rx,
         )
     }
 
-    pub fn turbo(&mut self) {
-        self.turbo = true;
-    }
-
-    pub fn normal(&mut self) {
-        self.turbo = false;
-        self.memory_ck();
-    }
-
-    fn memory_ck(&self) {
-        // if self.turbo {
-        //     return;
-        // }
-        // let mut seen_pages = HashMap::new();
-        // seen_pages.insert(self.l1_pt, 0);
-        // for vpn1 in 0..1024 {
-        //     let l1_entry = self.read_u32(self.l1_pt as u64 + vpn1 * 4);
-        //     if l1_entry & MMUFLAG_VALID == 0 {
-        //         continue;
-        //     }
-        //     let superpage_addr = vpn1 as u32 * (1 << 22);
-        //     for vpn0 in 0..1024 {
-        //         let l0_entry = self.read_u32((((l1_entry >> 10) << 12) as u64) + vpn0 as u64 * 4);
-        //         if l0_entry & 0x1 == 0 {
-        //             continue;
-        //         }
-        //         let phys = (l0_entry >> 10) << 12;
-        //         let current = superpage_addr + vpn0 as u32 * (1 << 12);
-        //         if let Some(existing) = seen_pages.get(&phys) {
-        //             self.print_mmu();
-        //             panic!(
-        //                 "Error! Page {:08x} is mapped twice! Once at {:08x} and once at {:08x}",
-        //                 phys, existing, current,
-        //             );
-        //         }
-        //         seen_pages.insert(phys, current);
-        //     }
-        // }
-    }
+    // fn memory_ck(&self) {
+    //     if self.turbo {
+    //         return;
+    //     }
+    //     let mut seen_pages = HashMap::new();
+    //     seen_pages.insert(self.l1_pt, 0);
+    //     for vpn1 in 0..1024 {
+    //         let l1_entry = self.read_u32(self.l1_pt as u64 + vpn1 * 4);
+    //         if l1_entry & MMUFLAG_VALID == 0 {
+    //             continue;
+    //         }
+    //         let superpage_addr = vpn1 as u32 * (1 << 22);
+    //         for vpn0 in 0..1024 {
+    //             let l0_entry = self.read_u32((((l1_entry >> 10) << 12) as u64) + vpn0 as u64 * 4);
+    //             if l0_entry & 0x1 == 0 {
+    //                 continue;
+    //             }
+    //             let phys = (l0_entry >> 10) << 12;
+    //             let current = superpage_addr + vpn0 as u32 * (1 << 12);
+    //             if let Some(existing) = seen_pages.get(&phys) {
+    //                 self.print_mmu();
+    //                 panic!(
+    //                     "Error! Page {:08x} is mapped twice! Once at {:08x} and once at {:08x}",
+    //                     phys, existing, current,
+    //                 );
+    //             }
+    //             seen_pages.insert(phys, current);
+    //         }
+    //     }
    // }
 
-    fn allocate_page(&mut self) -> u32 {
-        self.memory_ck();
-        let phys = self.free_pages.pop_first().expect("out of memory");
+    /// Allocate a physical page from RAM.
+    fn allocate_phys_page(&mut self) -> Option<u32> {
+        let Some(phys) = self.free_pages.pop_first() else {
+            // panic!(
+            //     "out of memory when attempting to allocate a page. There are {} bytes allocated.",
+            //     self.allocated_bytes
+            // );
+            return None;
+        };
         assert!(self.allocated_pages.insert(phys));
+        self.allocated_bytes += 4096;
 
         // The root (l1) pagetable is defined to be mapped into our virtual
         // address space at this address.
         if phys == 0 {
             panic!("Attempt to allocate zero page");
         }
-        self.memory_ck();
-        phys as u32
+        Some(phys as u32)
    }
 
-    fn free_page(&mut self, virt: u32) -> Result<(), ()> {
-        self.memory_ck();
-        let phys = self.virt_to_phys(virt).ok_or(())?;
+    fn free_virt_page(&mut self, virt: u32) -> Result<(), ()> {
+        let phys = self
+            .virt_to_phys(virt)
+            .ok_or(())
+            .expect("tried to free a page that was allocated");
 
         let vpn1 = ((virt >> 22) & ((1 << 10) - 1)) as usize * 4;
         let vpn0 = ((virt >> 12) & ((1 << 10) - 1)) as usize * 4;
+        self.allocated_bytes -= 4096;
 
         // The root (l1) pagetable is defined to be mapped into our virtual
         // address space at this address.
@@ -250,68 +264,123 @@ impl Memory {
         // If the level 1 pagetable doesn't exist, then this address is invalid
         let l1_pt_entry = self.read_u32(self.l1_pt as u64 + vpn1 as u64);
         if l1_pt_entry & MMUFLAG_VALID == 0 {
-            return Ok(());
+            panic!("Tried to free a page where the level 1 pagetable didn't exist");
         }
 
-        // println!("Deallocating page {:08x} @ {:08x}", virt, phys);
-        if !self.allocated_pages.remove(&(phys as usize)) {
-            // self.print_mmu();
-            panic!(
-                "Page {:08x} @ {:08x} wasn't in the list of allocated pages!",
-                phys, virt
-            );
-        }
+        assert!(self.allocated_pages.remove(&(phys as usize)));
         assert!(self.free_pages.insert(phys as usize));
+        assert!(self.translation_cache.remove(&virt).is_some());
 
         let l0_pt_phys = ((l1_pt_entry >> 10) << 12) + vpn0 as u32;
+        assert!(self.read_u32(l0_pt_phys as u64) & MMUFLAG_VALID != 0);
         self.write_u32(l0_pt_phys as u64, 0);
-        self.memory_ck();
 
         Ok(())
     }
 
     fn allocate_virt_region(&mut self, size: usize) -> Option<u32> {
-        self.memory_ck();
-        let mut start = self.allocation_previous;
-        // Find a free region that will fit this page.
-        'outer: loop {
-            for page in (start..(start + size as u32)).step_by(4096) {
-                if self.virt_to_phys(page).is_some() {
-                    start = page + 4096;
-                    continue 'outer;
-                }
-            }
-            break;
-        }
-        // Allocate the region
-        for page in (start..(start + size as u32)).step_by(4096) {
-            self.ensure_page(page);
-            // println!(
-            //     "Allocated {:08x} @ {:08x}",
-            //     page,
-            //     self.virt_to_phys(page).unwrap()
-            // );
-        }
-        self.allocation_previous = start + size as u32 + 4096;
-        self.memory_ck();
-        Some(start)
+        let size = size as u32;
+        // Look for a sequence of `size` pages that are free.
+        let mut address = None;
+        for potential_start in (self.allocation_previous..ALLOCATION_END - size)
+            .step_by(4096)
+            .chain((ALLOCATION_START..self.allocation_previous - size).step_by(4096))
+        {
+            let mut all_free = true;
+            for check_page in (potential_start..potential_start + size).step_by(4096) {
+                if self.virt_to_phys(check_page).is_some() {
+                    all_free = false;
+                    break;
+                }
+            }
+            if all_free {
+                self.allocation_previous = potential_start + size;
+                address = Some(potential_start);
+                break;
+            }
+        }
+        if let Some(address) = address {
+            let mut error_mark = None;
+            for page in (address..(address + size)).step_by(4096) {
+                if self.ensure_page(page).is_none() {
+                    error_mark = Some(page);
+                    break;
+                }
+            }
+            if let Some(error_mark) = error_mark {
+                for page in (address..error_mark).step_by(4096) {
+                    self.free_virt_page(page).unwrap();
+                }
+                return None;
+            }
+        }
+        address
+        // for potential_start in (start..initial).step_by(PAGE_SIZE) {
+        //     let mut all_free = true;
+        //     for check_page in (potential_start..potential_start + size).step_by(PAGE_SIZE) {
+        //         if !crate::arch::mem::address_available(check_page) {
+        //             all_free = false;
+        //             break;
+        //         }
+        //     }
+        //     if all_free {
+        //         match kind {
+        //             xous_kernel::MemoryType::Default => {
+        //                 process_inner.mem_default_last = potential_start
+        //             }
+        //             xous_kernel::MemoryType::Messages => {
+        //                 process_inner.mem_message_last = potential_start
+        //             }
+        //             other => panic!("invalid kind: {:?}", other),
+        //         }
+        //         return Ok(potential_start as *mut u8);
+        //     }
+        // }
+        // Err(xous_kernel::Error::BadAddress)
+        // let mut start = self.allocation_previous;
+        // // Find a free region that will fit this page.
+        // 'outer: loop {
+        //     for page in (start..(start + size as u32)).step_by(4096) {
+        //         // If this page is allocated, skip it
+        //         if self.virt_to_phys(page).is_some() {
+        //             start = page + 4096;
+        //             continue 'outer;
+        //         }
+        //     }
+        //     break;
+        // }
+        // // Allocate the region
+        // for page in (start..(start + size as u32)).step_by(4096) {
+        //     self.ensure_page(page);
+        //     // println!(
+        //     //     "Allocated {:08x} @ {:08x}",
+        //     //     page,
+        //     //     self.virt_to_phys(page).unwrap()
+        //     // );
+        // }
+        // self.allocation_previous = start + size as u32 + 4096;
+        // Some(start)
     }
 
-    fn ensure_page(&mut self, address: u32) {
-        self.memory_ck();
-        let vpn1 = ((address >> 22) & ((1 << 10) - 1)) as usize * 4;
-        let vpn0 = ((address >> 12) & ((1 << 10) - 1)) as usize * 4;
+    fn ensure_page(&mut self, virt: u32) -> Option<bool> {
+        let mut allocated = false;
+        let vpn1 = ((virt >> 22) & ((1 << 10) - 1)) as usize * 4;
+        let vpn0 = ((virt >> 12) & ((1 << 10) - 1)) as usize * 4;
 
         // If the level 1 pagetable doesn't exist, then this address is invalid
         let mut l1_pt_entry = self.read_u32(self.l1_pt as u64 + vpn1 as u64);
         if l1_pt_entry & MMUFLAG_VALID == 0 {
             // Allocate a new page for the level 1 pagetable
-            let l0_pt_phys = self.allocate_page();
+            let Some(l0_pt_phys) = self.allocate_phys_page() else {
+                return None;
+            };
             // println!("Allocating level 0 pagetable at {:08x}", l0_pt_phys);
             l1_pt_entry =
                 ((l0_pt_phys >> 12) << 10) | MMUFLAG_VALID | MMUFLAG_DIRTY | MMUFLAG_ACCESSED;
             // Map the level 1 pagetable into the root pagetable
             self.write_u32(self.l1_pt as u64 + vpn1 as u64, l1_pt_entry);
+            allocated = true;
         }
 
         let l0_pt_phys = ((l1_pt_entry >> 10) << 12) + vpn0 as u32;
@@ -319,8 +388,10 @@ impl Memory {
         // Ensure the entry hasn't already been mapped.
         if l0_pt_entry & MMUFLAG_VALID == 0 {
-            let page_phys = self.allocate_page();
-            l0_pt_entry = ((page_phys >> 12) << 10)
+            let Some(phys) = self.allocate_phys_page() else {
+                return None;
+            };
+            l0_pt_entry = ((phys >> 12) << 10)
                 | MMUFLAG_VALID
                 | MMUFLAG_WRITABLE
                 | MMUFLAG_READABLE
@@ -330,6 +401,8 @@ impl Memory {
                 | MMUFLAG_ACCESSED;
             // Map the level 0 pagetable into the level 1 pagetable
             self.write_u32(l0_pt_phys as u64, l0_pt_entry);
+            assert!(self.translation_cache.insert(virt, phys).is_none());
+            allocated = true;
        }
         assert!(self
             .allocated_pages
@@ -337,11 +410,10 @@ impl Memory {
         assert!(!self
             .free_pages
             .contains(&(((l0_pt_entry >> 10) << 12) as usize)));
-        self.memory_ck();
+        Some(allocated)
     }
 
     fn remove_memory_flags(&mut self, virt: u32, new_flags: u32) {
-        self.memory_ck();
         // Ensure they're only adjusting legal flags
         assert!(new_flags & !(MMUFLAG_READABLE | MMUFLAG_WRITABLE | MMUFLAG_EXECUTABLE) == 0);
@@ -376,7 +448,6 @@ impl Memory {
             (((l1_pt_entry >> 10) << 12) + vpn0 as u32) as u64,
             l0_pt_entry,
         );
-        self.memory_ck();
     }
 
     fn write_bytes(&mut self, data: &[u8], start: u32) {
@@ -444,11 +515,12 @@ impl Memory {
         let l0_pt_entry = self.read_u32((((l1_pt_entry >> 10) << 12) + vpn0 as u32) as u64);
 
-        // Ensure the entry hasn't already been mapped.
+        // Check if the mapping is valid
         if l0_pt_entry & MMUFLAG_VALID == 0 {
-            return None;
-        }
-        Some(((l0_pt_entry >> 10) << 12) | offset)
+            None
+        } else {
+            Some(((l0_pt_entry >> 10) << 12) | offset)
+        }
     }
 }
 
@@ -565,33 +637,38 @@ impl riscv_cpu::cpu::Memory for Memory {
         match syscall {
             Syscall::IncreaseHeap(bytes, _flags) => {
                 // println!("IncreaseHeap({} bytes, flags: {:02x})", bytes, _flags);
+                let increase_bytes = bytes as u32;
                 let heap_address = self.heap_start + self.heap_size;
-                match bytes {
-                    bytes if bytes < 0 => {
-                        self.heap_size -= bytes.unsigned_abs() as u32;
-                        panic!("Reducing size not supported!");
-                    }
-                    bytes if bytes > 0 => {
-                        for new_address in
-                            (heap_address..(heap_address + bytes as u32)).step_by(4096)
-                        {
-                            self.ensure_page(new_address);
-                        }
-                        self.heap_size += bytes as u32;
-                    }
-                    _ => {}
+                if self.heap_size.wrapping_add(increase_bytes) > HEAP_END {
+                    [
+                        SyscallResultNumber::Error as i64,
+                        SyscallErrorNumber::OutOfMemory as i64,
+                        0,
+                        0,
+                        0,
+                        0,
+                        0,
+                        0,
+                    ]
+                    .into()
+                } else {
+                    for new_address in (heap_address..(heap_address + increase_bytes)).step_by(4096)
+                    {
+                        self.ensure_page(new_address);
+                    }
+                    self.heap_size += increase_bytes;
+                    [
+                        SyscallResultNumber::MemoryRange as i64,
+                        heap_address as i64,
+                        bytes,
+                        0,
+                        0,
+                        0,
+                        0,
+                        0,
+                    ]
+                    .into()
                 }
-                [
-                    SyscallResultNumber::MemoryRange as i64,
-                    heap_address as i64,
-                    bytes,
-                    0,
-                    0,
-                    0,
-                    0,
-                    0,
-                ]
-                .into()
             }
 
             Syscall::MapMemory(phys, virt, size, _flags) => {
@@ -605,21 +682,36 @@ impl riscv_cpu::cpu::Memory for Memory {
                 if phys != 0 {
                     unimplemented!("Non-zero phys address");
                 }
-                let region = self
-                    .allocate_virt_region(size as usize)
-                    .expect("out of memory");
-                // println!(" -> {:08x}", region);
-                [
-                    SyscallResultNumber::MemoryRange as i64,
-                    region as i64,
-                    size,
-                    0,
-                    0,
-                    0,
-                    0,
-                    0,
-                ]
-                .into()
+                if let Some(region) = self.allocate_virt_region(size as usize) {
+                    [
+                        SyscallResultNumber::MemoryRange as i64,
+                        region as i64,
+                        size,
+                        0,
+                        0,
+                        0,
+                        0,
+                        0,
+                    ]
+                    .into()
+                } else {
+                    // self.print_mmu();
+                    println!(
+                        "Couldn't find a free spot to allocate {} bytes of virtual memory, or out of memory",
+                        size as usize
+                    );
+                    [
+                        SyscallResultNumber::Error as i64,
+                        SyscallErrorNumber::OutOfMemory as i64,
+                        0,
+                        0,
+                        0,
+                        0,
+                        0,
+                        0,
+                    ]
+                    .into()
+                }
             }
 
             Syscall::Connect(id) => {
                 // println!(
@@ -832,7 +924,7 @@ impl riscv_cpu::cpu::Memory for Memory {
             Syscall::UnmapMemory(address, size) => {
                 // println!("UnmapMemory({:08x}, {})", address, size);
                 for offset in (address..address + size).step_by(4096) {
-                    self.free_page(offset as u32).unwrap();
+                    self.free_virt_page(offset as u32).unwrap();
                }
                 [SyscallResultNumber::Ok as i64, 0, 0, 0, 0, 0, 0, 0].into()
             }
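
The rewritten allocate_virt_region above scans for a run of free pages starting at the previous allocation point, wrapping around to `ALLOCATION_START` before giving up, instead of bumping a cursor forever. A standalone sketch of that first-fit scan (`REGION_START`, `REGION_END`, and `find_free_run` are illustrative names; like the original, the `previous - size` wraparound bound assumes `previous` stays well inside the region):

use std::collections::HashSet;

const PAGE_SIZE: u32 = 4096;
const REGION_START: u32 = 0x4000_0000;
const REGION_END: u32 = REGION_START + 5 * 1024 * 1024;

/// First-fit scan for `size` bytes of contiguous free pages, starting at the
/// previous allocation point and wrapping around to the region start,
/// mirroring the two chained ranges in allocate_virt_region.
fn find_free_run(used: &HashSet<u32>, previous: u32, size: u32) -> Option<u32> {
    let candidates = (previous..REGION_END - size)
        .step_by(PAGE_SIZE as usize)
        .chain((REGION_START..previous - size).step_by(PAGE_SIZE as usize));
    for start in candidates {
        let all_free = (start..start + size)
            .step_by(PAGE_SIZE as usize)
            .all(|page| !used.contains(&page));
        if all_free {
            return Some(start);
        }
    }
    None
}

fn main() {
    let mut used = HashSet::new();
    used.insert(REGION_START); // first page is taken
    let hit = find_free_run(&used, REGION_START, 2 * PAGE_SIZE).unwrap();
    assert_eq!(hit, REGION_START + PAGE_SIZE); // scan skips the used page
}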
@@ -855,10 +947,27 @@ impl riscv_cpu::cpu::Memory for Memory {
             }
         }
     }
+
+    fn translate(&self, v_address: u64) -> Option<u64> {
+        let v_address = v_address as u32;
+        let ppn = v_address & !0xfff;
+        let offset = v_address & 0xfff;
+        self.translation_cache
+            .get(&ppn)
+            .map(|x| (*x + offset) as u64)
+    }
+
+    fn reserve(&mut self, p_address: u64) -> bool {
+        self.reservations.insert(p_address as u32)
+    }
+
+    fn clear_reservation(&mut self, p_address: u64) {
+        self.reservations.remove(&(p_address as u32));
+    }
 }
 
 pub struct Machine {
-    memory: Arc<RwLock<Memory>>,
+    memory: Arc<Mutex<Memory>>,
     workers: Vec<WorkerHandle>,
     satp: u64,
     memory_cmd_sender: Sender<MemoryCommand>,
@@ -870,7 +979,7 @@ impl Machine {
     pub fn new(program: &[u8]) -> Result<Self, LoadError> {
         let (memory, memory_cmd) = Memory::new(MEMORY_BASE, 16 * 1024 * 1024);
         let memory_cmd_sender = memory.memory_cmd.clone();
-        let memory = Arc::new(RwLock::new(memory));
+        let memory = Arc::new(Mutex::new(memory));
 
         let mut machine = Self {
             memory,
@@ -900,8 +1009,7 @@ impl Machine {
             return Err(LoadError::BitSizeError);
         }
 
-        let mut memory_writer = self.memory.write().unwrap();
-        memory_writer.turbo();
+        let mut memory_writer = self.memory.lock().unwrap();
         for sh in elf.section_headers {
             if sh.sh_flags as u32 & goblin::elf::section_header::SHF_ALLOC == 0 {
                 // println!(
@@ -918,7 +1026,9 @@ impl Machine {
             if sh.sh_type & goblin::elf::section_header::SHT_NOBITS != 0 {
                 for addr in sh.sh_addr..(sh.sh_addr + sh.sh_size) {
-                    memory_writer.ensure_page(addr.try_into().unwrap());
+                    memory_writer
+                        .ensure_page(addr.try_into().unwrap())
+                        .expect("out of memory");
                 }
             } else {
                 memory_writer.write_bytes(
@@ -928,16 +1038,13 @@ impl Machine {
             }
         }
 
-        memory_writer.normal();
-
-        // TODO: Get memory permissions correct
         let satp = memory_writer.satp.into();
 
         // Ensure stack is allocated
         for page in (0xc000_0000..0xc002_0000).step_by(4096) {
-            memory_writer.ensure_page(page);
+            memory_writer.ensure_page(page).expect("out of memory");
        }
+        drop(memory_writer);
 
         cpu.write_csr(riscv_cpu::cpu::CSR_SATP_ADDRESS, satp)
             .map_err(|_| LoadError::SatpWriteError)?;
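
The `translate` implementation added above consults a flat `translation_cache` keyed by virtual page, sidestepping a full page-table walk. The same lookup in isolation (`cached_translate` is an illustrative name):

use std::collections::HashMap;

/// Flat page-granularity translation cache like the one `translate` consults:
/// keys are virtual page numbers (address & !0xfff), values the matching
/// physical page, with the page offset carried across unchanged.
fn cached_translate(cache: &HashMap<u32, u32>, v_address: u64) -> Option<u64> {
    let v_address = v_address as u32;
    let page = v_address & !0xfff;
    let offset = v_address & 0xfff;
    cache.get(&page).map(|phys| (*phys + offset) as u64)
}

fn main() {
    let mut cache = HashMap::new();
    cache.insert(0x4000_0000u32, 0x8000_2000u32);
    assert_eq!(cached_translate(&cache, 0x4000_0042), Some(0x8000_2042));
    assert_eq!(cached_translate(&cache, 0x4000_1000), None); // not mapped
}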

@@ -26,6 +26,38 @@ pub enum SyscallResultNumber {
     Scalar5 = 20,
 }
 
+#[derive(Debug, Copy, Clone)]
+pub enum SyscallErrorNumber {
+    NoError = 0,
+    BadAlignment = 1,
+    BadAddress = 2,
+    OutOfMemory = 3,
+    MemoryInUse = 4,
+    InterruptNotFound = 5,
+    InterruptInUse = 6,
+    InvalidString = 7,
+    ServerExists = 8,
+    ServerNotFound = 9,
+    ProcessNotFound = 10,
+    ProcessNotChild = 11,
+    ProcessTerminated = 12,
+    Timeout = 13,
+    InternalError = 14,
+    ServerQueueFull = 15,
+    ThreadNotAvailable = 16,
+    UnhandledSyscall = 17,
+    InvalidSyscall = 18,
+    ShareViolation = 19,
+    InvalidThread = 20,
+    InvalidPID = 21,
+    UnknownError = 22,
+    AccessDenied = 23,
+    UseBeforeInit = 24,
+    DoubleFree = 25,
+    DebugInProgress = 26,
+    InvalidLimit = 27,
+}
+
 #[derive(Debug)]
 pub enum Syscall {
     Unknown([i64; 8]),
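
SyscallErrorNumber slots into the eight-word result arrays the syscall handlers build: word 0 holds the `SyscallResultNumber` discriminant and word 1 the error code. A sketch of that packing; the `ResultKind` discriminant values below are hypothetical placeholders, and only `OutOfMemory = 3` comes from the enum above:

// Hypothetical discriminants for illustration only; the real kinds live in
// SyscallResultNumber above.
#[allow(dead_code)]
#[derive(Copy, Clone)]
enum ResultKind {
    Error = 1,
    MemoryRange = 2,
}

/// Pack an error into the eight-word result format the syscall handlers
/// build: word 0 is the result kind, word 1 the error number.
fn error_result(errno: i64) -> [i64; 8] {
    [ResultKind::Error as i64, errno, 0, 0, 0, 0, 0, 0]
}

fn main() {
    let out_of_memory = 3; // SyscallErrorNumber::OutOfMemory as i64
    assert_eq!(error_result(out_of_memory), [1, 3, 0, 0, 0, 0, 0, 0]);
}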

@@ -2,16 +2,20 @@ use std::sync::mpsc::Receiver;
 pub mod log;
 pub mod ticktimer;
 
+pub type ResponseData = ([i64; 8], Option<(Vec<u8>, u64)>);
+
+#[allow(dead_code)]
 pub enum ScalarResult {
     Scalar1(u32),
     Scalar2([u32; 2]),
     Scalar5([u32; 5]),
-    WaitForResponse(Receiver<([i64; 8], Option<(Vec<u8>, u64)>)>),
+    WaitForResponse(Receiver<ResponseData>),
 }
 
+#[allow(dead_code)]
 pub enum LendResult {
     MemoryReturned([u32; 2]),
-    WaitForResponse(Receiver<([i64; 8], Option<(Vec<u8>, u64)>)>),
+    WaitForResponse(Receiver<ResponseData>),
 }
 
 pub trait Service {
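
`WaitForResponse` is how a service defers its answer: it hands back the receiving half of an mpsc channel, and the paused CPU resumes when the `ResponseData` arrives. A self-contained sketch of that handshake (the `deferred_reply` helper is illustrative, not part of the services API):

use std::sync::mpsc::{channel, Receiver};

type ResponseData = ([i64; 8], Option<(Vec<u8>, u64)>);

#[allow(dead_code)]
enum LendResult {
    MemoryReturned([u32; 2]),
    WaitForResponse(Receiver<ResponseData>),
}

/// A service that can't answer immediately hands back the receiving end of a
/// channel; the caller parks until a worker thread sends the ResponseData.
fn deferred_reply() -> LendResult {
    let (tx, rx) = channel();
    std::thread::spawn(move || {
        // Pretend to finish some work, then publish the eight result words
        // (no memory buffer returned in this example).
        tx.send(([0i64; 8], None)).unwrap();
    });
    LendResult::WaitForResponse(rx)
}

fn main() {
    if let LendResult::WaitForResponse(rx) = deferred_reply() {
        let (words, buf) = rx.recv().unwrap();
        assert_eq!(words[0], 0);
        assert!(buf.is_none());
    }
}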

@@ -39,10 +39,12 @@ impl Service for Log {
             let print_buffer = &buf[0..extra[1] as usize];
             // println!("Log stdout:");
             std::io::stdout().write_all(print_buffer).unwrap();
+            std::io::stdout().flush().unwrap();
         } else if opcode == LogLendOpcode::StandardError as u32 {
             let print_buffer = &buf[0..extra[1] as usize];
             // println!("Log stderr:");
             std::io::stderr().write_all(print_buffer).unwrap();
+            std::io::stderr().flush().unwrap();
         } else {
             panic!("Log lend {}: {} {:x?}", sender, opcode, buf);
         }
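
The new `flush()` calls matter because Rust buffers stdout (line-buffered on a terminal, block-buffered otherwise), so bytes lent from the guest could otherwise sit invisible in the buffer. In isolation:

use std::io::Write;

fn main() {
    // Without an explicit flush, a partial line can sit in the stdout buffer
    // until the buffer fills or the process exits.
    let mut out = std::io::stdout();
    out.write_all(b"guest says hi").unwrap();
    out.flush().unwrap(); // make the bytes visible immediately
}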