Almost got the out-of-memory test working

Signed-off-by: Sean Cross <sean@xobs.io>
2024-01-03 22:19:21 +08:00
parent 4b2e4b0548
commit abef5b7db3
10 changed files with 408 additions and 509 deletions

View File

@ -1,4 +1,4 @@
use std::sync::{mpsc::Receiver, Arc, RwLock};
use std::sync::{mpsc::Receiver, Arc, Mutex};
pub use super::mmu::Memory;
use super::mmu::{AddressingMode, Mmu};
@ -54,10 +54,12 @@ pub const MIP_SEIP: u64 = 0x200;
const MIP_STIP: u64 = 0x020;
const MIP_SSIP: u64 = 0x002;
pub type ResponseData = ([i64; 8], Option<(Vec<u8>, u64)>);
pub enum TickResult {
Ok,
ExitThread(u64),
PauseEmulation(Receiver<([i64; 8], Option<(Vec<u8>, u64)>)>),
PauseEmulation(Receiver<ResponseData>),
CpuTrap(Trap),
}
@ -74,9 +76,8 @@ pub struct Cpu {
pc: u64,
csr: [u64; CSR_CAPACITY],
mmu: Mmu,
memory: Arc<RwLock<dyn Memory + Send + Sync>>,
reservation: u64, // @TODO: Should support multiple address reservations
is_reservation_set: bool,
memory: Arc<Mutex<dyn Memory + Send + Sync>>,
reservation: Option<u64>, // @TODO: Should support multiple address reservations
_dump_flag: bool,
// decode_cache: DecodeCache,
unsigned_data_mask: u64,
@ -127,7 +128,7 @@ pub enum TrapType {
UserExternalInterrupt,
SupervisorExternalInterrupt,
MachineExternalInterrupt,
PauseEmulation(std::sync::mpsc::Receiver<([i64; 8], Option<(Vec<u8>, u64)>)>),
PauseEmulation(Receiver<ResponseData>),
}
fn _get_privilege_mode_name(mode: &PrivilegeMode) -> &'static str {
@ -225,11 +226,11 @@ pub struct CpuBuilder {
xlen: Xlen,
pc: u64,
sp: u64,
memory: Arc<RwLock<dyn Memory + Send + Sync>>,
memory: Arc<Mutex<dyn Memory + Send + Sync>>,
}
impl CpuBuilder {
pub fn new(memory: Arc<RwLock<dyn Memory + Send + Sync>>) -> Self {
pub fn new(memory: Arc<Mutex<dyn Memory + Send + Sync>>) -> Self {
CpuBuilder {
xlen: Xlen::Bit64,
memory,
@ -266,7 +267,7 @@ impl Cpu {
///
/// # Arguments
/// * `Terminal`
pub fn new(memory: Arc<RwLock<dyn Memory + Send + Sync>>) -> Self {
pub fn new(memory: Arc<Mutex<dyn Memory + Send + Sync>>) -> Self {
Cpu {
clock: 0,
xlen: Xlen::Bit64,
@ -277,8 +278,7 @@ impl Cpu {
pc: 0,
csr: [0; CSR_CAPACITY],
mmu: Mmu::new(Xlen::Bit64, memory.clone()),
reservation: 0,
is_reservation_set: false,
reservation: None,
_dump_flag: false,
// decode_cache: DecodeCache::new(),
unsigned_data_mask: 0xffffffffffffffff,
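With the switch from `RwLock` to `Mutex`, every user of the shared memory handle goes through the same `.lock()` call, whether it only reads or also mutates (as the syscall path does). Below is a minimal, self-contained sketch of that sharing pattern; `Bus` and `Counter` are hypothetical stand-ins, not the crate's `Memory` trait or any type from this repository.

use std::sync::{Arc, Mutex};
use std::thread;

// Hypothetical stand-in trait; the real `Memory` trait has many more methods.
trait Bus: Send + Sync {
    fn poke(&mut self);    // a mutating access, like `write_u64` or `syscall`
    fn peek(&self) -> u64; // a read-only access, like `read_u8`
}

struct Counter(u64);

impl Bus for Counter {
    fn poke(&mut self) {
        self.0 += 1;
    }
    fn peek(&self) -> u64 {
        self.0
    }
}

fn main() {
    // Same shape as the `Arc<Mutex<dyn Memory + Send + Sync>>` handle above.
    let bus: Arc<Mutex<dyn Bus>> = Arc::new(Mutex::new(Counter(0)));
    let workers: Vec<_> = (0..4)
        .map(|_| {
            let bus = Arc::clone(&bus);
            // `.lock()` serves readers and writers alike; with `RwLock` the
            // mutating path would have needed a separate `.write()` call.
            thread::spawn(move || bus.lock().unwrap().poke())
        })
        .collect();
    for w in workers {
        w.join().unwrap();
    }
    assert_eq!(bus.lock().unwrap().peek(), 4);
}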
@ -563,11 +563,6 @@ impl Cpu {
}
}
fn handle_exception(&mut self, exception: Trap, instruction_address: u64) {
println!("!!! Exception Trap !!!: {:x?}", exception);
self.handle_trap(exception, instruction_address, false);
}
fn handle_trap(&mut self, trap: Trap, instruction_address: u64, is_interrupt: bool) -> bool {
let current_privilege_encoding = get_privilege_encoding(&self.privilege_mode) as u64;
let cause = get_trap_cause(&trap, &self.xlen);
@ -2448,7 +2443,7 @@ const INSTRUCTIONS: [Instruction; INSTRUCTION_NUM] = [
for (src, dest) in cpu.x[10..].iter().zip(args.iter_mut()) {
*dest = *src;
}
match cpu.memory.write().unwrap().syscall(args) {
match cpu.memory.lock().unwrap().syscall(args) {
super::mmu::SyscallResult::Ok(result) => {
for (src, dest) in result.iter().zip(cpu.x[10..].iter_mut()) {
*dest = *src;
@ -2938,14 +2933,11 @@ const INSTRUCTIONS: [Instruction; INSTRUCTION_NUM] = [
operation: |cpu, word, _address| {
let f = parse_format_r(word);
// @TODO: Implement properly
cpu.x[f.rd] = match cpu.mmu.load_doubleword(cpu.x[f.rs1] as u64) {
Ok(data) => {
cpu.is_reservation_set = true;
cpu.reservation = cpu.x[f.rs1] as u64; // Is virtual address ok?
data as i64
}
Err(e) => return Err(e),
};
let address = cpu.x[f.rs1] as u64;
cpu.x[f.rd] = cpu.mmu.load_doubleword(address)? as i64;
if cpu.mmu.reserve(address) {
cpu.reservation = Some(address);
}
Ok(())
},
disassemble: dump_format_r,
@ -2957,14 +2949,11 @@ const INSTRUCTIONS: [Instruction; INSTRUCTION_NUM] = [
operation: |cpu, word, _address| {
let f = parse_format_r(word);
// @TODO: Implement properly
cpu.x[f.rd] = match cpu.mmu.load_word(cpu.x[f.rs1] as u64) {
Ok(data) => {
cpu.is_reservation_set = true;
cpu.reservation = cpu.x[f.rs1] as u64; // Is virtual address ok?
data as i32 as i64
}
Err(e) => return Err(e),
};
let address = cpu.x[f.rs1] as u64;
cpu.x[f.rd] = cpu.mmu.load_word(address)? as i64;
if cpu.mmu.reserve(address) {
cpu.reservation = Some(address);
}
Ok(())
},
disassemble: dump_format_r,
@ -3223,19 +3212,14 @@ const INSTRUCTIONS: [Instruction; INSTRUCTION_NUM] = [
operation: |cpu, word, _address| {
let f = parse_format_r(word);
// @TODO: Implement properly
cpu.x[f.rd] = match cpu.is_reservation_set && cpu.reservation == (cpu.x[f.rs1] as u64) {
true => match cpu
.mmu
.store_doubleword(cpu.x[f.rs1] as u64, cpu.x[f.rs2] as u64)
{
Ok(()) => {
cpu.is_reservation_set = false;
0
}
Err(e) => return Err(e),
},
false => 1,
};
let address = cpu.x[f.rs1] as u64;
if Some(address) == cpu.reservation.take() {
cpu.mmu.store_doubleword(address, cpu.x[f.rs2] as u64)?;
cpu.mmu.clear_reservation(address);
cpu.x[f.rd] = 0;
return Ok(());
}
cpu.x[f.rd] = 1;
Ok(())
},
disassemble: dump_format_r,
@ -3247,16 +3231,14 @@ const INSTRUCTIONS: [Instruction; INSTRUCTION_NUM] = [
operation: |cpu, word, _address| {
let f = parse_format_r(word);
// @TODO: Implement properly
cpu.x[f.rd] = match cpu.is_reservation_set && cpu.reservation == (cpu.x[f.rs1] as u64) {
true => match cpu.mmu.store_word(cpu.x[f.rs1] as u64, cpu.x[f.rs2] as u32) {
Ok(()) => {
cpu.is_reservation_set = false;
0
}
Err(e) => return Err(e),
},
false => 1,
};
let address = cpu.x[f.rs1] as u64;
if Some(address) == cpu.reservation.take() {
cpu.mmu.clear_reservation(address);
cpu.mmu.store_word(address, cpu.x[f.rs2] as u32)?;
cpu.x[f.rd] = 0;
return Ok(());
}
cpu.x[f.rd] = 1;
Ok(())
},
disassemble: dump_format_r,
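Both SC paths above lean on `Option::take` to read and clear the CPU-local reservation in one step, so a failing store-conditional still drops the reservation. A small stand-alone illustration of that idiom follows; `sc_address_matches` is a hypothetical helper for illustration, not a function from this crate.

// Mirrors `if Some(address) == cpu.reservation.take()` from the SC.D/SC.W
// handlers: the comparison decides success, and `take()` clears the
// reservation whether or not the address matched.
fn sc_address_matches(reservation: &mut Option<u64>, address: u64) -> bool {
    Some(address) == reservation.take()
}

fn main() {
    let mut reservation = Some(0x4000_0000);
    // Wrong address: SC fails, but the reservation is gone afterwards.
    assert!(!sc_address_matches(&mut reservation, 0x4000_1000));
    assert_eq!(reservation, None);

    // Matching address after a fresh LR: SC succeeds and also clears it.
    reservation = Some(0x4000_0000);
    assert!(sc_address_matches(&mut reservation, 0x4000_0000));
    assert_eq!(reservation, None);
}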
@ -4144,77 +4126,3 @@ mod test_cpu {
assert_eq!(memory_base, cpu.read_pc());
}
}
#[cfg(test)]
mod test_decode_cache {
use super::*;
#[test]
fn initialize() {
let _cache = DecodeCache::new();
}
#[test]
fn insert() {
let mut cache = DecodeCache::new();
cache.insert(0, 0);
}
#[test]
fn get() {
let mut cache = DecodeCache::new();
cache.insert(1, 2);
// Cache hit test
match cache.get(1) {
Some(index) => assert_eq!(2, index),
None => panic!("Unexpected cache miss"),
};
// Cache miss test
match cache.get(2) {
Some(_index) => panic!("Unexpected cache hit"),
None => {}
};
}
#[test]
fn lru() {
let mut cache = DecodeCache::new();
cache.insert(0, 1);
match cache.get(0) {
Some(index) => assert_eq!(1, index),
None => panic!("Unexpected cache miss"),
};
for i in 1..DECODE_CACHE_ENTRY_NUM + 1 {
cache.insert(i as u32, i + 1);
}
// The oldest entry should have been removed because of the overflow
match cache.get(0) {
Some(_index) => panic!("Unexpected cache hit"),
None => {}
};
// With this .get(), the entry with the word "1" moves to the tail of the list
// and the entry with the word "2" becomes the oldest entry.
match cache.get(1) {
Some(index) => assert_eq!(2, index),
None => {}
};
// The oldest entry with the word "2" will be removed due to the overflow
cache.insert(
DECODE_CACHE_ENTRY_NUM as u32 + 1,
DECODE_CACHE_ENTRY_NUM + 2,
);
match cache.get(2) {
Some(_index) => panic!("Unexpected cache hit"),
None => {}
};
}
}

View File

@ -1,5 +1,7 @@
pub mod cpu;
pub mod memory;
pub mod mmu;
#[cfg(test)]
pub mod memory;
pub use cpu::{Cpu, CpuBuilder, Xlen};

View File

@ -1,6 +1,5 @@
/// Emulates main memory.
pub struct Memory {
/// Memory content
pub struct Memory { /// Memory content
data: Vec<u64>,
}

View File

@ -1,13 +1,13 @@
use std::{
collections::HashMap,
sync::{Arc, RwLock},
sync::mpsc::Receiver,
sync::{Arc, Mutex},
};
use crate::cpu::{decode_privilege_mode, PrivilegeMode, Trap, TrapType, Xlen};
use crate::cpu::{decode_privilege_mode, PrivilegeMode, ResponseData, Trap, TrapType, Xlen};
pub enum SyscallResult {
Ok([i64; 8]),
Defer(std::sync::mpsc::Receiver<([i64; 8], Option<(Vec<u8>, u64)>)>),
Defer(Receiver<ResponseData>),
}
impl From<[i64; 8]> for SyscallResult {
@ -16,8 +16,8 @@ impl From<[i64; 8]> for SyscallResult {
}
}
impl From<std::sync::mpsc::Receiver<([i64; 8], Option<(Vec<u8>, u64)>)>> for SyscallResult {
fn from(receiver: std::sync::mpsc::Receiver<([i64; 8], Option<(Vec<u8>, u64)>)>) -> Self {
impl From<std::sync::mpsc::Receiver<ResponseData>> for SyscallResult {
fn from(receiver: std::sync::mpsc::Receiver<ResponseData>) -> Self {
SyscallResult::Defer(receiver)
}
}
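`SyscallResult::Defer` hands the CPU loop a `Receiver<ResponseData>` so emulation can pause until the host finishes the request (surfaced as `TickResult::PauseEmulation` in cpu.rs). A self-contained sketch of that channel hand-off is below; `defer_syscall` and the local `ResponseData` alias are illustrative stand-ins, not items from this crate.

use std::sync::mpsc::{channel, Receiver};
use std::thread;

// Local copy of the alias defined in cpu.rs: eight return registers plus an
// optional (buffer, address) payload to write back into guest memory.
type ResponseData = ([i64; 8], Option<(Vec<u8>, u64)>);

// Hypothetical host-side handler: do the slow work on another thread and
// give the emulator a receiver it can park on.
fn defer_syscall(args: [i64; 8]) -> Receiver<ResponseData> {
    let (tx, rx) = channel();
    thread::spawn(move || {
        let mut regs = args;
        regs[0] = 0; // report success in a0
        tx.send((regs, None)).ok();
    });
    rx
}

fn main() {
    let rx = defer_syscall([64, 1, 0, 0, 0, 0, 0, 0]);
    // The emulator would stash this receiver in `SyscallResult::Defer` /
    // `TickResult::PauseEmulation` and block here when it is ready to resume.
    let (regs, payload) = rx.recv().unwrap();
    assert_eq!(regs[0], 0);
    assert!(payload.is_none());
}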
@ -33,6 +33,9 @@ pub trait Memory {
fn write_u64(&mut self, p_address: u64, value: u64);
fn validate_address(&self, address: u64) -> bool;
fn syscall(&mut self, args: [i64; 8]) -> SyscallResult;
fn translate(&self, v_address: u64) -> Option<u64>;
fn reserve(&mut self, p_address: u64) -> bool;
fn clear_reservation(&mut self, p_address: u64);
}
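The new `reserve` and `clear_reservation` hooks let the `Memory` backend participate in LR/SC reservation tracking (the LR handlers in cpu.rs only keep `cpu.reservation` when `reserve` returns true). The sketch below is one guess at the bookkeeping a backend might do; `ReservationSlot` is a hypothetical stand-in written against that assumed semantics, not code from this repository.

/// Single-reservation bookkeeping, roughly what the `reserve` /
/// `clear_reservation` trait methods above ask a `Memory` backend to do.
struct ReservationSlot {
    reserved: Option<u64>,
}

impl ReservationSlot {
    fn new() -> Self {
        ReservationSlot { reserved: None }
    }

    /// Called on LR: remember the physical address. Returning `true` tells
    /// the CPU the reservation was accepted.
    fn reserve(&mut self, p_address: u64) -> bool {
        self.reserved = Some(p_address);
        true
    }

    /// Called when the reservation must be dropped, e.g. after an SC or a
    /// conflicting store to the same address.
    fn clear_reservation(&mut self, p_address: u64) {
        if self.reserved == Some(p_address) {
            self.reserved = None;
        }
    }

    /// SC-style check: succeed only if the reservation is still held.
    fn is_reserved(&self, p_address: u64) -> bool {
        self.reserved == Some(p_address)
    }
}

fn main() {
    let mut slot = ReservationSlot::new();
    assert!(slot.reserve(0x8000_0000));
    assert!(slot.is_reserved(0x8000_0000));
    slot.clear_reservation(0x8000_0000); // e.g. another hart stored here
    assert!(!slot.is_reserved(0x8000_0000));
}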
/// Emulates Memory Management Unit. It holds the Main memory and peripheral
@ -46,29 +49,13 @@ pub struct Mmu {
ppn: u64,
addressing_mode: AddressingMode,
privilege_mode: PrivilegeMode,
memory: Arc<RwLock<dyn Memory + Send + Sync>>,
memory: Arc<Mutex<dyn Memory + Send + Sync>>,
// /// The size of main memory (if initialized)
// memory_length: Option<NonZeroU64>,
/// Address translation can be affected by `mstatus` (MPRV, MPP in machine mode),
/// so `Mmu` keeps a copy of it.
mstatus: u64,
/// Address translation page cache. Experimental feature.
/// The cache is cleared when translation mapping can be changed;
/// xlen, ppn, privilege_mode, or addressing_mode is updated.
/// Precisely it isn't good enough because page table entries
/// can be updated anytime with store instructions, of course
/// very depending on how pages are mapped tho.
/// But observing all page table entries is high cost so
/// ignoring so far. Then this cache optimization can cause a bug
/// due to unexpected (meaning not in page fault handler)
/// page table entry update. So this is experimental feature and
/// disabled by default. If you want to enable, use `enable_page_cache()`.
page_cache_enabled: bool,
fetch_page_cache: HashMap<u64, u64>,
load_page_cache: HashMap<u64, u64>,
store_page_cache: HashMap<u64, u64>,
}
#[derive(Debug)]
@ -100,7 +87,7 @@ impl Mmu {
///
/// # Arguments
/// * `xlen`
pub fn new(xlen: Xlen, memory: Arc<RwLock<dyn Memory + Send + Sync>>) -> Self {
pub fn new(xlen: Xlen, memory: Arc<Mutex<dyn Memory + Send + Sync>>) -> Self {
Mmu {
// clock: 0,
xlen,
@ -109,10 +96,6 @@ impl Mmu {
privilege_mode: PrivilegeMode::Machine,
memory,
mstatus: 0,
page_cache_enabled: false,
fetch_page_cache: HashMap::default(),
load_page_cache: HashMap::default(),
store_page_cache: HashMap::default(),
}
}
@ -122,37 +105,6 @@ impl Mmu {
/// * `xlen`
pub fn update_xlen(&mut self, xlen: Xlen) {
self.xlen = xlen;
self.clear_page_cache();
}
// /// Initializes Main memory. This method is expected to be called only once.
// ///
// /// # Arguments
// /// * `capacity`
// pub fn init_memory(&mut self, capacity: u64) {
// assert!(self.memory_length.is_none());
// self.memory_length = Some(NonZeroU64::new(capacity).unwrap());
// self.memory.init(capacity);
// }
// pub fn memory_size(&self) -> u64 {
// self.memory_length.unwrap().get()
// }
/// Enables or disables page cache optimization.
///
/// # Arguments
/// * `enabled`
pub fn enable_page_cache(&mut self, enabled: bool) {
self.page_cache_enabled = enabled;
self.clear_page_cache();
}
/// Clears page cache entries
fn clear_page_cache(&mut self) {
self.fetch_page_cache.clear();
self.load_page_cache.clear();
self.store_page_cache.clear();
}
/// Runs one cycle of MMU and peripheral devices.
@ -164,7 +116,6 @@ impl Mmu {
/// * `new_addressing_mode`
pub fn update_addressing_mode(&mut self, new_addressing_mode: AddressingMode) {
self.addressing_mode = new_addressing_mode;
self.clear_page_cache();
}
/// Updates privilege mode
@ -173,7 +124,6 @@ impl Mmu {
/// * `mode`
pub fn update_privilege_mode(&mut self, mode: PrivilegeMode) {
self.privilege_mode = mode;
self.clear_page_cache();
}
/// Updates mstatus copy. `CPU` needs to call this method whenever
@ -191,7 +141,6 @@ impl Mmu {
/// * `ppn`
pub fn update_ppn(&mut self, ppn: u64) {
self.ppn = ppn;
self.clear_page_cache();
}
fn trim_to_xlen(&self, address: u64) -> u64 {
@ -426,7 +375,7 @@ impl Mmu {
/// * `p_address` Physical address
pub(crate) fn load_raw(&self, p_address: u64) -> u8 {
self.memory
.read()
.lock() // .read()
.unwrap()
.read_u8(self.trim_to_xlen(p_address))
}
@ -438,7 +387,7 @@ impl Mmu {
/// * `p_address` Physical address
fn load_halfword_raw(&self, p_address: u64) -> u16 {
self.memory
.read()
.lock() // .read()
.unwrap()
.read_u16(self.trim_to_xlen(p_address))
}
@ -450,7 +399,7 @@ impl Mmu {
/// * `p_address` Physical address
pub fn load_word_raw(&self, p_address: u64) -> u32 {
self.memory
.read()
.lock() // .read()
.unwrap()
.read_u32(self.trim_to_xlen(p_address))
}
@ -462,7 +411,7 @@ impl Mmu {
/// * `p_address` Physical address
fn load_doubleword_raw(&self, p_address: u64) -> u64 {
self.memory
.read()
.lock() // .read()
.unwrap()
.read_u64(self.trim_to_xlen(p_address))
}
@ -475,7 +424,7 @@ impl Mmu {
/// * `value` data written
pub(crate) fn store_raw(&self, p_address: u64, value: u8) {
self.memory
.write()
.lock() // .write()
.unwrap()
.write_u8(self.trim_to_xlen(p_address), value)
}
@ -488,7 +437,7 @@ impl Mmu {
/// * `value` data written
pub(crate) fn store_halfword_raw(&self, p_address: u64, value: u16) {
self.memory
.write()
.lock() // .write()
.unwrap()
.write_u16(self.trim_to_xlen(p_address), value)
}
@ -501,7 +450,7 @@ impl Mmu {
/// * `value` data written
pub(crate) fn store_word_raw(&self, p_address: u64, value: u32) {
self.memory
.write()
.lock() // .write()
.unwrap()
.write_u32(self.trim_to_xlen(p_address), value)
}
@ -514,7 +463,7 @@ impl Mmu {
/// * `value` data written
fn store_doubleword_raw(&self, p_address: u64, value: u64) {
self.memory
.write()
.lock() // .write()
.unwrap()
.write_u64(self.trim_to_xlen(p_address), value)
}
@ -529,14 +478,38 @@ impl Mmu {
.ok()
.map(|p_address| {
self.memory
.write()
.lock() // .read()
.unwrap()
.validate_address(self.trim_to_xlen(p_address))
})
}
pub fn reserve(&mut self, p_address: u64) -> bool {
self.memory
.lock() // .write()
.unwrap()
.reserve(self.trim_to_xlen(p_address))
}
pub fn clear_reservation(&mut self, p_address: u64) {
self.memory
.lock() // .write()
.unwrap()
.clear_reservation(self.trim_to_xlen(p_address))
}
fn translate_address(&self, v_address: u64, access_type: &MemoryAccessType) -> Result<u64, ()> {
self.translate_address_with_privilege_mode(v_address, access_type, self.privilege_mode)
if let AddressingMode::None = self.addressing_mode {
Ok(v_address)
} else {
// self.memory.lock() // .read().unwrap().translate(v_address).ok_or(())
let phys = self.translate_address_with_privilege_mode(
v_address,
access_type,
self.privilege_mode,
)?;
Ok(phys)
}
}
fn translate_address_with_privilege_mode(
@ -546,18 +519,6 @@ impl Mmu {
privilege_mode: PrivilegeMode,
) -> Result<u64, ()> {
let address = self.trim_to_xlen(v_address);
let v_page = address & !0xfff;
if let Some(p_page) = match self.page_cache_enabled {
true => match access_type {
MemoryAccessType::Execute => self.fetch_page_cache.get(&v_page),
MemoryAccessType::Read => self.load_page_cache.get(&v_page),
MemoryAccessType::Write => self.store_page_cache.get(&v_page),
MemoryAccessType::DontCare => None,
},
false => None,
} {
return Ok(p_page | (address & 0xfff));
}
match self.addressing_mode {
AddressingMode::None => Ok(address),
@ -618,24 +579,6 @@ impl Mmu {
panic!("AddressingMode SV48 is not supported yet.");
}
}
// if self.page_cache_enabled {
// match p_address {
// Ok(p_address) => {
// let p_page = p_address & !0xfff;
// match access_type {
// MemoryAccessType::Execute => self.fetch_page_cache.insert(v_page, p_page),
// MemoryAccessType::Read => self.load_page_cache.insert(v_page, p_page),
// MemoryAccessType::Write => self.store_page_cache.insert(v_page, p_page),
// MemoryAccessType::DontCare => None,
// };
// Ok(p_address)
// }
// Err(()) => Err(()),
// }
// } else {
// p_address
// }
}
fn traverse_page(
@ -766,99 +709,3 @@ impl Mmu {
Ok(p_address)
}
}
// pub struct MemoryWrapper {
// memory: Memory,
// dram_base: u64,
// }
// impl MemoryWrapper {
// fn new(dram_base: u64) -> Self {
// MemoryWrapper {
// memory: Memory::new(),
// dram_base,
// }
// }
// fn init(&mut self, capacity: u64) {
// self.memory.init(capacity);
// }
// pub fn read_byte(&self, p_address: u64) -> u8 {
// debug_assert!(
// p_address >= self.dram_base,
// "Memory address must equals to or bigger than self.dram_base. {:X}",
// p_address
// );
// self.memory.read_byte(p_address - self.dram_base)
// }
// pub fn read_halfword(&self, p_address: u64) -> u16 {
// debug_assert!(
// p_address >= self.dram_base && p_address.wrapping_add(1) >= self.dram_base,
// "Memory address must equals to or bigger than self.dram_base. {:X}",
// p_address
// );
// self.memory.read_halfword(p_address - self.dram_base)
// }
// pub fn read_word(&self, p_address: u64) -> u32 {
// debug_assert!(
// p_address >= self.dram_base && p_address.wrapping_add(3) >= self.dram_base,
// "Memory address must equals to or bigger than self.dram_base. {:X}",
// p_address
// );
// self.memory.read_word(p_address - self.dram_base)
// }
// pub fn read_doubleword(&self, p_address: u64) -> u64 {
// debug_assert!(
// p_address >= self.dram_base && p_address.wrapping_add(7) >= self.dram_base,
// "Memory address must equals to or bigger than self.dram_base. {:X}",
// p_address
// );
// self.memory.read_doubleword(p_address - self.dram_base)
// }
// pub fn write_byte(&mut self, p_address: u64, value: u8) {
// debug_assert!(
// p_address >= self.dram_base,
// "Memory address must equals to or bigger than self.dram_base. {:X}",
// p_address
// );
// self.memory.write_byte(p_address - self.dram_base, value)
// }
// pub fn write_halfword(&mut self, p_address: u64, value: u16) {
// debug_assert!(
// p_address >= self.dram_base && p_address.wrapping_add(1) >= self.dram_base,
// "Memory address must equals to or bigger than self.dram_base. {:X}",
// p_address
// );
// self.memory
// .write_halfword(p_address - self.dram_base, value)
// }
// pub fn write_word(&mut self, p_address: u64, value: u32) {
// debug_assert!(
// p_address >= self.dram_base && p_address.wrapping_add(3) >= self.dram_base,
// "Memory address must equals to or bigger than self.dram_base. {:X}",
// p_address
// );
// self.memory.write_word(p_address - self.dram_base, value)
// }
// pub fn write_doubleword(&mut self, p_address: u64, value: u64) {
// debug_assert!(
// p_address >= self.dram_base && p_address.wrapping_add(7) >= self.dram_base,
// "Memory address must equals to or bigger than self.dram_base. {:X}",
// p_address
// );
// self.memory
// .write_doubleword(p_address - self.dram_base, value)
// }
// pub fn validate_address(&self, address: u64) -> bool {
// self.memory.validate_address(address - self.dram_base)
// }
// }