get mapmemory working

Signed-off-by: Sean Cross <sean@xobs.io>

commit dfb33a95a1, parent f54c3bdc6e
@@ -1,4 +1,7 @@
use crate::mmu::{AddressingMode, Mmu};
use std::sync::{Arc, RwLock};

pub use super::mmu::Memory;
use super::mmu::{AddressingMode, Mmu};

const DEFAULT_MEMORY_BASE: u64 = 0x80000000;

@ -53,10 +56,6 @@ pub const MIP_SEIP: u64 = 0x200;
|
||||
const MIP_STIP: u64 = 0x020;
|
||||
const MIP_SSIP: u64 = 0x002;
|
||||
|
||||
pub trait EventHandler {
|
||||
fn handle_event(&mut self, cpu: &mut Cpu, args: [i64; 8]) -> [i64; 8];
|
||||
}
|
||||
|
||||
/// Emulates a RISC-V CPU core
|
||||
pub struct Cpu {
|
||||
clock: u64,
|
||||
@ -70,13 +69,12 @@ pub struct Cpu {
|
||||
pc: u64,
|
||||
csr: [u64; CSR_CAPACITY],
|
||||
mmu: Mmu,
|
||||
memory: Arc<RwLock<dyn Memory + Send + Sync>>,
|
||||
reservation: u64, // @TODO: Should support multiple address reservations
|
||||
is_reservation_set: bool,
|
||||
_dump_flag: bool,
|
||||
// decode_cache: DecodeCache,
|
||||
unsigned_data_mask: u64,
|
||||
memory_base: u64,
|
||||
handler: Option<Box<dyn EventHandler>>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
@ -85,7 +83,7 @@ pub enum Xlen {
|
||||
Bit64, // @TODO: Support Bit128
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
pub enum PrivilegeMode {
|
||||
User,
|
||||
Supervisor,
|
||||
@ -146,7 +144,7 @@ fn get_privilege_encoding(mode: &PrivilegeMode) -> u8 {
|
||||
}
|
||||
|
||||
/// Returns `PrivilegeMode` from encoded privilege mode bits
|
||||
pub fn get_privilege_mode(encoding: u64) -> PrivilegeMode {
|
||||
pub fn decode_privilege_mode(encoding: u64) -> PrivilegeMode {
|
||||
match encoding {
|
||||
0 => PrivilegeMode::User,
|
||||
1 => PrivilegeMode::Supervisor,
|
||||
@ -217,18 +215,14 @@ fn get_trap_cause(trap: &Trap, xlen: &Xlen) -> u64 {
|
||||
|
||||
pub struct CpuBuilder {
|
||||
xlen: Xlen,
|
||||
memory_size: u64,
|
||||
memory_base: u64,
|
||||
handler: Option<Box<dyn EventHandler>>,
|
||||
memory: Arc<RwLock<dyn Memory + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl CpuBuilder {
|
||||
pub fn new() -> Self {
|
||||
pub fn new(memory: Arc<RwLock<dyn Memory + Send + Sync>>) -> Self {
|
||||
CpuBuilder {
|
||||
xlen: Xlen::Bit64,
|
||||
memory_size: 0,
|
||||
memory_base: DEFAULT_MEMORY_BASE,
|
||||
handler: None,
|
||||
memory,
|
||||
}
|
||||
}
|
||||
|
||||
@ -237,45 +231,19 @@ impl CpuBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
pub fn memory_size(mut self, memory_size: u64) -> Self {
|
||||
self.memory_size = memory_size;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn handler(mut self, handler: Box<dyn EventHandler>) -> Self {
|
||||
self.handler = Some(handler);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn build(self) -> Cpu {
|
||||
let mut cpu = Cpu::new(self.memory_base);
|
||||
let mut cpu = Cpu::new(self.memory);
|
||||
cpu.update_xlen(self.xlen.clone());
|
||||
cpu.mmu.init_memory(self.memory_size);
|
||||
if self.handler.is_some() {
|
||||
cpu.set_handler(self.handler);
|
||||
}
|
||||
cpu
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for CpuBuilder {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Cpu {
|
||||
fn default() -> Self {
|
||||
Self::new(DEFAULT_MEMORY_BASE)
|
||||
}
|
||||
}
|
||||
|
||||
impl Cpu {
|
||||
/// Creates a new `Cpu`.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `Terminal`
|
||||
pub fn new(memory_base: u64) -> Self {
|
||||
pub fn new(memory: Arc<RwLock<dyn Memory + Send + Sync>>) -> Self {
|
||||
Cpu {
|
||||
clock: 0,
|
||||
xlen: Xlen::Bit64,
|
||||
@ -285,14 +253,13 @@ impl Cpu {
|
||||
f: [0.0; 32],
|
||||
pc: 0,
|
||||
csr: [0; CSR_CAPACITY],
|
||||
mmu: Mmu::new(Xlen::Bit64, memory_base),
|
||||
mmu: Mmu::new(Xlen::Bit64, memory.clone()),
|
||||
reservation: 0,
|
||||
is_reservation_set: false,
|
||||
_dump_flag: false,
|
||||
// decode_cache: DecodeCache::new(),
|
||||
unsigned_data_mask: 0xffffffffffffffff,
|
||||
memory_base,
|
||||
handler: None,
|
||||
memory,
|
||||
}
|
||||
// let mut cpu = ;
|
||||
// cpu.x[0xb] = 0x1020; // I don't know why but Linux boot seems to require this initialization
|
||||
@ -300,14 +267,6 @@ impl Cpu {
|
||||
// cpu
|
||||
}
|
||||
|
||||
/// Assigns an event handler to the CPU.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `handler` An object that implements the [`EventHandler`](trait.EventHandler.html) trait
|
||||
pub fn set_handler(&mut self, handler: Option<Box<dyn EventHandler>>) {
|
||||
self.handler = handler;
|
||||
}
|
||||
|
||||
/// Updates Program Counter content
|
||||
///
|
||||
/// # Arguments
|
||||
@ -1507,13 +1466,13 @@ impl Cpu {
|
||||
&mut self.mmu
|
||||
}
|
||||
|
||||
pub fn memory_base(&self) -> u64 {
|
||||
self.memory_base
|
||||
}
|
||||
// pub fn memory_base(&self) -> u64 {
|
||||
// self.memory_base
|
||||
// }
|
||||
|
||||
pub fn memory_size(&self) -> u64 {
|
||||
self.mmu.memory_size()
|
||||
}
|
||||
// pub fn memory_size(&self) -> u64 {
|
||||
// self.mmu.memory_size()
|
||||
// }
|
||||
|
||||
pub fn phys_read_u32(&self, address: u64) -> u32 {
|
||||
self.mmu.load_word_raw(address)
|
||||
@@ -2455,30 +2414,27 @@ const INSTRUCTIONS: [Instruction; INSTRUCTION_NUM] = [
mask: 0xffffffff,
data: 0x00000073,
name: "ECALL",
operation: |cpu, _word, address| {
if let Some(mut handler) = cpu.handler.take() {
operation: |cpu, _word, _address| {
let mut args = [0i64; 8];
for (src, dest) in cpu.x[10..].iter().zip(args.iter_mut()) {
*dest = *src;
}
let result = handler.handle_event(cpu, args);
let result = cpu.memory.write().unwrap().syscall(args);
for (src, dest) in result.iter().zip(cpu.x[10..].iter_mut()) {
*dest = *src;
}
cpu.handler = Some(handler);
return Ok(());
}
Ok(())

let exception_type = match cpu.privilege_mode {
PrivilegeMode::User => TrapType::EnvironmentCallFromUMode,
PrivilegeMode::Supervisor => TrapType::EnvironmentCallFromSMode,
PrivilegeMode::Machine => TrapType::EnvironmentCallFromMMode,
PrivilegeMode::Reserved => panic!("Unknown Privilege mode"),
};
Err(Trap {
trap_type: exception_type,
value: address,
})
// let exception_type = match cpu.privilege_mode {
// PrivilegeMode::User => TrapType::EnvironmentCallFromUMode,
// PrivilegeMode::Supervisor => TrapType::EnvironmentCallFromSMode,
// PrivilegeMode::Machine => TrapType::EnvironmentCallFromMMode,
// PrivilegeMode::Reserved => panic!("Unknown Privilege mode"),
// };
// Err(Trap {
// trap_type: exception_type,
// value: address,
// })
},
disassemble: dump_empty,
},
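For reference, a standalone sketch of the register marshalling the new ECALL arm performs, assuming x stands in for the 32-entry integer register file (x[10] through x[17] are a0 through a7); the two copy loops mirror the ones above:

fn main() {
    let mut x = [0i64; 32];
    x[10] = 3; // a0: syscall number as seen by Memory::syscall()
    x[11] = 0x1000; // a1..a7: syscall arguments

    // Copy a0..a7 into the args array handed to Memory::syscall().
    let mut args = [0i64; 8];
    for (src, dest) in x[10..].iter().zip(args.iter_mut()) {
        *dest = *src;
    }
    assert_eq!(args[..2], [3, 0x1000]);

    // The eight returned values are copied back into a0..a7 the same way.
    let result = [0i64; 8];
    for (src, dest) in result.iter().zip(x[10..].iter_mut()) {
        *dest = *src;
    }
    assert_eq!(x[10], 0);
}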
@ -3102,7 +3058,7 @@ const INSTRUCTIONS: [Instruction; INSTRUCTION_NUM] = [
|
||||
let status = cpu.read_csr_raw(CSR_MSTATUS_ADDRESS);
|
||||
let mpie = (status >> 7) & 1;
|
||||
let mpp = (status >> 11) & 0x3;
|
||||
let mprv = match get_privilege_mode(mpp) {
|
||||
let mprv = match decode_privilege_mode(mpp) {
|
||||
PrivilegeMode::Machine => (status >> 17) & 1,
|
||||
_ => 0,
|
||||
};
|
||||
@ -3470,7 +3426,7 @@ const INSTRUCTIONS: [Instruction; INSTRUCTION_NUM] = [
|
||||
let status = cpu.read_csr_raw(CSR_SSTATUS_ADDRESS);
|
||||
let spie = (status >> 5) & 1;
|
||||
let spp = (status >> 8) & 1;
|
||||
let mprv = match get_privilege_mode(spp) {
|
||||
let mprv = match decode_privilege_mode(spp) {
|
||||
PrivilegeMode::Machine => (status >> 17) & 1,
|
||||
_ => 0,
|
||||
};
|
||||
|
@@ -1,10 +1,23 @@
use std::{collections::HashMap, num::NonZeroU64};

use crate::{
cpu::{get_privilege_mode, PrivilegeMode, Trap, TrapType, Xlen},
memory::Memory,
use std::{
collections::HashMap,
sync::{Arc, RwLock},
};

use crate::cpu::{decode_privilege_mode, PrivilegeMode, Trap, TrapType, Xlen};

pub trait Memory {
fn read_u8(&self, p_address: u64) -> u8;
fn read_u16(&self, p_address: u64) -> u16;
fn read_u32(&self, p_address: u64) -> u32;
fn read_u64(&self, p_address: u64) -> u64;
fn write_u8(&mut self, p_address: u64, value: u8);
fn write_u16(&mut self, p_address: u64, value: u16);
fn write_u32(&mut self, p_address: u64, value: u32);
fn write_u64(&mut self, p_address: u64, value: u64);
fn validate_address(&self, address: u64) -> bool;
fn syscall(&mut self, args: [i64; 8]) -> [i64; 8];
}
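A sketch of how this trait is meant to be consumed, based on the rest of this commit (the build_cpu helper and the backing parameter are illustrative, not part of the diff): the caller wraps its Memory implementation in Arc<RwLock<...>>, keeps one clone for itself, and hands the other to CpuBuilder, so the host and Memory::syscall() observe the same state.

use std::sync::{Arc, RwLock};
use riscv_cpu::cpu::Memory;

fn build_cpu(backing: impl Memory + Send + Sync + 'static) -> riscv_cpu::Cpu {
    // Shared, lockable handle to guest memory and the syscall handler.
    let memory: Arc<RwLock<dyn Memory + Send + Sync>> = Arc::new(RwLock::new(backing));
    riscv_cpu::CpuBuilder::new(memory.clone())
        .xlen(riscv_cpu::Xlen::Bit32)
        .build()
}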

/// Emulates the Memory Management Unit. It holds the main memory and peripheral
/// devices, maps addresses to them, and accesses them depending on the address.
/// It also manages virtual-physical address translation and memory protection.
|
||||
@ -16,11 +29,10 @@ pub struct Mmu {
|
||||
ppn: u64,
|
||||
addressing_mode: AddressingMode,
|
||||
privilege_mode: PrivilegeMode,
|
||||
memory: MemoryWrapper,
|
||||
|
||||
/// The size of main memory (if initialized)
|
||||
memory_length: Option<NonZeroU64>,
|
||||
memory: Arc<RwLock<dyn Memory + Send + Sync>>,
|
||||
|
||||
// /// The size of main memory (if initialized)
|
||||
// memory_length: Option<NonZeroU64>,
|
||||
/// Address translation can be affected `mstatus` (MPRV, MPP in machine mode)
|
||||
/// then `Mmu` has copy of it.
|
||||
mstatus: u64,
|
||||
@ -71,27 +83,19 @@ impl Mmu {
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `xlen`
|
||||
/// * `terminal`
|
||||
pub fn new(xlen: Xlen, dram_base: u64) -> Self {
|
||||
// // Load default device tree binary content
|
||||
// let content = include_bytes!("./device/dtb.dtb");
|
||||
// for i in 0..content.len() {
|
||||
// dtb[i] = content[i];
|
||||
// }
|
||||
|
||||
pub fn new(xlen: Xlen, memory: Arc<RwLock<dyn Memory + Send + Sync>>) -> Self {
|
||||
Mmu {
|
||||
// clock: 0,
|
||||
xlen,
|
||||
ppn: 0,
|
||||
addressing_mode: AddressingMode::None,
|
||||
privilege_mode: PrivilegeMode::Machine,
|
||||
memory: MemoryWrapper::new(dram_base),
|
||||
memory,
|
||||
mstatus: 0,
|
||||
page_cache_enabled: false,
|
||||
fetch_page_cache: HashMap::default(),
|
||||
load_page_cache: HashMap::default(),
|
||||
store_page_cache: HashMap::default(),
|
||||
memory_length: None,
|
||||
}
|
||||
}
|
||||
|
||||
@ -104,19 +108,19 @@ impl Mmu {
|
||||
self.clear_page_cache();
|
||||
}
|
||||
|
||||
/// Initializes Main memory. This method is expected to be called only once.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `capacity`
|
||||
pub fn init_memory(&mut self, capacity: u64) {
|
||||
assert!(self.memory_length.is_none());
|
||||
self.memory_length = Some(NonZeroU64::new(capacity).unwrap());
|
||||
self.memory.init(capacity);
|
||||
}
|
||||
// /// Initializes Main memory. This method is expected to be called only once.
|
||||
// ///
|
||||
// /// # Arguments
|
||||
// /// * `capacity`
|
||||
// pub fn init_memory(&mut self, capacity: u64) {
|
||||
// assert!(self.memory_length.is_none());
|
||||
// self.memory_length = Some(NonZeroU64::new(capacity).unwrap());
|
||||
// self.memory.init(capacity);
|
||||
// }
|
||||
|
||||
pub fn memory_size(&self) -> u64 {
|
||||
self.memory_length.unwrap().get()
|
||||
}
|
||||
// pub fn memory_size(&self) -> u64 {
|
||||
// self.memory_length.unwrap().get()
|
||||
// }
|
||||
|
||||
/// Enables or disables page cache optimization.
|
||||
///
|
||||
@ -185,7 +189,7 @@ impl Mmu {
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `v_address` Virtual address
|
||||
fn fetch(&mut self, v_address: u64) -> Result<u8, Trap> {
|
||||
fn fetch(&self, v_address: u64) -> Result<u8, Trap> {
|
||||
self.translate_address(v_address, &MemoryAccessType::Execute)
|
||||
.map(|p_address| self.load_raw(p_address))
|
||||
.map_err(|()| Trap {
|
||||
@ -199,7 +203,7 @@ impl Mmu {
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `v_address` Virtual address
|
||||
pub fn fetch_word(&mut self, v_address: u64) -> Result<u32, Trap> {
|
||||
pub fn fetch_word(&self, v_address: u64) -> Result<u32, Trap> {
|
||||
let width = 4;
|
||||
if (v_address & 0xfff) <= (0x1000 - width) {
|
||||
// Fast path. All bytes fetched are in the same page so
|
||||
@ -228,7 +232,7 @@ impl Mmu {
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `v_address` Virtual address
|
||||
pub fn load(&mut self, v_address: u64) -> Result<u8, Trap> {
|
||||
pub fn load(&self, v_address: u64) -> Result<u8, Trap> {
|
||||
let effective_address = self.trim_to_xlen(v_address);
|
||||
match self.translate_address(effective_address, &MemoryAccessType::Read) {
|
||||
Ok(p_address) => Ok(self.load_raw(p_address)),
|
||||
@ -245,7 +249,7 @@ impl Mmu {
|
||||
/// # Arguments
|
||||
/// * `v_address` Virtual address
|
||||
/// * `width` Must be 1, 2, 4, or 8
|
||||
fn load_bytes(&mut self, v_address: u64, width: u64) -> Result<u64, Trap> {
|
||||
fn load_bytes(&self, v_address: u64, width: u64) -> Result<u64, Trap> {
|
||||
debug_assert!(
|
||||
width == 1 || width == 2 || width == 4 || width == 8,
|
||||
"Width must be 1, 2, 4, or 8. {:X}",
|
||||
@ -287,7 +291,7 @@ impl Mmu {
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `v_address` Virtual address
|
||||
pub fn load_halfword(&mut self, v_address: u64) -> Result<u16, Trap> {
|
||||
pub fn load_halfword(&self, v_address: u64) -> Result<u16, Trap> {
|
||||
self.load_bytes(v_address, 2).map(|data| data as u16)
|
||||
}
|
||||
|
||||
@ -296,7 +300,7 @@ impl Mmu {
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `v_address` Virtual address
|
||||
pub fn load_word(&mut self, v_address: u64) -> Result<u32, Trap> {
|
||||
pub fn load_word(&self, v_address: u64) -> Result<u32, Trap> {
|
||||
self.load_bytes(v_address, 4).map(|data| data as u32)
|
||||
}
|
||||
|
||||
@ -305,7 +309,7 @@ impl Mmu {
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `v_address` Virtual address
|
||||
pub fn load_doubleword(&mut self, v_address: u64) -> Result<u64, Trap> {
|
||||
pub fn load_doubleword(&self, v_address: u64) -> Result<u64, Trap> {
|
||||
self.load_bytes(v_address, 8)
|
||||
}
|
||||
|
||||
@ -315,7 +319,7 @@ impl Mmu {
|
||||
/// # Arguments
|
||||
/// * `v_address` Virtual address
|
||||
/// * `value`
|
||||
pub fn store(&mut self, v_address: u64, value: u8) -> Result<(), Trap> {
|
||||
pub fn store(&self, v_address: u64, value: u8) -> Result<(), Trap> {
|
||||
self.translate_address(v_address, &MemoryAccessType::Write)
|
||||
.map(|p_address| self.store_raw(p_address, value))
|
||||
.map_err(|()| Trap {
|
||||
@ -331,7 +335,7 @@ impl Mmu {
|
||||
/// * `v_address` Virtual address
|
||||
/// * `value` data written
|
||||
/// * `width` Must be 1, 2, 4, or 8
|
||||
fn store_bytes(&mut self, v_address: u64, value: u64, width: u64) -> Result<(), Trap> {
|
||||
fn store_bytes(&self, v_address: u64, value: u64, width: u64) -> Result<(), Trap> {
|
||||
debug_assert!(
|
||||
width == 1 || width == 2 || width == 4 || width == 8,
|
||||
"Width must be 1, 2, 4, or 8. {:X}",
|
||||
@ -374,7 +378,7 @@ impl Mmu {
|
||||
/// # Arguments
|
||||
/// * `v_address` Virtual address
|
||||
/// * `value` data written
|
||||
pub fn store_halfword(&mut self, v_address: u64, value: u16) -> Result<(), Trap> {
|
||||
pub fn store_halfword(&self, v_address: u64, value: u16) -> Result<(), Trap> {
|
||||
self.store_bytes(v_address, value as u64, 2)
|
||||
}
|
||||
|
||||
@ -384,7 +388,7 @@ impl Mmu {
|
||||
/// # Arguments
|
||||
/// * `v_address` Virtual address
|
||||
/// * `value` data written
|
||||
pub fn store_word(&mut self, v_address: u64, value: u32) -> Result<(), Trap> {
|
||||
pub fn store_word(&self, v_address: u64, value: u32) -> Result<(), Trap> {
|
||||
self.store_bytes(v_address, value as u64, 4)
|
||||
}
|
||||
|
||||
@ -394,7 +398,7 @@ impl Mmu {
|
||||
/// # Arguments
|
||||
/// * `v_address` Virtual address
|
||||
/// * `value` data written
|
||||
pub fn store_doubleword(&mut self, v_address: u64, value: u64) -> Result<(), Trap> {
|
||||
pub fn store_doubleword(&self, v_address: u64, value: u64) -> Result<(), Trap> {
|
||||
self.store_bytes(v_address, value, 8)
|
||||
}
|
||||
|
||||
@ -404,7 +408,10 @@ impl Mmu {
|
||||
/// # Arguments
|
||||
/// * `p_address` Physical address
|
||||
pub(crate) fn load_raw(&self, p_address: u64) -> u8 {
|
||||
self.memory.read_byte(self.trim_to_xlen(p_address))
|
||||
self.memory
|
||||
.read()
|
||||
.unwrap()
|
||||
.read_u8(self.trim_to_xlen(p_address))
|
||||
}
|
||||
|
||||
/// Loads two bytes from main memory or peripheral devices depending on
|
||||
@ -413,7 +420,10 @@ impl Mmu {
|
||||
/// # Arguments
|
||||
/// * `p_address` Physical address
|
||||
fn load_halfword_raw(&self, p_address: u64) -> u16 {
|
||||
self.memory.read_halfword(self.trim_to_xlen(p_address))
|
||||
self.memory
|
||||
.read()
|
||||
.unwrap()
|
||||
.read_u16(self.trim_to_xlen(p_address))
|
||||
}
|
||||
|
||||
/// Loads four bytes from main memory or peripheral devices depending on
|
||||
@ -422,7 +432,10 @@ impl Mmu {
|
||||
/// # Arguments
|
||||
/// * `p_address` Physical address
|
||||
pub fn load_word_raw(&self, p_address: u64) -> u32 {
|
||||
self.memory.read_word(self.trim_to_xlen(p_address))
|
||||
self.memory
|
||||
.read()
|
||||
.unwrap()
|
||||
.read_u32(self.trim_to_xlen(p_address))
|
||||
}
|
||||
|
||||
/// Loads eight bytes from main memory or peripheral devices depending on
|
||||
@ -431,7 +444,10 @@ impl Mmu {
|
||||
/// # Arguments
|
||||
/// * `p_address` Physical address
|
||||
fn load_doubleword_raw(&self, p_address: u64) -> u64 {
|
||||
self.memory.read_doubleword(self.trim_to_xlen(p_address))
|
||||
self.memory
|
||||
.read()
|
||||
.unwrap()
|
||||
.read_u64(self.trim_to_xlen(p_address))
|
||||
}
|
||||
|
||||
/// Stores a byte to main memory or peripheral devices depending on
|
||||
@ -440,8 +456,11 @@ impl Mmu {
|
||||
/// # Arguments
|
||||
/// * `p_address` Physical address
|
||||
/// * `value` data written
|
||||
pub(crate) fn store_raw(&mut self, p_address: u64, value: u8) {
|
||||
self.memory.write_byte(self.trim_to_xlen(p_address), value)
|
||||
pub(crate) fn store_raw(&self, p_address: u64, value: u8) {
|
||||
self.memory
|
||||
.write()
|
||||
.unwrap()
|
||||
.write_u8(self.trim_to_xlen(p_address), value)
|
||||
}
|
||||
|
||||
/// Stores two bytes to main memory or peripheral devices depending on
|
||||
@ -450,9 +469,11 @@ impl Mmu {
|
||||
/// # Arguments
|
||||
/// * `p_address` Physical address
|
||||
/// * `value` data written
|
||||
pub(crate) fn store_halfword_raw(&mut self, p_address: u64, value: u16) {
|
||||
pub(crate) fn store_halfword_raw(&self, p_address: u64, value: u16) {
|
||||
self.memory
|
||||
.write_halfword(self.trim_to_xlen(p_address), value)
|
||||
.write()
|
||||
.unwrap()
|
||||
.write_u16(self.trim_to_xlen(p_address), value)
|
||||
}
|
||||
|
||||
/// Stores four bytes to main memory or peripheral devices depending on
|
||||
@ -461,8 +482,11 @@ impl Mmu {
|
||||
/// # Arguments
|
||||
/// * `p_address` Physical address
|
||||
/// * `value` data written
|
||||
pub(crate) fn store_word_raw(&mut self, p_address: u64, value: u32) {
|
||||
self.memory.write_word(self.trim_to_xlen(p_address), value)
|
||||
pub(crate) fn store_word_raw(&self, p_address: u64, value: u32) {
|
||||
self.memory
|
||||
.write()
|
||||
.unwrap()
|
||||
.write_u32(self.trim_to_xlen(p_address), value)
|
||||
}
|
||||
|
||||
/// Stores eight bytes to main memory or peripheral devices depending on
|
||||
@ -471,9 +495,11 @@ impl Mmu {
|
||||
/// # Arguments
|
||||
/// * `p_address` Physical address
|
||||
/// * `value` data written
|
||||
fn store_doubleword_raw(&mut self, p_address: u64, value: u64) {
|
||||
fn store_doubleword_raw(&self, p_address: u64, value: u64) {
|
||||
self.memory
|
||||
.write_doubleword(self.trim_to_xlen(p_address), value)
|
||||
.write()
|
||||
.unwrap()
|
||||
.write_u64(self.trim_to_xlen(p_address), value)
|
||||
}
|
||||
|
||||
/// Checks if passed virtual address is valid (pointing a certain device) or not.
|
||||
@ -481,18 +507,26 @@ impl Mmu {
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `v_address` Virtual address
|
||||
pub fn validate_address(&mut self, v_address: u64) -> Option<bool> {
|
||||
if let Ok(p_address) = self.translate_address(v_address, &MemoryAccessType::DontCare) {
|
||||
Some(self.memory.validate_address(self.trim_to_xlen(p_address)))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
pub fn validate_address(&self, v_address: u64) -> Option<bool> {
|
||||
self.translate_address(v_address, &MemoryAccessType::DontCare)
|
||||
.ok()
|
||||
.map(|p_address| {
|
||||
self.memory
|
||||
.write()
|
||||
.unwrap()
|
||||
.validate_address(self.trim_to_xlen(p_address))
|
||||
})
|
||||
}
|
||||
|
||||
fn translate_address(
|
||||
&mut self,
|
||||
fn translate_address(&self, v_address: u64, access_type: &MemoryAccessType) -> Result<u64, ()> {
|
||||
self.translate_address_with_privilege_mode(v_address, access_type, self.privilege_mode)
|
||||
}
|
||||
|
||||
fn translate_address_with_privilege_mode(
|
||||
&self,
|
||||
v_address: u64,
|
||||
access_type: &MemoryAccessType,
|
||||
privilege_mode: PrivilegeMode,
|
||||
) -> Result<u64, ()> {
|
||||
let address = self.trim_to_xlen(v_address);
|
||||
let v_page = address & !0xfff;
|
||||
@ -510,28 +544,24 @@ impl Mmu {
|
||||
|
||||
let p_address = match self.addressing_mode {
|
||||
AddressingMode::None => Ok(address),
|
||||
AddressingMode::SV32 => match self.privilege_mode {
|
||||
AddressingMode::SV32 => match privilege_mode {
|
||||
// @TODO: Optimize
|
||||
PrivilegeMode::Machine => match access_type {
|
||||
MemoryAccessType::Execute => Ok(address),
|
||||
// @TODO: Remove magic number
|
||||
_ => match (self.mstatus >> 17) & 1 {
|
||||
0 => Ok(address),
|
||||
_ => {
|
||||
let privilege_mode = get_privilege_mode((self.mstatus >> 9) & 3);
|
||||
match privilege_mode {
|
||||
PrivilegeMode::Machine => {
|
||||
if let MemoryAccessType::Execute = access_type {
|
||||
Ok(address)
|
||||
} else if (self.mstatus >> 17) & 1 == 0 {
|
||||
Ok(address)
|
||||
} else {
|
||||
match decode_privilege_mode((self.mstatus >> 9) & 3) {
|
||||
PrivilegeMode::Machine => Ok(address),
|
||||
_ => {
|
||||
let current_privilege_mode = self.privilege_mode.clone();
|
||||
self.update_privilege_mode(privilege_mode);
|
||||
let result = self.translate_address(v_address, access_type);
|
||||
self.update_privilege_mode(current_privilege_mode);
|
||||
result
|
||||
temp_privilege_mode => self.translate_address_with_privilege_mode(
|
||||
v_address,
|
||||
access_type,
|
||||
temp_privilege_mode,
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
PrivilegeMode::User | PrivilegeMode::Supervisor => {
|
||||
let vpns = [(address >> 12) & 0x3ff, (address >> 22) & 0x3ff];
|
||||
self.traverse_page(address, 2 - 1, self.ppn, &vpns, access_type)
|
||||
@ -541,26 +571,22 @@ impl Mmu {
|
||||
AddressingMode::SV39 => match self.privilege_mode {
|
||||
// @TODO: Optimize
|
||||
// @TODO: Remove duplicated code with SV32
|
||||
PrivilegeMode::Machine => match access_type {
|
||||
MemoryAccessType::Execute => Ok(address),
|
||||
// @TODO: Remove magic number
|
||||
_ => match (self.mstatus >> 17) & 1 {
|
||||
0 => Ok(address),
|
||||
_ => {
|
||||
let privilege_mode = get_privilege_mode((self.mstatus >> 9) & 3);
|
||||
match privilege_mode {
|
||||
PrivilegeMode::Machine => {
|
||||
if let MemoryAccessType::Execute = access_type {
|
||||
Ok(address)
|
||||
} else if (self.mstatus >> 17) & 1 == 0 {
|
||||
Ok(address)
|
||||
} else {
|
||||
match decode_privilege_mode((self.mstatus >> 9) & 3) {
|
||||
PrivilegeMode::Machine => Ok(address),
|
||||
_ => {
|
||||
let current_privilege_mode = self.privilege_mode.clone();
|
||||
self.update_privilege_mode(privilege_mode);
|
||||
let result = self.translate_address(v_address, access_type);
|
||||
self.update_privilege_mode(current_privilege_mode);
|
||||
result
|
||||
temp_privilege_mode => self.translate_address_with_privilege_mode(
|
||||
v_address,
|
||||
access_type,
|
||||
temp_privilege_mode,
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
PrivilegeMode::User | PrivilegeMode::Supervisor => {
|
||||
let vpns = [
|
||||
(address >> 12) & 0x1ff,
|
||||
@ -576,27 +602,27 @@ impl Mmu {
|
||||
}
|
||||
};
|
||||
|
||||
if self.page_cache_enabled {
|
||||
match p_address {
|
||||
Ok(p_address) => {
|
||||
let p_page = p_address & !0xfff;
|
||||
match access_type {
|
||||
MemoryAccessType::Execute => self.fetch_page_cache.insert(v_page, p_page),
|
||||
MemoryAccessType::Read => self.load_page_cache.insert(v_page, p_page),
|
||||
MemoryAccessType::Write => self.store_page_cache.insert(v_page, p_page),
|
||||
MemoryAccessType::DontCare => None,
|
||||
};
|
||||
Ok(p_address)
|
||||
}
|
||||
Err(()) => Err(()),
|
||||
}
|
||||
} else {
|
||||
// if self.page_cache_enabled {
|
||||
// match p_address {
|
||||
// Ok(p_address) => {
|
||||
// let p_page = p_address & !0xfff;
|
||||
// match access_type {
|
||||
// MemoryAccessType::Execute => self.fetch_page_cache.insert(v_page, p_page),
|
||||
// MemoryAccessType::Read => self.load_page_cache.insert(v_page, p_page),
|
||||
// MemoryAccessType::Write => self.store_page_cache.insert(v_page, p_page),
|
||||
// MemoryAccessType::DontCare => None,
|
||||
// };
|
||||
// Ok(p_address)
|
||||
// }
|
||||
// Err(()) => Err(()),
|
||||
// }
|
||||
// } else {
|
||||
p_address
|
||||
}
|
||||
// }
|
||||
}
|
||||
|
||||
fn traverse_page(
|
||||
&mut self,
|
||||
&self,
|
||||
v_address: u64,
|
||||
level: u8,
|
||||
parent_ppn: u64,
|
||||
@ -724,98 +750,98 @@ impl Mmu {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct MemoryWrapper {
|
||||
memory: Memory,
|
||||
dram_base: u64,
|
||||
}
|
||||
// pub struct MemoryWrapper {
|
||||
// memory: Memory,
|
||||
// dram_base: u64,
|
||||
// }
|
||||
|
||||
impl MemoryWrapper {
|
||||
fn new(dram_base: u64) -> Self {
|
||||
MemoryWrapper {
|
||||
memory: Memory::new(),
|
||||
dram_base,
|
||||
}
|
||||
}
|
||||
// impl MemoryWrapper {
|
||||
// fn new(dram_base: u64) -> Self {
|
||||
// MemoryWrapper {
|
||||
// memory: Memory::new(),
|
||||
// dram_base,
|
||||
// }
|
||||
// }
|
||||
|
||||
fn init(&mut self, capacity: u64) {
|
||||
self.memory.init(capacity);
|
||||
}
|
||||
// fn init(&mut self, capacity: u64) {
|
||||
// self.memory.init(capacity);
|
||||
// }
|
||||
|
||||
pub fn read_byte(&self, p_address: u64) -> u8 {
|
||||
debug_assert!(
|
||||
p_address >= self.dram_base,
|
||||
"Memory address must equals to or bigger than self.dram_base. {:X}",
|
||||
p_address
|
||||
);
|
||||
self.memory.read_byte(p_address - self.dram_base)
|
||||
}
|
||||
// pub fn read_byte(&self, p_address: u64) -> u8 {
|
||||
// debug_assert!(
|
||||
// p_address >= self.dram_base,
|
||||
// "Memory address must equals to or bigger than self.dram_base. {:X}",
|
||||
// p_address
|
||||
// );
|
||||
// self.memory.read_byte(p_address - self.dram_base)
|
||||
// }
|
||||
|
||||
pub fn read_halfword(&self, p_address: u64) -> u16 {
|
||||
debug_assert!(
|
||||
p_address >= self.dram_base && p_address.wrapping_add(1) >= self.dram_base,
|
||||
"Memory address must equals to or bigger than self.dram_base. {:X}",
|
||||
p_address
|
||||
);
|
||||
self.memory.read_halfword(p_address - self.dram_base)
|
||||
}
|
||||
// pub fn read_halfword(&self, p_address: u64) -> u16 {
|
||||
// debug_assert!(
|
||||
// p_address >= self.dram_base && p_address.wrapping_add(1) >= self.dram_base,
|
||||
// "Memory address must equals to or bigger than self.dram_base. {:X}",
|
||||
// p_address
|
||||
// );
|
||||
// self.memory.read_halfword(p_address - self.dram_base)
|
||||
// }
|
||||
|
||||
pub fn read_word(&self, p_address: u64) -> u32 {
|
||||
debug_assert!(
|
||||
p_address >= self.dram_base && p_address.wrapping_add(3) >= self.dram_base,
|
||||
"Memory address must equals to or bigger than self.dram_base. {:X}",
|
||||
p_address
|
||||
);
|
||||
self.memory.read_word(p_address - self.dram_base)
|
||||
}
|
||||
// pub fn read_word(&self, p_address: u64) -> u32 {
|
||||
// debug_assert!(
|
||||
// p_address >= self.dram_base && p_address.wrapping_add(3) >= self.dram_base,
|
||||
// "Memory address must equals to or bigger than self.dram_base. {:X}",
|
||||
// p_address
|
||||
// );
|
||||
// self.memory.read_word(p_address - self.dram_base)
|
||||
// }
|
||||
|
||||
pub fn read_doubleword(&self, p_address: u64) -> u64 {
|
||||
debug_assert!(
|
||||
p_address >= self.dram_base && p_address.wrapping_add(7) >= self.dram_base,
|
||||
"Memory address must equals to or bigger than self.dram_base. {:X}",
|
||||
p_address
|
||||
);
|
||||
self.memory.read_doubleword(p_address - self.dram_base)
|
||||
}
|
||||
// pub fn read_doubleword(&self, p_address: u64) -> u64 {
|
||||
// debug_assert!(
|
||||
// p_address >= self.dram_base && p_address.wrapping_add(7) >= self.dram_base,
|
||||
// "Memory address must equals to or bigger than self.dram_base. {:X}",
|
||||
// p_address
|
||||
// );
|
||||
// self.memory.read_doubleword(p_address - self.dram_base)
|
||||
// }
|
||||
|
||||
pub fn write_byte(&mut self, p_address: u64, value: u8) {
|
||||
debug_assert!(
|
||||
p_address >= self.dram_base,
|
||||
"Memory address must equals to or bigger than self.dram_base. {:X}",
|
||||
p_address
|
||||
);
|
||||
self.memory.write_byte(p_address - self.dram_base, value)
|
||||
}
|
||||
// pub fn write_byte(&mut self, p_address: u64, value: u8) {
|
||||
// debug_assert!(
|
||||
// p_address >= self.dram_base,
|
||||
// "Memory address must equals to or bigger than self.dram_base. {:X}",
|
||||
// p_address
|
||||
// );
|
||||
// self.memory.write_byte(p_address - self.dram_base, value)
|
||||
// }
|
||||
|
||||
pub fn write_halfword(&mut self, p_address: u64, value: u16) {
|
||||
debug_assert!(
|
||||
p_address >= self.dram_base && p_address.wrapping_add(1) >= self.dram_base,
|
||||
"Memory address must equals to or bigger than self.dram_base. {:X}",
|
||||
p_address
|
||||
);
|
||||
self.memory
|
||||
.write_halfword(p_address - self.dram_base, value)
|
||||
}
|
||||
// pub fn write_halfword(&mut self, p_address: u64, value: u16) {
|
||||
// debug_assert!(
|
||||
// p_address >= self.dram_base && p_address.wrapping_add(1) >= self.dram_base,
|
||||
// "Memory address must equals to or bigger than self.dram_base. {:X}",
|
||||
// p_address
|
||||
// );
|
||||
// self.memory
|
||||
// .write_halfword(p_address - self.dram_base, value)
|
||||
// }
|
||||
|
||||
pub fn write_word(&mut self, p_address: u64, value: u32) {
|
||||
debug_assert!(
|
||||
p_address >= self.dram_base && p_address.wrapping_add(3) >= self.dram_base,
|
||||
"Memory address must equals to or bigger than self.dram_base. {:X}",
|
||||
p_address
|
||||
);
|
||||
self.memory.write_word(p_address - self.dram_base, value)
|
||||
}
|
||||
// pub fn write_word(&mut self, p_address: u64, value: u32) {
|
||||
// debug_assert!(
|
||||
// p_address >= self.dram_base && p_address.wrapping_add(3) >= self.dram_base,
|
||||
// "Memory address must equals to or bigger than self.dram_base. {:X}",
|
||||
// p_address
|
||||
// );
|
||||
// self.memory.write_word(p_address - self.dram_base, value)
|
||||
// }
|
||||
|
||||
pub fn write_doubleword(&mut self, p_address: u64, value: u64) {
|
||||
debug_assert!(
|
||||
p_address >= self.dram_base && p_address.wrapping_add(7) >= self.dram_base,
|
||||
"Memory address must equals to or bigger than self.dram_base. {:X}",
|
||||
p_address
|
||||
);
|
||||
self.memory
|
||||
.write_doubleword(p_address - self.dram_base, value)
|
||||
}
|
||||
// pub fn write_doubleword(&mut self, p_address: u64, value: u64) {
|
||||
// debug_assert!(
|
||||
// p_address >= self.dram_base && p_address.wrapping_add(7) >= self.dram_base,
|
||||
// "Memory address must equals to or bigger than self.dram_base. {:X}",
|
||||
// p_address
|
||||
// );
|
||||
// self.memory
|
||||
// .write_doubleword(p_address - self.dram_base, value)
|
||||
// }
|
||||
|
||||
pub fn validate_address(&self, address: u64) -> bool {
|
||||
self.memory.validate_address(address - self.dram_base)
|
||||
}
|
||||
}
|
||||
// pub fn validate_address(&self, address: u64) -> bool {
|
||||
// self.memory.validate_address(address - self.dram_base)
|
||||
// }
|
||||
// }
|
||||
src/main.rs

@@ -1,27 +1,18 @@
mod xous;

use std::io::Read;
use xous::XousHandler;
use xous::Machine;

fn main() {
fn main() -> Result<(), Box<dyn std::error::Error>> {
let mut std_tests = Vec::new();
std::fs::File::open("std-tests")
.expect("couldn't open std-tests")
.read_to_end(&mut std_tests)
.expect("couldn't read std-tests");

let mut cpu = riscv_cpu::CpuBuilder::new()
.memory_size(16 * 1024 * 1024)
.xlen(riscv_cpu::Xlen::Bit32)
.build();
let mut xous = Machine::new(&std_tests)?;

let mut xous = XousHandler::new(&cpu);
xous.load_program_to_cpu(&mut cpu, &std_tests)
.expect("couldn't load std-tests");
xous.run()?;

cpu.set_handler(Some(Box::new(xous)));

for _tick in 0..1000 {
cpu.tick();
}
Ok(())
}

src/xous.rs

@@ -1,7 +1,16 @@
|
||||
use riscv_cpu::cpu::EventHandler;
|
||||
use riscv_cpu::cpu::Memory as OtherMemory;
|
||||
mod definitions;
|
||||
|
||||
use definitions::{Syscall, SyscallNumber, SyscallResultNumber};
|
||||
use std::{
|
||||
collections::{BTreeSet, HashMap},
|
||||
sync::{
|
||||
mpsc::{Receiver, Sender},
|
||||
Arc, RwLock,
|
||||
},
|
||||
};
|
||||
|
||||
const MEMORY_BASE: u32 = 0x8000_0000;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum LoadError {
|
||||
@ -35,51 +44,171 @@ const MMUFLAG_DIRTY: u32 = 0x80;
|
||||
|
||||
impl std::error::Error for LoadError {}
|
||||
|
||||
pub struct XousHandler {
|
||||
memory_base: u32,
|
||||
allocator_offset: u32,
|
||||
satp: u32,
|
||||
l1_pt: u32,
|
||||
struct Memory {
|
||||
base: u32,
|
||||
data: HashMap<usize, [u8; 4096]>,
|
||||
allocated_pages: BTreeSet<usize>,
|
||||
free_pages: BTreeSet<usize>,
|
||||
heap_start: u32,
|
||||
heap_size: u32,
|
||||
allocation_start: u32,
|
||||
allocation_previous: u32,
|
||||
l1_pt: u32,
|
||||
satp: u32,
|
||||
}
|
||||
|
||||
impl XousHandler {
|
||||
pub fn new(cpu: &riscv_cpu::Cpu) -> Self {
|
||||
let memory_base = cpu.memory_base() as u32;
|
||||
// let memory_size = cpu.memory_size();
|
||||
enum WorkerCommand {
|
||||
Start,
|
||||
MemoryRange(u32 /* address */, u32 /* size */),
|
||||
}
|
||||
|
||||
enum WorkerResponse {
|
||||
Started,
|
||||
Exited(u32),
|
||||
AllocateMemory(
|
||||
u32, /* phys */
|
||||
u32, /* virt */
|
||||
u32, /* size */
|
||||
u32, /* flags */
|
||||
),
|
||||
}
|
||||
|
||||
struct Worker {
|
||||
cpu: riscv_cpu::Cpu,
|
||||
tx: Sender<WorkerResponse>,
|
||||
rx: Receiver<WorkerCommand>,
|
||||
}
|
||||
|
||||
impl Worker {
|
||||
fn new(
|
||||
cpu: riscv_cpu::Cpu,
|
||||
rx: Receiver<WorkerCommand>,
|
||||
worker_response_tx: Sender<WorkerResponse>,
|
||||
) -> Self {
|
||||
Self {
|
||||
memory_base,
|
||||
l1_pt: memory_base + 4096,
|
||||
allocator_offset: 8192,
|
||||
satp: ((4096 + memory_base) >> 12) | 0x8000_0000,
|
||||
heap_start: 0xa000_0000,
|
||||
cpu,
|
||||
tx: worker_response_tx,
|
||||
rx,
|
||||
}
|
||||
}
|
||||
fn run(&mut self) {
|
||||
self.rx.recv().unwrap();
|
||||
for _tick in 0..1000 {
|
||||
self.cpu.tick();
|
||||
}
|
||||
self.tx.send(WorkerResponse::Exited(1)).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
struct WorkerHandle {
|
||||
tx: Sender<WorkerCommand>,
|
||||
}
|
||||
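// A minimal, self-contained model of the handshake between Machine::run()
// (later in this file) and Worker::run() above: the machine sends a Start
// command, the worker ticks the CPU and reports an exit code. The 1000-tick
// budget is taken from Worker::run(); everything else here is illustrative.
use std::sync::mpsc;
use std::thread;

fn main() {
    let (cmd_tx, cmd_rx) = mpsc::channel::<()>(); // stands in for WorkerCommand::Start
    let (resp_tx, resp_rx) = mpsc::channel::<u32>(); // stands in for WorkerResponse::Exited
    thread::spawn(move || {
        cmd_rx.recv().unwrap(); // wait for Start
        for _tick in 0..1000 { /* cpu.tick() would run here */ }
        resp_tx.send(1).unwrap(); // Exited(1)
    });
    cmd_tx.send(()).unwrap();
    assert_eq!(resp_rx.recv().unwrap(), 1);
}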
|
||||
impl Memory {
|
||||
pub fn new(base: u32, size: usize) -> Self {
|
||||
let mut data = HashMap::new();
|
||||
let mut free_pages = BTreeSet::new();
|
||||
let mut allocated_pages = BTreeSet::new();
|
||||
for page in (base..(base + size as u32)).step_by(4096) {
|
||||
data.insert(page as usize, [0; 4096]);
|
||||
free_pages.insert(page as usize);
|
||||
}
|
||||
// Remove the l0 page table
|
||||
free_pages.remove(&(MEMORY_BASE as usize + 4096));
|
||||
allocated_pages.insert(MEMORY_BASE as usize + 4096);
|
||||
Self {
|
||||
base,
|
||||
data,
|
||||
allocated_pages,
|
||||
free_pages,
|
||||
l1_pt: MEMORY_BASE + 4096,
|
||||
satp: ((4096 + MEMORY_BASE) >> 12) | 0x8000_0000,
|
||||
heap_start: 0x6000_0000,
|
||||
heap_size: 0,
|
||||
allocation_previous: 0x4000_0000,
|
||||
allocation_start: 0x4000_0000,
|
||||
}
|
||||
}
|
||||
|
||||
fn allocate_page(&mut self) -> u32 {
|
||||
let page = self.allocator_offset;
|
||||
self.allocator_offset += 4096;
|
||||
page + self.memory_base
|
||||
let page = self.free_pages.pop_first().expect("out of memory");
|
||||
self.allocated_pages.insert(page);
|
||||
page as u32
|
||||
}
|
||||
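// The hunk above replaces the old bump allocator (allocator_offset += 4096)
// with a BTreeSet-backed free list. A standalone sketch of the new behaviour;
// the address range is illustrative:
use std::collections::BTreeSet;

fn main() {
    let mut free_pages: BTreeSet<usize> =
        (0x8000_2000usize..0x8000_5000).step_by(4096).collect();
    let mut allocated_pages = BTreeSet::new();

    // pop_first() always hands out the lowest free page; recording it in
    // allocated_pages ensures it is never handed out twice.
    let page = free_pages.pop_first().expect("out of memory");
    allocated_pages.insert(page);
    assert_eq!(page, 0x8000_2000);
    assert_eq!(free_pages.len(), 2); // 0x8000_3000 and 0x8000_4000 remain
}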
|
||||
fn write_bytes(&mut self, cpu: &mut riscv_cpu::Cpu, data: &[u8], start: u32) {
|
||||
fn allocate_virt_region(&mut self, size: usize) -> Option<u32> {
|
||||
let mut start = self.allocation_previous;
|
||||
// Find a free region that will fit this page.
|
||||
'outer: loop {
|
||||
for page in (start..(start + size as u32)).step_by(4096) {
|
||||
if self.virt_to_phys(page).is_some() {
|
||||
start = page + 4096;
|
||||
continue 'outer;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
// Allocate the region
|
||||
for page in (start..(start + size as u32)).step_by(4096) {
|
||||
self.ensure_page(page);
|
||||
}
|
||||
self.allocation_previous = start + size as u32 + 4096;
|
||||
Some(start)
|
||||
}
|
||||
|
||||
fn ensure_page(&mut self, address: u32) {
|
||||
let vpn1 = ((address >> 22) & ((1 << 10) - 1)) as usize * 4;
|
||||
let vpn0 = ((address >> 12) & ((1 << 10) - 1)) as usize * 4;
|
||||
|
||||
// The root (l1) pagetable is defined to be mapped into our virtual
|
||||
// address space at this address.
|
||||
|
||||
// If the level 1 pagetable doesn't exist, then this address is invalid
|
||||
let mut l1_pt_entry = self.read_u32(self.l1_pt as u64 + vpn1 as u64);
|
||||
if l1_pt_entry & MMUFLAG_VALID == 0 {
|
||||
// Allocate a new page for the level 1 pagetable
|
||||
let l0_pt_phys = self.allocate_page();
|
||||
// println!("Allocating level 0 pagetable at {:08x}", l0_pt_phys);
|
||||
l1_pt_entry =
|
||||
((l0_pt_phys >> 12) << 10) | MMUFLAG_VALID | MMUFLAG_DIRTY | MMUFLAG_ACCESSED;
|
||||
// Map the level 1 pagetable into the root pagetable
|
||||
self.write_u32(self.l1_pt as u64 + vpn1 as u64, l1_pt_entry);
|
||||
}
|
||||
|
||||
let l0_pt_phys = ((l1_pt_entry >> 10) << 12) + vpn0 as u32;
|
||||
let mut l0_pt_entry = self.read_u32(l0_pt_phys as u64);
|
||||
|
||||
// Ensure the entry hasn't already been mapped.
|
||||
if l0_pt_entry & MMUFLAG_VALID == 0 {
|
||||
let page_phys = self.allocate_page();
|
||||
l0_pt_entry = ((page_phys >> 12) << 10)
|
||||
| MMUFLAG_VALID
|
||||
| MMUFLAG_WRITABLE
|
||||
| MMUFLAG_READABLE
|
||||
| MMUFLAG_EXECUTABLE
|
||||
| MMUFLAG_USERMODE
|
||||
| MMUFLAG_DIRTY
|
||||
| MMUFLAG_ACCESSED;
|
||||
// Map the level 0 pagetable into the level 1 pagetable
|
||||
self.write_u32(l0_pt_phys as u64, l0_pt_entry);
|
||||
}
|
||||
}
|
||||
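// A worked example of the Sv32 split used by ensure_page() above and
// virt_to_phys() below: ten bits each for VPN1 and VPN0 (scaled by 4 because
// each page-table entry is 4 bytes wide) plus a 12-bit page offset. The
// sample address is arbitrary.
fn main() {
    let address: u32 = 0x4001_2345;
    let vpn1 = ((address >> 22) & ((1 << 10) - 1)) as usize * 4;
    let vpn0 = ((address >> 12) & ((1 << 10) - 1)) as usize * 4;
    let offset = address & ((1 << 12) - 1);
    assert_eq!(vpn1, 0x100 * 4); // byte offset of entry 0x100 in the level-1 table
    assert_eq!(vpn0, 0x012 * 4); // byte offset of entry 0x012 in the level-0 table
    assert_eq!(offset, 0x345);
}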
|
||||
fn write_bytes(&mut self, data: &[u8], start: u32) {
|
||||
for (i, byte) in data.iter().enumerate() {
|
||||
let i = i as u32;
|
||||
self.ensure_page(cpu, start + i);
|
||||
let phys = self.virt_to_phys(cpu, start + i).unwrap();
|
||||
self.ensure_page(start + i);
|
||||
let phys = self.virt_to_phys(start + i).unwrap();
|
||||
|
||||
cpu.phys_write_u8(phys as u64, *byte);
|
||||
self.write_u8(phys as u64, *byte);
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn print_mmu(&self, cpu: &riscv_cpu::Cpu) {
|
||||
pub fn print_mmu(&self) {
|
||||
println!("Memory Map:");
|
||||
for vpn1 in (0..4096).step_by(4) {
|
||||
let l1_entry = cpu.phys_read_u32(self.l1_pt as u64 + vpn1);
|
||||
let l1_entry = self.read_u32(self.l1_pt as u64 + vpn1);
|
||||
if l1_entry & MMUFLAG_VALID == 0 {
|
||||
continue;
|
||||
}
|
||||
@ -94,7 +223,7 @@ impl XousHandler {
|
||||
);
|
||||
|
||||
for vpn0 in (0..4096).step_by(4) {
|
||||
let l0_entry = cpu.phys_read_u32((((l1_entry >> 10) << 12) as u64) + vpn0 as u64);
|
||||
let l0_entry = self.read_u32((((l1_entry >> 10) << 12) as u64) + vpn0 as u64);
|
||||
if l0_entry & 0x7 == 0 {
|
||||
continue;
|
||||
}
|
||||
@ -111,14 +240,14 @@ impl XousHandler {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn virt_to_phys(&self, cpu: &riscv_cpu::Cpu, virt: u32) -> Option<u32> {
|
||||
pub fn virt_to_phys(&self, virt: u32) -> Option<u32> {
|
||||
let vpn1 = ((virt >> 22) & ((1 << 10) - 1)) as usize * 4;
|
||||
let vpn0 = ((virt >> 12) & ((1 << 10) - 1)) as usize * 4;
|
||||
let offset = virt & ((1 << 12) - 1);
|
||||
|
||||
// The root (l1) pagetable is defined to be mapped into our virtual
|
||||
// address space at this address.
|
||||
let l1_pt_entry = cpu.phys_read_u32(self.l1_pt as u64 + vpn1 as u64);
|
||||
let l1_pt_entry = self.read_u32(self.l1_pt as u64 + vpn1 as u64);
|
||||
|
||||
// If the level 1 pagetable doesn't exist, then this address is invalid
|
||||
if l1_pt_entry & MMUFLAG_VALID == 0 {
|
||||
@ -128,7 +257,7 @@ impl XousHandler {
|
||||
return None;
|
||||
}
|
||||
|
||||
let l0_pt_entry = cpu.phys_read_u32((((l1_pt_entry >> 10) << 12) + vpn0 as u32) as u64);
|
||||
let l0_pt_entry = self.read_u32((((l1_pt_entry >> 10) << 12) + vpn0 as u32) as u64);
|
||||
|
||||
// Ensure the entry hasn't already been mapped.
|
||||
if l0_pt_entry & MMUFLAG_VALID == 0 {
|
||||
@ -136,50 +265,213 @@ impl XousHandler {
|
||||
}
|
||||
Some(((l0_pt_entry >> 10) << 12) | offset)
|
||||
}
|
||||
}
|
||||
|
||||
fn ensure_page(&mut self, cpu: &mut riscv_cpu::Cpu, address: u32) {
|
||||
let vpn1 = ((address >> 22) & ((1 << 10) - 1)) as usize * 4;
|
||||
let vpn0 = ((address >> 12) & ((1 << 10) - 1)) as usize * 4;
|
||||
|
||||
// The root (l1) pagetable is defined to be mapped into our virtual
|
||||
// address space at this address.
|
||||
|
||||
// If the level 1 pagetable doesn't exist, then this address is invalid
|
||||
let mut l1_pt_entry = cpu.phys_read_u32(self.l1_pt as u64 + vpn1 as u64);
|
||||
if l1_pt_entry & MMUFLAG_VALID == 0 {
|
||||
// Allocate a new page for the level 1 pagetable
|
||||
let l0_pt_phys = self.allocate_page();
|
||||
// println!("Allocating level 0 pagetable at {:08x}", l0_pt_phys);
|
||||
l1_pt_entry =
|
||||
((l0_pt_phys >> 12) << 10) | MMUFLAG_VALID | MMUFLAG_DIRTY | MMUFLAG_ACCESSED;
|
||||
// Map the level 1 pagetable into the root pagetable
|
||||
cpu.phys_write_u32(self.l1_pt as u64 + vpn1 as u64, l1_pt_entry);
|
||||
impl riscv_cpu::cpu::Memory for Memory {
|
||||
fn read_u8(&self, address: u64) -> u8 {
|
||||
let page = address as usize & !0xfff;
|
||||
let offset = address as usize & 0xfff;
|
||||
self.data.get(&page).map(|page| page[offset]).unwrap_or(0)
|
||||
}
|
||||
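// Every accessor in this impl follows the pattern shown in read_u8() above:
// the HashMap is keyed by the 4 KiB-aligned address of a page, and the low
// 12 bits index into that page's [u8; 4096]. A quick illustration with an
// arbitrary address:
fn main() {
    let address: u64 = 0x8000_1234;
    let page = address as usize & !0xfff;
    let offset = address as usize & 0xfff;
    assert_eq!(page, 0x8000_1000);
    assert_eq!(offset, 0x234);
}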
|
||||
let l0_pt_phys = ((l1_pt_entry >> 10) << 12) + vpn0 as u32;
|
||||
let mut l0_pt_entry = cpu.phys_read_u32(l0_pt_phys as u64);
|
||||
fn read_u16(&self, address: u64) -> u16 {
|
||||
let page = address as usize & !0xfff;
|
||||
let offset = address as usize & 0xfff;
|
||||
self.data
|
||||
.get(&page)
|
||||
.map(|page| u16::from_le_bytes([page[offset], page[offset + 1]]))
|
||||
.unwrap_or(0)
|
||||
}
|
||||
|
||||
// Ensure the entry hasn't already been mapped.
|
||||
if l0_pt_entry & MMUFLAG_VALID == 0 {
|
||||
let page_phys = self.allocate_page();
|
||||
l0_pt_entry = ((page_phys >> 12) << 10)
|
||||
| MMUFLAG_VALID
|
||||
| MMUFLAG_WRITABLE
|
||||
| MMUFLAG_READABLE
|
||||
| MMUFLAG_EXECUTABLE
|
||||
| MMUFLAG_USERMODE
|
||||
| MMUFLAG_DIRTY
|
||||
| MMUFLAG_ACCESSED;
|
||||
// Map the level 0 pagetable into the level 1 pagetable
|
||||
cpu.phys_write_u32(l0_pt_phys as u64, l0_pt_entry);
|
||||
fn read_u32(&self, address: u64) -> u32 {
|
||||
let page = address as usize & !0xfff;
|
||||
let offset = address as usize & 0xfff;
|
||||
self.data
|
||||
.get(&page)
|
||||
.map(|page| {
|
||||
u32::from_le_bytes([
|
||||
page[offset],
|
||||
page[offset + 1],
|
||||
page[offset + 2],
|
||||
page[offset + 3],
|
||||
])
|
||||
})
|
||||
.unwrap_or(0)
|
||||
}
|
||||
|
||||
fn read_u64(&self, address: u64) -> u64 {
|
||||
let page = address as usize & !0xfff;
|
||||
let offset = address as usize & 0xfff;
|
||||
self.data
|
||||
.get(&page)
|
||||
.map(|page| {
|
||||
u64::from_le_bytes([
|
||||
page[offset],
|
||||
page[offset + 1],
|
||||
page[offset + 2],
|
||||
page[offset + 3],
|
||||
page[offset + 4],
|
||||
page[offset + 5],
|
||||
page[offset + 6],
|
||||
page[offset + 7],
|
||||
])
|
||||
})
|
||||
.unwrap_or(0)
|
||||
}
|
||||
|
||||
fn write_u8(&mut self, address: u64, value: u8) {
|
||||
let page = address as usize & !0xfff;
|
||||
let offset = address as usize & 0xfff;
|
||||
if let Some(page) = self.data.get_mut(&page) {
|
||||
page[offset] = value;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn load_program_to_cpu(
|
||||
&mut self,
|
||||
cpu: &mut riscv_cpu::Cpu,
|
||||
program: &[u8],
|
||||
) -> Result<(), LoadError> {
|
||||
fn write_u16(&mut self, address: u64, value: u16) {
|
||||
let page = address as usize & !0xfff;
|
||||
let offset = address as usize & 0xfff;
|
||||
if let Some(page) = self.data.get_mut(&page) {
|
||||
let bytes = value.to_le_bytes();
|
||||
page[offset] = bytes[0];
|
||||
page[offset + 1] = bytes[1];
|
||||
}
|
||||
}
|
||||
|
||||
fn write_u32(&mut self, address: u64, value: u32) {
|
||||
let page = address as usize & !0xfff;
|
||||
let offset = address as usize & 0xfff;
|
||||
if let Some(page) = self.data.get_mut(&page) {
|
||||
let bytes = value.to_le_bytes();
|
||||
page[offset] = bytes[0];
|
||||
page[offset + 1] = bytes[1];
|
||||
page[offset + 2] = bytes[2];
|
||||
page[offset + 3] = bytes[3];
|
||||
}
|
||||
}
|
||||
|
||||
fn write_u64(&mut self, address: u64, value: u64) {
|
||||
let page = address as usize & !0xfff;
|
||||
let offset = address as usize & 0xfff;
|
||||
if let Some(page) = self.data.get_mut(&page) {
|
||||
let bytes = value.to_le_bytes();
|
||||
page[offset] = bytes[0];
|
||||
page[offset + 1] = bytes[1];
|
||||
page[offset + 2] = bytes[2];
|
||||
page[offset + 3] = bytes[3];
|
||||
page[offset + 4] = bytes[4];
|
||||
page[offset + 5] = bytes[5];
|
||||
page[offset + 6] = bytes[6];
|
||||
page[offset + 7] = bytes[7];
|
||||
}
|
||||
}
|
||||
|
||||
fn validate_address(&self, address: u64) -> bool {
|
||||
if address < self.base as u64 {
|
||||
return false;
|
||||
}
|
||||
let address = address as usize - self.base as usize;
|
||||
address < self.data.len()
|
||||
}
|
||||
|
||||
fn syscall(&mut self, args: [i64; 8]) -> [i64; 8] {
|
||||
let syscall: Syscall = args.into();
|
||||
println!("Syscall {:?} with args: {:?}", syscall, &args[1..]);
|
||||
|
||||
print!("Syscall: ");
|
||||
match syscall {
|
||||
Syscall::IncreaseHeap(bytes, _flags) => {
|
||||
println!("IncreaseHeap({} bytes, flags: {:02x})", bytes, _flags);
|
||||
let heap_start = self.heap_start;
|
||||
let heap_address = self.heap_start + self.heap_size;
|
||||
match bytes {
|
||||
bytes if bytes < 0 => {
|
||||
self.heap_size -= bytes.unsigned_abs() as u32;
|
||||
panic!("Reducing size not supported!");
|
||||
}
|
||||
bytes if bytes > 0 => {
|
||||
for new_address in
|
||||
(heap_address..(heap_address + bytes as u32)).step_by(4096)
|
||||
{
|
||||
self.ensure_page(new_address);
|
||||
}
|
||||
self.heap_size += bytes as u32;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
[
|
||||
SyscallResultNumber::MemoryRange as i64,
|
||||
heap_address as i64,
|
||||
bytes,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
]
|
||||
}
|
||||
|
||||
Syscall::MapMemory(phys, virt, size, _flags) => {
|
||||
if virt != 0 {
|
||||
unimplemented!("Non-zero virt address");
|
||||
}
|
||||
if phys != 0 {
|
||||
unimplemented!("Non-zero phys address");
|
||||
}
|
||||
let region = self
|
||||
.allocate_virt_region(size as usize)
|
||||
.expect("out of memory");
|
||||
[
|
||||
SyscallResultNumber::MemoryRange as i64,
|
||||
region as i64,
|
||||
size,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
]
|
||||
}
|
||||
Syscall::Unknown(args) => {
|
||||
println!(
|
||||
"Unhandled {:?}: {:?}",
|
||||
SyscallNumber::from(args[0]),
|
||||
&args[1..]
|
||||
);
|
||||
[SyscallResultNumber::Unimplemented as _, 0, 0, 0, 0, 0, 0, 0]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Machine {
|
||||
memory: Arc<RwLock<Memory>>,
|
||||
workers: Vec<WorkerHandle>,
|
||||
worker_response: Receiver<WorkerResponse>,
|
||||
worker_response_tx: Sender<WorkerResponse>,
|
||||
}
|
||||
|
||||
impl Machine {
|
||||
pub fn new(program: &[u8]) -> Result<Self, LoadError> {
|
||||
let memory = Arc::new(RwLock::new(Memory::new(MEMORY_BASE, 16 * 1024 * 1024)));
|
||||
|
||||
let (worker_response_tx, worker_response) = std::sync::mpsc::channel();
|
||||
let mut machine = Self {
|
||||
memory,
|
||||
workers: vec![],
|
||||
worker_response_tx,
|
||||
worker_response,
|
||||
};
|
||||
|
||||
machine.load_program(program)?;
|
||||
|
||||
Ok(machine)
|
||||
}
|
||||
|
||||
pub fn load_program(&mut self, program: &[u8]) -> Result<(), LoadError> {
|
||||
let mut cpu = riscv_cpu::CpuBuilder::new(self.memory.clone())
|
||||
.xlen(riscv_cpu::Xlen::Bit32)
|
||||
.build();
|
||||
|
||||
let goblin::Object::Elf(elf) =
|
||||
goblin::Object::parse(program).map_err(|_| LoadError::IncorrectFormat)?
|
||||
else {
|
||||
@ -189,43 +481,34 @@ impl XousHandler {
|
||||
return Err(LoadError::BitSizeError);
|
||||
}
|
||||
|
||||
let mut memory_writer = self.memory.write().unwrap();
|
||||
for sh in elf.section_headers {
|
||||
if sh.sh_flags as u32 & goblin::elf::section_header::SHF_ALLOC == 0 {
|
||||
continue;
|
||||
}
|
||||
if sh.sh_type & goblin::elf::section_header::SHT_NOBITS != 0 {
|
||||
for addr in sh.sh_addr..(sh.sh_addr + sh.sh_size) {
|
||||
self.ensure_page(cpu, addr.try_into().unwrap());
|
||||
// self.write_virt_u8(cpu, addr.try_into().unwrap(), 0);
|
||||
memory_writer.ensure_page(addr.try_into().unwrap());
|
||||
}
|
||||
} else {
|
||||
self.write_bytes(
|
||||
cpu,
|
||||
memory_writer.write_bytes(
|
||||
&program[sh.sh_offset as usize..(sh.sh_offset + sh.sh_size) as usize],
|
||||
sh.sh_addr.try_into().unwrap(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
self.print_mmu(cpu);
|
||||
memory_writer.print_mmu();
|
||||
|
||||
// TODO: Get memory permissions correct
|
||||
|
||||
let satp = self.satp.into();
|
||||
let satp = memory_writer.satp.into();
|
||||
|
||||
// Ensure stack is allocated
|
||||
for page in (0xc000_0000..0xc002_0000).step_by(4096) {
|
||||
self.ensure_page(cpu, page);
|
||||
memory_writer.ensure_page(page);
|
||||
}
|
||||
|
||||
// for (offset, byte) in shadow_memory.into_iter().enumerate() {
|
||||
// if byte == 0 {
|
||||
// continue;
|
||||
// }
|
||||
// // println!("Writing {:02x} to {:08x}", byte, offset as u64 + memory_base);
|
||||
// cpu.phys_write_u8(offset as u64 + memory_base, byte);
|
||||
// }
|
||||
|
||||
cpu.write_csr(riscv_cpu::cpu::CSR_SATP_ADDRESS, satp)
|
||||
.map_err(|_| LoadError::SatpWriteError)?;
|
||||
cpu.update_pc(elf.entry);
|
||||
@ -243,53 +526,38 @@ impl XousHandler {
|
||||
// Update the stack pointer
|
||||
cpu.write_register(2, 0xc002_0000 - 4);
|
||||
|
||||
let (tx, rx) = std::sync::mpsc::channel();
|
||||
let worker_tx = self.worker_response_tx.clone();
|
||||
let mem = self.memory.clone();
|
||||
std::thread::spawn(move || Worker::new(cpu, rx, worker_tx).run());
|
||||
|
||||
self.workers.push(WorkerHandle { tx });
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn run(&mut self) -> Result<(), Box<dyn std::error::Error>> {
|
||||
self.workers[0].tx.send(WorkerCommand::Start)?;
|
||||
self.worker_response.recv().unwrap();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl XousHandler {
|
||||
fn syscall(&mut self, cpu: &mut riscv_cpu::Cpu, syscall: Syscall) -> [i64; 8] {
|
||||
print!("Syscall: ");
|
||||
match syscall {
|
||||
Syscall::IncreaseHeap(bytes, _flags) => {
|
||||
println!("IncreaseHeap({} bytes, flags: {:02x})", bytes, _flags);
|
||||
let heap_address = self.heap_start + self.heap_size;
|
||||
match bytes {
|
||||
bytes if bytes < 0 => {
|
||||
self.heap_size -= bytes.unsigned_abs() as u32;
|
||||
panic!("Reducing size not supported!");
|
||||
},
|
||||
bytes if bytes > 0 => {
|
||||
for new_address in (heap_address..(heap_address + bytes as u32)).step_by(4096) {
|
||||
self.ensure_page(cpu, new_address);
|
||||
}
|
||||
self.heap_size += bytes as u32;
|
||||
},
|
||||
_ => {},
|
||||
}
|
||||
[
|
||||
SyscallResultNumber::MemoryRange as i64,
|
||||
heap_address as i64,
|
||||
bytes,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
]
|
||||
}
|
||||
Syscall::Unknown(args) => {
|
||||
println!("Unhandled {:?}: {:?}", SyscallNumber::from(args[0]), &args[1..]);
|
||||
[SyscallResultNumber::Unimplemented as _, 0, 0, 0, 0, 0, 0, 0]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl EventHandler for XousHandler {
|
||||
fn handle_event(&mut self, cpu: &mut riscv_cpu::Cpu, args: [i64; 8]) -> [i64; 8] {
|
||||
let syscall: Syscall = args.into();
|
||||
// println!("Syscall {:?} with args: {:?}", syscall, &args[1..]);
|
||||
self.syscall(cpu, syscall)
|
||||
}
|
||||
}
|
||||
// impl SyscallHandler for Worker {
|
||||
// fn syscall(&mut self, cpu: &mut riscv_cpu::Cpu, args: [i64; 8]) -> [i64; 8] {
|
||||
// let syscall: Syscall = args.into();
|
||||
// println!("Syscall {:?} with args: {:?}", syscall, &args[1..]);
|
||||
// // self.syscall(cpu, syscall)
|
||||
// [
|
||||
// SyscallResultNumber::Unimplemented as i64,
|
||||
// 0,
|
||||
// 0,
|
||||
// 0,
|
||||
// 0,
|
||||
// 0,
|
||||
// 0,
|
||||
// 0,
|
||||
// ]
|
||||
// }
|
||||
// }
|
||||
|
@ -22,6 +22,12 @@ pub enum Syscall {
|
||||
i64, /* number of bytes to add */
|
||||
i64, /* memory flags */
|
||||
),
|
||||
MapMemory(
|
||||
i64, /* address */
|
||||
i64, /* size */
|
||||
i64, /* flags */
|
||||
i64, /* name */
|
||||
),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
@ -50,6 +56,7 @@ impl From<[i64; 8]> for Syscall {
|
||||
fn from(value: [i64; 8]) -> Self {
|
||||
match value[0].into() {
|
||||
SyscallNumber::IncreaseHeap => Syscall::IncreaseHeap(value[1], value[2]),
|
||||
SyscallNumber::MapMemory => Syscall::MapMemory(value[1], value[2], value[3], value[4]),
|
||||
_ => Syscall::Unknown(value),
|
||||
}
|
||||
}