move loading into xous

Signed-off-by: Sean Cross <sean@xobs.io>
Author: Sean Cross <sean@xobs.io>
Date: 2023-12-30 23:41:50 +08:00
parent 8be17ff6f1
commit ff16f35de5
4 changed files with 595 additions and 347 deletions


@@ -1515,7 +1515,15 @@ impl Cpu {
self.mmu.memory_size()
}
pub fn phys_read_u8(&mut self, address: u64) -> u8 {
pub fn phys_read_u32(&self, address: u64) -> u32 {
self.mmu.load_word_raw(address)
}
pub fn phys_write_u32(&mut self, address: u64, value: u32) {
self.mmu.store_word_raw(address, value)
}
pub fn phys_read_u8(&self, address: u64) -> u8 {
self.mmu.load_raw(address)
}
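These raw physical accessors are what let the loader move out of the core: host-side code can now peek and poke guest DRAM directly, with no virtual-address translation in the way. A minimal sketch of driving them, assuming the CpuBuilder API shown in the main() hunk later in this commit:

// Sketch only: patch guest memory through the new physical accessors.
fn poke_guest_memory() {
    let mut cpu = riscv_cpu::CpuBuilder::new()
        .memory_size(16 * 1024 * 1024)
        .xlen(riscv_cpu::Xlen::Bit32)
        .build();
    let base = cpu.memory_base();
    // Write a word at the DRAM base, bypassing the MMU, then read it back.
    cpu.phys_write_u32(base, 0xdead_beef);
    assert_eq!(cpu.phys_read_u32(base), 0xdead_beef);
}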
@@ -2458,9 +2466,9 @@ const INSTRUCTIONS: [Instruction; INSTRUCTION_NUM] = [
*dest = *src;
}
cpu.handler = Some(handler);
return Ok(())
return Ok(());
}
let exception_type = match cpu.privilege_mode {
PrivilegeMode::User => TrapType::EnvironmentCallFromUMode,
PrivilegeMode::Supervisor => TrapType::EnvironmentCallFromSMode,


@@ -173,7 +173,7 @@ impl Mmu {
self.clear_page_cache();
}
fn get_effective_address(&self, address: u64) -> u64 {
fn trim_to_xlen(&self, address: u64) -> u64 {
match self.xlen {
Xlen::Bit32 => address & 0xffffffff,
Xlen::Bit64 => address,
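The rename from get_effective_address to trim_to_xlen is purely descriptive: the function performs no translation, it only masks an address down to the active register width. A standalone sketch of the same behavior (hypothetical free function; the real method reads self.xlen):

// Hypothetical free-function version of trim_to_xlen.
fn trim_to_xlen(is_rv32: bool, address: u64) -> u64 {
    if is_rv32 {
        address & 0xffff_ffff // RV32: only the low 32 bits are meaningful
    } else {
        address // RV64: the full address is used as-is
    }
}
// trim_to_xlen(true, 0x1_2345_6789) == 0x2345_6789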
@@ -204,7 +204,7 @@ impl Mmu {
if (v_address & 0xfff) <= (0x1000 - width) {
// Fast path. All bytes fetched are in the same page so
// translating an address only once.
let effective_address = self.get_effective_address(v_address);
let effective_address = self.trim_to_xlen(v_address);
self.translate_address(effective_address, &MemoryAccessType::Execute)
.map(|p_address| self.load_word_raw(p_address))
.map_err(|()| Trap {
@@ -229,7 +229,7 @@ impl Mmu {
/// # Arguments
/// * `v_address` Virtual address
pub fn load(&mut self, v_address: u64) -> Result<u8, Trap> {
let effective_address = self.get_effective_address(v_address);
let effective_address = self.trim_to_xlen(v_address);
match self.translate_address(effective_address, &MemoryAccessType::Read) {
Ok(p_address) => Ok(self.load_raw(p_address)),
Err(()) => Err(Trap {
@@ -403,8 +403,8 @@ impl Mmu {
///
/// # Arguments
/// * `p_address` Physical address
pub(crate) fn load_raw(&mut self, p_address: u64) -> u8 {
self.memory.read_byte(self.get_effective_address(p_address))
pub(crate) fn load_raw(&self, p_address: u64) -> u8 {
self.memory.read_byte(self.trim_to_xlen(p_address))
}
/// Loads two bytes from main memory or peripheral devices depending on
@@ -412,9 +412,8 @@ impl Mmu {
///
/// # Arguments
/// * `p_address` Physical address
fn load_halfword_raw(&mut self, p_address: u64) -> u16 {
self.memory
.read_halfword(self.get_effective_address(p_address))
fn load_halfword_raw(&self, p_address: u64) -> u16 {
self.memory.read_halfword(self.trim_to_xlen(p_address))
}
/// Loads four bytes from main memory or peripheral devices depending on
@@ -422,10 +421,8 @@ impl Mmu {
///
/// # Arguments
/// * `p_address` Physical address
pub fn load_word_raw(&mut self, p_address: u64) -> u32 {
let val = self.memory.read_word(self.get_effective_address(p_address));
// println!("Read value from {:08x}: {:08x}", p_address, val);
val
pub fn load_word_raw(&self, p_address: u64) -> u32 {
self.memory.read_word(self.trim_to_xlen(p_address))
}
/// Loads eight bytes from main memory or peripheral devices depending on
@@ -433,9 +430,8 @@ impl Mmu {
///
/// # Arguments
/// * `p_address` Physical address
fn load_doubleword_raw(&mut self, p_address: u64) -> u64 {
self.memory
.read_doubleword(self.get_effective_address(p_address))
fn load_doubleword_raw(&self, p_address: u64) -> u64 {
self.memory.read_doubleword(self.trim_to_xlen(p_address))
}
/// Stores a byte to main memory or peripheral devices depending on
@@ -445,8 +441,7 @@ impl Mmu {
/// * `p_address` Physical address
/// * `value` data written
pub(crate) fn store_raw(&mut self, p_address: u64, value: u8) {
self.memory
.write_byte(self.get_effective_address(p_address), value)
self.memory.write_byte(self.trim_to_xlen(p_address), value)
}
/// Stores two bytes to main memory or peripheral devices depending on
@@ -455,9 +450,9 @@ impl Mmu {
/// # Arguments
/// * `p_address` Physical address
/// * `value` data written
fn store_halfword_raw(&mut self, p_address: u64, value: u16) {
pub(crate) fn store_halfword_raw(&mut self, p_address: u64, value: u16) {
self.memory
.write_halfword(self.get_effective_address(p_address), value)
.write_halfword(self.trim_to_xlen(p_address), value)
}
/// Stores four bytes to main memory or peripheral devices depending on
@@ -466,9 +461,8 @@ impl Mmu {
/// # Arguments
/// * `p_address` Physical address
/// * `value` data written
fn store_word_raw(&mut self, p_address: u64, value: u32) {
self.memory
.write_word(self.get_effective_address(p_address), value)
pub(crate) fn store_word_raw(&mut self, p_address: u64, value: u32) {
self.memory.write_word(self.trim_to_xlen(p_address), value)
}
/// Stores eight bytes to main memory or peripheral devices depending on
@@ -479,7 +473,7 @@ impl Mmu {
/// * `value` data written
fn store_doubleword_raw(&mut self, p_address: u64, value: u64) {
self.memory
.write_doubleword(self.get_effective_address(p_address), value)
.write_doubleword(self.trim_to_xlen(p_address), value)
}
/// Checks if passed virtual address is valid (pointing a certain device) or not.
@@ -489,10 +483,7 @@ impl Mmu {
/// * `v_address` Virtual address
pub fn validate_address(&mut self, v_address: u64) -> Option<bool> {
if let Ok(p_address) = self.translate_address(v_address, &MemoryAccessType::DontCare) {
Some(
self.memory
.validate_address(self.get_effective_address(p_address)),
)
Some(self.memory.validate_address(self.trim_to_xlen(p_address)))
} else {
None
}
@@ -503,7 +494,7 @@ impl Mmu {
v_address: u64,
access_type: &MemoryAccessType,
) -> Result<u64, ()> {
let address = self.get_effective_address(v_address);
let address = self.trim_to_xlen(v_address);
let v_page = address & !0xfff;
if let Some(p_page) = match self.page_cache_enabled {
true => match access_type {
@@ -750,7 +741,7 @@ impl MemoryWrapper {
self.memory.init(capacity);
}
pub fn read_byte(&mut self, p_address: u64) -> u8 {
pub fn read_byte(&self, p_address: u64) -> u8 {
debug_assert!(
p_address >= self.dram_base,
"Memory address must equals to or bigger than self.dram_base. {:X}",
@@ -759,7 +750,7 @@ impl MemoryWrapper {
self.memory.read_byte(p_address - self.dram_base)
}
pub fn read_halfword(&mut self, p_address: u64) -> u16 {
pub fn read_halfword(&self, p_address: u64) -> u16 {
debug_assert!(
p_address >= self.dram_base && p_address.wrapping_add(1) >= self.dram_base,
"Memory address must equals to or bigger than self.dram_base. {:X}",
@@ -768,7 +759,7 @@ impl MemoryWrapper {
self.memory.read_halfword(p_address - self.dram_base)
}
pub fn read_word(&mut self, p_address: u64) -> u32 {
pub fn read_word(&self, p_address: u64) -> u32 {
debug_assert!(
p_address >= self.dram_base && p_address.wrapping_add(3) >= self.dram_base,
"Memory address must equals to or bigger than self.dram_base. {:X}",
@@ -777,7 +768,7 @@ impl MemoryWrapper {
self.memory.read_word(p_address - self.dram_base)
}
pub fn read_doubleword(&mut self, p_address: u64) -> u64 {
pub fn read_doubleword(&self, p_address: u64) -> u64 {
debug_assert!(
p_address >= self.dram_base && p_address.wrapping_add(7) >= self.dram_base,
"Memory address must equals to or bigger than self.dram_base. {:X}",


@@ -1,355 +1,357 @@
use std::io::Read;
use xous::XousHandler;
mod xous;
#[derive(Debug)]
enum LoadError {
IncorrectFormat,
BitSizeError,
SatpWriteError,
MstatusWriteError,
CpuTrap(riscv_cpu::cpu::Trap),
}
// #[derive(Debug)]
// enum LoadError {
// IncorrectFormat,
// BitSizeError,
// SatpWriteError,
// MstatusWriteError,
// CpuTrap(riscv_cpu::cpu::Trap),
// }
impl std::fmt::Display for LoadError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
LoadError::IncorrectFormat => write!(f, "Incorrect format"),
LoadError::BitSizeError => write!(f, "Incorrect bit size"),
LoadError::SatpWriteError => write!(f, "Couldn't write to SATP register"),
LoadError::MstatusWriteError => write!(f, "Couldn't write to MSTATUS register"),
LoadError::CpuTrap(trap) => write!(f, "CPU trap: {:?}", trap),
}
}
}
// impl std::fmt::Display for LoadError {
// fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
// match self {
// LoadError::IncorrectFormat => write!(f, "Incorrect format"),
// LoadError::BitSizeError => write!(f, "Incorrect bit size"),
// LoadError::SatpWriteError => write!(f, "Couldn't write to SATP register"),
// LoadError::MstatusWriteError => write!(f, "Couldn't write to MSTATUS register"),
// LoadError::CpuTrap(trap) => write!(f, "CPU trap: {:?}", trap),
// }
// }
// }
impl std::error::Error for LoadError {}
// impl std::error::Error for LoadError {}
struct MemoryManager32<'a> {
memory: &'a mut [u8],
base: u32,
allocator_offset: u32,
satp: u32,
l1_pt: u32,
}
// struct MemoryManager32<'a> {
// memory: &'a mut [u8],
// base: u32,
// allocator_offset: u32,
// satp: u32,
// l1_pt: u32,
// }
const MMUFLAG_VALID: u32 = 0x01;
pub const MMUFLAG_READABLE: u32 = 0x02;
pub const MMUFLAG_WRITABLE: u32 = 0x04;
pub const MMUFLAG_EXECUTABLE: u32 = 0x8;
pub const MMUFLAG_U: u32 = 0x10;
pub const MMUFLAG_ACCESSED: u32 = 0x40;
pub const MMUFLAG_DIRTY: u32 = 0x80;
// const MMUFLAG_VALID: u32 = 0x01;
// pub const MMUFLAG_READABLE: u32 = 0x02;
// pub const MMUFLAG_WRITABLE: u32 = 0x04;
// pub const MMUFLAG_EXECUTABLE: u32 = 0x8;
// pub const MMUFLAG_U: u32 = 0x10;
// pub const MMUFLAG_ACCESSED: u32 = 0x40;
// pub const MMUFLAG_DIRTY: u32 = 0x80;
impl<'a> MemoryManager32<'a> {
fn new(memory: &'a mut [u8], base: u32) -> Self {
// Allocate a single process. Place the root page table at
// the second block of memory.
Self {
memory,
base,
allocator_offset: 8192,
l1_pt: base + 4096,
satp: ((4096 + base) >> 12) | 0x8000_0000,
}
}
// impl<'a> MemoryManager32<'a> {
// fn new(memory: &'a mut [u8], base: u32) -> Self {
// // Allocate a single process. Place the root page table at
// // the second block of memory.
// Self {
// memory,
// base,
// allocator_offset: 8192,
// l1_pt: base + 4096,
// satp: ((4096 + base) >> 12) | 0x8000_0000,
// }
// }
fn allocate_page(&mut self) -> u32 {
let page = self.allocator_offset;
self.allocator_offset += 4096;
page + self.base
}
// fn allocate_page(&mut self) -> u32 {
// let page = self.allocator_offset;
// self.allocator_offset += 4096;
// page + self.base
// }
pub fn virt_to_phys(&self, virt: u32) -> Option<u32> {
let vpn1 = ((virt >> 22) & ((1 << 10) - 1)) as usize * 4;
let vpn0 = ((virt >> 12) & ((1 << 10) - 1)) as usize * 4;
let offset = virt & ((1 << 12) - 1);
// pub fn virt_to_phys(&self, virt: u32) -> Option<u32> {
// let vpn1 = ((virt >> 22) & ((1 << 10) - 1)) as usize * 4;
// let vpn0 = ((virt >> 12) & ((1 << 10) - 1)) as usize * 4;
// let offset = virt & ((1 << 12) - 1);
// The root (l1) pagetable is defined to be mapped into our virtual
// address space at this address.
let l1_pt = &self.memory[(self.l1_pt - self.base).try_into().unwrap()..];
// // The root (l1) pagetable is defined to be mapped into our virtual
// // address space at this address.
// let l1_pt = &self.memory[(self.l1_pt - self.base).try_into().unwrap()..];
// If the level 1 pagetable doesn't exist, then this address is invalid
let l1_pt_entry = u32::from_le_bytes(l1_pt[vpn1..vpn1 + 4].try_into().unwrap());
if l1_pt_entry & MMUFLAG_VALID == 0 {
return None;
}
if l1_pt_entry & (MMUFLAG_EXECUTABLE | MMUFLAG_READABLE | MMUFLAG_WRITABLE) != 0 {
return None;
}
// // If the level 1 pagetable doesn't exist, then this address is invalid
// let l1_pt_entry = u32::from_le_bytes(l1_pt[vpn1..vpn1 + 4].try_into().unwrap());
// if l1_pt_entry & MMUFLAG_VALID == 0 {
// return None;
// }
// if l1_pt_entry & (MMUFLAG_EXECUTABLE | MMUFLAG_READABLE | MMUFLAG_WRITABLE) != 0 {
// return None;
// }
let l0_pt = &self.memory[(((l1_pt_entry >> 10) << 12) - self.base)
.try_into()
.unwrap()..];
let l0_pt_entry = u32::from_le_bytes(l0_pt[vpn0..vpn0 + 4].try_into().unwrap());
// let l0_pt = &self.memory[(((l1_pt_entry >> 10) << 12) - self.base)
// .try_into()
// .unwrap()..];
// let l0_pt_entry = u32::from_le_bytes(l0_pt[vpn0..vpn0 + 4].try_into().unwrap());
// Ensure the entry hasn't already been mapped.
if l0_pt_entry & MMUFLAG_VALID == 0 {
return None;
}
Some(((l0_pt_entry >> 10) << 12) | offset)
}
// // Ensure the entry hasn't already been mapped.
// if l0_pt_entry & MMUFLAG_VALID == 0 {
// return None;
// }
// Some(((l0_pt_entry >> 10) << 12) | offset)
// }
fn read_phys_u32(&self, address: u32) -> u32 {
assert!(address >= self.base && address <= self.base + self.memory.len() as u32);
u32::from_le_bytes(
self.memory[(address - self.base).try_into().unwrap()
..(address - self.base + 4).try_into().unwrap()]
.try_into()
.unwrap(),
)
}
// fn read_phys_u32(&self, address: u32) -> u32 {
// assert!(address >= self.base && address <= self.base + self.memory.len() as u32);
// u32::from_le_bytes(
// self.memory[(address - self.base).try_into().unwrap()
// ..(address - self.base + 4).try_into().unwrap()]
// .try_into()
// .unwrap(),
// )
// }
fn write_phys_u32(&mut self, address: u32, value: u32) {
assert!(address >= self.base && address <= self.base + self.memory.len() as u32);
for (src, dest) in value
.to_le_bytes()
.iter()
.zip(self.memory[(address - self.base).try_into().unwrap()..].iter_mut())
{
*dest = *src;
}
}
// fn write_phys_u32(&mut self, address: u32, value: u32) {
// assert!(address >= self.base && address <= self.base + self.memory.len() as u32);
// for (src, dest) in value
// .to_le_bytes()
// .iter()
// .zip(self.memory[(address - self.base).try_into().unwrap()..].iter_mut())
// {
// *dest = *src;
// }
// }
fn read_virt_u32(&self, address: u32) -> u32 {
self.read_phys_u32(self.virt_to_phys(address).unwrap())
}
// fn read_virt_u32(&self, address: u32) -> u32 {
// self.read_phys_u32(self.virt_to_phys(address).unwrap())
// }
fn write_virt_u8(&mut self, address: u32, value: u8) {
let phys: usize = (self.virt_to_phys(address).unwrap() - self.base)
.try_into()
.unwrap();
self.memory[phys] = value;
}
// fn write_virt_u8(&mut self, address: u32, value: u8) {
// let phys: usize = (self.virt_to_phys(address).unwrap() - self.base)
// .try_into()
// .unwrap();
// self.memory[phys] = value;
// }
fn is_allocated(&self, address: u32) -> bool {
self.virt_to_phys(address).is_some()
}
// fn is_allocated(&self, address: u32) -> bool {
// self.virt_to_phys(address).is_some()
// }
/// Allocate a brand-new memory mapping. When this memory mapping is created,
/// it will be ready to use in a new process, however it will have no actual
/// program code. It will, however, have the following pages mapped:
///
/// 1. The kernel will be mapped to superpage 1023, meaning the kernel can
/// switch to this process and do things.
/// 2. A page will be allocated for superpage 1022, to contain pages for
/// process-specific code.
/// 3. A page will be allocated for superpage 1021, to contain pages for
/// managing pages.
/// 4. The root pagetable will be allocated and mapped at 0xff800000,
/// ensuring new superpages can be allocated.
/// 5. A context page will be allocated at 0xff801000, ensuring the
/// process can actually be run.
/// 6. Individual pagetable mappings are mapped at 0xff400000
/// At the end of this operation, the following mapping will take place. Note that
/// names are repeated in the chart below to indicate they are the same page
/// represented multiple times. Items in brackets are offsets (in `usize`-words)
/// from the start of the page. For example, offset 1023 on the root pagetable
/// (address 4092) contains an entry that points to the kernel superpage.
/// +----------------+
/// | Root Pagetable |
/// | root |
/// +----------------+
/// |
/// +---------------+-------------------+------------------+
/// | | |
/// [1021] [1022] [1023]
/// v v v
/// +--------------+ +--------------+ +--------+
/// | Level 0/1021 | | Level 0/1022 | | Kernel |
/// | pages_l0 | | process_l0 | | |
/// +--------------+ +--------------+ +--------+
/// | |
/// +-------+---------+ +---+-----------+
/// | | | |
/// [1021] [1022] [0] [1]
/// v v v v
/// +--------------+ +--------------+ +----------------+ +---------+
/// | Level 0/1021 | | Level 0/1022 | | Root Pagetable | | Context |
/// +--------------+ +--------------+ +----------------+ +---------+
fn ensure_page(&mut self, virt: u32) {
let vpn1 = (virt >> 22) & ((1 << 10) - 1);
let vpn0 = (virt >> 12) & ((1 << 10) - 1);
// /// Allocate a brand-new memory mapping. When this memory mapping is created,
// /// it will be ready to use in a new process, however it will have no actual
// /// program code. It will, however, have the following pages mapped:
// ///
// /// 1. The kernel will be mapped to superpage 1023, meaning the kernel can
// /// switch to this process and do things.
// /// 2. A page will be allocated for superpage 1022, to contain pages for
// /// process-specific code.
// /// 3. A page will be allocated for superpage 1021, to contain pages for
// /// managing pages.
// /// 4. The root pagetable will be allocated and mapped at 0xff800000,
// /// ensuring new superpages can be allocated.
// /// 5. A context page will be allocated at 0xff801000, ensuring the
// /// process can actually be run.
// /// 6. Individual pagetable mappings are mapped at 0xff400000
// /// At the end of this operation, the following mapping will take place. Note that
// /// names are repeated in the chart below to indicate they are the same page
// /// represented multiple times. Items in brackets are offsets (in `usize`-words)
// /// from the start of the page. For example, offset 1023 on the root pagetable
// /// (address 4092) contains an entry that points to the kernel superpage.
// /// +----------------+
// /// | Root Pagetable |
// /// | root |
// /// +----------------+
// /// |
// /// +---------------+-------------------+------------------+
// /// | | |
// /// [1021] [1022] [1023]
// /// v v v
// /// +--------------+ +--------------+ +--------+
// /// | Level 0/1021 | | Level 0/1022 | | Kernel |
// /// | pages_l0 | | process_l0 | | |
// /// +--------------+ +--------------+ +--------+
// /// | |
// /// +-------+---------+ +---+-----------+
// /// | | | |
// /// [1021] [1022] [0] [1]
// /// v v v v
// /// +--------------+ +--------------+ +----------------+ +---------+
// /// | Level 0/1021 | | Level 0/1022 | | Root Pagetable | | Context |
// /// +--------------+ +--------------+ +----------------+ +---------+
// fn ensure_page(&mut self, virt: u32) {
// let vpn1 = (virt >> 22) & ((1 << 10) - 1);
// let vpn0 = (virt >> 12) & ((1 << 10) - 1);
// println!("Ensuring page {:08x} exists", virt);
// // println!("Ensuring page {:08x} exists", virt);
// Ensure there's a level 0 pagetable
let mut l1_pt_entry = self.read_phys_u32(self.l1_pt + vpn1 * 4);
if l1_pt_entry & MMUFLAG_VALID == 0 {
// Allocate a new page for the level 1 pagetable
let l0_pt_phys = self.allocate_page();
// println!("Allocating level 0 pagetable at {:08x}", l0_pt_phys);
l1_pt_entry =
((l0_pt_phys >> 12) << 10) | MMUFLAG_VALID | MMUFLAG_DIRTY | MMUFLAG_ACCESSED;
// Map the level 1 pagetable into the root pagetable
self.write_phys_u32(self.l1_pt + vpn1 * 4, l1_pt_entry);
}
// // Ensure there's a level 0 pagetable
// let mut l1_pt_entry = self.read_phys_u32(self.l1_pt + vpn1 * 4);
// if l1_pt_entry & MMUFLAG_VALID == 0 {
// // Allocate a new page for the level 1 pagetable
// let l0_pt_phys = self.allocate_page();
// // println!("Allocating level 0 pagetable at {:08x}", l0_pt_phys);
// l1_pt_entry =
// ((l0_pt_phys >> 12) << 10) | MMUFLAG_VALID | MMUFLAG_DIRTY | MMUFLAG_ACCESSED;
// // Map the level 1 pagetable into the root pagetable
// self.write_phys_u32(self.l1_pt + vpn1 * 4, l1_pt_entry);
// }
let l0_pt_phys = l1_pt_entry >> 10 << 12;
// println!(
// "Level 0 pagetable at {:08x} (l1_pt_entry: {:08x})",
// l0_pt_phys, l1_pt_entry
// );
// let l0_pt_phys = l1_pt_entry >> 10 << 12;
// // println!(
// // "Level 0 pagetable at {:08x} (l1_pt_entry: {:08x})",
// // l0_pt_phys, l1_pt_entry
// // );
// Ensure the page is mapped
let mut l0_pt_entry = self.read_phys_u32(l0_pt_phys + vpn0 * 4);
if l0_pt_entry & MMUFLAG_VALID == 0 {
// Allocate a new page for the level 0 pagetable
let page_phys = self.allocate_page();
// println!("Allocating physical page at {:08x}", page_phys);
l0_pt_entry = ((page_phys >> 12) << 10)
| MMUFLAG_VALID
| MMUFLAG_WRITABLE
| MMUFLAG_READABLE
| MMUFLAG_EXECUTABLE
| MMUFLAG_DIRTY
| MMUFLAG_ACCESSED;
// Map the level 0 pagetable into the level 1 pagetable
self.write_phys_u32(l0_pt_phys + vpn0 * 4, l0_pt_entry);
}
}
// // Ensure the page is mapped
// let mut l0_pt_entry = self.read_phys_u32(l0_pt_phys + vpn0 * 4);
// if l0_pt_entry & MMUFLAG_VALID == 0 {
// // Allocate a new page for the level 0 pagetable
// let page_phys = self.allocate_page();
// // println!("Allocating physical page at {:08x}", page_phys);
// l0_pt_entry = ((page_phys >> 12) << 10)
// | MMUFLAG_VALID
// | MMUFLAG_WRITABLE
// | MMUFLAG_READABLE
// | MMUFLAG_EXECUTABLE
// | MMUFLAG_DIRTY
// | MMUFLAG_ACCESSED;
// // Map the level 0 pagetable into the level 1 pagetable
// self.write_phys_u32(l0_pt_phys + vpn0 * 4, l0_pt_entry);
// }
// }
fn write_bytes(&mut self, data: &[u8], start: u32) {
for (i, byte) in data.iter().enumerate() {
let i = i as u32;
// println!("Map: {}", self);
self.ensure_page(start + i);
// println!("Writing byte to {:08x}...", start + i);
// println!("Map: {}", self);
if start + i == 0x258062 {
println!("Writing {:02x} to {:08x}", byte, start + i);
}
// fn write_bytes(&mut self, data: &[u8], start: u32) {
// for (i, byte) in data.iter().enumerate() {
// let i = i as u32;
// // println!("Map: {}", self);
// self.ensure_page(start + i);
// // println!("Writing byte to {:08x}...", start + i);
// // println!("Map: {}", self);
// if start + i == 0x258062 {
// println!("Writing {:02x} to {:08x}", byte, start + i);
// }
self.write_virt_u8(start + i, *byte);
}
}
}
// self.write_virt_u8(start + i, *byte);
// }
// }
// }
impl core::fmt::Display for MemoryManager32<'_> {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
writeln!(f, "Memory Maps:")?;
let l1_pt = &self.memory[(self.l1_pt - self.base).try_into().unwrap()..];
for (i, l1_entry) in l1_pt[0..4096].chunks(4).enumerate() {
let l1_entry = u32::from_le_bytes(l1_entry.try_into().unwrap());
if l1_entry == 0 {
continue;
}
let superpage_addr = i as u32 * (1 << 22);
writeln!(
f,
" {:4} Superpage for {:08x} @ {:08x} (flags: {:?})",
i,
superpage_addr,
(l1_entry >> 10) << 12,
// MMUFlags::from_bits(l1_entry & 0xff).unwrap()
l1_entry & 0xff,
)?;
// impl core::fmt::Display for MemoryManager32<'_> {
// fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
// writeln!(f, "Memory Maps:")?;
// let l1_pt = &self.memory[(self.l1_pt - self.base).try_into().unwrap()..];
// for (i, l1_entry) in l1_pt[0..4096].chunks(4).enumerate() {
// let l1_entry = u32::from_le_bytes(l1_entry.try_into().unwrap());
// if l1_entry == 0 {
// continue;
// }
// let superpage_addr = i as u32 * (1 << 22);
// writeln!(
// f,
// " {:4} Superpage for {:08x} @ {:08x} (flags: {:?})",
// i,
// superpage_addr,
// (l1_entry >> 10) << 12,
// // MMUFlags::from_bits(l1_entry & 0xff).unwrap()
// l1_entry & 0xff,
// )?;
let l0_pt = &self.memory[(((l1_entry >> 10) << 12) - self.base).try_into().unwrap()..];
for (j, l0_entry) in l0_pt[0..4096].chunks(4).enumerate() {
let l0_entry = u32::from_le_bytes(l0_entry.try_into().unwrap());
if l0_entry & 0x7 == 0 {
continue;
}
let page_addr = j as u32 * (1 << 12);
writeln!(
f,
" {:4} {:08x} -> {:08x} (flags: {:?})",
j,
superpage_addr + page_addr,
(l0_entry >> 10) << 12,
// MMUFlags::from_bits(l0_entry & 0xff).unwrap()
l0_entry & 0xff,
)?;
}
}
Ok(())
}
}
// let l0_pt = &self.memory[(((l1_entry >> 10) << 12) - self.base).try_into().unwrap()..];
// for (j, l0_entry) in l0_pt[0..4096].chunks(4).enumerate() {
// let l0_entry = u32::from_le_bytes(l0_entry.try_into().unwrap());
// if l0_entry & 0x7 == 0 {
// continue;
// }
// let page_addr = j as u32 * (1 << 12);
// writeln!(
// f,
// " {:4} {:08x} -> {:08x} (flags: {:?})",
// j,
// superpage_addr + page_addr,
// (l0_entry >> 10) << 12,
// // MMUFlags::from_bits(l0_entry & 0xff).unwrap()
// l0_entry & 0xff,
// )?;
// }
// }
// Ok(())
// }
// }
fn load_program_to_cpu(cpu: &mut riscv_cpu::Cpu, program: &[u8]) -> Result<(), LoadError> {
let memory_base = cpu.memory_base();
let memory_size = cpu.memory_size();
// fn load_program_to_cpu(cpu: &mut riscv_cpu::Cpu, program: &[u8]) -> Result<(), LoadError> {
// let memory_base = cpu.memory_base();
// let memory_size = cpu.memory_size();
let goblin::Object::Elf(elf) =
goblin::Object::parse(program).map_err(|_| LoadError::IncorrectFormat)?
else {
return Err(LoadError::IncorrectFormat);
};
if elf.is_64 {
return Err(LoadError::BitSizeError);
}
// let goblin::Object::Elf(elf) =
// goblin::Object::parse(program).map_err(|_| LoadError::IncorrectFormat)?
// else {
// return Err(LoadError::IncorrectFormat);
// };
// if elf.is_64 {
// return Err(LoadError::BitSizeError);
// }
let mut shadow_memory = vec![0; memory_size as usize];
let mut mm = MemoryManager32::new(&mut shadow_memory, memory_base as u32);
// let mut shadow_memory = vec![0; memory_size as usize];
// let mut mm = MemoryManager32::new(&mut shadow_memory, memory_base as u32);
for sh in elf.section_headers {
if sh.sh_flags as u32 & goblin::elf::section_header::SHF_ALLOC == 0 {
// println!(
// "Skipping section {}",
// elf.shdr_strtab
// .get_at(sh.sh_name)
// .unwrap_or("???unknown???")
// );
continue;
}
// println!(
// "Section {}: Loading {} bytes at {:x}",
// elf.shdr_strtab
// .get_at(sh.sh_name)
// .unwrap_or("???unknown???"),
// sh.sh_size,
// sh.sh_offset
// );
if sh.sh_type & goblin::elf::section_header::SHT_NOBITS != 0 {
for addr in sh.sh_addr..(sh.sh_addr + sh.sh_size) {
mm.ensure_page(addr.try_into().unwrap());
mm.write_virt_u8(addr.try_into().unwrap(), 0);
}
} else {
mm.write_bytes(
&program[sh.sh_offset as usize..(sh.sh_offset + sh.sh_size) as usize],
sh.sh_addr.try_into().unwrap(),
);
}
}
// for sh in elf.section_headers {
// if sh.sh_flags as u32 & goblin::elf::section_header::SHF_ALLOC == 0 {
// // println!(
// // "Skipping section {}",
// // elf.shdr_strtab
// // .get_at(sh.sh_name)
// // .unwrap_or("???unknown???")
// // );
// continue;
// }
// // println!(
// // "Section {}: Loading {} bytes at {:x}",
// // elf.shdr_strtab
// // .get_at(sh.sh_name)
// // .unwrap_or("???unknown???"),
// // sh.sh_size,
// // sh.sh_offset
// // );
// if sh.sh_type & goblin::elf::section_header::SHT_NOBITS != 0 {
// for addr in sh.sh_addr..(sh.sh_addr + sh.sh_size) {
// mm.ensure_page(addr.try_into().unwrap());
// mm.write_virt_u8(addr.try_into().unwrap(), 0);
// }
// } else {
// mm.write_bytes(
// &program[sh.sh_offset as usize..(sh.sh_offset + sh.sh_size) as usize],
// sh.sh_addr.try_into().unwrap(),
// );
// }
// }
// TODO: Get memory permissions correct
// // TODO: Get memory permissions correct
let satp = mm.satp.into();
// let satp = mm.satp.into();
// Ensure stack is allocated
for page in (0xc000_0000..0xc002_0000).step_by(4096) {
mm.ensure_page(page);
}
// // Ensure stack is allocated
// for page in (0xc000_0000..0xc002_0000).step_by(4096) {
// mm.ensure_page(page);
// }
for (offset, byte) in shadow_memory.into_iter().enumerate() {
if byte == 0 {
continue;
}
// println!("Writing {:02x} to {:08x}", byte, offset as u64 + memory_base);
cpu.phys_write_u8(offset as u64 + memory_base, byte);
}
// for (offset, byte) in shadow_memory.into_iter().enumerate() {
// if byte == 0 {
// continue;
// }
// // println!("Writing {:02x} to {:08x}", byte, offset as u64 + memory_base);
// cpu.phys_write_u8(offset as u64 + memory_base, byte);
// }
cpu.write_csr(riscv_cpu::cpu::CSR_SATP_ADDRESS, satp)
.map_err(|_| LoadError::SatpWriteError)?;
cpu.update_pc(elf.entry);
// cpu.write_csr(riscv_cpu::cpu::CSR_SATP_ADDRESS, satp)
// .map_err(|_| LoadError::SatpWriteError)?;
// cpu.update_pc(elf.entry);
// Return to User Mode (0 << 11) with interrupts disabled (1 << 5)
cpu.write_csr(riscv_cpu::cpu::CSR_MSTATUS_ADDRESS, 1 << 5)
.map_err(|_| LoadError::MstatusWriteError)?;
// // Return to User Mode (0 << 11) with interrupts disabled (1 << 5)
// cpu.write_csr(riscv_cpu::cpu::CSR_MSTATUS_ADDRESS, 1 << 5)
// .map_err(|_| LoadError::MstatusWriteError)?;
cpu.write_csr(riscv_cpu::cpu::CSR_SEPC_ADDRESS, elf.entry)
.unwrap();
// cpu.write_csr(riscv_cpu::cpu::CSR_SEPC_ADDRESS, elf.entry)
// .unwrap();
// SRET to return to user mode
cpu.execute_opcode(0x10200073).map_err(LoadError::CpuTrap)?;
// // SRET to return to user mode
// cpu.execute_opcode(0x10200073).map_err(LoadError::CpuTrap)?;
// Update the stack pointer
cpu.write_register(2, 0xc002_0000 - 4);
// // Update the stack pointer
// cpu.write_register(2, 0xc002_0000 - 4);
Ok(())
}
// Ok(())
// }
fn main() {
let mut std_tests = Vec::new();
@@ -361,10 +363,13 @@ fn main() {
let mut cpu = riscv_cpu::CpuBuilder::new()
.memory_size(16 * 1024 * 1024)
.xlen(riscv_cpu::Xlen::Bit32)
.handler(Box::new(xous::XousHandler {}))
.build();
load_program_to_cpu(&mut cpu, &std_tests).expect("couldn't load std-tests");
let mut xous = XousHandler::new(&cpu);
xous.load_program_to_cpu(&mut cpu, &std_tests)
.expect("couldn't load std-tests");
cpu.set_handler(Some(Box::new(xous)));
for tick in 0..1000 {
cpu.tick();


@@ -1,6 +1,250 @@
use riscv_cpu::cpu::EventHandler;
// mod mem;
pub struct XousHandler {}
#[derive(Debug)]
pub enum LoadError {
IncorrectFormat,
BitSizeError,
SatpWriteError,
MstatusWriteError,
CpuTrap(riscv_cpu::cpu::Trap),
}
impl std::fmt::Display for LoadError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
LoadError::IncorrectFormat => write!(f, "Incorrect format"),
LoadError::BitSizeError => write!(f, "Incorrect bit size"),
LoadError::SatpWriteError => write!(f, "Couldn't write to SATP register"),
LoadError::MstatusWriteError => write!(f, "Couldn't write to MSTATUS register"),
LoadError::CpuTrap(trap) => write!(f, "CPU trap: {:?}", trap),
}
}
}
const MMUFLAG_VALID: u32 = 0x01;
const MMUFLAG_READABLE: u32 = 0x02;
const MMUFLAG_WRITABLE: u32 = 0x04;
const MMUFLAG_EXECUTABLE: u32 = 0x8;
const MMUFLAG_USERMODE: u32 = 0x10;
const MMUFLAG_GLOBAL: u32 = 0x20;
const MMUFLAG_ACCESSED: u32 = 0x40;
const MMUFLAG_DIRTY: u32 = 0x80;
impl std::error::Error for LoadError {}
pub struct XousHandler {
memory_base: u32,
allocator_offset: u32,
satp: u32,
l1_pt: u32,
}
impl XousHandler {
pub fn new(cpu: &riscv_cpu::Cpu) -> Self {
let memory_base = cpu.memory_base() as u32;
// let memory_size = cpu.memory_size();
Self {
memory_base,
l1_pt: memory_base + 4096,
allocator_offset: 8192,
satp: ((4096 + memory_base) >> 12) | 0x8000_0000,
}
}
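The satp value built here follows the standard Sv32 encoding: bit 31 selects Sv32 mode, and the low bits carry the physical page number of the root pagetable (here memory_base + 4096, i.e. l1_pt). A sketch of that packing, with an illustrative helper name:

// Sketch of the Sv32 SATP packing used in new() above.
fn satp_for_root(root_pt_phys: u32) -> u32 {
    (root_pt_phys >> 12) // PPN of the root pagetable
        | 0x8000_0000 // MODE = 1 (Sv32) in bit 31
}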
fn allocate_page(&mut self) -> u32 {
let page = self.allocator_offset;
self.allocator_offset += 4096;
page + self.memory_base
}
fn write_bytes(&mut self, cpu: &mut riscv_cpu::Cpu, data: &[u8], start: u32) {
for (i, byte) in data.iter().enumerate() {
let i = i as u32;
// self.print_mmu(cpu);
self.ensure_page(cpu, start + i);
let phys = self.virt_to_phys(cpu, start + i).unwrap();
// println!("Writing byte to {:08x}...", start + i);
// self.print_mmu(cpu);
if start + i == 0x258062 {
println!("Writing {:02x} to {:08x}", byte, start + i);
}
cpu.phys_write_u8(phys as u64, *byte);
}
}
pub fn print_mmu(&self, cpu: &riscv_cpu::Cpu) {
println!("Memory Map:");
for vpn1 in (0..4096).step_by(4) {
let l1_entry = cpu.phys_read_u32(self.l1_pt as u64 + vpn1);
if l1_entry & MMUFLAG_VALID == 0 {
continue;
}
let superpage_addr = vpn1 as u32 * (1 << 22);
println!(
" {:4} Superpage for {:08x} @ {:08x} (flags: {:?})",
vpn1,
superpage_addr,
(l1_entry >> 10) << 12,
// MMUFlags::from_bits(l1_entry & 0xff).unwrap()
l1_entry & 0xff,
);
for vpn0 in (0..4096).step_by(4) {
let l0_entry = cpu.phys_read_u32((((l1_entry >> 10) << 12) as u64) + vpn0 as u64);
if l0_entry & 0x7 == 0 {
continue;
}
let page_addr = vpn0 as u32 * (1 << 12);
println!(
" {:4} {:08x} -> {:08x} (flags: {:?})",
vpn0,
superpage_addr + page_addr,
(l0_entry >> 10) << 12,
// MMUFlags::from_bits(l0_entry & 0xff).unwrap()
l0_entry & 0xff,
);
}
}
}
pub fn virt_to_phys(&self, cpu: &riscv_cpu::Cpu, virt: u32) -> Option<u32> {
let vpn1 = ((virt >> 22) & ((1 << 10) - 1)) as usize * 4;
let vpn0 = ((virt >> 12) & ((1 << 10) - 1)) as usize * 4;
let offset = virt & ((1 << 12) - 1);
// The root (l1) pagetable is defined to be mapped into our virtual
// address space at this address.
let l1_pt_entry = cpu.phys_read_u32(self.l1_pt as u64 + vpn1 as u64);
// If the level 1 pagetable doesn't exist, then this address is invalid
if l1_pt_entry & MMUFLAG_VALID == 0 {
return None;
}
if l1_pt_entry & (MMUFLAG_EXECUTABLE | MMUFLAG_READABLE | MMUFLAG_WRITABLE) != 0 {
return None;
}
let l0_pt_entry = cpu.phys_read_u32((((l1_pt_entry >> 10) << 12) + vpn0 as u32) as u64);
// Ensure the entry hasn't already been mapped.
if l0_pt_entry & MMUFLAG_VALID == 0 {
return None;
}
Some(((l0_pt_entry >> 10) << 12) | offset)
}
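virt_to_phys is a plain two-level Sv32 walk: ten bits of level 1 index, ten bits of level 0 index, and a twelve-bit page offset. A worked split using the same shifts (illustrative helper; the * 4 turns an entry index into a byte offset within the pagetable):

// Split a virtual address into Sv32 pagetable offsets.
fn split_sv32(virt: u32) -> (usize, usize, u32) {
    let vpn1 = ((virt >> 22) & 0x3ff) as usize * 4; // byte offset of the L1 entry
    let vpn0 = ((virt >> 12) & 0x3ff) as usize * 4; // byte offset of the L0 entry
    let offset = virt & 0xfff; // offset within the 4 KiB page
    (vpn1, vpn0, offset)
}
// split_sv32(0x0025_8062) == (0x000, 0x960, 0x062)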
fn ensure_page(&mut self, cpu: &mut riscv_cpu::Cpu, address: u32) {
let vpn1 = ((address >> 22) & ((1 << 10) - 1)) as usize * 4;
let vpn0 = ((address >> 12) & ((1 << 10) - 1)) as usize * 4;
// The root (l1) pagetable is defined to be mapped into our virtual
// address space at this address.
// If the level 1 pagetable doesn't exist, then this address is invalid
let mut l1_pt_entry = cpu.phys_read_u32(self.l1_pt as u64 + vpn1 as u64);
if l1_pt_entry & MMUFLAG_VALID == 0 {
// Allocate a new page for the level 1 pagetable
let l0_pt_phys = self.allocate_page();
// println!("Allocating level 0 pagetable at {:08x}", l0_pt_phys);
l1_pt_entry =
((l0_pt_phys >> 12) << 10) | MMUFLAG_VALID | MMUFLAG_DIRTY | MMUFLAG_ACCESSED;
// Map the level 1 pagetable into the root pagetable
cpu.phys_write_u32(self.l1_pt as u64 + vpn1 as u64, l1_pt_entry);
}
let l0_pt_phys = ((l1_pt_entry >> 10) << 12) + vpn0 as u32;
let mut l0_pt_entry = cpu.phys_read_u32(l0_pt_phys as u64);
// Ensure the entry hasn't already been mapped.
if l0_pt_entry & MMUFLAG_VALID == 0 {
let page_phys = self.allocate_page();
l0_pt_entry = ((page_phys >> 12) << 10)
| MMUFLAG_VALID
| MMUFLAG_WRITABLE
| MMUFLAG_READABLE
| MMUFLAG_EXECUTABLE
| MMUFLAG_USERMODE
| MMUFLAG_DIRTY
| MMUFLAG_ACCESSED;
// Map the level 0 pagetable into the level 1 pagetable
cpu.phys_write_u32(l0_pt_phys as u64, l0_pt_entry);
}
}
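Both levels pack entries the same way: the target physical page number lands in bits 31:10 and the flag byte sits in the low bits. A sketch of the leaf entry ensure_page writes (the MMUFLAG_* constants are the ones defined at the top of this file):

// Sketch: the leaf PTE layout written by ensure_page above.
fn leaf_pte(page_phys: u32) -> u32 {
    ((page_phys >> 12) << 10) // PPN in bits 31:10
        | MMUFLAG_VALID
        | MMUFLAG_READABLE
        | MMUFLAG_WRITABLE
        | MMUFLAG_EXECUTABLE
        | MMUFLAG_USERMODE
        | MMUFLAG_DIRTY
        | MMUFLAG_ACCESSED
}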
pub fn load_program_to_cpu(
&mut self,
cpu: &mut riscv_cpu::Cpu,
program: &[u8],
) -> Result<(), LoadError> {
let goblin::Object::Elf(elf) =
goblin::Object::parse(program).map_err(|_| LoadError::IncorrectFormat)?
else {
return Err(LoadError::IncorrectFormat);
};
if elf.is_64 {
return Err(LoadError::BitSizeError);
}
for sh in elf.section_headers {
if sh.sh_flags as u32 & goblin::elf::section_header::SHF_ALLOC == 0 {
continue;
}
if sh.sh_type & goblin::elf::section_header::SHT_NOBITS != 0 {
for addr in sh.sh_addr..(sh.sh_addr + sh.sh_size) {
self.ensure_page(cpu, addr.try_into().unwrap());
// self.write_virt_u8(cpu, addr.try_into().unwrap(), 0);
}
} else {
self.write_bytes(
cpu,
&program[sh.sh_offset as usize..(sh.sh_offset + sh.sh_size) as usize],
sh.sh_addr.try_into().unwrap(),
);
}
}
self.print_mmu(cpu);
// TODO: Get memory permissions correct
let satp = self.satp.into();
// Ensure stack is allocated
for page in (0xc000_0000..0xc002_0000).step_by(4096) {
self.ensure_page(cpu, page);
}
// for (offset, byte) in shadow_memory.into_iter().enumerate() {
// if byte == 0 {
// continue;
// }
// // println!("Writing {:02x} to {:08x}", byte, offset as u64 + memory_base);
// cpu.phys_write_u8(offset as u64 + memory_base, byte);
// }
cpu.write_csr(riscv_cpu::cpu::CSR_SATP_ADDRESS, satp)
.map_err(|_| LoadError::SatpWriteError)?;
cpu.update_pc(elf.entry);
// Return to User Mode (0 << 11) with interrupts disabled (1 << 5)
cpu.write_csr(riscv_cpu::cpu::CSR_MSTATUS_ADDRESS, 1 << 5)
.map_err(|_| LoadError::MstatusWriteError)?;
cpu.write_csr(riscv_cpu::cpu::CSR_SEPC_ADDRESS, elf.entry)
.unwrap();
// SRET to return to user mode
cpu.execute_opcode(0x10200073).map_err(LoadError::CpuTrap)?;
// Update the stack pointer
cpu.write_register(2, 0xc002_0000 - 4);
Ok(())
}
}
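With loading owned by the handler, wiring a program up mirrors the main() hunk earlier in this commit; a condensed sketch, assuming those same APIs:

// Sketch: load a program via XousHandler, then hand it to the CPU.
fn run(program: &[u8]) {
    let mut cpu = riscv_cpu::CpuBuilder::new()
        .memory_size(16 * 1024 * 1024)
        .xlen(riscv_cpu::Xlen::Bit32)
        .build();
    let mut xous = XousHandler::new(&cpu);
    xous.load_program_to_cpu(&mut cpu, program)
        .expect("couldn't load program");
    cpu.set_handler(Some(Box::new(xous)));
    for _ in 0..1000 {
        cpu.tick();
    }
}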
#[derive(Debug)]
enum Syscall {