switch emulators

Signed-off-by: Sean Cross <sean@xobs.io>
Sean Cross 2023-12-29 22:59:42 +08:00
parent eb5881a358
commit 9ae18afa67
7 changed files with 5172 additions and 4160 deletions


@@ -1,11 +0,0 @@
use crate::riscv::exception::Exception;
pub enum XLen {
X32,
X64,
}
pub trait Bus<XLen> {
fn read(&mut self, addr: XLen, size: u8) -> Result<XLen, Exception>;
fn write(&mut self, addr: XLen, value: XLen, size: u8) -> Result<(), Exception>;
}
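For illustration only (not part of this commit), here is a minimal sketch of how the removed `Bus` trait could be implemented for a flat RAM device. The `Ram` type and its backing `Vec<u8>` are hypothetical, and the import paths assume the `src/riscv` module layout shown in `mod.rs` below.

use crate::riscv::bus::Bus;
use crate::riscv::exception::Exception;

/// Hypothetical flat RAM used only to illustrate the trait.
struct Ram {
    data: Vec<u8>,
}

impl Bus<u64> for Ram {
    fn read(&mut self, addr: u64, size: u8) -> Result<u64, Exception> {
        let mut value = 0u64;
        for i in 0..size as u64 {
            let byte = *self
                .data
                .get((addr + i) as usize)
                .ok_or(Exception::LoadAccessFault)?;
            // Assemble the value little-endian, one byte at a time.
            value |= (byte as u64) << (i * 8);
        }
        Ok(value)
    }

    fn write(&mut self, addr: u64, value: u64, size: u8) -> Result<(), Exception> {
        for i in 0..size as u64 {
            let slot = self
                .data
                .get_mut((addr + i) as usize)
                .ok_or(Exception::StoreAMOAccessFault)?;
            *slot = (value >> (i * 8)) as u8;
        }
        Ok(())
    }
}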

File diff suppressed because it is too large.


@@ -1,397 +0,0 @@
//! The csr module contains all the control and status registers.
use std::fmt;
use std::ops::{Bound, Range, RangeBounds, RangeInclusive};
pub type CsrAddress = u16;
pub type CsrFieldRange = RangeInclusive<usize>;
pub const MXLEN: usize = 64;
/// The number of CSRs. The address field is 12 bits, so the maximum number of CSRs is 4096 (2**12).
pub const CSR_SIZE: usize = 4096;
//////////////////////////////
// User-level CSR addresses //
//////////////////////////////
// User trap setup.
/// User status register.
const USTATUS: CsrAddress = 0x000;
/// User trap handler base address.
const UTVEC: CsrAddress = 0x005;
// User trap handling.
/// User exception program counter.
const UEPC: CsrAddress = 0x041;
/// User trap cause.
const UCAUSE: CsrAddress = 0x042;
/// User bad address or instruction.
const _UTVAL: CsrAddress = 0x043;
// User floating-point CSRs.
/// Floating-point accrued exceptions.
const _FFLAGS: CsrAddress = 0x001;
/// Floating-point dynamic rounding mode.
const _FRB: CsrAddress = 0x002;
/// Floating-point control and status register (frm + fflags).
pub const FCSR: CsrAddress = 0x003;
// User Counter/Timers.
/// Timer for RDTIME instruction.
const TIME: CsrAddress = 0xc01;
////////////////////////////////////
// Supervisor-level CSR addresses //
////////////////////////////////////
// Supervisor trap setup.
/// Supervisor status register.
pub const SSTATUS: CsrAddress = 0x100;
/// Supervisor exception delegation register.
const SEDELEG: CsrAddress = 0x102;
/// Supervisor interrupt delegation register.
const SIDELEG: CsrAddress = 0x103;
/// Supervisor interrupt-enable register.
pub const SIE: CsrAddress = 0x104;
/// Supervisor trap handler base address.
pub const STVEC: CsrAddress = 0x105;
// Supervisor trap handling.
/// Scratch register for supervisor trap handlers.
const _SSCRATCH: CsrAddress = 0x140;
/// Supervisor exception program counter.
pub const SEPC: CsrAddress = 0x141;
/// Supervisor trap cause.
pub const SCAUSE: CsrAddress = 0x142;
/// Supervisor bad address or instruction.
pub const STVAL: CsrAddress = 0x143;
/// Supervisor interrupt pending.
pub const SIP: CsrAddress = 0x144;
// Supervisor protection and translation.
/// Supervisor address translation and protection.
pub const SATP: CsrAddress = 0x180;
// SSTATUS fields.
const SSTATUS_SIE_MASK: u64 = 0x2; // sstatus[1]
const SSTATUS_SPIE_MASK: u64 = 0x20; // sstatus[5]
const SSTATUS_UBE_MASK: u64 = 0x40; // sstatus[6]
const SSTATUS_SPP_MASK: u64 = 0x100; // sstatus[8]
const SSTATUS_FS_MASK: u64 = 0x6000; // sstatus[14:13]
const SSTATUS_XS_MASK: u64 = 0x18000; // sstatus[16:15]
const SSTATUS_SUM_MASK: u64 = 0x40000; // sstatus[18]
const SSTATUS_MXR_MASK: u64 = 0x80000; // sstatus[19]
const SSTATUS_UXL_MASK: u64 = 0x3_00000000; // sstatus[33:32]
const SSTATUS_SD_MASK: u64 = 0x80000000_00000000; // sstatus[63]
const SSTATUS_MASK: u64 = SSTATUS_SIE_MASK
| SSTATUS_SPIE_MASK
| SSTATUS_UBE_MASK
| SSTATUS_SPP_MASK
| SSTATUS_FS_MASK
| SSTATUS_XS_MASK
| SSTATUS_SUM_MASK
| SSTATUS_MXR_MASK
| SSTATUS_UXL_MASK
| SSTATUS_SD_MASK;
/// Global interrupt-enable bit for supervisor mode.
pub const XSTATUS_SIE: CsrFieldRange = 1..=1;
/// Previous interrupt-enable bit for supervisor mode.
pub const XSTATUS_SPIE: CsrFieldRange = 5..=5;
/// Previous privilege mode for supervisor mode.
pub const XSTATUS_SPP: CsrFieldRange = 8..=8;
/////////////////////////////////
// Machine-level CSR addresses //
/////////////////////////////////
// Machine information registers.
/// Vendor ID.
const MVENDORID: CsrAddress = 0xf11;
/// Architecture ID.
const MARCHID: CsrAddress = 0xf12;
/// Implementation ID.
const MIMPID: CsrAddress = 0xf13;
/// Hardware thread ID.
const MHARTID: CsrAddress = 0xf14;
// Machine trap setup.
/// Machine status register.
pub const MSTATUS: CsrAddress = 0x300;
/// ISA and extensions.
const MISA: CsrAddress = 0x301;
/// Machine exception delegation register.
pub const MEDELEG: CsrAddress = 0x302;
/// Machine interrupt delegation register.
pub const MIDELEG: CsrAddress = 0x303;
/// Machine interrupt-enable register.
pub const MIE: CsrAddress = 0x304;
/// Machine trap-handler base address.
pub const MTVEC: CsrAddress = 0x305;
/// Machine counter enable.
const _MCOUNTEREN: CsrAddress = 0x306;
// Machine trap handling.
/// Scratch register for machine trap handlers.
const _MSCRATCH: CsrAddress = 0x340;
/// Machine exception program counter.
pub const MEPC: CsrAddress = 0x341;
/// Machine trap cause.
pub const MCAUSE: CsrAddress = 0x342;
/// Machine bad address or instruction.
pub const MTVAL: CsrAddress = 0x343;
/// Machine interrupt pending.
pub const MIP: CsrAddress = 0x344;
// Machine memory protection.
/// Physical memory protection configuration.
const _PMPCFG0: CsrAddress = 0x3a0;
/// Physical memory protection address register.
const _PMPADDR0: CsrAddress = 0x3b0;
// MSTATUS fields.
/// Global interrupt-enable bit for machine mode.
pub const MSTATUS_MIE: CsrFieldRange = 3..=3;
/// Previous interrupt-enable bit for machine mode.
pub const MSTATUS_MPIE: CsrFieldRange = 7..=7;
/// Previous privilege mode for machine mode.
pub const MSTATUS_MPP: CsrFieldRange = 11..=12;
/// Modify privilege bit.
pub const MSTATUS_MPRV: CsrFieldRange = 17..=17;
// MIP fields.
/// Supervisor software interrupt.
pub const SSIP_BIT: u64 = 1 << 1;
/// Machine software interrupt.
pub const MSIP_BIT: u64 = 1 << 3;
/// Supervisor timer interrupt.
pub const STIP_BIT: u64 = 1 << 5;
/// Machine timer interrupt.
pub const MTIP_BIT: u64 = 1 << 7;
/// Supervisor external interrupt.
pub const SEIP_BIT: u64 = 1 << 9;
/// Machine external interrupt.
pub const MEIP_BIT: u64 = 1 << 11;
/// The state that contains all the CSRs.
pub struct State {
csrs: [u64; CSR_SIZE],
}
impl fmt::Display for State {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}",
format!(
"{}\n{}\n{}",
format!(
"mstatus={:>#18x} mtvec={:>#18x} mepc={:>#18x}\n mcause={:>#18x} medeleg={:>#18x} mideleg={:>#18x}",
self.read(MSTATUS),
self.read(MTVEC),
self.read(MEPC),
self.read(MCAUSE),
self.read(MEDELEG),
self.read(MIDELEG),
),
format!(
"sstatus={:>#18x} stvec={:>#18x} sepc={:>#18x}\n scause={:>#18x} sedeleg={:>#18x} sideleg={:>#18x}",
self.read(SSTATUS),
self.read(STVEC),
self.read(SEPC),
self.read(SCAUSE),
self.read(SEDELEG),
self.read(SIDELEG),
),
format!(
"ustatus={:>#18x} utvec={:>#18x} uepc={:>#18x}\n ucause={:>#18x}",
self.read(USTATUS),
self.read(UTVEC),
self.read(UEPC),
self.read(UCAUSE),
),
)
)
}
}
impl State {
/// Create a new `State` object.
pub fn new() -> Self {
let mut csrs = [0; CSR_SIZE];
let misa: u64 = (2 << 62) | // MXL[1:0]=2 (XLEN is 64)
(1 << 20) | // Extensions[20] (User mode implemented)
(1 << 18) | // Extensions[18] (Supervisor mode implemented)
(1 << 12) | // Extensions[12] (Integer Multiply/Divide extension)
(1 << 8) | // Extensions[8] (RV32I/64I/128I base ISA)
(1 << 5) | // Extensions[5] (Single-precision floating-point extension)
(1 << 3) | // Extensions[3] (Double-precision floating-point extension)
(1 << 2) | // Extensions[2] (Compressed extension)
1; // Extensions[0] (Atomic extension)
csrs[MISA as usize] = misa;
Self { csrs }
}
/// Increment the value in the TIME register.
pub fn increment_time(&mut self) {
self.csrs[TIME as usize] = self.csrs[TIME as usize].wrapping_add(1);
}
/// Read the value from the CSR.
pub fn read(&self, addr: CsrAddress) -> u64 {
// 4.1 Supervisor CSRs
// "The supervisor should only view CSR state that should be visible to a supervisor-level
// operating system. In particular, there is no information about the existence (or
// non-existence) of higher privilege levels (machine level or other) visible in the CSRs
// accessible by the supervisor. Many supervisor CSRs are a subset of the equivalent
// machine-mode CSR, and the machine-mode chapter should be read first to help understand
// the supervisor-level CSR descriptions."
match addr {
SSTATUS => self.csrs[MSTATUS as usize] & SSTATUS_MASK,
SIE => self.csrs[MIE as usize] & self.csrs[MIDELEG as usize],
SIP => self.csrs[MIP as usize] & self.csrs[MIDELEG as usize],
_ => self.csrs[addr as usize],
}
}
/// Write the value to the CSR.
pub fn write(&mut self, addr: CsrAddress, val: u64) {
// 4.1 Supervisor CSRs
// "The supervisor should only view CSR state that should be visible to a supervisor-level
// operating system. In particular, there is no information about the existence (or
// non-existence) of higher privilege levels (machine level or other) visible in the CSRs
// accessible by the supervisor. Many supervisor CSRs are a subset of the equivalent
// machine-mode CSR, and the machine-mode chapter should be read first to help understand
// the supervisor-level CSR descriptions."
match addr {
MVENDORID => {}
MARCHID => {}
MIMPID => {}
MHARTID => {}
SSTATUS => {
self.csrs[MSTATUS as usize] =
(self.csrs[MSTATUS as usize] & !SSTATUS_MASK) | (val & SSTATUS_MASK);
}
SIE => {
self.csrs[MIE as usize] = (self.csrs[MIE as usize] & !self.csrs[MIDELEG as usize])
| (val & self.csrs[MIDELEG as usize]);
}
SIP => {
let mask = SSIP_BIT & self.csrs[MIDELEG as usize];
self.csrs[MIP as usize] = (self.csrs[MIP as usize] & !mask) | (val & mask);
}
_ => self.csrs[addr as usize] = val,
}
}
/// Read a bit from the CSR.
pub fn read_bit(&self, addr: CsrAddress, bit: usize) -> u64 {
if bit >= MXLEN {
// TODO: raise exception?
}
if (self.read(addr) & (1 << bit)) != 0 {
1
} else {
0
}
}
/// Read an arbitrary length of bits from the CSR.
pub fn read_bits<T: RangeBounds<usize>>(&self, addr: CsrAddress, range: T) -> u64 {
let range = to_range(&range, MXLEN);
if (range.start >= MXLEN) | (range.end > MXLEN) | (range.start >= range.end) {
// TODO: raise exception?
}
// Bitmask for high bits.
let mut bitmask = 0;
if range.end != 64 {
bitmask = !0 << range.end;
}
// Shift away low bits.
(self.read(addr) as u64 & !bitmask) >> range.start
}
/// Write a bit to the CSR.
pub fn write_bit(&mut self, addr: CsrAddress, bit: usize, val: u64) {
if bit >= MXLEN {
// TODO: raise exception?
}
if val > 1 {
// TODO: raise exception
}
if val == 1 {
self.write(addr, self.read(addr) | 1 << bit);
} else if val == 0 {
self.write(addr, self.read(addr) & !(1 << bit));
}
}
/// Write an arbitrary length of bits to the CSR.
pub fn write_bits<T: RangeBounds<usize>>(&mut self, addr: CsrAddress, range: T, val: u64) {
let range = to_range(&range, MXLEN);
if (range.start >= MXLEN) | (range.end > MXLEN) | (range.start >= range.end) {
// TODO: raise exception?
}
if (val >> (range.end - range.start)) != 0 {
// TODO: raise exception
}
let bitmask = (!0 << range.end) | !(!0 << range.start);
// Set bits.
self.write(addr, (self.read(addr) & bitmask) | (val << range.start))
}
/// Read bit(s) from a given field in the SSTATUS register.
pub fn read_sstatus(&self, range: CsrFieldRange) -> u64 {
self.read_bits(SSTATUS, range)
}
/// Read bit(s) from a given field in the MSTATUS register.
pub fn read_mstatus(&self, range: CsrFieldRange) -> u64 {
self.read_bits(MSTATUS, range)
}
/// Write bit(s) to a given field in the SSTATUS register.
pub fn write_sstatus(&mut self, range: CsrFieldRange, val: u64) {
self.write_bits(SSTATUS, range, val);
}
/// Write bit(s) to a given field in the MSTATUS register.
pub fn write_mstatus(&mut self, range: CsrFieldRange, val: u64) {
self.write_bits(MSTATUS, range, val);
}
/// Reset all the CSRs.
pub fn reset(&mut self) {
self.csrs = [0; CSR_SIZE];
let misa: u64 = (2 << 62) | // MXL[1:0]=2 (XLEN is 64)
(1 << 18) | // Extensions[18] (Supervisor mode implemented)
(1 << 12) | // Extensions[12] (Integer Multiply/Divide extension)
(1 << 8) | // Extensions[8] (RV32I/64I/128I base ISA)
(1 << 5) | // Extensions[5] (Single-precision floating-point extension)
(1 << 3) | // Extensions[3] (Double-precision floating-point extension)
(1 << 2) | // Extensions[2] (Compressed extension)
1; // Extensions[0] (Atomic extension)
self.csrs[MISA as usize] = misa;
}
}
/// Convert a value implementing `RangeBounds` into a `Range`.
fn to_range<T: RangeBounds<usize>>(generic_range: &T, bit_length: usize) -> Range<usize> {
let start = match generic_range.start_bound() {
Bound::Excluded(&val) => val + 1,
Bound::Included(&val) => val,
Bound::Unbounded => 0,
};
let end = match generic_range.end_bound() {
Bound::Excluded(&val) => val,
Bound::Included(&val) => val + 1,
Bound::Unbounded => bit_length,
};
start..end
}
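A small illustrative sketch (not part of the file) of how the masked SSTATUS view and the bit-field helpers above behave. It assumes it lives in the same module, so the private `SSTATUS_MASK` constant is visible, and `csr_example` is a hypothetical name; the assertions simply mirror the read/write logic above.

fn csr_example() {
    let mut state = State::new();

    // Writes to SSTATUS only touch the bits covered by SSTATUS_MASK, so
    // supervisor software never sees or modifies machine-only fields.
    state.write(SSTATUS, u64::MAX);
    assert_eq!(state.read(MSTATUS), SSTATUS_MASK);
    assert_eq!(state.read(SSTATUS), SSTATUS_MASK);

    // write_bits/read_bits operate on inclusive field ranges,
    // e.g. MSTATUS_MPP covers mstatus[12:11].
    state.write_mstatus(MSTATUS_MPP, 0b11);
    assert_eq!(state.read_mstatus(MSTATUS_MPP), 0b11);

    // Single bits go through read_bit/write_bit.
    state.write_bit(MIE, 3, 1); // MSIE
    assert_eq!(state.read_bit(MIE, 3), 1);
}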


@@ -1,240 +0,0 @@
//! The exception module contains all the exception kinds and the function to handle exceptions.
use crate::riscv::{
bus::Bus,
cpu::{Cpu, Mode},
csr::*,
};
use super::bus::XLen;
/// All the exception kinds.
#[derive(Debug, PartialEq)]
pub enum Exception {
/// With the addition of the C extension, no instructions can raise
/// instruction-address-misaligned exceptions.
InstructionAddressMisaligned,
InstructionAccessFault,
IllegalInstruction(u64),
Breakpoint,
LoadAddressMisaligned,
LoadAccessFault,
StoreAMOAddressMisaligned,
StoreAMOAccessFault,
EnvironmentCallFromUMode,
EnvironmentCallFromSMode,
EnvironmentCallFromMMode,
// Stores a trap value (the faulting address) for page fault exceptions.
InstructionPageFault(u64),
LoadPageFault(u64),
StoreAMOPageFault(u64),
}
/// All the trap kinds.
#[derive(Debug)]
pub enum Trap {
/// The trap is visible to, and handled by, software running inside the execution
/// environment.
Contained,
/// The trap is a synchronous exception that is an explicit call to the execution
/// environment requesting an action on behalf of software inside the execution environment.
Requested,
/// The trap is handled transparently by the execution environment and execution
/// resumes normally after the trap is handled.
Invisible,
/// The trap represents a fatal failure and causes the execution environment to terminate
/// execution.
Fatal,
}
impl Exception {
fn exception_code(&self) -> u64 {
match self {
Exception::InstructionAddressMisaligned => 0,
Exception::InstructionAccessFault => 1,
Exception::IllegalInstruction(_) => 2,
Exception::Breakpoint => 3,
Exception::LoadAddressMisaligned => 4,
Exception::LoadAccessFault => 5,
Exception::StoreAMOAddressMisaligned => 6,
Exception::StoreAMOAccessFault => 7,
Exception::EnvironmentCallFromUMode => 8,
Exception::EnvironmentCallFromSMode => 9,
Exception::EnvironmentCallFromMMode => 11,
Exception::InstructionPageFault(_) => 12,
Exception::LoadPageFault(_) => 13,
Exception::StoreAMOPageFault(_) => 15,
}
}
fn epc(&self, pc: u64) -> u64 {
// 3.2.1 Environment Call and Breakpoint
// "ECALL and EBREAK cause the receiving privilege modes epc register to be set to the
// address of the ECALL or EBREAK instruction itself, not the address of the following
// instruction."
match self {
Exception::Breakpoint
| Exception::EnvironmentCallFromUMode
| Exception::EnvironmentCallFromSMode
| Exception::EnvironmentCallFromMMode
// TODO: why do page faults need this?
| Exception::InstructionPageFault(_)
| Exception::LoadPageFault(_)
| Exception::StoreAMOPageFault(_) => pc,
_ => pc.wrapping_add(4),
}
}
fn trap_value(&self, pc: u64) -> u64 {
// 3.1.17 Machine Trap Value Register (mtval)
// 4.1.9 Supervisor Trap Value Register (stval)
// "When a hardware breakpoint is triggered, or an address-misaligned, access-fault, or
// page-fault exception occurs on an instruction fetch, load, or store, mtval (stval) is
// written with the faulting virtual address. On an illegal instruction trap, mtval (stval)
// may be written with the first XLEN or ILEN bits of the faulting instruction as described
// below. For other traps, mtval (stval) is set to zero, but a future standard may redefine
// mtval's (stval's) setting for other traps."
match self {
Exception::InstructionAddressMisaligned
| Exception::InstructionAccessFault
| Exception::Breakpoint
| Exception::LoadAddressMisaligned
| Exception::LoadAccessFault
| Exception::StoreAMOAddressMisaligned
| Exception::StoreAMOAccessFault => pc,
Exception::InstructionPageFault(val)
| Exception::LoadPageFault(val)
| Exception::StoreAMOPageFault(val) => *val,
Exception::IllegalInstruction(val) => *val,
_ => 0,
}
}
/// Update CSRs and the program counter depending on an exception.
pub fn take_trap<Bus>(&self, cpu: &mut Cpu<Bus>) -> Trap
where
Bus: crate::riscv::bus::Bus<XLen>,
{
// 1.2 Privilege Levels
// "Traps that increase privilege level are termed vertical traps, while traps that remain
// at the same privilege level are termed horizontal traps."
let exception_pc = self.epc(cpu.pc);
let previous_mode = cpu.mode;
let cause = self.exception_code();
// 3.1.8 Machine Trap Delegation Registers (medeleg and mideleg)
// "By default, all traps at any privilege level are handled in machine mode"
// "To increase performance, implementations can provide individual read/write bits within
// medeleg and mideleg to indicate that certain exceptions and interrupts should be
// processed directly by a lower privilege level."
//
// "medeleg has a bit position allocated for every synchronous exception shown in Table 3.6
// on page 37, with the index of the bit position equal to the value returned in the mcause
// register (i.e., setting bit 8 allows user-mode environment calls to be delegated to a
// lower-privilege trap handler)."
if previous_mode <= Mode::Supervisor && ((cpu.state.read(MEDELEG) >> cause) & 1) == 1 {
// Handle the trap in S-mode.
cpu.mode = Mode::Supervisor;
// Set the program counter to the supervisor trap-handler base address (stvec).
cpu.pc = (cpu.state.read(STVEC) & !1) as u64;
// 4.1.9 Supervisor Exception Program Counter (sepc)
// "The low bit of sepc (sepc[0]) is always zero."
// "When a trap is taken into S-mode, sepc is written with the virtual address of
// the instruction that was interrupted or that encountered the exception.
// Otherwise, sepc is never written by the implementation, though it may be
// explicitly written by software."
cpu.state.write(SEPC, exception_pc & !1);
// 4.1.10 Supervisor Cause Register (scause)
// "When a trap is taken into S-mode, scause is written with a code indicating
// the event that caused the trap. Otherwise, scause is never written by the
// implementation, though it may be explicitly written by software."
cpu.state.write(SCAUSE, cause);
// 4.1.11 Supervisor Trap Value (stval) Register
// "When a trap is taken into S-mode, stval is written with exception-specific
// information to assist software in handling the trap. Otherwise, stval is never
// written by the implementation, though it may be explicitly written by software."
cpu.state.write(STVAL, self.trap_value(exception_pc));
// Set a previous interrupt-enable bit for supervisor mode (SPIE, 5) to the value
// of a global interrupt-enable bit for supervisor mode (SIE, 1).
cpu.state
.write_sstatus(XSTATUS_SPIE, cpu.state.read_sstatus(XSTATUS_SIE));
// Set a global interrupt-enable bit for supervisor mode (SIE, 1) to 0.
cpu.state.write_sstatus(XSTATUS_SIE, 0);
// 4.1.1 Supervisor Status Register (sstatus)
// "When a trap is taken, SPP is set to 0 if the trap originated from user mode, or
// 1 otherwise."
match previous_mode {
Mode::User => cpu.state.write_sstatus(XSTATUS_SPP, 0),
_ => cpu.state.write_sstatus(XSTATUS_SPP, 1),
}
} else {
// Handle the trap in M-mode.
cpu.mode = Mode::Machine;
// Set the program counter to the machine trap-handler base address (mtvec).
cpu.pc = (cpu.state.read(MTVEC) & !1) as u64;
// 3.1.15 Machine Exception Program Counter (mepc)
// "The low bit of mepc (mepc[0]) is always zero."
// "When a trap is taken into M-mode, mepc is written with the virtual address of
// the instruction that was interrupted or that encountered the exception.
// Otherwise, mepc is never written by the implementation, though it may be
// explicitly written by software."
cpu.state.write(MEPC, exception_pc & !1);
// 3.1.16 Machine Cause Register (mcause)
// "When a trap is taken into M-mode, mcause is written with a code indicating
// the event that caused the trap. Otherwise, mcause is never written by the
// implementation, though it may be explicitly written by software."
cpu.state.write(MCAUSE, cause);
// 3.1.17 Machine Trap Value (mtval) Register
// "When a trap is taken into M-mode, mtval is either set to zero or written with
// exception-specific information to assist software in handling the trap.
// Otherwise, mtval is never written by the implementation, though it may be
// explicitly written by software."
cpu.state.write(MTVAL, self.trap_value(exception_pc));
// Set a previous interrupt-enable bit for machine mode (MPIE, 7) to the value
// of a global interrupt-enable bit for machine mode (MIE, 3).
cpu.state
.write_mstatus(MSTATUS_MPIE, cpu.state.read_mstatus(MSTATUS_MIE));
// Set a global interrupt-enable bit for machine mode (MIE, 3) to 0.
cpu.state.write_mstatus(MSTATUS_MIE, 0);
// When a trap is taken from privilege mode y into privilege mode x, xPIE is set
// to the value of xIE; xIE is set to 0; and xPP is set to y.
match previous_mode {
Mode::User => cpu.state.write_mstatus(MSTATUS_MPP, Mode::User as u64),
Mode::Supervisor => cpu
.state
.write_mstatus(MSTATUS_MPP, Mode::Supervisor as u64),
Mode::Machine => cpu.state.write_mstatus(MSTATUS_MPP, Mode::Machine as u64),
_ => panic!("previous privilege mode is invalid"),
}
}
match self {
Exception::InstructionAddressMisaligned | Exception::InstructionAccessFault => {
Trap::Fatal
}
Exception::IllegalInstruction(_) => Trap::Invisible,
Exception::Breakpoint => Trap::Requested,
Exception::LoadAddressMisaligned
| Exception::LoadAccessFault
| Exception::StoreAMOAddressMisaligned
| Exception::StoreAMOAccessFault => Trap::Fatal,
Exception::EnvironmentCallFromUMode
| Exception::EnvironmentCallFromSMode
| Exception::EnvironmentCallFromMMode => Trap::Requested,
Exception::InstructionPageFault(_)
| Exception::LoadPageFault(_)
| Exception::StoreAMOPageFault(_) => Trap::Invisible,
}
}
}
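The delegation check in `take_trap` (a single `medeleg` bit indexed by the exception code) is easy to miss inline. Below is a standalone sketch of the same decision, purely for illustration: `delegated_to_supervisor` is a hypothetical helper, and it assumes `Mode` supports the ordering comparison that `take_trap` already relies on.

/// Illustrative only: should a synchronous exception with this cause code be
/// handled in S-mode? `current_mode` and `medeleg` mirror `cpu.mode` and
/// `cpu.state.read(MEDELEG)` in `take_trap` above.
fn delegated_to_supervisor(current_mode: Mode, medeleg: u64, cause: u64) -> bool {
    // Traps taken while already in M-mode are never delegated downwards.
    current_mode <= Mode::Supervisor && ((medeleg >> cause) & 1) == 1
}

// Example: a user-mode ECALL (cause 8) is delegated when medeleg bit 8 is set:
// delegated_to_supervisor(Mode::User, 1 << 8, 8) == true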

src/riscv/memory.rs Normal file

@@ -0,0 +1,169 @@
/// Emulates main memory.
pub struct Memory {
/// Memory content
data: Vec<u64>
}
impl Memory {
/// Creates a new `Memory`
pub fn new() -> Self {
Memory {
data: vec![]
}
}
/// Initializes memory content.
/// This method is expected to be called only once.
///
/// # Arguments
/// * `capacity`
pub fn init(&mut self, capacity: u64) {
for _i in 0..((capacity + 7) / 8) {
self.data.push(0);
}
}
/// Reads a byte from memory.
///
/// # Arguments
/// * `address`
pub fn read_byte(&self, address: u64) -> u8 {
let index = (address >> 3) as usize;
let pos = ((address % 8) as u64) * 8;
(self.data[index] >> pos) as u8
}
/// Reads two bytes from memory.
///
/// # Arguments
/// * `address`
pub fn read_halfword(&self, address: u64) -> u16 {
if (address % 2) == 0 {
let index = (address >> 3) as usize;
let pos = ((address % 8) as u64) * 8;
(self.data[index] >> pos) as u16
} else {
self.read_bytes(address, 2) as u16
}
}
/// Reads four bytes from memory.
///
/// # Arguments
/// * `address`
pub fn read_word(&self, address: u64) -> u32 {
if (address % 4) == 0 {
let index = (address >> 3) as usize;
let pos = ((address % 8) as u64) * 8;
(self.data[index] >> pos) as u32
} else {
self.read_bytes(address, 4) as u32
}
}
/// Reads eight bytes from memory.
///
/// # Arguments
/// * `address`
pub fn read_doubleword(&self, address: u64) -> u64 {
if (address % 8) == 0 {
let index = (address >> 3) as usize;
self.data[index]
} else if (address % 4) == 0 {
(self.read_word(address) as u64) | ((self.read_word(address.wrapping_add(4)) as u64) << 32)
} else {
self.read_bytes(address, 8)
}
}
/// Reads multiple bytes from memory.
///
/// # Arguments
/// * `address`
/// * `width` up to eight
pub fn read_bytes(&self, address: u64, width: u64) -> u64 {
let mut data = 0 as u64;
for i in 0..width {
data |= (self.read_byte(address.wrapping_add(i)) as u64) << (i * 8);
}
data
}
/// Writes a byte to memory.
///
/// # Arguments
/// * `address`
/// * `value`
pub fn write_byte(&mut self, address: u64, value: u8) {
let index = (address >> 3) as usize;
let pos = ((address % 8) as u64) * 8;
self.data[index] = (self.data[index] & !(0xff << pos)) | ((value as u64) << pos);
}
/// Writes two bytes to memory.
///
/// # Arguments
/// * `address`
/// * `value`
pub fn write_halfword(&mut self, address: u64, value: u16) {
if (address % 2) == 0 {
let index = (address >> 3) as usize;
let pos = ((address % 8) as u64) * 8;
self.data[index] = (self.data[index] & !(0xffff << pos)) | ((value as u64) << pos);
} else {
self.write_bytes(address, value as u64, 2);
}
}
/// Writes four bytes to memory.
///
/// # Arguments
/// * `address`
/// * `value`
pub fn write_word(&mut self, address: u64, value: u32) {
if (address % 4) == 0 {
let index = (address >> 3) as usize;
let pos = ((address % 8) as u64) * 8;
self.data[index] = (self.data[index] & !(0xffffffff << pos)) | ((value as u64) << pos);
} else {
self.write_bytes(address, value as u64, 4);
}
}
/// Writes eight bytes to memory.
///
/// # Arguments
/// * `address`
/// * `value`
pub fn write_doubleword(&mut self, address: u64, value: u64) {
if (address % 8) == 0 {
let index = (address >> 3) as usize;
self.data[index] = value;
} else if (address % 4) == 0 {
self.write_word(address, (value & 0xffffffff) as u32);
self.write_word(address.wrapping_add(4), (value >> 32) as u32);
} else {
self.write_bytes(address, value, 8);
}
}
/// Write multiple bytes to memory.
///
/// # Arguments
/// * `address`
/// * `value`
/// * `width` up to eight
pub fn write_bytes(&mut self, address: u64, value: u64, width: u64) {
for i in 0..width {
self.write_byte(address.wrapping_add(i), (value >> (i * 8)) as u8);
}
}
/// Checks if the address is a valid memory address.
///
/// # Arguments
/// * `address`
pub fn validate_address(&self, address: u64) -> bool {
// `data` holds u64 words, so the valid byte range is 8 * data.len().
(address as usize) < self.data.len() * 8
}
}
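Since `Memory` packs eight bytes into each `u64` element, all of the accessors above share the same index/shift arithmetic. A short round-trip sketch, illustrative only and not part of the file (`memory_layout_example` is a hypothetical name):

fn memory_layout_example() {
    let mut memory = Memory::new();
    memory.init(64); // 64 bytes -> 8 u64 words

    // A word written at address 4 lands in the upper half of data[0]:
    // index = 4 >> 3 = 0, pos = (4 % 8) * 8 = 32.
    memory.write_word(4, 0xdead_beef);
    assert_eq!(memory.read_byte(4), 0xef); // little-endian: low byte first
    assert_eq!(memory.read_byte(7), 0xde);
    assert_eq!(memory.read_word(4), 0xdead_beef);

    // Unaligned accesses fall back to the byte-at-a-time path.
    memory.write_halfword(3, 0x1234);
    assert_eq!(memory.read_byte(3), 0x34);
    assert_eq!(memory.read_byte(4), 0x12);
}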

src/riscv/mmu.rs Normal file

@@ -0,0 +1,962 @@
/// DRAM base address. Offset from this base address
/// is the address in main memory.
pub const DRAM_BASE: u64 = 0x80000000;
use std::collections::HashMap;
use crate::riscv::{
cpu::{get_privilege_mode, PrivilegeMode, Trap, TrapType, Xlen},
memory::Memory,
};
/// Emulates the Memory Management Unit. It holds main memory and peripheral
/// devices, maps addresses to them, and accesses them depending on the address.
/// It also manages virtual-to-physical address translation and memory protection.
/// It could also be thought of as a bus.
/// @TODO: Memory protection is not implemented yet.
pub struct Mmu {
clock: u64,
xlen: Xlen,
ppn: u64,
addressing_mode: AddressingMode,
privilege_mode: PrivilegeMode,
memory: MemoryWrapper,
/// Address translation can be affected by `mstatus` (MPRV and MPP in machine
/// mode), so `Mmu` keeps a copy of it.
mstatus: u64,
/// Address translation page cache. Experimental feature.
/// The cache is cleared whenever the translation mapping may change, i.e.
/// when xlen, ppn, privilege_mode, or addressing_mode is updated.
/// Strictly speaking this is not sufficient, because page table entries can
/// be updated at any time by store instructions, depending on how pages are
/// mapped. Observing every page table entry would be expensive, so that case
/// is ignored for now. As a result the cache can misbehave when a page table
/// entry is updated outside a page fault handler, so this optimization is
/// experimental and disabled by default. To enable it, use
/// `enable_page_cache()`.
page_cache_enabled: bool,
fetch_page_cache: HashMap<u64, u64>,
load_page_cache: HashMap<u64, u64>,
store_page_cache: HashMap<u64, u64>,
}
pub enum AddressingMode {
None,
SV32,
SV39,
SV48, // @TODO: Implement
}
enum MemoryAccessType {
Execute,
Read,
Write,
DontCare,
}
fn _get_addressing_mode_name(mode: &AddressingMode) -> &'static str {
match mode {
AddressingMode::None => "None",
AddressingMode::SV32 => "SV32",
AddressingMode::SV39 => "SV39",
AddressingMode::SV48 => "SV48",
}
}
impl Mmu {
/// Creates a new `Mmu`.
///
/// # Arguments
/// * `xlen`
/// * `terminal`
pub fn new(xlen: Xlen) -> Self {
// // Load default device tree binary content
// let content = include_bytes!("./device/dtb.dtb");
// for i in 0..content.len() {
// dtb[i] = content[i];
// }
Mmu {
clock: 0,
xlen,
ppn: 0,
addressing_mode: AddressingMode::None,
privilege_mode: PrivilegeMode::Machine,
memory: MemoryWrapper::new(),
mstatus: 0,
page_cache_enabled: false,
fetch_page_cache: HashMap::default(),
load_page_cache: HashMap::default(),
store_page_cache: HashMap::default(),
}
}
/// Updates XLEN, 32-bit or 64-bit
///
/// # Arguments
/// * `xlen`
pub fn update_xlen(&mut self, xlen: Xlen) {
self.xlen = xlen;
self.clear_page_cache();
}
/// Initializes Main memory. This method is expected to be called only once.
///
/// # Arguments
/// * `capacity`
pub fn init_memory(&mut self, capacity: u64) {
self.memory.init(capacity);
}
/// Enables or disables page cache optimization.
///
/// # Arguments
/// * `enabled`
pub fn enable_page_cache(&mut self, enabled: bool) {
self.page_cache_enabled = enabled;
self.clear_page_cache();
}
/// Clears page cache entries
fn clear_page_cache(&mut self) {
self.fetch_page_cache.clear();
self.load_page_cache.clear();
self.store_page_cache.clear();
}
/// Runs one cycle of MMU and peripheral devices.
pub fn tick(&mut self, mip: &mut u64) {}
/// Updates addressing mode
///
/// # Arguments
/// * `new_addressing_mode`
pub fn update_addressing_mode(&mut self, new_addressing_mode: AddressingMode) {
self.addressing_mode = new_addressing_mode;
self.clear_page_cache();
}
/// Updates privilege mode
///
/// # Arguments
/// * `mode`
pub fn update_privilege_mode(&mut self, mode: PrivilegeMode) {
self.privilege_mode = mode;
self.clear_page_cache();
}
/// Updates mstatus copy. `CPU` needs to call this method whenever
/// `mstatus` is updated.
///
/// # Arguments
/// * `mstatus`
pub fn update_mstatus(&mut self, mstatus: u64) {
self.mstatus = mstatus;
}
/// Updates PPN used for address translation
///
/// # Arguments
/// * `ppn`
pub fn update_ppn(&mut self, ppn: u64) {
self.ppn = ppn;
self.clear_page_cache();
}
fn get_effective_address(&self, address: u64) -> u64 {
match self.xlen {
Xlen::Bit32 => address & 0xffffffff,
Xlen::Bit64 => address,
}
}
/// Fetches an instruction byte. This method takes virtual address
/// and translates into physical address inside.
///
/// # Arguments
/// * `v_address` Virtual address
fn fetch(&mut self, v_address: u64) -> Result<u8, Trap> {
match self.translate_address(v_address, &MemoryAccessType::Execute) {
Ok(p_address) => Ok(self.load_raw(p_address)),
Err(()) => {
return Err(Trap {
trap_type: TrapType::InstructionPageFault,
value: v_address,
})
}
}
}
/// Fetches four instruction bytes. This method takes virtual address
/// and translates into physical address inside.
///
/// # Arguments
/// * `v_address` Virtual address
pub fn fetch_word(&mut self, v_address: u64) -> Result<u32, Trap> {
let width = 4;
match (v_address & 0xfff) <= (0x1000 - width) {
true => {
// Fast path: all bytes fetched are in the same page, so the
// address is translated only once.
let effective_address = self.get_effective_address(v_address);
match self.translate_address(effective_address, &MemoryAccessType::Execute) {
Ok(p_address) => Ok(self.load_word_raw(p_address)),
Err(()) => Err(Trap {
trap_type: TrapType::InstructionPageFault,
value: effective_address,
}),
}
}
false => {
let mut data = 0 as u32;
for i in 0..width {
match self.fetch(v_address.wrapping_add(i)) {
Ok(byte) => data |= (byte as u32) << (i * 8),
Err(e) => return Err(e),
};
}
Ok(data)
}
}
}
/// Loads a byte. This method takes virtual address and translates
/// into physical address inside.
///
/// # Arguments
/// * `v_address` Virtual address
pub fn load(&mut self, v_address: u64) -> Result<u8, Trap> {
let effective_address = self.get_effective_address(v_address);
match self.translate_address(effective_address, &MemoryAccessType::Read) {
Ok(p_address) => Ok(self.load_raw(p_address)),
Err(()) => Err(Trap {
trap_type: TrapType::LoadPageFault,
value: v_address,
}),
}
}
/// Loads multiple bytes. This method takes virtual address and translates
/// into physical address inside.
///
/// # Arguments
/// * `v_address` Virtual address
/// * `width` Must be 1, 2, 4, or 8
fn load_bytes(&mut self, v_address: u64, width: u64) -> Result<u64, Trap> {
debug_assert!(
width == 1 || width == 2 || width == 4 || width == 8,
"Width must be 1, 2, 4, or 8. {:X}",
width
);
match (v_address & 0xfff) <= (0x1000 - width) {
true => match self.translate_address(v_address, &MemoryAccessType::Read) {
Ok(p_address) => {
// Fast path: all bytes fetched are in the same page, so the
// address is translated only once.
match width {
1 => Ok(self.load_raw(p_address) as u64),
2 => Ok(self.load_halfword_raw(p_address) as u64),
4 => Ok(self.load_word_raw(p_address) as u64),
8 => Ok(self.load_doubleword_raw(p_address)),
_ => panic!("Width must be 1, 2, 4, or 8. {:X}", width),
}
}
Err(()) => Err(Trap {
trap_type: TrapType::LoadPageFault,
value: v_address,
}),
},
false => {
let mut data = 0 as u64;
for i in 0..width {
match self.load(v_address.wrapping_add(i)) {
Ok(byte) => data |= (byte as u64) << (i * 8),
Err(e) => return Err(e),
};
}
Ok(data)
}
}
}
/// Loads two bytes. This method takes virtual address and translates
/// into physical address inside.
///
/// # Arguments
/// * `v_address` Virtual address
pub fn load_halfword(&mut self, v_address: u64) -> Result<u16, Trap> {
match self.load_bytes(v_address, 2) {
Ok(data) => Ok(data as u16),
Err(e) => Err(e),
}
}
/// Loads four bytes. This method takes virtual address and translates
/// into physical address inside.
///
/// # Arguments
/// * `v_address` Virtual address
pub fn load_word(&mut self, v_address: u64) -> Result<u32, Trap> {
match self.load_bytes(v_address, 4) {
Ok(data) => Ok(data as u32),
Err(e) => Err(e),
}
}
/// Loads eight bytes. This method takes virtual address and translates
/// into physical address inside.
///
/// # Arguments
/// * `v_address` Virtual address
pub fn load_doubleword(&mut self, v_address: u64) -> Result<u64, Trap> {
match self.load_bytes(v_address, 8) {
Ok(data) => Ok(data as u64),
Err(e) => Err(e),
}
}
/// Stores a byte. This method takes virtual address and translates
/// into physical address inside.
///
/// # Arguments
/// * `v_address` Virtual address
/// * `value`
pub fn store(&mut self, v_address: u64, value: u8) -> Result<(), Trap> {
match self.translate_address(v_address, &MemoryAccessType::Write) {
Ok(p_address) => {
self.store_raw(p_address, value);
Ok(())
}
Err(()) => Err(Trap {
trap_type: TrapType::StorePageFault,
value: v_address,
}),
}
}
/// Stores multiple bytes. This method takes virtual address and translates
/// into physical address inside.
///
/// # Arguments
/// * `v_address` Virtual address
/// * `value` data written
/// * `width` Must be 1, 2, 4, or 8
fn store_bytes(&mut self, v_address: u64, value: u64, width: u64) -> Result<(), Trap> {
debug_assert!(
width == 1 || width == 2 || width == 4 || width == 8,
"Width must be 1, 2, 4, or 8. {:X}",
width
);
match (v_address & 0xfff) <= (0x1000 - width) {
true => match self.translate_address(v_address, &MemoryAccessType::Write) {
Ok(p_address) => {
// Fast path: all bytes stored are in the same page, so the
// address is translated only once.
match width {
1 => self.store_raw(p_address, value as u8),
2 => self.store_halfword_raw(p_address, value as u16),
4 => self.store_word_raw(p_address, value as u32),
8 => self.store_doubleword_raw(p_address, value),
_ => panic!("Width must be 1, 2, 4, or 8. {:X}", width),
}
Ok(())
}
Err(()) => Err(Trap {
trap_type: TrapType::StorePageFault,
value: v_address,
}),
},
false => {
for i in 0..width {
match self.store(v_address.wrapping_add(i), ((value >> (i * 8)) & 0xff) as u8) {
Ok(()) => {}
Err(e) => return Err(e),
}
}
Ok(())
}
}
}
/// Stores two bytes. This method takes virtual address and translates
/// into physical address inside.
///
/// # Arguments
/// * `v_address` Virtual address
/// * `value` data written
pub fn store_halfword(&mut self, v_address: u64, value: u16) -> Result<(), Trap> {
self.store_bytes(v_address, value as u64, 2)
}
/// Stores four bytes. This method takes virtual address and translates
/// into physical address inside.
///
/// # Arguments
/// * `v_address` Virtual address
/// * `value` data written
pub fn store_word(&mut self, v_address: u64, value: u32) -> Result<(), Trap> {
self.store_bytes(v_address, value as u64, 4)
}
/// Stores eight bytes. This method takes virtual address and translates
/// into physical address inside.
///
/// # Arguments
/// * `v_address` Virtual address
/// * `value` data written
pub fn store_doubleword(&mut self, v_address: u64, value: u64) -> Result<(), Trap> {
self.store_bytes(v_address, value as u64, 8)
}
/// Loads a byte from main memory or peripheral devices depending on
/// physical address.
///
/// # Arguments
/// * `p_address` Physical address
fn load_raw(&mut self, p_address: u64) -> u8 {
let effective_address = self.get_effective_address(p_address);
// @TODO: Mapping should be configurable with dtb
match effective_address >= DRAM_BASE {
true => self.memory.read_byte(effective_address),
false => match effective_address {
// // I don't know why but dtb data seems to be stored from 0x1020 on Linux.
// // It might be from self.x[0xb] initialization?
// // And the DTB size is arbitrary.
// 0x00001020..=0x00001fff => self.dtb[effective_address as usize - 0x1020],
// 0x02000000..=0x0200ffff => self.clint.load(effective_address),
// 0x0C000000..=0x0fffffff => self.plic.load(effective_address),
// 0x10000000..=0x100000ff => self.uart.load(effective_address),
// 0x10001000..=0x10001FFF => self.disk.load(effective_address),
_ => panic!("Unknown memory mapping {:X}.", effective_address),
},
}
}
/// Loads two bytes from main memory or peripheral devices depending on
/// physical address.
///
/// # Arguments
/// * `p_address` Physical address
fn load_halfword_raw(&mut self, p_address: u64) -> u16 {
let effective_address = self.get_effective_address(p_address);
match effective_address >= DRAM_BASE
&& effective_address.wrapping_add(1) > effective_address
{
// Fast path: load directly from main memory in a single access.
true => self.memory.read_halfword(effective_address),
false => {
let mut data = 0 as u16;
for i in 0..2 {
data |= (self.load_raw(effective_address.wrapping_add(i)) as u16) << (i * 8)
}
data
}
}
}
/// Loads four bytes from main memory or peripheral devices depending on
/// physical address.
///
/// # Arguments
/// * `p_address` Physical address
pub fn load_word_raw(&mut self, p_address: u64) -> u32 {
let effective_address = self.get_effective_address(p_address);
match effective_address >= DRAM_BASE
&& effective_address.wrapping_add(3) > effective_address
{
// Fast path: load directly from main memory in a single access.
true => self.memory.read_word(effective_address),
false => {
let mut data = 0 as u32;
for i in 0..4 {
data |= (self.load_raw(effective_address.wrapping_add(i)) as u32) << (i * 8)
}
data
}
}
}
/// Loads eight bytes from main memory or peripheral devices depending on
/// physical address.
///
/// # Arguments
/// * `p_address` Physical address
fn load_doubleword_raw(&mut self, p_address: u64) -> u64 {
let effective_address = self.get_effective_address(p_address);
match effective_address >= DRAM_BASE
&& effective_address.wrapping_add(7) > effective_address
{
// Fast path: load directly from main memory in a single access.
true => self.memory.read_doubleword(effective_address),
false => {
let mut data = 0 as u64;
for i in 0..8 {
data |= (self.load_raw(effective_address.wrapping_add(i)) as u64) << (i * 8)
}
data
}
}
}
/// Stores a byte to main memory or peripheral devices depending on
/// physical address.
///
/// # Arguments
/// * `p_address` Physical address
/// * `value` data written
pub fn store_raw(&mut self, p_address: u64, value: u8) {
let effective_address = self.get_effective_address(p_address);
// @TODO: Mapping should be configurable with dtb
match effective_address >= DRAM_BASE {
true => self.memory.write_byte(effective_address, value),
false => match effective_address {
// 0x02000000..=0x0200ffff => self.clint.store(effective_address, value),
// 0x0c000000..=0x0fffffff => self.plic.store(effective_address, value),
// 0x10000000..=0x100000ff => self.uart.store(effective_address, value),
// 0x10001000..=0x10001FFF => self.disk.store(effective_address, value),
_ => panic!("Unknown memory mapping {:X}.", effective_address),
},
};
}
/// Stores two bytes to main memory or peripheral devices depending on
/// physical address.
///
/// # Arguments
/// * `p_address` Physical address
/// * `value` data written
fn store_halfword_raw(&mut self, p_address: u64, value: u16) {
let effective_address = self.get_effective_address(p_address);
match effective_address >= DRAM_BASE
&& effective_address.wrapping_add(1) > effective_address
{
// Fast path: store directly to main memory in a single access.
true => self.memory.write_halfword(effective_address, value),
false => {
for i in 0..2 {
self.store_raw(
effective_address.wrapping_add(i),
((value >> (i * 8)) & 0xff) as u8,
);
}
}
}
}
/// Stores four bytes to main memory or peripheral devices depending on
/// physical address.
///
/// # Arguments
/// * `p_address` Physical address
/// * `value` data written
fn store_word_raw(&mut self, p_address: u64, value: u32) {
let effective_address = self.get_effective_address(p_address);
match effective_address >= DRAM_BASE
&& effective_address.wrapping_add(3) > effective_address
{
// Fast path: store directly to main memory in a single access.
true => self.memory.write_word(effective_address, value),
false => {
for i in 0..4 {
self.store_raw(
effective_address.wrapping_add(i),
((value >> (i * 8)) & 0xff) as u8,
);
}
}
}
}
/// Stores eight bytes to main memory or peripheral devices depending on
/// physical address.
///
/// # Arguments
/// * `p_address` Physical address
/// * `value` data written
fn store_doubleword_raw(&mut self, p_address: u64, value: u64) {
let effective_address = self.get_effective_address(p_address);
match effective_address >= DRAM_BASE
&& effective_address.wrapping_add(7) > effective_address
{
// Fast path: store directly to main memory in a single access.
true => self.memory.write_doubleword(effective_address, value),
false => {
for i in 0..8 {
self.store_raw(
effective_address.wrapping_add(i),
((value >> (i * 8)) & 0xff) as u8,
);
}
}
}
}
/// Checks whether the passed virtual address is valid (i.e. points at some device).
/// This method can return page fault trap.
///
/// # Arguments
/// * `v_address` Virtual address
pub fn validate_address(&mut self, v_address: u64) -> Result<bool, ()> {
// @TODO: Support other access types?
let p_address = match self.translate_address(v_address, &MemoryAccessType::DontCare) {
Ok(address) => address,
Err(()) => return Err(()),
};
let effective_address = self.get_effective_address(p_address);
let valid = match effective_address >= DRAM_BASE {
true => self.memory.validate_address(effective_address),
false => match effective_address {
0x00001020..=0x00001fff => true,
0x02000000..=0x0200ffff => true,
0x0C000000..=0x0fffffff => true,
0x10000000..=0x100000ff => true,
0x10001000..=0x10001FFF => true,
_ => false,
},
};
Ok(valid)
}
fn translate_address(
&mut self,
v_address: u64,
access_type: &MemoryAccessType,
) -> Result<u64, ()> {
let address = self.get_effective_address(v_address);
let v_page = address & !0xfff;
let cache = match self.page_cache_enabled {
true => match access_type {
MemoryAccessType::Execute => self.fetch_page_cache.get(&v_page),
MemoryAccessType::Read => self.load_page_cache.get(&v_page),
MemoryAccessType::Write => self.store_page_cache.get(&v_page),
MemoryAccessType::DontCare => None,
},
false => None,
};
match cache {
Some(p_page) => Ok(p_page | (address & 0xfff)),
None => {
let p_address = match self.addressing_mode {
AddressingMode::None => Ok(address),
AddressingMode::SV32 => match self.privilege_mode {
// @TODO: Optimize
PrivilegeMode::Machine => match access_type {
MemoryAccessType::Execute => Ok(address),
// @TODO: Remove magic number
_ => match (self.mstatus >> 17) & 1 {
0 => Ok(address),
_ => {
let privilege_mode =
get_privilege_mode((self.mstatus >> 9) & 3);
match privilege_mode {
PrivilegeMode::Machine => Ok(address),
_ => {
let current_privilege_mode =
self.privilege_mode.clone();
self.update_privilege_mode(privilege_mode);
let result =
self.translate_address(v_address, access_type);
self.update_privilege_mode(current_privilege_mode);
result
}
}
}
},
},
PrivilegeMode::User | PrivilegeMode::Supervisor => {
let vpns = [(address >> 12) & 0x3ff, (address >> 22) & 0x3ff];
self.traverse_page(address, 2 - 1, self.ppn, &vpns, &access_type)
}
_ => Ok(address),
},
AddressingMode::SV39 => match self.privilege_mode {
// @TODO: Optimize
// @TODO: Remove duplicated code with SV32
PrivilegeMode::Machine => match access_type {
MemoryAccessType::Execute => Ok(address),
// @TODO: Remove magic number
_ => match (self.mstatus >> 17) & 1 {
0 => Ok(address),
_ => {
let privilege_mode =
get_privilege_mode((self.mstatus >> 9) & 3);
match privilege_mode {
PrivilegeMode::Machine => Ok(address),
_ => {
let current_privilege_mode =
self.privilege_mode.clone();
self.update_privilege_mode(privilege_mode);
let result =
self.translate_address(v_address, access_type);
self.update_privilege_mode(current_privilege_mode);
result
}
}
}
},
},
PrivilegeMode::User | PrivilegeMode::Supervisor => {
let vpns = [
(address >> 12) & 0x1ff,
(address >> 21) & 0x1ff,
(address >> 30) & 0x1ff,
];
self.traverse_page(address, 3 - 1, self.ppn, &vpns, &access_type)
}
_ => Ok(address),
},
AddressingMode::SV48 => {
panic!("AddressingMode SV48 is not supported yet.");
}
};
match self.page_cache_enabled {
true => match p_address {
Ok(p_address) => {
let p_page = p_address & !0xfff;
match access_type {
MemoryAccessType::Execute => {
self.fetch_page_cache.insert(v_page, p_page)
}
MemoryAccessType::Read => {
self.load_page_cache.insert(v_page, p_page)
}
MemoryAccessType::Write => {
self.store_page_cache.insert(v_page, p_page)
}
MemoryAccessType::DontCare => None,
};
Ok(p_address)
}
Err(()) => Err(()),
},
false => p_address,
}
}
}
}
fn traverse_page(
&mut self,
v_address: u64,
level: u8,
parent_ppn: u64,
vpns: &[u64],
access_type: &MemoryAccessType,
) -> Result<u64, ()> {
let pagesize = 4096;
let ptesize = match self.addressing_mode {
AddressingMode::SV32 => 4,
_ => 8,
};
let pte_address = parent_ppn * pagesize + vpns[level as usize] * ptesize;
let pte = match self.addressing_mode {
AddressingMode::SV32 => self.load_word_raw(pte_address) as u64,
_ => self.load_doubleword_raw(pte_address),
};
let ppn = match self.addressing_mode {
AddressingMode::SV32 => (pte >> 10) & 0x3fffff,
_ => (pte >> 10) & 0xfffffffffff,
};
let ppns = match self.addressing_mode {
AddressingMode::SV32 => [(pte >> 10) & 0x3ff, (pte >> 20) & 0xfff, 0 /*dummy*/],
AddressingMode::SV39 => [
(pte >> 10) & 0x1ff,
(pte >> 19) & 0x1ff,
(pte >> 28) & 0x3ffffff,
],
_ => panic!(), // Shouldn't happen
};
let _rsw = (pte >> 8) & 0x3;
let d = (pte >> 7) & 1;
let a = (pte >> 6) & 1;
let _g = (pte >> 5) & 1;
let _u = (pte >> 4) & 1;
let x = (pte >> 3) & 1;
let w = (pte >> 2) & 1;
let r = (pte >> 1) & 1;
let v = pte & 1;
// println!("VA:{:X} Level:{:X} PTE_AD:{:X} PTE:{:X} PPPN:{:X} PPN:{:X} PPN1:{:X} PPN0:{:X}", v_address, level, pte_address, pte, parent_ppn, ppn, ppns[1], ppns[0]);
if v == 0 || (r == 0 && w == 1) {
return Err(());
}
if r == 0 && x == 0 {
return match level {
0 => Err(()),
_ => self.traverse_page(v_address, level - 1, ppn, vpns, access_type),
};
}
// Leaf page found
if a == 0
|| (match access_type {
MemoryAccessType::Write => d == 0,
_ => false,
})
{
let new_pte = pte
| (1 << 6)
| (match access_type {
MemoryAccessType::Write => 1 << 7,
_ => 0,
});
match self.addressing_mode {
AddressingMode::SV32 => self.store_word_raw(pte_address, new_pte as u32),
_ => self.store_doubleword_raw(pte_address, new_pte),
};
}
match access_type {
MemoryAccessType::Execute => {
if x == 0 {
return Err(());
}
}
MemoryAccessType::Read => {
if r == 0 {
return Err(());
}
}
MemoryAccessType::Write => {
if w == 0 {
return Err(());
}
}
_ => {}
};
let offset = v_address & 0xfff; // [11:0]
// @TODO: Optimize
let p_address = match self.addressing_mode {
AddressingMode::SV32 => match level {
1 => {
if ppns[0] != 0 {
return Err(());
}
(ppns[1] << 22) | (vpns[0] << 12) | offset
}
0 => (ppn << 12) | offset,
_ => panic!(), // Shouldn't happen
},
_ => match level {
2 => {
if ppns[1] != 0 || ppns[0] != 0 {
return Err(());
}
(ppns[2] << 30) | (vpns[1] << 21) | (vpns[0] << 12) | offset
}
1 => {
if ppns[0] != 0 {
return Err(());
}
(ppns[2] << 30) | (ppns[1] << 21) | (vpns[0] << 12) | offset
}
0 => (ppn << 12) | offset,
_ => panic!(), // Shouldn't happen
},
};
// println!("PA:{:X}", p_address);
Ok(p_address)
}
}
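// Illustrative sketch only (not part of this file): the SV39 bit slicing used
// in translate_address and traverse_page above, pulled out into a standalone
// helper with a hypothetical name, to make the field boundaries easier to see.
fn sv39_decompose(v_address: u64) -> (u64, [u64; 3]) {
    let offset = v_address & 0xfff; // page offset, bits [11:0]
    let vpns = [
        (v_address >> 12) & 0x1ff, // VPN[0], bits [20:12]
        (v_address >> 21) & 0x1ff, // VPN[1], bits [29:21]
        (v_address >> 30) & 0x1ff, // VPN[2], bits [38:30]
    ];
    (offset, vpns)
}
// e.g. 0x8000_1234 -> offset 0x234, VPN[0] = 1, VPN[1] = 0, VPN[2] = 2.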
/// [`Memory`](../memory/struct.Memory.html) wrapper. Converts physical address to the one in memory
/// using [`DRAM_BASE`](constant.DRAM_BASE.html) and accesses [`Memory`](../memory/struct.Memory.html).
pub struct MemoryWrapper {
memory: Memory,
}
impl MemoryWrapper {
fn new() -> Self {
MemoryWrapper {
memory: Memory::new(),
}
}
fn init(&mut self, capacity: u64) {
self.memory.init(capacity);
}
pub fn read_byte(&mut self, p_address: u64) -> u8 {
debug_assert!(
p_address >= DRAM_BASE,
"Memory address must equals to or bigger than DRAM_BASE. {:X}",
p_address
);
self.memory.read_byte(p_address - DRAM_BASE)
}
pub fn read_halfword(&mut self, p_address: u64) -> u16 {
debug_assert!(
p_address >= DRAM_BASE && p_address.wrapping_add(1) >= DRAM_BASE,
"Memory address must equals to or bigger than DRAM_BASE. {:X}",
p_address
);
self.memory.read_halfword(p_address - DRAM_BASE)
}
pub fn read_word(&mut self, p_address: u64) -> u32 {
debug_assert!(
p_address >= DRAM_BASE && p_address.wrapping_add(3) >= DRAM_BASE,
"Memory address must equals to or bigger than DRAM_BASE. {:X}",
p_address
);
self.memory.read_word(p_address - DRAM_BASE)
}
pub fn read_doubleword(&mut self, p_address: u64) -> u64 {
debug_assert!(
p_address >= DRAM_BASE && p_address.wrapping_add(7) >= DRAM_BASE,
"Memory address must equals to or bigger than DRAM_BASE. {:X}",
p_address
);
self.memory.read_doubleword(p_address - DRAM_BASE)
}
pub fn write_byte(&mut self, p_address: u64, value: u8) {
debug_assert!(
p_address >= DRAM_BASE,
"Memory address must equals to or bigger than DRAM_BASE. {:X}",
p_address
);
self.memory.write_byte(p_address - DRAM_BASE, value)
}
pub fn write_halfword(&mut self, p_address: u64, value: u16) {
debug_assert!(
p_address >= DRAM_BASE && p_address.wrapping_add(1) >= DRAM_BASE,
"Memory address must equals to or bigger than DRAM_BASE. {:X}",
p_address
);
self.memory.write_halfword(p_address - DRAM_BASE, value)
}
pub fn write_word(&mut self, p_address: u64, value: u32) {
debug_assert!(
p_address >= DRAM_BASE && p_address.wrapping_add(3) >= DRAM_BASE,
"Memory address must equals to or bigger than DRAM_BASE. {:X}",
p_address
);
self.memory.write_word(p_address - DRAM_BASE, value)
}
pub fn write_doubleword(&mut self, p_address: u64, value: u64) {
debug_assert!(
p_address >= DRAM_BASE && p_address.wrapping_add(7) >= DRAM_BASE,
"Memory address must equals to or bigger than DRAM_BASE. {:X}",
p_address
);
self.memory.write_doubleword(p_address - DRAM_BASE, value)
}
pub fn validate_address(&self, address: u64) -> bool {
self.memory.validate_address(address - DRAM_BASE)
}
}
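Finally, a short usage sketch (not part of the commit) tying `Mmu`, `Memory`, and `DRAM_BASE` together. `mmu_example` is a hypothetical function and assumes it lives next to this module so that `Xlen` and `Trap` from `cpu` are in scope; physical addresses below `DRAM_BASE` currently panic (the peripheral mappings are commented out), so the example stays inside main memory.

fn mmu_example() -> Result<(), Trap> {
    let mut mmu = Mmu::new(Xlen::Bit64);
    mmu.init_memory(1024 * 1024); // 1 MiB of DRAM starting at DRAM_BASE

    // With AddressingMode::None (the default), virtual and physical addresses
    // are identical, so loads and stores go straight to main memory.
    mmu.store_doubleword(DRAM_BASE, 0x1122_3344_5566_7788)?;
    assert_eq!(mmu.load_word(DRAM_BASE)?, 0x5566_7788);

    // The experimental page cache is off by default; it only matters once a
    // translating addressing mode (SV32/SV39) is active.
    mmu.enable_page_cache(true);
    Ok(())
}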


@@ -1,4 +1,3 @@
mod bus;
mod cpu;
mod csr;
mod exception;
mod memory;
mod mmu;