diff --git a/model/riscv_fetch.sail b/model/riscv_fetch.sail
index c37af8748..2f945f24f 100644
--- a/model/riscv_fetch.sail
+++ b/model/riscv_fetch.sail
@@ -26,8 +26,8 @@ function fetch() -> FetchResult = {
       if (use_pc_bits[0] != bitzero | (use_pc_bits[1] != bitzero & not(currentlyEnabled(Ext_Zca))))
       then F_Error(E_Fetch_Addr_Align(), PC)
       else match translateAddr(use_pc, InstructionFetch()) {
-        TR_Failure(e, _) => F_Error(e, PC),
-        TR_Address(ppclo, _) => {
+        Err(e, _) => F_Error(e, PC),
+        Ok(ppclo, _) => {
           /* split instruction fetch into 16-bit granules to handle RVC, as
            * well as to generate precise fault addresses in any fetch
            * exceptions.
@@ -44,8 +44,8 @@ function fetch() -> FetchResult = {
                 Ext_FetchAddr_Error(e)      => F_Ext_Error(e),
                 Ext_FetchAddr_OK(use_pc_hi) => {
                   match translateAddr(use_pc_hi, InstructionFetch()) {
-                    TR_Failure(e, _) => F_Error(e, PC_hi),
-                    TR_Address(ppchi, _) => {
+                    Err(e, _) => F_Error(e, PC_hi),
+                    Ok(ppchi, _) => {
                       match mem_read(InstructionFetch(), ppchi, 2, false, false, false) {
                         Err(e)  => F_Error(e, PC_hi),
                         Ok(ihi) => F_Base(append(ihi, ilo))
diff --git a/model/riscv_fetch_rvfi.sail b/model/riscv_fetch_rvfi.sail
index 643582c63..506ab0e0b 100644
--- a/model/riscv_fetch_rvfi.sail
+++ b/model/riscv_fetch_rvfi.sail
@@ -21,8 +21,8 @@ function rvfi_fetch() -> FetchResult = {
       if (use_pc_bits[0] != bitzero | (use_pc_bits[1] != bitzero & not(currentlyEnabled(Ext_Zca))))
       then F_Error(E_Fetch_Addr_Align(), PC)
       else match translateAddr(use_pc, InstructionFetch()) {
-        TR_Failure(e, _) => F_Error(e, PC),
-        TR_Address(_, _) => {
+        Err(e, _) => F_Error(e, PC),
+        Ok(_, _) => {
           let i = rvfi_instruction[rvfi_insn];
           rvfi_inst_data[rvfi_insn] = zero_extend(i);
           if (i[1 .. 0] != 0b11)
@@ -34,8 +34,8 @@ function rvfi_fetch() -> FetchResult = {
               Ext_FetchAddr_Error(e)      => F_Ext_Error(e),
               Ext_FetchAddr_OK(use_pc_hi) =>
                 match translateAddr(use_pc_hi, InstructionFetch()) {
-                  TR_Failure(e, _) => F_Error(e, PC),
-                  TR_Address(_, _) => F_Base(i)
+                  Err(e, _) => F_Error(e, PC),
+                  Ok(_, _) => F_Base(i)
                 }
             }
           }
diff --git a/model/riscv_insts_zaamo.sail b/model/riscv_insts_zaamo.sail
index 6d458d516..04d46f689 100644
--- a/model/riscv_insts_zaamo.sail
+++ b/model/riscv_insts_zaamo.sail
@@ -58,8 +58,8 @@ function clause execute (AMO(op, aq, rl, rs2, rs1, width, rd)) = {
       if not(is_aligned_addr(bits_of(vaddr), width))
       then Memory_Exception(vaddr, E_SAMO_Addr_Align())
       else match translateAddr(vaddr, ReadWrite(Data, Data)) {
-        TR_Failure(e, _) => Memory_Exception(vaddr, e),
-        TR_Address(addr, _) => {
+        Err(e, _) => Memory_Exception(vaddr, e),
+        Ok(addr, _) => {
           let rs2_val = X(rs2)[width_bytes * 8 - 1 .. 0];
           match mem_write_ea(addr, width_bytes, aq & rl, rl, true) {
             Err(e) => Memory_Exception(vaddr, e),
diff --git a/model/riscv_insts_zicbom.sail b/model/riscv_insts_zicbom.sail
index 599a3fc38..7c850097d 100644
--- a/model/riscv_insts_zicbom.sail
+++ b/model/riscv_insts_zicbom.sail
@@ -80,7 +80,7 @@ function process_clean_inval(rs1, cbop) = {
     Ext_DataAddr_Error(e) => Ext_DataAddr_Check_Failure(e),
     Ext_DataAddr_OK(vaddr) => {
       let res : option(ExceptionType) = match translateAddr(vaddr, Read(Data)) {
-        TR_Address(paddr, _) => {
+        Ok(paddr, _) => {
           // "A cache-block management instruction is permitted to access the
           // specified cache block whenever a load instruction or store instruction
           // is permitted to access the corresponding physical addresses. If
@@ -105,7 +105,7 @@ function process_clean_inval(rs1, cbop) = {
             _ => None(),
           }
         },
-        TR_Failure(e, _) => Some(e)
+        Err(e, _) => Some(e)
       };
       // "If access to the cache block is not permitted, a cache-block management
       // instruction raises a store page fault or store guest-page fault exception
diff --git a/model/riscv_insts_zicboz.sail b/model/riscv_insts_zicboz.sail
index 38998fc8b..329bf45ff 100644
--- a/model/riscv_insts_zicboz.sail
+++ b/model/riscv_insts_zicboz.sail
@@ -44,8 +44,8 @@ function clause execute(ZICBOZ(rs1)) = {
           // was encoded in the instruction. We subtract the negative offset
           // (add the positive offset) to get it. Normally this will be
           // equal to rs1, but pointer masking can change that.
-          TR_Failure(e, _) => Memory_Exception(vaddr - negative_offset, e),
-          TR_Address(paddr, _) => {
+          Err(e, _) => Memory_Exception(vaddr - negative_offset, e),
+          Ok(paddr, _) => {
            match mem_write_ea(paddr, cache_block_size, false, false, false) {
              Err(e) => Memory_Exception(vaddr - negative_offset, e),
              Ok(_) => {
diff --git a/model/riscv_vmem.sail b/model/riscv_vmem.sail
index 7ecc5ea9e..68530d875 100644
--- a/model/riscv_vmem.sail
+++ b/model/riscv_vmem.sail
@@ -209,10 +209,7 @@ function translationMode(priv : Privilege) -> SATPMode = {
 
 // Result of address translation
 // PUBLIC
-union TR_Result('paddr : Type, 'failure : Type) = {
-  TR_Address : ('paddr, ext_ptw),
-  TR_Failure : ('failure, ext_ptw)
-}
+type TR_Result('paddr : Type, 'failure : Type) = result(('paddr, ext_ptw), ('failure, ext_ptw))
 
 // This function can be ignored on first reading since TLBs are not
 // part of RISC-V architecture spec (see TLB NOTE above).
@@ -239,15 +236,15 @@ function translate_TLB_hit forall 'v, is_sv_mode('v) . (
   match pte_check {
     PTE_Check_Failure(ext_ptw, ext_ptw_fail) =>
-      TR_Failure(ext_get_ptw_error(ext_ptw_fail), ext_ptw),
+      Err(ext_get_ptw_error(ext_ptw_fail), ext_ptw),
     PTE_Check_Success(ext_ptw) =>
       match update_PTE_Bits(pte, ac) {
-        None() => TR_Address(tlb_get_ppn(sv_width, ent, vpn), ext_ptw),
+        None() => Ok(tlb_get_ppn(sv_width, ent, vpn), ext_ptw),
         Some(pte') =>
           // Step 9 of VATP. See riscv_platform.sail.
           if not(plat_enable_dirty_update())
           then
             // pte needs dirty/accessed update but that is not enabled
-            TR_Failure(PTW_PTE_Update(), ext_ptw)
+            Err(PTW_PTE_Update(), ext_ptw)
           else {
             // Writeback the PTE (which has new A/D bits)
             write_TLB(tlb_index, tlb_set_pte(ent, pte'));
@@ -256,7 +253,7 @@ function translate_TLB_hit forall 'v, is_sv_mode('v) . (
               Err(e) => internal_error(__FILE__, __LINE__,
                                        "invalid physical address in TLB")
             };
-            TR_Address(tlb_get_ppn(sv_width, ent, vpn), ext_ptw)
+            Ok(tlb_get_ppn(sv_width, ent, vpn), ext_ptw)
           }
       }
   }
@@ -281,30 +278,30 @@ function translate_TLB_miss forall 'v, is_sv_mode('v) . (
   let ptw_result = pt_walk(sv_width, vpn, ac, priv, mxr, do_sum, base_ppn, initial_level, false, ext_ptw);
   match ptw_result {
-    Err(f, ext_ptw) => TR_Failure(f, ext_ptw),
+    Err(f, ext_ptw) => Err(f, ext_ptw),
     Ok(struct {ppn, pte, pteAddr, level, global}, ext_ptw) => {
       let ext_pte = ext_bits_of_PTE(pte);
       // Without TLBs, this 'match' expression can be replaced simply
-      // by: 'TR_Address(ppn, ext_ptw)' (see TLB NOTE above)
+      // by: 'Ok(ppn, ext_ptw)' (see TLB NOTE above)
       match update_PTE_Bits(pte, ac) {
         None() => {
           add_to_TLB(sv_width, asid, vpn, ppn, pte, pteAddr, level, global);
-          TR_Address(ppn, ext_ptw)
+          Ok(ppn, ext_ptw)
         },
         Some(pte) =>
           // Step 9 of VATP. See riscv_platform.sail.
           if not(plat_enable_dirty_update())
           then
             // pte needs dirty/accessed update but that is not enabled
-            TR_Failure(PTW_PTE_Update(), ext_ptw)
+            Err(PTW_PTE_Update(), ext_ptw)
           else {
             // Writeback the PTE (which has new A/D bits)
             match write_pte(pteAddr, pte_width, pte) {
               Ok(_) => {
                 add_to_TLB(sv_width, asid, vpn, ppn, pte, pteAddr, level, global);
-                TR_Address(ppn, ext_ptw)
+                Ok(ppn, ext_ptw)
               },
               Err(e) =>
-                TR_Failure(PTW_Access(), ext_ptw)
+                Err(PTW_Access(), ext_ptw)
             }
           }
       }
@@ -367,7 +364,7 @@ function translateAddr(
 
   let mode = translationMode(effPriv);
 
-  if mode == Bare then return TR_Address(Physaddr(zero_extend(bits_of(vAddr))), init_ext_ptw);
+  if mode == Bare then return Ok(Physaddr(zero_extend(bits_of(vAddr))), init_ext_ptw);
 
   // Sv39 -> 39, etc.
   let sv_width = satp_mode_width(mode);
@@ -380,7 +377,7 @@ function translateAddr(
 
   let svAddr = bits_of(vAddr)[sv_width - 1 .. 0];
   if bits_of(vAddr) != sign_extend(svAddr) then {
-    TR_Failure(translationException(ac, PTW_Invalid_Addr()), init_ext_ptw)
+    Err(translationException(ac, PTW_Invalid_Addr()), init_ext_ptw)
   } else {
     let mxr = mstatus[MXR] == 0b1;
     let do_sum = mstatus[SUM] == 0b1;
@@ -397,7 +394,7 @@ function translateAddr(
                             init_ext_ptw);
     // Fixup result PA or exception
     match res {
-      TR_Address(ppn, ext_ptw) => {
+      Ok(ppn, ext_ptw) => {
         // Step 10 of VATP.
         // Append the page offset. This is now a 34 or 56 bit address.
         let paddr = ppn @ bits_of(vAddr)[pagesize_bits - 1 .. 0];
@@ -405,9 +402,9 @@ function translateAddr(
         // On RV64 paddr can be 34 or 56 bits, so we zero extend to 64.
         // On RV32 paddr can only be 34 bits. Sail knows this due to
         // the assertion above.
-        TR_Address(Physaddr(zero_extend(paddr)), ext_ptw)
+        Ok(Physaddr(zero_extend(paddr)), ext_ptw)
       },
-      TR_Failure(f, ext_ptw) => TR_Failure(translationException(ac, f), ext_ptw)
+      Err(f, ext_ptw) => Err(translationException(ac, f), ext_ptw)
     }
   }
 }
diff --git a/model/riscv_vmem_utils.sail b/model/riscv_vmem_utils.sail
index 8100893c9..4b5753bc0 100644
--- a/model/riscv_vmem_utils.sail
+++ b/model/riscv_vmem_utils.sail
@@ -131,11 +131,11 @@ function vmem_write_addr(vaddr, width, data, acc, aq, rl, res) = {
         let offset = i;
         let vaddr = vaddr + (offset * bytes);
         match translateAddr(Virtaddr(vaddr), acc) {
-          TR_Failure(e, _) => return Err(Memory_Exception(Virtaddr(vaddr), e)),
+          Err(e, _) => return Err(Memory_Exception(Virtaddr(vaddr), e)),
           /* NOTE: Currently, we only announce the effective address if address
              translation is successful. This may need revisiting, particularly
              in the misaligned case. */
-          TR_Address(paddr, _) => {
+          Ok(paddr, _) => {
             /* If res is true, the load should be aligned, and this loop should only execute once */
             if res & not(match_reservation(bits_of(paddr))) then {
               write_success = false
@@ -213,9 +213,9 @@ function vmem_read(rs, offset, width, acc, aq, rl, res) = {
         let offset = i;
         let vaddr = vaddr + (offset * bytes);
         match translateAddr(Virtaddr(vaddr), acc) {
-          TR_Failure(e, _) => return Err(Memory_Exception(Virtaddr(vaddr), e)),
+          Err(e, _) => return Err(Memory_Exception(Virtaddr(vaddr), e)),
 
-          TR_Address(paddr, _) => match mem_read(acc, paddr, bytes, aq, rl, res) {
+          Ok(paddr, _) => match mem_read(acc, paddr, bytes, aq, rl, res) {
             Err(e) => return Err(Memory_Exception(Virtaddr(vaddr), e)),
             Ok(v) => {