@@ -10,7 +10,7 @@ use crate::memory::Memory;
1010use crate :: { Error , ISA_A , ISA_B , ISA_MOP , RISCV_PAGESIZE } ;
1111
1212const RISCV_PAGESIZE_MASK : u64 = RISCV_PAGESIZE as u64 - 1 ;
13- const INSTRUCTION_CACHE_SIZE : usize = 4096 ;
13+ const INSTRUCTION_CACHE_SIZE : usize = 2048 ;
1414
1515pub struct Decoder {
1616 factories : Vec < InstructionFactory > ,
@@ -99,14 +99,14 @@ impl Decoder {
9999 let instruction_cache_key = {
100100 // according to RISC-V instruction encoding, the lowest bit in PC will always be zero
101101 let pc = pc >> 1 ;
102- // Here we try to balance between local code and remote code. At times,
103- // we can find the code jumping to a remote function(e.g., memcpy or
104- // alloc), then resumes execution at a local location. Previous cache
105- // key only optimizes for local operations, while this new cache key
106- // balances the code between a 8192-byte local region, and certain remote
107- // code region. Notice the value 12 and 8 here are chosen by empirical
108- // evidence .
109- ( ( pc & 0xFF ) | ( pc >> 12 << 8 ) ) as usize % INSTRUCTION_CACHE_SIZE
102+ // This indexing strategy optimizes instruction cache utilization by improving the distribution of addresses.
103+ // - `pc >> 5`: Incorporates higher bits to ensure a more even spread across cache indices.
104+ // - `pc << 1`: Spreads lower-bit information into higher positions, enhancing variability.
105+ // - `^` (XOR): Mixes the two shifted values, intended to reduce cache conflicts between nearby addresses.
106+ //
107+ // This approach helps balance cache efficiency between local execution and remote function calls,
108+ // reducing hotspots and improving overall performance.
109+ ( ( pc >> 5 ) ^ ( pc << 1 ) ) as usize % INSTRUCTION_CACHE_SIZE
110110 } ;
111111 let cached_instruction = self . instructions_cache [ instruction_cache_key] ;
112112 if cached_instruction. 0 == pc {
0 commit comments