Commit 40f7e60

Merge pull request #108 from sysprog21/rfence
Reduce RFENCE IPI cache flush scope
2 parents: c2809ea + fb11de8

File tree: 3 files changed, +77 −12 lines

main.c

Lines changed: 26 additions & 12 deletions
@@ -453,28 +453,42 @@ static inline sbi_ret_t handle_sbi_ecall_RFENCE(hart_t *hart, int32_t fid)
      * completely.
      */
     uint64_t hart_mask, hart_mask_base;
+    uint32_t start_addr, size;
     switch (fid) {
-    case 0:
+    case SBI_RFENCE__I:
+        /* Instruction cache flush - ignored in interpreter mode */
         return (sbi_ret_t){SBI_SUCCESS, 0};
-    case 1:
+    case SBI_RFENCE__VMA:
+    case SBI_RFENCE__VMA_ASID:
+        /* RFENCE.VMA and RFENCE.VMA.ASID both use the same parameters:
+         * a0: hart_mask (low bits)
+         * a1: hart_mask_base (high bits)
+         * a2: start_addr
+         * a3: size
+         * For VMA_ASID, a4 contains asid (currently ignored)
+         */
         hart_mask = (uint64_t) hart->x_regs[RV_R_A0];
         hart_mask_base = (uint64_t) hart->x_regs[RV_R_A1];
+        start_addr = hart->x_regs[RV_R_A2];
+        size = hart->x_regs[RV_R_A3];
+
         if (hart_mask_base == 0xFFFFFFFFFFFFFFFF) {
-            for (uint32_t i = 0; i < hart->vm->n_hart; i++) {
-                mmu_invalidate(hart->vm->hart[i]);
-            }
+            /* Flush all harts */
+            for (uint32_t i = 0; i < hart->vm->n_hart; i++)
+                mmu_invalidate_range(hart->vm->hart[i], start_addr, size);
         } else {
+            /* Flush specified harts based on mask */
             for (int i = hart_mask_base; hart_mask; hart_mask >>= 1, i++) {
-                mmu_invalidate(hart->vm->hart[i]);
+                if (hart_mask & 1)
+                    mmu_invalidate_range(hart->vm->hart[i], start_addr, size);
             }
         }
         return (sbi_ret_t){SBI_SUCCESS, 0};
-    case 2:
-    case 3:
-    case 4:
-    case 5:
-    case 6:
-    case 7:
+    case SBI_RFENCE__GVMA_VMID:
+    case SBI_RFENCE__GVMA:
+    case SBI_RFENCE__VVMA_ASID:
+    case SBI_RFENCE__VVMA:
+        /* Hypervisor-related RFENCE operations - not implemented */
         return (sbi_ret_t){SBI_SUCCESS, 0};
     default:
         return (sbi_ret_t){SBI_ERR_FAILED, 0};
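For context, here is a minimal sketch (not part of this commit) of how a guest kernel could issue the remote sfence.vma call that this handler services. The RFENCE extension ID 0x52464E43 ("RFNC") and function ID 1 follow the SBI specification; the register layout mirrors the handler above (a0 = hart_mask, a1 = hart_mask_base, a2 = start_addr, a3 = size). The helper name and macros are illustrative, and the inline assembly assumes a RISC-V target built with GCC/Clang extensions.

/* Illustrative guest-side helper: issue an SBI remote sfence.vma ecall.
 * Names and macros are hypothetical; values come from the SBI spec. */
#include <stdint.h>

#define SBI_EXT_RFENCE 0x52464E43UL          /* "RFNC" extension ID */
#define SBI_RFENCE_REMOTE_SFENCE_VMA 1       /* function ID for sfence.vma */

static inline long sbi_remote_sfence_vma(unsigned long hart_mask,
                                         unsigned long hart_mask_base,
                                         unsigned long start_addr,
                                         unsigned long size)
{
    register unsigned long a0 asm("a0") = hart_mask;
    register unsigned long a1 asm("a1") = hart_mask_base;
    register unsigned long a2 asm("a2") = start_addr;
    register unsigned long a3 asm("a3") = size;
    register unsigned long a6 asm("a6") = SBI_RFENCE_REMOTE_SFENCE_VMA;
    register unsigned long a7 asm("a7") = SBI_EXT_RFENCE;
    asm volatile("ecall"
                 : "+r"(a0), "+r"(a1)
                 : "r"(a2), "r"(a3), "r"(a6), "r"(a7)
                 : "memory");
    return (long) a0; /* SBI error code; SBI_SUCCESS == 0 */
}

A call such as sbi_remote_sfence_vma(0x3, 0, 0x80000000, 0x2000) would ask harts 0 and 1 to flush the two pages starting at 0x80000000, which is exactly the (start_addr, size) pair the handler now forwards to mmu_invalidate_range() instead of flushing everything.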

riscv.c

Lines changed: 48 additions & 0 deletions
@@ -199,6 +199,54 @@ void mmu_invalidate(hart_t *vm)
     }
 }
 
+/* Invalidate MMU caches for a specific virtual address range.
+ * If size is 0 or -1, invalidate all caches (equivalent to mmu_invalidate()).
+ * Otherwise, only invalidate cache entries whose VPN falls within
+ * [start_addr >> PAGE_SHIFT, (start_addr + size - 1) >> PAGE_SHIFT].
+ */
+void mmu_invalidate_range(hart_t *vm, uint32_t start_addr, uint32_t size)
+{
+    /* SBI spec: size == 0 or size == -1 means flush entire address space */
+    if (size == 0 || size == (uint32_t) -1) {
+        mmu_invalidate(vm);
+        return;
+    }
+
+    /* Calculate VPN range: [start_vpn, end_vpn] inclusive.
+     * Use 64-bit arithmetic to prevent overflow when (start_addr + size - 1)
+     * exceeds UINT32_MAX. For example:
+     *   start_addr = 0xFFF00000, size = 0x00200000
+     *   32-bit: 0xFFF00000 + 0x00200000 - 1 = 0x000FFFFF (wraps)
+     *   64-bit: 0xFFF00000 + 0x00200000 - 1 = 0x1000FFFFF (correct)
+     * Clamp to RV32 address space maximum before calculating end_vpn.
+     */
+    uint32_t start_vpn = start_addr >> RV_PAGE_SHIFT;
+    uint64_t end_addr = (uint64_t) start_addr + size - 1;
+    if (end_addr > UINT32_MAX)
+        end_addr = UINT32_MAX;
+    uint32_t end_vpn = (uint32_t) end_addr >> RV_PAGE_SHIFT;
+
+    /* Check each cache entry and invalidate if in range.
+     * Since we only have 4 cache entries total (fetch: 1, load: 2, store: 1),
+     * simple sequential checks are sufficient.
+     */
+    if (vm->cache_fetch.n_pages >= start_vpn &&
+        vm->cache_fetch.n_pages <= end_vpn)
+        vm->cache_fetch.n_pages = 0xFFFFFFFF;
+
+    if (vm->cache_load[0].n_pages >= start_vpn &&
+        vm->cache_load[0].n_pages <= end_vpn)
+        vm->cache_load[0].n_pages = 0xFFFFFFFF;
+
+    if (vm->cache_load[1].n_pages >= start_vpn &&
+        vm->cache_load[1].n_pages <= end_vpn)
+        vm->cache_load[1].n_pages = 0xFFFFFFFF;
+
+    if (vm->cache_store.n_pages >= start_vpn &&
+        vm->cache_store.n_pages <= end_vpn)
+        vm->cache_store.n_pages = 0xFFFFFFFF;
+}
+
 /* Pre-verify the root page table to minimize page table access during
  * translation time.
  */
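Below is a standalone sketch (not from the repository) that reproduces the overflow scenario described in the new comment, assuming RV_PAGE_SHIFT is 12 (4 KiB pages). It computes the end address the same way mmu_invalidate_range() does and shows why the 64-bit arithmetic and the UINT32_MAX clamp matter for a request that crosses the top of the 32-bit address space.

/* Demonstration of the VPN-range arithmetic; PAGE_SHIFT is an assumed
 * stand-in for RV_PAGE_SHIFT. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    uint32_t start_addr = 0xFFF00000u, size = 0x00200000u;

    uint32_t wrapped = start_addr + size - 1;              /* 32-bit wrap: 0x000FFFFF */
    uint64_t end_addr = (uint64_t) start_addr + size - 1;  /* 0x1000FFFFF, carry kept */
    if (end_addr > UINT32_MAX)
        end_addr = UINT32_MAX;                             /* clamp to RV32 maximum */

    uint32_t start_vpn = start_addr >> PAGE_SHIFT;
    uint32_t end_vpn = (uint32_t) end_addr >> PAGE_SHIFT;

    printf("wrapped 32-bit end: 0x%08x\n", wrapped);
    printf("clamped 64-bit end: 0x%08x\n", (uint32_t) end_addr);
    printf("VPN range: [0x%05x, 0x%05x]\n", start_vpn, end_vpn);
    return 0;
}

Without the widening and clamp, the wrapped end address (0x000FFFFF) would put end_vpn below start_vpn and the range check would silently skip every cache entry; with them, the range correctly covers VPNs 0xFFF00 through 0xFFFFF.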

riscv.h

Lines changed: 3 additions & 0 deletions
@@ -197,3 +197,6 @@ void vm_error_report(const hart_t *vm);
 
 /* Invalidate all MMU translation caches (fetch, load, store) */
 void mmu_invalidate(hart_t *vm);
+
+/* Invalidate MMU caches for a specific virtual address range */
+void mmu_invalidate_range(hart_t *vm, uint32_t start_addr, uint32_t size);
