@@ -226,25 +226,28 @@ void mmu_invalidate_range(hart_t *vm, uint32_t start_addr, uint32_t size)
         end_addr = UINT32_MAX;
     uint32_t end_vpn = (uint32_t) end_addr >> RV_PAGE_SHIFT;
 
-    /* Check each cache entry and invalidate if in range.
-     * Since we only have 4 cache entries total (fetch: 1, load: 2, store: 1),
-     * simple sequential checks are sufficient.
-     */
+    /* Invalidate the single-entry fetch cache if its page is in range */
     if (vm->cache_fetch.n_pages >= start_vpn &&
         vm->cache_fetch.n_pages <= end_vpn)
         vm->cache_fetch.n_pages = 0xFFFFFFFF;
 
-    if (vm->cache_load[0].n_pages >= start_vpn &&
-        vm->cache_load[0].n_pages <= end_vpn)
-        vm->cache_load[0].n_pages = 0xFFFFFFFF;
-
-    if (vm->cache_load[1].n_pages >= start_vpn &&
-        vm->cache_load[1].n_pages <= end_vpn)
-        vm->cache_load[1].n_pages = 0xFFFFFFFF;
+    /* Invalidate load cache: 8 sets × 2 ways */
+    for (int set = 0; set < 8; set++) {
+        for (int way = 0; way < 2; way++) {
+            if (vm->cache_load[set].ways[way].n_pages >= start_vpn &&
+                vm->cache_load[set].ways[way].n_pages <= end_vpn)
+                vm->cache_load[set].ways[way].n_pages = 0xFFFFFFFF;
+        }
+    }
 
-    if (vm->cache_store.n_pages >= start_vpn &&
-        vm->cache_store.n_pages <= end_vpn)
-        vm->cache_store.n_pages = 0xFFFFFFFF;
+    /* Invalidate store cache: 8 sets × 2 ways */
+    for (int set = 0; set < 8; set++) {
+        for (int way = 0; way < 2; way++) {
+            if (vm->cache_store[set].ways[way].n_pages >= start_vpn &&
+                vm->cache_store[set].ways[way].n_pages <= end_vpn)
+                vm->cache_store[set].ways[way].n_pages = 0xFFFFFFFF;
+        }
+    }
 }
 
 /* Pre-verify the root page table to minimize page table access during
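For context, the loops above index a two-level set/way structure that the hunk itself never shows. Below is a minimal sketch of what the cache_load[set].ways[way].n_pages accesses imply; the names mmu_cache_entry_t, mmu_cache_set_t, MMU_CACHE_SETS, MMU_CACHE_WAYS, and cache_set_for are illustrative assumptions, not taken from the source tree:

    #include <stdint.h>

    #define MMU_CACHE_SETS 8 /* matches the outer loop bound in the diff */
    #define MMU_CACHE_WAYS 2 /* matches the inner loop bound */

    /* One translation entry: n_pages holds the cached virtual page number
     * (VPN). The sentinel 0xFFFFFFFF marks the entry invalid, which is why
     * invalidation simply stores that value. */
    typedef struct {
        uint32_t n_pages;
    } mmu_cache_entry_t;

    /* One set groups the ways a given VPN may occupy. */
    typedef struct {
        mmu_cache_entry_t ways[MMU_CACHE_WAYS];
    } mmu_cache_set_t;

    /* Fields assumed inside hart_t, per the accesses in the diff:
     *     mmu_cache_entry_t cache_fetch;                 (one entry)
     *     mmu_cache_set_t cache_load[MMU_CACHE_SETS];
     *     mmu_cache_set_t cache_store[MMU_CACHE_SETS];
     */

    /* A lookup would presumably select the set from the low VPN bits: */
    static inline mmu_cache_entry_t *cache_set_for(mmu_cache_set_t *cache,
                                                   uint32_t vpn)
    {
        return cache[vpn & (MMU_CACHE_SETS - 1)].ways;
    }

Note that because an invalidation range can contain VPNs mapping to any set, the loops in the diff must visit all MMU_CACHE_SETS × MMU_CACHE_WAYS (16) entries per cache rather than probing a single set, as a lookup would.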