
Commit 7ff75e2

polarvid authored and mysterywolf committed Dec 14, 2024
feat: arm64: mmu: auto-sensing of best paging stride
Improves the memory mapping process by dynamically selecting the optimal paging stride (4K or 2M) based on virtual address alignment and mapping size. This eliminates the need for upfront stride determination, enhancing flexibility and maintainability in memory management.

Changes:

- Replaced the fixed stride-selection logic with a dynamic decision loop.
- Removed the `npages` calculation in favor of `remaining_sz`, which tracks the unprocessed memory size.
- Added an assertion to ensure `size` is properly aligned to the smallest page size.
- Adjusted the loop to dynamically determine and apply the appropriate stride (4K or 2M) for each mapping iteration.
- Updated the virtual and physical address increments to use the dynamically selected stride.

Signed-off-by: Shell <[email protected]>
1 parent 02a1149 commit 7ff75e2
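
The stride decision described in the message reduces to a single predicate per loop iteration. Below is a minimal standalone sketch, assuming the conventional AArch64 4K-granule layout (4 KiB pages, 2 MiB sections); PAGE_SIZE, SECTION_SIZE, SECTION_MASK, and pick_stride are illustrative stand-ins for the kernel's ARCH_* macros, not code from the patch:

#include <stdint.h>
#include <stddef.h>

/* Illustrative stand-ins for the kernel's ARCH_* macros (4K granule). */
#define PAGE_SIZE      (1UL << 12)          /* 4 KiB, like ARCH_PAGE_SIZE */
#define SECTION_SIZE   (1UL << 21)          /* 2 MiB, like ARCH_SECTION_SIZE */
#define SECTION_MASK   (SECTION_SIZE - 1)   /* like ARCH_SECTION_MASK */

/* Largest stride usable for the next mapping step: a 2M section is legal
 * only when the virtual address sits on a 2M boundary AND at least 2M of
 * the request remains unmapped; otherwise fall back to a 4K page. */
static size_t pick_stride(uintptr_t v_addr, size_t remaining_sz)
{
    if ((v_addr & SECTION_MASK) || (remaining_sz < SECTION_SIZE))
        return PAGE_SIZE;
    return SECTION_SIZE;
}

Because the predicate is re-evaluated on every iteration, an unaligned start only degrades the head of the mapping to 4K pages; once the virtual address reaches a 2M boundary, the loop can switch to sections for the bulk of the request.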

File tree

1 file changed (+18 -16 lines changed)

  • libcpu/aarch64/common/mmu.c
 

libcpu/aarch64/common/mmu.c

+18 -16
@@ -275,27 +275,27 @@ void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
     int ret = -1;
 
     void *unmap_va = v_addr;
-    size_t npages;
+    size_t remaining_sz = size;
     size_t stride;
     int (*mapper)(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr);
 
-    if (((rt_ubase_t)v_addr & ARCH_SECTION_MASK) || (size & ARCH_SECTION_MASK))
-    {
-        /* legacy 4k mapping */
-        npages = size >> ARCH_PAGE_SHIFT;
-        stride = ARCH_PAGE_SIZE;
-        mapper = _kernel_map_4K;
-    }
-    else
-    {
-        /* 2m huge page */
-        npages = size >> ARCH_SECTION_SHIFT;
-        stride = ARCH_SECTION_SIZE;
-        mapper = _kernel_map_2M;
-    }
+    RT_ASSERT(!(size & ARCH_PAGE_MASK));
 
-    while (npages--)
+    while (remaining_sz)
     {
+        if (((rt_ubase_t)v_addr & ARCH_SECTION_MASK) || (remaining_sz < ARCH_SECTION_SIZE))
+        {
+            /* legacy 4k mapping */
+            stride = ARCH_PAGE_SIZE;
+            mapper = _kernel_map_4K;
+        }
+        else
+        {
+            /* 2m huge page */
+            stride = ARCH_SECTION_SIZE;
+            mapper = _kernel_map_2M;
+        }
+
         MM_PGTBL_LOCK(aspace);
         ret = mapper(aspace->page_table, v_addr, p_addr, attr);
         MM_PGTBL_UNLOCK(aspace);
@@ -314,6 +314,8 @@ void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
             }
             break;
         }
+
+        remaining_sz -= stride;
         v_addr = (char *)v_addr + stride;
         p_addr = (char *)p_addr + stride;
     }
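
To see how the patched loop decomposes a request, here is a hypothetical, self-contained walk-through (not part of the commit) under the same assumed constants as the sketch above: a mapping that starts 8 KiB below a 2M boundary and covers 2 MiB + 16 KiB comes out as two 4K pages, one 2M section, then two 4K pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE      (1UL << 12)          /* assumed 4 KiB page */
#define SECTION_SIZE   (1UL << 21)          /* assumed 2 MiB section */
#define SECTION_MASK   (SECTION_SIZE - 1)

int main(void)
{
    /* Hypothetical request: start 8 KiB below a 2M boundary, map 2M + 16K. */
    uintptr_t v = (1UL << 21) - 2 * PAGE_SIZE;       /* 0x1FE000 */
    size_t remaining = SECTION_SIZE + 4 * PAGE_SIZE;  /* 0x204000 */

    while (remaining)
    {
        /* Same predicate as the patched loop in rt_hw_mmu_map(). */
        size_t stride = ((v & SECTION_MASK) || (remaining < SECTION_SIZE))
                            ? PAGE_SIZE : SECTION_SIZE;
        printf("map %s at 0x%06lx\n",
               stride == SECTION_SIZE ? "2M" : "4K", (unsigned long)v);
        remaining -= stride;
        v += stride;
    }
    /* Expected output: 4K, 4K, 2M, 4K, 4K. */
    return 0;
}

Note how the first two 4K steps bring the virtual address up to the 2M boundary, after which the predicate allows a section, and the 16 KiB tail (smaller than a section) falls back to 4K pages again.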
