
Commit 77f30b7 (1 parent: cbedb6b)

Revert "[components/mm] support for scalable memory management (RT-Thread#7277)"

This reverts commit 470454d.

3 files changed: +37 -158 lines

libcpu/aarch64/common/cache.h (+1 -4)
@@ -23,10 +23,7 @@ void rt_hw_cpu_dcache_invalidate(void *start_addr, unsigned long size);
 
 static inline void rt_hw_icache_invalidate_all(void)
 {
-    /* wait for any modification complete */
-    __asm__ volatile ("dsb ishst");
-    __asm__ volatile ("ic iallu");
-    __asm__ volatile ("isb");
+    __asm_invalidate_icache_all();
 }
 
 void rt_hw_cpu_icache_invalidate(void *addr, rt_size_t size);
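
Note: the revert swaps the open-coded invalidation sequence back for the assembly helper, presumably defined in the port's assembly sources. Assuming __asm_invalidate_icache_all() does the usual full-icache invalidation, the removed inline code is the rough equivalent; the sketch below (icache_invalidate_all_sketch is a hypothetical name, not RT-Thread API) also adds the completion barrier the removed version left out:

    /* Sketch: full icache invalidation on AArch64. */
    static inline void icache_invalidate_all_sketch(void)
    {
        __asm__ volatile ("dsb ishst"); /* make prior writes (new code) visible */
        __asm__ volatile ("ic iallu");  /* invalidate all icache to the PoU */
        __asm__ volatile ("dsb ish");   /* wait for the invalidation to finish */
        __asm__ volatile ("isb");       /* flush the pipeline and refetch */
    }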

libcpu/aarch64/common/mmu.c (+36 -132)
@@ -1,14 +1,13 @@
 /*
- * Copyright (c) 2006-2023, RT-Thread Development Team
+ * Copyright (c) 2006-2018, RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
  * Change Logs:
  * Date           Author       Notes
  * 2012-01-10     bernard      porting to AM1808
- * 2021-11-28     GuEe-GUI     first version
- * 2022-12-10     WangXiaoyao  porting to MM
  */
+
 #include <board.h>
 #include <rthw.h>
 #include <rtthread.h>
@@ -80,7 +79,6 @@ static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
         {
             break;
         }
-        /* next table entry in current level */
         level_info[level].pos = cur_lv_tbl + off;
         cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
         cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
@@ -189,104 +187,19 @@ static int _kernel_map_4K(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsi
     return ret;
 }
 
-static int _kernel_map_2M(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr)
-{
-    int ret = 0;
-    int level;
-    unsigned long *cur_lv_tbl = lv0_tbl;
-    unsigned long page;
-    unsigned long off;
-    unsigned long va = (unsigned long)vaddr;
-    unsigned long pa = (unsigned long)paddr;
-
-    int level_shift = MMU_ADDRESS_BITS;
-
-    if (va & ARCH_SECTION_MASK)
-    {
-        return MMU_MAP_ERROR_VANOTALIGN;
-    }
-    if (pa & ARCH_SECTION_MASK)
-    {
-        return MMU_MAP_ERROR_PANOTALIGN;
-    }
-    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
-    {
-        off = (va >> level_shift);
-        off &= MMU_LEVEL_MASK;
-        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
-        {
-            page = (unsigned long)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
-            if (!page)
-            {
-                ret = MMU_MAP_ERROR_NOPAGE;
-                goto err;
-            }
-            rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
-            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
-            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
-            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
-        }
-        else
-        {
-            page = cur_lv_tbl[off];
-            page &= MMU_ADDRESS_MASK;
-            /* page to va */
-            page -= PV_OFFSET;
-            rt_page_ref_inc((void *)page, 0);
-        }
-        page = cur_lv_tbl[off];
-        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
-        {
-            /* is block! error! */
-            ret = MMU_MAP_ERROR_CONFLICT;
-            goto err;
-        }
-        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
-        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
-        level_shift -= MMU_LEVEL_SHIFT;
-    }
-    /* now is level page */
-    attr &= MMU_ATTRIB_MASK;
-    pa |= (attr | MMU_TYPE_BLOCK); /* block */
-    off = (va >> ARCH_SECTION_SHIFT);
-    off &= MMU_LEVEL_MASK;
-    cur_lv_tbl[off] = pa;
-    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
-    return ret;
-err:
-    _kenrel_unmap_4K(lv0_tbl, (void *)va);
-    return ret;
-}
-
 void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
                     size_t attr)
 {
     int ret = -1;
 
     void *unmap_va = v_addr;
-    size_t npages;
-    size_t stride;
-    int (*mapper)(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr);
-
-    if (((rt_ubase_t)v_addr & ARCH_SECTION_MASK) || (size & ARCH_SECTION_MASK))
-    {
-        /* legacy 4k mapping */
-        npages = size >> ARCH_PAGE_SHIFT;
-        stride = ARCH_PAGE_SIZE;
-        mapper = _kernel_map_4K;
-    }
-    else
-    {
-        /* 2m huge page */
-        npages = size >> ARCH_SECTION_SHIFT;
-        stride = ARCH_SECTION_SIZE;
-        mapper = _kernel_map_2M;
-    }
+    size_t npages = size >> ARCH_PAGE_SHIFT;
 
+    // TODO trying with HUGEPAGE here
     while (npages--)
     {
         MM_PGTBL_LOCK(aspace);
-        ret = mapper(aspace->page_table, v_addr, p_addr, attr);
+        ret = _kernel_map_4K(aspace->page_table, v_addr, p_addr, attr);
         MM_PGTBL_UNLOCK(aspace);
 
         if (ret != 0)
@@ -299,12 +212,12 @@ void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
                 MM_PGTBL_LOCK(aspace);
                 _kenrel_unmap_4K(aspace->page_table, (void *)unmap_va);
                 MM_PGTBL_UNLOCK(aspace);
-                unmap_va = (char *)unmap_va + stride;
+                unmap_va += ARCH_PAGE_SIZE;
             }
             break;
         }
-        v_addr = (char *)v_addr + stride;
-        p_addr = (char *)p_addr + stride;
+        v_addr += ARCH_PAGE_SIZE;
+        p_addr += ARCH_PAGE_SIZE;
     }
 
     if (ret == 0)
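
Note: with _kernel_map_2M gone, rt_hw_mmu_map always walks in 4 KiB steps; the deleted branch used 2 MiB block descriptors whenever both the virtual address and the size were section-aligned (the restored // TODO comment marks where a huge-page path would go again). The practical cost is iteration count and level-3 table pressure; illustrative arithmetic only, assuming the usual 4 KiB / 2 MiB granules:

    #define SZ_4K (1ul << 12)  /* ARCH_PAGE_SIZE */
    #define SZ_2M (1ul << 21)  /* ARCH_SECTION_SIZE */
    #define SZ_1G (1ul << 30)

    /* Iterations needed to map one 1 GiB region: */
    static const unsigned long calls_4k = SZ_1G / SZ_4K; /* 262144 x _kernel_map_4K */
    static const unsigned long calls_2m = SZ_1G / SZ_2M; /* 512 x _kernel_map_2M,
                                                            no level-3 tables needed */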
@@ -330,7 +243,7 @@ void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size)
         MM_PGTBL_LOCK(aspace);
         _kenrel_unmap_4K(aspace->page_table, v_addr);
         MM_PGTBL_UNLOCK(aspace);
-        v_addr = (char *)v_addr + ARCH_PAGE_SIZE;
+        v_addr += ARCH_PAGE_SIZE;
     }
 }
 
@@ -340,7 +253,7 @@ void rt_hw_aspace_switch(rt_aspace_t aspace)
     {
         void *pgtbl = aspace->page_table;
         pgtbl = rt_kmem_v2p(pgtbl);
-        rt_ubase_t tcr;
+        uintptr_t tcr;
 
         __asm__ volatile("msr ttbr0_el1, %0" ::"r"(pgtbl) : "memory");
 
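Note: this hunk only shows the TTBR0_EL1 retarget; the rest of rt_hw_aspace_switch (the tcr fixup and any TLB maintenance) falls outside the context lines. A hypothetical sketch, not taken from this commit, of what generally has to follow a TTBR0 write before the new tables can be trusted:

    /* Hypothetical tail of an address-space switch on AArch64. */
    static inline void aspace_switch_tail_sketch(void)
    {
        __asm__ volatile ("dsb ish");      /* complete the TTBR0 write */
        __asm__ volatile ("tlbi vmalle1"); /* drop all EL1 TLB entries */
        __asm__ volatile ("dsb ish");      /* wait for TLB maintenance */
        __asm__ volatile ("isb");          /* refetch with the new tables */
    }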
@@ -423,19 +336,20 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
     rt_page_cleanup();
 }
 
+
 #ifdef RT_USING_SMART
-static void _init_region(void *vaddr, size_t size)
+static inline void _init_region(void *vaddr, size_t size)
 {
     rt_ioremap_start = vaddr;
     rt_ioremap_size = size;
-    rt_mpr_start = (char *)rt_ioremap_start - rt_mpr_size;
+    rt_mpr_start = rt_ioremap_start - rt_mpr_size;
 }
 #else
 
-#define RTOS_VEND (0xfffffffff000UL)
+#define RTOS_VEND ((void *)0xfffffffff000UL)
 static inline void _init_region(void *vaddr, size_t size)
 {
-    rt_mpr_start = (void *)(RTOS_VEND - rt_mpr_size);
+    rt_mpr_start = RTOS_VEND - rt_mpr_size;
 }
 #endif
 
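Note: the restored lines do arithmetic directly on void * (rt_ioremap_start - rt_mpr_size, the void * RTOS_VEND), which is a GNU C extension that treats sizeof(void) as 1; the commit being reverted had introduced the portable (char *) casts. A minimal illustration of the two spellings (illustrative values):

    #include <stddef.h>

    static void pointer_arith_example(void)
    {
        void  *end      = (void *)0xfffffffff000UL;
        size_t mpr_size = 0x1000;

        void *gnu_style = end - mpr_size;         /* GNU extension only */
        void *iso_style = (char *)end - mpr_size; /* strict ISO C spelling */
        (void)gnu_style;
        (void)iso_style;
    }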
@@ -480,7 +394,7 @@ int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, size_t size,
     rt_aspace_init(aspace, (void *)KERNEL_VADDR_START, 0 - KERNEL_VADDR_START,
                    vtable);
 #else
-    rt_aspace_init(aspace, (void *)0x1000, RTOS_VEND - 0x1000ul, vtable);
+    rt_aspace_init(aspace, (void *)0x1000, RTOS_VEND - (void *)0x1000, vtable);
 #endif
 
     _init_region(v_address, size);
@@ -671,35 +585,26 @@ void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *v_addr)
 {
     int level_shift;
     unsigned long paddr;
+    unsigned long *pte = _query(aspace, v_addr, &level_shift);
 
-    if (aspace == &rt_kernel_space)
+    if (pte)
     {
-        paddr = (unsigned long)rt_hw_mmu_kernel_v2p(v_addr);
+        paddr = *pte & MMU_ADDRESS_MASK;
+        paddr |= (uintptr_t)v_addr & ((1ul << level_shift) - 1);
     }
     else
     {
-        unsigned long *pte = _query(aspace, v_addr, &level_shift);
-
-        if (pte)
-        {
-            paddr = *pte & MMU_ADDRESS_MASK;
-            paddr |= (rt_ubase_t)v_addr & ((1ul << level_shift) - 1);
-        }
-        else
-        {
-            paddr = (unsigned long)ARCH_MAP_FAILED;
-        }
+        paddr = (unsigned long)ARCH_MAP_FAILED;
     }
-
     return (void *)paddr;
 }
 
-static int _noncache(rt_ubase_t *pte)
+static int _noncache(uintptr_t *pte)
 {
     int err = 0;
-    const rt_ubase_t idx_shift = 2;
-    const rt_ubase_t idx_mask = 0x7 << idx_shift;
-    rt_ubase_t entry = *pte;
+    const uintptr_t idx_shift = 2;
+    const uintptr_t idx_mask = 0x7 << idx_shift;
+    uintptr_t entry = *pte;
 if ((entry & idx_mask) == (NORMAL_MEM << idx_shift))
 {
     *pte = (entry & ~idx_mask) | (NORMAL_NOCACHE_MEM << idx_shift);
@@ -712,12 +617,12 @@ static int _noncache(rt_ubase_t *pte)
     return err;
 }
 
-static int _cache(rt_ubase_t *pte)
+static int _cache(uintptr_t *pte)
 {
     int err = 0;
-    const rt_ubase_t idx_shift = 2;
-    const rt_ubase_t idx_mask = 0x7 << idx_shift;
-    rt_ubase_t entry = *pte;
+    const uintptr_t idx_shift = 2;
+    const uintptr_t idx_mask = 0x7 << idx_shift;
+    uintptr_t entry = *pte;
     if ((entry & idx_mask) == (NORMAL_NOCACHE_MEM << idx_shift))
     {
         *pte = (entry & ~idx_mask) | (NORMAL_MEM << idx_shift);
@@ -730,7 +635,7 @@ static int _cache(rt_ubase_t *pte)
     return err;
 }
 
-static int (*control_handler[MMU_CNTL_DUMMY_END])(rt_ubase_t *pte) = {
+static int (*control_handler[MMU_CNTL_DUMMY_END])(uintptr_t *pte) = {
     [MMU_CNTL_CACHE] = _cache,
     [MMU_CNTL_NONCACHE] = _noncache,
 };
@@ -740,26 +645,25 @@ int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
 {
     int level_shift;
     int err = -RT_EINVAL;
-    rt_ubase_t vstart = (rt_ubase_t)vaddr;
-    rt_ubase_t vend = vstart + size;
+    void *vend = vaddr + size;
 
-    int (*handler)(rt_ubase_t * pte);
+    int (*handler)(uintptr_t * pte);
     if (cmd >= 0 && cmd < MMU_CNTL_DUMMY_END)
     {
         handler = control_handler[cmd];
 
-        while (vstart < vend)
+        while (vaddr < vend)
         {
-            rt_ubase_t *pte = _query(aspace, (void *)vstart, &level_shift);
-            rt_ubase_t range_end = vstart + (1ul << level_shift);
+            uintptr_t *pte = _query(aspace, vaddr, &level_shift);
+            void *range_end = vaddr + (1ul << level_shift);
             RT_ASSERT(range_end <= vend);
 
             if (pte)
             {
                 err = handler(pte);
                 RT_ASSERT(err == RT_EOK);
             }
-            vstart = range_end;
+            vaddr = range_end;
         }
     }
     else
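
Note: apart from the rt_ubase_t to uintptr_t churn, _cache and _noncache work by rewriting the memory-attribute index of a leaf descriptor: on AArch64, bits [4:2] of a page/block descriptor hold AttrIndx, an index into MAIR_EL1, which is why idx_shift is 2 and idx_mask spans three bits. A sketch of the same pattern (set_attr_index is a hypothetical helper; a real caller must still flush the updated PTE and invalidate the TLB entry, as rt_hw_mmu_control's callers do):

    #include <stdint.h>

    /* Retarget a descriptor's AttrIndx field (bits [4:2]) at another
     * MAIR_EL1 slot, e.g. NORMAL_MEM <-> NORMAL_NOCACHE_MEM. */
    static void set_attr_index(uintptr_t *pte, uintptr_t new_idx)
    {
        const uintptr_t idx_shift = 2;
        const uintptr_t idx_mask  = (uintptr_t)0x7 << idx_shift;

        *pte = (*pte & ~idx_mask) | ((new_idx << idx_shift) & idx_mask);
    }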

libcpu/aarch64/common/mmu.h (-22)
@@ -97,28 +97,6 @@ static inline void *rt_hw_mmu_tbl_get()
     return (void *)(tbl & ((1ul << 48) - 2));
 }
 
-static inline void *rt_hw_mmu_kernel_v2p(void *v_addr)
-{
-    rt_ubase_t par;
-    void *paddr;
-    asm volatile("at s1e1w, %0"::"r"(v_addr):"memory");
-    asm volatile("mrs %0, par_el1":"=r"(par)::"memory");
-
-    if (par & 0x1)
-    {
-        paddr = ARCH_MAP_FAILED;
-    }
-    else
-    {
-#define MMU_ADDRESS_MASK 0x0000fffffffff000UL
-        par &= MMU_ADDRESS_MASK;
-        par |= (rt_ubase_t)v_addr & ARCH_PAGE_MASK;
-        paddr = (void *)par;
-    }
-
-    return paddr;
-}
-
 int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                       enum rt_mmu_cntl cmd);
 
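Note: removing rt_hw_mmu_kernel_v2p also removes the hardware-assisted translation path: AT S1E1W asks the MMU itself to translate a virtual address, and PAR_EL1 then reports either the physical address or, when bit 0 is set, a fault. After this revert, rt_hw_mmu_v2p always software-walks the tables via _query instead. A condensed restatement of the removed decode logic (decode_par_el1 is a hypothetical name, and the failure value is assumed to match ARCH_MAP_FAILED):

    /* PAR_EL1 layout used here: bit 0 = fault flag, bits [47:12] = frame. */
    static inline void *decode_par_el1(unsigned long par, void *v_addr)
    {
        if (par & 0x1)
            return (void *)-1;               /* assumed ARCH_MAP_FAILED value */
        par &= 0x0000fffffffff000UL;         /* keep the physical frame bits */
        return (void *)(par | ((unsigned long)v_addr & 0xfffUL));
    }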