/*
- * Copyright (c) 2006-2023, RT-Thread Development Team
+ * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-01-10     bernard      porting to AM1808
- * 2021-11-28     GuEe-GUI     first version
- * 2022-12-10     WangXiaoyao  porting to MM
 */
+
#include <board.h>
#include <rthw.h>
#include <rtthread.h>
@@ -80,7 +79,6 @@ static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
{
break;
}
- /* next table entry in current level */
level_info[level].pos = cur_lv_tbl + off;
cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
@@ -189,104 +187,19 @@ static int _kernel_map_4K(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr)
return ret;
}

- static int _kernel_map_2M(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr)
- {
- int ret = 0;
- int level;
- unsigned long *cur_lv_tbl = lv0_tbl;
- unsigned long page;
- unsigned long off;
- unsigned long va = (unsigned long)vaddr;
- unsigned long pa = (unsigned long)paddr;
-
- int level_shift = MMU_ADDRESS_BITS;
-
- if (va & ARCH_SECTION_MASK)
- {
- return MMU_MAP_ERROR_VANOTALIGN;
- }
- if (pa & ARCH_SECTION_MASK)
- {
- return MMU_MAP_ERROR_PANOTALIGN;
- }
- for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
- {
- off = (va >> level_shift);
- off &= MMU_LEVEL_MASK;
- if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
- {
- page = (unsigned long)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
- if (!page)
- {
- ret = MMU_MAP_ERROR_NOPAGE;
- goto err;
- }
- rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
- rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
- cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
- rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
- }
- else
- {
- page = cur_lv_tbl[off];
- page &= MMU_ADDRESS_MASK;
- /* page to va */
- page -= PV_OFFSET;
- rt_page_ref_inc((void *)page, 0);
- }
- page = cur_lv_tbl[off];
- if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
- {
- /* is block! error! */
- ret = MMU_MAP_ERROR_CONFLICT;
- goto err;
- }
- cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
- cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
- level_shift -= MMU_LEVEL_SHIFT;
- }
- /* now is level page */
- attr &= MMU_ATTRIB_MASK;
- pa |= (attr | MMU_TYPE_BLOCK); /* block */
- off = (va >> ARCH_SECTION_SHIFT);
- off &= MMU_LEVEL_MASK;
- cur_lv_tbl[off] = pa;
- rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
- return ret;
- err:
- _kenrel_unmap_4K(lv0_tbl, (void *)va);
- return ret;
- }
-
void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
size_t attr)
{
int ret = -1;

void *unmap_va = v_addr;
- size_t npages;
- size_t stride;
- int (*mapper)(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr);
-
- if (((rt_ubase_t)v_addr & ARCH_SECTION_MASK) || (size & ARCH_SECTION_MASK))
- {
- /* legacy 4k mapping */
- npages = size >> ARCH_PAGE_SHIFT;
- stride = ARCH_PAGE_SIZE;
- mapper = _kernel_map_4K;
- }
- else
- {
- /* 2m huge page */
- npages = size >> ARCH_SECTION_SHIFT;
- stride = ARCH_SECTION_SIZE;
- mapper = _kernel_map_2M;
- }
+ size_t npages = size >> ARCH_PAGE_SHIFT;

+ // TODO trying with HUGEPAGE here
while (npages--)
{
MM_PGTBL_LOCK(aspace);
- ret = mapper(aspace->page_table, v_addr, p_addr, attr);
+ ret = _kernel_map_4K(aspace->page_table, v_addr, p_addr, attr);
MM_PGTBL_UNLOCK(aspace);

if (ret != 0)
@@ -299,12 +212,12 @@ void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
MM_PGTBL_LOCK(aspace);
_kenrel_unmap_4K(aspace->page_table, (void *)unmap_va);
MM_PGTBL_UNLOCK(aspace);
- unmap_va = (char *)unmap_va + stride;
+ unmap_va += ARCH_PAGE_SIZE;
}
break;
}
- v_addr = (char *)v_addr + stride;
- p_addr = (char *)p_addr + stride;
+ v_addr += ARCH_PAGE_SIZE;
+ p_addr += ARCH_PAGE_SIZE;
}

if (ret == 0)
@@ -330,7 +243,7 @@ void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size)
MM_PGTBL_LOCK(aspace);
_kenrel_unmap_4K(aspace->page_table, v_addr);
MM_PGTBL_UNLOCK(aspace);
- v_addr = (char *)v_addr + ARCH_PAGE_SIZE;
+ v_addr += ARCH_PAGE_SIZE;
}
}

@@ -340,7 +253,7 @@ void rt_hw_aspace_switch(rt_aspace_t aspace)
{
void *pgtbl = aspace->page_table;
pgtbl = rt_kmem_v2p(pgtbl);
- rt_ubase_t tcr;
+ uintptr_t tcr;

__asm__ volatile("msr ttbr0_el1, %0" ::"r"(pgtbl) : "memory");

@@ -423,19 +336,20 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
rt_page_cleanup();
}

+
#ifdef RT_USING_SMART
- static void _init_region(void *vaddr, size_t size)
+ static inline void _init_region(void *vaddr, size_t size)
{
rt_ioremap_start = vaddr;
rt_ioremap_size = size;
- rt_mpr_start = (char *)rt_ioremap_start - rt_mpr_size;
+ rt_mpr_start = rt_ioremap_start - rt_mpr_size;
}
#else

- #define RTOS_VEND (0xfffffffff000UL)
+ #define RTOS_VEND ((void *)0xfffffffff000UL)
static inline void _init_region(void *vaddr, size_t size)
{
- rt_mpr_start = (void *)(RTOS_VEND - rt_mpr_size);
+ rt_mpr_start = RTOS_VEND - rt_mpr_size;
}
#endif

@@ -480,7 +394,7 @@ int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, size_t size,
rt_aspace_init(aspace, (void *)KERNEL_VADDR_START, 0 - KERNEL_VADDR_START,
vtable);
#else
- rt_aspace_init(aspace, (void *)0x1000, RTOS_VEND - 0x1000ul, vtable);
+ rt_aspace_init(aspace, (void *)0x1000, RTOS_VEND - (void *)0x1000, vtable);
#endif

_init_region(v_address, size);
@@ -671,35 +585,26 @@ void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *v_addr)
{
int level_shift;
unsigned long paddr;
+ unsigned long *pte = _query(aspace, v_addr, &level_shift);

- if (aspace == &rt_kernel_space)
+ if (pte)
{
- paddr = (unsigned long)rt_hw_mmu_kernel_v2p(v_addr);
+ paddr = *pte & MMU_ADDRESS_MASK;
+ paddr |= (uintptr_t)v_addr & ((1ul << level_shift) - 1);
}
else
{
- unsigned long *pte = _query(aspace, v_addr, &level_shift);
-
- if (pte)
- {
- paddr = *pte & MMU_ADDRESS_MASK;
- paddr |= (rt_ubase_t)v_addr & ((1ul << level_shift) - 1);
- }
- else
- {
- paddr = (unsigned long)ARCH_MAP_FAILED;
- }
+ paddr = (unsigned long)ARCH_MAP_FAILED;
}
-
return (void *)paddr;
}

- static int _noncache(rt_ubase_t *pte)
+ static int _noncache(uintptr_t *pte)
{
int err = 0;
- const rt_ubase_t idx_shift = 2;
- const rt_ubase_t idx_mask = 0x7 << idx_shift;
- rt_ubase_t entry = *pte;
+ const uintptr_t idx_shift = 2;
+ const uintptr_t idx_mask = 0x7 << idx_shift;
+ uintptr_t entry = *pte;
if ((entry & idx_mask) == (NORMAL_MEM << idx_shift))
{
*pte = (entry & ~idx_mask) | (NORMAL_NOCACHE_MEM << idx_shift);
@@ -712,12 +617,12 @@ static int _noncache(rt_ubase_t *pte)
return err;
}

- static int _cache(rt_ubase_t *pte)
+ static int _cache(uintptr_t *pte)
{
int err = 0;
- const rt_ubase_t idx_shift = 2;
- const rt_ubase_t idx_mask = 0x7 << idx_shift;
- rt_ubase_t entry = *pte;
+ const uintptr_t idx_shift = 2;
+ const uintptr_t idx_mask = 0x7 << idx_shift;
+ uintptr_t entry = *pte;
if ((entry & idx_mask) == (NORMAL_NOCACHE_MEM << idx_shift))
{
*pte = (entry & ~idx_mask) | (NORMAL_MEM << idx_shift);
@@ -730,7 +635,7 @@ static int _cache(rt_ubase_t *pte)
return err;
}

- static int (*control_handler[MMU_CNTL_DUMMY_END])(rt_ubase_t *pte) = {
+ static int (*control_handler[MMU_CNTL_DUMMY_END])(uintptr_t *pte) = {
[MMU_CNTL_CACHE] = _cache,
[MMU_CNTL_NONCACHE] = _noncache,
};
@@ -740,26 +645,25 @@ int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
{
int level_shift;
int err = -RT_EINVAL;
- rt_ubase_t vstart = (rt_ubase_t)vaddr;
- rt_ubase_t vend = vstart + size;
+ void *vend = vaddr + size;

- int (*handler)(rt_ubase_t *pte);
+ int (*handler)(uintptr_t *pte);
if (cmd >= 0 && cmd < MMU_CNTL_DUMMY_END)
{
handler = control_handler[cmd];

- while (vstart < vend)
+ while (vaddr < vend)
{
- rt_ubase_t *pte = _query(aspace, (void *)vstart, &level_shift);
- rt_ubase_t range_end = vstart + (1ul << level_shift);
+ uintptr_t *pte = _query(aspace, vaddr, &level_shift);
+ void *range_end = vaddr + (1ul << level_shift);
RT_ASSERT(range_end <= vend);

if (pte)
{
err = handler(pte);
RT_ASSERT(err == RT_EOK);
}
- vstart = range_end;
+ vaddr = range_end;
}
}
else