Skip to content

Commit beee77f

Browse files
authored
feat: support ARCH_REMAP_KERNEL on libcpu/c906 (#9123)
feat: [libcpu/c906] support ARCH_REMAP_KERNEL. This change was necessary to enable the remapping of the kernel image to a high virtual address region on the c906 platform. Changes: - Introduced new configuration options `ARCH_REMAP_KERNEL` and `ARCH_USING_ASID` under the `ARCH_RISCV64` section. - Updated MMU initialization and switching functions to incorporate remapping handling. - Modified page table setup for proper memory attribute settings. - Added support for early memory setup and kernel remapping. - Added conditional compilation for ASID support in the `rt_aspace` struct, since ASID is not currently enabled for most architectures. Signed-off-by: Shell <[email protected]>
1 parent 6180dab commit beee77f

File tree

4 files changed

+85
-17
lines changed

4 files changed

+85
-17
lines changed

components/mm/mm_aspace.h

+4
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,11 @@ typedef struct rt_aspace
4949
struct rt_mutex bst_lock;
5050

5151
struct rt_mem_obj *private_object;
52+
53+
#ifdef ARCH_USING_ASID
5254
rt_uint64_t asid;
55+
#endif /* ARCH_USING_ASID */
56+
5357
} *rt_aspace_t;
5458

5559
typedef struct rt_varea

libcpu/Kconfig

+11-7
Original file line numberDiff line numberDiff line change
@@ -262,13 +262,17 @@ config ARCH_RISCV64
262262
select ARCH_CPU_64BIT
263263
bool
264264

265-
if ARCH_RISCV64
266-
config ARCH_REMAP_KERNEL
267-
bool
268-
depends on RT_USING_SMART
269-
help
270-
Remapping kernel image to high virtual address region
271-
endif
265+
config ARCH_REMAP_KERNEL
266+
bool
267+
depends on RT_USING_SMART
268+
help
269+
Remapping kernel image to high virtual address region
270+
271+
config ARCH_USING_ASID
272+
bool
273+
depends on RT_USING_SMART
274+
help
275+
Using ASID support from architecture
272276

273277
config ARCH_IA32
274278
bool

libcpu/risc-v/t-head/c906/mmu.c

+25-3
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,7 @@ static void *current_mmu_table = RT_NULL;
4343
volatile __attribute__((aligned(4 * 1024)))
4444
rt_ubase_t MMUTable[__SIZE(VPN2_BIT)];
4545

46+
#ifdef ARCH_USING_ASID
4647
static rt_uint8_t ASID_BITS = 0;
4748
static rt_uint32_t next_asid;
4849
static rt_uint64_t global_asid_generation;
@@ -109,6 +110,24 @@ void rt_hw_aspace_switch(rt_aspace_t aspace)
109110
asm volatile("sfence.vma x0,%0"::"r"(asid):"memory");
110111
}
111112

113+
#define ASID_INIT() _asid_init()
114+
115+
#else /* ARCH_USING_ASID */
116+
117+
#define ASID_INIT()
118+
119+
void rt_hw_aspace_switch(rt_aspace_t aspace)
120+
{
121+
uintptr_t page_table = (uintptr_t)rt_kmem_v2p(aspace->page_table);
122+
current_mmu_table = aspace->page_table;
123+
124+
write_csr(satp, (((size_t)SATP_MODE) << SATP_MODE_OFFSET) |
125+
((rt_ubase_t)page_table >> PAGE_OFFSET_BIT));
126+
rt_hw_tlb_invalidate_all_local();
127+
}
128+
129+
#endif /* ARCH_USING_ASID */
130+
112131
void *rt_hw_mmu_tbl_get()
113132
{
114133
return current_mmu_table;
@@ -552,7 +571,7 @@ void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
552571
mdesc++;
553572
}
554573

555-
_asid_init();
574+
ASID_INIT();
556575

557576
rt_hw_aspace_switch(&rt_kernel_space);
558577
rt_page_cleanup();
@@ -601,13 +620,15 @@ void rt_hw_mem_setup_early(void)
601620
LOG_E("%s: not aligned virtual address. pv_offset %p", __func__, pv_off);
602621
RT_ASSERT(0);
603622
}
623+
604624
/**
605625
* identical mapping,
606626
* PC are still at lower region before relocating to high memory
607627
*/
608628
for (size_t i = 0; i < __SIZE(PPN0_BIT); i++)
609629
{
610-
early_pgtbl[i] = COMBINEPTE(ps, PAGE_ATTR_RWX | PTE_G | PTE_V);
630+
early_pgtbl[i] = COMBINEPTE(ps, PAGE_ATTR_RWX | PTE_G | PTE_V | PTE_CACHE |
631+
PTE_SHARE | PTE_BUF | PTE_A | PTE_D);
611632
ps += L1_PAGE_SIZE;
612633
}
613634

@@ -621,7 +642,8 @@ void rt_hw_mem_setup_early(void)
621642
rt_size_t ve_idx = GET_L1(vs + 0x80000000);
622643
for (size_t i = vs_idx; i < ve_idx; i++)
623644
{
624-
early_pgtbl[i] = COMBINEPTE(ps, PAGE_ATTR_RWX | PTE_G | PTE_V);
645+
early_pgtbl[i] = COMBINEPTE(ps, PAGE_ATTR_RWX | PTE_G | PTE_V | PTE_CACHE |
646+
PTE_SHARE | PTE_BUF | PTE_A | PTE_D);
625647
ps += L1_PAGE_SIZE;
626648
}
627649

libcpu/risc-v/t-head/c906/startup_gcc.S

+45-7
Original file line numberDiff line numberDiff line change
@@ -8,13 +8,16 @@
88
* 2018/10/01 Bernard The first version
99
* 2018/12/27 Jesven Add SMP support
1010
* 2020/6/12 Xim Port to QEMU and remove SMP support
11+
* 2024-06-30 Shell Support of kernel remapping
1112
*/
1213

1314
#include <encoding.h>
1415
#include <cpuport.h>
1516

16-
boot_hartid: .int
17-
.global boot_hartid
17+
.data
18+
.global boot_hartid /* global varible rt_boot_hartid in .data section */
19+
boot_hartid:
20+
.word 0xdeadbeef
1821

1922
.global _start
2023
.section ".start", "ax"
@@ -72,8 +75,6 @@ _start:
7275
li x31,0
7376

7477
/* set to disable FPU */
75-
li t0, SSTATUS_FS
76-
csrc sstatus, t0
7778
li t0, SSTATUS_SUM
7879
csrs sstatus, t0
7980

@@ -86,8 +87,45 @@ _start:
8687
la sp, __stack_start__
8788
li t0, __STACKSIZE__
8889
add sp, sp, t0
89-
csrw sscratch, sp
9090

91+
/**
92+
* sscratch is always zero on kernel mode
93+
*/
94+
csrw sscratch, zero
9195
call init_bss
92-
call sbi_init
93-
j primary_cpu_entry
96+
#ifdef ARCH_MM_MMU
97+
call rt_hw_mem_setup_early
98+
call rt_kmem_pvoff
99+
/* a0 := pvoff */
100+
beq a0, zero, 1f
101+
102+
/* relocate pc */
103+
la x1, _after_pc_relocation
104+
sub x1, x1, a0
105+
ret
106+
_after_pc_relocation:
107+
/* relocate gp */
108+
sub gp, gp, a0
109+
110+
/* relocate context: sp */
111+
la sp, __stack_start__
112+
li t0, __STACKSIZE__
113+
add sp, sp, t0
114+
115+
/* reset s0-fp */
116+
mv s0, zero
117+
118+
/* relocate stvec */
119+
la t0, trap_entry
120+
csrw stvec, t0
121+
1:
122+
#endif
123+
call sbi_init
124+
call primary_cpu_entry
125+
126+
_never_return_here:
127+
j .
128+
129+
.global _start_link_addr
130+
_start_link_addr:
131+
.dword __text_start

0 commit comments

Comments
 (0)