Skip to content

Commit 2fa5e9e

Browse files
committed
[mm/page] multi-list page manager
[mm/page] page debugger [libcpu/aarch64] hugepage support
1 parent ca1b1e5 commit 2fa5e9e

File tree

17 files changed

+546
-165
lines changed

17 files changed

+546
-165
lines changed

components/drivers/virtio/virtio_net.c

+2-1
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
#include <rthw.h>
1212
#include <rtthread.h>
1313
#include <cpuport.h>
14+
#include <mm_aspace.h>
1415

1516
#ifdef RT_USING_VIRTIO_NET
1617

@@ -106,7 +107,7 @@ static struct pbuf *virtio_net_rx(rt_device_t dev)
106107
#ifdef RT_USING_SMP
107108
level = rt_spin_lock_irqsave(&virtio_dev->spinlock);
108109
#endif
109-
rt_memcpy(p->payload, (void *)VIRTIO_PA2VA(queue_rx->desc[id].addr), len);
110+
rt_memcpy(p->payload, (void *)queue_rx->desc[id].addr - PV_OFFSET, len);
110111

111112
queue_rx->used_idx++;
112113

components/lwp/arch/aarch64/cortex-a/lwp_arch.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ int arch_user_space_init(struct rt_lwp *lwp)
2626
{
2727
size_t *mmu_table;
2828

29-
mmu_table = (size_t *)rt_pages_alloc(0);
29+
mmu_table = (size_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
3030
if (!mmu_table)
3131
{
3232
return -RT_ENOMEM;

components/lwp/arch/risc-v/rv64/lwp_arch.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,7 @@ int arch_user_space_init(struct rt_lwp *lwp)
9191
{
9292
rt_ubase_t *mmu_table;
9393

94-
mmu_table = (rt_ubase_t *)rt_pages_alloc(0);
94+
mmu_table = (rt_ubase_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
9595
if (!mmu_table)
9696
{
9797
return -RT_ENOMEM;

components/lwp/arch/x86/i386/lwp_arch.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,7 @@ int arch_user_space_init(struct rt_lwp *lwp)
8282
{
8383
rt_size_t *mmu_table;
8484

85-
mmu_table = (rt_size_t *)rt_pages_alloc(0);
85+
mmu_table = (rt_size_t *)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
8686
if (!mmu_table)
8787
{
8888
return -1;

components/lwp/lwp.c

+13
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
* 2018-11-02 heyuanjie fix compile error in iar
1010
* 2021-02-03 lizhirui add 64-bit arch support and riscv64 arch support
1111
* 2021-08-26 linzhenxing add lwp_setcwd\lwp_getcwd
12+
* 2023-02-20 wangxiaoyao inv icache before new app startup
1213
*/
1314

1415
#include <rthw.h>
@@ -1097,6 +1098,18 @@ static void _lwp_thread_entry(void *parameter)
10971098
icache_invalid_all();
10981099
}
10991100

1101+
/**
1102+
* Without ASID support, there is a special case when trying to run an application
1103+
* and exit it multiple times, where the same page frame allocated to it is bound to
1104+
* a different text segment. Then we are in a situation where the icache contains
1105+
* out-of-date data that must be handled by the running core itself.
1106+
* With ASID support, this should be the rare case in which both the ASID and page frame are
1107+
* identical to those of the previously running application.
1108+
*
1109+
* For a new application loaded into memory, the icache is seen as empty, and there
1110+
* should be no icache entry to match. So this icache invalidation
1111+
* operation should have barely any influence.
1112+
*/
11001113
rt_hw_icache_invalidate_all();
11011114

11021115
#ifdef ARCH_MM_MMU

components/lwp/lwp_shm.c

+2-3
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
* Change Logs:
77
* Date Author Notes
88
* 2019-10-12 Jesven first version
9+
* 2023-02-20 wangxiaoyao adapt to mm
910
*/
1011
#include <rthw.h>
1112
#include <rtthread.h>
@@ -17,8 +18,6 @@
1718

1819
#include <lwp_user_mm.h>
1920
#include <mmu.h>
20-
#include <mm_aspace.h>
21-
#include <mm_flag.h>
2221

2322
/* the kernel structure to represent a share-memory */
2423
struct lwp_shm_struct
@@ -140,7 +139,7 @@ static int _lwp_shmget(size_t key, size_t size, int create)
140139

141140
/* allocate pages up to 2's exponent to cover the required size */
142141
bit = rt_page_bits(size);
143-
page_addr = rt_pages_alloc(bit); /* virtual address */
142+
page_addr = rt_pages_alloc_ext(bit, PAGE_ANY_AVAILABLE); /* virtual address */
144143
if (!page_addr)
145144
{
146145
goto err;

components/lwp/lwp_syscall.c

+3-3
Original file line numberDiff line numberDiff line change
@@ -1861,7 +1861,7 @@ static char *_insert_args(int new_argc, char *new_argv[], struct lwp_args_info *
18611861
{
18621862
goto quit;
18631863
}
1864-
page = rt_pages_alloc(0); /* 1 page */
1864+
page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE); /* 1 page */
18651865
if (!page)
18661866
{
18671867
goto quit;
@@ -2065,7 +2065,7 @@ int load_ldso(struct rt_lwp *lwp, char *exec_name, char *const argv[], char *con
20652065
}
20662066
}
20672067

2068-
page = rt_pages_alloc(0); /* 1 page */
2068+
page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE); /* 1 page */
20692069
if (!page)
20702070
{
20712071
SET_ERRNO(ENOMEM);
@@ -2252,7 +2252,7 @@ sysret_t sys_execve(const char *path, char *const argv[], char *const envp[])
22522252
SET_ERRNO(EINVAL);
22532253
goto quit;
22542254
}
2255-
page = rt_pages_alloc(0); /* 1 page */
2255+
page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE); /* 1 page */
22562256
if (!page)
22572257
{
22582258
SET_ERRNO(ENOMEM);

components/lwp/lwp_user_mm.c

+2-1
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
* 2021-02-12 lizhirui add 64-bit support for lwp_brk
1111
* 2021-02-19 lizhirui add riscv64 support for lwp_user_accessable and lwp_get_from_user
1212
* 2021-06-07 lizhirui modify user space bound check
13+
* 2022-12-25 wangxiaoyao adapt to new mm
1314
*/
1415

1516
#include <rtthread.h>
@@ -130,7 +131,7 @@ static void _user_do_page_fault(struct rt_varea *varea,
130131

131132
if (!(varea->flag & MMF_TEXT))
132133
{
133-
void *cp = rt_pages_alloc(0);
134+
void *cp = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
134135
if (cp)
135136
{
136137
memcpy(cp, vaddr, ARCH_PAGE_SIZE);

components/mm/mm_aspace.c

+33-39
Original file line numberDiff line numberDiff line change
@@ -134,24 +134,16 @@ static int _do_named_map(rt_aspace_t aspace, void *vaddr, rt_size_t length,
134134
int err = RT_EOK;
135135

136136
/* it's ensured by caller that (void*)end will not overflow */
137-
void *end = vaddr + length;
138137
void *phyaddr = (void *)(offset << MM_PAGE_SHIFT);
139-
while (vaddr != end)
138+
139+
void *ret = rt_hw_mmu_map(aspace, vaddr, phyaddr, length, attr);
140+
if (ret == RT_NULL)
140141
{
141-
/* TODO try to map with huge TLB, when flag & HUGEPAGE */
142-
rt_size_t pgsz = ARCH_PAGE_SIZE;
143-
void *ret = rt_hw_mmu_map(aspace, vaddr, phyaddr, pgsz, attr);
144-
if (ret == RT_NULL)
145-
{
146-
err = -RT_ERROR;
147-
break;
148-
}
149-
vaddr += pgsz;
150-
phyaddr += pgsz;
142+
err = -RT_ERROR;
151143
}
152144

153145
if (err == RT_EOK)
154-
rt_hw_tlb_invalidate_range(aspace, end - length, length, ARCH_PAGE_SIZE);
146+
rt_hw_tlb_invalidate_range(aspace, vaddr, length, ARCH_PAGE_SIZE);
155147

156148
return err;
157149
}
@@ -164,7 +156,7 @@ rt_inline void _do_page_fault(struct rt_aspace_fault_msg *msg, rt_size_t off,
164156
msg->fault_vaddr = vaddr;
165157
msg->fault_op = MM_FAULT_OP_READ;
166158
msg->fault_type = MM_FAULT_TYPE_PAGE_FAULT;
167-
msg->response.status = -1;
159+
msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
168160
msg->response.vaddr = 0;
169161
msg->response.size = 0;
170162

@@ -180,9 +172,9 @@ int _varea_map_with_msg(rt_varea_t varea, struct rt_aspace_fault_msg *msg)
180172
* the page returned by handler is not checked
181173
* cause no much assumption can make on it
182174
*/
183-
void *store = msg->response.vaddr;
175+
char *store = msg->response.vaddr;
184176
rt_size_t store_sz = msg->response.size;
185-
if (msg->fault_vaddr + store_sz > varea->start + varea->size)
177+
if ((char *)msg->fault_vaddr + store_sz > (char *)varea->start + varea->size)
186178
{
187179
LOG_W("%s: too much (0x%lx) of buffer on vaddr %p is provided",
188180
__func__, store_sz, msg->fault_vaddr);
@@ -232,9 +224,9 @@ static int _do_prefetch(rt_aspace_t aspace, rt_varea_t varea, void *start,
232224
int err = RT_EOK;
233225

234226
/* it's ensured by caller that start & size ara page-aligned */
235-
void *end = start + size;
236-
void *vaddr = start;
237-
rt_size_t off = varea->offset + ((start - varea->start) >> ARCH_PAGE_SHIFT);
227+
char *end = (char *)start + size;
228+
char *vaddr = start;
229+
rt_size_t off = varea->offset + ((vaddr - (char *)varea->start) >> ARCH_PAGE_SHIFT);
238230

239231
while (vaddr != end)
240232
{
@@ -243,8 +235,10 @@ static int _do_prefetch(rt_aspace_t aspace, rt_varea_t varea, void *start,
243235
_do_page_fault(&msg, off, vaddr, varea->mem_obj, varea);
244236

245237
if (_varea_map_with_msg(varea, &msg))
238+
{
239+
err = -RT_ENOMEM;
246240
break;
247-
241+
}
248242
/**
249243
* It's hard to identify the mapping pattern on a customized handler
250244
* So we terminate the prefetch process on that case
@@ -386,7 +380,7 @@ rt_varea_t _varea_create(void *start, rt_size_t size)
386380
}
387381

388382
#define _IS_OVERFLOW(start, length) ((length) > (0ul - (uintptr_t)(start)))
389-
#define _IS_OVERSIZE(start, length, limit_s, limit_sz) (((length) + (rt_size_t)((start) - (limit_start))) > (limit_size))
383+
#define _IS_OVERSIZE(start, length, limit_s, limit_sz) (((length) + (rt_size_t)((char *)(start) - (char *)(limit_start))) > (limit_size))
390384

391385
static inline int _not_in_range(void *start, rt_size_t length,
392386
void *limit_start, rt_size_t limit_size)
@@ -642,7 +636,7 @@ int rt_aspace_unmap(rt_aspace_t aspace, void *addr)
642636
if (_not_in_range(addr, 1, aspace->start, aspace->size))
643637
{
644638
LOG_I("%s: %lx not in range of aspace[%lx:%lx]", __func__, addr,
645-
aspace->start, aspace->start + aspace->size);
639+
aspace->start, (char *)aspace->start + aspace->size);
646640
return -RT_EINVAL;
647641
}
648642

@@ -658,7 +652,7 @@ static inline void *_lower(void *a, void *b)
658652

659653
static inline void *_align(void *va, rt_ubase_t align_mask)
660654
{
661-
return (void *)((rt_ubase_t)(va + ~align_mask) & align_mask);
655+
return (void *)((rt_ubase_t)((char *)va + ~align_mask) & align_mask);
662656
}
663657

664658
static void *_ascending_search(rt_varea_t varea, rt_size_t req_size,
@@ -667,17 +661,17 @@ static void *_ascending_search(rt_varea_t varea, rt_size_t req_size,
667661
void *ret = RT_NULL;
668662
while (varea && varea->start < limit.end)
669663
{
670-
void *candidate = varea->start + varea->size;
664+
char *candidate = (char *)varea->start + varea->size;
671665
candidate = _align(candidate, align_mask);
672666

673-
if (candidate > limit.end || limit.end - candidate + 1 < req_size)
667+
if (candidate > (char *)limit.end || (char *)limit.end - candidate + 1 < req_size)
674668
break;
675669

676670
rt_varea_t nx_va = ASPACE_VAREA_NEXT(varea);
677671
if (nx_va)
678672
{
679673
rt_size_t gap_size =
680-
_lower(limit.end, nx_va->start - 1) - candidate + 1;
674+
(char *)_lower(limit.end, (char *)nx_va->start - 1) - candidate + 1;
681675
if (gap_size >= req_size)
682676
{
683677
ret = candidate;
@@ -703,15 +697,15 @@ static void *_find_head_and_asc_search(rt_aspace_t aspace, rt_size_t req_size,
703697
rt_varea_t varea = _aspace_bst_search_exceed(aspace, limit.start);
704698
if (varea)
705699
{
706-
void *candidate = _align(limit.start, align_mask);
707-
rt_size_t gap_size = varea->start - candidate;
700+
char *candidate = _align(limit.start, align_mask);
701+
rt_size_t gap_size = (char *)varea->start - candidate;
708702
if (gap_size >= req_size)
709703
{
710704
rt_varea_t former = _aspace_bst_search(aspace, limit.start);
711705
if (former)
712706
{
713-
candidate = _align(former->start + former->size, align_mask);
714-
gap_size = varea->start - candidate;
707+
candidate = _align((char *)former->start + former->size, align_mask);
708+
gap_size = (char *)varea->start - candidate;
715709

716710
if (gap_size >= req_size)
717711
va = candidate;
@@ -730,12 +724,12 @@ static void *_find_head_and_asc_search(rt_aspace_t aspace, rt_size_t req_size,
730724
}
731725
else
732726
{
733-
void *candidate;
727+
char *candidate;
734728
rt_size_t gap_size;
735729

736730
candidate = limit.start;
737731
candidate = _align(candidate, align_mask);
738-
gap_size = limit.end - candidate + 1;
732+
gap_size = (char *)limit.end - candidate + 1;
739733

740734
if (gap_size >= req_size)
741735
va = candidate;
@@ -750,7 +744,7 @@ static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
750744
{
751745
rt_varea_t varea = NULL;
752746
void *va = RT_NULL;
753-
struct _mm_range limit = {limit_start, limit_start + limit_size - 1};
747+
struct _mm_range limit = {limit_start, (char *)limit_start + limit_size - 1};
754748

755749
rt_ubase_t align_mask = ~0ul;
756750
if (flags & MMF_REQUEST_ALIGN)
@@ -762,7 +756,7 @@ static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
762756
{
763757
/* if prefer and free, just return the prefer region */
764758
prefer = _align(prefer, align_mask);
765-
struct _mm_range range = {prefer, prefer + req_size - 1};
759+
struct _mm_range range = {prefer, (char *)prefer + req_size - 1};
766760
varea = _aspace_bst_search_overlap(aspace, range);
767761

768762
if (!varea)
@@ -780,7 +774,7 @@ static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
780774
if (va == RT_NULL)
781775
{
782776
/* rewind to first range */
783-
limit.end = varea->start - 1;
777+
limit.end = (char *)varea->start - 1;
784778
va = _find_head_and_asc_search(aspace, req_size, align_mask,
785779
limit);
786780
}
@@ -798,7 +792,7 @@ int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage)
798792
{
799793
int err = RT_EOK;
800794
rt_varea_t varea;
801-
void *end = addr + (npage << ARCH_PAGE_SHIFT);
795+
char *end = (char *)addr + (npage << ARCH_PAGE_SHIFT);
802796

803797
WR_LOCK(aspace);
804798
varea = _aspace_bst_search(aspace, addr);
@@ -809,7 +803,7 @@ int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage)
809803
LOG_W("%s: varea not exist", __func__);
810804
err = -RT_ENOENT;
811805
}
812-
else if (addr >= end || (rt_size_t)addr & ARCH_PAGE_MASK ||
806+
else if ((char *)addr >= end || (rt_size_t)addr & ARCH_PAGE_MASK ||
813807
_not_in_range(addr, npage << ARCH_PAGE_SHIFT, varea->start,
814808
varea->size))
815809
{
@@ -938,12 +932,12 @@ static int _dump(rt_varea_t varea, void *arg)
938932
{
939933
if (varea->mem_obj && varea->mem_obj->get_name)
940934
{
941-
rt_kprintf("[%p - %p] %s\n", varea->start, varea->start + varea->size,
935+
rt_kprintf("[%p - %p] %s\n", varea->start, (char *)varea->start + varea->size,
942936
varea->mem_obj->get_name(varea));
943937
}
944938
else
945939
{
946-
rt_kprintf("[%p - %p] phy-map\n", varea->start, varea->start + varea->size);
940+
rt_kprintf("[%p - %p] phy-map\n", varea->start, (char *)varea->start + varea->size);
947941
rt_kprintf("\t\\_ paddr = %p\n", varea->offset << MM_PAGE_SHIFT);
948942
}
949943
return 0;

components/mm/mm_fault.c

+2-2
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@
2828

2929
static int _fetch_page(rt_varea_t varea, struct rt_aspace_fault_msg *msg)
3030
{
31-
int err;
31+
int err = UNRECOVERABLE;
3232
msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
3333
msg->response.vaddr = 0;
3434
msg->response.size = 0;
@@ -104,7 +104,7 @@ int rt_aspace_fault_try_fix(struct rt_aspace_fault_msg *msg)
104104
if (varea)
105105
{
106106
void *pa = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
107-
msg->off = (msg->fault_vaddr - varea->start) >> ARCH_PAGE_SHIFT;
107+
msg->off = ((char *)msg->fault_vaddr - (char *)varea->start) >> ARCH_PAGE_SHIFT;
108108

109109
/* permission checked by fault op */
110110
switch (msg->fault_op)

components/mm/mm_object.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ void rt_varea_pgmgr_pop(rt_varea_t varea, void *vaddr, rt_size_t size)
7070
static void on_page_fault(struct rt_varea *varea, struct rt_aspace_fault_msg *msg)
7171
{
7272
void *page;
73-
page = rt_pages_alloc(0);
73+
page = rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
7474

7575
if (!page)
7676
{

0 commit comments

Comments
 (0)