Skip to content

Commit a0309fa

Browse files
keesakpm00
authored and committed
mm: vmalloc: support more granular vrealloc() sizing
Introduce struct vm_struct::requested_size so that the requested (re)allocation size is retained separately from the allocated area size. This means that KASAN will correctly poison the correct spans of requested bytes. This also means we can support growing the usable portion of an allocation that can already be supported by the existing area's existing allocation. Link: https://lkml.kernel.org/r/[email protected] Fixes: 3ddc2fe ("mm: vmalloc: implement vrealloc()") Signed-off-by: Kees Cook <[email protected]> Reported-by: Erhard Furtner <[email protected]> Closes: https://lore.kernel.org/all/[email protected]/ Reviewed-by: Danilo Krummrich <[email protected]> Cc: Michal Hocko <[email protected]> Cc: "Uladzislau Rezki (Sony)" <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent a8efadd commit a0309fa

File tree

2 files changed

+25
-7
lines changed

2 files changed

+25
-7
lines changed

include/linux/vmalloc.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,7 @@ struct vm_struct {
6161
unsigned int nr_pages;
6262
phys_addr_t phys_addr;
6363
const void *caller;
64+
unsigned long requested_size;
6465
};
6566

6667
struct vmap_area {

mm/vmalloc.c

Lines changed: 24 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1940,7 +1940,7 @@ static inline void setup_vmalloc_vm(struct vm_struct *vm,
19401940
{
19411941
vm->flags = flags;
19421942
vm->addr = (void *)va->va_start;
1943-
vm->size = va_size(va);
1943+
vm->size = vm->requested_size = va_size(va);
19441944
vm->caller = caller;
19451945
va->vm = vm;
19461946
}
@@ -3133,6 +3133,7 @@ struct vm_struct *__get_vm_area_node(unsigned long size,
31333133

31343134
area->flags = flags;
31353135
area->caller = caller;
3136+
area->requested_size = requested_size;
31363137

31373138
va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
31383139
if (IS_ERR(va)) {
@@ -4063,6 +4064,8 @@ EXPORT_SYMBOL(vzalloc_node_noprof);
40634064
*/
40644065
void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
40654066
{
4067+
struct vm_struct *vm = NULL;
4068+
size_t alloced_size = 0;
40664069
size_t old_size = 0;
40674070
void *n;
40684071

@@ -4072,30 +4075,44 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
40724075
}
40734076

40744077
if (p) {
4075-
struct vm_struct *vm;
4076-
40774078
vm = find_vm_area(p);
40784079
if (unlikely(!vm)) {
40794080
WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
40804081
return NULL;
40814082
}
40824083

4083-
old_size = get_vm_area_size(vm);
4084+
alloced_size = get_vm_area_size(vm);
4085+
old_size = vm->requested_size;
4086+
if (WARN(alloced_size < old_size,
4087+
"vrealloc() has mismatched area vs requested sizes (%p)\n", p))
4088+
return NULL;
40844089
}
40854090

40864091
/*
40874092
* TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
40884093
* would be a good heuristic for when to shrink the vm_area?
40894094
*/
40904095
if (size <= old_size) {
4091-
/* Zero out spare memory. */
4092-
if (want_init_on_alloc(flags))
4096+
/* Zero out "freed" memory. */
4097+
if (want_init_on_free())
40934098
memset((void *)p + size, 0, old_size - size);
4099+
vm->requested_size = size;
40944100
kasan_poison_vmalloc(p + size, old_size - size);
4095-
kasan_unpoison_vmalloc(p, size, KASAN_VMALLOC_PROT_NORMAL);
40964101
return (void *)p;
40974102
}
40984103

4104+
/*
4105+
* We already have the bytes available in the allocation; use them.
4106+
*/
4107+
if (size <= alloced_size) {
4108+
kasan_unpoison_vmalloc(p + old_size, size - old_size,
4109+
KASAN_VMALLOC_PROT_NORMAL);
4110+
/* Zero out "alloced" memory. */
4111+
if (want_init_on_alloc(flags))
4112+
memset((void *)p + old_size, 0, size - old_size);
4113+
vm->requested_size = size;
4114+
}
4115+
40994116
/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
41004117
n = __vmalloc_noprof(size, flags);
41014118
if (!n)

0 commit comments

Comments
 (0)