Commit 0b391a5

kees authored and gregkh committed
mm: vmalloc: support more granular vrealloc() sizing
commit a0309fa upstream.

Introduce struct vm_struct::requested_size so that the requested
(re)allocation size is retained separately from the allocated area
size. This means that KASAN will correctly poison the correct spans
of requested bytes. This also means we can support growing the usable
portion of an allocation that can already be supported by the existing
area's existing allocation.

Link: https://lkml.kernel.org/r/[email protected]
Fixes: 3ddc2fe ("mm: vmalloc: implement vrealloc()")
Signed-off-by: Kees Cook <[email protected]>
Reported-by: Erhard Furtner <[email protected]>
Closes: https://lore.kernel.org/all/[email protected]/
Reviewed-by: Danilo Krummrich <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: "Uladzislau Rezki (Sony)" <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
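For context, the behavior described above can be sketched from a caller's point of view. The following throwaway kernel-module snippet is an illustration only, not part of the patch: the module name, the buffer sizes, and the assumption of 4 KiB pages are all invented.

#include <linux/module.h>
#include <linux/vmalloc.h>

static int __init vrealloc_demo_init(void)
{
	void *tmp;
	/* 3000 bytes requested; the area is backed by one full page. */
	void *buf = vmalloc(3000);

	if (!buf)
		return -ENOMEM;

	/*
	 * Grow to 3500 bytes. The backing page already covers this, so per
	 * the commit message the request can be satisfied from the existing
	 * area, with only the newly requested span unpoisoned for KASAN
	 * (and zeroed when init_on_alloc is enabled).
	 */
	tmp = vrealloc(buf, 3500, GFP_KERNEL);
	if (tmp)
		buf = tmp;

	/* Growing past the backing page still needs a fresh allocation. */
	tmp = vrealloc(buf, 8192, GFP_KERNEL);
	if (tmp)
		buf = tmp;

	vfree(buf);
	return 0;
}
module_init(vrealloc_demo_init);

MODULE_LICENSE("GPL");

The sketch only reassigns buf when the call succeeds, since a failed vrealloc() is expected to leave the original allocation in place.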
1 parent 2b35c1a commit 0b391a5

File tree

2 files changed: +25 -7 lines changed

include/linux/vmalloc.h

Lines changed: 1 addition & 0 deletions

@@ -61,6 +61,7 @@ struct vm_struct {
 	unsigned int		nr_pages;
 	phys_addr_t		phys_addr;
 	const void		*caller;
+	unsigned long		requested_size;
 };
 
 struct vmap_area {
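
To make the relationship between the two sizes concrete, here is a hypothetical debugging helper; it is not part of the patch, and show_vrealloc_sizes() is an invented name. get_vm_area_size() reports the page-granular usable size of the area, while the new requested_size remembers the byte count the caller actually asked for.

#include <linux/bug.h>
#include <linux/printk.h>
#include <linux/vmalloc.h>

/* Hypothetical helper, for illustration only. */
static void show_vrealloc_sizes(const void *p)
{
	struct vm_struct *vm = find_vm_area(p);

	if (!vm)
		return;

	/* Page-granular usable size of the mapped area (guard page excluded). */
	pr_info("area size:      %lu\n", (unsigned long)get_vm_area_size(vm));
	/* Byte-granular size the caller most recently requested. */
	pr_info("requested size: %lu\n", vm->requested_size);

	/* vrealloc() now WARNs if this invariant is ever violated. */
	WARN_ON(vm->requested_size > get_vm_area_size(vm));
}

The mm/vmalloc.c changes below rely on exactly this invariant: the requested size may never exceed the size of the area backing it.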

mm/vmalloc.c

Lines changed: 24 additions & 7 deletions

@@ -1940,7 +1940,7 @@ static inline void setup_vmalloc_vm(struct vm_struct *vm,
 {
 	vm->flags = flags;
 	vm->addr = (void *)va->va_start;
-	vm->size = va_size(va);
+	vm->size = vm->requested_size = va_size(va);
 	vm->caller = caller;
 	va->vm = vm;
 }

@@ -3133,6 +3133,7 @@ struct vm_struct *__get_vm_area_node(unsigned long size,
 
 	area->flags = flags;
 	area->caller = caller;
+	area->requested_size = requested_size;
 
 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
 	if (IS_ERR(va)) {

@@ -4067,6 +4068,8 @@ EXPORT_SYMBOL(vzalloc_node_noprof);
  */
 void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 {
+	struct vm_struct *vm = NULL;
+	size_t alloced_size = 0;
 	size_t old_size = 0;
 	void *n;
 

@@ -4076,30 +4079,44 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 	}
 
 	if (p) {
-		struct vm_struct *vm;
-
 		vm = find_vm_area(p);
 		if (unlikely(!vm)) {
 			WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
 			return NULL;
 		}
 
-		old_size = get_vm_area_size(vm);
+		alloced_size = get_vm_area_size(vm);
+		old_size = vm->requested_size;
+		if (WARN(alloced_size < old_size,
+			 "vrealloc() has mismatched area vs requested sizes (%p)\n", p))
+			return NULL;
 	}
 
 	/*
 	 * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
 	 * would be a good heuristic for when to shrink the vm_area?
 	 */
 	if (size <= old_size) {
-		/* Zero out spare memory. */
-		if (want_init_on_alloc(flags))
+		/* Zero out "freed" memory. */
+		if (want_init_on_free())
 			memset((void *)p + size, 0, old_size - size);
+		vm->requested_size = size;
 		kasan_poison_vmalloc(p + size, old_size - size);
-		kasan_unpoison_vmalloc(p, size, KASAN_VMALLOC_PROT_NORMAL);
 		return (void *)p;
 	}
 
+	/*
+	 * We already have the bytes available in the allocation; use them.
+	 */
+	if (size <= alloced_size) {
+		kasan_unpoison_vmalloc(p + old_size, size - old_size,
+				       KASAN_VMALLOC_PROT_NORMAL);
+		/* Zero out "alloced" memory. */
+		if (want_init_on_alloc(flags))
+			memset((void *)p + old_size, 0, size - old_size);
+		vm->requested_size = size;
+	}
+
 	/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
 	n = __vmalloc_noprof(size, flags);
 	if (!n)
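
To see why old_size in vrealloc_noprof() now comes from requested_size rather than get_vm_area_size(), a worked size example helps; the numbers below are illustrative and assume 4 KiB pages.

/*
 * p = vmalloc(3000);
 *     get_vm_area_size(vm)  == 4096   (one backing page, guard excluded)
 *     vm->requested_size    == 3000   (KASAN unpoisons exactly these bytes)
 *
 * vrealloc(p, 2000, GFP_KERNEL);
 *     before: old_size was the page-granular 4096, so the "spare memory"
 *             memset and poisoning covered [2000, 4096) and could run into
 *             bytes past offset 3000 that KASAN had never unpoisoned;
 *     after:  old_size is 3000, so zeroing and poisoning stop at the bytes
 *             that were actually handed out, and requested_size is updated
 *             to the new 2000-byte request.
 */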
