@@ -1940,7 +1940,7 @@ static inline void setup_vmalloc_vm(struct vm_struct *vm,
 {
 	vm->flags = flags;
 	vm->addr = (void *)va->va_start;
-	vm->size = va_size(va);
+	vm->size = vm->requested_size = va_size(va);
 	vm->caller = caller;
 	va->vm = vm;
 }
@@ -3133,6 +3133,7 @@ struct vm_struct *__get_vm_area_node(unsigned long size,
 
 	area->flags = flags;
 	area->caller = caller;
+	area->requested_size = requested_size;
 
 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
 	if (IS_ERR(va)) {
@@ -4063,6 +4064,8 @@ EXPORT_SYMBOL(vzalloc_node_noprof);
  */
 void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 {
+	struct vm_struct *vm = NULL;
+	size_t alloced_size = 0;
 	size_t old_size = 0;
 	void *n;
 
@@ -4072,30 +4075,44 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 	}
 
 	if (p) {
-		struct vm_struct *vm;
-
 		vm = find_vm_area(p);
 		if (unlikely(!vm)) {
 			WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
 			return NULL;
 		}
 
-		old_size = get_vm_area_size(vm);
+		alloced_size = get_vm_area_size(vm);
+		old_size = vm->requested_size;
+		if (WARN(alloced_size < old_size,
+			 "vrealloc() has mismatched area vs requested sizes (%p)\n", p))
+			return NULL;
 	}
 
 	/*
 	 * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
 	 * would be a good heuristic for when to shrink the vm_area?
 	 */
 	if (size <= old_size) {
-		/* Zero out spare memory. */
-		if (want_init_on_alloc(flags))
+		/* Zero out "freed" memory. */
+		if (want_init_on_free())
 			memset((void *)p + size, 0, old_size - size);
+		vm->requested_size = size;
 		kasan_poison_vmalloc(p + size, old_size - size);
-		kasan_unpoison_vmalloc(p, size, KASAN_VMALLOC_PROT_NORMAL);
 		return (void *)p;
 	}
 
+	/*
+	 * We already have the bytes available in the allocation; use them.
+	 */
+	if (size <= alloced_size) {
+		kasan_unpoison_vmalloc(p + old_size, size - old_size,
+				       KASAN_VMALLOC_PROT_NORMAL);
+		/* Zero out "alloced" memory. */
+		if (want_init_on_alloc(flags))
+			memset((void *)p + old_size, 0, size - old_size);
+		vm->requested_size = size;
+	}
+
 	/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
 	n = __vmalloc_noprof(size, flags);
 	if (!n)
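
For context, a minimal sketch (not part of the commit) of how a caller sees the shrink path once requested_size is tracked. The helper name demo_shrink is hypothetical; vmalloc() and vrealloc() are the existing kernel APIs.

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical helper, for illustration only. */
static void *demo_shrink(void)
{
	void *buf = vmalloc(4 * PAGE_SIZE);

	if (!buf)
		return NULL;

	/*
	 * Shrink in place: the same pointer comes back, vm->requested_size
	 * drops to PAGE_SIZE, the trailing bytes are zeroed when
	 * want_init_on_free() is set, and KASAN poisons the unused tail.
	 */
	return vrealloc(buf, PAGE_SIZE, GFP_KERNEL);
}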