
Commit 7af3a0a

Zhen Lei authored and wildea01 committed
arm64/numa: support HAVE_SETUP_PER_CPU_AREA
Make each percpu area be allocated from its local NUMA node. Without this patch, all percpu areas are allocated from the node that cpu0 belongs to.

Signed-off-by: Zhen Lei <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
1 parent f11c7ba commit 7af3a0a
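
Note: selecting HAVE_SETUP_PER_CPU_AREA makes the architecture responsible for setup_per_cpu_areas(), replacing the generic version in mm/percpu.c. The sketch below condenses that generic fallback (era-appropriate, hedged, not verbatim) to show why, before this patch, every percpu unit landed on cpu0's node: it passes no CPU-distance callback and uses a node-blind early allocator.

/*
 * Condensed sketch of the generic !CONFIG_HAVE_SETUP_PER_CPU_AREA
 * fallback in mm/percpu.c (hedged, not verbatim).  With a NULL distance
 * callback, pcpu_embed_first_chunk() packs all CPUs into one group, and
 * the allocator below never asks for a particular node.
 */
static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	/* no nid argument: memblock hands back memory from the boot node */
	return memblock_virt_alloc_from_nopanic(size, align,
						__pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    NULL,	/* no CPU distance info */
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}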

2 files changed: +60 −0 lines changed


arch/arm64/Kconfig

Lines changed: 8 additions & 0 deletions

@@ -600,6 +600,14 @@ config USE_PERCPU_NUMA_NODE_ID
 	def_bool y
 	depends on NUMA
 
+config HAVE_SETUP_PER_CPU_AREA
+	def_bool y
+	depends on NUMA
+
+config NEED_PER_CPU_EMBED_FIRST_CHUNK
+	def_bool y
+	depends on NUMA
+
 source kernel/Kconfig.preempt
 source kernel/Kconfig.hz
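
Note on the two new symbols, as a hedged reading of include/linux/percpu.h from this era: HAVE_SETUP_PER_CPU_AREA stops the generic setup_per_cpu_areas() from being built, so the arch's version in numa.c below is the one that links; NEED_PER_CPU_EMBED_FIRST_CHUNK guarantees pcpu_embed_first_chunk() is still compiled even though its generic caller is gone. Roughly:

/*
 * Hedged paraphrase of include/linux/percpu.h gating (not verbatim):
 * the embed-style first-chunk helper is built only when something can
 * call it -- either the generic setup_per_cpu_areas() fallback, or an
 * arch that opts in via NEED_PER_CPU_EMBED_FIRST_CHUNK, as arm64 does.
 */
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
#define BUILD_EMBED_FIRST_CHUNK
#endif

#ifdef BUILD_EMBED_FIRST_CHUNK
/* era-appropriate signature of the helper the new numa.c code calls */
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_alloc_fn_t alloc_fn,
				  pcpu_fc_free_fn_t free_fn);
#endif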

arch/arm64/mm/numa.c

Lines changed: 52 additions & 0 deletions

@@ -26,6 +26,7 @@
 #include <linux/of.h>
 
 #include <asm/acpi.h>
+#include <asm/sections.h>
 
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
@@ -131,6 +132,57 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid)
 	cpu_to_node_map[cpu] = nid;
 }
 
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static int __init early_cpu_to_node(int cpu)
+{
+	return cpu_to_node_map[cpu];
+}
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+	return node_distance(from, to);
+}
+
+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
+				   size_t align)
+{
+	int nid = early_cpu_to_node(cpu);
+
+	return memblock_virt_alloc_try_nid(size, align,
+			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+}
+
+static void __init pcpu_fc_free(void *ptr, size_t size)
+{
+	memblock_free_early(__pa(ptr), size);
+}
+
+void __init setup_per_cpu_areas(void)
+{
+	unsigned long delta;
+	unsigned int cpu;
+	int rc;
+
+	/*
+	 * Always reserve area for module percpu variables.  That's
+	 * what the legacy allocator did.
+	 */
+	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
+				    pcpu_cpu_distance,
+				    pcpu_fc_alloc, pcpu_fc_free);
+	if (rc < 0)
+		panic("Failed to initialize percpu areas.");
+
+	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+	for_each_possible_cpu(cpu)
+		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+#endif
+
 /**
  * numa_add_memblk - Set node id to memblk
  * @nid: NUMA node ID of the new memblk
