ba4d40bb5c
Now for a completely different but trivial approach. I just boot tested it with 255 CPUs and everything worked. Currently everything (except module data) we place in the per cpu area we know about at compile time. So instead of allocating a fixed size for the per_cpu area, allocate the number of bytes we need plus a fixed constant to be used for modules. It isn't perfect but it is much less of a pain to work with than what we are doing now. AK: fixed warning Signed-off-by: Eric W. Biederman <ebiederm@xmission.com> Signed-off-by: Andi Kleen <ak@suse.de>
66 lines
2.0 KiB
C
#ifndef _ASM_X8664_PERCPU_H_
#define _ASM_X8664_PERCPU_H_

#include <linux/compiler.h>

/* Same as asm-generic/percpu.h, except that we store the per cpu offset
   in the PDA. Longer term the PDA and every per cpu variable
   should be just put into a single section and referenced directly
   from %gs */

#ifdef CONFIG_SMP

#include <asm/pda.h>

/* Extra room reserved in each per-cpu area for per-cpu data declared by
 * modules, which is not known at kernel compile time. */
#ifdef CONFIG_MODULES
# define PERCPU_MODULE_RESERVE 8192
#else
# define PERCPU_MODULE_RESERVE 0
#endif

/* Size to allocate for each CPU's per-cpu area: the compile-time
 * .data.percpu payload (__per_cpu_start..__per_cpu_end, rounded up to a
 * cache line) plus the fixed module reserve above. */
#define PERCPU_ENOUGH_ROOM \
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
	 PERCPU_MODULE_RESERVE)

/* The per-cpu offset lives in the PDA rather than in a global array. */
#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
#define __my_cpu_offset() read_pda(data_offset)

#define per_cpu_offset(x) (__per_cpu_offset(x))

/* Separate out the type, so (int[3], foo) works. */
#define DEFINE_PER_CPU(type, name) \
    __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name

/* var is in discarded region: offset to particular copy we want */
#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))

/* A macro to avoid #include hell... */
#define percpu_modcopy(pcpudst, src, size)			\
do {								\
	unsigned int __i;					\
	for_each_possible_cpu(__i)				\
		memcpy((pcpudst)+__per_cpu_offset(__i),		\
		       (src), (size));				\
} while (0)

extern void setup_per_cpu_areas(void);

#else /* ! SMP */

#define DEFINE_PER_CPU(type, name) \
    __typeof__(type) per_cpu__##name

/* UP: single copy; still evaluate 'cpu' once for side effects, then
 * discard it via the comma operator. */
#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
#define __get_cpu_var(var) per_cpu__##var
#define __raw_get_cpu_var(var) per_cpu__##var

#endif /* SMP */

#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name

#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)

#endif /* _ASM_X8664_PERCPU_H_ */
|