488fc08d91
Currently, the x86_64 and ia64 arches do not clear the corresponding bit in a node's cpumask when a cpu goes down or a cpu bring-up is cancelled. This is buggy, since pieces of common code check that cpumask in the cpu-down path to decide on things (like the slab down path). PPC does the right thing, but x86_64 and ia64 don't (this is why Sonny hit a slab bug during cpu offline on ppc and could not reproduce it on other arches).

This patch fixes it for x86_64. I won't attempt ia64, as I cannot test it. Credit for spotting this should go to Alok.

(akpm: this was applied, then reverted. But it's OK now because we now use for_each_cpu() in the right places.)

Signed-off-by: Alok N Kataria <alokk@calsoftinc.com>
Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org>
Signed-off-by: Shai Fultheim <shai@scalex86.org>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
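To make the failure mode concrete, here is a minimal userspace simulation of the bug. All names here (node_mask, cpu_online_mask, drain_node_caches) are made up for illustration; this is a sketch of the stale-bit problem, not kernel code.

/*
 * Simulate a node cpumask that keeps a stale bit after a cpu offline.
 */
#include <stdio.h>

#define NR_CPUS	4
#define BIT(n)	(1UL << (n))

/* All four cpus start online, all on node 0. */
static unsigned long cpu_online_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3);
static unsigned long node_mask       = BIT(0) | BIT(1) | BIT(2) | BIT(3);

/* Common-code style walk: visit every cpu the node claims to own. */
static void drain_node_caches(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (node_mask & BIT(cpu))
			printf("draining per-cpu cache of cpu %d%s\n", cpu,
			       (cpu_online_mask & BIT(cpu)) ? "" : " <-- stale bit, cpu is offline!");
}

int main(void)
{
	/* cpu 2 goes down; the buggy arch clears only the online mask. */
	cpu_online_mask &= ~BIT(2);
	/* node_mask &= ~BIT(2);	<-- the missing step this patch adds */

	drain_node_caches();
	return 0;
}

Uncommenting the node_mask line, the analogue of the clear_node_cpumask() helper added below, makes the walk skip the offlined cpu.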
39 lines · 786 B · C
#ifndef _ASM_X8664_NUMA_H
#define _ASM_X8664_NUMA_H 1

#include <linux/nodemask.h>
#include <asm/numnodes.h>

struct node {
	u64 start,end;
};

extern int compute_hash_shift(struct node *nodes, int numnodes);
extern int pxm_to_node(int nid);

#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))

extern void numa_add_cpu(int cpu);
extern void numa_init_array(void);
extern int numa_off;

extern void numa_set_node(int cpu, int node);

extern unsigned char apicid_to_node[256];
#ifdef CONFIG_NUMA
extern void __init init_cpu_to_node(void);

static inline void clear_node_cpumask(int cpu)
{
	clear_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
}

#else
#define init_cpu_to_node() do {} while (0)
#define clear_node_cpumask(cpu) do {} while (0)
#endif

#define NUMA_NO_NODE 0xff

#endif
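The header above only declares the clear_node_cpumask() helper; the part of the patch that actually calls it from the cpu-offline path is not shown in this listing. A minimal sketch of what such a call site looks like, assuming an x86_64-style __cpu_disable() hook (the body is illustrative, not the complete function):

/* Illustrative call-site sketch; not the full x86_64 __cpu_disable(). */
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/* ... migrate irqs away, remove cpu from cpu_online_map ... */

	clear_node_cpumask(cpu);	/* drop cpu from its node's cpumask */
	return 0;
}

With this in place the node's cpumask tracks cpu_online_map, so common-code walks such as the slab down path no longer see offlined cpus.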