1e9f28fa1e
Add a new sched domain for representing multi-core with shared caches between cores. Consider a dual package system, each package containing two cores and with last level cache shared between cores within a package. If there are two runnable processes, with this appended patch those two processes will be scheduled on different packages. On such systems, with this patch we have observed 8% perf improvement with specJBB(2 warehouse) benchmark and 35% improvement with CFP2000 rate(with 2 users). This new domain will come into play only on multi-core systems with shared caches. On other systems, this sched domain will be removed by domain degeneration code. This new domain can be also used for implementing power savings policy (see OLS 2005 CMP kernel scheduler paper for more details. I will post another patch for power savings policy soon) Most of the arch/* file changes are for cpu_coregroup_map() implementation. Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> Cc: Ingo Molnar <mingo@elte.hu> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
74 lines
1.9 KiB
C
74 lines
1.9 KiB
C
#ifndef _ASM_X86_64_TOPOLOGY_H
|
|
#define _ASM_X86_64_TOPOLOGY_H
|
|
|
|
#include <linux/config.h>
|
|
|
|
#ifdef CONFIG_NUMA

#include <asm/mpspec.h>
#include <asm/bitops.h>

/* Map the K8 CPU local memory controllers to a simple 1:1 CPU:NODE topology */

extern cpumask_t cpu_online_map;

/* Per-CPU node number and per-node CPU mask, filled in during boot. */
extern unsigned char cpu_to_node[];
extern cpumask_t node_to_cpumask[];

#ifdef CONFIG_ACPI_NUMA
/* SLIT-derived inter-node distance; without ACPI NUMA the generic
 * fallback definition from asm-generic/topology.h is used instead. */
extern int __node_distance(int, int);
#define node_distance(a,b) __node_distance(a,b)
/* #else fallback version */
#endif

#define cpu_to_node(cpu)	(cpu_to_node[cpu])
#define parent_node(node)	(node)
#define node_to_first_cpu(node)	(first_cpu(node_to_cpumask[node]))
#define node_to_cpumask(node)	(node_to_cpumask[node])
/*
 * NUMA node a PCI bus hangs off.
 * NOTE(review): assumes bus->sysdata holds the node number — confirm
 * against the x86-64 PCI probe code.  The argument is parenthesized so
 * that any expression passed as @bus binds correctly to "->".
 */
#define pcibus_to_node(bus)	((long)((bus)->sysdata))
/*
 * CPUs local to a PCI bus.  The original definition ended in a stray
 * semicolon, which broke any use of the macro in expression context
 * (function argument, if-condition, initializer) — removed here.
 */
#define pcibus_to_cpumask(bus)	node_to_cpumask(pcibus_to_node(bus))

#define numa_node_id() read_pda(nodenumber)

/* sched_domains SD_NODE_INIT for x86_64 machines */
#define SD_NODE_INIT (struct sched_domain) {		\
	.span			= CPU_MASK_NONE,	\
	.parent			= NULL,			\
	.groups			= NULL,			\
	.min_interval		= 8,			\
	.max_interval		= 32,			\
	.busy_factor		= 32,			\
	.imbalance_pct		= 125,			\
	.cache_nice_tries	= 2,			\
	.busy_idx		= 3,			\
	.idle_idx		= 2,			\
	.newidle_idx		= 0,			\
	.wake_idx		= 1,			\
	.forkexec_idx		= 1,			\
	.per_cpu_gain		= 100,			\
	.flags			= SD_LOAD_BALANCE	\
				| SD_BALANCE_FORK	\
				| SD_BALANCE_EXEC	\
				| SD_WAKE_BALANCE,	\
	.last_balance		= jiffies,		\
	.balance_interval	= 1,			\
	.nr_balance_failed	= 0,			\
}

#endif
|
|
|
|
#ifdef CONFIG_SMP
|
|
#define topology_physical_package_id(cpu) \
|
|
(phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu])
|
|
#define topology_core_id(cpu) \
|
|
(cpu_core_id[cpu] == BAD_APICID ? 0 : cpu_core_id[cpu])
|
|
#define topology_core_siblings(cpu) (cpu_core_map[cpu])
|
|
#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
|
|
#endif
|
|
|
|
#include <asm-generic/topology.h>
|
|
|
|
/* Set of CPUs forming @cpu's core group — used to build the multi-core
 * (shared-cache) sched domain; presumably the CPUs sharing the last-level
 * cache with @cpu — confirm against the arch implementation. */
extern cpumask_t cpu_coregroup_map(int cpu);
|
|
|
|
#endif
|