android_kernel_xiaomi_sm8350/arch/x86_64/kernel/mce_intel.c
Andi Kleen 95833c83f3 [PATCH] x86_64: Add idle notifiers
This adds a new notifier chain that is called with IDLE_START
when a CPU goes idle and IDLE_END when it goes out of idle.
The context can be the idle thread or interrupt context.

Since we cannot rely on MONITOR/MWAIT existing, the idle-end
check currently has to be done in all interrupt handlers.

They were originally inspired by the similar s390 implementation.

They have a variety of applications:
- They will be needed for CONFIG_NO_IDLE_HZ
- They can be used by oprofile to fix up the missing idle time
  when performance counters don't tick.
- They can be used for better C state management in ACPI
- They could be used for microstate accounting.

This is just infrastructure so far, no users.
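
As a rough sketch of what a user might eventually look like (not part of
this patch: the callback and notifier_block names below are made up for
illustration; only idle_notifier_register() and the IDLE_START/IDLE_END
events come from the new asm/idle.h), a driver could hook the chain with
an ordinary notifier block:

#include <linux/notifier.h>
#include <asm/idle.h>

/* Hypothetical example callback: runs on every idle transition. */
static int my_idle_event(struct notifier_block *nb, unsigned long action,
			 void *data)
{
	switch (action) {
	case IDLE_START:
		/* this CPU is about to enter the idle loop */
		break;
	case IDLE_END:
		/* this CPU left idle, e.g. because an interrupt came in */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_idle_nb = {
	.notifier_call = my_idle_event,
};

/* during initialisation: */
idle_notifier_register(&my_idle_nb);

The other half of the contract is visible in the handler below: the
exit_idle() call at the top of smp_thermal_interrupt() is what raises
IDLE_END when the interrupt is what pulled the CPU out of idle.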

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-01-11 19:04:55 -08:00


/*
* Intel specific MCE features.
* Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
*/

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/idle.h>

static DEFINE_PER_CPU(unsigned long, next_check);

asmlinkage void smp_thermal_interrupt(void)
{
	struct mce m;

	ack_APIC_irq();
	exit_idle();		/* leave idle: fires IDLE_END notifiers if we interrupted the idle loop */
	irq_enter();

	/* Rate-limit logging to once every 5 minutes per CPU. */
	if (time_before(jiffies, __get_cpu_var(next_check)))
		goto done;
	__get_cpu_var(next_check) = jiffies + HZ*300;

	memset(&m, 0, sizeof(m));
	m.cpu = smp_processor_id();
	m.bank = MCE_THERMAL_BANK;
	rdtscll(m.tsc);
	rdmsrl(MSR_IA32_THERM_STATUS, m.status);

	if (m.status & 0x1) {
		printk(KERN_EMERG
			"CPU%d: Temperature above threshold, cpu clock throttled\n", m.cpu);
		add_taint(TAINT_MACHINE_CHECK);
	} else {
		printk(KERN_EMERG "CPU%d: Temperature/speed normal\n", m.cpu);
	}

	mce_log(&m);
done:
	irq_exit();
}
static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int tm2 = 0;
	unsigned int cpu = smp_processor_id();

	if (!cpu_has(c, X86_FEATURE_ACPI))
		return;

	if (!cpu_has(c, X86_FEATURE_ACC))
		return;

	/* first check if TM1 is already enabled by the BIOS, in which
	 * case there might be some SMM goo which handles it, so we can't even
	 * put a handler since it might be delivered via SMI already.
	 */
	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	h = apic_read(APIC_LVTTHMR);
	if ((l & (1 << 3)) && (h & APIC_DM_SMI)) {
		printk(KERN_DEBUG
		       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
		return;
	}

	if (cpu_has(c, X86_FEATURE_TM2) && (l & (1 << 13)))
		tm2 = 1;

	if (h & APIC_VECTOR_MASK) {
		printk(KERN_DEBUG
		       "CPU%d: Thermal LVT vector (%#x) already "
		       "installed\n", cpu, (h & APIC_VECTOR_MASK));
		return;
	}

	h = THERMAL_APIC_VECTOR;
	h |= (APIC_DM_FIXED | APIC_LVT_MASKED);
	apic_write_around(APIC_LVTTHMR, h);

	rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
	wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h);

	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h);

	l = apic_read(APIC_LVTTHMR);
	apic_write_around(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);

	printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
	       cpu, tm2 ? "TM2" : "TM1");
	return;
}

void __cpuinit mce_intel_feature_init(struct cpuinfo_x86 *c)
{
	intel_init_thermal(c);
}