/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/unwind.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/unwind.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pda.h>
#include <asm/traps.h>

#include <mach_traps.h>

/* Non-zero while NMIs are masked via stop_nmi()/restart_nmi(). */
static int ignore_nmis;
static inline void conditional_sti(struct pt_regs *regs)
|
|
|
|
{
|
2008-01-30 07:30:56 -05:00
|
|
|
if (regs->flags & X86_EFLAGS_IF)
|
2005-04-16 18:20:36 -04:00
|
|
|
local_irq_enable();
|
|
|
|
}
|
|
|
|
|
[PATCH] arch/x86_64/kernel/traps.c PTRACE_SINGLESTEP oops
We found a problem with x86_64 kernels with preemption enabled, where
having multiple tasks doing ptrace singlesteps around the same time will
cause the system to 'oops'. The problem seems that a task can get
preempted out of the do_debug() processing while it is running on the
DEBUG_STACK stack. If another task on that same cpu then enters do_debug()
and uses the same per-cpu DEBUG_STACK stack, the previous preempted tasks's
stack contents can be corrupted, and the system will oops when the
preempted task is context switched back in again.
The typical oops looks like the following:
Unable to handle kernel paging request at ffffffffffffffae RIP: <ffffffff805452a1>{thread_return+34}
PGD 103027 PUD 102429067 PMD 0
Oops: 0002 [1] PREEMPT SMP
CPU 0
Modules linked in:
Pid: 3786, comm: ssdd Not tainted 2.6.15.2 #1
RIP: 0010:[<ffffffff805452a1>] <ffffffff805452a1>{thread_return+34}
RSP: 0018:ffffffff80824058 EFLAGS: 000136c2
RAX: ffff81017e12cea0 RBX: 0000000000000000 RCX: 00000000c0000100
RDX: 0000000000000000 RSI: ffff8100f7856e20 RDI: ffff81017e12cea0
RBP: 0000000000000046 R08: ffff8100f68a6000 R09: 0000000000000000
R10: 0000000000000000 R11: ffff81017e12cea0 R12: ffff81000c2d53e8
R13: ffff81017f5b3be8 R14: ffff81000c0036e0 R15: 000001056cbfc899
FS: 00002aaaaaad9b00(0000) GS:ffffffff80883800(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
CR2: ffffffffffffffae CR3: 00000000f6fcf000 CR4: 00000000000006e0
Process ssdd (pid: 3786, threadinfo ffff8100f68a6000, task ffff8100f7856e20)
Stack: ffffffff808240d8 ffffffff8012a84a ffff8100055f6c00 0000000000000020
0000000000000001 ffff81000c0036e0 ffffffff808240b8 0000000000000000
0000000000000000 0000000000000000
Call Trace: <#DB>
<ffffffff8012a84a>{try_to_wake_up+985}
<ffffffff8012c0d3>{kick_process+87}
<ffffffff8013b262>{signal_wake_up+48}
<ffffffff8013b5ce>{specific_send_sig_info+179}
<ffffffff80546abc>{_spin_unlock_irqrestore+27}
<ffffffff8013b67c>{force_sig_info+159}
<ffffffff801103a0>{do_debug+289} <ffffffff80110278>{sync_regs+103}
<ffffffff8010ed9a>{paranoid_userspace+35}
Unable to handle kernel paging request at 00007fffffb7d000 RIP: <ffffffff8010f2e4>{show_trace+465}
PGD f6f25067 PUD f6fcc067 PMD f6957067 PTE 0
Oops: 0000 [2] PREEMPT SMP
This patch disables preemptions for the task upon entry to do_debug(), before
interrupts are reenabled, and then disables preemption before exiting
do_debug(), after disabling interrupts. I've noticed that the task can be
preempted either at the end of an interrupt, or on the call to
force_sig_info() on the spin_unlock_irqrestore() processing. It might be
better to attempt to code a fix in entry.S around the code that calls
do_debug().
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-02-12 17:34:58 -05:00
|
|
|
static inline void preempt_conditional_sti(struct pt_regs *regs)
|
|
|
|
{
|
2008-02-13 14:21:06 -05:00
|
|
|
inc_preempt_count();
|
2008-01-30 07:30:56 -05:00
|
|
|
if (regs->flags & X86_EFLAGS_IF)
|
[PATCH] arch/x86_64/kernel/traps.c PTRACE_SINGLESTEP oops
We found a problem with x86_64 kernels with preemption enabled, where
having multiple tasks doing ptrace singlesteps around the same time will
cause the system to 'oops'. The problem seems that a task can get
preempted out of the do_debug() processing while it is running on the
DEBUG_STACK stack. If another task on that same cpu then enters do_debug()
and uses the same per-cpu DEBUG_STACK stack, the previous preempted tasks's
stack contents can be corrupted, and the system will oops when the
preempted task is context switched back in again.
The typical oops looks like the following:
Unable to handle kernel paging request at ffffffffffffffae RIP: <ffffffff805452a1>{thread_return+34}
PGD 103027 PUD 102429067 PMD 0
Oops: 0002 [1] PREEMPT SMP
CPU 0
Modules linked in:
Pid: 3786, comm: ssdd Not tainted 2.6.15.2 #1
RIP: 0010:[<ffffffff805452a1>] <ffffffff805452a1>{thread_return+34}
RSP: 0018:ffffffff80824058 EFLAGS: 000136c2
RAX: ffff81017e12cea0 RBX: 0000000000000000 RCX: 00000000c0000100
RDX: 0000000000000000 RSI: ffff8100f7856e20 RDI: ffff81017e12cea0
RBP: 0000000000000046 R08: ffff8100f68a6000 R09: 0000000000000000
R10: 0000000000000000 R11: ffff81017e12cea0 R12: ffff81000c2d53e8
R13: ffff81017f5b3be8 R14: ffff81000c0036e0 R15: 000001056cbfc899
FS: 00002aaaaaad9b00(0000) GS:ffffffff80883800(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
CR2: ffffffffffffffae CR3: 00000000f6fcf000 CR4: 00000000000006e0
Process ssdd (pid: 3786, threadinfo ffff8100f68a6000, task ffff8100f7856e20)
Stack: ffffffff808240d8 ffffffff8012a84a ffff8100055f6c00 0000000000000020
0000000000000001 ffff81000c0036e0 ffffffff808240b8 0000000000000000
0000000000000000 0000000000000000
Call Trace: <#DB>
<ffffffff8012a84a>{try_to_wake_up+985}
<ffffffff8012c0d3>{kick_process+87}
<ffffffff8013b262>{signal_wake_up+48}
<ffffffff8013b5ce>{specific_send_sig_info+179}
<ffffffff80546abc>{_spin_unlock_irqrestore+27}
<ffffffff8013b67c>{force_sig_info+159}
<ffffffff801103a0>{do_debug+289} <ffffffff80110278>{sync_regs+103}
<ffffffff8010ed9a>{paranoid_userspace+35}
Unable to handle kernel paging request at 00007fffffb7d000 RIP: <ffffffff8010f2e4>{show_trace+465}
PGD f6f25067 PUD f6fcc067 PMD f6957067 PTE 0
Oops: 0000 [2] PREEMPT SMP
This patch disables preemptions for the task upon entry to do_debug(), before
interrupts are reenabled, and then disables preemption before exiting
do_debug(), after disabling interrupts. I've noticed that the task can be
preempted either at the end of an interrupt, or on the call to
force_sig_info() on the spin_unlock_irqrestore() processing. It might be
better to attempt to code a fix in entry.S around the code that calls
do_debug().
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-02-12 17:34:58 -05:00
|
|
|
local_irq_enable();
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void preempt_conditional_cli(struct pt_regs *regs)
|
|
|
|
{
|
2008-01-30 07:30:56 -05:00
|
|
|
if (regs->flags & X86_EFLAGS_IF)
|
[PATCH] arch/x86_64/kernel/traps.c PTRACE_SINGLESTEP oops
We found a problem with x86_64 kernels with preemption enabled, where
having multiple tasks doing ptrace singlesteps around the same time will
cause the system to 'oops'. The problem seems that a task can get
preempted out of the do_debug() processing while it is running on the
DEBUG_STACK stack. If another task on that same cpu then enters do_debug()
and uses the same per-cpu DEBUG_STACK stack, the previous preempted tasks's
stack contents can be corrupted, and the system will oops when the
preempted task is context switched back in again.
The typical oops looks like the following:
Unable to handle kernel paging request at ffffffffffffffae RIP: <ffffffff805452a1>{thread_return+34}
PGD 103027 PUD 102429067 PMD 0
Oops: 0002 [1] PREEMPT SMP
CPU 0
Modules linked in:
Pid: 3786, comm: ssdd Not tainted 2.6.15.2 #1
RIP: 0010:[<ffffffff805452a1>] <ffffffff805452a1>{thread_return+34}
RSP: 0018:ffffffff80824058 EFLAGS: 000136c2
RAX: ffff81017e12cea0 RBX: 0000000000000000 RCX: 00000000c0000100
RDX: 0000000000000000 RSI: ffff8100f7856e20 RDI: ffff81017e12cea0
RBP: 0000000000000046 R08: ffff8100f68a6000 R09: 0000000000000000
R10: 0000000000000000 R11: ffff81017e12cea0 R12: ffff81000c2d53e8
R13: ffff81017f5b3be8 R14: ffff81000c0036e0 R15: 000001056cbfc899
FS: 00002aaaaaad9b00(0000) GS:ffffffff80883800(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
CR2: ffffffffffffffae CR3: 00000000f6fcf000 CR4: 00000000000006e0
Process ssdd (pid: 3786, threadinfo ffff8100f68a6000, task ffff8100f7856e20)
Stack: ffffffff808240d8 ffffffff8012a84a ffff8100055f6c00 0000000000000020
0000000000000001 ffff81000c0036e0 ffffffff808240b8 0000000000000000
0000000000000000 0000000000000000
Call Trace: <#DB>
<ffffffff8012a84a>{try_to_wake_up+985}
<ffffffff8012c0d3>{kick_process+87}
<ffffffff8013b262>{signal_wake_up+48}
<ffffffff8013b5ce>{specific_send_sig_info+179}
<ffffffff80546abc>{_spin_unlock_irqrestore+27}
<ffffffff8013b67c>{force_sig_info+159}
<ffffffff801103a0>{do_debug+289} <ffffffff80110278>{sync_regs+103}
<ffffffff8010ed9a>{paranoid_userspace+35}
Unable to handle kernel paging request at 00007fffffb7d000 RIP: <ffffffff8010f2e4>{show_trace+465}
PGD f6f25067 PUD f6fcc067 PMD f6957067 PTE 0
Oops: 0000 [2] PREEMPT SMP
This patch disables preemptions for the task upon entry to do_debug(), before
interrupts are reenabled, and then disables preemption before exiting
do_debug(), after disabling interrupts. I've noticed that the task can be
preempted either at the end of an interrupt, or on the call to
force_sig_info() on the spin_unlock_irqrestore() processing. It might be
better to attempt to code a fix in entry.S around the code that calls
do_debug().
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-02-12 17:34:58 -05:00
|
|
|
local_irq_disable();
|
2006-05-15 12:19:47 -04:00
|
|
|
/* Make sure to not schedule here because we could be running
|
|
|
|
on an exception stack. */
|
2008-02-13 14:21:06 -05:00
|
|
|
dec_preempt_count();
|
[PATCH] arch/x86_64/kernel/traps.c PTRACE_SINGLESTEP oops
We found a problem with x86_64 kernels with preemption enabled, where
having multiple tasks doing ptrace singlesteps around the same time will
cause the system to 'oops'. The problem seems that a task can get
preempted out of the do_debug() processing while it is running on the
DEBUG_STACK stack. If another task on that same cpu then enters do_debug()
and uses the same per-cpu DEBUG_STACK stack, the previous preempted tasks's
stack contents can be corrupted, and the system will oops when the
preempted task is context switched back in again.
The typical oops looks like the following:
Unable to handle kernel paging request at ffffffffffffffae RIP: <ffffffff805452a1>{thread_return+34}
PGD 103027 PUD 102429067 PMD 0
Oops: 0002 [1] PREEMPT SMP
CPU 0
Modules linked in:
Pid: 3786, comm: ssdd Not tainted 2.6.15.2 #1
RIP: 0010:[<ffffffff805452a1>] <ffffffff805452a1>{thread_return+34}
RSP: 0018:ffffffff80824058 EFLAGS: 000136c2
RAX: ffff81017e12cea0 RBX: 0000000000000000 RCX: 00000000c0000100
RDX: 0000000000000000 RSI: ffff8100f7856e20 RDI: ffff81017e12cea0
RBP: 0000000000000046 R08: ffff8100f68a6000 R09: 0000000000000000
R10: 0000000000000000 R11: ffff81017e12cea0 R12: ffff81000c2d53e8
R13: ffff81017f5b3be8 R14: ffff81000c0036e0 R15: 000001056cbfc899
FS: 00002aaaaaad9b00(0000) GS:ffffffff80883800(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
CR2: ffffffffffffffae CR3: 00000000f6fcf000 CR4: 00000000000006e0
Process ssdd (pid: 3786, threadinfo ffff8100f68a6000, task ffff8100f7856e20)
Stack: ffffffff808240d8 ffffffff8012a84a ffff8100055f6c00 0000000000000020
0000000000000001 ffff81000c0036e0 ffffffff808240b8 0000000000000000
0000000000000000 0000000000000000
Call Trace: <#DB>
<ffffffff8012a84a>{try_to_wake_up+985}
<ffffffff8012c0d3>{kick_process+87}
<ffffffff8013b262>{signal_wake_up+48}
<ffffffff8013b5ce>{specific_send_sig_info+179}
<ffffffff80546abc>{_spin_unlock_irqrestore+27}
<ffffffff8013b67c>{force_sig_info+159}
<ffffffff801103a0>{do_debug+289} <ffffffff80110278>{sync_regs+103}
<ffffffff8010ed9a>{paranoid_userspace+35}
Unable to handle kernel paging request at 00007fffffb7d000 RIP: <ffffffff8010f2e4>{show_trace+465}
PGD f6f25067 PUD f6fcc067 PMD f6957067 PTE 0
Oops: 0000 [2] PREEMPT SMP
This patch disables preemptions for the task upon entry to do_debug(), before
interrupts are reenabled, and then disables preemption before exiting
do_debug(), after disabling interrupts. I've noticed that the task can be
preempted either at the end of an interrupt, or on the call to
force_sig_info() on the spin_unlock_irqrestore() processing. It might be
better to attempt to code a fix in entry.S around the code that calls
do_debug().
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-02-12 17:34:58 -05:00
|
|
|
}
|
|
|
|
|
2008-07-01 19:29:44 -04:00
|
|
|
static void __kprobes
|
|
|
|
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
|
|
|
|
long error_code, siginfo_t *info)
|
2005-04-16 18:20:36 -04:00
|
|
|
{
|
2006-01-11 16:42:14 -05:00
|
|
|
struct task_struct *tsk = current;
|
|
|
|
|
2008-07-01 19:31:34 -04:00
|
|
|
if (!user_mode(regs))
|
|
|
|
goto kernel_trap;
|
2005-04-16 18:20:36 -04:00
|
|
|
|
2008-07-01 19:31:34 -04:00
|
|
|
/*
|
|
|
|
* We want error_code and trap_no set for userspace faults and
|
|
|
|
* kernelspace faults which result in die(), but not
|
|
|
|
* kernelspace faults which are fixed up. die() gives the
|
|
|
|
* process no chance to handle the signal and notice the
|
|
|
|
* kernel fault information, so that won't result in polluting
|
|
|
|
* the information about previously queued, but not yet
|
|
|
|
* delivered, faults. See also do_general_protection below.
|
|
|
|
*/
|
|
|
|
tsk->thread.error_code = error_code;
|
|
|
|
tsk->thread.trap_no = trapnr;
|
|
|
|
|
|
|
|
if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
|
|
|
|
printk_ratelimit()) {
|
|
|
|
printk(KERN_INFO
|
|
|
|
"%s[%d] trap %s ip:%lx sp:%lx error:%lx",
|
|
|
|
tsk->comm, tsk->pid, str,
|
|
|
|
regs->ip, regs->sp, error_code);
|
|
|
|
print_vma_addr(" in ", regs->ip);
|
|
|
|
printk("\n");
|
2005-04-16 18:20:36 -04:00
|
|
|
}
|
|
|
|
|
2008-07-01 19:31:34 -04:00
|
|
|
if (info)
|
|
|
|
force_sig_info(signr, info, tsk);
|
|
|
|
else
|
|
|
|
force_sig(signr, tsk);
|
|
|
|
return;
|
2005-04-16 18:20:36 -04:00
|
|
|
|
2008-07-01 19:31:34 -04:00
|
|
|
kernel_trap:
|
2008-01-30 07:32:59 -05:00
|
|
|
if (!fixup_exception(regs)) {
|
|
|
|
tsk->thread.error_code = error_code;
|
|
|
|
tsk->thread.trap_no = trapnr;
|
|
|
|
die(str, regs, error_code);
|
2005-04-16 18:20:36 -04:00
|
|
|
}
|
2008-01-30 07:32:59 -05:00
|
|
|
return;
|
2005-04-16 18:20:36 -04:00
|
|
|
}
|
|
|
|
|
/* Generate a trap handler that delivers a bare signal (no siginfo). */
#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, NULL);		\
}
/*
 * Generate a trap handler that delivers a signal with an attached
 * siginfo (si_code and fault address).
 *
 * The siginfo is zeroed before the relevant fields are filled in:
 * signal delivery copies the whole structure to userspace, and a
 * partially-initialized stack object would leak kernel stack contents
 * through the unused union members and padding.
 */
#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)		\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	siginfo_t info;							\
	memset(&info, 0, sizeof(info));					\
	info.si_signo = signr;						\
	info.si_errno = 0;						\
	info.si_code = sicode;						\
	info.si_addr = (void __user *)siaddr;				\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, &info);		\
}
2008-07-01 19:29:44 -04:00
|
|
|
DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
|
|
|
|
DO_ERROR(4, SIGSEGV, "overflow", overflow)
|
|
|
|
DO_ERROR(5, SIGSEGV, "bounds", bounds)
|
|
|
|
DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
|
|
|
|
DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
|
2005-04-16 18:20:36 -04:00
|
|
|
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
|
2008-07-01 19:29:44 -04:00
|
|
|
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
|
2005-04-16 18:20:36 -04:00
|
|
|
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
|
2006-05-15 12:19:47 -04:00
|
|
|
|
|
|
|
/* Runs on IST stack */
|
2008-09-30 12:41:36 -04:00
|
|
|
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
|
2006-05-15 12:19:47 -04:00
|
|
|
{
|
|
|
|
if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
|
|
|
|
12, SIGBUS) == NOTIFY_STOP)
|
|
|
|
return;
|
|
|
|
preempt_conditional_sti(regs);
|
|
|
|
do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
|
|
|
|
preempt_conditional_cli(regs);
|
|
|
|
}
|
2006-01-11 16:42:17 -05:00
|
|
|
|
2008-09-30 12:41:36 -04:00
|
|
|
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
|
2006-01-11 16:42:17 -05:00
|
|
|
{
|
|
|
|
static const char str[] = "double fault";
|
|
|
|
struct task_struct *tsk = current;
|
|
|
|
|
|
|
|
/* Return not checked because double check cannot be ignored */
|
|
|
|
notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
|
|
|
|
|
|
|
|
tsk->thread.error_code = error_code;
|
|
|
|
tsk->thread.trap_no = 8;
|
|
|
|
|
|
|
|
/* This is always a kernel trap and never fixable (and thus must
|
|
|
|
never return). */
|
|
|
|
for (;;)
|
|
|
|
die(str, regs, error_code);
|
|
|
|
}
|
2005-04-16 18:20:36 -04:00
|
|
|
|
2008-09-30 12:41:36 -04:00
|
|
|
dotraplinkage void __kprobes
|
2008-07-01 19:32:04 -04:00
|
|
|
do_general_protection(struct pt_regs *regs, long error_code)
|
2005-04-16 18:20:36 -04:00
|
|
|
{
|
2008-07-01 19:32:04 -04:00
|
|
|
struct task_struct *tsk;
|
2006-01-11 16:42:14 -05:00
|
|
|
|
2005-04-16 18:20:36 -04:00
|
|
|
conditional_sti(regs);
|
|
|
|
|
2008-07-01 19:32:04 -04:00
|
|
|
tsk = current;
|
|
|
|
if (!user_mode(regs))
|
|
|
|
goto gp_in_kernel;
|
2005-04-16 18:20:36 -04:00
|
|
|
|
2008-07-01 19:32:04 -04:00
|
|
|
tsk->thread.error_code = error_code;
|
|
|
|
tsk->thread.trap_no = 13;
|
|
|
|
|
|
|
|
if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
|
|
|
|
printk_ratelimit()) {
|
|
|
|
printk(KERN_INFO
|
|
|
|
"%s[%d] general protection ip:%lx sp:%lx error:%lx",
|
|
|
|
tsk->comm, tsk->pid,
|
|
|
|
regs->ip, regs->sp, error_code);
|
|
|
|
print_vma_addr(" in ", regs->ip);
|
|
|
|
printk("\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
force_sig(SIGSEGV, tsk);
|
|
|
|
return;
|
2005-04-16 18:20:36 -04:00
|
|
|
|
2008-07-01 19:32:04 -04:00
|
|
|
gp_in_kernel:
|
2008-01-30 07:32:59 -05:00
|
|
|
if (fixup_exception(regs))
|
|
|
|
return;
|
2007-05-02 13:27:05 -04:00
|
|
|
|
2008-01-30 07:32:59 -05:00
|
|
|
tsk->thread.error_code = error_code;
|
|
|
|
tsk->thread.trap_no = 13;
|
|
|
|
if (notify_die(DIE_GPF, "general protection fault", regs,
|
|
|
|
error_code, 13, SIGSEGV) == NOTIFY_STOP)
|
|
|
|
return;
|
|
|
|
die("general protection fault", regs, error_code);
|
2005-04-16 18:20:36 -04:00
|
|
|
}
|
|
|
|
|
2008-04-19 13:19:55 -04:00
|
|
|
static notrace __kprobes void
|
2008-07-01 19:29:44 -04:00
|
|
|
mem_parity_error(unsigned char reason, struct pt_regs *regs)
|
2005-04-16 18:20:36 -04:00
|
|
|
{
|
2006-09-26 04:52:27 -04:00
|
|
|
printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
|
|
|
|
reason);
|
2006-12-06 20:14:03 -05:00
|
|
|
printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
|
2006-09-26 04:52:27 -04:00
|
|
|
|
2007-07-19 04:49:46 -04:00
|
|
|
#if defined(CONFIG_EDAC)
|
2008-07-01 19:29:44 -04:00
|
|
|
if (edac_handler_set()) {
|
2007-07-19 04:49:46 -04:00
|
|
|
edac_atomic_assert_error();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2006-09-26 04:52:27 -04:00
|
|
|
if (panic_on_unrecovered_nmi)
|
2006-09-26 04:52:27 -04:00
|
|
|
panic("NMI: Not continuing");
|
|
|
|
|
|
|
|
printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
|
2005-04-16 18:20:36 -04:00
|
|
|
|
|
|
|
/* Clear and disable the memory parity error line. */
|
|
|
|
reason = (reason & 0xf) | 4;
|
|
|
|
outb(reason, 0x61);
|
|
|
|
}
|
|
|
|
|
2008-04-19 13:19:55 -04:00
|
|
|
static notrace __kprobes void
|
2008-07-01 19:29:44 -04:00
|
|
|
io_check_error(unsigned char reason, struct pt_regs *regs)
|
2005-04-16 18:20:36 -04:00
|
|
|
{
|
2008-10-03 16:00:35 -04:00
|
|
|
unsigned long i;
|
|
|
|
|
|
|
|
printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
|
2005-04-16 18:20:36 -04:00
|
|
|
show_registers(regs);
|
|
|
|
|
|
|
|
/* Re-enable the IOCK line, wait for a few seconds */
|
|
|
|
reason = (reason & 0xf) | 8;
|
|
|
|
outb(reason, 0x61);
|
2008-10-03 16:00:35 -04:00
|
|
|
|
|
|
|
i = 2000;
|
|
|
|
while (--i)
|
|
|
|
udelay(1000);
|
|
|
|
|
2005-04-16 18:20:36 -04:00
|
|
|
reason &= ~8;
|
|
|
|
outb(reason, 0x61);
|
|
|
|
}
|
|
|
|
|
2008-04-19 13:19:55 -04:00
|
|
|
static notrace __kprobes void
|
2008-07-29 01:48:55 -04:00
|
|
|
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
|
2006-09-26 04:52:27 -04:00
|
|
|
{
|
2008-07-29 01:48:55 -04:00
|
|
|
if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
|
|
|
|
NOTIFY_STOP)
|
2008-02-15 15:55:53 -05:00
|
|
|
return;
|
2006-09-26 04:52:27 -04:00
|
|
|
printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
|
|
|
|
reason);
|
|
|
|
printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
|
2006-09-26 04:52:27 -04:00
|
|
|
|
|
|
|
if (panic_on_unrecovered_nmi)
|
2006-09-26 04:52:27 -04:00
|
|
|
panic("NMI: Not continuing");
|
2006-09-26 04:52:27 -04:00
|
|
|
|
2006-09-26 04:52:27 -04:00
|
|
|
printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
|
2005-04-16 18:20:36 -04:00
|
|
|
}
|
|
|
|
|
2005-04-16 18:25:03 -04:00
|
|
|
/* Runs on IST stack. This code must keep interrupts off all the time.
|
|
|
|
Nested NMIs are prevented by the CPU. */
|
2008-07-02 12:39:01 -04:00
|
|
|
asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs)
|
2005-04-16 18:20:36 -04:00
|
|
|
{
|
|
|
|
unsigned char reason = 0;
|
2005-06-25 17:55:00 -04:00
|
|
|
int cpu;
|
|
|
|
|
|
|
|
cpu = smp_processor_id();
|
2005-04-16 18:20:36 -04:00
|
|
|
|
2008-07-01 19:29:44 -04:00
|
|
|
/* Only the BSP gets external NMIs from the system. */
|
2005-06-25 17:55:00 -04:00
|
|
|
if (!cpu)
|
2005-04-16 18:20:36 -04:00
|
|
|
reason = get_nmi_reason();
|
|
|
|
|
|
|
|
if (!(reason & 0xc0)) {
|
2006-01-11 16:42:14 -05:00
|
|
|
if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
|
2005-04-16 18:20:36 -04:00
|
|
|
== NOTIFY_STOP)
|
|
|
|
return;
|
|
|
|
/*
|
|
|
|
* Ok, so this is none of the documented NMI sources,
|
|
|
|
* so it must be the NMI watchdog.
|
|
|
|
*/
|
2008-07-01 19:29:44 -04:00
|
|
|
if (nmi_watchdog_tick(regs, reason))
|
2005-04-16 18:20:36 -04:00
|
|
|
return;
|
2008-07-01 19:29:44 -04:00
|
|
|
if (!do_nmi_callback(regs, cpu))
|
2006-09-26 04:52:26 -04:00
|
|
|
unknown_nmi_error(reason, regs);
|
|
|
|
|
2005-04-16 18:20:36 -04:00
|
|
|
return;
|
|
|
|
}
|
2006-01-11 16:42:14 -05:00
|
|
|
if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
|
2008-07-01 19:29:44 -04:00
|
|
|
return;
|
2005-04-16 18:20:36 -04:00
|
|
|
|
|
|
|
/* AK: following checks seem to be broken on modern chipsets. FIXME */
|
|
|
|
if (reason & 0x80)
|
|
|
|
mem_parity_error(reason, regs);
|
|
|
|
if (reason & 0x40)
|
|
|
|
io_check_error(reason, regs);
|
|
|
|
}
|
|
|
|
|
2008-09-30 12:41:36 -04:00
|
|
|
dotraplinkage notrace __kprobes void
|
2008-05-24 11:36:33 -04:00
|
|
|
do_nmi(struct pt_regs *regs, long error_code)
|
|
|
|
{
|
|
|
|
nmi_enter();
|
2008-07-01 19:29:44 -04:00
|
|
|
|
2008-05-24 11:36:33 -04:00
|
|
|
add_pda(__nmi_count, 1);
|
2008-07-01 19:29:44 -04:00
|
|
|
|
2008-05-24 11:36:33 -04:00
|
|
|
if (!ignore_nmis)
|
|
|
|
default_do_nmi(regs);
|
2008-07-01 19:29:44 -04:00
|
|
|
|
2008-05-24 11:36:33 -04:00
|
|
|
nmi_exit();
|
|
|
|
}
|
|
|
|
|
|
|
|
void stop_nmi(void)
|
|
|
|
{
|
|
|
|
acpi_nmi_disable();
|
|
|
|
ignore_nmis++;
|
|
|
|
}
|
|
|
|
|
|
|
|
void restart_nmi(void)
|
|
|
|
{
|
|
|
|
ignore_nmis--;
|
|
|
|
acpi_nmi_enable();
|
|
|
|
}
|
|
|
|
|
2006-01-11 16:43:00 -05:00
|
|
|
/* runs on IST stack. */
|
2008-09-30 12:41:36 -04:00
|
|
|
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
|
2005-04-16 18:20:36 -04:00
|
|
|
{
|
2008-07-01 19:29:44 -04:00
|
|
|
if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
|
|
|
|
== NOTIFY_STOP)
|
2005-04-16 18:20:36 -04:00
|
|
|
return;
|
2008-07-01 19:29:44 -04:00
|
|
|
|
2006-05-15 12:19:47 -04:00
|
|
|
preempt_conditional_sti(regs);
|
2005-04-16 18:20:36 -04:00
|
|
|
do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
|
2006-05-15 12:19:47 -04:00
|
|
|
preempt_conditional_cli(regs);
|
2005-04-16 18:20:36 -04:00
|
|
|
}
|
|
|
|
|
2005-04-16 18:25:03 -04:00
|
|
|
/* Help handler running on IST stack to switch back to user stack
|
|
|
|
for scheduling or signal handling. The actual stack switch is done in
|
|
|
|
entry.S */
|
2006-02-03 15:50:41 -05:00
|
|
|
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
|
2005-04-16 18:25:03 -04:00
|
|
|
{
|
|
|
|
struct pt_regs *regs = eregs;
|
|
|
|
/* Did already sync */
|
2008-01-30 07:30:56 -05:00
|
|
|
if (eregs == (struct pt_regs *)eregs->sp)
|
2005-04-16 18:25:03 -04:00
|
|
|
;
|
|
|
|
/* Exception from user space */
|
2005-06-23 03:08:46 -04:00
|
|
|
else if (user_mode(eregs))
|
2006-01-12 04:05:38 -05:00
|
|
|
regs = task_pt_regs(current);
|
2005-04-16 18:25:03 -04:00
|
|
|
/* Exception from kernel and interrupts are enabled. Move to
|
2008-07-29 01:48:55 -04:00
|
|
|
kernel process stack. */
|
2008-01-30 07:30:56 -05:00
|
|
|
else if (eregs->flags & X86_EFLAGS_IF)
|
|
|
|
regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
|
2005-04-16 18:25:03 -04:00
|
|
|
if (eregs != regs)
|
|
|
|
*regs = *eregs;
|
|
|
|
return regs;
|
|
|
|
}
|
|
|
|
|
2005-04-16 18:20:36 -04:00
|
|
|
/* runs on IST stack. */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	unsigned long condition;
	int si_code;

	/* DR6 holds the pending debug-trap status bits */
	get_debugreg(condition, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
	tsk->thread.debugctlmsr = 0;

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
						SIGTRAP) == NOTIFY_STOP)
		return;

	/* It's safe to allow irq's after DR6 has been saved */
	/*
	 * NOTE: preempt_conditional_sti() also disables preemption so a
	 * preempted task cannot be switched away while it still owns the
	 * per-cpu DEBUG_STACK IST stack (see the PTRACE_SINGLESTEP oops
	 * this guards against).
	 */
	preempt_conditional_sti(regs);

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7)
			goto clear_dr7;
	}

	/* Save debug status register where ptrace can see it */
	tsk->thread.debugreg6 = condition;

	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 */
	if (condition & DR_STEP) {
		if (!user_mode(regs))
			goto clear_TF_reenable;
	}

	si_code = get_si_code(condition);
	/* Ok, finally something we can handle */
	send_sigtrap(tsk, regs, error_code, si_code);

	/*
	 * Disable additional traps. They'll be re-enabled when
	 * the signal is delivered.
	 */
clear_dr7:
	set_debugreg(0, 7);
	preempt_conditional_cli(regs);
	return;

clear_TF_reenable:
	/* Kernel-mode single step: squash TF now, redo it on user return */
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->flags &= ~X86_EFLAGS_TF;
	preempt_conditional_cli(regs);
	return;
}
|
|
|
|
|
2006-01-11 16:42:14 -05:00
|
|
|
static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
|
2005-04-16 18:20:36 -04:00
|
|
|
{
|
2008-01-30 07:32:59 -05:00
|
|
|
if (fixup_exception(regs))
|
2005-04-16 18:20:36 -04:00
|
|
|
return 1;
|
2008-01-30 07:32:59 -05:00
|
|
|
|
2006-01-11 16:42:14 -05:00
|
|
|
notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
|
2005-04-16 18:25:06 -04:00
|
|
|
/* Illegal floating point operation in the kernel */
|
2006-01-11 16:42:14 -05:00
|
|
|
current->thread.trap_no = trapnr;
|
2005-04-16 18:20:36 -04:00
|
|
|
die(str, regs, 0);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Note that we play around with the 'TS' bit in an attempt to get
|
|
|
|
* the correct behaviour even in the presence of the asynchronous
|
|
|
|
* IRQ13 behaviour
|
|
|
|
*/
|
2008-09-30 12:41:34 -04:00
|
|
|
void math_error(void __user *ip)
|
2005-04-16 18:20:36 -04:00
|
|
|
{
|
2008-07-01 19:29:44 -04:00
|
|
|
struct task_struct *task;
|
2005-04-16 18:20:36 -04:00
|
|
|
siginfo_t info;
|
|
|
|
unsigned short cwd, swd;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Save the info for the exception handler and clear the error.
|
|
|
|
*/
|
|
|
|
task = current;
|
|
|
|
save_init_fpu(task);
|
|
|
|
task->thread.trap_no = 16;
|
|
|
|
task->thread.error_code = 0;
|
|
|
|
info.si_signo = SIGFPE;
|
|
|
|
info.si_errno = 0;
|
|
|
|
info.si_code = __SI_FAULT;
|
2008-01-30 07:30:56 -05:00
|
|
|
info.si_addr = ip;
|
2005-04-16 18:20:36 -04:00
|
|
|
/*
|
|
|
|
* (~cwd & swd) will mask out exceptions that are not set to unmasked
|
|
|
|
* status. 0x3f is the exception bits in these regs, 0x200 is the
|
|
|
|
* C1 reg you need in case of a stack fault, 0x040 is the stack
|
|
|
|
* fault bit. We should only be taking one exception at a time,
|
|
|
|
* so if this combination doesn't produce any single exception,
|
|
|
|
* then we have a bad program that isn't synchronizing its FPU usage
|
|
|
|
* and it will suffer the consequences since we won't be able to
|
|
|
|
* fully reproduce the context of the exception
|
|
|
|
*/
|
|
|
|
cwd = get_fpu_cwd(task);
|
|
|
|
swd = get_fpu_swd(task);
|
[PATCH] x86-64: Fix incorrect FP signals
This is the same patch that went into i386 just before 2.6.13
came out. I still can't build 64-bit user apps, so I tested
with program (see below) in 32-bit mode on 64-bit kernel:
Before:
$ fpsig
handler: nr = 8, si = 0x0804bc90, vuc = 0x0804bd10
handler: altstack is at 0x0804b000, ebp = 0x0804bc7c
handler: si_signo = 8, si_errno = 0, si_code = 0 [unknown]
handler: fpu cwd = 0xb40, fpu swd = 0xbaa0
handler: i387 unmasked precision exception, rounded up
After:
$ fpsig
handler: nr = 8, si = 0x0804bc90, vuc = 0x0804bd10
handler: altstack is at 0x0804b000, ebp = 0x0804bc7c
handler: si_signo = 8, si_errno = 0, si_code = 6 [inexact result]
handler: fpu cwd = 0xb40, fpu swd = 0xbaa0
handler: i387 unmasked precision exception, rounded up
Signed-off-by: Chuck Ebbert <76306.1226@compuserve.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-09-12 12:49:25 -04:00
|
|
|
switch (swd & ~cwd & 0x3f) {
|
2008-07-01 19:29:44 -04:00
|
|
|
case 0x000: /* No unmasked exception */
|
|
|
|
default: /* Multiple exceptions */
|
|
|
|
break;
|
|
|
|
case 0x001: /* Invalid Op */
|
|
|
|
/*
|
|
|
|
* swd & 0x240 == 0x040: Stack Underflow
|
|
|
|
* swd & 0x240 == 0x240: Stack Overflow
|
|
|
|
* User must clear the SF bit (0x40) if set
|
|
|
|
*/
|
|
|
|
info.si_code = FPE_FLTINV;
|
|
|
|
break;
|
|
|
|
case 0x002: /* Denormalize */
|
|
|
|
case 0x010: /* Underflow */
|
|
|
|
info.si_code = FPE_FLTUND;
|
|
|
|
break;
|
|
|
|
case 0x004: /* Zero Divide */
|
|
|
|
info.si_code = FPE_FLTDIV;
|
|
|
|
break;
|
|
|
|
case 0x008: /* Overflow */
|
|
|
|
info.si_code = FPE_FLTOVF;
|
|
|
|
break;
|
|
|
|
case 0x020: /* Precision */
|
|
|
|
info.si_code = FPE_FLTRES;
|
|
|
|
break;
|
2005-04-16 18:20:36 -04:00
|
|
|
}
|
|
|
|
force_sig_info(SIGFPE, &info, task);
|
|
|
|
}
|
|
|
|
|
2008-09-30 12:41:36 -04:00
|
|
|
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
|
2008-09-30 12:41:34 -04:00
|
|
|
{
|
|
|
|
conditional_sti(regs);
|
|
|
|
if (!user_mode(regs) &&
|
|
|
|
kernel_math_error(regs, "kernel x87 math error", 16))
|
|
|
|
return;
|
|
|
|
math_error((void __user *)regs->ip);
|
|
|
|
}
|
|
|
|
|
2005-04-16 18:20:36 -04:00
|
|
|
asmlinkage void bad_intr(void)
|
|
|
|
{
|
2008-07-29 01:48:55 -04:00
|
|
|
printk("bad interrupt");
|
2005-04-16 18:20:36 -04:00
|
|
|
}
|
|
|
|
|
2008-09-30 12:41:34 -04:00
|
|
|
/*
 * Decode an unmasked SSE/SIMD floating-point exception and deliver
 * SIGFPE with the matching si_code.  Mirrors math_error(), except the
 * status and mask bits both live in the single MXCSR register.
 */
static void simd_math_error(void __user *ip)
{
	struct task_struct *tsk = current;
	unsigned short mxcsr;
	siginfo_t info;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	save_init_fpu(tsk);
	tsk->thread.trap_no = 19;
	tsk->thread.error_code = 0;

	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;

	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(tsk);

	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
	case 0x001: /* Invalid Op */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	case 0x000: /* no unmasked exception */
	default:    /* multiple exceptions */
		/* si_code stays __SI_FAULT */
		break;
	}
	force_sig_info(SIGFPE, &info, tsk);
}
|
|
|
|
|
2008-09-30 12:41:36 -04:00
|
|
|
dotraplinkage void
|
|
|
|
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
|
2008-09-30 12:41:34 -04:00
|
|
|
{
|
|
|
|
conditional_sti(regs);
|
|
|
|
if (!user_mode(regs) &&
|
|
|
|
kernel_math_error(regs, "kernel simd math error", 19))
|
|
|
|
return;
|
|
|
|
simd_math_error((void __user *)regs->ip);
|
|
|
|
}
|
|
|
|
|
2008-09-30 12:41:36 -04:00
|
|
|
/*
 * Vector 15 handler.  Intentionally a no-op: the trap is simply
 * ignored (installed in trap_init() as &spurious_interrupt_bug).
 */
dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
}
|
|
|
|
|
|
|
|
/*
 * Weak no-op stub; a real implementation elsewhere (when built in)
 * overrides this symbol at link time.
 */
asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}
|
|
|
|
|
|
|
|
/*
 * Weak no-op stub; a real implementation elsewhere (when built in)
 * overrides this symbol at link time.
 */
asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}
|
|
|
|
|
|
|
|
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	/* First FPU use by this task: allocate and init its FPU state */
	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	clts();				/* Allow maths ops (or we recurse) */
	/*
	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
	 * (Do NOT re-save into the thread struct here: the live registers
	 * may belong to another context and saving them would leak FPU
	 * state across processes.)
	 */
	if (unlikely(restore_fpu_checking(tsk))) {
		stts();			/* re-arm the #NM trap */
		force_sig(SIGSEGV, tsk);
		return;
	}
	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
	tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);
|
2005-04-16 18:20:36 -04:00
|
|
|
|
2008-09-30 12:41:36 -04:00
|
|
|
/*
 * Device-not-available trap (#NM, vector 7): raised on the first FPU
 * use after CR0.TS was set; lazily (re)loads the task's FPU state.
 */
dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error)
{
	math_state_restore();
}
|
|
|
|
|
2005-04-16 18:20:36 -04:00
|
|
|
/*
 * Populate the IDT with the architectural exception handlers.
 * *_ist variants run their handler on a dedicated per-CPU IST stack;
 * set_system_* variants use DPL 3 so userspace may invoke the vector
 * directly (int3, into, and the ia32 syscall gate).
 */
void __init trap_init(void)
{
	set_intr_gate(0, &divide_error);
	set_intr_gate_ist(1, &debug, DEBUG_STACK);
	set_intr_gate_ist(2, &nmi, NMI_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(4, &overflow);
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(13, &general_protection);
	set_intr_gate(14, &page_fault);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
	set_intr_gate(19, &simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif
	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();
}
|