/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 *
 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
 * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
 * 2000-2002 x86-64 support by Andi Kleen
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/ucontext.h>
#include <asm/i387.h>
#include <asm/proto.h>
#include <asm/ia32_unistd.h>
#include <asm/mce.h>
#include <asm/syscall.h>
#include <asm/syscalls.h>
#include "sigframe.h"

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

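/*
 * Only these EFLAGS bits may be taken from a (possibly forged) sigcontext
 * on sigreturn; all other flag bits keep the kernel's current value.
 */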
#define __FIX_EFLAGS	(X86_EFLAGS_AC | X86_EFLAGS_OF | \
			 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
			 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
			 X86_EFLAGS_CF)

#ifdef CONFIG_X86_32
# define FIX_EFLAGS	(__FIX_EFLAGS | X86_EFLAGS_RF)
#else
# define FIX_EFLAGS	__FIX_EFLAGS
#endif

asmlinkage long
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
		struct pt_regs *regs)
{
	return do_sigaltstack(uss, uoss, regs->sp);
}

#define COPY(x) {				\
	err |= __get_user(regs->x, &sc->x);	\
}

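/*
 * Copy a segment selector out of the sigcontext, forcing RPL 3 so that
 * user space cannot install a selector with kernel privilege.
 */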
#define COPY_SEG_STRICT(seg) {			\
	unsigned short tmp;			\
	err |= __get_user(tmp, &sc->seg);	\
	regs->seg = tmp | 3;			\
}

/*
 * Do a signal return; undo the signal stack.
 */
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
		   unsigned long *pax)
{
	unsigned int err = 0;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
	COPY(dx); COPY(cx); COPY(ip);
	COPY(r8);
	COPY(r9);
	COPY(r10);
	COPY(r11);
	COPY(r12);
	COPY(r13);
	COPY(r14);
	COPY(r15);

	/* Kernel saves and restores only the CS segment register on signals,
	 * which is the bare minimum needed to allow mixed 32/64-bit code.
	 * App's signal handler can save/restore other segments if needed. */
	COPY_SEG_STRICT(cs);

	{
		unsigned int tmpflags;
		err |= __get_user(tmpflags, &sc->flags);
		regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
		regs->orig_ax = -1;		/* disable syscall checks */
	}

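	/*
	 * Restore the FPU/extended state from the user-space buffer that
	 * frame setup recorded in sc->fpstate.
	 */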
	{
		void __user *buf;

		err |= __get_user(buf, &sc->fpstate);
		err |= restore_i387_xstate(buf);
	}

	err |= __get_user(*pax, &sc->ax);
	return err;
}

static long do_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	unsigned long ax;
	sigset_t set;

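	/*
	 * The handler returned through the restorer, popping the return
	 * slot (pretcode) at the start of the frame, so the frame begins
	 * one word below the current user stack pointer.
	 */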
	frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

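	/* SIGKILL and SIGSTOP can never be blocked; strip them from the mask. */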
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
		goto badframe;

	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
		goto badframe;

	return ax;

badframe:
	signal_fault(regs, frame, "rt_sigreturn");
	return 0;
}

asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
{
	return do_rt_sigreturn(regs);
}

/*
 * Set up a signal frame.
 */

static inline int
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
		 unsigned long mask, struct task_struct *me)
{
	int err = 0;

	err |= __put_user(regs->cs, &sc->cs);
	err |= __put_user(0, &sc->gs);
	err |= __put_user(0, &sc->fs);

	err |= __put_user(regs->di, &sc->di);
	err |= __put_user(regs->si, &sc->si);
	err |= __put_user(regs->bp, &sc->bp);
	err |= __put_user(regs->sp, &sc->sp);
	err |= __put_user(regs->bx, &sc->bx);
	err |= __put_user(regs->dx, &sc->dx);
	err |= __put_user(regs->cx, &sc->cx);
	err |= __put_user(regs->ax, &sc->ax);
	err |= __put_user(regs->r8, &sc->r8);
	err |= __put_user(regs->r9, &sc->r9);
	err |= __put_user(regs->r10, &sc->r10);
	err |= __put_user(regs->r11, &sc->r11);
	err |= __put_user(regs->r12, &sc->r12);
	err |= __put_user(regs->r13, &sc->r13);
	err |= __put_user(regs->r14, &sc->r14);
	err |= __put_user(regs->r15, &sc->r15);
	err |= __put_user(me->thread.trap_no, &sc->trapno);
	err |= __put_user(me->thread.error_code, &sc->err);
	err |= __put_user(regs->ip, &sc->ip);
	err |= __put_user(regs->flags, &sc->flags);
	err |= __put_user(mask, &sc->oldmask);
	err |= __put_user(me->thread.cr2, &sc->cr2);

	return err;
}

/*
 * Determine which stack to use..
 */

static void __user *
get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
{
	unsigned long sp;

	/* Default to using normal stack - redzone */
	sp = regs->sp - 128;

	/* This is the X/Open sanctioned signal stack switching. */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		if (sas_ss_flags(sp) == 0)
			sp = current->sas_ss_sp + current->sas_ss_size;
	}

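	/*
	 * Align down to a 64-byte boundary, which satisfies the alignment
	 * the XSAVE instruction requires for the FP/extended state area
	 * placed at the start of this allocation.
	 */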
	return (void __user *)round_down(sp - size, 64);
}

static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			    sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *fp = NULL;
	int err = 0;
	struct task_struct *me = current;

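	/*
	 * Carve out room for the FP/extended state (when the task has used
	 * the FPU) and for the frame itself.  The trailing "- 8" leaves the
	 * stack pointer at handler entry congruent to 8 mod 16, exactly as
	 * the ABI expects after a call has pushed a return address.
	 */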
	if (used_math()) {
		fp = get_stack(ka, regs, sig_xstate_size);
		frame = (void __user *)round_down(
			(unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;

		if (save_i387_xstate(fp) < 0)
			return -EFAULT;
	} else
		frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8;

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	if (ka->sa.sa_flags & SA_SIGINFO) {
		if (copy_siginfo_to_user(&frame->info, info))
			return -EFAULT;
	}

	/* Create the ucontext. */
	if (cpu_has_xsave)
		err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
	else
		err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->sp),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me);
	err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate);
	if (sizeof(*set) == 16) {
		__put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
		__put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
	} else
		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace. */
	/* x86-64 should always use SA_RESTORER. */
	if (ka->sa.sa_flags & SA_RESTORER) {
		err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
	} else {
		/* could use a vstub here */
		return -EFAULT;
	}

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->di = sig;
	/* In case the signal handler was declared without prototypes */
	regs->ax = 0;

	/* This also works for non SA_SIGINFO handlers because they expect the
	   next argument after the signal number on the stack. */
	regs->si = (unsigned long)&frame->info;
	regs->dx = (unsigned long)&frame->uc;
	regs->ip = (unsigned long) ka->sa.sa_handler;

	regs->sp = (unsigned long)frame;

	/* Set up the CS register to run signal handlers in 64-bit mode,
	   even if the handler happens to be interrupting 32-bit code. */
	regs->cs = __USER_CS;

	return 0;
}

/*
 * OK, we're invoking a handler
 */
static int signr_convert(int sig)
{
	return sig;
}

#ifdef CONFIG_IA32_EMULATION
#define is_ia32	test_thread_flag(TIF_IA32)
#else
#define is_ia32	0
#endif

static int
setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
	       sigset_t *set, struct pt_regs *regs)
{
	int usig = signr_convert(sig);
	int ret;

	/* Set up the stack frame */
	if (is_ia32) {
		if (ka->sa.sa_flags & SA_SIGINFO)
			ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
		else
			ret = ia32_setup_frame(usig, ka, set, regs);
	} else
		ret = __setup_rt_frame(sig, ka, info, set, regs);

	if (ret) {
		force_sigsegv(sig, current);
		return -EFAULT;
	}

	return ret;
}

static int
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
	      sigset_t *oldset, struct pt_regs *regs)
{
	int ret;

	/* Are we from a system call? */
	if (syscall_get_nr(current, regs) >= 0) {
		/* If so, check system call restarting.. */
		switch (syscall_get_error(current, regs)) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->ax = -EINTR;
			break;

		case -ERESTARTSYS:
			if (!(ka->sa.sa_flags & SA_RESTART)) {
				regs->ax = -EINTR;
				break;
			}
			/* fallthrough */
		case -ERESTARTNOINTR:
			regs->ax = regs->orig_ax;
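			/* Back up over the two-byte syscall instruction so
			   it is re-executed when the handler returns. */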
			regs->ip -= 2;
			break;
		}
	}

	/*
	 * If TF is set due to a debugger (TIF_FORCED_TF), clear the TF
	 * flag so that register information in the sigcontext is correct.
	 */
	if (unlikely(regs->flags & X86_EFLAGS_TF) &&
	    likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
		regs->flags &= ~X86_EFLAGS_TF;

	ret = setup_rt_frame(sig, ka, info, oldset, regs);

	if (ret)
		return ret;

#ifdef CONFIG_X86_64
	/*
	 * This has nothing to do with segment registers,
	 * despite the name. This magic affects uaccess.h
	 * macros' behavior. Reset it to the normal setting.
	 */
	set_fs(USER_DS);
#endif

	/*
	 * Clear the direction flag as per the ABI for function entry.
	 */
	regs->flags &= ~X86_EFLAGS_DF;

	/*
	 * Clear TF when entering the signal handler, but
	 * notify any tracer that was single-stepping it.
	 * The tracer may want to single-step inside the
	 * handler too.
	 */
	regs->flags &= ~X86_EFLAGS_TF;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NODEFER))
		sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	tracehook_signal_handler(sig, info, ka, regs,
				 test_thread_flag(TIF_SINGLESTEP));

	return 0;
}

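/*
 * restart_syscall has a different system call number for 32-bit (compat)
 * and 64-bit tasks, so pick the right one based on TIF_IA32.
 */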
#define NR_restart_syscall	\
	test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
static void do_signal(struct pt_regs *regs)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr;
	sigset_t *oldset;

	/*
	 * We want the common case to go fast, which is why we may in certain
	 * cases get here from kernel mode. Just return without doing anything
	 * if so.
	 * X86_32: vm86 regs switched out by assembly code before reaching
	 * here, so testing against kernel CS suffices.
	 */
	if (!user_mode(regs))
		return;

	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
		oldset = &current->saved_sigmask;
	else
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/*
		 * Re-enable any watchpoints before delivering the
		 * signal to user space. The processor register will
		 * have been cleared if the watchpoint triggered
		 * inside the kernel.
		 */
		if (current->thread.debugreg7)
			set_debugreg(current->thread.debugreg7, 7);

		/* Whee! Actually deliver the signal. */
		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
			/*
			 * A signal was successfully delivered; the saved
			 * sigmask will have been stored in the signal frame,
			 * and will be restored by sigreturn, so we can simply
			 * clear the TS_RESTORE_SIGMASK flag.
			 */
			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
		}
		return;
	}

	/* Did we come from a system call? */
	if (syscall_get_nr(current, regs) >= 0) {
		/* Restart the system call - no handlers present */
		switch (syscall_get_error(current, regs)) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			regs->ax = regs->orig_ax;
			regs->ip -= 2;
			break;

		case -ERESTART_RESTARTBLOCK:
			regs->ax = NR_restart_syscall;
			regs->ip -= 2;
			break;
		}
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back.
	 */
	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}
}

/*
 * notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
void
do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
	/* notify userspace of pending MCEs */
	if (thread_info_flags & _TIF_MCE_NOTIFY)
		mce_notify_user();
#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */

	/* deal with pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}

#ifdef CONFIG_X86_32
	clear_thread_flag(TIF_IRET);
#endif /* CONFIG_X86_32 */
}

void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
{
	struct task_struct *me = current;

	if (show_unhandled_signals && printk_ratelimit()) {
		printk(KERN_INFO
		       "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
		       me->comm, me->pid, where, frame,
		       regs->ip, regs->sp, regs->orig_ax);
		print_vma_addr(" in ", regs->ip);
		printk(KERN_CONT "\n");
	}

	force_sig(SIGSEGV, me);
}