/*
 * linux/kernel/irq/proc.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>

#include "internals.h"
/* The /proc/irq directory; created once in init_irq_proc(). */
static struct proc_dir_entry *root_irq_dir;
#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_PENDING_IRQ
/*
 * proc_set_irq_affinity - apply a new affinity mask written via
 * /proc/irq/<irq>/smp_affinity, using deferred migration.
 *
 * Reprogramming routing entries directly from process context can
 * confuse some chipsets; instead the mask is recorded and the actual
 * reprogramming happens when the interrupt is next pending.
 */
void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
{
	set_balance_irq_affinity(irq, mask_val);

	/*
	 * Save these away for later use. Re-program when the
	 * interrupt is pending.
	 */
	set_pending_irq(irq, mask_val);
}
#else
|
|
|
|
void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
|
2005-04-16 18:20:36 -04:00
|
|
|
{
|
2006-06-23 05:04:22 -04:00
|
|
|
set_balance_irq_affinity(irq, mask_val);
|
2006-06-29 05:24:38 -04:00
|
|
|
irq_desc[irq].affinity = mask_val;
|
[PATCH] genirq: rename desc->handler to desc->chip
This patch-queue improves the generic IRQ layer to be truly generic, by adding
various abstractions and features to it, without impacting existing
functionality.
While the queue can be best described as "fix and improve everything in the
generic IRQ layer that we could think of", and thus it consists of many
smaller features and lots of cleanups, the one feature that stands out most is
the new 'irq chip' abstraction.
The irq-chip abstraction is about describing and coding and IRQ controller
driver by mapping its raw hardware capabilities [and quirks, if needed] in a
straightforward way, without having to think about "IRQ flow"
(level/edge/etc.) type of details.
This stands in contrast with the current 'irq-type' model of genirq
architectures, which 'mixes' raw hardware capabilities with 'flow' details.
The patchset supports both types of irq controller designs at once, and
converts i386 and x86_64 to the new irq-chip design.
As a bonus side-effect of the irq-chip approach, chained interrupt controllers
(master/slave PIC constructs, etc.) are now supported by design as well.
The end result of this patchset intends to be simpler architecture-level code
and more consolidation between architectures.
We reused many bits of code and many concepts from Russell King's ARM IRQ
layer, the merging of which was one of the motivations for this patchset.
This patch:
rename desc->handler to desc->chip.
Originally i did not want to do this, because it's a big patch. But having
both "desc->handler", "desc->handle_irq" and "action->handler" caused a
large degree of confusion and made the code appear alot less clean than it
truly is.
I have also attempted a dual approach as well by introducing a
desc->chip alias - but that just wasnt robust enough and broke
frequently.
So lets get over with this quickly. The conversion was done automatically
via scripts and converts all the code in the kernel.
This renaming patch is the first one amongst the patches, so that the
remaining patches can stay flexible and can be merged and split up
without having some big monolithic patch act as a merge barrier.
[akpm@osdl.org: build fix]
[akpm@osdl.org: another build fix]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-29 05:24:36 -04:00
|
|
|
irq_desc[irq].chip->set_affinity(irq, mask_val);
|
2005-04-16 18:20:36 -04:00
|
|
|
}
|
[PATCH] x86/x86_64: deferred handling of writes to /proc/irqxx/smp_affinity
When handling writes to /proc/irq, current code is re-programming rte
entries directly. This is not recommended and could potentially cause
chipset's to lockup, or cause missing interrupts.
CONFIG_IRQ_BALANCE does this correctly, where it re-programs only when the
interrupt is pending. The same needs to be done for /proc/irq handling as well.
Otherwise user space irq balancers are really not doing the right thing.
- Changed pending_irq_balance_cpumask to pending_irq_migrate_cpumask for
lack of a generic name.
- added move_irq out of IRQ_BALANCE, and added this same to X86_64
- Added new proc handler for write, so we can do deferred write at irq
handling time.
- Display of /proc/irq/XX/smp_affinity used to display CPU_MASKALL, instead
it now shows only active cpu masks, or exactly what was set.
- Provided a common move_irq implementation, instead of duplicating
when using generic irq framework.
Tested on i386/x86_64 and ia64 with CONFIG_PCI_MSI turned on and off.
Tested UP builds as well.
MSI testing: tbd: I have cards, need to look for a x-over cable, although I
did test an earlier version of this patch. Will test in a couple days.
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Acked-by: Zwane Mwaikambo <zwane@holomorphy.com>
Grudgingly-acked-by: Andi Kleen <ak@muc.de>
Signed-off-by: Coywolf Qi Hunt <coywolf@lovecn.org>
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-09-06 18:16:15 -04:00
|
|
|
#endif
|
2005-04-16 18:20:36 -04:00
|
|
|
|
|
|
|
/*
 * /proc read handler for /proc/irq/<irq>/smp_affinity: print the
 * currently recorded affinity mask followed by a newline.
 * @data carries the irq number (stored as a pointer-sized integer).
 */
static int irq_affinity_read_proc(char *page, char **start, off_t off,
				  int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, irq_desc[(long)data].affinity);

	/* Need room for the trailing newline and NUL terminator. */
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
/*
 * When nonzero, writes to /proc/irq/<irq>/smp_affinity are rejected
 * (checked in irq_affinity_write_proc()).  Set outside this file.
 */
int no_irq_affinity;
|
|
|
|
static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
|
|
|
|
unsigned long count, void *data)
|
|
|
|
{
|
|
|
|
unsigned int irq = (int)(long)data, full_count = count, err;
|
|
|
|
cpumask_t new_value, tmp;
|
|
|
|
|
2006-12-08 05:35:58 -05:00
|
|
|
if (!irq_desc[irq].chip->set_affinity || no_irq_affinity ||
|
|
|
|
CHECK_IRQ_PER_CPU(irq_desc[irq].status))
|
2005-04-16 18:20:36 -04:00
|
|
|
return -EIO;
|
|
|
|
|
2006-10-11 04:21:55 -04:00
|
|
|
err = cpumask_parse_user(buffer, count, new_value);
|
2005-04-16 18:20:36 -04:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Do not allow disabling IRQs completely - it's a too easy
|
|
|
|
* way to make the system unusable accidentally :-) At least
|
|
|
|
* one online CPU still has to be targeted.
|
|
|
|
*/
|
|
|
|
cpus_and(tmp, new_value, cpu_online_map);
|
|
|
|
if (cpus_empty(tmp))
|
2006-01-06 03:12:21 -05:00
|
|
|
/* Special case for empty set - allow the architecture
|
|
|
|
code to set default SMP affinity. */
|
|
|
|
return select_smp_affinity(irq) ? -EINVAL : full_count;
|
2005-04-16 18:20:36 -04:00
|
|
|
|
|
|
|
proc_set_irq_affinity(irq, new_value);
|
|
|
|
|
|
|
|
return full_count;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#define MAX_NAMELEN 128
|
|
|
|
|
|
|
|
static int name_unique(unsigned int irq, struct irqaction *new_action)
|
|
|
|
{
|
|
|
|
struct irq_desc *desc = irq_desc + irq;
|
|
|
|
struct irqaction *action;
|
|
|
|
|
|
|
|
for (action = desc->action ; action; action = action->next)
|
|
|
|
if ((action != new_action) && action->name &&
|
|
|
|
!strcmp(new_action->name, action->name))
|
|
|
|
return 0;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
void register_handler_proc(unsigned int irq, struct irqaction *action)
|
|
|
|
{
|
|
|
|
char name [MAX_NAMELEN];
|
|
|
|
|
2006-06-29 05:24:42 -04:00
|
|
|
if (!irq_desc[irq].dir || action->dir || !action->name ||
|
2005-04-16 18:20:36 -04:00
|
|
|
!name_unique(irq, action))
|
|
|
|
return;
|
|
|
|
|
|
|
|
memset(name, 0, MAX_NAMELEN);
|
|
|
|
snprintf(name, MAX_NAMELEN, "%s", action->name);
|
|
|
|
|
|
|
|
/* create /proc/irq/1234/handler/ */
|
2006-06-29 05:24:42 -04:00
|
|
|
action->dir = proc_mkdir(name, irq_desc[irq].dir);
|
2005-04-16 18:20:36 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
#undef MAX_NAMELEN
|
|
|
|
|
|
|
|
#define MAX_NAMELEN 10
|
|
|
|
|
|
|
|
void register_irq_proc(unsigned int irq)
|
|
|
|
{
|
|
|
|
char name [MAX_NAMELEN];
|
|
|
|
|
|
|
|
if (!root_irq_dir ||
|
2006-06-29 05:24:57 -04:00
|
|
|
(irq_desc[irq].chip == &no_irq_chip) ||
|
2006-06-29 05:24:42 -04:00
|
|
|
irq_desc[irq].dir)
|
2005-04-16 18:20:36 -04:00
|
|
|
return;
|
|
|
|
|
|
|
|
memset(name, 0, MAX_NAMELEN);
|
|
|
|
sprintf(name, "%d", irq);
|
|
|
|
|
|
|
|
/* create /proc/irq/1234 */
|
2006-06-29 05:24:42 -04:00
|
|
|
irq_desc[irq].dir = proc_mkdir(name, root_irq_dir);
|
2005-04-16 18:20:36 -04:00
|
|
|
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
{
|
|
|
|
struct proc_dir_entry *entry;
|
|
|
|
|
|
|
|
/* create /proc/irq/<irq>/smp_affinity */
|
2006-06-29 05:24:42 -04:00
|
|
|
entry = create_proc_entry("smp_affinity", 0600, irq_desc[irq].dir);
|
2005-04-16 18:20:36 -04:00
|
|
|
|
|
|
|
if (entry) {
|
|
|
|
entry->nlink = 1;
|
|
|
|
entry->data = (void *)(long)irq;
|
|
|
|
entry->read_proc = irq_affinity_read_proc;
|
|
|
|
entry->write_proc = irq_affinity_write_proc;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
#undef MAX_NAMELEN
|
|
|
|
|
|
|
|
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
|
|
|
|
{
|
|
|
|
if (action->dir)
|
2006-06-29 05:24:42 -04:00
|
|
|
remove_proc_entry(action->dir->name, irq_desc[irq].dir);
|
2005-04-16 18:20:36 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
void init_irq_proc(void)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* create /proc/irq */
|
|
|
|
root_irq_dir = proc_mkdir("irq", NULL);
|
|
|
|
if (!root_irq_dir)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Create entries for all existing IRQs.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < NR_IRQS; i++)
|
|
|
|
register_irq_proc(i);
|
|
|
|
}
|
|
|
|
|