genirq: use iterators for irq_desc loops
Use for_each_irq_desc[_reverse] for all irq_desc iteration loops, and for_each_irq_nr where only the irq number is needed.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
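A minimal sketch of the shape these iterators take, for reference only -- the
authoritative definitions live in include/linux/irqnr.h and depend on the irq
layout, so treat this as an illustration rather than the actual implementation:

/*
 * Illustrative sketch only (assumes a flat irq_desc[] array); the real
 * macros in include/linux/irqnr.h may differ in detail.
 */
#define for_each_irq_nr(irq)					\
	for (irq = 0; irq < nr_irqs; irq++)

#define for_each_irq_desc(irq, desc)				\
	for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++)

#define for_each_irq_desc_reverse(irq, desc)			\
	for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1);	\
	     irq >= 0; irq--, desc--)

Because the iterators only ever hand out valid descriptors, the open-coded
irq_to_desc()/NULL-check pattern in the callers below becomes unnecessary and
is dropped together with the manual loop bounds.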
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index e6d47e8..9ce1ab6 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -137,14 +137,12 @@
static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
+ struct irq_desc *desc;
int i;
+
/* By default all event channels notify CPU#0. */
- for (i = 0; i < nr_irqs; i++) {
- struct irq_desc *desc = irq_to_desc(i);
- if (!desc)
- continue;
+ for_each_irq_desc(i, desc)
desc->affinity = cpumask_of_cpu(0);
- }
#endif
memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
@@ -233,7 +231,7 @@
int irq;
/* Only allocate from dynirq range */
- for (irq = 0; irq < nr_irqs; irq++)
+ for_each_irq_nr(irq)
if (irq_bindcount[irq] == 0)
break;
@@ -794,7 +792,7 @@
mask_evtchn(evtchn);
/* No IRQ <-> event-channel mappings. */
- for (irq = 0; irq < nr_irqs; irq++)
+ for_each_irq_nr(irq)
irq_info[irq].evtchn = 0; /* zap event-channel binding */
for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
@@ -826,7 +824,7 @@
mask_evtchn(i);
/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
- for (i = 0; i < nr_irqs; i++)
+ for_each_irq_nr(i)
irq_bindcount[i] = 0;
irq_ctx_init(smp_processor_id());
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index b3a5549..0cbff18 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -30,19 +30,16 @@
unsigned long probe_irq_on(void)
{
struct irq_desc *desc;
- unsigned long mask;
- unsigned int i;
+ unsigned long mask = 0;
+ unsigned int status;
+ int i;
mutex_lock(&probing_active);
/*
* something may have generated an irq long ago and we want to
* flush such a longstanding irq before considering it as spurious.
*/
- for (i = nr_irqs-1; i > 0; i--) {
- desc = irq_to_desc(i);
- if (!desc)
- continue;
-
+ for_each_irq_desc_reverse(i, desc) {
spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
/*
@@ -70,11 +67,7 @@
* (we must startup again here because if a longstanding irq
* happened in the previous stage, it may have masked itself)
*/
- for (i = nr_irqs-1; i > 0; i--) {
- desc = irq_to_desc(i);
- if (!desc)
- continue;
-
+ for_each_irq_desc_reverse(i, desc) {
spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
@@ -92,13 +85,7 @@
/*
* Now filter out any obviously spurious interrupts
*/
- mask = 0;
- for (i = 0; i < nr_irqs; i++) {
- unsigned int status;
-
- desc = irq_to_desc(i);
- if (!desc)
- continue;
+ for_each_irq_desc(i, desc) {
spin_lock_irq(&desc->lock);
status = desc->status;
@@ -132,16 +119,11 @@
*/
unsigned int probe_irq_mask(unsigned long val)
{
- unsigned int mask;
+ unsigned int status, mask = 0;
+ struct irq_desc *desc;
int i;
- mask = 0;
- for (i = 0; i < nr_irqs; i++) {
- struct irq_desc *desc = irq_to_desc(i);
- unsigned int status;
-
- if (!desc)
- continue;
+ for_each_irq_desc(i, desc) {
spin_lock_irq(&desc->lock);
status = desc->status;
@@ -180,13 +162,10 @@
int probe_irq_off(unsigned long val)
{
int i, irq_found = 0, nr_irqs = 0;
+ struct irq_desc *desc;
+ unsigned int status;
- for (i = 0; i < nr_irqs; i++) {
- struct irq_desc *desc = irq_to_desc(i);
- unsigned int status;
-
- if (!desc)
- continue;
+ for_each_irq_desc(i, desc) {
spin_lock_irq(&desc->lock);
status = desc->status;
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index a69368f..c815b42 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -268,9 +268,10 @@
void early_init_irq_lock_class(void)
{
+ struct irq_desc *desc;
int i;
- for (i = 0; i < nr_irqs; i++)
- lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
+ for_each_irq_desc(i, desc)
+ lockdep_set_class(&desc->lock, &irq_desc_lock_class);
}
#endif