/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>
#include <linux/cpumask.h>

#include "internals.h"

static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->common->affinity;
	struct irq_chip *c;
	bool ret = false;
	struct cpumask available_cpus;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) ||
	    !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

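	/*
	 * Prefer a non-isolated target: drop isolated CPUs from the
	 * candidate mask before looking for an online CPU in it.
	 */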
	cpumask_copy(&available_cpus, affinity);
	cpumask_andnot(&available_cpus, &available_cpus, cpu_isolated_mask);
	affinity = &available_cpus;

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * The order of preference for selecting a fallback CPU is
		 *
		 * (1) online and un-isolated CPU from default affinity
		 * (2) online and un-isolated CPU
		 * (3) online CPU
		 */
		cpumask_andnot(&available_cpus, cpu_online_mask,
			       cpu_isolated_mask);
		if (cpumask_intersects(&available_cpus, irq_default_affinity))
			cpumask_and(&available_cpus, &available_cpus,
				    irq_default_affinity);
		else if (cpumask_empty(&available_cpus))
			affinity = cpu_online_mask;

		/*
		 * We are overriding the affinity with all online and
		 * un-isolated CPUs. The irq_set_affinity_locked() call
		 * below notifies the PM QOS affinity listener of this
		 * mask, which results in applying the CPU_DMA_LATENCY
		 * QOS to all the CPUs in it. But the low level irqchip
		 * driver routes the irq to only one CPU, so pick just
		 * a single CPU from the prepared mask when overriding
		 * the user affinity.
		 */
		affinity = cpumask_of(cpumask_any(affinity));
		ret = true;
	}

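	/*
	 * Program the new affinity through the irqchip. Chips that do
	 * not implement irq_set_affinity() keep their current routing,
	 * so only a debug message is emitted for them.
	 */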
	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity) {
		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
	} else {
		int r = irq_set_affinity_locked(d, affinity, false);

		if (r)
			pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
					    d->irq, r);
	}

	return ret;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	unsigned int irq;
	struct irq_desc *desc;
	unsigned long flags;

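	/*
	 * Disable interrupts locally; desc->lock can then be taken
	 * below with plain raw_spin_lock() instead of the irqsave
	 * variant.
	 */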
	local_irq_save(flags);

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		/* The descriptor may already have been freed */
		if (!desc)
			continue;

		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken)
			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
					    irq, smp_processor_id());
	}

	local_irq_restore(flags);
}