/*
 * SGI UltraViolet TLB flush routines.
 *
 * (c) 2008-2014 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>

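/*
 * 'ops' is filled in at initialization time (elsewhere in this file)
 * with the MMR accessors and wait_completion routine for the running
 * UV generation; the hot paths below dispatch through it.
 */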
static struct bau_operations ops __ro_after_init;

/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static const int timeout_base_ns[] = {
		20,
		160,
		1280,
		10240,
		81920,
		655360,
		5242880,
		167772160
};
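/*
 * Each entry is 8 times the previous one except for the final step
 * (5242880 -> 167772160, a factor of 32), so the 3-bit urgency value
 * selects a destination timeout between 20 nsec and roughly 168 msec.
 */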

static int timeout_us;
static bool nobau = true;
static int nobau_perm;

/* tunables: */
static int max_concurr = MAX_BAU_CONCURRENT;
static int max_concurr_const = MAX_BAU_CONCURRENT;
static int plugged_delay = PLUGGED_DELAY;
static int plugsb4reset = PLUGSB4RESET;
static int giveup_limit = GIVEUP_LIMIT;
static int timeoutsb4reset = TIMEOUTSB4RESET;
static int ipi_reset_limit = IPI_RESET_LIMIT;
static int complete_threshold = COMPLETE_THRESHOLD;
static int congested_respns_us = CONGESTED_RESPONSE_US;
static int congested_reps = CONGESTED_REPS;
static int disabled_period = DISABLED_PERIOD;

static struct tunables tunables[] = {
	{&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
	{&plugged_delay, PLUGGED_DELAY},
	{&plugsb4reset, PLUGSB4RESET},
	{&timeoutsb4reset, TIMEOUTSB4RESET},
	{&ipi_reset_limit, IPI_RESET_LIMIT},
	{&complete_threshold, COMPLETE_THRESHOLD},
	{&congested_respns_us, CONGESTED_RESPONSE_US},
	{&congested_reps, CONGESTED_REPS},
	{&disabled_period, DISABLED_PERIOD},
	{&giveup_limit, GIVEUP_LIMIT}
};

static struct dentry *tunables_dir;
static struct dentry *tunables_file;

/* these correspond to the statistics printed by ptc_seq_show() */
static char *stat_description[] = {
	"sent: number of shootdown messages sent",
	"stime: time spent sending messages",
	"numuvhubs: number of hubs targeted with shootdown",
	"numuvhubs16: number times 16 or more hubs targeted",
	"numuvhubs8: number times 8 or more hubs targeted",
	"numuvhubs4: number times 4 or more hubs targeted",
	"numuvhubs2: number times 2 or more hubs targeted",
	"numuvhubs1: number times 1 hub targeted",
	"numcpus: number of cpus targeted with shootdown",
	"dto: number of destination timeouts",
	"retries: destination timeout retries sent",
86 "rok: : destination timeouts successfully retried",
87 "resetp: ipi-style resource resets for plugs",
88 "resett: ipi-style resource resets for timeouts",
89 "giveup: fall-backs to ipi-style shootdowns",
90 "sto: number of source timeouts",
91 "bz: number of stay-busy's",
92 "throt: number times spun in throttle",
93 "swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
94 "recv: shootdown messages received",
95 "rtime: time spent processing messages",
96 "all: shootdown all-tlb messages",
97 "one: shootdown one-tlb messages",
98 "mult: interrupts that found multiple messages",
99 "none: interrupts that found no messages",
100 "retry: number of retry messages processed",
101 "canc: number messages canceled by retries",
102 "nocan: number retries that found nothing to cancel",
103 "reset: number of ipi-style reset requests processed",
104 "rcan: number messages canceled by reset requests",
105 "disable: number times use of the BAU was disabled",
106 "enable: number times use of the BAU was re-enabled"
107};

static int __init setup_bau(char *arg)
{
	int result;

	if (!arg)
		return -EINVAL;

	result = strtobool(arg, &nobau);
	if (result)
		return result;

	/* we need to flip the logic here, so that bau=y sets nobau to false */
	nobau = !nobau;

	if (!nobau)
		pr_info("UV BAU Enabled\n");
	else
		pr_info("UV BAU Disabled\n");

	return 0;
}
early_param("bau", setup_bau);
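/*
 * e.g. booting with "bau=y" enables the BAU and "bau=n" disables it;
 * strtobool() also accepts 1/0 (and on/off, assuming a kernel where it
 * understands those forms), with the result inverted into 'nobau' above.
 */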

/* base pnode in this partition */
static int uv_base_pnode __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);

static void
set_bau_on(void)
{
	int cpu;
	struct bau_control *bcp;

	if (nobau_perm) {
		pr_info("BAU not initialized; cannot be turned on\n");
		return;
	}
	nobau = false;
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->nobau = false;
	}
	pr_info("BAU turned on\n");
	return;
}

static void
set_bau_off(void)
{
	int cpu;
	struct bau_control *bcp;

	nobau = true;
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->nobau = true;
	}
	pr_info("BAU turned off\n");
	return;
}
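/*
 * These runtime toggles are reached through this file's proc write
 * handler (assumption: by writing "on"/"off" to the ptc_statistics
 * file defined later in this file); the "bau=" early_param above only
 * sets the boot-time default.
 */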

/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */
static int __init uvhub_to_first_node(int uvhub)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (uvhub == b)
			return node;
	}
	return -1;
}

/*
 * Determine the apicid of the first cpu on a uvhub.
 */
static int __init uvhub_to_first_apicid(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
			     int do_acknowledge)
{
	unsigned long dw;
	struct bau_pq_entry *msg;

	msg = mdp->msg;
	if (!msg->canceled && do_acknowledge) {
		dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
		ops.write_l_sw_ack(dw);
	}
	msg->replied_to = 1;
	msg->swack_vec = 0;
}
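/*
 * Worked example, assuming UV_SW_ACK_NPENDING is 8 as in uv_bau.h:
 * for swack_vec 0x04 the value written is 0x0404, clearing both the
 * Timeout bit (bit 10) and the Pending bit (bit 2) of that resource
 * in a single MMR write.
 */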

/*
 * Process the receipt of a RETRY message
 */
static void bau_process_retry_msg(struct msg_desc *mdp,
				  struct bau_control *bcp)
{
	int i;
	int cancel_count = 0;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *msg2;
	struct ptc_stats *stat = bcp->statp;

	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->queue_last)
			msg2 = mdp->queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->swack_vec) && ((msg2->swack_vec &
			msg->swack_vec) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			mmr = ops.read_l_sw_ack();
			msg_res = msg2->swack_vec;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				unsigned long mr;
				/*
				 * Is the resource timed out?
				 * Make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
				ops.write_l_sw_ack(mr);
			}
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpus may come here at the same time for this message.
 */
static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
				int do_acknowledge)
{
	short socket_ack_count = 0;
	short *sp;
	struct atomic_short *asp;
	struct ptc_stats *stat = bcp->statp;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		stat->d_alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried. That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a swack message, so we have to reply to it.
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
	asp = (struct atomic_short *)sp;
	socket_ack_count = atom_asr(1, asp);
	if (socket_ack_count == bcp->cpus_in_socket) {
		int msg_ack_count;
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		*sp = 0;
		asp = (struct atomic_short *)&msg->acknowledge_count;
		msg_ack_count = atom_asr(socket_ack_count, asp);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 * (unless we are in the UV2 workaround)
			 */
			reply_to_message(mdp, bcp, do_acknowledge);
		}
	}

	return;
}

/*
 * Determine the first cpu on a pnode.
 */
static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
{
	int cpu;
	struct hub_and_pnode *hpp;

	for_each_present_cpu(cpu) {
		hpp = &smaster->thp[cpu];
		if (pnode == hpp->pnode)
			return cpu;
	}
	return -1;
}

/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero swack_vec field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * swack resources.
 */
static void do_reset(void *ptr)
{
	int i;
	struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
	struct reset_args *rap = (struct reset_args *)ptr;
	struct bau_pq_entry *msg;
	struct ptc_stats *stat = bcp->statp;

	stat->d_resets++;
	/*
	 * We're looking for the given sender, and
	 * will free its swack resource.
	 * If all cpus finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		unsigned long msg_res;
		/* do_reset: same conditions for cancellation as
		   bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->swack_vec) &&
		    (msg->msg_type != MSG_NOOP)) {
			unsigned long mmr;
			unsigned long mr;
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			/*
			 * only reset the resource if it is still pending
			 */
			mmr = ops.read_l_sw_ack();
			msg_res = msg->swack_vec;
			mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
			if (mmr & msg_res) {
				stat->d_rcanceled++;
				ops.write_l_sw_ack(mr);
			}
		}
	}
	return;
}

/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */
static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
{
	int pnode;
	int apnode;
	int maskbits;
	int sender = bcp->cpu;
	cpumask_t *mask = bcp->uvhub_master->cpumask;
	struct bau_control *smaster = bcp->socket_master;
	struct reset_args reset_args;

	reset_args.sender = sender;
	cpumask_clear(mask);
	/* find a single cpu for each uvhub in this distribution mask */
	maskbits = sizeof(struct pnmask) * BITSPERBYTE;
	/* each bit is a pnode relative to the partition base pnode */
	for (pnode = 0; pnode < maskbits; pnode++) {
		int cpu;
		if (!bau_uvhub_isset(pnode, distribution))
			continue;
		apnode = pnode + bcp->partition_base_pnode;
		cpu = pnode_to_first_cpu(apnode, smaster);
		cpumask_set_cpu(cpu, mask);
	}

	/* IPI all cpus; preemption is already disabled */
	smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
	return;
}

/*
 * Not to be confused with cycles_2_ns() from tsc.c; this gives a relative
 * number, not an absolute. It converts a duration in cycles to a duration in
 * ns.
 */
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data data;
	unsigned long long ns;

	cyc2ns_read_begin(&data);
	ns = mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
	cyc2ns_read_end();

	return ns;
}

/*
 * The reverse of the above; converts a duration in ns to a duration in cycles.
 */
static inline unsigned long long ns_2_cycles(unsigned long long ns)
{
	struct cyc2ns_data data;
	unsigned long long cyc;

	cyc2ns_read_begin(&data);
	cyc = (ns << data.cyc2ns_shift) / data.cyc2ns_mul;
	cyc2ns_read_end();

	return cyc;
}

static inline unsigned long cycles_2_us(unsigned long long cyc)
{
	return cycles_2_ns(cyc) / NSEC_PER_USEC;
}

static inline cycles_t sec_2_cycles(unsigned long sec)
{
	return ns_2_cycles(sec * NSEC_PER_SEC);
}

static inline unsigned long long usec_2_cycles(unsigned long usec)
{
	return ns_2_cycles(usec * NSEC_PER_USEC);
}
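/*
 * Illustrative scale only, not a measured value: with a 2.5 GHz TSC,
 * usec_2_cycles(1) is about 2500 and sec_2_cycles(1) about 2.5e9; the
 * real factor comes from the kernel's cyc2ns calibration data above.
 */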

/*
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * bau_flush_send_and_wait()
 */
static inline void quiesce_local_uvhub(struct bau_control *hmaster)
{
	atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

/*
 * mark this quiet-requestor as done
 */
static inline void end_uvhub_quiesce(struct bau_control *hmaster)
{
	atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
{
	unsigned long descriptor_status;

	descriptor_status = uv_read_local_mmr(mmr_offset);
	descriptor_status >>= right_shift;
	descriptor_status &= UV_ACT_STATUS_MASK;
	return descriptor_status;
}

/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 */
static int uv1_wait_completion(struct bau_desc *bau_desc,
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_status;
	cycles_t ttm;
	u64 mmr_offset = bcp->status_mmr;
	int right_shift = bcp->status_index;
	struct ptc_stats *stat = bcp->statp;

	descriptor_status = uv1_read_status(mmr_offset, right_shift);
	/* spin on the status MMR, waiting for it to go idle */
	while ((descriptor_status != DS_IDLE)) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available. As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if (descriptor_status == DS_SOURCE_TIMEOUT) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
			stat->s_dtimeout++;
			ttm = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending. In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}

			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
		}
		descriptor_status = uv1_read_status(mmr_offset, right_shift);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

/*
 * UV2 could have an extra bit of status in the ACTIVATION_STATUS_2 register.
 * But not currently used.
 */
static unsigned long uv2_3_read_status(unsigned long offset, int rshft, int desc)
{
	return ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK) << 1;
}

/*
 * Entered when a bau descriptor has gone into a permanent busy wait because
 * of a hardware bug.
 * Workaround the bug.
 */
static int handle_uv2_busy(struct bau_control *bcp)
{
	struct ptc_stats *stat = bcp->statp;

	stat->s_uv2_wars++;
	bcp->busy = 1;
	return FLUSH_GIVEUP;
}
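/*
 * The busy flag set above short-circuits later sends: the check near
 * the top of uv_flush_tlb_others() keeps returning the caller's
 * cpumask (forcing the IPI fallback) until the descriptor's status
 * leaves BUSY, at which point busy is cleared.
 */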

static int uv2_3_wait_completion(struct bau_desc *bau_desc,
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_stat;
	cycles_t ttm;
	u64 mmr_offset = bcp->status_mmr;
	int right_shift = bcp->status_index;
	int desc = bcp->uvhub_cpu;
	long busy_reps = 0;
	struct ptc_stats *stat = bcp->statp;

	descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);

	/* spin on the status MMR, waiting for it to go idle */
	while (descriptor_stat != UV2H_DESC_IDLE) {
		if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT)) {
			/*
			 * A h/w bug on the destination side may
			 * have prevented the message being marked
			 * pending, thus it doesn't get replied to
			 * and gets continually nacked until it times
			 * out with a SOURCE_TIMEOUT.
			 */
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
			ttm = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending. In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 * Without using the extended status we have to
			 * deduce from the short time that this was a
			 * strong nack.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				stat->s_plugged++;
				/* FLUSH_RETRY_PLUGGED causes hang on boot */
				return FLUSH_GIVEUP;
			}
			stat->s_dtimeout++;
			bcp->conseccompletes = 0;
			/* FLUSH_RETRY_TIMEOUT causes hang on boot */
			return FLUSH_GIVEUP;
		} else {
			busy_reps++;
			if (busy_reps > 1000000) {
				/* not to hammer on the clock */
				busy_reps = 0;
				ttm = get_cycles();
				if ((ttm - bcp->send_message) > bcp->timeout_interval)
					return handle_uv2_busy(bcp);
			}
			/*
			 * descriptor_stat is still BUSY
			 */
			cpu_relax();
		}
		descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

/*
 * Returns the status of current BAU message for cpu desc as a bit field
 * [Error][Busy][Aux]
 */
static u64 read_status(u64 status_mmr, int index, int desc)
{
	u64 stat;

	stat = ((read_lmmr(status_mmr) >> index) & UV_ACT_STATUS_MASK) << 1;
	stat |= (read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_2) >> desc) & 0x1;

	return stat;
}

static int uv4_wait_completion(struct bau_desc *bau_desc,
				struct bau_control *bcp, long try)
{
	struct ptc_stats *stat = bcp->statp;
	u64 descriptor_stat;
	u64 mmr = bcp->status_mmr;
	int index = bcp->status_index;
	int desc = bcp->uvhub_cpu;

	descriptor_stat = read_status(mmr, index, desc);

	/* spin on the status MMR, waiting for it to go idle */
	while (descriptor_stat != UV2H_DESC_IDLE) {
		switch (descriptor_stat) {
		case UV2H_DESC_SOURCE_TIMEOUT:
			stat->s_stimeout++;
			return FLUSH_GIVEUP;

		case UV2H_DESC_DEST_TIMEOUT:
			stat->s_dtimeout++;
			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;

		case UV2H_DESC_DEST_STRONG_NACK:
			stat->s_plugged++;
			bcp->conseccompletes = 0;
			return FLUSH_RETRY_PLUGGED;

		case UV2H_DESC_DEST_PUT_ERR:
			bcp->conseccompletes = 0;
			return FLUSH_GIVEUP;

		default:
			/* descriptor_stat is still BUSY */
			cpu_relax();
		}
		descriptor_stat = read_status(mmr, index, desc);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}
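/*
 * Unlike the UV2/3 path above, read_status() folds the auxiliary bit
 * from ACTIVATION_STATUS_2 into the status word, so UV4 can decode
 * strong NACKs and PUT errors directly rather than inferring them
 * from elapsed time, and the RETRY return values are usable again.
 */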

/*
 * Our retries are blocked by all destination sw ack resources being
 * in use, and a timeout is pending. In that case hardware immediately
 * returns the ERROR that looks like a destination timeout.
 */
static void destination_plugged(struct bau_desc *bau_desc,
			struct bau_control *bcp,
			struct bau_control *hmaster, struct ptc_stats *stat)
{
	udelay(bcp->plugged_delay);
	bcp->plugged_tries++;

	if (bcp->plugged_tries >= bcp->plugsb4reset) {
		bcp->plugged_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_plug++;
	}
}

static void destination_timeout(struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)
{
	hmaster->max_concurr = 1;
	bcp->timeout_tries++;
	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
		bcp->timeout_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_timeout++;
	}
}

/*
 * Stop all cpus on a uvhub from using the BAU for a period of time.
 * This is reversed by check_enable.
 */
static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;
	struct bau_control *hmaster;
	cycles_t tm1;

	hmaster = bcp->uvhub_master;
	spin_lock(&hmaster->disable_lock);
	if (!bcp->baudisabled) {
		stat->s_bau_disabled++;
		tm1 = get_cycles();
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			if (tbcp->uvhub_master == hmaster) {
				tbcp->baudisabled = 1;
				tbcp->set_bau_on_time =
					tm1 + bcp->disabled_period;
			}
		}
	}
	spin_unlock(&hmaster->disable_lock);
}

static void count_max_concurr(int stat, struct bau_control *bcp,
			struct bau_control *hmaster)
{
	bcp->plugged_tries = 0;
	bcp->timeout_tries = 0;
	if (stat != FLUSH_COMPLETE)
		return;
	if (bcp->conseccompletes <= bcp->complete_threshold)
		return;
	if (hmaster->max_concurr >= hmaster->max_concurr_const)
		return;
	hmaster->max_concurr++;
}

static void record_send_stats(cycles_t time1, cycles_t time2,
		struct bau_control *bcp, struct ptc_stats *stat,
		int completion_status, int try)
{
	cycles_t elapsed;

	if (time2 > time1) {
		elapsed = time2 - time1;
		stat->s_time += elapsed;

		if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
			bcp->period_requests++;
			bcp->period_time += elapsed;
			if ((elapsed > usec_2_cycles(bcp->cong_response_us)) &&
			    (bcp->period_requests > bcp->cong_reps) &&
			    ((bcp->period_time / bcp->period_requests) >
			     usec_2_cycles(bcp->cong_response_us))) {
				stat->s_congested++;
				disable_for_period(bcp, stat);
			}
		}
	} else
		stat->s_requestor--;

	if (completion_status == FLUSH_COMPLETE && try > 1)
		stat->s_retriesok++;
	else if (completion_status == FLUSH_GIVEUP) {
		stat->s_giveup++;
		if (get_cycles() > bcp->period_end)
			bcp->period_giveups = 0;
		bcp->period_giveups++;
		if (bcp->period_giveups == 1)
			bcp->period_end = get_cycles() + bcp->disabled_period;
		if (bcp->period_giveups > bcp->giveup_limit) {
			disable_for_period(bcp, stat);
			stat->s_giveuplimit++;
		}
	}
}
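/*
 * Congestion heuristic, reading the test above: once more than
 * cong_reps first-try sends have been made in the current period and
 * both this send and the period average exceed cong_response_us, the
 * whole hub is switched to the IPI fallback for disabled_period.
 */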

/*
 * Because of a uv1 hardware bug only a limited number of concurrent
 * requests can be made.
 */
static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
{
	spinlock_t *lock = &hmaster->uvhub_lock;
	atomic_t *v;

	v = &hmaster->active_descriptor_count;
	if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
		stat->s_throttles++;
		do {
			cpu_relax();
		} while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
	}
}

/*
 * Handle the completion status of a message send.
 */
static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)
{
	if (completion_status == FLUSH_RETRY_PLUGGED)
		destination_plugged(bau_desc, bcp, hmaster, stat);
	else if (completion_status == FLUSH_RETRY_TIMEOUT)
		destination_timeout(bau_desc, bcp, hmaster, stat);
}

/*
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to including
 * cpus that are on the local uvhub.
 *
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */
static int uv_flush_send_and_wait(struct cpumask *flush_mask,
				  struct bau_control *bcp,
				  struct bau_desc *bau_desc)
{
	int seq_number = 0;
	int completion_stat = 0;
	int uv1 = 0;
	long try = 0;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *hmaster = bcp->uvhub_master;
	struct uv1_bau_msg_header *uv1_hdr = NULL;
	struct uv2_3_bau_msg_header *uv2_3_hdr = NULL;

	if (bcp->uvhub_version == UV_BAU_V1) {
		uv1 = 1;
		uv1_throttle(hmaster, stat);
	}

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	time1 = get_cycles();
	if (uv1)
		uv1_hdr = &bau_desc->header.uv1_hdr;
	else
		/* uv2 and uv3 */
		uv2_3_hdr = &bau_desc->header.uv2_3_hdr;

	do {
		if (try == 0) {
			if (uv1)
				uv1_hdr->msg_type = MSG_REGULAR;
			else
				uv2_3_hdr->msg_type = MSG_REGULAR;
			seq_number = bcp->message_number++;
		} else {
			if (uv1)
				uv1_hdr->msg_type = MSG_RETRY;
			else
				uv2_3_hdr->msg_type = MSG_RETRY;
			stat->s_retry_messages++;
		}

		if (uv1)
			uv1_hdr->sequence = seq_number;
		else
			uv2_3_hdr->sequence = seq_number;
		index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
		bcp->send_message = get_cycles();

		write_mmr_activation(index);

		try++;
		completion_stat = ops.wait_completion(bau_desc, bcp, try);

		handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);

		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
			bcp->ipi_attempts = 0;
			stat->s_overipilimit++;
			completion_stat = FLUSH_GIVEUP;
			break;
		}
		cpu_relax();
	} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
		 (completion_stat == FLUSH_RETRY_TIMEOUT));

	time2 = get_cycles();

	count_max_concurr(completion_stat, bcp, hmaster);

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	atomic_dec(&hmaster->active_descriptor_count);

	record_send_stats(time1, time2, bcp, stat, completion_stat, try);

	if (completion_stat == FLUSH_GIVEUP)
		/* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
		return 1;
	return 0;
}

/*
 * The BAU is disabled for this uvhub. When the disabled time period has
 * expired re-enable it.
 * Return 0 if it is re-enabled for all cpus on this uvhub.
 */
static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;
	struct bau_control *hmaster;

	hmaster = bcp->uvhub_master;
	spin_lock(&hmaster->disable_lock);
	if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
		stat->s_bau_reenabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			if (tbcp->uvhub_master == hmaster) {
				tbcp->baudisabled = 0;
				tbcp->period_requests = 0;
				tbcp->period_time = 0;
				tbcp->period_giveups = 0;
			}
		}
		spin_unlock(&hmaster->disable_lock);
		return 0;
	}
	spin_unlock(&hmaster->disable_lock);
	return -1;
}

static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
				int remotes, struct bau_desc *bau_desc)
{
	stat->s_requestor++;
	stat->s_ntargcpu += remotes + locals;
	stat->s_ntargremotes += remotes;
	stat->s_ntarglocals += locals;

	/* uvhub statistics */
	hubs = bau_uvhub_weight(&bau_desc->distribution);
	if (locals) {
		stat->s_ntarglocaluvhub++;
		stat->s_ntargremoteuvhub += (hubs - 1);
	} else
		stat->s_ntargremoteuvhub += hubs;

	stat->s_ntarguvhub += hubs;

	if (hubs >= 16)
		stat->s_ntarguvhub16++;
	else if (hubs >= 8)
		stat->s_ntarguvhub8++;
	else if (hubs >= 4)
		stat->s_ntarguvhub4++;
	else if (hubs >= 2)
		stat->s_ntarguvhub2++;
	else
		stat->s_ntarguvhub1++;
}

/*
 * Translate a cpu mask to the uvhub distribution mask in the BAU
 * activation descriptor.
 */
static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
			struct bau_desc *bau_desc, int *localsp, int *remotesp)
{
	int cpu;
	int pnode;
	int cnt = 0;
	struct hub_and_pnode *hpp;

	for_each_cpu(cpu, flush_mask) {
		/*
		 * The distribution vector is a bit map of pnodes, relative
		 * to the partition base pnode (and the partition base nasid
		 * in the header).
		 * Translate cpu to pnode and hub using a local memory array.
		 */
		hpp = &bcp->socket_master->thp[cpu];
		pnode = hpp->pnode - bcp->partition_base_pnode;
		bau_uvhub_set(pnode, &bau_desc->distribution);
		cnt++;
		if (hpp->uvhub == bcp->uvhub)
			(*localsp)++;
		else
			(*remotesp)++;
	}
	if (!cnt)
		return 1;
	return 0;
}
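/*
 * Illustrative example (hypothetical numbers): with partition_base_pnode
 * 8, a target cpu on pnode 10 sets bit 2 of the distribution bit map;
 * the activation hardware interprets the bits relative to the base
 * nasid carried in the message header.
 */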

/*
 * globally purge translation cache of a virtual address or all TLB's
 * @cpumask: mask of all cpus in which the address is to be removed
 * @info: flush_tlb_info carrying the mm_struct and the start and end
 *	virtual addresses of the range to be removed
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct. This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done. The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
					  const struct flush_tlb_info *info)
{
	unsigned int cpu = smp_processor_id();
	int locals = 0, remotes = 0, hubs = 0;
	struct bau_desc *bau_desc;
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;
	unsigned long descriptor_status, status, address;

	bcp = &per_cpu(bau_control, cpu);

	if (bcp->nobau)
		return cpumask;

	stat = bcp->statp;
	stat->s_enters++;

	if (bcp->busy) {
		descriptor_status =
			read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
		status = ((descriptor_status >> (bcp->uvhub_cpu *
			UV_ACT_STATUS_SIZE)) & UV_ACT_STATUS_MASK) << 1;
		if (status == UV2H_DESC_BUSY)
			return cpumask;
		bcp->busy = 0;
	}

	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
		if (check_enable(bcp, stat)) {
			stat->s_ipifordisabled++;
			return cpumask;
		}
	}

	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
	 * cpu mask. All cpus are converted to uvhubs and copied to the
	 * activation descriptor.
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
	/* don't actually do a shootdown of the local cpu */
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

	if (cpumask_test_cpu(cpu, cpumask))
		stat->s_ntargself++;

	bau_desc = bcp->descriptor_base;
	bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu);
	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
	if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
		return NULL;

	record_send_statistics(stat, locals, hubs, remotes, bau_desc);

	if (!info->end || (info->end - info->start) <= PAGE_SIZE)
		address = info->start;
	else
		address = TLB_FLUSH_ALL;

	switch (bcp->uvhub_version) {
	case UV_BAU_V1:
	case UV_BAU_V2:
	case UV_BAU_V3:
		bau_desc->payload.uv1_2_3.address = address;
		bau_desc->payload.uv1_2_3.sending_cpu = cpu;
		break;
	case UV_BAU_V4:
		bau_desc->payload.uv4.address = address;
		bau_desc->payload.uv4.sending_cpu = cpu;
		bau_desc->payload.uv4.qualifier = BAU_DESC_QUALIFIER;
		break;
	}

	/*
	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
	 * or 1 if it gave up and the original cpumask should be returned.
	 */
	if (!uv_flush_send_and_wait(flush_mask, bcp, bau_desc))
		return NULL;
	else
		return cpumask;
}

/*
 * Search the message queue for any 'other' unprocessed message with the
 * same software acknowledge resource bit vector as the 'msg' message.
 */
static struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
						  struct bau_control *bcp)
{
	struct bau_pq_entry *msg_next = msg + 1;
	unsigned char swack_vec = msg->swack_vec;

	if (msg_next > bcp->queue_last)
		msg_next = bcp->queue_first;
	while (msg_next != msg) {
		if ((msg_next->canceled == 0) && (msg_next->replied_to == 0) &&
		    (msg_next->swack_vec == swack_vec))
			return msg_next;
		msg_next++;
		if (msg_next > bcp->queue_last)
			msg_next = bcp->queue_first;
	}
	return NULL;
}
1213
1214/*
1215 * UV2 needs to work around a bug in which an arriving message has not
1216 * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
1217 * Such a message must be ignored.
1218 */
Colin Ian Kingb45e4c42017-08-10 16:57:09 +01001219static void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001220{
1221 unsigned long mmr_image;
1222 unsigned char swack_vec;
1223 struct bau_pq_entry *msg = mdp->msg;
1224 struct bau_pq_entry *other_msg;
1225
Andrew Banman21e3f122016-09-21 11:09:17 -05001226 mmr_image = ops.read_l_sw_ack();
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001227 swack_vec = msg->swack_vec;
1228
1229 if ((swack_vec & mmr_image) == 0) {
1230 /*
1231 * This message was assigned a swack resource, but no
 1232 * reserved acknowledgment is pending.
1233 * The bug has prevented this message from setting the MMR.
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001234 */
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001235 /*
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001236 * Some message has set the MMR 'pending' bit; it might have
1237 * been another message. Look for that message.
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001238 */
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001239 other_msg = find_another_by_swack(msg, bcp);
1240 if (other_msg) {
1241 /*
1242 * There is another. Process this one but do not
1243 * ack it.
1244 */
1245 bau_process_message(mdp, bcp, 0);
1246 /*
1247 * Let the natural processing of that other message
 1248 * acknowledge it. Don't get the processing of sw_acks
1249 * out of order.
1250 */
1251 return;
1252 }
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001253 }
1254
1255 /*
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001256 * Either the MMR shows this one pending a reply or there is no
1257 * other message using this sw_ack, so it is safe to acknowledge it.
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001258 */
1259 bau_process_message(mdp, bcp, 1);
1260
1261 return;
1262}
1263
1264/*
Cliff Wickman18129242008-06-02 08:56:14 -05001265 * The BAU message interrupt comes here. (registered by alloc_intr_gate)
1266 * See entry_64.S
1267 *
1268 * We received a broadcast assist message.
1269 *
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001270 * Interrupts are disabled; this interrupt could represent
Cliff Wickman18129242008-06-02 08:56:14 -05001271 * the receipt of several messages.
1272 *
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001273 * All cores/threads on this hub get this interrupt.
1274 * The last one to see it does the software ack.
Cliff Wickman18129242008-06-02 08:56:14 -05001275 * (the resource will not be freed until noninterruptible cpus see this
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001276 * interrupt; hardware may time out the s/w ack and reply ERROR)
Cliff Wickman18129242008-06-02 08:56:14 -05001277 */
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001278void uv_bau_message_interrupt(struct pt_regs *regs)
Cliff Wickman18129242008-06-02 08:56:14 -05001279{
Cliff Wickman18129242008-06-02 08:56:14 -05001280 int count = 0;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001281 cycles_t time_start;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001282 struct bau_pq_entry *msg;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001283 struct bau_control *bcp;
1284 struct ptc_stats *stat;
1285 struct msg_desc msgdesc;
Cliff Wickman18129242008-06-02 08:56:14 -05001286
Cliff Wickman88ed9dd2012-01-16 15:21:46 -06001287 ack_APIC_irq();
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001288 time_start = get_cycles();
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001289
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001290 bcp = &per_cpu(bau_control, smp_processor_id());
Cliff Wickman712157a2010-06-02 16:22:02 -05001291 stat = bcp->statp;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001292
1293 msgdesc.queue_first = bcp->queue_first;
1294 msgdesc.queue_last = bcp->queue_last;
1295
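	/*
	 * Walk the circular payload queue. A zero swack_vec marks the
	 * first unused slot; wrap from queue_last back to queue_first.
	 */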
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001296 msg = bcp->bau_msg_head;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001297 while (msg->swack_vec) {
Cliff Wickman18129242008-06-02 08:56:14 -05001298 count++;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001299
1300 msgdesc.msg_slot = msg - msgdesc.queue_first;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001301 msgdesc.msg = msg;
Andrew Banman491bd882017-03-09 10:42:09 -06001302 if (bcp->uvhub_version == UV_BAU_V2)
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001303 process_uv2_message(&msgdesc, bcp);
1304 else
Cliff Wickmana26fd712014-05-14 16:15:47 -05001305 /* no error workaround for uv1 or uv3 */
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001306 bau_process_message(&msgdesc, bcp, 1);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001307
Cliff Wickman18129242008-06-02 08:56:14 -05001308 msg++;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001309 if (msg > msgdesc.queue_last)
1310 msg = msgdesc.queue_first;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001311 bcp->bau_msg_head = msg;
Cliff Wickman18129242008-06-02 08:56:14 -05001312 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001313 stat->d_time += (get_cycles() - time_start);
Cliff Wickman18129242008-06-02 08:56:14 -05001314 if (!count)
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001315 stat->d_nomsg++;
Cliff Wickman18129242008-06-02 08:56:14 -05001316 else if (count > 1)
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001317 stat->d_multmsg++;
Cliff Wickman18129242008-06-02 08:56:14 -05001318}
1319
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001320/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001321 * Each target uvhub (i.e. a uvhub that has cpus) needs to have
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001322 * shootdown message timeouts enabled. The timeout does not cause
1323 * an interrupt, but causes an error message to be returned to
1324 * the sender.
1325 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001326static void __init enable_timeouts(void)
Cliff Wickman18129242008-06-02 08:56:14 -05001327{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001328 int uvhub;
1329 int nuvhubs;
Cliff Wickman18129242008-06-02 08:56:14 -05001330 int pnode;
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001331 unsigned long mmr_image;
Cliff Wickman18129242008-06-02 08:56:14 -05001332
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001333 nuvhubs = uv_num_possible_blades();
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001334
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001335 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
1336 if (!uv_blade_nr_possible_cpus(uvhub))
Cliff Wickman18129242008-06-02 08:56:14 -05001337 continue;
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001338
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001339 pnode = uv_blade_to_pnode(uvhub);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001340 mmr_image = read_mmr_misc_control(pnode);
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001341 /*
 1342 * Set the timeout period and then lock it in; the three MMR
 1343 * writes below program and then capture the period.
1344 *
1345 * To program the period, the SOFT_ACK_MODE must be off.
1346 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001347 mmr_image &= ~(1L << SOFTACK_MSHIFT);
1348 write_mmr_misc_control(pnode, mmr_image);
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001349 /*
1350 * Set the 4-bit period.
1351 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001352 mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
1353 mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
1354 write_mmr_misc_control(pnode, mmr_image);
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001355 /*
Jack Steiner2a919592011-05-11 12:50:28 -05001356 * UV1:
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001357 * Subsequent reversals of the timebase bit (3) cause an
1358 * immediate timeout of one or all INTD resources as
1359 * indicated in bits 2:0 (7 causes all of them to timeout).
1360 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001361 mmr_image |= (1L << SOFTACK_MSHIFT);
Jack Steiner2a919592011-05-11 12:50:28 -05001362 if (is_uv2_hub()) {
Cliff Wickmana26fd712014-05-14 16:15:47 -05001363 /* do not touch the legacy mode bit */
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001364 /* hw bug workaround; do not use extended status */
1365 mmr_image &= ~(1L << UV2_EXT_SHFT);
Cliff Wickmana26fd712014-05-14 16:15:47 -05001366 } else if (is_uv3_hub()) {
1367 mmr_image &= ~(1L << PREFETCH_HINT_SHFT);
1368 mmr_image |= (1L << SB_STATUS_SHFT);
Jack Steiner2a919592011-05-11 12:50:28 -05001369 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001370 write_mmr_misc_control(pnode, mmr_image);
Cliff Wickman18129242008-06-02 08:56:14 -05001371 }
Cliff Wickman18129242008-06-02 08:56:14 -05001372}
1373
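/*
 * seq_file iterators for /proc/sgi_uv/ptc_statistics; *offset walks the
 * cpu numbers, one statistics row per possible cpu.
 */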
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001374static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
Cliff Wickman18129242008-06-02 08:56:14 -05001375{
1376 if (*offset < num_possible_cpus())
1377 return offset;
1378 return NULL;
1379}
1380
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001381static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
Cliff Wickman18129242008-06-02 08:56:14 -05001382{
1383 (*offset)++;
1384 if (*offset < num_possible_cpus())
1385 return offset;
1386 return NULL;
1387}
1388
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001389static void ptc_seq_stop(struct seq_file *file, void *data)
Cliff Wickman18129242008-06-02 08:56:14 -05001390{
1391}
1392
1393/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001394 * Display the statistics through /proc/sgi_uv/ptc_statistics
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001395 * 'data' points to the cpu number
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001396 * Note: see the descriptions in stat_description[].
Cliff Wickman18129242008-06-02 08:56:14 -05001397 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001398static int ptc_seq_show(struct seq_file *file, void *data)
Cliff Wickman18129242008-06-02 08:56:14 -05001399{
1400 struct ptc_stats *stat;
Cliff Wickman26ef8572012-06-22 08:13:30 -05001401 struct bau_control *bcp;
Cliff Wickman18129242008-06-02 08:56:14 -05001402 int cpu;
1403
1404 cpu = *(loff_t *)data;
Cliff Wickman18129242008-06-02 08:56:14 -05001405 if (!cpu) {
Rasmus Villemoes37367082014-11-28 22:03:41 +01001406 seq_puts(file,
1407 "# cpu bauoff sent stime self locals remotes ncpus localhub ");
1408 seq_puts(file, "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
1409 seq_puts(file,
1410 "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries ");
1411 seq_puts(file,
1412 "rok resetp resett giveup sto bz throt disable ");
1413 seq_puts(file,
1414 "enable wars warshw warwaits enters ipidis plugged ");
1415 seq_puts(file,
1416 "ipiover glim cong swack recv rtime all one mult ");
1417 seq_puts(file, "none retry canc nocan reset rcan\n");
Cliff Wickman18129242008-06-02 08:56:14 -05001418 }
1419 if (cpu < num_possible_cpus() && cpu_online(cpu)) {
Cliff Wickman26ef8572012-06-22 08:13:30 -05001420 bcp = &per_cpu(bau_control, cpu);
James Custerfa2a79ce2014-11-02 12:16:39 -06001421 if (bcp->nobau) {
1422 seq_printf(file, "cpu %d bau disabled\n", cpu);
1423 return 0;
1424 }
Cliff Wickman26ef8572012-06-22 08:13:30 -05001425 stat = bcp->statp;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001426 /* source side statistics */
1427 seq_printf(file,
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001428 "cpu %d %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
Cliff Wickman26ef8572012-06-22 08:13:30 -05001429 cpu, bcp->nobau, stat->s_requestor,
1430 cycles_2_us(stat->s_time),
Cliff Wickman450a0072010-06-02 16:22:02 -05001431 stat->s_ntargself, stat->s_ntarglocals,
1432 stat->s_ntargremotes, stat->s_ntargcpu,
1433 stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
1434 stat->s_ntarguvhub, stat->s_ntarguvhub16);
Cliff Wickmanb54bd9b2012-01-16 15:22:38 -06001435 seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001436 stat->s_ntarguvhub8, stat->s_ntarguvhub4,
1437 stat->s_ntarguvhub2, stat->s_ntarguvhub1,
Cliff Wickmanb54bd9b2012-01-16 15:22:38 -06001438 stat->s_dtimeout, stat->s_strongnacks);
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001439 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001440 stat->s_retry_messages, stat->s_retriesok,
1441 stat->s_resets_plug, stat->s_resets_timeout,
1442 stat->s_giveup, stat->s_stimeout,
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001443 stat->s_busy, stat->s_throttles);
1444 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
1445 stat->s_bau_disabled, stat->s_bau_reenabled,
1446 stat->s_uv2_wars, stat->s_uv2_wars_hw,
1447 stat->s_uv2_war_waits, stat->s_enters,
1448 stat->s_ipifordisabled, stat->s_plugged,
1449 stat->s_overipilimit, stat->s_giveuplimit,
1450 stat->s_congested);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001451
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001452 /* destination side statistics */
1453 seq_printf(file,
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001454 "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
Andrew Banman21e3f122016-09-21 11:09:17 -05001455 ops.read_g_sw_ack(uv_cpu_to_pnode(cpu)),
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001456 stat->d_requestee, cycles_2_us(stat->d_time),
1457 stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
1458 stat->d_nomsg, stat->d_retries, stat->d_canceled,
1459 stat->d_nocanceled, stat->d_resets,
1460 stat->d_rcanceled);
Cliff Wickman18129242008-06-02 08:56:14 -05001461 }
Cliff Wickman18129242008-06-02 08:56:14 -05001462 return 0;
1463}
1464
1465/*
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001466 * Display the tunables through debugfs
1467 */
1468static ssize_t tunables_read(struct file *file, char __user *userbuf,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001469 size_t count, loff_t *ppos)
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001470{
Dan Carpenterb365a852010-09-29 10:41:05 +02001471 char *buf;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001472 int ret;
1473
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001474 buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d %d\n",
1475 "max_concur plugged_delay plugsb4reset timeoutsb4reset",
1476 "ipi_reset_limit complete_threshold congested_response_us",
1477 "congested_reps disabled_period giveup_limit",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001478 max_concurr, plugged_delay, plugsb4reset,
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001479 timeoutsb4reset, ipi_reset_limit, complete_threshold,
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001480 congested_respns_us, congested_reps, disabled_period,
1481 giveup_limit);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001482
Dan Carpenterb365a852010-09-29 10:41:05 +02001483 if (!buf)
1484 return -ENOMEM;
1485
1486 ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
1487 kfree(buf);
1488 return ret;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001489}
1490
1491/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001492 * Handle a write to /proc/sgi_uv/ptc_statistics:
 1493 * -1: reset the statistics
Cliff Wickman18129242008-06-02 08:56:14 -05001494 * 0: display the meaning of the statistics
Cliff Wickman18129242008-06-02 08:56:14 -05001495 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001496static ssize_t ptc_proc_write(struct file *file, const char __user *user,
1497 size_t count, loff_t *data)
Cliff Wickman18129242008-06-02 08:56:14 -05001498{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001499 int cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001500 int i;
1501 int elements;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001502 long input_arg;
Cliff Wickman18129242008-06-02 08:56:14 -05001503 char optstr[64];
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001504 struct ptc_stats *stat;
Cliff Wickman18129242008-06-02 08:56:14 -05001505
Cliff Wickmane7eb8722008-06-23 08:32:25 -05001506 if (count == 0 || count > sizeof(optstr))
Cliff Wickmancef53272008-06-19 11:16:24 -05001507 return -EINVAL;
Cliff Wickman18129242008-06-02 08:56:14 -05001508 if (copy_from_user(optstr, user, count))
1509 return -EFAULT;
1510 optstr[count - 1] = '\0';
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001511
Cliff Wickman26ef8572012-06-22 08:13:30 -05001512 if (!strcmp(optstr, "on")) {
1513 set_bau_on();
1514 return count;
1515 } else if (!strcmp(optstr, "off")) {
1516 set_bau_off();
1517 return count;
1518 }
1519
Daniel Walter164109e2014-08-08 14:24:03 -07001520 if (kstrtol(optstr, 10, &input_arg) < 0) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001521 pr_debug("%s is invalid\n", optstr);
Cliff Wickman18129242008-06-02 08:56:14 -05001522 return -EINVAL;
1523 }
1524
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001525 if (input_arg == 0) {
Sasha Levin64441742012-12-20 14:11:34 -05001526 elements = ARRAY_SIZE(stat_description);
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001527 pr_debug("# cpu: cpu number\n");
1528 pr_debug("Sender statistics:\n");
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001529 for (i = 0; i < elements; i++)
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001530 pr_debug("%s\n", stat_description[i]);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001531 } else if (input_arg == -1) {
1532 for_each_present_cpu(cpu) {
1533 stat = &per_cpu(ptcstats, cpu);
1534 memset(stat, 0, sizeof(struct ptc_stats));
1535 }
Cliff Wickman18129242008-06-02 08:56:14 -05001536 }
1537
1538 return count;
1539}
1540
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001541static int local_atoi(const char *name)
1542{
1543 int val = 0;
1544
1545 for (;; name++) {
1546 switch (*name) {
1547 case '0' ... '9':
1548 val = 10*val+(*name-'0');
1549 break;
1550 default:
1551 return val;
1552 }
1553 }
1554}
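/*
 * local_atoi is deliberately minimal: it stops at the first non-digit,
 * which lets parse_tunables_write() below consume one whitespace-
 * separated value at a time.
 */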
1555
1556/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001557 * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
1558 * Zero values reset them to defaults.
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001559 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001560static int parse_tunables_write(struct bau_control *bcp, char *instr,
1561 int count)
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001562{
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001563 char *p;
1564 char *q;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001565 int cnt = 0;
1566 int val;
Sasha Levin64441742012-12-20 14:11:34 -05001567 int e = ARRAY_SIZE(tunables);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001568
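	/* first pass: count the whitespace-separated values */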
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001569 p = instr + strspn(instr, WHITESPACE);
1570 q = p;
1571 for (; *p; p = q + strspn(q, WHITESPACE)) {
1572 q = p + strcspn(p, WHITESPACE);
1573 cnt++;
1574 if (q == p)
1575 break;
1576 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001577 if (cnt != e) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001578 pr_info("bau tunable error: should be %d values\n", e);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001579 return -EINVAL;
1580 }
1581
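	/*
	 * second pass: apply the values in tunables[] order; entry 0
	 * (max_concurr) is range-checked against cpus_in_uvhub.
	 */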
1582 p = instr + strspn(instr, WHITESPACE);
1583 q = p;
1584 for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
1585 q = p + strcspn(p, WHITESPACE);
1586 val = local_atoi(p);
1587 switch (cnt) {
1588 case 0:
1589 if (val == 0) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001590 max_concurr = MAX_BAU_CONCURRENT;
1591 max_concurr_const = MAX_BAU_CONCURRENT;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001592 continue;
1593 }
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001594 if (val < 1 || val > bcp->cpus_in_uvhub) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001595 pr_debug(
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001596 "Error: BAU max concurrent %d is invalid\n",
1597 val);
1598 return -EINVAL;
1599 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001600 max_concurr = val;
1601 max_concurr_const = val;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001602 continue;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001603 default:
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001604 if (val == 0)
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001605 *tunables[cnt].tunp = tunables[cnt].deflt;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001606 else
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001607 *tunables[cnt].tunp = val;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001608 continue;
1609 }
1610 if (q == p)
1611 break;
1612 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001613 return 0;
1614}
1615
1616/*
1617 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
1618 */
1619static ssize_t tunables_write(struct file *file, const char __user *user,
1620 size_t count, loff_t *data)
1621{
1622 int cpu;
1623 int ret;
1624 char instr[100];
1625 struct bau_control *bcp;
1626
1627 if (count == 0 || count > sizeof(instr)-1)
1628 return -EINVAL;
1629 if (copy_from_user(instr, user, count))
1630 return -EFAULT;
1631
1632 instr[count] = '\0';
1633
cpw@sgi.com00b30cf2011-06-21 07:21:26 -05001634 cpu = get_cpu();
1635 bcp = &per_cpu(bau_control, cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001636 ret = parse_tunables_write(bcp, instr, count);
cpw@sgi.com00b30cf2011-06-21 07:21:26 -05001637 put_cpu();
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001638 if (ret)
1639 return ret;
1640
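	/* propagate the new values to every cpu's bau_control */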
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001641 for_each_present_cpu(cpu) {
1642 bcp = &per_cpu(bau_control, cpu);
Andrew Banman67492c82016-09-21 11:09:12 -05001643 bcp->max_concurr = max_concurr;
1644 bcp->max_concurr_const = max_concurr;
1645 bcp->plugged_delay = plugged_delay;
1646 bcp->plugsb4reset = plugsb4reset;
1647 bcp->timeoutsb4reset = timeoutsb4reset;
1648 bcp->ipi_reset_limit = ipi_reset_limit;
1649 bcp->complete_threshold = complete_threshold;
1650 bcp->cong_response_us = congested_respns_us;
1651 bcp->cong_reps = congested_reps;
1652 bcp->disabled_period = sec_2_cycles(disabled_period);
1653 bcp->giveup_limit = giveup_limit;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001654 }
1655 return count;
1656}
1657
Cliff Wickman18129242008-06-02 08:56:14 -05001658static const struct seq_operations uv_ptc_seq_ops = {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001659 .start = ptc_seq_start,
1660 .next = ptc_seq_next,
1661 .stop = ptc_seq_stop,
1662 .show = ptc_seq_show
Cliff Wickman18129242008-06-02 08:56:14 -05001663};
1664
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001665static int ptc_proc_open(struct inode *inode, struct file *file)
Cliff Wickman18129242008-06-02 08:56:14 -05001666{
1667 return seq_open(file, &uv_ptc_seq_ops);
1668}
1669
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001670static int tunables_open(struct inode *inode, struct file *file)
1671{
1672 return 0;
1673}
1674
Cliff Wickman18129242008-06-02 08:56:14 -05001675static const struct file_operations proc_uv_ptc_operations = {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001676 .open = ptc_proc_open,
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001677 .read = seq_read,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001678 .write = ptc_proc_write,
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001679 .llseek = seq_lseek,
1680 .release = seq_release,
Cliff Wickman18129242008-06-02 08:56:14 -05001681};
1682
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001683static const struct file_operations tunables_fops = {
1684 .open = tunables_open,
1685 .read = tunables_read,
1686 .write = tunables_write,
Arnd Bergmann6038f372010-08-15 18:52:59 +02001687 .llseek = default_llseek,
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001688};
1689
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001690static int __init uv_ptc_init(void)
Cliff Wickman18129242008-06-02 08:56:14 -05001691{
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001692 struct proc_dir_entry *proc_uv_ptc;
Cliff Wickman18129242008-06-02 08:56:14 -05001693
1694 if (!is_uv_system())
1695 return 0;
1696
Alexey Dobriyan10f02d112009-08-23 23:17:27 +04001697 proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
1698 &proc_uv_ptc_operations);
Cliff Wickman18129242008-06-02 08:56:14 -05001699 if (!proc_uv_ptc) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001700 pr_err("unable to create %s proc entry\n",
Cliff Wickman18129242008-06-02 08:56:14 -05001701 UV_PTC_BASENAME);
1702 return -EINVAL;
1703 }
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001704
1705 tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
1706 if (!tunables_dir) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001707 pr_err("unable to create debugfs directory %s\n",
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001708 UV_BAU_TUNABLES_DIR);
1709 return -EINVAL;
1710 }
1711 tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001712 tunables_dir, NULL, &tunables_fops);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001713 if (!tunables_file) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001714 pr_err("unable to create debugfs file %s\n",
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001715 UV_BAU_TUNABLES_FILE);
1716 return -EINVAL;
1717 }
Cliff Wickman18129242008-06-02 08:56:14 -05001718 return 0;
1719}
1720
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001721/*
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001722 * Initialize the sending side's activation descriptor buffers.
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001723 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001724static void activation_descriptor_init(int node, int pnode, int base_pnode)
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001725{
1726 int i;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001727 int cpu;
Cliff Wickmanda87c932012-01-16 15:17:50 -06001728 int uv1 = 0;
Jack Steiner6a469e42011-09-20 13:55:04 -07001729 unsigned long gpa;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001730 unsigned long m;
1731 unsigned long n;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001732 size_t dsize;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001733 struct bau_desc *bau_desc;
1734 struct bau_desc *bd2;
Cliff Wickmanda87c932012-01-16 15:17:50 -06001735 struct uv1_bau_msg_header *uv1_hdr;
Cliff Wickmana26fd712014-05-14 16:15:47 -05001736 struct uv2_3_bau_msg_header *uv2_3_hdr;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001737 struct bau_control *bcp;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001738
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001739 /*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001740 * each bau_desc is 64 bytes; allocate 8 (ITEMS_PER_DESC) of them
 1741 * for each of the ADP_SZ cpus on the uvhub
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001742 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001743 dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
1744 bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001745 BUG_ON(!bau_desc);
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001746
Jack Steiner6a469e42011-09-20 13:55:04 -07001747 gpa = uv_gpa(bau_desc);
1748 n = uv_gpa_to_gnode(gpa);
Andrew Banman21e3f122016-09-21 11:09:17 -05001749 m = ops.bau_gpa_to_offset(gpa);
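	/*
	 * n is the gnode and m the node-local offset; the descriptor
	 * base MMR write below combines them as (n << UV_DESC_PSHIFT | m).
	 */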
Cliff Wickmanda87c932012-01-16 15:17:50 -06001750 if (is_uv1_hub())
1751 uv1 = 1;
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001752
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001753 /* the 14-bit pnode */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001754 write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001755 /*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001756 * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001757 * cpu even though we only use the first one; one descriptor can
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001758 * describe a broadcast to 256 uv hubs.
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001759 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001760 for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001761 memset(bd2, 0, sizeof(struct bau_desc));
Cliff Wickmanda87c932012-01-16 15:17:50 -06001762 if (uv1) {
1763 uv1_hdr = &bd2->header.uv1_hdr;
Andrew Banman67492c82016-09-21 11:09:12 -05001764 uv1_hdr->swack_flag = 1;
Cliff Wickmanda87c932012-01-16 15:17:50 -06001765 /*
1766 * The base_dest_nasid set in the message header
1767 * is the nasid of the first uvhub in the partition.
1768 * The bit map will indicate destination pnode numbers
1769 * relative to that base. They may not be consecutive
1770 * if nasid striding is being used.
1771 */
1772 uv1_hdr->base_dest_nasid =
Andrew Banman67492c82016-09-21 11:09:12 -05001773 UV_PNODE_TO_NASID(base_pnode);
1774 uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1775 uv1_hdr->command = UV_NET_ENDPOINT_INTD;
1776 uv1_hdr->int_both = 1;
Cliff Wickmanda87c932012-01-16 15:17:50 -06001777 /*
1778 * all others need to be set to zero:
1779 * fairness chaining multilevel count replied_to
1780 */
1781 } else {
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001782 /*
Cliff Wickmana26fd712014-05-14 16:15:47 -05001783 * BIOS uses legacy mode, but uv2 and uv3 hardware always
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001784 * uses native mode for selective broadcasts.
1785 */
Cliff Wickmana26fd712014-05-14 16:15:47 -05001786 uv2_3_hdr = &bd2->header.uv2_3_hdr;
Andrew Banman67492c82016-09-21 11:09:12 -05001787 uv2_3_hdr->swack_flag = 1;
Cliff Wickmana26fd712014-05-14 16:15:47 -05001788 uv2_3_hdr->base_dest_nasid =
Andrew Banman67492c82016-09-21 11:09:12 -05001789 UV_PNODE_TO_NASID(base_pnode);
1790 uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1791 uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
Cliff Wickmanda87c932012-01-16 15:17:50 -06001792 }
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001793 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001794 for_each_present_cpu(cpu) {
1795 if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
1796 continue;
1797 bcp = &per_cpu(bau_control, cpu);
1798 bcp->descriptor_base = bau_desc;
1799 }
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001800}
1801
1802/*
1803 * initialize the destination side's receiving buffers
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001804 * entered for each uvhub in the partition
1805 * - node is first node (kernel memory notion) on the uvhub
1806 * - pnode is the uvhub's physical identifier
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001807 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001808static void pq_init(int node, int pnode)
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001809{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001810 int cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001811 size_t plsize;
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001812 char *cp;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001813 void *vp;
Andrew Banmand2a57afa2016-09-21 11:09:14 -05001814 unsigned long gnode, first, last, tail;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001815 struct bau_pq_entry *pqp;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001816 struct bau_control *bcp;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001817
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001818 plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
1819 vp = kmalloc_node(plsize, GFP_KERNEL, node);
1820 pqp = (struct bau_pq_entry *)vp;
Ingo Molnardc163a42008-06-18 14:15:43 +02001821 BUG_ON(!pqp);
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001822
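	/*
	 * Align the payload queue on a 32-byte boundary; the extra
	 * queue entry allocated above provides the needed slack.
	 */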
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001823 cp = (char *)pqp + 31;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001824 pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001825
1826 for_each_present_cpu(cpu) {
1827 if (pnode != uv_cpu_to_pnode(cpu))
1828 continue;
1829 /* for every cpu on this pnode: */
1830 bcp = &per_cpu(bau_control, cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001831 bcp->queue_first = pqp;
1832 bcp->bau_msg_head = pqp;
1833 bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001834 }
Andrew Banmand2a57afa2016-09-21 11:09:14 -05001835
Andrew Banman21e3f122016-09-21 11:09:17 -05001836 first = ops.bau_gpa_to_offset(uv_gpa(pqp));
1837 last = ops.bau_gpa_to_offset(uv_gpa(pqp + (DEST_Q_SIZE - 1)));
Andrew Banmand2a57afa2016-09-21 11:09:14 -05001838
Cliff Wickman4ea3c512009-04-16 07:53:09 -05001839 /*
Andrew Banman6d780592016-09-21 11:09:20 -05001840 * Before UV4, the gnode is required to locate the payload queue
1841 * and the payload queue tail must be maintained by the kernel.
Cliff Wickman4ea3c512009-04-16 07:53:09 -05001842 */
Andrew Banman6d780592016-09-21 11:09:20 -05001843 bcp = &per_cpu(bau_control, smp_processor_id());
Andrew Banman491bd882017-03-09 10:42:09 -06001844 if (bcp->uvhub_version <= UV_BAU_V3) {
Andrew Banman6d780592016-09-21 11:09:20 -05001845 tail = first;
1846 gnode = uv_gpa_to_gnode(uv_gpa(pqp));
1847 first = (gnode << UV_PAYLOADQ_GNODE_SHIFT) | tail;
1848 write_mmr_payload_tail(pnode, tail);
1849 }
1850
Andrew Banman21e3f122016-09-21 11:09:17 -05001851 ops.write_payload_first(pnode, first);
1852 ops.write_payload_last(pnode, last);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001853
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001854 /* in effect, all msg_type's are set to MSG_NOOP */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001855 memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001856}
1857
1858/*
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001859 * Initialization of each UV hub's structures
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001860 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001861static void __init init_uvhub(int uvhub, int vector, int base_pnode)
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001862{
Cliff Wickman9674f352009-04-03 08:34:05 -05001863 int node;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001864 int pnode;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001865 unsigned long apicid;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001866
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001867 node = uvhub_to_first_node(uvhub);
1868 pnode = uv_blade_to_pnode(uvhub);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001869
1870 activation_descriptor_init(node, pnode, base_pnode);
1871
1872 pq_init(node, pnode);
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001873 /*
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001874 * The below initialization can't be in firmware because the
1875 * messaging IRQ will be determined by the OS.
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001876 */
Dimitri Sivanich8191c9f2010-11-16 16:23:52 -06001877 apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001878 write_mmr_data_config(pnode, ((apicid << 32) | vector));
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001879}
1880
1881/*
Cliff Wickman12a66112010-06-02 16:22:01 -05001882 * We will set BAU_MISC_CONTROL with a timeout period.
1883 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001884 * So the destination timeout period has to be calculated from them.
Cliff Wickman12a66112010-06-02 16:22:01 -05001885 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001886static int calculate_destination_timeout(void)
Cliff Wickman12a66112010-06-02 16:22:01 -05001887{
1888 unsigned long mmr_image;
1889 int mult1;
1890 int mult2;
1891 int index;
1892 int base;
1893 int ret;
1894 unsigned long ts_ns;
1895
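	/*
	 * Illustrative arithmetic for the UV1 branch below (hypothetical
	 * register values): an urgency7 index of 2 selects a 1280ns base;
	 * with mult1 * mult2 == 10, ts_ns = 12800 and ret = 12 (usec).
	 */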
Jack Steiner2a919592011-05-11 12:50:28 -05001896 if (is_uv1_hub()) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001897 mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
Jack Steiner2a919592011-05-11 12:50:28 -05001898 mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
1899 index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
1900 mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
1901 mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
Cliff Wickman11cab712012-06-22 08:12:12 -05001902 ts_ns = timeout_base_ns[index];
1903 ts_ns *= (mult1 * mult2);
Jack Steiner2a919592011-05-11 12:50:28 -05001904 ret = ts_ns / 1000;
1905 } else {
Cliff Wickmana26fd712014-05-14 16:15:47 -05001906 /* same destination timeout for uv2 and uv3 */
Cliff Wickmand059f9f2012-01-16 15:18:48 -06001907 /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
1908 mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
Jack Steiner2a919592011-05-11 12:50:28 -05001909 mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001910 if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
Cliff Wickmand059f9f2012-01-16 15:18:48 -06001911 base = 80;
Jack Steiner2a919592011-05-11 12:50:28 -05001912 else
Cliff Wickmand059f9f2012-01-16 15:18:48 -06001913 base = 10;
1914 mult1 = mmr_image & UV2_ACK_MASK;
Jack Steiner2a919592011-05-11 12:50:28 -05001915 ret = mult1 * base;
1916 }
Cliff Wickman12a66112010-06-02 16:22:01 -05001917 return ret;
1918}
1919
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001920static void __init init_per_cpu_tunables(void)
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001921{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001922 int cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001923 struct bau_control *bcp;
1924
1925 for_each_present_cpu(cpu) {
1926 bcp = &per_cpu(bau_control, cpu);
1927 bcp->baudisabled = 0;
Cliff Wickman26ef8572012-06-22 08:13:30 -05001928 if (nobau)
Alex Thorlton1c532e02016-03-31 14:18:29 -05001929 bcp->nobau = true;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001930 bcp->statp = &per_cpu(ptcstats, cpu);
1931 /* time interval to catch a hardware stay-busy bug */
1932 bcp->timeout_interval = usec_2_cycles(2*timeout_us);
1933 bcp->max_concurr = max_concurr;
1934 bcp->max_concurr_const = max_concurr;
1935 bcp->plugged_delay = plugged_delay;
1936 bcp->plugsb4reset = plugsb4reset;
1937 bcp->timeoutsb4reset = timeoutsb4reset;
1938 bcp->ipi_reset_limit = ipi_reset_limit;
1939 bcp->complete_threshold = complete_threshold;
1940 bcp->cong_response_us = congested_respns_us;
1941 bcp->cong_reps = congested_reps;
Andrew Banman67492c82016-09-21 11:09:12 -05001942 bcp->disabled_period = sec_2_cycles(disabled_period);
1943 bcp->giveup_limit = giveup_limit;
Cliff Wickmand2ebc712012-01-18 09:40:47 -06001944 spin_lock_init(&bcp->queue_lock);
1945 spin_lock_init(&bcp->uvhub_lock);
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001946 spin_lock_init(&bcp->disable_lock);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001947 }
1948}
1949
1950/*
1951 * Scan all cpus to collect blade and socket summaries.
1952 */
1953static int __init get_cpu_topology(int base_pnode,
1954 struct uvhub_desc *uvhub_descs,
1955 unsigned char *uvhub_mask)
1956{
1957 int cpu;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001958 int pnode;
1959 int uvhub;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001960 int socket;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001961 struct bau_control *bcp;
1962 struct uvhub_desc *bdp;
1963 struct socket_desc *sdp;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001964
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001965 for_each_present_cpu(cpu) {
1966 bcp = &per_cpu(bau_control, cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001967
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001968 memset(bcp, 0, sizeof(struct bau_control));
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001969
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001970 pnode = uv_cpu_hub_info(cpu)->pnode;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001971 if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001972 pr_emerg(
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001973 "cpu %d pnode %d-%d beyond %d; BAU disabled\n",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001974 cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001975 return 1;
1976 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001977
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001978 bcp->osnode = cpu_to_node(cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001979 bcp->partition_base_pnode = base_pnode;
1980
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001981 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05001982 *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001983 bdp = &uvhub_descs[uvhub];
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001984
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001985 bdp->num_cpus++;
1986 bdp->uvhub = uvhub;
1987 bdp->pnode = pnode;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001988
Cliff Wickmana8328ee2010-06-02 16:22:02 -05001989 /* kludge: 'assuming' one node per socket, and assuming that
 1990 * disabling a socket just leaves a gap in node numbers */
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001991 socket = bcp->osnode & 1;
Cliff Wickmana8328ee2010-06-02 16:22:02 -05001992 bdp->socket_mask |= (1 << socket);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001993 sdp = &bdp->socket[socket];
1994 sdp->cpu_number[sdp->num_cpus] = cpu;
1995 sdp->num_cpus++;
Cliff Wickmancfa60912011-01-03 12:03:53 -06001996 if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001997 pr_emerg("%d cpus per socket invalid\n",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001998 sdp->num_cpus);
Cliff Wickmancfa60912011-01-03 12:03:53 -06001999 return 1;
2000 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002001 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002002 return 0;
2003}
2004
2005/*
 2006 * Each socket master gets a local array of pnodes/hubs.
2007 */
2008static void make_per_cpu_thp(struct bau_control *smaster)
2009{
2010 int cpu;
2011 size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();
2012
2013 smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
2014 memset(smaster->thp, 0, hpsz);
2015 for_each_present_cpu(cpu) {
2016 smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
2017 smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
2018 }
2019}
2020
2021/*
cpw@sgi.com442d3922011-06-21 07:21:31 -05002022 * Each uvhub master gets a local cpumask.
2023 */
2024static void make_per_hub_cpumask(struct bau_control *hmaster)
2025{
2026 int sz = sizeof(cpumask_t);
2027
2028 hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
2029}
2030
2031/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002032 * Initialize all the per_cpu information for the cpu's on a given socket,
2033 * given what has been gathered into the socket_desc struct.
2034 * And reports the chosen hub and socket masters back to the caller.
2035 */
2036static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
2037 struct bau_control **smasterp,
2038 struct bau_control **hmasterp)
2039{
Andrew Banmandfeb28f2017-03-09 10:42:12 -06002040 int i, cpu, uvhub_cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002041 struct bau_control *bcp;
2042
2043 for (i = 0; i < sdp->num_cpus; i++) {
2044 cpu = sdp->cpu_number[i];
2045 bcp = &per_cpu(bau_control, cpu);
2046 bcp->cpu = cpu;
2047 if (i == 0) {
2048 *smasterp = bcp;
2049 if (!(*hmasterp))
2050 *hmasterp = bcp;
2051 }
2052 bcp->cpus_in_uvhub = bdp->num_cpus;
2053 bcp->cpus_in_socket = sdp->num_cpus;
2054 bcp->socket_master = *smasterp;
2055 bcp->uvhub = bdp->uvhub;
Cliff Wickmanda87c932012-01-16 15:17:50 -06002056 if (is_uv1_hub())
Andrew Banman491bd882017-03-09 10:42:09 -06002057 bcp->uvhub_version = UV_BAU_V1;
Cliff Wickmanda87c932012-01-16 15:17:50 -06002058 else if (is_uv2_hub())
Andrew Banman491bd882017-03-09 10:42:09 -06002059 bcp->uvhub_version = UV_BAU_V2;
Cliff Wickmana26fd712014-05-14 16:15:47 -05002060 else if (is_uv3_hub())
Andrew Banman491bd882017-03-09 10:42:09 -06002061 bcp->uvhub_version = UV_BAU_V3;
Andrew Banman58d4ab42016-09-21 11:09:18 -05002062 else if (is_uv4_hub())
Andrew Banman491bd882017-03-09 10:42:09 -06002063 bcp->uvhub_version = UV_BAU_V4;
Cliff Wickmanda87c932012-01-16 15:17:50 -06002064 else {
Andrew Banman58d4ab42016-09-21 11:09:18 -05002065 pr_emerg("uvhub version not 1, 2, 3, or 4\n");
Cliff Wickmanda87c932012-01-16 15:17:50 -06002066 return 1;
2067 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002068 bcp->uvhub_master = *hmasterp;
Andrew Banmandfeb28f2017-03-09 10:42:12 -06002069 uvhub_cpu = uv_cpu_blade_processor_id(cpu);
2070 bcp->uvhub_cpu = uvhub_cpu;
2071
2072 /*
2073 * The ERROR and BUSY status registers are located pairwise over
2074 * the STATUS_0 and STATUS_1 mmrs; each an array[32] of 2 bits.
2075 */
2076 if (uvhub_cpu < UV_CPUS_PER_AS) {
2077 bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
2078 bcp->status_index = uvhub_cpu * UV_ACT_STATUS_SIZE;
2079 } else {
2080 bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
2081 bcp->status_index = (uvhub_cpu - UV_CPUS_PER_AS)
2082 * UV_ACT_STATUS_SIZE;
2083 }
Mike Travis5627a8252016-04-29 16:54:14 -05002084
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002085 if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05002086 pr_emerg("%d cpus per uvhub invalid\n",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002087 bcp->uvhub_cpu);
2088 return 1;
2089 }
2090 }
2091 return 0;
2092}
2093
2094/*
2095 * Summarize the blade and socket topology into the per_cpu structures.
2096 */
2097static int __init summarize_uvhub_sockets(int nuvhubs,
2098 struct uvhub_desc *uvhub_descs,
2099 unsigned char *uvhub_mask)
2100{
2101 int socket;
2102 int uvhub;
2103 unsigned short socket_mask;
2104
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05002105 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002106 struct uvhub_desc *bdp;
2107 struct bau_control *smaster = NULL;
2108 struct bau_control *hmaster = NULL;
2109
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05002110 if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
2111 continue;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002112
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002113 bdp = &uvhub_descs[uvhub];
Cliff Wickmana8328ee2010-06-02 16:22:02 -05002114 socket_mask = bdp->socket_mask;
2115 socket = 0;
2116 while (socket_mask) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002117 struct socket_desc *sdp;
2118 if ((socket_mask & 1)) {
2119 sdp = &bdp->socket[socket];
2120 if (scan_sock(sdp, bdp, &smaster, &hmaster))
Cliff Wickmancfa60912011-01-03 12:03:53 -06002121 return 1;
cpw@sgi.com9c9153d2011-06-21 07:21:28 -05002122 make_per_cpu_thp(smaster);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002123 }
2124 socket++;
Cliff Wickmana8328ee2010-06-02 16:22:02 -05002125 socket_mask = (socket_mask >> 1);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002126 }
cpw@sgi.com442d3922011-06-21 07:21:31 -05002127 make_per_hub_cpumask(hmaster);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002128 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002129 return 0;
2130}
2131
2132/*
2133 * initialize the bau_control structure for each cpu
2134 */
2135static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
2136{
2137 unsigned char *uvhub_mask;
2138 void *vp;
2139 struct uvhub_desc *uvhub_descs;
2140
Andrew Banmane879c112016-09-21 11:09:19 -05002141 if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
2142 timeout_us = calculate_destination_timeout();
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002143
2144 vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
2145 uvhub_descs = (struct uvhub_desc *)vp;
2146 memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
2147 uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
2148
2149 if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
cpw@sgi.combbd270e2011-06-21 07:21:32 -05002150 goto fail;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002151
2152 if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
cpw@sgi.combbd270e2011-06-21 07:21:32 -05002153 goto fail;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002154
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002155 kfree(uvhub_descs);
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05002156 kfree(uvhub_mask);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002157 init_per_cpu_tunables();
Cliff Wickmancfa60912011-01-03 12:03:53 -06002158 return 0;
cpw@sgi.combbd270e2011-06-21 07:21:32 -05002159
2160fail:
2161 kfree(uvhub_descs);
2162 kfree(uvhub_mask);
2163 return 1;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05002164}
Cliff Wickman18129242008-06-02 08:56:14 -05002165
Andrew Banman2620bbb2017-03-09 10:42:13 -06002166static const struct bau_operations uv1_bau_ops __initconst = {
Andrew Banman8e3b21b2017-03-09 10:42:11 -06002167 .bau_gpa_to_offset = uv_gpa_to_offset,
2168 .read_l_sw_ack = read_mmr_sw_ack,
2169 .read_g_sw_ack = read_gmmr_sw_ack,
2170 .write_l_sw_ack = write_mmr_sw_ack,
2171 .write_g_sw_ack = write_gmmr_sw_ack,
2172 .write_payload_first = write_mmr_payload_first,
2173 .write_payload_last = write_mmr_payload_last,
Andrew Banman2620bbb2017-03-09 10:42:13 -06002174 .wait_completion = uv1_wait_completion,
2175};
2176
2177static const struct bau_operations uv2_3_bau_ops __initconst = {
2178 .bau_gpa_to_offset = uv_gpa_to_offset,
2179 .read_l_sw_ack = read_mmr_sw_ack,
2180 .read_g_sw_ack = read_gmmr_sw_ack,
2181 .write_l_sw_ack = write_mmr_sw_ack,
2182 .write_g_sw_ack = write_gmmr_sw_ack,
2183 .write_payload_first = write_mmr_payload_first,
2184 .write_payload_last = write_mmr_payload_last,
2185 .wait_completion = uv2_3_wait_completion,
Andrew Banman8e3b21b2017-03-09 10:42:11 -06002186};
2187
2188static const struct bau_operations uv4_bau_ops __initconst = {
2189 .bau_gpa_to_offset = uv_gpa_to_soc_phys_ram,
2190 .read_l_sw_ack = read_mmr_proc_sw_ack,
2191 .read_g_sw_ack = read_gmmr_proc_sw_ack,
2192 .write_l_sw_ack = write_mmr_proc_sw_ack,
2193 .write_g_sw_ack = write_gmmr_proc_sw_ack,
2194 .write_payload_first = write_mmr_proc_payload_first,
2195 .write_payload_last = write_mmr_proc_payload_last,
Andrew Banman2f2a0332017-03-09 10:42:14 -06002196 .wait_completion = uv4_wait_completion,
Andrew Banman8e3b21b2017-03-09 10:42:11 -06002197};
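/*
 * ops is filled in once at boot (see uv_bau_init below), so the
 * per-generation MMR differences cost an indirect call rather than a
 * hub-version check at every access.
 */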
2198
Cliff Wickman18129242008-06-02 08:56:14 -05002199/*
2200 * Initialization of BAU-related structures
2201 */
Cliff Wickmanb194b1202008-06-12 08:23:48 -05002202static int __init uv_bau_init(void)
Cliff Wickman18129242008-06-02 08:56:14 -05002203{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002204 int uvhub;
2205 int pnode;
2206 int nuvhubs;
Rusty Russell2c74d662009-03-18 08:22:30 +10302207 int cur_cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002208 int cpus;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002209 int vector;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002210 cpumask_var_t *mask;
Cliff Wickman18129242008-06-02 08:56:14 -05002211
2212 if (!is_uv_system())
2213 return 0;
2214
Andrew Banman4f059d52016-09-21 11:09:21 -05002215 if (is_uv4_hub())
2216 ops = uv4_bau_ops;
2217 else if (is_uv3_hub())
Andrew Banman2620bbb2017-03-09 10:42:13 -06002218 ops = uv2_3_bau_ops;
Andrew Banman5e4f96f2016-09-21 11:09:16 -05002219 else if (is_uv2_hub())
Andrew Banman2620bbb2017-03-09 10:42:13 -06002220 ops = uv2_3_bau_ops;
Andrew Banman5e4f96f2016-09-21 11:09:16 -05002221 else if (is_uv1_hub())
Andrew Banman2620bbb2017-03-09 10:42:13 -06002222 ops = uv1_bau_ops;
Andrew Banman5e4f96f2016-09-21 11:09:16 -05002223
Andrew Banman2fe9a5c2017-07-20 17:05:51 -05002224 nuvhubs = uv_num_possible_blades();
2225 if (nuvhubs < 2) {
2226 pr_crit("UV: BAU disabled - insufficient hub count\n");
2227 goto err_bau_disable;
2228 }
2229
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002230 for_each_possible_cpu(cur_cpu) {
2231 mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
2232 zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
2233 }
Rusty Russell76ba0ec2009-03-13 14:49:57 +10302234
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002235 uv_base_pnode = 0x7fffffff;
Cliff Wickman77ed23f2011-05-10 08:26:43 -05002236 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002237 cpus = uv_blade_nr_possible_cpus(uvhub);
2238 if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
2239 uv_base_pnode = uv_blade_to_pnode(uvhub);
Cliff Wickman77ed23f2011-05-10 08:26:43 -05002240 }
2241
Andrew Banmane879c112016-09-21 11:09:19 -05002242 /* software timeouts are not supported on UV4 */
2243 if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
2244 enable_timeouts();
Cliff Wickmand059f9f2012-01-16 15:18:48 -06002245
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002246 if (init_per_cpu(nuvhubs, uv_base_pnode)) {
Andrew Banman2fe9a5c2017-07-20 17:05:51 -05002247 pr_crit("UV: BAU disabled - per CPU init failed\n");
2248 goto err_bau_disable;
Cliff Wickmancfa60912011-01-03 12:03:53 -06002249 }
Ingo Molnarb4c286e2008-06-18 14:28:19 +02002250
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002251 vector = UV_BAU_MESSAGE;
Cliff Wickmana26fd712014-05-14 16:15:47 -05002252 for_each_possible_blade(uvhub) {
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002253 if (uv_blade_nr_possible_cpus(uvhub))
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002254 init_uvhub(uvhub, vector, uv_base_pnode);
Cliff Wickmana26fd712014-05-14 16:15:47 -05002255 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002256
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002257 alloc_intr_gate(vector, uv_bau_message_intr1);
2258
2259 for_each_possible_blade(uvhub) {
Cliff Wickman93a7ca02010-07-16 10:11:21 -05002260 if (uv_blade_nr_possible_cpus(uvhub)) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002261 unsigned long val;
2262 unsigned long mmr;
Cliff Wickman93a7ca02010-07-16 10:11:21 -05002263 pnode = uv_blade_to_pnode(uvhub);
2264 /* INIT the bau */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002265 val = 1L << 63;
2266 write_gmmr_activation(pnode, val);
Cliff Wickman93a7ca02010-07-16 10:11:21 -05002267 mmr = 1; /* should be 1 to broadcast to both sockets */
Cliff Wickmanda87c932012-01-16 15:17:50 -06002268 if (!is_uv1_hub())
2269 write_mmr_data_broadcast(pnode, mmr);
Cliff Wickman93a7ca02010-07-16 10:11:21 -05002270 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002271 }
Ingo Molnarb4c286e2008-06-18 14:28:19 +02002272
Cliff Wickman18129242008-06-02 08:56:14 -05002273 return 0;
Andrew Banman2fe9a5c2017-07-20 17:05:51 -05002274
2275err_bau_disable:
2276
2277 for_each_possible_cpu(cur_cpu)
2278 free_cpumask_var(per_cpu(uv_flush_tlb_mask, cur_cpu));
2279
2280 set_bau_off();
2281 nobau_perm = 1;
2282
2283 return -EINVAL;
Cliff Wickman18129242008-06-02 08:56:14 -05002284}
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002285core_initcall(uv_bau_init);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05002286fs_initcall(uv_ptc_init);