/*
 * SGI UltraViolet TLB flush routines.
 *
 * (c) 2008-2014 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>

static struct bau_operations ops __ro_after_init;

/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {
        20,
        160,
        1280,
        10240,
        81920,
        655360,
        5242880,
        167772160
};
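
/*
 * Illustration (a sketch, not taken from the hardware documentation): an
 * urgency7 field value of 3 in bits 30:28 of UVH_AGING_PRESCALE_SEL
 * selects timeout_base_ns[3], i.e. a 10240 ns base timeout.
 */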

static int timeout_us;
static bool nobau = true;
static int nobau_perm;
static cycles_t congested_cycles;

/* tunables: */
static int max_concurr = MAX_BAU_CONCURRENT;
static int max_concurr_const = MAX_BAU_CONCURRENT;
static int plugged_delay = PLUGGED_DELAY;
static int plugsb4reset = PLUGSB4RESET;
static int giveup_limit = GIVEUP_LIMIT;
static int timeoutsb4reset = TIMEOUTSB4RESET;
static int ipi_reset_limit = IPI_RESET_LIMIT;
static int complete_threshold = COMPLETE_THRESHOLD;
static int congested_respns_us = CONGESTED_RESPONSE_US;
static int congested_reps = CONGESTED_REPS;
static int disabled_period = DISABLED_PERIOD;

static struct tunables tunables[] = {
        {&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
        {&plugged_delay, PLUGGED_DELAY},
        {&plugsb4reset, PLUGSB4RESET},
        {&timeoutsb4reset, TIMEOUTSB4RESET},
        {&ipi_reset_limit, IPI_RESET_LIMIT},
        {&complete_threshold, COMPLETE_THRESHOLD},
        {&congested_respns_us, CONGESTED_RESPONSE_US},
        {&congested_reps, CONGESTED_REPS},
        {&disabled_period, DISABLED_PERIOD},
        {&giveup_limit, GIVEUP_LIMIT}
};
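
/*
 * Each entry above pairs a tunable with its default. These appear to be
 * the values exposed through the debugfs tunables_file declared below;
 * that wiring is outside this excerpt, so treat it as an assumption.
 */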

static struct dentry *tunables_dir;
static struct dentry *tunables_file;

/* these correspond to the statistics printed by ptc_seq_show() */
static char *stat_description[] = {
        "sent: number of shootdown messages sent",
        "stime: time spent sending messages",
        "numuvhubs: number of hubs targeted with shootdown",
        "numuvhubs16: number times 16 or more hubs targeted",
        "numuvhubs8: number times 8 or more hubs targeted",
        "numuvhubs4: number times 4 or more hubs targeted",
        "numuvhubs2: number times 2 or more hubs targeted",
        "numuvhubs1: number times 1 hub targeted",
        "numcpus: number of cpus targeted with shootdown",
        "dto: number of destination timeouts",
        "retries: destination timeout retries sent",
        "rok: destination timeouts successfully retried",
        "resetp: ipi-style resource resets for plugs",
        "resett: ipi-style resource resets for timeouts",
        "giveup: fall-backs to ipi-style shootdowns",
        "sto: number of source timeouts",
        "bz: number of stay-busy's",
        "throt: number times spun in throttle",
        "swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
        "recv: shootdown messages received",
        "rtime: time spent processing messages",
        "all: shootdown all-tlb messages",
        "one: shootdown one-tlb messages",
        "mult: interrupts that found multiple messages",
        "none: interrupts that found no messages",
        "retry: number of retry messages processed",
        "canc: number messages canceled by retries",
        "nocan: number retries that found nothing to cancel",
        "reset: number of ipi-style reset requests processed",
        "rcan: number messages canceled by reset requests",
        "disable: number times use of the BAU was disabled",
        "enable: number times use of the BAU was re-enabled"
};

static int __init setup_bau(char *arg)
{
        int result;

        if (!arg)
                return -EINVAL;

        result = strtobool(arg, &nobau);
        if (result)
                return result;

        /* we need to flip the logic here, so that bau=y sets nobau to false */
        nobau = !nobau;

        if (!nobau)
                pr_info("UV BAU Enabled\n");
        else
                pr_info("UV BAU Disabled\n");

        return 0;
}
early_param("bau", setup_bau);
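
/*
 * Usage sketch (assuming strtobool()'s usual 0/1/y/n parsing): booting
 * with "bau=0" or "bau=n" keeps nobau true and prints "UV BAU Disabled";
 * "bau=1" or "bau=y" clears nobau and prints "UV BAU Enabled".
 */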

/* base pnode in this partition */
static int uv_base_pnode __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);

static void
set_bau_on(void)
{
        int cpu;
        struct bau_control *bcp;

        if (nobau_perm) {
                pr_info("BAU not initialized; cannot be turned on\n");
                return;
        }
        nobau = false;
        for_each_present_cpu(cpu) {
                bcp = &per_cpu(bau_control, cpu);
                bcp->nobau = false;
        }
        pr_info("BAU turned on\n");
        return;
}

static void
set_bau_off(void)
{
        int cpu;
        struct bau_control *bcp;

        nobau = true;
        for_each_present_cpu(cpu) {
                bcp = &per_cpu(bau_control, cpu);
                bcp->nobau = true;
        }
        pr_info("BAU turned off\n");
        return;
}

/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */
static int __init uvhub_to_first_node(int uvhub)
{
        int node, b;

        for_each_online_node(node) {
                b = uv_node_to_blade_id(node);
                if (uvhub == b)
                        return node;
        }
        return -1;
}

/*
 * Determine the apicid of the first cpu on a uvhub.
 */
static int __init uvhub_to_first_apicid(int uvhub)
{
        int cpu;

        for_each_present_cpu(cpu)
                if (uvhub == uv_cpu_to_blade_id(cpu))
                        return per_cpu(x86_cpu_to_apicid, cpu);
        return -1;
}

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
                             int do_acknowledge)
{
        unsigned long dw;
        struct bau_pq_entry *msg;

        msg = mdp->msg;
        if (!msg->canceled && do_acknowledge) {
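                /*
                 * The write covers both the Timeout bits
                 * (swack_vec << UV_SW_ACK_NPENDING) and the Pending bits,
                 * so the resource is freed whether or not the message has
                 * already timed out (see the comment above this function).
                 */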
                dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
                ops.write_l_sw_ack(dw);
        }
        msg->replied_to = 1;
        msg->swack_vec = 0;
}

/*
 * Process the receipt of a RETRY message
 */
static void bau_process_retry_msg(struct msg_desc *mdp,
                                  struct bau_control *bcp)
{
        int i;
        int cancel_count = 0;
        unsigned long msg_res;
        unsigned long mmr = 0;
        struct bau_pq_entry *msg = mdp->msg;
        struct bau_pq_entry *msg2;
        struct ptc_stats *stat = bcp->statp;

        stat->d_retries++;
        /*
         * cancel any message from msg+1 to the retry itself
         */
        for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
                if (msg2 > mdp->queue_last)
                        msg2 = mdp->queue_first;
                if (msg2 == msg)
                        break;

                /* same conditions for cancellation as do_reset */
                if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
                    (msg2->swack_vec) && ((msg2->swack_vec &
                        msg->swack_vec) == 0) &&
                    (msg2->sending_cpu == msg->sending_cpu) &&
                    (msg2->msg_type != MSG_NOOP)) {
                        mmr = ops.read_l_sw_ack();
                        msg_res = msg2->swack_vec;
                        /*
                         * This is a message retry; clear the resources held
                         * by the previous message only if they timed out.
                         * If it has not timed out we have an unexpected
                         * situation to report.
                         */
                        if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
                                unsigned long mr;
                                /*
                                 * Is the resource timed out?
                                 * Make everyone ignore the cancelled message.
                                 */
                                msg2->canceled = 1;
                                stat->d_canceled++;
                                cancel_count++;
                                mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
                                ops.write_l_sw_ack(mr);
                        }
                }
        }
        if (!cancel_count)
                stat->d_nocanceled++;
}

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpus may come here at the same time for this message.
 */
static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
                                int do_acknowledge)
{
        short socket_ack_count = 0;
        short *sp;
        struct atomic_short *asp;
        struct ptc_stats *stat = bcp->statp;
        struct bau_pq_entry *msg = mdp->msg;
        struct bau_control *smaster = bcp->socket_master;

        /*
         * This must be a normal message, or retry of a normal message
         */
        if (msg->address == TLB_FLUSH_ALL) {
                local_flush_tlb();
                stat->d_alltlb++;
        } else {
                __flush_tlb_one(msg->address);
                stat->d_onetlb++;
        }
        stat->d_requestee++;

        /*
         * One cpu on each uvhub has the additional job on a RETRY
         * of releasing the resource held by the message that is
         * being retried. That message is identified by sending
         * cpu number.
         */
        if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
                bau_process_retry_msg(mdp, bcp);

        /*
         * This is a swack message, so we have to reply to it.
         * Count each responding cpu on the socket. This avoids
         * pinging the count's cache line back and forth between
         * the sockets.
         */
        sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
        asp = (struct atomic_short *)sp;
        socket_ack_count = atom_asr(1, asp);
        if (socket_ack_count == bcp->cpus_in_socket) {
                int msg_ack_count;
                /*
                 * Both sockets dump their completed count total into
                 * the message's count.
                 */
                *sp = 0;
                asp = (struct atomic_short *)&msg->acknowledge_count;
                msg_ack_count = atom_asr(socket_ack_count, asp);

                if (msg_ack_count == bcp->cpus_in_uvhub) {
                        /*
                         * All cpus in uvhub saw it; reply
                         * (unless we are in the UV2 workaround)
                         */
                        reply_to_message(mdp, bcp, do_acknowledge);
                }
        }

        return;
}

/*
 * Determine the first cpu on a pnode.
 */
static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
{
        int cpu;
        struct hub_and_pnode *hpp;

        for_each_present_cpu(cpu) {
                hpp = &smaster->thp[cpu];
                if (pnode == hpp->pnode)
                        return cpu;
        }
        return -1;
}

/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero swack_vec field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * swack resources.
 */
static void do_reset(void *ptr)
{
        int i;
        struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
        struct reset_args *rap = (struct reset_args *)ptr;
        struct bau_pq_entry *msg;
        struct ptc_stats *stat = bcp->statp;

        stat->d_resets++;
        /*
         * We're looking for the given sender, and
         * will free its swack resource.
         * If all cpus finally responded after the timeout, its
         * message 'replied_to' was set.
         */
        for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
                unsigned long msg_res;
                /* do_reset: same conditions for cancellation as
                   bau_process_retry_msg() */
                if ((msg->replied_to == 0) &&
                    (msg->canceled == 0) &&
                    (msg->sending_cpu == rap->sender) &&
                    (msg->swack_vec) &&
                    (msg->msg_type != MSG_NOOP)) {
                        unsigned long mmr;
                        unsigned long mr;
                        /*
                         * make everyone else ignore this message
                         */
                        msg->canceled = 1;
                        /*
                         * only reset the resource if it is still pending
                         */
                        mmr = ops.read_l_sw_ack();
                        msg_res = msg->swack_vec;
                        mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
                        if (mmr & msg_res) {
                                stat->d_rcanceled++;
                                ops.write_l_sw_ack(mr);
                        }
                }
        }
        return;
}

/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */
static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
{
        int pnode;
        int apnode;
        int maskbits;
        int sender = bcp->cpu;
        cpumask_t *mask = bcp->uvhub_master->cpumask;
        struct bau_control *smaster = bcp->socket_master;
        struct reset_args reset_args;

        reset_args.sender = sender;
        cpumask_clear(mask);
        /* find a single cpu for each uvhub in this distribution mask */
        maskbits = sizeof(struct pnmask) * BITSPERBYTE;
        /* each bit is a pnode relative to the partition base pnode */
        for (pnode = 0; pnode < maskbits; pnode++) {
                int cpu;
                if (!bau_uvhub_isset(pnode, distribution))
                        continue;
                apnode = pnode + bcp->partition_base_pnode;
                cpu = pnode_to_first_cpu(apnode, smaster);
                cpumask_set_cpu(cpu, mask);
        }

        /* IPI all cpus; preemption is already disabled */
        smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
        return;
}

/*
 * Not to be confused with cycles_2_ns() from tsc.c; this gives a relative
 * number, not an absolute. It converts a duration in cycles to a duration in
 * ns.
 */
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
        struct cyc2ns_data data;
        unsigned long long ns;

        cyc2ns_read_begin(&data);
        ns = mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
        cyc2ns_read_end();

        return ns;
}
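
/*
 * Worked example (illustrative numbers): the cyc2ns data express
 * ns-per-cycle as the fixed-point fraction cyc2ns_mul / 2^cyc2ns_shift,
 * so on a 2 GHz TSC (0.5 ns per cycle) cycles_2_ns(1000) computes
 * mul_u64_u32_shr(1000, mul, shift) ~= 500 ns.
 */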

/*
 * The reverse of the above; converts a duration in ns to a duration in cycles.
 */
static inline unsigned long long ns_2_cycles(unsigned long long ns)
{
        struct cyc2ns_data data;
        unsigned long long cyc;

        cyc2ns_read_begin(&data);
        cyc = (ns << data.cyc2ns_shift) / data.cyc2ns_mul;
        cyc2ns_read_end();

        return cyc;
}

static inline unsigned long cycles_2_us(unsigned long long cyc)
{
        return cycles_2_ns(cyc) / NSEC_PER_USEC;
}

static inline cycles_t sec_2_cycles(unsigned long sec)
{
        return ns_2_cycles(sec * NSEC_PER_SEC);
}

static inline unsigned long long usec_2_cycles(unsigned long usec)
{
        return ns_2_cycles(usec * NSEC_PER_USEC);
}

/*
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * bau_flush_send_and_wait()
 */
static inline void quiesce_local_uvhub(struct bau_control *hmaster)
{
        atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

/*
 * mark this quiet-requestor as done
 */
static inline void end_uvhub_quiesce(struct bau_control *hmaster)
{
        atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
{
        unsigned long descriptor_status;

        descriptor_status = uv_read_local_mmr(mmr_offset);
        descriptor_status >>= right_shift;
        descriptor_status &= UV_ACT_STATUS_MASK;
        return descriptor_status;
}

/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 */
static int uv1_wait_completion(struct bau_desc *bau_desc,
                               struct bau_control *bcp, long try)
{
        unsigned long descriptor_status;
        cycles_t ttm;
        u64 mmr_offset = bcp->status_mmr;
        int right_shift = bcp->status_index;
        struct ptc_stats *stat = bcp->statp;

        descriptor_status = uv1_read_status(mmr_offset, right_shift);
        /* spin on the status MMR, waiting for it to go idle */
        while ((descriptor_status != DS_IDLE)) {
                /*
                 * Our software ack messages may be blocked because
                 * there are no swack resources available. As long
                 * as none of them has timed out hardware will NACK
                 * our message and its state will stay IDLE.
                 */
                if (descriptor_status == DS_SOURCE_TIMEOUT) {
                        stat->s_stimeout++;
                        return FLUSH_GIVEUP;
                } else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
                        stat->s_dtimeout++;
                        ttm = get_cycles();

                        /*
                         * Our retries may be blocked by all destination
                         * swack resources being consumed, and a timeout
                         * pending. In that case hardware returns the
                         * ERROR that looks like a destination timeout.
                         */
                        if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
                                bcp->conseccompletes = 0;
                                return FLUSH_RETRY_PLUGGED;
                        }

                        bcp->conseccompletes = 0;
                        return FLUSH_RETRY_TIMEOUT;
                } else {
                        /*
                         * descriptor_status is still BUSY
                         */
                        cpu_relax();
                }
                descriptor_status = uv1_read_status(mmr_offset, right_shift);
        }
        bcp->conseccompletes++;
        return FLUSH_COMPLETE;
}

/*
 * UV2 could have an extra bit of status in the ACTIVATION_STATUS_2 register.
 * But not currently used.
 */
static unsigned long uv2_3_read_status(unsigned long offset, int rshft, int desc)
{
        return ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK) << 1;
}

/*
 * Return whether the status of the descriptor that is normally used for this
 * cpu (the one indexed by its hub-relative cpu number) is busy.
 * The status of the original 32 descriptors is always reflected in the 64
 * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
 * The bit provided by the activation_status_2 register is irrelevant to
 * the status if it is only being tested for busy or not busy.
 */
int normal_busy(struct bau_control *bcp)
{
        int cpu = bcp->uvhub_cpu;
        int mmr_offset;
        int right_shift;

        mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
        right_shift = cpu * UV_ACT_STATUS_SIZE;
        return (((((read_lmmr(mmr_offset) >> right_shift) &
                UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
}
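
/*
 * Indexing example for normal_busy() (assumes UV_ACT_STATUS_SIZE is 2,
 * i.e. 32 two-bit status fields packed into the 64-bit STATUS_0
 * register, per the comment above): hub-relative cpu 5 gets
 * right_shift = 10, so its busy status lives in bits 11:10.
 */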

/*
 * Entered when a bau descriptor has gone into a permanent busy wait because
 * of a hardware bug.
 * Workaround the bug.
 */
int handle_uv2_busy(struct bau_control *bcp)
{
        struct ptc_stats *stat = bcp->statp;

        stat->s_uv2_wars++;
        bcp->busy = 1;
        return FLUSH_GIVEUP;
}

static int uv2_3_wait_completion(struct bau_desc *bau_desc,
                                 struct bau_control *bcp, long try)
{
        unsigned long descriptor_stat;
        cycles_t ttm;
        u64 mmr_offset = bcp->status_mmr;
        int right_shift = bcp->status_index;
        int desc = bcp->uvhub_cpu;
        long busy_reps = 0;
        struct ptc_stats *stat = bcp->statp;

        descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);

        /* spin on the status MMR, waiting for it to go idle */
        while (descriptor_stat != UV2H_DESC_IDLE) {
                if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT)) {
                        /*
                         * A h/w bug on the destination side may
                         * have prevented the message being marked
                         * pending, thus it doesn't get replied to
                         * and gets continually nacked until it times
                         * out with a SOURCE_TIMEOUT.
                         */
                        stat->s_stimeout++;
                        return FLUSH_GIVEUP;
                } else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
                        ttm = get_cycles();

                        /*
                         * Our retries may be blocked by all destination
                         * swack resources being consumed, and a timeout
                         * pending. In that case hardware returns the
                         * ERROR that looks like a destination timeout.
                         * Without using the extended status we have to
                         * deduce from the short time that this was a
                         * strong nack.
                         */
                        if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
                                bcp->conseccompletes = 0;
                                stat->s_plugged++;
                                /* FLUSH_RETRY_PLUGGED causes hang on boot */
                                return FLUSH_GIVEUP;
                        }
                        stat->s_dtimeout++;
                        bcp->conseccompletes = 0;
                        /* FLUSH_RETRY_TIMEOUT causes hang on boot */
                        return FLUSH_GIVEUP;
                } else {
                        busy_reps++;
                        if (busy_reps > 1000000) {
                                /* not to hammer on the clock */
                                busy_reps = 0;
                                ttm = get_cycles();
                                if ((ttm - bcp->send_message) > bcp->timeout_interval)
                                        return handle_uv2_busy(bcp);
                        }
                        /*
                         * descriptor_stat is still BUSY
                         */
                        cpu_relax();
                }
                descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);
        }
        bcp->conseccompletes++;
        return FLUSH_COMPLETE;
}

/*
 * Returns the status of current BAU message for cpu desc as a bit field
 * [Error][Busy][Aux]
 */
static u64 read_status(u64 status_mmr, int index, int desc)
{
        u64 stat;

        stat = ((read_lmmr(status_mmr) >> index) & UV_ACT_STATUS_MASK) << 1;
        stat |= (read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_2) >> desc) & 0x1;

        return stat;
}
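
/*
 * Composition of the value returned above: the two status-MMR bits land
 * in bits 2:1 ([Error][Busy]) and the per-descriptor bit from
 * ACTIVATION_STATUS_2 lands in bit 0 ([Aux]), forming the 3-bit values
 * that uv4_wait_completion() switches on below.
 */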

static int uv4_wait_completion(struct bau_desc *bau_desc,
                               struct bau_control *bcp, long try)
{
        struct ptc_stats *stat = bcp->statp;
        u64 descriptor_stat;
        u64 mmr = bcp->status_mmr;
        int index = bcp->status_index;
        int desc = bcp->uvhub_cpu;

        descriptor_stat = read_status(mmr, index, desc);

        /* spin on the status MMR, waiting for it to go idle */
        while (descriptor_stat != UV2H_DESC_IDLE) {
                switch (descriptor_stat) {
                case UV2H_DESC_SOURCE_TIMEOUT:
                        stat->s_stimeout++;
                        return FLUSH_GIVEUP;

                case UV2H_DESC_DEST_TIMEOUT:
                        stat->s_dtimeout++;
                        bcp->conseccompletes = 0;
                        return FLUSH_RETRY_TIMEOUT;

                case UV2H_DESC_DEST_STRONG_NACK:
                        stat->s_plugged++;
                        bcp->conseccompletes = 0;
                        return FLUSH_RETRY_PLUGGED;

                case UV2H_DESC_DEST_PUT_ERR:
                        bcp->conseccompletes = 0;
                        return FLUSH_GIVEUP;

                default:
                        /* descriptor_stat is still BUSY */
                        cpu_relax();
                }
                descriptor_stat = read_status(mmr, index, desc);
        }
        bcp->conseccompletes++;
        return FLUSH_COMPLETE;
}

/*
 * Our retries are blocked by all destination sw ack resources being
 * in use, and a timeout is pending. In that case hardware immediately
 * returns the ERROR that looks like a destination timeout.
 */
static void destination_plugged(struct bau_desc *bau_desc,
                                struct bau_control *bcp,
                                struct bau_control *hmaster, struct ptc_stats *stat)
{
        udelay(bcp->plugged_delay);
        bcp->plugged_tries++;

        if (bcp->plugged_tries >= bcp->plugsb4reset) {
                bcp->plugged_tries = 0;

                quiesce_local_uvhub(hmaster);

                spin_lock(&hmaster->queue_lock);
                reset_with_ipi(&bau_desc->distribution, bcp);
                spin_unlock(&hmaster->queue_lock);

                end_uvhub_quiesce(hmaster);

                bcp->ipi_attempts++;
                stat->s_resets_plug++;
        }
}

static void destination_timeout(struct bau_desc *bau_desc,
                                struct bau_control *bcp, struct bau_control *hmaster,
                                struct ptc_stats *stat)
{
        hmaster->max_concurr = 1;
        bcp->timeout_tries++;
        if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
                bcp->timeout_tries = 0;

                quiesce_local_uvhub(hmaster);

                spin_lock(&hmaster->queue_lock);
                reset_with_ipi(&bau_desc->distribution, bcp);
                spin_unlock(&hmaster->queue_lock);

                end_uvhub_quiesce(hmaster);

                bcp->ipi_attempts++;
                stat->s_resets_timeout++;
        }
}

/*
 * Stop all cpus on a uvhub from using the BAU for a period of time.
 * This is reversed by check_enable.
 */
static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
{
        int tcpu;
        struct bau_control *tbcp;
        struct bau_control *hmaster;
        cycles_t tm1;

        hmaster = bcp->uvhub_master;
        spin_lock(&hmaster->disable_lock);
        if (!bcp->baudisabled) {
                stat->s_bau_disabled++;
                tm1 = get_cycles();
                for_each_present_cpu(tcpu) {
                        tbcp = &per_cpu(bau_control, tcpu);
                        if (tbcp->uvhub_master == hmaster) {
                                tbcp->baudisabled = 1;
                                tbcp->set_bau_on_time =
                                        tm1 + bcp->disabled_period;
                        }
                }
        }
        spin_unlock(&hmaster->disable_lock);
}

static void count_max_concurr(int stat, struct bau_control *bcp,
                              struct bau_control *hmaster)
{
        bcp->plugged_tries = 0;
        bcp->timeout_tries = 0;
        if (stat != FLUSH_COMPLETE)
                return;
        if (bcp->conseccompletes <= bcp->complete_threshold)
                return;
        if (hmaster->max_concurr >= hmaster->max_concurr_const)
                return;
        hmaster->max_concurr++;
}

static void record_send_stats(cycles_t time1, cycles_t time2,
                              struct bau_control *bcp, struct ptc_stats *stat,
                              int completion_status, int try)
{
        cycles_t elapsed;

        if (time2 > time1) {
                elapsed = time2 - time1;
                stat->s_time += elapsed;

                if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
                        bcp->period_requests++;
                        bcp->period_time += elapsed;
                        if ((elapsed > congested_cycles) &&
                            (bcp->period_requests > bcp->cong_reps) &&
                            ((bcp->period_time / bcp->period_requests) >
                             congested_cycles)) {
                                stat->s_congested++;
                                disable_for_period(bcp, stat);
                        }
                }
        } else
                stat->s_requestor--;

        if (completion_status == FLUSH_COMPLETE && try > 1)
                stat->s_retriesok++;
        else if (completion_status == FLUSH_GIVEUP) {
                stat->s_giveup++;
                if (get_cycles() > bcp->period_end)
                        bcp->period_giveups = 0;
                bcp->period_giveups++;
                if (bcp->period_giveups == 1)
                        bcp->period_end = get_cycles() + bcp->disabled_period;
                if (bcp->period_giveups > bcp->giveup_limit) {
                        disable_for_period(bcp, stat);
                        stat->s_giveuplimit++;
                }
        }
}
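
/*
 * Restating the congestion test above: a period is judged congested,
 * and the BAU is disabled for a while, only when a first-try broadcast
 * exceeded congested_cycles, more than cong_reps requests have been
 * sampled in the period, and the period's per-request average also
 * exceeds congested_cycles.
 */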

/*
 * Because of a uv1 hardware bug only a limited number of concurrent
 * requests can be made.
 */
static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
{
        spinlock_t *lock = &hmaster->uvhub_lock;
        atomic_t *v;

        v = &hmaster->active_descriptor_count;
        if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
                stat->s_throttles++;
                do {
                        cpu_relax();
                } while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
        }
}

/*
 * Handle the completion status of a message send.
 */
static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
                         struct bau_control *bcp, struct bau_control *hmaster,
                         struct ptc_stats *stat)
{
        if (completion_status == FLUSH_RETRY_PLUGGED)
                destination_plugged(bau_desc, bcp, hmaster, stat);
        else if (completion_status == FLUSH_RETRY_TIMEOUT)
                destination_timeout(bau_desc, bcp, hmaster, stat);
}

/*
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to including
 * cpus that are on the local uvhub.
 *
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */
int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp,
                           struct bau_desc *bau_desc)
{
        int seq_number = 0;
        int completion_stat = 0;
        int uv1 = 0;
        long try = 0;
        unsigned long index;
        cycles_t time1;
        cycles_t time2;
        struct ptc_stats *stat = bcp->statp;
        struct bau_control *hmaster = bcp->uvhub_master;
        struct uv1_bau_msg_header *uv1_hdr = NULL;
        struct uv2_3_bau_msg_header *uv2_3_hdr = NULL;

        if (bcp->uvhub_version == UV_BAU_V1) {
                uv1 = 1;
                uv1_throttle(hmaster, stat);
        }

        while (hmaster->uvhub_quiesce)
                cpu_relax();

        time1 = get_cycles();
        if (uv1)
                uv1_hdr = &bau_desc->header.uv1_hdr;
        else
                /* uv2 and uv3 */
                uv2_3_hdr = &bau_desc->header.uv2_3_hdr;

        do {
                if (try == 0) {
                        if (uv1)
                                uv1_hdr->msg_type = MSG_REGULAR;
                        else
                                uv2_3_hdr->msg_type = MSG_REGULAR;
                        seq_number = bcp->message_number++;
                } else {
                        if (uv1)
                                uv1_hdr->msg_type = MSG_RETRY;
                        else
                                uv2_3_hdr->msg_type = MSG_RETRY;
                        stat->s_retry_messages++;
                }

                if (uv1)
                        uv1_hdr->sequence = seq_number;
                else
                        uv2_3_hdr->sequence = seq_number;
                index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
                bcp->send_message = get_cycles();

                write_mmr_activation(index);

                try++;
                completion_stat = ops.wait_completion(bau_desc, bcp, try);

                handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);

                if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
                        bcp->ipi_attempts = 0;
                        stat->s_overipilimit++;
                        completion_stat = FLUSH_GIVEUP;
                        break;
                }
                cpu_relax();
        } while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
                 (completion_stat == FLUSH_RETRY_TIMEOUT));

        time2 = get_cycles();

        count_max_concurr(completion_stat, bcp, hmaster);

        while (hmaster->uvhub_quiesce)
                cpu_relax();

        atomic_dec(&hmaster->active_descriptor_count);

        record_send_stats(time1, time2, bcp, stat, completion_stat, try);

        if (completion_stat == FLUSH_GIVEUP)
                /* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
                return 1;
        return 0;
}

/*
 * The BAU is disabled for this uvhub. When the disabled time period has
 * expired re-enable it.
 * Return 0 if it is re-enabled for all cpus on this uvhub.
 */
static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
{
        int tcpu;
        struct bau_control *tbcp;
        struct bau_control *hmaster;

        hmaster = bcp->uvhub_master;
        spin_lock(&hmaster->disable_lock);
        if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
                stat->s_bau_reenabled++;
                for_each_present_cpu(tcpu) {
                        tbcp = &per_cpu(bau_control, tcpu);
                        if (tbcp->uvhub_master == hmaster) {
                                tbcp->baudisabled = 0;
                                tbcp->period_requests = 0;
                                tbcp->period_time = 0;
                                tbcp->period_giveups = 0;
                        }
                }
                spin_unlock(&hmaster->disable_lock);
                return 0;
        }
        spin_unlock(&hmaster->disable_lock);
        return -1;
}

static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
                                   int remotes, struct bau_desc *bau_desc)
{
        stat->s_requestor++;
        stat->s_ntargcpu += remotes + locals;
        stat->s_ntargremotes += remotes;
        stat->s_ntarglocals += locals;

        /* uvhub statistics */
        hubs = bau_uvhub_weight(&bau_desc->distribution);
        if (locals) {
                stat->s_ntarglocaluvhub++;
                stat->s_ntargremoteuvhub += (hubs - 1);
        } else
                stat->s_ntargremoteuvhub += hubs;

        stat->s_ntarguvhub += hubs;

        if (hubs >= 16)
                stat->s_ntarguvhub16++;
        else if (hubs >= 8)
                stat->s_ntarguvhub8++;
        else if (hubs >= 4)
                stat->s_ntarguvhub4++;
        else if (hubs >= 2)
                stat->s_ntarguvhub2++;
        else
                stat->s_ntarguvhub1++;
}

/*
 * Translate a cpu mask to the uvhub distribution mask in the BAU
 * activation descriptor.
 */
static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
                            struct bau_desc *bau_desc, int *localsp, int *remotesp)
{
        int cpu;
        int pnode;
        int cnt = 0;
        struct hub_and_pnode *hpp;

        for_each_cpu(cpu, flush_mask) {
                /*
                 * The distribution vector is a bit map of pnodes, relative
                 * to the partition base pnode (and the partition base nasid
                 * in the header).
                 * Translate cpu to pnode and hub using a local memory array.
                 */
                hpp = &bcp->socket_master->thp[cpu];
                pnode = hpp->pnode - bcp->partition_base_pnode;
                bau_uvhub_set(pnode, &bau_desc->distribution);
                cnt++;
                if (hpp->uvhub == bcp->uvhub)
                        (*localsp)++;
                else
                        (*remotesp)++;
        }
        if (!cnt)
                return 1;
        return 0;
}
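
/*
 * Translation example: with partition_base_pnode == 4, a target cpu on
 * pnode 9 sets bit 5 of the descriptor's distribution bitmap; the
 * partition base nasid carried in the message header supplies the
 * offset back to absolute node numbers.
 */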

/*
 * globally purge translation cache of a virtual address or all TLBs
 * @cpumask: mask of all cpus in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @start: start virtual address to be removed from TLB
 * @end: end virtual address to be removed from TLB
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLBs on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct. This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done. The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
                                          struct mm_struct *mm,
                                          unsigned long start,
                                          unsigned long end,
                                          unsigned int cpu)
{
        int locals = 0, remotes = 0, hubs = 0;
        struct bau_desc *bau_desc;
        struct cpumask *flush_mask;
        struct ptc_stats *stat;
        struct bau_control *bcp;
        unsigned long descriptor_status, status, address;

        bcp = &per_cpu(bau_control, cpu);

        if (bcp->nobau)
                return cpumask;

        stat = bcp->statp;
        stat->s_enters++;

        if (bcp->busy) {
                descriptor_status =
                        read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
                status = ((descriptor_status >> (bcp->uvhub_cpu *
                        UV_ACT_STATUS_SIZE)) & UV_ACT_STATUS_MASK) << 1;
                if (status == UV2H_DESC_BUSY)
                        return cpumask;
                bcp->busy = 0;
        }

        /* bau was disabled due to slow response */
        if (bcp->baudisabled) {
                if (check_enable(bcp, stat)) {
                        stat->s_ipifordisabled++;
                        return cpumask;
                }
        }

        /*
         * Each sending cpu has a per-cpu mask which it fills from the caller's
         * cpu mask. All cpus are converted to uvhubs and copied to the
         * activation descriptor.
         */
        flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
        /* don't actually do a shootdown of the local cpu */
        cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

        if (cpumask_test_cpu(cpu, cpumask))
                stat->s_ntargself++;

        bau_desc = bcp->descriptor_base;
        bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu);
        bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
        if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
                return NULL;

        record_send_statistics(stat, locals, hubs, remotes, bau_desc);

        if (!end || (end - start) <= PAGE_SIZE)
                address = start;
        else
                address = TLB_FLUSH_ALL;

        switch (bcp->uvhub_version) {
        case UV_BAU_V1:
        case UV_BAU_V2:
        case UV_BAU_V3:
                bau_desc->payload.uv1_2_3.address = address;
                bau_desc->payload.uv1_2_3.sending_cpu = cpu;
                break;
        case UV_BAU_V4:
                bau_desc->payload.uv4.address = address;
                bau_desc->payload.uv4.sending_cpu = cpu;
                bau_desc->payload.uv4.qualifier = BAU_DESC_QUALIFIER;
                break;
        }

        /*
         * uv_flush_send_and_wait returns 0 if all cpus were messaged,
         * or 1 if it gave up and the original cpumask should be returned.
         */
        if (!uv_flush_send_and_wait(flush_mask, bcp, bau_desc))
                return NULL;
        else
                return cpumask;
}
1212
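/*
 * Illustrative sketch (not part of this file): the caller-side contract
 * of the flush routine above.  A NULL return means the BAU delivered
 * the shootdown everywhere; a non-NULL mask is the set of cpus the
 * caller must still reach conventionally.  flush_with_ipis() is a
 * hypothetical stand-in for that fallback path.
 */
extern void flush_with_ipis(const struct cpumask *mask);	/* hypothetical */

static void example_consume_bau_result(const struct cpumask *leftover)
{
	if (leftover)
		flush_with_ipis(leftover);	/* hypothetical IPI fallback */
	/* NULL: nothing left to do, the BAU reached every target */
}
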
1213/*
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001214 * Search the message queue for any 'other' unprocessed message with the
1215 * same software acknowledge resource bit vector as the 'msg' message.
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001216 */
1217struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001218 struct bau_control *bcp)
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001219{
1220 struct bau_pq_entry *msg_next = msg + 1;
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001221 unsigned char swack_vec = msg->swack_vec;
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001222
1223 if (msg_next > bcp->queue_last)
1224 msg_next = bcp->queue_first;
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001225 while (msg_next != msg) {
1226 if ((msg_next->canceled == 0) && (msg_next->replied_to == 0) &&
1227 (msg_next->swack_vec == swack_vec))
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001228 return msg_next;
1229 msg_next++;
1230 if (msg_next > bcp->queue_last)
1231 msg_next = bcp->queue_first;
1232 }
1233 return NULL;
1234}
1235
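/*
 * Sketch (hypothetical helper, not in this file): the wraparound step
 * used by the search above and by the interrupt handler further down,
 * factored out.  The payload queue is a fixed ring spanning
 * queue_first..queue_last inclusive.
 */
static struct bau_pq_entry *example_next_pq(struct bau_pq_entry *e,
					    struct bau_control *bcp)
{
	e++;
	return (e > bcp->queue_last) ? bcp->queue_first : e;
}
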
1236/*
1237 * UV2 needs to work around a bug in which an arriving message has not
1238 * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
1239 * Such a message must be ignored.
1240 */
1241void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
1242{
1243 unsigned long mmr_image;
1244 unsigned char swack_vec;
1245 struct bau_pq_entry *msg = mdp->msg;
1246 struct bau_pq_entry *other_msg;
1247
Andrew Banman21e3f122016-09-21 11:09:17 -05001248 mmr_image = ops.read_l_sw_ack();
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001249 swack_vec = msg->swack_vec;
1250
1251 if ((swack_vec & mmr_image) == 0) {
1252 /*
1253 * This message was assigned a swack resource, but no
1254		 * reserved acknowledgment is pending.
1255 * The bug has prevented this message from setting the MMR.
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001256 */
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001257 /*
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001258 * Some message has set the MMR 'pending' bit; it might have
1259 * been another message. Look for that message.
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001260 */
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001261 other_msg = find_another_by_swack(msg, bcp);
1262 if (other_msg) {
1263 /*
1264 * There is another. Process this one but do not
1265 * ack it.
1266 */
1267 bau_process_message(mdp, bcp, 0);
1268 /*
1269 * Let the natural processing of that other message
1270 * acknowledge it. Don't get the processing of sw_ack's
1271 * out of order.
1272 */
1273 return;
1274 }
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001275 }
1276
1277 /*
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001278 * Either the MMR shows this one pending a reply or there is no
1279 * other message using this sw_ack, so it is safe to acknowledge it.
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001280 */
1281 bau_process_message(mdp, bcp, 1);
1282
1283 return;
1284}
1285
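/*
 * Equivalent condensed form of the UV2 workaround above (illustrative
 * rewrite, not the code actually used): acknowledge unless the swack
 * bit is missing from the MMR *and* some other unprocessed message
 * owns the same swack resource.
 */
static void example_uv2_dispatch(struct msg_desc *mdp, struct bau_control *bcp)
{
	struct bau_pq_entry *msg = mdp->msg;
	int do_ack = 1;

	if (!(msg->swack_vec & ops.read_l_sw_ack()) &&
	    find_another_by_swack(msg, bcp))
		do_ack = 0;	/* the other message's path will ack */

	bau_process_message(mdp, bcp, do_ack);
}
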
1286/*
Cliff Wickman18129242008-06-02 08:56:14 -05001287 * The BAU message interrupt comes here. (registered by set_intr_gate)
1288 * See entry_64.S
1289 *
1290 * We received a broadcast assist message.
1291 *
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001292 * Interrupts are disabled; this interrupt could represent
Cliff Wickman18129242008-06-02 08:56:14 -05001293 * the receipt of several messages.
1294 *
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001295 * All cores/threads on this hub get this interrupt.
1296 * The last one to see it does the software ack.
Cliff Wickman18129242008-06-02 08:56:14 -05001297 * (the resource will not be freed until noninterruptible cpus see this
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001298 * interrupt; hardware may time out the s/w ack and reply ERROR)
Cliff Wickman18129242008-06-02 08:56:14 -05001299 */
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001300void uv_bau_message_interrupt(struct pt_regs *regs)
Cliff Wickman18129242008-06-02 08:56:14 -05001301{
Cliff Wickman18129242008-06-02 08:56:14 -05001302 int count = 0;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001303 cycles_t time_start;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001304 struct bau_pq_entry *msg;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001305 struct bau_control *bcp;
1306 struct ptc_stats *stat;
1307 struct msg_desc msgdesc;
Cliff Wickman18129242008-06-02 08:56:14 -05001308
Cliff Wickman88ed9dd2012-01-16 15:21:46 -06001309 ack_APIC_irq();
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001310 time_start = get_cycles();
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001311
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001312 bcp = &per_cpu(bau_control, smp_processor_id());
Cliff Wickman712157a2010-06-02 16:22:02 -05001313 stat = bcp->statp;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001314
1315 msgdesc.queue_first = bcp->queue_first;
1316 msgdesc.queue_last = bcp->queue_last;
1317
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001318 msg = bcp->bau_msg_head;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001319 while (msg->swack_vec) {
Cliff Wickman18129242008-06-02 08:56:14 -05001320 count++;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001321
1322 msgdesc.msg_slot = msg - msgdesc.queue_first;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001323 msgdesc.msg = msg;
Andrew Banman491bd882017-03-09 10:42:09 -06001324 if (bcp->uvhub_version == UV_BAU_V2)
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001325 process_uv2_message(&msgdesc, bcp);
1326 else
Cliff Wickmana26fd712014-05-14 16:15:47 -05001327 /* no error workaround for uv1 or uv3 */
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001328 bau_process_message(&msgdesc, bcp, 1);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001329
Cliff Wickman18129242008-06-02 08:56:14 -05001330 msg++;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001331 if (msg > msgdesc.queue_last)
1332 msg = msgdesc.queue_first;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001333 bcp->bau_msg_head = msg;
Cliff Wickman18129242008-06-02 08:56:14 -05001334 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001335 stat->d_time += (get_cycles() - time_start);
Cliff Wickman18129242008-06-02 08:56:14 -05001336 if (!count)
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001337 stat->d_nomsg++;
Cliff Wickman18129242008-06-02 08:56:14 -05001338 else if (count > 1)
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001339 stat->d_multmsg++;
Cliff Wickman18129242008-06-02 08:56:14 -05001340}
1341
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001342/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001343 * Each target uvhub (i.e. a uvhub that has cpus) needs to have
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001344 * shootdown message timeouts enabled. The timeout does not cause
1345 * an interrupt, but causes an error message to be returned to
1346 * the sender.
1347 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001348static void __init enable_timeouts(void)
Cliff Wickman18129242008-06-02 08:56:14 -05001349{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001350 int uvhub;
1351 int nuvhubs;
Cliff Wickman18129242008-06-02 08:56:14 -05001352 int pnode;
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001353 unsigned long mmr_image;
Cliff Wickman18129242008-06-02 08:56:14 -05001354
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001355 nuvhubs = uv_num_possible_blades();
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001356
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001357 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
1358 if (!uv_blade_nr_possible_cpus(uvhub))
Cliff Wickman18129242008-06-02 08:56:14 -05001359 continue;
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001360
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001361 pnode = uv_blade_to_pnode(uvhub);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001362 mmr_image = read_mmr_misc_control(pnode);
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001363 /*
1364	 * Set the timeout period and then lock it in; the three
1365	 * steps below capture and lock in the period.
1366 *
1367 * To program the period, the SOFT_ACK_MODE must be off.
1368 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001369 mmr_image &= ~(1L << SOFTACK_MSHIFT);
1370 write_mmr_misc_control(pnode, mmr_image);
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001371 /*
1372 * Set the 4-bit period.
1373 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001374 mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
1375 mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
1376 write_mmr_misc_control(pnode, mmr_image);
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001377 /*
Jack Steiner2a919592011-05-11 12:50:28 -05001378 * UV1:
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001379 * Subsequent reversals of the timebase bit (3) cause an
1380 * immediate timeout of one or all INTD resources as
1381 * indicated in bits 2:0 (7 causes all of them to timeout).
1382 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001383 mmr_image |= (1L << SOFTACK_MSHIFT);
Jack Steiner2a919592011-05-11 12:50:28 -05001384 if (is_uv2_hub()) {
Cliff Wickmana26fd712014-05-14 16:15:47 -05001385 /* do not touch the legacy mode bit */
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001386 /* hw bug workaround; do not use extended status */
1387 mmr_image &= ~(1L << UV2_EXT_SHFT);
Cliff Wickmana26fd712014-05-14 16:15:47 -05001388 } else if (is_uv3_hub()) {
1389 mmr_image &= ~(1L << PREFETCH_HINT_SHFT);
1390 mmr_image |= (1L << SB_STATUS_SHFT);
Jack Steiner2a919592011-05-11 12:50:28 -05001391 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001392 write_mmr_misc_control(pnode, mmr_image);
Cliff Wickman18129242008-06-02 08:56:14 -05001393 }
Cliff Wickman18129242008-06-02 08:56:14 -05001394}
1395
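/*
 * Sketch of the three-step MMR sequence above as a single helper
 * (illustrative condensation only; the uv2/uv3 quirk bits handled in
 * enable_timeouts() are omitted).  'period' is the 4-bit SOFTACK
 * timeout index.
 */
static void example_program_softack(int pnode, unsigned long period)
{
	unsigned long mmr = read_mmr_misc_control(pnode);

	mmr &= ~(1L << SOFTACK_MSHIFT);		/* 1: mode off to unlock */
	write_mmr_misc_control(pnode, mmr);
	mmr &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
	mmr |= (period << SOFTACK_PSHIFT);	/* 2: set the 4-bit period */
	write_mmr_misc_control(pnode, mmr);
	mmr |= (1L << SOFTACK_MSHIFT);		/* 3: mode on locks it in */
	write_mmr_misc_control(pnode, mmr);
}
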
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001396static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
Cliff Wickman18129242008-06-02 08:56:14 -05001397{
1398 if (*offset < num_possible_cpus())
1399 return offset;
1400 return NULL;
1401}
1402
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001403static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
Cliff Wickman18129242008-06-02 08:56:14 -05001404{
1405 (*offset)++;
1406 if (*offset < num_possible_cpus())
1407 return offset;
1408 return NULL;
1409}
1410
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001411static void ptc_seq_stop(struct seq_file *file, void *data)
Cliff Wickman18129242008-06-02 08:56:14 -05001412{
1413}
1414
1415/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001416 * Display the statistics through /proc/sgi_uv/ptc_statistics
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001417 * 'data' points to the cpu number
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001418 * Note: see the descriptions in stat_description[].
Cliff Wickman18129242008-06-02 08:56:14 -05001419 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001420static int ptc_seq_show(struct seq_file *file, void *data)
Cliff Wickman18129242008-06-02 08:56:14 -05001421{
1422 struct ptc_stats *stat;
Cliff Wickman26ef8572012-06-22 08:13:30 -05001423 struct bau_control *bcp;
Cliff Wickman18129242008-06-02 08:56:14 -05001424 int cpu;
1425
1426 cpu = *(loff_t *)data;
Cliff Wickman18129242008-06-02 08:56:14 -05001427 if (!cpu) {
Rasmus Villemoes37367082014-11-28 22:03:41 +01001428 seq_puts(file,
1429 "# cpu bauoff sent stime self locals remotes ncpus localhub ");
1430 seq_puts(file, "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
1431 seq_puts(file,
1432 "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries ");
1433 seq_puts(file,
1434 "rok resetp resett giveup sto bz throt disable ");
1435 seq_puts(file,
1436 "enable wars warshw warwaits enters ipidis plugged ");
1437 seq_puts(file,
1438 "ipiover glim cong swack recv rtime all one mult ");
1439 seq_puts(file, "none retry canc nocan reset rcan\n");
Cliff Wickman18129242008-06-02 08:56:14 -05001440 }
1441 if (cpu < num_possible_cpus() && cpu_online(cpu)) {
Cliff Wickman26ef8572012-06-22 08:13:30 -05001442 bcp = &per_cpu(bau_control, cpu);
James Custerfa2a79ce2014-11-02 12:16:39 -06001443 if (bcp->nobau) {
1444 seq_printf(file, "cpu %d bau disabled\n", cpu);
1445 return 0;
1446 }
Cliff Wickman26ef8572012-06-22 08:13:30 -05001447 stat = bcp->statp;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001448 /* source side statistics */
1449 seq_printf(file,
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001450 "cpu %d %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
Cliff Wickman26ef8572012-06-22 08:13:30 -05001451 cpu, bcp->nobau, stat->s_requestor,
1452 cycles_2_us(stat->s_time),
Cliff Wickman450a0072010-06-02 16:22:02 -05001453 stat->s_ntargself, stat->s_ntarglocals,
1454 stat->s_ntargremotes, stat->s_ntargcpu,
1455 stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
1456 stat->s_ntarguvhub, stat->s_ntarguvhub16);
Cliff Wickmanb54bd9b2012-01-16 15:22:38 -06001457 seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001458 stat->s_ntarguvhub8, stat->s_ntarguvhub4,
1459 stat->s_ntarguvhub2, stat->s_ntarguvhub1,
Cliff Wickmanb54bd9b2012-01-16 15:22:38 -06001460 stat->s_dtimeout, stat->s_strongnacks);
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001461 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001462 stat->s_retry_messages, stat->s_retriesok,
1463 stat->s_resets_plug, stat->s_resets_timeout,
1464 stat->s_giveup, stat->s_stimeout,
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001465 stat->s_busy, stat->s_throttles);
1466 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
1467 stat->s_bau_disabled, stat->s_bau_reenabled,
1468 stat->s_uv2_wars, stat->s_uv2_wars_hw,
1469 stat->s_uv2_war_waits, stat->s_enters,
1470 stat->s_ipifordisabled, stat->s_plugged,
1471 stat->s_overipilimit, stat->s_giveuplimit,
1472 stat->s_congested);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001473
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001474 /* destination side statistics */
1475 seq_printf(file,
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001476 "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
Andrew Banman21e3f122016-09-21 11:09:17 -05001477 ops.read_g_sw_ack(uv_cpu_to_pnode(cpu)),
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001478 stat->d_requestee, cycles_2_us(stat->d_time),
1479 stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
1480 stat->d_nomsg, stat->d_retries, stat->d_canceled,
1481 stat->d_nocanceled, stat->d_resets,
1482 stat->d_rcanceled);
Cliff Wickman18129242008-06-02 08:56:14 -05001483 }
Cliff Wickman18129242008-06-02 08:56:14 -05001484 return 0;
1485}
1486
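/*
 * Sketch (illustrative, buffering omitted): roughly how the seq_file
 * core drives the four routines above.  The loff_t cursor is simply
 * the cpu number, so the walk covers cpus 0..num_possible_cpus()-1,
 * with the header row printed for cpu 0.
 */
static void example_seq_walk(struct seq_file *file)
{
	loff_t pos = 0;
	void *v;

	for (v = ptc_seq_start(file, &pos); v; v = ptc_seq_next(file, v, &pos))
		ptc_seq_show(file, v);
	ptc_seq_stop(file, v);
}
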
1487/*
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001488 * Display the tunables through debugfs
1489 */
1490static ssize_t tunables_read(struct file *file, char __user *userbuf,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001491 size_t count, loff_t *ppos)
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001492{
Dan Carpenterb365a852010-09-29 10:41:05 +02001493 char *buf;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001494 int ret;
1495
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001496 buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d %d\n",
1497 "max_concur plugged_delay plugsb4reset timeoutsb4reset",
1498 "ipi_reset_limit complete_threshold congested_response_us",
1499 "congested_reps disabled_period giveup_limit",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001500 max_concurr, plugged_delay, plugsb4reset,
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001501 timeoutsb4reset, ipi_reset_limit, complete_threshold,
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001502 congested_respns_us, congested_reps, disabled_period,
1503 giveup_limit);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001504
Dan Carpenterb365a852010-09-29 10:41:05 +02001505 if (!buf)
1506 return -ENOMEM;
1507
1508 ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
1509 kfree(buf);
1510 return ret;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001511}
1512
1513/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001514 * handle a write to /proc/sgi_uv/ptc_statistics
1515 * -1: reset the statistics
Cliff Wickman18129242008-06-02 08:56:14 -05001516 * 0: display meaning of the statistics
Cliff Wickman18129242008-06-02 08:56:14 -05001517 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001518static ssize_t ptc_proc_write(struct file *file, const char __user *user,
1519 size_t count, loff_t *data)
Cliff Wickman18129242008-06-02 08:56:14 -05001520{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001521 int cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001522 int i;
1523 int elements;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001524 long input_arg;
Cliff Wickman18129242008-06-02 08:56:14 -05001525 char optstr[64];
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001526 struct ptc_stats *stat;
Cliff Wickman18129242008-06-02 08:56:14 -05001527
Cliff Wickmane7eb8722008-06-23 08:32:25 -05001528 if (count == 0 || count > sizeof(optstr))
Cliff Wickmancef53272008-06-19 11:16:24 -05001529 return -EINVAL;
Cliff Wickman18129242008-06-02 08:56:14 -05001530 if (copy_from_user(optstr, user, count))
1531 return -EFAULT;
1532 optstr[count - 1] = '\0';
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001533
Cliff Wickman26ef8572012-06-22 08:13:30 -05001534 if (!strcmp(optstr, "on")) {
1535 set_bau_on();
1536 return count;
1537 } else if (!strcmp(optstr, "off")) {
1538 set_bau_off();
1539 return count;
1540 }
1541
Daniel Walter164109e2014-08-08 14:24:03 -07001542 if (kstrtol(optstr, 10, &input_arg) < 0) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001543 pr_debug("%s is invalid\n", optstr);
Cliff Wickman18129242008-06-02 08:56:14 -05001544 return -EINVAL;
1545 }
1546
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001547 if (input_arg == 0) {
Sasha Levin64441742012-12-20 14:11:34 -05001548 elements = ARRAY_SIZE(stat_description);
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001549 pr_debug("# cpu: cpu number\n");
1550 pr_debug("Sender statistics:\n");
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001551 for (i = 0; i < elements; i++)
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001552 pr_debug("%s\n", stat_description[i]);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001553 } else if (input_arg == -1) {
1554 for_each_present_cpu(cpu) {
1555 stat = &per_cpu(ptcstats, cpu);
1556 memset(stat, 0, sizeof(struct ptc_stats));
1557 }
Cliff Wickman18129242008-06-02 08:56:14 -05001558 }
1559
1560 return count;
1561}
1562
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001563static int local_atoi(const char *name)
1564{
1565 int val = 0;
1566
1567 for (;; name++) {
1568 switch (*name) {
1569 case '0' ... '9':
1570 val = 10*val+(*name-'0');
1571 break;
1572 default:
1573 return val;
1574 }
1575 }
1576}
1577
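/*
 * Behavior of local_atoi() by example (illustrative): parsing stops at
 * the first non-digit, with no sign or overflow handling; that is all
 * the whitespace-separated tunables parser below needs:
 *
 *	local_atoi("42 8") == 42	(space ends the token)
 *	local_atoi("007")  == 7
 *	local_atoi("x42")  == 0		(no leading digits)
 */
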
1578/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001579 * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
1580 * Zero values reset them to defaults.
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001581 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001582static int parse_tunables_write(struct bau_control *bcp, char *instr,
1583 int count)
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001584{
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001585 char *p;
1586 char *q;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001587 int cnt = 0;
1588 int val;
Sasha Levin64441742012-12-20 14:11:34 -05001589 int e = ARRAY_SIZE(tunables);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001590
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001591 p = instr + strspn(instr, WHITESPACE);
1592 q = p;
1593 for (; *p; p = q + strspn(q, WHITESPACE)) {
1594 q = p + strcspn(p, WHITESPACE);
1595 cnt++;
1596 if (q == p)
1597 break;
1598 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001599 if (cnt != e) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001600 pr_info("bau tunable error: should be %d values\n", e);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001601 return -EINVAL;
1602 }
1603
1604 p = instr + strspn(instr, WHITESPACE);
1605 q = p;
1606 for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
1607 q = p + strcspn(p, WHITESPACE);
1608 val = local_atoi(p);
1609 switch (cnt) {
1610 case 0:
1611 if (val == 0) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001612 max_concurr = MAX_BAU_CONCURRENT;
1613 max_concurr_const = MAX_BAU_CONCURRENT;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001614 continue;
1615 }
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001616 if (val < 1 || val > bcp->cpus_in_uvhub) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001617 pr_debug(
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001618 "Error: BAU max concurrent %d is invalid\n",
1619 val);
1620 return -EINVAL;
1621 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001622 max_concurr = val;
1623 max_concurr_const = val;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001624 continue;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001625 default:
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001626 if (val == 0)
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001627 *tunables[cnt].tunp = tunables[cnt].deflt;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001628 else
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001629 *tunables[cnt].tunp = val;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001630 continue;
1631 }
1632 if (q == p)
1633 break;
1634 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001635 return 0;
1636}
1637
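/*
 * Example write (values are illustrative, not the defaults): all
 * ARRAY_SIZE(tunables) fields must be supplied, whitespace-separated,
 * in the order of the tunables[] table; a 0 in any position restores
 * that tunable's default:
 *
 *	echo "8 10 2 1 1 5 1000 10 10 3" \
 *		> /sys/kernel/debug/sgi_uv/bau_tunables
 */
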
1638/*
1639 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
1640 */
1641static ssize_t tunables_write(struct file *file, const char __user *user,
1642 size_t count, loff_t *data)
1643{
1644 int cpu;
1645 int ret;
1646 char instr[100];
1647 struct bau_control *bcp;
1648
1649 if (count == 0 || count > sizeof(instr)-1)
1650 return -EINVAL;
1651 if (copy_from_user(instr, user, count))
1652 return -EFAULT;
1653
1654 instr[count] = '\0';
1655
cpw@sgi.com00b30cf2011-06-21 07:21:26 -05001656 cpu = get_cpu();
1657 bcp = &per_cpu(bau_control, cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001658 ret = parse_tunables_write(bcp, instr, count);
cpw@sgi.com00b30cf2011-06-21 07:21:26 -05001659 put_cpu();
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001660 if (ret)
1661 return ret;
1662
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001663 for_each_present_cpu(cpu) {
1664 bcp = &per_cpu(bau_control, cpu);
Andrew Banman67492c82016-09-21 11:09:12 -05001665 bcp->max_concurr = max_concurr;
1666 bcp->max_concurr_const = max_concurr;
1667 bcp->plugged_delay = plugged_delay;
1668 bcp->plugsb4reset = plugsb4reset;
1669 bcp->timeoutsb4reset = timeoutsb4reset;
1670 bcp->ipi_reset_limit = ipi_reset_limit;
1671 bcp->complete_threshold = complete_threshold;
1672 bcp->cong_response_us = congested_respns_us;
1673 bcp->cong_reps = congested_reps;
1674 bcp->disabled_period = sec_2_cycles(disabled_period);
1675 bcp->giveup_limit = giveup_limit;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001676 }
1677 return count;
1678}
1679
Cliff Wickman18129242008-06-02 08:56:14 -05001680static const struct seq_operations uv_ptc_seq_ops = {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001681 .start = ptc_seq_start,
1682 .next = ptc_seq_next,
1683 .stop = ptc_seq_stop,
1684 .show = ptc_seq_show
Cliff Wickman18129242008-06-02 08:56:14 -05001685};
1686
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001687static int ptc_proc_open(struct inode *inode, struct file *file)
Cliff Wickman18129242008-06-02 08:56:14 -05001688{
1689 return seq_open(file, &uv_ptc_seq_ops);
1690}
1691
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001692static int tunables_open(struct inode *inode, struct file *file)
1693{
1694 return 0;
1695}
1696
Cliff Wickman18129242008-06-02 08:56:14 -05001697static const struct file_operations proc_uv_ptc_operations = {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001698 .open = ptc_proc_open,
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001699 .read = seq_read,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001700 .write = ptc_proc_write,
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001701 .llseek = seq_lseek,
1702 .release = seq_release,
Cliff Wickman18129242008-06-02 08:56:14 -05001703};
1704
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001705static const struct file_operations tunables_fops = {
1706 .open = tunables_open,
1707 .read = tunables_read,
1708 .write = tunables_write,
Arnd Bergmann6038f372010-08-15 18:52:59 +02001709 .llseek = default_llseek,
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001710};
1711
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001712static int __init uv_ptc_init(void)
Cliff Wickman18129242008-06-02 08:56:14 -05001713{
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001714 struct proc_dir_entry *proc_uv_ptc;
Cliff Wickman18129242008-06-02 08:56:14 -05001715
1716 if (!is_uv_system())
1717 return 0;
1718
Alexey Dobriyan10f02d112009-08-23 23:17:27 +04001719 proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
1720 &proc_uv_ptc_operations);
Cliff Wickman18129242008-06-02 08:56:14 -05001721 if (!proc_uv_ptc) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001722 pr_err("unable to create %s proc entry\n",
Cliff Wickman18129242008-06-02 08:56:14 -05001723 UV_PTC_BASENAME);
1724 return -EINVAL;
1725 }
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001726
1727 tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
1728 if (!tunables_dir) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001729 pr_err("unable to create debugfs directory %s\n",
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001730 UV_BAU_TUNABLES_DIR);
1731 return -EINVAL;
1732 }
1733 tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001734 tunables_dir, NULL, &tunables_fops);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001735 if (!tunables_file) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001736 pr_err("unable to create debugfs file %s\n",
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001737 UV_BAU_TUNABLES_FILE);
1738 return -EINVAL;
1739 }
Cliff Wickman18129242008-06-02 08:56:14 -05001740 return 0;
1741}
1742
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001743/*
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001744 * Initialize the sending side's sending buffers.
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001745 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001746static void activation_descriptor_init(int node, int pnode, int base_pnode)
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001747{
1748 int i;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001749 int cpu;
Cliff Wickmanda87c932012-01-16 15:17:50 -06001750 int uv1 = 0;
Jack Steiner6a469e42011-09-20 13:55:04 -07001751 unsigned long gpa;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001752 unsigned long m;
1753 unsigned long n;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001754 size_t dsize;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001755 struct bau_desc *bau_desc;
1756 struct bau_desc *bd2;
Cliff Wickmanda87c932012-01-16 15:17:50 -06001757 struct uv1_bau_msg_header *uv1_hdr;
Cliff Wickmana26fd712014-05-14 16:15:47 -05001758 struct uv2_3_bau_msg_header *uv2_3_hdr;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001759 struct bau_control *bcp;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001760
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001761 /*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001762 * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
1763 * per cpu; and one per cpu on the uvhub (ADP_SZ)
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001764 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001765 dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
1766 bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001767 BUG_ON(!bau_desc);
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001768
Jack Steiner6a469e42011-09-20 13:55:04 -07001769 gpa = uv_gpa(bau_desc);
1770 n = uv_gpa_to_gnode(gpa);
Andrew Banman21e3f122016-09-21 11:09:17 -05001771 m = ops.bau_gpa_to_offset(gpa);
Cliff Wickmanda87c932012-01-16 15:17:50 -06001772 if (is_uv1_hub())
1773 uv1 = 1;
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001774
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001775 /* the 14-bit pnode */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001776 write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001777 /*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001778 * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001779 * cpu even though we only use the first one; one descriptor can
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001780 * describe a broadcast to 256 uv hubs.
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001781 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001782 for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001783 memset(bd2, 0, sizeof(struct bau_desc));
Cliff Wickmanda87c932012-01-16 15:17:50 -06001784 if (uv1) {
1785 uv1_hdr = &bd2->header.uv1_hdr;
Andrew Banman67492c82016-09-21 11:09:12 -05001786 uv1_hdr->swack_flag = 1;
Cliff Wickmanda87c932012-01-16 15:17:50 -06001787 /*
1788 * The base_dest_nasid set in the message header
1789 * is the nasid of the first uvhub in the partition.
1790 * The bit map will indicate destination pnode numbers
1791 * relative to that base. They may not be consecutive
1792 * if nasid striding is being used.
1793 */
1794 uv1_hdr->base_dest_nasid =
Andrew Banman67492c82016-09-21 11:09:12 -05001795 UV_PNODE_TO_NASID(base_pnode);
1796 uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1797 uv1_hdr->command = UV_NET_ENDPOINT_INTD;
1798 uv1_hdr->int_both = 1;
Cliff Wickmanda87c932012-01-16 15:17:50 -06001799 /*
1800 * all others need to be set to zero:
1801 * fairness chaining multilevel count replied_to
1802 */
1803 } else {
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001804 /*
Cliff Wickmana26fd712014-05-14 16:15:47 -05001805 * BIOS uses legacy mode, but uv2 and uv3 hardware always
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001806 * uses native mode for selective broadcasts.
1807 */
Cliff Wickmana26fd712014-05-14 16:15:47 -05001808 uv2_3_hdr = &bd2->header.uv2_3_hdr;
Andrew Banman67492c82016-09-21 11:09:12 -05001809 uv2_3_hdr->swack_flag = 1;
Cliff Wickmana26fd712014-05-14 16:15:47 -05001810 uv2_3_hdr->base_dest_nasid =
Andrew Banman67492c82016-09-21 11:09:12 -05001811 UV_PNODE_TO_NASID(base_pnode);
1812 uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1813 uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
Cliff Wickmanda87c932012-01-16 15:17:50 -06001814 }
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001815 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001816 for_each_present_cpu(cpu) {
1817 if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
1818 continue;
1819 bcp = &per_cpu(bau_control, cpu);
1820 bcp->descriptor_base = bau_desc;
1821 }
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001822}
1823
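/*
 * Sketch (hypothetical helper): assembling the value written to the
 * descriptor-base MMR above.  The table's global address is split into
 * a gnode and an on-node offset, then repacked with the gnode in the
 * upper bits.
 */
static unsigned long example_descriptor_base(struct bau_desc *table)
{
	unsigned long gpa = uv_gpa(table);

	return (uv_gpa_to_gnode(gpa) << UV_DESC_PSHIFT) |
		ops.bau_gpa_to_offset(gpa);
}
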
1824/*
1825 * initialize the destination side's receiving buffers
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001826 * entered for each uvhub in the partition
1827 * - node is first node (kernel memory notion) on the uvhub
1828 * - pnode is the uvhub's physical identifier
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001829 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001830static void pq_init(int node, int pnode)
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001831{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001832 int cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001833 size_t plsize;
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001834 char *cp;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001835 void *vp;
Andrew Banmand2a57afa2016-09-21 11:09:14 -05001836 unsigned long gnode, first, last, tail;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001837 struct bau_pq_entry *pqp;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001838 struct bau_control *bcp;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001839
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001840 plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
1841 vp = kmalloc_node(plsize, GFP_KERNEL, node);
1842 pqp = (struct bau_pq_entry *)vp;
Ingo Molnardc163a42008-06-18 14:15:43 +02001843 BUG_ON(!pqp);
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001844
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001845 cp = (char *)pqp + 31;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001846 pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001847
1848 for_each_present_cpu(cpu) {
1849 if (pnode != uv_cpu_to_pnode(cpu))
1850 continue;
1851 /* for every cpu on this pnode: */
1852 bcp = &per_cpu(bau_control, cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001853 bcp->queue_first = pqp;
1854 bcp->bau_msg_head = pqp;
1855 bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001856 }
Andrew Banmand2a57afa2016-09-21 11:09:14 -05001857
Andrew Banman21e3f122016-09-21 11:09:17 -05001858 first = ops.bau_gpa_to_offset(uv_gpa(pqp));
1859 last = ops.bau_gpa_to_offset(uv_gpa(pqp + (DEST_Q_SIZE - 1)));
Andrew Banmand2a57afa2016-09-21 11:09:14 -05001860
Cliff Wickman4ea3c512009-04-16 07:53:09 -05001861 /*
Andrew Banman6d780592016-09-21 11:09:20 -05001862	 * Pre-UV4, the gnode is required to locate the payload queue
1863 * and the payload queue tail must be maintained by the kernel.
Cliff Wickman4ea3c512009-04-16 07:53:09 -05001864 */
Andrew Banman6d780592016-09-21 11:09:20 -05001865 bcp = &per_cpu(bau_control, smp_processor_id());
Andrew Banman491bd882017-03-09 10:42:09 -06001866 if (bcp->uvhub_version <= UV_BAU_V3) {
Andrew Banman6d780592016-09-21 11:09:20 -05001867 tail = first;
1868 gnode = uv_gpa_to_gnode(uv_gpa(pqp));
1869 first = (gnode << UV_PAYLOADQ_GNODE_SHIFT) | tail;
1870 write_mmr_payload_tail(pnode, tail);
1871 }
1872
Andrew Banman21e3f122016-09-21 11:09:17 -05001873 ops.write_payload_first(pnode, first);
1874 ops.write_payload_last(pnode, last);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001875
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001876 /* in effect, all msg_type's are set to MSG_NOOP */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001877 memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001878}
1879
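/*
 * Sketch: the alignment idiom from pq_init() above.  kmalloc_node()
 * makes no 32-byte promise, so one spare entry is allocated and the
 * pointer is rounded up to the next 32-byte boundary; shifting right
 * then left by 5 is equivalent to masking off the low 5 bits.
 */
static void *example_round_up_32(void *p)
{
	return (void *)(((unsigned long)p + 31) & ~31UL);
}
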
1880/*
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001881 * Initialization of each UV hub's structures
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001882 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001883static void __init init_uvhub(int uvhub, int vector, int base_pnode)
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001884{
Cliff Wickman9674f352009-04-03 08:34:05 -05001885 int node;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001886 int pnode;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001887 unsigned long apicid;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001888
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001889 node = uvhub_to_first_node(uvhub);
1890 pnode = uv_blade_to_pnode(uvhub);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001891
1892 activation_descriptor_init(node, pnode, base_pnode);
1893
1894 pq_init(node, pnode);
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001895 /*
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001896 * The below initialization can't be in firmware because the
1897 * messaging IRQ will be determined by the OS.
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001898 */
Dimitri Sivanich8191c9f2010-11-16 16:23:52 -06001899 apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001900 write_mmr_data_config(pnode, ((apicid << 32) | vector));
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001901}
1902
1903/*
Cliff Wickman12a66112010-06-02 16:22:01 -05001904 * We will set BAU_MISC_CONTROL with a timeout period.
1905 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001906 * So the destination timeout period has to be calculated from them.
Cliff Wickman12a66112010-06-02 16:22:01 -05001907 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001908static int calculate_destination_timeout(void)
Cliff Wickman12a66112010-06-02 16:22:01 -05001909{
1910 unsigned long mmr_image;
1911 int mult1;
1912 int mult2;
1913 int index;
1914 int base;
1915 int ret;
1916 unsigned long ts_ns;
1917
Jack Steiner2a919592011-05-11 12:50:28 -05001918 if (is_uv1_hub()) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001919 mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
Jack Steiner2a919592011-05-11 12:50:28 -05001920 mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
1921 index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
1922 mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
1923 mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
Cliff Wickman11cab712012-06-22 08:12:12 -05001924 ts_ns = timeout_base_ns[index];
1925 ts_ns *= (mult1 * mult2);
Jack Steiner2a919592011-05-11 12:50:28 -05001926 ret = ts_ns / 1000;
1927 } else {
Cliff Wickmana26fd712014-05-14 16:15:47 -05001928 /* same destination timeout for uv2 and uv3 */
Cliff Wickmand059f9f2012-01-16 15:18:48 -06001929 /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
1930 mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
Jack Steiner2a919592011-05-11 12:50:28 -05001931 mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001932 if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
Cliff Wickmand059f9f2012-01-16 15:18:48 -06001933 base = 80;
Jack Steiner2a919592011-05-11 12:50:28 -05001934 else
Cliff Wickmand059f9f2012-01-16 15:18:48 -06001935 base = 10;
1936 mult1 = mmr_image & UV2_ACK_MASK;
Jack Steiner2a919592011-05-11 12:50:28 -05001937 ret = mult1 * base;
1938 }
Cliff Wickman12a66112010-06-02 16:22:01 -05001939 return ret;
1940}
1941
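/*
 * Worked example (hypothetical register values): on uv1, urgency7
 * index 2 selects timeout_base_ns[2] = 1280ns; multipliers of 4 and 16
 * give 1280 * 4 * 16 = 81920ns, about 82us.  On uv2/uv3, the 80us base
 * with a multiplier of 4 gives a flat 320us.
 */
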
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001942static void __init init_per_cpu_tunables(void)
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001943{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001944 int cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001945 struct bau_control *bcp;
1946
1947 for_each_present_cpu(cpu) {
1948 bcp = &per_cpu(bau_control, cpu);
1949 bcp->baudisabled = 0;
Cliff Wickman26ef8572012-06-22 08:13:30 -05001950 if (nobau)
Alex Thorlton1c532e02016-03-31 14:18:29 -05001951 bcp->nobau = true;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001952 bcp->statp = &per_cpu(ptcstats, cpu);
1953 /* time interval to catch a hardware stay-busy bug */
1954 bcp->timeout_interval = usec_2_cycles(2*timeout_us);
1955 bcp->max_concurr = max_concurr;
1956 bcp->max_concurr_const = max_concurr;
1957 bcp->plugged_delay = plugged_delay;
1958 bcp->plugsb4reset = plugsb4reset;
1959 bcp->timeoutsb4reset = timeoutsb4reset;
1960 bcp->ipi_reset_limit = ipi_reset_limit;
1961 bcp->complete_threshold = complete_threshold;
1962 bcp->cong_response_us = congested_respns_us;
1963 bcp->cong_reps = congested_reps;
Andrew Banman67492c82016-09-21 11:09:12 -05001964 bcp->disabled_period = sec_2_cycles(disabled_period);
1965 bcp->giveup_limit = giveup_limit;
Cliff Wickmand2ebc712012-01-18 09:40:47 -06001966 spin_lock_init(&bcp->queue_lock);
1967 spin_lock_init(&bcp->uvhub_lock);
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001968 spin_lock_init(&bcp->disable_lock);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001969 }
1970}
1971
1972/*
1973 * Scan all cpus to collect blade and socket summaries.
1974 */
1975static int __init get_cpu_topology(int base_pnode,
1976 struct uvhub_desc *uvhub_descs,
1977 unsigned char *uvhub_mask)
1978{
1979 int cpu;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001980 int pnode;
1981 int uvhub;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001982 int socket;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001983 struct bau_control *bcp;
1984 struct uvhub_desc *bdp;
1985 struct socket_desc *sdp;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001986
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001987 for_each_present_cpu(cpu) {
1988 bcp = &per_cpu(bau_control, cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001989
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001990 memset(bcp, 0, sizeof(struct bau_control));
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001991
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001992 pnode = uv_cpu_hub_info(cpu)->pnode;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001993 if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001994 pr_emerg(
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001995 "cpu %d pnode %d-%d beyond %d; BAU disabled\n",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001996 cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001997 return 1;
1998 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001999
Cliff Wickman77ed23f2011-05-10 08:26:43 -05002000 bcp->osnode = cpu_to_node(cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002001 bcp->partition_base_pnode = base_pnode;
2002
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002003 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05002004 *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002005 bdp = &uvhub_descs[uvhub];
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002006
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002007 bdp->num_cpus++;
2008 bdp->uvhub = uvhub;
2009 bdp->pnode = pnode;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002010
Cliff Wickmana8328ee2010-06-02 16:22:02 -05002011 /* kludge: 'assuming' one node per socket, and assuming that
2012 disabling a socket just leaves a gap in node numbers */
Cliff Wickman77ed23f2011-05-10 08:26:43 -05002013 socket = bcp->osnode & 1;
Cliff Wickmana8328ee2010-06-02 16:22:02 -05002014 bdp->socket_mask |= (1 << socket);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002015 sdp = &bdp->socket[socket];
2016 sdp->cpu_number[sdp->num_cpus] = cpu;
2017 sdp->num_cpus++;
Cliff Wickmancfa60912011-01-03 12:03:53 -06002018 if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05002019 pr_emerg("%d cpus per socket invalid\n",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002020 sdp->num_cpus);
Cliff Wickmancfa60912011-01-03 12:03:53 -06002021 return 1;
2022 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002023 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002024 return 0;
2025}
2026
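/*
 * Sketch (hypothetical helpers): the byte-array bitmap idiom used for
 * uvhub_mask above and tested again in summarize_uvhub_sockets() below,
 * one bit per possible uvhub.
 */
static inline void example_set_uvhub(unsigned char *mask, int uvhub)
{
	mask[uvhub / 8] |= 1 << (uvhub % 8);
}

static inline int example_test_uvhub(const unsigned char *mask, int uvhub)
{
	return mask[uvhub / 8] & (1 << (uvhub % 8));
}
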
2027/*
2028 * Each socket is to get a local array of pnodes/hubs.
2029 */
2030static void make_per_cpu_thp(struct bau_control *smaster)
2031{
2032 int cpu;
2033 size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();
2034
2035 smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
2036 memset(smaster->thp, 0, hpsz);
2037 for_each_present_cpu(cpu) {
2038 smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
2039 smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
2040 }
2041}
2042
2043/*
cpw@sgi.com442d3922011-06-21 07:21:31 -05002044 * Each uvhub is to get a local cpumask.
2045 */
2046static void make_per_hub_cpumask(struct bau_control *hmaster)
2047{
2048 int sz = sizeof(cpumask_t);
2049
2050 hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
2051}
2052
2053/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002054 * Initialize all the per_cpu information for the cpus on a given socket,
2055 * given what has been gathered into the socket_desc struct.
2056 * And reports the chosen hub and socket masters back to the caller.
2057 */
2058static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
2059 struct bau_control **smasterp,
2060 struct bau_control **hmasterp)
2061{
Andrew Banmandfeb28f2017-03-09 10:42:12 -06002062 int i, cpu, uvhub_cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002063 struct bau_control *bcp;
2064
2065 for (i = 0; i < sdp->num_cpus; i++) {
2066 cpu = sdp->cpu_number[i];
2067 bcp = &per_cpu(bau_control, cpu);
2068 bcp->cpu = cpu;
2069 if (i == 0) {
2070 *smasterp = bcp;
2071 if (!(*hmasterp))
2072 *hmasterp = bcp;
2073 }
2074 bcp->cpus_in_uvhub = bdp->num_cpus;
2075 bcp->cpus_in_socket = sdp->num_cpus;
2076 bcp->socket_master = *smasterp;
2077 bcp->uvhub = bdp->uvhub;
Cliff Wickmanda87c932012-01-16 15:17:50 -06002078 if (is_uv1_hub())
Andrew Banman491bd882017-03-09 10:42:09 -06002079 bcp->uvhub_version = UV_BAU_V1;
Cliff Wickmanda87c932012-01-16 15:17:50 -06002080 else if (is_uv2_hub())
Andrew Banman491bd882017-03-09 10:42:09 -06002081 bcp->uvhub_version = UV_BAU_V2;
Cliff Wickmana26fd712014-05-14 16:15:47 -05002082 else if (is_uv3_hub())
Andrew Banman491bd882017-03-09 10:42:09 -06002083 bcp->uvhub_version = UV_BAU_V3;
Andrew Banman58d4ab42016-09-21 11:09:18 -05002084 else if (is_uv4_hub())
Andrew Banman491bd882017-03-09 10:42:09 -06002085 bcp->uvhub_version = UV_BAU_V4;
Cliff Wickmanda87c932012-01-16 15:17:50 -06002086 else {
Andrew Banman58d4ab42016-09-21 11:09:18 -05002087 pr_emerg("uvhub version not 1, 2, 3, or 4\n");
Cliff Wickmanda87c932012-01-16 15:17:50 -06002088 return 1;
2089 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002090 bcp->uvhub_master = *hmasterp;
Andrew Banmandfeb28f2017-03-09 10:42:12 -06002091 uvhub_cpu = uv_cpu_blade_processor_id(cpu);
2092 bcp->uvhub_cpu = uvhub_cpu;
2093
2094 /*
2095 * The ERROR and BUSY status registers are located pairwise over
2096 * the STATUS_0 and STATUS_1 mmrs; each an array[32] of 2 bits.
2097 */
2098 if (uvhub_cpu < UV_CPUS_PER_AS) {
2099 bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
2100 bcp->status_index = uvhub_cpu * UV_ACT_STATUS_SIZE;
2101 } else {
2102 bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
2103 bcp->status_index = (uvhub_cpu - UV_CPUS_PER_AS)
2104 * UV_ACT_STATUS_SIZE;
2105 }
Mike Travis5627a8252016-04-29 16:54:14 -05002106
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002107 if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05002108 pr_emerg("%d cpus per uvhub invalid\n",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002109 bcp->uvhub_cpu);
2110 return 1;
2111 }
2112 }
2113 return 0;
2114}
2115
2116/*
2117 * Summarize the blade and socket topology into the per_cpu structures.
2118 */
2119static int __init summarize_uvhub_sockets(int nuvhubs,
2120 struct uvhub_desc *uvhub_descs,
2121 unsigned char *uvhub_mask)
2122{
2123 int socket;
2124 int uvhub;
2125 unsigned short socket_mask;
2126
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05002127 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002128 struct uvhub_desc *bdp;
2129 struct bau_control *smaster = NULL;
2130 struct bau_control *hmaster = NULL;
2131
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05002132 if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
2133 continue;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002134
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002135 bdp = &uvhub_descs[uvhub];
Cliff Wickmana8328ee2010-06-02 16:22:02 -05002136 socket_mask = bdp->socket_mask;
2137 socket = 0;
2138 while (socket_mask) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002139 struct socket_desc *sdp;
2140 if ((socket_mask & 1)) {
2141 sdp = &bdp->socket[socket];
2142 if (scan_sock(sdp, bdp, &smaster, &hmaster))
Cliff Wickmancfa60912011-01-03 12:03:53 -06002143 return 1;
cpw@sgi.com9c9153d2011-06-21 07:21:28 -05002144 make_per_cpu_thp(smaster);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002145 }
2146 socket++;
Cliff Wickmana8328ee2010-06-02 16:22:02 -05002147 socket_mask = (socket_mask >> 1);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002148 }
cpw@sgi.com442d3922011-06-21 07:21:31 -05002149 make_per_hub_cpumask(hmaster);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002150 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002151 return 0;
2152}
2153
2154/*
2155 * initialize the bau_control structure for each cpu
2156 */
2157static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
2158{
2159 unsigned char *uvhub_mask;
2160 void *vp;
2161 struct uvhub_desc *uvhub_descs;
2162
Andrew Banmane879c112016-09-21 11:09:19 -05002163 if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
2164 timeout_us = calculate_destination_timeout();
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002165
2166 vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
2167 uvhub_descs = (struct uvhub_desc *)vp;
2168 memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
2169 uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
2170
2171 if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
cpw@sgi.combbd270e2011-06-21 07:21:32 -05002172 goto fail;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002173
2174 if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
cpw@sgi.combbd270e2011-06-21 07:21:32 -05002175 goto fail;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002176
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002177 kfree(uvhub_descs);
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05002178 kfree(uvhub_mask);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002179 init_per_cpu_tunables();
Cliff Wickmancfa60912011-01-03 12:03:53 -06002180 return 0;
cpw@sgi.combbd270e2011-06-21 07:21:32 -05002181
2182fail:
2183 kfree(uvhub_descs);
2184 kfree(uvhub_mask);
2185 return 1;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05002186}
Cliff Wickman18129242008-06-02 08:56:14 -05002187
Andrew Banman2620bbb2017-03-09 10:42:13 -06002188static const struct bau_operations uv1_bau_ops __initconst = {
Andrew Banman8e3b21b2017-03-09 10:42:11 -06002189 .bau_gpa_to_offset = uv_gpa_to_offset,
2190 .read_l_sw_ack = read_mmr_sw_ack,
2191 .read_g_sw_ack = read_gmmr_sw_ack,
2192 .write_l_sw_ack = write_mmr_sw_ack,
2193 .write_g_sw_ack = write_gmmr_sw_ack,
2194 .write_payload_first = write_mmr_payload_first,
2195 .write_payload_last = write_mmr_payload_last,
Andrew Banman2620bbb2017-03-09 10:42:13 -06002196 .wait_completion = uv1_wait_completion,
2197};
2198
2199static const struct bau_operations uv2_3_bau_ops __initconst = {
2200 .bau_gpa_to_offset = uv_gpa_to_offset,
2201 .read_l_sw_ack = read_mmr_sw_ack,
2202 .read_g_sw_ack = read_gmmr_sw_ack,
2203 .write_l_sw_ack = write_mmr_sw_ack,
2204 .write_g_sw_ack = write_gmmr_sw_ack,
2205 .write_payload_first = write_mmr_payload_first,
2206 .write_payload_last = write_mmr_payload_last,
2207 .wait_completion = uv2_3_wait_completion,
Andrew Banman8e3b21b2017-03-09 10:42:11 -06002208};
2209
2210static const struct bau_operations uv4_bau_ops __initconst = {
2211 .bau_gpa_to_offset = uv_gpa_to_soc_phys_ram,
2212 .read_l_sw_ack = read_mmr_proc_sw_ack,
2213 .read_g_sw_ack = read_gmmr_proc_sw_ack,
2214 .write_l_sw_ack = write_mmr_proc_sw_ack,
2215 .write_g_sw_ack = write_gmmr_proc_sw_ack,
2216 .write_payload_first = write_mmr_proc_payload_first,
2217 .write_payload_last = write_mmr_proc_payload_last,
Andrew Banman2f2a0332017-03-09 10:42:14 -06002218 .wait_completion = uv4_wait_completion,
Andrew Banman8e3b21b2017-03-09 10:42:11 -06002219};
2220
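/*
 * Sketch: the dispatch pattern the tables above enable.  Once 'ops' is
 * bound in uv_bau_init() below, hub-version differences collapse into
 * indirect calls instead of an is_uv*_hub() ladder at each use.
 */
static unsigned long example_read_sw_ack(void)
{
	return ops.read_l_sw_ack();	/* uv1/2/3 MMR or uv4 proc MMR */
}
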
Cliff Wickman18129242008-06-02 08:56:14 -05002221/*
2222 * Initialization of BAU-related structures
2223 */
Cliff Wickmanb194b1202008-06-12 08:23:48 -05002224static int __init uv_bau_init(void)
Cliff Wickman18129242008-06-02 08:56:14 -05002225{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002226 int uvhub;
2227 int pnode;
2228 int nuvhubs;
Rusty Russell2c74d662009-03-18 08:22:30 +10302229 int cur_cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002230 int cpus;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002231 int vector;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002232 cpumask_var_t *mask;
Cliff Wickman18129242008-06-02 08:56:14 -05002233
2234 if (!is_uv_system())
2235 return 0;
2236
Andrew Banman4f059d52016-09-21 11:09:21 -05002237 if (is_uv4_hub())
2238 ops = uv4_bau_ops;
2239 else if (is_uv3_hub())
Andrew Banman2620bbb2017-03-09 10:42:13 -06002240 ops = uv2_3_bau_ops;
Andrew Banman5e4f96f2016-09-21 11:09:16 -05002241 else if (is_uv2_hub())
Andrew Banman2620bbb2017-03-09 10:42:13 -06002242 ops = uv2_3_bau_ops;
Andrew Banman5e4f96f2016-09-21 11:09:16 -05002243 else if (is_uv1_hub())
Andrew Banman2620bbb2017-03-09 10:42:13 -06002244 ops = uv1_bau_ops;
Andrew Banman5e4f96f2016-09-21 11:09:16 -05002245
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002246 for_each_possible_cpu(cur_cpu) {
2247 mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
2248 zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
2249 }
Rusty Russell76ba0ec2009-03-13 14:49:57 +10302250
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002251 nuvhubs = uv_num_possible_blades();
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002252 congested_cycles = usec_2_cycles(congested_respns_us);
Cliff Wickman9674f352009-04-03 08:34:05 -05002253
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002254 uv_base_pnode = 0x7fffffff;
Cliff Wickman77ed23f2011-05-10 08:26:43 -05002255 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002256 cpus = uv_blade_nr_possible_cpus(uvhub);
2257 if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
2258 uv_base_pnode = uv_blade_to_pnode(uvhub);
Cliff Wickman77ed23f2011-05-10 08:26:43 -05002259 }
2260
Andrew Banmane879c112016-09-21 11:09:19 -05002261 /* software timeouts are not supported on UV4 */
2262 if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
2263 enable_timeouts();
Cliff Wickmand059f9f2012-01-16 15:18:48 -06002264
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002265 if (init_per_cpu(nuvhubs, uv_base_pnode)) {
Cliff Wickman26ef8572012-06-22 08:13:30 -05002266 set_bau_off();
2267 nobau_perm = 1;
Cliff Wickmancfa60912011-01-03 12:03:53 -06002268 return 0;
2269 }
Ingo Molnarb4c286e2008-06-18 14:28:19 +02002270
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002271 vector = UV_BAU_MESSAGE;
Cliff Wickmana26fd712014-05-14 16:15:47 -05002272 for_each_possible_blade(uvhub) {
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002273 if (uv_blade_nr_possible_cpus(uvhub))
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002274 init_uvhub(uvhub, vector, uv_base_pnode);
Cliff Wickmana26fd712014-05-14 16:15:47 -05002275 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002276
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002277 alloc_intr_gate(vector, uv_bau_message_intr1);
2278
2279 for_each_possible_blade(uvhub) {
Cliff Wickman93a7ca02010-07-16 10:11:21 -05002280 if (uv_blade_nr_possible_cpus(uvhub)) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002281 unsigned long val;
2282 unsigned long mmr;
Cliff Wickman93a7ca02010-07-16 10:11:21 -05002283 pnode = uv_blade_to_pnode(uvhub);
2284 /* INIT the bau */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002285 val = 1L << 63;
2286 write_gmmr_activation(pnode, val);
Cliff Wickman93a7ca02010-07-16 10:11:21 -05002287 mmr = 1; /* should be 1 to broadcast to both sockets */
Cliff Wickmanda87c932012-01-16 15:17:50 -06002288 if (!is_uv1_hub())
2289 write_mmr_data_broadcast(pnode, mmr);
Cliff Wickman93a7ca02010-07-16 10:11:21 -05002290 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002291 }
Ingo Molnarb4c286e2008-06-18 14:28:19 +02002292
Cliff Wickman18129242008-06-02 08:56:14 -05002293 return 0;
2294}
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002295core_initcall(uv_bau_init);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05002296fs_initcall(uv_ptc_init);
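/*
 * Ordering note (sketch): core_initcall() runs at an earlier initcall
 * level than fs_initcall(), so uv_bau_init() has populated the per-cpu
 * bau_control structures before uv_ptc_init() exposes them through
 * /proc and debugfs.
 */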