/*
 * SGI UltraViolet TLB flush routines.
 *
 * (c) 2008-2014 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>

static struct bau_operations ops;

static struct bau_operations uv123_bau_ops = {
	.bau_gpa_to_offset = uv_gpa_to_offset,
	.read_l_sw_ack = read_mmr_sw_ack,
	.read_g_sw_ack = read_gmmr_sw_ack,
	.write_l_sw_ack = write_mmr_sw_ack,
	.write_g_sw_ack = write_gmmr_sw_ack,
	.write_payload_first = write_mmr_payload_first,
	.write_payload_last = write_mmr_payload_last,
};

static struct bau_operations uv4_bau_ops = {
	.bau_gpa_to_offset = uv_gpa_to_soc_phys_ram,
	.read_l_sw_ack = read_mmr_proc_sw_ack,
	.read_g_sw_ack = read_gmmr_proc_sw_ack,
	.write_l_sw_ack = write_mmr_proc_sw_ack,
	.write_g_sw_ack = write_gmmr_proc_sw_ack,
	.write_payload_first = write_mmr_proc_payload_first,
	.write_payload_last = write_mmr_proc_payload_last,
};


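/*
 * Added commentary (not in the original source): during BAU init the
 * active 'ops' table is presumably filled in from one of the two sets
 * above according to the detected hub generation, roughly:
 *
 *	if (is_uv4_hub())
 *		ops = uv4_bau_ops;
 *	else
 *		ops = uv123_bau_ops;
 *
 * so the hot paths below can call ops.read_l_sw_ack() etc. without
 * re-checking the hub version on every use.
 */
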
/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {
		20,
		160,
		1280,
		10240,
		81920,
		655360,
		5242880,
		167772160
};
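
/*
 * Added note: each successive entry above is 8x the previous one (20ns,
 * 160ns, 1280ns, ...) except the last, which jumps 32x to ~167.8ms; the
 * 3-bit urgency field in the MMR named above selects among them.
 */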

static int timeout_us;
static bool nobau = true;
static int nobau_perm;
static cycles_t congested_cycles;

/* tunables: */
static int max_concurr = MAX_BAU_CONCURRENT;
static int max_concurr_const = MAX_BAU_CONCURRENT;
static int plugged_delay = PLUGGED_DELAY;
static int plugsb4reset = PLUGSB4RESET;
static int giveup_limit = GIVEUP_LIMIT;
static int timeoutsb4reset = TIMEOUTSB4RESET;
static int ipi_reset_limit = IPI_RESET_LIMIT;
static int complete_threshold = COMPLETE_THRESHOLD;
static int congested_respns_us = CONGESTED_RESPONSE_US;
static int congested_reps = CONGESTED_REPS;
static int disabled_period = DISABLED_PERIOD;

static struct tunables tunables[] = {
	{&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
	{&plugged_delay, PLUGGED_DELAY},
	{&plugsb4reset, PLUGSB4RESET},
	{&timeoutsb4reset, TIMEOUTSB4RESET},
	{&ipi_reset_limit, IPI_RESET_LIMIT},
	{&complete_threshold, COMPLETE_THRESHOLD},
	{&congested_respns_us, CONGESTED_RESPONSE_US},
	{&congested_reps, CONGESTED_REPS},
	{&disabled_period, DISABLED_PERIOD},
	{&giveup_limit, GIVEUP_LIMIT}
};
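
/*
 * Added note: each pair above is a tunable's address and its compile-time
 * default; the debugfs 'tunables' file (whose dentries are declared just
 * below) is presumably what reads and writes them, with max_concurr
 * required to stay at index [0] per the comment on that entry.
 */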

static struct dentry *tunables_dir;
static struct dentry *tunables_file;

/* these correspond to the statistics printed by ptc_seq_show() */
static char *stat_description[] = {
	"sent: number of shootdown messages sent",
	"stime: time spent sending messages",
	"numuvhubs: number of hubs targeted with shootdown",
	"numuvhubs16: number times 16 or more hubs targeted",
	"numuvhubs8: number times 8 or more hubs targeted",
	"numuvhubs4: number times 4 or more hubs targeted",
	"numuvhubs2: number times 2 or more hubs targeted",
	"numuvhubs1: number times 1 hub targeted",
	"numcpus: number of cpus targeted with shootdown",
	"dto: number of destination timeouts",
	"retries: destination timeout retries sent",
	"rok: destination timeouts successfully retried",
	"resetp: ipi-style resource resets for plugs",
	"resett: ipi-style resource resets for timeouts",
	"giveup: fall-backs to ipi-style shootdowns",
	"sto: number of source timeouts",
	"bz: number of stay-busy's",
	"throt: number times spun in throttle",
	"swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
	"recv: shootdown messages received",
	"rtime: time spent processing messages",
	"all: shootdown all-tlb messages",
	"one: shootdown one-tlb messages",
	"mult: interrupts that found multiple messages",
	"none: interrupts that found no messages",
	"retry: number of retry messages processed",
	"canc: number messages canceled by retries",
	"nocan: number retries that found nothing to cancel",
	"reset: number of ipi-style reset requests processed",
	"rcan: number messages canceled by reset requests",
	"disable: number times use of the BAU was disabled",
	"enable: number times use of the BAU was re-enabled"
};

static int __init setup_bau(char *arg)
{
	int result;

	if (!arg)
		return -EINVAL;

	result = strtobool(arg, &nobau);
	if (result)
		return result;

	/* we need to flip the logic here, so that bau=y sets nobau to false */
	nobau = !nobau;

	if (!nobau)
		pr_info("UV BAU Enabled\n");
	else
		pr_info("UV BAU Disabled\n");

	return 0;
}
early_param("bau", setup_bau);
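
/*
 * Added usage note: strtobool() accepts at least "1"/"y" and "0"/"n", so
 * booting with "bau=0" disables the BAU and "bau=1" enables it; the
 * inversion above is needed because the internal flag is negative
 * ("nobau").
 */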

/* base pnode in this partition */
static int uv_base_pnode __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);

static void
set_bau_on(void)
{
	int cpu;
	struct bau_control *bcp;

	if (nobau_perm) {
		pr_info("BAU not initialized; cannot be turned on\n");
		return;
	}
	nobau = false;
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->nobau = false;
	}
	pr_info("BAU turned on\n");
	return;
}

static void
set_bau_off(void)
{
	int cpu;
	struct bau_control *bcp;

	nobau = true;
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->nobau = true;
	}
	pr_info("BAU turned off\n");
	return;
}

/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */
static int __init uvhub_to_first_node(int uvhub)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (uvhub == b)
			return node;
	}
	return -1;
}

/*
 * Determine the apicid of the first cpu on a uvhub.
 */
static int __init uvhub_to_first_apicid(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
						int do_acknowledge)
{
	unsigned long dw;
	struct bau_pq_entry *msg;

	msg = mdp->msg;
	if (!msg->canceled && do_acknowledge) {
		dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
		ops.write_l_sw_ack(dw);
	}
	msg->replied_to = 1;
	msg->swack_vec = 0;
}
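
/*
 * Added note: the single write of (swack_vec << UV_SW_ACK_NPENDING) |
 * swack_vec above clears both the Timeout and the Pending bits for this
 * resource at once, covering both the normal and the timed-out case
 * described in the comment above reply_to_message().
 */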

/*
 * Process the receipt of a RETRY message
 */
static void bau_process_retry_msg(struct msg_desc *mdp,
					struct bau_control *bcp)
{
	int i;
	int cancel_count = 0;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *msg2;
	struct ptc_stats *stat = bcp->statp;

	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->queue_last)
			msg2 = mdp->queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->swack_vec) && ((msg2->swack_vec &
			msg->swack_vec) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			mmr = ops.read_l_sw_ack();
			msg_res = msg2->swack_vec;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				unsigned long mr;
				/*
				 * Is the resource timed out?
				 * Make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
				ops.write_l_sw_ack(mr);
			}
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
						int do_acknowledge)
{
	short socket_ack_count = 0;
	short *sp;
	struct atomic_short *asp;
	struct ptc_stats *stat = bcp->statp;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		stat->d_alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried. That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a swack message, so we have to reply to it.
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
	asp = (struct atomic_short *)sp;
	socket_ack_count = atom_asr(1, asp);
	if (socket_ack_count == bcp->cpus_in_socket) {
		int msg_ack_count;
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		*sp = 0;
		asp = (struct atomic_short *)&msg->acknowledge_count;
		msg_ack_count = atom_asr(socket_ack_count, asp);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 * (unless we are in the UV2 workaround)
			 */
			reply_to_message(mdp, bcp, do_acknowledge);
		}
	}

	return;
}
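
/*
 * Added summary: acknowledgment above is counted in two stages to limit
 * cross-socket cache-line traffic: each cpu increments its socket
 * master's per-slot count, the last cpu on the socket folds that total
 * into the message's acknowledge_count, and the last cpu on the hub
 * replies to the sender via reply_to_message().
 */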

/*
 * Determine the first cpu on a pnode.
 */
static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
{
	int cpu;
	struct hub_and_pnode *hpp;

	for_each_present_cpu(cpu) {
		hpp = &smaster->thp[cpu];
		if (pnode == hpp->pnode)
			return cpu;
	}
	return -1;
}

/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero swack_vec field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * swack resources.
 */
static void do_reset(void *ptr)
{
	int i;
	struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
	struct reset_args *rap = (struct reset_args *)ptr;
	struct bau_pq_entry *msg;
	struct ptc_stats *stat = bcp->statp;

	stat->d_resets++;
	/*
	 * We're looking for the given sender, and
	 * will free its swack resource.
	 * If all cpu's finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		unsigned long msg_res;
		/* do_reset: same conditions for cancellation as
		   bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->swack_vec) &&
		    (msg->msg_type != MSG_NOOP)) {
			unsigned long mmr;
			unsigned long mr;
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			/*
			 * only reset the resource if it is still pending
			 */
			mmr = ops.read_l_sw_ack();
			msg_res = msg->swack_vec;
			mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
			if (mmr & msg_res) {
				stat->d_rcanceled++;
				ops.write_l_sw_ack(mr);
			}
		}
	}
	return;
}

/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */
static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
{
	int pnode;
	int apnode;
	int maskbits;
	int sender = bcp->cpu;
	cpumask_t *mask = bcp->uvhub_master->cpumask;
	struct bau_control *smaster = bcp->socket_master;
	struct reset_args reset_args;

	reset_args.sender = sender;
	cpumask_clear(mask);
	/* find a single cpu for each uvhub in this distribution mask */
	maskbits = sizeof(struct pnmask) * BITSPERBYTE;
	/* each bit is a pnode relative to the partition base pnode */
	for (pnode = 0; pnode < maskbits; pnode++) {
		int cpu;
		if (!bau_uvhub_isset(pnode, distribution))
			continue;
		apnode = pnode + bcp->partition_base_pnode;
		cpu = pnode_to_first_cpu(apnode, smaster);
		cpumask_set_cpu(cpu, mask);
	}

	/* IPI all cpus; preemption is already disabled */
	smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
	return;
}

/*
 * Not to be confused with cycles_2_ns() from tsc.c; this gives a relative
 * number, not an absolute. It converts a duration in cycles to a duration in
 * ns.
 */
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data *data = cyc2ns_read_begin();
	unsigned long long ns;

	ns = mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);

	cyc2ns_read_end(data);
	return ns;
}

/*
 * The reverse of the above; converts a duration in ns to a duration in cycles.
 */
static inline unsigned long long ns_2_cycles(unsigned long long ns)
{
	struct cyc2ns_data *data = cyc2ns_read_begin();
	unsigned long long cyc;

	cyc = (ns << data->cyc2ns_shift) / data->cyc2ns_mul;

	cyc2ns_read_end(data);
	return cyc;
}

static inline unsigned long cycles_2_us(unsigned long long cyc)
{
	return cycles_2_ns(cyc) / NSEC_PER_USEC;
}

static inline cycles_t sec_2_cycles(unsigned long sec)
{
	return ns_2_cycles(sec * NSEC_PER_SEC);
}

static inline unsigned long long usec_2_cycles(unsigned long usec)
{
	return ns_2_cycles(usec * NSEC_PER_USEC);
}
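
/*
 * Added worked example (hypothetical 2.5 GHz TSC, i.e. 0.4 ns/cycle):
 * cycles_2_us(25000) ~= 10 and usec_2_cycles(10) ~= 25000. These helpers
 * convert durations only, not absolute timestamps; see the cycles_2_ns()
 * comment above.
 */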

/*
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * bau_flush_send_and_wait()
 */
static inline void quiesce_local_uvhub(struct bau_control *hmaster)
{
	atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

/*
 * mark this quiet-requestor as done
 */
static inline void end_uvhub_quiesce(struct bau_control *hmaster)
{
	atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
{
	unsigned long descriptor_status;

	descriptor_status = uv_read_local_mmr(mmr_offset);
	descriptor_status >>= right_shift;
	descriptor_status &= UV_ACT_STATUS_MASK;
	return descriptor_status;
}

/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 */
static int uv1_wait_completion(struct bau_desc *bau_desc,
				unsigned long mmr_offset, int right_shift,
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_status;
	cycles_t ttm;
	struct ptc_stats *stat = bcp->statp;

	descriptor_status = uv1_read_status(mmr_offset, right_shift);
	/* spin on the status MMR, waiting for it to go idle */
	while ((descriptor_status != DS_IDLE)) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available. As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if (descriptor_status == DS_SOURCE_TIMEOUT) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
			stat->s_dtimeout++;
			ttm = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending. In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}

			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
		}
		descriptor_status = uv1_read_status(mmr_offset, right_shift);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

/*
 * UV2 could have an extra bit of status in the ACTIVATION_STATUS_2 register.
 * But not currently used.
 */
static unsigned long uv2_3_read_status(unsigned long offset, int rshft, int desc)
{
	return ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK) << 1;
}
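
/*
 * Added note: the left shift by 1 above places the raw 2-bit hardware
 * field in the numeric range of the UV2H_DESC_* software status values;
 * normal_busy() below presumably applies the same shift for the same
 * reason before comparing against UV2H_DESC_BUSY.
 */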

/*
 * Return whether the status of the descriptor that is normally used for this
 * cpu (the one indexed by its hub-relative cpu number) is busy.
 * The status of the original 32 descriptors is always reflected in the 64
 * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
 * The bit provided by the activation_status_2 register is irrelevant to
 * the status if it is only being tested for busy or not busy.
 */
int normal_busy(struct bau_control *bcp)
{
	int cpu = bcp->uvhub_cpu;
	int mmr_offset;
	int right_shift;

	mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
	right_shift = cpu * UV_ACT_STATUS_SIZE;
	return (((((read_lmmr(mmr_offset) >> right_shift) &
		UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
}

/*
 * Entered when a bau descriptor has gone into a permanent busy wait because
 * of a hardware bug.
 * Workaround the bug.
 */
int handle_uv2_busy(struct bau_control *bcp)
{
	struct ptc_stats *stat = bcp->statp;

	stat->s_uv2_wars++;
	bcp->busy = 1;
	return FLUSH_GIVEUP;
}

static int uv2_3_wait_completion(struct bau_desc *bau_desc,
				unsigned long mmr_offset, int right_shift,
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_stat;
	cycles_t ttm;
	int desc = bcp->uvhub_cpu;
	long busy_reps = 0;
	struct ptc_stats *stat = bcp->statp;

	descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);

	/* spin on the status MMR, waiting for it to go idle */
	while (descriptor_stat != UV2H_DESC_IDLE) {
		if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT)) {
			/*
			 * A h/w bug on the destination side may
			 * have prevented the message being marked
			 * pending, thus it doesn't get replied to
			 * and gets continually nacked until it times
			 * out with a SOURCE_TIMEOUT.
			 */
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
			ttm = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending. In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 * Without using the extended status we have to
			 * deduce from the short time that this was a
			 * strong nack.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				stat->s_plugged++;
				/* FLUSH_RETRY_PLUGGED causes hang on boot */
				return FLUSH_GIVEUP;
			}
			stat->s_dtimeout++;
			bcp->conseccompletes = 0;
			/* FLUSH_RETRY_TIMEOUT causes hang on boot */
			return FLUSH_GIVEUP;
		} else {
			busy_reps++;
			if (busy_reps > 1000000) {
				/* not to hammer on the clock */
				busy_reps = 0;
				ttm = get_cycles();
				if ((ttm - bcp->send_message) > bcp->timeout_interval)
					return handle_uv2_busy(bcp);
			}
			/*
			 * descriptor_stat is still BUSY
			 */
			cpu_relax();
		}
		descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}
/*
 * There are 2 status registers; each an array[32] of 2 bits. Set up for
 * which register to read and position in that register based on cpu in
 * current hub.
 */
static int wait_completion(struct bau_desc *bau_desc, struct bau_control *bcp, long try)
{
	int right_shift;
	unsigned long mmr_offset;
	int desc = bcp->uvhub_cpu;

	if (desc < UV_CPUS_PER_AS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = desc * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
	}

	if (bcp->uvhub_version == 1)
		return uv1_wait_completion(bau_desc, mmr_offset, right_shift, bcp, try);
	else
		return uv2_3_wait_completion(bau_desc, mmr_offset, right_shift, bcp, try);
}
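
/*
 * Added example (assuming UV_CPUS_PER_AS == 32 and UV_ACT_STATUS_SIZE == 2):
 * hub-relative cpu 5 polls ACTIVATION_STATUS_0 at bit offset 10, while
 * cpu 40 polls ACTIVATION_STATUS_1 at bit offset (40 - 32) * 2 == 16.
 */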

/*
 * Our retries are blocked by all destination sw ack resources being
 * in use, and a timeout is pending. In that case hardware immediately
 * returns the ERROR that looks like a destination timeout.
 */
static void destination_plugged(struct bau_desc *bau_desc,
			struct bau_control *bcp,
			struct bau_control *hmaster, struct ptc_stats *stat)
{
	udelay(bcp->plugged_delay);
	bcp->plugged_tries++;

	if (bcp->plugged_tries >= bcp->plugsb4reset) {
		bcp->plugged_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_plug++;
	}
}

static void destination_timeout(struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)
{
	hmaster->max_concurr = 1;
	bcp->timeout_tries++;
	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
		bcp->timeout_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_timeout++;
	}
}

/*
 * Stop all cpus on a uvhub from using the BAU for a period of time.
 * This is reversed by check_enable.
 */
static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;
	struct bau_control *hmaster;
	cycles_t tm1;

	hmaster = bcp->uvhub_master;
	spin_lock(&hmaster->disable_lock);
	if (!bcp->baudisabled) {
		stat->s_bau_disabled++;
		tm1 = get_cycles();
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			if (tbcp->uvhub_master == hmaster) {
				tbcp->baudisabled = 1;
				tbcp->set_bau_on_time =
					tm1 + bcp->disabled_period;
			}
		}
	}
	spin_unlock(&hmaster->disable_lock);
}

static void count_max_concurr(int stat, struct bau_control *bcp,
			struct bau_control *hmaster)
{
	bcp->plugged_tries = 0;
	bcp->timeout_tries = 0;
	if (stat != FLUSH_COMPLETE)
		return;
	if (bcp->conseccompletes <= bcp->complete_threshold)
		return;
	if (hmaster->max_concurr >= hmaster->max_concurr_const)
		return;
	hmaster->max_concurr++;
}

static void record_send_stats(cycles_t time1, cycles_t time2,
		struct bau_control *bcp, struct ptc_stats *stat,
		int completion_status, int try)
{
	cycles_t elapsed;

	if (time2 > time1) {
		elapsed = time2 - time1;
		stat->s_time += elapsed;

		if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
			bcp->period_requests++;
			bcp->period_time += elapsed;
			if ((elapsed > congested_cycles) &&
			    (bcp->period_requests > bcp->cong_reps) &&
			    ((bcp->period_time / bcp->period_requests) >
							congested_cycles)) {
				stat->s_congested++;
				disable_for_period(bcp, stat);
			}
		}
	} else
		stat->s_requestor--;

	if (completion_status == FLUSH_COMPLETE && try > 1)
		stat->s_retriesok++;
	else if (completion_status == FLUSH_GIVEUP) {
		stat->s_giveup++;
		if (get_cycles() > bcp->period_end)
			bcp->period_giveups = 0;
		bcp->period_giveups++;
		if (bcp->period_giveups == 1)
			bcp->period_end = get_cycles() + bcp->disabled_period;
		if (bcp->period_giveups > bcp->giveup_limit) {
			disable_for_period(bcp, stat);
			stat->s_giveuplimit++;
		}
	}
}
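
/*
 * Added summary: a first-try completed request counts toward congestion
 * above only when its own latency exceeds congested_cycles, more than
 * cong_reps requests have accumulated in the period, and the period's
 * mean latency also exceeds congested_cycles; only then (or after
 * giveup_limit give-ups in one period) is the hub disabled for a period.
 */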

/*
 * Because of a uv1 hardware bug only a limited number of concurrent
 * requests can be made.
 */
static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
{
	spinlock_t *lock = &hmaster->uvhub_lock;
	atomic_t *v;

	v = &hmaster->active_descriptor_count;
	if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
		stat->s_throttles++;
		do {
			cpu_relax();
		} while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
	}
}

/*
 * Handle the completion status of a message send.
 */
static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)
{
	if (completion_status == FLUSH_RETRY_PLUGGED)
		destination_plugged(bau_desc, bcp, hmaster, stat);
	else if (completion_status == FLUSH_RETRY_TIMEOUT)
		destination_timeout(bau_desc, bcp, hmaster, stat);
}

/*
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to including
 * cpus that are on the local uvhub.
 *
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */
int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp,
	struct bau_desc *bau_desc)
{
	int seq_number = 0;
	int completion_stat = 0;
	int uv1 = 0;
	long try = 0;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *hmaster = bcp->uvhub_master;
	struct uv1_bau_msg_header *uv1_hdr = NULL;
	struct uv2_3_bau_msg_header *uv2_3_hdr = NULL;

	if (bcp->uvhub_version == 1) {
		uv1 = 1;
		uv1_throttle(hmaster, stat);
	}

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	time1 = get_cycles();
	if (uv1)
		uv1_hdr = &bau_desc->header.uv1_hdr;
	else
		/* uv2 and uv3 */
		uv2_3_hdr = &bau_desc->header.uv2_3_hdr;

	do {
		if (try == 0) {
			if (uv1)
				uv1_hdr->msg_type = MSG_REGULAR;
			else
				uv2_3_hdr->msg_type = MSG_REGULAR;
			seq_number = bcp->message_number++;
		} else {
			if (uv1)
				uv1_hdr->msg_type = MSG_RETRY;
			else
				uv2_3_hdr->msg_type = MSG_RETRY;
			stat->s_retry_messages++;
		}

		if (uv1)
			uv1_hdr->sequence = seq_number;
		else
			uv2_3_hdr->sequence = seq_number;
		index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
		bcp->send_message = get_cycles();

		write_mmr_activation(index);

		try++;
		completion_stat = wait_completion(bau_desc, bcp, try);

		handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);

		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
			bcp->ipi_attempts = 0;
			stat->s_overipilimit++;
			completion_stat = FLUSH_GIVEUP;
			break;
		}
		cpu_relax();
	} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
		 (completion_stat == FLUSH_RETRY_TIMEOUT));

	time2 = get_cycles();

	count_max_concurr(completion_stat, bcp, hmaster);

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	atomic_dec(&hmaster->active_descriptor_count);

	record_send_stats(time1, time2, bcp, stat, completion_stat, try);

	if (completion_stat == FLUSH_GIVEUP)
		/* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
		return 1;
	return 0;
}
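
/*
 * Added summary: the send loop above resends with MSG_RETRY while
 * wait_completion() reports PLUGGED or TIMEOUT, lets handle_cmplt()
 * escalate to IPI-driven resets of swack resources, and forces
 * FLUSH_GIVEUP (the IPI fallback) once ipi_reset_limit attempts have
 * been used.
 */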

/*
 * The BAU is disabled for this uvhub. When the disabled time period has
 * expired re-enable it.
 * Return 0 if it is re-enabled for all cpus on this uvhub.
 */
static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;
	struct bau_control *hmaster;

	hmaster = bcp->uvhub_master;
	spin_lock(&hmaster->disable_lock);
	if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
		stat->s_bau_reenabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			if (tbcp->uvhub_master == hmaster) {
				tbcp->baudisabled = 0;
				tbcp->period_requests = 0;
				tbcp->period_time = 0;
				tbcp->period_giveups = 0;
			}
		}
		spin_unlock(&hmaster->disable_lock);
		return 0;
	}
	spin_unlock(&hmaster->disable_lock);
	return -1;
}

static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
				int remotes, struct bau_desc *bau_desc)
{
	stat->s_requestor++;
	stat->s_ntargcpu += remotes + locals;
	stat->s_ntargremotes += remotes;
	stat->s_ntarglocals += locals;

	/* uvhub statistics */
	hubs = bau_uvhub_weight(&bau_desc->distribution);
	if (locals) {
		stat->s_ntarglocaluvhub++;
		stat->s_ntargremoteuvhub += (hubs - 1);
	} else
		stat->s_ntargremoteuvhub += hubs;

	stat->s_ntarguvhub += hubs;

	if (hubs >= 16)
		stat->s_ntarguvhub16++;
	else if (hubs >= 8)
		stat->s_ntarguvhub8++;
	else if (hubs >= 4)
		stat->s_ntarguvhub4++;
	else if (hubs >= 2)
		stat->s_ntarguvhub2++;
	else
		stat->s_ntarguvhub1++;
}

/*
 * Translate a cpu mask to the uvhub distribution mask in the BAU
 * activation descriptor.
 */
static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
			struct bau_desc *bau_desc, int *localsp, int *remotesp)
{
	int cpu;
	int pnode;
	int cnt = 0;
	struct hub_and_pnode *hpp;

	for_each_cpu(cpu, flush_mask) {
		/*
		 * The distribution vector is a bit map of pnodes, relative
		 * to the partition base pnode (and the partition base nasid
		 * in the header).
		 * Translate cpu to pnode and hub using a local memory array.
		 */
		hpp = &bcp->socket_master->thp[cpu];
		pnode = hpp->pnode - bcp->partition_base_pnode;
		bau_uvhub_set(pnode, &bau_desc->distribution);
		cnt++;
		if (hpp->uvhub == bcp->uvhub)
			(*localsp)++;
		else
			(*remotesp)++;
	}
	if (!cnt)
		return 1;
	return 0;
}
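
/*
 * Added example: with partition_base_pnode == 8, a target cpu living on
 * pnode 11 sets bit 3 of the descriptor's distribution map; the base
 * itself is carried separately as the base nasid in the message header,
 * per the comment above.
 */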

/*
 * globally purge translation cache of a virtual address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @start: start virtual address to be removed from TLB
 * @end: end virtual address to be removed from TLB
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct. This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done. The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end,
					unsigned int cpu)
{
	int locals = 0;
	int remotes = 0;
	int hubs = 0;
	struct bau_desc *bau_desc;
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;
	unsigned long descriptor_status;
	unsigned long status;

	bcp = &per_cpu(bau_control, cpu);

	if (bcp->nobau)
		return cpumask;

	stat = bcp->statp;
	stat->s_enters++;

	if (bcp->busy) {
		descriptor_status =
			read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
		status = ((descriptor_status >> (bcp->uvhub_cpu *
			UV_ACT_STATUS_SIZE)) & UV_ACT_STATUS_MASK) << 1;
		if (status == UV2H_DESC_BUSY)
			return cpumask;
		bcp->busy = 0;
	}

	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
		if (check_enable(bcp, stat)) {
			stat->s_ipifordisabled++;
			return cpumask;
		}
	}

	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
	 * cpu mask. All cpus are converted to uvhubs and copied to the
	 * activation descriptor.
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
	/* don't actually do a shootdown of the local cpu */
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

	if (cpumask_test_cpu(cpu, cpumask))
		stat->s_ntargself++;

	bau_desc = bcp->descriptor_base;
	bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu);
	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
	if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
		return NULL;

	record_send_statistics(stat, locals, hubs, remotes, bau_desc);

	if (!end || (end - start) <= PAGE_SIZE)
		bau_desc->payload.address = start;
	else
		bau_desc->payload.address = TLB_FLUSH_ALL;
	bau_desc->payload.sending_cpu = cpu;
	/*
	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
	 * or 1 if it gave up and the original cpumask should be returned.
	 */
	if (!uv_flush_send_and_wait(flush_mask, bcp, bau_desc))
		return NULL;
	else
		return cpumask;
}
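
/*
 * Added usage note: the arch tlb-flush path is expected to call this on
 * UV systems (e.g. from its flush_tlb_others() logic) and to IPI any cpus
 * left in the returned mask itself; a NULL return means the BAU delivered
 * the shootdown everywhere.
 */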
1187
1188/*
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001189 * Search the message queue for any 'other' unprocessed message with the
1190 * same software acknowledge resource bit vector as the 'msg' message.
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001191 */
1192struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001193 struct bau_control *bcp)
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001194{
1195 struct bau_pq_entry *msg_next = msg + 1;
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001196 unsigned char swack_vec = msg->swack_vec;
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001197
1198 if (msg_next > bcp->queue_last)
1199 msg_next = bcp->queue_first;
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001200 while (msg_next != msg) {
1201 if ((msg_next->canceled == 0) && (msg_next->replied_to == 0) &&
1202 (msg_next->swack_vec == swack_vec))
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001203 return msg_next;
1204 msg_next++;
1205 if (msg_next > bcp->queue_last)
1206 msg_next = bcp->queue_first;
1207 }
1208 return NULL;
1209}
1210
1211/*
1212 * UV2 needs to work around a bug in which an arriving message has not
1213 * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
1214 * Such a message must be ignored.
1215 */
1216void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
1217{
1218 unsigned long mmr_image;
1219 unsigned char swack_vec;
1220 struct bau_pq_entry *msg = mdp->msg;
1221 struct bau_pq_entry *other_msg;
1222
Andrew Banman21e3f122016-09-21 11:09:17 -05001223 mmr_image = ops.read_l_sw_ack();
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001224 swack_vec = msg->swack_vec;
1225
1226 if ((swack_vec & mmr_image) == 0) {
1227 /*
1228 * This message was assigned a swack resource, but no
1229	 * reserved acknowledgment is pending.
1230 * The bug has prevented this message from setting the MMR.
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001231 */
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001232 /*
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001233 * Some message has set the MMR 'pending' bit; it might have
1234 * been another message. Look for that message.
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001235 */
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001236 other_msg = find_another_by_swack(msg, bcp);
1237 if (other_msg) {
1238 /*
1239 * There is another. Process this one but do not
1240 * ack it.
1241 */
1242 bau_process_message(mdp, bcp, 0);
1243 /*
1244 * Let the natural processing of that other message
1245 * acknowledge it. Don't get the processing of sw_ack's
1246 * out of order.
1247 */
1248 return;
1249 }
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001250 }
1251
1252 /*
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001253 * Either the MMR shows this one pending a reply or there is no
1254 * other message using this sw_ack, so it is safe to acknowledge it.
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001255 */
1256 bau_process_message(mdp, bcp, 1);
1257
1258 return;
1259}
1260
1261/*
Cliff Wickman18129242008-06-02 08:56:14 -05001262 * The BAU message interrupt comes here. (registered by set_intr_gate)
1263 * See entry_64.S
1264 *
1265 * We received a broadcast assist message.
1266 *
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001267 * Interrupts are disabled; this interrupt could represent
Cliff Wickman18129242008-06-02 08:56:14 -05001268 * the receipt of several messages.
1269 *
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001270 * All cores/threads on this hub get this interrupt.
1271 * The last one to see it does the software ack.
Cliff Wickman18129242008-06-02 08:56:14 -05001272 * (the resource will not be freed until noninterruptible cpus see this
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001273 * interrupt; hardware may time out the s/w ack and reply ERROR)
Cliff Wickman18129242008-06-02 08:56:14 -05001274 */
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001275void uv_bau_message_interrupt(struct pt_regs *regs)
Cliff Wickman18129242008-06-02 08:56:14 -05001276{
Cliff Wickman18129242008-06-02 08:56:14 -05001277 int count = 0;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001278 cycles_t time_start;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001279 struct bau_pq_entry *msg;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001280 struct bau_control *bcp;
1281 struct ptc_stats *stat;
1282 struct msg_desc msgdesc;
Cliff Wickman18129242008-06-02 08:56:14 -05001283
Cliff Wickman88ed9dd2012-01-16 15:21:46 -06001284 ack_APIC_irq();
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001285 time_start = get_cycles();
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001286
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001287 bcp = &per_cpu(bau_control, smp_processor_id());
Cliff Wickman712157a2010-06-02 16:22:02 -05001288 stat = bcp->statp;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001289
1290 msgdesc.queue_first = bcp->queue_first;
1291 msgdesc.queue_last = bcp->queue_last;
1292
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001293 msg = bcp->bau_msg_head;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001294 while (msg->swack_vec) {
Cliff Wickman18129242008-06-02 08:56:14 -05001295 count++;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001296
1297 msgdesc.msg_slot = msg - msgdesc.queue_first;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001298 msgdesc.msg = msg;
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001299 if (bcp->uvhub_version == 2)
1300 process_uv2_message(&msgdesc, bcp);
1301 else
Cliff Wickmana26fd712014-05-14 16:15:47 -05001302 /* no error workaround for uv1 or uv3 */
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001303 bau_process_message(&msgdesc, bcp, 1);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001304
Cliff Wickman18129242008-06-02 08:56:14 -05001305 msg++;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001306 if (msg > msgdesc.queue_last)
1307 msg = msgdesc.queue_first;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001308 bcp->bau_msg_head = msg;
Cliff Wickman18129242008-06-02 08:56:14 -05001309 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001310 stat->d_time += (get_cycles() - time_start);
Cliff Wickman18129242008-06-02 08:56:14 -05001311 if (!count)
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001312 stat->d_nomsg++;
Cliff Wickman18129242008-06-02 08:56:14 -05001313 else if (count > 1)
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001314 stat->d_multmsg++;
Cliff Wickman18129242008-06-02 08:56:14 -05001315}
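
/*
 * Note: a zeroed swack_vec marks an empty payload-queue slot (pq_init()
 * below memsets the queue, leaving every msg_type as MSG_NOOP), so the
 * while loop above drains all pending entries, wrapping circularly from
 * queue_last back to queue_first.
 */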
1316
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001317/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001318 * Each target uvhub (i.e. a uvhub that has cpus) needs to have
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001319 * shootdown message timeouts enabled. The timeout does not cause
1320 * an interrupt, but causes an error message to be returned to
1321 * the sender.
1322 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001323static void __init enable_timeouts(void)
Cliff Wickman18129242008-06-02 08:56:14 -05001324{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001325 int uvhub;
1326 int nuvhubs;
Cliff Wickman18129242008-06-02 08:56:14 -05001327 int pnode;
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001328 unsigned long mmr_image;
Cliff Wickman18129242008-06-02 08:56:14 -05001329
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001330 nuvhubs = uv_num_possible_blades();
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001331
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001332 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
1333 if (!uv_blade_nr_possible_cpus(uvhub))
Cliff Wickman18129242008-06-02 08:56:14 -05001334 continue;
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001335
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001336 pnode = uv_blade_to_pnode(uvhub);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001337 mmr_image = read_mmr_misc_control(pnode);
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001338 /*
1339 * Set the timeout period and then lock it in, in three
1340	 * steps; this captures and locks in the period.
1341 *
1342 * To program the period, the SOFT_ACK_MODE must be off.
1343 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001344 mmr_image &= ~(1L << SOFTACK_MSHIFT);
1345 write_mmr_misc_control(pnode, mmr_image);
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001346 /*
1347 * Set the 4-bit period.
1348 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001349 mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
1350 mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
1351 write_mmr_misc_control(pnode, mmr_image);
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001352 /*
Jack Steiner2a919592011-05-11 12:50:28 -05001353 * UV1:
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001354 * Subsequent reversals of the timebase bit (3) cause an
1355 * immediate timeout of one or all INTD resources as
1356 * indicated in bits 2:0 (7 causes all of them to timeout).
1357 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001358 mmr_image |= (1L << SOFTACK_MSHIFT);
Jack Steiner2a919592011-05-11 12:50:28 -05001359 if (is_uv2_hub()) {
Cliff Wickmana26fd712014-05-14 16:15:47 -05001360 /* do not touch the legacy mode bit */
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001361 /* hw bug workaround; do not use extended status */
1362 mmr_image &= ~(1L << UV2_EXT_SHFT);
Cliff Wickmana26fd712014-05-14 16:15:47 -05001363 } else if (is_uv3_hub()) {
1364 mmr_image &= ~(1L << PREFETCH_HINT_SHFT);
1365 mmr_image |= (1L << SB_STATUS_SHFT);
Jack Steiner2a919592011-05-11 12:50:28 -05001366 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001367 write_mmr_misc_control(pnode, mmr_image);
Cliff Wickman18129242008-06-02 08:56:14 -05001368 }
Cliff Wickman18129242008-06-02 08:56:14 -05001369}
1370
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001371static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
Cliff Wickman18129242008-06-02 08:56:14 -05001372{
1373 if (*offset < num_possible_cpus())
1374 return offset;
1375 return NULL;
1376}
1377
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001378static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
Cliff Wickman18129242008-06-02 08:56:14 -05001379{
1380 (*offset)++;
1381 if (*offset < num_possible_cpus())
1382 return offset;
1383 return NULL;
1384}
1385
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001386static void ptc_seq_stop(struct seq_file *file, void *data)
Cliff Wickman18129242008-06-02 08:56:14 -05001387{
1388}
1389
1390/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001391 * Display the statistics through /proc/sgi_uv/ptc_statistics
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001392 * 'data' points to the cpu number
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001393 * Note: see the descriptions in stat_description[].
Cliff Wickman18129242008-06-02 08:56:14 -05001394 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001395static int ptc_seq_show(struct seq_file *file, void *data)
Cliff Wickman18129242008-06-02 08:56:14 -05001396{
1397 struct ptc_stats *stat;
Cliff Wickman26ef8572012-06-22 08:13:30 -05001398 struct bau_control *bcp;
Cliff Wickman18129242008-06-02 08:56:14 -05001399 int cpu;
1400
1401 cpu = *(loff_t *)data;
Cliff Wickman18129242008-06-02 08:56:14 -05001402 if (!cpu) {
Rasmus Villemoes37367082014-11-28 22:03:41 +01001403 seq_puts(file,
1404 "# cpu bauoff sent stime self locals remotes ncpus localhub ");
1405 seq_puts(file, "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
1406 seq_puts(file,
1407 "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries ");
1408 seq_puts(file,
1409 "rok resetp resett giveup sto bz throt disable ");
1410 seq_puts(file,
1411 "enable wars warshw warwaits enters ipidis plugged ");
1412 seq_puts(file,
1413 "ipiover glim cong swack recv rtime all one mult ");
1414 seq_puts(file, "none retry canc nocan reset rcan\n");
Cliff Wickman18129242008-06-02 08:56:14 -05001415 }
1416 if (cpu < num_possible_cpus() && cpu_online(cpu)) {
Cliff Wickman26ef8572012-06-22 08:13:30 -05001417 bcp = &per_cpu(bau_control, cpu);
James Custerfa2a79ce2014-11-02 12:16:39 -06001418 if (bcp->nobau) {
1419 seq_printf(file, "cpu %d bau disabled\n", cpu);
1420 return 0;
1421 }
Cliff Wickman26ef8572012-06-22 08:13:30 -05001422 stat = bcp->statp;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001423 /* source side statistics */
1424 seq_printf(file,
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001425 "cpu %d %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
Cliff Wickman26ef8572012-06-22 08:13:30 -05001426 cpu, bcp->nobau, stat->s_requestor,
1427 cycles_2_us(stat->s_time),
Cliff Wickman450a0072010-06-02 16:22:02 -05001428 stat->s_ntargself, stat->s_ntarglocals,
1429 stat->s_ntargremotes, stat->s_ntargcpu,
1430 stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
1431 stat->s_ntarguvhub, stat->s_ntarguvhub16);
Cliff Wickmanb54bd9b2012-01-16 15:22:38 -06001432 seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001433 stat->s_ntarguvhub8, stat->s_ntarguvhub4,
1434 stat->s_ntarguvhub2, stat->s_ntarguvhub1,
Cliff Wickmanb54bd9b2012-01-16 15:22:38 -06001435 stat->s_dtimeout, stat->s_strongnacks);
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001436 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001437 stat->s_retry_messages, stat->s_retriesok,
1438 stat->s_resets_plug, stat->s_resets_timeout,
1439 stat->s_giveup, stat->s_stimeout,
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001440 stat->s_busy, stat->s_throttles);
1441 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
1442 stat->s_bau_disabled, stat->s_bau_reenabled,
1443 stat->s_uv2_wars, stat->s_uv2_wars_hw,
1444 stat->s_uv2_war_waits, stat->s_enters,
1445 stat->s_ipifordisabled, stat->s_plugged,
1446 stat->s_overipilimit, stat->s_giveuplimit,
1447 stat->s_congested);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001448
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001449 /* destination side statistics */
1450 seq_printf(file,
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001451 "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
Andrew Banman21e3f122016-09-21 11:09:17 -05001452 ops.read_g_sw_ack(uv_cpu_to_pnode(cpu)),
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001453 stat->d_requestee, cycles_2_us(stat->d_time),
1454 stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
1455 stat->d_nomsg, stat->d_retries, stat->d_canceled,
1456 stat->d_nocanceled, stat->d_resets,
1457 stat->d_rcanceled);
Cliff Wickman18129242008-06-02 08:56:14 -05001458 }
Cliff Wickman18129242008-06-02 08:56:14 -05001459 return 0;
1460}
1461
1462/*
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001463 * Display the tunables through debugfs
1464 */
1465static ssize_t tunables_read(struct file *file, char __user *userbuf,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001466 size_t count, loff_t *ppos)
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001467{
Dan Carpenterb365a852010-09-29 10:41:05 +02001468 char *buf;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001469 int ret;
1470
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001471 buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d %d\n",
1472 "max_concur plugged_delay plugsb4reset timeoutsb4reset",
1473 "ipi_reset_limit complete_threshold congested_response_us",
1474 "congested_reps disabled_period giveup_limit",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001475 max_concurr, plugged_delay, plugsb4reset,
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001476 timeoutsb4reset, ipi_reset_limit, complete_threshold,
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001477 congested_respns_us, congested_reps, disabled_period,
1478 giveup_limit);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001479
Dan Carpenterb365a852010-09-29 10:41:05 +02001480 if (!buf)
1481 return -ENOMEM;
1482
1483 ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
1484 kfree(buf);
1485 return ret;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001486}
1487
1488/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001489 * handle a write to /proc/sgi_uv/ptc_statistics
1490 * -1: reset the statistics; "on"/"off": enable or disable the BAU
Cliff Wickman18129242008-06-02 08:56:14 -05001491 * 0: display meaning of the statistics
Cliff Wickman18129242008-06-02 08:56:14 -05001492 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001493static ssize_t ptc_proc_write(struct file *file, const char __user *user,
1494 size_t count, loff_t *data)
Cliff Wickman18129242008-06-02 08:56:14 -05001495{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001496 int cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001497 int i;
1498 int elements;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001499 long input_arg;
Cliff Wickman18129242008-06-02 08:56:14 -05001500 char optstr[64];
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001501 struct ptc_stats *stat;
Cliff Wickman18129242008-06-02 08:56:14 -05001502
Cliff Wickmane7eb8722008-06-23 08:32:25 -05001503 if (count == 0 || count > sizeof(optstr))
Cliff Wickmancef53272008-06-19 11:16:24 -05001504 return -EINVAL;
Cliff Wickman18129242008-06-02 08:56:14 -05001505 if (copy_from_user(optstr, user, count))
1506 return -EFAULT;
1507 optstr[count - 1] = '\0';
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001508
Cliff Wickman26ef8572012-06-22 08:13:30 -05001509 if (!strcmp(optstr, "on")) {
1510 set_bau_on();
1511 return count;
1512 } else if (!strcmp(optstr, "off")) {
1513 set_bau_off();
1514 return count;
1515 }
1516
Daniel Walter164109e2014-08-08 14:24:03 -07001517 if (kstrtol(optstr, 10, &input_arg) < 0) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001518 pr_debug("%s is invalid\n", optstr);
Cliff Wickman18129242008-06-02 08:56:14 -05001519 return -EINVAL;
1520 }
1521
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001522 if (input_arg == 0) {
Sasha Levin64441742012-12-20 14:11:34 -05001523 elements = ARRAY_SIZE(stat_description);
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001524 pr_debug("# cpu: cpu number\n");
1525 pr_debug("Sender statistics:\n");
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001526 for (i = 0; i < elements; i++)
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001527 pr_debug("%s\n", stat_description[i]);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001528 } else if (input_arg == -1) {
1529 for_each_present_cpu(cpu) {
1530 stat = &per_cpu(ptcstats, cpu);
1531 memset(stat, 0, sizeof(struct ptc_stats));
1532 }
Cliff Wickman18129242008-06-02 08:56:14 -05001533 }
1534
1535 return count;
1536}
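
/*
 * Usage sketch (assuming the proc file exists on a UV system):
 *
 *	echo 0 > /proc/sgi_uv/ptc_statistics	# print field meanings
 *	echo -1 > /proc/sgi_uv/ptc_statistics	# zero all counters
 *	echo on > /proc/sgi_uv/ptc_statistics	# turn the BAU on
 *	echo off > /proc/sgi_uv/ptc_statistics	# turn the BAU off
 */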
1537
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001538static int local_atoi(const char *name)
1539{
1540 int val = 0;
1541
1542 for (;; name++) {
1543 switch (*name) {
1544 case '0' ... '9':
1545 val = 10*val+(*name-'0');
1546 break;
1547 default:
1548 return val;
1549 }
1550 }
1551}
1552
1553/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001554 * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
1555 * Zero values reset them to defaults.
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001556 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001557static int parse_tunables_write(struct bau_control *bcp, char *instr,
1558 int count)
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001559{
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001560 char *p;
1561 char *q;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001562 int cnt = 0;
1563 int val;
Sasha Levin64441742012-12-20 14:11:34 -05001564 int e = ARRAY_SIZE(tunables);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001565
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001566 p = instr + strspn(instr, WHITESPACE);
1567 q = p;
1568 for (; *p; p = q + strspn(q, WHITESPACE)) {
1569 q = p + strcspn(p, WHITESPACE);
1570 cnt++;
1571 if (q == p)
1572 break;
1573 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001574 if (cnt != e) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001575 pr_info("bau tunable error: should be %d values\n", e);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001576 return -EINVAL;
1577 }
1578
1579 p = instr + strspn(instr, WHITESPACE);
1580 q = p;
1581 for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
1582 q = p + strcspn(p, WHITESPACE);
1583 val = local_atoi(p);
1584 switch (cnt) {
1585 case 0:
1586 if (val == 0) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001587 max_concurr = MAX_BAU_CONCURRENT;
1588 max_concurr_const = MAX_BAU_CONCURRENT;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001589 continue;
1590 }
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001591 if (val < 1 || val > bcp->cpus_in_uvhub) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001592 pr_debug(
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001593 "Error: BAU max concurrent %d is invalid\n",
1594 val);
1595 return -EINVAL;
1596 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001597 max_concurr = val;
1598 max_concurr_const = val;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001599 continue;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001600 default:
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001601 if (val == 0)
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001602 *tunables[cnt].tunp = tunables[cnt].deflt;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001603 else
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001604 *tunables[cnt].tunp = val;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001605 continue;
1606 }
1607 if (q == p)
1608 break;
1609 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001610 return 0;
1611}
1612
1613/*
1614 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
1615 */
1616static ssize_t tunables_write(struct file *file, const char __user *user,
1617 size_t count, loff_t *data)
1618{
1619 int cpu;
1620 int ret;
1621 char instr[100];
1622 struct bau_control *bcp;
1623
1624 if (count == 0 || count > sizeof(instr)-1)
1625 return -EINVAL;
1626 if (copy_from_user(instr, user, count))
1627 return -EFAULT;
1628
1629 instr[count] = '\0';
1630
cpw@sgi.com00b30cf2011-06-21 07:21:26 -05001631 cpu = get_cpu();
1632 bcp = &per_cpu(bau_control, cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001633 ret = parse_tunables_write(bcp, instr, count);
cpw@sgi.com00b30cf2011-06-21 07:21:26 -05001634 put_cpu();
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001635 if (ret)
1636 return ret;
1637
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001638 for_each_present_cpu(cpu) {
1639 bcp = &per_cpu(bau_control, cpu);
Andrew Banman67492c82016-09-21 11:09:12 -05001640 bcp->max_concurr = max_concurr;
1641 bcp->max_concurr_const = max_concurr;
1642 bcp->plugged_delay = plugged_delay;
1643 bcp->plugsb4reset = plugsb4reset;
1644 bcp->timeoutsb4reset = timeoutsb4reset;
1645 bcp->ipi_reset_limit = ipi_reset_limit;
1646 bcp->complete_threshold = complete_threshold;
1647 bcp->cong_response_us = congested_respns_us;
1648 bcp->cong_reps = congested_reps;
1649 bcp->disabled_period = sec_2_cycles(disabled_period);
1650 bcp->giveup_limit = giveup_limit;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001651 }
1652 return count;
1653}
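
/*
 * Usage sketch: ten values, in the order printed by tunables_read();
 * zeros restore the defaults, e.g.
 *
 *	echo "0 0 0 0 0 0 0 0 0 0" > /sys/kernel/debug/sgi_uv/bau_tunables
 */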
1654
Cliff Wickman18129242008-06-02 08:56:14 -05001655static const struct seq_operations uv_ptc_seq_ops = {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001656 .start = ptc_seq_start,
1657 .next = ptc_seq_next,
1658 .stop = ptc_seq_stop,
1659 .show = ptc_seq_show
Cliff Wickman18129242008-06-02 08:56:14 -05001660};
1661
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001662static int ptc_proc_open(struct inode *inode, struct file *file)
Cliff Wickman18129242008-06-02 08:56:14 -05001663{
1664 return seq_open(file, &uv_ptc_seq_ops);
1665}
1666
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001667static int tunables_open(struct inode *inode, struct file *file)
1668{
1669 return 0;
1670}
1671
Cliff Wickman18129242008-06-02 08:56:14 -05001672static const struct file_operations proc_uv_ptc_operations = {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001673 .open = ptc_proc_open,
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001674 .read = seq_read,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001675 .write = ptc_proc_write,
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001676 .llseek = seq_lseek,
1677 .release = seq_release,
Cliff Wickman18129242008-06-02 08:56:14 -05001678};
1679
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001680static const struct file_operations tunables_fops = {
1681 .open = tunables_open,
1682 .read = tunables_read,
1683 .write = tunables_write,
Arnd Bergmann6038f372010-08-15 18:52:59 +02001684 .llseek = default_llseek,
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001685};
1686
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001687static int __init uv_ptc_init(void)
Cliff Wickman18129242008-06-02 08:56:14 -05001688{
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001689 struct proc_dir_entry *proc_uv_ptc;
Cliff Wickman18129242008-06-02 08:56:14 -05001690
1691 if (!is_uv_system())
1692 return 0;
1693
Alexey Dobriyan10f02d112009-08-23 23:17:27 +04001694 proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
1695 &proc_uv_ptc_operations);
Cliff Wickman18129242008-06-02 08:56:14 -05001696 if (!proc_uv_ptc) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001697 pr_err("unable to create %s proc entry\n",
Cliff Wickman18129242008-06-02 08:56:14 -05001698 UV_PTC_BASENAME);
1699 return -EINVAL;
1700 }
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001701
1702 tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
1703 if (!tunables_dir) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001704 pr_err("unable to create debugfs directory %s\n",
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001705 UV_BAU_TUNABLES_DIR);
1706 return -EINVAL;
1707 }
1708 tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001709 tunables_dir, NULL, &tunables_fops);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001710 if (!tunables_file) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001711 pr_err("unable to create debugfs file %s\n",
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001712 UV_BAU_TUNABLES_FILE);
1713 return -EINVAL;
1714 }
Cliff Wickman18129242008-06-02 08:56:14 -05001715 return 0;
1716}
1717
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001718/*
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001719 * Initialize the sending side's sending buffers (activation descriptors).
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001720 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001721static void activation_descriptor_init(int node, int pnode, int base_pnode)
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001722{
1723 int i;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001724 int cpu;
Cliff Wickmanda87c932012-01-16 15:17:50 -06001725 int uv1 = 0;
Jack Steiner6a469e42011-09-20 13:55:04 -07001726 unsigned long gpa;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001727 unsigned long m;
1728 unsigned long n;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001729 size_t dsize;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001730 struct bau_desc *bau_desc;
1731 struct bau_desc *bd2;
Cliff Wickmanda87c932012-01-16 15:17:50 -06001732 struct uv1_bau_msg_header *uv1_hdr;
Cliff Wickmana26fd712014-05-14 16:15:47 -05001733 struct uv2_3_bau_msg_header *uv2_3_hdr;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001734 struct bau_control *bcp;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001735
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001736 /*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001737 * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
1738 * per cpu; and one per cpu on the uvhub (ADP_SZ)
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001739 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001740 dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
1741 bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001742 BUG_ON(!bau_desc);
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001743
Jack Steiner6a469e42011-09-20 13:55:04 -07001744 gpa = uv_gpa(bau_desc);
1745 n = uv_gpa_to_gnode(gpa);
Andrew Banman21e3f122016-09-21 11:09:17 -05001746 m = ops.bau_gpa_to_offset(gpa);
Cliff Wickmanda87c932012-01-16 15:17:50 -06001747 if (is_uv1_hub())
1748 uv1 = 1;
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001749
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001750 /* the 14-bit pnode */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001751 write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001752 /*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001753 * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001754 * cpu even though we only use the first one; one descriptor can
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001755 * describe a broadcast to 256 uv hubs.
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001756 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001757 for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001758 memset(bd2, 0, sizeof(struct bau_desc));
Cliff Wickmanda87c932012-01-16 15:17:50 -06001759 if (uv1) {
1760 uv1_hdr = &bd2->header.uv1_hdr;
Andrew Banman67492c82016-09-21 11:09:12 -05001761 uv1_hdr->swack_flag = 1;
Cliff Wickmanda87c932012-01-16 15:17:50 -06001762 /*
1763 * The base_dest_nasid set in the message header
1764 * is the nasid of the first uvhub in the partition.
1765 * The bit map will indicate destination pnode numbers
1766 * relative to that base. They may not be consecutive
1767 * if nasid striding is being used.
1768 */
1769 uv1_hdr->base_dest_nasid =
Andrew Banman67492c82016-09-21 11:09:12 -05001770 UV_PNODE_TO_NASID(base_pnode);
1771 uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1772 uv1_hdr->command = UV_NET_ENDPOINT_INTD;
1773 uv1_hdr->int_both = 1;
Cliff Wickmanda87c932012-01-16 15:17:50 -06001774 /*
1775 * all others need to be set to zero:
1776 * fairness chaining multilevel count replied_to
1777 */
1778 } else {
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001779 /*
Cliff Wickmana26fd712014-05-14 16:15:47 -05001780 * BIOS uses legacy mode, but uv2 and uv3 hardware always
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001781 * uses native mode for selective broadcasts.
1782 */
Cliff Wickmana26fd712014-05-14 16:15:47 -05001783 uv2_3_hdr = &bd2->header.uv2_3_hdr;
Andrew Banman67492c82016-09-21 11:09:12 -05001784 uv2_3_hdr->swack_flag = 1;
Cliff Wickmana26fd712014-05-14 16:15:47 -05001785 uv2_3_hdr->base_dest_nasid =
Andrew Banman67492c82016-09-21 11:09:12 -05001786 UV_PNODE_TO_NASID(base_pnode);
1787 uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1788 uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
Cliff Wickmanda87c932012-01-16 15:17:50 -06001789 }
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001790 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001791 for_each_present_cpu(cpu) {
1792 if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
1793 continue;
1794 bcp = &per_cpu(bau_control, cpu);
1795 bcp->descriptor_base = bau_desc;
1796 }
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001797}
1798
1799/*
1800 * initialize the destination side's receiving buffers
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001801 * entered for each uvhub in the partition
1802 * - node is first node (kernel memory notion) on the uvhub
1803 * - pnode is the uvhub's physical identifier
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001804 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001805static void pq_init(int node, int pnode)
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001806{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001807 int cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001808 size_t plsize;
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001809 char *cp;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001810 void *vp;
Andrew Banmand2a57afa2016-09-21 11:09:14 -05001811 unsigned long gnode, first, last, tail;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001812 struct bau_pq_entry *pqp;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001813 struct bau_control *bcp;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001814
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001815 plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
1816 vp = kmalloc_node(plsize, GFP_KERNEL, node);
1817 pqp = (struct bau_pq_entry *)vp;
Ingo Molnardc163a42008-06-18 14:15:43 +02001818 BUG_ON(!pqp);
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001819
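	/* round the queue start up to the next 32-byte boundary */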
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001820 cp = (char *)pqp + 31;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001821 pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001822
1823 for_each_present_cpu(cpu) {
1824 if (pnode != uv_cpu_to_pnode(cpu))
1825 continue;
1826 /* for every cpu on this pnode: */
1827 bcp = &per_cpu(bau_control, cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001828 bcp->queue_first = pqp;
1829 bcp->bau_msg_head = pqp;
1830 bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001831 }
Andrew Banmand2a57afa2016-09-21 11:09:14 -05001832
Andrew Banman21e3f122016-09-21 11:09:17 -05001833 first = ops.bau_gpa_to_offset(uv_gpa(pqp));
1834 last = ops.bau_gpa_to_offset(uv_gpa(pqp + (DEST_Q_SIZE - 1)));
Andrew Banmand2a57afa2016-09-21 11:09:14 -05001835
Cliff Wickman4ea3c512009-04-16 07:53:09 -05001836 /*
Andrew Banman6d780592016-09-21 11:09:20 -05001837	 * Pre-UV4, the gnode is required to locate the payload queue
1838 * and the payload queue tail must be maintained by the kernel.
Cliff Wickman4ea3c512009-04-16 07:53:09 -05001839 */
Andrew Banman6d780592016-09-21 11:09:20 -05001840 bcp = &per_cpu(bau_control, smp_processor_id());
1841 if (bcp->uvhub_version <= 3) {
1842 tail = first;
1843 gnode = uv_gpa_to_gnode(uv_gpa(pqp));
1844 first = (gnode << UV_PAYLOADQ_GNODE_SHIFT) | tail;
1845 write_mmr_payload_tail(pnode, tail);
1846 }
1847
Andrew Banman21e3f122016-09-21 11:09:17 -05001848 ops.write_payload_first(pnode, first);
1849 ops.write_payload_last(pnode, last);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001850
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001851 /* in effect, all msg_type's are set to MSG_NOOP */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001852 memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001853}
1854
1855/*
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001856 * Initialization of each UV hub's structures
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001857 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001858static void __init init_uvhub(int uvhub, int vector, int base_pnode)
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001859{
Cliff Wickman9674f352009-04-03 08:34:05 -05001860 int node;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001861 int pnode;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001862 unsigned long apicid;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001863
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001864 node = uvhub_to_first_node(uvhub);
1865 pnode = uv_blade_to_pnode(uvhub);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001866
1867 activation_descriptor_init(node, pnode, base_pnode);
1868
1869 pq_init(node, pnode);
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001870 /*
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001871 * The below initialization can't be in firmware because the
1872 * messaging IRQ will be determined by the OS.
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001873 */
Dimitri Sivanich8191c9f2010-11-16 16:23:52 -06001874 apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001875 write_mmr_data_config(pnode, ((apicid << 32) | vector));
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001876}
1877
1878/*
Cliff Wickman12a66112010-06-02 16:22:01 -05001879 * We will set BAU_MISC_CONTROL with a timeout period.
1880 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001881 * So the destination timeout period has to be calculated from them.
Cliff Wickman12a66112010-06-02 16:22:01 -05001882 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001883static int calculate_destination_timeout(void)
Cliff Wickman12a66112010-06-02 16:22:01 -05001884{
1885 unsigned long mmr_image;
1886 int mult1;
1887 int mult2;
1888 int index;
1889 int base;
1890 int ret;
1891 unsigned long ts_ns;
1892
Jack Steiner2a919592011-05-11 12:50:28 -05001893 if (is_uv1_hub()) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001894 mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
Jack Steiner2a919592011-05-11 12:50:28 -05001895 mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
1896 index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
1897 mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
1898 mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
Cliff Wickman11cab712012-06-22 08:12:12 -05001899 ts_ns = timeout_base_ns[index];
1900 ts_ns *= (mult1 * mult2);
Jack Steiner2a919592011-05-11 12:50:28 -05001901 ret = ts_ns / 1000;
1902 } else {
Cliff Wickmana26fd712014-05-14 16:15:47 -05001903 /* same destination timeout for uv2 and uv3 */
Cliff Wickmand059f9f2012-01-16 15:18:48 -06001904 /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
1905 mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
Jack Steiner2a919592011-05-11 12:50:28 -05001906 mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001907 if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
Cliff Wickmand059f9f2012-01-16 15:18:48 -06001908 base = 80;
Jack Steiner2a919592011-05-11 12:50:28 -05001909 else
Cliff Wickmand059f9f2012-01-16 15:18:48 -06001910 base = 10;
1911 mult1 = mmr_image & UV2_ACK_MASK;
Jack Steiner2a919592011-05-11 12:50:28 -05001912 ret = mult1 * base;
1913 }
Cliff Wickman12a66112010-06-02 16:22:01 -05001914 return ret;
1915}
1916
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001917static void __init init_per_cpu_tunables(void)
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001918{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001919 int cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001920 struct bau_control *bcp;
1921
1922 for_each_present_cpu(cpu) {
1923 bcp = &per_cpu(bau_control, cpu);
1924 bcp->baudisabled = 0;
Cliff Wickman26ef8572012-06-22 08:13:30 -05001925 if (nobau)
Alex Thorlton1c532e02016-03-31 14:18:29 -05001926 bcp->nobau = true;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001927 bcp->statp = &per_cpu(ptcstats, cpu);
1928 /* time interval to catch a hardware stay-busy bug */
1929 bcp->timeout_interval = usec_2_cycles(2*timeout_us);
1930 bcp->max_concurr = max_concurr;
1931 bcp->max_concurr_const = max_concurr;
1932 bcp->plugged_delay = plugged_delay;
1933 bcp->plugsb4reset = plugsb4reset;
1934 bcp->timeoutsb4reset = timeoutsb4reset;
1935 bcp->ipi_reset_limit = ipi_reset_limit;
1936 bcp->complete_threshold = complete_threshold;
1937 bcp->cong_response_us = congested_respns_us;
1938 bcp->cong_reps = congested_reps;
Andrew Banman67492c82016-09-21 11:09:12 -05001939 bcp->disabled_period = sec_2_cycles(disabled_period);
1940 bcp->giveup_limit = giveup_limit;
Cliff Wickmand2ebc712012-01-18 09:40:47 -06001941 spin_lock_init(&bcp->queue_lock);
1942 spin_lock_init(&bcp->uvhub_lock);
Cliff Wickman8b6e5112012-06-22 08:14:59 -05001943 spin_lock_init(&bcp->disable_lock);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001944 }
1945}
1946
1947/*
1948 * Scan all cpus to collect blade and socket summaries.
1949 */
1950static int __init get_cpu_topology(int base_pnode,
1951 struct uvhub_desc *uvhub_descs,
1952 unsigned char *uvhub_mask)
1953{
1954 int cpu;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001955 int pnode;
1956 int uvhub;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001957 int socket;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001958 struct bau_control *bcp;
1959 struct uvhub_desc *bdp;
1960 struct socket_desc *sdp;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001961
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001962 for_each_present_cpu(cpu) {
1963 bcp = &per_cpu(bau_control, cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001964
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001965 memset(bcp, 0, sizeof(struct bau_control));
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001966
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001967 pnode = uv_cpu_hub_info(cpu)->pnode;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001968 if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001969 pr_emerg(
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001970 "cpu %d pnode %d-%d beyond %d; BAU disabled\n",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001971 cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001972 return 1;
1973 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001974
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001975 bcp->osnode = cpu_to_node(cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001976 bcp->partition_base_pnode = base_pnode;
1977
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001978 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05001979 *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001980 bdp = &uvhub_descs[uvhub];
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001981
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001982 bdp->num_cpus++;
1983 bdp->uvhub = uvhub;
1984 bdp->pnode = pnode;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001985
Cliff Wickmana8328ee2010-06-02 16:22:02 -05001986 /* kludge: 'assuming' one node per socket, and assuming that
1987 disabling a socket just leaves a gap in node numbers */
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001988 socket = bcp->osnode & 1;
Cliff Wickmana8328ee2010-06-02 16:22:02 -05001989 bdp->socket_mask |= (1 << socket);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001990 sdp = &bdp->socket[socket];
1991 sdp->cpu_number[sdp->num_cpus] = cpu;
1992 sdp->num_cpus++;
Cliff Wickmancfa60912011-01-03 12:03:53 -06001993 if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05001994 pr_emerg("%d cpus per socket invalid\n",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001995 sdp->num_cpus);
Cliff Wickmancfa60912011-01-03 12:03:53 -06001996 return 1;
1997 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001998 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001999 return 0;
2000}
2001
2002/*
2003 * Each socket is to get a local array of pnodes/hubs.
2004 */
2005static void make_per_cpu_thp(struct bau_control *smaster)
2006{
2007 int cpu;
2008 size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();
2009
2010 smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
2011 memset(smaster->thp, 0, hpsz);
2012 for_each_present_cpu(cpu) {
2013 smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
2014 smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
2015 }
2016}
2017
2018/*
cpw@sgi.com442d3922011-06-21 07:21:31 -05002019 * Each uvhub is to get a local cpumask.
2020 */
2021static void make_per_hub_cpumask(struct bau_control *hmaster)
2022{
2023 int sz = sizeof(cpumask_t);
2024
2025 hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
2026}
2027
2028/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002029 * Initialize all the per_cpu information for the cpu's on a given socket,
2030 * given what has been gathered into the socket_desc struct.
2031 * And reports the chosen hub and socket masters back to the caller.
2032 */
2033static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
2034 struct bau_control **smasterp,
2035 struct bau_control **hmasterp)
2036{
2037 int i;
2038 int cpu;
2039 struct bau_control *bcp;
2040
2041 for (i = 0; i < sdp->num_cpus; i++) {
2042 cpu = sdp->cpu_number[i];
2043 bcp = &per_cpu(bau_control, cpu);
2044 bcp->cpu = cpu;
2045 if (i == 0) {
2046 *smasterp = bcp;
2047 if (!(*hmasterp))
2048 *hmasterp = bcp;
2049 }
2050 bcp->cpus_in_uvhub = bdp->num_cpus;
2051 bcp->cpus_in_socket = sdp->num_cpus;
2052 bcp->socket_master = *smasterp;
2053 bcp->uvhub = bdp->uvhub;
Cliff Wickmanda87c932012-01-16 15:17:50 -06002054 if (is_uv1_hub())
2055 bcp->uvhub_version = 1;
2056 else if (is_uv2_hub())
2057 bcp->uvhub_version = 2;
Cliff Wickmana26fd712014-05-14 16:15:47 -05002058 else if (is_uv3_hub())
2059 bcp->uvhub_version = 3;
Andrew Banman58d4ab42016-09-21 11:09:18 -05002060 else if (is_uv4_hub())
2061 bcp->uvhub_version = 4;
Cliff Wickmanda87c932012-01-16 15:17:50 -06002062 else {
Andrew Banman58d4ab42016-09-21 11:09:18 -05002063 pr_emerg("uvhub version not 1, 2, 3, or 4\n");
Cliff Wickmanda87c932012-01-16 15:17:50 -06002064 return 1;
2065 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002066 bcp->uvhub_master = *hmasterp;
Mike Travis5627a8252016-04-29 16:54:14 -05002067 bcp->uvhub_cpu = uv_cpu_blade_processor_id(cpu);
2068
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002069 if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
Andrew Banmanefa59ab2016-09-21 11:09:13 -05002070 pr_emerg("%d cpus per uvhub invalid\n",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002071 bcp->uvhub_cpu);
2072 return 1;
2073 }
2074 }
2075 return 0;
2076}
2077
2078/*
2079 * Summarize the blade and socket topology into the per_cpu structures.
2080 */
2081static int __init summarize_uvhub_sockets(int nuvhubs,
2082 struct uvhub_desc *uvhub_descs,
2083 unsigned char *uvhub_mask)
2084{
2085 int socket;
2086 int uvhub;
2087 unsigned short socket_mask;
2088
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05002089 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002090 struct uvhub_desc *bdp;
2091 struct bau_control *smaster = NULL;
2092 struct bau_control *hmaster = NULL;
2093
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05002094 if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
2095 continue;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002096
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002097 bdp = &uvhub_descs[uvhub];
Cliff Wickmana8328ee2010-06-02 16:22:02 -05002098 socket_mask = bdp->socket_mask;
2099 socket = 0;
2100 while (socket_mask) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002101 struct socket_desc *sdp;
2102 if ((socket_mask & 1)) {
2103 sdp = &bdp->socket[socket];
2104 if (scan_sock(sdp, bdp, &smaster, &hmaster))
Cliff Wickmancfa60912011-01-03 12:03:53 -06002105 return 1;
cpw@sgi.com9c9153d2011-06-21 07:21:28 -05002106 make_per_cpu_thp(smaster);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002107 }
2108 socket++;
Cliff Wickmana8328ee2010-06-02 16:22:02 -05002109 socket_mask = (socket_mask >> 1);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002110 }
cpw@sgi.com442d3922011-06-21 07:21:31 -05002111 make_per_hub_cpumask(hmaster);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002112 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002113 return 0;
2114}
2115
2116/*
2117 * initialize the bau_control structure for each cpu
2118 */
2119static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
2120{
2121 unsigned char *uvhub_mask;
2122 void *vp;
2123 struct uvhub_desc *uvhub_descs;
2124
Andrew Banmane879c112016-09-21 11:09:19 -05002125 if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
2126 timeout_us = calculate_destination_timeout();
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002127
2128 vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
2129 uvhub_descs = (struct uvhub_desc *)vp;
2130 memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
2131 uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
2132
2133 if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
cpw@sgi.combbd270e2011-06-21 07:21:32 -05002134 goto fail;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002135
2136 if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
cpw@sgi.combbd270e2011-06-21 07:21:32 -05002137 goto fail;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002138
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002139 kfree(uvhub_descs);
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05002140 kfree(uvhub_mask);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002141 init_per_cpu_tunables();
Cliff Wickmancfa60912011-01-03 12:03:53 -06002142 return 0;
cpw@sgi.combbd270e2011-06-21 07:21:32 -05002143
2144fail:
2145 kfree(uvhub_descs);
2146 kfree(uvhub_mask);
2147 return 1;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05002148}
Cliff Wickman18129242008-06-02 08:56:14 -05002149
2150/*
2151 * Initialization of BAU-related structures
2152 */
Cliff Wickmanb194b1202008-06-12 08:23:48 -05002153static int __init uv_bau_init(void)
Cliff Wickman18129242008-06-02 08:56:14 -05002154{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002155 int uvhub;
2156 int pnode;
2157 int nuvhubs;
Rusty Russell2c74d662009-03-18 08:22:30 +10302158 int cur_cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002159 int cpus;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002160 int vector;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002161 cpumask_var_t *mask;
Cliff Wickman18129242008-06-02 08:56:14 -05002162
2163 if (!is_uv_system())
2164 return 0;
2165
Andrew Banman4f059d52016-09-21 11:09:21 -05002166 if (is_uv4_hub())
2167 ops = uv4_bau_ops;
2168 else if (is_uv3_hub())
Andrew Banman5e4f96f2016-09-21 11:09:16 -05002169 ops = uv123_bau_ops;
2170 else if (is_uv2_hub())
2171 ops = uv123_bau_ops;
2172 else if (is_uv1_hub())
2173 ops = uv123_bau_ops;
2174
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002175 for_each_possible_cpu(cur_cpu) {
2176 mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
2177 zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
2178 }
Rusty Russell76ba0ec2009-03-13 14:49:57 +10302179
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002180 nuvhubs = uv_num_possible_blades();
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002181 congested_cycles = usec_2_cycles(congested_respns_us);
Cliff Wickman9674f352009-04-03 08:34:05 -05002182
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002183 uv_base_pnode = 0x7fffffff;
Cliff Wickman77ed23f2011-05-10 08:26:43 -05002184 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002185 cpus = uv_blade_nr_possible_cpus(uvhub);
2186 if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
2187 uv_base_pnode = uv_blade_to_pnode(uvhub);
Cliff Wickman77ed23f2011-05-10 08:26:43 -05002188 }
2189
Andrew Banmane879c112016-09-21 11:09:19 -05002190 /* software timeouts are not supported on UV4 */
2191 if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
2192 enable_timeouts();
Cliff Wickmand059f9f2012-01-16 15:18:48 -06002193
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002194 if (init_per_cpu(nuvhubs, uv_base_pnode)) {
Cliff Wickman26ef8572012-06-22 08:13:30 -05002195 set_bau_off();
2196 nobau_perm = 1;
Cliff Wickmancfa60912011-01-03 12:03:53 -06002197 return 0;
2198 }
Ingo Molnarb4c286e2008-06-18 14:28:19 +02002199
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002200 vector = UV_BAU_MESSAGE;
Cliff Wickmana26fd712014-05-14 16:15:47 -05002201 for_each_possible_blade(uvhub) {
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002202 if (uv_blade_nr_possible_cpus(uvhub))
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002203 init_uvhub(uvhub, vector, uv_base_pnode);
Cliff Wickmana26fd712014-05-14 16:15:47 -05002204 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002205
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002206 alloc_intr_gate(vector, uv_bau_message_intr1);
2207
2208 for_each_possible_blade(uvhub) {
Cliff Wickman93a7ca02010-07-16 10:11:21 -05002209 if (uv_blade_nr_possible_cpus(uvhub)) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002210 unsigned long val;
2211 unsigned long mmr;
Cliff Wickman93a7ca02010-07-16 10:11:21 -05002212 pnode = uv_blade_to_pnode(uvhub);
2213 /* INIT the bau */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002214 val = 1L << 63;
2215 write_gmmr_activation(pnode, val);
Cliff Wickman93a7ca02010-07-16 10:11:21 -05002216 mmr = 1; /* should be 1 to broadcast to both sockets */
Cliff Wickmanda87c932012-01-16 15:17:50 -06002217 if (!is_uv1_hub())
2218 write_mmr_data_broadcast(pnode, mmr);
Cliff Wickman93a7ca02010-07-16 10:11:21 -05002219 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002220 }
Ingo Molnarb4c286e2008-06-18 14:28:19 +02002221
Cliff Wickman18129242008-06-02 08:56:14 -05002222 return 0;
2223}
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002224core_initcall(uv_bau_init);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05002225fs_initcall(uv_ptc_init);
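
/*
 * Note on ordering: core_initcall() runs before fs_initcall(), so the
 * per-cpu BAU structures set up by uv_bau_init() exist before
 * uv_ptc_init() exposes them through /proc and debugfs.
 */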