/*
 * SGI UltraViolet TLB flush routines.
 *
 * (c) 2008-2011 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>

/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {
		20,
		160,
		1280,
		10240,
		81920,
		655360,
		5242880,
		167772160
};
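
/*
 * The urgency7 field named above is 3 bits wide (MMR bits 30:28), so
 * values 0-7 index this table; e.g. a field value of 2 selects the
 * 1280 ns base timeout.
 */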

static int timeout_us;
static int nobau;
static int nobau_perm;
static int baudisabled;
static spinlock_t disable_lock;
static cycles_t congested_cycles;

/* tunables: */
static int max_concurr = MAX_BAU_CONCURRENT;
static int max_concurr_const = MAX_BAU_CONCURRENT;
static int plugged_delay = PLUGGED_DELAY;
static int plugsb4reset = PLUGSB4RESET;
static int timeoutsb4reset = TIMEOUTSB4RESET;
static int ipi_reset_limit = IPI_RESET_LIMIT;
static int complete_threshold = COMPLETE_THRESHOLD;
static int congested_respns_us = CONGESTED_RESPONSE_US;
static int congested_reps = CONGESTED_REPS;
static int congested_period = CONGESTED_PERIOD;

static struct tunables tunables[] = {
	{&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
	{&plugged_delay, PLUGGED_DELAY},
	{&plugsb4reset, PLUGSB4RESET},
	{&timeoutsb4reset, TIMEOUTSB4RESET},
	{&ipi_reset_limit, IPI_RESET_LIMIT},
	{&complete_threshold, COMPLETE_THRESHOLD},
	{&congested_respns_us, CONGESTED_RESPONSE_US},
	{&congested_reps, CONGESTED_REPS},
	{&congested_period, CONGESTED_PERIOD}
};
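
/*
 * Each entry pairs a tunable's address with its compile-time default.
 * The "must be [0]" note implies the table is accessed by position
 * elsewhere; the debugfs dentries declared just below presumably back
 * the file through which these values are read and written.
 */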

static struct dentry *tunables_dir;
static struct dentry *tunables_file;

/* these correspond to the statistics printed by ptc_seq_show() */
static char *stat_description[] = {
	"sent: number of shootdown messages sent",
	"stime: time spent sending messages",
	"numuvhubs: number of hubs targeted with shootdown",
	"numuvhubs16: number times 16 or more hubs targeted",
	"numuvhubs8: number times 8 or more hubs targeted",
	"numuvhubs4: number times 4 or more hubs targeted",
	"numuvhubs2: number times 2 or more hubs targeted",
	"numuvhubs1: number times 1 hub targeted",
	"numcpus: number of cpus targeted with shootdown",
	"dto: number of destination timeouts",
	"retries: destination timeout retries sent",
	"rok: destination timeouts successfully retried",
	"resetp: ipi-style resource resets for plugs",
	"resett: ipi-style resource resets for timeouts",
	"giveup: fall-backs to ipi-style shootdowns",
	"sto: number of source timeouts",
	"bz: number of stay-busy's",
	"throt: number times spun in throttle",
	"swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
	"recv: shootdown messages received",
	"rtime: time spent processing messages",
	"all: shootdown all-tlb messages",
	"one: shootdown one-tlb messages",
	"mult: interrupts that found multiple messages",
	"none: interrupts that found no messages",
	"retry: number of retry messages processed",
	"canc: number messages canceled by retries",
	"nocan: number retries that found nothing to cancel",
	"reset: number of ipi-style reset requests processed",
	"rcan: number messages canceled by reset requests",
	"disable: number times use of the BAU was disabled",
	"enable: number times use of the BAU was re-enabled"
};

static int __init
setup_nobau(char *arg)
{
	nobau = 1;
	return 0;
}
early_param("nobau", setup_nobau);
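
/*
 * Booting with "nobau" on the kernel command line takes this
 * early_param path, so the BAU is disabled from startup; the per-cpu
 * bau_control structures carry a matching nobau flag (see set_bau_on
 * and set_bau_off below).
 */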

/* base pnode in this partition */
static int uv_base_pnode __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);

static void
set_bau_on(void)
{
	int cpu;
	struct bau_control *bcp;

	if (nobau_perm) {
		pr_info("BAU not initialized; cannot be turned on\n");
		return;
	}
	nobau = 0;
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->nobau = 0;
	}
	pr_info("BAU turned on\n");
	return;
}

static void
set_bau_off(void)
{
	int cpu;
	struct bau_control *bcp;

	nobau = 1;
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->nobau = 1;
	}
	pr_info("BAU turned off\n");
	return;
}

/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */
static int __init uvhub_to_first_node(int uvhub)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (uvhub == b)
			return node;
	}
	return -1;
}

/*
 * Determine the apicid of the first cpu on a uvhub.
 */
static int __init uvhub_to_first_apicid(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
						int do_acknowledge)
{
	unsigned long dw;
	struct bau_pq_entry *msg;

	msg = mdp->msg;
	if (!msg->canceled && do_acknowledge) {
		dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
		write_mmr_sw_ack(dw);
	}
	msg->replied_to = 1;
	msg->swack_vec = 0;
}
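
/*
 * The software-ack MMR keeps the Timeout bits at a fixed shift
 * (UV_SW_ACK_NPENDING) above the Pending bits, so writing the vector
 * at both positions clears both images at once; e.g. a swack_vec of
 * 0x04 produces a write of (0x04 << UV_SW_ACK_NPENDING) | 0x04.
 */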

/*
 * Process the receipt of a RETRY message
 */
static void bau_process_retry_msg(struct msg_desc *mdp,
					struct bau_control *bcp)
{
	int i;
	int cancel_count = 0;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *msg2;
	struct ptc_stats *stat = bcp->statp;

	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->queue_last)
			msg2 = mdp->queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->swack_vec) && ((msg2->swack_vec &
			msg->swack_vec) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			mmr = read_mmr_sw_ack();
			msg_res = msg2->swack_vec;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				unsigned long mr;
				/*
				 * Is the resource timed out?
				 * Make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
				write_mmr_sw_ack(mr);
			}
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
						int do_acknowledge)
{
	short socket_ack_count = 0;
	short *sp;
	struct atomic_short *asp;
	struct ptc_stats *stat = bcp->statp;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		stat->d_alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried. That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a swack message, so we have to reply to it.
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
	asp = (struct atomic_short *)sp;
	socket_ack_count = atom_asr(1, asp);
	if (socket_ack_count == bcp->cpus_in_socket) {
		int msg_ack_count;
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
		asp = (struct atomic_short *)&msg->acknowledge_count;
		msg_ack_count = atom_asr(socket_ack_count, asp);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 * (unless we are in the UV2 workaround)
			 */
			reply_to_message(mdp, bcp, do_acknowledge);
		}
	}

	return;
}

/*
 * Determine the first cpu on a pnode.
 */
static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
{
	int cpu;
	struct hub_and_pnode *hpp;

	for_each_present_cpu(cpu) {
		hpp = &smaster->thp[cpu];
		if (pnode == hpp->pnode)
			return cpu;
	}
	return -1;
}

/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero swack_vec field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * swack resources.
 */
static void do_reset(void *ptr)
{
	int i;
	struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
	struct reset_args *rap = (struct reset_args *)ptr;
	struct bau_pq_entry *msg;
	struct ptc_stats *stat = bcp->statp;

	stat->d_resets++;
	/*
	 * We're looking for the given sender, and
	 * will free its swack resource.
	 * If all cpu's finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		unsigned long msg_res;
		/* do_reset: same conditions for cancellation as
		   bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->swack_vec) &&
		    (msg->msg_type != MSG_NOOP)) {
			unsigned long mmr;
			unsigned long mr;
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			/*
			 * only reset the resource if it is still pending
			 */
			mmr = read_mmr_sw_ack();
			msg_res = msg->swack_vec;
			mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
			if (mmr & msg_res) {
				stat->d_rcanceled++;
				write_mmr_sw_ack(mr);
			}
		}
	}
	return;
}

/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */
static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
{
	int pnode;
	int apnode;
	int maskbits;
	int sender = bcp->cpu;
	cpumask_t *mask = bcp->uvhub_master->cpumask;
	struct bau_control *smaster = bcp->socket_master;
	struct reset_args reset_args;

	reset_args.sender = sender;
	cpus_clear(*mask);
	/* find a single cpu for each uvhub in this distribution mask */
	maskbits = sizeof(struct pnmask) * BITSPERBYTE;
	/* each bit is a pnode relative to the partition base pnode */
	for (pnode = 0; pnode < maskbits; pnode++) {
		int cpu;
		if (!bau_uvhub_isset(pnode, distribution))
			continue;
		apnode = pnode + bcp->partition_base_pnode;
		cpu = pnode_to_first_cpu(apnode, smaster);
		cpu_set(cpu, *mask);
	}

	/* IPI all cpus; preemption is already disabled */
	smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
	return;
}

static inline unsigned long cycles_2_us(unsigned long long cyc)
{
	unsigned long long ns;
	unsigned long us;
	int cpu = smp_processor_id();

	ns = (cyc * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR;
	us = ns / 1000;
	return us;
}
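
/*
 * Worked example of the scaling above, with illustrative numbers:
 * given a per-cpu cyc2ns factor of 1000 and a CYC2NS_SCALE_FACTOR of
 * 10, 2048 cycles convert to (2048 * 1000) >> 10 == 2000 ns == 2 us.
 */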

/*
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * bau_flush_send_and_wait()
 */
static inline void quiesce_local_uvhub(struct bau_control *hmaster)
{
	atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

/*
 * mark this quiet-requestor as done
 */
static inline void end_uvhub_quiesce(struct bau_control *hmaster)
{
	atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
{
	unsigned long descriptor_status;

	descriptor_status = uv_read_local_mmr(mmr_offset);
	descriptor_status >>= right_shift;
	descriptor_status &= UV_ACT_STATUS_MASK;
	return descriptor_status;
}

/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 */
static int uv1_wait_completion(struct bau_desc *bau_desc,
				unsigned long mmr_offset, int right_shift,
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_status;
	cycles_t ttm;
	struct ptc_stats *stat = bcp->statp;

	descriptor_status = uv1_read_status(mmr_offset, right_shift);
	/* spin on the status MMR, waiting for it to go idle */
	while ((descriptor_status != DS_IDLE)) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available. As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if (descriptor_status == DS_SOURCE_TIMEOUT) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
			stat->s_dtimeout++;
			ttm = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending. In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}

			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
		}
		descriptor_status = uv1_read_status(mmr_offset, right_shift);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

/*
 * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
 */
static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
{
	unsigned long descriptor_status;
	unsigned long descriptor_status2;

	descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
	descriptor_status2 = (read_mmr_uv2_status() >> desc) & 0x1UL;
	descriptor_status = (descriptor_status << 1) | descriptor_status2;
	return descriptor_status;
}
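
/*
 * The UV2 status is thus 3 bits: the 2-bit field from ACTIVATION_STATUS_0
 * or _1 shifted left, with the per-descriptor bit from ACTIVATION_STATUS_2
 * as the low-order bit; e.g. a 2-bit status of 0b10 plus a status_2 bit
 * of 1 yields 0b101.
 */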

/*
 * Return whether the status of the descriptor that is normally used for this
 * cpu (the one indexed by its hub-relative cpu number) is busy.
 * The status of the original 32 descriptors is always reflected in the 64
 * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
 * The bit provided by the activation_status_2 register is irrelevant to
 * the status if it is only being tested for busy or not busy.
 */
int normal_busy(struct bau_control *bcp)
{
	int cpu = bcp->uvhub_cpu;
	int mmr_offset;
	int right_shift;

	mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
	right_shift = cpu * UV_ACT_STATUS_SIZE;
	return (((((read_lmmr(mmr_offset) >> right_shift) &
				UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
}

/*
 * Entered when a bau descriptor has gone into a permanent busy wait because
 * of a hardware bug.
 * Workaround the bug.
 */
int handle_uv2_busy(struct bau_control *bcp)
{
	int busy_one = bcp->using_desc;
	int normal = bcp->uvhub_cpu;
	int selected = -1;
	int i;
	unsigned long descriptor_status;
	unsigned long status;
	int mmr_offset;
	struct bau_desc *bau_desc_old;
	struct bau_desc *bau_desc_new;
	struct bau_control *hmaster = bcp->uvhub_master;
	struct ptc_stats *stat = bcp->statp;
	cycles_t ttm;

	stat->s_uv2_wars++;
	spin_lock(&hmaster->uvhub_lock);
	/* try for the original first */
	if (busy_one != normal) {
		if (!normal_busy(bcp))
			selected = normal;
	}
	if (selected < 0) {
		/* can't use the normal, select an alternate */
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		descriptor_status = read_lmmr(mmr_offset);

		/* scan available descriptors 32-63 */
		for (i = 0; i < UV_CPUS_PER_AS; i++) {
			if ((hmaster->inuse_map & (1 << i)) == 0) {
				status = ((descriptor_status >>
						(i * UV_ACT_STATUS_SIZE)) &
						UV_ACT_STATUS_MASK) << 1;
				if (status != UV2H_DESC_BUSY) {
					selected = i + UV_CPUS_PER_AS;
					break;
				}
			}
		}
	}

	if (busy_one != normal)
		/* mark the busy alternate as not in-use */
		hmaster->inuse_map &= ~(1 << (busy_one - UV_CPUS_PER_AS));

	if (selected >= 0) {
		/* switch to the selected descriptor */
		if (selected != normal) {
			/* set the selected alternate as in-use */
			hmaster->inuse_map |=
					(1 << (selected - UV_CPUS_PER_AS));
			if (selected > stat->s_uv2_wars_hw)
				stat->s_uv2_wars_hw = selected;
		}
		bau_desc_old = bcp->descriptor_base;
		bau_desc_old += (ITEMS_PER_DESC * busy_one);
		bcp->using_desc = selected;
		bau_desc_new = bcp->descriptor_base;
		bau_desc_new += (ITEMS_PER_DESC * selected);
		*bau_desc_new = *bau_desc_old;
	} else {
		/*
		 * All are busy. Wait for the normal one for this cpu to
		 * free up.
		 */
		stat->s_uv2_war_waits++;
		spin_unlock(&hmaster->uvhub_lock);
		ttm = get_cycles();
		do {
			cpu_relax();
		} while (normal_busy(bcp));
		spin_lock(&hmaster->uvhub_lock);
		/* switch to the original descriptor */
		bcp->using_desc = normal;
		bau_desc_old = bcp->descriptor_base;
		bau_desc_old += (ITEMS_PER_DESC * busy_one);
		bau_desc_new = bcp->descriptor_base;
		bau_desc_new += (ITEMS_PER_DESC * normal);
		*bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
	}
	spin_unlock(&hmaster->uvhub_lock);
	return FLUSH_RETRY_BUSYBUG;
}
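
/*
 * Descriptor numbering in the workaround above: descriptors 0-31 are
 * the per-cpu "normal" ones (status in ACTIVATION_STATUS_0), while
 * 32-63 are shared alternates (status in ACTIVATION_STATUS_1) whose
 * allocation is tracked in the hub master's inuse_map.
 */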

static int uv2_wait_completion(struct bau_desc *bau_desc,
				unsigned long mmr_offset, int right_shift,
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_stat;
	cycles_t ttm;
	int desc = bcp->using_desc;
	long busy_reps = 0;
	struct ptc_stats *stat = bcp->statp;

	descriptor_stat = uv2_read_status(mmr_offset, right_shift, desc);

	/* spin on the status MMR, waiting for it to go idle */
	while (descriptor_stat != UV2H_DESC_IDLE) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available. As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) ||
		    (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) {
			stat->s_strongnacks++;
			bcp->conseccompletes = 0;
			return FLUSH_GIVEUP;
		} else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
			stat->s_dtimeout++;
			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			busy_reps++;
			if (busy_reps > 1000000) {
				/* not to hammer on the clock */
				busy_reps = 0;
				ttm = get_cycles();
				if ((ttm - bcp->send_message) >
						(bcp->clocks_per_100_usec)) {
					return handle_uv2_busy(bcp);
				}
			}
			/*
			 * descriptor_stat is still BUSY
			 */
			cpu_relax();
		}
		descriptor_stat = uv2_read_status(mmr_offset, right_shift,
									desc);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

/*
 * There are 2 status registers; each an array[32] of 2 bits. Set up for
 * which register to read and position in that register based on cpu in
 * current hub.
 */
static int wait_completion(struct bau_desc *bau_desc,
				struct bau_control *bcp, long try)
{
	int right_shift;
	unsigned long mmr_offset;
	int desc = bcp->using_desc;

	if (desc < UV_CPUS_PER_AS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = desc * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
	}

	if (bcp->uvhub_version == 1)
		return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
								bcp, try);
	else
		return uv2_wait_completion(bau_desc, mmr_offset, right_shift,
								bcp, try);
}

static inline cycles_t sec_2_cycles(unsigned long sec)
{
	unsigned long ns;
	cycles_t cyc;

	ns = sec * 1000000000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}
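
/*
 * sec_2_cycles() is the inverse of the cycles_2_us() scaling above:
 * nanoseconds are scaled up by 2^CYC2NS_SCALE_FACTOR and divided by
 * the per-cpu cyc2ns factor to get TSC cycles.
 */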

/*
 * Our retries are blocked by all destination sw ack resources being
 * in use, and a timeout is pending. In that case hardware immediately
 * returns the ERROR that looks like a destination timeout.
 */
static void destination_plugged(struct bau_desc *bau_desc,
			struct bau_control *bcp,
			struct bau_control *hmaster, struct ptc_stats *stat)
{
	udelay(bcp->plugged_delay);
	bcp->plugged_tries++;

	if (bcp->plugged_tries >= bcp->plugsb4reset) {
		bcp->plugged_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_plug++;
	}
}

static void destination_timeout(struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)
{
	hmaster->max_concurr = 1;
	bcp->timeout_tries++;
	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
		bcp->timeout_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_timeout++;
	}
}

/*
 * Completions are taking a very long time due to a congested numalink
 * network.
 */
static void disable_for_congestion(struct bau_control *bcp,
					struct ptc_stats *stat)
{
	/* let only one cpu do this disabling */
	spin_lock(&disable_lock);

	if (!baudisabled && bcp->period_requests &&
	    ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
		int tcpu;
		struct bau_control *tbcp;
		/* it becomes this cpu's job to turn on the use of the
		   BAU again */
		baudisabled = 1;
		bcp->set_bau_off = 1;
		bcp->set_bau_on_time = get_cycles();
		bcp->set_bau_on_time += sec_2_cycles(bcp->cong_period);
		stat->s_bau_disabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			tbcp->baudisabled = 1;
		}
	}

	spin_unlock(&disable_lock);
}

static void count_max_concurr(int stat, struct bau_control *bcp,
				struct bau_control *hmaster)
{
	bcp->plugged_tries = 0;
	bcp->timeout_tries = 0;
	if (stat != FLUSH_COMPLETE)
		return;
	if (bcp->conseccompletes <= bcp->complete_threshold)
		return;
	if (hmaster->max_concurr >= hmaster->max_concurr_const)
		return;
	hmaster->max_concurr++;
}

static void record_send_stats(cycles_t time1, cycles_t time2,
		struct bau_control *bcp, struct ptc_stats *stat,
		int completion_status, int try)
{
	cycles_t elapsed;

	if (time2 > time1) {
		elapsed = time2 - time1;
		stat->s_time += elapsed;

		if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
			bcp->period_requests++;
			bcp->period_time += elapsed;
			if ((elapsed > congested_cycles) &&
			    (bcp->period_requests > bcp->cong_reps))
				disable_for_congestion(bcp, stat);
		}
	} else
		stat->s_requestor--;

	if (completion_status == FLUSH_COMPLETE && try > 1)
		stat->s_retriesok++;
	else if (completion_status == FLUSH_GIVEUP)
		stat->s_giveup++;
}

/*
 * Because of a uv1 hardware bug only a limited number of concurrent
 * requests can be made.
 */
static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
{
	spinlock_t *lock = &hmaster->uvhub_lock;
	atomic_t *v;

	v = &hmaster->active_descriptor_count;
	if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
		stat->s_throttles++;
		do {
			cpu_relax();
		} while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
	}
}
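
/*
 * atomic_inc_unless_ge() increments active_descriptor_count only while
 * the count is below max_concurr (serialized by the hub lock), so a
 * sender either claims a slot immediately or spins here until one
 * frees; the count is decremented in uv_flush_send_and_wait().
 */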

/*
 * Handle the completion status of a message send.
 */
static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)
{
	if (completion_status == FLUSH_RETRY_PLUGGED)
		destination_plugged(bau_desc, bcp, hmaster, stat);
	else if (completion_status == FLUSH_RETRY_TIMEOUT)
		destination_timeout(bau_desc, bcp, hmaster, stat);
}

/*
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to including
 * cpus that are on the local uvhub.
 *
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */
int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
{
	int seq_number = 0;
	int completion_stat = 0;
	int uv1 = 0;
	long try = 0;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *hmaster = bcp->uvhub_master;
	struct uv1_bau_msg_header *uv1_hdr = NULL;
	struct uv2_bau_msg_header *uv2_hdr = NULL;
	struct bau_desc *bau_desc;

	if (bcp->uvhub_version == 1)
		uv1_throttle(hmaster, stat);

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	time1 = get_cycles();
	do {
		bau_desc = bcp->descriptor_base;
		bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
		if (bcp->uvhub_version == 1) {
			uv1 = 1;
			uv1_hdr = &bau_desc->header.uv1_hdr;
		} else
			uv2_hdr = &bau_desc->header.uv2_hdr;
		if ((try == 0) || (completion_stat == FLUSH_RETRY_BUSYBUG)) {
			if (uv1)
				uv1_hdr->msg_type = MSG_REGULAR;
			else
				uv2_hdr->msg_type = MSG_REGULAR;
			seq_number = bcp->message_number++;
		} else {
			if (uv1)
				uv1_hdr->msg_type = MSG_RETRY;
			else
				uv2_hdr->msg_type = MSG_RETRY;
			stat->s_retry_messages++;
		}

		if (uv1)
			uv1_hdr->sequence = seq_number;
		else
			uv2_hdr->sequence = seq_number;
		index = (1UL << AS_PUSH_SHIFT) | bcp->using_desc;
		bcp->send_message = get_cycles();

		write_mmr_activation(index);

		try++;
		completion_stat = wait_completion(bau_desc, bcp, try);
		/* UV2: wait_completion() may change the bcp->using_desc */

		handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);

		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
			bcp->ipi_attempts = 0;
			completion_stat = FLUSH_GIVEUP;
			break;
		}
		cpu_relax();
	} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
		 (completion_stat == FLUSH_RETRY_BUSYBUG) ||
		 (completion_stat == FLUSH_RETRY_TIMEOUT));

	time2 = get_cycles();

	count_max_concurr(completion_stat, bcp, hmaster);

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	atomic_dec(&hmaster->active_descriptor_count);

	record_send_stats(time1, time2, bcp, stat, completion_stat, try);

	if (completion_stat == FLUSH_GIVEUP)
		/* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
		return 1;
	return 0;
}

/*
 * The BAU is disabled. When the disabled time period has expired, the cpu
 * that disabled it must re-enable it.
 * Return 0 if it is re-enabled for all cpus.
 */
static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;

	if (bcp->set_bau_off) {
		if (get_cycles() >= bcp->set_bau_on_time) {
			stat->s_bau_reenabled++;
			baudisabled = 0;
			for_each_present_cpu(tcpu) {
				tbcp = &per_cpu(bau_control, tcpu);
				tbcp->baudisabled = 0;
				tbcp->period_requests = 0;
				tbcp->period_time = 0;
			}
			return 0;
		}
	}
	return -1;
}

static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
				int remotes, struct bau_desc *bau_desc)
{
	stat->s_requestor++;
	stat->s_ntargcpu += remotes + locals;
	stat->s_ntargremotes += remotes;
	stat->s_ntarglocals += locals;

	/* uvhub statistics */
	hubs = bau_uvhub_weight(&bau_desc->distribution);
	if (locals) {
		stat->s_ntarglocaluvhub++;
		stat->s_ntargremoteuvhub += (hubs - 1);
	} else
		stat->s_ntargremoteuvhub += hubs;

	stat->s_ntarguvhub += hubs;

	if (hubs >= 16)
		stat->s_ntarguvhub16++;
	else if (hubs >= 8)
		stat->s_ntarguvhub8++;
	else if (hubs >= 4)
		stat->s_ntarguvhub4++;
	else if (hubs >= 2)
		stat->s_ntarguvhub2++;
	else
		stat->s_ntarguvhub1++;
}

/*
 * Translate a cpu mask to the uvhub distribution mask in the BAU
 * activation descriptor.
 */
static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
			struct bau_desc *bau_desc, int *localsp, int *remotesp)
{
	int cpu;
	int pnode;
	int cnt = 0;
	struct hub_and_pnode *hpp;

	for_each_cpu(cpu, flush_mask) {
		/*
		 * The distribution vector is a bit map of pnodes, relative
		 * to the partition base pnode (and the partition base nasid
		 * in the header).
		 * Translate cpu to pnode and hub using a local memory array.
		 */
		hpp = &bcp->socket_master->thp[cpu];
		pnode = hpp->pnode - bcp->partition_base_pnode;
		bau_uvhub_set(pnode, &bau_desc->distribution);
		cnt++;
		if (hpp->uvhub == bcp->uvhub)
			(*localsp)++;
		else
			(*remotesp)++;
	}
	if (!cnt)
		return 1;
	return 0;
}
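
/*
 * For example, a target cpu whose pnode is (partition_base_pnode + 3)
 * sets bit 3 of the descriptor's distribution bitmap; the partition
 * base nasid carried in the header supplies the base offset.
 */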

/*
 * globally purge translation cache of a virtual address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct. This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done. The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
				struct mm_struct *mm, unsigned long va,
				unsigned int cpu)
{
	int locals = 0;
	int remotes = 0;
	int hubs = 0;
	struct bau_desc *bau_desc;
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;

	bcp = &per_cpu(bau_control, cpu);
	stat = bcp->statp;
	stat->s_enters++;

	if (bcp->nobau)
		return cpumask;

	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
		if (check_enable(bcp, stat))
			return cpumask;
	}

	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
	 * cpu mask. All cpus are converted to uvhubs and copied to the
	 * activation descriptor.
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
	/* don't actually do a shootdown of the local cpu */
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

	if (cpu_isset(cpu, *cpumask))
		stat->s_ntargself++;

	bau_desc = bcp->descriptor_base;
	bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
	if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
		return NULL;

	record_send_statistics(stat, locals, hubs, remotes, bau_desc);

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;
	/*
	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
	 * or 1 if it gave up and the original cpumask should be returned.
	 */
	if (!uv_flush_send_and_wait(flush_mask, bcp))
		return NULL;
	else
		return cpumask;
}

/*
 * Search the message queue for any 'other' message with the same software
 * acknowledge resource bit vector.
 */
struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
			struct bau_control *bcp, unsigned char swack_vec)
{
	struct bau_pq_entry *msg_next = msg + 1;

	if (msg_next > bcp->queue_last)
		msg_next = bcp->queue_first;
	while ((msg_next->swack_vec != 0) && (msg_next != msg)) {
		if (msg_next->swack_vec == swack_vec)
			return msg_next;
		msg_next++;
		if (msg_next > bcp->queue_last)
			msg_next = bcp->queue_first;
	}
	return NULL;
}
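
/*
 * The payload queue is treated as a ring: the scan starts just past
 * 'msg', wraps from queue_last back to queue_first, and stops either
 * at a matching swack_vec, an empty (zero-vector) slot, or after a
 * full lap back to 'msg'.
 */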
1182
1183/*
1184 * UV2 needs to work around a bug in which an arriving message has not
1185 * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
1186 * Such a message must be ignored.
1187 */
1188void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
1189{
1190 unsigned long mmr_image;
1191 unsigned char swack_vec;
1192 struct bau_pq_entry *msg = mdp->msg;
1193 struct bau_pq_entry *other_msg;
1194
1195 mmr_image = read_mmr_sw_ack();
1196 swack_vec = msg->swack_vec;
1197
1198 if ((swack_vec & mmr_image) == 0) {
1199 /*
1200 * This message was assigned a swack resource, but no
1201 * reserved acknowlegment is pending.
1202 * The bug has prevented this message from setting the MMR.
1203 * And no other message has used the same sw_ack resource.
1204 * Do the requested shootdown but do not reply to the msg.
1205 * (the 0 means make no acknowledge)
1206 */
1207 bau_process_message(mdp, bcp, 0);
1208 return;
1209 }
1210
1211 /*
1212 * Some message has set the MMR 'pending' bit; it might have been
1213 * another message. Look for that message.
1214 */
1215 other_msg = find_another_by_swack(msg, bcp, msg->swack_vec);
1216 if (other_msg) {
1217 /* There is another. Do not ack the current one. */
1218 bau_process_message(mdp, bcp, 0);
1219 /*
1220 * Let the natural processing of that message acknowledge
1221 * it. Don't get the processing of sw_ack's out of order.
1222 */
1223 return;
1224 }
1225
1226 /*
1227 * There is no other message using this sw_ack, so it is safe to
1228 * acknowledge it.
1229 */
1230 bau_process_message(mdp, bcp, 1);
1231
1232 return;
1233}
1234
1235/*
Cliff Wickman18129242008-06-02 08:56:14 -05001236 * The BAU message interrupt comes here. (registered by set_intr_gate)
1237 * See entry_64.S
1238 *
1239 * We received a broadcast assist message.
1240 *
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001241 * Interrupts are disabled; this interrupt could represent
Cliff Wickman18129242008-06-02 08:56:14 -05001242 * the receipt of several messages.
1243 *
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001244 * All cores/threads on this hub get this interrupt.
1245 * The last one to see it does the software ack.
Cliff Wickman18129242008-06-02 08:56:14 -05001246 * (the resource will not be freed until noninterruptable cpus see this
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001247 * interrupt; hardware may timeout the s/w ack and reply ERROR)
Cliff Wickman18129242008-06-02 08:56:14 -05001248 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	int count = 0;
	cycles_t time_start;
	struct bau_pq_entry *msg;
	struct bau_control *bcp;
	struct ptc_stats *stat;
	struct msg_desc msgdesc;

	ack_APIC_irq();
	time_start = get_cycles();

	bcp = &per_cpu(bau_control, smp_processor_id());
	stat = bcp->statp;

	msgdesc.queue_first = bcp->queue_first;
	msgdesc.queue_last = bcp->queue_last;

	msg = bcp->bau_msg_head;
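	/*
	 * Drain every pending message; a zero swack_vec marks an empty
	 * payload queue slot, and the queue wraps circularly.
	 */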
	while (msg->swack_vec) {
		count++;

		msgdesc.msg_slot = msg - msgdesc.queue_first;
		msgdesc.msg = msg;
		if (bcp->uvhub_version == 2)
			process_uv2_message(&msgdesc, bcp);
		else
			bau_process_message(&msgdesc, bcp, 1);

		msg++;
		if (msg > msgdesc.queue_last)
			msg = msgdesc.queue_first;
		bcp->bau_msg_head = msg;
	}
	stat->d_time += (get_cycles() - time_start);
	if (!count)
		stat->d_nomsg++;
	else if (count > 1)
		stat->d_multmsg++;
}

/*
 * Each target uvhub (i.e. a uvhub that has cpus) needs to have
 * shootdown message timeouts enabled.  The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */
static void __init enable_timeouts(void)
{
	int uvhub;
	int nuvhubs;
	int pnode;
	unsigned long mmr_image;

	nuvhubs = uv_num_possible_blades();

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (!uv_blade_nr_possible_cpus(uvhub))
			continue;

		pnode = uv_blade_to_pnode(uvhub);
		mmr_image = read_mmr_misc_control(pnode);
		/*
		 * Set the timeout period and then lock it in; this is
		 * done in three steps.
		 *
		 * To program the period, SOFT_ACK_MODE must first be off.
		 */
		mmr_image &= ~(1L << SOFTACK_MSHIFT);
		write_mmr_misc_control(pnode, mmr_image);
		/*
		 * Set the 4-bit period.
		 */
		mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
		mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
		write_mmr_misc_control(pnode, mmr_image);
		/*
		 * UV1:
		 * Subsequent reversals of the timebase bit (3) cause an
		 * immediate timeout of one or all INTD resources as
		 * indicated in bits 2:0 (7 causes all of them to timeout).
		 */
		mmr_image |= (1L << SOFTACK_MSHIFT);
		if (is_uv2_hub())
			mmr_image |= (1L << UV2_EXT_SHFT);
		write_mmr_misc_control(pnode, mmr_image);
	}
}

static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void ptc_seq_stop(struct seq_file *file, void *data)
{
}

static inline unsigned long long usec_2_cycles(unsigned long microsec)
{
	unsigned long ns;
	unsigned long long cyc;

	ns = microsec * 1000;
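	/*
	 * Invert the per-cpu cyc2ns scaling (which converts cycles to
	 * nanoseconds as ns = cyc * cyc2ns >> CYC2NS_SCALE_FACTOR) to
	 * get cycles from nanoseconds.
	 */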
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}

/*
 * Display the statistics through /proc/sgi_uv/ptc_statistics
 * 'data' points to the cpu number
 * Note: see the descriptions in stat_description[].
 */
static int ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	struct bau_control *bcp;
	int cpu;

	cpu = *(loff_t *)data;
	if (!cpu) {
		seq_printf(file,
		 "# cpu bauoff sent stime self locals remotes ncpus localhub ");
		seq_printf(file,
			"remotehub numuvhubs numuvhubs16 numuvhubs8 ");
		seq_printf(file,
		    "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries rok ");
		seq_printf(file,
		  "resetp resett giveup sto bz throt enters swack recv rtime ");
		seq_printf(file,
			"all one mult none retry canc nocan reset rcan ");
		seq_printf(file,
			"disable enable wars warshw warwaits\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		bcp = &per_cpu(bau_control, cpu);
		stat = bcp->statp;
		/* source side statistics */
		seq_printf(file,
			"cpu %d %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, bcp->nobau, stat->s_requestor,
			   cycles_2_us(stat->s_time),
			   stat->s_ntargself, stat->s_ntarglocals,
			   stat->s_ntargremotes, stat->s_ntargcpu,
			   stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
			   stat->s_ntarguvhub, stat->s_ntarguvhub16);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
			   stat->s_ntarguvhub8, stat->s_ntarguvhub4,
			   stat->s_ntarguvhub2, stat->s_ntarguvhub1,
			   stat->s_dtimeout, stat->s_strongnacks);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   stat->s_retry_messages, stat->s_retriesok,
			   stat->s_resets_plug, stat->s_resets_timeout,
			   stat->s_giveup, stat->s_stimeout,
			   stat->s_busy, stat->s_throttles, stat->s_enters);

		/* destination side statistics */
		seq_printf(file,
			"%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   read_gmmr_sw_ack(uv_cpu_to_pnode(cpu)),
			   stat->d_requestee, cycles_2_us(stat->d_time),
			   stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
			   stat->d_nomsg, stat->d_retries, stat->d_canceled,
			   stat->d_nocanceled, stat->d_resets,
			   stat->d_rcanceled);
		seq_printf(file, "%ld %ld %ld %ld %ld\n",
			stat->s_bau_disabled, stat->s_bau_reenabled,
			stat->s_uv2_wars, stat->s_uv2_wars_hw,
			stat->s_uv2_war_waits);
	}
	return 0;
}

/*
 * Display the tunables through debugfs
 */
static ssize_t tunables_read(struct file *file, char __user *userbuf,
				size_t count, loff_t *ppos)
{
	char *buf;
	int ret;

	buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
		"max_concur plugged_delay plugsb4reset",
		"timeoutsb4reset ipi_reset_limit complete_threshold",
		"congested_response_us congested_reps congested_period",
		max_concurr, plugged_delay, plugsb4reset,
		timeoutsb4reset, ipi_reset_limit, complete_threshold,
		congested_respns_us, congested_reps, congested_period);

	if (!buf)
		return -ENOMEM;

	ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
	kfree(buf);
	return ret;
}

/*
 * handle a write to /proc/sgi_uv/ptc_statistics
 * -1: reset the statistics
 *  0: display the meaning of the statistics
 * 'on'/'off': enable or disable use of the BAU
 */
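/*
 * Illustrative usage from a shell (values per the cases above):
 *	echo -1 > /proc/sgi_uv/ptc_statistics	(zero every cpu's counters)
 *	echo 0 > /proc/sgi_uv/ptc_statistics	(log the field descriptions)
 *	echo off > /proc/sgi_uv/ptc_statistics	(disable use of the BAU)
 */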
static ssize_t ptc_proc_write(struct file *file, const char __user *user,
				size_t count, loff_t *data)
{
	int cpu;
	int i;
	int elements;
	long input_arg;
	char optstr[64];
	struct ptc_stats *stat;

	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';

	if (!strcmp(optstr, "on")) {
		set_bau_on();
		return count;
	} else if (!strcmp(optstr, "off")) {
		set_bau_off();
		return count;
	}

	if (strict_strtol(optstr, 10, &input_arg) < 0) {
		printk(KERN_DEBUG "%s is invalid\n", optstr);
		return -EINVAL;
	}

	if (input_arg == 0) {
		elements = sizeof(stat_description)/sizeof(*stat_description);
		printk(KERN_DEBUG "# cpu: cpu number\n");
		printk(KERN_DEBUG "Sender statistics:\n");
		for (i = 0; i < elements; i++)
			printk(KERN_DEBUG "%s\n", stat_description[i]);
	} else if (input_arg == -1) {
		for_each_present_cpu(cpu) {
			stat = &per_cpu(ptcstats, cpu);
			memset(stat, 0, sizeof(struct ptc_stats));
		}
	}

	return count;
}

static int local_atoi(const char *name)
{
	int val = 0;

	for (;; name++) {
		switch (*name) {
		case '0' ... '9':
			val = 10*val+(*name-'0');
			break;
		default:
			return val;
		}
	}
}

/*
 * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
 * Zero values reset them to defaults.
 */
static int parse_tunables_write(struct bau_control *bcp, char *instr,
				int count)
{
	char *p;
	char *q;
	int cnt = 0;
	int val;
	int e = sizeof(tunables) / sizeof(*tunables);

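	/* first pass: count the whitespace-separated values supplied */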
	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (; *p; p = q + strspn(q, WHITESPACE)) {
		q = p + strcspn(p, WHITESPACE);
		cnt++;
		if (q == p)
			break;
	}
	if (cnt != e) {
		printk(KERN_INFO "bau tunable error: should be %d values\n", e);
		return -EINVAL;
	}

	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
		q = p + strcspn(p, WHITESPACE);
		val = local_atoi(p);
		switch (cnt) {
		case 0:
			if (val == 0) {
				max_concurr = MAX_BAU_CONCURRENT;
				max_concurr_const = MAX_BAU_CONCURRENT;
				continue;
			}
			if (val < 1 || val > bcp->cpus_in_uvhub) {
				printk(KERN_DEBUG
				"Error: BAU max concurrent %d is invalid\n",
				val);
				return -EINVAL;
			}
			max_concurr = val;
			max_concurr_const = val;
			continue;
		default:
			if (val == 0)
				*tunables[cnt].tunp = tunables[cnt].deflt;
			else
				*tunables[cnt].tunp = val;
			continue;
		}
		if (q == p)
			break;
	}
	return 0;
}

/*
 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
 */
static ssize_t tunables_write(struct file *file, const char __user *user,
				size_t count, loff_t *data)
{
	int cpu;
	int ret;
	char instr[100];
	struct bau_control *bcp;

	if (count == 0 || count > sizeof(instr)-1)
		return -EINVAL;
	if (copy_from_user(instr, user, count))
		return -EFAULT;

	instr[count] = '\0';

	cpu = get_cpu();
	bcp = &per_cpu(bau_control, cpu);
	ret = parse_tunables_write(bcp, instr, count);
	put_cpu();
	if (ret)
		return ret;

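	/* propagate the validated values to every cpu's bau_control */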
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->max_concurr = max_concurr;
		bcp->max_concurr_const = max_concurr;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->cong_response_us = congested_respns_us;
		bcp->cong_reps = congested_reps;
		bcp->cong_period = congested_period;
	}
	return count;
}

static const struct seq_operations uv_ptc_seq_ops = {
	.start	= ptc_seq_start,
	.next	= ptc_seq_next,
	.stop	= ptc_seq_stop,
	.show	= ptc_seq_show
};

static int ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uv_ptc_seq_ops);
}

static int tunables_open(struct inode *inode, struct file *file)
{
	return 0;
}

static const struct file_operations proc_uv_ptc_operations = {
	.open	= ptc_proc_open,
	.read	= seq_read,
	.write	= ptc_proc_write,
	.llseek	= seq_lseek,
	.release = seq_release,
};

static const struct file_operations tunables_fops = {
	.open	= tunables_open,
	.read	= tunables_read,
	.write	= tunables_write,
	.llseek	= default_llseek,
};
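
/*
 * Illustrative shell usage of the tunables file (path per the comments
 * above); all nine values must be given, in the order shown by reading
 * the file, and a 0 restores that tunable's default:
 *	cat /sys/kernel/debug/sgi_uv/bau_tunables
 *	echo "0 0 0 0 0 0 0 0 0" > /sys/kernel/debug/sgi_uv/bau_tunables
 */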

static int __init uv_ptc_init(void)
{
	struct proc_dir_entry *proc_uv_ptc;

	if (!is_uv_system())
		return 0;

	proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
				  &proc_uv_ptc_operations);
	if (!proc_uv_ptc) {
		printk(KERN_ERR "unable to create %s proc entry\n",
		       UV_PTC_BASENAME);
		return -EINVAL;
	}

	tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
	if (!tunables_dir) {
		printk(KERN_ERR "unable to create debugfs directory %s\n",
		       UV_BAU_TUNABLES_DIR);
		return -EINVAL;
	}
	tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
					tunables_dir, NULL, &tunables_fops);
	if (!tunables_file) {
		printk(KERN_ERR "unable to create debugfs file %s\n",
		       UV_BAU_TUNABLES_FILE);
		return -EINVAL;
	}
	return 0;
}

/*
 * Initialize the sending side's sending buffers (the activation
 * descriptors).
 */
static void activation_descriptor_init(int node, int pnode, int base_pnode)
{
	int i;
	int cpu;
	int uv1 = 0;
	unsigned long gpa;
	unsigned long m;
	unsigned long n;
	size_t dsize;
	struct bau_desc *bau_desc;
	struct bau_desc *bd2;
	struct uv1_bau_msg_header *uv1_hdr;
	struct uv2_bau_msg_header *uv2_hdr;
	struct bau_control *bcp;

	/*
	 * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
	 * per cpu, for each of the ADP_SZ cpu slots on the uvhub
	 */
	dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
	bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
	BUG_ON(!bau_desc);

	gpa = uv_gpa(bau_desc);
	n = uv_gpa_to_gnode(gpa);
	m = uv_gpa_to_offset(gpa);
	if (is_uv1_hub())
		uv1 = 1;

	/* the 14-bit pnode */
	write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
	/*
	 * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
	 * cpu even though we only use the first one; one descriptor can
	 * describe a broadcast to 256 uv hubs.
	 */
	for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
		memset(bd2, 0, sizeof(struct bau_desc));
		if (uv1) {
			uv1_hdr = &bd2->header.uv1_hdr;
			uv1_hdr->swack_flag = 1;
			/*
			 * The base_dest_nasid set in the message header
			 * is the nasid of the first uvhub in the partition.
			 * The bit map will indicate destination pnode numbers
			 * relative to that base. They may not be consecutive
			 * if nasid striding is being used.
			 */
			uv1_hdr->base_dest_nasid =
						UV_PNODE_TO_NASID(base_pnode);
			uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
			uv1_hdr->command = UV_NET_ENDPOINT_INTD;
			uv1_hdr->int_both = 1;
			/*
			 * all others need to be set to zero:
			 *   fairness chaining multilevel count replied_to
			 */
		} else {
			uv2_hdr = &bd2->header.uv2_hdr;
			uv2_hdr->swack_flag = 1;
			uv2_hdr->base_dest_nasid =
						UV_PNODE_TO_NASID(base_pnode);
			uv2_hdr->dest_subnodeid = UV_LB_SUBNODEID;
			uv2_hdr->command = UV_NET_ENDPOINT_INTD;
		}
	}
	for_each_present_cpu(cpu) {
		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
			continue;
		bcp = &per_cpu(bau_control, cpu);
		bcp->descriptor_base = bau_desc;
	}
}
1763
1764/*
1765 * initialize the destination side's receiving buffers
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001766 * entered for each uvhub in the partition
1767 * - node is first node (kernel memory notion) on the uvhub
1768 * - pnode is the uvhub's physical identifier
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001769 */
static void pq_init(int node, int pnode)
{
	int cpu;
	size_t plsize;
	char *cp;
	void *vp;
	unsigned long pn;
	unsigned long first;
	unsigned long pn_first;
	unsigned long last;
	struct bau_pq_entry *pqp;
	struct bau_control *bcp;

	plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
	vp = kmalloc_node(plsize, GFP_KERNEL, node);
	pqp = (struct bau_pq_entry *)vp;
	BUG_ON(!pqp);

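	/*
	 * Round the queue base up to a 32-byte boundary; the extra
	 * entry allocated above provides the slack for this adjustment.
	 */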
	cp = (char *)pqp + 31;
	pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);

	for_each_present_cpu(cpu) {
		if (pnode != uv_cpu_to_pnode(cpu))
			continue;
		/* for every cpu on this pnode: */
		bcp = &per_cpu(bau_control, cpu);
		bcp->queue_first = pqp;
		bcp->bau_msg_head = pqp;
		bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
	}
	/*
	 * need the gnode of where the memory was really allocated
	 */
	pn = uv_gpa_to_gnode(uv_gpa(pqp));
	first = uv_physnodeaddr(pqp);
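	/*
	 * The 'first' MMR value encodes the gnode in its upper bits and
	 * the node-physical address of the first queue entry in its
	 * lower bits.
	 */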
	pn_first = ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | first;
	last = uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1));
	write_mmr_payload_first(pnode, pn_first);
	write_mmr_payload_tail(pnode, first);
	write_mmr_payload_last(pnode, last);
	write_gmmr_sw_ack(pnode, 0xffffUL);

	/* in effect, all msg_type's are set to MSG_NOOP */
	memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
}

/*
 * Initialization of each UV hub's structures
 */
static void __init init_uvhub(int uvhub, int vector, int base_pnode)
{
	int node;
	int pnode;
	unsigned long apicid;

	node = uvhub_to_first_node(uvhub);
	pnode = uv_blade_to_pnode(uvhub);

	activation_descriptor_init(node, pnode, base_pnode);

	pq_init(node, pnode);
	/*
	 * The below initialization can't be in firmware because the
	 * messaging IRQ will be determined by the OS.
	 */
	apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
	write_mmr_data_config(pnode, ((apicid << 32) | vector));
}

/*
 * We will set BAU_MISC_CONTROL with a timeout period.
 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
 * So the destination timeout period has to be calculated from them.
 */
static int calculate_destination_timeout(void)
{
	unsigned long mmr_image;
	int mult1;
	int mult2;
	int index;
	int base;
	int ret;
	unsigned long ts_ns;

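	/*
	 * UV1: the urgency-7 index from UVH_AGING_PRESCALE_SEL selects a
	 * base period from timeout_base_ns[]; it is scaled by the two
	 * multipliers and converted to microseconds.
	 * UV2: a 10us or 80us base is scaled by a multiplier taken from
	 * UVH_LB_BAU_MISC_CONTROL.
	 */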
	if (is_uv1_hub()) {
		mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
		mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
		index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
		mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
		mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
		ts_ns = timeout_base_ns[index];
		ts_ns *= (mult1 * mult2);
		ret = ts_ns / 1000;
	} else {
		/* 4 bits  0/1 for 10/80us base, 3 bits of multiplier */
		mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
		mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
		if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
			base = 80;
		else
			base = 10;
		mult1 = mmr_image & UV2_ACK_MASK;
		ret = mult1 * base;
	}
	return ret;
}

static void __init init_per_cpu_tunables(void)
{
	int cpu;
	struct bau_control *bcp;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->baudisabled = 0;
		if (nobau)
			bcp->nobau = 1;
		bcp->statp = &per_cpu(ptcstats, cpu);
		/* time interval to catch a hardware stay-busy bug */
		bcp->timeout_interval = usec_2_cycles(2*timeout_us);
		bcp->max_concurr = max_concurr;
		bcp->max_concurr_const = max_concurr;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->cong_response_us = congested_respns_us;
		bcp->cong_reps = congested_reps;
		bcp->cong_period = congested_period;
		bcp->clocks_per_100_usec = usec_2_cycles(100);
		spin_lock_init(&bcp->queue_lock);
		spin_lock_init(&bcp->uvhub_lock);
	}
}

/*
 * Scan all cpus to collect blade and socket summaries.
 */
static int __init get_cpu_topology(int base_pnode,
					struct uvhub_desc *uvhub_descs,
					unsigned char *uvhub_mask)
{
	int cpu;
	int pnode;
	int uvhub;
	int socket;
	struct bau_control *bcp;
	struct uvhub_desc *bdp;
	struct socket_desc *sdp;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);

		memset(bcp, 0, sizeof(struct bau_control));

		pnode = uv_cpu_hub_info(cpu)->pnode;
		if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
			printk(KERN_EMERG
				"cpu %d pnode %d-%d beyond %d; BAU disabled\n",
				cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
			return 1;
		}

		bcp->osnode = cpu_to_node(cpu);
		bcp->partition_base_pnode = base_pnode;

		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
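		/* one bit per uvhub, packed eight hubs to a byte */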
		*(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
		bdp = &uvhub_descs[uvhub];

		bdp->num_cpus++;
		bdp->uvhub = uvhub;
		bdp->pnode = pnode;

		/* kludge: 'assuming' one node per socket, and assuming that
		   disabling a socket just leaves a gap in node numbers */
		socket = bcp->osnode & 1;
		bdp->socket_mask |= (1 << socket);
		sdp = &bdp->socket[socket];
		sdp->cpu_number[sdp->num_cpus] = cpu;
		sdp->num_cpus++;
		if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
			printk(KERN_EMERG "%d cpus per socket invalid\n",
				sdp->num_cpus);
			return 1;
		}
	}
	return 0;
}

/*
 * Each socket is to get a local array of pnodes/hubs.
 */
static void make_per_cpu_thp(struct bau_control *smaster)
{
	int cpu;
	size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();

	smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
	memset(smaster->thp, 0, hpsz);
	for_each_present_cpu(cpu) {
		smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
		smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
	}
}

/*
 * Each uvhub is to get a local cpumask.
 */
static void make_per_hub_cpumask(struct bau_control *hmaster)
{
	int sz = sizeof(cpumask_t);

	hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
}

/*
 * Initialize all the per_cpu information for the cpus on a given socket,
 * given what has been gathered into the socket_desc struct.
 * Report the chosen hub and socket masters back to the caller.
 */
static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
			struct bau_control **smasterp,
			struct bau_control **hmasterp)
{
	int i;
	int cpu;
	struct bau_control *bcp;

	for (i = 0; i < sdp->num_cpus; i++) {
		cpu = sdp->cpu_number[i];
		bcp = &per_cpu(bau_control, cpu);
		bcp->cpu = cpu;
		if (i == 0) {
			*smasterp = bcp;
			if (!(*hmasterp))
				*hmasterp = bcp;
		}
		bcp->cpus_in_uvhub = bdp->num_cpus;
		bcp->cpus_in_socket = sdp->num_cpus;
		bcp->socket_master = *smasterp;
		bcp->uvhub = bdp->uvhub;
		if (is_uv1_hub())
			bcp->uvhub_version = 1;
		else if (is_uv2_hub())
			bcp->uvhub_version = 2;
		else {
			printk(KERN_EMERG "uvhub version not 1 or 2\n");
			return 1;
		}
		bcp->uvhub_master = *hmasterp;
		bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
		bcp->using_desc = bcp->uvhub_cpu;
		if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
			printk(KERN_EMERG "%d cpus per uvhub invalid\n",
				bcp->uvhub_cpu);
			return 1;
		}
	}
	return 0;
}

/*
 * Summarize the blade and socket topology into the per_cpu structures.
 */
static int __init summarize_uvhub_sockets(int nuvhubs,
			struct uvhub_desc *uvhub_descs,
			unsigned char *uvhub_mask)
{
	int socket;
	int uvhub;
	unsigned short socket_mask;

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		struct uvhub_desc *bdp;
		struct bau_control *smaster = NULL;
		struct bau_control *hmaster = NULL;

		if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
			continue;

		bdp = &uvhub_descs[uvhub];
		socket_mask = bdp->socket_mask;
		socket = 0;
		while (socket_mask) {
			struct socket_desc *sdp;
			if ((socket_mask & 1)) {
				sdp = &bdp->socket[socket];
				if (scan_sock(sdp, bdp, &smaster, &hmaster))
					return 1;
				make_per_cpu_thp(smaster);
			}
			socket++;
			socket_mask = (socket_mask >> 1);
		}
		make_per_hub_cpumask(hmaster);
	}
	return 0;
}

/*
 * initialize the bau_control structure for each cpu
 */
static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
{
	unsigned char *uvhub_mask;
	void *vp;
	struct uvhub_desc *uvhub_descs;

	timeout_us = calculate_destination_timeout();

	vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
	uvhub_descs = (struct uvhub_desc *)vp;
	memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
	uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);

	if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
		goto fail;

	if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
		goto fail;

	kfree(uvhub_descs);
	kfree(uvhub_mask);
	init_per_cpu_tunables();
	return 0;

fail:
	kfree(uvhub_descs);
	kfree(uvhub_mask);
	return 1;
}

/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
	int uvhub;
	int pnode;
	int nuvhubs;
	int cur_cpu;
	int cpus;
	int vector;
	cpumask_var_t *mask;

	if (!is_uv_system())
		return 0;

	for_each_possible_cpu(cur_cpu) {
		mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
		zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
	}

	nuvhubs = uv_num_possible_blades();
	spin_lock_init(&disable_lock);
	congested_cycles = usec_2_cycles(congested_respns_us);

	uv_base_pnode = 0x7fffffff;
	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		cpus = uv_blade_nr_possible_cpus(uvhub);
		if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
			uv_base_pnode = uv_blade_to_pnode(uvhub);
	}

	enable_timeouts();

	if (init_per_cpu(nuvhubs, uv_base_pnode)) {
		set_bau_off();
		nobau_perm = 1;
		return 0;
	}

	vector = UV_BAU_MESSAGE;
	for_each_possible_blade(uvhub)
		if (uv_blade_nr_possible_cpus(uvhub))
			init_uvhub(uvhub, vector, uv_base_pnode);

	alloc_intr_gate(vector, uv_bau_message_intr1);

	for_each_possible_blade(uvhub) {
		if (uv_blade_nr_possible_cpus(uvhub)) {
			unsigned long val;
			unsigned long mmr;
			pnode = uv_blade_to_pnode(uvhub);
			/* INIT the bau */
			val = 1L << 63;
			write_gmmr_activation(pnode, val);
			mmr = 1; /* should be 1 to broadcast to both sockets */
			if (!is_uv1_hub())
				write_mmr_data_broadcast(pnode, mmr);
		}
	}

	return 0;
}
core_initcall(uv_bau_init);
fs_initcall(uv_ptc_init);