/*
 * SGI UltraViolet TLB flush routines.
 *
 * (c) 2008-2011 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>

/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {
		20,
		160,
		1280,
		10240,
		81920,
		655360,
		5242880,
		167772160
};
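
/*
 * Sketch (illustration only, not used by the driver): the table above is
 * indexed by the 3-bit urgency7 field, so an index read from that field
 * selects a base timeout in nanoseconds.  Assuming a multiplier already
 * derived from the transaction timeout MMRs ('mult' below is a stand-in
 * for it), the conversion to microseconds would look like:
 */
static inline int timeout_index_to_us(int index, int mult)
{
	/* scale the base nanosecond timeout and convert to microseconds */
	return (timeout_base_ns[index] * mult) / 1000;
}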

static int timeout_us;
static int nobau;
static int baudisabled;
static spinlock_t disable_lock;
static cycles_t congested_cycles;

/* tunables: */
static int max_concurr = MAX_BAU_CONCURRENT;
static int max_concurr_const = MAX_BAU_CONCURRENT;
static int plugged_delay = PLUGGED_DELAY;
static int plugsb4reset = PLUGSB4RESET;
static int timeoutsb4reset = TIMEOUTSB4RESET;
static int ipi_reset_limit = IPI_RESET_LIMIT;
static int complete_threshold = COMPLETE_THRESHOLD;
static int congested_respns_us = CONGESTED_RESPONSE_US;
static int congested_reps = CONGESTED_REPS;
static int congested_period = CONGESTED_PERIOD;

static struct tunables tunables[] = {
	{&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
	{&plugged_delay, PLUGGED_DELAY},
	{&plugsb4reset, PLUGSB4RESET},
	{&timeoutsb4reset, TIMEOUTSB4RESET},
	{&ipi_reset_limit, IPI_RESET_LIMIT},
	{&complete_threshold, COMPLETE_THRESHOLD},
	{&congested_respns_us, CONGESTED_RESPONSE_US},
	{&congested_reps, CONGESTED_REPS},
	{&congested_period, CONGESTED_PERIOD}
};

static struct dentry *tunables_dir;
static struct dentry *tunables_file;

/* these correspond to the statistics printed by ptc_seq_show() */
static char *stat_description[] = {
	"sent: number of shootdown messages sent",
	"stime: time spent sending messages",
	"numuvhubs: number of hubs targeted with shootdown",
	"numuvhubs16: number times 16 or more hubs targeted",
	"numuvhubs8: number times 8 or more hubs targeted",
	"numuvhubs4: number times 4 or more hubs targeted",
	"numuvhubs2: number times 2 or more hubs targeted",
	"numuvhubs1: number times 1 hub targeted",
	"numcpus: number of cpus targeted with shootdown",
	"dto: number of destination timeouts",
	"retries: destination timeout retries sent",
85 "rok: : destination timeouts successfully retried",
86 "resetp: ipi-style resource resets for plugs",
87 "resett: ipi-style resource resets for timeouts",
88 "giveup: fall-backs to ipi-style shootdowns",
89 "sto: number of source timeouts",
90 "bz: number of stay-busy's",
91 "throt: number times spun in throttle",
92 "swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
93 "recv: shootdown messages received",
94 "rtime: time spent processing messages",
95 "all: shootdown all-tlb messages",
96 "one: shootdown one-tlb messages",
97 "mult: interrupts that found multiple messages",
98 "none: interrupts that found no messages",
99 "retry: number of retry messages processed",
100 "canc: number messages canceled by retries",
101 "nocan: number retries that found nothing to cancel",
102 "reset: number of ipi-style reset requests processed",
103 "rcan: number messages canceled by reset requests",
104 "disable: number times use of the BAU was disabled",
105 "enable: number times use of the BAU was re-enabled"
106};
107
108static int __init
109setup_nobau(char *arg)
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -0500110{
111 nobau = 1;
112 return 0;
113}
114early_param("nobau", setup_nobau);
Ingo Molnarb4c286e2008-06-18 14:28:19 +0200115
Cliff Wickman94ca8e42009-04-14 10:56:48 -0500116/* base pnode in this partition */
Cliff Wickmanf073cc82011-05-24 13:07:36 -0500117static int uv_base_pnode __read_mostly;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -0500118/* position of pnode (which is nasid>>1): */
119static int uv_nshift __read_mostly;
120static unsigned long uv_mmask __read_mostly;
Cliff Wickman18129242008-06-02 08:56:14 -0500121
Ingo Molnardc163a42008-06-18 14:15:43 +0200122static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
123static DEFINE_PER_CPU(struct bau_control, bau_control);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -0500124static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
125
Cliff Wickman18129242008-06-02 08:56:14 -0500126/*
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -0500127 * Determine the first node on a uvhub. 'Nodes' are used for kernel
128 * memory allocation.
Cliff Wickman9674f352009-04-03 08:34:05 -0500129 */
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -0500130static int __init uvhub_to_first_node(int uvhub)
Cliff Wickman9674f352009-04-03 08:34:05 -0500131{
132 int node, b;
133
134 for_each_online_node(node) {
135 b = uv_node_to_blade_id(node);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -0500136 if (uvhub == b)
Cliff Wickman9674f352009-04-03 08:34:05 -0500137 return node;
138 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -0500139 return -1;
Cliff Wickman9674f352009-04-03 08:34:05 -0500140}
141
142/*
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -0500143 * Determine the apicid of the first cpu on a uvhub.
Cliff Wickman9674f352009-04-03 08:34:05 -0500144 */
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -0500145static int __init uvhub_to_first_apicid(int uvhub)
Cliff Wickman9674f352009-04-03 08:34:05 -0500146{
147 int cpu;
148
149 for_each_present_cpu(cpu)
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -0500150 if (uvhub == uv_cpu_to_blade_id(cpu))
Cliff Wickman9674f352009-04-03 08:34:05 -0500151 return per_cpu(x86_cpu_to_apicid, cpu);
152 return -1;
153}
154
155/*
Cliff Wickman18129242008-06-02 08:56:14 -0500156 * Free a software acknowledge hardware resource by clearing its Pending
157 * bit. This will return a reply to the sender.
158 * If the message has timed out, a reply has already been sent by the
159 * hardware but the resource has not been released. In that case our
160 * clear of the Timeout bit (as well) will free the resource. No reply will
161 * be sent (the hardware will only do one reply per message).
162 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -0500163static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp)
Cliff Wickman18129242008-06-02 08:56:14 -0500164{
Cliff Wickmanb194b1202008-06-12 08:23:48 -0500165 unsigned long dw;
Cliff Wickmanf073cc82011-05-24 13:07:36 -0500166 struct bau_pq_entry *msg;
Cliff Wickman18129242008-06-02 08:56:14 -0500167
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -0500168 msg = mdp->msg;
169 if (!msg->canceled) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -0500170 dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
171 write_mmr_sw_ack(dw);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -0500172 }
Cliff Wickman18129242008-06-02 08:56:14 -0500173 msg->replied_to = 1;
Cliff Wickmanf073cc82011-05-24 13:07:36 -0500174 msg->swack_vec = 0;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -0500175}
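
/*
 * The doubled mask built above recurs throughout this file: each swack
 * resource has a Pending bit and, UV_SW_ACK_NPENDING bits higher, a
 * Timeout bit; writing both copies releases the resource whether or not
 * it has timed out.  A sketch of the construction (hypothetical helper,
 * not used by the driver):
 */
static inline unsigned long swack_release_mask(unsigned long res)
{
	/* one copy clears the Timeout bit, the other the Pending bit */
	return (res << UV_SW_ACK_NPENDING) | res;
}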

/*
 * Process the receipt of a RETRY message
 */
static void bau_process_retry_msg(struct msg_desc *mdp,
					struct bau_control *bcp)
{
	int i;
	int cancel_count = 0;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *msg2;
	struct ptc_stats *stat = bcp->statp;

	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->queue_last)
			msg2 = mdp->queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->swack_vec) && ((msg2->swack_vec &
			msg->swack_vec) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			mmr = read_mmr_sw_ack();
			msg_res = msg2->swack_vec;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				unsigned long mr;
				/*
				 * is the resource timed out?
				 * make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
				write_mmr_sw_ack(mr);
			}
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void bau_process_message(struct msg_desc *mdp,
					struct bau_control *bcp)
{
	short socket_ack_count = 0;
	short *sp;
	struct atomic_short *asp;
	struct ptc_stats *stat = bcp->statp;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		stat->d_alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried.  That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a swack message, so we have to reply to it.
	 * Count each responding cpu on the socket.  This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
	asp = (struct atomic_short *)sp;
	socket_ack_count = atom_asr(1, asp);
	if (socket_ack_count == bcp->cpus_in_socket) {
		int msg_ack_count;
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
		asp = (struct atomic_short *)&msg->acknowledge_count;
		msg_ack_count = atom_asr(socket_ack_count, asp);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 */
			reply_to_message(mdp, bcp);
		}
	}

	return;
}

/*
 * Determine the first cpu on a pnode.
 */
static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
{
	int cpu;
	struct hub_and_pnode *hpp;

	for_each_present_cpu(cpu) {
		hpp = &smaster->thp[cpu];
		if (pnode == hpp->pnode)
			return cpu;
	}
	return -1;
}

/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero swack_vec field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * swack resources.
 */
static void do_reset(void *ptr)
{
	int i;
	struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
	struct reset_args *rap = (struct reset_args *)ptr;
	struct bau_pq_entry *msg;
	struct ptc_stats *stat = bcp->statp;

	stat->d_resets++;
	/*
	 * We're looking for the given sender, and
	 * will free its swack resource.
	 * If all cpu's finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		unsigned long msg_res;
		/* do_reset: same conditions for cancellation as
		   bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->swack_vec) &&
		    (msg->msg_type != MSG_NOOP)) {
			unsigned long mmr;
			unsigned long mr;
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			/*
			 * only reset the resource if it is still pending
			 */
			mmr = read_mmr_sw_ack();
			msg_res = msg->swack_vec;
			mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
			if (mmr & msg_res) {
				stat->d_rcanceled++;
				write_mmr_sw_ack(mr);
			}
		}
	}
	return;
}

/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */
static void reset_with_ipi(struct bau_targ_hubmask *distribution,
				struct bau_control *bcp)
{
	int pnode;
	int apnode;
	int maskbits;
	cpumask_t mask;
	int sender = bcp->cpu;
	struct bau_control *smaster = bcp->socket_master;
	struct reset_args reset_args;

	reset_args.sender = sender;
	cpus_clear(mask);
	/* find a single cpu for each uvhub in this distribution mask */
	maskbits = sizeof(struct bau_targ_hubmask) * BITSPERBYTE;
	/* each bit is a pnode relative to the partition base pnode */
	for (pnode = 0; pnode < maskbits; pnode++) {
		int cpu;
		if (!bau_uvhub_isset(pnode, distribution))
			continue;
		apnode = pnode + bcp->partition_base_pnode;
		cpu = pnode_to_first_cpu(apnode, smaster);
		cpu_set(cpu, mask);
	}

	/* IPI all cpus; preemption is already disabled */
	smp_call_function_many(&mask, do_reset, (void *)&reset_args, 1);
	return;
}

static inline unsigned long cycles_2_us(unsigned long long cyc)
{
	unsigned long long ns;
	unsigned long us;
	int cpu = smp_processor_id();

	ns = (cyc * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR;
	us = ns / 1000;
	return us;
}

/*
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * bau_flush_send_and_wait()
 */
static inline void quiesce_local_uvhub(struct bau_control *hmaster)
{
	atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

/*
 * mark this quiet-requestor as done
 */
static inline void end_uvhub_quiesce(struct bau_control *hmaster)
{
	atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
{
	unsigned long descriptor_status;

	descriptor_status = uv_read_local_mmr(mmr_offset);
	descriptor_status >>= right_shift;
	descriptor_status &= UV_ACT_STATUS_MASK;
	return descriptor_status;
}

/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 */
static int uv1_wait_completion(struct bau_desc *bau_desc,
				unsigned long mmr_offset, int right_shift,
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_status;
	cycles_t ttm;
	struct ptc_stats *stat = bcp->statp;

	descriptor_status = uv1_read_status(mmr_offset, right_shift);
	/* spin on the status MMR, waiting for it to go idle */
	while ((descriptor_status != DS_IDLE)) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available.  As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if (descriptor_status == DS_SOURCE_TIMEOUT) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
			stat->s_dtimeout++;
			ttm = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}

			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
		}
		descriptor_status = uv1_read_status(mmr_offset, right_shift);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

/*
 * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
 */
static unsigned long uv2_read_status(unsigned long offset, int rshft, int cpu)
{
	unsigned long descriptor_status;
	unsigned long descriptor_status2;

	descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
	descriptor_status2 = (read_mmr_uv2_status() >> cpu) & 0x1UL;
	descriptor_status = (descriptor_status << 1) | descriptor_status2;
	return descriptor_status;
}

static int uv2_wait_completion(struct bau_desc *bau_desc,
				unsigned long mmr_offset, int right_shift,
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_stat;
	cycles_t ttm;
	int cpu = bcp->uvhub_cpu;
	struct ptc_stats *stat = bcp->statp;

	descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);

	/* spin on the status MMR, waiting for it to go idle */
	while (descriptor_stat != UV2H_DESC_IDLE) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available.  As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) ||
		    (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) ||
		    (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
			stat->s_dtimeout++;
			ttm = get_cycles();
			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}
			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_stat is still BUSY
			 */
			cpu_relax();
		}
		descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

/*
 * There are 2 status registers; each an array[32] of 2 bits. Set up for
 * which register to read and position in that register based on cpu in
 * current hub.
 */
static int wait_completion(struct bau_desc *bau_desc,
				struct bau_control *bcp, long try)
{
	int right_shift;
	unsigned long mmr_offset;
	int cpu = bcp->uvhub_cpu;

	if (cpu < UV_CPUS_PER_AS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = cpu * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift = ((cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
	}

	if (is_uv1_hub())
		return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
								bcp, try);
	else
		return uv2_wait_completion(bau_desc, mmr_offset, right_shift,
								bcp, try);
}

static inline cycles_t sec_2_cycles(unsigned long sec)
{
	unsigned long ns;
	cycles_t cyc;

	ns = sec * 1000000000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}

/*
 * Our retries are blocked by all destination sw ack resources being
 * in use, and a timeout is pending. In that case hardware immediately
 * returns the ERROR that looks like a destination timeout.
 */
static void destination_plugged(struct bau_desc *bau_desc,
			struct bau_control *bcp,
			struct bau_control *hmaster, struct ptc_stats *stat)
{
	udelay(bcp->plugged_delay);
	bcp->plugged_tries++;

	if (bcp->plugged_tries >= bcp->plugsb4reset) {
		bcp->plugged_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_plug++;
	}
}

static void destination_timeout(struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)
{
	hmaster->max_concurr = 1;
	bcp->timeout_tries++;
	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
		bcp->timeout_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_timeout++;
	}
}

/*
 * Completions are taking a very long time due to a congested numalink
 * network.
 */
static void disable_for_congestion(struct bau_control *bcp,
					struct ptc_stats *stat)
{
	/* let only one cpu do this disabling */
	spin_lock(&disable_lock);

	if (!baudisabled && bcp->period_requests &&
	    ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
		int tcpu;
		struct bau_control *tbcp;
		/* it becomes this cpu's job to turn on the use of the
		   BAU again */
		baudisabled = 1;
		bcp->set_bau_off = 1;
		bcp->set_bau_on_time = get_cycles();
		bcp->set_bau_on_time += sec_2_cycles(bcp->cong_period);
		stat->s_bau_disabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			tbcp->baudisabled = 1;
		}
	}

	spin_unlock(&disable_lock);
}

static void count_max_concurr(int stat, struct bau_control *bcp,
				struct bau_control *hmaster)
{
	bcp->plugged_tries = 0;
	bcp->timeout_tries = 0;
	if (stat != FLUSH_COMPLETE)
		return;
	if (bcp->conseccompletes <= bcp->complete_threshold)
		return;
	if (hmaster->max_concurr >= hmaster->max_concurr_const)
		return;
	hmaster->max_concurr++;
}

static void record_send_stats(cycles_t time1, cycles_t time2,
		struct bau_control *bcp, struct ptc_stats *stat,
		int completion_status, int try)
{
	cycles_t elapsed;

	if (time2 > time1) {
		elapsed = time2 - time1;
		stat->s_time += elapsed;

		if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
			bcp->period_requests++;
			bcp->period_time += elapsed;
			if ((elapsed > congested_cycles) &&
			    (bcp->period_requests > bcp->cong_reps))
				disable_for_congestion(bcp, stat);
		}
	} else
		stat->s_requestor--;

	if (completion_status == FLUSH_COMPLETE && try > 1)
		stat->s_retriesok++;
	else if (completion_status == FLUSH_GIVEUP)
		stat->s_giveup++;
}

/*
 * Because of a uv1 hardware bug only a limited number of concurrent
 * requests can be made.
 */
static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
{
	spinlock_t *lock = &hmaster->uvhub_lock;
	atomic_t *v;

	v = &hmaster->active_descriptor_count;
	if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
		stat->s_throttles++;
		do {
			cpu_relax();
		} while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
	}
}
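
/*
 * A sketch of the semantics assumed of atomic_inc_unless_ge() above (the
 * real helper lives in asm/uv/uv_bau.h): atomically increment *v and
 * return true, unless *v has already reached 'u'.  The lock makes the
 * test and the increment a single step.
 */
static inline int sketch_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
{
	int done = 0;

	spin_lock(lock);
	if (atomic_read(v) < u) {
		atomic_inc(v);	/* below the limit: take a slot */
		done = 1;
	}
	spin_unlock(lock);
	return done;
}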

/*
 * Handle the completion status of a message send.
 */
static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)
{
	if (completion_status == FLUSH_RETRY_PLUGGED)
		destination_plugged(bau_desc, bcp, hmaster, stat);
	else if (completion_status == FLUSH_RETRY_TIMEOUT)
		destination_timeout(bau_desc, bcp, hmaster, stat);
}

/*
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to including
 * cpus that are on the local uvhub.
 *
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */
int uv_flush_send_and_wait(struct bau_desc *bau_desc,
			struct cpumask *flush_mask, struct bau_control *bcp)
{
	int seq_number = 0;
	int completion_stat = 0;
	long try = 0;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *hmaster = bcp->uvhub_master;

	if (is_uv1_hub())
		uv1_throttle(hmaster, stat);

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	time1 = get_cycles();
	do {
		if (try == 0) {
			bau_desc->header.msg_type = MSG_REGULAR;
			seq_number = bcp->message_number++;
		} else {
			bau_desc->header.msg_type = MSG_RETRY;
			stat->s_retry_messages++;
		}

		bau_desc->header.sequence = seq_number;
		index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
		bcp->send_message = get_cycles();

		write_mmr_activation(index);

		try++;
		completion_stat = wait_completion(bau_desc, bcp, try);

		handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);

		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
			bcp->ipi_attempts = 0;
			completion_stat = FLUSH_GIVEUP;
			break;
		}
		cpu_relax();
	} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
		 (completion_stat == FLUSH_RETRY_TIMEOUT));

	time2 = get_cycles();

	count_max_concurr(completion_stat, bcp, hmaster);

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	atomic_dec(&hmaster->active_descriptor_count);

	record_send_stats(time1, time2, bcp, stat, completion_stat, try);

	if (completion_stat == FLUSH_GIVEUP)
		return 1;
	return 0;
}

/*
 * The BAU is disabled. When the disabled time period has expired, the cpu
 * that disabled it must re-enable it.
 * Return 0 if it is re-enabled for all cpus.
 */
static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;

	if (bcp->set_bau_off) {
		if (get_cycles() >= bcp->set_bau_on_time) {
			stat->s_bau_reenabled++;
			baudisabled = 0;
			for_each_present_cpu(tcpu) {
				tbcp = &per_cpu(bau_control, tcpu);
				tbcp->baudisabled = 0;
				tbcp->period_requests = 0;
				tbcp->period_time = 0;
			}
			return 0;
		}
	}
	return -1;
}

static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
				int remotes, struct bau_desc *bau_desc)
{
	stat->s_requestor++;
	stat->s_ntargcpu += remotes + locals;
	stat->s_ntargremotes += remotes;
	stat->s_ntarglocals += locals;

	/* uvhub statistics */
	hubs = bau_uvhub_weight(&bau_desc->distribution);
	if (locals) {
		stat->s_ntarglocaluvhub++;
		stat->s_ntargremoteuvhub += (hubs - 1);
	} else
		stat->s_ntargremoteuvhub += hubs;

	stat->s_ntarguvhub += hubs;

	if (hubs >= 16)
		stat->s_ntarguvhub16++;
	else if (hubs >= 8)
		stat->s_ntarguvhub8++;
	else if (hubs >= 4)
		stat->s_ntarguvhub4++;
	else if (hubs >= 2)
		stat->s_ntarguvhub2++;
	else
		stat->s_ntarguvhub1++;
}

/*
 * Translate a cpu mask to the uvhub distribution mask in the BAU
 * activation descriptor.
 */
static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
			struct bau_desc *bau_desc, int *localsp, int *remotesp)
{
	int cpu;
	int pnode;
	int cnt = 0;
	struct hub_and_pnode *hpp;

	for_each_cpu(cpu, flush_mask) {
		/*
		 * The distribution vector is a bit map of pnodes, relative
		 * to the partition base pnode (and the partition base nasid
		 * in the header).
		 * Translate cpu to pnode and hub using a local memory array.
		 */
		hpp = &bcp->socket_master->thp[cpu];
		pnode = hpp->pnode - bcp->partition_base_pnode;
		bau_uvhub_set(pnode, &bau_desc->distribution);
		cnt++;
		if (hpp->uvhub == bcp->uvhub)
			(*localsp)++;
		else
			(*remotesp)++;
	}
	if (!cnt)
		return 1;
	return 0;
}
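
/*
 * Sketch (illustration only; the real bau_uvhub_set() is assumed to live
 * in asm/uv/uv_bau.h): the distribution mask is an ordinary bitmap, so
 * setting a target amounts to a __set_bit(), with each set bit naming
 * one pnode relative to the partition base.
 */
static inline void sketch_uvhub_set(int pnode, struct bau_targ_hubmask *dstp)
{
	/* bit 'pnode' of the distribution bitmap selects that hub */
	__set_bit(pnode, (unsigned long *)&dstp->bits);
}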

/*
 * globally purge translation cache of a virtual address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct.  This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done.  The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
				struct mm_struct *mm, unsigned long va,
				unsigned int cpu)
{
	int locals = 0;
	int remotes = 0;
	int hubs = 0;
	struct bau_desc *bau_desc;
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;

	/* kernel was booted 'nobau' */
	if (nobau)
		return cpumask;

	bcp = &per_cpu(bau_control, cpu);
	stat = bcp->statp;

	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
		if (check_enable(bcp, stat))
			return cpumask;
	}

	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
	 * cpu mask.  All cpus are converted to uvhubs and copied to the
	 * activation descriptor.
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
	/* don't actually do a shootdown of the local cpu */
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

	if (cpu_isset(cpu, *cpumask))
		stat->s_ntargself++;

	bau_desc = bcp->descriptor_base;
	bau_desc += ITEMS_PER_DESC * bcp->uvhub_cpu;
	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
	if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
		return NULL;

	record_send_statistics(stat, locals, hubs, remotes, bau_desc);

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;
	/*
	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
	 * or 1 if it gave up and the original cpumask should be returned.
	 */
	if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
		return NULL;
	else
		return cpumask;
}
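
/*
 * Sketch (illustration only) of how the generic flush path is assumed to
 * reach the entry point above on a UV system (see arch/x86/mm/tlb.c):
 *
 *	cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
 *	if (cpumask)
 *		flush_tlb_others_ipi(cpumask, mm, va);
 *
 * Whatever could not be flushed through the BAU is handed back to the
 * caller, which falls back to conventional IPIs.
 */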

/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
 * We received a broadcast assist message.
 *
 * Interrupts are disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this hub get this interrupt.
 * The last one to see it does the software ack.
 * (the resource will not be freed until noninterruptible cpus see this
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001005 * interrupt; hardware may timeout the s/w ack and reply ERROR)
Cliff Wickman18129242008-06-02 08:56:14 -05001006 */
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001007void uv_bau_message_interrupt(struct pt_regs *regs)
Cliff Wickman18129242008-06-02 08:56:14 -05001008{
Cliff Wickman18129242008-06-02 08:56:14 -05001009 int count = 0;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001010 cycles_t time_start;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001011 struct bau_pq_entry *msg;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001012 struct bau_control *bcp;
1013 struct ptc_stats *stat;
1014 struct msg_desc msgdesc;
Cliff Wickman18129242008-06-02 08:56:14 -05001015
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001016 time_start = get_cycles();
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001017
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001018 bcp = &per_cpu(bau_control, smp_processor_id());
Cliff Wickman712157a2010-06-02 16:22:02 -05001019 stat = bcp->statp;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001020
1021 msgdesc.queue_first = bcp->queue_first;
1022 msgdesc.queue_last = bcp->queue_last;
1023
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001024 msg = bcp->bau_msg_head;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001025 while (msg->swack_vec) {
Cliff Wickman18129242008-06-02 08:56:14 -05001026 count++;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001027
1028 msgdesc.msg_slot = msg - msgdesc.queue_first;
1029 msgdesc.swack_slot = ffs(msg->swack_vec) - 1;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001030 msgdesc.msg = msg;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001031 bau_process_message(&msgdesc, bcp);
1032
Cliff Wickman18129242008-06-02 08:56:14 -05001033 msg++;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001034 if (msg > msgdesc.queue_last)
1035 msg = msgdesc.queue_first;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001036 bcp->bau_msg_head = msg;
Cliff Wickman18129242008-06-02 08:56:14 -05001037 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001038 stat->d_time += (get_cycles() - time_start);
Cliff Wickman18129242008-06-02 08:56:14 -05001039 if (!count)
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001040 stat->d_nomsg++;
Cliff Wickman18129242008-06-02 08:56:14 -05001041 else if (count > 1)
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001042 stat->d_multmsg++;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001043
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001044 ack_APIC_irq();
Cliff Wickman18129242008-06-02 08:56:14 -05001045}
1046
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001047/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001048 * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001049 * shootdown message timeouts enabled. The timeout does not cause
1050 * an interrupt, but causes an error message to be returned to
1051 * the sender.
1052 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001053static void __init enable_timeouts(void)
Cliff Wickman18129242008-06-02 08:56:14 -05001054{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001055 int uvhub;
1056 int nuvhubs;
Cliff Wickman18129242008-06-02 08:56:14 -05001057 int pnode;
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001058 unsigned long mmr_image;
Cliff Wickman18129242008-06-02 08:56:14 -05001059
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001060 nuvhubs = uv_num_possible_blades();
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001061
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001062 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
1063 if (!uv_blade_nr_possible_cpus(uvhub))
Cliff Wickman18129242008-06-02 08:56:14 -05001064 continue;
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001065
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001066 pnode = uv_blade_to_pnode(uvhub);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001067 mmr_image = read_mmr_misc_control(pnode);
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001068 /*
1069 * Set the timeout period and then lock it in, in three
1070 * steps; captures and locks in the period.
1071 *
1072 * To program the period, the SOFT_ACK_MODE must be off.
1073 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001074 mmr_image &= ~(1L << SOFTACK_MSHIFT);
1075 write_mmr_misc_control(pnode, mmr_image);
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001076 /*
1077 * Set the 4-bit period.
1078 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001079 mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
1080 mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
1081 write_mmr_misc_control(pnode, mmr_image);
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001082 /*
Jack Steiner2a919592011-05-11 12:50:28 -05001083 * UV1:
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001084 * Subsequent reversals of the timebase bit (3) cause an
1085 * immediate timeout of one or all INTD resources as
1086 * indicated in bits 2:0 (7 causes all of them to timeout).
1087 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001088 mmr_image |= (1L << SOFTACK_MSHIFT);
Jack Steiner2a919592011-05-11 12:50:28 -05001089 if (is_uv2_hub()) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001090 mmr_image |= (1L << UV2_LEG_SHFT);
1091 mmr_image |= (1L << UV2_EXT_SHFT);
Jack Steiner2a919592011-05-11 12:50:28 -05001092 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001093 write_mmr_misc_control(pnode, mmr_image);
Cliff Wickman18129242008-06-02 08:56:14 -05001094 }
Cliff Wickman18129242008-06-02 08:56:14 -05001095}
1096
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001097static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
Cliff Wickman18129242008-06-02 08:56:14 -05001098{
1099 if (*offset < num_possible_cpus())
1100 return offset;
1101 return NULL;
1102}
1103
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001104static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
Cliff Wickman18129242008-06-02 08:56:14 -05001105{
1106 (*offset)++;
1107 if (*offset < num_possible_cpus())
1108 return offset;
1109 return NULL;
1110}
1111
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001112static void ptc_seq_stop(struct seq_file *file, void *data)
Cliff Wickman18129242008-06-02 08:56:14 -05001113{
1114}
1115
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001116static inline unsigned long long usec_2_cycles(unsigned long microsec)
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001117{
1118 unsigned long ns;
1119 unsigned long long cyc;
1120
Cliff Wickman12a66112010-06-02 16:22:01 -05001121 ns = microsec * 1000;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001122 cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
1123 return cyc;
1124}
1125
Cliff Wickman18129242008-06-02 08:56:14 -05001126/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001127 * Display the statistics thru /proc/sgi_uv/ptc_statistics
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001128 * 'data' points to the cpu number
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001129 * Note: see the descriptions in stat_description[].
Cliff Wickman18129242008-06-02 08:56:14 -05001130 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001131static int ptc_seq_show(struct seq_file *file, void *data)
Cliff Wickman18129242008-06-02 08:56:14 -05001132{
1133 struct ptc_stats *stat;
1134 int cpu;
1135
1136 cpu = *(loff_t *)data;
Cliff Wickman18129242008-06-02 08:56:14 -05001137 if (!cpu) {
1138 seq_printf(file,
Cliff Wickman450a0072010-06-02 16:22:02 -05001139 "# cpu sent stime self locals remotes ncpus localhub ");
Cliff Wickman18129242008-06-02 08:56:14 -05001140 seq_printf(file,
Cliff Wickman450a0072010-06-02 16:22:02 -05001141 "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
1142 seq_printf(file,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001143 "numuvhubs4 numuvhubs2 numuvhubs1 dto retries rok ");
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001144 seq_printf(file,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001145 "resetp resett giveup sto bz throt swack recv rtime ");
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001146 seq_printf(file,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001147 "all one mult none retry canc nocan reset rcan ");
Cliff Wickman50fb55a2010-06-02 16:22:02 -05001148 seq_printf(file,
1149 "disable enable\n");
Cliff Wickman18129242008-06-02 08:56:14 -05001150 }
1151 if (cpu < num_possible_cpus() && cpu_online(cpu)) {
1152 stat = &per_cpu(ptcstats, cpu);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001153 /* source side statistics */
1154 seq_printf(file,
1155 "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
1156 cpu, stat->s_requestor, cycles_2_us(stat->s_time),
Cliff Wickman450a0072010-06-02 16:22:02 -05001157 stat->s_ntargself, stat->s_ntarglocals,
1158 stat->s_ntargremotes, stat->s_ntargcpu,
1159 stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
1160 stat->s_ntarguvhub, stat->s_ntarguvhub16);
1161 seq_printf(file, "%ld %ld %ld %ld %ld ",
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001162 stat->s_ntarguvhub8, stat->s_ntarguvhub4,
1163 stat->s_ntarguvhub2, stat->s_ntarguvhub1,
Cliff Wickman450a0072010-06-02 16:22:02 -05001164 stat->s_dtimeout);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001165 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
1166 stat->s_retry_messages, stat->s_retriesok,
1167 stat->s_resets_plug, stat->s_resets_timeout,
1168 stat->s_giveup, stat->s_stimeout,
1169 stat->s_busy, stat->s_throttles);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001170
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001171 /* destination side statistics */
1172 seq_printf(file,
Cliff Wickman50fb55a2010-06-02 16:22:02 -05001173 "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001174 read_gmmr_sw_ack(uv_cpu_to_pnode(cpu)),
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001175 stat->d_requestee, cycles_2_us(stat->d_time),
1176 stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
1177 stat->d_nomsg, stat->d_retries, stat->d_canceled,
1178 stat->d_nocanceled, stat->d_resets,
1179 stat->d_rcanceled);
Cliff Wickman50fb55a2010-06-02 16:22:02 -05001180 seq_printf(file, "%ld %ld\n",
1181 stat->s_bau_disabled, stat->s_bau_reenabled);
Cliff Wickman18129242008-06-02 08:56:14 -05001182 }
Cliff Wickman18129242008-06-02 08:56:14 -05001183 return 0;
1184}
1185
1186/*
 * Display the tunables through debugfs
1188 */
1189static ssize_t tunables_read(struct file *file, char __user *userbuf,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001190 size_t count, loff_t *ppos)
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001191{
Dan Carpenterb365a852010-09-29 10:41:05 +02001192 char *buf;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001193 int ret;
1194
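	/* report all nine tunables in one formatted buffer */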
Dan Carpenterb365a852010-09-29 10:41:05 +02001195 buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001196 "max_concur plugged_delay plugsb4reset",
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001197 "timeoutsb4reset ipi_reset_limit complete_threshold",
1198 "congested_response_us congested_reps congested_period",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001199 max_concurr, plugged_delay, plugsb4reset,
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001200 timeoutsb4reset, ipi_reset_limit, complete_threshold,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001201 congested_respns_us, congested_reps, congested_period);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001202
Dan Carpenterb365a852010-09-29 10:41:05 +02001203 if (!buf)
1204 return -ENOMEM;
1205
1206 ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
1207 kfree(buf);
1208 return ret;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001209}
1210
1211/*
 * Handle a write to /proc/sgi_uv/ptc_statistics:
 *  -1: reset the statistics
 *   0: display the meaning of the statistics
Cliff Wickman18129242008-06-02 08:56:14 -05001215 */
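/*
 * For example, typical shell usage (an illustrative sketch, using the
 * /proc path named above):
 *   echo -1 > /proc/sgi_uv/ptc_statistics    resets every cpu's counters
 *   echo 0 > /proc/sgi_uv/ptc_statistics     logs the field descriptions
 */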
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001216static ssize_t ptc_proc_write(struct file *file, const char __user *user,
1217 size_t count, loff_t *data)
Cliff Wickman18129242008-06-02 08:56:14 -05001218{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001219 int cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001220 int i;
1221 int elements;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001222 long input_arg;
Cliff Wickman18129242008-06-02 08:56:14 -05001223 char optstr[64];
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001224 struct ptc_stats *stat;
Cliff Wickman18129242008-06-02 08:56:14 -05001225
Cliff Wickmane7eb8722008-06-23 08:32:25 -05001226 if (count == 0 || count > sizeof(optstr))
Cliff Wickmancef53272008-06-19 11:16:24 -05001227 return -EINVAL;
Cliff Wickman18129242008-06-02 08:56:14 -05001228 if (copy_from_user(optstr, user, count))
1229 return -EFAULT;
1230 optstr[count - 1] = '\0';
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001231
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001232 if (strict_strtol(optstr, 10, &input_arg) < 0) {
Cliff Wickman18129242008-06-02 08:56:14 -05001233 printk(KERN_DEBUG "%s is invalid\n", optstr);
1234 return -EINVAL;
1235 }
1236
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001237 if (input_arg == 0) {
		elements = ARRAY_SIZE(stat_description);
Cliff Wickman18129242008-06-02 08:56:14 -05001239 printk(KERN_DEBUG "# cpu: cpu number\n");
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001240 printk(KERN_DEBUG "Sender statistics:\n");
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001241 for (i = 0; i < elements; i++)
1242 printk(KERN_DEBUG "%s\n", stat_description[i]);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001243 } else if (input_arg == -1) {
1244 for_each_present_cpu(cpu) {
1245 stat = &per_cpu(ptcstats, cpu);
1246 memset(stat, 0, sizeof(struct ptc_stats));
1247 }
Cliff Wickman18129242008-06-02 08:56:14 -05001248 }
1249
1250 return count;
1251}
1252
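/*
 * A minimal strtol substitute: accumulate leading decimal digits and
 * stop at the first non-digit (no sign or overflow handling).
 */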
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001253static int local_atoi(const char *name)
1254{
1255 int val = 0;
1256
1257 for (;; name++) {
1258 switch (*name) {
1259 case '0' ... '9':
1260 val = 10*val+(*name-'0');
1261 break;
1262 default:
1263 return val;
1264 }
1265 }
1266}
1267
1268/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001269 * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
1270 * Zero values reset them to defaults.
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001271 */
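/*
 * For example (illustrative values only), all nine tunables can be set
 * at once, in the order reported by tunables_read():
 *   echo "8 10 100 100 1000 5 1000 10 20" \
 *        > /sys/kernel/debug/sgi_uv/bau_tunables
 */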
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001272static int parse_tunables_write(struct bau_control *bcp, char *instr,
1273 int count)
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001274{
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001275 char *p;
1276 char *q;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001277 int cnt = 0;
1278 int val;
	int e = ARRAY_SIZE(tunables);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001280
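	/* first pass: just count the whitespace-separated values */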
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001281 p = instr + strspn(instr, WHITESPACE);
1282 q = p;
1283 for (; *p; p = q + strspn(q, WHITESPACE)) {
1284 q = p + strcspn(p, WHITESPACE);
1285 cnt++;
1286 if (q == p)
1287 break;
1288 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001289 if (cnt != e) {
1290 printk(KERN_INFO "bau tunable error: should be %d values\n", e);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001291 return -EINVAL;
1292 }
1293
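	/* second pass: parse each value and apply it */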
1294 p = instr + strspn(instr, WHITESPACE);
1295 q = p;
1296 for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
1297 q = p + strcspn(p, WHITESPACE);
1298 val = local_atoi(p);
1299 switch (cnt) {
1300 case 0:
1301 if (val == 0) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001302 max_concurr = MAX_BAU_CONCURRENT;
1303 max_concurr_const = MAX_BAU_CONCURRENT;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001304 continue;
1305 }
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001306 if (val < 1 || val > bcp->cpus_in_uvhub) {
1307 printk(KERN_DEBUG
1308 "Error: BAU max concurrent %d is invalid\n",
1309 val);
1310 return -EINVAL;
1311 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001312 max_concurr = val;
1313 max_concurr_const = val;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001314 continue;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001315 default:
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001316 if (val == 0)
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001317 *tunables[cnt].tunp = tunables[cnt].deflt;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001318 else
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001319 *tunables[cnt].tunp = val;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001320 continue;
1321 }
1322 if (q == p)
1323 break;
1324 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001325 return 0;
1326}
1327
1328/*
1329 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
1330 */
1331static ssize_t tunables_write(struct file *file, const char __user *user,
1332 size_t count, loff_t *data)
1333{
1334 int cpu;
1335 int ret;
1336 char instr[100];
1337 struct bau_control *bcp;
1338
1339 if (count == 0 || count > sizeof(instr)-1)
1340 return -EINVAL;
1341 if (copy_from_user(instr, user, count))
1342 return -EFAULT;
1343
1344 instr[count] = '\0';
1345
cpw@sgi.com00b30cf2011-06-21 07:21:26 -05001346 cpu = get_cpu();
1347 bcp = &per_cpu(bau_control, cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001348 ret = parse_tunables_write(bcp, instr, count);
cpw@sgi.com00b30cf2011-06-21 07:21:26 -05001349 put_cpu();
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001350 if (ret)
1351 return ret;
1352
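	/* push the values parsed above out to every cpu's bau_control */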
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001353 for_each_present_cpu(cpu) {
1354 bcp = &per_cpu(bau_control, cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001355 bcp->max_concurr = max_concurr;
1356 bcp->max_concurr_const = max_concurr;
1357 bcp->plugged_delay = plugged_delay;
1358 bcp->plugsb4reset = plugsb4reset;
1359 bcp->timeoutsb4reset = timeoutsb4reset;
1360 bcp->ipi_reset_limit = ipi_reset_limit;
1361 bcp->complete_threshold = complete_threshold;
1362 bcp->cong_response_us = congested_respns_us;
1363 bcp->cong_reps = congested_reps;
1364 bcp->cong_period = congested_period;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001365 }
1366 return count;
1367}
1368
Cliff Wickman18129242008-06-02 08:56:14 -05001369static const struct seq_operations uv_ptc_seq_ops = {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001370 .start = ptc_seq_start,
1371 .next = ptc_seq_next,
1372 .stop = ptc_seq_stop,
1373 .show = ptc_seq_show
Cliff Wickman18129242008-06-02 08:56:14 -05001374};
1375
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001376static int ptc_proc_open(struct inode *inode, struct file *file)
Cliff Wickman18129242008-06-02 08:56:14 -05001377{
1378 return seq_open(file, &uv_ptc_seq_ops);
1379}
1380
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001381static int tunables_open(struct inode *inode, struct file *file)
1382{
1383 return 0;
1384}
1385
Cliff Wickman18129242008-06-02 08:56:14 -05001386static const struct file_operations proc_uv_ptc_operations = {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001387 .open = ptc_proc_open,
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001388 .read = seq_read,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001389 .write = ptc_proc_write,
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001390 .llseek = seq_lseek,
1391 .release = seq_release,
Cliff Wickman18129242008-06-02 08:56:14 -05001392};
1393
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001394static const struct file_operations tunables_fops = {
1395 .open = tunables_open,
1396 .read = tunables_read,
1397 .write = tunables_write,
Arnd Bergmann6038f372010-08-15 18:52:59 +02001398 .llseek = default_llseek,
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001399};
1400
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001401static int __init uv_ptc_init(void)
Cliff Wickman18129242008-06-02 08:56:14 -05001402{
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001403 struct proc_dir_entry *proc_uv_ptc;
Cliff Wickman18129242008-06-02 08:56:14 -05001404
1405 if (!is_uv_system())
1406 return 0;
1407
Alexey Dobriyan10f02d112009-08-23 23:17:27 +04001408 proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
1409 &proc_uv_ptc_operations);
Cliff Wickman18129242008-06-02 08:56:14 -05001410 if (!proc_uv_ptc) {
1411 printk(KERN_ERR "unable to create %s proc entry\n",
1412 UV_PTC_BASENAME);
1413 return -EINVAL;
1414 }
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001415
1416 tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
1417 if (!tunables_dir) {
1418 printk(KERN_ERR "unable to create debugfs directory %s\n",
1419 UV_BAU_TUNABLES_DIR);
1420 return -EINVAL;
1421 }
1422 tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001423 tunables_dir, NULL, &tunables_fops);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001424 if (!tunables_file) {
1425 printk(KERN_ERR "unable to create debugfs file %s\n",
1426 UV_BAU_TUNABLES_FILE);
1427 return -EINVAL;
1428 }
Cliff Wickman18129242008-06-02 08:56:14 -05001429 return 0;
1430}
1431
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001432/*
 * Initialize the sending side's activation descriptors (its sending buffers).
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001434 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001435static void activation_descriptor_init(int node, int pnode, int base_pnode)
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001436{
1437 int i;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001438 int cpu;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001439 unsigned long pa;
1440 unsigned long m;
1441 unsigned long n;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001442 size_t dsize;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001443 struct bau_desc *bau_desc;
1444 struct bau_desc *bd2;
1445 struct bau_control *bcp;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001446
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001447 /*
	 * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
	 * descriptors per cpu, and ADP_SZ cpu slots per uvhub
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001450 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001451 dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
1452 bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001453 BUG_ON(!bau_desc);
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001454
	pa = uv_gpa(bau_desc); /* need the real nasid */
1456 n = pa >> uv_nshift;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001457 m = pa & uv_mmask;
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001458
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001459 /* the 14-bit pnode */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001460 write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001461 /*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001462 * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001463 * cpu even though we only use the first one; one descriptor can
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001464 * describe a broadcast to 256 uv hubs.
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001465 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001466 for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001467 memset(bd2, 0, sizeof(struct bau_desc));
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001468 bd2->header.swack_flag = 1;
Cliff Wickman94ca8e42009-04-14 10:56:48 -05001469 /*
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001470 * The base_dest_nasid set in the message header is the nasid
1471 * of the first uvhub in the partition. The bit map will
1472 * indicate destination pnode numbers relative to that base.
1473 * They may not be consecutive if nasid striding is being used.
Cliff Wickman94ca8e42009-04-14 10:56:48 -05001474 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001475 bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
1476 bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
1477 bd2->header.command = UV_NET_ENDPOINT_INTD;
1478 bd2->header.int_both = 1;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001479 /*
		 * all other fields need to be set to zero:
		 * fairness, chaining, multilevel, count, replied_to
1482 */
1483 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001484 for_each_present_cpu(cpu) {
1485 if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
1486 continue;
1487 bcp = &per_cpu(bau_control, cpu);
1488 bcp->descriptor_base = bau_desc;
1489 }
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001490}
1491
1492/*
 * Initialize the destination side's receiving buffers;
 * entered once for each uvhub in the partition.
1495 * - node is first node (kernel memory notion) on the uvhub
1496 * - pnode is the uvhub's physical identifier
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001497 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001498static void pq_init(int node, int pnode)
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001499{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001500 int cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001501 size_t plsize;
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001502 char *cp;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001503 void *vp;
1504 unsigned long pn;
1505 unsigned long first;
1506 unsigned long pn_first;
1507 unsigned long last;
1508 struct bau_pq_entry *pqp;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001509 struct bau_control *bcp;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001510
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001511 plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
1512 vp = kmalloc_node(plsize, GFP_KERNEL, node);
1513 pqp = (struct bau_pq_entry *)vp;
Ingo Molnardc163a42008-06-18 14:15:43 +02001514 BUG_ON(!pqp);
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001515
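	/* round the queue pointer up to a 32-byte boundary */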
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001516 cp = (char *)pqp + 31;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001517 pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001518
1519 for_each_present_cpu(cpu) {
1520 if (pnode != uv_cpu_to_pnode(cpu))
1521 continue;
1522 /* for every cpu on this pnode: */
1523 bcp = &per_cpu(bau_control, cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001524 bcp->queue_first = pqp;
1525 bcp->bau_msg_head = pqp;
1526 bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001527 }
Cliff Wickman4ea3c512009-04-16 07:53:09 -05001528 /*
	 * need the pnode of the node where the memory was really allocated
1530 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001531 pn = uv_gpa(pqp) >> uv_nshift;
1532 first = uv_physnodeaddr(pqp);
1533 pn_first = ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | first;
1534 last = uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1));
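	/* program the hub with the queue's first, tail and last addresses */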
1535 write_mmr_payload_first(pnode, pn_first);
1536 write_mmr_payload_tail(pnode, first);
1537 write_mmr_payload_last(pnode, last);
1538
	/* in effect, all msg_types are set to MSG_NOOP */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001540 memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001541}
1542
1543/*
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001544 * Initialization of each UV hub's structures
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001545 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001546static void __init init_uvhub(int uvhub, int vector, int base_pnode)
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001547{
Cliff Wickman9674f352009-04-03 08:34:05 -05001548 int node;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001549 int pnode;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001550 unsigned long apicid;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001551
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001552 node = uvhub_to_first_node(uvhub);
1553 pnode = uv_blade_to_pnode(uvhub);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001554
1555 activation_descriptor_init(node, pnode, base_pnode);
1556
1557 pq_init(node, pnode);
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001558 /*
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001559 * The below initialization can't be in firmware because the
1560 * messaging IRQ will be determined by the OS.
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001561 */
Dimitri Sivanich8191c9f2010-11-16 16:23:52 -06001562 apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001563 write_mmr_data_config(pnode, ((apicid << 32) | vector));
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001564}
1565
1566/*
Cliff Wickman12a66112010-06-02 16:22:01 -05001567 * We will set BAU_MISC_CONTROL with a timeout period.
1568 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001569 * So the destination timeout period has to be calculated from them.
Cliff Wickman12a66112010-06-02 16:22:01 -05001570 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001571static int calculate_destination_timeout(void)
Cliff Wickman12a66112010-06-02 16:22:01 -05001572{
1573 unsigned long mmr_image;
1574 int mult1;
1575 int mult2;
1576 int index;
1577 int base;
1578 int ret;
1579 unsigned long ts_ns;
1580
Jack Steiner2a919592011-05-11 12:50:28 -05001581 if (is_uv1_hub()) {
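		/*
		 * UV1: the ns timeout is the base period (selected by
		 * the urgency7 index) times the soft-ack and transaction
		 * multipliers; the result is returned in microseconds
		 */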
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001582 mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
Jack Steiner2a919592011-05-11 12:50:28 -05001583 mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
1584 index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
1585 mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
1586 mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
1587 base = timeout_base_ns[index];
1588 ts_ns = base * mult1 * mult2;
1589 ret = ts_ns / 1000;
1590 } else {
		/* 4 bits: one bit selects 10us vs 80us units, 3 bits of multiplier */
1592 mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
1593 mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001594 if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
Jack Steiner2a919592011-05-11 12:50:28 -05001595 mult1 = 80;
1596 else
1597 mult1 = 10;
1598 base = mmr_image & UV2_ACK_MASK;
1599 ret = mult1 * base;
1600 }
Cliff Wickman12a66112010-06-02 16:22:01 -05001601 return ret;
1602}
1603
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001604static void __init init_per_cpu_tunables(void)
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001605{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001606 int cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001607 struct bau_control *bcp;
1608
1609 for_each_present_cpu(cpu) {
1610 bcp = &per_cpu(bau_control, cpu);
1611 bcp->baudisabled = 0;
1612 bcp->statp = &per_cpu(ptcstats, cpu);
1613 /* time interval to catch a hardware stay-busy bug */
1614 bcp->timeout_interval = usec_2_cycles(2*timeout_us);
1615 bcp->max_concurr = max_concurr;
1616 bcp->max_concurr_const = max_concurr;
1617 bcp->plugged_delay = plugged_delay;
1618 bcp->plugsb4reset = plugsb4reset;
1619 bcp->timeoutsb4reset = timeoutsb4reset;
1620 bcp->ipi_reset_limit = ipi_reset_limit;
1621 bcp->complete_threshold = complete_threshold;
1622 bcp->cong_response_us = congested_respns_us;
1623 bcp->cong_reps = congested_reps;
1624 bcp->cong_period = congested_period;
1625 }
1626}
1627
1628/*
1629 * Scan all cpus to collect blade and socket summaries.
1630 */
1631static int __init get_cpu_topology(int base_pnode,
1632 struct uvhub_desc *uvhub_descs,
1633 unsigned char *uvhub_mask)
1634{
1635 int cpu;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001636 int pnode;
1637 int uvhub;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001638 int socket;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001639 struct bau_control *bcp;
1640 struct uvhub_desc *bdp;
1641 struct socket_desc *sdp;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001642
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001643 for_each_present_cpu(cpu) {
1644 bcp = &per_cpu(bau_control, cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001645
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001646 memset(bcp, 0, sizeof(struct bau_control));
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001647
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001648 pnode = uv_cpu_hub_info(cpu)->pnode;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001649 if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001650 printk(KERN_EMERG
1651 "cpu %d pnode %d-%d beyond %d; BAU disabled\n",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001652 cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001653 return 1;
1654 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001655
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001656 bcp->osnode = cpu_to_node(cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001657 bcp->partition_base_pnode = base_pnode;
1658
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001659 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05001660 *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001661 bdp = &uvhub_descs[uvhub];
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001662
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001663 bdp->num_cpus++;
1664 bdp->uvhub = uvhub;
1665 bdp->pnode = pnode;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001666
		/*
		 * kludge: assume one node per socket, and that disabling
		 * a socket just leaves a gap in the node numbers
		 */
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001669 socket = bcp->osnode & 1;
Cliff Wickmana8328ee2010-06-02 16:22:02 -05001670 bdp->socket_mask |= (1 << socket);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001671 sdp = &bdp->socket[socket];
1672 sdp->cpu_number[sdp->num_cpus] = cpu;
1673 sdp->num_cpus++;
Cliff Wickmancfa60912011-01-03 12:03:53 -06001674 if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001675 printk(KERN_EMERG "%d cpus per socket invalid\n",
1676 sdp->num_cpus);
Cliff Wickmancfa60912011-01-03 12:03:53 -06001677 return 1;
1678 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001679 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001680 return 0;
1681}
1682
1683/*
 * Each socket gets a local array of pnodes/hubs.
1685 */
1686static void make_per_cpu_thp(struct bau_control *smaster)
1687{
1688 int cpu;
1689 size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();
1690
	smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
	BUG_ON(!smaster->thp);
	memset(smaster->thp, 0, hpsz);
1693 for_each_present_cpu(cpu) {
1694 smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
1695 smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
1696 }
1697}
1698
1699/*
1700 * Initialize all the per_cpu information for the cpu's on a given socket,
1701 * given what has been gathered into the socket_desc struct.
1702 * And reports the chosen hub and socket masters back to the caller.
1703 */
1704static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
1705 struct bau_control **smasterp,
1706 struct bau_control **hmasterp)
1707{
1708 int i;
1709 int cpu;
1710 struct bau_control *bcp;
1711
1712 for (i = 0; i < sdp->num_cpus; i++) {
1713 cpu = sdp->cpu_number[i];
1714 bcp = &per_cpu(bau_control, cpu);
1715 bcp->cpu = cpu;
1716 if (i == 0) {
1717 *smasterp = bcp;
1718 if (!(*hmasterp))
1719 *hmasterp = bcp;
1720 }
1721 bcp->cpus_in_uvhub = bdp->num_cpus;
1722 bcp->cpus_in_socket = sdp->num_cpus;
1723 bcp->socket_master = *smasterp;
1724 bcp->uvhub = bdp->uvhub;
1725 bcp->uvhub_master = *hmasterp;
1726 bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
1727 if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
1728 printk(KERN_EMERG "%d cpus per uvhub invalid\n",
1729 bcp->uvhub_cpu);
1730 return 1;
1731 }
1732 }
1733 return 0;
1734}
1735
1736/*
1737 * Summarize the blade and socket topology into the per_cpu structures.
1738 */
1739static int __init summarize_uvhub_sockets(int nuvhubs,
1740 struct uvhub_desc *uvhub_descs,
1741 unsigned char *uvhub_mask)
1742{
1743 int socket;
1744 int uvhub;
1745 unsigned short socket_mask;
1746
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05001747 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001748 struct uvhub_desc *bdp;
1749 struct bau_control *smaster = NULL;
1750 struct bau_control *hmaster = NULL;
1751
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05001752 if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
1753 continue;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001754
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001755 bdp = &uvhub_descs[uvhub];
Cliff Wickmana8328ee2010-06-02 16:22:02 -05001756 socket_mask = bdp->socket_mask;
1757 socket = 0;
1758 while (socket_mask) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001759 struct socket_desc *sdp;
1760 if ((socket_mask & 1)) {
1761 sdp = &bdp->socket[socket];
1762 if (scan_sock(sdp, bdp, &smaster, &hmaster))
Cliff Wickmancfa60912011-01-03 12:03:53 -06001763 return 1;
cpw@sgi.com9c9153d2011-06-21 07:21:28 -05001764 make_per_cpu_thp(smaster);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001765 }
1766 socket++;
Cliff Wickmana8328ee2010-06-02 16:22:02 -05001767 socket_mask = (socket_mask >> 1);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001768 }
1769 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001770 return 0;
1771}
1772
1773/*
1774 * initialize the bau_control structure for each cpu
1775 */
1776static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
1777{
1778 unsigned char *uvhub_mask;
1779 void *vp;
1780 struct uvhub_desc *uvhub_descs;
1781
1782 timeout_us = calculate_destination_timeout();
1783
	vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
	uvhub_descs = (struct uvhub_desc *)vp;
	BUG_ON(!uvhub_descs);
	memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
	uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
	BUG_ON(!uvhub_mask);

	if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
		goto fail;

	if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
		goto fail;

	kfree(uvhub_descs);
	kfree(uvhub_mask);
	init_per_cpu_tunables();
	return 0;

fail:
	/* don't leak the temporary topology summaries on failure */
	kfree(uvhub_descs);
	kfree(uvhub_mask);
	return 1;
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001799}
Cliff Wickman18129242008-06-02 08:56:14 -05001800
1801/*
1802 * Initialization of BAU-related structures
1803 */
Cliff Wickmanb194b1202008-06-12 08:23:48 -05001804static int __init uv_bau_init(void)
Cliff Wickman18129242008-06-02 08:56:14 -05001805{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001806 int uvhub;
1807 int pnode;
1808 int nuvhubs;
Rusty Russell2c74d662009-03-18 08:22:30 +10301809 int cur_cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001810 int cpus;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001811 int vector;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001812 cpumask_var_t *mask;
Cliff Wickman18129242008-06-02 08:56:14 -05001813
1814 if (!is_uv_system())
1815 return 0;
1816
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001817 if (nobau)
1818 return 0;
1819
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001820 for_each_possible_cpu(cur_cpu) {
1821 mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
1822 zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
1823 }
Rusty Russell76ba0ec2009-03-13 14:49:57 +10301824
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001825 uv_nshift = uv_hub_info->m_val;
Robin Holt036ed8b2009-10-15 17:40:00 -05001826 uv_mmask = (1UL << uv_hub_info->m_val) - 1;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001827 nuvhubs = uv_num_possible_blades();
Cliff Wickman50fb55a2010-06-02 16:22:02 -05001828 spin_lock_init(&disable_lock);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001829 congested_cycles = usec_2_cycles(congested_respns_us);
Cliff Wickman9674f352009-04-03 08:34:05 -05001830
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001831 uv_base_pnode = 0x7fffffff;
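	/* find the lowest pnode among the blades that have cpus */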
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001832 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001833 cpus = uv_blade_nr_possible_cpus(uvhub);
1834 if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
1835 uv_base_pnode = uv_blade_to_pnode(uvhub);
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001836 }
1837
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001838 if (init_per_cpu(nuvhubs, uv_base_pnode)) {
Cliff Wickmancfa60912011-01-03 12:03:53 -06001839 nobau = 1;
1840 return 0;
1841 }
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001842
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001843 vector = UV_BAU_MESSAGE;
1844 for_each_possible_blade(uvhub)
1845 if (uv_blade_nr_possible_cpus(uvhub))
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001846 init_uvhub(uvhub, vector, uv_base_pnode);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001847
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001848 enable_timeouts();
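	/* install the interrupt handler for incoming BAU messages */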
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001849 alloc_intr_gate(vector, uv_bau_message_intr1);
1850
1851 for_each_possible_blade(uvhub) {
Cliff Wickman93a7ca02010-07-16 10:11:21 -05001852 if (uv_blade_nr_possible_cpus(uvhub)) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001853 unsigned long val;
1854 unsigned long mmr;
Cliff Wickman93a7ca02010-07-16 10:11:21 -05001855 pnode = uv_blade_to_pnode(uvhub);
1856 /* INIT the bau */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001857 val = 1L << 63;
1858 write_gmmr_activation(pnode, val);
Cliff Wickman93a7ca02010-07-16 10:11:21 -05001859 mmr = 1; /* should be 1 to broadcast to both sockets */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001860 write_mmr_data_broadcast(pnode, mmr);
Cliff Wickman93a7ca02010-07-16 10:11:21 -05001861 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001862 }
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001863
Cliff Wickman18129242008-06-02 08:56:14 -05001864 return 0;
1865}
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001866core_initcall(uv_bau_init);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001867fs_initcall(uv_ptc_init);