/*
 *	SGI UltraViolet TLB flush routines.
 *
 *	(c) 2008-2011 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>

/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {
		20,
		160,
		1280,
		10240,
		81920,
		655360,
		5242880,
		167772160
};
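/*
 * Note: each base timeout above is 8 times the previous one, except the
 * last, which jumps by a factor of 32; the 3-bit urgency field selects
 * the entry, e.g. urgency 2 gives a 1280ns base.
 */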

static int timeout_us;
static int nobau;
static int baudisabled;
static spinlock_t disable_lock;
static cycles_t congested_cycles;

/* tunables: */
static int max_concurr		= MAX_BAU_CONCURRENT;
static int max_concurr_const	= MAX_BAU_CONCURRENT;
static int plugged_delay	= PLUGGED_DELAY;
static int plugsb4reset		= PLUGSB4RESET;
static int timeoutsb4reset	= TIMEOUTSB4RESET;
static int ipi_reset_limit	= IPI_RESET_LIMIT;
static int complete_threshold	= COMPLETE_THRESHOLD;
static int congested_respns_us	= CONGESTED_RESPONSE_US;
static int congested_reps	= CONGESTED_REPS;
static int congested_period	= CONGESTED_PERIOD;

static struct tunables tunables[] = {
	{&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
	{&plugged_delay, PLUGGED_DELAY},
	{&plugsb4reset, PLUGSB4RESET},
	{&timeoutsb4reset, TIMEOUTSB4RESET},
	{&ipi_reset_limit, IPI_RESET_LIMIT},
	{&complete_threshold, COMPLETE_THRESHOLD},
	{&congested_respns_us, CONGESTED_RESPONSE_US},
	{&congested_reps, CONGESTED_REPS},
	{&congested_period, CONGESTED_PERIOD}
};

static struct dentry *tunables_dir;
static struct dentry *tunables_file;

/* these correspond to the statistics printed by ptc_seq_show() */
static char *stat_description[] = {
	"sent: number of shootdown messages sent",
	"stime: time spent sending messages",
	"numuvhubs: number of hubs targeted with shootdown",
	"numuvhubs16: number times 16 or more hubs targeted",
	"numuvhubs8: number times 8 or more hubs targeted",
	"numuvhubs4: number times 4 or more hubs targeted",
	"numuvhubs2: number times 2 or more hubs targeted",
	"numuvhubs1: number times 1 hub targeted",
	"numcpus: number of cpus targeted with shootdown",
	"dto: number of destination timeouts",
	"retries: destination timeout retries sent",
	"rok: destination timeouts successfully retried",
	"resetp: ipi-style resource resets for plugs",
	"resett: ipi-style resource resets for timeouts",
	"giveup: fall-backs to ipi-style shootdowns",
	"sto: number of source timeouts",
	"bz: number of stay-busy's",
	"throt: number times spun in throttle",
	"swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
	"recv: shootdown messages received",
	"rtime: time spent processing messages",
	"all: shootdown all-tlb messages",
	"one: shootdown one-tlb messages",
	"mult: interrupts that found multiple messages",
	"none: interrupts that found no messages",
	"retry: number of retry messages processed",
	"canc: number messages canceled by retries",
	"nocan: number retries that found nothing to cancel",
	"reset: number of ipi-style reset requests processed",
	"rcan: number messages canceled by reset requests",
	"disable: number times use of the BAU was disabled",
	"enable: number times use of the BAU was re-enabled"
};

static int __init
setup_nobau(char *arg)
{
	nobau = 1;
	return 0;
}
early_param("nobau", setup_nobau);

/* base pnode in this partition */
static int uv_base_pnode __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);

/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */
static int __init uvhub_to_first_node(int uvhub)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (uvhub == b)
			return node;
	}
	return -1;
}

/*
 * Determine the apicid of the first cpu on a uvhub.
 */
static int __init uvhub_to_first_apicid(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
						int do_acknowledge)
{
	unsigned long dw;
	struct bau_pq_entry *msg;

	msg = mdp->msg;
	if (!msg->canceled && do_acknowledge) {
		dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
		write_mmr_sw_ack(dw);
	}
	msg->replied_to = 1;
	msg->swack_vec = 0;
}
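
/*
 * Note: the single write above frees the resource in either state: the
 * low bits clear the Pending bit and, UV_SW_ACK_NPENDING positions
 * higher, the same vector clears the Timeout bit for a message whose
 * resource had already timed out.
 */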

/*
 * Process the receipt of a RETRY message
 */
static void bau_process_retry_msg(struct msg_desc *mdp,
					struct bau_control *bcp)
{
	int i;
	int cancel_count = 0;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *msg2;
	struct ptc_stats *stat = bcp->statp;

	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->queue_last)
			msg2 = mdp->queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->swack_vec) && ((msg2->swack_vec &
			msg->swack_vec) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			mmr = read_mmr_sw_ack();
			msg_res = msg2->swack_vec;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				unsigned long mr;
				/*
				 * Is the resource timed out?
				 * Make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
				write_mmr_sw_ack(mr);
			}
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
						int do_acknowledge)
{
	short socket_ack_count = 0;
	short *sp;
	struct atomic_short *asp;
	struct ptc_stats *stat = bcp->statp;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		stat->d_alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried. That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a swack message, so we have to reply to it.
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
	asp = (struct atomic_short *)sp;
	socket_ack_count = atom_asr(1, asp);
	if (socket_ack_count == bcp->cpus_in_socket) {
		int msg_ack_count;
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
		asp = (struct atomic_short *)&msg->acknowledge_count;
		msg_ack_count = atom_asr(socket_ack_count, asp);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 * (unless we are in the UV2 workaround)
			 */
			reply_to_message(mdp, bcp, do_acknowledge);
		}
	}

	return;
}
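
/*
 * Note: the two-level counting above works because atom_asr() returns
 * the post-add value, so exactly one cpu sees the count reach
 * cpus_in_socket (and then cpus_in_uvhub) and sends the single reply.
 */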

/*
 * Determine the first cpu on a pnode.
 */
static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
{
	int cpu;
	struct hub_and_pnode *hpp;

	for_each_present_cpu(cpu) {
		hpp = &smaster->thp[cpu];
		if (pnode == hpp->pnode)
			return cpu;
	}
	return -1;
}

/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero swack_vec field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * swack resources.
 */
static void do_reset(void *ptr)
{
	int i;
	struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
	struct reset_args *rap = (struct reset_args *)ptr;
	struct bau_pq_entry *msg;
	struct ptc_stats *stat = bcp->statp;

	stat->d_resets++;
	/*
	 * We're looking for the given sender, and
	 * will free its swack resource.
	 * If all cpu's finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		unsigned long msg_res;
		/* do_reset: same conditions for cancellation as
		   bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->swack_vec) &&
		    (msg->msg_type != MSG_NOOP)) {
			unsigned long mmr;
			unsigned long mr;
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			/*
			 * only reset the resource if it is still pending
			 */
			mmr = read_mmr_sw_ack();
			msg_res = msg->swack_vec;
			mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
			if (mmr & msg_res) {
				stat->d_rcanceled++;
				write_mmr_sw_ack(mr);
			}
		}
	}
	return;
}

/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */
static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
{
	int pnode;
	int apnode;
	int maskbits;
	int sender = bcp->cpu;
	cpumask_t *mask = bcp->uvhub_master->cpumask;
	struct bau_control *smaster = bcp->socket_master;
	struct reset_args reset_args;

	reset_args.sender = sender;
	cpus_clear(*mask);
	/* find a single cpu for each uvhub in this distribution mask */
	maskbits = sizeof(struct pnmask) * BITSPERBYTE;
	/* each bit is a pnode relative to the partition base pnode */
	for (pnode = 0; pnode < maskbits; pnode++) {
		int cpu;
		if (!bau_uvhub_isset(pnode, distribution))
			continue;
		apnode = pnode + bcp->partition_base_pnode;
		cpu = pnode_to_first_cpu(apnode, smaster);
		cpu_set(cpu, *mask);
	}

	/* IPI all cpus; preemption is already disabled */
	smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
	return;
}
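
/*
 * Note: the final argument to smp_call_function_many() above is wait=1,
 * so reset_with_ipi() does not return until every selected cpu has
 * finished running do_reset(); reset_args can therefore safely live on
 * this stack frame.
 */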

static inline unsigned long cycles_2_us(unsigned long long cyc)
{
	unsigned long long ns;
	unsigned long us;
	int cpu = smp_processor_id();

	ns = (cyc * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR;
	us = ns / 1000;
	return us;
}
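
/*
 * cyc2ns is the usual per-cpu fixed-point multiplier:
 * ns ~= cyc * cyc2ns / 2^CYC2NS_SCALE_FACTOR. E.g. with a 2GHz tsc the
 * multiplier is about 0.5 << CYC2NS_SCALE_FACTOR, so 2000 cycles
 * converts to roughly 1 us.
 */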

/*
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * bau_flush_send_and_wait()
 */
static inline void quiesce_local_uvhub(struct bau_control *hmaster)
{
	atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

/*
 * mark this quiet-requestor as done
 */
static inline void end_uvhub_quiesce(struct bau_control *hmaster)
{
	atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
{
	unsigned long descriptor_status;

	descriptor_status = uv_read_local_mmr(mmr_offset);
	descriptor_status >>= right_shift;
	descriptor_status &= UV_ACT_STATUS_MASK;
	return descriptor_status;
}

/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 */
static int uv1_wait_completion(struct bau_desc *bau_desc,
				unsigned long mmr_offset, int right_shift,
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_status;
	cycles_t ttm;
	struct ptc_stats *stat = bcp->statp;

	descriptor_status = uv1_read_status(mmr_offset, right_shift);
	/* spin on the status MMR, waiting for it to go idle */
	while ((descriptor_status != DS_IDLE)) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available. As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if (descriptor_status == DS_SOURCE_TIMEOUT) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
			stat->s_dtimeout++;
			ttm = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending. In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}

			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
		}
		descriptor_status = uv1_read_status(mmr_offset, right_shift);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

/*
 * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
 */
static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
{
	unsigned long descriptor_status;
	unsigned long descriptor_status2;

	descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
	descriptor_status2 = (read_mmr_uv2_status() >> desc) & 0x1UL;
	descriptor_status = (descriptor_status << 1) | descriptor_status2;
	return descriptor_status;
}
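
/*
 * I.e. the 3-bit UV2 status is composed as:
 *	(2 bits from ACTIVATION_STATUS_0/1) << 1 | (1 bit from STATUS_2)
 * which is why a 2-bit value shifted left by one is what gets compared
 * with the UV2H_DESC_* codes (see normal_busy() below).
 */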

/*
 * Return whether the status of the descriptor that is normally used for this
 * cpu (the one indexed by its hub-relative cpu number) is busy.
 * The status of the original 32 descriptors is always reflected in the 64
 * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
 * The bit provided by the activation_status_2 register is irrelevant to
 * the status if it is only being tested for busy or not busy.
 */
int normal_busy(struct bau_control *bcp)
{
	int cpu = bcp->uvhub_cpu;
	int mmr_offset;
	int right_shift;

	mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
	right_shift = cpu * UV_ACT_STATUS_SIZE;
	return (((((read_lmmr(mmr_offset) >> right_shift) &
				UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
}

/*
 * Entered when a bau descriptor has gone into a permanent busy wait because
 * of a hardware bug.
 * Work around the bug.
 */
int handle_uv2_busy(struct bau_control *bcp)
{
	int busy_one = bcp->using_desc;
	int normal = bcp->uvhub_cpu;
	int selected = -1;
	int i;
	unsigned long descriptor_status;
	unsigned long status;
	int mmr_offset;
	struct bau_desc *bau_desc_old;
	struct bau_desc *bau_desc_new;
	struct bau_control *hmaster = bcp->uvhub_master;
	struct ptc_stats *stat = bcp->statp;
	cycles_t ttm;

	stat->s_uv2_wars++;
	spin_lock(&hmaster->uvhub_lock);
	/* try for the original first */
	if (busy_one != normal) {
		if (!normal_busy(bcp))
			selected = normal;
	}
	if (selected < 0) {
		/* can't use the normal, select an alternate */
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		descriptor_status = read_lmmr(mmr_offset);

		/* scan available descriptors 32-63 */
		for (i = 0; i < UV_CPUS_PER_AS; i++) {
			if ((hmaster->inuse_map & (1 << i)) == 0) {
				status = ((descriptor_status >>
						(i * UV_ACT_STATUS_SIZE)) &
						UV_ACT_STATUS_MASK) << 1;
				if (status != UV2H_DESC_BUSY) {
					selected = i + UV_CPUS_PER_AS;
					break;
				}
			}
		}
	}

	if (busy_one != normal)
		/* mark the busy alternate as not in-use */
		hmaster->inuse_map &= ~(1 << (busy_one - UV_CPUS_PER_AS));

	if (selected >= 0) {
		/* switch to the selected descriptor */
		if (selected != normal) {
			/* set the selected alternate as in-use */
			hmaster->inuse_map |=
					(1 << (selected - UV_CPUS_PER_AS));
			if (selected > stat->s_uv2_wars_hw)
				stat->s_uv2_wars_hw = selected;
		}
		bau_desc_old = bcp->descriptor_base;
		bau_desc_old += (ITEMS_PER_DESC * busy_one);
		bcp->using_desc = selected;
		bau_desc_new = bcp->descriptor_base;
		bau_desc_new += (ITEMS_PER_DESC * selected);
		*bau_desc_new = *bau_desc_old;
	} else {
		/*
		 * All are busy. Wait for the normal one for this cpu to
		 * free up.
		 */
		stat->s_uv2_war_waits++;
		spin_unlock(&hmaster->uvhub_lock);
		ttm = get_cycles();
		do {
			cpu_relax();
		} while (normal_busy(bcp));
		spin_lock(&hmaster->uvhub_lock);
		/* switch back to the original descriptor for this cpu,
		   copying the contents of the one that went busy */
		bau_desc_old = bcp->descriptor_base;
		bau_desc_old += (ITEMS_PER_DESC * busy_one);
		bcp->using_desc = normal;
		bau_desc_new = bcp->descriptor_base;
		bau_desc_new += (ITEMS_PER_DESC * normal);
		*bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
	}
	spin_unlock(&hmaster->uvhub_lock);
	return FLUSH_RETRY_BUSYBUG;
}
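
/*
 * FLUSH_RETRY_BUSYBUG is consumed in uv_flush_send_and_wait(): the send
 * loop re-reads bcp->using_desc (which handle_uv2_busy() may have
 * changed), rebuilds its descriptor pointer and resends the message.
 */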

static int uv2_wait_completion(struct bau_desc *bau_desc,
				unsigned long mmr_offset, int right_shift,
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_stat;
	cycles_t ttm;
	int desc = bcp->using_desc;
	long busy_reps = 0;
	struct ptc_stats *stat = bcp->statp;

	descriptor_stat = uv2_read_status(mmr_offset, right_shift, desc);

	/* spin on the status MMR, waiting for it to go idle */
	while (descriptor_stat != UV2H_DESC_IDLE) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available. As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) ||
		    (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) ||
		    (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
			stat->s_dtimeout++;
			ttm = get_cycles();
			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			busy_reps++;
			if (busy_reps > 1000000) {
				/* not to hammer on the clock */
				busy_reps = 0;
				ttm = get_cycles();
				if ((ttm - bcp->send_message) >
					(bcp->clocks_per_100_usec)) {
					return handle_uv2_busy(bcp);
				}
			}
			/*
			 * descriptor_stat is still BUSY
			 */
			cpu_relax();
		}
		descriptor_stat = uv2_read_status(mmr_offset, right_shift,
								desc);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

/*
 * There are 2 status registers; each an array[32] of 2 bits. Set up for
 * which register to read and position in that register based on cpu in
 * current hub.
 */
static int wait_completion(struct bau_desc *bau_desc,
				struct bau_control *bcp, long try)
{
	int right_shift;
	unsigned long mmr_offset;
	int desc = bcp->using_desc;

	if (desc < UV_CPUS_PER_AS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = desc * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
	}

	if (bcp->uvhub_version == 1)
		return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
								bcp, try);
	else
		return uv2_wait_completion(bau_desc, mmr_offset, right_shift,
								bcp, try);
}
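
/*
 * E.g. with UV_CPUS_PER_AS of 32 and 2-bit status fields, descriptor 5
 * is read from ACTIVATION_STATUS_0 at shift 10, while descriptor 35 is
 * read from ACTIVATION_STATUS_1 at shift (35 - 32) * 2 = 6.
 */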

static inline cycles_t sec_2_cycles(unsigned long sec)
{
	unsigned long ns;
	cycles_t cyc;

	ns = sec * 1000000000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}

/*
 * Our retries are blocked by all destination sw ack resources being
 * in use, and a timeout is pending. In that case hardware immediately
 * returns the ERROR that looks like a destination timeout.
 */
static void destination_plugged(struct bau_desc *bau_desc,
			struct bau_control *bcp,
			struct bau_control *hmaster, struct ptc_stats *stat)
{
	udelay(bcp->plugged_delay);
	bcp->plugged_tries++;

	if (bcp->plugged_tries >= bcp->plugsb4reset) {
		bcp->plugged_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_plug++;
	}
}

static void destination_timeout(struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)
{
	hmaster->max_concurr = 1;
	bcp->timeout_tries++;
	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
		bcp->timeout_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_timeout++;
	}
}

/*
 * Completions are taking a very long time due to a congested numalink
 * network.
 */
static void disable_for_congestion(struct bau_control *bcp,
					struct ptc_stats *stat)
{
	/* let only one cpu do this disabling */
	spin_lock(&disable_lock);

	if (!baudisabled && bcp->period_requests &&
	    ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
		int tcpu;
		struct bau_control *tbcp;
		/* it becomes this cpu's job to turn on the use of the
		   BAU again */
		baudisabled = 1;
		bcp->set_bau_off = 1;
		bcp->set_bau_on_time = get_cycles();
		bcp->set_bau_on_time += sec_2_cycles(bcp->cong_period);
		stat->s_bau_disabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			tbcp->baudisabled = 1;
		}
	}

	spin_unlock(&disable_lock);
}
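
/*
 * The cpu that set set_bau_off above is the one responsible for turning
 * the BAU back on once set_bau_on_time has passed; see check_enable()
 * below, which is called from uv_flush_tlb_others() while disabled.
 */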

static void count_max_concurr(int stat, struct bau_control *bcp,
				struct bau_control *hmaster)
{
	bcp->plugged_tries = 0;
	bcp->timeout_tries = 0;
	if (stat != FLUSH_COMPLETE)
		return;
	if (bcp->conseccompletes <= bcp->complete_threshold)
		return;
	if (hmaster->max_concurr >= hmaster->max_concurr_const)
		return;
	hmaster->max_concurr++;
}

static void record_send_stats(cycles_t time1, cycles_t time2,
		struct bau_control *bcp, struct ptc_stats *stat,
		int completion_status, int try)
{
	cycles_t elapsed;

	if (time2 > time1) {
		elapsed = time2 - time1;
		stat->s_time += elapsed;

		if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
			bcp->period_requests++;
			bcp->period_time += elapsed;
			if ((elapsed > congested_cycles) &&
			    (bcp->period_requests > bcp->cong_reps))
				disable_for_congestion(bcp, stat);
		}
	} else
		stat->s_requestor--;

	if (completion_status == FLUSH_COMPLETE && try > 1)
		stat->s_retriesok++;
	else if (completion_status == FLUSH_GIVEUP)
		stat->s_giveup++;
}

/*
 * Because of a uv1 hardware bug only a limited number of concurrent
 * requests can be made.
 */
static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
{
	spinlock_t *lock = &hmaster->uvhub_lock;
	atomic_t *v;

	v = &hmaster->active_descriptor_count;
	if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
		stat->s_throttles++;
		do {
			cpu_relax();
		} while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
	}
}
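
/*
 * atomic_inc_unless_ge() increments the active descriptor count only
 * while it is below max_concurr, so a sender spins here until a slot
 * frees up; the matching atomic_dec() is in uv_flush_send_and_wait().
 */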

/*
 * Handle the completion status of a message send.
 */
static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)
{
	if (completion_status == FLUSH_RETRY_PLUGGED)
		destination_plugged(bau_desc, bcp, hmaster, stat);
	else if (completion_status == FLUSH_RETRY_TIMEOUT)
		destination_timeout(bau_desc, bcp, hmaster, stat);
}

/*
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to including
 * cpus that are on the local uvhub.
 *
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */
int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
{
	int seq_number = 0;
	int completion_stat = 0;
	int uv1 = 0;
	long try = 0;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *hmaster = bcp->uvhub_master;
	struct uv1_bau_msg_header *uv1_hdr = NULL;
	struct uv2_bau_msg_header *uv2_hdr = NULL;
	struct bau_desc *bau_desc;

	if (bcp->uvhub_version == 1)
		uv1_throttle(hmaster, stat);

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	time1 = get_cycles();
	do {
		bau_desc = bcp->descriptor_base;
		bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
		if (bcp->uvhub_version == 1) {
			uv1 = 1;
			uv1_hdr = &bau_desc->header.uv1_hdr;
		} else
			uv2_hdr = &bau_desc->header.uv2_hdr;
		if ((try == 0) || (completion_stat == FLUSH_RETRY_BUSYBUG)) {
			if (uv1)
				uv1_hdr->msg_type = MSG_REGULAR;
			else
				uv2_hdr->msg_type = MSG_REGULAR;
			seq_number = bcp->message_number++;
		} else {
			if (uv1)
				uv1_hdr->msg_type = MSG_RETRY;
			else
				uv2_hdr->msg_type = MSG_RETRY;
			stat->s_retry_messages++;
		}

		if (uv1)
			uv1_hdr->sequence = seq_number;
		else
			uv2_hdr->sequence = seq_number;
		index = (1UL << AS_PUSH_SHIFT) | bcp->using_desc;
		bcp->send_message = get_cycles();

		write_mmr_activation(index);

		try++;
		completion_stat = wait_completion(bau_desc, bcp, try);
		/* UV2: wait_completion() may change the bcp->using_desc */

		handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);

		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
			bcp->ipi_attempts = 0;
			completion_stat = FLUSH_GIVEUP;
			break;
		}
		cpu_relax();
	} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
		 (completion_stat == FLUSH_RETRY_BUSYBUG) ||
		 (completion_stat == FLUSH_RETRY_TIMEOUT));

	time2 = get_cycles();

	count_max_concurr(completion_stat, bcp, hmaster);

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	atomic_dec(&hmaster->active_descriptor_count);

	record_send_stats(time1, time2, bcp, stat, completion_stat, try);

	if (completion_stat == FLUSH_GIVEUP)
		/* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
		return 1;
	return 0;
}

/*
 * The BAU is disabled. When the disabled time period has expired, the cpu
 * that disabled it must re-enable it.
 * Return 0 if it is re-enabled for all cpus.
 */
static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;

	if (bcp->set_bau_off) {
		if (get_cycles() >= bcp->set_bau_on_time) {
			stat->s_bau_reenabled++;
			baudisabled = 0;
			for_each_present_cpu(tcpu) {
				tbcp = &per_cpu(bau_control, tcpu);
				tbcp->baudisabled = 0;
				tbcp->period_requests = 0;
				tbcp->period_time = 0;
			}
			return 0;
		}
	}
	return -1;
}

static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
				int remotes, struct bau_desc *bau_desc)
{
	stat->s_requestor++;
	stat->s_ntargcpu += remotes + locals;
	stat->s_ntargremotes += remotes;
	stat->s_ntarglocals += locals;

	/* uvhub statistics */
	hubs = bau_uvhub_weight(&bau_desc->distribution);
	if (locals) {
		stat->s_ntarglocaluvhub++;
		stat->s_ntargremoteuvhub += (hubs - 1);
	} else
		stat->s_ntargremoteuvhub += hubs;

	stat->s_ntarguvhub += hubs;

	if (hubs >= 16)
		stat->s_ntarguvhub16++;
	else if (hubs >= 8)
		stat->s_ntarguvhub8++;
	else if (hubs >= 4)
		stat->s_ntarguvhub4++;
	else if (hubs >= 2)
		stat->s_ntarguvhub2++;
	else
		stat->s_ntarguvhub1++;
}

/*
 * Translate a cpu mask to the uvhub distribution mask in the BAU
 * activation descriptor.
 */
static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
			struct bau_desc *bau_desc, int *localsp, int *remotesp)
{
	int cpu;
	int pnode;
	int cnt = 0;
	struct hub_and_pnode *hpp;

	for_each_cpu(cpu, flush_mask) {
		/*
		 * The distribution vector is a bit map of pnodes, relative
		 * to the partition base pnode (and the partition base nasid
		 * in the header).
		 * Translate cpu to pnode and hub using a local memory array.
		 */
		hpp = &bcp->socket_master->thp[cpu];
		pnode = hpp->pnode - bcp->partition_base_pnode;
		bau_uvhub_set(pnode, &bau_desc->distribution);
		cnt++;
		if (hpp->uvhub == bcp->uvhub)
			(*localsp)++;
		else
			(*remotesp)++;
	}
	if (!cnt)
		return 1;
	return 0;
}

/*
 * globally purge translation cache of a virtual address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct. This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done. The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
				struct mm_struct *mm, unsigned long va,
				unsigned int cpu)
{
	int locals = 0;
	int remotes = 0;
	int hubs = 0;
	struct bau_desc *bau_desc;
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;

	/* kernel was booted 'nobau' */
	if (nobau)
		return cpumask;

	bcp = &per_cpu(bau_control, cpu);
	stat = bcp->statp;

	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
		if (check_enable(bcp, stat))
			return cpumask;
	}

	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
	 * cpu mask. All cpus are converted to uvhubs and copied to the
	 * activation descriptor.
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
	/* don't actually do a shootdown of the local cpu */
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

	if (cpu_isset(cpu, *cpumask))
		stat->s_ntargself++;

	bau_desc = bcp->descriptor_base;
	bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
	if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
		return NULL;

	record_send_statistics(stat, locals, hubs, remotes, bau_desc);

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;
	/*
	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
	 * or 1 if it gave up and the original cpumask should be returned.
	 */
	if (!uv_flush_send_and_wait(flush_mask, bcp))
		return NULL;
	else
		return cpumask;
}

/*
 * Search the message queue for any 'other' message with the same software
 * acknowledge resource bit vector.
 */
struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
			struct bau_control *bcp, unsigned char swack_vec)
{
	struct bau_pq_entry *msg_next = msg + 1;

	if (msg_next > bcp->queue_last)
		msg_next = bcp->queue_first;
	while ((msg_next->swack_vec != 0) && (msg_next != msg)) {
		if (msg_next->swack_vec == swack_vec)
			return msg_next;
		msg_next++;
		if (msg_next > bcp->queue_last)
			msg_next = bcp->queue_first;
	}
	return NULL;
}
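
/*
 * The payload queue is circular (queue_first..queue_last), so the scan
 * above wraps; it stops early at the first entry with a zero swack_vec,
 * since a cleared vector marks a slot whose message was already replied
 * to (see reply_to_message()).
 */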

/*
 * UV2 needs to work around a bug in which an arriving message has not
 * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
 * Such a message must be ignored.
 */
void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
{
	unsigned long mmr_image;
	unsigned char swack_vec;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *other_msg;

	mmr_image = read_mmr_sw_ack();
	swack_vec = msg->swack_vec;

	if ((swack_vec & mmr_image) == 0) {
		/*
		 * This message was assigned a swack resource, but no
		 * reserved acknowledgment is pending.
		 * The bug has prevented this message from setting the MMR.
		 * And no other message has used the same sw_ack resource.
		 * Do the requested shootdown but do not reply to the msg.
		 * (the 0 means make no acknowledge)
		 */
		bau_process_message(mdp, bcp, 0);
		return;
	}

	/*
	 * Some message has set the MMR 'pending' bit; it might have been
	 * another message. Look for that message.
	 */
	other_msg = find_another_by_swack(msg, bcp, msg->swack_vec);
	if (other_msg) {
		/* There is another. Do not ack the current one. */
		bau_process_message(mdp, bcp, 0);
		/*
		 * Let the natural processing of that message acknowledge
		 * it. Don't get the processing of sw_ack's out of order.
		 */
		return;
	}

	/*
	 * There is no other message using this sw_ack, so it is safe to
	 * acknowledge it.
	 */
	bau_process_message(mdp, bcp, 1);

	return;
}

/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
 * We received a broadcast assist message.
 *
 * Interrupts are disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this hub get this interrupt.
 * The last one to see it does the software ack.
 * (the resource will not be freed until noninterruptible cpus see this
 * interrupt; hardware may timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	int count = 0;
	cycles_t time_start;
	struct bau_pq_entry *msg;
	struct bau_control *bcp;
	struct ptc_stats *stat;
	struct msg_desc msgdesc;

	ack_APIC_irq();
	time_start = get_cycles();

	bcp = &per_cpu(bau_control, smp_processor_id());
	stat = bcp->statp;

	msgdesc.queue_first = bcp->queue_first;
	msgdesc.queue_last = bcp->queue_last;

	msg = bcp->bau_msg_head;
	while (msg->swack_vec) {
		count++;

		msgdesc.msg_slot = msg - msgdesc.queue_first;
		msgdesc.msg = msg;
		if (bcp->uvhub_version == 2)
			process_uv2_message(&msgdesc, bcp);
		else
			bau_process_message(&msgdesc, bcp, 1);

		msg++;
		if (msg > msgdesc.queue_last)
			msg = msgdesc.queue_first;
		bcp->bau_msg_head = msg;
	}
	stat->d_time += (get_cycles() - time_start);
	if (!count)
		stat->d_nomsg++;
	else if (count > 1)
		stat->d_multmsg++;
}
1252
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001253/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001254 * Each target uvhub (i.e. a uvhub that has cpus) needs to have
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001255 * shootdown message timeouts enabled. The timeout does not cause
1256 * an interrupt, but causes an error message to be returned to
1257 * the sender.
1258 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001259static void __init enable_timeouts(void)
Cliff Wickman18129242008-06-02 08:56:14 -05001260{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001261 int uvhub;
1262 int nuvhubs;
Cliff Wickman18129242008-06-02 08:56:14 -05001263 int pnode;
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001264 unsigned long mmr_image;
Cliff Wickman18129242008-06-02 08:56:14 -05001265
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001266 nuvhubs = uv_num_possible_blades();
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001267
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001268 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
1269 if (!uv_blade_nr_possible_cpus(uvhub))
Cliff Wickman18129242008-06-02 08:56:14 -05001270 continue;
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001271
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001272 pnode = uv_blade_to_pnode(uvhub);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001273 mmr_image = read_mmr_misc_control(pnode);
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001274 /*
1275 * Set the timeout period and then lock it in, in three
1276 * steps; the sequence below captures and locks in the period.
1277 *
1278 * To program the period, the SOFT_ACK_MODE must be off.
1279 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001280 mmr_image &= ~(1L << SOFTACK_MSHIFT);
1281 write_mmr_misc_control(pnode, mmr_image);
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001282 /*
1283 * Set the 4-bit period.
1284 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001285 mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
1286 mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
1287 write_mmr_misc_control(pnode, mmr_image);
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001288 /*
Jack Steiner2a919592011-05-11 12:50:28 -05001289 * UV1:
Cliff Wickmanc4c46882009-04-03 08:34:32 -05001290 * Subsequent reversals of the timebase bit (3) cause an
1291 * immediate timeout of one or all INTD resources as
1292 * indicated in bits 2:0 (7 causes all of them to timeout).
1293 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001294 mmr_image |= (1L << SOFTACK_MSHIFT);
Jack Steiner2a919592011-05-11 12:50:28 -05001295 if (is_uv2_hub()) {
Cliff Wickmanda87c932012-01-16 15:17:50 -06001296 mmr_image &= ~(1L << UV2_LEG_SHFT);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001297 mmr_image |= (1L << UV2_EXT_SHFT);
Jack Steiner2a919592011-05-11 12:50:28 -05001298 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001299 write_mmr_misc_control(pnode, mmr_image);
Cliff Wickman18129242008-06-02 08:56:14 -05001300 }
Cliff Wickman18129242008-06-02 08:56:14 -05001301}
1302
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001303static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
Cliff Wickman18129242008-06-02 08:56:14 -05001304{
1305 if (*offset < num_possible_cpus())
1306 return offset;
1307 return NULL;
1308}
1309
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001310static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
Cliff Wickman18129242008-06-02 08:56:14 -05001311{
1312 (*offset)++;
1313 if (*offset < num_possible_cpus())
1314 return offset;
1315 return NULL;
1316}
1317
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001318static void ptc_seq_stop(struct seq_file *file, void *data)
Cliff Wickman18129242008-06-02 08:56:14 -05001319{
1320}
1321
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001322static inline unsigned long long usec_2_cycles(unsigned long microsec)
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001323{
1324 unsigned long ns;
1325 unsigned long long cyc;
1326
Cliff Wickman12a66112010-06-02 16:22:01 -05001327 ns = microsec * 1000;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001328 cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
1329 return cyc;
1330}
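
/*
 * A minimal sketch (hypothetical name, not part of this file):
 * cycles_2_us(), used by ptc_seq_show() below, performs the inverse
 * conversion, assuming the same per-cpu cyc2ns fixed-point factor.
 */
static inline unsigned long sketch_cycles_2_us(unsigned long long cyc)
{
	unsigned long long ns;

	/* scale cycles by cyc2ns, then shift out the fixed-point factor */
	ns = (cyc * per_cpu(cyc2ns, smp_processor_id()))
						>> CYC2NS_SCALE_FACTOR;
	return ns / 1000;		/* nanoseconds to microseconds */
}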
1331
Cliff Wickman18129242008-06-02 08:56:14 -05001332/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001333 * Display the statistics through /proc/sgi_uv/ptc_statistics
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001334 * 'data' points to the cpu number
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001335 * Note: see the descriptions in stat_description[].
Cliff Wickman18129242008-06-02 08:56:14 -05001336 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001337static int ptc_seq_show(struct seq_file *file, void *data)
Cliff Wickman18129242008-06-02 08:56:14 -05001338{
1339 struct ptc_stats *stat;
1340 int cpu;
1341
1342 cpu = *(loff_t *)data;
Cliff Wickman18129242008-06-02 08:56:14 -05001343 if (!cpu) {
1344 seq_printf(file,
Cliff Wickman450a0072010-06-02 16:22:02 -05001345 "# cpu sent stime self locals remotes ncpus localhub ");
Cliff Wickman18129242008-06-02 08:56:14 -05001346 seq_printf(file,
Cliff Wickman450a0072010-06-02 16:22:02 -05001347 "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
1348 seq_printf(file,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001349 "numuvhubs4 numuvhubs2 numuvhubs1 dto retries rok ");
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001350 seq_printf(file,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001351 "resetp resett giveup sto bz throt swack recv rtime ");
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001352 seq_printf(file,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001353 "all one mult none retry canc nocan reset rcan ");
Cliff Wickman50fb55a2010-06-02 16:22:02 -05001354 seq_printf(file,
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001355 "disable enable wars warshw warwaits\n");
Cliff Wickman18129242008-06-02 08:56:14 -05001356 }
1357 if (cpu < num_possible_cpus() && cpu_online(cpu)) {
1358 stat = &per_cpu(ptcstats, cpu);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001359 /* source side statistics */
1360 seq_printf(file,
1361 "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
1362 cpu, stat->s_requestor, cycles_2_us(stat->s_time),
Cliff Wickman450a0072010-06-02 16:22:02 -05001363 stat->s_ntargself, stat->s_ntarglocals,
1364 stat->s_ntargremotes, stat->s_ntargcpu,
1365 stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
1366 stat->s_ntarguvhub, stat->s_ntarguvhub16);
1367 seq_printf(file, "%ld %ld %ld %ld %ld ",
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001368 stat->s_ntarguvhub8, stat->s_ntarguvhub4,
1369 stat->s_ntarguvhub2, stat->s_ntarguvhub1,
Cliff Wickman450a0072010-06-02 16:22:02 -05001370 stat->s_dtimeout);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001371 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
1372 stat->s_retry_messages, stat->s_retriesok,
1373 stat->s_resets_plug, stat->s_resets_timeout,
1374 stat->s_giveup, stat->s_stimeout,
1375 stat->s_busy, stat->s_throttles);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001376
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001377 /* destination side statistics */
1378 seq_printf(file,
Cliff Wickman50fb55a2010-06-02 16:22:02 -05001379 "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001380 read_gmmr_sw_ack(uv_cpu_to_pnode(cpu)),
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001381 stat->d_requestee, cycles_2_us(stat->d_time),
1382 stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
1383 stat->d_nomsg, stat->d_retries, stat->d_canceled,
1384 stat->d_nocanceled, stat->d_resets,
1385 stat->d_rcanceled);
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001386 seq_printf(file, "%ld %ld %ld %ld %ld\n",
1387 stat->s_bau_disabled, stat->s_bau_reenabled,
1388 stat->s_uv2_wars, stat->s_uv2_wars_hw,
1389 stat->s_uv2_war_waits);
Cliff Wickman18129242008-06-02 08:56:14 -05001390 }
Cliff Wickman18129242008-06-02 08:56:14 -05001391 return 0;
1392}
1393
1394/*
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001395 * Display the tunables through debugfs
1396 */
1397static ssize_t tunables_read(struct file *file, char __user *userbuf,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001398 size_t count, loff_t *ppos)
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001399{
Dan Carpenterb365a852010-09-29 10:41:05 +02001400 char *buf;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001401 int ret;
1402
Dan Carpenterb365a852010-09-29 10:41:05 +02001403 buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001404 "max_concur plugged_delay plugsb4reset",
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001405 "timeoutsb4reset ipi_reset_limit complete_threshold",
1406 "congested_response_us congested_reps congested_period",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001407 max_concurr, plugged_delay, plugsb4reset,
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001408 timeoutsb4reset, ipi_reset_limit, complete_threshold,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001409 congested_respns_us, congested_reps, congested_period);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001410
Dan Carpenterb365a852010-09-29 10:41:05 +02001411 if (!buf)
1412 return -ENOMEM;
1413
1414 ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
1415 kfree(buf);
1416 return ret;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001417}
1418
1419/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001420 * Handle a write to /proc/sgi_uv/ptc_statistics
1421 * -1: reset the statistics
Cliff Wickman18129242008-06-02 08:56:14 -05001422 * 0: display meaning of the statistics
Cliff Wickman18129242008-06-02 08:56:14 -05001423 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001424static ssize_t ptc_proc_write(struct file *file, const char __user *user,
1425 size_t count, loff_t *data)
Cliff Wickman18129242008-06-02 08:56:14 -05001426{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001427 int cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001428 int i;
1429 int elements;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001430 long input_arg;
Cliff Wickman18129242008-06-02 08:56:14 -05001431 char optstr[64];
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001432 struct ptc_stats *stat;
Cliff Wickman18129242008-06-02 08:56:14 -05001433
Cliff Wickmane7eb8722008-06-23 08:32:25 -05001434 if (count == 0 || count > sizeof(optstr))
Cliff Wickmancef53272008-06-19 11:16:24 -05001435 return -EINVAL;
Cliff Wickman18129242008-06-02 08:56:14 -05001436 if (copy_from_user(optstr, user, count))
1437 return -EFAULT;
1438 optstr[count - 1] = '\0';
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001439
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001440 if (strict_strtol(optstr, 10, &input_arg) < 0) {
Cliff Wickman18129242008-06-02 08:56:14 -05001441 printk(KERN_DEBUG "%s is invalid\n", optstr);
1442 return -EINVAL;
1443 }
1444
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001445 if (input_arg == 0) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001446 elements = sizeof(stat_description)/sizeof(*stat_description);
Cliff Wickman18129242008-06-02 08:56:14 -05001447 printk(KERN_DEBUG "# cpu: cpu number\n");
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001448 printk(KERN_DEBUG "Sender statistics:\n");
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001449 for (i = 0; i < elements; i++)
1450 printk(KERN_DEBUG "%s\n", stat_description[i]);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001451 } else if (input_arg == -1) {
1452 for_each_present_cpu(cpu) {
1453 stat = &per_cpu(ptcstats, cpu);
1454 memset(stat, 0, sizeof(struct ptc_stats));
1455 }
Cliff Wickman18129242008-06-02 08:56:14 -05001456 }
1457
1458 return count;
1459}
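
/*
 * Usage sketch (hypothetical user-space helper, for illustration only):
 * the handler above overwrites the last byte with '\0' before parsing,
 * so the write must include a trailing newline, as echo(1) would.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int reset_ptc_stats(void)
 *	{
 *		int fd = open("/proc/sgi_uv/ptc_statistics", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		// "-1\n" resets the counters; "0\n" logs their meanings
 *		if (write(fd, "-1\n", 3) != 3) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 */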
1460
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001461static int local_atoi(const char *name)
1462{
1463 int val = 0;
1464
1465 for (;; name++) {
1466 switch (*name) {
1467 case '0' ... '9':
1468 val = 10*val+(*name-'0');
1469 break;
1470 default:
1471 return val;
1472 }
1473 }
1474}
1475
1476/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001477 * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
1478 * Zero values reset them to defaults.
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001479 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001480static int parse_tunables_write(struct bau_control *bcp, char *instr,
1481 int count)
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001482{
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001483 char *p;
1484 char *q;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001485 int cnt = 0;
1486 int val;
1487 int e = sizeof(tunables) / sizeof(*tunables);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001488
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001489 p = instr + strspn(instr, WHITESPACE);
1490 q = p;
1491 for (; *p; p = q + strspn(q, WHITESPACE)) {
1492 q = p + strcspn(p, WHITESPACE);
1493 cnt++;
1494 if (q == p)
1495 break;
1496 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001497 if (cnt != e) {
1498 printk(KERN_INFO "bau tunable error: should be %d values\n", e);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001499 return -EINVAL;
1500 }
1501
1502 p = instr + strspn(instr, WHITESPACE);
1503 q = p;
1504 for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
1505 q = p + strcspn(p, WHITESPACE);
1506 val = local_atoi(p);
1507 switch (cnt) {
1508 case 0:
1509 if (val == 0) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001510 max_concurr = MAX_BAU_CONCURRENT;
1511 max_concurr_const = MAX_BAU_CONCURRENT;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001512 continue;
1513 }
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001514 if (val < 1 || val > bcp->cpus_in_uvhub) {
1515 printk(KERN_DEBUG
1516 "Error: BAU max concurrent %d is invalid\n",
1517 val);
1518 return -EINVAL;
1519 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001520 max_concurr = val;
1521 max_concurr_const = val;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001522 continue;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001523 default:
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001524 if (val == 0)
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001525 *tunables[cnt].tunp = tunables[cnt].deflt;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001526 else
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001527 *tunables[cnt].tunp = val;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001528 continue;
1529 }
1530 if (q == p)
1531 break;
1532 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001533 return 0;
1534}
1535
1536/*
1537 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
1538 */
1539static ssize_t tunables_write(struct file *file, const char __user *user,
1540 size_t count, loff_t *data)
1541{
1542 int cpu;
1543 int ret;
1544 char instr[100];
1545 struct bau_control *bcp;
1546
1547 if (count == 0 || count > sizeof(instr)-1)
1548 return -EINVAL;
1549 if (copy_from_user(instr, user, count))
1550 return -EFAULT;
1551
1552 instr[count] = '\0';
1553
cpw@sgi.com00b30cf2011-06-21 07:21:26 -05001554 cpu = get_cpu();
1555 bcp = &per_cpu(bau_control, cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001556 ret = parse_tunables_write(bcp, instr, count);
cpw@sgi.com00b30cf2011-06-21 07:21:26 -05001557 put_cpu();
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001558 if (ret)
1559 return ret;
1560
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001561 for_each_present_cpu(cpu) {
1562 bcp = &per_cpu(bau_control, cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001563 bcp->max_concurr = max_concurr;
1564 bcp->max_concurr_const = max_concurr;
1565 bcp->plugged_delay = plugged_delay;
1566 bcp->plugsb4reset = plugsb4reset;
1567 bcp->timeoutsb4reset = timeoutsb4reset;
1568 bcp->ipi_reset_limit = ipi_reset_limit;
1569 bcp->complete_threshold = complete_threshold;
1570 bcp->cong_response_us = congested_respns_us;
1571 bcp->cong_reps = congested_reps;
1572 bcp->cong_period = congested_period;
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001573 }
1574 return count;
1575}
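
/*
 * Usage sketch: all nine tunables must be written together, space
 * separated, in the order reported by tunables_read().  Since a zero
 * restores a tunable's default, for example
 *
 *	echo "0 0 0 0 0 0 0 0 0" > /sys/kernel/debug/sgi_uv/bau_tunables
 *
 * resets every tunable to its compiled-in default.
 */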
1576
Cliff Wickman18129242008-06-02 08:56:14 -05001577static const struct seq_operations uv_ptc_seq_ops = {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001578 .start = ptc_seq_start,
1579 .next = ptc_seq_next,
1580 .stop = ptc_seq_stop,
1581 .show = ptc_seq_show
Cliff Wickman18129242008-06-02 08:56:14 -05001582};
1583
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001584static int ptc_proc_open(struct inode *inode, struct file *file)
Cliff Wickman18129242008-06-02 08:56:14 -05001585{
1586 return seq_open(file, &uv_ptc_seq_ops);
1587}
1588
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001589static int tunables_open(struct inode *inode, struct file *file)
1590{
1591 return 0;
1592}
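
/*
 * An empty open() suffices here: tunables_read() and tunables_write()
 * operate on the global tunables[] and per-cpu bau_control structures,
 * so no per-file state needs to be attached to the struct file.
 */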
1593
Cliff Wickman18129242008-06-02 08:56:14 -05001594static const struct file_operations proc_uv_ptc_operations = {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001595 .open = ptc_proc_open,
Cliff Wickmanb194b122008-06-12 08:23:48 -05001596 .read = seq_read,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001597 .write = ptc_proc_write,
Cliff Wickmanb194b122008-06-12 08:23:48 -05001598 .llseek = seq_lseek,
1599 .release = seq_release,
Cliff Wickman18129242008-06-02 08:56:14 -05001600};
1601
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001602static const struct file_operations tunables_fops = {
1603 .open = tunables_open,
1604 .read = tunables_read,
1605 .write = tunables_write,
Arnd Bergmann6038f372010-08-15 18:52:59 +02001606 .llseek = default_llseek,
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001607};
1608
Cliff Wickmanb194b122008-06-12 08:23:48 -05001609static int __init uv_ptc_init(void)
Cliff Wickman18129242008-06-02 08:56:14 -05001610{
Cliff Wickmanb194b122008-06-12 08:23:48 -05001611 struct proc_dir_entry *proc_uv_ptc;
Cliff Wickman18129242008-06-02 08:56:14 -05001612
1613 if (!is_uv_system())
1614 return 0;
1615
Alexey Dobriyan10f02d112009-08-23 23:17:27 +04001616 proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
1617 &proc_uv_ptc_operations);
Cliff Wickman18129242008-06-02 08:56:14 -05001618 if (!proc_uv_ptc) {
1619 printk(KERN_ERR "unable to create %s proc entry\n",
1620 UV_PTC_BASENAME);
1621 return -EINVAL;
1622 }
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001623
1624 tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
1625 if (!tunables_dir) {
1626 printk(KERN_ERR "unable to create debugfs directory %s\n",
1627 UV_BAU_TUNABLES_DIR);
1628 return -EINVAL;
1629 }
1630 tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001631 tunables_dir, NULL, &tunables_fops);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001632 if (!tunables_file) {
1633 printk(KERN_ERR "unable to create debugfs file %s\n",
1634 UV_BAU_TUNABLES_FILE);
1635 return -EINVAL;
1636 }
Cliff Wickman18129242008-06-02 08:56:14 -05001637 return 0;
1638}
1639
Cliff Wickmanb194b122008-06-12 08:23:48 -05001640/*
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001641 * Initialize the sending side's sending buffers.
Cliff Wickmanb194b122008-06-12 08:23:48 -05001642 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001643static void activation_descriptor_init(int node, int pnode, int base_pnode)
Cliff Wickmanb194b122008-06-12 08:23:48 -05001644{
1645 int i;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001646 int cpu;
Cliff Wickmanda87c932012-01-16 15:17:50 -06001647 int uv1 = 0;
Jack Steiner6a469e42011-09-20 13:55:04 -07001648 unsigned long gpa;
Cliff Wickmanb194b122008-06-12 08:23:48 -05001649 unsigned long m;
1650 unsigned long n;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001651 size_t dsize;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001652 struct bau_desc *bau_desc;
1653 struct bau_desc *bd2;
Cliff Wickmanda87c932012-01-16 15:17:50 -06001654 struct uv1_bau_msg_header *uv1_hdr;
1655 struct uv2_bau_msg_header *uv2_hdr;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001656 struct bau_control *bcp;
Cliff Wickmanb194b122008-06-12 08:23:48 -05001657
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001658 /*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001659 * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
1660 * per cpu, one such set for each of the ADP_SZ cpus on the uvhub
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001661 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001662 dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
1663 bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001664 BUG_ON(!bau_desc);
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001665
Jack Steiner6a469e42011-09-20 13:55:04 -07001666 gpa = uv_gpa(bau_desc);
1667 n = uv_gpa_to_gnode(gpa);
1668 m = uv_gpa_to_offset(gpa);
Cliff Wickmanda87c932012-01-16 15:17:50 -06001669 if (is_uv1_hub())
1670 uv1 = 1;
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001671
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001672 /* the 14-bit pnode */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001673 write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001674 /*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001675 * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001676 * cpu even though we only use the first one; one descriptor can
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001677 * describe a broadcast to 256 uv hubs.
Cliff Wickman0e2595c2009-05-20 08:10:57 -05001678 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001679 for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001680 memset(bd2, 0, sizeof(struct bau_desc));
Cliff Wickmanda87c932012-01-16 15:17:50 -06001681 if (uv1) {
1682 uv1_hdr = &bd2->header.uv1_hdr;
1683 uv1_hdr->swack_flag = 1;
1684 /*
1685 * The base_dest_nasid set in the message header
1686 * is the nasid of the first uvhub in the partition.
1687 * The bit map will indicate destination pnode numbers
1688 * relative to that base. They may not be consecutive
1689 * if nasid striding is being used.
1690 */
1691 uv1_hdr->base_dest_nasid =
1692 UV_PNODE_TO_NASID(base_pnode);
1693 uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1694 uv1_hdr->command = UV_NET_ENDPOINT_INTD;
1695 uv1_hdr->int_both = 1;
1696 /*
1697 * all others need to be set to zero:
1698 * fairness chaining multilevel count replied_to
1699 */
1700 } else {
1701 uv2_hdr = &bd2->header.uv2_hdr;
1702 uv2_hdr->swack_flag = 1;
1703 uv2_hdr->base_dest_nasid =
1704 UV_PNODE_TO_NASID(base_pnode);
1705 uv2_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1706 uv2_hdr->command = UV_NET_ENDPOINT_INTD;
1707 }
Cliff Wickmanb194b122008-06-12 08:23:48 -05001708 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001709 for_each_present_cpu(cpu) {
1710 if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
1711 continue;
1712 bcp = &per_cpu(bau_control, cpu);
1713 bcp->descriptor_base = bau_desc;
1714 }
Cliff Wickmanb194b122008-06-12 08:23:48 -05001715}
1716
1717/*
1718 * initialize the destination side's receiving buffers
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001719 * entered for each uvhub in the partition
1720 * - node is first node (kernel memory notion) on the uvhub
1721 * - pnode is the uvhub's physical identifier
Cliff Wickmanb194b122008-06-12 08:23:48 -05001722 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001723static void pq_init(int node, int pnode)
Cliff Wickmanb194b122008-06-12 08:23:48 -05001724{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001725 int cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001726 size_t plsize;
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001727 char *cp;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001728 void *vp;
1729 unsigned long pn;
1730 unsigned long first;
1731 unsigned long pn_first;
1732 unsigned long last;
1733 struct bau_pq_entry *pqp;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001734 struct bau_control *bcp;
Cliff Wickmanb194b122008-06-12 08:23:48 -05001735
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001736 plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
1737 vp = kmalloc_node(plsize, GFP_KERNEL, node);
1738 pqp = (struct bau_pq_entry *)vp;
Ingo Molnardc163a42008-06-18 14:15:43 +02001739 BUG_ON(!pqp);
Ingo Molnarb4c286e2008-06-18 14:28:19 +02001740
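	/*
	 * Round pqp up to the next 32-byte boundary: adding 31 and then
	 * clearing the low 5 bits leaves the first queue entry 32-byte
	 * aligned; the extra entry allocated in plsize above provides
	 * the slack consumed by this adjustment.
	 */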
Cliff Wickmanb194b122008-06-12 08:23:48 -05001741 cp = (char *)pqp + 31;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001742 pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001743
1744 for_each_present_cpu(cpu) {
1745 if (pnode != uv_cpu_to_pnode(cpu))
1746 continue;
1747 /* for every cpu on this pnode: */
1748 bcp = &per_cpu(bau_control, cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001749 bcp->queue_first = pqp;
1750 bcp->bau_msg_head = pqp;
1751 bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001752 }
Cliff Wickman4ea3c512009-04-16 07:53:09 -05001753 /*
Jack Steiner6a469e42011-09-20 13:55:04 -07001754 * need the gnode of where the memory was really allocated
Cliff Wickman4ea3c512009-04-16 07:53:09 -05001755 */
Jack Steiner6a469e42011-09-20 13:55:04 -07001756 pn = uv_gpa_to_gnode(uv_gpa(pqp));
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001757 first = uv_physnodeaddr(pqp);
1758 pn_first = ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | first;
1759 last = uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1));
1760 write_mmr_payload_first(pnode, pn_first);
1761 write_mmr_payload_tail(pnode, first);
1762 write_mmr_payload_last(pnode, last);
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001763 write_gmmr_sw_ack(pnode, 0xffffUL);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001764
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001765 /* in effect, all msg_type's are set to MSG_NOOP */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001766 memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
Cliff Wickmanb194b122008-06-12 08:23:48 -05001767}
1768
1769/*
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001770 * Initialization of each UV hub's structures
Cliff Wickmanb194b122008-06-12 08:23:48 -05001771 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001772static void __init init_uvhub(int uvhub, int vector, int base_pnode)
Cliff Wickmanb194b122008-06-12 08:23:48 -05001773{
Cliff Wickman9674f352009-04-03 08:34:05 -05001774 int node;
Cliff Wickmanb194b122008-06-12 08:23:48 -05001775 int pnode;
Cliff Wickmanb194b122008-06-12 08:23:48 -05001776 unsigned long apicid;
Cliff Wickmanb194b122008-06-12 08:23:48 -05001777
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001778 node = uvhub_to_first_node(uvhub);
1779 pnode = uv_blade_to_pnode(uvhub);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001780
1781 activation_descriptor_init(node, pnode, base_pnode);
1782
1783 pq_init(node, pnode);
Cliff Wickmanb194b122008-06-12 08:23:48 -05001784 /*
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001785 * The initialization below can't be done in firmware because the
1786 * messaging IRQ will be determined by the OS.
Cliff Wickmanb194b122008-06-12 08:23:48 -05001787 */
Dimitri Sivanich8191c9f2010-11-16 16:23:52 -06001788 apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001789 write_mmr_data_config(pnode, ((apicid << 32) | vector));
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001790}
1791
1792/*
Cliff Wickman12a66112010-06-02 16:22:01 -05001793 * We will set BAU_MISC_CONTROL with a timeout period.
1794 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001795 * So the destination timeout period has to be calculated from them.
Cliff Wickman12a66112010-06-02 16:22:01 -05001796 */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001797static int calculate_destination_timeout(void)
Cliff Wickman12a66112010-06-02 16:22:01 -05001798{
1799 unsigned long mmr_image;
1800 int mult1;
1801 int mult2;
1802 int index;
1803 int base;
1804 int ret;
1805 unsigned long ts_ns;
1806
Jack Steiner2a919592011-05-11 12:50:28 -05001807 if (is_uv1_hub()) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001808 mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
Jack Steiner2a919592011-05-11 12:50:28 -05001809 mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
1810 index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
1811 mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
1812 mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
1813 base = timeout_base_ns[index];
1814 ts_ns = base * mult1 * mult2;
1815 ret = ts_ns / 1000;
1816 } else {
Cliff Wickmand059f9f2012-01-16 15:18:48 -06001817 /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
1818 mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
Jack Steiner2a919592011-05-11 12:50:28 -05001819 mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001820 if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
Cliff Wickmand059f9f2012-01-16 15:18:48 -06001821 base = 80;
Jack Steiner2a919592011-05-11 12:50:28 -05001822 else
Cliff Wickmand059f9f2012-01-16 15:18:48 -06001823 base = 10;
1824 mult1 = mmr_image & UV2_ACK_MASK;
Jack Steiner2a919592011-05-11 12:50:28 -05001825 ret = mult1 * base;
1826 }
Cliff Wickman12a66112010-06-02 16:22:01 -05001827 return ret;
1828}
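
/*
 * Worked example (register values illustrative only): on UV1, an
 * urgency7 index of 2 selects timeout_base_ns[2] = 1280ns; with
 * mult1 = 3 and mult2 = 4 that gives 1280 * 3 * 4 = 15360ns, so about
 * 15 usec is returned.  On UV2, a base of 80us with a multiplier of 4
 * would return 320 usec.
 */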
1829
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001830static void __init init_per_cpu_tunables(void)
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001831{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001832 int cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001833 struct bau_control *bcp;
1834
1835 for_each_present_cpu(cpu) {
1836 bcp = &per_cpu(bau_control, cpu);
1837 bcp->baudisabled = 0;
1838 bcp->statp = &per_cpu(ptcstats, cpu);
1839 /* time interval to catch a hardware stay-busy bug */
1840 bcp->timeout_interval = usec_2_cycles(2*timeout_us);
1841 bcp->max_concurr = max_concurr;
1842 bcp->max_concurr_const = max_concurr;
1843 bcp->plugged_delay = plugged_delay;
1844 bcp->plugsb4reset = plugsb4reset;
1845 bcp->timeoutsb4reset = timeoutsb4reset;
1846 bcp->ipi_reset_limit = ipi_reset_limit;
1847 bcp->complete_threshold = complete_threshold;
1848 bcp->cong_response_us = congested_respns_us;
1849 bcp->cong_reps = congested_reps;
1850 bcp->cong_period = congested_period;
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001851 bcp->clocks_per_100_usec = usec_2_cycles(100);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001852 }
1853}
1854
1855/*
1856 * Scan all cpus to collect blade and socket summaries.
1857 */
1858static int __init get_cpu_topology(int base_pnode,
1859 struct uvhub_desc *uvhub_descs,
1860 unsigned char *uvhub_mask)
1861{
1862 int cpu;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001863 int pnode;
1864 int uvhub;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001865 int socket;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001866 struct bau_control *bcp;
1867 struct uvhub_desc *bdp;
1868 struct socket_desc *sdp;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001869
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001870 for_each_present_cpu(cpu) {
1871 bcp = &per_cpu(bau_control, cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001872
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001873 memset(bcp, 0, sizeof(struct bau_control));
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001874
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001875 pnode = uv_cpu_hub_info(cpu)->pnode;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001876 if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001877 printk(KERN_EMERG
1878 "cpu %d pnode %d-%d beyond %d; BAU disabled\n",
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001879 cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001880 return 1;
1881 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001882
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001883 bcp->osnode = cpu_to_node(cpu);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001884 bcp->partition_base_pnode = base_pnode;
1885
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001886 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05001887 *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001888 bdp = &uvhub_descs[uvhub];
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001889
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001890 bdp->num_cpus++;
1891 bdp->uvhub = uvhub;
1892 bdp->pnode = pnode;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001893
Cliff Wickmana8328ee2010-06-02 16:22:02 -05001894 /* kludge: 'assuming' one node per socket, and assuming that
1895 disabling a socket just leaves a gap in node numbers */
Cliff Wickman77ed23f2011-05-10 08:26:43 -05001896 socket = bcp->osnode & 1;
Cliff Wickmana8328ee2010-06-02 16:22:02 -05001897 bdp->socket_mask |= (1 << socket);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001898 sdp = &bdp->socket[socket];
1899 sdp->cpu_number[sdp->num_cpus] = cpu;
1900 sdp->num_cpus++;
Cliff Wickmancfa60912011-01-03 12:03:53 -06001901 if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001902 printk(KERN_EMERG "%d cpus per socket invalid\n",
1903 sdp->num_cpus);
Cliff Wickmancfa60912011-01-03 12:03:53 -06001904 return 1;
1905 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05001906 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001907 return 0;
1908}
1909
1910/*
1911 * Each socket is to get a local array of pnodes/hubs.
1912 */
1913static void make_per_cpu_thp(struct bau_control *smaster)
1914{
1915 int cpu;
1916 size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();
1917
1918 smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
1919 memset(smaster->thp, 0, hpsz);
1920 for_each_present_cpu(cpu) {
1921 smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
1922 smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
1923 }
1924}
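
/*
 * The kmalloc_node() + memset() pair above is equivalent to
 * kzalloc_node(hpsz, GFP_KERNEL, smaster->osnode), the form used by
 * make_per_hub_cpumask() below.
 */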
1925
1926/*
cpw@sgi.com442d3922011-06-21 07:21:31 -05001927 * Each uvhub is to get a local cpumask.
1928 */
1929static void make_per_hub_cpumask(struct bau_control *hmaster)
1930{
1931 int sz = sizeof(cpumask_t);
1932
1933 hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
1934}
1935
1936/*
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001937 * Initialize all the per_cpu information for the cpu's on a given socket,
1938 * given what has been gathered into the socket_desc struct.
1939 * And reports the chosen hub and socket masters back to the caller.
1940 */
1941static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
1942 struct bau_control **smasterp,
1943 struct bau_control **hmasterp)
1944{
1945 int i;
1946 int cpu;
1947 struct bau_control *bcp;
1948
1949 for (i = 0; i < sdp->num_cpus; i++) {
1950 cpu = sdp->cpu_number[i];
1951 bcp = &per_cpu(bau_control, cpu);
1952 bcp->cpu = cpu;
1953 if (i == 0) {
1954 *smasterp = bcp;
1955 if (!(*hmasterp))
1956 *hmasterp = bcp;
1957 }
1958 bcp->cpus_in_uvhub = bdp->num_cpus;
1959 bcp->cpus_in_socket = sdp->num_cpus;
1960 bcp->socket_master = *smasterp;
1961 bcp->uvhub = bdp->uvhub;
Cliff Wickmanda87c932012-01-16 15:17:50 -06001962 if (is_uv1_hub())
1963 bcp->uvhub_version = 1;
1964 else if (is_uv2_hub())
1965 bcp->uvhub_version = 2;
1966 else {
1967 printk(KERN_EMERG "uvhub version not 1 or 2\n");
1968 return 1;
1969 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001970 bcp->uvhub_master = *hmasterp;
1971 bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
Cliff Wickmanc5d35d32012-01-16 15:19:47 -06001972 bcp->using_desc = bcp->uvhub_cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001973 if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
1974 printk(KERN_EMERG "%d cpus per uvhub invalid\n",
1975 bcp->uvhub_cpu);
1976 return 1;
1977 }
1978 }
1979 return 0;
1980}
1981
1982/*
1983 * Summarize the blade and socket topology into the per_cpu structures.
1984 */
1985static int __init summarize_uvhub_sockets(int nuvhubs,
1986 struct uvhub_desc *uvhub_descs,
1987 unsigned char *uvhub_mask)
1988{
1989 int socket;
1990 int uvhub;
1991 unsigned short socket_mask;
1992
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05001993 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05001994 struct uvhub_desc *bdp;
1995 struct bau_control *smaster = NULL;
1996 struct bau_control *hmaster = NULL;
1997
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05001998 if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
1999 continue;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002000
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002001 bdp = &uvhub_descs[uvhub];
Cliff Wickmana8328ee2010-06-02 16:22:02 -05002002 socket_mask = bdp->socket_mask;
2003 socket = 0;
2004 while (socket_mask) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002005 struct socket_desc *sdp;
2006 if ((socket_mask & 1)) {
2007 sdp = &bdp->socket[socket];
2008 if (scan_sock(sdp, bdp, &smaster, &hmaster))
Cliff Wickmancfa60912011-01-03 12:03:53 -06002009 return 1;
cpw@sgi.com9c9153d2011-06-21 07:21:28 -05002010 make_per_cpu_thp(smaster);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002011 }
2012 socket++;
Cliff Wickmana8328ee2010-06-02 16:22:02 -05002013 socket_mask = (socket_mask >> 1);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002014 }
cpw@sgi.com442d3922011-06-21 07:21:31 -05002015 make_per_hub_cpumask(hmaster);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002016 }
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002017 return 0;
2018}
2019
2020/*
2021 * initialize the bau_control structure for each cpu
2022 */
2023static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
2024{
2025 unsigned char *uvhub_mask;
2026 void *vp;
2027 struct uvhub_desc *uvhub_descs;
2028
2029 timeout_us = calculate_destination_timeout();
2030
2031 vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
2032 uvhub_descs = (struct uvhub_desc *)vp;
2033 memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
2034 uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
2035
2036 if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
cpw@sgi.combbd270e2011-06-21 07:21:32 -05002037 goto fail;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002038
2039 if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
cpw@sgi.combbd270e2011-06-21 07:21:32 -05002040 goto fail;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002041
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002042 kfree(uvhub_descs);
Cliff Wickmanc4026cf2010-07-30 14:10:55 -05002043 kfree(uvhub_mask);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002044 init_per_cpu_tunables();
Cliff Wickmancfa60912011-01-03 12:03:53 -06002045 return 0;
cpw@sgi.combbd270e2011-06-21 07:21:32 -05002046
2047fail:
2048 kfree(uvhub_descs);
2049 kfree(uvhub_mask);
2050 return 1;
Cliff Wickmanb194b122008-06-12 08:23:48 -05002051}
Cliff Wickman18129242008-06-02 08:56:14 -05002052
2053/*
2054 * Initialization of BAU-related structures
2055 */
Cliff Wickmanb194b122008-06-12 08:23:48 -05002056static int __init uv_bau_init(void)
Cliff Wickman18129242008-06-02 08:56:14 -05002057{
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002058 int uvhub;
2059 int pnode;
2060 int nuvhubs;
Rusty Russell2c74d662009-03-18 08:22:30 +10302061 int cur_cpu;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002062 int cpus;
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002063 int vector;
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002064 cpumask_var_t *mask;
Cliff Wickman18129242008-06-02 08:56:14 -05002065
2066 if (!is_uv_system())
2067 return 0;
2068
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002069 if (nobau)
2070 return 0;
2071
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002072 for_each_possible_cpu(cur_cpu) {
2073 mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
2074 zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
2075 }
Rusty Russell76ba0ec2009-03-13 14:49:57 +10302076
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002077 nuvhubs = uv_num_possible_blades();
Cliff Wickman50fb55a2010-06-02 16:22:02 -05002078 spin_lock_init(&disable_lock);
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002079 congested_cycles = usec_2_cycles(congested_respns_us);
Cliff Wickman9674f352009-04-03 08:34:05 -05002080
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002081 uv_base_pnode = 0x7fffffff;
Cliff Wickman77ed23f2011-05-10 08:26:43 -05002082 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002083 cpus = uv_blade_nr_possible_cpus(uvhub);
2084 if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
2085 uv_base_pnode = uv_blade_to_pnode(uvhub);
Cliff Wickman77ed23f2011-05-10 08:26:43 -05002086 }
2087
Cliff Wickmand059f9f2012-01-16 15:18:48 -06002088 enable_timeouts();
2089
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002090 if (init_per_cpu(nuvhubs, uv_base_pnode)) {
Cliff Wickmancfa60912011-01-03 12:03:53 -06002091 nobau = 1;
2092 return 0;
2093 }
Ingo Molnarb4c286e2008-06-18 14:28:19 +02002094
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002095 vector = UV_BAU_MESSAGE;
2096 for_each_possible_blade(uvhub)
2097 if (uv_blade_nr_possible_cpus(uvhub))
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002098 init_uvhub(uvhub, vector, uv_base_pnode);
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002099
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002100 alloc_intr_gate(vector, uv_bau_message_intr1);
2101
2102 for_each_possible_blade(uvhub) {
Cliff Wickman93a7ca02010-07-16 10:11:21 -05002103 if (uv_blade_nr_possible_cpus(uvhub)) {
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002104 unsigned long val;
2105 unsigned long mmr;
Cliff Wickman93a7ca02010-07-16 10:11:21 -05002106 pnode = uv_blade_to_pnode(uvhub);
2107 /* INIT the bau */
Cliff Wickmanf073cc82011-05-24 13:07:36 -05002108 val = 1L << 63;
2109 write_gmmr_activation(pnode, val);
Cliff Wickman93a7ca02010-07-16 10:11:21 -05002110 mmr = 1; /* should be 1 to broadcast to both sockets */
Cliff Wickmanda87c932012-01-16 15:17:50 -06002111 if (!is_uv1_hub())
2112 write_mmr_data_broadcast(pnode, mmr);
Cliff Wickman93a7ca02010-07-16 10:11:21 -05002113 }
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002114 }
Ingo Molnarb4c286e2008-06-18 14:28:19 +02002115
Cliff Wickman18129242008-06-02 08:56:14 -05002116 return 0;
2117}
Cliff Wickmanb8f7fb12010-04-14 11:35:46 -05002118core_initcall(uv_bau_init);
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05002119fs_initcall(uv_ptc_init);
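
/*
 * core_initcall() runs at an earlier initcall level than fs_initcall(),
 * so uv_bau_init() builds the BAU structures before uv_ptc_init()
 * creates the /proc and debugfs files that inspect and tune them.
 */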