/*
 * SGI UltraViolet TLB flush routines.
 *
 * (c) 2008-2010 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>

/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {
		20,
		160,
		1280,
		10240,
		81920,
		655360,
		5242880,
		167772160
};
static int timeout_us;
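/*
 * The effective destination timeout, timeout_us, is derived during BAU
 * initialization (outside this excerpt).  A sketch of that derivation,
 * assuming the 3-bit urgency7 index and the hardware timeout multipliers
 * have been read from the UVH_AGING_PRESCALE_SEL and
 * UVH_TRANSACTION_TIMEOUT MMRs:
 *
 *	ts_ns = timeout_base_ns[index] * mult1 * mult2;
 *	timeout_us = ts_ns / 1000;
 */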
static int nobau;
static int baudisabled;
static spinlock_t disable_lock;
static cycles_t congested_cycles;

/* tunables: */
static int max_bau_concurrent = MAX_BAU_CONCURRENT;
static int max_bau_concurrent_constant = MAX_BAU_CONCURRENT;
static int plugged_delay = PLUGGED_DELAY;
static int plugsb4reset = PLUGSB4RESET;
static int timeoutsb4reset = TIMEOUTSB4RESET;
static int ipi_reset_limit = IPI_RESET_LIMIT;
static int complete_threshold = COMPLETE_THRESHOLD;
static int congested_response_us = CONGESTED_RESPONSE_US;
static int congested_reps = CONGESTED_REPS;
static int congested_period = CONGESTED_PERIOD;
static struct dentry *tunables_dir;
static struct dentry *tunables_file;
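/*
 * These nine tunables are exposed through a single debugfs file (see
 * tunables_read()/tunables_write() below) in this order:
 *   max_bau_concurrent plugged_delay plugsb4reset timeoutsb4reset
 *   ipi_reset_limit complete_threshold congested_response_us
 *   congested_reps congested_period
 * Writing 0 for a field restores its compiled-in default.
 */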

static int __init setup_nobau(char *arg)
{
	nobau = 1;
	return 0;
}
early_param("nobau", setup_nobau);

/* base pnode in this partition */
static int uv_partition_base_pnode __read_mostly;
/* position of pnode (which is nasid>>1): */
static int uv_nshift __read_mostly;
static unsigned long uv_mmask __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);

/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */
static int __init uvhub_to_first_node(int uvhub)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (uvhub == b)
			return node;
	}
	return -1;
}

/*
 * Determine the apicid of the first cpu on a uvhub.
 */
static int __init uvhub_to_first_apicid(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static inline void uv_reply_to_message(struct msg_desc *mdp,
				       struct bau_control *bcp)
{
	unsigned long dw;
	struct bau_payload_queue_entry *msg;

	msg = mdp->msg;
	if (!msg->canceled) {
		dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) |
						msg->sw_ack_vector;
		uv_write_local_mmr(
				UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
	}
	msg->replied_to = 1;
	msg->sw_ack_vector = 0;
}
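/*
 * Worked example for the write above (illustrative; it assumes
 * UV_SW_ACK_NPENDING is 8, i.e. eight software-ack resources per hub):
 * if this message holds resource 2, msg->sw_ack_vector is 0x04, and the
 * value written to the ALIAS register is (0x04 << 8) | 0x04 = 0x404,
 * clearing both the Timeout and the Pending bit for that resource in a
 * single store.
 */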

/*
 * Process the receipt of a RETRY message
 */
static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
					    struct bau_control *bcp)
{
	int i;
	int cancel_count = 0;
	int slot2;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_payload_queue_entry *msg;
	struct bau_payload_queue_entry *msg2;
	struct ptc_stats *stat;

	msg = mdp->msg;
	stat = bcp->statp;
	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->va_queue_last)
			msg2 = mdp->va_queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as uv_do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->sw_ack_vector) && ((msg2->sw_ack_vector &
			msg->sw_ack_vector) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			slot2 = msg2 - mdp->va_queue_first;
			mmr = uv_read_local_mmr
				(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
			msg_res = msg2->sw_ack_vector;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				/*
				 * is the resource timed out?
				 * make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				uv_write_local_mmr(
				    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
					(msg_res << UV_SW_ACK_NPENDING) |
								msg_res);
			}
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpus may come here at the same time for this message.
 */
static void uv_bau_process_message(struct msg_desc *mdp,
				   struct bau_control *bcp)
{
	int msg_ack_count;
	short socket_ack_count = 0;
	struct ptc_stats *stat;
	struct bau_payload_queue_entry *msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	msg = mdp->msg;
	stat = bcp->statp;
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		stat->d_alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried.  That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		uv_bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a sw_ack message, so we have to reply to it.
	 * Count each responding cpu on the socket.  This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	socket_ack_count = atomic_add_short_return(1, (struct atomic_short *)
			&smaster->socket_acknowledge_count[mdp->msg_slot]);
	if (socket_ack_count == bcp->cpus_in_socket) {
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
		msg_ack_count = atomic_add_short_return(socket_ack_count,
				(struct atomic_short *)&msg->acknowledge_count);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 */
			uv_reply_to_message(mdp, bcp);
		}
	}

	return;
}
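/*
 * Example of the two-level acknowledge counting above (illustrative
 * numbers only): on a hub with 2 sockets of 8 cpus each, each socket
 * counts its own responders in smaster->socket_acknowledge_count[slot];
 * when that count hits cpus_in_socket (8) it is folded into
 * msg->acknowledge_count, and the cpu that brings that total to
 * cpus_in_uvhub (16) sends the single hardware reply.
 */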

/*
 * Determine the first cpu on a uvhub.
 */
static int uvhub_to_first_cpu(int uvhub)
{
	int cpu;
	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return cpu;
	return -1;
}

/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero sw_ack_vector field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * sw_ack resources.
 */
static void
uv_do_reset(void *ptr)
{
	int i;
	int slot;
	int count = 0;
	unsigned long mmr;
	unsigned long msg_res;
	struct bau_control *bcp;
	struct reset_args *rap;
	struct bau_payload_queue_entry *msg;
	struct ptc_stats *stat;

	bcp = &per_cpu(bau_control, smp_processor_id());
	rap = (struct reset_args *)ptr;
	stat = bcp->statp;
	stat->d_resets++;

	/*
	 * We're looking for the given sender, and
	 * will free its sw_ack resource.
	 * If all cpus finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		/* uv_do_reset: same conditions for cancellation as
		   uv_bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->sw_ack_vector) &&
		    (msg->msg_type != MSG_NOOP)) {
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			slot = msg - bcp->va_queue_first;
			count++;
			/*
			 * only reset the resource if it is still pending
			 */
			mmr = uv_read_local_mmr
					(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
			msg_res = msg->sw_ack_vector;
			if (mmr & msg_res) {
				stat->d_rcanceled++;
				uv_write_local_mmr(
				    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
					(msg_res << UV_SW_ACK_NPENDING) |
								msg_res);
			}
		}
	}
	return;
}

/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */
static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution,
			      int sender)
{
	int uvhub;
	int cpu;
	cpumask_t mask;
	struct reset_args reset_args;

	reset_args.sender = sender;

	cpus_clear(mask);
	/* find a single cpu for each uvhub in this distribution mask */
	for (uvhub = 0;
		    uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE;
		    uvhub++) {
		if (!bau_uvhub_isset(uvhub, distribution))
			continue;
		/* find a cpu for this uvhub */
		cpu = uvhub_to_first_cpu(uvhub);
		cpu_set(cpu, mask);
	}
	/* IPI all cpus; preemption is already disabled */
	smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1);
	return;
}

static inline unsigned long
cycles_2_us(unsigned long long cyc)
{
	unsigned long long ns;
	unsigned long us;
	ns = (cyc * per_cpu(cyc2ns, smp_processor_id()))
						>> CYC2NS_SCALE_FACTOR;
	us = ns / 1000;
	return us;
}
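/*
 * Worked example for cycles_2_us() (illustrative; it assumes
 * CYC2NS_SCALE_FACTOR is 10, i.e. cyc2ns holds nanoseconds-per-cycle as
 * a fixed-point value scaled by 1024): on a 2 GHz TSC, cyc2ns is about
 * 512, so 1,000,000 cycles * 512 >> 10 = 500,000 ns = 500 us.
 */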

/*
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * bau_flush_send_and_wait()
 */
static inline void
quiesce_local_uvhub(struct bau_control *hmaster)
{
	atomic_add_short_return(1, (struct atomic_short *)
		&hmaster->uvhub_quiesce);
}

/*
 * mark this quiet-requestor as done
 */
static inline void
end_uvhub_quiesce(struct bau_control *hmaster)
{
	atomic_add_short_return(-1, (struct atomic_short *)
		&hmaster->uvhub_quiesce);
}

/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 */
static int uv_wait_completion(struct bau_desc *bau_desc,
	unsigned long mmr_offset, int right_shift, int this_cpu,
	struct bau_control *bcp, struct bau_control *smaster, long try)
{
	unsigned long descriptor_status;
	cycles_t ttime;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *hmaster;

	hmaster = bcp->uvhub_master;

	/* spin on the status MMR, waiting for it to go idle */
	while ((descriptor_status = (((unsigned long)
		uv_read_local_mmr(mmr_offset) >>
			right_shift) & UV_ACT_STATUS_MASK)) !=
			DESC_STATUS_IDLE) {
		/*
		 * Our software ack messages may be blocked because there are
		 * no swack resources available.  As long as none of them
		 * has timed out hardware will NACK our message and its
		 * state will stay IDLE.
		 */
		if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_status ==
					DESC_STATUS_DESTINATION_TIMEOUT) {
			stat->s_dtimeout++;
			ttime = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttime - bcp->send_message) <
							timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}

			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
		}
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

static inline cycles_t
sec_2_cycles(unsigned long sec)
{
	unsigned long ns;
	cycles_t cyc;

	ns = sec * 1000000000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}

/*
 * conditionally add 1 to *v, unless *v is >= u
 * return 0 if we cannot add 1 to *v because it is >= u
 * return 1 if we can add 1 to *v because it is < u
 * the add is atomic
 *
 * This is close to atomic_add_unless(), but this allows the 'u' value
 * to be lowered below the current 'v'.  atomic_add_unless can only stop
 * on equal.
 */
static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
{
	spin_lock(lock);
	if (atomic_read(v) >= u) {
		spin_unlock(lock);
		return 0;
	}
	atomic_inc(v);
	spin_unlock(lock);
	return 1;
}
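/*
 * Usage sketch: uv_flush_send_and_wait() below uses this (in a slightly
 * unrolled form) as an admission throttle:
 *
 *	while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
 *			&hmaster->active_descriptor_count,
 *			hmaster->max_bau_concurrent))
 *		cpu_relax();
 *
 * so at most max_bau_concurrent broadcasts per uvhub are in flight, and
 * the throttle keeps working even when max_bau_concurrent has just been
 * tuned below the current active count.
 */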

/*
 * Our retries are blocked by all destination swack resources being
 * in use, and a timeout is pending. In that case hardware immediately
 * returns the ERROR that looks like a destination timeout.
 */
static void
destination_plugged(struct bau_desc *bau_desc, struct bau_control *bcp,
	struct bau_control *hmaster, struct ptc_stats *stat)
{
	udelay(bcp->plugged_delay);
	bcp->plugged_tries++;
	if (bcp->plugged_tries >= bcp->plugsb4reset) {
		bcp->plugged_tries = 0;
		quiesce_local_uvhub(hmaster);
		spin_lock(&hmaster->queue_lock);
		uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
		spin_unlock(&hmaster->queue_lock);
		end_uvhub_quiesce(hmaster);
		bcp->ipi_attempts++;
		stat->s_resets_plug++;
	}
}

static void
destination_timeout(struct bau_desc *bau_desc, struct bau_control *bcp,
	struct bau_control *hmaster, struct ptc_stats *stat)
{
	hmaster->max_bau_concurrent = 1;
	bcp->timeout_tries++;
	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
		bcp->timeout_tries = 0;
		quiesce_local_uvhub(hmaster);
		spin_lock(&hmaster->queue_lock);
		uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
		spin_unlock(&hmaster->queue_lock);
		end_uvhub_quiesce(hmaster);
		bcp->ipi_attempts++;
		stat->s_resets_timeout++;
	}
}

/*
 * Completions are taking a very long time due to a congested numalink
 * network.
 */
static void
disable_for_congestion(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;

	/* let only one cpu do this disabling */
	spin_lock(&disable_lock);
	if (!baudisabled && bcp->period_requests &&
	    ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
		/* it becomes this cpu's job to turn on the use of the
		   BAU again */
		baudisabled = 1;
		bcp->set_bau_off = 1;
		bcp->set_bau_on_time = get_cycles() +
			sec_2_cycles(bcp->congested_period);
		stat->s_bau_disabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			tbcp->baudisabled = 1;
		}
	}
	spin_unlock(&disable_lock);
}
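/*
 * Note on re-enabling: only the cpu that set set_bau_off above will
 * re-enable the BAU.  It does so in uv_flush_tlb_others() once
 * get_cycles() passes set_bau_on_time, i.e. after roughly
 * congested_period seconds; until then every cpu falls back to the
 * kernel's IPI-based shootdown.
 */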

/**
 * uv_flush_send_and_wait
 *
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to,
 * including cpus that are on the local uvhub.
 *
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */
int uv_flush_send_and_wait(struct bau_desc *bau_desc,
			   struct cpumask *flush_mask, struct bau_control *bcp)
{
	int right_shift;
	int completion_status = 0;
	int seq_number = 0;
	long try = 0;
	int cpu = bcp->uvhub_cpu;
	int this_cpu = bcp->cpu;
	unsigned long mmr_offset;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;
	cycles_t elapsed;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *smaster = bcp->socket_master;
	struct bau_control *hmaster = bcp->uvhub_master;

	if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
			&hmaster->active_descriptor_count,
			hmaster->max_bau_concurrent)) {
		stat->s_throttles++;
		do {
			cpu_relax();
		} while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
			&hmaster->active_descriptor_count,
			hmaster->max_bau_concurrent));
	}
	while (hmaster->uvhub_quiesce)
		cpu_relax();

	if (cpu < UV_CPUS_PER_ACT_STATUS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = cpu * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift =
		    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
	}
	time1 = get_cycles();
	do {
		if (try == 0) {
			bau_desc->header.msg_type = MSG_REGULAR;
			seq_number = bcp->message_number++;
		} else {
			bau_desc->header.msg_type = MSG_RETRY;
			stat->s_retry_messages++;
		}
		bau_desc->header.sequence = seq_number;
		index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
			bcp->uvhub_cpu;
		bcp->send_message = get_cycles();
		uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
		try++;
		completion_status = uv_wait_completion(bau_desc, mmr_offset,
			right_shift, this_cpu, bcp, smaster, try);

		if (completion_status == FLUSH_RETRY_PLUGGED) {
			destination_plugged(bau_desc, bcp, hmaster, stat);
		} else if (completion_status == FLUSH_RETRY_TIMEOUT) {
			destination_timeout(bau_desc, bcp, hmaster, stat);
		}
		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
			bcp->ipi_attempts = 0;
			completion_status = FLUSH_GIVEUP;
			break;
		}
		cpu_relax();
	} while ((completion_status == FLUSH_RETRY_PLUGGED) ||
		 (completion_status == FLUSH_RETRY_TIMEOUT));
	time2 = get_cycles();
	bcp->plugged_tries = 0;
	bcp->timeout_tries = 0;
	if ((completion_status == FLUSH_COMPLETE) &&
	    (bcp->conseccompletes > bcp->complete_threshold) &&
	    (hmaster->max_bau_concurrent <
					hmaster->max_bau_concurrent_constant))
		hmaster->max_bau_concurrent++;
	while (hmaster->uvhub_quiesce)
		cpu_relax();
	atomic_dec(&hmaster->active_descriptor_count);
	if (time2 > time1) {
		elapsed = time2 - time1;
		stat->s_time += elapsed;
		if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
			bcp->period_requests++;
			bcp->period_time += elapsed;
			if ((elapsed > congested_cycles) &&
			    (bcp->period_requests > bcp->congested_reps)) {
				disable_for_congestion(bcp, stat);
			}
		}
	} else
		stat->s_requestor--;
	if (completion_status == FLUSH_COMPLETE && try > 1)
		stat->s_retriesok++;
	else if (completion_status == FLUSH_GIVEUP) {
		stat->s_giveup++;
		return 1;
	}
	return 0;
}
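/*
 * Summary of the retry state machine above: a send that comes back
 * FLUSH_RETRY_PLUGGED or FLUSH_RETRY_TIMEOUT is resent as a MSG_RETRY;
 * after plugsb4reset (or timeoutsb4reset) consecutive failures the hub
 * is quiesced and an IPI-based reset (uv_reset_with_ipi) frees the
 * stuck sw_ack resources; after ipi_reset_limit such resets the send
 * gives up (FLUSH_GIVEUP) and the caller falls back to IPI shootdown.
 */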

/**
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLBs
 * @cpumask: mask of all cpus in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLBs on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLBs on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct.  This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done.  The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
					  struct mm_struct *mm,
					  unsigned long va, unsigned int cpu)
{
	int tcpu;
	int uvhub;
	int locals = 0;
	int remotes = 0;
	int hubs = 0;
	struct bau_desc *bau_desc;
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;
	struct bau_control *tbcp;

	/* kernel was booted 'nobau' */
	if (nobau)
		return cpumask;

	bcp = &per_cpu(bau_control, cpu);
	stat = bcp->statp;

	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
		/* the cpu that disabled it must re-enable it */
		if (bcp->set_bau_off) {
			if (get_cycles() >= bcp->set_bau_on_time) {
				stat->s_bau_reenabled++;
				baudisabled = 0;
				for_each_present_cpu(tcpu) {
					tbcp = &per_cpu(bau_control, tcpu);
					tbcp->baudisabled = 0;
					tbcp->period_requests = 0;
					tbcp->period_time = 0;
				}
			}
		}
		return cpumask;
	}

	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
	 * cpu mask.  All cpus are converted to uvhubs and copied to the
	 * activation descriptor.
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
	/* don't actually do a shootdown of the local cpu */
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
	if (cpu_isset(cpu, *cpumask))
		stat->s_ntargself++;

	bau_desc = bcp->descriptor_base;
	bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);

	/* cpu statistics */
	for_each_cpu(tcpu, flush_mask) {
		uvhub = uv_cpu_to_blade_id(tcpu);
		bau_uvhub_set(uvhub, &bau_desc->distribution);
		if (uvhub == bcp->uvhub)
			locals++;
		else
			remotes++;
	}
	if ((locals + remotes) == 0)
		return NULL;
	stat->s_requestor++;
	stat->s_ntargcpu += remotes + locals;
	stat->s_ntargremotes += remotes;
	stat->s_ntarglocals += locals;

	/* uvhub statistics */
	hubs = bau_uvhub_weight(&bau_desc->distribution);
	if (locals) {
		stat->s_ntarglocaluvhub++;
		stat->s_ntargremoteuvhub += (hubs - 1);
	} else
		stat->s_ntargremoteuvhub += hubs;
	stat->s_ntarguvhub += hubs;
	if (hubs >= 16)
		stat->s_ntarguvhub16++;
	else if (hubs >= 8)
		stat->s_ntarguvhub8++;
	else if (hubs >= 4)
		stat->s_ntarguvhub4++;
	else if (hubs >= 2)
		stat->s_ntarguvhub2++;
	else
		stat->s_ntarguvhub1++;

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;

	/*
	 * uv_flush_send_and_wait returns 0 if all cpus were messaged,
	 * or 1 if it gave up and the original cpumask should be returned.
	 */
	if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
		return NULL;
	else
		return cpumask;
}
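/*
 * Sketch of the expected call site for the function above (based on the
 * native_flush_tlb_others() in arch/x86/mm/tlb.c of kernels of this
 * era; details may differ by version):
 *
 *	if (is_uv_system()) {
 *		unsigned int cpu;
 *
 *		cpu = get_cpu();
 *		cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
 *		if (cpumask)
 *			flush_tlb_others_ipi(cpumask, mm, va);
 *		put_cpu();
 *		return;
 *	}
 */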

/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
 * We received a broadcast assist message.
 *
 * Interrupts are disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this hub get this interrupt.
 * The last one to see it does the software ack.
 * (the resource will not be freed until noninterruptible cpus see this
 * interrupt; hardware may timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	int count = 0;
	cycles_t time_start;
	struct bau_payload_queue_entry *msg;
	struct bau_control *bcp;
	struct ptc_stats *stat;
	struct msg_desc msgdesc;

	time_start = get_cycles();
	bcp = &per_cpu(bau_control, smp_processor_id());
	stat = bcp->statp;
	msgdesc.va_queue_first = bcp->va_queue_first;
	msgdesc.va_queue_last = bcp->va_queue_last;
	msg = bcp->bau_msg_head;
	while (msg->sw_ack_vector) {
		count++;
		msgdesc.msg_slot = msg - msgdesc.va_queue_first;
		msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1;
		msgdesc.msg = msg;
		uv_bau_process_message(&msgdesc, bcp);
		msg++;
		if (msg > msgdesc.va_queue_last)
			msg = msgdesc.va_queue_first;
		bcp->bau_msg_head = msg;
	}
	stat->d_time += (get_cycles() - time_start);
	if (!count)
		stat->d_nomsg++;
	else if (count > 1)
		stat->d_multmsg++;
	ack_APIC_irq();
}

/*
 * uv_enable_timeouts
 *
 * Each target uvhub (i.e. a uvhub that has cpus) needs to have
 * shootdown message timeouts enabled.  The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */
static void uv_enable_timeouts(void)
{
	int uvhub;
	int nuvhubs;
	int pnode;
	unsigned long mmr_image;

	nuvhubs = uv_num_possible_blades();

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (!uv_blade_nr_possible_cpus(uvhub))
			continue;

		pnode = uv_blade_to_pnode(uvhub);
		mmr_image =
		    uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
		/*
		 * Set the timeout period and then lock it in, in three
		 * steps; captures and locks in the period.
		 *
		 * To program the period, the SOFT_ACK_MODE must be off.
		 */
		mmr_image &= ~((unsigned long)1 <<
		    UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Set the 4-bit period.
		 */
		mmr_image &= ~((unsigned long)0xf <<
		    UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
		mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
		    UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Subsequent reversals of the timebase bit (3) cause an
		 * immediate timeout of one or all INTD resources as
		 * indicated in bits 2:0 (7 causes all of them to timeout).
		 */
		mmr_image |= ((unsigned long)1 <<
		    UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
	}
}

static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}

static inline unsigned long long
microsec_2_cycles(unsigned long microsec)
{
	unsigned long ns;
	unsigned long long cyc;

	ns = microsec * 1000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}

/*
 * Display the statistics through /proc.
 * 'data' points to the cpu number
 */
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	int cpu;

	cpu = *(loff_t *)data;

	if (!cpu) {
		seq_printf(file,
		"# cpu sent stime self locals remotes ncpus localhub ");
		seq_printf(file,
			"remotehub numuvhubs numuvhubs16 numuvhubs8 ");
		seq_printf(file,
			"numuvhubs4 numuvhubs2 numuvhubs1 dto ");
		seq_printf(file,
			"retries rok resetp resett giveup sto bz throt ");
		seq_printf(file,
			"sw_ack recv rtime all ");
		seq_printf(file,
			"one mult none retry canc nocan reset rcan ");
		seq_printf(file,
			"disable enable\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		stat = &per_cpu(ptcstats, cpu);
		/* source side statistics */
		seq_printf(file,
			"cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, stat->s_requestor, cycles_2_us(stat->s_time),
			   stat->s_ntargself, stat->s_ntarglocals,
			   stat->s_ntargremotes, stat->s_ntargcpu,
			   stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
			   stat->s_ntarguvhub, stat->s_ntarguvhub16);
		seq_printf(file, "%ld %ld %ld %ld %ld ",
			   stat->s_ntarguvhub8, stat->s_ntarguvhub4,
			   stat->s_ntarguvhub2, stat->s_ntarguvhub1,
			   stat->s_dtimeout);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
			   stat->s_retry_messages, stat->s_retriesok,
			   stat->s_resets_plug, stat->s_resets_timeout,
			   stat->s_giveup, stat->s_stimeout,
			   stat->s_busy, stat->s_throttles);

		/* destination side statistics */
		seq_printf(file,
			   "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
					UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
			   stat->d_requestee, cycles_2_us(stat->d_time),
			   stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
			   stat->d_nomsg, stat->d_retries, stat->d_canceled,
			   stat->d_nocanceled, stat->d_resets,
			   stat->d_rcanceled);
		seq_printf(file, "%ld %ld\n",
			stat->s_bau_disabled, stat->s_bau_reenabled);
	}

	return 0;
}

/*
 * Display the tunables through debugfs
 */
static ssize_t tunables_read(struct file *file, char __user *userbuf,
				size_t count, loff_t *ppos)
{
	char *buf;
	int ret;

	buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
		"max_bau_concurrent plugged_delay plugsb4reset",
		"timeoutsb4reset ipi_reset_limit complete_threshold",
		"congested_response_us congested_reps congested_period",
		max_bau_concurrent, plugged_delay, plugsb4reset,
		timeoutsb4reset, ipi_reset_limit, complete_threshold,
		congested_response_us, congested_reps, congested_period);

	if (!buf)
		return -ENOMEM;

	ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
	kfree(buf);
	return ret;
}

/*
 * -1: reset the statistics
 *  0: display meaning of the statistics
 */
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
				 size_t count, loff_t *data)
{
	int cpu;
	long input_arg;
	char optstr[64];
	struct ptc_stats *stat;

	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';
	if (strict_strtol(optstr, 10, &input_arg) < 0) {
		printk(KERN_DEBUG "%s is invalid\n", optstr);
		return -EINVAL;
	}

	if (input_arg == 0) {
		printk(KERN_DEBUG "# cpu: cpu number\n");
		printk(KERN_DEBUG "Sender statistics:\n");
		printk(KERN_DEBUG
		"sent: number of shootdown messages sent\n");
		printk(KERN_DEBUG
		"stime: time spent sending messages\n");
		printk(KERN_DEBUG
		"numuvhubs: number of hubs targeted with shootdown\n");
		printk(KERN_DEBUG
		"numuvhubs16: number times 16 or more hubs targeted\n");
		printk(KERN_DEBUG
		"numuvhubs8: number times 8 or more hubs targeted\n");
		printk(KERN_DEBUG
		"numuvhubs4: number times 4 or more hubs targeted\n");
		printk(KERN_DEBUG
		"numuvhubs2: number times 2 or more hubs targeted\n");
		printk(KERN_DEBUG
		"numuvhubs1: number times 1 hub targeted\n");
		printk(KERN_DEBUG
		"numcpus: number of cpus targeted with shootdown\n");
		printk(KERN_DEBUG
		"dto: number of destination timeouts\n");
		printk(KERN_DEBUG
		"retries: destination timeout retries sent\n");
		printk(KERN_DEBUG
		"rok: destination timeouts successfully retried\n");
		printk(KERN_DEBUG
		"resetp: ipi-style resource resets for plugs\n");
		printk(KERN_DEBUG
		"resett: ipi-style resource resets for timeouts\n");
		printk(KERN_DEBUG
		"giveup: fall-backs to ipi-style shootdowns\n");
		printk(KERN_DEBUG
		"sto: number of source timeouts\n");
		printk(KERN_DEBUG
		"bz: number of stay-busy's\n");
		printk(KERN_DEBUG
		"throt: number times spun in throttle\n");
		printk(KERN_DEBUG "Destination side statistics:\n");
		printk(KERN_DEBUG
		"sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
		printk(KERN_DEBUG
		"recv: shootdown messages received\n");
		printk(KERN_DEBUG
		"rtime: time spent processing messages\n");
		printk(KERN_DEBUG
		"all: shootdown all-tlb messages\n");
		printk(KERN_DEBUG
		"one: shootdown one-tlb messages\n");
		printk(KERN_DEBUG
		"mult: interrupts that found multiple messages\n");
		printk(KERN_DEBUG
		"none: interrupts that found no messages\n");
		printk(KERN_DEBUG
		"retry: number of retry messages processed\n");
		printk(KERN_DEBUG
		"canc: number messages canceled by retries\n");
		printk(KERN_DEBUG
		"nocan: number retries that found nothing to cancel\n");
		printk(KERN_DEBUG
		"reset: number of ipi-style reset requests processed\n");
		printk(KERN_DEBUG
		"rcan: number messages canceled by reset requests\n");
		printk(KERN_DEBUG
		"disable: number times use of the BAU was disabled\n");
		printk(KERN_DEBUG
		"enable: number times use of the BAU was re-enabled\n");
	} else if (input_arg == -1) {
		for_each_present_cpu(cpu) {
			stat = &per_cpu(ptcstats, cpu);
			memset(stat, 0, sizeof(struct ptc_stats));
		}
	}

	return count;
}

static int local_atoi(const char *name)
{
	int val = 0;

	for (;; name++) {
		switch (*name) {
		case '0' ... '9':
			val = 10*val+(*name-'0');
			break;
		default:
			return val;
		}
	}
}

/*
 * set the tunables
 * 0 values reset them to defaults
 */
static ssize_t tunables_write(struct file *file, const char __user *user,
				 size_t count, loff_t *data)
{
	int cpu;
	int cnt = 0;
	int val;
	char *p;
	char *q;
	char instr[64];
	struct bau_control *bcp;

	if (count == 0 || count > sizeof(instr)-1)
		return -EINVAL;
	if (copy_from_user(instr, user, count))
		return -EFAULT;

	instr[count] = '\0';
	/* count the fields */
	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (; *p; p = q + strspn(q, WHITESPACE)) {
		q = p + strcspn(p, WHITESPACE);
		cnt++;
		if (q == p)
			break;
	}
	if (cnt != 9) {
		printk(KERN_INFO "bau tunable error: should be 9 numbers\n");
		return -EINVAL;
	}

	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
		q = p + strcspn(p, WHITESPACE);
		val = local_atoi(p);
		switch (cnt) {
		case 0:
			if (val == 0) {
				max_bau_concurrent = MAX_BAU_CONCURRENT;
				max_bau_concurrent_constant =
							MAX_BAU_CONCURRENT;
				continue;
			}
			bcp = &per_cpu(bau_control, smp_processor_id());
			if (val < 1 || val > bcp->cpus_in_uvhub) {
				printk(KERN_DEBUG
				"Error: BAU max concurrent %d is invalid\n",
				val);
				return -EINVAL;
			}
			max_bau_concurrent = val;
			max_bau_concurrent_constant = val;
			continue;
		case 1:
			if (val == 0)
				plugged_delay = PLUGGED_DELAY;
			else
				plugged_delay = val;
			continue;
		case 2:
			if (val == 0)
				plugsb4reset = PLUGSB4RESET;
			else
				plugsb4reset = val;
			continue;
		case 3:
			if (val == 0)
				timeoutsb4reset = TIMEOUTSB4RESET;
			else
				timeoutsb4reset = val;
			continue;
		case 4:
			if (val == 0)
				ipi_reset_limit = IPI_RESET_LIMIT;
			else
				ipi_reset_limit = val;
			continue;
		case 5:
			if (val == 0)
				complete_threshold = COMPLETE_THRESHOLD;
			else
				complete_threshold = val;
			continue;
		case 6:
			if (val == 0)
				congested_response_us = CONGESTED_RESPONSE_US;
			else
				congested_response_us = val;
			continue;
		case 7:
			if (val == 0)
				congested_reps = CONGESTED_REPS;
			else
				congested_reps = val;
			continue;
		case 8:
			if (val == 0)
				congested_period = CONGESTED_PERIOD;
			else
				congested_period = val;
			continue;
		}
		if (q == p)
			break;
	}
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->max_bau_concurrent = max_bau_concurrent;
		bcp->max_bau_concurrent_constant = max_bau_concurrent;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->congested_response_us = congested_response_us;
		bcp->congested_reps = congested_reps;
		bcp->congested_period = congested_period;
	}
	return count;
}
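/*
 * Example use of the debugfs interface (a sketch; it assumes
 * UV_BAU_TUNABLES_DIR is "sgi_uv", UV_BAU_TUNABLES_FILE is
 * "bau_tunables", and debugfs is mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/sgi_uv/bau_tunables
 *	echo "0 0 0 0 0 0 0 0 0" > /sys/kernel/debug/sgi_uv/bau_tunables
 *
 * The write must supply all nine fields; zeros restore the defaults.
 */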

static const struct seq_operations uv_ptc_seq_ops = {
	.start		= uv_ptc_seq_start,
	.next		= uv_ptc_seq_next,
	.stop		= uv_ptc_seq_stop,
	.show		= uv_ptc_seq_show
};

static int uv_ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uv_ptc_seq_ops);
}

static int tunables_open(struct inode *inode, struct file *file)
{
	return 0;
}

static const struct file_operations proc_uv_ptc_operations = {
	.open		= uv_ptc_proc_open,
	.read		= seq_read,
	.write		= uv_ptc_proc_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations tunables_fops = {
	.open		= tunables_open,
	.read		= tunables_read,
	.write		= tunables_write,
	.llseek		= default_llseek,
};

static int __init uv_ptc_init(void)
{
	struct proc_dir_entry *proc_uv_ptc;

	if (!is_uv_system())
		return 0;

	proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
				  &proc_uv_ptc_operations);
	if (!proc_uv_ptc) {
		printk(KERN_ERR "unable to create %s proc entry\n",
		       UV_PTC_BASENAME);
		return -EINVAL;
	}

	tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
	if (!tunables_dir) {
		printk(KERN_ERR "unable to create debugfs directory %s\n",
		       UV_BAU_TUNABLES_DIR);
		return -EINVAL;
	}
	tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
			tunables_dir, NULL, &tunables_fops);
	if (!tunables_file) {
		printk(KERN_ERR "unable to create debugfs file %s\n",
		       UV_BAU_TUNABLES_FILE);
		return -EINVAL;
	}
	return 0;
}
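/*
 * Example use of the /proc interface created above (a sketch; it
 * assumes UV_PTC_BASENAME is "sgi_uv/ptc_statistics"):
 *
 *	cat /proc/sgi_uv/ptc_statistics        (per-cpu counters, one row each)
 *	echo 0 > /proc/sgi_uv/ptc_statistics   (print field legend to the log)
 *	echo -1 > /proc/sgi_uv/ptc_statistics  (reset all counters)
 *
 * See uv_ptc_proc_write() above for the accepted values.
 */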

/*
 * initialize the sending side's sending buffers
 */
static void
uv_activation_descriptor_init(int node, int pnode)
{
	int i;
	int cpu;
	unsigned long pa;
	unsigned long m;
	unsigned long n;
	struct bau_desc *bau_desc;
	struct bau_desc *bd2;
	struct bau_control *bcp;

	/*
	 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
	 * per cpu; and one per cpu on the uvhub (UV_ADP_SIZE)
	 */
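	/*
	 * Rough size check, assuming the conventional UV_ADP_SIZE of 64
	 * (not verified here): 64 slots * 8 items * 64 bytes = 32KB of
	 * descriptors per hub, allocated node-locally below.
	 */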
	bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE
				* UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
	BUG_ON(!bau_desc);

	pa = uv_gpa(bau_desc); /* need the real nasid */
	n = pa >> uv_nshift;
	m = pa & uv_mmask;
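
	/*
	 * n holds the pnode/nasid bits of the descriptor table's global
	 * address and m the node-local offset; the MMR write below
	 * composes the two into the descriptor base register.
	 */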
	uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
			      (n << UV_DESC_BASE_PNODE_SHIFT | m));

	/*
	 * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
	 * cpu even though we only use the first one; one descriptor can
	 * describe a broadcast to 256 uv hubs.
	 */
	for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
		i++, bd2++) {
		memset(bd2, 0, sizeof(struct bau_desc));
		bd2->header.sw_ack_flag = 1;
		/*
		 * base_dest_nodeid is the nasid of the first uvhub
		 * in the partition. The bit map will indicate uvhub numbers,
		 * which are 0-N in a partition. Pnodes are unique system-wide.
		 */
		bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode);
		bd2->header.dest_subnodeid = 0x10; /* the LB */
		bd2->header.command = UV_NET_ENDPOINT_INTD;
		bd2->header.int_both = 1;
		/*
		 * all others need to be set to zero:
		 * fairness chaining multilevel count replied_to
		 */
	}
	for_each_present_cpu(cpu) {
		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
			continue;
		bcp = &per_cpu(bau_control, cpu);
		bcp->descriptor_base = bau_desc;
	}
}

/*
 * initialize the destination side's receiving buffers
 * entered for each uvhub in the partition
 * - node is first node (kernel memory notion) on the uvhub
 * - pnode is the uvhub's physical identifier
 */
static void
uv_payload_queue_init(int node, int pnode)
{
	int pn;
	int cpu;
	char *cp;
	unsigned long pa;
	struct bau_payload_queue_entry *pqp;
	struct bau_payload_queue_entry *pqp_malloc;
	struct bau_control *bcp;

	pqp = kmalloc_node((DEST_Q_SIZE + 1)
			   * sizeof(struct bau_payload_queue_entry),
			   GFP_KERNEL, node);
	BUG_ON(!pqp);
	pqp_malloc = pqp;
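
	/*
	 * Round the queue start up to the next 32-byte boundary (a
	 * payload queue entry is assumed to be 32 bytes; the extra
	 * entry allocated above provides the slack for this).
	 */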
	cp = (char *)pqp + 31;
	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);

	for_each_present_cpu(cpu) {
		if (pnode != uv_cpu_to_pnode(cpu))
			continue;
		/* for every cpu on this pnode: */
		bcp = &per_cpu(bau_control, cpu);
		bcp->va_queue_first = pqp;
		bcp->bau_msg_head = pqp;
		bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
	}
	/*
	 * need the pnode of the node where the memory was actually allocated
	 */
	pa = uv_gpa(pqp);
	pn = pa >> uv_nshift;
	uv_write_global_mmr64(pnode,
			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
			      ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
			      (unsigned long)
			      uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1)));
	/* in effect, all msg_type's are set to MSG_NOOP */
	memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
}

/*
 * Initialization of each UV hub's structures
 */
static void __init uv_init_uvhub(int uvhub, int vector)
{
	int node;
	int pnode;
	unsigned long apicid;

	node = uvhub_to_first_node(uvhub);
	pnode = uv_blade_to_pnode(uvhub);
	uv_activation_descriptor_init(node, pnode);
	uv_payload_queue_init(node, pnode);
	/*
	 * the below initialization can't be in firmware because the
	 * messaging IRQ will be determined by the OS
	 */
	apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
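	/*
	 * UVH_BAU_DATA_CONFIG takes the destination APIC id in its upper
	 * 32 bits and the interrupt vector in its low bits, as the write
	 * below composes them.
	 */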
	uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
			      ((apicid << 32) | vector));
}

/*
 * We will set BAU_MISC_CONTROL with a timeout period.
 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
 * So the destination timeout period has to be calculated from them.
 */
static int
calculate_destination_timeout(void)
{
	unsigned long mmr_image;
	int mult1;
	int mult2;
	int index;
	int base;
	int ret;
	unsigned long ts_ns;

	mult1 = UV_INTD_SOFT_ACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
	mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
	index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
	mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
	mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
	base = timeout_base_ns[index];
	ts_ns = base * mult1 * mult2;
	ret = ts_ns / 1000;
	return ret;
}
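
/*
 * Worked example with hypothetical register contents (the real values
 * are BIOS-dependent): if the selected base is 1280ns, mult1 is 3 and
 * mult2 is 10, then ts_ns = 1280 * 3 * 10 = 38400, and 38 microseconds
 * is returned.
 */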

/*
 * initialize the bau_control structure for each cpu
 */
static int __init uv_init_per_cpu(int nuvhubs)
{
	int i;
	int cpu;
	int pnode;
	int uvhub;
	int have_hmaster;
	short socket = 0;
	unsigned short socket_mask;
	unsigned char *uvhub_mask;
	struct bau_control *bcp;
	struct uvhub_desc *bdp;
	struct socket_desc *sdp;
	struct bau_control *hmaster = NULL;
	struct bau_control *smaster = NULL;
	struct socket_desc {
		short num_cpus;
		short cpu_number[MAX_CPUS_PER_SOCKET];
	};
	struct uvhub_desc {
		unsigned short socket_mask;
		short num_cpus;
		short uvhub;
		short pnode;
		struct socket_desc socket[2];
	};
	struct uvhub_desc *uvhub_descs;

	timeout_us = calculate_destination_timeout();

	uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
	memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
	uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
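	/* one bit per uvhub, set below when any cpu on that hub is seen */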
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		memset(bcp, 0, sizeof(struct bau_control));
		pnode = uv_cpu_hub_info(cpu)->pnode;
		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
		*(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
		bdp = &uvhub_descs[uvhub];
		bdp->num_cpus++;
		bdp->uvhub = uvhub;
		bdp->pnode = pnode;
		/* kludge: 'assuming' one node per socket, and assuming that
		   disabling a socket just leaves a gap in node numbers */
		socket = (cpu_to_node(cpu) & 1);
		bdp->socket_mask |= (1 << socket);
		sdp = &bdp->socket[socket];
		sdp->cpu_number[sdp->num_cpus] = cpu;
		sdp->num_cpus++;
		if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
			printk(KERN_EMERG "%d cpus per socket invalid\n", sdp->num_cpus);
			return 1;
		}
	}
	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
			continue;
		have_hmaster = 0;
		bdp = &uvhub_descs[uvhub];
		socket_mask = bdp->socket_mask;
		socket = 0;
		while (socket_mask) {
			if (!(socket_mask & 1))
				goto nextsocket;
			sdp = &bdp->socket[socket];
			for (i = 0; i < sdp->num_cpus; i++) {
				cpu = sdp->cpu_number[i];
				bcp = &per_cpu(bau_control, cpu);
				bcp->cpu = cpu;
				if (i == 0) {
					smaster = bcp;
					if (!have_hmaster) {
						have_hmaster++;
						hmaster = bcp;
					}
				}
				bcp->cpus_in_uvhub = bdp->num_cpus;
				bcp->cpus_in_socket = sdp->num_cpus;
				bcp->socket_master = smaster;
				bcp->uvhub = bdp->uvhub;
				bcp->uvhub_master = hmaster;
				bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->
						blade_processor_id;
				if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
					printk(KERN_EMERG
						"%d cpus per uvhub invalid\n",
						bcp->uvhub_cpu);
					return 1;
				}
			}
nextsocket:
			socket++;
			socket_mask = (socket_mask >> 1);
		}
	}
	kfree(uvhub_descs);
	kfree(uvhub_mask);
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->baudisabled = 0;
		bcp->statp = &per_cpu(ptcstats, cpu);
		/* time interval to catch a hardware stay-busy bug */
		bcp->timeout_interval = microsec_2_cycles(2*timeout_us);
		bcp->max_bau_concurrent = max_bau_concurrent;
		bcp->max_bau_concurrent_constant = max_bau_concurrent;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->congested_response_us = congested_response_us;
		bcp->congested_reps = congested_reps;
		bcp->congested_period = congested_period;
	}
	return 0;
}

/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
	int uvhub;
	int pnode;
	int nuvhubs;
	int cur_cpu;
	int vector;
	unsigned long mmr;

	if (!is_uv_system())
		return 0;

	if (nobau)
		return 0;

	for_each_possible_cpu(cur_cpu)
		zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
				       GFP_KERNEL, cpu_to_node(cur_cpu));

	uv_nshift = uv_hub_info->m_val;
	uv_mmask = (1UL << uv_hub_info->m_val) - 1;
	nuvhubs = uv_num_possible_blades();
	spin_lock_init(&disable_lock);
	congested_cycles = microsec_2_cycles(congested_response_us);
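	/* responses slower than this many cycles count toward the
	   congestion-based disabling (see baudisabled handling above) */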

	if (uv_init_per_cpu(nuvhubs)) {
		nobau = 1;
		return 0;
	}

	uv_partition_base_pnode = 0x7fffffff;
	for (uvhub = 0; uvhub < nuvhubs; uvhub++)
		if (uv_blade_nr_possible_cpus(uvhub) &&
		    (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
			uv_partition_base_pnode = uv_blade_to_pnode(uvhub);

	vector = UV_BAU_MESSAGE;
	for_each_possible_blade(uvhub)
		if (uv_blade_nr_possible_cpus(uvhub))
			uv_init_uvhub(uvhub, vector);

	uv_enable_timeouts();
	alloc_intr_gate(vector, uv_bau_message_intr1);

	for_each_possible_blade(uvhub) {
		if (uv_blade_nr_possible_cpus(uvhub)) {
			pnode = uv_blade_to_pnode(uvhub);
			/* INIT the bau */
			uv_write_global_mmr64(pnode,
					      UVH_LB_BAU_SB_ACTIVATION_CONTROL,
					      ((unsigned long)1 << 63));
			mmr = 1; /* should be 1 to broadcast to both sockets */
			uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST,
					      mmr);
		}
	}

	return 0;
}
core_initcall(uv_bau_init);
fs_initcall(uv_ptc_init);