/*
 * SGI UltraViolet TLB flush routines.
 *
 * (c) 2008-2010 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>

/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {
		20,
		160,
		1280,
		10240,
		81920,
		655360,
		5242880,
		167772160
};
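
/*
 * Each entry in timeout_base_ns[] is 8 times the previous one, except the
 * final step (5242880 -> 167772160), which is a factor of 32; the 3-bit
 * urgency field (bits 30:28 of UVH_AGING_PRESCALE_SEL) selects one of
 * these eight base periods.
 */
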
static int timeout_us;
static int nobau;
static int baudisabled;
static spinlock_t disable_lock;
static cycles_t congested_cycles;

/* tunables: */
static int max_bau_concurrent = MAX_BAU_CONCURRENT;
static int max_bau_concurrent_constant = MAX_BAU_CONCURRENT;
static int plugged_delay = PLUGGED_DELAY;
static int plugsb4reset = PLUGSB4RESET;
static int timeoutsb4reset = TIMEOUTSB4RESET;
static int ipi_reset_limit = IPI_RESET_LIMIT;
static int complete_threshold = COMPLETE_THRESHOLD;
static int congested_response_us = CONGESTED_RESPONSE_US;
static int congested_reps = CONGESTED_REPS;
static int congested_period = CONGESTED_PERIOD;
static struct dentry *tunables_dir;
static struct dentry *tunables_file;

static int __init setup_nobau(char *arg)
{
	nobau = 1;
	return 0;
}
early_param("nobau", setup_nobau);
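
/*
 * Usage note: booting with "nobau" on the kernel command line sets
 * nobau = 1 via the early_param() above; uv_flush_tlb_others() below then
 * returns the caller's cpumask untouched, which (in the generic x86 flush
 * path, an assumption about the caller) falls back to ordinary IPI-based
 * TLB shootdown.
 */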

/* base pnode in this partition */
static int uv_partition_base_pnode __read_mostly;
/* position of pnode (which is nasid>>1): */
static int uv_nshift __read_mostly;
static unsigned long uv_mmask __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);

/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */
static int __init uvhub_to_first_node(int uvhub)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (uvhub == b)
			return node;
	}
	return -1;
}

/*
 * Determine the apicid of the first cpu on a uvhub.
 */
static int __init uvhub_to_first_apicid(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static inline void uv_reply_to_message(struct msg_desc *mdp,
				       struct bau_control *bcp)
{
	unsigned long dw;
	struct bau_payload_queue_entry *msg;

	msg = mdp->msg;
	if (!msg->canceled) {
		dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) |
				msg->sw_ack_vector;
		uv_write_local_mmr(
			UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
	}
	msg->replied_to = 1;
	msg->sw_ack_vector = 0;
}
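
/*
 * Note on the 'dw' value above: judging by the header comment,
 * sw_ack_vector is written into both the low UV_SW_ACK_NPENDING bits
 * (the Pending half) and the next UV_SW_ACK_NPENDING bits (the Timeout
 * half), so one write to the ALIAS register clears both states for every
 * resource named in the vector.
 */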

/*
 * Process the receipt of a RETRY message
 */
static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
					    struct bau_control *bcp)
{
	int i;
	int cancel_count = 0;
	int slot2;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_payload_queue_entry *msg;
	struct bau_payload_queue_entry *msg2;
	struct ptc_stats *stat;

	msg = mdp->msg;
	stat = bcp->statp;
	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->va_queue_last)
			msg2 = mdp->va_queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as uv_do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->sw_ack_vector) && ((msg2->sw_ack_vector &
			msg->sw_ack_vector) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			slot2 = msg2 - mdp->va_queue_first;
			mmr = uv_read_local_mmr
				(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
			msg_res = msg2->sw_ack_vector;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				/*
				 * is the resource timed out?
				 * make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				uv_write_local_mmr(
				    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
					(msg_res << UV_SW_ACK_NPENDING) |
								msg_res);
			}
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void uv_bau_process_message(struct msg_desc *mdp,
				   struct bau_control *bcp)
{
	int msg_ack_count;
	short socket_ack_count = 0;
	struct ptc_stats *stat;
	struct bau_payload_queue_entry *msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	msg = mdp->msg;
	stat = bcp->statp;
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		stat->d_alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried.  That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		uv_bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a sw_ack message, so we have to reply to it.
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	socket_ack_count = atomic_add_short_return(1, (struct atomic_short *)
			&smaster->socket_acknowledge_count[mdp->msg_slot]);
	if (socket_ack_count == bcp->cpus_in_socket) {
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
		msg_ack_count = atomic_add_short_return(socket_ack_count,
				(struct atomic_short *)&msg->acknowledge_count);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 */
			uv_reply_to_message(mdp, bcp);
		}
	}

	return;
}

/*
 * Determine the first cpu on a uvhub.
 */
static int uvhub_to_first_cpu(int uvhub)
{
	int cpu;
	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return cpu;
	return -1;
}

/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero sw_ack_vector field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * sw_ack resources.
 */
static void
uv_do_reset(void *ptr)
{
	int i;
	int slot;
	int count = 0;
	unsigned long mmr;
	unsigned long msg_res;
	struct bau_control *bcp;
	struct reset_args *rap;
	struct bau_payload_queue_entry *msg;
	struct ptc_stats *stat;

	bcp = &per_cpu(bau_control, smp_processor_id());
	rap = (struct reset_args *)ptr;
	stat = bcp->statp;
	stat->d_resets++;

	/*
	 * We're looking for the given sender, and
	 * will free its sw_ack resource.
	 * If all cpu's finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		/* uv_do_reset: same conditions for cancellation as
		   uv_bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->sw_ack_vector) &&
		    (msg->msg_type != MSG_NOOP)) {
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			slot = msg - bcp->va_queue_first;
			count++;
			/*
			 * only reset the resource if it is still pending
			 */
			mmr = uv_read_local_mmr
				(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
			msg_res = msg->sw_ack_vector;
			if (mmr & msg_res) {
				stat->d_rcanceled++;
				uv_write_local_mmr(
				    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
					(msg_res << UV_SW_ACK_NPENDING) |
								msg_res);
			}
		}
	}
	return;
}

/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */
static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution,
			      int sender)
{
	int uvhub;
	int cpu;
	cpumask_t mask;
	struct reset_args reset_args;

	reset_args.sender = sender;

	cpus_clear(mask);
	/* find a single cpu for each uvhub in this distribution mask */
	for (uvhub = 0;
	     uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE;
	     uvhub++) {
		if (!bau_uvhub_isset(uvhub, distribution))
			continue;
		/* find a cpu for this uvhub */
		cpu = uvhub_to_first_cpu(uvhub);
		cpu_set(cpu, mask);
	}
	/* IPI all cpus; Preemption is already disabled */
	smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1);
	return;
}

static inline unsigned long
cycles_2_us(unsigned long long cyc)
{
	unsigned long long ns;
	unsigned long us;
	ns = (cyc * per_cpu(cyc2ns, smp_processor_id()))
						>> CYC2NS_SCALE_FACTOR;
	us = ns / 1000;
	return us;
}
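
/*
 * The conversion above uses the kernel's per-cpu cyc2ns scale:
 * ns = cycles * cyc2ns >> CYC2NS_SCALE_FACTOR, then ns / 1000 gives
 * microseconds.  For example (hypothetical numbers): on a 2GHz tsc the
 * scale works out to 0.5 ns/cycle, so 2000 cycles is about 1000ns, i.e.
 * 1us.
 */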

/*
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * bau_flush_send_and_wait()
 */
static inline void
quiesce_local_uvhub(struct bau_control *hmaster)
{
	atomic_add_short_return(1, (struct atomic_short *)
		&hmaster->uvhub_quiesce);
}

/*
 * mark this quiet-requestor as done
 */
static inline void
end_uvhub_quiesce(struct bau_control *hmaster)
{
	atomic_add_short_return(-1, (struct atomic_short *)
		&hmaster->uvhub_quiesce);
}

/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 */
static int uv_wait_completion(struct bau_desc *bau_desc,
	unsigned long mmr_offset, int right_shift, int this_cpu,
	struct bau_control *bcp, struct bau_control *smaster, long try)
{
	unsigned long descriptor_status;
	cycles_t ttime;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *hmaster;

	hmaster = bcp->uvhub_master;

	/* spin on the status MMR, waiting for it to go idle */
	while ((descriptor_status = (((unsigned long)
		uv_read_local_mmr(mmr_offset) >>
			right_shift) & UV_ACT_STATUS_MASK)) !=
			DESC_STATUS_IDLE) {
		/*
		 * Our software ack messages may be blocked because there are
		 * no swack resources available.  As long as none of them
		 * has timed out hardware will NACK our message and its
		 * state will stay IDLE.
		 */
		if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_status ==
			DESC_STATUS_DESTINATION_TIMEOUT) {
			stat->s_dtimeout++;
			ttime = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttime - bcp->send_message) <
							timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}

			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
		}
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}


static inline cycles_t
sec_2_cycles(unsigned long sec)
{
	unsigned long ns;
	cycles_t cyc;

	ns = sec * 1000000000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}
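
/*
 * sec_2_cycles() is the inverse of cycles_2_us(), at second granularity;
 * disable_for_congestion() below uses it to compute the time at which a
 * congestion-disabled BAU should be turned back on.
 */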

/*
 * conditionally add 1 to *v, unless *v is >= u
 * return 0 if we cannot add 1 to *v because it is >= u
 * return 1 if we can add 1 to *v because it is < u
 * the add is atomic
 *
 * This is close to atomic_add_unless(), but this allows the 'u' value
 * to be lowered below the current 'v'.  atomic_add_unless can only stop
 * on equal.
 */
static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
{
	spin_lock(lock);
	if (atomic_read(v) >= u) {
		spin_unlock(lock);
		return 0;
	}
	atomic_inc(v);
	spin_unlock(lock);
	return 1;
}
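
/*
 * Example use (see uv_flush_send_and_wait() below): a sender spins until
 * atomic_inc_unless_ge() succeeds in raising active_descriptor_count
 * without reaching max_bau_concurrent, which throttles the number of
 * broadcasts in flight from one uvhub.
 */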

/*
 * Our retries are blocked by all destination swack resources being
 * in use, and a timeout is pending. In that case hardware immediately
 * returns the ERROR that looks like a destination timeout.
 */
static void
destination_plugged(struct bau_desc *bau_desc, struct bau_control *bcp,
	struct bau_control *hmaster, struct ptc_stats *stat)
{
	udelay(bcp->plugged_delay);
	bcp->plugged_tries++;
	if (bcp->plugged_tries >= bcp->plugsb4reset) {
		bcp->plugged_tries = 0;
		quiesce_local_uvhub(hmaster);
		spin_lock(&hmaster->queue_lock);
		uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
		spin_unlock(&hmaster->queue_lock);
		end_uvhub_quiesce(hmaster);
		bcp->ipi_attempts++;
		stat->s_resets_plug++;
	}
}

static void
destination_timeout(struct bau_desc *bau_desc, struct bau_control *bcp,
	struct bau_control *hmaster, struct ptc_stats *stat)
{
	hmaster->max_bau_concurrent = 1;
	bcp->timeout_tries++;
	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
		bcp->timeout_tries = 0;
		quiesce_local_uvhub(hmaster);
		spin_lock(&hmaster->queue_lock);
		uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
		spin_unlock(&hmaster->queue_lock);
		end_uvhub_quiesce(hmaster);
		bcp->ipi_attempts++;
		stat->s_resets_timeout++;
	}
}
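
/*
 * Both handlers above escalate the same way: after plugsb4reset (or
 * timeoutsb4reset) consecutive soft failures the hub is quiesced and
 * uv_reset_with_ipi() forcibly frees the stuck sw_ack resources;
 * destination_timeout() additionally throttles max_bau_concurrent to 1.
 */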

/*
 * Completions are taking a very long time due to a congested numalink
 * network.
 */
static void
disable_for_congestion(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;

	/* let only one cpu do this disabling */
	spin_lock(&disable_lock);
	if (!baudisabled && bcp->period_requests &&
	    ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
		/* it becomes this cpu's job to turn on the use of the
		   BAU again */
		baudisabled = 1;
		bcp->set_bau_off = 1;
		bcp->set_bau_on_time = get_cycles() +
			sec_2_cycles(bcp->congested_period);
		stat->s_bau_disabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			tbcp->baudisabled = 1;
		}
	}
	spin_unlock(&disable_lock);
}
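
/*
 * The trigger above is an average: when period_time / period_requests
 * exceeds congested_cycles (apparently the cycle equivalent of the
 * congested_response_us tunable, set up elsewhere in this file), recent
 * broadcasts have been too slow on average and every cpu's baudisabled
 * flag is set until set_bau_on_time passes.
 */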

/**
 * uv_flush_send_and_wait
 *
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to including
 * cpus that are on the local uvhub.
 *
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */
int uv_flush_send_and_wait(struct bau_desc *bau_desc,
			   struct cpumask *flush_mask, struct bau_control *bcp)
{
	int right_shift;
	int completion_status = 0;
	int seq_number = 0;
	long try = 0;
	int cpu = bcp->uvhub_cpu;
	int this_cpu = bcp->cpu;
	unsigned long mmr_offset;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;
	cycles_t elapsed;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *smaster = bcp->socket_master;
	struct bau_control *hmaster = bcp->uvhub_master;

	if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
			&hmaster->active_descriptor_count,
			hmaster->max_bau_concurrent)) {
		stat->s_throttles++;
		do {
			cpu_relax();
		} while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
			&hmaster->active_descriptor_count,
			hmaster->max_bau_concurrent));
	}
	while (hmaster->uvhub_quiesce)
		cpu_relax();

	if (cpu < UV_CPUS_PER_ACT_STATUS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = cpu * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift =
		    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
	}
	time1 = get_cycles();
	do {
		if (try == 0) {
			bau_desc->header.msg_type = MSG_REGULAR;
			seq_number = bcp->message_number++;
		} else {
			bau_desc->header.msg_type = MSG_RETRY;
			stat->s_retry_messages++;
		}
		bau_desc->header.sequence = seq_number;
		index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
			bcp->uvhub_cpu;
		bcp->send_message = get_cycles();
		uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
		try++;
		completion_status = uv_wait_completion(bau_desc, mmr_offset,
			right_shift, this_cpu, bcp, smaster, try);

		if (completion_status == FLUSH_RETRY_PLUGGED) {
			destination_plugged(bau_desc, bcp, hmaster, stat);
		} else if (completion_status == FLUSH_RETRY_TIMEOUT) {
			destination_timeout(bau_desc, bcp, hmaster, stat);
		}
		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
			bcp->ipi_attempts = 0;
			completion_status = FLUSH_GIVEUP;
			break;
		}
		cpu_relax();
	} while ((completion_status == FLUSH_RETRY_PLUGGED) ||
		 (completion_status == FLUSH_RETRY_TIMEOUT));
	time2 = get_cycles();
	bcp->plugged_tries = 0;
	bcp->timeout_tries = 0;
	if ((completion_status == FLUSH_COMPLETE) &&
	    (bcp->conseccompletes > bcp->complete_threshold) &&
	    (hmaster->max_bau_concurrent <
					hmaster->max_bau_concurrent_constant))
		hmaster->max_bau_concurrent++;
	while (hmaster->uvhub_quiesce)
		cpu_relax();
	atomic_dec(&hmaster->active_descriptor_count);
	if (time2 > time1) {
		elapsed = time2 - time1;
		stat->s_time += elapsed;
		if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
			bcp->period_requests++;
			bcp->period_time += elapsed;
			if ((elapsed > congested_cycles) &&
			    (bcp->period_requests > bcp->congested_reps)) {
				disable_for_congestion(bcp, stat);
			}
		}
	} else
		stat->s_requestor--;
	if (completion_status == FLUSH_COMPLETE && try > 1)
		stat->s_retriesok++;
	else if (completion_status == FLUSH_GIVEUP) {
		stat->s_giveup++;
		return 1;
	}
	return 0;
}
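
/*
 * Note the adaptive throttle managed above: destination_timeout() drops
 * max_bau_concurrent to 1, and once more than complete_threshold
 * consecutive sends complete cleanly it is raised again, one step per
 * send, back toward max_bau_concurrent_constant.
 */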

/**
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct.  This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done.  The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
					  struct mm_struct *mm,
					  unsigned long va, unsigned int cpu)
{
	int locals = 0;
	int remotes = 0;
	int hubs = 0;
	int tcpu;
	int tpnode;
	struct bau_desc *bau_desc;
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;
	struct bau_control *tbcp;
	struct hub_and_pnode *hpp;

	/* kernel was booted 'nobau' */
	if (nobau)
		return cpumask;

	bcp = &per_cpu(bau_control, cpu);
	stat = bcp->statp;

	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
		/* the cpu that disabled it must re-enable it */
		if (bcp->set_bau_off) {
			if (get_cycles() >= bcp->set_bau_on_time) {
				stat->s_bau_reenabled++;
				baudisabled = 0;
				for_each_present_cpu(tcpu) {
					tbcp = &per_cpu(bau_control, tcpu);
					tbcp->baudisabled = 0;
					tbcp->period_requests = 0;
					tbcp->period_time = 0;
				}
			}
		}
		return cpumask;
	}

	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
	 * cpu mask.  All cpus are converted to uvhubs and copied to the
	 * activation descriptor.
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
	/* don't actually do a shootdown of the local cpu */
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
	if (cpu_isset(cpu, *cpumask))
		stat->s_ntargself++;

	bau_desc = bcp->descriptor_base;
	bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);

	for_each_cpu(tcpu, flush_mask) {
		/*
		 * The distribution vector is a bit map of pnodes, relative
		 * to the partition base pnode (and the partition base nasid
		 * in the header).
		 * Translate cpu to pnode and hub using an array stored
		 * in local memory.
		 */
		hpp = &bcp->socket_master->target_hub_and_pnode[tcpu];
		tpnode = hpp->pnode - bcp->partition_base_pnode;
		bau_uvhub_set(tpnode, &bau_desc->distribution);
		if (hpp->uvhub == bcp->uvhub)
			locals++;
		else
			remotes++;
	}
	if ((locals + remotes) == 0)
		return NULL;
	stat->s_requestor++;
	stat->s_ntargcpu += remotes + locals;
	stat->s_ntargremotes += remotes;
	stat->s_ntarglocals += locals;
	remotes = bau_uvhub_weight(&bau_desc->distribution);

	/* uvhub statistics */
	hubs = bau_uvhub_weight(&bau_desc->distribution);
	if (locals) {
		stat->s_ntarglocaluvhub++;
		stat->s_ntargremoteuvhub += (hubs - 1);
	} else
		stat->s_ntargremoteuvhub += hubs;
	stat->s_ntarguvhub += hubs;
	if (hubs >= 16)
		stat->s_ntarguvhub16++;
	else if (hubs >= 8)
		stat->s_ntarguvhub8++;
	else if (hubs >= 4)
		stat->s_ntarguvhub4++;
	else if (hubs >= 2)
		stat->s_ntarguvhub2++;
	else
		stat->s_ntarguvhub1++;

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;

	/*
	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
	 * or 1 if it gave up and the original cpumask should be returned.
	 */
	if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
		return NULL;
	else
		return cpumask;
}
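
/*
 * A sketch of the caller's side, paraphrased from the generic x86 flush
 * path of this era (arch/x86/mm/tlb.c) and included here only as an
 * assumption for illustration:
 *
 *	cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
 *	if (cpumask)
 *		flush_tlb_others_ipi(cpumask, mm, va);
 *
 * i.e. any cpus left in the returned mask are flushed by ordinary IPI.
 */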

/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
 * We received a broadcast assist message.
 *
 * Interrupts are disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this hub get this interrupt.
 * The last one to see it does the software ack.
 * (the resource will not be freed until noninterruptible cpus see this
 * interrupt; hardware may timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	int count = 0;
	cycles_t time_start;
	struct bau_payload_queue_entry *msg;
	struct bau_control *bcp;
	struct ptc_stats *stat;
	struct msg_desc msgdesc;

	time_start = get_cycles();
	bcp = &per_cpu(bau_control, smp_processor_id());
	stat = bcp->statp;
	msgdesc.va_queue_first = bcp->va_queue_first;
	msgdesc.va_queue_last = bcp->va_queue_last;
	msg = bcp->bau_msg_head;
	while (msg->sw_ack_vector) {
		count++;
		msgdesc.msg_slot = msg - msgdesc.va_queue_first;
		msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1;
		msgdesc.msg = msg;
		uv_bau_process_message(&msgdesc, bcp);
		msg++;
		if (msg > msgdesc.va_queue_last)
			msg = msgdesc.va_queue_first;
		bcp->bau_msg_head = msg;
	}
	stat->d_time += (get_cycles() - time_start);
	if (!count)
		stat->d_nomsg++;
	else if (count > 1)
		stat->d_multmsg++;
	ack_APIC_irq();
}

/*
 * uv_enable_timeouts
 *
 * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
 * shootdown message timeouts enabled.  The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */
static void __init uv_enable_timeouts(void)
{
	int uvhub;
	int nuvhubs;
	int pnode;
	unsigned long mmr_image;

	nuvhubs = uv_num_possible_blades();

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (!uv_blade_nr_possible_cpus(uvhub))
			continue;

		pnode = uv_blade_to_pnode(uvhub);
		mmr_image =
		    uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
		/*
		 * Set the timeout period and then lock it in, in three
		 * steps; captures and locks in the period.
		 *
		 * To program the period, the SOFT_ACK_MODE must be off.
		 */
		mmr_image &= ~((unsigned long)1 <<
		    UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Set the 4-bit period.
		 */
		mmr_image &= ~((unsigned long)0xf <<
		    UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
		mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
		    UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Subsequent reversals of the timebase bit (3) cause an
		 * immediate timeout of one or all INTD resources as
		 * indicated in bits 2:0 (7 causes all of them to timeout).
		 */
		mmr_image |= ((unsigned long)1 <<
		    UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
	}
}

static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}

static inline unsigned long long
microsec_2_cycles(unsigned long microsec)
{
	unsigned long ns;
	unsigned long long cyc;

	ns = microsec * 1000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}

Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 943 | /* |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 944 | * Display the statistics thru /proc. |
| 945 | * 'data' points to the cpu number |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 946 | */ |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 947 | static int uv_ptc_seq_show(struct seq_file *file, void *data) |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 948 | { |
| 949 | struct ptc_stats *stat; |
| 950 | int cpu; |
| 951 | |
| 952 | cpu = *(loff_t *)data; |
| 953 | |
| 954 | if (!cpu) { |
| 955 | seq_printf(file, |
Cliff Wickman | 450a007 | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 956 | "# cpu sent stime self locals remotes ncpus localhub "); |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 957 | seq_printf(file, |
Cliff Wickman | 450a007 | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 958 | "remotehub numuvhubs numuvhubs16 numuvhubs8 "); |
| 959 | seq_printf(file, |
| 960 | "numuvhubs4 numuvhubs2 numuvhubs1 dto "); |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 961 | seq_printf(file, |
| 962 | "retries rok resetp resett giveup sto bz throt "); |
| 963 | seq_printf(file, |
| 964 | "sw_ack recv rtime all "); |
| 965 | seq_printf(file, |
Cliff Wickman | 50fb55a | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 966 | "one mult none retry canc nocan reset rcan "); |
| 967 | seq_printf(file, |
| 968 | "disable enable\n"); |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 969 | } |
| 970 | if (cpu < num_possible_cpus() && cpu_online(cpu)) { |
| 971 | stat = &per_cpu(ptcstats, cpu); |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 972 | /* source side statistics */ |
| 973 | seq_printf(file, |
| 974 | "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ", |
| 975 | cpu, stat->s_requestor, cycles_2_us(stat->s_time), |
Cliff Wickman | 450a007 | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 976 | stat->s_ntargself, stat->s_ntarglocals, |
| 977 | stat->s_ntargremotes, stat->s_ntargcpu, |
| 978 | stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub, |
| 979 | stat->s_ntarguvhub, stat->s_ntarguvhub16); |
| 980 | seq_printf(file, "%ld %ld %ld %ld %ld ", |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 981 | stat->s_ntarguvhub8, stat->s_ntarguvhub4, |
| 982 | stat->s_ntarguvhub2, stat->s_ntarguvhub1, |
Cliff Wickman | 450a007 | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 983 | stat->s_dtimeout); |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 984 | seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ", |
| 985 | stat->s_retry_messages, stat->s_retriesok, |
| 986 | stat->s_resets_plug, stat->s_resets_timeout, |
| 987 | stat->s_giveup, stat->s_stimeout, |
| 988 | stat->s_busy, stat->s_throttles); |
Cliff Wickman | e8e5e8a | 2010-06-02 16:22:01 -0500 | [diff] [blame] | 989 | |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 990 | /* destination side statistics */ |
| 991 | seq_printf(file, |
Cliff Wickman | 50fb55a | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 992 | "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ", |
Cliff Wickman | 9674f35 | 2009-04-03 08:34:05 -0500 | [diff] [blame] | 993 | uv_read_global_mmr64(uv_cpu_to_pnode(cpu), |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 994 | UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE), |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 995 | stat->d_requestee, cycles_2_us(stat->d_time), |
| 996 | stat->d_alltlb, stat->d_onetlb, stat->d_multmsg, |
| 997 | stat->d_nomsg, stat->d_retries, stat->d_canceled, |
| 998 | stat->d_nocanceled, stat->d_resets, |
| 999 | stat->d_rcanceled); |
Cliff Wickman | 50fb55a | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 1000 | seq_printf(file, "%ld %ld\n", |
| 1001 | stat->s_bau_disabled, stat->s_bau_reenabled); |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1002 | } |
| 1003 | |
| 1004 | return 0; |
| 1005 | } |
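/*
 * Usage sketch (assuming UV_PTC_BASENAME names something like
 * "sgi_uv/ptc_statistics", as registered by uv_ptc_init() below):
 *
 *	$ cat /proc/sgi_uv/ptc_statistics
 *
 * prints the header line followed by one line of source- and
 * destination-side counters for each online cpu.
 */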
| 1006 | |
| 1007 | /* |
Cliff Wickman | e8e5e8a | 2010-06-02 16:22:01 -0500 | [diff] [blame] | 1008 | * Display the tunables through debugfs |
| 1009 | */ |
| 1010 | static ssize_t tunables_read(struct file *file, char __user *userbuf, |
| 1011 | size_t count, loff_t *ppos) |
| 1012 | { |
Dan Carpenter | b365a85 | 2010-09-29 10:41:05 +0200 | [diff] [blame] | 1013 | char *buf; |
Cliff Wickman | e8e5e8a | 2010-06-02 16:22:01 -0500 | [diff] [blame] | 1014 | int ret; |
| 1015 | |
Dan Carpenter | b365a85 | 2010-09-29 10:41:05 +0200 | [diff] [blame] | 1016 | buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n", |
Cliff Wickman | e8e5e8a | 2010-06-02 16:22:01 -0500 | [diff] [blame] | 1017 | "max_bau_concurrent plugged_delay plugsb4reset", |
| 1018 | "timeoutsb4reset ipi_reset_limit complete_threshold", |
| 1019 | "congested_response_us congested_reps congested_period", |
| 1020 | max_bau_concurrent, plugged_delay, plugsb4reset, |
| 1021 | timeoutsb4reset, ipi_reset_limit, complete_threshold, |
| 1022 | congested_response_us, congested_reps, congested_period); |
| 1023 | |
Dan Carpenter | b365a85 | 2010-09-29 10:41:05 +0200 | [diff] [blame] | 1024 | if (!buf) |
| 1025 | return -ENOMEM; |
| 1026 | |
| 1027 | ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf)); |
| 1028 | kfree(buf); |
| 1029 | return ret; |
Cliff Wickman | e8e5e8a | 2010-06-02 16:22:01 -0500 | [diff] [blame] | 1030 | } |
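/*
 * Usage sketch (path assumed from the UV_BAU_TUNABLES_DIR and
 * UV_BAU_TUNABLES_FILE names used by uv_ptc_init() below, with debugfs
 * mounted at /sys/kernel/debug):
 *
 *	$ cat /sys/kernel/debug/sgi_uv/bau_tunables
 *
 * prints one line naming the nine tunables and a second line with
 * their current values.
 */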
| 1031 | |
| 1032 | /* |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1033 | * -1: reset the statistics |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1034 | * 0: display meaning of the statistics |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1035 | */ |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1036 | static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user, |
Ingo Molnar | b4c286e | 2008-06-18 14:28:19 +0200 | [diff] [blame] | 1037 | size_t count, loff_t *data) |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1038 | { |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1039 | int cpu; |
| 1040 | long input_arg; |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1041 | char optstr[64]; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1042 | struct ptc_stats *stat; |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1043 | |
Cliff Wickman | e7eb872 | 2008-06-23 08:32:25 -0500 | [diff] [blame] | 1044 | if (count == 0 || count > sizeof(optstr)) |
Cliff Wickman | cef5327 | 2008-06-19 11:16:24 -0500 | [diff] [blame] | 1045 | return -EINVAL; |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1046 | if (copy_from_user(optstr, user, count)) |
| 1047 | return -EFAULT; |
| 1048 | optstr[count - 1] = '\0'; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1049 | if (strict_strtol(optstr, 10, &input_arg) < 0) { |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1050 | printk(KERN_DEBUG "%s is invalid\n", optstr); |
| 1051 | return -EINVAL; |
| 1052 | } |
| 1053 | |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1054 | if (input_arg == 0) { |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1055 | printk(KERN_DEBUG "# cpu: cpu number\n"); |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1056 | printk(KERN_DEBUG "Sender statistics:\n"); |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1057 | printk(KERN_DEBUG |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1058 | "sent: number of shootdown messages sent\n"); |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1059 | printk(KERN_DEBUG |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1060 | "stime: time spent sending messages\n"); |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1061 | printk(KERN_DEBUG |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1062 | "numuvhubs: number of hubs targeted with shootdown\n"); |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1063 | printk(KERN_DEBUG |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1064 | "numuvhubs16: number of times 16 or more hubs targeted\n"); |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1065 | printk(KERN_DEBUG |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1066 | "numuvhubs8: number of times 8 or more hubs targeted\n"); |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1067 | printk(KERN_DEBUG |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1068 | "numuvhubs4: number of times 4 or more hubs targeted\n"); |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1069 | printk(KERN_DEBUG |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1070 | "numuvhubs2: number of times 2 or more hubs targeted\n"); |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1071 | printk(KERN_DEBUG |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1072 | "numuvhubs1: number of times 1 hub targeted\n"); |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1073 | printk(KERN_DEBUG |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1074 | "numcpus: number of cpus targeted with shootdown\n"); |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1075 | printk(KERN_DEBUG |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1076 | "dto: number of destination timeouts\n"); |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1077 | printk(KERN_DEBUG |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1078 | "retries: destination timeout retries sent\n"); |
| 1079 | printk(KERN_DEBUG |
| 1080 | "rok: destination timeouts successfully retried\n"); |
| 1081 | printk(KERN_DEBUG |
| 1082 | "resetp: ipi-style resource resets for plugs\n"); |
| 1083 | printk(KERN_DEBUG |
| 1084 | "resett: ipi-style resource resets for timeouts\n"); |
| 1085 | printk(KERN_DEBUG |
| 1086 | "giveup: fall-backs to ipi-style shootdowns\n"); |
| 1087 | printk(KERN_DEBUG |
| 1088 | "sto: number of source timeouts\n"); |
| 1089 | printk(KERN_DEBUG |
| 1090 | "bz: number of stay-busy occurrences\n"); |
| 1091 | printk(KERN_DEBUG |
| 1092 | "throt: number of times spun in throttle\n"); |
| 1093 | printk(KERN_DEBUG "Destination side statistics:\n"); |
| 1094 | printk(KERN_DEBUG |
| 1095 | "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n"); |
| 1096 | printk(KERN_DEBUG |
| 1097 | "recv: shootdown messages received\n"); |
| 1098 | printk(KERN_DEBUG |
| 1099 | "rtime: time spent processing messages\n"); |
| 1100 | printk(KERN_DEBUG |
| 1101 | "all: shootdown all-tlb messages\n"); |
| 1102 | printk(KERN_DEBUG |
| 1103 | "one: shootdown one-tlb messages\n"); |
| 1104 | printk(KERN_DEBUG |
| 1105 | "mult: interrupts that found multiple messages\n"); |
| 1106 | printk(KERN_DEBUG |
| 1107 | "none: interrupts that found no messages\n"); |
| 1108 | printk(KERN_DEBUG |
| 1109 | "retry: number of retry messages processed\n"); |
| 1110 | printk(KERN_DEBUG |
| 1111 | "canc: number of messages canceled by retries\n"); |
| 1112 | printk(KERN_DEBUG |
| 1113 | "nocan: number of retries that found nothing to cancel\n"); |
| 1114 | printk(KERN_DEBUG |
| 1115 | "reset: number of ipi-style reset requests processed\n"); |
| 1116 | printk(KERN_DEBUG |
| 1117 | "rcan: number of messages canceled by reset requests\n"); |
Cliff Wickman | 50fb55a | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 1118 | printk(KERN_DEBUG |
| 1119 | "disable: number of times use of the BAU was disabled\n"); |
| 1120 | printk(KERN_DEBUG |
| 1121 | "enable: number of times use of the BAU was re-enabled\n"); |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1122 | } else if (input_arg == -1) { |
| 1123 | for_each_present_cpu(cpu) { |
| 1124 | stat = &per_cpu(ptcstats, cpu); |
| 1125 | memset(stat, 0, sizeof(struct ptc_stats)); |
| 1126 | } |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1127 | } |
| 1128 | |
| 1129 | return count; |
| 1130 | } |
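/*
 * Usage sketch for the write side (same /proc path as above):
 *
 *	$ echo 0 > /proc/sgi_uv/ptc_statistics	# log the legend
 *	$ echo -1 > /proc/sgi_uv/ptc_statistics	# zero every cpu's stats
 */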
| 1131 | |
Cliff Wickman | e8e5e8a | 2010-06-02 16:22:01 -0500 | [diff] [blame] | 1132 | static int local_atoi(const char *name) |
| 1133 | { |
| 1134 | int val = 0; |
| 1135 | |
| 1136 | for (;; name++) { |
| 1137 | switch (*name) { |
| 1138 | case '0' ... '9': |
| 1139 | val = 10*val+(*name-'0'); |
| 1140 | break; |
| 1141 | default: |
| 1142 | return val; |
| 1143 | } |
| 1144 | } |
| 1145 | } |
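/*
 * local_atoi() accepts only a leading run of decimal digits and stops
 * at the first non-digit, e.g. local_atoi("512 640") == 512; that is
 * how tunables_write() below pulls one value out of each
 * whitespace-separated field.  Note there is no sign handling.
 */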
| 1146 | |
| 1147 | /* |
| 1148 | * set the tunables |
| 1149 | * 0 values reset them to defaults |
| 1150 | */ |
| 1151 | static ssize_t tunables_write(struct file *file, const char __user *user, |
| 1152 | size_t count, loff_t *data) |
| 1153 | { |
| 1154 | int cpu; |
| 1155 | int cnt = 0; |
| 1156 | int val; |
| 1157 | char *p; |
| 1158 | char *q; |
| 1159 | char instr[64]; |
| 1160 | struct bau_control *bcp; |
| 1161 | |
| 1162 | if (count == 0 || count > sizeof(instr)-1) |
| 1163 | return -EINVAL; |
| 1164 | if (copy_from_user(instr, user, count)) |
| 1165 | return -EFAULT; |
| 1166 | |
| 1167 | instr[count] = '\0'; |
| 1168 | /* count the fields */ |
| 1169 | p = instr + strspn(instr, WHITESPACE); |
| 1170 | q = p; |
| 1171 | for (; *p; p = q + strspn(q, WHITESPACE)) { |
| 1172 | q = p + strcspn(p, WHITESPACE); |
| 1173 | cnt++; |
| 1174 | if (q == p) |
| 1175 | break; |
| 1176 | } |
| 1177 | if (cnt != 9) { |
| 1178 | printk(KERN_INFO "bau tunable error: should be 9 numbers\n"); |
| 1179 | return -EINVAL; |
| 1180 | } |
| 1181 | |
| 1182 | p = instr + strspn(instr, WHITESPACE); |
| 1183 | q = p; |
| 1184 | for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) { |
| 1185 | q = p + strcspn(p, WHITESPACE); |
| 1186 | val = local_atoi(p); |
| 1187 | switch (cnt) { |
| 1188 | case 0: |
| 1189 | if (val == 0) { |
| 1190 | max_bau_concurrent = MAX_BAU_CONCURRENT; |
| 1191 | max_bau_concurrent_constant = |
| 1192 | MAX_BAU_CONCURRENT; |
| 1193 | continue; |
| 1194 | } |
| 1195 | bcp = &per_cpu(bau_control, smp_processor_id()); |
| 1196 | if (val < 1 || val > bcp->cpus_in_uvhub) { |
| 1197 | printk(KERN_DEBUG |
| 1198 | "Error: BAU max concurrent %d is invalid\n", |
| 1199 | val); |
| 1200 | return -EINVAL; |
| 1201 | } |
| 1202 | max_bau_concurrent = val; |
| 1203 | max_bau_concurrent_constant = val; |
| 1204 | continue; |
| 1205 | case 1: |
| 1206 | if (val == 0) |
| 1207 | plugged_delay = PLUGGED_DELAY; |
| 1208 | else |
| 1209 | plugged_delay = val; |
| 1210 | continue; |
| 1211 | case 2: |
| 1212 | if (val == 0) |
| 1213 | plugsb4reset = PLUGSB4RESET; |
| 1214 | else |
| 1215 | plugsb4reset = val; |
| 1216 | continue; |
| 1217 | case 3: |
| 1218 | if (val == 0) |
| 1219 | timeoutsb4reset = TIMEOUTSB4RESET; |
| 1220 | else |
| 1221 | timeoutsb4reset = val; |
| 1222 | continue; |
| 1223 | case 4: |
| 1224 | if (val == 0) |
| 1225 | ipi_reset_limit = IPI_RESET_LIMIT; |
| 1226 | else |
| 1227 | ipi_reset_limit = val; |
| 1228 | continue; |
| 1229 | case 5: |
| 1230 | if (val == 0) |
| 1231 | complete_threshold = COMPLETE_THRESHOLD; |
| 1232 | else |
| 1233 | complete_threshold = val; |
| 1234 | continue; |
| 1235 | case 6: |
| 1236 | if (val == 0) |
| 1237 | congested_response_us = CONGESTED_RESPONSE_US; |
| 1238 | else |
| 1239 | congested_response_us = val; |
| 1240 | continue; |
| 1241 | case 7: |
| 1242 | if (val == 0) |
| 1243 | congested_reps = CONGESTED_REPS; |
| 1244 | else |
| 1245 | congested_reps = val; |
| 1246 | continue; |
| 1247 | case 8: |
| 1248 | if (val == 0) |
| 1249 | congested_period = CONGESTED_PERIOD; |
| 1250 | else |
| 1251 | congested_period = val; |
| 1252 | continue; |
| 1253 | } |
| 1254 | if (q == p) |
| 1255 | break; |
| 1256 | } |
| 1257 | for_each_present_cpu(cpu) { |
| 1258 | bcp = &per_cpu(bau_control, cpu); |
| 1259 | bcp->max_bau_concurrent = max_bau_concurrent; |
| 1260 | bcp->max_bau_concurrent_constant = max_bau_concurrent; |
| 1261 | bcp->plugged_delay = plugged_delay; |
| 1262 | bcp->plugsb4reset = plugsb4reset; |
| 1263 | bcp->timeoutsb4reset = timeoutsb4reset; |
| 1264 | bcp->ipi_reset_limit = ipi_reset_limit; |
| 1265 | bcp->complete_threshold = complete_threshold; |
| 1266 | bcp->congested_response_us = congested_response_us; |
| 1267 | bcp->congested_reps = congested_reps; |
| 1268 | bcp->congested_period = congested_period; |
| 1269 | } |
| 1270 | return count; |
| 1271 | } |
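/*
 * Usage sketch: all nine fields must be present, and a 0 restores
 * that tunable's default, so (path as above)
 *
 *	$ echo "0 0 0 0 0 0 0 0 0" > /sys/kernel/debug/sgi_uv/bau_tunables
 *
 * resets every tunable and propagates the result to each cpu's
 * bau_control.
 */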
| 1272 | |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1273 | static const struct seq_operations uv_ptc_seq_ops = { |
Ingo Molnar | dc163a4 | 2008-06-18 14:15:43 +0200 | [diff] [blame] | 1274 | .start = uv_ptc_seq_start, |
| 1275 | .next = uv_ptc_seq_next, |
| 1276 | .stop = uv_ptc_seq_stop, |
| 1277 | .show = uv_ptc_seq_show |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1278 | }; |
| 1279 | |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1280 | static int uv_ptc_proc_open(struct inode *inode, struct file *file) |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1281 | { |
| 1282 | return seq_open(file, &uv_ptc_seq_ops); |
| 1283 | } |
| 1284 | |
Cliff Wickman | e8e5e8a | 2010-06-02 16:22:01 -0500 | [diff] [blame] | 1285 | static int tunables_open(struct inode *inode, struct file *file) |
| 1286 | { |
| 1287 | return 0; |
| 1288 | } |
| 1289 | |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1290 | static const struct file_operations proc_uv_ptc_operations = { |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1291 | .open = uv_ptc_proc_open, |
| 1292 | .read = seq_read, |
| 1293 | .write = uv_ptc_proc_write, |
| 1294 | .llseek = seq_lseek, |
| 1295 | .release = seq_release, |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1296 | }; |
| 1297 | |
Cliff Wickman | e8e5e8a | 2010-06-02 16:22:01 -0500 | [diff] [blame] | 1298 | static const struct file_operations tunables_fops = { |
| 1299 | .open = tunables_open, |
| 1300 | .read = tunables_read, |
| 1301 | .write = tunables_write, |
Arnd Bergmann | 6038f37 | 2010-08-15 18:52:59 +0200 | [diff] [blame] | 1302 | .llseek = default_llseek, |
Cliff Wickman | e8e5e8a | 2010-06-02 16:22:01 -0500 | [diff] [blame] | 1303 | }; |
| 1304 | |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1305 | static int __init uv_ptc_init(void) |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1306 | { |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1307 | struct proc_dir_entry *proc_uv_ptc; |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1308 | |
| 1309 | if (!is_uv_system()) |
| 1310 | return 0; |
| 1311 | |
Alexey Dobriyan | 10f02d11 | 2009-08-23 23:17:27 +0400 | [diff] [blame] | 1312 | proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL, |
| 1313 | &proc_uv_ptc_operations); |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1314 | if (!proc_uv_ptc) { |
| 1315 | printk(KERN_ERR "unable to create %s proc entry\n", |
| 1316 | UV_PTC_BASENAME); |
| 1317 | return -EINVAL; |
| 1318 | } |
Cliff Wickman | e8e5e8a | 2010-06-02 16:22:01 -0500 | [diff] [blame] | 1319 | |
| 1320 | tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL); |
| 1321 | if (!tunables_dir) { |
| 1322 | printk(KERN_ERR "unable to create debugfs directory %s\n", |
| 1323 | UV_BAU_TUNABLES_DIR); |
| 1324 | return -EINVAL; |
| 1325 | } |
| 1326 | tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600, |
| 1327 | tunables_dir, NULL, &tunables_fops); |
| 1328 | if (!tunables_file) { |
| 1329 | printk(KERN_ERR "unable to create debugfs file %s\n", |
| 1330 | UV_BAU_TUNABLES_FILE); |
| 1331 | return -EINVAL; |
| 1332 | } |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1333 | return 0; |
| 1334 | } |
| 1335 | |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1336 | /* |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame^] | 1337 | * Initialize the sending side's sending buffers. |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1338 | */ |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1339 | static void |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame^] | 1340 | uv_activation_descriptor_init(int node, int pnode, int base_pnode) |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1341 | { |
| 1342 | int i; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1343 | int cpu; |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1344 | unsigned long pa; |
| 1345 | unsigned long m; |
| 1346 | unsigned long n; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1347 | struct bau_desc *bau_desc; |
| 1348 | struct bau_desc *bd2; |
| 1349 | struct bau_control *bcp; |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1350 | |
Cliff Wickman | 0e2595c | 2009-05-20 08:10:57 -0500 | [diff] [blame] | 1351 | /* |
| 1352 | * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR) |
Cliff Wickman | cfa6091 | 2011-01-03 12:03:53 -0600 | [diff] [blame] | 1353 | * per cpu, one such set for each cpu slot on the uvhub (UV_ADP_SIZE) |
Cliff Wickman | 0e2595c | 2009-05-20 08:10:57 -0500 | [diff] [blame] | 1354 | */ |
Jesper Juhl | 8e5e952 | 2010-11-09 00:08:11 +0100 | [diff] [blame] | 1355 | bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE |
| 1356 | * UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node); |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1357 | BUG_ON(!bau_desc); |
Ingo Molnar | b4c286e | 2008-06-18 14:28:19 +0200 | [diff] [blame] | 1358 | |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1359 | pa = uv_gpa(bau_desc); /* need the real nasid */ |
| 1360 | n = pa >> uv_nshift; |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1361 | m = pa & uv_mmask; |
Ingo Molnar | b4c286e | 2008-06-18 14:28:19 +0200 | [diff] [blame] | 1362 | |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame^] | 1363 | /* the 14-bit pnode */ |
Cliff Wickman | 9c26f52 | 2009-06-24 09:41:59 -0500 | [diff] [blame] | 1364 | uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, |
| 1365 | (n << UV_DESC_BASE_PNODE_SHIFT | m)); |
Cliff Wickman | 0e2595c | 2009-05-20 08:10:57 -0500 | [diff] [blame] | 1366 | /* |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame^] | 1367 | * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each |
Cliff Wickman | 0e2595c | 2009-05-20 08:10:57 -0500 | [diff] [blame] | 1368 | * cpu even though we only use the first one; one descriptor can |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1369 | * describe a broadcast to 256 uv hubs. |
Cliff Wickman | 0e2595c | 2009-05-20 08:10:57 -0500 | [diff] [blame] | 1370 | */ |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1371 | for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR); |
| 1372 | i++, bd2++) { |
| 1373 | memset(bd2, 0, sizeof(struct bau_desc)); |
| 1374 | bd2->header.sw_ack_flag = 1; |
Cliff Wickman | 94ca8e4 | 2009-04-14 10:56:48 -0500 | [diff] [blame] | 1375 | /* |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame^] | 1376 | * The base_dest_nasid set in the message header is the nasid |
| 1377 | * of the first uvhub in the partition. The bit map will |
| 1378 | * indicate destination pnode numbers relative to that base. |
| 1379 | * They may not be consecutive if nasid striding is being used. |
Cliff Wickman | 94ca8e4 | 2009-04-14 10:56:48 -0500 | [diff] [blame] | 1380 | */ |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame^] | 1381 | bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode); |
| 1382 | bd2->header.dest_subnodeid = UV_LB_SUBNODEID; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1383 | bd2->header.command = UV_NET_ENDPOINT_INTD; |
| 1384 | bd2->header.int_both = 1; |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1385 | /* |
| 1386 | * all others need to be set to zero: |
| 1387 | * fairness chaining multilevel count replied_to |
| 1388 | */ |
| 1389 | } |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1390 | for_each_present_cpu(cpu) { |
| 1391 | if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu))) |
| 1392 | continue; |
| 1393 | bcp = &per_cpu(bau_control, cpu); |
| 1394 | bcp->descriptor_base = bau_desc; |
| 1395 | } |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1396 | } |
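/*
 * Sketch of the descriptor-base encoding written above, with made-up
 * numbers.  uv_nshift and uv_mmask (derived from uv_hub_info->m_val in
 * uv_bau_init()) split a global address into node and offset parts:
 * with m_val == 26 and pa == 0x30000c0000,
 *	n = pa >> 26 == 0xc00 (the nasid-bearing bits)
 *	m = pa & ((1UL << 26) - 1) == 0xc0000 (the on-node offset)
 * and the MMR is loaded with (n << UV_DESC_BASE_PNODE_SHIFT) | m.
 */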
| 1397 | |
| 1398 | /* |
| 1399 | * initialize the destination side's receiving buffers |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1400 | * entered for each uvhub in the partition |
| 1401 | * - node is first node (kernel memory notion) on the uvhub |
| 1402 | * - pnode is the uvhub's physical identifier |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1403 | */ |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1404 | static void |
| 1405 | uv_payload_queue_init(int node, int pnode) |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1406 | { |
Cliff Wickman | 4ea3c51 | 2009-04-16 07:53:09 -0500 | [diff] [blame] | 1407 | int pn; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1408 | int cpu; |
Ingo Molnar | b4c286e | 2008-06-18 14:28:19 +0200 | [diff] [blame] | 1409 | char *cp; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1410 | unsigned long pa; |
| 1411 | struct bau_payload_queue_entry *pqp; |
| 1412 | struct bau_payload_queue_entry *pqp_malloc; |
| 1413 | struct bau_control *bcp; |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1414 | |
Jesper Juhl | 8e5e952 | 2010-11-09 00:08:11 +0100 | [diff] [blame] | 1415 | pqp = kmalloc_node((DEST_Q_SIZE + 1) |
| 1416 | * sizeof(struct bau_payload_queue_entry), |
| 1417 | GFP_KERNEL, node); |
Ingo Molnar | dc163a4 | 2008-06-18 14:15:43 +0200 | [diff] [blame] | 1418 | BUG_ON(!pqp); |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1419 | pqp_malloc = pqp; |
Ingo Molnar | b4c286e | 2008-06-18 14:28:19 +0200 | [diff] [blame] | 1420 | |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1421 | cp = (char *)pqp + 31; |
| 1422 | pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5); |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1423 | |
| 1424 | for_each_present_cpu(cpu) { |
| 1425 | if (pnode != uv_cpu_to_pnode(cpu)) |
| 1426 | continue; |
| 1427 | /* for every cpu on this pnode: */ |
| 1428 | bcp = &per_cpu(bau_control, cpu); |
| 1429 | bcp->va_queue_first = pqp; |
| 1430 | bcp->bau_msg_head = pqp; |
| 1431 | bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1); |
| 1432 | } |
Cliff Wickman | 4ea3c51 | 2009-04-16 07:53:09 -0500 | [diff] [blame] | 1433 | /* |
| 1434 | * need the pnode where the memory was really allocated |
| 1435 | */ |
| 1436 | pa = uv_gpa(pqp); |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1437 | pn = pa >> uv_nshift; |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1438 | uv_write_global_mmr64(pnode, |
| 1439 | UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, |
Cliff Wickman | 4ea3c51 | 2009-04-16 07:53:09 -0500 | [diff] [blame] | 1440 | ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1441 | uv_physnodeaddr(pqp)); |
| 1442 | uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, |
| 1443 | uv_physnodeaddr(pqp)); |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1444 | uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, |
| 1445 | (unsigned long) |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1446 | uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1))); |
| 1447 | /* in effect, all msg_type fields are set to MSG_NOOP */ |
Ingo Molnar | dc163a4 | 2008-06-18 14:15:43 +0200 | [diff] [blame] | 1448 | memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE); |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1449 | } |
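/*
 * The payload queue must be 32-byte aligned; the arithmetic above gets
 * that by over-allocating one entry and rounding up:
 *
 *	cp  = (char *)pqp + 31;
 *	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
 *
 * which is what ALIGN((unsigned long)pqp, 32) expands to; e.g. an
 * allocation at ...0x1008 is used starting at ...0x1020.
 */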
| 1450 | |
| 1451 | /* |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1452 | * Initialization of each UV hub's structures |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1453 | */ |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame^] | 1454 | static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode) |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1455 | { |
Cliff Wickman | 9674f35 | 2009-04-03 08:34:05 -0500 | [diff] [blame] | 1456 | int node; |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1457 | int pnode; |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1458 | unsigned long apicid; |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1459 | |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1460 | node = uvhub_to_first_node(uvhub); |
| 1461 | pnode = uv_blade_to_pnode(uvhub); |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame^] | 1462 | uv_activation_descriptor_init(node, pnode, base_pnode); |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1463 | uv_payload_queue_init(node, pnode); |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1464 | /* |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame^] | 1465 | * The below initialization can't be in firmware because the |
| 1466 | * messaging IRQ will be determined by the OS. |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1467 | */ |
Dimitri Sivanich | 8191c9f | 2010-11-16 16:23:52 -0600 | [diff] [blame] | 1468 | apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits; |
Cliff Wickman | e38e2af | 2009-11-19 17:12:43 -0600 | [diff] [blame] | 1469 | uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1470 | ((apicid << 32) | vector)); |
| 1471 | } |
| 1472 | |
| 1473 | /* |
Cliff Wickman | 12a6611 | 2010-06-02 16:22:01 -0500 | [diff] [blame] | 1474 | * We will set BAU_MISC_CONTROL with a timeout period. |
| 1475 | * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT. |
| 1476 | * So the destination timeout period has to be calculated from them. |
| 1477 | */ |
| 1478 | static int |
| 1479 | calculate_destination_timeout(void) |
| 1480 | { |
| 1481 | unsigned long mmr_image; |
| 1482 | int mult1; |
| 1483 | int mult2; |
| 1484 | int index; |
| 1485 | int base; |
| 1486 | int ret; |
| 1487 | unsigned long ts_ns; |
| 1488 | |
| 1489 | mult1 = UV_INTD_SOFT_ACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK; |
| 1490 | mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL); |
| 1491 | index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK; |
| 1492 | mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT); |
| 1493 | mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK; |
| 1494 | base = timeout_base_ns[index]; |
| 1495 | ts_ns = base * mult1 * mult2; |
| 1496 | ret = ts_ns / 1000; |
| 1497 | return ret; |
| 1498 | } |
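/*
 * Worked example with hypothetical register contents: if the urgency7
 * field selects index 2, base = timeout_base_ns[2] = 1280 ns; with
 * mult1 == 15 and mult2 == 10, ts_ns = 1280 * 15 * 10 = 192000 ns and
 * calculate_destination_timeout() returns 192 microseconds.
 */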
| 1499 | |
| 1500 | /* |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1501 | * initialize the bau_control structure for each cpu |
| 1502 | */ |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame^] | 1503 | static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode) |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1504 | { |
Cliff Wickman | a8328ee | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 1505 | int i; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1506 | int cpu; |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame^] | 1507 | int tcpu; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1508 | int pnode; |
| 1509 | int uvhub; |
Cliff Wickman | c4026cf | 2010-07-30 14:10:55 -0500 | [diff] [blame] | 1510 | int have_hmaster; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1511 | short socket = 0; |
Cliff Wickman | a8328ee | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 1512 | unsigned short socket_mask; |
Cliff Wickman | c4026cf | 2010-07-30 14:10:55 -0500 | [diff] [blame] | 1513 | unsigned char *uvhub_mask; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1514 | struct bau_control *bcp; |
| 1515 | struct uvhub_desc *bdp; |
| 1516 | struct socket_desc *sdp; |
| 1517 | struct bau_control *hmaster = NULL; |
| 1518 | struct bau_control *smaster = NULL; |
| 1519 | struct socket_desc { |
| 1520 | short num_cpus; |
Cliff Wickman | cfa6091 | 2011-01-03 12:03:53 -0600 | [diff] [blame] | 1521 | short cpu_number[MAX_CPUS_PER_SOCKET]; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1522 | }; |
| 1523 | struct uvhub_desc { |
Cliff Wickman | a8328ee | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 1524 | unsigned short socket_mask; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1525 | short num_cpus; |
| 1526 | short uvhub; |
| 1527 | short pnode; |
| 1528 | struct socket_desc socket[2]; |
| 1529 | }; |
| 1530 | struct uvhub_desc *uvhub_descs; |
| 1531 | |
Cliff Wickman | 12a6611 | 2010-06-02 16:22:01 -0500 | [diff] [blame] | 1532 | timeout_us = calculate_destination_timeout(); |
| 1533 | |
Jesper Juhl | 8e5e952 | 2010-11-09 00:08:11 +0100 | [diff] [blame] | 1534 | uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL); |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1535 | memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc)); |
Cliff Wickman | c4026cf | 2010-07-30 14:10:55 -0500 | [diff] [blame] | 1536 | uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL); |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1537 | for_each_present_cpu(cpu) { |
| 1538 | bcp = &per_cpu(bau_control, cpu); |
| 1539 | memset(bcp, 0, sizeof(struct bau_control)); |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1540 | pnode = uv_cpu_hub_info(cpu)->pnode; |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame^] | 1541 | if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) { |
| 1542 | printk(KERN_EMERG |
| 1543 | "cpu %d pnode %d-%d beyond %d; BAU disabled\n", |
| 1544 | cpu, pnode, base_part_pnode, |
| 1545 | UV_DISTRIBUTION_SIZE); |
| 1546 | return 1; |
| 1547 | } |
| 1548 | bcp->osnode = cpu_to_node(cpu); |
| 1549 | bcp->partition_base_pnode = uv_partition_base_pnode; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1550 | uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; |
Cliff Wickman | c4026cf | 2010-07-30 14:10:55 -0500 | [diff] [blame] | 1551 | *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8)); |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1552 | bdp = &uvhub_descs[uvhub]; |
| 1553 | bdp->num_cpus++; |
| 1554 | bdp->uvhub = uvhub; |
| 1555 | bdp->pnode = pnode; |
Cliff Wickman | a8328ee | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 1556 | /* kludge: 'assuming' one node per socket, and assuming that |
| 1557 | disabling a socket just leaves a gap in node numbers */ |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame^] | 1558 | socket = bcp->osnode & 1; |
Cliff Wickman | a8328ee | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 1559 | bdp->socket_mask |= (1 << socket); |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1560 | sdp = &bdp->socket[socket]; |
| 1561 | sdp->cpu_number[sdp->num_cpus] = cpu; |
| 1562 | sdp->num_cpus++; |
Cliff Wickman | cfa6091 | 2011-01-03 12:03:53 -0600 | [diff] [blame] | 1563 | if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) { |
| 1564 | printk(KERN_EMERG "%d cpus per socket invalid\n", sdp->num_cpus); |
| 1565 | return 1; |
| 1566 | } |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1567 | } |
Cliff Wickman | c4026cf | 2010-07-30 14:10:55 -0500 | [diff] [blame] | 1568 | for (uvhub = 0; uvhub < nuvhubs; uvhub++) { |
| 1569 | if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8)))) |
| 1570 | continue; |
| 1571 | have_hmaster = 0; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1572 | bdp = &uvhub_descs[uvhub]; |
Cliff Wickman | a8328ee | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 1573 | socket_mask = bdp->socket_mask; |
| 1574 | socket = 0; |
| 1575 | while (socket_mask) { |
| 1576 | if (!(socket_mask & 1)) |
| 1577 | goto nextsocket; |
| 1578 | sdp = &bdp->socket[socket]; |
| 1579 | for (i = 0; i < sdp->num_cpus; i++) { |
| 1580 | cpu = sdp->cpu_number[i]; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1581 | bcp = &per_cpu(bau_control, cpu); |
| 1582 | bcp->cpu = cpu; |
Cliff Wickman | a8328ee | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 1583 | if (i == 0) { |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1584 | smaster = bcp; |
Cliff Wickman | c4026cf | 2010-07-30 14:10:55 -0500 | [diff] [blame] | 1585 | if (!have_hmaster) { |
| 1586 | have_hmaster++; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1587 | hmaster = bcp; |
Cliff Wickman | c4026cf | 2010-07-30 14:10:55 -0500 | [diff] [blame] | 1588 | } |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1589 | } |
| 1590 | bcp->cpus_in_uvhub = bdp->num_cpus; |
| 1591 | bcp->cpus_in_socket = sdp->num_cpus; |
| 1592 | bcp->socket_master = smaster; |
Cliff Wickman | a8328ee | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 1593 | bcp->uvhub = bdp->uvhub; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1594 | bcp->uvhub_master = hmaster; |
Cliff Wickman | a8328ee | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 1595 | bcp->uvhub_cpu = uv_cpu_hub_info(cpu)-> |
| 1596 | blade_processor_id; |
Cliff Wickman | cfa6091 | 2011-01-03 12:03:53 -0600 | [diff] [blame] | 1597 | if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) { |
| 1598 | printk(KERN_EMERG |
| 1599 | "%d cpus per uvhub invalid\n", |
| 1600 | bcp->uvhub_cpu); |
| 1601 | return 1; |
| 1602 | } |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1603 | } |
Cliff Wickman | a8328ee | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 1604 | nextsocket: |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1605 | socket++; |
Cliff Wickman | a8328ee | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 1606 | socket_mask = (socket_mask >> 1); |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame^] | 1607 | /* each socket gets a local array of pnodes/hubs */ |
| 1608 | bcp = smaster; |
| 1609 | bcp->target_hub_and_pnode = kmalloc_node( |
| 1610 | sizeof(struct hub_and_pnode) * |
| 1611 | num_possible_cpus(), GFP_KERNEL, bcp->osnode); |
| 1612 | memset(bcp->target_hub_and_pnode, 0, |
| 1613 | sizeof(struct hub_and_pnode) * |
| 1614 | num_possible_cpus()); |
| 1615 | for_each_present_cpu(tcpu) { |
| 1616 | bcp->target_hub_and_pnode[tcpu].pnode = |
| 1617 | uv_cpu_hub_info(tcpu)->pnode; |
| 1618 | bcp->target_hub_and_pnode[tcpu].uvhub = |
| 1619 | uv_cpu_hub_info(tcpu)->numa_blade_id; |
| 1620 | } |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1621 | } |
| 1622 | } |
| 1623 | kfree(uvhub_descs); |
Cliff Wickman | c4026cf | 2010-07-30 14:10:55 -0500 | [diff] [blame] | 1624 | kfree(uvhub_mask); |
Cliff Wickman | e8e5e8a | 2010-06-02 16:22:01 -0500 | [diff] [blame] | 1625 | for_each_present_cpu(cpu) { |
| 1626 | bcp = &per_cpu(bau_control, cpu); |
Cliff Wickman | 50fb55a | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 1627 | bcp->baudisabled = 0; |
Cliff Wickman | 712157a | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 1628 | bcp->statp = &per_cpu(ptcstats, cpu); |
Cliff Wickman | e8e5e8a | 2010-06-02 16:22:01 -0500 | [diff] [blame] | 1629 | /* time interval to catch a hardware stay-busy bug */ |
| 1630 | bcp->timeout_interval = microsec_2_cycles(2*timeout_us); |
| 1631 | bcp->max_bau_concurrent = max_bau_concurrent; |
| 1632 | bcp->max_bau_concurrent_constant = max_bau_concurrent; |
| 1633 | bcp->plugged_delay = plugged_delay; |
| 1634 | bcp->plugsb4reset = plugsb4reset; |
| 1635 | bcp->timeoutsb4reset = timeoutsb4reset; |
| 1636 | bcp->ipi_reset_limit = ipi_reset_limit; |
| 1637 | bcp->complete_threshold = complete_threshold; |
| 1638 | bcp->congested_response_us = congested_response_us; |
| 1639 | bcp->congested_reps = congested_reps; |
| 1640 | bcp->congested_period = congested_period; |
| 1641 | } |
Cliff Wickman | cfa6091 | 2011-01-03 12:03:53 -0600 | [diff] [blame] | 1642 | return 0; |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1643 | } |
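/*
 * The uvhub_mask used above is a plain bit array: hub h occupies bit
 * h%8 of byte h/8.  A minimal helper sketch making that convention
 * explicit (hypothetical name, not used by the code above):
 */
static inline int uvhub_mask_test(const unsigned char *uvhub_mask, int uvhub)
{
	/* non-zero when uv_init_per_cpu() saw a cpu on this uvhub */
	return uvhub_mask[uvhub / 8] & (1 << (uvhub % 8));
}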
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1644 | |
| 1645 | /* |
| 1646 | * Initialization of BAU-related structures |
| 1647 | */ |
Cliff Wickman | b194b12 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 1648 | static int __init uv_bau_init(void) |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1649 | { |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1650 | int uvhub; |
| 1651 | int pnode; |
| 1652 | int nuvhubs; |
Rusty Russell | 2c74d66 | 2009-03-18 08:22:30 +1030 | [diff] [blame] | 1653 | int cur_cpu; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1654 | int vector; |
| 1655 | unsigned long mmr; |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1656 | |
| 1657 | if (!is_uv_system()) |
| 1658 | return 0; |
| 1659 | |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1660 | if (nobau) |
| 1661 | return 0; |
| 1662 | |
Rusty Russell | 76ba0ec | 2009-03-13 14:49:57 +1030 | [diff] [blame] | 1663 | for_each_possible_cpu(cur_cpu) |
Yinghai Lu | eaa9584 | 2009-06-06 14:51:36 -0700 | [diff] [blame] | 1664 | zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), |
Rusty Russell | 76ba0ec | 2009-03-13 14:49:57 +1030 | [diff] [blame] | 1665 | GFP_KERNEL, cpu_to_node(cur_cpu)); |
| 1666 | |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1667 | uv_nshift = uv_hub_info->m_val; |
Robin Holt | 036ed8b | 2009-10-15 17:40:00 -0500 | [diff] [blame] | 1668 | uv_mmask = (1UL << uv_hub_info->m_val) - 1; |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1669 | nuvhubs = uv_num_possible_blades(); |
Cliff Wickman | 50fb55a | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 1670 | spin_lock_init(&disable_lock); |
| 1671 | congested_cycles = microsec_2_cycles(congested_response_us); |
Cliff Wickman | 9674f35 | 2009-04-03 08:34:05 -0500 | [diff] [blame] | 1672 | |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame^] | 1673 | uv_partition_base_pnode = 0x7fffffff; |
| 1674 | for (uvhub = 0; uvhub < nuvhubs; uvhub++) { |
| 1675 | if (uv_blade_nr_possible_cpus(uvhub) && |
| 1676 | (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode)) |
| 1677 | uv_partition_base_pnode = uv_blade_to_pnode(uvhub); |
| 1678 | } |
| 1679 | |
| 1680 | if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) { |
Cliff Wickman | cfa6091 | 2011-01-03 12:03:53 -0600 | [diff] [blame] | 1681 | nobau = 1; |
| 1682 | return 0; |
| 1683 | } |
Ingo Molnar | b4c286e | 2008-06-18 14:28:19 +0200 | [diff] [blame] | 1684 | |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1685 | vector = UV_BAU_MESSAGE; |
| 1686 | for_each_possible_blade(uvhub) |
| 1687 | if (uv_blade_nr_possible_cpus(uvhub)) |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame^] | 1688 | uv_init_uvhub(uvhub, vector, uv_partition_base_pnode); |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1689 | |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1690 | uv_enable_timeouts(); |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1691 | alloc_intr_gate(vector, uv_bau_message_intr1); |
| 1692 | |
| 1693 | for_each_possible_blade(uvhub) { |
Cliff Wickman | 93a7ca0 | 2010-07-16 10:11:21 -0500 | [diff] [blame] | 1694 | if (uv_blade_nr_possible_cpus(uvhub)) { |
| 1695 | pnode = uv_blade_to_pnode(uvhub); |
| 1696 | /* INIT the bau */ |
| 1697 | uv_write_global_mmr64(pnode, |
| 1698 | UVH_LB_BAU_SB_ACTIVATION_CONTROL, |
| 1699 | ((unsigned long)1 << 63)); |
| 1700 | mmr = 1; /* should be 1 to broadcast to both sockets */ |
| 1701 | uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST, |
| 1702 | mmr); |
| 1703 | } |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1704 | } |
Ingo Molnar | b4c286e | 2008-06-18 14:28:19 +0200 | [diff] [blame] | 1705 | |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1706 | return 0; |
| 1707 | } |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 1708 | core_initcall(uv_bau_init); |
Cliff Wickman | e8e5e8a | 2010-06-02 16:22:01 -0500 | [diff] [blame] | 1709 | fs_initcall(uv_ptc_init); |