Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 1 | /* |
| 2 | * This file is subject to the terms and conditions of the GNU General Public |
| 3 | * License. See the file "COPYING" in the main directory of this archive |
| 4 | * for more details. |
| 5 | * |
| 6 | * SGI UV Broadcast Assist Unit definitions |
| 7 | * |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 8 | * Copyright (C) 2008-2011 Silicon Graphics, Inc. All rights reserved. |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 9 | */ |
| 10 | |
H. Peter Anvin | 05e4d31 | 2008-10-23 00:01:39 -0700 | [diff] [blame] | 11 | #ifndef _ASM_X86_UV_UV_BAU_H |
| 12 | #define _ASM_X86_UV_UV_BAU_H |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 13 | |
#include <linux/bitmap.h>
/* bits per byte, for bit/byte arithmetic — NOTE(review): kernel also has
   BITS_PER_BYTE in <linux/bitops.h>; confirm users in tlb_uv.c before unifying */
#define BITSPERBYTE 8
| 16 | |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 17 | /* |
Cliff Wickman | b194b120 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 18 | * Broadcast Assist Unit messaging structures |
| 19 | * |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 20 | * Selective Broadcast activations are induced by software action |
| 21 | * specifying a particular 8-descriptor "set" via a 6-bit index written |
| 22 | * to an MMR. |
| 23 | * Thus there are 64 unique 512-byte sets of SB descriptors - one set for |
| 24 | * each 6-bit index value. These descriptor sets are mapped in sequence |
| 25 | * starting with set 0 located at the address specified in the |
| 26 | * BAU_SB_DESCRIPTOR_BASE register, set 1 is located at BASE + 512, |
| 27 | * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on. |
| 28 | * |
Cliff Wickman | cfa6091 | 2011-01-03 12:03:53 -0600 | [diff] [blame] | 29 | * We will use one set for sending BAU messages from each of the |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 30 | * cpu's on the uvhub. |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 31 | * |
| 32 | * TLB shootdown will use the first of the 8 descriptors of each set. |
| 33 | * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set). |
| 34 | */ |
| 35 | |
/* sizing limits for the BAU (cpu counts are hardware maxima) */
#define MAX_CPUS_PER_UVHUB	64
#define MAX_CPUS_PER_SOCKET	32
#define ADP_SZ			64 /* hardware-provided max. */
#define UV_CPUS_PER_AS		32 /* hardware-provided max. */
#define ITEMS_PER_DESC		8
/* the 'throttle' to prevent the hardware stay-busy bug */
#define MAX_BAU_CONCURRENT	3
#define UV_ACT_STATUS_MASK	0x3
#define UV_ACT_STATUS_SIZE	2
#define UV_DISTRIBUTION_SIZE	256
#define UV_SW_ACK_NPENDING	8
/* the INTD endpoint address differs between UV1 and UV2 hubs */
#define UV1_NET_ENDPOINT_INTD	0x38
#define UV2_NET_ENDPOINT_INTD	0x28
#define UV_NET_ENDPOINT_INTD	(is_uv1_hub() ?			\
			UV1_NET_ENDPOINT_INTD : UV2_NET_ENDPOINT_INTD)
#define UV_DESC_PSHIFT		49
#define UV_PAYLOADQ_PNODE_SHIFT	49
/* relative paths of the statistics and tunables control files */
#define UV_PTC_BASENAME		"sgi_uv/ptc_statistics"
#define UV_BAU_BASENAME		"sgi_uv/bau_tunables"
#define UV_BAU_TUNABLES_DIR	"sgi_uv"
#define UV_BAU_TUNABLES_FILE	"bau_tunables"
#define WHITESPACE		" \t\n"
/* mask of the node-offset bits of a physical address (m_val low bits) */
#define uv_mmask		((1UL << uv_hub_info->m_val) - 1)
#define uv_physnodeaddr(x)	((__pa((unsigned long)(x)) & uv_mmask))
/* test a cpu's bit in a struct bau_local_cpumask */
#define cpubit_isset(cpu, bau_local_cpumask) \
	test_bit((cpu), (bau_local_cpumask).bits)
Jack Steiner | 2a91959 | 2011-05-11 12:50:28 -0500 | [diff] [blame] | 62 | |
/* [19:16] SOFT_ACK timeout period  19: 1 is urgency 7  17:16 1 is multiplier */
/*
 * UV2: Bit 19 selects between
 *  (0): 10 microsecond timebase and
 *  (1): 80 microseconds
 *  we're using 560us, similar to UV1: 65 units of 10us
 */
#define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)

/* select the timeout encoding appropriate to the hub generation at runtime */
#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD	(is_uv1_hub() ?			\
		UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD :			\
		UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD)

#define BAU_MISC_CONTROL_MULT_MASK	3

#define UVH_AGING_PRESCALE_SEL		0x000000b000UL
/* [30:28] URGENCY_7  an index into a table of times */
#define BAU_URGENCY_7_SHIFT		28
#define BAU_URGENCY_7_MASK		7

#define UVH_TRANSACTION_TIMEOUT		0x000000b200UL
/* [45:40] BAU - BAU transaction timeout select - a multiplier */
#define BAU_TRANS_SHIFT			40
#define BAU_TRANS_MASK			0x3f

/*
 * shorten some awkward names
 */
#define AS_PUSH_SHIFT UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT
#define SOFTACK_MSHIFT UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT
#define SOFTACK_PSHIFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
#define SOFTACK_TIMEOUT_PERIOD UV_INTD_SOFT_ACK_TIMEOUT_PERIOD
#define write_gmmr	uv_write_global_mmr64
#define write_lmmr	uv_write_local_mmr
#define read_lmmr	uv_read_local_mmr
#define read_gmmr	uv_read_global_mmr64
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 100 | |
/*
 * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1 (UV1 descriptor status values)
 */
#define DS_IDLE				0
#define DS_ACTIVE			1
#define DS_DESTINATION_TIMEOUT		2
#define DS_SOURCE_TIMEOUT		3
/*
 * bits put together from HRP_LB_BAU_SB_ACTIVATION_STATUS_0/1/2
 * values 1 and 3 will not occur
 *        Decoded meaning               ERROR  BUSY  AUX ERR
 * -------------------------------     ----   ----- -------
 * IDLE                                  0      0      0
 * BUSY (active)                         0      1      0
 * SW Ack Timeout (destination)          1      0      0
 * SW Ack INTD rejected (strong NACK)    1      0      1
 * Source Side Time Out Detected         1      1      0
 * Destination Side PUT Failed           1      1      1
 */
#define UV2H_DESC_IDLE			0
#define UV2H_DESC_BUSY			2
#define UV2H_DESC_DEST_TIMEOUT		4
#define UV2H_DESC_DEST_STRONG_NACK	5
#define UV2H_DESC_SOURCE_TIMEOUT	6
#define UV2H_DESC_DEST_PUT_ERR		7

/*
 * delay for 'plugged' timeout retries, in microseconds
 */
#define PLUGGED_DELAY			10

/*
 * thresholds at which to use IPI to free resources
 */
/* after this # consecutive 'plugged' timeouts, use IPI to release resources */
#define PLUGSB4RESET			100
/* after this many consecutive timeouts, use IPI to release resources */
#define TIMEOUTSB4RESET			1
/* at this number uses of IPI to release resources, give up the request */
#define IPI_RESET_LIMIT			1
/* after this # consecutive successes, bump up the throttle if it was lowered */
#define COMPLETE_THRESHOLD		5
/* after this # of giveups (fall back to kernel IPI's) disable the use of
   the BAU for a period of time */
#define GIVEUP_LIMIT			100

#define UV_LB_SUBNODEID			0x10

/* these two are the same for UV1 and UV2: */
#define UV_SA_SHFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
#define UV_SA_MASK UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK
/* 4 bits of software ack period */
#define UV2_ACK_MASK			0x7UL
#define UV2_ACK_UNITS_SHFT		3
#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
/*
 * number of entries in the destination side payload queue
 */
#define DEST_Q_SIZE			20
/*
 * number of destination side software ack resources
 */
#define DEST_NUM_RESOURCES		8
/*
 * completion statuses for sending a TLB flush message
 */
#define FLUSH_RETRY_PLUGGED		1
#define FLUSH_RETRY_TIMEOUT		2
#define FLUSH_GIVEUP			3
#define FLUSH_COMPLETE			4

/*
 * tuning the action when the numalink network is extremely delayed
 */
#define CONGESTED_RESPONSE_US		1000	/* 'long' response time, in
						   microseconds */
#define CONGESTED_REPS			10	/* long delays averaged over
						   this many broadcasts */
#define DISABLED_PERIOD			10	/* time for the bau to be
						   disabled, in seconds */
/* see msg_type: */
#define MSG_NOOP			0
#define MSG_REGULAR			1
#define MSG_RETRY			2
Cliff Wickman | e8e5e8a | 2010-06-02 16:22:01 -0500 | [diff] [blame] | 186 | |
| 187 | /* |
Cliff Wickman | b194b120 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 188 | * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor) |
| 189 | * If the 'multilevel' flag in the header portion of the descriptor |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 190 | * has been set to 0, then endpoint multi-unicast mode is selected. |
| 191 | * The distribution specification (32 bytes) is interpreted as a 256-bit |
| 192 | * distribution vector. Adjacent bits correspond to consecutive even numbered |
| 193 | * nodeIDs. The result of adding the index of a given bit to the 15-bit |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame] | 194 | * 'base_dest_nasid' field of the header corresponds to the |
Cliff Wickman | b194b120 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 195 | * destination nodeID associated with that specified bit. |
| 196 | */ |
/* 256-bit distribution vector: one bit per potential destination pnode */
struct pnmask {
	unsigned long		bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
};
| 200 | |
Cliff Wickman | b194b120 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 201 | /* |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 202 | * mask of cpu's on a uvhub |
Cliff Wickman | b194b120 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 203 | * (during initialization we need to check that unsigned long has |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 204 | * enough bits for max. cpu's per uvhub) |
Cliff Wickman | b194b120 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 205 | */ |
struct bau_local_cpumask {
	unsigned long		bits;	/* one bit per cpu on the uvhub */
};
| 209 | |
| 210 | /* |
| 211 | * Payload: 16 bytes (128 bits) (bytes 0x20-0x2f of descriptor) |
| 212 | * only 12 bytes (96 bits) of the payload area are usable. |
| 213 | * An additional 3 bytes (bits 27:4) of the header address are carried |
| 214 | * to the next bytes of the destination payload queue. |
| 215 | * And an additional 2 bytes of the header Suppl_A field are also |
| 216 | * carried to the destination payload queue. |
| 217 | * But the first byte of the Suppl_A becomes bits 127:120 (the 16th byte) |
| 218 | * of the destination payload queue, which is written by the hardware |
| 219 | * with the s/w ack resource bit vector. |
| 220 | * [ effective message contents (16 bytes (128 bits) maximum), not counting |
| 221 | * the s/w ack bit vector ] |
| 222 | */ |
| 223 | |
/*
 * The payload is software-defined for INTD transactions
 */
struct bau_msg_payload {
	unsigned long	address;		/* signifies a page or all
						   TLB's of the cpu */
	/* 64 bits */
	unsigned short	sending_cpu;		/* filled in by sender */
	/* 16 bits */
	unsigned short	acknowledge_count;	/* filled in by destination */
	/* 16 bits */
	unsigned int	reserved1:32;		/* not usable */
};
| 237 | |
| 238 | |
Cliff Wickman | b194b120 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 239 | /* |
Cliff Wickman | da87c93 | 2012-01-16 15:17:50 -0600 | [diff] [blame] | 240 | * UV1 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor) |
Cliff Wickman | b194b120 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 241 | * see table 4.2.3.0.1 in broacast_assist spec. |
| 242 | */ |
Cliff Wickman | da87c93 | 2012-01-16 15:17:50 -0600 | [diff] [blame] | 243 | struct uv1_bau_msg_header { |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 244 | unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 245 | /* bits 5:0 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 246 | unsigned int base_dest_nasid:15; /* nasid of the first bit */ |
| 247 | /* bits 20:6 */ /* in uvhub map */ |
| 248 | unsigned int command:8; /* message type */ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 249 | /* bits 28:21 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 250 | /* 0x38: SN3net EndPoint Message */ |
| 251 | unsigned int rsvd_1:3; /* must be zero */ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 252 | /* bits 31:29 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 253 | /* int will align on 32 bits */ |
| 254 | unsigned int rsvd_2:9; /* must be zero */ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 255 | /* bits 40:32 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 256 | /* Suppl_A is 56-41 */ |
| 257 | unsigned int sequence:16; /* message sequence number */ |
| 258 | /* bits 56:41 */ /* becomes bytes 16-17 of msg */ |
| 259 | /* Address field (96:57) is |
| 260 | never used as an address |
| 261 | (these are address bits |
| 262 | 42:3) */ |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 263 | |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 264 | unsigned int rsvd_3:1; /* must be zero */ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 265 | /* bit 57 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 266 | /* address bits 27:4 are payload */ |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 267 | /* these next 24 (58-81) bits become bytes 12-14 of msg */ |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 268 | /* bits 65:58 land in byte 12 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 269 | unsigned int replied_to:1; /* sent as 0 by the source to |
| 270 | byte 12 */ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 271 | /* bit 58 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 272 | unsigned int msg_type:3; /* software type of the |
| 273 | message */ |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 274 | /* bits 61:59 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 275 | unsigned int canceled:1; /* message canceled, resource |
| 276 | is to be freed*/ |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 277 | /* bit 62 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 278 | unsigned int payload_1a:1; /* not currently used */ |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 279 | /* bit 63 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 280 | unsigned int payload_1b:2; /* not currently used */ |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 281 | /* bits 65:64 */ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 282 | |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 283 | /* bits 73:66 land in byte 13 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 284 | unsigned int payload_1ca:6; /* not currently used */ |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 285 | /* bits 71:66 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 286 | unsigned int payload_1c:2; /* not currently used */ |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 287 | /* bits 73:72 */ |
| 288 | |
| 289 | /* bits 81:74 land in byte 14 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 290 | unsigned int payload_1d:6; /* not currently used */ |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 291 | /* bits 79:74 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 292 | unsigned int payload_1e:2; /* not currently used */ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 293 | /* bits 81:80 */ |
| 294 | |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 295 | unsigned int rsvd_4:7; /* must be zero */ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 296 | /* bits 88:82 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 297 | unsigned int swack_flag:1; /* software acknowledge flag */ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 298 | /* bit 89 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 299 | /* INTD trasactions at |
| 300 | destination are to wait for |
| 301 | software acknowledge */ |
| 302 | unsigned int rsvd_5:6; /* must be zero */ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 303 | /* bits 95:90 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 304 | unsigned int rsvd_6:5; /* must be zero */ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 305 | /* bits 100:96 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 306 | unsigned int int_both:1; /* if 1, interrupt both sockets |
| 307 | on the uvhub */ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 308 | /* bit 101*/ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 309 | unsigned int fairness:3; /* usually zero */ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 310 | /* bits 104:102 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 311 | unsigned int multilevel:1; /* multi-level multicast |
| 312 | format */ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 313 | /* bit 105 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 314 | /* 0 for TLB: endpoint multi-unicast messages */ |
| 315 | unsigned int chaining:1; /* next descriptor is part of |
| 316 | this activation*/ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 317 | /* bit 106 */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 318 | unsigned int rsvd_7:21; /* must be zero */ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 319 | /* bits 127:107 */ |
| 320 | }; |
| 321 | |
/*
 * UV2 Message header:  16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
 * see figure 9-2 of harp_sys.pdf
 * Note the different field layout vs. UV1 (nasid first, command last).
 * Bit-field order and widths are fixed by the hardware; do not reorder.
 */
struct uv2_bau_msg_header {
	unsigned int	base_dest_nasid:15;	/* nasid of the first bit */
	/* bits 14:0 */				/* in uvhub map */
	unsigned int	dest_subnodeid:5;	/* must be 0x10, for the LB */
	/* bits 19:15 */
	unsigned int	rsvd_1:1;		/* must be zero */
	/* bit 20 */
	/* Address bits 59:21 */
	/* bits 25:2 of address (44:21) are payload */
	/* these next 24 bits become bytes 12-14 of msg */
	/* bits 28:21 land in byte 12 */
	unsigned int	replied_to:1;		/* sent as 0 by the source to
						   byte 12 */
	/* bit 21 */
	unsigned int	msg_type:3;		/* software type of the
						   message */
	/* bits 24:22 */
	unsigned int	canceled:1;		/* message canceled, resource
						   is to be freed */
	/* bit 25 */
	unsigned int	payload_1:3;		/* not currently used */
	/* bits 28:26 */

	/* bits 36:29 land in byte 13 */
	unsigned int	payload_2a:3;		/* not currently used */
	unsigned int	payload_2b:5;		/* not currently used */
	/* bits 36:29 */

	/* bits 44:37 land in byte 14 */
	unsigned int	payload_3:8;		/* not currently used */
	/* bits 44:37 */

	unsigned int	rsvd_2:7;		/* reserved */
	/* bits 51:45 */
	unsigned int	swack_flag:1;		/* software acknowledge flag */
	/* bit 52 */
	unsigned int	rsvd_3a:3;		/* must be zero */
	unsigned int	rsvd_3b:8;		/* must be zero */
	unsigned int	rsvd_3c:8;		/* must be zero */
	unsigned int	rsvd_3d:3;		/* must be zero */
	/* bits 74:53 */
	unsigned int	fairness:3;		/* usually zero */
	/* bits 77:75 */

	unsigned int	sequence:16;		/* message sequence number */
	/* bits 93:78  Suppl_A */
	unsigned int	chaining:1;		/* next descriptor is part of
						   this activation */
	/* bit 94 */
	unsigned int	multilevel:1;		/* multi-level multicast
						   format */
	/* bit 95 */
	unsigned int	rsvd_4:24;		/* ordered / source node /
						   source subnode / aging
						   must be zero */
	/* bits 119:96 */
	unsigned int	command:8;		/* message type */
	/* bits 127:120 */
};
| 385 | |
/*
 * The activation descriptor:
 * The format of the message to send, plus all accompanying control
 * Should be 64 bytes
 */
struct bau_desc {
	struct pnmask			distribution; /* 32-byte target vector */
	/*
	 * message template, consisting of header and payload:
	 * (the union lets one descriptor type carry either hub
	 *  generation's 16-byte header layout)
	 */
	union bau_msg_header {
		struct uv1_bau_msg_header	uv1_hdr;
		struct uv2_bau_msg_header	uv2_hdr;
	} header;

	struct bau_msg_payload			payload;
};
Cliff Wickman | da87c93 | 2012-01-16 15:17:50 -0600 | [diff] [blame] | 403 | /* UV1: |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 404 | * -payload-- ---------header------ |
| 405 | * bytes 0-11 bits 41-56 bits 58-81 |
| 406 | * A B (2) C (3) |
| 407 | * |
| 408 | * A/B/C are moved to: |
| 409 | * A C B |
| 410 | * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector) |
| 411 | * ------------payload queue----------- |
| 412 | */ |
Cliff Wickman | da87c93 | 2012-01-16 15:17:50 -0600 | [diff] [blame] | 413 | /* UV2: |
| 414 | * -payload-- ---------header------ |
| 415 | * bytes 0-11 bits 70-78 bits 21-44 |
| 416 | * A B (2) C (3) |
| 417 | * |
| 418 | * A/B/C are moved to: |
| 419 | * A C B |
| 420 | * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector) |
| 421 | * ------------payload queue----------- |
| 422 | */ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 423 | |
/*
 * The payload queue on the destination side is an array of these.
 * With BAU_MISC_CONTROL set for software acknowledge mode, the messages
 * are 32 bytes (2 micropackets) (256 bits) in length, but contain only 17
 * bytes of usable data, including the sw ack vector in byte 15 (bits 127:120)
 * (12 bytes come from bau_msg_payload, 3 from payload_1, 2 from
 *  swack_vec and payload_2)
 * "Enabling Software Acknowledgment mode (see Section 4.3.3 Software
 *  Acknowledge Processing) also selects 32 byte (17 bytes usable) payload
 *  operation."
 */
struct bau_pq_entry {
	unsigned long	address;	/* signifies a page or all TLB's
					   of the cpu */
	/* 64 bits, bytes 0-7 */
	unsigned short	sending_cpu;	/* cpu that sent the message */
	/* 16 bits, bytes 8-9 */
	unsigned short	acknowledge_count; /* filled in by destination */
	/* 16 bits, bytes 10-11 */
	/* these next 3 bytes come from bits 58-81 of the message header */
	unsigned short	replied_to:1;	/* sent as 0 by the source */
	unsigned short	msg_type:3;	/* software message type */
	unsigned short	canceled:1;	/* sent as 0 by the source */
	unsigned short	unused1:3;	/* not currently using */
	/* byte 12 */
	unsigned char	unused2a;	/* not currently using */
	/* byte 13 */
	unsigned char	unused2;	/* not currently using */
	/* byte 14 */
	unsigned char	swack_vec;	/* filled in by the hardware */
	/* byte 15 (bits 127:120) */
	unsigned short	sequence;	/* message sequence number */
	/* bytes 16-17 */
	unsigned char	unused4[2];	/* not currently using bytes 18-19 */
	/* bytes 18-19 */
	int		number_of_cpus;	/* filled in at destination */
	/* 32 bits, bytes 20-23 (aligned) */
	unsigned char	unused5[8];	/* not using */
	/* bytes 24-31 */
};
| 464 | |
Cliff Wickman | 4faca15 | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 465 | struct msg_desc { |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 466 | struct bau_pq_entry *msg; |
| 467 | int msg_slot; |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 468 | struct bau_pq_entry *queue_first; |
| 469 | struct bau_pq_entry *queue_last; |
Cliff Wickman | 4faca15 | 2010-06-02 16:22:02 -0500 | [diff] [blame] | 470 | }; |
| 471 | |
/*
 * argument block for a reset request; identifies the requesting cpu
 */
struct reset_args {
	int	sender;		/* cpu number of the sender */
};
| 475 | |
| 476 | /* |
| 477 | * This structure is allocated per_cpu for UV TLB shootdown statistics. |
| 478 | */ |
struct ptc_stats {
	/* s_* counters are sender-side events, d_* destination-side */
	/* sender statistics */
	unsigned long	s_giveup;		/* number of fall backs to
						   IPI-style flushes */
	unsigned long	s_requestor;		/* number of shootdown
						   requests */
	unsigned long	s_stimeout;		/* source side timeouts */
	unsigned long	s_dtimeout;		/* destination side timeouts */
	unsigned long	s_strongnacks;		/* number of strong nack's */
	unsigned long	s_time;			/* time spent in sending side */
	unsigned long	s_retriesok;		/* successful retries */
	unsigned long	s_ntargcpu;		/* total number of cpu's
						   targeted */
	unsigned long	s_ntargself;		/* times the sending cpu was
						   targeted */
	unsigned long	s_ntarglocals;		/* targets of cpus on the local
						   blade */
	unsigned long	s_ntargremotes;		/* targets of cpus on remote
						   blades */
	unsigned long	s_ntarglocaluvhub;	/* targets of the local hub */
	unsigned long	s_ntargremoteuvhub;	/* remotes hubs targeted */
	unsigned long	s_ntarguvhub;		/* total number of uvhubs
						   targeted */
	unsigned long	s_ntarguvhub16;		/* number of times target
						   hubs >= 16*/
	unsigned long	s_ntarguvhub8;		/* number of times target
						   hubs >= 8 */
	unsigned long	s_ntarguvhub4;		/* number of times target
						   hubs >= 4 */
	unsigned long	s_ntarguvhub2;		/* number of times target
						   hubs >= 2 */
	unsigned long	s_ntarguvhub1;		/* number of times target
						   hubs == 1 */
	unsigned long	s_resets_plug;		/* ipi-style resets from plug
						   state */
	unsigned long	s_resets_timeout;	/* ipi-style resets from
						   timeouts */
	unsigned long	s_busy;			/* status stayed busy past
						   s/w timer */
	unsigned long	s_throttles;		/* waits in throttle */
	unsigned long	s_retry_messages;	/* retry broadcasts */
	unsigned long	s_bau_reenabled;	/* for bau enable/disable */
	unsigned long	s_bau_disabled;		/* for bau enable/disable */
	unsigned long	s_uv2_wars;		/* uv2 workaround, perm. busy */
	unsigned long	s_uv2_wars_hw;		/* uv2 workaround, hiwater */
	unsigned long	s_uv2_war_waits;	/* uv2 workaround, long waits */
	unsigned long	s_overipilimit;		/* over the ipi reset limit */
	unsigned long	s_giveuplimit;		/* disables, over giveup limit*/
	unsigned long	s_enters;		/* entries to the driver */
	unsigned long	s_ipifordisabled;	/* fall back to IPI; disabled */
	unsigned long	s_plugged;		/* plugged by h/w bug*/
	unsigned long	s_congested;		/* giveup on long wait */
	/* destination statistics */
	unsigned long	d_alltlb;		/* times all tlb's on this
						   cpu were flushed */
	unsigned long	d_onetlb;		/* times just one tlb on this
						   cpu was flushed */
	unsigned long	d_multmsg;		/* interrupts with multiple
						   messages */
	unsigned long	d_nomsg;		/* interrupts with no message */
	unsigned long	d_time;			/* time spent on destination
						   side */
	unsigned long	d_requestee;		/* number of messages
						   processed */
	unsigned long	d_retries;		/* number of retry messages
						   processed */
	unsigned long	d_canceled;		/* number of messages canceled
						   by retries */
	unsigned long	d_nocanceled;		/* retries that found nothing
						   to cancel */
	unsigned long	d_resets;		/* number of ipi-style requests
						   processed */
	unsigned long	d_rcanceled;		/* number of messages canceled
						   by resets */
};
| 554 | |
/*
 * pairs a tunable variable with its default value
 */
struct tunables {
	int		*tunp;	/* the tunable itself */
	int		deflt;	/* its default value */
};
| 559 | |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame] | 560 | struct hub_and_pnode { |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 561 | short uvhub; |
| 562 | short pnode; |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame] | 563 | }; |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 564 | |
/*
 * the cpus belonging to one socket
 */
struct socket_desc {
	short	num_cpus;	/* number of cpus in this socket */
	short	cpu_number[MAX_CPUS_PER_SOCKET]; /* their cpu numbers */
};
| 569 | |
/*
 * describes one uvhub: its cpus and its (up to 2) sockets
 */
struct uvhub_desc {
	unsigned short	socket_mask;	/* presumably a bitmask of occupied
					   sockets - TODO confirm */
	short		num_cpus;	/* cpus on this uvhub */
	short		uvhub;		/* this uvhub's number */
	short		pnode;		/* its pnode */
	struct socket_desc	socket[2];
};
| 577 | |
Cliff Wickman | b194b120 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 578 | /* |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 579 | * one per-cpu; to locate the software tables |
Cliff Wickman | b194b120 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 580 | */ |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 581 | struct bau_control { |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 582 | struct bau_desc *descriptor_base; |
| 583 | struct bau_pq_entry *queue_first; |
| 584 | struct bau_pq_entry *queue_last; |
| 585 | struct bau_pq_entry *bau_msg_head; |
| 586 | struct bau_control *uvhub_master; |
| 587 | struct bau_control *socket_master; |
| 588 | struct ptc_stats *statp; |
cpw@sgi.com | 442d392 | 2011-06-21 07:21:31 -0500 | [diff] [blame] | 589 | cpumask_t *cpumask; |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 590 | unsigned long timeout_interval; |
| 591 | unsigned long set_bau_on_time; |
| 592 | atomic_t active_descriptor_count; |
| 593 | int plugged_tries; |
| 594 | int timeout_tries; |
| 595 | int ipi_attempts; |
| 596 | int conseccompletes; |
Cliff Wickman | 26ef857 | 2012-06-22 08:13:30 -0500 | [diff] [blame] | 597 | short nobau; |
Cliff Wickman | 8b6e511 | 2012-06-22 08:14:59 -0500 | [diff] [blame] | 598 | short baudisabled; |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 599 | short cpu; |
| 600 | short osnode; |
| 601 | short uvhub_cpu; |
| 602 | short uvhub; |
Cliff Wickman | da87c93 | 2012-01-16 15:17:50 -0600 | [diff] [blame] | 603 | short uvhub_version; |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 604 | short cpus_in_socket; |
| 605 | short cpus_in_uvhub; |
| 606 | short partition_base_pnode; |
Cliff Wickman | 8b6e511 | 2012-06-22 08:14:59 -0500 | [diff] [blame] | 607 | short busy; /* all were busy (war) */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 608 | unsigned short message_number; |
| 609 | unsigned short uvhub_quiesce; |
| 610 | short socket_acknowledge_count[DEST_Q_SIZE]; |
| 611 | cycles_t send_message; |
Cliff Wickman | 8b6e511 | 2012-06-22 08:14:59 -0500 | [diff] [blame] | 612 | cycles_t period_end; |
| 613 | cycles_t period_time; |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 614 | spinlock_t uvhub_lock; |
| 615 | spinlock_t queue_lock; |
Cliff Wickman | 8b6e511 | 2012-06-22 08:14:59 -0500 | [diff] [blame] | 616 | spinlock_t disable_lock; |
Cliff Wickman | e8e5e8a | 2010-06-02 16:22:01 -0500 | [diff] [blame] | 617 | /* tunables */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 618 | int max_concurr; |
| 619 | int max_concurr_const; |
| 620 | int plugged_delay; |
| 621 | int plugsb4reset; |
| 622 | int timeoutsb4reset; |
| 623 | int ipi_reset_limit; |
| 624 | int complete_threshold; |
| 625 | int cong_response_us; |
| 626 | int cong_reps; |
Cliff Wickman | 8b6e511 | 2012-06-22 08:14:59 -0500 | [diff] [blame] | 627 | cycles_t disabled_period; |
| 628 | int period_giveups; |
| 629 | int giveup_limit; |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 630 | long period_requests; |
| 631 | struct hub_and_pnode *thp; |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 632 | }; |
| 633 | |
cpw@sgi.com | b18fb2c | 2011-06-21 07:21:27 -0500 | [diff] [blame] | 634 | static inline unsigned long read_mmr_uv2_status(void) |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 635 | { |
| 636 | return read_lmmr(UV2H_LB_BAU_SB_ACTIVATION_STATUS_2); |
| 637 | } |
| 638 | |
cpw@sgi.com | b18fb2c | 2011-06-21 07:21:27 -0500 | [diff] [blame] | 639 | static inline void write_mmr_data_broadcast(int pnode, unsigned long mmr_image) |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 640 | { |
| 641 | write_gmmr(pnode, UVH_BAU_DATA_BROADCAST, mmr_image); |
| 642 | } |
| 643 | |
cpw@sgi.com | b18fb2c | 2011-06-21 07:21:27 -0500 | [diff] [blame] | 644 | static inline void write_mmr_descriptor_base(int pnode, unsigned long mmr_image) |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 645 | { |
| 646 | write_gmmr(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, mmr_image); |
| 647 | } |
| 648 | |
cpw@sgi.com | b18fb2c | 2011-06-21 07:21:27 -0500 | [diff] [blame] | 649 | static inline void write_mmr_activation(unsigned long index) |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 650 | { |
| 651 | write_lmmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index); |
| 652 | } |
| 653 | |
cpw@sgi.com | b18fb2c | 2011-06-21 07:21:27 -0500 | [diff] [blame] | 654 | static inline void write_gmmr_activation(int pnode, unsigned long mmr_image) |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 655 | { |
| 656 | write_gmmr(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, mmr_image); |
| 657 | } |
| 658 | |
cpw@sgi.com | b18fb2c | 2011-06-21 07:21:27 -0500 | [diff] [blame] | 659 | static inline void write_mmr_payload_first(int pnode, unsigned long mmr_image) |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 660 | { |
| 661 | write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, mmr_image); |
| 662 | } |
| 663 | |
cpw@sgi.com | b18fb2c | 2011-06-21 07:21:27 -0500 | [diff] [blame] | 664 | static inline void write_mmr_payload_tail(int pnode, unsigned long mmr_image) |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 665 | { |
| 666 | write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, mmr_image); |
| 667 | } |
| 668 | |
cpw@sgi.com | b18fb2c | 2011-06-21 07:21:27 -0500 | [diff] [blame] | 669 | static inline void write_mmr_payload_last(int pnode, unsigned long mmr_image) |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 670 | { |
| 671 | write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, mmr_image); |
| 672 | } |
| 673 | |
cpw@sgi.com | b18fb2c | 2011-06-21 07:21:27 -0500 | [diff] [blame] | 674 | static inline void write_mmr_misc_control(int pnode, unsigned long mmr_image) |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 675 | { |
| 676 | write_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image); |
| 677 | } |
| 678 | |
cpw@sgi.com | b18fb2c | 2011-06-21 07:21:27 -0500 | [diff] [blame] | 679 | static inline unsigned long read_mmr_misc_control(int pnode) |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 680 | { |
| 681 | return read_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL); |
| 682 | } |
| 683 | |
cpw@sgi.com | b18fb2c | 2011-06-21 07:21:27 -0500 | [diff] [blame] | 684 | static inline void write_mmr_sw_ack(unsigned long mr) |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 685 | { |
| 686 | uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr); |
| 687 | } |
| 688 | |
Cliff Wickman | c5d35d3 | 2012-01-16 15:19:47 -0600 | [diff] [blame] | 689 | static inline void write_gmmr_sw_ack(int pnode, unsigned long mr) |
| 690 | { |
| 691 | write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr); |
| 692 | } |
| 693 | |
cpw@sgi.com | b18fb2c | 2011-06-21 07:21:27 -0500 | [diff] [blame] | 694 | static inline unsigned long read_mmr_sw_ack(void) |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 695 | { |
| 696 | return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE); |
| 697 | } |
| 698 | |
cpw@sgi.com | b18fb2c | 2011-06-21 07:21:27 -0500 | [diff] [blame] | 699 | static inline unsigned long read_gmmr_sw_ack(int pnode) |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 700 | { |
| 701 | return read_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE); |
| 702 | } |
| 703 | |
cpw@sgi.com | b18fb2c | 2011-06-21 07:21:27 -0500 | [diff] [blame] | 704 | static inline void write_mmr_data_config(int pnode, unsigned long mr) |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 705 | { |
| 706 | uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr); |
| 707 | } |
| 708 | |
cpw@sgi.com | a456eaa | 2011-06-21 07:21:30 -0500 | [diff] [blame] | 709 | static inline int bau_uvhub_isset(int uvhub, struct pnmask *dstp) |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 710 | { |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 711 | return constant_test_bit(uvhub, &dstp->bits[0]); |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 712 | } |
cpw@sgi.com | a456eaa | 2011-06-21 07:21:30 -0500 | [diff] [blame] | 713 | static inline void bau_uvhub_set(int pnode, struct pnmask *dstp) |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 714 | { |
Cliff Wickman | 77ed23f | 2011-05-10 08:26:43 -0500 | [diff] [blame] | 715 | __set_bit(pnode, &dstp->bits[0]); |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 716 | } |
cpw@sgi.com | a456eaa | 2011-06-21 07:21:30 -0500 | [diff] [blame] | 717 | static inline void bau_uvhubs_clear(struct pnmask *dstp, |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 718 | int nbits) |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 719 | { |
| 720 | bitmap_zero(&dstp->bits[0], nbits); |
| 721 | } |
cpw@sgi.com | a456eaa | 2011-06-21 07:21:30 -0500 | [diff] [blame] | 722 | static inline int bau_uvhub_weight(struct pnmask *dstp) |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 723 | { |
| 724 | return bitmap_weight((unsigned long *)&dstp->bits[0], |
| 725 | UV_DISTRIBUTION_SIZE); |
| 726 | } |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 727 | |
/* clear the first 'nbits' bits of a local cpu mask */
static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
{
	bitmap_zero(&dstp->bits, nbits);
}
| 732 | |
Cliff Wickman | b194b120 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 733 | extern void uv_bau_message_intr1(void); |
Seiji Aguchi | cf910e8 | 2013-06-20 11:46:53 -0400 | [diff] [blame] | 734 | #ifdef CONFIG_TRACING |
| 735 | #define trace_uv_bau_message_intr1 uv_bau_message_intr1 |
| 736 | #endif |
Cliff Wickman | b194b120 | 2008-06-12 08:23:48 -0500 | [diff] [blame] | 737 | extern void uv_bau_timeout_intr1(void); |
Cliff Wickman | 1812924 | 2008-06-02 08:56:14 -0500 | [diff] [blame] | 738 | |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 739 | struct atomic_short { |
| 740 | short counter; |
| 741 | }; |
| 742 | |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 743 | /* |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 744 | * atomic_read_short - read a short atomic variable |
| 745 | * @v: pointer of type atomic_short |
| 746 | * |
| 747 | * Atomically reads the value of @v. |
| 748 | */ |
| 749 | static inline int atomic_read_short(const struct atomic_short *v) |
| 750 | { |
| 751 | return v->counter; |
| 752 | } |
| 753 | |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 754 | /* |
| 755 | * atom_asr - add and return a short int |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 756 | * @i: short value to add |
| 757 | * @v: pointer of type atomic_short |
| 758 | * |
| 759 | * Atomically adds @i to @v and returns @i + @v |
| 760 | */ |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 761 | static inline int atom_asr(short i, struct atomic_short *v) |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 762 | { |
Jeremy Fitzhardinge | 8b8bc2f | 2011-08-23 16:59:58 -0700 | [diff] [blame] | 763 | return i + xadd(&v->counter, i); |
Cliff Wickman | b8f7fb1 | 2010-04-14 11:35:46 -0500 | [diff] [blame] | 764 | } |
| 765 | |
Cliff Wickman | f073cc8 | 2011-05-24 13:07:36 -0500 | [diff] [blame] | 766 | /* |
| 767 | * conditionally add 1 to *v, unless *v is >= u |
| 768 | * return 0 if we cannot add 1 to *v because it is >= u |
| 769 | * return 1 if we can add 1 to *v because it is < u |
| 770 | * the add is atomic |
| 771 | * |
| 772 | * This is close to atomic_add_unless(), but this allows the 'u' value |
| 773 | * to be lowered below the current 'v'. atomic_add_unless can only stop |
| 774 | * on equal. |
| 775 | */ |
| 776 | static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u) |
| 777 | { |
| 778 | spin_lock(lock); |
| 779 | if (atomic_read(v) >= u) { |
| 780 | spin_unlock(lock); |
| 781 | return 0; |
| 782 | } |
| 783 | atomic_inc(v); |
| 784 | spin_unlock(lock); |
| 785 | return 1; |
| 786 | } |
| 787 | |
H. Peter Anvin | 05e4d31 | 2008-10-23 00:01:39 -0700 | [diff] [blame] | 788 | #endif /* _ASM_X86_UV_UV_BAU_H */ |