/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV Broadcast Assist Unit definitions
 *
 * Copyright (C) 2008-2011 Silicon Graphics, Inc. All rights reserved.
 */

#ifndef _ASM_X86_UV_UV_BAU_H
#define _ASM_X86_UV_UV_BAU_H

#include <linux/bitmap.h>
#define BITSPERBYTE 8

/*
 * Broadcast Assist Unit messaging structures
 *
 * Selective Broadcast activations are induced by software action
 * specifying a particular 8-descriptor "set" via a 6-bit index written
 * to an MMR.
 * Thus there are 64 unique 512-byte sets of SB descriptors - one set for
 * each 6-bit index value. These descriptor sets are mapped in sequence
 * starting with set 0 located at the address specified in the
 * BAU_SB_DESCRIPTOR_BASE register, set 1 is located at BASE + 512,
 * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on.
 *
 * We will use one set for sending BAU messages from each of the
 * cpu's on the uvhub.
 *
 * TLB shootdown will use the first of the 8 descriptors of each set.
 * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set).
 */

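/*
 * Illustrative sketch (not part of the original header): the byte offset,
 * relative to the address in BAU_SB_DESCRIPTOR_BASE, of descriptor 'item'
 * within descriptor set 'set', restating the layout described above
 * (64 sets x 8 descriptors x 64 bytes).  The helper name is hypothetical.
 */
static inline unsigned long sb_descriptor_offset(int set, int item)
{
	return ((unsigned long)set * 8 + item) * 64;	/* set*512 + item*64 */
}
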
#define MAX_CPUS_PER_UVHUB		64
#define MAX_CPUS_PER_SOCKET		32
#define ADP_SZ				64 /* hardware-provided max. */
#define UV_CPUS_PER_AS			32 /* hardware-provided max. */
#define ITEMS_PER_DESC			8
/* the 'throttle' to prevent the hardware stay-busy bug */
#define MAX_BAU_CONCURRENT		3
#define UV_ACT_STATUS_MASK		0x3
#define UV_ACT_STATUS_SIZE		2
#define UV_DISTRIBUTION_SIZE		256
#define UV_SW_ACK_NPENDING		8
#define UV1_NET_ENDPOINT_INTD		0x38
#define UV2_NET_ENDPOINT_INTD		0x28
#define UV_NET_ENDPOINT_INTD		(is_uv1_hub() ?			\
			UV1_NET_ENDPOINT_INTD : UV2_NET_ENDPOINT_INTD)
#define UV_DESC_PSHIFT			49
#define UV_PAYLOADQ_PNODE_SHIFT		49
#define UV_PTC_BASENAME			"sgi_uv/ptc_statistics"
#define UV_BAU_BASENAME			"sgi_uv/bau_tunables"
#define UV_BAU_TUNABLES_DIR		"sgi_uv"
#define UV_BAU_TUNABLES_FILE		"bau_tunables"
#define WHITESPACE			" \t\n"
#define uv_physnodeaddr(x)		((__pa((unsigned long)(x)) & uv_mmask))
#define cpubit_isset(cpu, bau_local_cpumask) \
	test_bit((cpu), (bau_local_cpumask).bits)

/* [19:16] SOFT_ACK timeout period; 19: 1 is urgency 7; 17:16: 1 is multiplier */
/*
 * UV2: Bit 19 selects between
 *  (0): 10 microsecond timebase and
 *  (1): 80 microseconds
 * we're using 650us, similar to UV1: 65 units of 10us
 */
#define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD	(9UL)
#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD	(65*10UL)

#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD	(is_uv1_hub() ?			\
		UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD :			\
		UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD)

#define BAU_MISC_CONTROL_MULT_MASK	3

#define UVH_AGING_PRESCALE_SEL		0x000000b000UL
/* [30:28] URGENCY_7  an index into a table of times */
#define BAU_URGENCY_7_SHIFT		28
#define BAU_URGENCY_7_MASK		7

#define UVH_TRANSACTION_TIMEOUT		0x000000b200UL
/* [45:40] BAU - BAU transaction timeout select - a multiplier */
#define BAU_TRANS_SHIFT			40
#define BAU_TRANS_MASK			0x3f

/*
 * shorten some awkward names
 */
#define AS_PUSH_SHIFT UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT
#define SOFTACK_MSHIFT UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT
#define SOFTACK_PSHIFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
#define SOFTACK_TIMEOUT_PERIOD UV_INTD_SOFT_ACK_TIMEOUT_PERIOD
#define write_gmmr	uv_write_global_mmr64
#define write_lmmr	uv_write_local_mmr
#define read_lmmr	uv_read_local_mmr
#define read_gmmr	uv_read_global_mmr64
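
/*
 * Illustrative sketch (not part of the original header): how the soft-ack
 * shift macros above would typically be folded into a MISC_CONTROL image
 * (the field is the 4-bit [19:16] period described earlier).  The helper
 * name is hypothetical; the real programming sequence lives in the BAU
 * driver.
 */
static inline unsigned long set_softack_fields(unsigned long mmr_image,
					       unsigned long period)
{
	mmr_image &= ~(0xfUL << SOFTACK_PSHIFT);	 /* clear the period field */
	mmr_image |= (period & 0xfUL) << SOFTACK_PSHIFT; /* install new period */
	mmr_image |= 1UL << SOFTACK_MSHIFT;		 /* enable s/w ack mode */
	return mmr_image;
}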

/*
 * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
 */
#define DS_IDLE				0
#define DS_ACTIVE			1
#define DS_DESTINATION_TIMEOUT		2
#define DS_SOURCE_TIMEOUT		3
/*
 * bits put together from HRP_LB_BAU_SB_ACTIVATION_STATUS_0/1/2
 * values 1 and 5 will not occur
 */
#define UV2H_DESC_IDLE			0
#define UV2H_DESC_DEST_TIMEOUT		2
#define UV2H_DESC_DEST_STRONG_NACK	3
#define UV2H_DESC_BUSY			4
#define UV2H_DESC_SOURCE_TIMEOUT	6
#define UV2H_DESC_DEST_PUT_ERR		7

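/*
 * Illustrative sketch (not part of the original header): extracting the
 * 2-bit state of one descriptor from an ACTIVATION_STATUS register image,
 * given that descriptor's right shift.  The helper name is hypothetical;
 * the shift would come from the sending cpu's descriptor index.
 */
static inline int read_status_sketch(unsigned long mmr_offset, int right_shift)
{
	return (int)((read_lmmr(mmr_offset) >> right_shift) & UV_ACT_STATUS_MASK);
}
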
/*
 * delay for 'plugged' timeout retries, in microseconds
 */
#define PLUGGED_DELAY			10

/*
 * thresholds at which to use IPI to free resources
 */
/* after this # consecutive 'plugged' timeouts, use IPI to release resources */
#define PLUGSB4RESET			100
/* after this many consecutive timeouts, use IPI to release resources */
#define TIMEOUTSB4RESET			1
/* after this many uses of IPI to release resources, give up the request */
#define IPI_RESET_LIMIT			1
/* after this # consecutive successes, bump up the throttle if it was lowered */
#define COMPLETE_THRESHOLD		5

#define UV_LB_SUBNODEID			0x10

/* these two are the same for UV1 and UV2: */
#define UV_SA_SHFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
#define UV_SA_MASK UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK
/* 4 bits of software ack period */
#define UV2_ACK_MASK			0x7UL
#define UV2_ACK_UNITS_SHFT		3
#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT
#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT

/*
 * number of entries in the destination side payload queue
 */
#define DEST_Q_SIZE			20
/*
 * number of destination side software ack resources
 */
#define DEST_NUM_RESOURCES		8
/*
 * completion statuses for sending a TLB flush message
 */
#define FLUSH_RETRY_PLUGGED		1
#define FLUSH_RETRY_TIMEOUT		2
#define FLUSH_GIVEUP			3
#define FLUSH_COMPLETE			4

/*
 * tuning the action when the numalink network is extremely delayed
 */
#define CONGESTED_RESPONSE_US		1000	/* 'long' response time, in
						   microseconds */
#define CONGESTED_REPS			10	/* long delays averaged over
						   this many broadcasts */
#define CONGESTED_PERIOD		30	/* time for the bau to be
						   disabled, in seconds */
/* see msg_type: */
#define MSG_NOOP			0
#define MSG_REGULAR			1
#define MSG_RETRY			2

/*
 * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
 * If the 'multilevel' flag in the header portion of the descriptor
 * has been set to 0, then endpoint multi-unicast mode is selected.
 * The distribution specification (32 bytes) is interpreted as a 256-bit
 * distribution vector. Adjacent bits correspond to consecutive even numbered
 * nodeIDs. The result of adding the index of a given bit to the 15-bit
 * 'base_dest_nasid' field of the header corresponds to the
 * destination nodeID associated with that specified bit.
 */
struct pnmask {
	unsigned long		bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
};

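/*
 * Illustrative sketch (not part of the original header): per the comment
 * above, a bit set at index 'bit' in the distribution addresses the node
 * whose ID is base_dest_nasid + bit.  The helper name is hypothetical.
 */
static inline int pnmask_first_nasid(struct pnmask *dp, int base_dest_nasid)
{
	int bit;

	for (bit = 0; bit < UV_DISTRIBUTION_SIZE; bit++)
		if (test_bit(bit, dp->bits))
			return base_dest_nasid + bit;
	return -1;			/* empty distribution */
}
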
/*
 * mask of cpu's on a uvhub
 * (during initialization we need to check that unsigned long has
 * enough bits for max. cpu's per uvhub)
 */
struct bau_local_cpumask {
	unsigned long		bits;
};

/*
 * Payload: 16 bytes (128 bits) (bytes 0x20-0x2f of descriptor)
 * only 12 bytes (96 bits) of the payload area are usable.
 * An additional 3 bytes (bits 27:4) of the header address are carried
 * to the next bytes of the destination payload queue.
 * And an additional 2 bytes of the header Suppl_A field are also
 * carried to the destination payload queue.
 * But the first byte of the Suppl_A becomes bits 127:120 (the 16th byte)
 * of the destination payload queue, which is written by the hardware
 * with the s/w ack resource bit vector.
 * [ effective message contents (16 bytes (128 bits) maximum), not counting
 *   the s/w ack bit vector ]
 */

/*
 * The payload is software-defined for INTD transactions
 */
struct bau_msg_payload {
	unsigned long	address;		/* signifies a page or all
						   TLB's of the cpu */
	/* 64 bits */
	unsigned short	sending_cpu;		/* filled in by sender */
	/* 16 bits */
	unsigned short	acknowledge_count;	/* filled in by destination */
	/* 16 bits */
	unsigned int	reserved1:32;		/* not usable */
};


/*
 * Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
 * see table 4.2.3.0.1 in broadcast_assist spec.
 */
struct bau_msg_header {
	unsigned int	dest_subnodeid:6;	/* must be 0x10, for the LB */
	/* bits 5:0 */
	unsigned int	base_dest_nasid:15;	/* nasid of the first bit */
	/* bits 20:6 */				/* in uvhub map */
	unsigned int	command:8;		/* message type */
	/* bits 28:21 */
	/* 0x38: SN3net EndPoint Message */
	unsigned int	rsvd_1:3;		/* must be zero */
	/* bits 31:29 */
	/* int will align on 32 bits */
	unsigned int	rsvd_2:9;		/* must be zero */
	/* bits 40:32 */
	/* Suppl_A is 56-41 */
	unsigned int	sequence:16;		/* message sequence number */
	/* bits 56:41 */			/* becomes bytes 16-17 of msg */
						/* Address field (96:57) is
						   never used as an address
						   (these are address bits
						   42:3) */

	unsigned int	rsvd_3:1;		/* must be zero */
	/* bit 57 */
	/* address bits 27:4 are payload */
	/* these next 24 (58-81) bits become bytes 12-14 of msg */
	/* bits 65:58 land in byte 12 */
	unsigned int	replied_to:1;		/* sent as 0 by the source to
						   byte 12 */
	/* bit 58 */
	unsigned int	msg_type:3;		/* software type of the
						   message */
	/* bits 61:59 */
	unsigned int	canceled:1;		/* message canceled, resource
						   is to be freed */
	/* bit 62 */
	unsigned int	payload_1a:1;		/* not currently used */
	/* bit 63 */
	unsigned int	payload_1b:2;		/* not currently used */
	/* bits 65:64 */

	/* bits 73:66 land in byte 13 */
	unsigned int	payload_1ca:6;		/* not currently used */
	/* bits 71:66 */
	unsigned int	payload_1c:2;		/* not currently used */
	/* bits 73:72 */

	/* bits 81:74 land in byte 14 */
	unsigned int	payload_1d:6;		/* not currently used */
	/* bits 79:74 */
	unsigned int	payload_1e:2;		/* not currently used */
	/* bits 81:80 */

	unsigned int	rsvd_4:7;		/* must be zero */
	/* bits 88:82 */
	unsigned int	swack_flag:1;		/* software acknowledge flag */
	/* bit 89 */
						/* INTD transactions at
						   destination are to wait for
						   software acknowledge */
	unsigned int	rsvd_5:6;		/* must be zero */
	/* bits 95:90 */
	unsigned int	rsvd_6:5;		/* must be zero */
	/* bits 100:96 */
	unsigned int	int_both:1;		/* if 1, interrupt both sockets
						   on the uvhub */
	/* bit 101 */
	unsigned int	fairness:3;		/* usually zero */
	/* bits 104:102 */
	unsigned int	multilevel:1;		/* multi-level multicast
						   format */
	/* bit 105 */
	/* 0 for TLB: endpoint multi-unicast messages */
	unsigned int	chaining:1;		/* next descriptor is part of
						   this activation */
	/* bit 106 */
	unsigned int	rsvd_7:21;		/* must be zero */
	/* bits 127:107 */
};

/*
 * The activation descriptor:
 * The format of the message to send, plus all accompanying control
 * Should be 64 bytes
 */
struct bau_desc {
	struct pnmask			distribution;
	/*
	 * message template, consisting of header and payload:
	 */
	struct bau_msg_header		header;
	struct bau_msg_payload		payload;
};
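
/*
 * Illustrative sketch (not part of the original header): the header fields
 * that the field comments above describe as fixed for a TLB-shootdown
 * descriptor.  The helper name and the one-time initialization it implies
 * are assumptions; descriptor setup is done by the BAU driver.
 */
static inline void init_shootdown_header_sketch(struct bau_desc *bd,
						int base_nasid)
{
	bd->header.dest_subnodeid  = UV_LB_SUBNODEID;	/* must be 0x10, the LB */
	bd->header.command	   = UV_NET_ENDPOINT_INTD;
	bd->header.base_dest_nasid = base_nasid;	/* nasid of the first bit */
	bd->header.swack_flag	   = 1;			/* wait for s/w ack */
}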
/*
 * -payload--    ---------header------
 * bytes 0-11    bits 41-56  bits 58-81
 *     A           B  (2)      C (3)
 *
 *            A/B/C are moved to:
 *     A            C          B
 * bytes 0-11  bytes 12-14  bytes 16-17  (byte 15 filled in by hw as vector)
 * ------------payload queue-----------
 */

/*
 * The payload queue on the destination side is an array of these.
 * With BAU_MISC_CONTROL set for software acknowledge mode, the messages
 * are 32 bytes (2 micropackets) (256 bits) in length, but contain only 17
 * bytes of usable data, including the sw ack vector in byte 15 (bits 127:120)
 * (12 bytes come from bau_msg_payload, 3 from payload_1, 2 from
 *  swack_vec and payload_2)
 * "Enabling Software Acknowledgment mode (see Section 4.3.3 Software
 *  Acknowledge Processing) also selects 32 byte (17 bytes usable) payload
 *  operation."
 */
struct bau_pq_entry {
	unsigned long	address;	/* signifies a page or all TLB's
					   of the cpu */
	/* 64 bits, bytes 0-7 */
	unsigned short	sending_cpu;	/* cpu that sent the message */
	/* 16 bits, bytes 8-9 */
	unsigned short	acknowledge_count; /* filled in by destination */
	/* 16 bits, bytes 10-11 */
	/* these next 3 bytes come from bits 58-81 of the message header */
	unsigned short	replied_to:1;	/* sent as 0 by the source */
	unsigned short	msg_type:3;	/* software message type */
	unsigned short	canceled:1;	/* sent as 0 by the source */
	unsigned short	unused1:3;	/* not currently using */
	/* byte 12 */
	unsigned char	unused2a;	/* not currently using */
	/* byte 13 */
	unsigned char	unused2;	/* not currently using */
	/* byte 14 */
	unsigned char	swack_vec;	/* filled in by the hardware */
	/* byte 15 (bits 127:120) */
	unsigned short	sequence;	/* message sequence number */
	/* bytes 16-17 */
	unsigned char	unused4[2];	/* not currently using bytes 18-19 */
	/* bytes 18-19 */
	int		number_of_cpus;	/* filled in at destination */
	/* 32 bits, bytes 20-23 (aligned) */
	unsigned char	unused5[8];	/* not using */
	/* bytes 24-31 */
};

struct msg_desc {
	struct bau_pq_entry	*msg;
	int			msg_slot;
	int			swack_slot;
	struct bau_pq_entry	*queue_first;
	struct bau_pq_entry	*queue_last;
};

struct reset_args {
	int			sender;
};

/*
 * This structure is allocated per_cpu for UV TLB shootdown statistics.
 */
struct ptc_stats {
	/* sender statistics */
	unsigned long	s_giveup;		/* number of fall backs to
						   IPI-style flushes */
	unsigned long	s_requestor;		/* number of shootdown
						   requests */
	unsigned long	s_stimeout;		/* source side timeouts */
	unsigned long	s_dtimeout;		/* destination side timeouts */
	unsigned long	s_time;			/* time spent in sending side */
	unsigned long	s_retriesok;		/* successful retries */
	unsigned long	s_ntargcpu;		/* total number of cpu's
						   targeted */
	unsigned long	s_ntargself;		/* times the sending cpu was
						   targeted */
	unsigned long	s_ntarglocals;		/* targets of cpus on the local
						   blade */
	unsigned long	s_ntargremotes;		/* targets of cpus on remote
						   blades */
	unsigned long	s_ntarglocaluvhub;	/* targets of the local hub */
	unsigned long	s_ntargremoteuvhub;	/* remote hubs targeted */
	unsigned long	s_ntarguvhub;		/* total number of uvhubs
						   targeted */
	unsigned long	s_ntarguvhub16;		/* number of times target
						   hubs >= 16 */
	unsigned long	s_ntarguvhub8;		/* number of times target
						   hubs >= 8 */
	unsigned long	s_ntarguvhub4;		/* number of times target
						   hubs >= 4 */
	unsigned long	s_ntarguvhub2;		/* number of times target
						   hubs >= 2 */
	unsigned long	s_ntarguvhub1;		/* number of times target
						   hubs == 1 */
	unsigned long	s_resets_plug;		/* ipi-style resets from plug
						   state */
	unsigned long	s_resets_timeout;	/* ipi-style resets from
						   timeouts */
	unsigned long	s_busy;			/* status stayed busy past
						   s/w timer */
	unsigned long	s_throttles;		/* waits in throttle */
	unsigned long	s_retry_messages;	/* retry broadcasts */
	unsigned long	s_bau_reenabled;	/* for bau enable/disable */
	unsigned long	s_bau_disabled;		/* for bau enable/disable */
	/* destination statistics */
	unsigned long	d_alltlb;		/* times all tlb's on this
						   cpu were flushed */
	unsigned long	d_onetlb;		/* times just one tlb on this
						   cpu was flushed */
	unsigned long	d_multmsg;		/* interrupts with multiple
						   messages */
	unsigned long	d_nomsg;		/* interrupts with no message */
	unsigned long	d_time;			/* time spent on destination
						   side */
	unsigned long	d_requestee;		/* number of messages
						   processed */
	unsigned long	d_retries;		/* number of retry messages
						   processed */
	unsigned long	d_canceled;		/* number of messages canceled
						   by retries */
	unsigned long	d_nocanceled;		/* retries that found nothing
						   to cancel */
	unsigned long	d_resets;		/* number of ipi-style requests
						   processed */
	unsigned long	d_rcanceled;		/* number of messages canceled
						   by resets */
};

struct tunables {
	int			*tunp;
	int			deflt;
};

struct hub_and_pnode {
	short			uvhub;
	short			pnode;
};

struct socket_desc {
	short			num_cpus;
	short			cpu_number[MAX_CPUS_PER_SOCKET];
};

struct uvhub_desc {
	unsigned short		socket_mask;
	short			num_cpus;
	short			uvhub;
	short			pnode;
	struct socket_desc	socket[2];
};

/*
 * one per-cpu; to locate the software tables
 */
struct bau_control {
	struct bau_desc		*descriptor_base;
	struct bau_pq_entry	*queue_first;
	struct bau_pq_entry	*queue_last;
	struct bau_pq_entry	*bau_msg_head;
	struct bau_control	*uvhub_master;
	struct bau_control	*socket_master;
	struct ptc_stats	*statp;
	cpumask_t		*cpumask;
	unsigned long		timeout_interval;
	unsigned long		set_bau_on_time;
	atomic_t		active_descriptor_count;
	int			plugged_tries;
	int			timeout_tries;
	int			ipi_attempts;
	int			conseccompletes;
	int			baudisabled;
	int			set_bau_off;
	short			cpu;
	short			osnode;
	short			uvhub_cpu;
	short			uvhub;
	short			cpus_in_socket;
	short			cpus_in_uvhub;
	short			partition_base_pnode;
	unsigned short		message_number;
	unsigned short		uvhub_quiesce;
	short			socket_acknowledge_count[DEST_Q_SIZE];
	cycles_t		send_message;
	spinlock_t		uvhub_lock;
	spinlock_t		queue_lock;
	/* tunables */
	int			max_concurr;
	int			max_concurr_const;
	int			plugged_delay;
	int			plugsb4reset;
	int			timeoutsb4reset;
	int			ipi_reset_limit;
	int			complete_threshold;
	int			cong_response_us;
	int			cong_reps;
	int			cong_period;
	cycles_t		period_time;
	long			period_requests;
	struct hub_and_pnode	*thp;
};

static inline unsigned long read_mmr_uv2_status(void)
{
	return read_lmmr(UV2H_LB_BAU_SB_ACTIVATION_STATUS_2);
}

static inline void write_mmr_data_broadcast(int pnode, unsigned long mmr_image)
{
	write_gmmr(pnode, UVH_BAU_DATA_BROADCAST, mmr_image);
}

static inline void write_mmr_descriptor_base(int pnode, unsigned long mmr_image)
{
	write_gmmr(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, mmr_image);
}

static inline void write_mmr_activation(unsigned long index)
{
	write_lmmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
}

static inline void write_gmmr_activation(int pnode, unsigned long mmr_image)
{
	write_gmmr(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, mmr_image);
}

static inline void write_mmr_payload_first(int pnode, unsigned long mmr_image)
{
	write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, mmr_image);
}

static inline void write_mmr_payload_tail(int pnode, unsigned long mmr_image)
{
	write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, mmr_image);
}

static inline void write_mmr_payload_last(int pnode, unsigned long mmr_image)
{
	write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, mmr_image);
}

static inline void write_mmr_misc_control(int pnode, unsigned long mmr_image)
{
	write_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
}

static inline unsigned long read_mmr_misc_control(int pnode)
{
	return read_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL);
}

static inline void write_mmr_sw_ack(unsigned long mr)
{
	uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
}

static inline unsigned long read_mmr_sw_ack(void)
{
	return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
}

static inline unsigned long read_gmmr_sw_ack(int pnode)
{
	return read_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
}

static inline void write_mmr_data_config(int pnode, unsigned long mr)
{
	uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr);
}

static inline int bau_uvhub_isset(int uvhub, struct pnmask *dstp)
{
	return constant_test_bit(uvhub, &dstp->bits[0]);
}
static inline void bau_uvhub_set(int pnode, struct pnmask *dstp)
{
	__set_bit(pnode, &dstp->bits[0]);
}
static inline void bau_uvhubs_clear(struct pnmask *dstp, int nbits)
{
	bitmap_zero(&dstp->bits[0], nbits);
}
static inline int bau_uvhub_weight(struct pnmask *dstp)
{
	return bitmap_weight((unsigned long *)&dstp->bits[0],
				UV_DISTRIBUTION_SIZE);
}

static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
{
	bitmap_zero(&dstp->bits, nbits);
}

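/*
 * Illustrative sketch (not part of the original header): typical use of the
 * distribution helpers above when aiming a descriptor at a single pnode.
 * The helper name is hypothetical.
 */
static inline int target_one_pnode(struct pnmask *dist, int pnode)
{
	bau_uvhubs_clear(dist, UV_DISTRIBUTION_SIZE);
	bau_uvhub_set(pnode, dist);
	return bau_uvhub_weight(dist);		/* now 1: a single target */
}
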
extern void uv_bau_message_intr1(void);
extern void uv_bau_timeout_intr1(void);

struct atomic_short {
	short counter;
};

/*
 * atomic_read_short - read a short atomic variable
 * @v: pointer of type atomic_short
 *
 * Atomically reads the value of @v.
 */
static inline int atomic_read_short(const struct atomic_short *v)
{
	return v->counter;
}

/*
 * atom_asr - add and return a short int
 * @i: short value to add
 * @v: pointer of type atomic_short
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atom_asr(short i, struct atomic_short *v)
{
	short __i = i;
	asm volatile(LOCK_PREFIX "xaddw %0, %1"
			: "+r" (i), "+m" (v->counter)
			: : "memory");
	return i + __i;
}

/*
 * conditionally add 1 to *v, unless *v is >= u
 * return 0 if we cannot add 1 to *v because it is >= u
 * return 1 if we can add 1 to *v because it is < u
 * the add is atomic
 *
 * This is close to atomic_add_unless(), but this allows the 'u' value
 * to be lowered below the current 'v'. atomic_add_unless can only stop
 * on equal.
 */
static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
{
	spin_lock(lock);
	if (atomic_read(v) >= u) {
		spin_unlock(lock);
		return 0;
	}
	atomic_inc(v);
	spin_unlock(lock);
	return 1;
}

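/*
 * Illustrative sketch (not part of the original header): how the helper
 * above would typically gate a sender on the per-hub concurrency limit
 * (see 'active_descriptor_count' and 'max_concurr' in struct bau_control).
 * The function name is hypothetical; the real throttling logic lives in
 * the BAU driver.
 */
static inline void wait_for_descriptor_slot(struct bau_control *hmaster)
{
	while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
				     &hmaster->active_descriptor_count,
				     hmaster->max_concurr))
		cpu_relax();	/* spin until a concurrent slot frees up */
}
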
#endif /* _ASM_X86_UV_UV_BAU_H */