/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/delay.h>
#include "hfi.h"
#include "qp.h"
#include "trace.h"

#define SC_CTXT_PACKET_EGRESS_TIMEOUT 350 /* in chip cycles */

#define SC(name) SEND_CTXT_##name
/*
 * Send Context functions
 */
static void sc_wait_for_packet_egress(struct send_context *sc, int pause);

/*
 * Set the CM reset bit and wait for it to clear.  Use the provided
 * sendctrl register.  This routine has no locking.
 */
void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
{
	write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK);
	while (1) {
		udelay(1);
		sendctrl = read_csr(dd, SEND_CTRL);
		if ((sendctrl & SEND_CTRL_CM_RESET_SMASK) == 0)
			break;
	}
}

/* defined in header release 48 and higher */
#ifndef SEND_CTRL_UNSUPPORTED_VL_SHIFT
#define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
#define SEND_CTRL_UNSUPPORTED_VL_MASK 0xffull
#define SEND_CTRL_UNSUPPORTED_VL_SMASK (SEND_CTRL_UNSUPPORTED_VL_MASK \
		<< SEND_CTRL_UNSUPPORTED_VL_SHIFT)
#endif

/* global control of PIO send */
void pio_send_control(struct hfi1_devdata *dd, int op)
{
	u64 reg, mask;
	unsigned long flags;
	int write = 1;	/* write sendctrl back */
	int flush = 0;	/* re-read sendctrl to make sure it is flushed */

	spin_lock_irqsave(&dd->sendctrl_lock, flags);

	reg = read_csr(dd, SEND_CTRL);
	switch (op) {
	case PSC_GLOBAL_ENABLE:
		reg |= SEND_CTRL_SEND_ENABLE_SMASK;
	/* Fall through */
	case PSC_DATA_VL_ENABLE:
		/* Disallow sending on VLs not enabled */
		mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
			SEND_CTRL_UNSUPPORTED_VL_SHIFT;
		reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
		break;
	case PSC_GLOBAL_DISABLE:
		reg &= ~SEND_CTRL_SEND_ENABLE_SMASK;
		break;
	case PSC_GLOBAL_VLARB_ENABLE:
		reg |= SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
		break;
	case PSC_GLOBAL_VLARB_DISABLE:
		reg &= ~SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
		break;
	case PSC_CM_RESET:
		__cm_reset(dd, reg);
		write = 0; /* CSR already written (and flushed) */
		break;
	case PSC_DATA_VL_DISABLE:
		reg |= SEND_CTRL_UNSUPPORTED_VL_SMASK;
		flush = 1;
		break;
	default:
		dd_dev_err(dd, "%s: invalid control %d\n", __func__, op);
		break;
	}

	if (write) {
		write_csr(dd, SEND_CTRL, reg);
		if (flush)
			(void)read_csr(dd, SEND_CTRL); /* flush write */
	}

	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
}
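
/*
 * Worked example of the PSC_DATA_VL_ENABLE mask above, using the fallback
 * SEND_CTRL_UNSUPPORTED_VL_SHIFT of 3 defined earlier in this file (the
 * numbers follow directly from the code):
 *   with num_vls = 4:  (~0ull << 4) & 0xff = 0xf0, shifted left by 3
 *                      gives 0x780, i.e. VLs 4-7 are marked unsupported
 *                      and data VLs 0-3 remain usable.
 *   with num_vls = 8:  (~0ull << 8) & 0xff = 0, so no data VL is
 *                      disallowed.
 */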

/* number of send context memory pools */
#define NUM_SC_POOLS 2

/* Send Context Size (SCS) wildcards */
#define SCS_POOL_0 -1
#define SCS_POOL_1 -2

/* Send Context Count (SCC) wildcards */
#define SCC_PER_VL -1
#define SCC_PER_CPU -2
#define SCC_PER_KRCVQ -3

/* Send Context Size (SCS) constants */
#define SCS_ACK_CREDITS 32
#define SCS_VL15_CREDITS 102	/* 3 pkts of 2048B data + 128B header */
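/*
 * Sanity check on SCS_VL15_CREDITS, assuming the usual 64-byte PIO block
 * (one credit per block): 3 * (2048 + 128) bytes = 6528 bytes, and
 * 6528 / 64 = 102 blocks, matching the value above.
 */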

#define PIO_THRESHOLD_CEILING 4096

#define PIO_WAIT_BATCH_SIZE 5

/* default send context sizes */
static struct sc_config_sizes sc_config_sizes[SC_MAX] = {
	[SC_KERNEL] = { .size  = SCS_POOL_0,	/* even divide, pool 0 */
			.count = SCC_PER_VL },	/* one per NUMA */
	[SC_ACK]    = { .size  = SCS_ACK_CREDITS,
			.count = SCC_PER_KRCVQ },
	[SC_USER]   = { .size  = SCS_POOL_0,	/* even divide, pool 0 */
			.count = SCC_PER_CPU },	/* one per CPU */
	[SC_VL15]   = { .size  = SCS_VL15_CREDITS,
			.count = 1 },

};

/* send context memory pool configuration */
struct mem_pool_config {
	int centipercent;	/* % of memory, in 100ths of 1% */
	int absolute_blocks;	/* absolute block count */
};

/* default memory pool configuration: 100% in pool 0 */
static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] = {
	/* centi%, abs blocks */
	{  10000,     -1 },	/* pool 0 */
	{      0,     -1 },	/* pool 1 */
};

/* memory pool information, used when calculating final sizes */
struct mem_pool_info {
	int centipercent;	/*
				 * 100th of 1% of memory to use, -1 if blocks
				 * already set
				 */
	int count;		/* count of contexts in the pool */
	int blocks;		/* block size of the pool */
	int size;		/* context size, in blocks */
};

/*
 * Convert a pool wildcard to a valid pool index.  The wildcards
 * start at -1 and increase negatively.  Map them as:
 *	-1 => 0
 *	-2 => 1
 *	etc.
 *
 * Return -1 on non-wildcard input, otherwise convert to a pool number.
 */
static int wildcard_to_pool(int wc)
{
	if (wc >= 0)
		return -1;	/* non-wildcard */
	return -wc - 1;
}
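
/*
 * Example of the wildcard mapping, using the constants defined above:
 *   wildcard_to_pool(SCS_POOL_0)      -> 0   (memory pool 0)
 *   wildcard_to_pool(SCS_POOL_1)      -> 1   (memory pool 1)
 *   wildcard_to_pool(SCS_ACK_CREDITS) -> -1  (32 is a fixed size, not a
 *                                             wildcard)
 */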

static const char *sc_type_names[SC_MAX] = {
	"kernel",
	"ack",
	"user",
	"vl15"
};

static const char *sc_type_name(int index)
{
	if (index < 0 || index >= SC_MAX)
		return "unknown";
	return sc_type_names[index];
}

/*
 * Read the send context memory pool configuration and send context
 * size configuration.  Replace any wildcards and come up with final
 * counts and sizes for the send context types.
 */
int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
{
	struct mem_pool_info mem_pool_info[NUM_SC_POOLS] = { { 0 } };
	int total_blocks = (dd->chip_pio_mem_size / PIO_BLOCK_SIZE) - 1;
	int total_contexts = 0;
	int fixed_blocks;
	int pool_blocks;
	int used_blocks;
	int cp_total;		/* centipercent total */
	int ab_total;		/* absolute block total */
	int extra;
	int i;

	/*
	 * When SDMA is enabled, kernel context pio packet size is capped by
	 * "piothreshold".  Reduce pio buffer allocation for kernel context by
	 * setting it to a fixed size.  The allocation allows 3-deep buffering
	 * of the largest pio packets plus up to 128 bytes header, sufficient
	 * to maintain verbs performance.
	 *
	 * When SDMA is disabled, keep the default pooling allocation.
	 */
	if (HFI1_CAP_IS_KSET(SDMA)) {
		u16 max_pkt_size = (piothreshold < PIO_THRESHOLD_CEILING) ?
				   piothreshold : PIO_THRESHOLD_CEILING;
		sc_config_sizes[SC_KERNEL].size =
			3 * (max_pkt_size + 128) / PIO_BLOCK_SIZE;
	}

	/*
	 * Step 0:
	 *	- copy the centipercents/absolute sizes from the pool config
	 *	- sanity check these values
	 *	- add up centipercents, then later check for full value
	 *	- add up absolute blocks, then later check for over-commit
	 */
	cp_total = 0;
	ab_total = 0;
	for (i = 0; i < NUM_SC_POOLS; i++) {
		int cp = sc_mem_pool_config[i].centipercent;
		int ab = sc_mem_pool_config[i].absolute_blocks;

		/*
		 * A negative value is "unused" or "invalid".  Both *can*
		 * be valid, but centipercent wins, so check that first
		 */
		if (cp >= 0) {			/* centipercent valid */
			cp_total += cp;
		} else if (ab >= 0) {		/* absolute blocks valid */
			ab_total += ab;
		} else {			/* neither valid */
			dd_dev_err(
				dd,
				"Send context memory pool %d: both the block count and centipercent are invalid\n",
				i);
			return -EINVAL;
		}

		mem_pool_info[i].centipercent = cp;
		mem_pool_info[i].blocks = ab;
	}

	/* do not use both % and absolute blocks for different pools */
	if (cp_total != 0 && ab_total != 0) {
		dd_dev_err(
			dd,
			"All send context memory pools must be described as either centipercent or blocks, no mixing between pools\n");
		return -EINVAL;
	}

	/* if any percentages are present, they must add up to 100% x 100 */
	if (cp_total != 0 && cp_total != 10000) {
		dd_dev_err(
			dd,
			"Send context memory pool centipercent is %d, expecting 10000\n",
			cp_total);
		return -EINVAL;
	}

	/* the absolute pool total cannot be more than the mem total */
	if (ab_total > total_blocks) {
		dd_dev_err(
			dd,
			"Send context memory pool absolute block count %d is larger than the memory size %d\n",
			ab_total, total_blocks);
		return -EINVAL;
	}

	/*
	 * Step 2:
	 *	- copy from the context size config
	 *	- replace context type wildcard counts with real values
	 *	- add up non-memory pool block sizes
	 *	- add up memory pool user counts
	 */
	fixed_blocks = 0;
	for (i = 0; i < SC_MAX; i++) {
		int count = sc_config_sizes[i].count;
		int size = sc_config_sizes[i].size;
		int pool;

		/*
		 * Sanity check count: Either a positive value or
		 * one of the expected wildcards is valid.  The positive
		 * value is checked later when we compare against total
		 * memory available.
		 */
		if (i == SC_ACK) {
			count = dd->n_krcv_queues;
		} else if (i == SC_KERNEL) {
			count = INIT_SC_PER_VL * num_vls;
		} else if (count == SCC_PER_CPU) {
			count = dd->num_rcv_contexts - dd->n_krcv_queues;
		} else if (count < 0) {
			dd_dev_err(
				dd,
				"%s send context invalid count wildcard %d\n",
				sc_type_name(i), count);
			return -EINVAL;
		}
		if (total_contexts + count > dd->chip_send_contexts)
			count = dd->chip_send_contexts - total_contexts;

		total_contexts += count;

		/*
		 * Sanity check pool: The conversion will return a pool
		 * number or -1 if a fixed (non-negative) value.  The fixed
		 * value is checked later when we compare against
		 * total memory available.
		 */
		pool = wildcard_to_pool(size);
		if (pool == -1) {			/* non-wildcard */
			fixed_blocks += size * count;
		} else if (pool < NUM_SC_POOLS) {	/* valid wildcard */
			mem_pool_info[pool].count += count;
		} else {				/* invalid wildcard */
			dd_dev_err(
				dd,
				"%s send context invalid pool wildcard %d\n",
				sc_type_name(i), size);
			return -EINVAL;
		}

		dd->sc_sizes[i].count = count;
		dd->sc_sizes[i].size = size;
	}
	if (fixed_blocks > total_blocks) {
		dd_dev_err(
			dd,
			"Send context fixed block count, %u, larger than total block count %u\n",
			fixed_blocks, total_blocks);
		return -EINVAL;
	}

	/* step 3: calculate the blocks in the pools, and pool context sizes */
	pool_blocks = total_blocks - fixed_blocks;
	if (ab_total > pool_blocks) {
		dd_dev_err(
			dd,
			"Send context fixed pool sizes, %u, larger than pool block count %u\n",
			ab_total, pool_blocks);
		return -EINVAL;
	}
	/* subtract off the fixed pool blocks */
	pool_blocks -= ab_total;

	for (i = 0; i < NUM_SC_POOLS; i++) {
		struct mem_pool_info *pi = &mem_pool_info[i];

		/* % beats absolute blocks */
		if (pi->centipercent >= 0)
			pi->blocks = (pool_blocks * pi->centipercent) / 10000;

		if (pi->blocks == 0 && pi->count != 0) {
			dd_dev_err(
				dd,
				"Send context memory pool %d has %u contexts, but no blocks\n",
				i, pi->count);
			return -EINVAL;
		}
		if (pi->count == 0) {
			/* warn about wasted blocks */
			if (pi->blocks != 0)
				dd_dev_err(
					dd,
					"Send context memory pool %d has %u blocks, but zero contexts\n",
					i, pi->blocks);
			pi->size = 0;
		} else {
			pi->size = pi->blocks / pi->count;
		}
	}

	/* step 4: fill in the context type sizes from the pool sizes */
	used_blocks = 0;
	for (i = 0; i < SC_MAX; i++) {
		if (dd->sc_sizes[i].size < 0) {
			unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size);

			WARN_ON_ONCE(pool >= NUM_SC_POOLS);
			dd->sc_sizes[i].size = mem_pool_info[pool].size;
		}
		/* make sure we are not larger than what is allowed by the HW */
#define PIO_MAX_BLOCKS 1024
		if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS)
			dd->sc_sizes[i].size = PIO_MAX_BLOCKS;

		/* calculate our total usage */
		used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count;
	}
	extra = total_blocks - used_blocks;
	if (extra != 0)
		dd_dev_info(dd, "unused send context blocks: %d\n", extra);

	return total_contexts;
}
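
/*
 * Illustrative walk-through of the sizing above.  The numbers are made up
 * for the example and do not describe any particular chip:
 *   - total_blocks = 2047, default pool config (pool 0 = 100%, pool 1 = 0%)
 *   - SC_ACK and SC_VL15 have fixed sizes, so they contribute to
 *     fixed_blocks; SC_KERNEL and SC_USER use the SCS_POOL_0 wildcard and
 *     contribute their counts to pool 0.
 *   - Suppose fixed_blocks = 614.  Then pool_blocks = 2047 - 614 = 1433,
 *     all of it assigned to pool 0, and each pool-0 context gets
 *     1433 / (pool-0 count) blocks (integer divide), capped at
 *     PIO_MAX_BLOCKS.
 */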

int init_send_contexts(struct hfi1_devdata *dd)
{
	u16 base;
	int ret, i, j, context;

	ret = init_credit_return(dd);
	if (ret)
		return ret;

	dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8),
				     GFP_KERNEL);
	dd->send_contexts = kcalloc(dd->num_send_contexts,
				    sizeof(struct send_context_info),
				    GFP_KERNEL);
	if (!dd->send_contexts || !dd->hw_to_sw) {
		kfree(dd->hw_to_sw);
		kfree(dd->send_contexts);
		free_credit_return(dd);
		return -ENOMEM;
	}

	/* hardware context map starts with invalid send context indices */
	for (i = 0; i < TXE_NUM_CONTEXTS; i++)
		dd->hw_to_sw[i] = INVALID_SCI;

	/*
	 * All send contexts have their credit sizes.  Allocate credits
	 * for each context one after another from the global space.
	 */
	context = 0;
	base = 1;
	for (i = 0; i < SC_MAX; i++) {
		struct sc_config_sizes *scs = &dd->sc_sizes[i];

		for (j = 0; j < scs->count; j++) {
			struct send_context_info *sci =
						&dd->send_contexts[context];
			sci->type = i;
			sci->base = base;
			sci->credits = scs->size;

			context++;
			base += scs->size;
		}
	}

	return 0;
}

/*
 * Allocate a software index and hardware context of the given type.
 *
 * Must be called with dd->sc_lock held.
 */
static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
		       u32 *hw_context)
{
	struct send_context_info *sci;
	u32 index;
	u32 context;

	for (index = 0, sci = &dd->send_contexts[0];
	     index < dd->num_send_contexts; index++, sci++) {
		if (sci->type == type && sci->allocated == 0) {
			sci->allocated = 1;
			/* use a 1:1 mapping, but make them non-equal */
			context = dd->chip_send_contexts - index - 1;
			dd->hw_to_sw[context] = index;
			*sw_index = index;
			*hw_context = context;
			return 0; /* success */
		}
	}
	dd_dev_err(dd, "Unable to locate a free type %d send context\n", type);
	return -ENOSPC;
}

/*
 * Free the send context given by its software index.
 *
 * Must be called with dd->sc_lock held.
 */
static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
{
	struct send_context_info *sci;

	sci = &dd->send_contexts[sw_index];
	if (!sci->allocated) {
		dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n",
			   __func__, sw_index, hw_context);
	}
	sci->allocated = 0;
	dd->hw_to_sw[hw_context] = INVALID_SCI;
}

/* return the base context of a context in a group */
static inline u32 group_context(u32 context, u32 group)
{
	return (context >> group) << group;
}

/* return the size of a group */
static inline u32 group_size(u32 group)
{
	return 1 << group;
}
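
/*
 * Example of the group math above, taken straight from the two helpers:
 *   group = 0: group_size() = 1, so every context is the base of its own
 *              group (this matches the "grouping is always single context
 *              for now" note in sc_alloc()).
 *   group = 3: group_size() = 8, and group_context(13, 3) = 8, i.e.
 *              contexts 8-15 share base context 8 and therefore share one
 *              credit return address.
 */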

/*
 * Obtain the credit return addresses, kernel virtual and bus, for the
 * given sc.
 *
 * To understand this routine:
 * o va and dma are arrays of struct credit_return.  One for each physical
 *   send context, per NUMA.
 * o Each send context always looks in its relative location in a struct
 *   credit_return for its credit return.
 * o Each send context in a group must have its return address CSR programmed
 *   with the same value.  Use the address of the first send context in the
 *   group.
 */
static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma)
{
	u32 gc = group_context(sc->hw_context, sc->group);
	u32 index = sc->hw_context & 0x7;

	sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
	*dma = (unsigned long)
		&((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
}

/*
 * Work queue function triggered in error interrupt routine for
 * kernel contexts.
 */
static void sc_halted(struct work_struct *work)
{
	struct send_context *sc;

	sc = container_of(work, struct send_context, halt_work);
	sc_restart(sc);
}

/*
 * Calculate PIO block threshold for this send context using the given MTU.
 * Trigger a return when one MTU plus optional header of credits remain.
 *
 * Parameter mtu is in bytes.
 * Parameter hdrqentsize is in DWORDs.
 *
 * Return value is what to write into the CSR: trigger return when
 * unreturned credits pass this count.
 */
u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
{
	u32 release_credits;
	u32 threshold;

	/* add in the header size, then divide by the PIO block size */
	mtu += hdrqentsize << 2;
	release_credits = DIV_ROUND_UP(mtu, PIO_BLOCK_SIZE);

	/* check against this context's credits */
	if (sc->credits <= release_credits)
		threshold = 1;
	else
		threshold = sc->credits - release_credits;

	return threshold;
}
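
/*
 * Worked example for sc_mtu_to_threshold(), assuming the usual 64-byte
 * PIO block size (one credit per block):
 *   mtu = 4096 bytes, hdrqentsize = 32 DWORDs (128 bytes)
 *   mtu + header           = 4224 bytes
 *   release_credits        = DIV_ROUND_UP(4224, 64) = 66
 *   with sc->credits = 160 -> threshold = 160 - 66 = 94
 * so a credit return is triggered once 94 credits are outstanding,
 * leaving at least one full MTU plus header worth of blocks free.
 */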

/*
 * Calculate credit threshold in terms of percent of the allocated credits.
 * Trigger when unreturned credits equal or exceed the percentage of the whole.
 *
 * Return value is what to write into the CSR: trigger return when
 * unreturned credits pass this count.
 */
u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
{
	return (sc->credits * percent) / 100;
}

/*
 * Set the credit return threshold.
 */
void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
{
	unsigned long flags;
	u32 old_threshold;
	int force_return = 0;

	spin_lock_irqsave(&sc->credit_ctrl_lock, flags);

	old_threshold = (sc->credit_ctrl >>
				SC(CREDIT_CTRL_THRESHOLD_SHIFT))
			 & SC(CREDIT_CTRL_THRESHOLD_MASK);

	if (new_threshold != old_threshold) {
		sc->credit_ctrl =
			(sc->credit_ctrl
				& ~SC(CREDIT_CTRL_THRESHOLD_SMASK))
			| ((new_threshold
				& SC(CREDIT_CTRL_THRESHOLD_MASK))
			   << SC(CREDIT_CTRL_THRESHOLD_SHIFT));
		write_kctxt_csr(sc->dd, sc->hw_context,
				SC(CREDIT_CTRL), sc->credit_ctrl);

		/* force a credit return on change to avoid a possible stall */
		force_return = 1;
	}

	spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);

	if (force_return)
		sc_return_credits(sc);
}

/*
 * set_pio_integrity
 *
 * Set the CHECK_ENABLE register for the send context 'sc'.
 */
void set_pio_integrity(struct send_context *sc)
{
	struct hfi1_devdata *dd = sc->dd;
	u32 hw_context = sc->hw_context;
	int type = sc->type;

	write_kctxt_csr(dd, hw_context,
			SC(CHECK_ENABLE),
			hfi1_pkt_default_send_ctxt_mask(dd, type));
}

static u32 get_buffers_allocated(struct send_context *sc)
{
	int cpu;
	u32 ret = 0;

	for_each_possible_cpu(cpu)
		ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
	return ret;
}

static void reset_buffers_allocated(struct send_context *sc)
{
	int cpu;

	for_each_possible_cpu(cpu)
		(*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
}

/*
 * Allocate a NUMA relative send context structure of the given type along
 * with a HW context.
 */
struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
			      uint hdrqentsize, int numa)
{
	struct send_context_info *sci;
	struct send_context *sc = NULL;
	dma_addr_t dma;
	unsigned long flags;
	u64 reg;
	u32 thresh;
	u32 sw_index;
	u32 hw_context;
	int ret;
	u8 opval, opmask;

	/* do not allocate while frozen */
	if (dd->flags & HFI1_FROZEN)
		return NULL;

	sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa);
	if (!sc)
		return NULL;

	sc->buffers_allocated = alloc_percpu(u32);
	if (!sc->buffers_allocated) {
		kfree(sc);
		dd_dev_err(dd,
			   "Cannot allocate buffers_allocated per cpu counters\n"
			  );
		return NULL;
	}

	spin_lock_irqsave(&dd->sc_lock, flags);
	ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
	if (ret) {
		spin_unlock_irqrestore(&dd->sc_lock, flags);
		free_percpu(sc->buffers_allocated);
		kfree(sc);
		return NULL;
	}

	sci = &dd->send_contexts[sw_index];
	sci->sc = sc;

	sc->dd = dd;
	sc->node = numa;
	sc->type = type;
	spin_lock_init(&sc->alloc_lock);
	spin_lock_init(&sc->release_lock);
	spin_lock_init(&sc->credit_ctrl_lock);
	INIT_LIST_HEAD(&sc->piowait);
	INIT_WORK(&sc->halt_work, sc_halted);
	init_waitqueue_head(&sc->halt_wait);

	/* grouping is always single context for now */
	sc->group = 0;

	sc->sw_index = sw_index;
	sc->hw_context = hw_context;
	cr_group_addresses(sc, &dma);
	sc->credits = sci->credits;

/* PIO Send Memory Address details */
#define PIO_ADDR_CONTEXT_MASK 0xfful
#define PIO_ADDR_CONTEXT_SHIFT 16
	sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK)
					<< PIO_ADDR_CONTEXT_SHIFT);

	/* set base and credits */
	reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK))
					<< SC(CTRL_CTXT_DEPTH_SHIFT))
		| ((sci->base & SC(CTRL_CTXT_BASE_MASK))
					<< SC(CTRL_CTXT_BASE_SHIFT));
	write_kctxt_csr(dd, hw_context, SC(CTRL), reg);

	set_pio_integrity(sc);

	/* unmask all errors */
	write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1);

	/* set the default partition key */
	write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
			(SC(CHECK_PARTITION_KEY_VALUE_MASK) &
			 DEFAULT_PKEY) <<
			SC(CHECK_PARTITION_KEY_VALUE_SHIFT));

	/* per context type checks */
	if (type == SC_USER) {
		opval = USER_OPCODE_CHECK_VAL;
		opmask = USER_OPCODE_CHECK_MASK;
	} else {
		opval = OPCODE_CHECK_VAL_DISABLED;
		opmask = OPCODE_CHECK_MASK_DISABLED;
	}

	/* set the send context check opcode mask and value */
	write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
			((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
			((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));

	/* set up credit return */
	reg = dma & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
	write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);

	/*
	 * Calculate the initial credit return threshold.
	 *
	 * For Ack contexts, set a threshold for half the credits.
	 * For User contexts use the given percentage.  This has been
	 * sanitized on driver start-up.
	 * For Kernel contexts, use the default MTU plus a header
	 * or half the credits, whichever is smaller.  This should
	 * work for both the 3-deep buffering allocation and the
	 * pooling allocation.
	 */
	if (type == SC_ACK) {
		thresh = sc_percent_to_threshold(sc, 50);
	} else if (type == SC_USER) {
		thresh = sc_percent_to_threshold(sc,
						 user_credit_return_threshold);
	} else { /* kernel */
		thresh = min(sc_percent_to_threshold(sc, 50),
			     sc_mtu_to_threshold(sc, hfi1_max_mtu,
						 hdrqentsize));
	}
	reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT);
	/* add in early return */
	if (type == SC_USER && HFI1_CAP_IS_USET(EARLY_CREDIT_RETURN))
		reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
	else if (HFI1_CAP_IS_KSET(EARLY_CREDIT_RETURN)) /* kernel, ack */
		reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);

	/* set up write-through credit_ctrl */
	sc->credit_ctrl = reg;
	write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg);

	/* User send contexts should not allow sending on VL15 */
	if (type == SC_USER) {
		reg = 1ULL << 15;
		write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg);
	}

	spin_unlock_irqrestore(&dd->sc_lock, flags);

	/*
	 * Allocate shadow ring to track outstanding PIO buffers _after_
	 * unlocking.  We don't know the size until the lock is held and
	 * we can't allocate while the lock is held.  No one is using
	 * the context yet, so allocate it now.
	 *
	 * User contexts do not get a shadow ring.
	 */
	if (type != SC_USER) {
		/*
		 * Size the shadow ring 1 larger than the number of credits
		 * so head == tail can mean empty.
		 */
		sc->sr_size = sci->credits + 1;
		sc->sr = kzalloc_node(sizeof(union pio_shadow_ring) *
				      sc->sr_size, GFP_KERNEL, numa);
		if (!sc->sr) {
			sc_free(sc);
			return NULL;
		}
	}

	hfi1_cdbg(PIO,
		  "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u\n",
		  sw_index,
		  hw_context,
		  sc_type_name(type),
		  sc->group,
		  sc->credits,
		  sc->credit_ctrl,
		  thresh);

	return sc;
}
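
/*
 * Illustrative call sequence for the allocator above (a sketch only; the
 * real init paths live elsewhere in the driver and may differ):
 *
 *	sc = sc_alloc(dd, SC_KERNEL, hdrqentsize, numa);
 *	if (sc && !sc_enable(sc)) {
 *		pbuf = sc_buffer_alloc(sc, dw_len, cb, arg);
 *		...
 *	}
 *	sc_free(sc);
 *
 * sc_alloc() only sets up the CSRs and software state; the context must
 * still be enabled with sc_enable() before sc_buffer_alloc() will hand
 * out PIO buffers.
 */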

/* free a per-NUMA send context structure */
void sc_free(struct send_context *sc)
{
	struct hfi1_devdata *dd;
	unsigned long flags;
	u32 sw_index;
	u32 hw_context;

	if (!sc)
		return;

	sc->flags |= SCF_IN_FREE;	/* ensure no restarts */
	dd = sc->dd;
	if (!list_empty(&sc->piowait))
		dd_dev_err(dd, "piowait list not empty!\n");
	sw_index = sc->sw_index;
	hw_context = sc->hw_context;
	sc_disable(sc);	/* make sure the HW is disabled */
	flush_work(&sc->halt_work);

	spin_lock_irqsave(&dd->sc_lock, flags);
	dd->send_contexts[sw_index].sc = NULL;

	/* clear/disable all registers set in sc_alloc */
	write_kctxt_csr(dd, hw_context, SC(CTRL), 0);
	write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0);
	write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0);
	write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0);
	write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0);
	write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0);
	write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0);

	/* release the index and context for re-use */
	sc_hw_free(dd, sw_index, hw_context);
	spin_unlock_irqrestore(&dd->sc_lock, flags);

	kfree(sc->sr);
	free_percpu(sc->buffers_allocated);
	kfree(sc);
}

/* disable the context */
void sc_disable(struct send_context *sc)
{
	u64 reg;
	unsigned long flags;
	struct pio_buf *pbuf;

	if (!sc)
		return;

	/* do all steps, even if already disabled */
	spin_lock_irqsave(&sc->alloc_lock, flags);
	reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
	reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
	sc->flags &= ~SCF_ENABLED;
	sc_wait_for_packet_egress(sc, 1);
	write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
	spin_unlock_irqrestore(&sc->alloc_lock, flags);

	/*
	 * Flush any waiters.  Once the context is disabled,
	 * credit return interrupts are stopped (although there
	 * could be one in-process when the context is disabled).
	 * Wait one microsecond for any lingering interrupts, then
	 * proceed with the flush.
	 */
	udelay(1);
	spin_lock_irqsave(&sc->release_lock, flags);
	if (sc->sr) {	/* this context has a shadow ring */
		while (sc->sr_tail != sc->sr_head) {
			pbuf = &sc->sr[sc->sr_tail].pbuf;
			if (pbuf->cb)
				(*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE);
			sc->sr_tail++;
			if (sc->sr_tail >= sc->sr_size)
				sc->sr_tail = 0;
		}
	}
	spin_unlock_irqrestore(&sc->release_lock, flags);
}

/* return SendEgressCtxtStatus.PacketOccupancy */
#define packet_occupancy(r) \
	(((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)\
	>> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT)

/* is egress halted on the context? */
#define egress_halted(r) \
	((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK)

/* wait for packet egress, optionally pause for credit return */
static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
{
	struct hfi1_devdata *dd = sc->dd;
	u64 reg = 0;
	u64 reg_prev;
	u32 loop = 0;

	while (1) {
		reg_prev = reg;
		reg = read_csr(dd, sc->hw_context * 8 +
			       SEND_EGRESS_CTXT_STATUS);
		/* done if egress is stopped */
		if (egress_halted(reg))
			break;
		reg = packet_occupancy(reg);
		if (reg == 0)
			break;
		/* counter is reset if occupancy count changes */
		if (reg != reg_prev)
			loop = 0;
		if (loop > 50000) {
			/* timed out - bounce the link */
			dd_dev_err(dd,
				   "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
				   __func__, sc->sw_index,
				   sc->hw_context, (u32)reg);
			queue_work(dd->pport->hfi1_wq,
				   &dd->pport->link_bounce_work);
			break;
		}
		loop++;
		udelay(1);
	}

	if (pause)
		/* Add additional delay to ensure chip returns all credits */
		pause_for_credit_return(dd);
}

void sc_wait(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < dd->num_send_contexts; i++) {
		struct send_context *sc = dd->send_contexts[i].sc;

		if (!sc)
			continue;
		sc_wait_for_packet_egress(sc, 0);
	}
}

/*
 * Restart a context after it has been halted due to error.
 *
 * If the first step fails - wait for the halt to be asserted, return early.
 * Otherwise complain about timeouts but keep going.
 *
 * It is expected that allocations (enabled flag bit) have been shut off
 * already (only applies to kernel contexts).
 */
int sc_restart(struct send_context *sc)
{
	struct hfi1_devdata *dd = sc->dd;
	u64 reg;
	u32 loop;
	int count;

	/* bounce off if not halted, or being free'd */
	if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE))
		return -EINVAL;

	dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
		    sc->hw_context);

	/*
	 * Step 1: Wait for the context to actually halt.
	 *
	 * The error interrupt is asynchronous to actually setting halt
	 * on the context.
	 */
	loop = 0;
	while (1) {
		reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS));
		if (reg & SC(STATUS_CTXT_HALTED_SMASK))
			break;
		if (loop > 100) {
			dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n",
				   __func__, sc->sw_index, sc->hw_context);
			return -ETIME;
		}
		loop++;
		udelay(1);
	}

	/*
	 * Step 2: Ensure no users are still trying to write to PIO.
	 *
	 * For kernel contexts, we have already turned off buffer allocation.
	 * Now wait for the buffer count to go to zero.
	 *
	 * For user contexts, the user handling code has cut off write access
	 * to the context's PIO pages before calling this routine and will
	 * restore write access after this routine returns.
	 */
	if (sc->type != SC_USER) {
		/* kernel context */
		loop = 0;
		while (1) {
			count = get_buffers_allocated(sc);
			if (count == 0)
				break;
			if (loop > 100) {
				dd_dev_err(dd,
					   "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
					   __func__, sc->sw_index,
					   sc->hw_context, count);
			}
			loop++;
			udelay(1);
		}
	}

	/*
	 * Step 3: Wait for all packets to egress.
	 * This is done while disabling the send context
	 *
	 * Step 4: Disable the context
	 *
	 * This is a superset of the halt.  After the disable, the
	 * errors can be cleared.
	 */
	sc_disable(sc);

	/*
	 * Step 5: Enable the context
	 *
	 * This enable will clear the halted flag and per-send context
	 * error flags.
	 */
	return sc_enable(sc);
}

/*
 * PIO freeze processing.  To be called after the TXE block is fully frozen.
 * Go through all frozen send contexts and disable them.  The contexts are
 * already stopped by the freeze.
 */
void pio_freeze(struct hfi1_devdata *dd)
{
	struct send_context *sc;
	int i;

	for (i = 0; i < dd->num_send_contexts; i++) {
		sc = dd->send_contexts[i].sc;
		/*
		 * Don't disable unallocated, unfrozen, or user send contexts.
		 * User send contexts will be disabled when the process
		 * calls into the driver to reset its context.
		 */
		if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
			continue;

		/* only need to disable, the context is already stopped */
		sc_disable(sc);
	}
}

/*
 * Unfreeze PIO for kernel send contexts.  The precondition for calling this
 * is that all PIO send contexts have been disabled and the SPC freeze has
 * been cleared.  Now perform the last step and re-enable each kernel context.
 * User (PSM) processing will occur when PSM calls into the kernel to
 * acknowledge the freeze.
 */
void pio_kernel_unfreeze(struct hfi1_devdata *dd)
{
	struct send_context *sc;
	int i;

	for (i = 0; i < dd->num_send_contexts; i++) {
		sc = dd->send_contexts[i].sc;
		if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
			continue;

		sc_enable(sc);	/* will clear the sc frozen flag */
	}
}

/*
 * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
 * Returns:
 *	-ETIMEDOUT - if we wait too long
 *	-EIO	   - if there was an error
 */
static int pio_init_wait_progress(struct hfi1_devdata *dd)
{
	u64 reg;
	int max, count = 0;

	/* max is the longest possible HW init time / delay */
	max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5;
	while (1) {
		reg = read_csr(dd, SEND_PIO_INIT_CTXT);
		if (!(reg & SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK))
			break;
		if (count >= max)
			return -ETIMEDOUT;
		udelay(5);
		count++;
	}

	return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0;
}

/*
 * Reset all of the send contexts to their power-on state.  Used
 * only during manual init - no lock against sc_enable needed.
 */
void pio_reset_all(struct hfi1_devdata *dd)
{
	int ret;

	/* make sure the init engine is not busy */
	ret = pio_init_wait_progress(dd);
	/* ignore any timeout */
	if (ret == -EIO) {
		/* clear the error */
		write_csr(dd, SEND_PIO_ERR_CLEAR,
			  SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK);
	}

	/* reset init all */
	write_csr(dd, SEND_PIO_INIT_CTXT,
		  SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK);
	udelay(2);
	ret = pio_init_wait_progress(dd);
	if (ret < 0) {
		dd_dev_err(dd,
			   "PIO send context init %s while initializing all PIO blocks\n",
			   ret == -ETIMEDOUT ? "is stuck" : "had an error");
	}
}

/* enable the context */
int sc_enable(struct send_context *sc)
{
	u64 sc_ctrl, reg, pio;
	struct hfi1_devdata *dd;
	unsigned long flags;
	int ret = 0;

	if (!sc)
		return -EINVAL;
	dd = sc->dd;

	/*
	 * Obtain the allocator lock to guard against any allocation
	 * attempts (which should not happen prior to context being
	 * enabled).  On the release/disable side we don't need to
	 * worry about locking since the releaser will not do anything
	 * if the context accounting values have not changed.
	 */
	spin_lock_irqsave(&sc->alloc_lock, flags);
	sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
	if ((sc_ctrl & SC(CTRL_CTXT_ENABLE_SMASK)))
		goto unlock; /* already enabled */

	/* IMPORTANT: only clear free and fill if transitioning 0 -> 1 */

	*sc->hw_free = 0;
	sc->free = 0;
	sc->alloc_free = 0;
	sc->fill = 0;
	sc->sr_head = 0;
	sc->sr_tail = 0;
	sc->flags = 0;
	/* the alloc lock ensures no fast path allocation */
	reset_buffers_allocated(sc);

	/*
	 * Clear all per-context errors.  Some of these will be set when
	 * we are re-enabling after a context halt.  Now that the context
	 * is disabled, the halt will not clear until after the PIO init
	 * engine runs below.
	 */
	reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
	if (reg)
		write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);

	/*
	 * The HW PIO initialization engine can handle only one init
	 * request at a time. Serialize access to each device's engine.
	 */
	spin_lock(&dd->sc_init_lock);
	/*
	 * Since access to this code block is serialized and
	 * each access waits for the initialization to complete
	 * before releasing the lock, the PIO initialization engine
	 * should not be in use, so we don't have to wait for the
	 * InProgress bit to go down.
	 */
	pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) <<
	       SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT) |
		SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK;
	write_csr(dd, SEND_PIO_INIT_CTXT, pio);
	/*
	 * Wait until the engine is done.  Give the chip the required time
	 * so, hopefully, we read the register just once.
	 */
	udelay(2);
	ret = pio_init_wait_progress(dd);
	spin_unlock(&dd->sc_init_lock);
	if (ret) {
		dd_dev_err(dd,
			   "sctxt%u(%u): Context not enabled due to init failure %d\n",
			   sc->sw_index, sc->hw_context, ret);
		goto unlock;
	}

	/*
	 * All is well. Enable the context.
	 */
	sc_ctrl |= SC(CTRL_CTXT_ENABLE_SMASK);
	write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl);
	/*
	 * Read SendCtxtCtrl to force the write out and prevent a timing
	 * hazard where a PIO write may reach the context before the enable.
	 */
	read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
	sc->flags |= SCF_ENABLED;

unlock:
	spin_unlock_irqrestore(&sc->alloc_lock, flags);

	return ret;
}

/* force a credit return on the context */
void sc_return_credits(struct send_context *sc)
{
	if (!sc)
		return;

	/* a 0->1 transition schedules a credit return */
	write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
			SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
	/*
	 * Ensure that the write is flushed and the credit return is
	 * scheduled. We care more about the 0 -> 1 transition.
	 */
	read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
	/* set back to 0 for next time */
	write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
}

/* allow all in-flight packets to drain on the context */
void sc_flush(struct send_context *sc)
{
	if (!sc)
		return;

	sc_wait_for_packet_egress(sc, 1);
}

/* drop all packets on the context, no waiting until they are sent */
void sc_drop(struct send_context *sc)
{
	if (!sc)
		return;

	dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
		    __func__, sc->sw_index, sc->hw_context);
}

/*
 * Start the software reaction to a context halt or SPC freeze:
 *	- mark the context as halted or frozen
 *	- stop buffer allocations
 *
 * Called from the error interrupt.  Other work is deferred until
 * out of the interrupt.
 */
void sc_stop(struct send_context *sc, int flag)
{
	unsigned long flags;

	/* mark the context */
	sc->flags |= flag;

	/* stop buffer allocations */
	spin_lock_irqsave(&sc->alloc_lock, flags);
	sc->flags &= ~SCF_ENABLED;
	spin_unlock_irqrestore(&sc->alloc_lock, flags);
	wake_up(&sc->halt_wait);
}

#define BLOCK_DWORDS (PIO_BLOCK_SIZE / sizeof(u32))
#define dwords_to_blocks(x) DIV_ROUND_UP(x, BLOCK_DWORDS)
| 1370 | |
| 1371 | /* |
| 1372 | * The send context buffer "allocator". |
| 1373 | * |
| 1374 | * @sc: the PIO send context we are allocating from |
| 1375 |  * @dw_len: length of whole packet - including PBC - in dwords |
| 1376 | * @cb: optional callback to call when the buffer is finished sending |
| 1377 | * @arg: argument for cb |
| 1378 | * |
| 1379 | * Return a pointer to a PIO buffer if successful, NULL if not enough room. |
| 1380 | */ |
| 1381 | struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len, |
| 1382 | pio_release_cb cb, void *arg) |
| 1383 | { |
| 1384 | struct pio_buf *pbuf = NULL; |
| 1385 | unsigned long flags; |
| 1386 | unsigned long avail; |
| 1387 | unsigned long blocks = dwords_to_blocks(dw_len); |
| 1388 | unsigned long start_fill; |
| 1389 | int trycount = 0; |
| 1390 | u32 head, next; |
| 1391 | |
| 1392 | spin_lock_irqsave(&sc->alloc_lock, flags); |
| 1393 | if (!(sc->flags & SCF_ENABLED)) { |
| 1394 | spin_unlock_irqrestore(&sc->alloc_lock, flags); |
| 1395 | goto done; |
| 1396 | } |
| 1397 | |
| 1398 | retry: |
| 1399 | avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free); |
| 1400 | if (blocks > avail) { |
| 1401 | /* not enough room */ |
| 1402 | if (unlikely(trycount)) { /* already tried to get more room */ |
| 1403 | spin_unlock_irqrestore(&sc->alloc_lock, flags); |
| 1404 | goto done; |
| 1405 | } |
| 1406 | /* copy from receiver cache line and recalculate */ |
| 1407 | sc->alloc_free = ACCESS_ONCE(sc->free); |
| 1408 | avail = |
| 1409 | (unsigned long)sc->credits - |
| 1410 | (sc->fill - sc->alloc_free); |
| 1411 | if (blocks > avail) { |
| 1412 | /* still no room, actively update */ |
| 1413 | spin_unlock_irqrestore(&sc->alloc_lock, flags); |
| 1414 | sc_release_update(sc); |
| 1415 | spin_lock_irqsave(&sc->alloc_lock, flags); |
| 1416 | sc->alloc_free = ACCESS_ONCE(sc->free); |
| 1417 | trycount++; |
| 1418 | goto retry; |
| 1419 | } |
| 1420 | } |
| 1421 | |
| 1422 | /* there is enough room */ |
| 1423 | |
Mike Marciniszyn | a054374 | 2015-12-07 15:39:22 -0500 | [diff] [blame] | 1424 | preempt_disable(); |
| 1425 | this_cpu_inc(*sc->buffers_allocated); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1426 | |
| 1427 | /* read this once */ |
| 1428 | head = sc->sr_head; |
| 1429 | |
| 1430 | /* "allocate" the buffer */ |
| 1431 | start_fill = sc->fill; |
| 1432 | sc->fill += blocks; |
| 1433 | |
| 1434 | /* |
| 1435 | * Fill the parts that the releaser looks at before moving the head. |
| 1436 | * The only necessary piece is the sent_at field. The credits |
| 1437 | * we have just allocated cannot have been returned yet, so the |
| 1438 | * cb and arg will not be looked at for a "while". Put them |
| 1439 | * on this side of the memory barrier anyway. |
| 1440 | */ |
| 1441 | pbuf = &sc->sr[head].pbuf; |
| 1442 | pbuf->sent_at = sc->fill; |
| 1443 | pbuf->cb = cb; |
| 1444 | pbuf->arg = arg; |
| 1445 | pbuf->sc = sc; /* could be filled in at sc->sr init time */ |
| 1446 | /* make sure this is in memory before updating the head */ |
| 1447 | |
| 1448 | /* calculate next head index, do not store */ |
| 1449 | next = head + 1; |
| 1450 | if (next >= sc->sr_size) |
| 1451 | next = 0; |
Jubin John | 4d114fd | 2016-02-14 20:21:43 -0800 | [diff] [blame] | 1452 | /* |
| 1453 | * update the head - must be last! - the releaser can look at fields |
| 1454 | * in pbuf once we move the head |
| 1455 | */ |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1456 | smp_wmb(); |
| 1457 | sc->sr_head = next; |
| 1458 | spin_unlock_irqrestore(&sc->alloc_lock, flags); |
| 1459 | |
| 1460 | /* finish filling in the buffer outside the lock */ |
| 1461 | pbuf->start = sc->base_addr + ((start_fill % sc->credits) |
| 1462 | * PIO_BLOCK_SIZE); |
| 1463 | pbuf->size = sc->credits * PIO_BLOCK_SIZE; |
| 1464 | pbuf->end = sc->base_addr + pbuf->size; |
| 1465 | pbuf->block_count = blocks; |
| 1466 | pbuf->qw_written = 0; |
| 1467 | pbuf->carry_bytes = 0; |
| 1468 | pbuf->carry.val64 = 0; |
| 1469 | done: |
| 1470 | return pbuf; |
| 1471 | } |
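/*
 * Caller-side sketch (compiled out, not part of the driver): one way a
 * sender might use sc_buffer_alloc() with a release callback.  The names
 * example_send_done(), example_send(), struct my_request and
 * complete_my_request() are hypothetical; real callers go on to copy the
 * PBC and payload into the buffer with the driver's PIO copy helpers,
 * which are not shown here.
 */
#if 0
struct my_request;
void complete_my_request(struct my_request *req, int prc_code);

static void example_send_done(void *arg, int code)
{
	/* code is the PRC_* bitmask built by fill_code() at credit return */
	complete_my_request((struct my_request *)arg, code);
}

static int example_send(struct send_context *sc, struct my_request *req,
			u32 dwords_with_pbc)
{
	struct pio_buf *pbuf;

	pbuf = sc_buffer_alloc(sc, dwords_with_pbc, example_send_done, req);
	if (!pbuf)
		return -EBUSY;	/* no credits now; wait for a credit return */

	/* ... write the PBC and payload into pbuf, then let it egress ... */
	return 0;
}
#endif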
| 1472 | |
| 1473 | /* |
| 1474 | * There are at least two entities that can turn on credit return |
| 1475 | * interrupts and they can overlap. Avoid problems by implementing |
| 1476 | * a count scheme that is enforced by a lock. The lock is needed because |
| 1477 | * the count and CSR write must be paired. |
| 1478 | */ |
| 1479 | |
| 1480 | /* |
| 1481 | * Start credit return interrupts. This is managed by a count. If already |
| 1482 | * on, just increment the count. |
| 1483 | */ |
| 1484 | void sc_add_credit_return_intr(struct send_context *sc) |
| 1485 | { |
| 1486 | unsigned long flags; |
| 1487 | |
| 1488 | /* lock must surround both the count change and the CSR update */ |
| 1489 | spin_lock_irqsave(&sc->credit_ctrl_lock, flags); |
| 1490 | if (sc->credit_intr_count == 0) { |
| 1491 | sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK); |
| 1492 | write_kctxt_csr(sc->dd, sc->hw_context, |
Jubin John | 17fb4f2 | 2016-02-14 20:21:52 -0800 | [diff] [blame] | 1493 | SC(CREDIT_CTRL), sc->credit_ctrl); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1494 | } |
| 1495 | sc->credit_intr_count++; |
| 1496 | spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); |
| 1497 | } |
| 1498 | |
| 1499 | /* |
| 1500 |  * Stop credit return interrupts. This is managed by a count. Decrement the |
| 1501 |  * count; if this was the last user, turn the credit interrupts off. |
| 1502 | */ |
| 1503 | void sc_del_credit_return_intr(struct send_context *sc) |
| 1504 | { |
| 1505 | unsigned long flags; |
| 1506 | |
| 1507 | WARN_ON(sc->credit_intr_count == 0); |
| 1508 | |
| 1509 | /* lock must surround both the count change and the CSR update */ |
| 1510 | spin_lock_irqsave(&sc->credit_ctrl_lock, flags); |
| 1511 | sc->credit_intr_count--; |
| 1512 | if (sc->credit_intr_count == 0) { |
| 1513 | sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK); |
| 1514 | write_kctxt_csr(sc->dd, sc->hw_context, |
Jubin John | 17fb4f2 | 2016-02-14 20:21:52 -0800 | [diff] [blame] | 1515 | SC(CREDIT_CTRL), sc->credit_ctrl); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1516 | } |
| 1517 | spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); |
| 1518 | } |
| 1519 | |
| 1520 | /* |
| 1521 |  * The caller must be careful when calling this. Every needint call |
| 1522 |  * must be paired with a matching !needint call. |
| 1523 | */ |
| 1524 | void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint) |
| 1525 | { |
| 1526 | if (needint) |
| 1527 | sc_add_credit_return_intr(sc); |
| 1528 | else |
| 1529 | sc_del_credit_return_intr(sc); |
| 1530 | trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl); |
| 1531 | if (needint) { |
| 1532 | mmiowb(); |
| 1533 | sc_return_credits(sc); |
| 1534 | } |
| 1535 | } |
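/*
 * Pairing sketch (compiled out): a sender that cannot get a PIO buffer
 * arms the credit-return interrupt and queues itself; the interrupt path
 * (sc_piobufavail() below) later disarms it once waiters are drained,
 * keeping every needint call balanced by a !needint call.  The queueing
 * details here are simplified assumptions, not the actual verbs code.
 */
#if 0
static void example_wait_for_credits(struct send_context *sc)
{
	/* ...queue this sender on sc->piowait under dev->iowait_lock... */
	hfi1_sc_wantpiobuf_intr(sc, 1);		/* count 0 -> 1: intr on */
}

static void example_credits_arrived(struct send_context *sc)
{
	hfi1_sc_wantpiobuf_intr(sc, 0);		/* count 1 -> 0: intr off */
	/* ...wake the queued senders... */
}
#endif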
| 1536 | |
| 1537 | /** |
| 1538 | * sc_piobufavail - callback when a PIO buffer is available |
| 1539 | * @sc: the send context |
| 1540 | * |
| 1541 | * This is called from the interrupt handler when a PIO buffer is |
| 1542 | * available after hfi1_verbs_send() returned an error that no buffers were |
| 1543 | * available. Disable the interrupt if there are no more QPs waiting. |
| 1544 | */ |
| 1545 | static void sc_piobufavail(struct send_context *sc) |
| 1546 | { |
| 1547 | struct hfi1_devdata *dd = sc->dd; |
| 1548 | struct hfi1_ibdev *dev = &dd->verbs_dev; |
| 1549 | struct list_head *list; |
Dennis Dalessandro | 895420d | 2016-01-19 14:42:28 -0800 | [diff] [blame] | 1550 | struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE]; |
| 1551 | struct rvt_qp *qp; |
Dennis Dalessandro | 4c6829c | 2016-01-19 14:42:00 -0800 | [diff] [blame] | 1552 | struct hfi1_qp_priv *priv; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1553 | unsigned long flags; |
| 1554 | unsigned i, n = 0; |
| 1555 | |
Jianxin Xiong | 44306f1 | 2016-04-12 11:30:28 -0700 | [diff] [blame] | 1556 | if (dd->send_contexts[sc->sw_index].type != SC_KERNEL && |
| 1557 | dd->send_contexts[sc->sw_index].type != SC_VL15) |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1558 | return; |
| 1559 | list = &sc->piowait; |
| 1560 | /* |
| 1561 | * Note: checking that the piowait list is empty and clearing |
| 1562 | * the buffer available interrupt needs to be atomic or we |
| 1563 | * could end up with QPs on the wait list with the interrupt |
| 1564 | * disabled. |
| 1565 | */ |
| 1566 | write_seqlock_irqsave(&dev->iowait_lock, flags); |
| 1567 | while (!list_empty(list)) { |
| 1568 | struct iowait *wait; |
| 1569 | |
| 1570 | if (n == ARRAY_SIZE(qps)) |
Mike Marciniszyn | 60df295 | 2016-03-07 11:35:30 -0800 | [diff] [blame] | 1571 | break; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1572 | wait = list_first_entry(list, struct iowait, list); |
Dennis Dalessandro | 4c6829c | 2016-01-19 14:42:00 -0800 | [diff] [blame] | 1573 | qp = iowait_to_qp(wait); |
| 1574 | priv = qp->priv; |
| 1575 | list_del_init(&priv->s_iowait.list); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1576 | /* refcount held until actual wake up */ |
| 1577 | qps[n++] = qp; |
| 1578 | } |
| 1579 | /* |
Mike Marciniszyn | 60df295 | 2016-03-07 11:35:30 -0800 | [diff] [blame] | 1580 |  * If there had been waiters and there are more, |
| 1581 |  * ensure that we redo the force to avoid a potential hang. |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1582 | */ |
Mike Marciniszyn | 60df295 | 2016-03-07 11:35:30 -0800 | [diff] [blame] | 1583 | if (n) { |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1584 | hfi1_sc_wantpiobuf_intr(sc, 0); |
Mike Marciniszyn | 60df295 | 2016-03-07 11:35:30 -0800 | [diff] [blame] | 1585 | if (!list_empty(list)) |
| 1586 | hfi1_sc_wantpiobuf_intr(sc, 1); |
| 1587 | } |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1588 | write_sequnlock_irqrestore(&dev->iowait_lock, flags); |
| 1589 | |
| 1590 | for (i = 0; i < n; i++) |
Mike Marciniszyn | 14553ca | 2016-02-14 12:45:36 -0800 | [diff] [blame] | 1591 | hfi1_qp_wakeup(qps[i], |
| 1592 | RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1593 | } |
| 1594 | |
| 1595 | /* translate a send credit update to a bit code of reasons */ |
| 1596 | static inline int fill_code(u64 hw_free) |
| 1597 | { |
| 1598 | int code = 0; |
| 1599 | |
| 1600 | if (hw_free & CR_STATUS_SMASK) |
| 1601 | code |= PRC_STATUS_ERR; |
| 1602 | if (hw_free & CR_CREDIT_RETURN_DUE_TO_PBC_SMASK) |
| 1603 | code |= PRC_PBC; |
| 1604 | if (hw_free & CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK) |
| 1605 | code |= PRC_THRESHOLD; |
| 1606 | if (hw_free & CR_CREDIT_RETURN_DUE_TO_ERR_SMASK) |
| 1607 | code |= PRC_FILL_ERR; |
| 1608 | if (hw_free & CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK) |
| 1609 | code |= PRC_SC_DISABLE; |
| 1610 | return code; |
| 1611 | } |
| 1612 | |
| 1613 | /* use the jiffies compare to get the wrap right */ |
| 1614 | #define sent_before(a, b) time_before(a, b) /* a < b */ |
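/*
 * Wrap illustration (compiled out): sent_before() uses the jiffies
 * helpers, which compare via a signed difference so the result stays
 * correct when the running counters wrap.  32-bit values are used below
 * only to keep the example short.
 */
#if 0
static bool example_sent_before(u32 a, u32 b)
{
	return (s32)(a - b) < 0;	/* a < b, modulo 2^32 */
}
/*
 * example_sent_before(0xfffffff0, 0x00000010) is true: after a wrap,
 * 0xfffffff0 was "sent before" 0x00000010 even though it is numerically
 * larger.
 */
#endif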
| 1615 | |
| 1616 | /* |
| 1617 | * The send context buffer "releaser". |
| 1618 | */ |
| 1619 | void sc_release_update(struct send_context *sc) |
| 1620 | { |
| 1621 | struct pio_buf *pbuf; |
| 1622 | u64 hw_free; |
| 1623 | u32 head, tail; |
| 1624 | unsigned long old_free; |
Mike Marciniszyn | e607a22 | 2015-12-03 14:34:18 -0500 | [diff] [blame] | 1625 | unsigned long free; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1626 | unsigned long extra; |
| 1627 | unsigned long flags; |
| 1628 | int code; |
| 1629 | |
| 1630 | if (!sc) |
| 1631 | return; |
| 1632 | |
| 1633 | spin_lock_irqsave(&sc->release_lock, flags); |
| 1634 | /* update free */ |
| 1635 | hw_free = le64_to_cpu(*sc->hw_free); /* volatile read */ |
| 1636 | old_free = sc->free; |
| 1637 | extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT) |
| 1638 | - (old_free & CR_COUNTER_MASK)) |
| 1639 | & CR_COUNTER_MASK; |
Mike Marciniszyn | e607a22 | 2015-12-03 14:34:18 -0500 | [diff] [blame] | 1640 | free = old_free + extra; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1641 | trace_hfi1_piofree(sc, extra); |
| 1642 | |
| 1643 | /* call sent buffer callbacks */ |
| 1644 | code = -1; /* code not yet set */ |
| 1645 | head = ACCESS_ONCE(sc->sr_head); /* snapshot the head */ |
| 1646 | tail = sc->sr_tail; |
| 1647 | while (head != tail) { |
| 1648 | pbuf = &sc->sr[tail].pbuf; |
| 1649 | |
Mike Marciniszyn | e607a22 | 2015-12-03 14:34:18 -0500 | [diff] [blame] | 1650 | if (sent_before(free, pbuf->sent_at)) { |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1651 | /* not sent yet */ |
| 1652 | break; |
| 1653 | } |
| 1654 | if (pbuf->cb) { |
| 1655 | if (code < 0) /* fill in code on first user */ |
| 1656 | code = fill_code(hw_free); |
| 1657 | (*pbuf->cb)(pbuf->arg, code); |
| 1658 | } |
| 1659 | |
| 1660 | tail++; |
| 1661 | if (tail >= sc->sr_size) |
| 1662 | tail = 0; |
| 1663 | } |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1664 | sc->sr_tail = tail; |
Mike Marciniszyn | e607a22 | 2015-12-03 14:34:18 -0500 | [diff] [blame] | 1665 | /* make sure tail is updated before free */ |
| 1666 | smp_wmb(); |
| 1667 | sc->free = free; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1668 | spin_unlock_irqrestore(&sc->release_lock, flags); |
| 1669 | sc_piobufavail(sc); |
| 1670 | } |
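/*
 * Counter-delta sketch (compiled out): the hardware free counter in
 * *sc->hw_free is only CR_COUNTER wide, so sc_release_update() computes
 * the delta modulo that field width and adds it to the driver's
 * full-width running 'free'.  The 16-bit width below is illustrative
 * only.
 */
#if 0
static unsigned long example_advance_free(unsigned long old_free,
					  unsigned long hw_counter)
{
	const unsigned long mask = 0xffff;	/* illustrative field width */
	unsigned long extra = (hw_counter - (old_free & mask)) & mask;

	return old_free + extra;	/* correct even across a wrap */
}
/* e.g. old_free = 0x1fff0, hw_counter = 0x10 -> extra = 0x20, free = 0x20010 */
#endif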
| 1671 | |
| 1672 | /* |
| 1673 | * Send context group releaser. Argument is the send context that caused |
| 1674 | * the interrupt. Called from the send context interrupt handler. |
| 1675 | * |
| 1676 | * Call release on all contexts in the group. |
| 1677 | * |
| 1678 | * This routine takes the sc_lock without an irqsave because it is only |
| 1679 | * called from an interrupt handler. Adjust if that changes. |
| 1680 | */ |
| 1681 | void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context) |
| 1682 | { |
| 1683 | struct send_context *sc; |
| 1684 | u32 sw_index; |
| 1685 | u32 gc, gc_end; |
| 1686 | |
| 1687 | spin_lock(&dd->sc_lock); |
| 1688 | sw_index = dd->hw_to_sw[hw_context]; |
| 1689 | if (unlikely(sw_index >= dd->num_send_contexts)) { |
| 1690 | dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n", |
Jubin John | 17fb4f2 | 2016-02-14 20:21:52 -0800 | [diff] [blame] | 1691 | __func__, hw_context, sw_index); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1692 | goto done; |
| 1693 | } |
| 1694 | sc = dd->send_contexts[sw_index].sc; |
| 1695 | if (unlikely(!sc)) |
| 1696 | goto done; |
| 1697 | |
| 1698 | gc = group_context(hw_context, sc->group); |
| 1699 | gc_end = gc + group_size(sc->group); |
| 1700 | for (; gc < gc_end; gc++) { |
| 1701 | sw_index = dd->hw_to_sw[gc]; |
| 1702 | if (unlikely(sw_index >= dd->num_send_contexts)) { |
| 1703 | dd_dev_err(dd, |
Jubin John | 17fb4f2 | 2016-02-14 20:21:52 -0800 | [diff] [blame] | 1704 | "%s: invalid hw (%u) to sw (%u) mapping\n", |
| 1705 | __func__, hw_context, sw_index); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1706 | continue; |
| 1707 | } |
| 1708 | sc_release_update(dd->send_contexts[sw_index].sc); |
| 1709 | } |
| 1710 | done: |
| 1711 | spin_unlock(&dd->sc_lock); |
| 1712 | } |
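/*
 * Example (assuming group_context() rounds a hardware context down to its
 * group base and group_size() returns the group width; both helpers are
 * defined in pio.h and not shown in this file): a credit-return interrupt
 * for hw_context 6 on a context with sc->group == 2 walks gc = 4..7 and
 * calls sc_release_update() on every mapped send context in that group.
 */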
| 1713 | |
Jubin John | 35f6bef | 2016-02-14 12:46:10 -0800 | [diff] [blame] | 1714 | /* |
| 1715 | * pio_select_send_context_vl() - select send context |
| 1716 | * @dd: devdata |
| 1717 | * @selector: a spreading factor |
| 1718 | * @vl: this vl |
| 1719 | * |
| 1720 | * This function returns a send context based on the selector and a vl. |
| 1721 | * The mapping fields are protected by RCU |
| 1722 | */ |
| 1723 | struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd, |
| 1724 | u32 selector, u8 vl) |
| 1725 | { |
| 1726 | struct pio_vl_map *m; |
| 1727 | struct pio_map_elem *e; |
| 1728 | struct send_context *rval; |
| 1729 | |
| 1730 | /* |
| 1731 |  * NOTE: This should only happen if SC->VL changed after the initial |
| 1732 |  * checks on the QP/AH. |
| 1733 |  * The default below returns VL0's send context. |
| 1734 | */ |
| 1735 | if (unlikely(vl >= num_vls)) { |
| 1736 | rval = NULL; |
| 1737 | goto done; |
| 1738 | } |
| 1739 | |
| 1740 | rcu_read_lock(); |
| 1741 | m = rcu_dereference(dd->pio_map); |
| 1742 | if (unlikely(!m)) { |
| 1743 | rcu_read_unlock(); |
| 1744 | return dd->vld[0].sc; |
| 1745 | } |
| 1746 | e = m->map[vl & m->mask]; |
| 1747 | rval = e->ksc[selector & e->mask]; |
| 1748 | rcu_read_unlock(); |
| 1749 | |
| 1750 | done: |
| 1751 | rval = !rval ? dd->vld[0].sc : rval; |
| 1752 | return rval; |
| 1753 | } |
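/*
 * Lookup example (illustrative numbers only): with num_vls == 5 the vl
 * map is rounded up to 8 entries (m->mask == 0x7), and a VL that owns 3
 * kernel send contexts gets a 4-entry ksc[] (e->mask == 0x3) whose last
 * slot reuses that VL's first context, so 'selector & 0x3' spreads
 * traffic across the 3 contexts with the first weighted slightly higher.
 */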
| 1754 | |
| 1755 | /* |
| 1756 | * pio_select_send_context_sc() - select send context |
| 1757 | * @dd: devdata |
| 1758 | * @selector: a spreading factor |
| 1759 | * @sc5: the 5 bit sc |
| 1760 | * |
| 1761 |  * This function returns a send context based on the selector and an sc. |
| 1762 | */ |
| 1763 | struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd, |
| 1764 | u32 selector, u8 sc5) |
| 1765 | { |
| 1766 | u8 vl = sc_to_vlt(dd, sc5); |
| 1767 | |
| 1768 | return pio_select_send_context_vl(dd, selector, vl); |
| 1769 | } |
| 1770 | |
| 1771 | /* |
| 1772 | * Free the indicated map struct |
| 1773 | */ |
| 1774 | static void pio_map_free(struct pio_vl_map *m) |
| 1775 | { |
| 1776 | int i; |
| 1777 | |
| 1778 | for (i = 0; m && i < m->actual_vls; i++) |
| 1779 | kfree(m->map[i]); |
| 1780 | kfree(m); |
| 1781 | } |
| 1782 | |
| 1783 | /* |
| 1784 | * Handle RCU callback |
| 1785 | */ |
| 1786 | static void pio_map_rcu_callback(struct rcu_head *list) |
| 1787 | { |
| 1788 | struct pio_vl_map *m = container_of(list, struct pio_vl_map, list); |
| 1789 | |
| 1790 | pio_map_free(m); |
| 1791 | } |
| 1792 | |
| 1793 | /* |
Jubin John | b4ba663 | 2016-06-09 07:51:08 -0700 | [diff] [blame] | 1794 | * Set credit return threshold for the kernel send context |
| 1795 | */ |
| 1796 | static void set_threshold(struct hfi1_devdata *dd, int scontext, int i) |
| 1797 | { |
| 1798 | u32 thres; |
| 1799 | |
| 1800 | thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext], |
| 1801 | 50), |
| 1802 | sc_mtu_to_threshold(dd->kernel_send_context[scontext], |
| 1803 | dd->vld[i].mtu, |
| 1804 | dd->rcd[0]->rcvhdrqentsize)); |
| 1805 | sc_set_cr_threshold(dd->kernel_send_context[scontext], thres); |
| 1806 | } |
| 1807 | |
| 1808 | /* |
Jubin John | 35f6bef | 2016-02-14 12:46:10 -0800 | [diff] [blame] | 1809 | * pio_map_init - called when #vls change |
| 1810 | * @dd: hfi1_devdata |
| 1811 | * @port: port number |
| 1812 | * @num_vls: number of vls |
| 1813 | * @vl_scontexts: per vl send context mapping (optional) |
| 1814 | * |
| 1815 | * This routine changes the mapping based on the number of vls. |
| 1816 | * |
| 1817 | * vl_scontexts is used to specify a non-uniform vl/send context |
| 1818 | * loading. NULL implies auto computing the loading and giving each |
| 1819 |  * VL a uniform distribution of send contexts per VL. |
| 1820 | * |
| 1821 |  * The auto algorithm computes the sc_per_vl and the number of extra |
| 1822 |  * send contexts. Any extra send contexts are added from the last VL |
| 1823 |  * on down (a worked sizing example follows this function). |
| 1824 | * |
| 1825 | * rcu locking is used here to control access to the mapping fields. |
| 1826 | * |
| 1827 | * If either the num_vls or num_send_contexts are non-power of 2, the |
| 1828 | * array sizes in the struct pio_vl_map and the struct pio_map_elem are |
| 1829 | * rounded up to the next highest power of 2 and the first entry is |
| 1830 | * reused in a round robin fashion. |
| 1831 | * |
| 1832 |  * If an error occurs, the map change is not done and the mapping is not |
| 1833 |  * changed. |
| 1834 | * |
| 1835 | */ |
| 1836 | int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts) |
| 1837 | { |
| 1838 | int i, j; |
| 1839 | int extra, sc_per_vl; |
| 1840 | int scontext = 1; |
| 1841 | int num_kernel_send_contexts = 0; |
| 1842 | u8 lvl_scontexts[OPA_MAX_VLS]; |
| 1843 | struct pio_vl_map *oldmap, *newmap; |
| 1844 | |
| 1845 | if (!vl_scontexts) { |
Jubin John | f158486 | 2016-05-24 12:51:06 -0700 | [diff] [blame] | 1846 | for (i = 0; i < dd->num_send_contexts; i++) |
Jubin John | 35f6bef | 2016-02-14 12:46:10 -0800 | [diff] [blame] | 1847 | if (dd->send_contexts[i].type == SC_KERNEL) |
| 1848 | num_kernel_send_contexts++; |
| 1849 | /* truncate divide */ |
| 1850 | sc_per_vl = num_kernel_send_contexts / num_vls; |
| 1851 | /* extras */ |
| 1852 | extra = num_kernel_send_contexts % num_vls; |
| 1853 | vl_scontexts = lvl_scontexts; |
| 1854 | /* add extras from last vl down */ |
| 1855 | for (i = num_vls - 1; i >= 0; i--, extra--) |
| 1856 | vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0); |
| 1857 | } |
| 1858 | /* build new map */ |
| 1859 | newmap = kzalloc(sizeof(*newmap) + |
| 1860 | roundup_pow_of_two(num_vls) * |
| 1861 | sizeof(struct pio_map_elem *), |
| 1862 | GFP_KERNEL); |
| 1863 | if (!newmap) |
| 1864 | goto bail; |
| 1865 | newmap->actual_vls = num_vls; |
| 1866 | newmap->vls = roundup_pow_of_two(num_vls); |
| 1867 | newmap->mask = (1 << ilog2(newmap->vls)) - 1; |
| 1868 | for (i = 0; i < newmap->vls; i++) { |
| 1869 | /* save for wrap around */ |
| 1870 | int first_scontext = scontext; |
| 1871 | |
| 1872 | if (i < newmap->actual_vls) { |
| 1873 | int sz = roundup_pow_of_two(vl_scontexts[i]); |
| 1874 | |
| 1875 | /* only allocate once */ |
| 1876 | newmap->map[i] = kzalloc(sizeof(*newmap->map[i]) + |
| 1877 | sz * sizeof(struct |
| 1878 | send_context *), |
| 1879 | GFP_KERNEL); |
| 1880 | if (!newmap->map[i]) |
| 1881 | goto bail; |
| 1882 | newmap->map[i]->mask = (1 << ilog2(sz)) - 1; |
Jubin John | b4ba663 | 2016-06-09 07:51:08 -0700 | [diff] [blame] | 1883 | /* |
| 1884 | * assign send contexts and |
| 1885 | * adjust credit return threshold |
| 1886 | */ |
Jubin John | 35f6bef | 2016-02-14 12:46:10 -0800 | [diff] [blame] | 1887 | for (j = 0; j < sz; j++) { |
Jubin John | b4ba663 | 2016-06-09 07:51:08 -0700 | [diff] [blame] | 1888 | if (dd->kernel_send_context[scontext]) { |
Jubin John | 35f6bef | 2016-02-14 12:46:10 -0800 | [diff] [blame] | 1889 | newmap->map[i]->ksc[j] = |
| 1890 | dd->kernel_send_context[scontext]; |
Jubin John | b4ba663 | 2016-06-09 07:51:08 -0700 | [diff] [blame] | 1891 | set_threshold(dd, scontext, i); |
| 1892 | } |
Jubin John | 35f6bef | 2016-02-14 12:46:10 -0800 | [diff] [blame] | 1893 | if (++scontext >= first_scontext + |
| 1894 | vl_scontexts[i]) |
| 1895 | /* wrap back to first send context */ |
| 1896 | scontext = first_scontext; |
| 1897 | } |
| 1898 | } else { |
| 1899 | /* just re-use entry without allocating */ |
| 1900 | newmap->map[i] = newmap->map[i % num_vls]; |
| 1901 | } |
| 1902 | scontext = first_scontext + vl_scontexts[i]; |
| 1903 | } |
| 1904 | /* newmap in hand, save old map */ |
| 1905 | spin_lock_irq(&dd->pio_map_lock); |
| 1906 | oldmap = rcu_dereference_protected(dd->pio_map, |
| 1907 | lockdep_is_held(&dd->pio_map_lock)); |
| 1908 | |
| 1909 | /* publish newmap */ |
| 1910 | rcu_assign_pointer(dd->pio_map, newmap); |
| 1911 | |
| 1912 | spin_unlock_irq(&dd->pio_map_lock); |
| 1913 | /* success, free any old map after grace period */ |
| 1914 | if (oldmap) |
| 1915 | call_rcu(&oldmap->list, pio_map_rcu_callback); |
| 1916 | return 0; |
| 1917 | bail: |
| 1918 | /* free any partial allocation */ |
| 1919 | pio_map_free(newmap); |
| 1920 | return -ENOMEM; |
| 1921 | } |
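/*
 * Worked example of the auto distribution above (illustrative numbers):
 * with 16 kernel send contexts and num_vls == 5, sc_per_vl == 3 and
 * extra == 1, so vl_scontexts[] becomes { 3, 3, 3, 3, 4 }: the single
 * extra context goes to the last VL.  num_vls is rounded up to 8 map
 * entries and entries 5..7 simply reuse map[0..2]; each per-VL ksc[] is
 * rounded up to a power of two (3 -> 4) with the short tail wrapping
 * back to that VL's first context.
 */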
| 1922 | |
| 1923 | void free_pio_map(struct hfi1_devdata *dd) |
| 1924 | { |
| 1925 | /* Free PIO map if allocated */ |
| 1926 | if (rcu_access_pointer(dd->pio_map)) { |
| 1927 | spin_lock_irq(&dd->pio_map_lock); |
Jubin John | 79d0c08 | 2016-02-26 13:33:33 -0800 | [diff] [blame] | 1928 | pio_map_free(rcu_access_pointer(dd->pio_map)); |
Jubin John | 35f6bef | 2016-02-14 12:46:10 -0800 | [diff] [blame] | 1929 | RCU_INIT_POINTER(dd->pio_map, NULL); |
| 1930 | spin_unlock_irq(&dd->pio_map_lock); |
| 1931 | synchronize_rcu(); |
| 1932 | } |
| 1933 | kfree(dd->kernel_send_context); |
| 1934 | dd->kernel_send_context = NULL; |
| 1935 | } |
| 1936 | |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1937 | int init_pervl_scs(struct hfi1_devdata *dd) |
| 1938 | { |
| 1939 | int i; |
Jubin John | 35f6bef | 2016-02-14 12:46:10 -0800 | [diff] [blame] | 1940 | u64 mask, all_vl_mask = (u64)0x80ff; /* VLs 0-7, 15 */ |
| 1941 | u64 data_vls_mask = (u64)0x00ff; /* VLs 0-7 */ |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1942 | u32 ctxt; |
Jubin John | 35f6bef | 2016-02-14 12:46:10 -0800 | [diff] [blame] | 1943 | struct hfi1_pportdata *ppd = dd->pport; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1944 | |
Jianxin Xiong | 44306f1 | 2016-04-12 11:30:28 -0700 | [diff] [blame] | 1945 | dd->vld[15].sc = sc_alloc(dd, SC_VL15, |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1946 | dd->rcd[0]->rcvhdrqentsize, dd->node); |
| 1947 | if (!dd->vld[15].sc) |
Ira Weiny | 042b015 | 2016-07-27 21:06:15 -0400 | [diff] [blame] | 1948 | return -ENOMEM; |
| 1949 | |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1950 | hfi1_init_ctxt(dd->vld[15].sc); |
| 1951 | dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048); |
Jubin John | 35f6bef | 2016-02-14 12:46:10 -0800 | [diff] [blame] | 1952 | |
Tymoteusz Kielan | 1b23f02 | 2016-07-25 13:38:01 -0700 | [diff] [blame] | 1953 | dd->kernel_send_context = kzalloc_node(dd->num_send_contexts * |
Jubin John | 35f6bef | 2016-02-14 12:46:10 -0800 | [diff] [blame] | 1954 | sizeof(struct send_context *), |
| 1955 | GFP_KERNEL, dd->node); |
Ira Weiny | 042b015 | 2016-07-27 21:06:15 -0400 | [diff] [blame] | 1956 | if (!dd->kernel_send_context) |
| 1957 | goto freesc15; |
| 1958 | |
Jubin John | 35f6bef | 2016-02-14 12:46:10 -0800 | [diff] [blame] | 1959 | dd->kernel_send_context[0] = dd->vld[15].sc; |
| 1960 | |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1961 | for (i = 0; i < num_vls; i++) { |
| 1962 | /* |
| 1963 | * Since this function does not deal with a specific |
| 1964 | * receive context but we need the RcvHdrQ entry size, |
| 1965 | * use the size from rcd[0]. It is guaranteed to be |
| 1966 | * valid at this point and will remain the same for all |
| 1967 | * receive contexts. |
| 1968 | */ |
| 1969 | dd->vld[i].sc = sc_alloc(dd, SC_KERNEL, |
| 1970 | dd->rcd[0]->rcvhdrqentsize, dd->node); |
| 1971 | if (!dd->vld[i].sc) |
| 1972 | goto nomem; |
Jubin John | 35f6bef | 2016-02-14 12:46:10 -0800 | [diff] [blame] | 1973 | dd->kernel_send_context[i + 1] = dd->vld[i].sc; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1974 | hfi1_init_ctxt(dd->vld[i].sc); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1975 | /* non VL15 start with the max MTU */ |
| 1976 | dd->vld[i].mtu = hfi1_max_mtu; |
| 1977 | } |
Jubin John | 35f6bef | 2016-02-14 12:46:10 -0800 | [diff] [blame] | 1978 | for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) { |
| 1979 | dd->kernel_send_context[i + 1] = |
| 1980 | sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node); |
| 1981 | if (!dd->kernel_send_context[i + 1]) |
| 1982 | goto nomem; |
| 1983 | hfi1_init_ctxt(dd->kernel_send_context[i + 1]); |
| 1984 | } |
| 1985 | |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1986 | sc_enable(dd->vld[15].sc); |
| 1987 | ctxt = dd->vld[15].sc->hw_context; |
| 1988 | mask = all_vl_mask & ~(1LL << 15); |
| 1989 | write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); |
| 1990 | dd_dev_info(dd, |
| 1991 | "Using send context %u(%u) for VL15\n", |
| 1992 | dd->vld[15].sc->sw_index, ctxt); |
Jubin John | 35f6bef | 2016-02-14 12:46:10 -0800 | [diff] [blame] | 1993 | |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1994 | for (i = 0; i < num_vls; i++) { |
| 1995 | sc_enable(dd->vld[i].sc); |
| 1996 | ctxt = dd->vld[i].sc->hw_context; |
Jubin John | 35f6bef | 2016-02-14 12:46:10 -0800 | [diff] [blame] | 1997 | mask = all_vl_mask & ~(data_vls_mask); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 1998 | write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); |
| 1999 | } |
Jubin John | 35f6bef | 2016-02-14 12:46:10 -0800 | [diff] [blame] | 2000 | for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) { |
| 2001 | sc_enable(dd->kernel_send_context[i + 1]); |
| 2002 | ctxt = dd->kernel_send_context[i + 1]->hw_context; |
| 2003 | mask = all_vl_mask & ~(data_vls_mask); |
| 2004 | write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); |
| 2005 | } |
| 2006 | |
| 2007 | if (pio_map_init(dd, ppd->port - 1, num_vls, NULL)) |
| 2008 | goto nomem; |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 2009 | return 0; |
Ira Weiny | 042b015 | 2016-07-27 21:06:15 -0400 | [diff] [blame] | 2010 | |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 2011 | nomem: |
Ira Weiny | 042b015 | 2016-07-27 21:06:15 -0400 | [diff] [blame] | 2012 | for (i = 0; i < num_vls; i++) { |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 2013 | sc_free(dd->vld[i].sc); |
Ira Weiny | 042b015 | 2016-07-27 21:06:15 -0400 | [diff] [blame] | 2014 | dd->vld[i].sc = NULL; |
| 2015 | } |
| 2016 | |
Jubin John | 35f6bef | 2016-02-14 12:46:10 -0800 | [diff] [blame] | 2017 | for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) |
| 2018 | sc_free(dd->kernel_send_context[i + 1]); |
Ira Weiny | 042b015 | 2016-07-27 21:06:15 -0400 | [diff] [blame] | 2019 | |
| 2020 | kfree(dd->kernel_send_context); |
| 2021 | dd->kernel_send_context = NULL; |
| 2022 | |
| 2023 | freesc15: |
| 2024 | sc_free(dd->vld[15].sc); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 2025 | return -ENOMEM; |
| 2026 | } |
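/*
 * Resulting kernel_send_context[] layout (as built above, with num_vls
 * data VLs and INIT_SC_PER_VL kernel contexts per VL):
 *   [0]                              VL15 context (dd->vld[15].sc)
 *   [1 .. num_vls]                   one context per data VL (dd->vld[i].sc)
 *   [num_vls + 1 ..
 *    INIT_SC_PER_VL * num_vls]       extra kernel contexts, handed to
 *                                    pio_map_init() for per-VL spreading
 */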
| 2027 | |
| 2028 | int init_credit_return(struct hfi1_devdata *dd) |
| 2029 | { |
| 2030 | int ret; |
| 2031 | int num_numa; |
| 2032 | int i; |
| 2033 | |
| 2034 | num_numa = num_online_nodes(); |
| 2035 | /* enforce the expectation that the numas are compact */ |
| 2036 | for (i = 0; i < num_numa; i++) { |
| 2037 | if (!node_online(i)) { |
| 2038 | dd_dev_err(dd, "NUMA nodes are not compact\n"); |
| 2039 | ret = -EINVAL; |
| 2040 | goto done; |
| 2041 | } |
| 2042 | } |
| 2043 | |
| 2044 | dd->cr_base = kcalloc( |
| 2045 | num_numa, |
| 2046 | sizeof(struct credit_return_base), |
| 2047 | GFP_KERNEL); |
| 2048 | if (!dd->cr_base) { |
| 2049 | dd_dev_err(dd, "Unable to allocate credit return base\n"); |
| 2050 | ret = -ENOMEM; |
| 2051 | goto done; |
| 2052 | } |
| 2053 | for (i = 0; i < num_numa; i++) { |
| 2054 | int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return); |
| 2055 | |
| 2056 | set_dev_node(&dd->pcidev->dev, i); |
| 2057 | dd->cr_base[i].va = dma_zalloc_coherent( |
| 2058 | &dd->pcidev->dev, |
| 2059 | bytes, |
Tymoteusz Kielan | 6036818 | 2016-09-06 04:35:54 -0700 | [diff] [blame] | 2060 | &dd->cr_base[i].dma, |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 2061 | GFP_KERNEL); |
Jubin John | d125a6c | 2016-02-14 20:19:49 -0800 | [diff] [blame] | 2062 | if (!dd->cr_base[i].va) { |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 2063 | set_dev_node(&dd->pcidev->dev, dd->node); |
| 2064 | dd_dev_err(dd, |
Jubin John | 17fb4f2 | 2016-02-14 20:21:52 -0800 | [diff] [blame] | 2065 | "Unable to allocate credit return DMA range for NUMA %d\n", |
| 2066 | i); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 2067 | ret = -ENOMEM; |
| 2068 | goto done; |
| 2069 | } |
| 2070 | } |
| 2071 | set_dev_node(&dd->pcidev->dev, dd->node); |
| 2072 | |
| 2073 | ret = 0; |
| 2074 | done: |
| 2075 | return ret; |
| 2076 | } |
| 2077 | |
| 2078 | void free_credit_return(struct hfi1_devdata *dd) |
| 2079 | { |
| 2080 | int num_numa; |
| 2081 | int i; |
| 2082 | |
| 2083 | if (!dd->cr_base) |
| 2084 | return; |
| 2085 | |
| 2086 | num_numa = num_online_nodes(); |
| 2087 | for (i = 0; i < num_numa; i++) { |
| 2088 | if (dd->cr_base[i].va) { |
| 2089 | dma_free_coherent(&dd->pcidev->dev, |
Jubin John | 17fb4f2 | 2016-02-14 20:21:52 -0800 | [diff] [blame] | 2090 | TXE_NUM_CONTEXTS * |
| 2091 | sizeof(struct credit_return), |
| 2092 | dd->cr_base[i].va, |
Tymoteusz Kielan | 6036818 | 2016-09-06 04:35:54 -0700 | [diff] [blame] | 2093 | dd->cr_base[i].dma); |
Mike Marciniszyn | 7724105 | 2015-07-30 15:17:43 -0400 | [diff] [blame] | 2094 | } |
| 2095 | } |
| 2096 | kfree(dd->cr_base); |
| 2097 | dd->cr_base = NULL; |
| 2098 | } |