/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>

#include "qib.h"
#include "qib_mad.h"

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
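/*
 * Codes 1-31 give delays from 10 us up to 491.52 ms, growing roughly
 * geometrically (from code 2 on, the delay doubles every two codes);
 * code 0 encodes the largest delay of all, 655.36 ms.
 */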
const u32 ib_qib_rnr_table[32] = {
        655360, /* 00: 655.36 */
        10,     /* 01: .01 */
        20,     /* 02: .02 */
        30,     /* 03: .03 */
        40,     /* 04: .04 */
        60,     /* 05: .06 */
        80,     /* 06: .08 */
        120,    /* 07: .12 */
        160,    /* 08: .16 */
        240,    /* 09: .24 */
        320,    /* 0A: .32 */
        480,    /* 0B: .48 */
        640,    /* 0C: .64 */
        960,    /* 0D: .96 */
        1280,   /* 0E: 1.28 */
        1920,   /* 0F: 1.92 */
        2560,   /* 10: 2.56 */
        3840,   /* 11: 3.84 */
        5120,   /* 12: 5.12 */
        7680,   /* 13: 7.68 */
        10240,  /* 14: 10.24 */
        15360,  /* 15: 15.36 */
        20480,  /* 16: 20.48 */
        30720,  /* 17: 30.72 */
        40960,  /* 18: 40.96 */
        61440,  /* 19: 61.44 */
        81920,  /* 1A: 81.92 */
        122880, /* 1B: 122.88 */
        163840, /* 1C: 163.84 */
        245760, /* 1D: 245.76 */
        327680, /* 1E: 327.68 */
        491520  /* 1F: 491.52 */
};

/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
{
        int i, j, ret;
        struct ib_wc wc;
        struct qib_lkey_table *rkt;
        struct qib_pd *pd;
        struct qib_sge_state *ss;

        rkt = &to_idev(qp->ibqp.device)->lk_table;
        pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
        ss = &qp->r_sge;
        ss->sg_list = qp->r_sg_list;
        qp->r_len = 0;
        for (i = j = 0; i < wqe->num_sge; i++) {
                if (wqe->sg_list[i].length == 0)
                        continue;
                /* Check LKEY */
                if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
                                 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
                        goto bad_lkey;
                qp->r_len += wqe->sg_list[i].length;
                j++;
        }
        ss->num_sge = j;
        ss->total_len = qp->r_len;
        ret = 1;
        goto bail;

bad_lkey:
        while (j) {
                struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

                qib_put_mr(sge->mr);
        }
        ss->num_sge = 0;
        memset(&wc, 0, sizeof(wc));
        wc.wr_id = wqe->wr_id;
        wc.status = IB_WC_LOC_PROT_ERR;
        wc.opcode = IB_WC_RECV;
        wc.qp = &qp->ibqp;
        /* Signal solicited completion event. */
        qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
        ret = 0;
bail:
        return ret;
}

/**
 * qib_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
{
        unsigned long flags;
        struct qib_rq *rq;
        struct qib_rwq *wq;
        struct qib_srq *srq;
        struct qib_rwqe *wqe;
        void (*handler)(struct ib_event *, void *);
        u32 tail;
        int ret;

        if (qp->ibqp.srq) {
                srq = to_isrq(qp->ibqp.srq);
                handler = srq->ibsrq.event_handler;
                rq = &srq->rq;
        } else {
                srq = NULL;
                handler = NULL;
                rq = &qp->r_rq;
        }

        spin_lock_irqsave(&rq->lock, flags);
        if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
                ret = 0;
                goto unlock;
        }

        wq = rq->wq;
        tail = wq->tail;
        /* Validate tail before using it since it is user writable. */
        if (tail >= rq->size)
                tail = 0;
        if (unlikely(tail == wq->head)) {
                ret = 0;
                goto unlock;
        }
        /* Make sure entry is read after head index is read. */
        smp_rmb();
        wqe = get_rwqe_ptr(rq, tail);
        /*
         * Even though we update the tail index in memory, the verbs
         * consumer is not supposed to post more entries until a
         * completion is generated.
         */
        if (++tail >= rq->size)
                tail = 0;
        wq->tail = tail;
        if (!wr_id_only && !qib_init_sge(qp, wqe)) {
                ret = -1;
                goto unlock;
        }
        qp->r_wr_id = wqe->wr_id;

        ret = 1;
        set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
        if (handler) {
                u32 n;

                /*
                 * Validate head pointer value and compute
                 * the number of remaining WQEs.
                 */
                n = wq->head;
                if (n >= rq->size)
                        n = 0;
                if (n < tail)
                        n += rq->size - tail;
                else
                        n -= tail;
                if (n < srq->limit) {
                        struct ib_event ev;

                        srq->limit = 0;
                        spin_unlock_irqrestore(&rq->lock, flags);
                        ev.device = qp->ibqp.device;
                        ev.element.srq = qp->ibqp.srq;
                        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        handler(&ev, srq->ibsrq.srq_context);
                        goto bail;
                }
        }
unlock:
        spin_unlock_irqrestore(&rq->lock, flags);
bail:
        return ret;
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct qib_qp *qp)
{
        struct ib_event ev;

        qp->s_mig_state = IB_MIG_MIGRATED;
        qp->remote_ah_attr = qp->alt_ah_attr;
        qp->port_num = qp->alt_ah_attr.port_num;
        qp->s_pkey_index = qp->s_alt_pkey_index;

        ev.device = qp->ibqp.device;
        ev.element.qp = &qp->ibqp;
        ev.event = IB_EVENT_PATH_MIG;
        qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

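/*
 * Index 0 refers to the port GUID itself; higher indices select an
 * entry from the port's table of additional GUIDs.
 */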
static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
        if (!index) {
                struct qib_pportdata *ppd = ppd_from_ibp(ibp);

                return ppd->guid;
        } else
                return ibp->guids[index - 1];
}

static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
        return (gid->global.interface_id == id &&
                (gid->global.subnet_prefix == gid_prefix ||
                 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the qib_migrate_qp() call.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                      int has_grh, struct qib_qp *qp, u32 bth0)
{
        __be64 guid;
        unsigned long flags;

        if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
                if (!has_grh) {
                        if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
                                goto err;
                } else {
                        if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
                                goto err;
                        guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
                        if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
                                goto err;
                        if (!gid_ok(&hdr->u.l.grh.sgid,
                            qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
                            qp->alt_ah_attr.grh.dgid.global.interface_id))
                                goto err;
                }
                if (!qib_pkey_ok((u16)bth0,
                                 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
                        qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
                                      (u16)bth0,
                                      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
                                      0, qp->ibqp.qp_num,
                                      hdr->lrh[3], hdr->lrh[1]);
                        goto err;
                }
                /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
                if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
                    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
                        goto err;
                spin_lock_irqsave(&qp->s_lock, flags);
                qib_migrate_qp(qp);
                spin_unlock_irqrestore(&qp->s_lock, flags);
        } else {
                if (!has_grh) {
                        if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
                                goto err;
                } else {
                        if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
                                goto err;
                        guid = get_sguid(ibp,
                                         qp->remote_ah_attr.grh.sgid_index);
                        if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
                                goto err;
                        if (!gid_ok(&hdr->u.l.grh.sgid,
                            qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
                            qp->remote_ah_attr.grh.dgid.global.interface_id))
                                goto err;
                }
                if (!qib_pkey_ok((u16)bth0,
                                 qib_get_pkey(ibp, qp->s_pkey_index))) {
                        qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
                                      (u16)bth0,
                                      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
                                      0, qp->ibqp.qp_num,
                                      hdr->lrh[3], hdr->lrh[1]);
                        goto err;
                }
                /* Validate the SLID. See Ch. 9.6.1.5 */
                if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
                    ppd_from_ibp(ibp)->port != qp->port_num)
                        goto err;
                if (qp->s_mig_state == IB_MIG_REARM &&
                    !(bth0 & IB_BTH_MIG_REQ))
                        qp->s_mig_state = IB_MIG_ARMED;
        }

        return 0;

err:
        return 1;
}

/**
 * qib_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from qib_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void qib_ruc_loopback(struct qib_qp *sqp)
{
        struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
        struct qib_qp *qp;
        struct qib_swqe *wqe;
        struct qib_sge *sge;
        unsigned long flags;
        struct ib_wc wc;
        u64 sdata;
        atomic64_t *maddr;
        enum ib_wc_status send_status;
        int release;
        int ret;

        /*
         * Note that we check the responder QP state after
         * checking the requester's state.
         */
        qp = qib_lookup_qpn(ibp, sqp->remote_qpn);

        spin_lock_irqsave(&sqp->s_lock, flags);

        /* Return if we are already busy processing a work request. */
        if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
            !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
                goto unlock;

        sqp->s_flags |= QIB_S_BUSY;

again:
        if (sqp->s_last == sqp->s_head)
                goto clr_busy;
        wqe = get_swqe_ptr(sqp, sqp->s_last);

        /* Return if it is not OK to start a new work request. */
        if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
                if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
                        goto clr_busy;
                /* We are in the error state, flush the work request. */
                send_status = IB_WC_WR_FLUSH_ERR;
                goto flush_send;
        }

        /*
         * We can rely on the entry not changing without the s_lock
         * being held until we update s_last.
         * We increment s_cur to indicate s_last is in progress.
         */
        if (sqp->s_last == sqp->s_cur) {
                if (++sqp->s_cur >= sqp->s_size)
                        sqp->s_cur = 0;
        }
        spin_unlock_irqrestore(&sqp->s_lock, flags);

        if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
            qp->ibqp.qp_type != sqp->ibqp.qp_type) {
                ibp->n_pkt_drops++;
                /*
                 * For RC, the requester would timeout and retry so
                 * shortcut the timeouts and just signal too many retries.
                 */
                if (sqp->ibqp.qp_type == IB_QPT_RC)
                        send_status = IB_WC_RETRY_EXC_ERR;
                else
                        send_status = IB_WC_SUCCESS;
                goto serr;
        }

        memset(&wc, 0, sizeof wc);
        send_status = IB_WC_SUCCESS;

        release = 1;
        sqp->s_sge.sge = wqe->sg_list[0];
        sqp->s_sge.sg_list = wqe->sg_list + 1;
        sqp->s_sge.num_sge = wqe->wr.num_sge;
        sqp->s_len = wqe->length;
        switch (wqe->wr.opcode) {
        case IB_WR_SEND_WITH_IMM:
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.ex.imm_data = wqe->wr.ex.imm_data;
                /* FALLTHROUGH */
        case IB_WR_SEND:
                ret = qib_get_rwqe(qp, 0);
                if (ret < 0)
                        goto op_err;
                if (!ret)
                        goto rnr_nak;
                break;

        case IB_WR_RDMA_WRITE_WITH_IMM:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
                        goto inv_err;
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.ex.imm_data = wqe->wr.ex.imm_data;
                ret = qib_get_rwqe(qp, 1);
                if (ret < 0)
                        goto op_err;
                if (!ret)
                        goto rnr_nak;
                /* FALLTHROUGH */
        case IB_WR_RDMA_WRITE:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
                        goto inv_err;
                if (wqe->length == 0)
                        break;
                if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
                                          wqe->wr.wr.rdma.remote_addr,
                                          wqe->wr.wr.rdma.rkey,
                                          IB_ACCESS_REMOTE_WRITE)))
                        goto acc_err;
                qp->r_sge.sg_list = NULL;
                qp->r_sge.num_sge = 1;
                qp->r_sge.total_len = wqe->length;
                break;

        case IB_WR_RDMA_READ:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
                        goto inv_err;
                if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
                                          wqe->wr.wr.rdma.remote_addr,
                                          wqe->wr.wr.rdma.rkey,
                                          IB_ACCESS_REMOTE_READ)))
                        goto acc_err;
                release = 0;
                sqp->s_sge.sg_list = NULL;
                sqp->s_sge.num_sge = 1;
                qp->r_sge.sge = wqe->sg_list[0];
                qp->r_sge.sg_list = wqe->sg_list + 1;
                qp->r_sge.num_sge = wqe->wr.num_sge;
                qp->r_sge.total_len = wqe->length;
                break;

        case IB_WR_ATOMIC_CMP_AND_SWP:
        case IB_WR_ATOMIC_FETCH_AND_ADD:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
                        goto inv_err;
                if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
                                          wqe->wr.wr.atomic.remote_addr,
                                          wqe->wr.wr.atomic.rkey,
                                          IB_ACCESS_REMOTE_ATOMIC)))
                        goto acc_err;
                /* Perform atomic OP and save result. */
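                /*
                 * Fetch-and-add uses atomic64_add_return() and recovers
                 * the pre-add value by subtracting sdata again;
                 * compare-and-swap uses cmpxchg(), which returns the
                 * prior contents whether or not the swap took place.
                 */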
                maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
                sdata = wqe->wr.wr.atomic.compare_add;
                *(u64 *) sqp->s_sge.sge.vaddr =
                        (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
                        (u64) atomic64_add_return(sdata, maddr) - sdata :
                        (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
                                      sdata, wqe->wr.wr.atomic.swap);
                qib_put_mr(qp->r_sge.sge.mr);
                qp->r_sge.num_sge = 0;
                goto send_comp;

        default:
                send_status = IB_WC_LOC_QP_OP_ERR;
                goto serr;
        }

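        /*
         * Copy the payload a contiguous chunk at a time, bounded by the
         * remaining message length and the current source SGE, advancing
         * the source and destination SGE state as we go.
         */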
        sge = &sqp->s_sge.sge;
        while (sqp->s_len) {
                u32 len = sqp->s_len;

                if (len > sge->length)
                        len = sge->length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (!release)
                                qib_put_mr(sge->mr);
                        if (--sqp->s_sge.num_sge)
                                *sge = *sqp->s_sge.sg_list++;
                } else if (sge->length == 0 && sge->mr->lkey) {
                        if (++sge->n >= QIB_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                sqp->s_len -= len;
        }
        if (release)
                qib_put_ss(&qp->r_sge);

        if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
                goto send_comp;

        if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
                wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
        else
                wc.opcode = IB_WC_RECV;
        wc.wr_id = qp->r_wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.byte_len = wqe->length;
        wc.qp = &qp->ibqp;
        wc.src_qp = qp->remote_qpn;
        wc.slid = qp->remote_ah_attr.dlid;
        wc.sl = qp->remote_ah_attr.sl;
        wc.port_num = 1;
        /* Signal completion event if the solicited bit is set. */
        qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
        spin_lock_irqsave(&sqp->s_lock, flags);
        ibp->n_loop_pkts++;
flush_send:
        sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
        qib_send_complete(sqp, wqe, send_status);
        goto again;

rnr_nak:
        /* Handle RNR NAK */
        if (qp->ibqp.qp_type == IB_QPT_UC)
                goto send_comp;
        ibp->n_rnr_naks++;
        /*
         * Note: we don't need the s_lock held since the BUSY flag
         * makes this single threaded.
         */
        if (sqp->s_rnr_retry == 0) {
                send_status = IB_WC_RNR_RETRY_EXC_ERR;
                goto serr;
        }
        if (sqp->s_rnr_retry_cnt < 7)
                sqp->s_rnr_retry--;
        spin_lock_irqsave(&sqp->s_lock, flags);
        if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
                goto clr_busy;
        sqp->s_flags |= QIB_S_WAIT_RNR;
        sqp->s_timer.function = qib_rc_rnr_retry;
        sqp->s_timer.expires = jiffies +
                usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
        add_timer(&sqp->s_timer);
        goto clr_busy;

op_err:
        send_status = IB_WC_REM_OP_ERR;
        wc.status = IB_WC_LOC_QP_OP_ERR;
        goto err;

inv_err:
        send_status = IB_WC_REM_INV_REQ_ERR;
        wc.status = IB_WC_LOC_QP_OP_ERR;
        goto err;

acc_err:
        send_status = IB_WC_REM_ACCESS_ERR;
        wc.status = IB_WC_LOC_PROT_ERR;
err:
        /* responder goes to error state */
        qib_rc_error(qp, wc.status);

serr:
        spin_lock_irqsave(&sqp->s_lock, flags);
        qib_send_complete(sqp, wqe, send_status);
        if (sqp->ibqp.qp_type == IB_QPT_RC) {
                int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

                sqp->s_flags &= ~QIB_S_BUSY;
                spin_unlock_irqrestore(&sqp->s_lock, flags);
                if (lastwqe) {
                        struct ib_event ev;

                        ev.device = sqp->ibqp.device;
                        ev.element.qp = &sqp->ibqp;
                        ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
                }
                goto done;
        }
clr_busy:
        sqp->s_flags &= ~QIB_S_BUSY;
unlock:
        spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
        if (qp && atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}

/**
 * qib_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
                 struct ib_global_route *grh, u32 hwords, u32 nwords)
{
        hdr->version_tclass_flow =
                cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
                            (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
                            (grh->flow_label << IB_GRH_FLOW_SHIFT));
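        /*
         * The payload length is in bytes and covers everything after the
         * GRH: hwords minus the 2-dword LRH gives the remaining header
         * words (the GRH itself is not yet counted in hwords here), plus
         * the data words and the CRC word.
         */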
        hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
        /* next_hdr is defined by C8-7 in ch. 8.4.1 */
        hdr->next_hdr = IB_GRH_NEXT_HDR;
        hdr->hop_limit = grh->hop_limit;
        /* The SGID is 32-bit aligned. */
        hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
        hdr->sgid.global.interface_id = grh->sgid_index ?
                ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
        hdr->dgid = grh->dgid;

        /* GRH header size in 32-bit words. */
        return sizeof(struct ib_grh) / sizeof(u32);
}

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
                         u32 bth0, u32 bth2)
{
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        u16 lrh0;
        u32 nwords;
        u32 extra_bytes;

        /* Construct the header. */
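        /*
         * -s_cur_size & 3 is the number of pad bytes needed to round the
         * payload up to a 4-byte boundary; nwords is then the padded
         * payload length in 32-bit words.
         */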
        extra_bytes = -qp->s_cur_size & 3;
        nwords = (qp->s_cur_size + extra_bytes) >> 2;
        lrh0 = QIB_LRH_BTH;
        if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
                qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh,
                                               &qp->remote_ah_attr.grh,
                                               qp->s_hdrwords, nwords);
                lrh0 = QIB_LRH_GRH;
        }
        lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
                qp->remote_ah_attr.sl << 4;
        qp->s_hdr->lrh[0] = cpu_to_be16(lrh0);
        qp->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
        qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
        qp->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
                                        qp->remote_ah_attr.src_path_bits);
        bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
        bth0 |= extra_bytes << 20;
        if (qp->s_mig_state == IB_MIG_MIGRATED)
                bth0 |= IB_BTH_MIG_REQ;
        ohdr->bth[0] = cpu_to_be32(bth0);
        ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
        ohdr->bth[2] = cpu_to_be32(bth2);
}

/**
 * qib_do_send - perform a send on a QP
 * @work: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void qib_do_send(struct work_struct *work)
{
        struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        int (*make_req)(struct qib_qp *qp);
        unsigned long flags;

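        /*
         * A connected (RC or UC) destination whose DLID matches our own
         * port LID, ignoring the low-order path bits covered by the LMC,
         * never reaches the wire; hand it to the loopback path instead.
         */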
        if ((qp->ibqp.qp_type == IB_QPT_RC ||
             qp->ibqp.qp_type == IB_QPT_UC) &&
            (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
                qib_ruc_loopback(qp);
                return;
        }

        if (qp->ibqp.qp_type == IB_QPT_RC)
                make_req = qib_make_rc_req;
        else if (qp->ibqp.qp_type == IB_QPT_UC)
                make_req = qib_make_uc_req;
        else
                make_req = qib_make_ud_req;

        spin_lock_irqsave(&qp->s_lock, flags);

        /* Return if we are already busy processing a work request. */
        if (!qib_send_ok(qp)) {
                spin_unlock_irqrestore(&qp->s_lock, flags);
                return;
        }

        qp->s_flags |= QIB_S_BUSY;

        spin_unlock_irqrestore(&qp->s_lock, flags);

        do {
                /* Check for a constructed packet to be sent. */
                if (qp->s_hdrwords != 0) {
                        /*
                         * If the packet cannot be sent now, return and
                         * the send tasklet will be woken up later.
                         */
                        if (qib_verbs_send(qp, qp->s_hdr, qp->s_hdrwords,
                                           qp->s_cur_sge, qp->s_cur_size))
                                break;
                        /* Record that s_hdr is empty. */
                        qp->s_hdrwords = 0;
                }
        } while (make_req(qp));
}

/*
 * This should be called with s_lock held.
 */
void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
                       enum ib_wc_status status)
{
        u32 old_last, last;
        unsigned i;

        if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
                return;

        for (i = 0; i < wqe->wr.num_sge; i++) {
                struct qib_sge *sge = &wqe->sg_list[i];

                qib_put_mr(sge->mr);
        }
        if (qp->ibqp.qp_type == IB_QPT_UD ||
            qp->ibqp.qp_type == IB_QPT_SMI ||
            qp->ibqp.qp_type == IB_QPT_GSI)
                atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);

        /* See ch. 11.2.4.1 and 10.7.3.1 */
        if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
            (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
            status != IB_WC_SUCCESS) {
                struct ib_wc wc;

                memset(&wc, 0, sizeof wc);
                wc.wr_id = wqe->wr.wr_id;
                wc.status = status;
                wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
                wc.qp = &qp->ibqp;
                if (status == IB_WC_SUCCESS)
                        wc.byte_len = wqe->length;
                qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
                             status != IB_WC_SUCCESS);
        }

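        /*
         * Advance s_last past the completed WQE; any other ring index
         * (s_acked, s_cur, s_tail) still pointing at that slot moves
         * forward with it.
         */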
        last = qp->s_last;
        old_last = last;
        if (++last >= qp->s_size)
                last = 0;
        qp->s_last = last;
        if (qp->s_acked == old_last)
                qp->s_acked = last;
        if (qp->s_cur == old_last)
                qp->s_cur = last;
        if (qp->s_tail == old_last)
                qp->s_tail = last;
        if (qp->state == IB_QPS_SQD && last == qp->s_cur)
                qp->s_draining = 0;
}