/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 * (The comment on each entry gives the delay in milliseconds.)
 */
const u32 ib_qib_rnr_table[32] = {
	655360,	/* 00: 655.36 */
	10,	/* 01: .01 */
	20,	/* 02: .02 */
	30,	/* 03: .03 */
	40,	/* 04: .04 */
	60,	/* 05: .06 */
	80,	/* 06: .08 */
	120,	/* 07: .12 */
	160,	/* 08: .16 */
	240,	/* 09: .24 */
	320,	/* 0A: .32 */
	480,	/* 0B: .48 */
	640,	/* 0C: .64 */
	960,	/* 0D: .96 */
	1280,	/* 0E: 1.28 */
	1920,	/* 0F: 1.92 */
	2560,	/* 10: 2.56 */
	3840,	/* 11: 3.84 */
	5120,	/* 12: 5.12 */
	7680,	/* 13: 7.68 */
	10240,	/* 14: 10.24 */
	15360,	/* 15: 15.36 */
	20480,	/* 16: 20.48 */
	30720,	/* 17: 30.72 */
	40960,	/* 18: 40.96 */
	61440,	/* 19: 61.44 */
	81920,	/* 1A: 81.92 */
	122880,	/* 1B: 122.88 */
	163840,	/* 1C: 163.84 */
	245760,	/* 1D: 245.76 */
	327680,	/* 1E: 327.68 */
	491520	/* 1F: 491.52 */
};
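
/*
 * Illustrative sketch (not driver code): a 5-bit RNR NAK timer code
 * taken from a received AETH can be converted into a wakeup time with
 * this table, e.g.
 *
 *	u32 usecs = ib_qib_rnr_table[aeth_code & 0x1f];
 *	unsigned long expires = jiffies + usecs_to_jiffies(usecs);
 *
 * where "aeth_code" is a hypothetical name for the extracted field.
 * qib_ruc_loopback() below indexes the table with the responder's
 * r_min_rnr_timer code in exactly this way when arming sqp->s_timer.
 */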

/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int qib_init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

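/*
 * Error path: undo the LKEY references already taken for this WQE and
 * report a protection error to the receive CQ.
 */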
bad_lkey:
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}

/**
 * qib_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct rvt_rq *rq;
	struct rvt_rwq *wq;
	struct rvt_srq *srq;
	struct rvt_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !qib_init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}
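
/*
 * Worked example for the SRQ limit check above (illustrative): with
 * rq->size = 8, wq->head = 2 and tail = 6 after the dequeue, the
 * remaining occupancy is n = 2 + (8 - 6) = 4 posted-but-unconsumed
 * WQEs; IB_EVENT_SRQ_LIMIT_REACHED fires only if that 4 has dropped
 * below srq->limit.
 */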

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct rvt_qp *qp)
{
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
	if (!index) {
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		return ppd->guid;
	}
	return ibp->guids[index - 1];
}

static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}
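
/*
 * Note on gid_ok(): a GID carrying the link-local default prefix
 * (IB_DEFAULT_GID_PREFIX, fe80::/64) is accepted even after the SM
 * has assigned a different subnet prefix, so default-prefixed and
 * SM-assigned GIDs both pass the check.
 */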

/*
 * qib_ruc_check_hdr - check that a received packet's GIDs, PKEY and
 * SLID match the QP's primary or alternate path.  Returns 0 if the
 * header is acceptable, 1 on error.
 *
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the qib_migrate_qp() call.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		      int has_grh, struct rvt_qp *qp, u32 bth0)
{
	__be64 guid;
	unsigned long flags;

	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid,
				    ibp->rvp.gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->alt_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
			goto err;
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!has_grh) {
			if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp,
					 qp->remote_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid,
				    ibp->rvp.gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->remote_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}

/**
 * qib_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from qib_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void qib_ruc_loopback(struct rvt_qp *sqp)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	int release;
	int ret;

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = qib_lookup_qpn(ibp, sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	if (sqp->s_last == sqp->s_head)
		goto clr_busy;
	wqe = get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->rvp.n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = 1;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		ret = qib_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = qib_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = 0;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *) sqp->s_sge.sge.vaddr =
			(wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

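	/*
	 * Copy the payload one SGE fragment at a time from sqp's send
	 * scatter list into qp's receive SGE state.  "release" selects
	 * which side holds rkey-mapped references: for RDMA READ
	 * (release == 0) the source MRs are dropped as each SGE drains,
	 * otherwise the destination references are put after the copy.
	 */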
	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				rvt_put_mr(sge->mr);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		qib_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->rvp.n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	qib_send_complete(sqp, wqe, send_status);
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->rvp.n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
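	/*
	 * An RNR retry count of 7 means "retry forever"; only finite
	 * counts are decremented.
	 */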
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
		goto clr_busy;
	sqp->s_flags |= RVT_S_WAIT_RNR;
	sqp->s_timer.function = qib_rc_rnr_retry;
	sqp->s_timer.expires = jiffies +
		usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
	add_timer(&sqp->s_timer);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	qib_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	qib_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * qib_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
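	/*
	 * PayLen counts everything that follows the GRH in bytes: the
	 * BTH and any later headers (hwords minus the two-word LRH),
	 * the payload (nwords) and the ICRC, converted from 32-bit
	 * words to bytes by the << 2.
	 */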
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	hdr->sgid.global.interface_id = grh->sgid_index ?
		ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}

void qib_make_ruc_header(struct rvt_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;

	/* Construct the header. */
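	/*
	 * -size & 3 yields the number of pad bytes needed to round the
	 * payload up to a 4-byte boundary (e.g. a 5-byte payload needs
	 * 3 pad bytes).
	 */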
	extra_bytes = -qp->s_cur_size & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = QIB_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
					       &qp->remote_ah_attr.grh,
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
	}
	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
		qp->remote_ah_attr.sl << 4;
	priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
	priv->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	priv->s_hdr->lrh[2] =
		cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	priv->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
				  qp->remote_ah_attr.src_path_bits);
	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);
	this_cpu_inc(ibp->pmastats->n_unicast_xmit);
}

/**
 * qib_do_send - perform a send on a QP
 * @work: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void qib_do_send(struct work_struct *work)
{
	struct qib_qp_priv *priv = container_of(work, struct qib_qp_priv,
						s_work);
	struct rvt_qp *qp = priv->owner;
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int (*make_req)(struct rvt_qp *qp);
	unsigned long flags;

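	/*
	 * Masking off the low LMC bits of the DLID leaves the base LID;
	 * if that equals our own port LID, the destination is this HCA
	 * and the request is handled as a software loopback.
	 */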
	if ((qp->ibqp.qp_type == IB_QPT_RC ||
	     qp->ibqp.qp_type == IB_QPT_UC) &&
	    (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
		qib_ruc_loopback(qp);
		return;
	}

	if (qp->ibqp.qp_type == IB_QPT_RC)
		make_req = qib_make_rc_req;
	else if (qp->ibqp.qp_type == IB_QPT_UC)
		make_req = qib_make_uc_req;
	else
		make_req = qib_make_ud_req;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if (!qib_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	qp->s_flags |= RVT_S_BUSY;

	spin_unlock_irqrestore(&qp->s_lock, flags);

	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			/*
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 */
			if (qib_verbs_send(qp, priv->s_hdr, qp->s_hdrwords,
					   qp->s_cur_sge, qp->s_cur_size))
				break;
			/* Record that s_hdr is empty. */
			qp->s_hdrwords = 0;
		}
	} while (make_req(qp));
}

/*
 * This should be called with s_lock held.
 */
void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;
	unsigned i;

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
		return;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

	/* See ch. 11.2.4.1 and 10.7.3.1 */
	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof(wc));
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
		wc.qp = &qp->ibqp;
		if (status == IB_WC_SUCCESS)
			wc.byte_len = wqe->length;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}

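	/*
	 * Advance s_last past the completed WQE and drag any other
	 * queue indices that still point at the old slot along with it
	 * so the send engine and retry logic stay consistent.
	 */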
	last = qp->s_last;
	old_last = last;
	if (++last >= qp->s_size)
		last = 0;
	qp->s_last = last;
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}
826}