/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"

/*
 * Validate an RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int qib_init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
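	/* Drop the MR references taken for the j SGEs validated so far. */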
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}

/**
 * qib_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct rvt_rq *rq;
	struct rvt_rwq *wq;
	struct rvt_srq *srq;
	struct rvt_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = rvt_get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !qib_init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
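		/* n == (head - tail) mod size: RWQEs still posted on the SRQ. */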
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct rvt_qp *qp)
{
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

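/* Index 0 returns the port GUID; higher indices come from the table of additional GUIDs. */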
static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
	if (!index) {
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		return ppd->guid;
	}
	return ibp->guids[index - 1];
}

static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the qib_migrate_qp() call.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
		      int has_grh, struct rvt_qp *qp, u32 bth0)
{
	__be64 guid;
	unsigned long flags;

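	/*
	 * If the packet requests path migration and we are armed for it,
	 * validate the header against the alternate path and migrate;
	 * otherwise validate against the current (primary) path.
	 */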
	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid,
				    ibp->rvp.gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->alt_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
			goto err;
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!has_grh) {
			if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp,
					 qp->remote_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid,
				    ibp->rvp.gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->remote_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}

/**
 * qib_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from qib_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void qib_ruc_loopback(struct rvt_qp *sqp)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	int release;
	int ret;

	rcu_read_lock();
	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = rvt_lookup_qpn(rdi, &ibp->rvp, sqp->remote_qpn);
	if (!qp)
		goto done;

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	smp_read_barrier_depends(); /* see post_one_send() */
	if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->rvp.n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = 1;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		ret = qib_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = qib_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = 0;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
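		/*
		 * atomic64_add_return() yields the post-add value, so
		 * subtract sdata to report the original contents;
		 * cmpxchg() already returns the prior value.
		 */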
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *) sqp->s_sge.sge.vaddr =
			(wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

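	/* Copy the payload a chunk at a time, advancing the source SGE state. */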
	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				rvt_put_mr(sge->mr);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->rvp.n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	qib_send_complete(sqp, wqe, send_status);
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->rvp.n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
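	/* An RNR retry count of 7 means "retry indefinitely", so don't decrement it. */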
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
				IB_AETH_CREDIT_SHIFT);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	rvt_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	qib_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}

/**
 * qib_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
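	/*
	 * hwords counts the header from the LRH onward and does not yet
	 * include the GRH itself, so dropping the 2-word LRH leaves the
	 * number of 32-bit words following the GRH (remaining headers,
	 * payload and ICRC).
	 */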
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	hdr->sgid.global.interface_id = grh->sgid_index ?
		ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}

void qib_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
			 u32 bth0, u32 bth2)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;

	/* Construct the header. */
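	/* -size & 3 is the pad needed to round the payload up to 4 bytes. */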
	extra_bytes = -qp->s_cur_size & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = QIB_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
					       &qp->remote_ah_attr.grh,
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
	}
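	/* LRH word 0: VL in bits 15:12, SL in bits 7:4, LNH (set above) in bits 1:0. */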
	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
		qp->remote_ah_attr.sl << 4;
	priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
	priv->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	priv->s_hdr->lrh[2] =
		cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	priv->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
				  qp->remote_ah_attr.src_path_bits);
	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);
	this_cpu_inc(ibp->pmastats->n_unicast_xmit);
}

void _qib_do_send(struct work_struct *work)
{
	struct qib_qp_priv *priv = container_of(work, struct qib_qp_priv,
						s_work);
	struct rvt_qp *qp = priv->owner;

	qib_do_send(qp);
}

/**
 * qib_do_send - perform a send on a QP
 * @qp: pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void qib_do_send(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int (*make_req)(struct rvt_qp *qp, unsigned long *flags);
	unsigned long flags;

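	/*
	 * Mask off the LMC bits: a DLID that matches one of our own
	 * LIDs means the packet loops back within this HCA.
	 */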
	if ((qp->ibqp.qp_type == IB_QPT_RC ||
	     qp->ibqp.qp_type == IB_QPT_UC) &&
	    (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
		qib_ruc_loopback(qp);
		return;
	}

	if (qp->ibqp.qp_type == IB_QPT_RC)
		make_req = qib_make_rc_req;
	else if (qp->ibqp.qp_type == IB_QPT_UC)
		make_req = qib_make_uc_req;
	else
		make_req = qib_make_ud_req;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if (!qib_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	qp->s_flags |= RVT_S_BUSY;

	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			/*
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 */
			if (qib_verbs_send(qp, priv->s_hdr, qp->s_hdrwords,
					   qp->s_cur_sge, qp->s_cur_size))
				return;
			/* Record that s_hdr is empty. */
			qp->s_hdrwords = 0;
			spin_lock_irqsave(&qp->s_lock, flags);
		}
	} while (make_req(qp, &flags));

	spin_unlock_irqrestore(&qp->s_lock, flags);
}

/*
 * This should be called with s_lock held.
 */
void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	last = qp->s_last;
	old_last = last;
	if (++last >= qp->s_size)
		last = 0;
	qp->s_last = last;
	/* See post_send() */
	barrier();
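	/*
	 * Once s_last has advanced, post_send() may reuse this WQE slot,
	 * so order the index update before dropping our references.
	 */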
	rvt_put_swqe(wqe);
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

	rvt_qp_swqe_complete(qp,
			     wqe,
			     ib_qib_wc_opcode[wqe->wr.opcode],
			     status);

	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}