/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/spinlock.h>

#include "hfi.h"
#include "mad.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				  NULL, &wqe->sg_list[i],
				  IB_ACCESS_LOCAL_WRITE);
		if (unlikely(ret <= 0))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}

/**
 * hfi1_rvt_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct rvt_rq *rq;
	struct rvt_rwq *wq;
	struct rvt_srq *srq;
	struct rvt_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = rvt_get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}
205
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 * hfi1_ruc_check_hdr - validate the header of an incoming packet
 * against the QP's address, partition, and migration state
 *
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the hfi1_migrate_qp() call.
 */
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
{
	__be64 guid;
	unsigned long flags;
	struct rvt_qp *qp = packet->qp;
	u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	u32 dlid = packet->dlid;
	u32 slid = packet->slid;
	u32 sl = packet->sl;
	int migrated;
	u32 bth0, bth1;
	u16 pkey;

	bth0 = be32_to_cpu(packet->ohdr->bth[0]);
	bth1 = be32_to_cpu(packet->ohdr->bth[1]);
	if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		pkey = hfi1_16B_get_pkey(packet->hdr);
		migrated = bth1 & OPA_BTH_MIG_REQ;
	} else {
		pkey = ib_bth_get_pkey(packet->ohdr);
		migrated = bth0 & IB_BTH_MIG_REQ;
	}

	if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
		if (!packet->grh) {
			if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			     IB_AH_GRH) &&
			    (packet->etype != RHF_RCV_TYPE_BYPASS))
				return 1;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			      IB_AH_GRH))
				return 1;
			grh = rdma_ah_read_grh(&qp->alt_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
				    guid))
				return 1;
			if (!gid_ok(
				&packet->grh->sgid,
				grh->dgid.global.subnet_prefix,
				grh->dgid.global.interface_id))
				return 1;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
					    sc5, slid))) {
			hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
				      slid, dlid);
			return 1;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||
		    ppd_from_ibp(ibp)->port !=
			rdma_ah_get_port_num(&qp->alt_ah_attr))
			return 1;
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!packet->grh) {
			if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			     IB_AH_GRH) &&
			    (packet->etype != RHF_RCV_TYPE_BYPASS))
				return 1;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			      IB_AH_GRH))
				return 1;
			grh = rdma_ah_read_grh(&qp->remote_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
				    guid))
				return 1;
			if (!gid_ok(
				&packet->grh->sgid,
				grh->dgid.global.subnet_prefix,
				grh->dgid.global.interface_id))
				return 1;
		}
		if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
					    sc5, slid))) {
			hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
				      slid, dlid);
			return 1;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if ((slid != rdma_ah_get_dlid(&qp->remote_ah_attr)) ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			return 1;
		if (qp->s_mig_state == IB_MIG_REARM && !migrated)
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;
}

/**
 * ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from hfi1_do_send() to
 * forward a WQE addressed to the same HFI.
 * Note that although we are single threaded due to the send engine, we still
 * have to protect against post_send(). We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void ruc_loopback(struct rvt_qp *sqp)
{
	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	bool release;
	int ret;
	bool copy_last = false;
	int local_ops = 0;

	rcu_read_lock();

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
			    sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	smp_read_barrier_depends(); /* see post_one_send() */
	if (sqp->s_last == READ_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->rvp.n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = true;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_REG_MR:
		goto send_comp;

	case IB_WR_LOCAL_INV:
		if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
			if (rvt_invalidate_rkey(sqp,
						wqe->wr.ex.invalidate_rkey))
				send_status = IB_WC_LOC_PROT_ERR;
			local_ops = 1;
		}
		goto send_comp;

	case IB_WR_SEND_WITH_INV:
		if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
			wc.wc_flags = IB_WC_WITH_INVALIDATE;
			wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
		}
		goto send;

	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
send:
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* skip copy_last set and qp_access_flags recheck */
		goto do_write;
	case IB_WR_RDMA_WRITE:
		copy_last = rvt_is_user_qp(qp);
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
do_write:
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = false;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *)sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release, copy_last);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				rvt_put_mr(sge->mr);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->rvp.n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	hfi1_send_complete(sqp, wqe, send_status);
	if (local_ops) {
		atomic_dec(&sqp->local_ops_pending);
		local_ops = 0;
	}
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->rvp.n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
				IB_AETH_CREDIT_SHIFT);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	rvt_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	hfi1_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}

/**
 * hfi1_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the size, in dwords, of the header that follows the GRH
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
		  const struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords + nwords) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	hdr->sgid.global.interface_id =
		grh->sgid_index < HFI1_GUIDS_PER_PORT ?
		get_sguid(ibp, grh->sgid_index) :
		get_sguid(ibp, HFI1_PORT_GUID_INDEX);
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}
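/*
 * Dword offset of BTH2 (the PSN word) within a 9B SDMA header; this is
 * the field that the AHG descriptors built in build_ahg() edit in
 * place.
 */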
#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, \
			      hdr.ibh.u.oth.bth[2]) / 4)

/**
 * build_ahg - create ahg in s_ahg
 * @qp: a pointer to QP
 * @npsn: the next PSN for the request/response
 *
 * This routine handles the AHG by allocating an AHG entry and arranging
 * for the header of the first middle packet to be copied.
 *
 * Subsequent middles use the copied entry, editing the
 * PSN with 1 or 2 edits.
 */
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
	struct hfi1_qp_priv *priv = qp->priv;

	if (unlikely(qp->s_flags & RVT_S_AHG_CLEAR))
		clear_ahg(qp);
	if (!(qp->s_flags & RVT_S_AHG_VALID)) {
		/* first middle that needs copy */
		if (qp->s_ahgidx < 0)
			qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
		if (qp->s_ahgidx >= 0) {
			qp->s_ahgpsn = npsn;
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
			/* save to protect a change in another thread */
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			qp->s_flags |= RVT_S_AHG_VALID;
		}
	} else {
		/* subsequent middle after valid */
		if (qp->s_ahgidx >= 0) {
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			priv->s_ahg->ahgcount++;
			priv->s_ahg->ahgdesc[0] =
				sdma_build_ahg_descriptor(
					(__force u16)cpu_to_be16((u16)npsn),
					BTH2_OFFSET,
					16,
					16);
			if ((npsn & 0xffff0000) !=
			    (qp->s_ahgpsn & 0xffff0000)) {
				priv->s_ahg->ahgcount++;
				priv->s_ahg->ahgdesc[1] =
					sdma_build_ahg_descriptor(
						(__force u16)cpu_to_be16(
						(u16)(npsn >> 16)),
						BTH2_OFFSET,
						0,
						16);
			}
		}
	}
}
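/*
 * hfi1_make_ruc_bth - fill in the BTH dwords of an outgoing header
 *
 * ORs the remote QPN into bth1 and stores bth0/bth1/bth2 in the
 * other-headers area in network byte order.
 */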
static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
				     struct ib_other_headers *ohdr,
				     u32 bth0, u32 bth1, u32 bth2)
{
	bth1 |= qp->remote_qpn;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(bth1);
	ohdr->bth[2] = cpu_to_be32(bth2);
}
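/*
 * hfi1_make_ruc_header_16B - build the 16B (OPA) variant of the header
 *
 * Computes the 16B pad and CRC/LT word counts, prepends a GRH when the
 * remote AH requires one (converting an OPA GID to an IB GID first),
 * requests migration and AHG as appropriate, folds the pkey and pad
 * count into bth0, returns a BECN if one is owed, and writes the 16B
 * LRH.
 */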
static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
					    struct ib_other_headers *ohdr,
					    u32 bth0, u32 bth2, int middle,
					    struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp = ps->ibp;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 bth1 = 0;
	u32 slid;
	u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	u8 l4 = OPA_16B_L4_IB_LOCAL;
	u8 extra_bytes = hfi1_get_16b_padding(
				(ps->s_txreq->hdr_dwords << 2),
				ps->s_txreq->s_cur_size);
	u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
				 extra_bytes + SIZE_OF_LT) >> 2);
	u8 becn = 0;

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
	    hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
		struct ib_grh *grh;
		struct ib_global_route *grd =
			rdma_ah_retrieve_grh(&qp->remote_ah_attr);
		int hdrwords;

		/*
		 * Ensure OPA GIDs are transformed to IB gids
		 * before creating the GRH.
		 */
		if (grd->sgid_index == OPA_GID_INDEX)
			grd->sgid_index = 0;
		grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
		l4 = OPA_16B_L4_IB_GLOBAL;
		hdrwords = ps->s_txreq->hdr_dwords - 4;
		ps->s_txreq->hdr_dwords += hfi1_make_grh(ibp, grh, grd,
							 hdrwords, nwords);
		middle = 0;
	}

	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth1 |= OPA_BTH_MIG_REQ;
	else
		middle = 0;

	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~RVT_S_AHG_VALID;

	bth0 |= pkey;
	bth0 |= extra_bytes << 20;
	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
		/* we recently received a FECN, so return a BECN */
		becn = 1;
	}
	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);

	if (!ppd->lid)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	else
		slid = ppd->lid |
			(rdma_ah_get_path_bits(&qp->remote_ah_attr) &
			((1 << ppd->lmc) - 1));

	hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
			  slid,
			  opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
				      16B),
			  (ps->s_txreq->hdr_dwords + nwords) >> 1,
			  pkey, becn, 0, l4, priv->s_sc);
}
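/*
 * hfi1_make_ruc_header_9B - build the 9B (IB) variant of the header
 *
 * Mirrors the 16B path: computes the pad and CRC word counts, prepends
 * a GRH when the remote AH carries one, folds SC/SL into lrh0, requests
 * migration and AHG as appropriate, returns any owed BECN in bth1, and
 * writes the IB LRH.
 */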
static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
					   struct ib_other_headers *ohdr,
					   u32 bth0, u32 bth2, int middle,
					   struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp = ps->ibp;
	u32 bth1 = 0;
	u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	u16 lrh0 = HFI1_LRH_BTH;
	u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
	u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
				 extra_bytes) >> 2);

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
		struct ib_grh *grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;
		int hdrwords = ps->s_txreq->hdr_dwords - 2;

		lrh0 = HFI1_LRH_GRH;
		ps->s_txreq->hdr_dwords +=
			hfi1_make_grh(ibp, grh,
				      rdma_ah_read_grh(&qp->remote_ah_attr),
				      hdrwords, nwords);
		middle = 0;
	}
	lrh0 |= (priv->s_sc & 0xf) << 12 |
		(rdma_ah_get_sl(&qp->remote_ah_attr) & 0xf) << 4;

	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	else
		middle = 0;

	if (middle)
		build_ahg(qp, bth2);
	else
		qp->s_flags &= ~RVT_S_AHG_VALID;

	bth0 |= pkey;
	bth0 |= extra_bytes << 20;
	if (qp->s_flags & RVT_S_ECN) {
		qp->s_flags &= ~RVT_S_ECN;
		/* we recently received a FECN, so return a BECN */
		bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
	}
	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
	hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
			 lrh0,
			 ps->s_txreq->hdr_dwords + nwords,
			 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
			 ppd_from_ibp(ibp)->lid |
				 rdma_ah_get_path_bits(&qp->remote_ah_attr));
}

typedef void (*hfi1_make_ruc_hdr)(struct rvt_qp *qp,
				  struct ib_other_headers *ohdr,
				  u32 bth0, u32 bth2, int middle,
				  struct hfi1_pkt_state *ps);

/* We support only two types - 9B and 16B for now */
static const hfi1_make_ruc_hdr hfi1_ruc_header_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &hfi1_make_ruc_header_9B,
	[HFI1_PKT_TYPE_16B] = &hfi1_make_ruc_header_16B
};

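/*
 * hfi1_make_ruc_header - reset the AHG state and build the RUC header
 *
 * Dispatches to the 9B or 16B builder above based on the QP's header
 * type.
 */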
void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
			  u32 bth0, u32 bth2, int middle,
			  struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;

	/*
	 * reset s_ahg/AHG fields
	 *
	 * This ensures that the ahgentry/ahgcount
	 * are at a non-AHG default to protect
	 * build_verbs_tx_desc() from using
	 * a stale ahgidx.
	 *
	 * build_ahg() will modify as appropriate
	 * to use the AHG feature.
	 */
	priv->s_ahg->tx_flags = 0;
	priv->s_ahg->ahgcount = 0;
	priv->s_ahg->ahgidx = 0;

	/* Make the appropriate header */
	hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth2, middle, ps);
}

/* when sending, force a reschedule every one of these periods */
#define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */

/**
 * schedule_send_yield - test for a yield required for QP send engine
 * @qp: a pointer to QP
 * @ps: a pointer to a structure with commonly looked-up values for
 *      the send engine progress
 *
 * This routine checks if the time slice for the QP has expired
 * for RC QPs; if so, an additional work entry is queued. At this
 * point, other QPs have an opportunity to be scheduled. It
 * returns true if a yield is required, otherwise false
 * is returned.
 */
static bool schedule_send_yield(struct rvt_qp *qp,
				struct hfi1_pkt_state *ps)
{
	ps->pkts_sent = true;

	if (unlikely(time_after(jiffies, ps->timeout))) {
		if (!ps->in_thread ||
		    workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
			spin_lock_irqsave(&qp->s_lock, ps->flags);
			qp->s_flags &= ~RVT_S_BUSY;
			hfi1_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, ps->flags);
			this_cpu_inc(*ps->ppd->dd->send_schedule);
			trace_hfi1_rc_expired_time_slice(qp, true);
			return true;
		}

		cond_resched();
		this_cpu_inc(*ps->ppd->dd->send_schedule);
		ps->timeout = jiffies + ps->timeout_int;
	}

	trace_hfi1_rc_expired_time_slice(qp, false);
	return false;
}

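/*
 * hfi1_do_send_from_rvt - rdmavt callback into the send engine
 *
 * Runs the send engine directly (not from the send workqueue), so
 * in_thread is false.
 */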
void hfi1_do_send_from_rvt(struct rvt_qp *qp)
{
	hfi1_do_send(qp, false);
}
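/*
 * _hfi1_do_send - workqueue handler for the send engine
 *
 * Recovers the QP from the embedded iowait work struct and runs the
 * send engine with in_thread set to true.
 */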
void _hfi1_do_send(struct work_struct *work)
{
	struct iowait *wait = container_of(work, struct iowait, iowork);
	struct rvt_qp *qp = iowait_to_qp(wait);

	hfi1_do_send(qp, true);
}

/**
 * hfi1_do_send - perform a send on a QP
 * @qp: a pointer to the QP
 * @in_thread: true if in a workqueue thread
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted. Only allow one CPU to send a packet per QP.
 * Otherwise, two threads could send packets out of order.
 */
void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
{
	struct hfi1_pkt_state ps;
	struct hfi1_qp_priv *priv = qp->priv;
	int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

	ps.dev = to_idev(qp->ibqp.device);
	ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
	ps.ppd = ppd_from_ibp(ps.ibp);
	ps.in_thread = in_thread;

	trace_hfi1_rc_do_send(qp, in_thread);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
				   ~((1 << ps.ppd->lmc) - 1)) ==
				  ps.ppd->lid)) {
			ruc_loopback(qp);
			return;
		}
		make_req = hfi1_make_rc_req;
		ps.timeout_int = qp->timeout_jiffies;
		break;
	case IB_QPT_UC:
		if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
				   ~((1 << ps.ppd->lmc) - 1)) ==
				  ps.ppd->lid)) {
			ruc_loopback(qp);
			return;
		}
		make_req = hfi1_make_uc_req;
		ps.timeout_int = SEND_RESCHED_TIMEOUT;
		break;
	default:
		make_req = hfi1_make_ud_req;
		ps.timeout_int = SEND_RESCHED_TIMEOUT;
	}

	spin_lock_irqsave(&qp->s_lock, ps.flags);

	/* Return if we are already busy processing a work request. */
	if (!hfi1_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, ps.flags);
		return;
	}

	qp->s_flags |= RVT_S_BUSY;

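	/* the yield time slice is 1/8 of the nominal timeout */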
	ps.timeout_int = ps.timeout_int / 8;
	ps.timeout = jiffies + ps.timeout_int;
	ps.cpu = priv->s_sde ? priv->s_sde->cpu :
			cpumask_first(cpumask_of_node(ps.ppd->dd->node));
	ps.pkts_sent = false;

	/* ensure a pre-built packet is handled */
	ps.s_txreq = get_waiting_verbs_txreq(qp);
	do {
		/* Check for a constructed packet to be sent. */
		if (ps.s_txreq) {
			spin_unlock_irqrestore(&qp->s_lock, ps.flags);
			/*
			 * If the packet cannot be sent now, return and
			 * the send engine will be woken up later.
			 */
			if (hfi1_verbs_send(qp, &ps))
				return;
			/* allow other tasks to run */
			if (schedule_send_yield(qp, &ps))
				return;

			spin_lock_irqsave(&qp->s_lock, ps.flags);
		}
	} while (make_req(qp, &ps));
	iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
	spin_unlock_irqrestore(&qp->s_lock, ps.flags);
}

/*
 * This should be called with s_lock held.
 */
void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
			enum ib_wc_status status)
{
	u32 old_last, last;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	last = qp->s_last;
	old_last = last;
	trace_hfi1_qp_send_completion(qp, wqe, last);
	if (++last >= qp->s_size)
		last = 0;
	trace_hfi1_qp_send_completion(qp, wqe, last);
	qp->s_last = last;
	/* See post_send() */
	barrier();
	rvt_put_swqe(wqe);
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

	rvt_qp_swqe_complete(qp,
			     wqe,
			     ib_hfi1_wc_opcode[wqe->wr.opcode],
			     status);

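	/* advance other progress indices that referenced the old slot */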
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}