/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/spinlock.h>

#include "hfi.h"
#include "mad.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
        int i, j, ret;
        struct ib_wc wc;
        struct rvt_lkey_table *rkt;
        struct rvt_pd *pd;
        struct rvt_sge_state *ss;

        rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
        pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
        ss = &qp->r_sge;
        ss->sg_list = qp->r_sg_list;
        qp->r_len = 0;
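        /*
         * Walk the RWQE's scatter list: zero-length SGEs are skipped,
         * and each remaining SGE's lkey is resolved to an MR reference
         * while the receive length is accumulated in qp->r_len.
         */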
        for (i = j = 0; i < wqe->num_sge; i++) {
                if (wqe->sg_list[i].length == 0)
                        continue;
                /* Check LKEY */
                if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
                                 NULL, &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
                        goto bad_lkey;
                qp->r_len += wqe->sg_list[i].length;
                j++;
        }
        ss->num_sge = j;
        ss->total_len = qp->r_len;
        ret = 1;
        goto bail;

bad_lkey:
        while (j) {
                struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

                rvt_put_mr(sge->mr);
        }
        ss->num_sge = 0;
        memset(&wc, 0, sizeof(wc));
        wc.wr_id = wqe->wr_id;
        wc.status = IB_WC_LOC_PROT_ERR;
        wc.opcode = IB_WC_RECV;
        wc.qp = &qp->ibqp;
        /* Signal solicited completion event. */
        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
        ret = 0;
bail:
        return ret;
}

/**
 * hfi1_rvt_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only)
{
        unsigned long flags;
        struct rvt_rq *rq;
        struct rvt_rwq *wq;
        struct rvt_srq *srq;
        struct rvt_rwqe *wqe;
        void (*handler)(struct ib_event *, void *);
        u32 tail;
        int ret;

        if (qp->ibqp.srq) {
                srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
                handler = srq->ibsrq.event_handler;
                rq = &srq->rq;
        } else {
                srq = NULL;
                handler = NULL;
                rq = &qp->r_rq;
        }

        spin_lock_irqsave(&rq->lock, flags);
        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
                ret = 0;
                goto unlock;
        }

        wq = rq->wq;
        tail = wq->tail;
        /* Validate tail before using it since it is user writable. */
        if (tail >= rq->size)
                tail = 0;
        if (unlikely(tail == wq->head)) {
                ret = 0;
                goto unlock;
        }
        /* Make sure entry is read after head index is read. */
        smp_rmb();
        wqe = rvt_get_rwqe_ptr(rq, tail);
        /*
         * Even though we update the tail index in memory, the verbs
         * consumer is not supposed to post more entries until a
         * completion is generated.
         */
        if (++tail >= rq->size)
                tail = 0;
        wq->tail = tail;
        if (!wr_id_only && !init_sge(qp, wqe)) {
                ret = -1;
                goto unlock;
        }
        qp->r_wr_id = wqe->wr_id;

        ret = 1;
        set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
        if (handler) {
                u32 n;

                /*
                 * Validate head pointer value and compute
                 * the number of remaining WQEs.
                 */
                n = wq->head;
                if (n >= rq->size)
                        n = 0;
                if (n < tail)
                        n += rq->size - tail;
                else
                        n -= tail;
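                /*
                 * The SRQ limit event is one-shot: srq->limit is
                 * cleared before the handler runs, so the event does
                 * not fire again until the consumer re-arms the limit.
                 */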
                if (n < srq->limit) {
                        struct ib_event ev;

                        srq->limit = 0;
                        spin_unlock_irqrestore(&rq->lock, flags);
                        ev.device = qp->ibqp.device;
                        ev.element.srq = qp->ibqp.srq;
                        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        handler(&ev, srq->ibsrq.srq_context);
                        goto bail;
                }
        }
unlock:
        spin_unlock_irqrestore(&rq->lock, flags);
bail:
        return ret;
}

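/*
 * gid_ok - check a GID against this port: the interface ID must match,
 * and the subnet prefix must be either the port's prefix or the
 * IB-defined default.
 */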
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
        return (gid->global.interface_id == id &&
                (gid->global.subnet_prefix == gid_prefix ||
                 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 * hfi1_ruc_check_hdr - validate the header of an incoming packet
 * against the attributes of the receiving QP.
 *
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the hfi1_migrate_qp() call.
 */
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
{
        __be64 guid;
        unsigned long flags;
        struct rvt_qp *qp = packet->qp;
        u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
        u32 dlid = packet->dlid;
        u32 slid = packet->slid;
        u32 sl = packet->sl;
        int migrated;
        u32 bth0, bth1;

        bth0 = be32_to_cpu(packet->ohdr->bth[0]);
        bth1 = be32_to_cpu(packet->ohdr->bth[1]);
        migrated = bth0 & IB_BTH_MIG_REQ;

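        /*
         * When the requester signals a path migration (BTH MigReq set)
         * and this QP is armed for one, validate the packet against the
         * alternate path and then migrate; otherwise validate against
         * the primary path.
         */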
        if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
                if (!packet->grh) {
                        if (rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
                            IB_AH_GRH)
                                return 1;
                } else {
                        const struct ib_global_route *grh;

                        if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
                              IB_AH_GRH))
                                return 1;
                        grh = rdma_ah_read_grh(&qp->alt_ah_attr);
                        guid = get_sguid(ibp, grh->sgid_index);
                        if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
                                    guid))
                                return 1;
                        if (!gid_ok(
                                &packet->grh->sgid,
                                grh->dgid.global.subnet_prefix,
                                grh->dgid.global.interface_id))
                                return 1;
                }
                if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
                                            sc5, slid))) {
                        hfi1_bad_pkey(ibp, (u16)bth0, sl,
                                      0, qp->ibqp.qp_num, slid, dlid);
                        return 1;
                }
                /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
                if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||
                    ppd_from_ibp(ibp)->port !=
                    rdma_ah_get_port_num(&qp->alt_ah_attr))
                        return 1;
                spin_lock_irqsave(&qp->s_lock, flags);
                hfi1_migrate_qp(qp);
                spin_unlock_irqrestore(&qp->s_lock, flags);
        } else {
                if (!packet->grh) {
                        if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
                            IB_AH_GRH)
                                return 1;
                } else {
                        const struct ib_global_route *grh;

                        if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
                              IB_AH_GRH))
                                return 1;
                        grh = rdma_ah_read_grh(&qp->remote_ah_attr);
                        guid = get_sguid(ibp, grh->sgid_index);
                        if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
                                    guid))
                                return 1;
                        if (!gid_ok(
                                &packet->grh->sgid,
                                grh->dgid.global.subnet_prefix,
                                grh->dgid.global.interface_id))
                                return 1;
                }
                if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
                                            sc5, slid))) {
                        hfi1_bad_pkey(ibp, (u16)bth0, sl,
                                      0, qp->ibqp.qp_num, slid, dlid);
                        return 1;
                }
                /* Validate the SLID. See Ch. 9.6.1.5 */
                if ((slid != rdma_ah_get_dlid(&qp->remote_ah_attr)) ||
                    ppd_from_ibp(ibp)->port != qp->port_num)
                        return 1;
                if (qp->s_mig_state == IB_MIG_REARM && !migrated)
                        qp->s_mig_state = IB_MIG_ARMED;
        }

        return 0;
}

/**
 * ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from hfi1_do_send() to
 * forward a WQE addressed to the same HFI.
 * Note that although we are single threaded due to the send engine, we
 * still have to protect against post_send().  We don't have to worry
 * about receive interrupts since this is a connected protocol and all
 * packets will pass through here.
 */
static void ruc_loopback(struct rvt_qp *sqp)
{
        struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
        struct rvt_qp *qp;
        struct rvt_swqe *wqe;
        struct rvt_sge *sge;
        unsigned long flags;
        struct ib_wc wc;
        u64 sdata;
        atomic64_t *maddr;
        enum ib_wc_status send_status;
        bool release;
        int ret;
        bool copy_last = false;
        int local_ops = 0;

        rcu_read_lock();

        /*
         * Note that we check the responder QP state after
         * checking the requester's state.
         */
        qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
                            sqp->remote_qpn);

        spin_lock_irqsave(&sqp->s_lock, flags);

        /* Return if we are already busy processing a work request. */
        if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
            !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
                goto unlock;

        sqp->s_flags |= RVT_S_BUSY;

again:
        smp_read_barrier_depends(); /* see post_one_send() */
        if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
                goto clr_busy;
        wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

        /* Return if it is not OK to start a new work request. */
        if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
                if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
                        goto clr_busy;
                /* We are in the error state, flush the work request. */
                send_status = IB_WC_WR_FLUSH_ERR;
                goto flush_send;
        }

        /*
         * We can rely on the entry not changing without the s_lock
         * being held until we update s_last.
         * We increment s_cur to indicate s_last is in progress.
         */
        if (sqp->s_last == sqp->s_cur) {
                if (++sqp->s_cur >= sqp->s_size)
                        sqp->s_cur = 0;
        }
        spin_unlock_irqrestore(&sqp->s_lock, flags);

        if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
            qp->ibqp.qp_type != sqp->ibqp.qp_type) {
                ibp->rvp.n_pkt_drops++;
                /*
                 * For RC, the requester would timeout and retry so
                 * shortcut the timeouts and just signal too many retries.
                 */
                if (sqp->ibqp.qp_type == IB_QPT_RC)
                        send_status = IB_WC_RETRY_EXC_ERR;
                else
                        send_status = IB_WC_SUCCESS;
                goto serr;
        }

        memset(&wc, 0, sizeof(wc));
        send_status = IB_WC_SUCCESS;

        release = true;
        sqp->s_sge.sge = wqe->sg_list[0];
        sqp->s_sge.sg_list = wqe->sg_list + 1;
        sqp->s_sge.num_sge = wqe->wr.num_sge;
        sqp->s_len = wqe->length;
        switch (wqe->wr.opcode) {
        case IB_WR_REG_MR:
                goto send_comp;

        case IB_WR_LOCAL_INV:
                if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
                        if (rvt_invalidate_rkey(sqp,
                                                wqe->wr.ex.invalidate_rkey))
                                send_status = IB_WC_LOC_PROT_ERR;
                        local_ops = 1;
                }
                goto send_comp;

        case IB_WR_SEND_WITH_INV:
                if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
                        wc.wc_flags = IB_WC_WITH_INVALIDATE;
                        wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
                }
                goto send;

        case IB_WR_SEND_WITH_IMM:
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.ex.imm_data = wqe->wr.ex.imm_data;
                /* FALLTHROUGH */
        case IB_WR_SEND:
send:
                ret = hfi1_rvt_get_rwqe(qp, 0);
                if (ret < 0)
                        goto op_err;
                if (!ret)
                        goto rnr_nak;
                break;

        case IB_WR_RDMA_WRITE_WITH_IMM:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
                        goto inv_err;
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.ex.imm_data = wqe->wr.ex.imm_data;
                ret = hfi1_rvt_get_rwqe(qp, 1);
                if (ret < 0)
                        goto op_err;
                if (!ret)
                        goto rnr_nak;
                /* skip copy_last set and qp_access_flags recheck */
                goto do_write;
        case IB_WR_RDMA_WRITE:
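                /*
                 * For user QPs, copy the final bytes of the payload
                 * last so a consumer polling the tail of the buffer
                 * does not observe the write as complete before all
                 * of the data has arrived.
                 */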
                copy_last = rvt_is_user_qp(qp);
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
                        goto inv_err;
do_write:
                if (wqe->length == 0)
                        break;
                if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
                                          wqe->rdma_wr.remote_addr,
                                          wqe->rdma_wr.rkey,
                                          IB_ACCESS_REMOTE_WRITE)))
                        goto acc_err;
                qp->r_sge.sg_list = NULL;
                qp->r_sge.num_sge = 1;
                qp->r_sge.total_len = wqe->length;
                break;

        case IB_WR_RDMA_READ:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
                        goto inv_err;
                if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
                                          wqe->rdma_wr.remote_addr,
                                          wqe->rdma_wr.rkey,
                                          IB_ACCESS_REMOTE_READ)))
                        goto acc_err;
                release = false;
                sqp->s_sge.sg_list = NULL;
                sqp->s_sge.num_sge = 1;
                qp->r_sge.sge = wqe->sg_list[0];
                qp->r_sge.sg_list = wqe->sg_list + 1;
                qp->r_sge.num_sge = wqe->wr.num_sge;
                qp->r_sge.total_len = wqe->length;
                break;

        case IB_WR_ATOMIC_CMP_AND_SWP:
        case IB_WR_ATOMIC_FETCH_AND_ADD:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
                        goto inv_err;
                if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
                                          wqe->atomic_wr.remote_addr,
                                          wqe->atomic_wr.rkey,
                                          IB_ACCESS_REMOTE_ATOMIC)))
                        goto acc_err;
                /* Perform atomic OP and save result. */
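                /*
                 * Fetch-add is emulated with atomic64_add_return()
                 * (which returns the new value, so the original is
                 * recovered by subtracting sdata); compare-swap uses
                 * cmpxchg() on the responder's memory.  Either way the
                 * prior contents land in the requester's first SGE.
                 */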
                maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
                sdata = wqe->atomic_wr.compare_add;
                *(u64 *)sqp->s_sge.sge.vaddr =
                        (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
                        (u64)atomic64_add_return(sdata, maddr) - sdata :
                        (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
                                     sdata, wqe->atomic_wr.swap);
                rvt_put_mr(qp->r_sge.sge.mr);
                qp->r_sge.num_sge = 0;
                goto send_comp;

        default:
                send_status = IB_WC_LOC_QP_OP_ERR;
                goto serr;
        }

        sge = &sqp->s_sge.sge;
        while (sqp->s_len) {
                u32 len = sqp->s_len;

                if (len > sge->length)
                        len = sge->length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                WARN_ON_ONCE(len == 0);
                hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release, copy_last);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (!release)
                                rvt_put_mr(sge->mr);
                        if (--sqp->s_sge.num_sge)
                                *sge = *sqp->s_sge.sg_list++;
                } else if (sge->length == 0 && sge->mr->lkey) {
                        if (++sge->n >= RVT_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                sqp->s_len -= len;
        }
        if (release)
                rvt_put_ss(&qp->r_sge);

        if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
                goto send_comp;

        if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
                wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
        else
                wc.opcode = IB_WC_RECV;
        wc.wr_id = qp->r_wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.byte_len = wqe->length;
        wc.qp = &qp->ibqp;
        wc.src_qp = qp->remote_qpn;
        wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
        wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
        wc.port_num = 1;
        /* Signal completion event if the solicited bit is set. */
        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
                     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
        spin_lock_irqsave(&sqp->s_lock, flags);
        ibp->rvp.n_loop_pkts++;
flush_send:
        sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
        hfi1_send_complete(sqp, wqe, send_status);
        if (local_ops) {
                atomic_dec(&sqp->local_ops_pending);
                local_ops = 0;
        }
        goto again;

rnr_nak:
        /* Handle RNR NAK */
        if (qp->ibqp.qp_type == IB_QPT_UC)
                goto send_comp;
        ibp->rvp.n_rnr_naks++;
        /*
         * Note: we don't need the s_lock held since the BUSY flag
         * makes this single threaded.
         */
        if (sqp->s_rnr_retry == 0) {
                send_status = IB_WC_RNR_RETRY_EXC_ERR;
                goto serr;
        }
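        /*
         * An RNR retry count of 7 means "retry forever"; only a
         * finite count is decremented.
         */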
        if (sqp->s_rnr_retry_cnt < 7)
                sqp->s_rnr_retry--;
        spin_lock_irqsave(&sqp->s_lock, flags);
        if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
                goto clr_busy;
        rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
                                IB_AETH_CREDIT_SHIFT);
        goto clr_busy;

op_err:
        send_status = IB_WC_REM_OP_ERR;
        wc.status = IB_WC_LOC_QP_OP_ERR;
        goto err;

inv_err:
        send_status = IB_WC_REM_INV_REQ_ERR;
        wc.status = IB_WC_LOC_QP_OP_ERR;
        goto err;

acc_err:
        send_status = IB_WC_REM_ACCESS_ERR;
        wc.status = IB_WC_LOC_PROT_ERR;
err:
        /* responder goes to error state */
        rvt_rc_error(qp, wc.status);

serr:
        spin_lock_irqsave(&sqp->s_lock, flags);
        hfi1_send_complete(sqp, wqe, send_status);
        if (sqp->ibqp.qp_type == IB_QPT_RC) {
                int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

                sqp->s_flags &= ~RVT_S_BUSY;
                spin_unlock_irqrestore(&sqp->s_lock, flags);
                if (lastwqe) {
                        struct ib_event ev;

                        ev.device = sqp->ibqp.device;
                        ev.element.qp = &sqp->ibqp;
                        ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
                }
                goto done;
        }
clr_busy:
        sqp->s_flags &= ~RVT_S_BUSY;
unlock:
        spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
        rcu_read_unlock();
}

/**
 * hfi1_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
                  const struct ib_global_route *grh, u32 hwords, u32 nwords)
{
        hdr->version_tclass_flow =
                cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
                            (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
                            (grh->flow_label << IB_GRH_FLOW_SHIFT));
        hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
        /* next_hdr is defined by C8-7 in ch. 8.4.1 */
        hdr->next_hdr = IB_GRH_NEXT_HDR;
        hdr->hop_limit = grh->hop_limit;
        /* The SGID is 32-bit aligned. */
        hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
        hdr->sgid.global.interface_id =
                grh->sgid_index < HFI1_GUIDS_PER_PORT ?
                get_sguid(ibp, grh->sgid_index) :
                get_sguid(ibp, HFI1_PORT_GUID_INDEX);
        hdr->dgid = grh->dgid;

        /* GRH header size in 32-bit words. */
        return sizeof(struct ib_grh) / sizeof(u32);
}

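/*
 * Dword offset of bth[2] (the PSN) within the pre-built SDMA header;
 * this is the field that the AHG descriptors in build_ahg() edit.
 */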
#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, hdr.u.oth.bth[2]) / 4)

/**
 * build_ahg - create ahg in s_ahg
 * @qp: a pointer to QP
 * @npsn: the next PSN for the request/response
 *
 * This routine handles the AHG by allocating an ahg entry and causing the
 * copy of the first middle.
 *
 * Subsequent middles use the copied entry, editing the
 * PSN with 1 or 2 edits.
 */
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
        struct hfi1_qp_priv *priv = qp->priv;

        if (unlikely(qp->s_flags & RVT_S_AHG_CLEAR))
                clear_ahg(qp);
        if (!(qp->s_flags & RVT_S_AHG_VALID)) {
                /* first middle that needs copy */
                if (qp->s_ahgidx < 0)
                        qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
                if (qp->s_ahgidx >= 0) {
                        qp->s_ahgpsn = npsn;
                        priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
                        /* save to protect against a change in another thread */
                        priv->s_ahg->ahgidx = qp->s_ahgidx;
                        qp->s_flags |= RVT_S_AHG_VALID;
                }
        } else {
                /* subsequent middle after valid */
                if (qp->s_ahgidx >= 0) {
                        priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
                        priv->s_ahg->ahgidx = qp->s_ahgidx;
                        priv->s_ahg->ahgcount++;
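                        /*
                         * The first descriptor rewrites the low 16 bits
                         * of the PSN; a second descriptor is added below
                         * only when the high 16 bits differ from those
                         * of the copied header.
                         */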
                        priv->s_ahg->ahgdesc[0] =
                                sdma_build_ahg_descriptor(
                                        (__force u16)cpu_to_be16((u16)npsn),
                                        BTH2_OFFSET,
                                        16,
                                        16);
                        if ((npsn & 0xffff0000) !=
                            (qp->s_ahgpsn & 0xffff0000)) {
                                priv->s_ahg->ahgcount++;
                                priv->s_ahg->ahgdesc[1] =
                                        sdma_build_ahg_descriptor(
                                                (__force u16)cpu_to_be16(
                                                        (u16)(npsn >> 16)),
                                                BTH2_OFFSET,
                                                0,
                                                16);
                        }
                }
        }
}

void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
                          u32 bth0, u32 bth2, int middle,
                          struct hfi1_pkt_state *ps)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct hfi1_ibport *ibp = ps->ibp;
        u16 lrh0;
        u32 nwords;
        u32 extra_bytes;
        u32 bth1;

        /* Construct the header. */
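        /* -size & 3 is the pad needed to round the payload up to a dword. */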
        extra_bytes = -ps->s_txreq->s_cur_size & 3;
        nwords = (ps->s_txreq->s_cur_size + extra_bytes) >> 2;
        lrh0 = HFI1_LRH_BTH;
        if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
                qp->s_hdrwords +=
                        hfi1_make_grh(ibp,
                                      &ps->s_txreq->phdr.hdr.u.l.grh,
                                      rdma_ah_read_grh(&qp->remote_ah_attr),
                                      qp->s_hdrwords, nwords);
                lrh0 = HFI1_LRH_GRH;
                middle = 0;
        }
        lrh0 |= (priv->s_sc & 0xf) << 12 |
                (rdma_ah_get_sl(&qp->remote_ah_attr) & 0xf) << 4;
        /*
         * Reset s_ahg/AHG fields.
         *
         * This ensures that the ahgentry/ahgcount
         * are at a non-AHG default to keep
         * build_verbs_tx_desc() from using
         * a stale ahgidx.
         *
         * build_ahg() will modify as appropriate
         * to use the AHG feature.
         */
        priv->s_ahg->tx_flags = 0;
        priv->s_ahg->ahgcount = 0;
        priv->s_ahg->ahgidx = 0;
        if (qp->s_mig_state == IB_MIG_MIGRATED)
                bth0 |= IB_BTH_MIG_REQ;
        else
                middle = 0;
        if (middle)
                build_ahg(qp, bth2);
        else
                qp->s_flags &= ~RVT_S_AHG_VALID;
        ps->s_txreq->phdr.hdr.lrh[0] = cpu_to_be16(lrh0);
        ps->s_txreq->phdr.hdr.lrh[1] =
                cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
        ps->s_txreq->phdr.hdr.lrh[2] =
                cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
        ps->s_txreq->phdr.hdr.lrh[3] =
                cpu_to_be16(ppd_from_ibp(ibp)->lid |
                            rdma_ah_get_path_bits(&qp->remote_ah_attr));
        bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
        bth0 |= extra_bytes << 20;
        ohdr->bth[0] = cpu_to_be32(bth0);
        bth1 = qp->remote_qpn;
        if (qp->s_flags & RVT_S_ECN) {
                qp->s_flags &= ~RVT_S_ECN;
                /* we recently received a FECN, so return a BECN */
                bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
        }
        ohdr->bth[1] = cpu_to_be32(bth1);
        ohdr->bth[2] = cpu_to_be32(bth2);
}

/* when sending, force a reschedule every one of these periods */
#define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */

/**
 * schedule_send_yield - test for a yield required for QP send engine
 * @qp: a pointer to QP
 * @ps: a pointer to a structure with commonly looked-up values for
 *      the send engine progress
 *
 * This routine checks if the time slice for the QP has expired
 * for RC QPs; if so, an additional work entry is queued.  At this
 * point, other QPs have an opportunity to be scheduled.  It
 * returns true if a yield is required, otherwise false
 * is returned.
 */
static bool schedule_send_yield(struct rvt_qp *qp,
                                struct hfi1_pkt_state *ps)
{
        if (unlikely(time_after(jiffies, ps->timeout))) {
                if (!ps->in_thread ||
                    workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
                        spin_lock_irqsave(&qp->s_lock, ps->flags);
                        qp->s_flags &= ~RVT_S_BUSY;
                        hfi1_schedule_send(qp);
                        spin_unlock_irqrestore(&qp->s_lock, ps->flags);
                        this_cpu_inc(*ps->ppd->dd->send_schedule);
                        trace_hfi1_rc_expired_time_slice(qp, true);
                        return true;
                }

                cond_resched();
                this_cpu_inc(*ps->ppd->dd->send_schedule);
                ps->timeout = jiffies + ps->timeout_int;
        }

        trace_hfi1_rc_expired_time_slice(qp, false);
        return false;
}

void hfi1_do_send_from_rvt(struct rvt_qp *qp)
{
        hfi1_do_send(qp, false);
}

void _hfi1_do_send(struct work_struct *work)
{
        struct iowait *wait = container_of(work, struct iowait, iowork);
        struct rvt_qp *qp = iowait_to_qp(wait);

        hfi1_do_send(qp, true);
}

/**
 * hfi1_do_send - perform a send on a QP
 * @qp: a pointer to the QP
 * @in_thread: true if in a workqueue thread
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP.
 * Otherwise, two threads could send packets out of order.
 */
void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
{
        struct hfi1_pkt_state ps;
        struct hfi1_qp_priv *priv = qp->priv;
        int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

        ps.dev = to_idev(qp->ibqp.device);
        ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
        ps.ppd = ppd_from_ibp(ps.ibp);
        ps.in_thread = in_thread;

        trace_hfi1_rc_do_send(qp, in_thread);

        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
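                /*
                 * A DLID that matches this port's LID once the low LMC
                 * bits are masked off is a loopback destination and is
                 * handled entirely in software.
                 */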
                if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
                                   ~((1 << ps.ppd->lmc) - 1)) ==
                                  ps.ppd->lid)) {
                        ruc_loopback(qp);
                        return;
                }
                make_req = hfi1_make_rc_req;
                ps.timeout_int = qp->timeout_jiffies;
                break;
        case IB_QPT_UC:
                if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
                                   ~((1 << ps.ppd->lmc) - 1)) ==
                                  ps.ppd->lid)) {
                        ruc_loopback(qp);
                        return;
                }
                make_req = hfi1_make_uc_req;
                ps.timeout_int = SEND_RESCHED_TIMEOUT;
                break;
        default:
                make_req = hfi1_make_ud_req;
                ps.timeout_int = SEND_RESCHED_TIMEOUT;
        }

        spin_lock_irqsave(&qp->s_lock, ps.flags);

        /* Return if we are already busy processing a work request. */
        if (!hfi1_send_ok(qp)) {
                spin_unlock_irqrestore(&qp->s_lock, ps.flags);
                return;
        }

        qp->s_flags |= RVT_S_BUSY;

        ps.timeout_int = ps.timeout_int / 8;
        ps.timeout = jiffies + ps.timeout_int;
        ps.cpu = priv->s_sde ? priv->s_sde->cpu :
                 cpumask_first(cpumask_of_node(ps.ppd->dd->node));

        /* ensure a pre-built packet is handled */
        ps.s_txreq = get_waiting_verbs_txreq(qp);
        do {
                /* Check for a constructed packet to be sent. */
                if (qp->s_hdrwords != 0) {
                        spin_unlock_irqrestore(&qp->s_lock, ps.flags);
                        /*
                         * If the packet cannot be sent now, return and
                         * the send engine will be woken up later.
                         */
                        if (hfi1_verbs_send(qp, &ps))
                                return;
                        /* Record that s_ahg is empty. */
                        qp->s_hdrwords = 0;
                        /* allow other tasks to run */
                        if (schedule_send_yield(qp, &ps))
                                return;

                        spin_lock_irqsave(&qp->s_lock, ps.flags);
                }
        } while (make_req(qp, &ps));

        spin_unlock_irqrestore(&qp->s_lock, ps.flags);
}

/*
 * This should be called with s_lock held.
 */
void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
                        enum ib_wc_status status)
{
        u32 old_last, last;

        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
                return;

        last = qp->s_last;
        old_last = last;
        trace_hfi1_qp_send_completion(qp, wqe, last);
        if (++last >= qp->s_size)
                last = 0;
        trace_hfi1_qp_send_completion(qp, wqe, last);
        qp->s_last = last;
        /* See post_send() */
        barrier();
        rvt_put_swqe(wqe);
        if (qp->ibqp.qp_type == IB_QPT_UD ||
            qp->ibqp.qp_type == IB_QPT_SMI ||
            qp->ibqp.qp_type == IB_QPT_GSI)
                atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

        rvt_qp_swqe_complete(qp,
                             wqe,
                             ib_hfi1_wc_opcode[wqe->wr.opcode],
                             status);

        if (qp->s_acked == old_last)
                qp->s_acked = last;
        if (qp->s_cur == old_last)
                qp->s_cur = last;
        if (qp->s_tail == old_last)
                qp->s_tail = last;
        if (qp->state == IB_QPS_SQD && last == qp->s_cur)
                qp->s_draining = 0;
}