/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mmu_context.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

#include "hfi.h"
#include "sdma.h"
#include "user_sdma.h"
#include "verbs.h" /* for the headers */
#include "common.h" /* for struct hfi1_tid_info */
#include "trace.h"
#include "mmu_rb.h"

static uint hfi1_sdma_comp_ring_size = 128;
module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");
/* The maximum number of data io vectors per message/request */
#define MAX_VECTORS_PER_REQ 8
/*
 * Maximum number of packets to send from each message/request
 * before moving to the next one.
 */
#define MAX_PKTS_PER_QUEUE 16

#define num_pages(x) (1 + ((((x) - 1) & PAGE_MASK) >> PAGE_SHIFT))
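/*
 * Illustrative arithmetic for num_pages() (4 KiB pages assumed):
 * num_pages(4096) == 1, num_pages(4097) == 2, num_pages(8192) == 2.
 */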

#define req_opcode(x) \
	(((x) >> HFI1_SDMA_REQ_OPCODE_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
/*
 * Note: req_version() reuses the opcode mask; presumably the version
 * and opcode fields are the same width, so the extracted value is
 * unaffected.
 */
#define req_version(x) \
	(((x) >> HFI1_SDMA_REQ_VERSION_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
#define req_iovcnt(x) \
	(((x) >> HFI1_SDMA_REQ_IOVCNT_SHIFT) & HFI1_SDMA_REQ_IOVCNT_MASK)

/* Number of BTH.PSN bits used for sequence number in expected rcvs */
#define BTH_SEQ_MASK 0x7ffull

/*
 * Define fields in the KDETH header so we can update the header
 * template.
 */
#define KDETH_OFFSET_SHIFT     0
#define KDETH_OFFSET_MASK      0x7fff
#define KDETH_OM_SHIFT         15
#define KDETH_OM_MASK          0x1
#define KDETH_TID_SHIFT        16
#define KDETH_TID_MASK         0x3ff
#define KDETH_TIDCTRL_SHIFT    26
#define KDETH_TIDCTRL_MASK     0x3
#define KDETH_INTR_SHIFT       28
#define KDETH_INTR_MASK        0x1
#define KDETH_SH_SHIFT         29
#define KDETH_SH_MASK          0x1
#define KDETH_HCRC_UPPER_SHIFT 16
#define KDETH_HCRC_UPPER_MASK  0xff
#define KDETH_HCRC_LOWER_SHIFT 24
#define KDETH_HCRC_LOWER_MASK  0xff

#define AHG_KDETH_INTR_SHIFT 12
#define AHG_KDETH_SH_SHIFT   13
#define AHG_KDETH_ARRAY_SIZE 9

#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
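/*
 * Illustrative round trip (values chosen arbitrarily): LRH2PBC(36) == 10
 * and PBC2LRH(10) == 36; i.e. the PBC length field counts dwords and
 * carries one extra dword relative to the byte-based LRH length.
 */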
#define KDETH_GET(val, field) \
	(((le32_to_cpu((val))) >> KDETH_##field##_SHIFT) & KDETH_##field##_MASK)
#define KDETH_SET(dw, field, val) do { \
		u32 dwval = le32_to_cpu(dw); \
		dwval &= ~(KDETH_##field##_MASK << KDETH_##field##_SHIFT); \
		dwval |= (((val) & KDETH_##field##_MASK) << \
			  KDETH_##field##_SHIFT); \
		dw = cpu_to_le32(dwval); \
	} while (0)
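/*
 * Usage sketch (field names from the defines above), e.g. reading and
 * rewriting the TID index of a little-endian header dword:
 *
 *	u32 tid = KDETH_GET(hdr->kdeth.ver_tid_offset, TID);
 *	KDETH_SET(hdr->kdeth.ver_tid_offset, TID, tid + 1);
 */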

#define AHG_HEADER_SET(arr, idx, dw, bit, width, value)			\
	do {								\
		if ((idx) < ARRAY_SIZE((arr)))				\
			(arr)[(idx++)] = sdma_build_ahg_descriptor(	\
				(__force u16)(value), (dw), (bit),	\
				(width));				\
		else							\
			return -ERANGE;					\
	} while (0)

/* KDETH OM multipliers and switch over point */
#define KDETH_OM_SMALL       4
#define KDETH_OM_SMALL_SHIFT 2
#define KDETH_OM_LARGE       64
#define KDETH_OM_LARGE_SHIFT 6
#define KDETH_OM_MAX_SIZE    (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
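/*
 * KDETH.OFFSET is expressed in units of the offset multiplier (OM):
 * 4-byte units when KDETH.OM is clear and 64-byte units when it is set
 * (see the tidoffset computation in hfi1_user_sdma_process_request()
 * below).
 */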

/* Tx request flag bits */
#define TXREQ_FLAGS_REQ_ACK        BIT(0) /* Set the ACK bit in the header */
#define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */

/* SDMA request flag bits */
#define SDMA_REQ_HAS_ERROR  1
#define SDMA_REQ_DONE_ERROR 2

#define SDMA_PKT_Q_INACTIVE BIT(0)
#define SDMA_PKT_Q_ACTIVE   BIT(1)
#define SDMA_PKT_Q_DEFERRED BIT(2)

/*
 * Maximum retry attempts to submit a TX request
 * before putting the process to sleep.
 */
#define MAX_DEFER_RETRY_COUNT 1

static unsigned initial_pkt_count = 8;

#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */

struct sdma_mmu_node;

struct user_sdma_iovec {
	struct list_head list;
	struct iovec iov;
	/* number of pages in this vector */
	unsigned npages;
	/* array of pinned pages for this vector */
	struct page **pages;
	/*
	 * offset into the virtual address space of the vector at
	 * which we last left off.
	 */
	u64 offset;
	struct sdma_mmu_node *node;
};

struct sdma_mmu_node {
	struct mmu_rb_node rb;
	struct hfi1_user_sdma_pkt_q *pq;
	atomic_t refcount;
	struct page **pages;
	unsigned npages;
};

/* evict operation argument */
struct evict_data {
	u32 cleared;	/* count evicted so far */
	u32 target;	/* target count to evict */
};
struct user_sdma_request {
	/* This is the original header from user space */
	struct hfi1_pkt_header hdr;

	/* Read mostly fields */
	struct hfi1_user_sdma_pkt_q *pq ____cacheline_aligned_in_smp;
	struct hfi1_user_sdma_comp_q *cq;
	/*
	 * Pointer to the SDMA engine for this request.
	 * Since different requests could be on different VLs,
	 * each request will need its own engine pointer.
	 */
	struct sdma_engine *sde;
	struct sdma_req_info info;
	/* TID array values copied from the tid_iov vector */
	u32 *tids;
	/* total length of the data in the request */
	u32 data_len;
	/* number of elements copied to the tids array */
	u16 n_tids;
	/*
	 * We copy the iovs for this request (based on
	 * info.iovcnt). These are only the data vectors.
	 */
	u8 data_iovs;
	s8 ahg_idx;

	/* Writeable fields shared with interrupt */
	u64 seqcomp ____cacheline_aligned_in_smp;
	u64 seqsubmitted;
	unsigned long flags;
	/* status of the last txreq completed */
	int status;

	/* Send side fields */
	struct list_head txps ____cacheline_aligned_in_smp;
	u64 seqnum;
	/*
	 * KDETH.OFFSET (TID) field
	 * The offset can cover multiple packets, depending on the
	 * size of the TID entry.
	 */
	u32 tidoffset;
	/*
	 * KDETH.Offset (Eager) field
	 * We need to remember the initial value so the headers
	 * can be updated properly.
	 */
	u32 koffset;
	u32 sent;
	/* TID index copied from the tid_iov vector */
	u16 tididx;
	/* progress index moving along the iovs array */
	u8 iov_idx;
	u8 done;

	struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
} ____cacheline_aligned_in_smp;

/*
 * A single txreq could span up to 3 physical pages when the MTU
 * is sufficiently large (> 4K). Each of the IOV pointers also
 * needs its own set of flags so that each vector can be handled
 * independently of the others.
 */
struct user_sdma_txreq {
	/* Packet header for the txreq */
	struct hfi1_pkt_header hdr;
	struct sdma_txreq txreq;
	struct list_head list;
	struct user_sdma_request *req;
	u16 flags;
	unsigned busycount;
	u64 seqnum;
};

#define SDMA_DBG(req, fmt, ...) \
	hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
		  (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
		  ##__VA_ARGS__)
#define SDMA_Q_DBG(pq, fmt, ...) \
	hfi1_cdbg(SDMA, "[%u:%u:%u] " fmt, (pq)->dd->unit, (pq)->ctxt, \
		  (pq)->subctxt, ##__VA_ARGS__)

static int user_sdma_send_pkts(struct user_sdma_request *req,
			       unsigned maxpkts);
static int num_user_pages(const struct iovec *iov);
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
static int pin_vector_pages(struct user_sdma_request *req,
			    struct user_sdma_iovec *iovec);
static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
			       unsigned start, unsigned npages);
static int check_header_template(struct user_sdma_request *req,
				 struct hfi1_pkt_header *hdr, u32 lrhlen,
				 u32 datalen);
static int set_txreq_header(struct user_sdma_request *req,
			    struct user_sdma_txreq *tx, u32 datalen);
static int set_txreq_header_ahg(struct user_sdma_request *req,
				struct user_sdma_txreq *tx, u32 len);
static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
				  struct hfi1_user_sdma_comp_q *cq,
				  u16 idx, enum hfi1_sdma_comp_state state,
				  int ret);
static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags);
static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);

static int defer_packet_queue(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *txreq,
	unsigned int seq);
static void activate_packet_queue(struct iowait *wait, int reason);
static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
			   unsigned long len);
static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode);
static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
			 void *arg2, bool *stop);
static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);
static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode);

static struct mmu_rb_ops sdma_rb_ops = {
	.filter = sdma_rb_filter,
	.insert = sdma_rb_insert,
	.evict = sdma_rb_evict,
	.remove = sdma_rb_remove,
	.invalidate = sdma_rb_invalidate
};
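/*
 * iowait sleep callback: decide what to do with a txreq the SDMA engine
 * could not accept. Returning -EAGAIN asks the caller to retry the
 * descriptor; returning -EBUSY marks the queue deferred and parks it on
 * the engine's dmawait list until activate_packet_queue() wakes it.
 */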
static int defer_packet_queue(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *txreq,
	unsigned seq)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
	struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
	struct user_sdma_txreq *tx =
		container_of(txreq, struct user_sdma_txreq, txreq);

	if (sdma_progress(sde, seq, txreq)) {
		if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
			goto eagain;
	}
	/*
	 * We are assuming that if the list is enqueued somewhere, it
	 * is to the dmawait list since that is the only place where
	 * it is supposed to be enqueued.
	 */
	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
	write_seqlock(&dev->iowait_lock);
	if (list_empty(&pq->busy.list))
		list_add_tail(&pq->busy.list, &sde->dmawait);
	write_sequnlock(&dev->iowait_lock);
	return -EBUSY;
eagain:
	return -EAGAIN;
}

static void activate_packet_queue(struct iowait *wait, int reason)
{
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
	xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
	wake_up(&wait->wait_dma);
}

static void sdma_kmem_cache_ctor(void *obj)
{
	struct user_sdma_txreq *tx = obj;

	memset(tx, 0, sizeof(*tx));
}
int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
				struct hfi1_filedata *fd)
{
	int ret = -ENOMEM;
	char buf[64];
	struct hfi1_devdata *dd;
	struct hfi1_user_sdma_comp_q *cq;
	struct hfi1_user_sdma_pkt_q *pq;
	unsigned long flags;

	if (!uctxt || !fd)
		return -EBADF;

	if (!hfi1_sdma_comp_ring_size)
		return -EINVAL;

	dd = uctxt->dd;

	pq = kzalloc(sizeof(*pq), GFP_KERNEL);
	if (!pq)
		return -ENOMEM;

	INIT_LIST_HEAD(&pq->list);
	pq->dd = dd;
	pq->ctxt = uctxt->ctxt;
	pq->subctxt = fd->subctxt;
	pq->n_max_reqs = hfi1_sdma_comp_ring_size;
	pq->state = SDMA_PKT_Q_INACTIVE;
	atomic_set(&pq->n_reqs, 0);
	init_waitqueue_head(&pq->wait);
	atomic_set(&pq->n_locked, 0);
	pq->mm = fd->mm;

	iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
		    activate_packet_queue, NULL);
	pq->reqidx = 0;

	pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
			   sizeof(*pq->reqs),
			   GFP_KERNEL);
	if (!pq->reqs)
		goto pq_reqs_nomem;

	pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size),
				 sizeof(*pq->req_in_use),
				 GFP_KERNEL);
	if (!pq->req_in_use)
		goto pq_reqs_no_in_use;

	snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
		 fd->subctxt);
	pq->txreq_cache = kmem_cache_create(buf,
					    sizeof(struct user_sdma_txreq),
					    L1_CACHE_BYTES,
					    SLAB_HWCACHE_ALIGN,
					    sdma_kmem_cache_ctor);
	if (!pq->txreq_cache) {
		dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
			   uctxt->ctxt);
		goto pq_txreq_nomem;
	}

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		goto cq_nomem;

	cq->comps = vmalloc_user(PAGE_ALIGN(sizeof(*cq->comps)
				 * hfi1_sdma_comp_ring_size));
	if (!cq->comps)
		goto cq_comps_nomem;

	cq->nentries = hfi1_sdma_comp_ring_size;

	ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
				   &pq->handler);
	if (ret) {
		dd_dev_err(dd, "Failed to register with MMU %d", ret);
		goto pq_mmu_fail;
	}

	fd->pq = pq;
	fd->cq = cq;

	spin_lock_irqsave(&uctxt->sdma_qlock, flags);
	list_add(&pq->list, &uctxt->sdma_queues);
	spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);

	return 0;

pq_mmu_fail:
	vfree(cq->comps);
cq_comps_nomem:
	kfree(cq);
cq_nomem:
	kmem_cache_destroy(pq->txreq_cache);
pq_txreq_nomem:
	kfree(pq->req_in_use);
pq_reqs_no_in_use:
	kfree(pq->reqs);
pq_reqs_nomem:
	kfree(pq);

	return ret;
}

int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_user_sdma_pkt_q *pq;
	unsigned long flags;

	hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit,
		  uctxt->ctxt, fd->subctxt);
	pq = fd->pq;
	if (pq) {
		if (pq->handler)
			hfi1_mmu_rb_unregister(pq->handler);
		spin_lock_irqsave(&uctxt->sdma_qlock, flags);
		if (!list_empty(&pq->list))
			list_del_init(&pq->list);
		spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
		iowait_sdma_drain(&pq->busy);
		/* Wait until all requests have been freed. */
		wait_event_interruptible(
			pq->wait,
			(ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
		kfree(pq->reqs);
		kfree(pq->req_in_use);
		kmem_cache_destroy(pq->txreq_cache);
		kfree(pq);
		fd->pq = NULL;
	}
	if (fd->cq) {
		vfree(fd->cq->comps);
		kfree(fd->cq);
		fd->cq = NULL;
	}
	return 0;
}
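/*
 * Map a destination LID to a small selector value used to spread user
 * traffic across SDMA engines. The static state below is not locked:
 * a racing first-touch memset writes identical bytes, and a lost update
 * to the mapping table merely perturbs the load-spreading heuristic.
 */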
static u8 dlid_to_selector(u16 dlid)
{
	static u8 mapping[256];
	static int initialized;
	static u8 next;
	int hash;

	if (!initialized) {
		memset(mapping, 0xFF, 256);
		initialized = 1;
	}

	hash = ((dlid >> 8) ^ dlid) & 0xFF;
	if (mapping[hash] == 0xFF) {
		mapping[hash] = next;
		next = (next + 1) & 0x7F;
	}

	return mapping[hash];
}

int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
				   struct iovec *iovec, unsigned long dim,
				   unsigned long *count)
{
	int ret = 0, i;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_user_sdma_pkt_q *pq = fd->pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	struct hfi1_devdata *dd = pq->dd;
	unsigned long idx = 0;
	u8 pcount = initial_pkt_count;
	struct sdma_req_info info;
	struct user_sdma_request *req;
	u8 opcode, sc, vl;
	int req_queued = 0;
	u16 dlid;
	u32 selector;

	if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
		hfi1_cdbg(
		   SDMA,
		   "[%u:%u:%u] First vector not big enough for header %lu/%lu",
		   dd->unit, uctxt->ctxt, fd->subctxt,
		   iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
		return -EINVAL;
	}
	ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
	if (ret) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
			  dd->unit, uctxt->ctxt, fd->subctxt, ret);
		return -EFAULT;
	}

	trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
				     (u16 *)&info);

	if (info.comp_idx >= hfi1_sdma_comp_ring_size) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Invalid comp index",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
		return -EINVAL;
	}

	/*
	 * Sanity check the header io vector count. We need at least one
	 * vector (the header), and it cannot be larger than the actual
	 * io vector count.
	 */
	if (req_iovcnt(info.ctrl) < 1 || req_iovcnt(info.ctrl) > dim) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Invalid iov count %d, dim %ld",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx,
			  req_iovcnt(info.ctrl), dim);
		return -EINVAL;
	}

	if (!info.fragsize) {
		hfi1_cdbg(SDMA,
			  "[%u:%u:%u:%u] Request does not specify fragsize",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
		return -EINVAL;
	}

	/* Try to claim the request. */
	if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in use",
			  dd->unit, uctxt->ctxt, fd->subctxt,
			  info.comp_idx);
		return -EBADSLT;
	}
	/*
	 * All safety checks have been done and this request has been claimed.
	 */
	hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
		  uctxt->ctxt, fd->subctxt, info.comp_idx);
	req = pq->reqs + info.comp_idx;
	req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
	req->data_len = 0;
	req->pq = pq;
	req->cq = cq;
	req->status = -1;
	req->ahg_idx = -1;
	req->iov_idx = 0;
	req->sent = 0;
	req->seqnum = 0;
	req->seqcomp = 0;
	req->seqsubmitted = 0;
	req->flags = 0;
	req->tids = NULL;
	req->done = 0;
	INIT_LIST_HEAD(&req->txps);

	memcpy(&req->info, &info, sizeof(info));

	if (req_opcode(info.ctrl) == EXPECTED) {
		/*
		 * An expected request must include TID info and at least
		 * one data vector.
		 */
		if (req->data_iovs < 2) {
			SDMA_DBG(req,
				 "Not enough vectors for expected request");
			ret = -EINVAL;
			goto free_req;
		}
		req->data_iovs--;
	}

	if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
		SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
			 MAX_VECTORS_PER_REQ);
		ret = -EINVAL;
		goto free_req;
	}
	/* Copy the header from the user buffer */
	ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
			     sizeof(req->hdr));
	if (ret) {
		SDMA_DBG(req, "Failed to copy header template (%d)", ret);
		ret = -EFAULT;
		goto free_req;
	}

	/* If Static rate control is not enabled, sanitize the header. */
	if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL))
		req->hdr.pbc[2] = 0;

	/* Validate the opcode. Do not trust packets from user space blindly. */
	opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
	if ((opcode & USER_OPCODE_CHECK_MASK) !=
	    USER_OPCODE_CHECK_VAL) {
		SDMA_DBG(req, "Invalid opcode (%d)", opcode);
		ret = -EINVAL;
		goto free_req;
	}
	/*
	 * Validate the vl. Do not trust packets from user space blindly.
	 * VL comes from PBC, SC comes from LRH, and the VL needs to
	 * match the SC look up.
	 */
	vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
	sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
	      (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
	if (vl >= dd->pport->vls_operational ||
	    vl != sc_to_vlt(dd, sc)) {
		SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
		ret = -EINVAL;
		goto free_req;
	}

	/* Checking P_KEY for requests from user-space */
	if (egress_pkey_check(dd->pport, req->hdr.lrh, req->hdr.bth, sc,
			      PKEY_CHECK_INVALID)) {
		ret = -EINVAL;
		goto free_req;
	}

	/*
	 * Also should check the BTH.lnh. If it says the next header is GRH then
	 * the RXE parsing will be off and will land in the middle of the KDETH
	 * or miss it entirely.
	 */
	if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
		SDMA_DBG(req, "User tried to pass in a GRH");
		ret = -EINVAL;
		goto free_req;
	}

	req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
	/*
	 * Calculate the initial TID offset based on the values of
	 * KDETH.OFFSET and KDETH.OM that are passed in.
	 */
	req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
		(KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
		 KDETH_OM_LARGE : KDETH_OM_SMALL);
	SDMA_DBG(req, "Initial TID offset %u", req->tidoffset);
	idx++;

	/* Save all the IO vector structures */
	for (i = 0; i < req->data_iovs; i++) {
		req->iovs[i].offset = 0;
		INIT_LIST_HEAD(&req->iovs[i].list);
		memcpy(&req->iovs[i].iov,
		       iovec + idx++,
		       sizeof(req->iovs[i].iov));
		ret = pin_vector_pages(req, &req->iovs[i]);
		if (ret) {
			req->data_iovs = i;
			req->status = ret;
			goto free_req;
		}
		req->data_len += req->iovs[i].iov.iov_len;
	}
	SDMA_DBG(req, "total data length %u", req->data_len);

	if (pcount > req->info.npkts)
		pcount = req->info.npkts;
	/*
	 * Copy any TID info.
	 * User space will provide the TID info only when the
	 * request type is EXPECTED. This is true even if there is
	 * only one packet in the request and the header is already
	 * set up. The reason for the singular TID case is that the
	 * driver needs to perform safety checks.
	 */
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);
		u32 *tmp;

		if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {
			ret = -EINVAL;
			goto free_req;
		}

		/*
		 * We have to copy all of the tids because they may vary
		 * in size and, therefore, the TID count might not be
		 * equal to the pkt count. However, there is no way to
		 * tell at this point.
		 */
		tmp = memdup_user(iovec[idx].iov_base,
				  ntids * sizeof(*req->tids));
		if (IS_ERR(tmp)) {
			ret = PTR_ERR(tmp);
			SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
				 ntids, ret);
			goto free_req;
		}
		req->tids = tmp;
		req->n_tids = ntids;
		req->tididx = 0;
		idx++;
	}

	dlid = be16_to_cpu(req->hdr.lrh[1]);
	selector = dlid_to_selector(dlid);
	selector += uctxt->ctxt + fd->subctxt;
	req->sde = sdma_select_user_engine(dd, selector, vl);

	if (!req->sde || !sdma_running(req->sde)) {
		ret = -ECOMM;
		goto free_req;
	}

	/* We don't need an AHG entry if the request contains only one packet */
	if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG))
		req->ahg_idx = sdma_ahg_alloc(req->sde);

	set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
	atomic_inc(&pq->n_reqs);
	req_queued = 1;
	/* Send the first N packets in the request to buy us some time */
	ret = user_sdma_send_pkts(req, pcount);
	if (unlikely(ret < 0 && ret != -EBUSY)) {
		req->status = ret;
		goto free_req;
	}

	/*
	 * It is possible that the SDMA engine would have processed all the
	 * submitted packets by the time we get here. Therefore, only set
	 * packet queue state to ACTIVE if there are still uncompleted
	 * requests.
	 */
	if (atomic_read(&pq->n_reqs))
		xchg(&pq->state, SDMA_PKT_Q_ACTIVE);

	/*
	 * This is a somewhat blocking send implementation.
	 * The driver will block the caller until all packets of the
	 * request have been submitted to the SDMA engine. However, it
	 * will not wait for send completions.
	 */
	while (req->seqsubmitted != req->info.npkts) {
		ret = user_sdma_send_pkts(req, pcount);
		if (ret < 0) {
			if (ret != -EBUSY) {
				req->status = ret;
				set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
				if (ACCESS_ONCE(req->seqcomp) ==
				    req->seqsubmitted - 1)
					goto free_req;
				return ret;
			}
			wait_event_interruptible_timeout(
				pq->busy.wait_dma,
				(pq->state == SDMA_PKT_Q_ACTIVE),
				msecs_to_jiffies(
					SDMA_IOWAIT_TIMEOUT));
		}
	}
	*count += idx;
	return 0;
free_req:
	user_sdma_free_request(req, true);
	if (req_queued)
		pq_update(pq);
	set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
	return ret;
}

static inline u32 compute_data_length(struct user_sdma_request *req,
				      struct user_sdma_txreq *tx)
{
	/*
	 * Determine the proper size of the packet data.
	 * The size of the data of the first packet is in the header
	 * template. However, it includes the header and ICRC, which need
	 * to be subtracted.
	 * The minimum representable packet data length in a header is 4 bytes;
	 * therefore, when the requested data length is less than 4 bytes,
	 * there's only one packet, and the packet data length is equal to
	 * the request data length.
	 * The size of the remaining packets is the minimum of the frag
	 * size (MTU) or remaining data in the request.
	 */
	u32 len;

	if (!req->seqnum) {
		if (req->data_len < sizeof(u32))
			len = req->data_len;
		else
			len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
			       (sizeof(tx->hdr) - 4));
	} else if (req_opcode(req->info.ctrl) == EXPECTED) {
		u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
			PAGE_SIZE;
		/*
		 * Get the data length based on the remaining space in the
		 * TID pair.
		 */
		len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
		/* If we've filled up the TID pair, move to the next one. */
		if (unlikely(!len) && ++req->tididx < req->n_tids &&
		    req->tids[req->tididx]) {
			tidlen = EXP_TID_GET(req->tids[req->tididx],
					     LEN) * PAGE_SIZE;
			req->tidoffset = 0;
			len = min_t(u32, tidlen, req->info.fragsize);
		}
		/*
		 * Since the TID pairs map entire pages, make sure that we
		 * are not going to try to send more data than we have
		 * remaining.
		 */
		len = min(len, req->data_len - req->sent);
	} else {
		len = min(req->data_len - req->sent, (u32)req->info.fragsize);
	}
	SDMA_DBG(req, "Data Length = %u", len);
	return len;
}

static inline u32 pad_len(u32 len)
{
	if (len & (sizeof(u32) - 1))
		len += sizeof(u32) - (len & (sizeof(u32) - 1));
	return len;
}
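/* Illustrative: pad_len(9) == 12, pad_len(12) == 12 (round up to a 4-byte multiple). */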

static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
{
	/* (Size of complete header - size of PBC) + 4B ICRC + data length */
	return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
}
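/*
 * Build and submit up to maxpkts txreqs for one user request: compute
 * the per-packet payload length, patch the header (directly or via an
 * AHG entry), attach the pinned payload pages, and hand the txreq list
 * to the SDMA engine.
 */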
static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
{
	int ret = 0, count;
	unsigned npkts = 0;
	struct user_sdma_txreq *tx = NULL;
	struct hfi1_user_sdma_pkt_q *pq = NULL;
	struct user_sdma_iovec *iovec = NULL;

	if (!req->pq)
		return -EINVAL;

	pq = req->pq;

	/* If tx completion has reported an error, we are done. */
	if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
		set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
		return -EFAULT;
	}

	/*
	 * Check if we might have sent the entire request already
	 */
	if (unlikely(req->seqnum == req->info.npkts)) {
		if (!list_empty(&req->txps))
			goto dosend;
		return ret;
	}

	if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
		maxpkts = req->info.npkts - req->seqnum;

	while (npkts < maxpkts) {
		u32 datalen = 0, queued = 0, data_sent = 0;
		u64 iov_offset = 0;

		/*
		 * Check whether any of the completions have come back
		 * with errors. If so, we are not going to process any
		 * more packets from this request.
		 */
		if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
			set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
			return -EFAULT;
		}

		tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
		if (!tx)
			return -ENOMEM;

		tx->flags = 0;
		tx->req = req;
		tx->busycount = 0;
		INIT_LIST_HEAD(&tx->list);

		/*
		 * For the last packet set the ACK request
		 * and disable header suppression.
		 */
		if (req->seqnum == req->info.npkts - 1)
			tx->flags |= (TXREQ_FLAGS_REQ_ACK |
				      TXREQ_FLAGS_REQ_DISABLE_SH);

		/*
		 * Calculate the payload size - this is min of the fragment
		 * (MTU) size or the remaining bytes in the request but only
		 * if we have payload data.
		 */
		if (req->data_len) {
			iovec = &req->iovs[req->iov_idx];
			if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
				if (++req->iov_idx == req->data_iovs) {
					ret = -EFAULT;
					goto free_txreq;
				}
				iovec = &req->iovs[req->iov_idx];
				WARN_ON(iovec->offset);
			}

			datalen = compute_data_length(req, tx);

			/*
			 * Disable header suppression for payloads <= 8 DWS.
			 * If there is an uncorrectable error in the receive
			 * data FIFO when the received payload size is less
			 * than or equal to 8 DWS, then RxDmaDataFifoRdUncErr
			 * is not reported; RHF.EccErr is set instead, but
			 * only if the header is not suppressed.
			 */
			if (!datalen) {
				SDMA_DBG(req,
					 "Request has data but pkt len is 0");
				ret = -EFAULT;
				goto free_tx;
			} else if (datalen <= 32) {
				tx->flags |= TXREQ_FLAGS_REQ_DISABLE_SH;
			}
		}

		if (req->ahg_idx >= 0) {
			if (!req->seqnum) {
				u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
				u32 lrhlen = get_lrh_len(req->hdr,
							 pad_len(datalen));
				/*
				 * Copy the request header into the tx header
				 * because the HW needs a cacheline-aligned
				 * address.
				 * This copy can be optimized out if the hdr
				 * member of user_sdma_request were also
				 * cacheline aligned.
				 */
				memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
				if (PBC2LRH(pbclen) != lrhlen) {
					pbclen = (pbclen & 0xf000) |
						LRH2PBC(lrhlen);
					tx->hdr.pbc[0] = cpu_to_le16(pbclen);
				}
				ret = check_header_template(req, &tx->hdr,
							    lrhlen, datalen);
				if (ret)
					goto free_tx;
				ret = sdma_txinit_ahg(&tx->txreq,
						      SDMA_TXREQ_F_AHG_COPY,
						      sizeof(tx->hdr) + datalen,
						      req->ahg_idx, 0, NULL, 0,
						      user_sdma_txreq_cb);
				if (ret)
					goto free_tx;
				ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq,
							&tx->hdr,
							sizeof(tx->hdr));
				if (ret)
					goto free_txreq;
			} else {
				int changes;

				changes = set_txreq_header_ahg(req, tx,
							       datalen);
				if (changes < 0)
					goto free_tx;
			}
		} else {
			ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
					  datalen, user_sdma_txreq_cb);
			if (ret)
				goto free_tx;
			/*
			 * Modify the header for this packet. This only needs
			 * to be done if we are not going to use AHG. Otherwise,
			 * the HW will do it based on the changes we gave it
			 * during sdma_txinit_ahg().
			 */
			ret = set_txreq_header(req, tx, datalen);
			if (ret)
				goto free_txreq;
		}

		/*
		 * If the request contains any data vectors, add up to
		 * fragsize bytes to the descriptor.
		 */
		while (queued < datalen &&
		       (req->sent + data_sent) < req->data_len) {
			unsigned long base, offset;
			unsigned pageidx, len;

			base = (unsigned long)iovec->iov.iov_base;
			offset = offset_in_page(base + iovec->offset +
						iov_offset);
			pageidx = (((iovec->offset + iov_offset +
				     base) - (base & PAGE_MASK)) >> PAGE_SHIFT);
			len = offset + req->info.fragsize > PAGE_SIZE ?
				PAGE_SIZE - offset : req->info.fragsize;
			len = min((datalen - queued), len);
			ret = sdma_txadd_page(pq->dd, &tx->txreq,
					      iovec->pages[pageidx],
					      offset, len);
			if (ret) {
				SDMA_DBG(req, "SDMA txreq add page failed %d\n",
					 ret);
				goto free_txreq;
			}
			iov_offset += len;
			queued += len;
			data_sent += len;
			if (unlikely(queued < datalen &&
				     pageidx == iovec->npages &&
				     req->iov_idx < req->data_iovs - 1)) {
				iovec->offset += iov_offset;
				iovec = &req->iovs[++req->iov_idx];
				iov_offset = 0;
			}
		}
		/*
		 * The txreq was submitted successfully so we can update
		 * the counters.
		 */
		req->koffset += datalen;
		if (req_opcode(req->info.ctrl) == EXPECTED)
			req->tidoffset += datalen;
		req->sent += data_sent;
		if (req->data_len)
			iovec->offset += iov_offset;
		list_add_tail(&tx->txreq.list, &req->txps);
		/*
		 * It is important to increment this here as it is used to
		 * generate the BTH.PSN and, therefore, can't be bulk-updated
		 * outside of the loop.
		 */
		tx->seqnum = req->seqnum++;
		npkts++;
	}
dosend:
	ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
	req->seqsubmitted += count;
	if (req->seqsubmitted == req->info.npkts) {
		WRITE_ONCE(req->done, 1);
		/*
		 * The txreq has already been submitted to the HW queue
		 * so we can free the AHG entry now. Corruption will not
		 * happen due to the sequential manner in which
		 * descriptors are processed.
		 */
		if (req->ahg_idx >= 0)
			sdma_ahg_free(req->sde, req->ahg_idx);
	}
	return ret;

free_txreq:
	sdma_txclean(pq->dd, &tx->txreq);
free_tx:
	kmem_cache_free(pq->txreq_cache, tx);
	return ret;
}

/*
 * How many pages in this iovec element?
 */
static inline int num_user_pages(const struct iovec *iov)
{
	const unsigned long addr = (unsigned long)iov->iov_base;
	const unsigned long len = iov->iov_len;
	const unsigned long spage = addr & PAGE_MASK;
	const unsigned long epage = (addr + len - 1) & PAGE_MASK;

	return 1 + ((epage - spage) >> PAGE_SHIFT);
}

static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
{
	struct evict_data evict_data;

	evict_data.cleared = 0;
	evict_data.target = npages;
	hfi1_mmu_rb_evict(pq->handler, &evict_data);
	return evict_data.cleared;
}
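/*
 * Pin (or reuse already-pinned) user pages for one data iovec. Pinnings
 * are cached in the per-queue MMU rb tree, keyed by virtual address: an
 * exactly matching cached node is reused with its refcount bumped; a
 * partially overlapping node is extracted, extended by pinning only the
 * missing tail pages, and re-inserted. When the pinned-page budget is
 * exceeded, sdma_cache_evict() above is asked to reclaim cached entries
 * first.
 */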
static int pin_vector_pages(struct user_sdma_request *req,
			    struct user_sdma_iovec *iovec)
{
	int ret = 0, pinned, npages, cleared;
	struct page **pages;
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct sdma_mmu_node *node = NULL;
	struct mmu_rb_node *rb_node;
	bool extracted;

	extracted =
		hfi1_mmu_rb_remove_unless_exact(pq->handler,
						(unsigned long)
						iovec->iov.iov_base,
						iovec->iov.iov_len, &rb_node);
	if (rb_node) {
		node = container_of(rb_node, struct sdma_mmu_node, rb);
		if (!extracted) {
			atomic_inc(&node->refcount);
			iovec->pages = node->pages;
			iovec->npages = node->npages;
			iovec->node = node;
			return 0;
		}
	}

	if (!node) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		node->rb.addr = (unsigned long)iovec->iov.iov_base;
		node->pq = pq;
		atomic_set(&node->refcount, 0);
	}

	npages = num_user_pages(&iovec->iov);
	if (node->npages < npages) {
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			SDMA_DBG(req, "Failed page array alloc");
			ret = -ENOMEM;
			goto bail;
		}
		memcpy(pages, node->pages, node->npages * sizeof(*pages));

		npages -= node->npages;

retry:
		if (!hfi1_can_pin_pages(pq->dd, pq->mm,
					atomic_read(&pq->n_locked), npages)) {
			cleared = sdma_cache_evict(pq, npages);
			if (cleared >= npages)
				goto retry;
		}
		pinned = hfi1_acquire_user_pages(pq->mm,
			((unsigned long)iovec->iov.iov_base +
			 (node->npages * PAGE_SIZE)), npages, 0,
			pages + node->npages);
		if (pinned < 0) {
			kfree(pages);
			ret = pinned;
			goto bail;
		}
		if (pinned != npages) {
			unpin_vector_pages(pq->mm, pages, node->npages,
					   pinned);
			ret = -EFAULT;
			goto bail;
		}
		kfree(node->pages);
		node->rb.len = iovec->iov.iov_len;
		node->pages = pages;
		node->npages += pinned;
		npages = node->npages;
		atomic_add(pinned, &pq->n_locked);
	}
	iovec->pages = node->pages;
	iovec->npages = npages;
	iovec->node = node;

	ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
	if (ret) {
		atomic_sub(node->npages, &pq->n_locked);
		iovec->node = NULL;
		goto bail;
	}
	return 0;
bail:
	if (rb_node)
		unpin_vector_pages(pq->mm, node->pages, 0, node->npages);
	kfree(node);
	return ret;
}

static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
			       unsigned start, unsigned npages)
{
	hfi1_release_user_pages(mm, pages + start, npages, false);
	kfree(pages);
}
1265static int check_header_template(struct user_sdma_request *req,
1266 struct hfi1_pkt_header *hdr, u32 lrhlen,
1267 u32 datalen)
1268{
1269 /*
1270 * Perform safety checks for any type of packet:
1271 * - transfer size is multiple of 64bytes
Ira Weinyc4929802016-07-27 21:08:42 -04001272 * - packet length is multiple of 4 bytes
Mike Marciniszyn77241052015-07-30 15:17:43 -04001273 * - packet length is not larger than MTU size
1274 *
1275 * These checks are only done for the first packet of the
1276 * transfer since the header is "given" to us by user space.
1277 * For the remainder of the packets we compute the values.
1278 */
Ira Weinyc4929802016-07-27 21:08:42 -04001279 if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 ||
Mike Marciniszyn77241052015-07-30 15:17:43 -04001280 lrhlen > get_lrh_len(*hdr, req->info.fragsize))
1281 return -EINVAL;
1282
1283 if (req_opcode(req->info.ctrl) == EXPECTED) {
1284 /*
1285 * The header is checked only on the first packet. Furthermore,
1286 * we ensure that at least one TID entry is copied when the
1287 * request is submitted. Therefore, we don't have to verify that
1288 * tididx points to something sane.
1289 */
1290 u32 tidval = req->tids[req->tididx],
1291 tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE,
1292 tididx = EXP_TID_GET(tidval, IDX),
1293 tidctrl = EXP_TID_GET(tidval, CTRL),
1294 tidoff;
1295 __le32 kval = hdr->kdeth.ver_tid_offset;
1296
1297 tidoff = KDETH_GET(kval, OFFSET) *
1298 (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
1299 KDETH_OM_LARGE : KDETH_OM_SMALL);
1300 /*
1301 * Expected receive packets have the following
1302 * additional checks:
1303 * - offset is not larger than the TID size
1304 * - TIDCtrl values match between header and TID array
1305 * - TID indexes match between header and TID array
1306 */
1307 if ((tidoff + datalen > tidlen) ||
1308 KDETH_GET(kval, TIDCTRL) != tidctrl ||
1309 KDETH_GET(kval, TID) != tididx)
1310 return -EINVAL;
1311 }
1312 return 0;
1313}
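
/*
 * Worked example for the expected-TID check above (illustrative
 * numbers; assumes the driver's KDETH_OM_SMALL = 4 and
 * KDETH_OM_LARGE = 64 byte offset units): with KDETH.OM = 0 and
 * KDETH.OFFSET = 16, tidoff = 16 * 4 = 64 bytes.  If the TID entry
 * covers two 4K pages (tidlen = 8192), a datalen of 8192 gives
 * tidoff + datalen = 8256 > 8192, so the packet is rejected with
 * -EINVAL.
 */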

/*
 * Correctly set the BTH.PSN field based on the type of transfer.
 * Eager packets can just increment the PSN, but expected packets
 * encode generation and sequence in the BTH.PSN field, so just
 * incrementing will result in errors.
 */
static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
{
	u32 val = be32_to_cpu(bthpsn),
		mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull :
			0xffffffull),
		psn = val & mask;
	if (expct)
		psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK);
	else
		psn = psn + frags;
	return psn & mask;
}
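
/*
 * Worked example (a sketch; assumes the driver's BTH_SEQ_MASK of
 * 0x7ff, i.e. an 11-bit sequence field, and the 24-bit PSN mask):
 * for an eager transfer with psn = 0xfffe and frags = 4 the whole
 * value simply wraps within the mask: (0xfffe + 4) & 0xffffff =
 * 0x010002.  For an expected transfer with psn = 0x0127fe
 * (generation in the upper bits, sequence 0x7fe) and frags = 4,
 * only the sequence wraps: (0x7fe + 4) & 0x7ff = 0x002, giving
 * 0x012002 with the generation bits untouched.
 */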

static int set_txreq_header(struct user_sdma_request *req,
			    struct user_sdma_txreq *tx, u32 datalen)
{
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct hfi1_pkt_header *hdr = &tx->hdr;
	u8 omfactor; /* KDETH.OM */
	u16 pbclen;
	int ret;
	u32 tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));

	/* Copy the header template to the request before modification */
	memcpy(hdr, &req->hdr, sizeof(*hdr));

	/*
	 * Check if the PBC and LRH lengths are mismatched. If so,
	 * adjust both in the header.
	 */
	pbclen = le16_to_cpu(hdr->pbc[0]);
	if (PBC2LRH(pbclen) != lrhlen) {
		pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
		hdr->pbc[0] = cpu_to_le16(pbclen);
		hdr->lrh[2] = cpu_to_be16(lrhlen >> 2);
		/*
		 * Third packet
		 * This is the first packet in the sequence that has
		 * a "static" size that can be used for the rest of
		 * the packets (besides the last one).
		 */
		if (unlikely(req->seqnum == 2)) {
			/*
			 * From this point on the lengths in both the
			 * PBC and LRH are the same until the last
			 * packet.
			 * Adjust the template so we don't have to update
			 * every packet.
			 */
			req->hdr.pbc[0] = hdr->pbc[0];
			req->hdr.lrh[2] = hdr->lrh[2];
		}
	}
	/*
	 * We only have to modify the header if this is not the
	 * first packet in the request. Otherwise, we use the
	 * header given to us.
	 */
	if (unlikely(!req->seqnum)) {
		ret = check_header_template(req, hdr, lrhlen, datalen);
		if (ret)
			return ret;
		goto done;
	}

	hdr->bth[2] = cpu_to_be32(
		set_pkt_bth_psn(hdr->bth[2],
				(req_opcode(req->info.ctrl) == EXPECTED),
				req->seqnum));

	/* Set ACK request on last packet */
	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
		hdr->bth[2] |= cpu_to_be32(1UL << 31);

	/* Set the new offset */
	hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
	/* Expected packets have to fill in the new TID information */
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		tidval = req->tids[req->tididx];
		/*
		 * If the offset puts us at the end of the current TID,
		 * advance everything.
		 */
		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
					 PAGE_SIZE)) {
			req->tidoffset = 0;
			/*
			 * Since we don't copy all the TIDs all at once,
			 * we have to check again.
			 */
			if (++req->tididx > req->n_tids - 1 ||
			    !req->tids[req->tididx]) {
				return -EINVAL;
			}
			tidval = req->tids[req->tididx];
		}
		omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
			   KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE_SHIFT :
			   KDETH_OM_SMALL_SHIFT;
		/* Set KDETH.TIDCtrl based on value for this TID. */
		KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL,
			  EXP_TID_GET(tidval, CTRL));
		/* Set KDETH.TID based on value for this TID */
		KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
			  EXP_TID_GET(tidval, IDX));
		/* Clear KDETH.SH when DISABLE_SH flag is set */
		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH))
			KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
		/*
		 * Set the KDETH.OFFSET and KDETH.OM based on size of
		 * transfer.
		 */
		SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
			 req->tidoffset, req->tidoffset >> omfactor,
			 omfactor != KDETH_OM_SMALL_SHIFT);
		KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
			  req->tidoffset >> omfactor);
		KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
			  omfactor != KDETH_OM_SMALL_SHIFT);
	}
done:
	trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
				    req->info.comp_idx, hdr, tidval);
	return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
}
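
/*
 * Note on the omfactor selection above (a sketch; assumes the
 * driver's KDETH_OM_SMALL_SHIFT = 2 and KDETH_OM_LARGE_SHIFT = 6,
 * i.e. 4-byte and 64-byte offset units): KDETH.OFFSET is a 15-bit
 * field holding the byte offset divided by the OM unit, so 4-byte
 * units are used until the TID reaches KDETH_OM_MAX_SIZE, beyond
 * which the offset would no longer fit; from there 64-byte units
 * are used and KDETH.OM is set to 1.
 */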

static int set_txreq_header_ahg(struct user_sdma_request *req,
				struct user_sdma_txreq *tx, u32 datalen)
{
	u32 ahg[AHG_KDETH_ARRAY_SIZE];
	int diff = 0;
	u8 omfactor; /* KDETH.OM */
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct hfi1_pkt_header *hdr = &req->hdr;
	u16 pbclen = le16_to_cpu(hdr->pbc[0]);
	u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));

	if (PBC2LRH(pbclen) != lrhlen) {
		/* PBC.PbcLengthDWs */
		AHG_HEADER_SET(ahg, diff, 0, 0, 12,
			       cpu_to_le16(LRH2PBC(lrhlen)));
		/* LRH.PktLen (we need the full 16 bits due to byte swap) */
		AHG_HEADER_SET(ahg, diff, 3, 0, 16,
			       cpu_to_be16(lrhlen >> 2));
	}

	/*
	 * Do the common updates
	 */
	/* BTH.PSN and BTH.A */
	val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
		(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
		val32 |= 1UL << 31;
	AHG_HEADER_SET(ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
	AHG_HEADER_SET(ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
	/* KDETH.Offset */
	AHG_HEADER_SET(ahg, diff, 15, 0, 16,
		       cpu_to_le16(req->koffset & 0xffff));
	AHG_HEADER_SET(ahg, diff, 15, 16, 16, cpu_to_le16(req->koffset >> 16));
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		__le16 val;

		tidval = req->tids[req->tididx];

		/*
		 * If the offset puts us at the end of the current TID,
		 * advance everything.
		 */
		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
					 PAGE_SIZE)) {
			req->tidoffset = 0;
			/*
			 * Since we don't copy all the TIDs all at once,
			 * we have to check again.
			 */
			if (++req->tididx > req->n_tids - 1 ||
			    !req->tids[req->tididx])
				return -EINVAL;
			tidval = req->tids[req->tididx];
		}
		omfactor = ((EXP_TID_GET(tidval, LEN) * PAGE_SIZE) >=
			    KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT :
			    KDETH_OM_SMALL_SHIFT;
		/* KDETH.OM and KDETH.OFFSET (TID) */
		AHG_HEADER_SET(ahg, diff, 7, 0, 16,
			       ((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
				((req->tidoffset >> omfactor)
				 & 0x7fff)));
		/* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
		val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
				  (EXP_TID_GET(tidval, IDX) & 0x3ff));

		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH)) {
			val |= cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
						      INTR) <<
					    AHG_KDETH_INTR_SHIFT));
		} else {
			val |= KDETH_GET(hdr->kdeth.ver_tid_offset, SH) ?
			       cpu_to_le16(0x1 << AHG_KDETH_SH_SHIFT) :
			       cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
						      INTR) <<
					    AHG_KDETH_INTR_SHIFT));
		}

		AHG_HEADER_SET(ahg, diff, 7, 16, 14, val);
	}
	if (diff < 0)
		return diff;

	trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
					req->info.comp_idx, req->sde->this_idx,
					req->ahg_idx, ahg, diff, tidval);
	sdma_txinit_ahg(&tx->txreq,
			SDMA_TXREQ_F_USE_AHG,
			datalen, req->ahg_idx, diff,
			ahg, sizeof(req->hdr),
			user_sdma_txreq_cb);

	return diff;
}
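
/*
 * A note on the AHG updates built above (inferred from the
 * AHG_HEADER_SET arguments; treat the field map as an illustration,
 * not a hardware reference): each descriptor names a 32-bit dword
 * index into the packet header, a bit offset, a field width, and a
 * replacement value.  With the header laid out as PBC (dwords 0-1),
 * LRH (2-3), BTH (4-6) and KDETH (7 onward), dword 6 is BTH[2] (the
 * PSN word) and dword 15 is the KDETH software offset word, matching
 * the comments above.  The engine applies these per-packet deltas to
 * the template stored at req->ahg_idx, so only the fields that change
 * between packets are rewritten.
 */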

/*
 * SDMA tx request completion callback. Called when the SDMA progress
 * state machine gets notification that the SDMA descriptors for this
 * tx request have been processed by the DMA engine. Called in
 * interrupt context.
 */
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
{
	struct user_sdma_txreq *tx =
		container_of(txreq, struct user_sdma_txreq, txreq);
	struct user_sdma_request *req;
	struct hfi1_user_sdma_pkt_q *pq;
	struct hfi1_user_sdma_comp_q *cq;
	u16 idx;

	if (!tx->req)
		return;

	req = tx->req;
	pq = req->pq;
	cq = req->cq;

	if (status != SDMA_TXREQ_S_OK) {
		SDMA_DBG(req, "SDMA completion with error %d",
			 status);
		set_bit(SDMA_REQ_HAS_ERROR, &req->flags);
	}

	req->seqcomp = tx->seqnum;
	kmem_cache_free(pq->txreq_cache, tx);
	tx = NULL;

	idx = req->info.comp_idx;
	if (req->status == -1 && status == SDMA_TXREQ_S_OK) {
		if (req->seqcomp == req->info.npkts - 1) {
			req->status = 0;
			user_sdma_free_request(req, false);
			pq_update(pq);
			set_comp_state(pq, cq, idx, COMPLETE, 0);
		}
	} else {
		if (status != SDMA_TXREQ_S_OK)
			req->status = status;
		if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) &&
		    (READ_ONCE(req->done) ||
		     test_bit(SDMA_REQ_DONE_ERROR, &req->flags))) {
			user_sdma_free_request(req, false);
			pq_update(pq);
			set_comp_state(pq, cq, idx, ERROR, req->status);
		}
	}
}
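
/*
 * Ordering note for the error path above (reasoning from the fields
 * used here, not from separate documentation): descriptors on a given
 * SDMA engine complete in submission order, so a failed request may
 * only be torn down once its last submitted txreq has completed
 * (seqcomp == seqsubmitted - 1) and the submission side is done with
 * the request (req->done set, or SDMA_REQ_DONE_ERROR observed);
 * otherwise the submitter could still be referencing *req.
 */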

static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
{
	if (atomic_dec_and_test(&pq->n_reqs)) {
		xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
		wake_up(&pq->wait);
	}
}

static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
{
	if (!list_empty(&req->txps)) {
		struct sdma_txreq *t, *p;

		list_for_each_entry_safe(t, p, &req->txps, list) {
			struct user_sdma_txreq *tx =
				container_of(t, struct user_sdma_txreq, txreq);
			list_del_init(&t->list);
			sdma_txclean(req->pq->dd, t);
			kmem_cache_free(req->pq->txreq_cache, tx);
		}
	}
	if (req->data_iovs) {
		struct sdma_mmu_node *node;
		int i;

		for (i = 0; i < req->data_iovs; i++) {
			node = req->iovs[i].node;
			if (!node)
				continue;

			if (unpin)
				hfi1_mmu_rb_remove(req->pq->handler,
						   &node->rb);
			else
				atomic_dec(&node->refcount);
		}
	}
	kfree(req->tids);
	clear_bit(req->info.comp_idx, req->pq->req_in_use);
}

static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
				  struct hfi1_user_sdma_comp_q *cq,
				  u16 idx, enum hfi1_sdma_comp_state state,
				  int ret)
{
	hfi1_cdbg(SDMA, "[%u:%u:%u:%u] Setting completion status %u %d",
		  pq->dd->unit, pq->ctxt, pq->subctxt, idx, state, ret);
	if (state == ERROR)
		cq->comps[idx].errcode = -ret;
	smp_wmb(); /* make sure errcode is visible first */
	cq->comps[idx].status = state;
	trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
					idx, state, ret);
}
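/*
 * mmu_rb filter callback: a node matches on its starting address
 * only; the len argument is deliberately unused, so a lookup hits
 * whenever user space resubmits an iovec with the same base address,
 * and the pinning path earlier in this file then extends the cached
 * node if the new length is larger.
 */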
static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
			   unsigned long len)
{
	return (bool)(node->addr == addr);
}

static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	atomic_inc(&node->refcount);
	return 0;
}

/*
 * Return 1 to remove the node from the rb tree and call the remove op.
 *
 * Called with the rb tree lock held.
 */
static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
			 void *evict_arg, bool *stop)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);
	struct evict_data *evict_data = evict_arg;

	/* is this node still being used? */
	if (atomic_read(&node->refcount))
		return 0; /* keep this node */

	/* this node will be evicted, add its pages to our count */
	evict_data->cleared += node->npages;

	/* have enough pages been cleared? */
	if (evict_data->cleared >= evict_data->target)
		*stop = true;

	return 1; /* remove this node */
}
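
/*
 * Usage sketch for the evict callback (hedged: this describes how the
 * mmu_rb eviction API drives it, based on the evict_data fields used
 * above): when pinning new pages would exceed the cache limit, the
 * pinning path asks the rb code to evict a target number of pages;
 * the rb code walks nodes under the tree lock calling sdma_rb_evict()
 * until *stop is set or the walk ends, then invokes the remove op
 * (sdma_rb_remove below) for every node that returned 1.
 */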

static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	atomic_sub(node->npages, &node->pq->n_locked);

	unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);

	kfree(node);
}

static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
{
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	if (!atomic_read(&node->refcount))
		return 1;
	return 0;
}