/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/bitmap.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"
#include "vnic.h"
#include "exp_rcv.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES 5
/*
 * Minimum number of receive buffers we want per user context, beyond
 * those reserved for the driver itself.
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO);
MODULE_PARM_DESC(
	num_user_contexts, "Set max number of user contexts to use");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (8 << 20); /* 8MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B (default), 32 - 128B");

unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits pass this many blocks (in percent of allocated blocks, 0 is off)");

static inline u64 encode_rcv_header_entry_size(u16 size);

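/* idr mapping unit numbers to their hfi1_devdata; updated under hfi1_devs_lock */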
static struct idr hfi1_unit_table;

static int hfi1_create_kctxt(struct hfi1_devdata *dd,
			     struct hfi1_pportdata *ppd)
{
	struct hfi1_ctxtdata *rcd;
	int ret;

	/* The control context must always be context 0 */
	BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

	ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
	if (ret < 0) {
		dd_dev_err(dd, "Kernel receive context allocation failed\n");
		return ret;
	}

	/*
	 * Set up the kernel context flags here and now because they use
	 * default values for all receive side memories.  User contexts will
	 * be handled as they are created.
	 */
	rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
		HFI1_CAP_KGET(NODROP_RHQ_FULL) |
		HFI1_CAP_KGET(NODROP_EGR_FULL) |
		HFI1_CAP_KGET(DMA_RTAIL);

	/* Control context must use DMA_RTAIL */
	if (rcd->ctxt == HFI1_CTRL_CTXT)
		rcd->flags |= HFI1_CAP_DMA_RTAIL;
	rcd->seq_cnt = 1;

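	/* each kernel context gets its own ACK send context */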
	rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
	if (!rcd->sc) {
		dd_dev_err(dd, "Kernel send context allocation failed\n");
		return -ENOMEM;
	}
	hfi1_init_ctxt(rcd->sc);

	return 0;
}

/*
 * Create the receive context array and one or more kernel contexts
 */
int hfi1_create_kctxts(struct hfi1_devdata *dd)
{
	u16 i;
	int ret;

	dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd),
			       GFP_KERNEL, dd->node);
	if (!dd->rcd)
		return -ENOMEM;

	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		ret = hfi1_create_kctxt(dd, dd->pport);
		if (ret)
			goto bail;
	}

	return 0;
bail:
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
		hfi1_free_ctxt(dd->rcd[i]);

	/* All the contexts should be freed, free the array */
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}

/*
 * Helper routines for the receive context reference count (rcd and uctxt).
 */
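/*
 * A context is created with a single reference (the kref_init() below,
 * called from allocate_rcd_index()).  Transient users pair hfi1_rcd_get()
 * with hfi1_rcd_put(); the final put is hfi1_free_ctxt(), which drops the
 * initial reference and lands in hfi1_rcd_free().
 */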
static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
{
	kref_init(&rcd->kref);
}

/**
 * hfi1_rcd_free - clean up when the last reference to the rcd is dropped
 * @kref: kref embedded in an initialized rcd data structure
 *
 */
static void hfi1_rcd_free(struct kref *kref)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd =
		container_of(kref, struct hfi1_ctxtdata, kref);

	hfi1_free_ctxtdata(rcd->dd, rcd);

	spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
	rcd->dd->rcd[rcd->ctxt] = NULL;
	spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);

	kfree(rcd);
}

/**
 * hfi1_rcd_put - decrement reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to put a reference after the init.
 */
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
{
	if (rcd)
		return kref_put(&rcd->kref, hfi1_rcd_free);

	return 0;
}

/**
 * hfi1_rcd_get - increment reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to get a reference after the init.
 */
void hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
{
	kref_get(&rcd->kref);
}

/**
 * allocate_rcd_index - allocate an rcd index from the rcd array
 * @dd: pointer to a valid devdata structure
 * @rcd: rcd data structure to assign
 * @index: pointer to index that is allocated
 *
 * Find an empty index in the rcd array, and assign the given rcd to it.
 * If the array is full, return -EBUSY.
 *
 */
static int allocate_rcd_index(struct hfi1_devdata *dd,
			      struct hfi1_ctxtdata *rcd, u16 *index)
{
	unsigned long flags;
	u16 ctxt;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt < dd->num_rcv_contexts) {
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		hfi1_rcd_init(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (ctxt >= dd->num_rcv_contexts)
		return -EBUSY;

	*index = ctxt;

	return 0;
}

/**
 * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
 * array
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
 * ctxt index is valid.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
						 u16 ctxt)
{
	if (ctxt < dd->num_rcv_contexts)
		return hfi1_rcd_get_by_index(dd, ctxt);

	return NULL;
}

/**
 * hfi1_rcd_get_by_index - get an rcd reference by array index
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * We need to protect access to the rcd array.  If access is needed to
 * one or more indices, get the protecting spinlock and then increment
 * the kref.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
{
	unsigned long flags;
	struct hfi1_ctxtdata *rcd = NULL;

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd[ctxt]) {
		rcd = dd->rcd[ctxt];
		hfi1_rcd_get(rcd);
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	return rcd;
}

/*
 * Common code for user and kernel context create and setup.
 * NOTE: the initial kref is done here (hfi1_rcd_init()).
 */
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **context)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	unsigned kctxt_ngroups = 0;
	u32 base;

	if (dd->rcv_entries.nctxt_extra >
	    dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
				 (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt));
	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
	if (rcd) {
		u32 rcvtids, max_entries;
		u16 ctxt;
		int ret;

		ret = allocate_rcd_index(dd, rcd, &ctxt);
		if (ret) {
			*context = NULL;
			kfree(rcd);
			return ret;
		}

		INIT_LIST_HEAD(&rcd->qp_wait_list);
		hfi1_exp_tid_group_init(&rcd->tid_group_list);
		hfi1_exp_tid_group_init(&rcd->tid_used_list);
		hfi1_exp_tid_group_init(&rcd->tid_full_list);
		rcd->ppd = ppd;
		rcd->dd = dd;
		__set_bit(0, rcd->in_use_ctxts);
		rcd->numa_id = numa;
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;

		mutex_init(&rcd->exp_lock);

		hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);

		/*
		 * Calculate the context's RcvArray entry starting point.
		 * We do this here because we have to take into account all
		 * the RcvArray entries that previous contexts would have
		 * taken, and we have to account for any extra groups assigned
		 * to the static (kernel) or dynamic (vnic/user) contexts.
		 */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base = kctxt_ngroups +
				       (ctxt * dd->rcv_entries.ngroups);
			}
		} else {
			u16 ct = ctxt - dd->first_dyn_alloc_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else {
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
			}
		}
		rcd->eager_base = base * dd->rcv_entries.group_size;

		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		/*
		 * Simple Eager buffer allocation: we have already pre-allocated
		 * the number of RcvArray entry groups.  Each ctxtdata structure
		 * holds the number of groups for that context.
		 *
		 * To follow CSR requirements and maintain cacheline alignment,
		 * make sure all sizes and bases are multiples of group_size.
		 *
		 * The expected entry count is what is left after assigning
		 * eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		hfi1_cdbg(PROC,
			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			  rcd->ctxt, rcd->egrbufs.count);

		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers = kzalloc_node(
			rcd->egrbufs.count * sizeof(*rcd->egrbufs.buffers),
			GFP_KERNEL, numa);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids = kzalloc_node(
				rcd->egrbufs.count *
				sizeof(*rcd->egrbufs.rcvtids),
				GFP_KERNEL, numa);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
			hfi1_cdbg(PROC,
				  "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
				  rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

		/* Applicable only for statically created kernel contexts */
		if (ctxt < dd->first_dyn_alloc_ctxt) {
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
						    GFP_KERNEL, numa);
			if (!rcd->opstats)
				goto bail;
		}

		*context = rcd;
		return 0;
	}

bail:
	*context = NULL;
	hfi1_free_ctxt(rcd);
	return -ENOMEM;
}

/**
 * hfi1_free_ctxt - free the final reference to a context
 * @rcd: pointer to an initialized rcd data structure
 *
 * This wrapper is the free function that matches hfi1_create_ctxtdata().
 * When a context is done being used (kernel or user), this function is called
 * for the "final" put to match the kref init from hfi1_create_ctxtdata().
 * Other users of the context do a get/put sequence to make sure that the
 * structure isn't removed while in use.
 */
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
{
	hfi1_rcd_put(rcd);
}

/*
 * Convert a receive header entry size to the encoding used in the CSR.
 *
 * Return a zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
	/* there are only 3 valid receive header entry sizes */
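	/* entry sizes: 2 (8B) -> 1, 16 (64B) -> 2, 32 (128B) -> 4 */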
	if (size == 2)
		return 1;
	if (size == 16)
		return 2;
	if (size == 32)
		return 4;
	return 0; /* invalid */
}

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits/sec */
	u32 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
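	/* a CCT entry packs a 2-bit shift (top) and a 14-bit multiplier */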
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti_timer, ccti_min;
	struct cc_state *cc_state;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrement ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */

	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	if (cca_timer->ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}

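	/* still above the SL's minimum: re-arm for another timer period */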
	if (cca_timer->ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
		hrtimer_forward_now(t, ns_to_ktime(nsec));
		ret = HRTIMER_RESTART;
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
	rcu_read_unlock();
	return ret;
}

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i;
	uint default_pkey_idx;
	struct cc_state *cc_state;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
	ppd->part_enforce |= HFI1_PART_ENFORCE_IN;

	if (loopback) {
		hfi1_early_err(&pdev->dev,
			       "Faking data partition 0x8001 in idx %u\n",
			       !default_pkey_idx);
		ppd->pkeys[!default_pkey_idx] = 0x8001;
	}

	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->qsfp_info.ppd = ppd;
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);

	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
	RCU_INIT_POINTER(ppd->cc_state, cc_state);
	if (!cc_state)
		goto bail;
	return;

bail:
	hfi1_early_err(&pdev->dev,
		       "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * Sanity check at least some of the values after reset, and
 * ensure no receive or transmit activity (explicitly, in case the
 * reset failed).
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;
	struct hfi1_ctxtdata *rcd;

	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
			     HFI1_RCVCTRL_INTRAVAIL_DIS |
			     HFI1_RCVCTRL_TAILUPD_DIS, rcd);
		hfi1_rcd_put(rcd);
	}
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u32 rcvmask;
	u16 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and initializes them.
	 */
	for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
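		/* build the rcvctrl mask from the context's capability flags */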
		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
		rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		hfi1_rcvctrl(dd, rcvmask, rcd);
		sc_enable(rcd->sc);
		hfi1_rcd_put(rcd);
	}
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
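			/* per-port workqueue for deferred driver work */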
			ppd->hfi1_wq =
				alloc_workqueue(
				    "hfi%d_%d",
				    WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
				    HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
				    dd->unit, pidx);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
		if (!ppd->link_wq) {
			/*
			 * Make the link workqueue single-threaded to enforce
			 * serialization.
			 */
			ppd->link_wq =
				alloc_workqueue(
				    "hfi_link_%d_%d",
				    WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
				    1, /* max_active */
				    dd->unit, pidx);
			if (!ppd->link_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	return -ENOMEM;
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	unsigned long len;
	u16 i;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_pportdata *ppd;

	/* Set up recv low level handlers */
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] =
		kdeth_process_expected;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] =
		kdeth_process_eager;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] =
		process_receive_error;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] =
		process_receive_bypass;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] =
		process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] =
		process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] =
		process_receive_invalid;
	dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;
	dd->process_vnic_dma_send = hfi1_vnic_send_dma;

	if (is_ax(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = 1;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = 0;
	}

	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
		&dd->pcidev->dev, sizeof(u64),
		&dd->rcvhdrtail_dummy_dma,
		GFP_KERNEL);

	if (!dd->rcvhdrtail_dummy_kvaddr) {
		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;

		rcd->do_interrupt = &handle_receive_interrupt;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
		if (lastfail) {
			dd_dev_err(dd,
				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
			ret = lastfail;
		}
		hfi1_rcd_put(rcd);
	}

	/* Allocate enough memory for user event notification. */
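	/* one event entry per possible context/sub-context pair */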
	len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
			 sizeof(*dd->events));
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");
	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	else
		dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
					     sizeof(dd->status->freezemsg));
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;

		set_mtu(ppd);
	}

	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that diags will work.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		set_intr_state(dd, 1);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
					HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

static inline struct hfi1_devdata *__hfi1_lookup(int unit)
{
	return idr_find(&hfi1_unit_table, unit);
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	struct hfi1_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	dd = __hfi1_lookup(unit);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
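		/* only tear down the LED override timer if it was ever set up */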
		if (ppd->led_override_timer.function) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
	}
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be set up again by hfi1_init(dd, 1).
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ctxtdata *rcd;
	unsigned pidx;
	int i;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		ppd->linkup = 0;
		if (ppd->statusp)
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
	}
	dd->flags &= ~HFI1_INITTED;

	/* mask and clean up interrupts, but not errors */
	set_intr_state(dd, 0);
	hfi1_clean_up_interrupts(dd);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		for (i = 0; i < dd->num_rcv_contexts; i++) {
			rcd = hfi1_rcd_get_by_index(dd, i);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
				     HFI1_RCVCTRL_CTXT_DIS |
				     HFI1_RCVCTRL_INTRAVAIL_DIS |
				     HFI1_RCVCTRL_PKEY_DIS |
				     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
			hfi1_rcd_put(rcd);
		}
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_flush(dd->send_contexts[i].sc);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/* disable all contexts */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_disable(dd->send_contexts[i].sc);
		/* disable the send device */
		pio_send_control(dd, PSC_GLOBAL_DISABLE);

		shutdown_led_override(ppd);

		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		hfi1_quiet_serdes(ppd);

		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
		if (ppd->link_wq) {
			destroy_workqueue(ppd->link_wq);
			ppd->link_wq = NULL;
		}
	}
	sdma_exit(dd);
}

/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * Free up any allocated data for a context.
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u32 e;

	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_dma);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *)rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_dma);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}

	/* all the RcvArray entries should have been cleared by now */
	kfree(rcd->egrbufs.rcvtids);
	rcd->egrbufs.rcvtids = NULL;

	for (e = 0; e < rcd->egrbufs.alloced; e++) {
		if (rcd->egrbufs.buffers[e].dma)
			dma_free_coherent(&dd->pcidev->dev,
					  rcd->egrbufs.buffers[e].len,
					  rcd->egrbufs.buffers[e].addr,
					  rcd->egrbufs.buffers[e].dma);
	}
	kfree(rcd->egrbufs.buffers);
	rcd->egrbufs.alloced = 0;
	rcd->egrbufs.buffers = NULL;

	sc_free(rcd->sc);
	rcd->sc = NULL;

	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd->opstats);

	rcd->subctxt_uregbase = NULL;
	rcd->subctxt_rcvegrbuf = NULL;
	rcd->subctxt_rcvhdr_base = NULL;
	rcd->opstats = NULL;
}

/*
 * Release our hold on the shared asic data.  If we are the last one,
 * return the structure to be finalized outside the lock.  Must be
 * holding hfi1_devs_lock.
 */
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	int other;

	if (!dd->asic_data)
		return NULL;
	dd->asic_data->dds[dd->hfi1_id] = NULL;
	other = dd->hfi1_id ? 0 : 1;
	ad = dd->asic_data;
	dd->asic_data = NULL;
	/* return NULL if the other dd still has a link */
	return ad->dds[other] ? NULL : ad;
}

static void finalize_asic_data(struct hfi1_devdata *dd,
			       struct hfi1_asic_data *ad)
{
	clean_up_i2c(dd, ad);
	kfree(ad);
}

static void __hfi1_free_devdata(struct kobject *kobj)
{
	struct hfi1_devdata *dd =
		container_of(kobj, struct hfi1_devdata, kobj);
	struct hfi1_asic_data *ad;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	idr_remove(&hfi1_unit_table, dd->unit);
	list_del(&dd->list);
	ad = release_asic_data(dd);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	if (ad)
		finalize_asic_data(dd, ad);
	free_platform_config(dd);
	rcu_barrier(); /* wait for rcu callbacks to complete */
	free_percpu(dd->int_counter);
	free_percpu(dd->rcv_limit);
	free_percpu(dd->send_schedule);
	free_percpu(dd->tx_opstats);
	sdma_clean(dd, dd->num_sdma);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

static struct kobj_type hfi1_devdata_type = {
	.release = __hfi1_free_devdata,
};

void hfi1_free_devdata(struct hfi1_devdata *dd)
{
	kobject_put(&dd->kobj);
}

/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	unsigned long flags;
	struct hfi1_devdata *dd;
	int ret, nports;

	/* extra is sizeof(struct hfi1_pportdata) times the number of ports */
	nports = extra / sizeof(struct hfi1_pportdata);

	dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						     nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);
	dd->num_pports = nports;
	dd->pport = (struct hfi1_pportdata *)(dd + 1);

	INIT_LIST_HEAD(&dd->list);
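	/*
	 * idr_preload() preallocates idr nodes so that idr_alloc() can
	 * safely run with GFP_NOWAIT while holding the spinlock.
	 */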
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&hfi1_devs_lock, flags);

	ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
	if (ret >= 0) {
		dd->unit = ret;
		list_add(&dd->list, &hfi1_dev_list);
	}

	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		hfi1_early_err(&pdev->dev,
			       "Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);

	/*
	 * Initialize all locks for the device. This needs to be as early as
	 * possible so locks are usable.
	 */
	spin_lock_init(&dd->sc_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->rcvctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->hfi1_diag_trans_lock);
	spin_lock_init(&dd->sc_init_lock);
	spin_lock_init(&dd->dc8051_memlock);
	seqlock_init(&dd->sc2vl_lock);
	spin_lock_init(&dd->sde_map_lock);
	spin_lock_init(&dd->pio_map_lock);
	mutex_init(&dd->dc8051_lock);
	init_waitqueue_head(&dd->event_queue);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->rcv_limit = alloc_percpu(u64);
	if (!dd->rcv_limit) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->send_schedule = alloc_percpu(u64);
	if (!dd->send_schedule) {
		ret = -ENOMEM;
		goto bail;
	}

	dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
	if (!dd->tx_opstats) {
		ret = -ENOMEM;
		goto bail;
	}

	kobject_init(&dd->kobj, &hfi1_devdata_type);
	return dd;

bail:
	if (!list_empty(&dd->list))
		list_del_init(&dd->list);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
	return ERR_PTR(ret);
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
	if (dd->flags & HFI1_INITTED) {
		u32 pidx;

		dd->flags &= ~HFI1_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct hfi1_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & HFI1_PRESENT)
					set_link_state(ppd, HLS_DN_DISABLE);

				if (ppd->statusp)
					*ppd->statusp &= ~HFI1_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

const struct pci_device_id hfi1_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
	.name = DRIVER_NAME,
	.probe = init_one,
	.remove = remove_one,
	.id_table = hfi1_pci_tbl,
	.err_handler = &hfi1_pci_err_handler,
};

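/* Sum the per-VL krcvqs module parameter entries into n_krcvqs. */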
static void __init compute_krcvqs(void)
{
	int i;

	for (i = 0; i < krcvqsset; i++)
		n_krcvqs += krcvqs[i];
}

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
	int ret;

	ret = dev_init();
	if (ret)
		goto bail;

	ret = node_affinity_init();
	if (ret)
		goto bail;

	/* validate max MTU before any devices start */
	if (!valid_opa_max_mtu(hfi1_max_mtu)) {
		pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
		       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
		hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
	}
	/* valid CUs run from 1-128 in powers of 2 */
	if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
		hfi1_cu = 1;
	/* valid credit return threshold is 0-100, variable is unsigned */
	if (user_credit_return_threshold > 100)
		user_credit_return_threshold = 100;

	compute_krcvqs();
	/*
	 * Sanitize the receive interrupt count; the timeout must wait
	 * until after the hardware type is known.
	 */
	if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
		rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
	/* reject invalid combinations */
	if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
		pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
		rcv_intr_count = 1;
	}
	if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
		/*
		 * Avoid indefinite packet delivery by requiring a timeout
		 * if count is > 1.
		 */
		pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
		rcv_intr_timeout = 1;
	}
	if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
		/*
		 * The dynamic algorithm expects a non-zero timeout
		 * and a count > 1.
		 */
		pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
		rcv_intr_dynamic = 0;
	}

	/* sanitize link CRC options */
	link_crc_mask &= SUPPORTED_CRCS;

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&hfi1_unit_table);

	hfi1_dbg_init();
	ret = hfi1_wss_init();
	if (ret < 0)
		goto bail_wss;
	ret = pci_register_driver(&hfi1_pci_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}
	goto bail; /* all OK */

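/* error unwind: tear down in the reverse order of setup */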
bail_dev:
	hfi1_wss_exit();
bail_wss:
	hfi1_dbg_exit();
	idr_destroy(&hfi1_unit_table);
	dev_cleanup();
bail:
	return ret;
}

module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
	pci_unregister_driver(&hfi1_pci_driver);
	node_affinity_destroy();
	hfi1_wss_exit();
	hfi1_dbg_exit();

	idr_destroy(&hfi1_unit_table);
	dispose_firmware();	/* asymmetric with obtain_firmware() */
	dev_cleanup();
}

module_exit(hfi1_mod_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
	int ctxt;
	int pidx;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		struct hfi1_pportdata *ppd = &dd->pport[pidx];
		struct cc_state *cc_state;
		int i;

		if (ppd->statusp)
			*ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

		for (i = 0; i < OPA_MAX_SLS; i++)
			hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

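		/*
		 * Detach the congestion control state under the lock, then
		 * free it only after an RCU grace period so any reader
		 * still dereferencing ppd->cc_state sees a valid object.
		 */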
		spin_lock(&ppd->cc_state_lock);
		cc_state = get_cc_state_protected(ppd);
		RCU_INIT_POINTER(ppd->cc_state, NULL);
		spin_unlock(&ppd->cc_state_lock);

		if (cc_state)
			kfree_rcu(cc_state, rcu);
	}

	free_credit_return(dd);

	if (dd->rcvhdrtail_dummy_kvaddr) {
		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
				  (void *)dd->rcvhdrtail_dummy_kvaddr,
				  dd->rcvhdrtail_dummy_dma);
		dd->rcvhdrtail_dummy_kvaddr = NULL;
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; iterate over num_rcv_contexts, because that is what
	 * we allocated.
	 */
	for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
		struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];

		if (rcd) {
			hfi1_clear_tids(rcd);
			hfi1_free_ctxt(rcd);
		}
	}

	kfree(dd->rcd);
	dd->rcd = NULL;

	free_pio_map(dd);
	/* must follow rcv context free - need to remove rcv's hooks */
	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
		sc_free(dd->send_contexts[ctxt].sc);
	dd->num_send_contexts = 0;
	kfree(dd->send_contexts);
	dd->send_contexts = NULL;
	kfree(dd->hw_to_sw);
	dd->hw_to_sw = NULL;
	kfree(dd->boardname);
	vfree(dd->events);
	vfree(dd->status);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
	hfi1_start_cleanup(dd);

	hfi1_pcie_ddcleanup(dd);
	hfi1_pcie_cleanup(dd->pcidev);

	cleanup_device_data(dd);

	hfi1_free_devdata(dd);
}

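/*
 * Validate the rcvhdrcnt module parameter: it must lie within the
 * hardware header queue limits and be a multiple of HDRQ_INCREMENT.
 */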
static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt)
{
	if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(dev, "Receive header queue count too small\n");
		return -EINVAL;
	}

	if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(dev,
			       "Receive header queue count cannot be greater than %u\n",
			       HFI1_MAX_HDRQ_EGRBUF_CNT);
		return -EINVAL;
	}

	if (thecnt % HDRQ_INCREMENT) {
		hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n",
			       thecnt, HDRQ_INCREMENT);
		return -EINVAL;
	}

	return 0;
}

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = 0, j, pidx, initfail;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;

	/* First, lock the non-writable module parameters */
	HFI1_CAP_LOCK();

	/* Validate dev ids */
	if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
	      ent->device == PCI_DEVICE_ID_INTEL1)) {
		hfi1_early_err(&pdev->dev,
			       "Failing on unknown Intel deviceid 0x%x\n",
			       ent->device);
		ret = -ENODEV;
		goto bail;
	}

	/* Validate some global module parameters */
	ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt);
	if (ret)
		goto bail;

	/* use the encoding function as a sanitization check */
	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
		hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
			       hfi1_hdrq_entsize);
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * The receive eager buffer size must be set before the receive
	 * contexts are created.
	 *
	 * Set the eager buffer size. Validate that it falls in a range
	 * allowed by the hardware - all powers of 2 between the min and
	 * max. The maximum valid MTU is within the eager buffer range
	 * so we do not need to cap the max_mtu by an eager buffer size
	 * setting.
	 */
	if (eager_buffer_size) {
		if (!is_power_of_2(eager_buffer_size))
			eager_buffer_size =
				roundup_pow_of_two(eager_buffer_size);
		eager_buffer_size =
			clamp_val(eager_buffer_size,
				  MIN_EAGER_BUFFER * 8,
				  MAX_EAGER_BUFFER_TOTAL);
		hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
				eager_buffer_size);
	} else {
		hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
		ret = -EINVAL;
		goto bail;
	}

	/* restrict value of hfi1_rcvarr_split */
	hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);

	ret = hfi1_pcie_init(pdev, ent);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	dd = hfi1_init_dd(pdev, ent);

	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		goto clean_bail; /* error already printed */
	}

	ret = create_workqueues(dd);
	if (ret)
		goto clean_bail;

	/* do the generic initialization */
	initfail = hfi1_init(dd, 0);

	/* setup vnic */
	hfi1_vnic_setup(dd);

	ret = hfi1_register_ib_device(dd);

	/*
	 * Now ready for use.  This should be cleared whenever we detect
	 * a reset, or initiate one.  If earlier failure, we still create
	 * devices, so diags, etc. can be used to determine the cause of
	 * the problem.
	 */
	if (!initfail && !ret) {
		dd->flags |= HFI1_INITTED;
		/* create debugfs files after init and ib register */
		hfi1_dbg_ibdev_init(&dd->verbs_dev);
	}

	j = hfi1_device_create(dd);
	if (j)
		dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

	if (initfail || ret) {
		hfi1_clean_up_interrupts(dd);
		stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			hfi1_quiet_serdes(dd->pport + pidx);
			ppd = dd->pport + pidx;
			if (ppd->hfi1_wq) {
				destroy_workqueue(ppd->hfi1_wq);
				ppd->hfi1_wq = NULL;
			}
			if (ppd->link_wq) {
				destroy_workqueue(ppd->link_wq);
				ppd->link_wq = NULL;
			}
		}
		if (!j)
			hfi1_device_remove(dd);
		if (!ret)
			hfi1_unregister_ib_device(dd);
		hfi1_vnic_cleanup(dd);
		postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail; /* everything already cleaned */
	}

	sdma_start(dd);

	return 0;

clean_bail:
	hfi1_pcie_cleanup(pdev);
bail:
	return ret;
}
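
/*
 * Wait until all user-space clients of this device have dropped their
 * references before continuing with removal.
 */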
static void wait_for_clients(struct hfi1_devdata *dd)
{
	/*
	 * Drop the device init reference and complete the device if
	 * there are no clients, or wait for active clients to finish.
	 */
	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);

	wait_for_completion(&dd->user_comp);
}

static void remove_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	/* close debugfs files before ib unregister */
	hfi1_dbg_ibdev_exit(&dd->verbs_dev);

	/* remove the /dev hfi1 interface */
	hfi1_device_remove(dd);

	/* wait for existing user space clients to finish */
	wait_for_clients(dd);

	/* unregister from IB core */
	hfi1_unregister_ib_device(dd);

	/* cleanup vnic */
	hfi1_vnic_cleanup(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	shutdown_device(dd);

	stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	postinit_cleanup(dd);
}

/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned amt;
	u64 reg;

	if (!rcd->rcvhdrq) {
		dma_addr_t dma_hdrqtail;
		gfp_t gfp_flags;

		/*
		 * rcvhdrqentsize is in DWs, so we have to convert to bytes
		 * (* sizeof(u32)).
		 */
		amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
				 sizeof(u32));
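		/*
		 * For example (hypothetical values): 2048 entries of 32 DWs
		 * each is 2048 * 32 * 4 bytes = 256 KiB, already page
		 * aligned.
		 */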

		if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
			gfp_flags = GFP_KERNEL;
		else
			gfp_flags = GFP_USER;
		rcd->rcvhdrq = dma_zalloc_coherent(
			&dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
			gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			dd_dev_err(dd,
				   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				   amt, rcd->ctxt);
			goto bail;
		}

		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
			rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail,
				gfp_flags);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
			rcd->rcvhdrqtailaddr_dma = dma_hdrqtail;
		}

		rcd->rcvhdrq_size = amt;
	}
	/*
	 * These values are per-context:
	 *	RcvHdrCnt
	 *	RcvHdrEntSize
	 *	RcvHdrSize
	 */
	reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
			& RCV_HDR_CNT_CNT_MASK)
		<< RCV_HDR_CNT_CNT_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
	reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
			& RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
		<< RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
	reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
		<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);

	/*
	 * Program dummy tail address for every receive context
	 * before enabling any receive context
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
			dd->rcvhdrtail_dummy_dma);

	return 0;

bail_free:
	dd_dev_err(dd,
		   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		   rcd->ctxt);
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_dma);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}

/**
 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user
 * contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous; we do multiple allocation
 * calls.  Otherwise we would get the OOM code involved by asking for
 * too much per call, with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
	gfp_t gfp_flags;
	u16 order;
	int ret = 0;
	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

	/*
	 * GFP_USER, but without GFP_FS, so the buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4, heavy
	 * filesystem activity makes these allocations fail.  __GFP_COMP
	 * lets us use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	/*
	 * The minimum size of the eager buffers is a group of MTU-sized
	 * buffers.
	 * The global eager_buffer_size parameter is checked against the
	 * theoretical lower limit of the value. Here, we check against the
	 * MTU.
	 */
	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
	/*
	 * If using one-pkt-per-egr-buffer, lower the eager buffer
	 * size to the max MTU (page-aligned).
	 */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
		rcd->egrbufs.rcvtid_size = round_mtu;

	/*
	 * Eager buffer sizes of 1MB or less require smaller TID sizes
	 * to satisfy the "multiple of 8 RcvArray entries" requirement.
	 */
	if (rcd->egrbufs.size <= (1 << 20))
		rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
			rounddown_pow_of_two(rcd->egrbufs.size / 8));

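	/*
	 * Allocate in chunks of rcvtid_size.  On failure, halve the
	 * buffer size and re-partition the chunks already allocated so
	 * no memory is wasted.
	 */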
	while (alloced_bytes < rcd->egrbufs.size &&
	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
		rcd->egrbufs.buffers[idx].addr =
			dma_zalloc_coherent(&dd->pcidev->dev,
					    rcd->egrbufs.rcvtid_size,
					    &rcd->egrbufs.buffers[idx].dma,
					    gfp_flags);
		if (rcd->egrbufs.buffers[idx].addr) {
			rcd->egrbufs.buffers[idx].len =
				rcd->egrbufs.rcvtid_size;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
				rcd->egrbufs.buffers[idx].addr;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
				rcd->egrbufs.buffers[idx].dma;
			rcd->egrbufs.alloced++;
			alloced_bytes += rcd->egrbufs.rcvtid_size;
			idx++;
		} else {
			u32 new_size, i, j;
			u64 offset = 0;

			/*
			 * Fail the eager buffer allocation if:
			 * - we are already using the lowest acceptable size
			 * - we are using one-pkt-per-egr-buffer (this implies
			 *   that we are accepting only one size)
			 */
			if (rcd->egrbufs.rcvtid_size == round_mtu ||
			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
					   rcd->ctxt);
				ret = -ENOMEM;
				goto bail_rcvegrbuf_phys;
			}

			new_size = rcd->egrbufs.rcvtid_size / 2;

			/*
			 * If the first attempt to allocate memory failed, don't
			 * fail everything but continue with the next lower
			 * size.
			 */
			if (idx == 0) {
				rcd->egrbufs.rcvtid_size = new_size;
				continue;
			}

			/*
			 * Re-partition already allocated buffers to a smaller
			 * size.
			 */
			rcd->egrbufs.alloced = 0;
			for (i = 0, j = 0, offset = 0; j < idx; i++) {
				if (i >= rcd->egrbufs.count)
					break;
				rcd->egrbufs.rcvtids[i].dma =
					rcd->egrbufs.buffers[j].dma + offset;
				rcd->egrbufs.rcvtids[i].addr =
					rcd->egrbufs.buffers[j].addr + offset;
				rcd->egrbufs.alloced++;
				if ((rcd->egrbufs.buffers[j].dma + offset +
				     new_size) ==
				    (rcd->egrbufs.buffers[j].dma +
				     rcd->egrbufs.buffers[j].len)) {
					j++;
					offset = 0;
				} else {
					offset += new_size;
				}
			}
			rcd->egrbufs.rcvtid_size = new_size;
		}
	}
	rcd->egrbufs.numbufs = idx;
	rcd->egrbufs.size = alloced_bytes;

	hfi1_cdbg(PROC,
		  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
		  rcd->ctxt, rcd->egrbufs.alloced,
		  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);

	/*
	 * Set the context's rcv array head update threshold to the closest
	 * power of 2 (so we can use a mask instead of modulo) below half
	 * the allocated entries.
	 */
	rcd->egrbufs.threshold =
		rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
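	/* e.g. 1000 allocated entries: 1000 / 2 = 500, pow2 below that is 256 */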
	/*
	 * Compute the expected RcvArray entry base. This is done after
	 * allocating the eager buffers in order to maximize the
	 * expected RcvArray entries for the context.
	 */
	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
	rcd->expected_count = max_entries - egrtop;
	if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
		rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

	rcd->expected_base = rcd->eager_base + egrtop;
	hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
		  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
		  rcd->eager_base, rcd->expected_base);

	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
		hfi1_cdbg(PROC,
			  "ctxt%u: current Eager buffer size is invalid %u\n",
			  rcd->ctxt, rcd->egrbufs.rcvtid_size);
		ret = -EINVAL;
		goto bail_rcvegrbuf_phys;
	}

	for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
			     rcd->egrbufs.rcvtids[idx].dma, order);
		cond_resched();
	}

	return 0;

bail_rcvegrbuf_phys:
	for (idx = 0; idx < rcd->egrbufs.alloced &&
	     rcd->egrbufs.buffers[idx].addr;
	     idx++) {
		dma_free_coherent(&dd->pcidev->dev,
				  rcd->egrbufs.buffers[idx].len,
				  rcd->egrbufs.buffers[idx].addr,
				  rcd->egrbufs.buffers[idx].dma);
		rcd->egrbufs.buffers[idx].addr = NULL;
		rcd->egrbufs.buffers[idx].dma = 0;
		rcd->egrbufs.buffers[idx].len = 0;
	}

	return ret;
}