/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * min buffers we want to have per context, after driver
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO);
MODULE_PARM_DESC(
	num_user_contexts, "Set max number of user contexts to use");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (2 << 20); /* 2MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 2MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B (default), 32 - 128B");

unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits pass this many blocks (in percent of allocated blocks, 0 is off)");

static inline u64 encode_rcv_header_entry_size(u16);

static struct idr hfi1_unit_table;
u32 hfi1_cpulist_count;
unsigned long *hfi1_cpulist;

/*
 * Common code for creating the receive context array.
 */
int hfi1_create_ctxts(struct hfi1_devdata *dd)
{
	unsigned i;
	int ret;

	/* Control context must always be context 0 */
	BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

	dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd),
			       GFP_KERNEL, dd->node);
	if (!dd->rcd)
		goto nomem;

	/* create one or more kernel contexts */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		struct hfi1_pportdata *ppd;
		struct hfi1_ctxtdata *rcd;

		ppd = dd->pport + (i % dd->num_pports);

		/* dd->rcd[i] gets assigned inside the callee */
		rcd = hfi1_create_ctxtdata(ppd, i, dd->node);
		if (!rcd) {
			dd_dev_err(dd,
				   "Unable to allocate kernel receive context, failing\n");
			goto nomem;
		}
		/*
		 * Set up the kernel context flags here and now because they
		 * use default values for all receive side memories.  User
		 * contexts will be handled as they are created.
		 */
		rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
			HFI1_CAP_KGET(NODROP_RHQ_FULL) |
			HFI1_CAP_KGET(NODROP_EGR_FULL) |
			HFI1_CAP_KGET(DMA_RTAIL);

		/* Control context must use DMA_RTAIL */
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			rcd->flags |= HFI1_CAP_DMA_RTAIL;
		rcd->seq_cnt = 1;

		rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
		if (!rcd->sc) {
			dd_dev_err(dd,
				   "Unable to allocate kernel send context, failing\n");
			goto nomem;
		}

		ret = hfi1_init_ctxt(rcd->sc);
		if (ret < 0) {
			dd_dev_err(dd,
				   "Failed to setup kernel receive context, failing\n");
			ret = -EFAULT;
			goto bail;
		}
	}

	/*
	 * Initialize aspm, to be done after gen3 transition and setting up
	 * contexts and before enabling interrupts
	 */
	aspm_init(dd);

	return 0;
nomem:
	ret = -ENOMEM;
bail:
	if (dd->rcd) {
		for (i = 0; i < dd->num_rcv_contexts; ++i)
			hfi1_free_ctxtdata(dd, dd->rcd[i]);
	}
	kfree(dd->rcd);
	dd->rcd = NULL;
	return ret;
}

/*
 * Common code for user and kernel context setup.
 */
struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
					   int numa)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	unsigned kctxt_ngroups = 0;
	u32 base;

	if (dd->rcv_entries.nctxt_extra >
	    dd->num_rcv_contexts - dd->first_user_ctxt)
		kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
				 (dd->num_rcv_contexts - dd->first_user_ctxt));
	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
	if (rcd) {
		u32 rcvtids, max_entries;

		hfi1_cdbg(PROC, "setting up context %u\n", ctxt);

		INIT_LIST_HEAD(&rcd->qp_wait_list);
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->cnt = 1;
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
		rcd->numa_id = numa;
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;

		mutex_init(&rcd->exp_lock);

		/*
		 * Calculate the context's RcvArray entry starting point.
		 * We do this here because we have to take into account all
		 * the RcvArray entries that previous contexts would have
		 * taken, and we have to account for any extra groups
		 * assigned to the kernel or user contexts.
		 */
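		/*
		 * Worked example with hypothetical numbers: if ngroups were
		 * 8 and the first two kernel contexts each got one extra
		 * group (kctxt_ngroups = 2), kernel context 0 would start
		 * at group 0 and kernel context 1 at group 9.
		 */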
		if (ctxt < dd->first_user_ctxt) {
			if (ctxt < kctxt_ngroups) {
				base = ctxt * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else
				base = kctxt_ngroups +
					(ctxt * dd->rcv_entries.ngroups);
		} else {
			u16 ct = ctxt - dd->first_user_ctxt;

			base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
				kctxt_ngroups);
			if (ct < dd->rcv_entries.nctxt_extra) {
				base += ct * (dd->rcv_entries.ngroups + 1);
				rcd->rcv_array_groups++;
			} else
				base += dd->rcv_entries.nctxt_extra +
					(ct * dd->rcv_entries.ngroups);
		}
		rcd->eager_base = base * dd->rcv_entries.group_size;

		rcd->rcvhdrq_cnt = rcvhdrcnt;
		rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
		/*
		 * Simple Eager buffer allocation: we have already
		 * pre-allocated the number of RcvArray entry groups.
		 * Each ctxtdata structure holds the number of groups
		 * for that context.
		 *
		 * To follow CSR requirements and maintain cacheline
		 * alignment, make sure all sizes and bases are multiples
		 * of group_size.
		 *
		 * The expected entry count is what is left after assigning
		 * eager.
		 */
		max_entries = rcd->rcv_array_groups *
			dd->rcv_entries.group_size;
		rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
		rcd->egrbufs.count = round_down(rcvtids,
						dd->rcv_entries.group_size);
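		/*
		 * Hypothetical example: with max_entries = 2048 and the
		 * default rcvarr_split of 25, rcvtids comes to 512, so up
		 * to 512 RcvArray entries (rounded down to a multiple of
		 * group_size) are set aside for eager buffers.
		 */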
		if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
			dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
				   rcd->ctxt);
			rcd->egrbufs.count = MAX_EAGER_ENTRIES;
		}
		hfi1_cdbg(PROC,
			  "ctxt%u: max Eager buffer RcvArray entries: %u\n",
			  rcd->ctxt, rcd->egrbufs.count);

		/*
		 * Allocate array that will hold the eager buffer accounting
		 * data.
		 * This will allocate the maximum possible buffer count based
		 * on the value of the RcvArray split parameter.
		 * The resulting value will be rounded down to the closest
		 * multiple of dd->rcv_entries.group_size.
		 */
		rcd->egrbufs.buffers = kzalloc_node(
			rcd->egrbufs.count * sizeof(*rcd->egrbufs.buffers),
			GFP_KERNEL, numa);
		if (!rcd->egrbufs.buffers)
			goto bail;
		rcd->egrbufs.rcvtids = kzalloc_node(
			rcd->egrbufs.count *
			sizeof(*rcd->egrbufs.rcvtids),
			GFP_KERNEL, numa);
		if (!rcd->egrbufs.rcvtids)
			goto bail;
		rcd->egrbufs.size = eager_buffer_size;
		/*
		 * The size of the buffers programmed into the RcvArray
		 * entries needs to be big enough to handle the highest
		 * MTU supported.
		 */
		if (rcd->egrbufs.size < hfi1_max_mtu) {
			rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
			hfi1_cdbg(PROC,
				  "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
				  rcd->ctxt, rcd->egrbufs.size);
		}
		rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

		if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
						    GFP_KERNEL, numa);
			if (!rcd->opstats)
				goto bail;
		}
	}
	return rcd;
bail:
	dd->rcd[ctxt] = NULL;
	kfree(rcd->egrbufs.rcvtids);
	kfree(rcd->egrbufs.buffers);
	kfree(rcd);
	return NULL;
}

/*
 * Convert a receive header entry size (in DWs) to the encoding used in the
 * CSR: 2 DWs (8B) -> 1, 16 DWs (64B) -> 2, 32 DWs (128B) -> 4.
 *
 * Return zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
	/* there are only 3 valid receive header entry sizes */
	if (size == 2)
		return 1;
	if (size == 16)
		return 2;
	if (size == 32)
		return 4;
	return 0; /* invalid */
}

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct cc_state *cc_state;
	int i;
	u16 cce, ccti_limit, max_ccti = 0;
	u16 shift, mult;
	u64 src;
	u32 current_egress_rate; /* Mbits /sec */
	u32 max_pkt_time;
	/*
	 * max_pkt_time is the maximum packet egress time in units
	 * of the fabric clock period 1/(805 MHz).
	 */

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		/*
		 * This should _never_ happen - rcu_read_lock() is held,
		 * and set_link_ipg() should not be called if cc_state
		 * is NULL.
		 */
		return;

	for (i = 0; i < OPA_MAX_SLS; i++) {
		u16 ccti = ppd->cca_timer[i].ccti;

		if (ccti > max_ccti)
			max_ccti = ccti;
	}

	ccti_limit = cc_state->cct.ccti_limit;
	if (max_ccti > ccti_limit)
		max_ccti = ccti_limit;

	cce = cc_state->cct.entries[max_ccti].entry;
	shift = (cce & 0xc000) >> 14;
	mult = (cce & 0x3fff);

	current_egress_rate = active_egress_rate(ppd);

	max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

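	/*
	 * The CCT entry packs a 2-bit shift (bits 15:14) and a 14-bit
	 * multiplier (bits 13:0), so the value computed below scales the
	 * maximum packet egress time by mult / 2^shift.
	 */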
	src = (max_pkt_time >> shift) * mult;

	src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
	src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

	write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
	struct cca_timer *cca_timer;
	struct hfi1_pportdata *ppd;
	int sl;
	u16 ccti_timer, ccti_min;
	struct cc_state *cc_state;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	cca_timer = container_of(t, struct cca_timer, hrtimer);
	ppd = cca_timer->ppd;
	sl = cca_timer->sl;

	rcu_read_lock();

	cc_state = get_cc_state(ppd);

	if (!cc_state) {
		rcu_read_unlock();
		return HRTIMER_NORESTART;
	}

	/*
	 * 1) decrement ccti for SL
	 * 2) calculate IPG for link (set_link_ipg())
	 * 3) restart timer, unless ccti is at min value
	 */

	ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	if (cca_timer->ccti > ccti_min) {
		cca_timer->ccti--;
		set_link_ipg(ppd);
	}

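	/*
	 * Re-check after the decrement above: the timer is re-armed only
	 * while ccti is still above the SL's configured minimum.
	 */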
	if (cca_timer->ccti > ccti_min) {
		unsigned long nsec = 1024 * ccti_timer;
		/* ccti_timer is in units of 1.024 usec */
		hrtimer_forward_now(t, ns_to_ktime(nsec));
		ret = HRTIMER_RESTART;
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
	rcu_read_unlock();
	return ret;
}

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
	int i;
	uint default_pkey_idx;
	struct cc_state *cc_state;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */

	default_pkey_idx = 1;

	ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
	if (loopback) {
		hfi1_early_err(&pdev->dev,
			       "Faking data partition 0x8001 in idx %u\n",
			       !default_pkey_idx);
		ppd->pkeys[!default_pkey_idx] = 0x8001;
	}

	INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
	INIT_WORK(&ppd->link_up_work, handle_link_up);
	INIT_WORK(&ppd->link_down_work, handle_link_down);
	INIT_WORK(&ppd->freeze_work, handle_freeze);
	INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
	INIT_WORK(&ppd->sma_message_work, handle_sma_message);
	INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
	INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
	INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
	INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

	mutex_init(&ppd->hls_lock);
	spin_lock_init(&ppd->qsfp_info.qsfp_lock);

	ppd->qsfp_info.ppd = ppd;
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;

	ppd->hfi1_wq = NULL;

	spin_lock_init(&ppd->cca_timer_lock);

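	/* one congestion-control (CCA) timer per OPA service level */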
	for (i = 0; i < OPA_MAX_SLS; i++) {
		hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		ppd->cca_timer[i].ppd = ppd;
		ppd->cca_timer[i].sl = i;
		ppd->cca_timer[i].ccti = 0;
		ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
	}

	ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

	spin_lock_init(&ppd->cc_state_lock);
	spin_lock_init(&ppd->cc_log_lock);
	cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
	RCU_INIT_POINTER(ppd->cc_state, cc_state);
	if (!cc_state)
		goto bail;
	return;

bail:

	hfi1_early_err(&pdev->dev,
		       "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
	return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * Sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed).
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
	int i;

	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_rcv_contexts; i++)
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
				 HFI1_RCVCTRL_INTRAVAIL_DIS |
				 HFI1_RCVCTRL_TAILUPD_DIS, i);
	pio_send_control(dd, PSC_GLOBAL_DISABLE);
	for (i = 0; i < dd->num_send_contexts; i++)
		sc_disable(dd->send_contexts[i].sc);

	return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
	u32 rcvmask;
	u32 i;

	/* enable PIO send */
	pio_send_control(dd, PSC_GLOBAL_ENABLE);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and initializes them.
	 */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
		rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
			HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
		if (!HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, MULTI_PKT_EGR))
			rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
		if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_RHQ_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
		if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_EGR_FULL))
			rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
		hfi1_rcvctrl(dd, rcvmask, i);
		sc_enable(dd->rcd[i]->sc);
	}
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
	int pidx;
	struct hfi1_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->hfi1_wq) {
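			/*
			 * One workqueue per port, named "hfi<unit>_<port
			 * index>"; max_active is sized to the number of
			 * SDMA engines.
			 */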
			ppd->hfi1_wq =
				alloc_workqueue(
				    "hfi%d_%d",
				    WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
				    dd->num_sdma,
				    dd->unit, pidx);
			if (!ppd->hfi1_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
	}
	return -ENOMEM;
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip
 * registers, TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	unsigned i, len;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_pportdata *ppd;

	/* Set up recv low level handlers */
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] =
		kdeth_process_expected;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] =
		kdeth_process_eager;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] =
		process_receive_error;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] =
		process_receive_bypass;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] =
		process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] =
		process_receive_invalid;
	dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] =
		process_receive_invalid;
	dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;

	/* Set up send low level handlers */
	dd->process_pio_send = hfi1_verbs_send_pio;
	dd->process_dma_send = hfi1_verbs_send_dma;
	dd->pio_inline_send = pio_copy;

	if (is_ax(dd)) {
		atomic_set(&dd->drop_packet, DROP_PACKET_ON);
		dd->do_drop = 1;
	} else {
		atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
		dd->do_drop = 0;
	}

	/* make sure the link is not "up" */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		ppd->linkup = 0;
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
		&dd->pcidev->dev, sizeof(u64),
		&dd->rcvhdrtail_dummy_dma,
		GFP_KERNEL);

	if (!dd->rcvhdrtail_dummy_kvaddr) {
		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = dd->rcd[i];
		if (!rcd)
			continue;

		rcd->do_interrupt = &handle_receive_interrupt;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
		if (lastfail) {
			dd_dev_err(dd,
				   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
			ret = lastfail;
		}
	}

	/* Allocate enough memory for user event notification. */
	len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
			 sizeof(*dd->events));
	dd->events = vmalloc_user(len);
	if (!dd->events)
		dd_dev_err(dd, "Failed to allocate user events page\n");
	/*
	 * Allocate a page for device and port status.
	 * Page will be shared amongst all user processes.
	 */
	dd->status = vmalloc_user(PAGE_SIZE);
	if (!dd->status)
		dd_dev_err(dd, "Failed to allocate dev status page\n");
	else
		dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
					     sizeof(dd->status->freezemsg));
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (dd->status)
			/* Currently, we only have one port */
			ppd->statusp = &dd->status->port;

		set_mtu(ppd);
	}

	/* enable chip even if we have an error, so we can debug cause */
	enable_chip(dd);

done:
	/*
	 * Set status even if port serdes is not initialized
	 * so that diags will work.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
			HFI1_STATUS_INITTED;
	if (!ret) {
		/* enable all interrupts from the chip */
		set_intr_state(dd, 1);

		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;

			/*
			 * start the serdes - must be after interrupts are
			 * enabled so we are notified when the link goes up
			 */
			lastfail = bringup_serdes(ppd);
			if (lastfail)
				dd_dev_info(dd,
					    "Failed to bring up port %u\n",
					    ppd->port);

			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			if (ppd->statusp)
				*ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
					HFI1_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
		}
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

static inline struct hfi1_devdata *__hfi1_lookup(int unit)
{
	return idr_find(&hfi1_unit_table, unit);
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
	struct hfi1_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	dd = __hfi1_lookup(unit);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->led_override_timer.data) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
	}
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be set up again by hfi1_init(dd, 1).
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	unsigned pidx;
	int i;

	if (dd->flags & HFI1_SHUTDOWN)
		return;
	dd->flags |= HFI1_SHUTDOWN;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		ppd->linkup = 0;
		if (ppd->statusp)
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
	}
	dd->flags &= ~HFI1_INITTED;

	/* mask interrupts, but not errors */
	set_intr_state(dd, 0);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		for (i = 0; i < dd->num_rcv_contexts; i++)
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
					 HFI1_RCVCTRL_CTXT_DIS |
					 HFI1_RCVCTRL_INTRAVAIL_DIS |
					 HFI1_RCVCTRL_PKEY_DIS |
					 HFI1_RCVCTRL_ONE_PKT_EGR_DIS, i);
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_flush(dd->send_contexts[i].sc);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		/* disable all contexts */
		for (i = 0; i < dd->num_send_contexts; i++)
			sc_disable(dd->send_contexts[i].sc);
		/* disable the send device */
		pio_send_control(dd, PSC_GLOBAL_DISABLE);

		shutdown_led_override(ppd);

		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		hfi1_quiet_serdes(ppd);

		if (ppd->hfi1_wq) {
			destroy_workqueue(ppd->hfi1_wq);
			ppd->hfi1_wq = NULL;
		}
	}
	sdma_exit(dd);
}

/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * This should not touch anything that would affect a simultaneous
 * re-allocation of context data, because it is called after hfi1_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned e;

	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_dma);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  (void *)rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_dma);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}

	/* all the RcvArray entries should have been cleared by now */
	kfree(rcd->egrbufs.rcvtids);

	for (e = 0; e < rcd->egrbufs.alloced; e++) {
		if (rcd->egrbufs.buffers[e].dma)
			dma_free_coherent(&dd->pcidev->dev,
					  rcd->egrbufs.buffers[e].len,
					  rcd->egrbufs.buffers[e].addr,
					  rcd->egrbufs.buffers[e].dma);
	}
	kfree(rcd->egrbufs.buffers);

	sc_free(rcd->sc);
	vfree(rcd->user_event_mask);
	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd->opstats);
	kfree(rcd);
}

/*
 * Release our hold on the shared asic data.  If we are the last one,
 * return the structure to be finalized outside the lock.  Must be
 * holding hfi1_devs_lock.
 */
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
	struct hfi1_asic_data *ad;
	int other;

	if (!dd->asic_data)
		return NULL;
	dd->asic_data->dds[dd->hfi1_id] = NULL;
	other = dd->hfi1_id ? 0 : 1;
	ad = dd->asic_data;
	dd->asic_data = NULL;
	/* return NULL if the other dd still has a link */
	return ad->dds[other] ? NULL : ad;
}

static void finalize_asic_data(struct hfi1_devdata *dd,
			       struct hfi1_asic_data *ad)
{
	clean_up_i2c(dd, ad);
	kfree(ad);
}

static void __hfi1_free_devdata(struct kobject *kobj)
{
	struct hfi1_devdata *dd =
		container_of(kobj, struct hfi1_devdata, kobj);
	struct hfi1_asic_data *ad;
	unsigned long flags;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	idr_remove(&hfi1_unit_table, dd->unit);
	list_del(&dd->list);
	ad = release_asic_data(dd);
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	if (ad)
		finalize_asic_data(dd, ad);
	free_platform_config(dd);
	rcu_barrier(); /* wait for rcu callbacks to complete */
	free_percpu(dd->int_counter);
	free_percpu(dd->rcv_limit);
	free_percpu(dd->send_schedule);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

static struct kobj_type hfi1_devdata_type = {
	.release = __hfi1_free_devdata,
};

void hfi1_free_devdata(struct hfi1_devdata *dd)
{
	kobject_put(&dd->kobj);
}

/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	unsigned long flags;
	struct hfi1_devdata *dd;
	int ret, nports;

	/* extra is sizeof(struct hfi1_pportdata) * number of ports */
	nports = extra / sizeof(struct hfi1_pportdata);

	dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						     nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);
	dd->num_pports = nports;
	dd->pport = (struct hfi1_pportdata *)(dd + 1);
	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&hfi1_devs_lock, flags);

	ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
	if (ret >= 0) {
		dd->unit = ret;
		list_add(&dd->list, &hfi1_dev_list);
	}

	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		hfi1_early_err(&pdev->dev,
			       "Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	/*
	 * Initialize all locks for the device.  This needs to be as early as
	 * possible so locks are usable.
	 */
	spin_lock_init(&dd->sc_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->rcvctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->hfi1_diag_trans_lock);
	spin_lock_init(&dd->sc_init_lock);
	spin_lock_init(&dd->dc8051_memlock);
	seqlock_init(&dd->sc2vl_lock);
	spin_lock_init(&dd->sde_map_lock);
	spin_lock_init(&dd->pio_map_lock);
	mutex_init(&dd->dc8051_lock);
	init_waitqueue_head(&dd->event_queue);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu int_counter\n");
		goto bail;
	}

	dd->rcv_limit = alloc_percpu(u64);
	if (!dd->rcv_limit) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu rcv_limit\n");
		goto bail;
	}

	dd->send_schedule = alloc_percpu(u64);
	if (!dd->send_schedule) {
		ret = -ENOMEM;
		hfi1_early_err(&pdev->dev,
			       "Could not allocate per-cpu send_schedule\n");
		goto bail;
	}

	if (!hfi1_cpulist_count) {
		u32 count = num_online_cpus();

		hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
				       GFP_KERNEL);
		if (hfi1_cpulist)
			hfi1_cpulist_count = count;
		else
			hfi1_early_err(
				&pdev->dev,
				"Could not alloc cpulist info, cpu affinity might be wrong\n");
	}
	kobject_init(&dd->kobj, &hfi1_devdata_type);
	return dd;

bail:
	if (!list_empty(&dd->list))
		list_del_init(&dd->list);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
	return ERR_PTR(ret);
}

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
	if (dd->flags & HFI1_INITTED) {
		u32 pidx;

		dd->flags &= ~HFI1_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct hfi1_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & HFI1_PRESENT)
					set_link_state(ppd, HLS_DN_DISABLE);

				if (ppd->statusp)
					*ppd->statusp &= ~HFI1_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->status)
		dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);
static void shutdown_one(struct pci_dev *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

const struct pci_device_id hfi1_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
	.name = DRIVER_NAME,
	.probe = init_one,
	.remove = remove_one,
	.shutdown = shutdown_one,
	.id_table = hfi1_pci_tbl,
	.err_handler = &hfi1_pci_err_handler,
};

static void __init compute_krcvqs(void)
{
	int i;

	for (i = 0; i < krcvqsset; i++)
		n_krcvqs += krcvqs[i];
}

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
	int ret;

	ret = dev_init();
	if (ret)
		goto bail;

	ret = node_affinity_init();
	if (ret)
		goto bail;

	/* validate max MTU before any devices start */
	if (!valid_opa_max_mtu(hfi1_max_mtu)) {
		pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
		       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
		hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
	}
	/* valid CUs run from 1-128 in powers of 2 */
	if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
		hfi1_cu = 1;
	/* valid credit return threshold is 0-100, variable is unsigned */
	if (user_credit_return_threshold > 100)
		user_credit_return_threshold = 100;

	compute_krcvqs();
	/*
	 * sanitize receive interrupt count; the timeout must wait until
	 * after the hardware type is known
	 */
	if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
		rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
	/* reject invalid combinations */
	if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
		pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
		rcv_intr_count = 1;
	}
	if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
		/*
		 * Avoid indefinite packet delivery by requiring a timeout
		 * if count is > 1.
		 */
		pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
		rcv_intr_timeout = 1;
	}
	if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
		/*
		 * The dynamic algorithm expects a non-zero timeout
		 * and a count > 1.
		 */
		pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
		rcv_intr_dynamic = 0;
	}

	/* sanitize link CRC options */
	link_crc_mask &= SUPPORTED_CRCS;

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&hfi1_unit_table);

	hfi1_dbg_init();
	ret = hfi1_wss_init();
	if (ret < 0)
		goto bail_wss;
	ret = pci_register_driver(&hfi1_pci_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}
	goto bail; /* all OK */

bail_dev:
	hfi1_wss_exit();
bail_wss:
	hfi1_dbg_exit();
	idr_destroy(&hfi1_unit_table);
	dev_cleanup();
bail:
	return ret;
}

module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
	pci_unregister_driver(&hfi1_pci_driver);
	node_affinity_destroy();
	hfi1_wss_exit();
	hfi1_dbg_exit();
	hfi1_cpulist_count = 0;
	kfree(hfi1_cpulist);

	idr_destroy(&hfi1_unit_table);
	dispose_firmware(); /* asymmetric with obtain_firmware() */
	dev_cleanup();
}

module_exit(hfi1_mod_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
	int ctxt;
	int pidx;
	struct hfi1_ctxtdata **tmp;
	unsigned long flags;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		struct hfi1_pportdata *ppd = &dd->pport[pidx];
		struct cc_state *cc_state;
		int i;

		if (ppd->statusp)
			*ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

		for (i = 0; i < OPA_MAX_SLS; i++)
			hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

		spin_lock(&ppd->cc_state_lock);
		cc_state = get_cc_state_protected(ppd);
		RCU_INIT_POINTER(ppd->cc_state, NULL);
		spin_unlock(&ppd->cc_state_lock);

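		/*
		 * Readers access ppd->cc_state under rcu_read_lock(), so
		 * kfree_rcu() below defers the actual free until they have
		 * all left their read-side critical sections.
		 */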
		if (cc_state)
			kfree_rcu(cc_state, rcu);
	}

	free_credit_return(dd);

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we free the full receive context count, because
	 * that's what we allocate.  We acquire the lock to be really
	 * paranoid that rcd isn't being accessed from some
	 * interrupt-related code (that should not happen, but best to
	 * be sure).
	 */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	tmp = dd->rcd;
	dd->rcd = NULL;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (dd->rcvhdrtail_dummy_kvaddr) {
		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
				  (void *)dd->rcvhdrtail_dummy_kvaddr,
				  dd->rcvhdrtail_dummy_dma);
		dd->rcvhdrtail_dummy_kvaddr = NULL;
	}

	for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
		struct hfi1_ctxtdata *rcd = tmp[ctxt];

		tmp[ctxt] = NULL; /* debugging paranoia */
		if (rcd) {
			hfi1_clear_tids(rcd);
			hfi1_free_ctxtdata(dd, rcd);
		}
	}
	kfree(tmp);
	free_pio_map(dd);
	/* must follow rcv context free - need to remove rcv's hooks */
	for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
		sc_free(dd->send_contexts[ctxt].sc);
	dd->num_send_contexts = 0;
	kfree(dd->send_contexts);
	dd->send_contexts = NULL;
	kfree(dd->hw_to_sw);
	dd->hw_to_sw = NULL;
	kfree(dd->boardname);
	vfree(dd->events);
	vfree(dd->status);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
	hfi1_start_cleanup(dd);

	hfi1_pcie_ddcleanup(dd);
	hfi1_pcie_cleanup(dd->pcidev);

	cleanup_device_data(dd);

	hfi1_free_devdata(dd);
}
static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt)
{
	if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(dev, "Receive header queue count too small\n");
		return -EINVAL;
	}

	if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
		hfi1_early_err(dev,
			       "Receive header queue count cannot be greater than %u\n",
			       HFI1_MAX_HDRQ_EGRBUF_CNT);
		return -EINVAL;
	}

	if (thecnt % HDRQ_INCREMENT) {
		hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n",
			       thecnt, HDRQ_INCREMENT);
		return -EINVAL;
	}

	return 0;
}

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret = 0, j, pidx, initfail;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;

	/* First, lock the non-writable module parameters */
	HFI1_CAP_LOCK();

	/* Validate some global module parameters */
	ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt);
	if (ret)
		goto bail;

	/* use the encoding function as a sanitization check */
	if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
		hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
			       hfi1_hdrq_entsize);
		ret = -EINVAL;
		goto bail;
	}

	/* The receive eager buffer size must be set before the receive
	 * contexts are created.
	 *
	 * Set the eager buffer size.  Validate that it falls in a range
	 * allowed by the hardware - all powers of 2 between the min and
	 * max.  The maximum valid MTU is within the eager buffer range
	 * so we do not need to cap the max_mtu by an eager buffer size
	 * setting.
	 */
	if (eager_buffer_size) {
		if (!is_power_of_2(eager_buffer_size))
			eager_buffer_size =
				roundup_pow_of_two(eager_buffer_size);
		eager_buffer_size =
			clamp_val(eager_buffer_size,
				  MIN_EAGER_BUFFER * 8,
				  MAX_EAGER_BUFFER_TOTAL);
		hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
				eager_buffer_size);
	} else {
		hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
		ret = -EINVAL;
		goto bail;
	}
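	/*
	 * Hypothetical example: a requested eager_buffer_size of 3MB is
	 * not a power of 2, so it is first rounded up to 4MB and then
	 * clamped to [MIN_EAGER_BUFFER * 8, MAX_EAGER_BUFFER_TOTAL].
	 */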

	/* restrict value of hfi1_rcvarr_split */
	hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);

	ret = hfi1_pcie_init(pdev, ent);
	if (ret)
		goto bail;

	if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
	      ent->device == PCI_DEVICE_ID_INTEL1)) {
		hfi1_early_err(&pdev->dev,
			       "Failing on unknown Intel deviceid 0x%x\n",
			       ent->device);
		ret = -ENODEV;
		goto clean_bail;
	}

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	dd = hfi1_init_dd(pdev, ent);

	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		goto clean_bail; /* error already printed */
	}

	ret = create_workqueues(dd);
	if (ret)
		goto clean_bail;

	/* do the generic initialization */
	initfail = hfi1_init(dd, 0);

	ret = hfi1_register_ib_device(dd);

	/*
	 * Now ready for use.  This should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
Dean Luicked6f6532016-02-18 11:12:25 -08001516 if (!initfail && !ret) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04001517 dd->flags |= HFI1_INITTED;
Dean Luicked6f6532016-02-18 11:12:25 -08001518 /* create debufs files after init and ib register */
1519 hfi1_dbg_ibdev_init(&dd->verbs_dev);
1520 }

        j = hfi1_device_create(dd);
        if (j)
                dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);

        if (initfail || ret) {
                stop_timers(dd);
                flush_workqueue(ib_wq);
                for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                        hfi1_quiet_serdes(dd->pport + pidx);
                        ppd = dd->pport + pidx;
                        if (ppd->hfi1_wq) {
                                destroy_workqueue(ppd->hfi1_wq);
                                ppd->hfi1_wq = NULL;
                        }
                }
                if (!j)
                        hfi1_device_remove(dd);
                if (!ret)
                        hfi1_unregister_ib_device(dd);
                postinit_cleanup(dd);
                if (initfail)
                        ret = initfail;
                goto bail; /* everything already cleaned */
        }

        sdma_start(dd);

        return 0;

clean_bail:
        hfi1_pcie_cleanup(pdev);
bail:
        return ret;
}

static void wait_for_clients(struct hfi1_devdata *dd)
{
        /*
         * Drop the device-init reference and complete the device if
         * there are no clients, or wait for the active clients to
         * finish.
         */
        if (atomic_dec_and_test(&dd->user_refcount))
                complete(&dd->user_comp);

        wait_for_completion(&dd->user_comp);
}
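
/*
 * For reference, an assumption about the matching user-side code (it
 * lives outside this file): dd->user_refcount is expected to start at 1
 * for the driver itself, with each user-context open taking and each
 * close dropping a reference, and the last closer completing
 * dd->user_comp.  wait_for_clients() drops the driver's own reference,
 * so the completion fires once the last user client is gone.
 */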

static void remove_one(struct pci_dev *pdev)
{
        struct hfi1_devdata *dd = pci_get_drvdata(pdev);

        /* close debugfs files before ib unregister */
        hfi1_dbg_ibdev_exit(&dd->verbs_dev);

        /* remove the /dev hfi1 interface */
        hfi1_device_remove(dd);

        /* wait for existing user space clients to finish */
        wait_for_clients(dd);

        /* unregister from IB core */
        hfi1_unregister_ib_device(dd);

        /*
         * Disable the IB link, disable interrupts on the device,
         * clear dma engines, etc.
         */
        shutdown_device(dd);

        stop_timers(dd);

        /* wait until all of our (qsfp) queue_work() calls complete */
        flush_workqueue(ib_wq);

        postinit_cleanup(dd);
}

static void shutdown_one(struct pci_dev *pdev)
{
        struct hfi1_devdata *dd = pci_get_drvdata(pdev);

        shutdown_device(dd);
}

/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
        unsigned amt;
        u64 reg;

        if (!rcd->rcvhdrq) {
                dma_addr_t dma_hdrqtail;
                gfp_t gfp_flags;

                /*
                 * rcvhdrqentsize is in DWs, so we have to convert to bytes
                 * (* sizeof(u32)).
                 */
                amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
                                 sizeof(u32));
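                /*
                 * e.g. (illustrative numbers only) 2048 entries of 32 DWs
                 * each give 2048 * 32 * 4 = 256 KiB, already page aligned
                 * on a 4 KiB-page system.
                 */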

                gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
                        GFP_USER : GFP_KERNEL;
                rcd->rcvhdrq = dma_zalloc_coherent(
                        &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
                        gfp_flags | __GFP_COMP);

                if (!rcd->rcvhdrq) {
                        dd_dev_err(dd,
                                   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
                                   amt, rcd->ctxt);
                        goto bail;
                }

                if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
                        rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
                                &dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail,
                                gfp_flags);
                        if (!rcd->rcvhdrtail_kvaddr)
                                goto bail_free;
                        rcd->rcvhdrqtailaddr_dma = dma_hdrqtail;
                }

                rcd->rcvhdrq_size = amt;
        }
        /*
         * These values are per-context:
         *      RcvHdrCnt
         *      RcvHdrEntSize
         *      RcvHdrSize
         */
        reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
                & RCV_HDR_CNT_CNT_MASK)
                << RCV_HDR_CNT_CNT_SHIFT;
        write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
        reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
                & RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
                << RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
        write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
        reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
                << RCV_HDR_SIZE_HDR_SIZE_SHIFT;
        write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);
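
        /*
         * Note on the encoding above (an assumption about the CSR
         * layout; chip.h is authoritative): the queue count is
         * programmed in units of 2^HDRQ_SIZE_SHIFT entries, and the
         * entry size is written in its encoded form rather than raw
         * DWs, which is why encode_rcv_header_entry_size() is reused
         * here.
         */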

        /*
         * Program dummy tail address for every receive context
         * before enabling any receive context
         */
        write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
                        dd->rcvhdrtail_dummy_dma);

        return 0;

bail_free:
        dd_dev_err(dd,
                   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
                   rcd->ctxt);
        vfree(rcd->user_event_mask);
        rcd->user_event_mask = NULL;
        dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
                          rcd->rcvhdrq_dma);
        rcd->rcvhdrq = NULL;
bail:
        return -ENOMEM;
}

/**
 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous; we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
        struct hfi1_devdata *dd = rcd->dd;
        u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
        gfp_t gfp_flags;
        u16 order;
        int ret = 0;
        u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

        /*
         * GFP_USER, but without GFP_FS, so the buffer cache can be
         * coalesced (we hope); otherwise, even at order 4, heavy
         * filesystem activity makes these allocations fail, and we
         * can use compound pages.
         */
        gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
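
        /*
         * For reference (an assumption about the gfp definitions, not
         * something this file spells out): __GFP_RECLAIM | __GFP_IO is
         * GFP_KERNEL with __GFP_FS masked off, and __GFP_COMP requests
         * compound pages for the higher-order allocations below.
         */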

        /*
         * The minimum size of the eager buffers is a group of MTU-sized
         * buffers.
         * The global eager_buffer_size parameter is checked against the
         * theoretical lower limit of the value.  Here, we check against
         * the MTU.
         */
        if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
                rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
        /*
         * If using one-pkt-per-egr-buffer, lower the eager buffer
         * size to the max MTU (page-aligned).
         */
        if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
                rcd->egrbufs.rcvtid_size = round_mtu;

        /*
         * Eager buffer sizes of 1MB or less require smaller TID sizes
         * to satisfy the "multiple of 8 RcvArray entries" requirement.
         */
        if (rcd->egrbufs.size <= (1 << 20))
                rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
                        rounddown_pow_of_two(rcd->egrbufs.size / 8));
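
        /*
         * Worked example (illustrative numbers only): with
         * egrbufs.size = 1 MB and a 4 KB round_mtu, rcvtid_size becomes
         * max(4 KB, rounddown_pow_of_two(1 MB / 8)) = 128 KB, i.e. 8
         * buffers, keeping the count a multiple of 8 RcvArray entries.
         */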

        while (alloced_bytes < rcd->egrbufs.size &&
               rcd->egrbufs.alloced < rcd->egrbufs.count) {
                rcd->egrbufs.buffers[idx].addr =
                        dma_zalloc_coherent(&dd->pcidev->dev,
                                            rcd->egrbufs.rcvtid_size,
                                            &rcd->egrbufs.buffers[idx].dma,
                                            gfp_flags);
                if (rcd->egrbufs.buffers[idx].addr) {
                        rcd->egrbufs.buffers[idx].len =
                                rcd->egrbufs.rcvtid_size;
                        rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
                                rcd->egrbufs.buffers[idx].addr;
                        rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
                                rcd->egrbufs.buffers[idx].dma;
                        rcd->egrbufs.alloced++;
                        alloced_bytes += rcd->egrbufs.rcvtid_size;
                        idx++;
                } else {
                        u32 new_size, i, j;
                        u64 offset = 0;

                        /*
                         * Fail the eager buffer allocation if:
                         *   - we are already using the lowest acceptable
                         *     size
                         *   - we are using one-pkt-per-egr-buffer (this
                         *     implies that we are accepting only one size)
                         */
                        if (rcd->egrbufs.rcvtid_size == round_mtu ||
                            !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
                                dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
                                           rcd->ctxt);
                                ret = -ENOMEM;
                                goto bail_rcvegrbuf_phys;
                        }

                        new_size = rcd->egrbufs.rcvtid_size / 2;

                        /*
                         * If the first attempt to allocate memory failed,
                         * don't fail everything but continue with the next
                         * lower size.
                         */
                        if (idx == 0) {
                                rcd->egrbufs.rcvtid_size = new_size;
                                continue;
                        }

                        /*
                         * Re-partition already allocated buffers to a
                         * smaller size.
                         */
                        rcd->egrbufs.alloced = 0;
                        for (i = 0, j = 0, offset = 0; j < idx; i++) {
                                if (i >= rcd->egrbufs.count)
                                        break;
                                rcd->egrbufs.rcvtids[i].dma =
                                        rcd->egrbufs.buffers[j].dma + offset;
                                rcd->egrbufs.rcvtids[i].addr =
                                        rcd->egrbufs.buffers[j].addr + offset;
                                rcd->egrbufs.alloced++;
                                if ((rcd->egrbufs.buffers[j].dma + offset +
                                     new_size) ==
                                    (rcd->egrbufs.buffers[j].dma +
                                     rcd->egrbufs.buffers[j].len)) {
                                        j++;
                                        offset = 0;
                                } else {
                                        offset += new_size;
                                }
                        }
                        rcd->egrbufs.rcvtid_size = new_size;
                }
        }
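
        /*
         * Worked example of the fallback path above (illustrative
         * numbers only): if three 1 MB buffers were allocated and the
         * fourth 1 MB request fails, rcvtid_size drops to 512 KB and
         * the three existing buffers are re-sliced into six 512 KB
         * rcvtids before allocation resumes at the smaller size.
         */
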
        rcd->egrbufs.numbufs = idx;
        rcd->egrbufs.size = alloced_bytes;

        hfi1_cdbg(PROC,
                  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
                  rcd->ctxt, rcd->egrbufs.alloced,
                  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);

        /*
         * Set the context's rcv array head update threshold to the closest
         * power of 2 (so we can use a mask instead of modulo) below half
         * the allocated entries.
         */
        rcd->egrbufs.threshold =
                rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
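        /* e.g. 48 allocated entries -> rounddown_pow_of_two(24) = 16 */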
        /*
         * Compute the expected RcvArray entry base.  This is done after
         * allocating the eager buffers in order to maximize the
         * expected RcvArray entries for the context.
         */
        max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
        egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
        rcd->expected_count = max_entries - egrtop;
        if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
                rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

        rcd->expected_base = rcd->eager_base + egrtop;
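
        /*
         * Worked example (illustrative numbers only): with 64 groups of
         * 8 entries (512 total) and 48 eager buffers, egrtop rounds up
         * to 48, leaving expected_count = 512 - 48 = 464 expected-TID
         * entries, subject to the MAX_TID_PAIR_ENTRIES * 2 cap.
         */
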
        hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
                  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
                  rcd->eager_base, rcd->expected_base);

        if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
                hfi1_cdbg(PROC,
                          "ctxt%u: current Eager buffer size is invalid %u\n",
                          rcd->ctxt, rcd->egrbufs.rcvtid_size);
                ret = -EINVAL;
                goto bail;
        }

        for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
                hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
                             rcd->egrbufs.rcvtids[idx].dma, order);
                cond_resched();
        }
        goto bail;

bail_rcvegrbuf_phys:
        for (idx = 0; idx < rcd->egrbufs.alloced &&
             rcd->egrbufs.buffers[idx].addr;
             idx++) {
                dma_free_coherent(&dd->pcidev->dev,
                                  rcd->egrbufs.buffers[idx].len,
                                  rcd->egrbufs.buffers[idx].addr,
                                  rcd->egrbufs.buffers[idx].dma);
                rcd->egrbufs.buffers[idx].addr = NULL;
                rcd->egrbufs.buffers[idx].dma = 0;
                rcd->egrbufs.buffers[idx].len = 0;
        }
bail:
        return ret;
}