/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_IRQNAME_SIZE	= 32
};

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mlx4_eq_context {
	__be32			flags;
	u16			reserved1[3];
	__be16			page_offset;
	u8			log_eq_size;
	u8			reserved2[4];
	u8			eq_period;
	u8			reserved3;
	u8			eq_max_count;
	u8			reserved4[3];
	u8			intr;
	u8			log_page_size;
	u8			reserved5[2];
	u8			mtt_base_addr_h;
	__be32			mtt_base_addr_l;
	u32			reserved6[2];
	__be32			consumer_index;
	__be32			producer_index;
	u32			reserved7[4];
};

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 << 8)
#define MLX4_EQ_STATE_FIRED	   (10 << 8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8)

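/*
 * The set of asynchronous event types that are routed to the single
 * "async" EQ via the MAP_EQ firmware command (see mlx4_MAP_EQ() below).
 */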
#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD))

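/*
 * Ring the EQ doorbell: post the low 24 bits of our consumer index,
 * setting bit 31 when we also want to re-arm the EQ (i.e. request
 * another interrupt when the next event arrives).
 */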
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

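/*
 * An EQE is ours only when its ownership bit matches the current
 * wrap-around parity of cons_index: the hardware flips the bit it
 * writes on every pass through the (power-of-two sized) queue, so a
 * mismatch means the entry at cons_index has not been written yet.
 */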
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;
	int port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
				       eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE:
			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
			} else {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 0;
			}
			break;

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
				  eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	return eqes_found;
}

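/*
 * Legacy (INTx) interrupt handler: ack the interrupt by writing the
 * clear mask, then poll every EQ, since a shared pin doesn't tell us
 * which EQ fired.
 */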
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq *eq = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

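/* MAP_EQ maps (or, with unmap set, unmaps) an async event mask to an EQ */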
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
			int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
			MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
			    MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_WRAPPED);
}

static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
		dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
}
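
/*
 * Worked example (illustrative numbers only): with 32 reserved EQs,
 * 4 completion vectors, 1 async EQ and an empty pool, EQNs 32..36 are
 * in use, spanning UAR pages 8 and 9, and the formula above gives
 * (4 + 1 + 32 + 0)/4 - 32/4 + 1 = 2 UAR pages.
 */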

static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(pci_resource_start(dev->pdev, 2) +
				((eq->eqn / 4) << PAGE_SHIFT),
				PAGE_SIZE);
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

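/*
 * Allocate and initialize an EQ: round the size up to a power of two
 * (the ownership check in next_eqe_sw() relies on this), allocate
 * page-sized coherent DMA buffers, register them in an MTT, and then
 * hand the EQ over to the hardware with SW2HW_EQ.
 */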
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev  = dev;
	eq->nent = roundup_pow_of_two(max(nent, 2));
	npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}

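/*
 * Tear down an EQ: reclaim it from the hardware with HW2SW_EQ, then
 * free its DMA pages and EQN.  (The "if (0)" block below is a
 * compiled-out debugging aid that dumps the EQ context returned by
 * HW2SW_EQ.)
 */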
static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return;

	err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	if (0) {
		mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				pr_cont("[%02x] ", i * 4);
			pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				pr_cont("\n");
		}
	}

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
				  eq->page_list[i].buf,
				  eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, vec;

	if (eq_table->have_irq)
		free_irq(dev->pdev->irq, dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		if (eq_table->eq[i].have_irq) {
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
			eq_table->eq[i].have_irq = 0;
		}

	for (i = 0; i < dev->caps.comp_pool; i++) {
		/*
		 * Free the IRQs assigned from the pool.  All bits
		 * should already be clear here, but validate anyway.
		 */
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			/* No locking needed during cleanup */
			vec = dev->caps.num_comp_vectors + 1 + i;
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
		}
	}

	kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
				    sizeof *priv->eq_table.eq, GFP_KERNEL);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
	kfree(mlx4_priv(dev)->eq_table.eq);
}

int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	/* kcalloc takes (count, size); the original had the arguments swapped,
	 * which happens to compute the same allocation but reads wrong. */
	priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
					 sizeof *priv->eq_table.uar_map,
					 GFP_KERNEL);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	err = mlx4_map_clr_int(dev);
	if (err)
		goto err_out_bitmap;

	priv->eq_table.clr_mask =
		swab32(1 << (priv->eq_table.inta_pin & 31));
	priv->eq_table.clr_int  = priv->clr_base +
		(priv->eq_table.inta_pin < 32 ? 4 : 0);

	priv->eq_table.irq_names =
		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
					     dev->caps.comp_pool),
			GFP_KERNEL);
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_bitmap;
	}

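	/*
	 * irq_names is one flat buffer of fixed-size slots: the name
	 * for interrupt vector i lives at irq_names + i * MLX4_IRQNAME_SIZE.
	 */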
570 for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
Yevgeny Petrilinc3794742011-03-30 23:30:17 +0000571 err = mlx4_create_eq(dev, dev->caps.num_cqs -
572 dev->caps.reserved_cqs +
573 MLX4_NUM_SPARE_EQE,
Yevgeny Petrilinb8dd7862008-12-22 07:15:03 -0800574 (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
575 &priv->eq_table.eq[i]);
Yevgeny Petrilina5b19b62009-06-08 00:39:58 -0700576 if (err) {
577 --i;
Yevgeny Petrilinb8dd7862008-12-22 07:15:03 -0800578 goto err_out_unmap;
Yevgeny Petrilina5b19b62009-06-08 00:39:58 -0700579 }
Yevgeny Petrilinb8dd7862008-12-22 07:15:03 -0800580 }
Roland Dreier225c7b12007-05-08 18:00:38 -0700581
582 err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
Yevgeny Petrilinb8dd7862008-12-22 07:15:03 -0800583 (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
584 &priv->eq_table.eq[dev->caps.num_comp_vectors]);
Roland Dreier225c7b12007-05-08 18:00:38 -0700585 if (err)
586 goto err_out_comp;
587
Yevgeny Petrilin0b7ca5a2011-03-22 22:37:47 +0000588 /*if additional completion vectors poolsize is 0 this loop will not run*/
589 for (i = dev->caps.num_comp_vectors + 1;
590 i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
591
592 err = mlx4_create_eq(dev, dev->caps.num_cqs -
593 dev->caps.reserved_cqs +
594 MLX4_NUM_SPARE_EQE,
595 (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
596 &priv->eq_table.eq[i]);
597 if (err) {
598 --i;
599 goto err_out_unmap;
600 }
601 }
602
603
	if (dev->flags & MLX4_FLAG_MSI_X) {
		const char *eq_name;

		for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
			if (i < dev->caps.num_comp_vectors) {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-comp-%d@pci:%s", i,
					 pci_name(dev->pdev));
			} else {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-async@pci:%s",
					 pci_name(dev->pdev));
			}

			eq_name = priv->eq_table.irq_names +
				  i * MLX4_IRQNAME_SIZE;
			err = request_irq(priv->eq_table.eq[i].irq,
					  mlx4_msi_x_interrupt, 0, eq_name,
					  priv->eq_table.eq + i);
			if (err)
				goto err_out_async;

			priv->eq_table.eq[i].have_irq = 1;
		}
	} else {
		snprintf(priv->eq_table.irq_names,
			 MLX4_IRQNAME_SIZE,
			 DRV_NAME "@pci:%s",
			 pci_name(dev->pdev));
		err = request_irq(dev->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, priv->eq_table.irq_names, dev);
		if (err)
			goto err_out_async;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	return 0;

err_out_async:
	mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);

err_out_comp:
	i = dev->caps.num_comp_vectors - 1;

err_out_unmap:
	while (i >= 0) {
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
		--i;
	}
	mlx4_unmap_clr_int(dev);
	mlx4_free_irqs(dev);

err_out_bitmap:
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	kfree(priv->eq_table.uar_map);

	return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);

	mlx4_free_irqs(dev);

	for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	mlx4_unmap_clr_int(dev);

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i])
			iounmap(priv->eq_table.uar_map[i]);

	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

	kfree(priv->eq_table.uar_map);
}

/* A test that verifies that we can accept interrupts on all
 * the irq vectors of the device.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupts(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err;

	err = mlx4_NOP(dev);
	/* When not in MSI_X, there is only one irq to check */
	if (!(dev->flags & MLX4_FLAG_MSI_X))
		return err;

	/* Loop over all completion vectors; check that each one works
	 * by mapping command completions to it and performing a NOP command
	 */
	for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
		/* Temporarily use polling for command completions */
		mlx4_cmd_use_polling(dev);

		/* Map all asynchronous events (including command
		 * completions) to the EQ under test */
		err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
				  priv->eq_table.eq[i].eqn);
		if (err) {
			mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
			mlx4_cmd_use_events(dev);
			break;
		}

		/* Go back to using events */
		mlx4_cmd_use_events(dev);
		err = mlx4_NOP(dev);
	}

	/* Return to default */
	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);

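/*
 * Illustrative usage sketch (not taken from this file; name and error
 * handling are hypothetical): a consumer wanting a dedicated
 * completion vector from the pool might do
 *
 *	int vec;
 *
 *	err = mlx4_assign_eq(dev, "my-eth-rx", &vec);
 *	...attach CQs to vec...
 *	mlx4_release_eq(dev, vec);
 */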
int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int vec = 0, err = 0, i;

	spin_lock(&priv->msix_ctl.pool_lock);
	for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
		if (~priv->msix_ctl.pool_bm & 1ULL << i) {
			priv->msix_ctl.pool_bm |= 1ULL << i;
			vec = dev->caps.num_comp_vectors + 1 + i;
			snprintf(priv->eq_table.irq_names +
					vec * MLX4_IRQNAME_SIZE,
					MLX4_IRQNAME_SIZE, "%s", name);
			err = request_irq(priv->eq_table.eq[vec].irq,
					  mlx4_msi_x_interrupt, 0,
					  priv->eq_table.irq_names +
					  vec * MLX4_IRQNAME_SIZE,
					  priv->eq_table.eq + vec);
			if (err) {
				/* Clear the allocated bit by flipping it.
				 * (1ULL, not 1: pool_bm is 64 bits wide.) */
				priv->msix_ctl.pool_bm ^= 1ULL << i;
				vec = 0;
				/* Keep scanning the pool; don't break out */
				continue;
			}
			eq_set_ci(&priv->eq_table.eq[vec], 1);
		}
	}
	spin_unlock(&priv->msix_ctl.pool_lock);

	if (vec) {
		*vector = vec;
	} else {
		*vector = 0;
		err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
	}
	return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);

void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	/* Index into the pool bitmap */
	int i = vec - dev->caps.num_comp_vectors - 1;

	if (likely(i >= 0)) {
		/*
		 * Sanity check: make sure we're not trying to free
		 * IRQs belonging to a legacy EQ.
		 */
		spin_lock(&priv->msix_ctl.pool_lock);
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
			priv->msix_ctl.pool_bm &= ~(1ULL << i);
		}
		spin_unlock(&priv->msix_ctl.pool_lock);
	}
}
EXPORT_SYMBOL(mlx4_release_eq);