/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_eq.c 1382 2004-12-24 02:21:02Z roland $
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_config_reg.h"

enum {
	MTHCA_NUM_ASYNC_EQE = 0x80,
	MTHCA_NUM_CMD_EQE   = 0x80,
	MTHCA_EQ_ENTRY_SIZE = 0x20
};
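
/*
 * Each EQE is MTHCA_EQ_ENTRY_SIZE (32) bytes, so the 0x80-entry
 * async and command EQs sized above fit in a single page when
 * PAGE_SIZE is 4 KB.
 */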

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_eq_context {
	u32 flags;
	u64 start;
	u32 logsize_usrpage;
	u32 tavor_pd;		/* reserved for Arbel */
	u8  reserved1[3];
	u8  intr;
	u32 arbel_pd;		/* lost_count for Tavor */
	u32 lkey;
	u32 reserved2[2];
	u32 consumer_index;
	u32 producer_index;
	u32 reserved3[4];
} __attribute__((packed));

#define MTHCA_EQ_STATUS_OK          ( 0 << 28)
#define MTHCA_EQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_EQ_OWNER_SW           ( 0 << 24)
#define MTHCA_EQ_OWNER_HW           ( 1 << 24)
#define MTHCA_EQ_FLAG_TR            ( 1 << 18)
#define MTHCA_EQ_FLAG_OI            ( 1 << 17)
#define MTHCA_EQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_EQ_STATE_FIRED        ( 2 <<  8)
#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 <<  8)
#define MTHCA_EQ_STATE_ARBEL        ( 8 <<  8)

enum {
	MTHCA_EVENT_TYPE_COMP               = 0x00,
	MTHCA_EVENT_TYPE_PATH_MIG           = 0x01,
	MTHCA_EVENT_TYPE_COMM_EST           = 0x02,
	MTHCA_EVENT_TYPE_SQ_DRAINED         = 0x03,
	MTHCA_EVENT_TYPE_SRQ_LAST_WQE       = 0x13,
	MTHCA_EVENT_TYPE_CQ_ERROR           = 0x04,
	MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,
	MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,
	MTHCA_EVENT_TYPE_PATH_MIG_FAILED    = 0x07,
	MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR    = 0x11,
	MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR    = 0x12,
	MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
	MTHCA_EVENT_TYPE_PORT_CHANGE        = 0x09,
	MTHCA_EVENT_TYPE_EQ_OVERFLOW        = 0x0f,
	MTHCA_EVENT_TYPE_ECC_DETECT         = 0x0e,
	MTHCA_EVENT_TYPE_CMD                = 0x0a
};

#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG)           | \
				(1ULL << MTHCA_EVENT_TYPE_COMM_EST)           | \
				(1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
				(1ULL << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
				(1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
				(1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
				(1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_SRQ_LAST_WQE))
#define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)

#define MTHCA_EQ_DB_INC_CI     (1 << 24)
#define MTHCA_EQ_DB_REQ_NOT    (2 << 24)
#define MTHCA_EQ_DB_DISARM_CQ  (3 << 24)
#define MTHCA_EQ_DB_SET_CI     (4 << 24)
#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)
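
/*
 * Shape of a Tavor EQ doorbell as built below (see tavor_set_eq_ci()
 * and friends): word 0 holds one of the commands above in bits 31:24
 * and the target EQN in the low bits; word 1 holds the command's
 * argument (a consumer index, a CQN to disarm, or zero).  Both words
 * are byte-swapped and written via mthca_write64().
 */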

struct mthca_eqe {
	u8 reserved1;
	u8 type;
	u8 reserved2;
	u8 subtype;
	union {
		u32 raw[6];
		struct {
			u32 cqn;
		} __attribute__((packed)) comp;
		struct {
			u16 reserved1;
			u16 token;
			u32 reserved2;
			u8  reserved3[3];
			u8  status;
			u64 out_param;
		} __attribute__((packed)) cmd;
		struct {
			u32 qpn;
		} __attribute__((packed)) qp;
		struct {
			u32 cqn;
			u32 reserved1;
			u8  reserved2[3];
			u8  syndrome;
		} __attribute__((packed)) cq_err;
		struct {
			u32 reserved1[2];
			u32 port;
		} __attribute__((packed)) port_change;
	} event;
	u8 reserved3[3];
	u8 owner;
} __attribute__((packed));

#define MTHCA_EQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_EQ_ENTRY_OWNER_HW      (1 << 7)
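
/*
 * EQE ownership: software initializes every entry with the owner bit
 * set (hardware-owned), and the HCA hands an entry to software by
 * writing it with the owner bit clear.  next_eqe_sw() below returns
 * the entry at the consumer index only once it is software-owned,
 * and set_eqe_hw() returns it to hardware after the event has been
 * handled.
 */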

static inline u64 async_mask(struct mthca_dev *dev)
{
	return dev->mthca_flags & MTHCA_FLAG_SRQ ?
		MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :
		MTHCA_ASYNC_EVENT_MASK;
}

static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	u32 doorbell[2];

	doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_SET_CI | eq->eqn);
	doorbell[1] = cpu_to_be32(ci & (eq->nent - 1));

	/*
	 * This barrier makes sure that all updates to ownership bits
	 * done by set_eqe_hw() hit memory before the consumer index
	 * is updated.  set_eq_ci() allows the HCA to possibly write
	 * more EQ entries, and we want to avoid the exceedingly
	 * unlikely possibility of the HCA writing an entry and then
	 * having set_eqe_hw() overwrite the owner field.
	 */
	wmb();
	mthca_write64(doorbell,
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	/* See comment in tavor_set_eq_ci() above. */
	wmb();
	__raw_writel(cpu_to_be32(ci), dev->eq_regs.arbel.eq_set_ci_base +
		     eq->eqn * 8);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	if (mthca_is_memfree(dev))
		arbel_set_eq_ci(dev, eq, ci);
	else
		tavor_set_eq_ci(dev, eq, ci);
}

static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
{
	u32 doorbell[2];

	doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_REQ_NOT | eqn);
	doorbell[1] = 0;

	mthca_write64(doorbell,
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
{
	writel(eqn_mask, dev->eq_regs.arbel.eq_arm);
}

static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
{
	if (!mthca_is_memfree(dev)) {
		u32 doorbell[2];

		doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn);
		doorbell[1] = cpu_to_be32(cqn);

		mthca_write64(doorbell,
			      dev->kar + MTHCA_EQ_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}
}

static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;
	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
{
	struct mthca_eqe *eqe;
	eqe = get_eqe(eq, eq->cons_index);
	return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
}

static inline void set_eqe_hw(struct mthca_eqe *eqe)
{
	eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW;
}

static void port_change(struct mthca_dev *dev, int port, int active)
{
	struct ib_event record;

	mthca_dbg(dev, "Port change to %s for port %d\n",
		  active ? "active" : "down", port);

	record.device = &dev->ib_dev;
	record.event = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	record.element.port_num = port;

	ib_dispatch_event(&record);
}

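/*
 * Drain all software-owned entries from one EQ: dispatch each event,
 * hand the entry back to hardware and advance the consumer index.
 * Returns nonzero if any entries were consumed; writing the final
 * consumer index to the hardware is left to the caller (see the
 * comment at the end of the function).
 */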
static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
{
	struct mthca_eqe *eqe;
	int disarm_cqn;
	int eqes_found = 0;

	while ((eqe = next_eqe_sw(eq))) {
		int set_ci = 0;

		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MTHCA_EVENT_TYPE_COMP:
			disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			disarm_cq(dev, eq->eqn, disarm_cqn);
			mthca_cq_event(dev, disarm_cqn);
			break;

		case MTHCA_EVENT_TYPE_PATH_MIG:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_PATH_MIG);
			break;

		case MTHCA_EVENT_TYPE_COMM_EST:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_COMM_EST);
			break;

		case MTHCA_EVENT_TYPE_SQ_DRAINED:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_SQ_DRAINED);
			break;

		case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_FATAL);
			break;

		case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_PATH_MIG_ERR);
			break;

		case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_REQ_ERR);
			break;

		case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_ACCESS_ERR);
			break;

		case MTHCA_EVENT_TYPE_CMD:
			mthca_cmd_event(dev,
					be16_to_cpu(eqe->event.cmd.token),
					eqe->event.cmd.status,
					be64_to_cpu(eqe->event.cmd.out_param));
			/*
			 * cmd_event() may add more commands.
			 * The card will think the queue has overflowed if
			 * we don't tell it we've been processing events.
			 */
			set_ci = 1;
			break;

		case MTHCA_EVENT_TYPE_PORT_CHANGE:
			port_change(dev,
				    (be32_to_cpu(eqe->event.port_change.port) >> 28) & 3,
				    eqe->subtype == 0x4);
			break;

		case MTHCA_EVENT_TYPE_CQ_ERROR:
			mthca_warn(dev, "CQ %s on CQN %06x\n",
				   eqe->event.cq_err.syndrome == 1 ?
				   "overrun" : "access violation",
				   be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			break;

		case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
			mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_ECC_DETECT:
		default:
			mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
				   eqe->type, eqe->subtype, eq->eqn);
			break;
		}

		set_eqe_hw(eqe);
		++eq->cons_index;
		eqes_found = 1;

		if (unlikely(set_ci)) {
			/*
			 * Conditional on hca_type is OK here because
			 * this is a rare case, not the fast path.
			 */
			set_eq_ci(dev, eq, eq->cons_index);
			set_ci = 0;
		}
	}

	/*
	 * Rely on the caller to set the consumer index so that we
	 * don't have to test hca_type in our interrupt handling fast
	 * path.
	 */
	return eqes_found;
}

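/*
 * Tavor flags pending EQs in the ECR register.  Rough flow of the
 * handler below (treating the ECR clear register as write-one-to-
 * acknowledge is our inference from this code, not a documented
 * fact here):
 *
 *	ecr = readl(ecr_base + 4);		which EQs fired
 *	writel(ecr, ecr_clr_base + 4);		acknowledge them
 *	for each EQ whose eqn_mask bit is set in ecr:
 *		drain it, write back its CI, request notification again
 */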
static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr, struct pt_regs *regs)
{
	struct mthca_dev *dev = dev_ptr;
	u32 ecr;
	int i;

	if (dev->eq_table.clr_mask)
		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

	ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
	if (ecr) {
		writel(ecr, dev->eq_regs.tavor.ecr_base +
		       MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);

		for (i = 0; i < MTHCA_NUM_EQ; ++i)
			if (ecr & dev->eq_table.eq[i].eqn_mask &&
			    mthca_eq_int(dev, &dev->eq_table.eq[i])) {
				tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
						dev->eq_table.eq[i].cons_index);
				tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
			}
	}

	return IRQ_RETVAL(ecr);
}

static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr,
					       struct pt_regs *regs)
{
	struct mthca_eq *eq = eq_ptr;
	struct mthca_dev *dev = eq->dev;

	mthca_eq_int(dev, eq);
	tavor_set_eq_ci(dev, eq, eq->cons_index);
	tavor_eq_req_not(dev, eq->eqn);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

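/*
 * Arbel (memfree) mode has no ECR register, so the shared-interrupt
 * handler polls every EQ, updates the consumer index of each EQ that
 * had entries, and rearms them all through the EQ arm register.
 */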
static irqreturn_t mthca_arbel_interrupt(int irq, void *dev_ptr, struct pt_regs *regs)
{
	struct mthca_dev *dev = dev_ptr;
	int work = 0;
	int i;

	if (dev->eq_table.clr_mask)
		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (mthca_eq_int(dev, &dev->eq_table.eq[i])) {
			work = 1;
			arbel_set_eq_ci(dev, &dev->eq_table.eq[i],
					dev->eq_table.eq[i].cons_index);
		}

	arbel_eq_req_not(dev, dev->eq_table.arm_mask);

	return IRQ_RETVAL(work);
}

static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr,
					       struct pt_regs *regs)
{
	struct mthca_eq *eq = eq_ptr;
	struct mthca_dev *dev = eq->dev;

	mthca_eq_int(dev, eq);
	arbel_set_eq_ci(dev, eq, eq->cons_index);
	arbel_eq_req_not(dev, eq->eqn_mask);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

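/*
 * Allocate and program one EQ: round the size up to a power of 2,
 * allocate page-sized coherent buffers for the entries, mark every
 * entry as hardware-owned, register the buffers as a memory region,
 * and hand the queue to the HCA with SW2HW_EQ.
 */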
static int __devinit mthca_create_eq(struct mthca_dev *dev,
				     int nent,
				     u8 intr,
				     struct mthca_eq *eq)
{
	int npages = (nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
		PAGE_SIZE;
	u64 *dma_list = NULL;
	dma_addr_t t;
	struct mthca_mailbox *mailbox;
	struct mthca_eq_context *eq_context;
	int err = -ENOMEM;
	int i;
	u8 status;

	/* Round the EQ size up to a power of 2. */
	for (i = 1; i < nent; i <<= 1)
		; /* nothing */
	nent = i;

	eq->dev = dev;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		pci_unmap_addr_set(&eq->page_list[i], mapping, t);

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	for (i = 0; i < nent; ++i)
		set_eqe_hw(get_eqe(eq, i));

	eq->eqn = mthca_alloc(&dev->eq_table.alloc);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
				  dma_list, PAGE_SHIFT, npages,
				  0, npages * PAGE_SIZE,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &eq->mr);
	if (err)
		goto err_out_free_eq;

	eq->nent = nent;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK   |
					MTHCA_EQ_OWNER_HW    |
					MTHCA_EQ_STATE_ARMED |
					MTHCA_EQ_FLAG_TR);
	if (mthca_is_memfree(dev))
		eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);

	eq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
	if (mthca_is_memfree(dev)) {
		eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
	} else {
		eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
		eq_context->tavor_pd = cpu_to_be32(dev->driver_pd.pd_num);
	}
	eq_context->intr = intr;
	eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey);

	err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
	if (err) {
		mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mr;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_mr;
	}

	kfree(dma_list);
	mthca_free_mailbox(dev, mailbox);

	eq->eqn_mask   = swab32(1 << eq->eqn);
	eq->cons_index = 0;

	dev->eq_table.arm_mask |= eq->eqn_mask;

	mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
		  eq->eqn, nent);

	return err;

 err_out_free_mr:
	mthca_free_mr(dev, &eq->mr);

 err_out_free_eq:
	mthca_free(&dev->eq_table.alloc, eq->eqn);

 err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  pci_unmap_addr(&eq->page_list[i],
							 mapping));

	mthca_free_mailbox(dev, mailbox);

 err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

 err_out:
	return err;
}

static void mthca_free_eq(struct mthca_dev *dev,
			  struct mthca_eq *eq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;
	int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
		PAGE_SIZE;
	int i;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return;

	err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
	if (status)
		mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status);

	dev->eq_table.arm_mask &= ~eq->eqn_mask;

	if (0) {
		mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	mthca_free_mr(dev, &eq->mr);
	for (i = 0; i < npages; ++i)
		pci_free_consistent(dev->pdev, PAGE_SIZE,
				    eq->page_list[i].buf,
				    pci_unmap_addr(&eq->page_list[i], mapping));

	kfree(eq->page_list);
	mthca_free_mailbox(dev, mailbox);
}

static void mthca_free_irqs(struct mthca_dev *dev)
{
	int i;

	if (dev->eq_table.have_irq)
		free_irq(dev->pdev->irq, dev);
	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (dev->eq_table.eq[i].have_irq)
			free_irq(dev->eq_table.eq[i].msi_x_vector,
				 dev->eq_table.eq + i);
}

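/*
 * Reserve and ioremap one register range within BAR 0 of the HCA.
 */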
static int __devinit mthca_map_reg(struct mthca_dev *dev,
				   unsigned long offset, unsigned long size,
				   void __iomem **map)
{
	unsigned long base = pci_resource_start(dev->pdev, 0);

	if (!request_mem_region(base + offset, size, DRV_NAME))
		return -EBUSY;

	*map = ioremap(base + offset, size);
	if (!*map) {
		release_mem_region(base + offset, size);
		return -ENOMEM;
	}

	return 0;
}

static void mthca_unmap_reg(struct mthca_dev *dev, unsigned long offset,
			    unsigned long size, void __iomem *map)
{
	unsigned long base = pci_resource_start(dev->pdev, 0);

	release_mem_region(base + offset, size);
	iounmap(map);
}

static int __devinit mthca_map_eq_regs(struct mthca_dev *dev)
{
	unsigned long mthca_base;

	mthca_base = pci_resource_start(dev->pdev, 0);

	if (mthca_is_memfree(dev)) {
		/*
		 * We assume that the EQ arm and EQ set CI registers
		 * fall within the first BAR.  We can't trust the
		 * values firmware gives us, since those addresses are
		 * valid on the HCA's side of the PCI bus but not
		 * necessarily the host side.
		 */
		if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				  dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
				  &dev->clr_base)) {
			mthca_err(dev, "Couldn't map interrupt clear register, "
				  "aborting.\n");
			return -ENOMEM;
		}

		/*
		 * Add 4 because we limit ourselves to EQs 0 ... 31,
		 * so we only need the low word of the register.
		 */
		if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
					dev->fw.arbel.eq_arm_base) + 4, 4,
				  &dev->eq_regs.arbel.eq_arm)) {
			mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
			mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
					dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
					dev->clr_base);
			return -ENOMEM;
		}

		if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				  dev->fw.arbel.eq_set_ci_base,
				  MTHCA_EQ_SET_CI_SIZE,
				  &dev->eq_regs.arbel.eq_set_ci_base)) {
			mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
			mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
					      dev->fw.arbel.eq_arm_base) + 4, 4,
					dev->eq_regs.arbel.eq_arm);
			mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
					dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
					dev->clr_base);
			return -ENOMEM;
		}
	} else {
		if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
				  &dev->clr_base)) {
			mthca_err(dev, "Couldn't map interrupt clear register, "
				  "aborting.\n");
			return -ENOMEM;
		}

		if (mthca_map_reg(dev, MTHCA_ECR_BASE,
				  MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
				  &dev->eq_regs.tavor.ecr_base)) {
			mthca_err(dev, "Couldn't map ecr register, "
				  "aborting.\n");
			mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
					dev->clr_base);
			return -ENOMEM;
		}
	}

	return 0;
}

static void __devexit mthca_unmap_eq_regs(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev)) {
		mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				dev->fw.arbel.eq_set_ci_base,
				MTHCA_EQ_SET_CI_SIZE,
				dev->eq_regs.arbel.eq_set_ci_base);
		mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
				      dev->fw.arbel.eq_arm_base) + 4, 4,
				dev->eq_regs.arbel.eq_arm);
		mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
				dev->clr_base);
	} else {
		mthca_unmap_reg(dev, MTHCA_ECR_BASE,
				MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
				dev->eq_regs.tavor.ecr_base);
		mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
				dev->clr_base);
	}
}

int __devinit mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
	int ret;
	u8 status;

	/*
	 * We assume that mapping one page is enough for the whole EQ
	 * context table.  This is fine with all current HCAs, because
	 * we only use 32 EQs and each EQ uses 32 bytes of context
	 * memory, or 1 KB total.
	 */
	dev->eq_table.icm_virt = icm_virt;
	dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
	if (!dev->eq_table.icm_page)
		return -ENOMEM;
	dev->eq_table.icm_dma  = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
		__free_page(dev->eq_table.icm_page);
		return -ENOMEM;
	}

	ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
	if (!ret && status)
		ret = -EINVAL;
	if (ret) {
		pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(dev->eq_table.icm_page);
	}

	return ret;
}

void __devexit mthca_unmap_eq_icm(struct mthca_dev *dev)
{
	u8 status;

	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, PAGE_SIZE / 4096, &status);
	pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
		       PCI_DMA_BIDIRECTIONAL);
	__free_page(dev->eq_table.icm_page);
}

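/*
 * Bring up the EQ machinery: allocate the EQN range, map the
 * EQ-related registers, create the completion, async and command
 * EQs, hook up the interrupt handler(s), bind the event types to
 * their EQs with MAP_EQ, and request notification on the EQs.
 */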
int __devinit mthca_init_eq_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	u8 intr;
	int i;

	err = mthca_alloc_init(&dev->eq_table.alloc,
			       dev->limits.num_eqs,
			       dev->limits.num_eqs - 1,
			       dev->limits.reserved_eqs);
	if (err)
		return err;

	err = mthca_map_eq_regs(dev);
	if (err)
		goto err_out_free;

	if (dev->mthca_flags & MTHCA_FLAG_MSI ||
	    dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		dev->eq_table.clr_mask = 0;
	} else {
		dev->eq_table.clr_mask =
			swab32(1 << (dev->eq_table.inta_pin & 31));
		dev->eq_table.clr_int  = dev->clr_base +
			(dev->eq_table.inta_pin < 31 ? 4 : 0);
	}

	dev->eq_table.arm_mask = 0;

	intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
		128 : dev->eq_table.inta_pin;

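	/*
	 * Each EQ is created with its own interrupt number programmed
	 * into its context: with MSI-X the three EQs get 128, 129 and
	 * 130 (presumably matching the MSI-X vector setup done
	 * elsewhere in the driver), otherwise they all share the intr
	 * value computed above.
	 */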
	err = mthca_create_eq(dev, dev->limits.num_cqs,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_COMP]);
	if (err)
		goto err_out_unmap;

	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
	if (err)
		goto err_out_comp;

	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_CMD]);
	if (err)
		goto err_out_async;

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		static const char *eq_name[] = {
			[MTHCA_EQ_COMP]  = DRV_NAME " (comp)",
			[MTHCA_EQ_ASYNC] = DRV_NAME " (async)",
			[MTHCA_EQ_CMD]   = DRV_NAME " (cmd)"
		};

		for (i = 0; i < MTHCA_NUM_EQ; ++i) {
			err = request_irq(dev->eq_table.eq[i].msi_x_vector,
					  mthca_is_memfree(dev) ?
					  mthca_arbel_msi_x_interrupt :
					  mthca_tavor_msi_x_interrupt,
					  0, eq_name[i], dev->eq_table.eq + i);
			if (err)
				goto err_out_cmd;
			dev->eq_table.eq[i].have_irq = 1;
		}
	} else {
		err = request_irq(dev->pdev->irq,
				  mthca_is_memfree(dev) ?
				  mthca_arbel_interrupt :
				  mthca_tavor_interrupt,
				  SA_SHIRQ, DRV_NAME, dev);
		if (err)
			goto err_out_cmd;
		dev->eq_table.have_irq = 1;
	}

	err = mthca_MAP_EQ(dev, async_mask(dev),
			   0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
	if (err)
		mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
	if (status)
		mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);

	err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
			   0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
	if (err)
		mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
	if (status)
		mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);

	for (i = 0; i < MTHCA_EQ_CMD; ++i)
		if (mthca_is_memfree(dev))
			arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
		else
			tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);

	return 0;

err_out_cmd:
	mthca_free_irqs(dev);
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);

err_out_async:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);

err_out_comp:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);

err_out_unmap:
	mthca_unmap_eq_regs(dev);

err_out_free:
	mthca_alloc_cleanup(&dev->eq_table.alloc);
	return err;
}

void __devexit mthca_cleanup_eq_table(struct mthca_dev *dev)
{
	u8 status;
	int i;

	mthca_free_irqs(dev);

	mthca_MAP_EQ(dev, async_mask(dev),
		     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
	mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
		     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		mthca_free_eq(dev, &dev->eq_table.eq[i]);

	mthca_unmap_eq_regs(dev);

	mthca_alloc_cleanup(&dev->eq_table.alloc);
}