/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_eq.c 1382 2004-12-24 02:21:02Z roland $
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_config_reg.h"

enum {
        MTHCA_NUM_ASYNC_EQE = 0x80,
        MTHCA_NUM_CMD_EQE   = 0x80,
        MTHCA_EQ_ENTRY_SIZE = 0x20
};

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_eq_context {
        u32 flags;
        u64 start;
        u32 logsize_usrpage;
        u32 tavor_pd;           /* reserved for Arbel */
        u8  reserved1[3];
        u8  intr;
        u32 arbel_pd;           /* lost_count for Tavor */
        u32 lkey;
        u32 reserved2[2];
        u32 consumer_index;
        u32 producer_index;
        u32 reserved3[4];
} __attribute__((packed));

#define MTHCA_EQ_STATUS_OK          ( 0 << 28)
#define MTHCA_EQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_EQ_OWNER_SW           ( 0 << 24)
#define MTHCA_EQ_OWNER_HW           ( 1 << 24)
#define MTHCA_EQ_FLAG_TR            ( 1 << 18)
#define MTHCA_EQ_FLAG_OI            ( 1 << 17)
#define MTHCA_EQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_EQ_STATE_FIRED        ( 2 <<  8)
#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 <<  8)
#define MTHCA_EQ_STATE_ARBEL        ( 8 <<  8)

enum {
        MTHCA_EVENT_TYPE_COMP               = 0x00,
        MTHCA_EVENT_TYPE_PATH_MIG           = 0x01,
        MTHCA_EVENT_TYPE_COMM_EST           = 0x02,
        MTHCA_EVENT_TYPE_SQ_DRAINED         = 0x03,
        MTHCA_EVENT_TYPE_SRQ_LAST_WQE       = 0x13,
        MTHCA_EVENT_TYPE_CQ_ERROR           = 0x04,
        MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,
        MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,
        MTHCA_EVENT_TYPE_PATH_MIG_FAILED    = 0x07,
        MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
        MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR    = 0x11,
        MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR    = 0x12,
        MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
        MTHCA_EVENT_TYPE_PORT_CHANGE        = 0x09,
        MTHCA_EVENT_TYPE_EQ_OVERFLOW        = 0x0f,
        MTHCA_EVENT_TYPE_ECC_DETECT         = 0x0e,
        MTHCA_EVENT_TYPE_CMD                = 0x0a
};
#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG)           | \
                                (1ULL << MTHCA_EVENT_TYPE_COMM_EST)           | \
                                (1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
                                (1ULL << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
                                (1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                                (1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
                                (1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
                                (1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                                (1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                                (1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
                                (1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
                                (1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR) | \
                                (1ULL << MTHCA_EVENT_TYPE_SRQ_LAST_WQE))
#define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)

#define MTHCA_EQ_DB_INC_CI     (1 << 24)
#define MTHCA_EQ_DB_REQ_NOT    (2 << 24)
#define MTHCA_EQ_DB_DISARM_CQ  (3 << 24)
#define MTHCA_EQ_DB_SET_CI     (4 << 24)
#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)

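/*
 * Event queue entry as written to memory by the HCA.  The high bit of
 * the final owner byte says whether the entry currently belongs to
 * hardware or software; see MTHCA_EQ_ENTRY_OWNER_* and next_eqe_sw().
 */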
struct mthca_eqe {
        u8  reserved1;
        u8  type;
        u8  reserved2;
        u8  subtype;
        union {
                u32 raw[6];
                struct {
                        u32 cqn;
                } __attribute__((packed)) comp;
                struct {
                        u16 reserved1;
                        u16 token;
                        u32 reserved2;
                        u8  reserved3[3];
                        u8  status;
                        u64 out_param;
                } __attribute__((packed)) cmd;
                struct {
                        u32 qpn;
                } __attribute__((packed)) qp;
                struct {
                        u32 cqn;
                        u32 reserved1;
                        u8  reserved2[3];
                        u8  syndrome;
                } __attribute__((packed)) cq_err;
                struct {
                        u32 reserved1[2];
                        u32 port;
                } __attribute__((packed)) port_change;
        } event;
        u8 reserved3[3];
        u8 owner;
} __attribute__((packed));

#define MTHCA_EQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_EQ_ENTRY_OWNER_HW      (1 << 7)

static inline u64 async_mask(struct mthca_dev *dev)
{
        return dev->mthca_flags & MTHCA_FLAG_SRQ ?
                MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :
                MTHCA_ASYNC_EVENT_MASK;
}

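/*
 * Tavor EQ doorbells are two 32-bit words: the first carries the
 * command (MTHCA_EQ_DB_*) together with the EQ number, the second the
 * command's argument -- here, the new consumer index.
 */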
static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
        u32 doorbell[2];

        doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_SET_CI | eq->eqn);
        doorbell[1] = cpu_to_be32(ci & (eq->nent - 1));

        /*
         * This barrier makes sure that all updates to ownership bits
         * done by set_eqe_hw() hit memory before the consumer index
         * is updated.  set_eq_ci() allows the HCA to possibly write
         * more EQ entries, and we want to avoid the exceedingly
         * unlikely possibility of the HCA writing an entry and then
         * having set_eqe_hw() overwrite the owner field.
         */
        wmb();
        mthca_write64(doorbell,
                      dev->kar + MTHCA_EQ_DOORBELL,
                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
        /* See comment in tavor_set_eq_ci() above. */
        wmb();
        __raw_writel(cpu_to_be32(ci), dev->eq_regs.arbel.eq_set_ci_base +
                     eq->eqn * 8);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
        if (mthca_is_memfree(dev))
                arbel_set_eq_ci(dev, eq, ci);
        else
                tavor_set_eq_ci(dev, eq, ci);
}

static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
{
        u32 doorbell[2];

        doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_REQ_NOT | eqn);
        doorbell[1] = 0;

        mthca_write64(doorbell,
                      dev->kar + MTHCA_EQ_DOORBELL,
                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
{
        writel(eqn_mask, dev->eq_regs.arbel.eq_arm);
}

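/*
 * On Tavor, the CQ that generated a completion event is disarmed with
 * an explicit EQ doorbell; the mem-free (Arbel) interface does not
 * use this doorbell, so this is a no-op there.
 */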
static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
{
        if (!mthca_is_memfree(dev)) {
                u32 doorbell[2];

                doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn);
                doorbell[1] = cpu_to_be32(cqn);

                mthca_write64(doorbell,
                              dev->kar + MTHCA_EQ_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }
}

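/*
 * The EQ ring is scattered over page-sized chunks, so an entry is
 * located by masking the index into the ring and then splitting the
 * byte offset into a page number and an offset within that page.
 */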
static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
{
        unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;
        return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
{
        struct mthca_eqe *eqe;
        eqe = get_eqe(eq, eq->cons_index);
        return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
}

static inline void set_eqe_hw(struct mthca_eqe *eqe)
{
        eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW;
}

static void port_change(struct mthca_dev *dev, int port, int active)
{
        struct ib_event record;

        mthca_dbg(dev, "Port change to %s for port %d\n",
                  active ? "active" : "down", port);

        record.device = &dev->ib_dev;
        record.event = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
        record.element.port_num = port;

        ib_dispatch_event(&record);
}

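/*
 * Poll one EQ: consume entries until the ownership bit says the next
 * one still belongs to hardware, dispatching each event to the CQ,
 * QP, command, or port-change handler as appropriate.  Returns
 * nonzero if at least one EQE was processed.
 */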
static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
{
        struct mthca_eqe *eqe;
        int disarm_cqn;
        int eqes_found = 0;

        while ((eqe = next_eqe_sw(eq))) {
                int set_ci = 0;

                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                rmb();

                switch (eqe->type) {
                case MTHCA_EVENT_TYPE_COMP:
                        disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
                        disarm_cq(dev, eq->eqn, disarm_cqn);
                        mthca_cq_event(dev, disarm_cqn);
                        break;

                case MTHCA_EVENT_TYPE_PATH_MIG:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_PATH_MIG);
                        break;

                case MTHCA_EVENT_TYPE_COMM_EST:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_COMM_EST);
                        break;

                case MTHCA_EVENT_TYPE_SQ_DRAINED:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_SQ_DRAINED);
                        break;

                case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_FATAL);
                        break;

                case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_PATH_MIG_ERR);
                        break;

                case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_REQ_ERR);
                        break;

                case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_ACCESS_ERR);
                        break;

                case MTHCA_EVENT_TYPE_CMD:
                        mthca_cmd_event(dev,
                                        be16_to_cpu(eqe->event.cmd.token),
                                        eqe->event.cmd.status,
                                        be64_to_cpu(eqe->event.cmd.out_param));
                        /*
                         * cmd_event() may add more commands.
                         * The card will think the queue has overflowed if
                         * we don't tell it we've been processing events.
                         */
                        set_ci = 1;
                        break;

                case MTHCA_EVENT_TYPE_PORT_CHANGE:
                        port_change(dev,
                                    (be32_to_cpu(eqe->event.port_change.port) >> 28) & 3,
                                    eqe->subtype == 0x4);
                        break;

                case MTHCA_EVENT_TYPE_CQ_ERROR:
                        mthca_warn(dev, "CQ %s on CQN %06x\n",
                                   eqe->event.cq_err.syndrome == 1 ?
                                   "overrun" : "access violation",
                                   be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
                        break;

                case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
                        mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
                        break;

                case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
                case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
                case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
                case MTHCA_EVENT_TYPE_ECC_DETECT:
                default:
                        mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
                                   eqe->type, eqe->subtype, eq->eqn);
                        break;
                }

                set_eqe_hw(eqe);
                ++eq->cons_index;
                eqes_found = 1;

                if (unlikely(set_ci)) {
                        /*
                         * Conditional on hca_type is OK here because
                         * this is a rare case, not the fast path.
                         */
                        set_eq_ci(dev, eq, eq->cons_index);
                        set_ci = 0;
                }
        }

        /*
         * Rely on caller to set consumer index so that we don't have
         * to test hca_type in our interrupt handling fast path.
         */
        return eqes_found;
}

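/*
 * Tavor handler for the shared interrupt line: read the event cause
 * register (ECR) to find which EQs fired, acknowledge those bits in
 * the ECR clear register, and poll each EQ.  The consumer index is
 * written back and the EQ rearmed only after polling completes.
 */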
static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr, struct pt_regs *regs)
{
        struct mthca_dev *dev = dev_ptr;
        u32 ecr;
        int i;

        if (dev->eq_table.clr_mask)
                writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

        ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
        if (ecr) {
                writel(ecr, dev->eq_regs.tavor.ecr_base +
                       MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);

                for (i = 0; i < MTHCA_NUM_EQ; ++i)
                        if (ecr & dev->eq_table.eq[i].eqn_mask &&
                            mthca_eq_int(dev, &dev->eq_table.eq[i])) {
                                tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
                                                dev->eq_table.eq[i].cons_index);
                                tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
                        }
        }

        return IRQ_RETVAL(ecr);
}

static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr,
                                               struct pt_regs *regs)
{
        struct mthca_eq *eq = eq_ptr;
        struct mthca_dev *dev = eq->dev;

        mthca_eq_int(dev, eq);
        tavor_set_eq_ci(dev, eq, eq->cons_index);
        tavor_eq_req_not(dev, eq->eqn);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

static irqreturn_t mthca_arbel_interrupt(int irq, void *dev_ptr, struct pt_regs *regs)
{
        struct mthca_dev *dev = dev_ptr;
        int work = 0;
        int i;

        if (dev->eq_table.clr_mask)
                writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                if (mthca_eq_int(dev, &dev->eq_table.eq[i])) {
                        work = 1;
                        arbel_set_eq_ci(dev, &dev->eq_table.eq[i],
                                        dev->eq_table.eq[i].cons_index);
                }

        arbel_eq_req_not(dev, dev->eq_table.arm_mask);

        return IRQ_RETVAL(work);
}

static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr,
                                               struct pt_regs *regs)
{
        struct mthca_eq *eq = eq_ptr;
        struct mthca_dev *dev = eq->dev;

        mthca_eq_int(dev, eq);
        arbel_set_eq_ci(dev, eq, eq->cons_index);
        arbel_eq_req_not(dev, eq->eqn_mask);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

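/*
 * Allocate and initialize one EQ: round the requested size up to a
 * power of two, allocate the ring one DMA-coherent page at a time,
 * mark every entry hardware-owned, register the pages as a memory
 * region, and hand the EQ context to the firmware with SW2HW_EQ.
 */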
static int __devinit mthca_create_eq(struct mthca_dev *dev,
                                     int nent,
                                     u8 intr,
                                     struct mthca_eq *eq)
{
        int npages = (nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
                PAGE_SIZE;
        u64 *dma_list = NULL;
        dma_addr_t t;
        void *mailbox = NULL;
        struct mthca_eq_context *eq_context;
        int err = -ENOMEM;
        int i;
        u8 status;

        /* Round the EQ size up to the next power of 2. */
        for (i = 1; i < nent; i <<= 1)
                ; /* nothing */
        nent = i;

        eq->dev = dev;

        eq->page_list = kmalloc(npages * sizeof *eq->page_list,
                                GFP_KERNEL);
        if (!eq->page_list)
                goto err_out;

        for (i = 0; i < npages; ++i)
                eq->page_list[i].buf = NULL;

        dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
        if (!dma_list)
                goto err_out_free;

        mailbox = kmalloc(sizeof *eq_context + MTHCA_CMD_MAILBOX_EXTRA,
                          GFP_KERNEL);
        if (!mailbox)
                goto err_out_free;
        eq_context = MAILBOX_ALIGN(mailbox);

        for (i = 0; i < npages; ++i) {
                eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
                                                          PAGE_SIZE, &t, GFP_KERNEL);
                if (!eq->page_list[i].buf)
                        goto err_out_free;

                dma_list[i] = t;
                pci_unmap_addr_set(&eq->page_list[i], mapping, t);

                memset(eq->page_list[i].buf, 0, PAGE_SIZE);
        }

        for (i = 0; i < nent; ++i)
                set_eqe_hw(get_eqe(eq, i));

        eq->eqn = mthca_alloc(&dev->eq_table.alloc);
        if (eq->eqn == -1)
                goto err_out_free;

        err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
                                  dma_list, PAGE_SHIFT, npages,
                                  0, npages * PAGE_SIZE,
                                  MTHCA_MPT_FLAG_LOCAL_WRITE |
                                  MTHCA_MPT_FLAG_LOCAL_READ,
                                  &eq->mr);
        if (err)
                goto err_out_free_eq;

        eq->nent = nent;

        memset(eq_context, 0, sizeof *eq_context);
        eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK   |
                                        MTHCA_EQ_OWNER_HW    |
                                        MTHCA_EQ_STATE_ARMED |
                                        MTHCA_EQ_FLAG_TR);
        if (mthca_is_memfree(dev))
                eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);

        eq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
        if (mthca_is_memfree(dev)) {
                eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
        } else {
                eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
                eq_context->tavor_pd = cpu_to_be32(dev->driver_pd.pd_num);
        }
        eq_context->intr = intr;
        eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey);

        err = mthca_SW2HW_EQ(dev, eq_context, eq->eqn, &status);
        if (err) {
                mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
                goto err_out_free_mr;
        }
        if (status) {
                mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n",
                           status);
                err = -EINVAL;
                goto err_out_free_mr;
        }

        kfree(dma_list);
        kfree(mailbox);

        eq->eqn_mask   = swab32(1 << eq->eqn);
        eq->cons_index = 0;

        dev->eq_table.arm_mask |= eq->eqn_mask;

        mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
                  eq->eqn, nent);

        return err;

err_out_free_mr:
        mthca_free_mr(dev, &eq->mr);

err_out_free_eq:
        mthca_free(&dev->eq_table.alloc, eq->eqn);

err_out_free:
        for (i = 0; i < npages; ++i)
                if (eq->page_list[i].buf)
                        dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                          eq->page_list[i].buf,
                                          pci_unmap_addr(&eq->page_list[i],
                                                         mapping));

        kfree(eq->page_list);
        kfree(dma_list);
        kfree(mailbox);

err_out:
        return err;
}

static void mthca_free_eq(struct mthca_dev *dev,
                          struct mthca_eq *eq)
{
        void *mailbox = NULL;
        int err;
        u8 status;
        int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
                PAGE_SIZE;
        int i;

        mailbox = kmalloc(sizeof (struct mthca_eq_context) + MTHCA_CMD_MAILBOX_EXTRA,
                          GFP_KERNEL);
        if (!mailbox)
                return;

        err = mthca_HW2SW_EQ(dev, MAILBOX_ALIGN(mailbox),
                             eq->eqn, &status);
        if (err)
                mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
        if (status)
                mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status);

        dev->eq_table.arm_mask &= ~eq->eqn_mask;

        if (0) {
                mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
                for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpup(MAILBOX_ALIGN(mailbox) + i * 4));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        mthca_free_mr(dev, &eq->mr);
        for (i = 0; i < npages; ++i)
                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                  eq->page_list[i].buf,
                                  pci_unmap_addr(&eq->page_list[i], mapping));

        kfree(eq->page_list);
        kfree(mailbox);
}

static void mthca_free_irqs(struct mthca_dev *dev)
{
        int i;

        if (dev->eq_table.have_irq)
                free_irq(dev->pdev->irq, dev);
        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                if (dev->eq_table.eq[i].have_irq)
                        free_irq(dev->eq_table.eq[i].msi_x_vector,
                                 dev->eq_table.eq + i);
}

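/*
 * Reserve and ioremap a register range inside BAR 0.  Offsets are
 * relative to the start of the BAR; mthca_unmap_reg() undoes both the
 * reservation and the mapping.
 */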
static int __devinit mthca_map_reg(struct mthca_dev *dev,
                                   unsigned long offset, unsigned long size,
                                   void __iomem **map)
{
        unsigned long base = pci_resource_start(dev->pdev, 0);

        if (!request_mem_region(base + offset, size, DRV_NAME))
                return -EBUSY;

        *map = ioremap(base + offset, size);
        if (!*map) {
                release_mem_region(base + offset, size);
                return -ENOMEM;
        }

        return 0;
}

static void mthca_unmap_reg(struct mthca_dev *dev, unsigned long offset,
                            unsigned long size, void __iomem *map)
{
        unsigned long base = pci_resource_start(dev->pdev, 0);

        release_mem_region(base + offset, size);
        iounmap(map);
}

static int __devinit mthca_map_eq_regs(struct mthca_dev *dev)
{
        unsigned long mthca_base;

        mthca_base = pci_resource_start(dev->pdev, 0);

        if (mthca_is_memfree(dev)) {
                /*
                 * We assume that the EQ arm and EQ set CI registers
                 * fall within the first BAR.  We can't trust the
                 * values firmware gives us, since those addresses are
                 * valid on the HCA's side of the PCI bus but not
                 * necessarily the host side.
                 */
                if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                  dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
                                  &dev->clr_base)) {
                        mthca_err(dev, "Couldn't map interrupt clear register, "
                                  "aborting.\n");
                        return -ENOMEM;
                }

                /*
                 * Add 4 because we limit ourselves to EQs 0 ... 31,
                 * so we only need the low word of the register.
                 */
                if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
                                        dev->fw.arbel.eq_arm_base) + 4, 4,
                                  &dev->eq_regs.arbel.eq_arm)) {
                        mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
                        mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                        dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
                                        dev->clr_base);
                        return -ENOMEM;
                }

                if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                  dev->fw.arbel.eq_set_ci_base,
                                  MTHCA_EQ_SET_CI_SIZE,
                                  &dev->eq_regs.arbel.eq_set_ci_base)) {
                        mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
                        mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
                                              dev->fw.arbel.eq_arm_base) + 4, 4,
                                        dev->eq_regs.arbel.eq_arm);
                        mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                        dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
                                        dev->clr_base);
                        return -ENOMEM;
                }
        } else {
                if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
                                  &dev->clr_base)) {
                        mthca_err(dev, "Couldn't map interrupt clear register, "
                                  "aborting.\n");
                        return -ENOMEM;
                }

                if (mthca_map_reg(dev, MTHCA_ECR_BASE,
                                  MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
                                  &dev->eq_regs.tavor.ecr_base)) {
                        mthca_err(dev, "Couldn't map ecr register, "
                                  "aborting.\n");
                        mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
                                        dev->clr_base);
                        return -ENOMEM;
                }
        }

        return 0;
}

static void __devexit mthca_unmap_eq_regs(struct mthca_dev *dev)
{
        if (mthca_is_memfree(dev)) {
                mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                dev->fw.arbel.eq_set_ci_base,
                                MTHCA_EQ_SET_CI_SIZE,
                                dev->eq_regs.arbel.eq_set_ci_base);
                mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
                                      dev->fw.arbel.eq_arm_base) + 4, 4,
                                dev->eq_regs.arbel.eq_arm);
                mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
                                dev->clr_base);
        } else {
                mthca_unmap_reg(dev, MTHCA_ECR_BASE,
                                MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
                                dev->eq_regs.tavor.ecr_base);
                mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
                                dev->clr_base);
        }
}

int __devinit mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
        int ret;
        u8 status;

        /*
         * We assume that mapping one page is enough for the whole EQ
         * context table.  This is fine with all current HCAs, because
         * we only use 32 EQs and each EQ uses 32 bytes of context
         * memory, or 1 KB total.
         */
        dev->eq_table.icm_virt = icm_virt;
        dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
        if (!dev->eq_table.icm_page)
                return -ENOMEM;
        dev->eq_table.icm_dma  = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
                                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
                __free_page(dev->eq_table.icm_page);
                return -ENOMEM;
        }

        ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
        if (!ret && status)
                ret = -EINVAL;
        if (ret) {
                pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
                               PCI_DMA_BIDIRECTIONAL);
                __free_page(dev->eq_table.icm_page);
        }

        return ret;
}

void __devexit mthca_unmap_eq_icm(struct mthca_dev *dev)
{
        u8 status;

        mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, PAGE_SIZE / 4096, &status);
        pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
                       PCI_DMA_BIDIRECTIONAL);
        __free_page(dev->eq_table.icm_page);
}

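/*
 * Set up the EQ table: map the EQ registers, create one EQ each for
 * completions, async events, and command completions, hook up
 * interrupts (one MSI-X vector per EQ, or a single shared line), and
 * tell the firmware which events go to which EQ.
 */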
int __devinit mthca_init_eq_table(struct mthca_dev *dev)
{
        int err;
        u8 status;
        u8 intr;
        int i;

        err = mthca_alloc_init(&dev->eq_table.alloc,
                               dev->limits.num_eqs,
                               dev->limits.num_eqs - 1,
                               dev->limits.reserved_eqs);
        if (err)
                return err;

        err = mthca_map_eq_regs(dev);
        if (err)
                goto err_out_free;

        if (dev->mthca_flags & MTHCA_FLAG_MSI ||
            dev->mthca_flags & MTHCA_FLAG_MSI_X) {
                dev->eq_table.clr_mask = 0;
        } else {
                dev->eq_table.clr_mask =
                        swab32(1 << (dev->eq_table.inta_pin & 31));
                dev->eq_table.clr_int  = dev->clr_base +
                        (dev->eq_table.inta_pin < 32 ? 4 : 0);
        }

        dev->eq_table.arm_mask = 0;

        intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
                128 : dev->eq_table.inta_pin;

        err = mthca_create_eq(dev, dev->limits.num_cqs,
                              (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
                              &dev->eq_table.eq[MTHCA_EQ_COMP]);
        if (err)
                goto err_out_unmap;

        err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE,
                              (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
                              &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
        if (err)
                goto err_out_comp;

        err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE,
                              (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
                              &dev->eq_table.eq[MTHCA_EQ_CMD]);
        if (err)
                goto err_out_async;

        if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
                static const char *eq_name[] = {
                        [MTHCA_EQ_COMP]  = DRV_NAME " (comp)",
                        [MTHCA_EQ_ASYNC] = DRV_NAME " (async)",
                        [MTHCA_EQ_CMD]   = DRV_NAME " (cmd)"
                };

                for (i = 0; i < MTHCA_NUM_EQ; ++i) {
                        err = request_irq(dev->eq_table.eq[i].msi_x_vector,
                                          mthca_is_memfree(dev) ?
                                          mthca_arbel_msi_x_interrupt :
                                          mthca_tavor_msi_x_interrupt,
                                          0, eq_name[i], dev->eq_table.eq + i);
                        if (err)
                                goto err_out_cmd;
                        dev->eq_table.eq[i].have_irq = 1;
                }
        } else {
                err = request_irq(dev->pdev->irq,
                                  mthca_is_memfree(dev) ?
                                  mthca_arbel_interrupt :
                                  mthca_tavor_interrupt,
                                  SA_SHIRQ, DRV_NAME, dev);
                if (err)
                        goto err_out_cmd;
                dev->eq_table.have_irq = 1;
        }

        err = mthca_MAP_EQ(dev, async_mask(dev),
                           0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
        if (err)
                mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
                           dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
        if (status)
                mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
                           dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);

        err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
                           0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
        if (err)
                mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
                           dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
        if (status)
                mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
                           dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);

        for (i = 0; i < MTHCA_EQ_CMD; ++i)
                if (mthca_is_memfree(dev))
                        arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
                else
                        tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);

        return 0;

err_out_cmd:
        mthca_free_irqs(dev);
        mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);

err_out_async:
        mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);

err_out_comp:
        mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);

err_out_unmap:
        mthca_unmap_eq_regs(dev);

err_out_free:
        mthca_alloc_cleanup(&dev->eq_table.alloc);
        return err;
}

void __devexit mthca_cleanup_eq_table(struct mthca_dev *dev)
{
        u8 status;
        int i;

        mthca_free_irqs(dev);

        mthca_MAP_EQ(dev, async_mask(dev),
                     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
        mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
                     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);

        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                mthca_free_eq(dev, &dev->eq_table.eq[i]);

        mthca_unmap_eq_regs(dev);

        mthca_alloc_cleanup(&dev->eq_table.alloc);
}