/*
 *
 *  Copyright (C) 2002 Intersil Americas Inc.
 *  Copyright 2004 Jens Maurer <Jens.Maurer@gmx.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/io.h>
#include <asm/system.h>
#include <linux/if_arp.h>

#include "prismcompat.h"
#include "isl_38xx.h"
#include "islpci_mgt.h"
#include "isl_oid.h"		/* additional types and defs for isl38xx fw */
#include "isl_ioctl.h"

#include <net/iw_handler.h>

/******************************************************************************
        Global variable definition section
******************************************************************************/
int pc_debug = VERBOSE;
module_param(pc_debug, int, 0);

/******************************************************************************
        Driver general functions
******************************************************************************/
#if VERBOSE > SHOW_ERROR_MESSAGES
void
display_buffer(char *buffer, int length)
{
	if ((pc_debug & SHOW_BUFFER_CONTENTS) == 0)
		return;

	while (length > 0) {
		printk("[%02x]", *buffer & 255);
		length--;
		buffer++;
	}

	printk("\n");
}
#endif

/*****************************************************************************
        Queue handling for management frames
******************************************************************************/

/*
 * Helper function to create a PIMFOR management frame header.
 */
static void
pimfor_encode_header(int operation, u32 oid, u32 length, pimfor_header_t *h)
{
	h->version = PIMFOR_VERSION;
	h->operation = operation;
	h->device_id = PIMFOR_DEV_ID_MHLI_MIB;
	h->flags = 0;
	h->oid = cpu_to_be32(oid);
	h->length = cpu_to_be32(length);
}

/*
 * Helper function to analyze a PIMFOR management frame header.
 */
static pimfor_header_t *
pimfor_decode_header(void *data, int len)
{
	pimfor_header_t *h = data;

	while ((void *) h < data + len) {
		if (h->flags & PIMFOR_FLAG_LITTLE_ENDIAN) {
			le32_to_cpus(&h->oid);
			le32_to_cpus(&h->length);
		} else {
			be32_to_cpus(&h->oid);
			be32_to_cpus(&h->length);
		}
		if (h->oid != OID_INL_TUNNEL)
			return h;
		h++;
	}
	return NULL;
}
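
/*
 * Note on pimfor_decode_header() (descriptive only): oid and length arrive
 * in device byte order (big-endian unless PIMFOR_FLAG_LITTLE_ENDIAN is set)
 * and are converted to host order in place, so callers can read header->oid
 * and header->length directly afterwards.  OID_INL_TUNNEL headers are merely
 * stepped over to reach the inner header.
 */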

/*
 * Fill the receive queue for management frames with fresh buffers.
 */
int
islpci_mgmt_rx_fill(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =	/* volatile not needed */
	    (isl38xx_control_block *) priv->control_block;
	u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill\n");
#endif

	while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
		u32 index = curr % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_rx[index];
		isl38xx_fragment *frag = &cb->rx_data_mgmt[index];

		if (buf->mem == NULL) {
			buf->mem = kmalloc(MGMT_FRAME_SIZE, GFP_ATOMIC);
			if (!buf->mem) {
				printk(KERN_WARNING
				       "Error allocating management frame.\n");
				return -ENOMEM;
			}
			buf->size = MGMT_FRAME_SIZE;
		}
		if (buf->pci_addr == 0) {
			buf->pci_addr = pci_map_single(priv->pdev, buf->mem,
						       MGMT_FRAME_SIZE,
						       PCI_DMA_FROMDEVICE);
			if (!buf->pci_addr) {
				printk(KERN_WARNING
				       "Failed to make memory DMA'able.\n");
				return -ENOMEM;
			}
		}

		/* be safe: always reset control block information */
		frag->size = cpu_to_le16(MGMT_FRAME_SIZE);
		frag->flags = 0;
		frag->address = cpu_to_le32(buf->pci_addr);
		curr++;

		/* The fragment address in the control block must have
		 * been written before announcing the frame buffer to
		 * device */
		wmb();
		cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] = cpu_to_le32(curr);
	}
	return 0;
}
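
/*
 * Index convention used above and throughout this file (a descriptive note,
 * not new behaviour): driver_curr_frag/device_curr_frag and
 * priv->index_mgmt_rx/index_mgmt_tx are free-running 32-bit counters.  Their
 * difference is the number of fragments currently outstanding, and
 * "counter % ISL38XX_CB_MGMT_QSIZE" selects the ring slot.
 */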

/*
 * Create and transmit a management frame using "operation" and "oid",
 * with arguments data/length.
 * We either return an error and free the frame, or we return 0 and
 * islpci_mgt_cleanup_transmit() frees the frame in the tx-done
 * interrupt.
 */
static int
islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
		    void *data, int length)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =
	    (isl38xx_control_block *) priv->control_block;
	void *p;
	int err = -EINVAL;
	unsigned long flags;
	isl38xx_fragment *frag;
	struct islpci_membuf buf;
	u32 curr_frag;
	int index;
	int frag_len = length + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_transmit\n");
#endif

	if (frag_len > MGMT_FRAME_SIZE) {
		printk(KERN_DEBUG "%s: mgmt frame too large %d\n",
		       ndev->name, frag_len);
		goto error;
	}

	err = -ENOMEM;
	p = buf.mem = kmalloc(frag_len, GFP_KERNEL);
	if (!buf.mem) {
		printk(KERN_DEBUG "%s: cannot allocate mgmt frame\n",
		       ndev->name);
		goto error;
	}
	buf.size = frag_len;

	/* create the header directly in the fragment data area */
	pimfor_encode_header(operation, oid, length, (pimfor_header_t *) p);
	p += PIMFOR_HEADER_SIZE;

	if (data)
		memcpy(p, data, length);
	else
		memset(p, 0, length);

#if VERBOSE > SHOW_ERROR_MESSAGES
	{
		pimfor_header_t *h = buf.mem;
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x\n",
		      h->operation, oid, h->device_id, h->flags, length);

		/* display the buffer contents for debugging */
		display_buffer((char *) h, sizeof (pimfor_header_t));
		display_buffer(p, length);
	}
#endif

	err = -ENOMEM;
	buf.pci_addr = pci_map_single(priv->pdev, buf.mem, frag_len,
				      PCI_DMA_TODEVICE);
	if (!buf.pci_addr) {
		printk(KERN_WARNING "%s: cannot map PCI memory for mgmt\n",
		       ndev->name);
		goto error_free;
	}

	/* Protect the control block modifications against interrupts. */
	spin_lock_irqsave(&priv->slock, flags);
	curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ]);
	if (curr_frag - priv->index_mgmt_tx >= ISL38XX_CB_MGMT_QSIZE) {
		printk(KERN_WARNING "%s: mgmt tx queue is still full\n",
		       ndev->name);
		goto error_unlock;
	}

	/* commit the frame to the tx device queue */
	index = curr_frag % ISL38XX_CB_MGMT_QSIZE;
	priv->mgmt_tx[index] = buf;
	frag = &cb->tx_data_mgmt[index];
	frag->size = cpu_to_le16(frag_len);
	frag->flags = 0;	/* for any other than the last fragment, set to 1 */
	frag->address = cpu_to_le32(buf.pci_addr);

	/* The fragment address in the control block must have
	 * been written before announcing the frame buffer to
	 * device */
	wmb();
	cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ] = cpu_to_le32(curr_frag + 1);
	spin_unlock_irqrestore(&priv->slock, flags);

	/* trigger the device */
	islpci_trigger(priv);
	return 0;

 error_unlock:
	spin_unlock_irqrestore(&priv->slock, flags);
 error_free:
	kfree(buf.mem);
 error:
	return err;
}
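
/*
 * A note on buffer ownership in islpci_mgt_transmit() (descriptive only):
 * the frame built in buf.mem always starts with a PIMFOR_HEADER_SIZE header
 * followed by "length" payload bytes, frag_len in total.  On success the
 * buffer is recorded in priv->mgmt_tx[index] and is unmapped and kfree()d
 * later by islpci_mgt_cleanup_transmit(); on failure it is freed here.
 */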

/*
 * Receive a management frame from the device.
 * This can be an arbitrary number of traps, and at most one response
 * frame for a previous request sent via islpci_mgt_transmit().
 */
int
islpci_mgt_receive(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =
	    (isl38xx_control_block *) priv->control_block;
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive\n");
#endif

	/* Only once per interrupt, determine fragment range to
	 * process.  This avoids an endless loop (i.e. lockup) if
	 * frames come in faster than we can process them. */
	curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_RX_MGMTQ]);
	barrier();

	for (; priv->index_mgmt_rx < curr_frag; priv->index_mgmt_rx++) {
		pimfor_header_t *header;
		u32 index = priv->index_mgmt_rx % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_rx[index];
		u16 frag_len;
		int size;
		struct islpci_mgmtframe *frame;

		/* I have no idea (and no documentation) if flags != 0
		 * is possible.  Drop the frame, reuse the buffer. */
		if (le16_to_cpu(cb->rx_data_mgmt[index].flags) != 0) {
			printk(KERN_WARNING "%s: unknown flags 0x%04x\n",
			       ndev->name,
			       le16_to_cpu(cb->rx_data_mgmt[index].flags));
			continue;
		}

		/* The device only returns the size of the header(s) here. */
		frag_len = le16_to_cpu(cb->rx_data_mgmt[index].size);

		/*
		 * We appear to have no way to tell the device the
		 * size of a receive buffer.  Thus, if this check
		 * triggers, we likely have kernel heap corruption. */
		if (frag_len > MGMT_FRAME_SIZE) {
			printk(KERN_WARNING
			       "%s: Bogus packet size of %d (%#x).\n",
			       ndev->name, frag_len, frag_len);
			frag_len = MGMT_FRAME_SIZE;
		}

		/* Ensure the results of device DMA are visible to the CPU. */
		pci_dma_sync_single_for_cpu(priv->pdev, buf->pci_addr,
					    buf->size, PCI_DMA_FROMDEVICE);
		/* Perform endianness conversion for the PIMFOR header in place. */
		header = pimfor_decode_header(buf->mem, frag_len);
		if (!header) {
			printk(KERN_WARNING "%s: no PIMFOR header found\n",
			       ndev->name);
			continue;
		}

		/* The device ID from the PIMFOR packet received from
		 * the MVC is always 0.  We forward a sensible device_id.
		 * Not that anyone upstream would care... */
		header->device_id = priv->ndev->ifindex;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x\n",
		      header->operation, header->oid, header->device_id,
		      header->flags, header->length);

		/* display the buffer contents for debugging */
		display_buffer((char *) header, PIMFOR_HEADER_SIZE);
		display_buffer((char *) header + PIMFOR_HEADER_SIZE,
			       header->length);
#endif

		/* nobody sends these */
		if (header->flags & PIMFOR_FLAG_APPLIC_ORIGIN) {
			printk(KERN_DEBUG
			       "%s: errant PIMFOR application frame\n",
			       ndev->name);
			continue;
		}

		/* Determine frame size, skipping OID_INL_TUNNEL headers. */
		size = PIMFOR_HEADER_SIZE + header->length;
		frame = kmalloc(sizeof (struct islpci_mgmtframe) + size,
				GFP_ATOMIC);
		if (!frame) {
			printk(KERN_WARNING
			       "%s: Out of memory, cannot handle oid 0x%08x\n",
			       ndev->name, header->oid);
			continue;
		}
		frame->ndev = ndev;
		memcpy(&frame->buf, header, size);
		frame->header = (pimfor_header_t *) frame->buf;
		frame->data = frame->buf + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "frame: header: %p, data: %p, size: %d\n",
		      frame->header, frame->data, size);
#endif

		if (header->operation == PIMFOR_OP_TRAP) {
#if VERBOSE > SHOW_ERROR_MESSAGES
			printk(KERN_DEBUG
			       "TRAP: oid 0x%x, device %i, flags 0x%x length %i\n",
			       header->oid, header->device_id, header->flags,
			       header->length);
#endif

			/* Create work to handle trap out of interrupt
			 * context. */
			INIT_WORK(&frame->ws, prism54_process_trap, frame);
			schedule_work(&frame->ws);

		} else {
			/* Signal the one waiting process that a response
			 * has been received. */
			if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) {
				printk(KERN_WARNING
				       "%s: mgmt response not collected\n",
				       ndev->name);
				kfree(frame);
			}
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "Wake up Mgmt Queue\n");
#endif
			wake_up(&priv->mgmt_wqueue);
		}

	}

	return 0;
}

/*
 * Cleanup the transmit queue by freeing all frames handled by the device.
 */
void
islpci_mgt_cleanup_transmit(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =	/* volatile not needed */
	    (isl38xx_control_block *) priv->control_block;
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_cleanup_transmit\n");
#endif

	/* Only once per cleanup, determine fragment range to
	 * process.  This avoids an endless loop (i.e. lockup) if
	 * the device became confused, incrementing device_curr_frag
	 * rapidly. */
	curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_TX_MGMTQ]);
	barrier();

	for (; priv->index_mgmt_tx < curr_frag; priv->index_mgmt_tx++) {
		int index = priv->index_mgmt_tx % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_tx[index];
		pci_unmap_single(priv->pdev, buf->pci_addr, buf->size,
				 PCI_DMA_TODEVICE);
		buf->pci_addr = 0;
		kfree(buf->mem);
		buf->mem = NULL;
		buf->size = 0;
	}
}

/*
 * Perform one request-response transaction to the device.
 */
int
islpci_mgt_transaction(struct net_device *ndev,
		       int operation, unsigned long oid,
		       void *senddata, int sendlen,
		       struct islpci_mgmtframe **recvframe)
{
	islpci_private *priv = netdev_priv(ndev);
	const long wait_cycle_jiffies = msecs_to_jiffies(ISL38XX_WAIT_CYCLE * 10);
	long timeout_left = ISL38XX_MAX_WAIT_CYCLES * wait_cycle_jiffies;
	int err;
	DEFINE_WAIT(wait);

	*recvframe = NULL;

	if (down_interruptible(&priv->mgmt_sem))
		return -ERESTARTSYS;

	prepare_to_wait(&priv->mgmt_wqueue, &wait, TASK_UNINTERRUPTIBLE);
	err = islpci_mgt_transmit(ndev, operation, oid, senddata, sendlen);
	if (err)
		goto out;

	err = -ETIMEDOUT;
	while (timeout_left > 0) {
		int timeleft;
		struct islpci_mgmtframe *frame;

		timeleft = schedule_timeout_uninterruptible(wait_cycle_jiffies);
		frame = xchg(&priv->mgmt_received, NULL);
		if (frame) {
			if (frame->header->oid == oid) {
				*recvframe = frame;
				err = 0;
				goto out;
			} else {
				printk(KERN_DEBUG
				       "%s: expecting oid 0x%x, received 0x%x.\n",
				       ndev->name, (unsigned int) oid,
				       frame->header->oid);
				kfree(frame);
				frame = NULL;
			}
		}
		if (timeleft == 0) {
			printk(KERN_DEBUG
			       "%s: timeout waiting for mgmt response %lu, "
			       "triggering device\n",
			       ndev->name, timeout_left);
			islpci_trigger(priv);
		}
		timeout_left += timeleft - wait_cycle_jiffies;
	}
	printk(KERN_WARNING "%s: timeout waiting for mgmt response\n",
	       ndev->name);

	/* TODO: we should reset the device here */
 out:
	finish_wait(&priv->mgmt_wqueue, &wait);
	up(&priv->mgmt_sem);
	return err;
}
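
/*
 * Typical caller pattern for the transaction helper above -- a sketch only,
 * not copied from isl_ioctl.c/oid_mgt.c; the PIMFOR_OP_* request codes are
 * assumed to come from the driver headers:
 *
 *	struct islpci_mgmtframe *res = NULL;
 *	int ret = islpci_mgt_transaction(ndev, PIMFOR_OP_GET, oid,
 *					 NULL, 0, &res);
 *	if (!ret) {
 *		use res->data (payload) and res->header->length, then
 *		kfree(res);	the frame was kmalloc'd in islpci_mgt_receive()
 *	}
 */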