/*
 * linux/drivers/scsi/esas2r/esas2r_ioctl.c
 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
 *
 * Copyright (c) 2001-2013 ATTO Technology, Inc.
 * (mailto:linuxdrivers@attotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include "esas2r.h"

/*
 * Buffered ioctl handlers.  A buffered ioctl is one which requires that we
 * allocate a DMA-able memory area to communicate with the firmware.  In
 * order to prevent continually allocating and freeing consistent memory,
 * we will allocate a global buffer the first time we need it and re-use
 * it for subsequent ioctl calls that require it.
 */

u8 *esas2r_buffered_ioctl;
dma_addr_t esas2r_buffered_ioctl_addr;
u32 esas2r_buffered_ioctl_size;
struct pci_dev *esas2r_buffered_ioctl_pcid;

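/*
 * Serializes all buffered ioctl handling so that the single global
 * buffer above can be shared safely across adapters.
 */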
static DEFINE_SEMAPHORE(buffered_ioctl_semaphore);

typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
				       struct esas2r_request *,
				       struct esas2r_sg_context *,
				       void *);
typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *,
					     struct esas2r_request *, void *);

struct esas2r_buffered_ioctl {
	struct esas2r_adapter *a;
	void *ioctl;
	u32 length;
	u32 control_code;
	u32 offset;
	BUFFERED_IOCTL_CALLBACK callback;
	void *context;
	BUFFERED_IOCTL_DONE_CALLBACK done_callback;
	void *done_context;
};

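/* Completion callback for FM API requests; wakes the waiting ioctl thread. */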
static void complete_fm_api_req(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	a->fm_api_command_done = 1;
	wake_up_interruptible(&a->fm_api_waiter);
}

/* Callbacks for building scatter/gather lists for FM API requests */
static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = sgc->cur_offset - a->save_offset;

	(*addr) = a->firmware.phys + offset;
	return a->firmware.orig_len - offset;
}

static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = sgc->cur_offset - a->save_offset;

	(*addr) = a->firmware.header_buff_phys + offset;
	return sizeof(struct esas2r_flash_img) - offset;
}

/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. */
static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
{
	struct esas2r_request *rq;

	if (down_interruptible(&a->fm_api_semaphore)) {
		fi->status = FI_STAT_BUSY;
		return;
	}

	rq = esas2r_alloc_request(a);
	if (rq == NULL) {
		up(&a->fm_api_semaphore);
		fi->status = FI_STAT_BUSY;
		return;
	}

	if (fi == &a->firmware.header) {
		a->firmware.header_buff =
			dma_alloc_coherent(&a->pcid->dev,
					   sizeof(struct esas2r_flash_img),
					   (dma_addr_t *)&a->firmware.header_buff_phys,
					   GFP_KERNEL);

		if (a->firmware.header_buff == NULL) {
			esas2r_debug("failed to allocate header buffer!");
			fi->status = FI_STAT_BUSY;
			/* don't leak the semaphore or the request */
			up(&a->fm_api_semaphore);
			esas2r_free_request(a, rq);
			return;
		}

		memcpy(a->firmware.header_buff, fi,
		       sizeof(struct esas2r_flash_img));
		a->save_offset = a->firmware.header_buff;
		a->fm_api_sgc.get_phys_addr =
			(PGETPHYSADDR)get_physaddr_fm_api_header;
	} else {
		a->save_offset = (u8 *)fi;
		a->fm_api_sgc.get_phys_addr =
			(PGETPHYSADDR)get_physaddr_fm_api;
	}

	rq->comp_cb = complete_fm_api_req;
	a->fm_api_command_done = 0;
	a->fm_api_sgc.cur_offset = a->save_offset;

	if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
			   &a->fm_api_sgc))
		goto all_done;

	/* Now wait around for it to complete. */
	while (!a->fm_api_command_done)
		wait_event_interruptible(a->fm_api_waiter,
					 a->fm_api_command_done);
all_done:
	if (fi == &a->firmware.header) {
		memcpy(fi, a->firmware.header_buff,
		       sizeof(struct esas2r_flash_img));

		dma_free_coherent(&a->pcid->dev,
				  sizeof(struct esas2r_flash_img),
				  a->firmware.header_buff,
				  (dma_addr_t)a->firmware.header_buff_phys);
	}

	up(&a->fm_api_semaphore);
	esas2r_free_request(a, rq);
}

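/*
 * Completion callback for NVRAM write requests; wakes the thread waiting
 * in esas2r_write_params().
 */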
static void complete_nvr_req(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	a->nvram_command_done = 1;
	wake_up_interruptible(&a->nvram_waiter);
}

/* Callback for building scatter/gather lists for buffered ioctls */
static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc,
				       u64 *addr)
{
	int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl;

	(*addr) = esas2r_buffered_ioctl_addr + offset;
	return esas2r_buffered_ioctl_size - offset;
}

static void complete_buffered_ioctl_req(struct esas2r_adapter *a,
					struct esas2r_request *rq)
{
	a->buffered_ioctl_done = 1;
	wake_up_interruptible(&a->buffered_ioctl_waiter);
}

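/*
 * Run one buffered ioctl: serialize on the global semaphore, grow the
 * shared DMA buffer if it is too small, copy the caller's data in, let
 * the callback build and (possibly) start the request, wait for it to
 * complete, then copy the results back out.
 */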
static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi)
{
	struct esas2r_adapter *a = bi->a;
	struct esas2r_request *rq;
	struct esas2r_sg_context sgc;
	u8 result = IOCTL_SUCCESS;

	if (down_interruptible(&buffered_ioctl_semaphore))
		return IOCTL_OUT_OF_RESOURCES;

	/* allocate a buffer or use the existing buffer. */
	if (esas2r_buffered_ioctl) {
		if (esas2r_buffered_ioctl_size < bi->length) {
			/* free the too-small buffer and get a new one */
			dma_free_coherent(&a->pcid->dev,
					  (size_t)esas2r_buffered_ioctl_size,
					  esas2r_buffered_ioctl,
					  esas2r_buffered_ioctl_addr);

			goto allocate_buffer;
		}
	} else {
allocate_buffer:
		esas2r_buffered_ioctl_size = bi->length;
		esas2r_buffered_ioctl_pcid = a->pcid;
		esas2r_buffered_ioctl =
			dma_alloc_coherent(&a->pcid->dev,
					   (size_t)esas2r_buffered_ioctl_size,
					   &esas2r_buffered_ioctl_addr,
					   GFP_KERNEL);
	}

	if (!esas2r_buffered_ioctl) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "could not allocate %d bytes of consistent memory "
			   "for a buffered ioctl!",
			   bi->length);

		esas2r_debug("buffered ioctl alloc failure");
		result = IOCTL_OUT_OF_RESOURCES;
		goto exit_cleanly;
	}

	memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length);

	rq = esas2r_alloc_request(a);
	if (rq == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "could not allocate an internal request");

		result = IOCTL_OUT_OF_RESOURCES;
		esas2r_debug("buffered ioctl - no requests");
		goto exit_cleanly;
	}

	a->buffered_ioctl_done = 0;
	rq->comp_cb = complete_buffered_ioctl_req;
	sgc.cur_offset = esas2r_buffered_ioctl + bi->offset;
	sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl;
	sgc.length = esas2r_buffered_ioctl_size;

	if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
		/* completed immediately, no need to wait */
		a->buffered_ioctl_done = 0;
		goto free_andexit_cleanly;
	}

	/* now wait around for it to complete. */
	while (!a->buffered_ioctl_done)
		wait_event_interruptible(a->buffered_ioctl_waiter,
					 a->buffered_ioctl_done);

free_andexit_cleanly:
	if (result == IOCTL_SUCCESS && bi->done_callback)
		(*bi->done_callback)(a, rq, bi->done_context);

	esas2r_free_request(a, rq);

exit_cleanly:
	if (result == IOCTL_SUCCESS)
		memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length);

	up(&buffered_ioctl_semaphore);
	return result;
}

/* SMP ioctl support */
static int smp_ioctl_callback(struct esas2r_adapter *a,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc, void *context)
{
	struct atto_ioctl_smp *si =
		(struct atto_ioctl_smp *)esas2r_buffered_ioctl;

	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);

	if (!esas2r_build_sg_list(a, rq, sgc)) {
		si->status = ATTO_STS_OUT_OF_RSRC;
		return false;
	}

	esas2r_start_request(a, rq);
	return true;
}

static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = si;
	bi.length = sizeof(struct atto_ioctl_smp)
		    + le32_to_cpu(si->req_length)
		    + le32_to_cpu(si->rsp_length);
	bi.offset = 0;
	bi.callback = smp_ioctl_callback;
	return handle_buffered_ioctl(&bi);
}

/* CSMI ioctl support */
static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
	rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);

	/* Now call the original completion callback. */
	(*rq->aux_req_cb)(a, rq);
}

/* Tunnel a CSMI IOCTL to the back end driver for processing. */
static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
			      union atto_ioctl_csmi *ci,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc,
			      u32 ctrl_code,
			      u16 target_id)
{
	struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;

	if (test_bit(AF_DEGRADED_MODE, &a->flags))
		return false;

	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
	ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code);
	ioctl->csmi.target_id = cpu_to_le16(target_id);
	ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);

	/*
	 * Always usurp the completion callback since the interrupt callback
	 * mechanism may be used.
	 */
	rq->aux_req_cx = ci;
	rq->aux_req_cb = rq->comp_cb;
	rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;

	if (!esas2r_build_sg_list(a, rq, sgc))
		return false;

	esas2r_start_request(a, rq);
	return true;
}

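/*
 * A SAS LUN is usable by this driver only if every byte except byte 1
 * (which carries the LUN number itself) is zero.
 */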
static bool check_lun(struct scsi_lun lun)
{
	bool result;

	result = ((lun.scsi_lun[7] == 0) &&
		  (lun.scsi_lun[6] == 0) &&
		  (lun.scsi_lun[5] == 0) &&
		  (lun.scsi_lun[4] == 0) &&
		  (lun.scsi_lun[3] == 0) &&
		  (lun.scsi_lun[2] == 0) &&
		  /* Byte 1 is intentionally skipped */
		  (lun.scsi_lun[0] == 0));

	return result;
}

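/*
 * Dispatch a CSMI control code.  Returns true when a request was started
 * and will complete asynchronously; false when the ioctl was completed
 * (or failed) inline, with the status left in the response.
 */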
static int csmi_ioctl_callback(struct esas2r_adapter *a,
			       struct esas2r_request *rq,
			       struct esas2r_sg_context *sgc, void *context)
{
	struct atto_csmi *ci = (struct atto_csmi *)context;
	union atto_ioctl_csmi *ioctl_csmi =
		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;
	u8 path = 0;
	u8 tid = 0;
	u8 lun = 0;
	u32 sts = CSMI_STS_SUCCESS;
	struct esas2r_target *t;
	unsigned long flags;

	if (ci->control_code == CSMI_CC_GET_DEV_ADDR) {
		struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr;

		path = gda->path_id;
		tid = gda->target_id;
		lun = gda->lun;
	} else if (ci->control_code == CSMI_CC_TASK_MGT) {
		struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt;

		path = tm->path_id;
		tid = tm->target_id;
		lun = tm->lun;
	}

	if (path > 0) {
		rq->func_rsp.ioctl_rsp.csmi.csmi_status =
			cpu_to_le32(CSMI_STS_INV_PARAM);
		return false;
	}

	rq->target_id = tid;
	rq->vrq->scsi.flags |= cpu_to_le32(lun);

	switch (ci->control_code) {
	case CSMI_CC_GET_DRVR_INFO:
	{
		struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info;

		strcpy(gdi->description, esas2r_get_model_name(a));
		gdi->csmi_major_rev = CSMI_MAJOR_REV;
		gdi->csmi_minor_rev = CSMI_MINOR_REV;
		break;
	}

	case CSMI_CC_GET_CNTLR_CFG:
	{
		struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg;

		gcc->base_io_addr = 0;
		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2,
				      &gcc->base_memaddr_lo);
		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3,
				      &gcc->base_memaddr_hi);
		gcc->board_id = MAKEDWORD(a->pcid->subsystem_device,
					  a->pcid->subsystem_vendor);
		gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN;
		gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA;
		gcc->io_bus_type = CSMI_BUS_TYPE_PCI;
		gcc->pci_addr.bus_num = a->pcid->bus->number;
		gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn);
		gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn);

		memset(gcc->serial_num, 0, sizeof(gcc->serial_num));

		gcc->major_rev = LOBYTE(LOWORD(a->fw_version));
		gcc->minor_rev = HIBYTE(LOWORD(a->fw_version));
		gcc->build_rev = LOBYTE(HIWORD(a->fw_version));
		gcc->release_rev = HIBYTE(HIWORD(a->fw_version));
		gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver));
		gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
		gcc->bios_build_rev = LOWORD(a->flash_ver);

		if (test_bit(AF2_THUNDERLINK, &a->flags2))
			gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
					   | CSMI_CNTLRF_SATA_HBA;
		else
			gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID
					   | CSMI_CNTLRF_SATA_RAID;

		gcc->rrom_major_rev = 0;
		gcc->rrom_minor_rev = 0;
		gcc->rrom_build_rev = 0;
		gcc->rrom_release_rev = 0;
		gcc->rrom_biosmajor_rev = 0;
		gcc->rrom_biosminor_rev = 0;
		gcc->rrom_biosbuild_rev = 0;
		gcc->rrom_biosrelease_rev = 0;
		break;
	}

	case CSMI_CC_GET_CNTLR_STS:
	{
		struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;

		if (test_bit(AF_DEGRADED_MODE, &a->flags))
			gcs->status = CSMI_CNTLR_STS_FAILED;
		else
			gcs->status = CSMI_CNTLR_STS_GOOD;

		gcs->offline_reason = CSMI_OFFLINE_NO_REASON;
		break;
	}

	case CSMI_CC_FW_DOWNLOAD:
	case CSMI_CC_GET_RAID_INFO:
	case CSMI_CC_GET_RAID_CFG:

		sts = CSMI_STS_BAD_CTRL_CODE;
		break;

	case CSMI_CC_SMP_PASSTHRU:
	case CSMI_CC_SSP_PASSTHRU:
	case CSMI_CC_STP_PASSTHRU:
	case CSMI_CC_GET_PHY_INFO:
	case CSMI_CC_SET_PHY_INFO:
	case CSMI_CC_GET_LINK_ERRORS:
	case CSMI_CC_GET_SATA_SIG:
	case CSMI_CC_GET_CONN_INFO:
	case CSMI_CC_PHY_CTRL:

		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
				       ci->control_code,
				       ESAS2R_TARG_ID_INV)) {
			sts = CSMI_STS_FAILED;
			break;
		}

		return true;

	case CSMI_CC_GET_SCSI_ADDR:
	{
		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

		struct scsi_lun lun;

		memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun));

		if (!check_lun(lun)) {
			sts = CSMI_STS_NO_SCSI_ADDR;
			break;
		}

		/* make sure the device is present */
		spin_lock_irqsave(&a->mem_lock, flags);
		t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr);
		spin_unlock_irqrestore(&a->mem_lock, flags);

		if (t == NULL) {
			sts = CSMI_STS_NO_SCSI_ADDR;
			break;
		}

		gsa->host_index = 0xFF;
		gsa->lun = gsa->sas_lun[1];
		rq->target_id = esas2r_targ_get_id(t, a);
		break;
	}

	case CSMI_CC_GET_DEV_ADDR:
	{
		struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr;

		/* make sure the target is present */
		t = a->targetdb + rq->target_id;

		if (t >= a->targetdb_end
		    || t->target_state != TS_PRESENT
		    || t->sas_addr == 0) {
			sts = CSMI_STS_NO_DEV_ADDR;
			break;
		}

		/* fill in the result */
		*(u64 *)gda->sas_addr = t->sas_addr;
		memset(gda->sas_lun, 0, sizeof(gda->sas_lun));
		gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
		break;
	}

	case CSMI_CC_TASK_MGT:

		/* make sure the target is present */
		t = a->targetdb + rq->target_id;

		if (t >= a->targetdb_end
		    || t->target_state != TS_PRESENT
		    || !(t->flags & TF_PASS_THRU)) {
			sts = CSMI_STS_NO_DEV_ADDR;
			break;
		}

		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
				       ci->control_code,
				       t->phys_targ_id)) {
			sts = CSMI_STS_FAILED;
			break;
		}

		return true;

	default:

		sts = CSMI_STS_BAD_CTRL_CODE;
		break;
	}

	rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);

	return false;
}

static void csmi_ioctl_done_callback(struct esas2r_adapter *a,
				     struct esas2r_request *rq, void *context)
{
	struct atto_csmi *ci = (struct atto_csmi *)context;
	union atto_ioctl_csmi *ioctl_csmi =
		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;

	switch (ci->control_code) {
	case CSMI_CC_GET_DRVR_INFO:
	{
		struct atto_csmi_get_driver_info *gdi =
			&ioctl_csmi->drvr_info;

		strcpy(gdi->name, ESAS2R_VERSION_STR);

		gdi->major_rev = ESAS2R_MAJOR_REV;
		gdi->minor_rev = ESAS2R_MINOR_REV;
		gdi->build_rev = 0;
		gdi->release_rev = 0;
		break;
	}

	case CSMI_CC_GET_SCSI_ADDR:
	{
		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

		if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
		    CSMI_STS_SUCCESS) {
			gsa->target_id = rq->target_id;
			gsa->path_id = 0;
		}

		break;
	}
	}

	ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
}

static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = &ci->data;
	bi.length = sizeof(union atto_ioctl_csmi);
	bi.offset = 0;
	bi.callback = csmi_ioctl_callback;
	bi.context = ci;
	bi.done_callback = csmi_ioctl_done_callback;
	bi.done_context = ci;

	return handle_buffered_ioctl(&bi);
}

/* ATTO HBA ioctl support */

/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */
static bool hba_ioctl_tunnel(struct esas2r_adapter *a,
			     struct atto_ioctl *hi,
			     struct esas2r_request *rq,
			     struct esas2r_sg_context *sgc)
{
	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);

	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);

	if (!esas2r_build_sg_list(a, rq, sgc)) {
		hi->status = ATTO_STS_OUT_OF_RSRC;

		return false;
	}

	esas2r_start_request(a, rq);

	return true;
}

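/*
 * Completion callback for SCSI pass-through requests: translate the
 * internal request status to an ATTO_SPT_RS_* code, fill in the sense
 * and residual information, then chain to the original callback.
 */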
static void scsi_passthru_comp_cb(struct esas2r_adapter *a,
				  struct esas2r_request *rq)
{
	struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
	struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
	u8 sts = ATTO_SPT_RS_FAILED;

	spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
	spt->sense_length = rq->sense_len;
	spt->residual_length =
		le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);

	switch (rq->req_stat) {
	case RS_SUCCESS:
	case RS_SCSI_ERROR:
		sts = ATTO_SPT_RS_SUCCESS;
		break;
	case RS_UNDERRUN:
		sts = ATTO_SPT_RS_UNDERRUN;
		break;
	case RS_OVERRUN:
		sts = ATTO_SPT_RS_OVERRUN;
		break;
	case RS_SEL:
	case RS_SEL2:
		sts = ATTO_SPT_RS_NO_DEVICE;
		break;
	case RS_NO_LUN:
		sts = ATTO_SPT_RS_NO_LUN;
		break;
	case RS_TIMEOUT:
		sts = ATTO_SPT_RS_TIMEOUT;
		break;
	case RS_DEGRADED:
		sts = ATTO_SPT_RS_DEGRADED;
		break;
	case RS_BUSY:
		sts = ATTO_SPT_RS_BUSY;
		break;
	case RS_ABORTED:
		sts = ATTO_SPT_RS_ABORTED;
		break;
	case RS_RESET:
		sts = ATTO_SPT_RS_BUS_RESET;
		break;
	}

	spt->req_status = sts;

	/* Update the target ID to the next one present. */
	spt->target_id =
		esas2r_targ_db_find_next_present(a, (u16)spt->target_id);

	/* Done, call the completion callback. */
	(*rq->aux_req_cb)(a, rq);
}

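/*
 * Dispatch an ATTO HBA ioctl function.  As with the CSMI handler, a true
 * return means a request is in flight; false means the ioctl finished
 * inline with hi->status already set.
 */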
static int hba_ioctl_callback(struct esas2r_adapter *a,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc,
			      void *context)
{
	struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl;

	hi->status = ATTO_STS_SUCCESS;

	switch (hi->function) {
	case ATTO_FUNC_GET_ADAP_INFO:
	{
		u8 *class_code = (u8 *)&a->pcid->class;

		struct atto_hba_get_adapter_info *gai =
			&hi->data.get_adap_info;
		int pcie_cap_reg;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_GET_ADAP_INFO0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_ADAP_INFO0;
			break;
		}

		memset(gai, 0, sizeof(*gai));

		gai->pci.vendor_id = a->pcid->vendor;
		gai->pci.device_id = a->pcid->device;
		gai->pci.ss_vendor_id = a->pcid->subsystem_vendor;
		gai->pci.ss_device_id = a->pcid->subsystem_device;
		gai->pci.class_code[0] = class_code[0];
		gai->pci.class_code[1] = class_code[1];
		gai->pci.class_code[2] = class_code[2];
		gai->pci.rev_id = a->pcid->revision;
		gai->pci.bus_num = a->pcid->bus->number;
		gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
		gai->pci.func_num = PCI_FUNC(a->pcid->devfn);

		pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
		if (pcie_cap_reg) {
			u16 stat;
			u32 caps;

			pci_read_config_word(a->pcid,
					     pcie_cap_reg + PCI_EXP_LNKSTA,
					     &stat);
			pci_read_config_dword(a->pcid,
					      pcie_cap_reg + PCI_EXP_LNKCAP,
					      &caps);

			gai->pci.link_speed_curr =
				(u8)(stat & PCI_EXP_LNKSTA_CLS);
			gai->pci.link_speed_max =
				(u8)(caps & PCI_EXP_LNKCAP_SLS);
			gai->pci.link_width_curr =
				(u8)((stat & PCI_EXP_LNKSTA_NLW)
				     >> PCI_EXP_LNKSTA_NLW_SHIFT);
			gai->pci.link_width_max =
				(u8)((caps & PCI_EXP_LNKCAP_MLW)
				     >> 4);
		}

		gai->pci.msi_vector_cnt = 1;

		if (a->pcid->msix_enabled)
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX;
		else if (a->pcid->msi_enabled)
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI;
		else
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY;

		gai->adap_type = ATTO_GAI_AT_ESASRAID2;

		if (test_bit(AF2_THUNDERLINK, &a->flags2))
			gai->adap_type = ATTO_GAI_AT_TLSASHBA;

		if (test_bit(AF_DEGRADED_MODE, &a->flags))
			gai->adap_flags |= ATTO_GAI_AF_DEGRADED;

		gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
				   ATTO_GAI_AF_DEVADDR_SUPP;

		if (a->pcid->subsystem_device == ATTO_ESAS_R60F
		    || a->pcid->subsystem_device == ATTO_ESAS_R608
		    || a->pcid->subsystem_device == ATTO_ESAS_R644
		    || a->pcid->subsystem_device == ATTO_TSSC_3808E)
			gai->adap_flags |= ATTO_GAI_AF_VIRT_SES;

		gai->num_ports = ESAS2R_NUM_PHYS;
		gai->num_phys = ESAS2R_NUM_PHYS;

		strcpy(gai->firmware_rev, a->fw_rev);
		strcpy(gai->flash_rev, a->flash_rev);
		strcpy(gai->model_name_short, esas2r_get_model_name_short(a));
		strcpy(gai->model_name, esas2r_get_model_name(a));

		gai->num_targets = ESAS2R_MAX_TARGETS;

		gai->num_busses = 1;
		gai->num_targsper_bus = gai->num_targets;
		gai->num_lunsper_targ = 256;

		if (a->pcid->subsystem_device == ATTO_ESAS_R6F0
		    || a->pcid->subsystem_device == ATTO_ESAS_R60F)
			gai->num_connectors = 4;
		else
			gai->num_connectors = 2;

		gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP;

		gai->num_targets_backend = a->num_targets_backend;

		gai->tunnel_flags = a->ioctl_tunnel
				    & (ATTO_GAI_TF_MEM_RW
				       | ATTO_GAI_TF_TRACE
				       | ATTO_GAI_TF_SCSI_PASS_THRU
				       | ATTO_GAI_TF_GET_DEV_ADDR
				       | ATTO_GAI_TF_PHY_CTRL
				       | ATTO_GAI_TF_CONN_CTRL
				       | ATTO_GAI_TF_GET_DEV_INFO);
		break;
	}

	case ATTO_FUNC_GET_ADAP_ADDR:
	{
		struct atto_hba_get_adapter_address *gaa =
			&hi->data.get_adap_addr;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_GET_ADAP_ADDR0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_ADAP_ADDR0;
		} else if (gaa->addr_type == ATTO_GAA_AT_PORT
			   || gaa->addr_type == ATTO_GAA_AT_NODE) {
			if (gaa->addr_type == ATTO_GAA_AT_PORT
			    && gaa->port_id >= ESAS2R_NUM_PHYS) {
				hi->status = ATTO_STS_NOT_APPL;
			} else {
				memcpy((u64 *)gaa->address,
				       &a->nvram->sas_addr[0], sizeof(u64));
				gaa->addr_len = sizeof(u64);
			}
		} else {
			hi->status = ATTO_STS_INV_PARAM;
		}

		break;
	}

	case ATTO_FUNC_MEM_RW:
	{
		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		hi->status = ATTO_STS_UNSUPPORTED;

		break;
	}

	case ATTO_FUNC_TRACE:
	{
		struct atto_hba_trace *trc = &hi->data.trace;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_TRACE1) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_TRACE1;
			break;
		}

		if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP
		    && hi->version >= ATTO_VER_TRACE1) {
			if (trc->trace_func == ATTO_TRC_TF_UPLOAD) {
				u32 len = hi->data_length;
				u32 offset = trc->current_offset;
				u32 total_len = ESAS2R_FWCOREDUMP_SZ;

				/* Size is zero if a core dump isn't present */
				if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
					total_len = 0;

				if (len > total_len)
					len = total_len;

				if (offset >= total_len
				    || offset + len > total_len
				    || len == 0) {
					hi->status = ATTO_STS_INV_PARAM;
					break;
				}

				memcpy(trc + 1,
				       a->fw_coredump_buff + offset,
				       len);

				hi->data_length = len;
			} else if (trc->trace_func == ATTO_TRC_TF_RESET) {
				memset(a->fw_coredump_buff, 0,
				       ESAS2R_FWCOREDUMP_SZ);

				clear_bit(AF2_COREDUMP_SAVED, &a->flags2);
			} else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
				hi->status = ATTO_STS_UNSUPPORTED;
				break;
			}

			/* Always return all the info we can. */
			trc->trace_mask = 0;
			trc->current_offset = 0;
			trc->total_length = ESAS2R_FWCOREDUMP_SZ;

			/* Return zero length buffer if core dump not present */
			if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
				trc->total_length = 0;
		} else {
			hi->status = ATTO_STS_UNSUPPORTED;
		}

		break;
	}

	case ATTO_FUNC_SCSI_PASS_THRU:
	{
		struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
		struct scsi_lun lun;

		memcpy(&lun, spt->lun, sizeof(struct scsi_lun));

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_SCSI_PASS_THRU0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_SCSI_PASS_THRU0;
			break;
		}

		if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		esas2r_sgc_init(sgc, a, rq, NULL);

		sgc->length = hi->data_length;
		sgc->cur_offset += offsetof(struct atto_ioctl, data.byte)
				   + sizeof(struct atto_hba_scsi_pass_thru);

		/* Finish request initialization */
		rq->target_id = (u16)spt->target_id;
		rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
		memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
		rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
		rq->sense_len = spt->sense_length;
		rq->sense_buf = (u8 *)spt->sense_data;
		/* NOTE: we ignore spt->timeout */

		/*
		 * always usurp the completion callback since the interrupt
		 * callback mechanism may be used.
		 */
		rq->aux_req_cx = hi;
		rq->aux_req_cb = rq->comp_cb;
		rq->comp_cb = scsi_passthru_comp_cb;

		if (spt->flags & ATTO_SPTF_DATA_IN) {
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
		} else if (spt->flags & ATTO_SPTF_DATA_OUT) {
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
		} else {
			if (sgc->length) {
				hi->status = ATTO_STS_INV_PARAM;
				break;
			}
		}

		if (spt->flags & ATTO_SPTF_ORDERED_Q)
			rq->vrq->scsi.flags |=
				cpu_to_le32(FCP_CMND_TA_ORDRD_Q);
		else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);

		if (!esas2r_build_sg_list(a, rq, sgc)) {
			hi->status = ATTO_STS_OUT_OF_RSRC;
			break;
		}

		esas2r_start_request(a, rq);

		return true;
	}

	case ATTO_FUNC_GET_DEV_ADDR:
	{
		struct atto_hba_get_device_address *gda =
			&hi->data.get_dev_addr;
		struct esas2r_target *t;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_GET_DEV_ADDR0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_DEV_ADDR0;
			break;
		}

		if (gda->target_id >= ESAS2R_MAX_TARGETS) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		t = a->targetdb + (u16)gda->target_id;

		if (t->target_state != TS_PRESENT) {
			hi->status = ATTO_STS_FAILED;
		} else if (gda->addr_type == ATTO_GDA_AT_PORT) {
			if (t->sas_addr == 0) {
				hi->status = ATTO_STS_UNSUPPORTED;
			} else {
				*(u64 *)gda->address = t->sas_addr;

				gda->addr_len = sizeof(u64);
			}
		} else if (gda->addr_type == ATTO_GDA_AT_NODE) {
			hi->status = ATTO_STS_NOT_APPL;
		} else {
			hi->status = ATTO_STS_INV_PARAM;
		}

		/* update the target ID to the next one present. */
		gda->target_id =
			esas2r_targ_db_find_next_present(a,
							 (u16)gda->target_id);
		break;
	}

	case ATTO_FUNC_PHY_CTRL:
	case ATTO_FUNC_CONN_CTRL:
	{
		if (hba_ioctl_tunnel(a, hi, rq, sgc))
			return true;

		break;
	}

	case ATTO_FUNC_ADAP_CTRL:
	{
		struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_ADAP_CTRL0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_ADAP_CTRL0;
			break;
		}

		if (ac->adap_func == ATTO_AC_AF_HARD_RST) {
			esas2r_reset_adapter(a);
		} else if (ac->adap_func != ATTO_AC_AF_GET_STATE) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (test_bit(AF_CHPRST_NEEDED, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_SCHED;
		else if (test_bit(AF_CHPRST_PENDING, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
		else if (test_bit(AF_DISC_PENDING, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_DISC;
		else if (test_bit(AF_DISABLED, &a->flags))
			ac->adap_state = ATTO_AC_AS_DISABLED;
		else if (test_bit(AF_DEGRADED_MODE, &a->flags))
			ac->adap_state = ATTO_AC_AS_DEGRADED;
		else
			ac->adap_state = ATTO_AC_AS_OK;

		break;
	}

	case ATTO_FUNC_GET_DEV_INFO:
	{
		struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info;
		struct esas2r_target *t;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_GET_DEV_INFO0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_DEV_INFO0;
			break;
		}

		if (gdi->target_id >= ESAS2R_MAX_TARGETS) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		t = a->targetdb + (u16)gdi->target_id;

		/* update the target ID to the next one present. */
		gdi->target_id =
			esas2r_targ_db_find_next_present(a,
							 (u16)gdi->target_id);

		if (t->target_state != TS_PRESENT) {
			hi->status = ATTO_STS_FAILED;
			break;
		}

		hi->status = ATTO_STS_UNSUPPORTED;
		break;
	}

	default:

		hi->status = ATTO_STS_INV_FUNC;
		break;
	}

	return false;
}

static void hba_ioctl_done_callback(struct esas2r_adapter *a,
				    struct esas2r_request *rq, void *context)
{
	struct atto_ioctl *ioctl_hba =
		(struct atto_ioctl *)esas2r_buffered_ioctl;

	esas2r_debug("hba_ioctl_done_callback %d", a->index);

	if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) {
		struct atto_hba_get_adapter_info *gai =
			&ioctl_hba->data.get_adap_info;

		esas2r_debug("ATTO_FUNC_GET_ADAP_INFO");

		gai->drvr_rev_major = ESAS2R_MAJOR_REV;
		gai->drvr_rev_minor = ESAS2R_MINOR_REV;

		strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR);
		strcpy(gai->drvr_name, ESAS2R_DRVR_NAME);

		gai->num_busses = 1;
		gai->num_targsper_bus = ESAS2R_MAX_ID + 1;
		gai->num_lunsper_targ = 1;
	}
}

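/* Wrap an ATTO HBA ioctl in a buffered ioctl and run it. */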
u8 handle_hba_ioctl(struct esas2r_adapter *a,
		    struct atto_ioctl *ioctl_hba)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = ioctl_hba;
	bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length;
	bi.callback = hba_ioctl_callback;
	bi.context = NULL;
	bi.done_callback = hba_ioctl_done_callback;
	bi.done_context = NULL;
	bi.offset = 0;

	return handle_buffered_ioctl(&bi);
}

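/*
 * Write the SAS NVRAM image and wait for the request to complete.
 * Returns 1 on success and 0 on failure.
 */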
int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
			struct esas2r_sas_nvram *data)
{
	int result = 0;

	a->nvram_command_done = 0;
	rq->comp_cb = complete_nvr_req;

	if (esas2r_nvram_write(a, rq, data)) {
		/* now wait around for it to complete. */
		while (!a->nvram_command_done)
			wait_event_interruptible(a->nvram_waiter,
						 a->nvram_command_done);

		/* done, check the status. */
		if (rq->req_stat == RS_SUCCESS)
			result = 1;
	}
	return result;
}

/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
{
	struct atto_express_ioctl *ioctl = NULL;
	struct esas2r_adapter *a;
	struct esas2r_request *rq;
	u16 code;
	int err;

	esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg);

	if ((arg == NULL)
	    || (cmd < EXPRESS_IOCTL_MIN)
	    || (cmd > EXPRESS_IOCTL_MAX))
		return -ENOTSUPP;

	if (!access_ok(VERIFY_WRITE, arg, sizeof(struct atto_express_ioctl))) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "ioctl_handler access_ok failed for cmd %d, "
			   "address %p", cmd,
			   arg);
		return -EFAULT;
	}

	/* allocate a kernel memory buffer for the IOCTL data */
	ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL);
	if (ioctl == NULL) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "ioctl_handler kzalloc failed for %zu bytes",
			   sizeof(struct atto_express_ioctl));
		return -ENOMEM;
	}


	err = __copy_from_user(ioctl, arg, sizeof(struct atto_express_ioctl));
	if (err != 0) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "copy_from_user didn't copy everything (err %d, cmd %d)",
			   err,
			   cmd);
		kfree(ioctl);

		return -EFAULT;
	}

	/* verify the signature */

	if (memcmp(ioctl->header.signature,
		   EXPRESS_IOCTL_SIGNATURE,
		   EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) {
		esas2r_log(ESAS2R_LOG_WARN, "invalid signature");
		kfree(ioctl);

		return -ENOTSUPP;
	}

	/* assume success */

	ioctl->header.return_code = IOCTL_SUCCESS;
	err = 0;

	/*
	 * handle EXPRESS_IOCTL_GET_CHANNELS
	 * without paying attention to channel
	 */

	if (cmd == EXPRESS_IOCTL_GET_CHANNELS) {
		int i = 0, k = 0;

		ioctl->data.chanlist.num_channels = 0;

		while (i < MAX_ADAPTERS) {
			if (esas2r_adapters[i]) {
				ioctl->data.chanlist.num_channels++;
				ioctl->data.chanlist.channel[k] = i;
				k++;
			}
			i++;
		}

		goto ioctl_done;
	}

	/* get the channel */

	if (ioctl->header.channel == 0xFF) {
		a = (struct esas2r_adapter *)hostdata;
	} else {
		/* validate the channel before using it as an array index */
		if (ioctl->header.channel >= MAX_ADAPTERS
		    || esas2r_adapters[ioctl->header.channel] == NULL) {
			ioctl->header.return_code = IOCTL_BAD_CHANNEL;
			esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
			kfree(ioctl);

			return -ENOTSUPP;
		}
		a = esas2r_adapters[ioctl->header.channel];
	}


	switch (cmd) {
	case EXPRESS_IOCTL_RW_FIRMWARE:

		if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) {
			err = esas2r_write_fw(a,
					      (char *)ioctl->data.fwrw.image,
					      0,
					      sizeof(struct
						     atto_express_ioctl));

			if (err >= 0) {
				err = esas2r_read_fw(a,
						     (char *)ioctl->data.fwrw.image,
						     0,
						     sizeof(struct
							    atto_express_ioctl));
			}
		} else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) {
			err = esas2r_write_fs(a,
					      (char *)ioctl->data.fwrw.image,
					      0,
					      sizeof(struct
						     atto_express_ioctl));

			if (err >= 0) {
				err = esas2r_read_fs(a,
						     (char *)ioctl->data.fwrw.image,
						     0,
						     sizeof(struct
							    atto_express_ioctl));
			}
		} else {
			ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE;
		}

		break;

	case EXPRESS_IOCTL_READ_PARAMS:

		memcpy(ioctl->data.prw.data_buffer, a->nvram,
		       sizeof(struct esas2r_sas_nvram));
		ioctl->data.prw.code = 1;
		break;

	case EXPRESS_IOCTL_WRITE_PARAMS:

		rq = esas2r_alloc_request(a);
		if (rq == NULL) {
			ioctl->data.prw.code = 0;
			break;
		}

		code = esas2r_write_params(a, rq,
					   (struct esas2r_sas_nvram *)
					   ioctl->data.prw.data_buffer);
		ioctl->data.prw.code = code;

		esas2r_free_request(a, rq);

		break;


	case EXPRESS_IOCTL_DEFAULT_PARAMS:

		esas2r_nvram_get_defaults(a,
					  (struct esas2r_sas_nvram *)
					  ioctl->data.prw.data_buffer);
		ioctl->data.prw.code = 1;
		break;

	case EXPRESS_IOCTL_CHAN_INFO:

		ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV;
		ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV;
		ioctl->data.chaninfo.IRQ = a->pcid->irq;
		ioctl->data.chaninfo.device_id = a->pcid->device;
		ioctl->data.chaninfo.vendor_id = a->pcid->vendor;
		ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device;
		ioctl->data.chaninfo.revision_id = a->pcid->revision;
		ioctl->data.chaninfo.pci_bus = a->pcid->bus->number;
		ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn;
		ioctl->data.chaninfo.core_rev = 0;
		ioctl->data.chaninfo.host_no = a->host->host_no;
		ioctl->data.chaninfo.hbaapi_rev = 0;
		break;

	case EXPRESS_IOCTL_SMP:
		ioctl->header.return_code =
			handle_smp_ioctl(a, &ioctl->data.ioctl_smp);
		break;

	case EXPRESS_CSMI:
		ioctl->header.return_code =
			handle_csmi_ioctl(a, &ioctl->data.csmi);
		break;

	case EXPRESS_IOCTL_HBA:
		ioctl->header.return_code =
			handle_hba_ioctl(a, &ioctl->data.ioctl_hba);
		break;

	case EXPRESS_IOCTL_VDA:
		err = esas2r_write_vda(a,
				       (char *)&ioctl->data.ioctl_vda,
				       0,
				       sizeof(struct atto_ioctl_vda) +
				       ioctl->data.ioctl_vda.data_length);

		if (err >= 0) {
			err = esas2r_read_vda(a,
					      (char *)&ioctl->data.ioctl_vda,
					      0,
					      sizeof(struct atto_ioctl_vda) +
					      ioctl->data.ioctl_vda.data_length);
		}

		break;

	case EXPRESS_IOCTL_GET_MOD_INFO:

		ioctl->data.modinfo.adapter = a;
		ioctl->data.modinfo.pci_dev = a->pcid;
		ioctl->data.modinfo.scsi_host = a->host;
		ioctl->data.modinfo.host_no = a->host->host_no;

		break;

	default:
		esas2r_debug("esas2r_ioctl invalid cmd %d!", cmd);
		ioctl->header.return_code = IOCTL_ERR_INVCMD;
	}

ioctl_done:

	if (err < 0) {
		esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %d", err,
			   cmd);

		switch (err) {
		case -ENOMEM:
		case -EBUSY:
			ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES;
			break;

		case -ENOSYS:
		case -EINVAL:
			ioctl->header.return_code = IOCTL_INVALID_PARAM;
			break;

		default:
			/* don't clobber the specific codes set above */
			ioctl->header.return_code = IOCTL_GENERAL_ERROR;
			break;
		}
	}


	/* Always copy the buffer back, if only to pick up the status */
	err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
	if (err != 0) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "ioctl_handler copy_to_user didn't copy "
			   "everything (err %d, cmd %d)", err,
			   cmd);
		kfree(ioctl);

		return -EFAULT;
	}

	kfree(ioctl);

	return 0;
}

int esas2r_ioctl(struct scsi_device *sd, int cmd, void __user *arg)
{
	return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
}

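/*
 * Helpers for the firmware read/write path: a single coherent buffer
 * holds the flash image for the duration of an upload or download.
 */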
static void free_fw_buffers(struct esas2r_adapter *a)
{
	if (a->firmware.data) {
		dma_free_coherent(&a->pcid->dev,
				  (size_t)a->firmware.orig_len,
				  a->firmware.data,
				  (dma_addr_t)a->firmware.phys);

		a->firmware.data = NULL;
	}
}

static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
{
	free_fw_buffers(a);

	a->firmware.orig_len = length;

	a->firmware.data = dma_alloc_coherent(&a->pcid->dev,
					      (size_t)length,
					      (dma_addr_t *)&a->firmware.phys,
					      GFP_KERNEL);

	if (!a->firmware.data) {
		esas2r_debug("buffer alloc failed!");
		return 0;
	}

	return 1;
}

/* Handle a call to read firmware. */
int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count)
{
	esas2r_trace_enter();
	/* If the cached header is a status, simply copy it over and return. */
	if (a->firmware.state == FW_STATUS_ST) {
		int size = min_t(int, count, sizeof(a->firmware.header));
		esas2r_trace_exit();
		memcpy(buf, &a->firmware.header, size);
		esas2r_debug("esas2r_read_fw: STATUS size %d", size);
		return size;
	}

	/*
	 * If the cached header is a command, execute it when reading at
	 * offset 0; otherwise copy out the buffered pieces.
	 */
	if (a->firmware.state == FW_COMMAND_ST) {
		u32 length = a->firmware.header.length;
		esas2r_trace_exit();

		esas2r_debug("esas2r_read_fw: COMMAND length %d off %ld",
			     length,
			     off);

		if (off == 0) {
			if (a->firmware.header.action == FI_ACT_UP) {
				if (!allocate_fw_buffers(a, length))
					return -ENOMEM;

				/* copy the header over */
				memcpy(a->firmware.data,
				       &a->firmware.header,
				       sizeof(a->firmware.header));

				do_fm_api(a,
					  (struct esas2r_flash_img *)a->firmware.data);
			} else if (a->firmware.header.action == FI_ACT_UPSZ) {
				int size = min_t(int, count,
						 sizeof(a->firmware.header));

				do_fm_api(a, &a->firmware.header);
				memcpy(buf, &a->firmware.header, size);
				esas2r_debug("FI_ACT_UPSZ size %d", size);
				return size;
			} else {
				esas2r_debug("invalid action %d",
					     a->firmware.header.action);
				return -ENOSYS;
			}
		}

		if (count + off > length)
			count = length - off;

		if (count < 0)
			return 0;

		if (!a->firmware.data) {
			esas2r_debug(
				"read: nonzero offset but no buffer available!");
			return -ENOMEM;
		}

		esas2r_debug("esas2r_read_fw: off %ld count %d length %d",
			     off,
			     count,
			     length);

		memcpy(buf, &a->firmware.data[off], count);

		/* when done, release the buffer */
		if (length <= off + count) {
			esas2r_debug("esas2r_read_fw: freeing buffer!");

			free_fw_buffers(a);
		}

		return count;
	}

	esas2r_trace_exit();
	esas2r_debug("esas2r_read_fw: invalid firmware state %d",
		     a->firmware.state);

	return -EINVAL;
}
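
/*
 * Illustrative userspace sketch, not part of the driver.  Assuming the
 * handler above is exposed as a binary attribute (the "fw" path below
 * is hypothetical), an upload is driven by writing an FI_ACT_UP header
 * and then reading the image back: the read at offset 0 is what issues
 * the command, and the driver frees its bounce buffer once the final
 * chunk has been read.  image_len would come from a prior FI_ACT_UPSZ
 * query and is hypothetical here.
 */
#if 0
	struct esas2r_flash_img hdr = {
		.fi_version = FI_VERSION_1,
		.action = FI_ACT_UP,
		.length = image_len,		/* from an FI_ACT_UPSZ query */
	};
	char chunk[4096];
	off_t pos = 0;
	ssize_t n;
	int fd = open("fw", O_RDWR);		/* hypothetical path */

	write(fd, &hdr, sizeof(hdr));		/* caches the command */
	while ((n = pread(fd, chunk, sizeof(chunk), pos)) > 0)
		pos += n;			/* offset 0 triggers do_fm_api() */
	close(fd);
#endif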

/* Handle a call to write firmware. */
int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
		    int count)
{
	u32 length;

	if (off == 0) {
		struct esas2r_flash_img *header =
			(struct esas2r_flash_img *)buf;

		/* assume a version 0 flash image */
		int min_size = sizeof(struct esas2r_flash_img_v0);

		a->firmware.state = FW_INVALID_ST;

		/* validate the version field first */
		if (count < 4
		    || header->fi_version > FI_VERSION_1) {
			esas2r_debug(
				"esas2r_write_fw: short header or invalid version");
			return -EINVAL;
		}

		/* See if it's a version 1 flash image */
		if (header->fi_version == FI_VERSION_1)
			min_size = sizeof(struct esas2r_flash_img);

		/* If this is the start, the header must be full and valid. */
		if (count < min_size) {
			esas2r_debug("esas2r_write_fw: short header, aborting");
			return -EINVAL;
		}

		/* Make sure the size is reasonable. */
		length = header->length;

		if (length > 1024 * 1024) {
			esas2r_debug(
				"esas2r_write_fw: hosed, length %d fi_version %d",
				length, header->fi_version);
			return -EINVAL;
		}

		/*
		 * If this is a download command, allocate memory because
		 * we have to cache everything.  Otherwise, just cache the
		 * header, because the read op will do the command.
		 */
		if (header->action == FI_ACT_DOWN) {
			if (!allocate_fw_buffers(a, length))
				return -ENOMEM;

			/*
			 * Store the command, so there is context on
			 * subsequent calls.
			 */
			memcpy(&a->firmware.header,
			       buf,
			       sizeof(*header));
		} else if (header->action == FI_ACT_UP
			   || header->action == FI_ACT_UPSZ) {
			/* Save the command; the result is picked up on read */
			memcpy(&a->firmware.header,
			       buf,
			       sizeof(*header));

			a->firmware.state = FW_COMMAND_ST;

			esas2r_debug(
				"esas2r_write_fw: COMMAND, count %d, action %d",
				count, header->action);

			/*
			 * Pretend we took the whole buffer,
			 * so we don't get bothered again.
			 */
			return count;
		} else {
			esas2r_debug("esas2r_write_fw: invalid action %d",
				     a->firmware.header.action);
			return -ENOSYS;
		}
	} else {
		length = a->firmware.header.length;
	}

	/*
	 * We only get here on a download command, regardless of offset.
	 * The chunks written by the system need to be cached, and when
	 * the final one arrives, issue the fmapi command.
	 */
	if (off + count > length)
		count = length - off;

	if (count > 0) {
		esas2r_debug("esas2r_write_fw: off %ld count %d length %d",
			     off,
			     count,
			     length);

		/*
		 * On a full upload, the system tries sending the whole
		 * buffer.  There's nothing to do with it, so just drop it
		 * here, before trying to copy it into unallocated memory!
		 */
		if (a->firmware.header.action == FI_ACT_UP)
			return count;

		if (!a->firmware.data) {
			esas2r_debug(
				"write: nonzero offset but no buffer available!");
			return -ENOMEM;
		}

		memcpy(&a->firmware.data[off], buf, count);

		if (length == off + count) {
			do_fm_api(a,
				  (struct esas2r_flash_img *)a->firmware.data);

			/*
			 * Now copy the header result to be picked up by
			 * the next read
			 */
			memcpy(&a->firmware.header,
			       a->firmware.data,
			       sizeof(a->firmware.header));

			a->firmware.state = FW_STATUS_ST;

			esas2r_debug("write completed");

			/*
			 * Since the system has the data buffered, the only
			 * way this can leak is if a root user writes a
			 * program that writes a shorter buffer than it
			 * claims, and the copyin fails.
			 */
			free_fw_buffers(a);
		}
	}

	return count;
}
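
/*
 * Illustrative userspace sketch, not part of the driver: the download
 * (flash) direction of the protocol above.  The complete image,
 * FI_ACT_DOWN header included, is written in order; the handler caches
 * each chunk and fires do_fm_api() once the final byte of
 * header->length arrives, after which the cached result header can be
 * read back.  The "fw" path and the image/image_len variables are
 * hypothetical.
 */
#if 0
	struct esas2r_flash_img status;
	off_t pos = 0;
	ssize_t n;
	int fd = open("fw", O_RDWR);		/* hypothetical path */

	while (pos < image_len &&
	       (n = pwrite(fd, image + pos, image_len - pos, pos)) > 0)
		pos += n;			/* last chunk runs the command */

	pread(fd, &status, sizeof(status), 0);	/* FW_STATUS_ST result */
	close(fd);
#endif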

/* Callback for the completion of a VDA request. */
static void vda_complete_req(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	a->vda_command_done = 1;
	wake_up_interruptible(&a->vda_waiter);
}

/* Scatter/gather callback for VDA requests */
static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer;

	(*addr) = a->ppvda_buffer + offset;
	return VDA_MAX_BUFFER_SIZE - offset;
}
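
/*
 * Illustrative sketch, not part of the driver: the contract assumed of
 * get_phys_addr callbacks such as the one above.  Each call stores the
 * bus address of sgc->cur_offset through *addr and returns how many
 * contiguous bytes are usable there, so a scatter/gather builder can
 * loop until the request is fully mapped.  build_sg_entry() and
 * len_remaining are hypothetical.
 */
#if 0
	while (len_remaining) {
		u64 phys;
		u32 avail = sgc->get_phys_addr(sgc, &phys);
		u32 seg = min(avail, len_remaining);

		build_sg_entry(phys, seg);	/* hypothetical */
		sgc->cur_offset += seg;
		len_remaining -= seg;
	}
#endif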

/* Handle a call to read a VDA command. */
int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
{
	if (!a->vda_buffer)
		return -ENOMEM;

	if (off == 0) {
		struct esas2r_request *rq;
		struct atto_ioctl_vda *vi =
			(struct atto_ioctl_vda *)a->vda_buffer;
		struct esas2r_sg_context sgc;
		bool wait_for_completion;

		/*
		 * Presumably, someone has already written to the
		 * vda_buffer, and now they are reading back the response,
		 * so now we will actually issue the request to the chip
		 * and reply.
		 */

		/* allocate a request */
		rq = esas2r_alloc_request(a);
		if (rq == NULL) {
			esas2r_debug("esas2r_read_vda: out of requests");
			return -EBUSY;
		}

		rq->comp_cb = vda_complete_req;

		sgc.first_req = rq;
		sgc.adapter = a;
		sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ;
		sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda;

		a->vda_command_done = 0;

		wait_for_completion =
			esas2r_process_vda_ioctl(a, vi, rq, &sgc);

		if (wait_for_completion) {
			/* now wait around for it to complete. */
			while (!a->vda_command_done)
				wait_event_interruptible(a->vda_waiter,
							 a->vda_command_done);
		}

		esas2r_free_request(a, rq);
	}

	if (off > VDA_MAX_BUFFER_SIZE)
		return 0;

	if (count + off > VDA_MAX_BUFFER_SIZE)
		count = VDA_MAX_BUFFER_SIZE - off;

	if (count < 0)
		return 0;

	memcpy(buf, a->vda_buffer + off, count);

	return count;
}

/* Handle a call to write a VDA command. */
int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
		     int count)
{
	/*
	 * Allocate memory for it, if not already done.  Once allocated,
	 * we will keep it around until the driver is unloaded.
	 */
	if (!a->vda_buffer) {
		dma_addr_t dma_addr;

		a->vda_buffer = dma_alloc_coherent(&a->pcid->dev,
						   (size_t)VDA_MAX_BUFFER_SIZE,
						   &dma_addr,
						   GFP_KERNEL);

		a->ppvda_buffer = dma_addr;
	}

	if (!a->vda_buffer)
		return -ENOMEM;

	if (off > VDA_MAX_BUFFER_SIZE)
		return 0;

	if (count + off > VDA_MAX_BUFFER_SIZE)
		count = VDA_MAX_BUFFER_SIZE - off;

	if (count < 1)
		return 0;

	memcpy(a->vda_buffer + off, buf, count);

	return count;
}
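
/*
 * Illustrative userspace sketch, not part of the driver: the VDA
 * write-then-read protocol implemented by the two handlers above.  A
 * caller stages a struct atto_ioctl_vda with a write, then reads from
 * offset 0, which is what actually issues the command to the chip; the
 * same read returns the completed structure.  The "vda" path is
 * hypothetical.
 */
#if 0
	struct atto_ioctl_vda vi = { 0 };	/* command setup elided */
	int fd = open("vda", O_RDWR);		/* hypothetical path */

	pwrite(fd, &vi, sizeof(vi), 0);		/* stage the command */
	pread(fd, &vi, sizeof(vi), 0);		/* issue it, get results */
	close(fd);
#endif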

/* Callback for the completion of an FS_API request. */
static void fs_api_complete_req(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	a->fs_api_command_done = 1;

	wake_up_interruptible(&a->fs_api_waiter);
}

/* Scatter/gather callback for FS_API requests */
static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	struct esas2r_ioctl_fs *fs =
		(struct esas2r_ioctl_fs *)a->fs_api_buffer;
	u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs;

	(*addr) = a->ppfs_api_buffer + offset;

	return a->fs_api_buffer_size - offset;
}

/* Handle a call to read firmware via FS_API. */
int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
{
	if (!a->fs_api_buffer)
		return -ENOMEM;

	if (off == 0) {
		struct esas2r_request *rq;
		struct esas2r_sg_context sgc;
		struct esas2r_ioctl_fs *fs =
			(struct esas2r_ioctl_fs *)a->fs_api_buffer;

		/* If another flash request is already in progress, return. */
		if (down_interruptible(&a->fs_api_semaphore)) {
busy:
			fs->status = ATTO_STS_OUT_OF_RSRC;
			return -EBUSY;
		}

		/*
		 * Presumably, someone has already written to the
		 * fs_api_buffer, and now they are reading back the
		 * response, so now we will actually issue the request
		 * to the chip and reply.  Allocate a request.
		 */
		rq = esas2r_alloc_request(a);
		if (rq == NULL) {
			esas2r_debug("esas2r_read_fs: out of requests");
			up(&a->fs_api_semaphore);
			goto busy;
		}

		rq->comp_cb = fs_api_complete_req;

		/* Set up the SGCONTEXT to build the s/g table */
		sgc.cur_offset = fs->data;
		sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api;

		a->fs_api_command_done = 0;

		if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) {
			if (fs->status == ATTO_STS_OUT_OF_RSRC)
				count = -EBUSY;

			goto dont_wait;
		}

		/* Now wait around for it to complete. */
		while (!a->fs_api_command_done)
			wait_event_interruptible(a->fs_api_waiter,
						 a->fs_api_command_done);

dont_wait:
		/* Free the request and keep going */
		up(&a->fs_api_semaphore);
		esas2r_free_request(a, rq);

		/* Pick up a possible error code from above */
		if (count < 0)
			return count;
	}

	if (off > a->fs_api_buffer_size)
		return 0;

	if (count + off > a->fs_api_buffer_size)
		count = a->fs_api_buffer_size - off;

	if (count < 0)
		return 0;

	memcpy(buf, a->fs_api_buffer + off, count);

	return count;
}

/* Handle a call to write firmware via FS_API. */
int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
		    int count)
{
	if (off == 0) {
		struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf;
		u32 length = fs->command.length + offsetof(
			struct esas2r_ioctl_fs,
			data);

		/*
		 * Special case: for BEGIN commands, the length field
		 * is lying to us, so just get enough for the header.
		 */
		if (fs->command.command == ESAS2R_FS_CMD_BEGINW)
			length = offsetof(struct esas2r_ioctl_fs, data);

		/*
		 * Beginning a command.  We assume we'll get at least
		 * enough in the first write so we can look at the
		 * header and see how much we need to alloc.
		 */
		if (count < offsetof(struct esas2r_ioctl_fs, data))
			return -EINVAL;

		/* Allocate a buffer or use the existing buffer. */
		if (a->fs_api_buffer) {
			if (a->fs_api_buffer_size < length) {
				/* Free the too-small buffer and get a new one */
				dma_free_coherent(&a->pcid->dev,
						  (size_t)a->fs_api_buffer_size,
						  a->fs_api_buffer,
						  (dma_addr_t)a->ppfs_api_buffer);

				goto re_allocate_buffer;
			}
		} else {
re_allocate_buffer:
			a->fs_api_buffer_size = length;

			a->fs_api_buffer = dma_alloc_coherent(
				&a->pcid->dev,
				(size_t)a->fs_api_buffer_size,
				(dma_addr_t *)&a->ppfs_api_buffer,
				GFP_KERNEL);
		}
	}

	if (!a->fs_api_buffer)
		return -ENOMEM;

	if (off > a->fs_api_buffer_size)
		return 0;

	if (count + off > a->fs_api_buffer_size)
		count = a->fs_api_buffer_size - off;

	if (count < 1)
		return 0;

	memcpy(a->fs_api_buffer + off, buf, count);

	return count;
}
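
/*
 * Illustrative userspace sketch, not part of the driver: the FS_API
 * flow implemented by the two handlers above.  Writing a struct
 * esas2r_ioctl_fs at offset 0 sizes the coherent buffer and stages the
 * request; the subsequent read at offset 0 takes fs_api_semaphore,
 * issues the command to the chip, and returns the structure with
 * fs->status filled in.  The "fs" path is hypothetical.
 */
#if 0
	struct esas2r_ioctl_fs fs = { 0 };	/* command setup elided */
	int fd = open("fs", O_RDWR);		/* hypothetical path */

	pwrite(fd, &fs, sizeof(fs), 0);		/* stage and size the buffer */
	pread(fd, &fs, sizeof(fs), 0);		/* execute; check fs.status */
	close(fd);
#endif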