/*
 * linux/drivers/scsi/esas2r/esas2r_ioctl.c
 *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
 *
 * Copyright (c) 2001-2013 ATTO Technology, Inc.
 * (mailto:linuxdrivers@attotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include "esas2r.h"

/*
 * Buffered ioctl handlers.  A buffered ioctl is one which requires that we
 * allocate a DMA-able memory area to communicate with the firmware.  In
 * order to prevent continually allocating and freeing consistent memory,
 * we will allocate a global buffer the first time we need it and re-use
 * it for subsequent ioctl calls that require it.
 */

u8 *esas2r_buffered_ioctl;
dma_addr_t esas2r_buffered_ioctl_addr;
u32 esas2r_buffered_ioctl_size;
struct pci_dev *esas2r_buffered_ioctl_pcid;

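/*
 * A single global semaphore serializes all buffered ioctls across every
 * adapter, since the DMA buffer above is shared.  Each handler supplies a
 * callback to build and issue the request and an optional done callback
 * to post-process the result.
 */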
static DEFINE_SEMAPHORE(buffered_ioctl_semaphore);
typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
				       struct esas2r_request *,
				       struct esas2r_sg_context *,
				       void *);
typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *,
					     struct esas2r_request *, void *);

struct esas2r_buffered_ioctl {
	struct esas2r_adapter *a;
	void *ioctl;
	u32 length;
	u32 control_code;
	u32 offset;
	BUFFERED_IOCTL_CALLBACK callback;
	void *context;
	BUFFERED_IOCTL_DONE_CALLBACK done_callback;
	void *done_context;
};

static void complete_fm_api_req(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	a->fm_api_command_done = 1;
	wake_up_interruptible(&a->fm_api_waiter);
}

/* Callbacks for building scatter/gather lists for FM API requests */
static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = sgc->cur_offset - a->save_offset;

	(*addr) = a->firmware.phys + offset;
	return a->firmware.orig_len - offset;
}

static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = sgc->cur_offset - a->save_offset;

	(*addr) = a->firmware.header_buff_phys + offset;
	return sizeof(struct esas2r_flash_img) - offset;
}

/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. */
static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
{
	struct esas2r_request *rq;

	if (down_interruptible(&a->fm_api_semaphore)) {
		fi->status = FI_STAT_BUSY;
		return;
	}

	rq = esas2r_alloc_request(a);
	if (rq == NULL) {
		fi->status = FI_STAT_BUSY;
		goto free_sem;
	}

	if (fi == &a->firmware.header) {
		a->firmware.header_buff =
			dma_alloc_coherent(&a->pcid->dev,
					   sizeof(struct esas2r_flash_img),
					   (dma_addr_t *)&a->firmware.header_buff_phys,
					   GFP_KERNEL);

		if (a->firmware.header_buff == NULL) {
			esas2r_debug("failed to allocate header buffer!");
			fi->status = FI_STAT_BUSY;
			goto free_req;
		}

		memcpy(a->firmware.header_buff, fi,
		       sizeof(struct esas2r_flash_img));
		a->save_offset = a->firmware.header_buff;
		a->fm_api_sgc.get_phys_addr =
			(PGETPHYSADDR)get_physaddr_fm_api_header;
	} else {
		a->save_offset = (u8 *)fi;
		a->fm_api_sgc.get_phys_addr =
			(PGETPHYSADDR)get_physaddr_fm_api;
	}

	rq->comp_cb = complete_fm_api_req;
	a->fm_api_command_done = 0;
	a->fm_api_sgc.cur_offset = a->save_offset;

	if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
			   &a->fm_api_sgc))
		goto all_done;

	/* Now wait around for it to complete. */
	while (!a->fm_api_command_done)
		wait_event_interruptible(a->fm_api_waiter,
					 a->fm_api_command_done);
all_done:
	if (fi == &a->firmware.header) {
		memcpy(fi, a->firmware.header_buff,
		       sizeof(struct esas2r_flash_img));

		dma_free_coherent(&a->pcid->dev,
				  sizeof(struct esas2r_flash_img),
				  a->firmware.header_buff,
				  (dma_addr_t)a->firmware.header_buff_phys);
	}
free_req:
	esas2r_free_request(a, rq);
free_sem:
	up(&a->fm_api_semaphore);
}

static void complete_nvr_req(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	a->nvram_command_done = 1;
	wake_up_interruptible(&a->nvram_waiter);
}

/* Callback for building scatter/gather lists for buffered ioctls */
static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc,
				       u64 *addr)
{
	int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl;

	(*addr) = esas2r_buffered_ioctl_addr + offset;
	return esas2r_buffered_ioctl_size - offset;
}

static void complete_buffered_ioctl_req(struct esas2r_adapter *a,
					struct esas2r_request *rq)
{
	a->buffered_ioctl_done = 1;
	wake_up_interruptible(&a->buffered_ioctl_waiter);
}

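/*
 * Core of the buffered ioctl path: take the global semaphore, make sure
 * the shared DMA buffer is large enough, copy the caller's data in, let
 * the handler's callback build and start the request, wait for it to
 * complete, then copy the results back out.
 */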
static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi)
{
	struct esas2r_adapter *a = bi->a;
	struct esas2r_request *rq;
	struct esas2r_sg_context sgc;
	u8 result = IOCTL_SUCCESS;

	if (down_interruptible(&buffered_ioctl_semaphore))
		return IOCTL_OUT_OF_RESOURCES;

	/* allocate a buffer or use the existing buffer. */
	if (esas2r_buffered_ioctl) {
		if (esas2r_buffered_ioctl_size < bi->length) {
			/* free the too-small buffer and get a new one */
			dma_free_coherent(&a->pcid->dev,
					  (size_t)esas2r_buffered_ioctl_size,
					  esas2r_buffered_ioctl,
					  esas2r_buffered_ioctl_addr);

			goto allocate_buffer;
		}
	} else {
allocate_buffer:
		esas2r_buffered_ioctl_size = bi->length;
		esas2r_buffered_ioctl_pcid = a->pcid;
		esas2r_buffered_ioctl =
			dma_alloc_coherent(&a->pcid->dev,
					   (size_t)esas2r_buffered_ioctl_size,
					   &esas2r_buffered_ioctl_addr,
					   GFP_KERNEL);
	}

	if (!esas2r_buffered_ioctl) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "could not allocate %d bytes of consistent memory "
			   "for a buffered ioctl!",
			   bi->length);

		esas2r_debug("buffered ioctl alloc failure");
		result = IOCTL_OUT_OF_RESOURCES;
		goto exit_cleanly;
	}

	memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length);

	rq = esas2r_alloc_request(a);
	if (rq == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "could not allocate an internal request");

		result = IOCTL_OUT_OF_RESOURCES;
		esas2r_debug("buffered ioctl - no requests");
		goto exit_cleanly;
	}

	a->buffered_ioctl_done = 0;
	rq->comp_cb = complete_buffered_ioctl_req;
	sgc.cur_offset = esas2r_buffered_ioctl + bi->offset;
	sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl;
	sgc.length = esas2r_buffered_ioctl_size;

	if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
		/* completed immediately, no need to wait */
		a->buffered_ioctl_done = 0;
		goto free_and_exit_cleanly;
	}

	/* now wait around for it to complete. */
	while (!a->buffered_ioctl_done)
		wait_event_interruptible(a->buffered_ioctl_waiter,
					 a->buffered_ioctl_done);

free_and_exit_cleanly:
	if (result == IOCTL_SUCCESS && bi->done_callback)
		(*bi->done_callback)(a, rq, bi->done_context);

	esas2r_free_request(a, rq);

exit_cleanly:
	if (result == IOCTL_SUCCESS)
		memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length);

	up(&buffered_ioctl_semaphore);
	return result;
}

/* SMP ioctl support */
static int smp_ioctl_callback(struct esas2r_adapter *a,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc, void *context)
{
	struct atto_ioctl_smp *si =
		(struct atto_ioctl_smp *)esas2r_buffered_ioctl;

	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);

	if (!esas2r_build_sg_list(a, rq, sgc)) {
		si->status = ATTO_STS_OUT_OF_RSRC;
		return false;
	}

	esas2r_start_request(a, rq);
	return true;
}

static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = si;
	bi.length = sizeof(struct atto_ioctl_smp)
		    + le32_to_cpu(si->req_length)
		    + le32_to_cpu(si->rsp_length);
	bi.offset = 0;
	bi.callback = smp_ioctl_callback;
	return handle_buffered_ioctl(&bi);
}

/* CSMI ioctl support */
static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
	rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);

	/* Now call the original completion callback. */
	(*rq->aux_req_cb)(a, rq);
}

/* Tunnel a CSMI IOCTL to the back end driver for processing. */
static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
			      union atto_ioctl_csmi *ci,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc,
			      u32 ctrl_code,
			      u16 target_id)
{
	struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;

	if (test_bit(AF_DEGRADED_MODE, &a->flags))
		return false;

	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
	ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code);
	ioctl->csmi.target_id = cpu_to_le16(target_id);
	ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);

	/*
	 * Always usurp the completion callback since the interrupt callback
	 * mechanism may be used.
	 */
	rq->aux_req_cx = ci;
	rq->aux_req_cb = rq->comp_cb;
	rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;

	if (!esas2r_build_sg_list(a, rq, sgc))
		return false;

	esas2r_start_request(a, rq);
	return true;
}

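/*
 * Accept only simple, single-level LUN addresses: every byte of the SCSI
 * LUN must be zero except byte 1, which carries the LUN number itself.
 */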
static bool check_lun(struct scsi_lun lun)
{
	bool result;

	result = ((lun.scsi_lun[7] == 0) &&
		  (lun.scsi_lun[6] == 0) &&
		  (lun.scsi_lun[5] == 0) &&
		  (lun.scsi_lun[4] == 0) &&
		  (lun.scsi_lun[3] == 0) &&
		  (lun.scsi_lun[2] == 0) &&
		  /* Byte 1 is intentionally skipped */
		  (lun.scsi_lun[0] == 0));

	return result;
}

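/*
 * Dispatch a CSMI control code.  Returns true if a request was started
 * (the buffered ioctl machinery will then wait for completion) or false
 * if the ioctl completed inline with the status already filled in.
 */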
static int csmi_ioctl_callback(struct esas2r_adapter *a,
			       struct esas2r_request *rq,
			       struct esas2r_sg_context *sgc, void *context)
{
	struct atto_csmi *ci = (struct atto_csmi *)context;
	union atto_ioctl_csmi *ioctl_csmi =
		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;
	u8 path = 0;
	u8 tid = 0;
	u8 lun = 0;
	u32 sts = CSMI_STS_SUCCESS;
	struct esas2r_target *t;
	unsigned long flags;

	if (ci->control_code == CSMI_CC_GET_DEV_ADDR) {
		struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr;

		path = gda->path_id;
		tid = gda->target_id;
		lun = gda->lun;
	} else if (ci->control_code == CSMI_CC_TASK_MGT) {
		struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt;

		path = tm->path_id;
		tid = tm->target_id;
		lun = tm->lun;
	}

	if (path > 0) {
		rq->func_rsp.ioctl_rsp.csmi.csmi_status =
			cpu_to_le32(CSMI_STS_INV_PARAM);
		return false;
	}

	rq->target_id = tid;
	rq->vrq->scsi.flags |= cpu_to_le32(lun);

	switch (ci->control_code) {
	case CSMI_CC_GET_DRVR_INFO:
	{
		struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info;

		strcpy(gdi->description, esas2r_get_model_name(a));
		gdi->csmi_major_rev = CSMI_MAJOR_REV;
		gdi->csmi_minor_rev = CSMI_MINOR_REV;
		break;
	}

	case CSMI_CC_GET_CNTLR_CFG:
	{
		struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg;

		gcc->base_io_addr = 0;
		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2,
				      &gcc->base_memaddr_lo);
		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3,
				      &gcc->base_memaddr_hi);
		gcc->board_id = MAKEDWORD(a->pcid->subsystem_device,
					  a->pcid->subsystem_vendor);
		gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN;
		gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA;
		gcc->io_bus_type = CSMI_BUS_TYPE_PCI;
		gcc->pci_addr.bus_num = a->pcid->bus->number;
		gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn);
		gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn);

		memset(gcc->serial_num, 0, sizeof(gcc->serial_num));

		gcc->major_rev = LOBYTE(LOWORD(a->fw_version));
		gcc->minor_rev = HIBYTE(LOWORD(a->fw_version));
		gcc->build_rev = LOBYTE(HIWORD(a->fw_version));
		gcc->release_rev = HIBYTE(HIWORD(a->fw_version));
		gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver));
		gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
		gcc->bios_build_rev = LOWORD(a->flash_ver);

		if (test_bit(AF2_THUNDERLINK, &a->flags2))
			gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
					   | CSMI_CNTLRF_SATA_HBA;
		else
			gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID
					   | CSMI_CNTLRF_SATA_RAID;

		gcc->rrom_major_rev = 0;
		gcc->rrom_minor_rev = 0;
		gcc->rrom_build_rev = 0;
		gcc->rrom_release_rev = 0;
		gcc->rrom_biosmajor_rev = 0;
		gcc->rrom_biosminor_rev = 0;
		gcc->rrom_biosbuild_rev = 0;
		gcc->rrom_biosrelease_rev = 0;
		break;
	}

	case CSMI_CC_GET_CNTLR_STS:
	{
		struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;

		if (test_bit(AF_DEGRADED_MODE, &a->flags))
			gcs->status = CSMI_CNTLR_STS_FAILED;
		else
			gcs->status = CSMI_CNTLR_STS_GOOD;

		gcs->offline_reason = CSMI_OFFLINE_NO_REASON;
		break;
	}

	case CSMI_CC_FW_DOWNLOAD:
	case CSMI_CC_GET_RAID_INFO:
	case CSMI_CC_GET_RAID_CFG:

		sts = CSMI_STS_BAD_CTRL_CODE;
		break;

	case CSMI_CC_SMP_PASSTHRU:
	case CSMI_CC_SSP_PASSTHRU:
	case CSMI_CC_STP_PASSTHRU:
	case CSMI_CC_GET_PHY_INFO:
	case CSMI_CC_SET_PHY_INFO:
	case CSMI_CC_GET_LINK_ERRORS:
	case CSMI_CC_GET_SATA_SIG:
	case CSMI_CC_GET_CONN_INFO:
	case CSMI_CC_PHY_CTRL:

		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
				       ci->control_code,
				       ESAS2R_TARG_ID_INV)) {
			sts = CSMI_STS_FAILED;
			break;
		}

		return true;

	case CSMI_CC_GET_SCSI_ADDR:
	{
		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;
		struct scsi_lun lun;

		memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun));

		if (!check_lun(lun)) {
			sts = CSMI_STS_NO_SCSI_ADDR;
			break;
		}

		/* make sure the device is present */
		spin_lock_irqsave(&a->mem_lock, flags);
		t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr);
		spin_unlock_irqrestore(&a->mem_lock, flags);

		if (t == NULL) {
			sts = CSMI_STS_NO_SCSI_ADDR;
			break;
		}

		gsa->host_index = 0xFF;
		gsa->lun = gsa->sas_lun[1];
		rq->target_id = esas2r_targ_get_id(t, a);
		break;
	}

	case CSMI_CC_GET_DEV_ADDR:
	{
		struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr;

		/* make sure the target is present */
		t = a->targetdb + rq->target_id;

		if (t >= a->targetdb_end
		    || t->target_state != TS_PRESENT
		    || t->sas_addr == 0) {
			sts = CSMI_STS_NO_DEV_ADDR;
			break;
		}

		/* fill in the result */
		*(u64 *)gda->sas_addr = t->sas_addr;
		memset(gda->sas_lun, 0, sizeof(gda->sas_lun));
		gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
		break;
	}

	case CSMI_CC_TASK_MGT:

		/* make sure the target is present */
		t = a->targetdb + rq->target_id;

		if (t >= a->targetdb_end
		    || t->target_state != TS_PRESENT
		    || !(t->flags & TF_PASS_THRU)) {
			sts = CSMI_STS_NO_DEV_ADDR;
			break;
		}

		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
				       ci->control_code,
				       t->phys_targ_id)) {
			sts = CSMI_STS_FAILED;
			break;
		}

		return true;

	default:

		sts = CSMI_STS_BAD_CTRL_CODE;
		break;
	}

	rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);

	return false;
}

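/*
 * Post-process a completed CSMI ioctl: patch up a few responses with
 * driver-side data and propagate the firmware status back to the caller.
 */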
static void csmi_ioctl_done_callback(struct esas2r_adapter *a,
				     struct esas2r_request *rq, void *context)
{
	struct atto_csmi *ci = (struct atto_csmi *)context;
	union atto_ioctl_csmi *ioctl_csmi =
		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;

	switch (ci->control_code) {
	case CSMI_CC_GET_DRVR_INFO:
	{
		struct atto_csmi_get_driver_info *gdi =
			&ioctl_csmi->drvr_info;

		strcpy(gdi->name, ESAS2R_VERSION_STR);

		gdi->major_rev = ESAS2R_MAJOR_REV;
		gdi->minor_rev = ESAS2R_MINOR_REV;
		gdi->build_rev = 0;
		gdi->release_rev = 0;
		break;
	}

	case CSMI_CC_GET_SCSI_ADDR:
	{
		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

		if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
		    CSMI_STS_SUCCESS) {
			gsa->target_id = rq->target_id;
			gsa->path_id = 0;
		}

		break;
	}
	}

	ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
}

static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = &ci->data;
	bi.length = sizeof(union atto_ioctl_csmi);
	bi.offset = 0;
	bi.callback = csmi_ioctl_callback;
	bi.context = ci;
	bi.done_callback = csmi_ioctl_done_callback;
	bi.done_context = ci;

	return handle_buffered_ioctl(&bi);
}

/* ATTO HBA ioctl support */

/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */
static bool hba_ioctl_tunnel(struct esas2r_adapter *a,
			     struct atto_ioctl *hi,
			     struct esas2r_request *rq,
			     struct esas2r_sg_context *sgc)
{
	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);

	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);

	if (!esas2r_build_sg_list(a, rq, sgc)) {
		hi->status = ATTO_STS_OUT_OF_RSRC;

		return false;
	}

	esas2r_start_request(a, rq);

	return true;
}

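/*
 * Completion callback for SCSI pass-through requests: translate the
 * internal request status into an ATTO_SPT_RS_* code, record the SCSI
 * status/sense/residual, then hand off to the usurped callback.
 */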
static void scsi_passthru_comp_cb(struct esas2r_adapter *a,
				  struct esas2r_request *rq)
{
	struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
	struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
	u8 sts = ATTO_SPT_RS_FAILED;

	spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
	spt->sense_length = rq->sense_len;
	spt->residual_length =
		le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);

	switch (rq->req_stat) {
	case RS_SUCCESS:
	case RS_SCSI_ERROR:
		sts = ATTO_SPT_RS_SUCCESS;
		break;
	case RS_UNDERRUN:
		sts = ATTO_SPT_RS_UNDERRUN;
		break;
	case RS_OVERRUN:
		sts = ATTO_SPT_RS_OVERRUN;
		break;
	case RS_SEL:
	case RS_SEL2:
		sts = ATTO_SPT_RS_NO_DEVICE;
		break;
	case RS_NO_LUN:
		sts = ATTO_SPT_RS_NO_LUN;
		break;
	case RS_TIMEOUT:
		sts = ATTO_SPT_RS_TIMEOUT;
		break;
	case RS_DEGRADED:
		sts = ATTO_SPT_RS_DEGRADED;
		break;
	case RS_BUSY:
		sts = ATTO_SPT_RS_BUSY;
		break;
	case RS_ABORTED:
		sts = ATTO_SPT_RS_ABORTED;
		break;
	case RS_RESET:
		sts = ATTO_SPT_RS_BUS_RESET;
		break;
	}

	spt->req_status = sts;

	/* Update the target ID to the next one present. */
	spt->target_id =
		esas2r_targ_db_find_next_present(a, (u16)spt->target_id);

	/* Done, call the completion callback. */
	(*rq->aux_req_cb)(a, rq);
}

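/*
 * Dispatch an ATTO HBA ioctl function.  As with the CSMI handler, a true
 * return means a request is in flight; false means the ioctl was handled
 * (or rejected) inline and hi->status is already valid.
 */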
static int hba_ioctl_callback(struct esas2r_adapter *a,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc,
			      void *context)
{
	struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl;

	hi->status = ATTO_STS_SUCCESS;

	switch (hi->function) {
	case ATTO_FUNC_GET_ADAP_INFO:
	{
		u8 *class_code = (u8 *)&a->pcid->class;

		struct atto_hba_get_adapter_info *gai =
			&hi->data.get_adap_info;
		int pcie_cap_reg;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_GET_ADAP_INFO0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_ADAP_INFO0;
			break;
		}

		memset(gai, 0, sizeof(*gai));

		gai->pci.vendor_id = a->pcid->vendor;
		gai->pci.device_id = a->pcid->device;
		gai->pci.ss_vendor_id = a->pcid->subsystem_vendor;
		gai->pci.ss_device_id = a->pcid->subsystem_device;
		gai->pci.class_code[0] = class_code[0];
		gai->pci.class_code[1] = class_code[1];
		gai->pci.class_code[2] = class_code[2];
		gai->pci.rev_id = a->pcid->revision;
		gai->pci.bus_num = a->pcid->bus->number;
		gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
		gai->pci.func_num = PCI_FUNC(a->pcid->devfn);

		pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
		if (pcie_cap_reg) {
			u16 stat;
			u32 caps;

			pci_read_config_word(a->pcid,
					     pcie_cap_reg + PCI_EXP_LNKSTA,
					     &stat);
			pci_read_config_dword(a->pcid,
					      pcie_cap_reg + PCI_EXP_LNKCAP,
					      &caps);

			gai->pci.link_speed_curr =
				(u8)(stat & PCI_EXP_LNKSTA_CLS);
			gai->pci.link_speed_max =
				(u8)(caps & PCI_EXP_LNKCAP_SLS);
			gai->pci.link_width_curr =
				(u8)((stat & PCI_EXP_LNKSTA_NLW)
				     >> PCI_EXP_LNKSTA_NLW_SHIFT);
			gai->pci.link_width_max =
				(u8)((caps & PCI_EXP_LNKCAP_MLW)
				     >> 4);
		}

		gai->pci.msi_vector_cnt = 1;

		if (a->pcid->msix_enabled)
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX;
		else if (a->pcid->msi_enabled)
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI;
		else
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY;

		gai->adap_type = ATTO_GAI_AT_ESASRAID2;

		if (test_bit(AF2_THUNDERLINK, &a->flags2))
			gai->adap_type = ATTO_GAI_AT_TLSASHBA;

		if (test_bit(AF_DEGRADED_MODE, &a->flags))
			gai->adap_flags |= ATTO_GAI_AF_DEGRADED;

		gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
				   ATTO_GAI_AF_DEVADDR_SUPP;

		if (a->pcid->subsystem_device == ATTO_ESAS_R60F
		    || a->pcid->subsystem_device == ATTO_ESAS_R608
		    || a->pcid->subsystem_device == ATTO_ESAS_R644
		    || a->pcid->subsystem_device == ATTO_TSSC_3808E)
			gai->adap_flags |= ATTO_GAI_AF_VIRT_SES;

		gai->num_ports = ESAS2R_NUM_PHYS;
		gai->num_phys = ESAS2R_NUM_PHYS;

		strcpy(gai->firmware_rev, a->fw_rev);
		strcpy(gai->flash_rev, a->flash_rev);
		strcpy(gai->model_name_short, esas2r_get_model_name_short(a));
		strcpy(gai->model_name, esas2r_get_model_name(a));

		gai->num_targets = ESAS2R_MAX_TARGETS;

		gai->num_busses = 1;
		gai->num_targsper_bus = gai->num_targets;
		gai->num_lunsper_targ = 256;

		if (a->pcid->subsystem_device == ATTO_ESAS_R6F0
		    || a->pcid->subsystem_device == ATTO_ESAS_R60F)
			gai->num_connectors = 4;
		else
			gai->num_connectors = 2;

		gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP;

		gai->num_targets_backend = a->num_targets_backend;

		gai->tunnel_flags = a->ioctl_tunnel
				    & (ATTO_GAI_TF_MEM_RW
				       | ATTO_GAI_TF_TRACE
				       | ATTO_GAI_TF_SCSI_PASS_THRU
				       | ATTO_GAI_TF_GET_DEV_ADDR
				       | ATTO_GAI_TF_PHY_CTRL
				       | ATTO_GAI_TF_CONN_CTRL
				       | ATTO_GAI_TF_GET_DEV_INFO);
		break;
	}

	case ATTO_FUNC_GET_ADAP_ADDR:
	{
		struct atto_hba_get_adapter_address *gaa =
			&hi->data.get_adap_addr;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_GET_ADAP_ADDR0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_ADAP_ADDR0;
		} else if (gaa->addr_type == ATTO_GAA_AT_PORT
			   || gaa->addr_type == ATTO_GAA_AT_NODE) {
			if (gaa->addr_type == ATTO_GAA_AT_PORT
			    && gaa->port_id >= ESAS2R_NUM_PHYS) {
				hi->status = ATTO_STS_NOT_APPL;
			} else {
				memcpy((u64 *)gaa->address,
				       &a->nvram->sas_addr[0], sizeof(u64));
				gaa->addr_len = sizeof(u64);
			}
		} else {
			hi->status = ATTO_STS_INV_PARAM;
		}

		break;
	}

	case ATTO_FUNC_MEM_RW:
	{
		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		hi->status = ATTO_STS_UNSUPPORTED;

		break;
	}

	case ATTO_FUNC_TRACE:
	{
		struct atto_hba_trace *trc = &hi->data.trace;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_TRACE1) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_TRACE1;
			break;
		}

		if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP
		    && hi->version >= ATTO_VER_TRACE1) {
			if (trc->trace_func == ATTO_TRC_TF_UPLOAD) {
				u32 len = hi->data_length;
				u32 offset = trc->current_offset;
				u32 total_len = ESAS2R_FWCOREDUMP_SZ;

				/* Size is zero if a core dump isn't present */
				if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
					total_len = 0;

				if (len > total_len)
					len = total_len;

				if (offset >= total_len
				    || offset + len > total_len
				    || len == 0) {
					hi->status = ATTO_STS_INV_PARAM;
					break;
				}

				memcpy(trc + 1,
				       a->fw_coredump_buff + offset,
				       len);

				hi->data_length = len;
			} else if (trc->trace_func == ATTO_TRC_TF_RESET) {
				memset(a->fw_coredump_buff, 0,
				       ESAS2R_FWCOREDUMP_SZ);

				clear_bit(AF2_COREDUMP_SAVED, &a->flags2);
			} else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
				hi->status = ATTO_STS_UNSUPPORTED;
				break;
			}

			/* Always return all the info we can. */
			trc->trace_mask = 0;
			trc->current_offset = 0;
			trc->total_length = ESAS2R_FWCOREDUMP_SZ;

			/* Return zero length buffer if core dump not present */
			if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
				trc->total_length = 0;
		} else {
			hi->status = ATTO_STS_UNSUPPORTED;
		}

		break;
	}

	case ATTO_FUNC_SCSI_PASS_THRU:
	{
		struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
		struct scsi_lun lun;

		memcpy(&lun, spt->lun, sizeof(struct scsi_lun));

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_SCSI_PASS_THRU0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_SCSI_PASS_THRU0;
			break;
		}

		if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		esas2r_sgc_init(sgc, a, rq, NULL);

		sgc->length = hi->data_length;
		sgc->cur_offset += offsetof(struct atto_ioctl, data.byte)
				   + sizeof(struct atto_hba_scsi_pass_thru);

		/* Finish request initialization */
		rq->target_id = (u16)spt->target_id;
		rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
		memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
		rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
		rq->sense_len = spt->sense_length;
		rq->sense_buf = (u8 *)spt->sense_data;
		/* NOTE: we ignore spt->timeout */

		/*
		 * always usurp the completion callback since the interrupt
		 * callback mechanism may be used.
		 */
		rq->aux_req_cx = hi;
		rq->aux_req_cb = rq->comp_cb;
		rq->comp_cb = scsi_passthru_comp_cb;

		if (spt->flags & ATTO_SPTF_DATA_IN) {
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
		} else if (spt->flags & ATTO_SPTF_DATA_OUT) {
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
		} else {
			if (sgc->length) {
				hi->status = ATTO_STS_INV_PARAM;
				break;
			}
		}

		if (spt->flags & ATTO_SPTF_ORDERED_Q)
			rq->vrq->scsi.flags |=
				cpu_to_le32(FCP_CMND_TA_ORDRD_Q);
		else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);

		if (!esas2r_build_sg_list(a, rq, sgc)) {
			hi->status = ATTO_STS_OUT_OF_RSRC;
			break;
		}

		esas2r_start_request(a, rq);

		return true;
	}

	case ATTO_FUNC_GET_DEV_ADDR:
	{
		struct atto_hba_get_device_address *gda =
			&hi->data.get_dev_addr;
		struct esas2r_target *t;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_GET_DEV_ADDR0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_DEV_ADDR0;
			break;
		}

		if (gda->target_id >= ESAS2R_MAX_TARGETS) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		t = a->targetdb + (u16)gda->target_id;

		if (t->target_state != TS_PRESENT) {
			hi->status = ATTO_STS_FAILED;
		} else if (gda->addr_type == ATTO_GDA_AT_PORT) {
			if (t->sas_addr == 0) {
				hi->status = ATTO_STS_UNSUPPORTED;
			} else {
				*(u64 *)gda->address = t->sas_addr;

				gda->addr_len = sizeof(u64);
			}
		} else if (gda->addr_type == ATTO_GDA_AT_NODE) {
			hi->status = ATTO_STS_NOT_APPL;
		} else {
			hi->status = ATTO_STS_INV_PARAM;
		}

		/* update the target ID to the next one present. */
		gda->target_id =
			esas2r_targ_db_find_next_present(a,
							 (u16)gda->target_id);
		break;
	}

	case ATTO_FUNC_PHY_CTRL:
	case ATTO_FUNC_CONN_CTRL:
	{
		if (hba_ioctl_tunnel(a, hi, rq, sgc))
			return true;

		break;
	}

	case ATTO_FUNC_ADAP_CTRL:
	{
		struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_ADAP_CTRL0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_ADAP_CTRL0;
			break;
		}

		if (ac->adap_func == ATTO_AC_AF_HARD_RST) {
			esas2r_reset_adapter(a);
		} else if (ac->adap_func != ATTO_AC_AF_GET_STATE) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (test_bit(AF_CHPRST_NEEDED, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_SCHED;
		else if (test_bit(AF_CHPRST_PENDING, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
		else if (test_bit(AF_DISC_PENDING, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_DISC;
		else if (test_bit(AF_DISABLED, &a->flags))
			ac->adap_state = ATTO_AC_AS_DISABLED;
		else if (test_bit(AF_DEGRADED_MODE, &a->flags))
			ac->adap_state = ATTO_AC_AS_DEGRADED;
		else
			ac->adap_state = ATTO_AC_AS_OK;

		break;
	}

	case ATTO_FUNC_GET_DEV_INFO:
	{
		struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info;
		struct esas2r_target *t;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_GET_DEV_INFO0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_DEV_INFO0;
			break;
		}

		if (gdi->target_id >= ESAS2R_MAX_TARGETS) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		t = a->targetdb + (u16)gdi->target_id;

		/* update the target ID to the next one present. */
		gdi->target_id =
			esas2r_targ_db_find_next_present(a,
							 (u16)gdi->target_id);

		if (t->target_state != TS_PRESENT) {
			hi->status = ATTO_STS_FAILED;
			break;
		}

		hi->status = ATTO_STS_UNSUPPORTED;
		break;
	}

	default:

		hi->status = ATTO_STS_INV_FUNC;
		break;
	}

	return false;
}

static void hba_ioctl_done_callback(struct esas2r_adapter *a,
				    struct esas2r_request *rq, void *context)
{
	struct atto_ioctl *ioctl_hba =
		(struct atto_ioctl *)esas2r_buffered_ioctl;

	esas2r_debug("hba_ioctl_done_callback %d", a->index);

	if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) {
		struct atto_hba_get_adapter_info *gai =
			&ioctl_hba->data.get_adap_info;

		esas2r_debug("ATTO_FUNC_GET_ADAP_INFO");

		gai->drvr_rev_major = ESAS2R_MAJOR_REV;
		gai->drvr_rev_minor = ESAS2R_MINOR_REV;

		strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR);
		strcpy(gai->drvr_name, ESAS2R_DRVR_NAME);

		gai->num_busses = 1;
		gai->num_targsper_bus = ESAS2R_MAX_ID + 1;
		gai->num_lunsper_targ = 1;
	}
}

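/*
 * The buffered length covers the fixed atto_ioctl header plus any
 * trailing data the function carries (e.g. pass-through payloads).
 */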
u8 handle_hba_ioctl(struct esas2r_adapter *a,
		    struct atto_ioctl *ioctl_hba)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = ioctl_hba;
	bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length;
	bi.callback = hba_ioctl_callback;
	bi.context = NULL;
	bi.done_callback = hba_ioctl_done_callback;
	bi.done_context = NULL;
	bi.offset = 0;

	return handle_buffered_ioctl(&bi);
}

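/*
 * Synchronously write the SAS NVRAM parameters.  Returns 1 on success
 * and 0 if the write could not be started or failed.
 */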
int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
			struct esas2r_sas_nvram *data)
{
	int result = 0;

	a->nvram_command_done = 0;
	rq->comp_cb = complete_nvr_req;

	if (esas2r_nvram_write(a, rq, data)) {
		/* now wait around for it to complete. */
		while (!a->nvram_command_done)
			wait_event_interruptible(a->nvram_waiter,
						 a->nvram_command_done);

		/* done, check the status. */
		if (rq->req_stat == RS_SUCCESS)
			result = 1;
	}
	return result;
}

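/*
 * Rough user-space usage sketch (illustrative only -- error handling is
 * omitted, and 'fd' is assumed to be an open handle on a SCSI device
 * owned by this driver):
 *
 *	struct atto_express_ioctl io;
 *
 *	memset(&io, 0, sizeof(io));
 *	memcpy(io.header.signature, EXPRESS_IOCTL_SIGNATURE,
 *	       EXPRESS_IOCTL_SIGNATURE_SIZE);
 *	io.header.channel = 0xFF;	// 0xFF: use the adapter behind 'fd'
 *	ioctl(fd, EXPRESS_IOCTL_CHAN_INFO, &io);
 *	// on return, io.header.return_code and io.data.chaninfo are valid
 */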
/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
{
	struct atto_express_ioctl *ioctl = NULL;
	struct esas2r_adapter *a;
	struct esas2r_request *rq;
	u16 code;
	int err;

	esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg);

	if ((arg == NULL)
	    || (cmd < EXPRESS_IOCTL_MIN)
	    || (cmd > EXPRESS_IOCTL_MAX))
		return -ENOTSUPP;

	if (!access_ok(VERIFY_WRITE, arg, sizeof(struct atto_express_ioctl))) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "ioctl_handler access_ok failed for cmd %d, "
			   "address %p", cmd,
			   arg);
		return -EFAULT;
	}

	/* allocate a kernel memory buffer for the IOCTL data */
	ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL);
	if (ioctl == NULL) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "ioctl_handler kzalloc failed for %zu bytes",
			   sizeof(struct atto_express_ioctl));
		return -ENOMEM;
	}

	err = __copy_from_user(ioctl, arg, sizeof(struct atto_express_ioctl));
	if (err != 0) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "copy_from_user didn't copy everything (err %d, cmd %d)",
			   err,
			   cmd);
		kfree(ioctl);

		return -EFAULT;
	}

	/* verify the signature */

	if (memcmp(ioctl->header.signature,
		   EXPRESS_IOCTL_SIGNATURE,
		   EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) {
		esas2r_log(ESAS2R_LOG_WARN, "invalid signature");
		kfree(ioctl);

		return -ENOTSUPP;
	}

	/* assume success */

	ioctl->header.return_code = IOCTL_SUCCESS;
	err = 0;

	/*
	 * handle EXPRESS_IOCTL_GET_CHANNELS
	 * without paying attention to channel
	 */

	if (cmd == EXPRESS_IOCTL_GET_CHANNELS) {
		int i = 0, k = 0;

		ioctl->data.chanlist.num_channels = 0;

		while (i < MAX_ADAPTERS) {
			if (esas2r_adapters[i]) {
				ioctl->data.chanlist.num_channels++;
				ioctl->data.chanlist.channel[k] = i;
				k++;
			}
			i++;
		}

		goto ioctl_done;
	}

	/* get the channel */

	if (ioctl->header.channel == 0xFF) {
		a = (struct esas2r_adapter *)hostdata;
	} else {
		/* validate the channel before using it to index the table */
		if (ioctl->header.channel >= MAX_ADAPTERS ||
		    esas2r_adapters[ioctl->header.channel] == NULL) {
			ioctl->header.return_code = IOCTL_BAD_CHANNEL;
			esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
			kfree(ioctl);

			return -ENOTSUPP;
		}

		a = esas2r_adapters[ioctl->header.channel];
	}

	switch (cmd) {
	case EXPRESS_IOCTL_RW_FIRMWARE:

		if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) {
			err = esas2r_write_fw(a,
					      (char *)ioctl->data.fwrw.image,
					      0,
					      sizeof(struct atto_express_ioctl));

			if (err >= 0) {
				err = esas2r_read_fw(a,
						     (char *)ioctl->data.fwrw.image,
						     0,
						     sizeof(struct atto_express_ioctl));
			}
		} else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) {
			err = esas2r_write_fs(a,
					      (char *)ioctl->data.fwrw.image,
					      0,
					      sizeof(struct atto_express_ioctl));

			if (err >= 0) {
				err = esas2r_read_fs(a,
						     (char *)ioctl->data.fwrw.image,
						     0,
						     sizeof(struct atto_express_ioctl));
			}
		} else {
			ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE;
		}

		break;

	case EXPRESS_IOCTL_READ_PARAMS:

		memcpy(ioctl->data.prw.data_buffer, a->nvram,
		       sizeof(struct esas2r_sas_nvram));
		ioctl->data.prw.code = 1;
		break;

	case EXPRESS_IOCTL_WRITE_PARAMS:

		rq = esas2r_alloc_request(a);
		if (rq == NULL) {
			kfree(ioctl);
			esas2r_log(ESAS2R_LOG_WARN,
				   "could not allocate an internal request");
			return -ENOMEM;
		}

		code = esas2r_write_params(a, rq,
					   (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
		ioctl->data.prw.code = code;

		esas2r_free_request(a, rq);

		break;

	case EXPRESS_IOCTL_DEFAULT_PARAMS:

		esas2r_nvram_get_defaults(a,
					  (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
		ioctl->data.prw.code = 1;
		break;

	case EXPRESS_IOCTL_CHAN_INFO:

		ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV;
		ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV;
		ioctl->data.chaninfo.IRQ = a->pcid->irq;
		ioctl->data.chaninfo.device_id = a->pcid->device;
		ioctl->data.chaninfo.vendor_id = a->pcid->vendor;
		ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device;
		ioctl->data.chaninfo.revision_id = a->pcid->revision;
		ioctl->data.chaninfo.pci_bus = a->pcid->bus->number;
		ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn;
		ioctl->data.chaninfo.core_rev = 0;
		ioctl->data.chaninfo.host_no = a->host->host_no;
		ioctl->data.chaninfo.hbaapi_rev = 0;
		break;

	case EXPRESS_IOCTL_SMP:
		ioctl->header.return_code =
			handle_smp_ioctl(a, &ioctl->data.ioctl_smp);
		break;

	case EXPRESS_CSMI:
		ioctl->header.return_code =
			handle_csmi_ioctl(a, &ioctl->data.csmi);
		break;

	case EXPRESS_IOCTL_HBA:
		ioctl->header.return_code =
			handle_hba_ioctl(a, &ioctl->data.ioctl_hba);
		break;

	case EXPRESS_IOCTL_VDA:
		err = esas2r_write_vda(a,
				       (char *)&ioctl->data.ioctl_vda,
				       0,
				       sizeof(struct atto_ioctl_vda) +
				       ioctl->data.ioctl_vda.data_length);

		if (err >= 0) {
			err = esas2r_read_vda(a,
					      (char *)&ioctl->data.ioctl_vda,
					      0,
					      sizeof(struct atto_ioctl_vda) +
					      ioctl->data.ioctl_vda.data_length);
		}

		break;

	case EXPRESS_IOCTL_GET_MOD_INFO:

		ioctl->data.modinfo.adapter = a;
		ioctl->data.modinfo.pci_dev = a->pcid;
		ioctl->data.modinfo.scsi_host = a->host;
		ioctl->data.modinfo.host_no = a->host->host_no;

		break;

	default:
		esas2r_debug("esas2r_ioctl invalid cmd %d!", cmd);
		ioctl->header.return_code = IOCTL_ERR_INVCMD;
	}

ioctl_done:

	if (err < 0) {
		esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %d", err,
			   cmd);

		switch (err) {
		case -ENOMEM:
		case -EBUSY:
			ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES;
			break;

		case -ENOSYS:
		case -EINVAL:
			ioctl->header.return_code = IOCTL_INVALID_PARAM;
			break;

		default:
			ioctl->header.return_code = IOCTL_GENERAL_ERROR;
			break;
		}
	}

	/* Always copy the buffer back, if only to pick up the status */
	err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
	if (err != 0) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "ioctl_handler copy_to_user didn't copy "
			   "everything (err %d, cmd %d)", err,
			   cmd);
		kfree(ioctl);

		return -EFAULT;
	}

	kfree(ioctl);

	return 0;
}

int esas2r_ioctl(struct scsi_device *sd, int cmd, void __user *arg)
{
	return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
}

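/*
 * Firmware image transfers are staged through a coherent DMA buffer that
 * lives for the duration of one read/write cycle; the helpers below
 * manage its lifetime.
 */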
| 1557 | static void free_fw_buffers(struct esas2r_adapter *a) |
| 1558 | { |
| 1559 | if (a->firmware.data) { |
| 1560 | dma_free_coherent(&a->pcid->dev, |
| 1561 | (size_t)a->firmware.orig_len, |
| 1562 | a->firmware.data, |
| 1563 | (dma_addr_t)a->firmware.phys); |
| 1564 | |
| 1565 | a->firmware.data = NULL; |
| 1566 | } |
| 1567 | } |
| 1568 | |
| 1569 | static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length) |
| 1570 | { |
| 1571 | free_fw_buffers(a); |
| 1572 | |
| 1573 | a->firmware.orig_len = length; |
| 1574 | |
| 1575 | a->firmware.data = (u8 *)dma_alloc_coherent(&a->pcid->dev, |
| 1576 | (size_t)length, |
| 1577 | (dma_addr_t *)&a->firmware. |
| 1578 | phys, |
| 1579 | GFP_KERNEL); |
| 1580 | |
| 1581 | if (!a->firmware.data) { |
| 1582 | esas2r_debug("buffer alloc failed!"); |
| 1583 | return 0; |
| 1584 | } |
| 1585 | |
| 1586 | return 1; |
| 1587 | } |
| 1588 | |
/* Handle a call to read firmware. */
int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count)
{
	esas2r_trace_enter();
	/* if the cached header is a status, simply copy it over and return. */
	if (a->firmware.state == FW_STATUS_ST) {
		int size = min_t(int, count, sizeof(a->firmware.header));
		esas2r_trace_exit();
		memcpy(buf, &a->firmware.header, size);
		esas2r_debug("esas2r_read_fw: STATUS size %d", size);
		return size;
	}

	/*
	 * if the cached header is a command, execute it when the read
	 * starts at offset 0; otherwise copy out the buffered pieces.
	 */

	if (a->firmware.state == FW_COMMAND_ST) {
		u32 length = a->firmware.header.length;
		esas2r_trace_exit();

		esas2r_debug("esas2r_read_fw: COMMAND length %d off %ld",
			     length,
			     off);

		if (off == 0) {
			if (a->firmware.header.action == FI_ACT_UP) {
				if (!allocate_fw_buffers(a, length))
					return -ENOMEM;

				/* copy header over */

				memcpy(a->firmware.data,
				       &a->firmware.header,
				       sizeof(a->firmware.header));

				do_fm_api(a,
					  (struct esas2r_flash_img *)a->firmware.data);
			} else if (a->firmware.header.action == FI_ACT_UPSZ) {
				int size =
					min_t(int, count,
					      sizeof(a->firmware.header));
				do_fm_api(a, &a->firmware.header);
				memcpy(buf, &a->firmware.header, size);
				esas2r_debug("FI_ACT_UPSZ size %d", size);
				return size;
			} else {
				esas2r_debug("invalid action %d",
					     a->firmware.header.action);
				return -ENOSYS;
			}
		}

		if (count + off > length)
			count = length - off;

		if (count < 0)
			return 0;

		if (!a->firmware.data) {
			esas2r_debug(
				"read: nonzero offset but no buffer available!");
			return -ENOMEM;
		}

		esas2r_debug("esas2r_read_fw: off %ld count %d length %d", off,
			     count,
			     length);

		memcpy(buf, &a->firmware.data[off], count);

		/* when done, release the buffer */

		if (length <= off + count) {
			esas2r_debug("esas2r_read_fw: freeing buffer!");

			free_fw_buffers(a);
		}

		return count;
	}

	esas2r_trace_exit();
	esas2r_debug("esas2r_read_fw: invalid firmware state %d",
		     a->firmware.state);

	return -EINVAL;
}

/* Handle a call to write firmware. */
int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
		    int count)
{
	u32 length;

	if (off == 0) {
		struct esas2r_flash_img *header =
			(struct esas2r_flash_img *)buf;

		/* assume version 0 flash image */

		int min_size = sizeof(struct esas2r_flash_img_v0);

		a->firmware.state = FW_INVALID_ST;

		/* validate the version field first */

		if (count < 4 || header->fi_version > FI_VERSION_1) {
			esas2r_debug(
				"esas2r_write_fw: short header or invalid version");
			return -EINVAL;
		}

		/* See if it's a version 1 flash image */

		if (header->fi_version == FI_VERSION_1)
			min_size = sizeof(struct esas2r_flash_img);

		/* If this is the start, the header must be full and valid. */
		if (count < min_size) {
			esas2r_debug("esas2r_write_fw: short header, aborting");
			return -EINVAL;
		}

		/* Make sure the size is reasonable. */
		length = header->length;

		if (length > 1024 * 1024) {
			esas2r_debug(
				"esas2r_write_fw: bogus length %d fi_version %d",
				length, header->fi_version);
			return -EINVAL;
		}

		/*
		 * If this is a download command, allocate memory because
		 * we have to cache everything.  Otherwise, just cache
		 * the header, because the read op will do the command.
		 */

		if (header->action == FI_ACT_DOWN) {
			if (!allocate_fw_buffers(a, length))
				return -ENOMEM;

			/*
			 * Store the command, so there is context on subsequent
			 * calls.
			 */
			memcpy(&a->firmware.header,
			       buf,
			       sizeof(*header));
		} else if (header->action == FI_ACT_UP
			   || header->action == FI_ACT_UPSZ) {
			/* Save the command; the result is picked up on read */
			memcpy(&a->firmware.header,
			       buf,
			       sizeof(*header));

			a->firmware.state = FW_COMMAND_ST;

			esas2r_debug(
				"esas2r_write_fw: COMMAND, count %d, action %d",
				count, header->action);

			/*
			 * Pretend we took the whole buffer,
			 * so we don't get bothered again.
			 */

			return count;
		} else {
			esas2r_debug("esas2r_write_fw: invalid action %d",
				     header->action);
			return -ENOSYS;
		}
	} else {
		length = a->firmware.header.length;
	}

	/*
	 * We only get here on a download command, regardless of offset.
	 * The chunks written by the system need to be cached, and when
	 * the final one arrives, issue the fmapi command.
	 */

	if (off + count > length)
		count = length - off;

	if (count > 0) {
		esas2r_debug("esas2r_write_fw: off %ld count %d length %d", off,
			     count,
			     length);

		/*
		 * On a full upload, the system tries sending the whole buffer.
		 * There's nothing to do with it, so just drop it here, before
		 * trying to copy over into unallocated memory!
		 */
		if (a->firmware.header.action == FI_ACT_UP)
			return count;

		if (!a->firmware.data) {
			esas2r_debug(
				"write: nonzero offset but no buffer available!");
			return -ENOMEM;
		}

		memcpy(&a->firmware.data[off], buf, count);

		if (length == off + count) {
			do_fm_api(a,
				  (struct esas2r_flash_img *)a->firmware.data);

			/*
			 * Now copy the header result to be picked up by the
			 * next read
			 */
			memcpy(&a->firmware.header,
			       a->firmware.data,
			       sizeof(a->firmware.header));

			a->firmware.state = FW_STATUS_ST;

			esas2r_debug("write completed");

			/*
			 * Since the system has the data buffered, the only way
			 * this can leak is if a root user writes a program
			 * that writes a shorter buffer than it claims, and the
			 * copyin fails.
			 */
			free_fw_buffers(a);
		}
	}

	return count;
}
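
/*
 * Usage sketch (illustrative only, not part of the driver): the two
 * handlers above follow a (buf, off, count) transfer contract, so a
 * user-space flasher could drive a download roughly as below.  The node
 * path and the load_image() helper are hypothetical.
 *
 *	size_t len, off, chunk = 4096;
 *	struct esas2r_flash_img *img = load_image(&len);
 *	struct esas2r_flash_img status;
 *	int fd = open("/sys/.../fw", O_RDWR);
 *
 *	img->action = FI_ACT_DOWN;
 *	img->length = len;
 *	for (off = 0; off < len; off += chunk)	// header arrives at off 0;
 *		pwrite(fd, (char *)img + off,	// the final chunk triggers
 *		       chunk < len - off ? chunk : len - off, off); // do_fm_api()
 *	pread(fd, &status, sizeof(status), 0);	// pick up FW_STATUS_ST header
 */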

/* Callback for the completion of a VDA request. */
static void vda_complete_req(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	a->vda_command_done = 1;
	wake_up_interruptible(&a->vda_waiter);
}

/*
 * Scatter/gather callback for VDA requests: report the bus address that
 * corresponds to the current offset within the coherent VDA buffer, and
 * return how many contiguous bytes remain from there to the end of it.
 */
static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer;

	(*addr) = a->ppvda_buffer + offset;
	return VDA_MAX_BUFFER_SIZE - offset;
}

/* Handle a call to read a VDA command. */
int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
{
	if (!a->vda_buffer)
		return -ENOMEM;

	if (off == 0) {
		struct esas2r_request *rq;
		struct atto_ioctl_vda *vi =
			(struct atto_ioctl_vda *)a->vda_buffer;
		struct esas2r_sg_context sgc;
		bool wait_for_completion;

		/*
		 * Presumably, someone has already written to the vda_buffer,
		 * and now they are reading back the response, so we will
		 * actually issue the request to the chip and reply.
		 */

		/* allocate a request */
		rq = esas2r_alloc_request(a);
		if (rq == NULL) {
			esas2r_debug("esas2r_read_vda: out of requests");
			return -EBUSY;
		}

		rq->comp_cb = vda_complete_req;

		sgc.first_req = rq;
		sgc.adapter = a;
		sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ;
		sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda;

		a->vda_command_done = 0;

		wait_for_completion =
			esas2r_process_vda_ioctl(a, vi, rq, &sgc);

		if (wait_for_completion) {
			/* now wait around for it to complete. */

			while (!a->vda_command_done)
				wait_event_interruptible(a->vda_waiter,
							 a->vda_command_done);
		}

		esas2r_free_request(a, rq);
	}

	if (off > VDA_MAX_BUFFER_SIZE)
		return 0;

	if (count + off > VDA_MAX_BUFFER_SIZE)
		count = VDA_MAX_BUFFER_SIZE - off;

	if (count < 0)
		return 0;

	memcpy(buf, a->vda_buffer + off, count);

	return count;
}

/* Handle a call to write a VDA command. */
int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
		     int count)
{
	/*
	 * Allocate memory for it, if not already done.  Once allocated,
	 * we will keep it around until the driver is unloaded.
	 */

	if (!a->vda_buffer) {
		dma_addr_t dma_addr;
		a->vda_buffer = dma_alloc_coherent(&a->pcid->dev,
						   (size_t)VDA_MAX_BUFFER_SIZE,
						   &dma_addr,
						   GFP_KERNEL);

		a->ppvda_buffer = dma_addr;
	}

	if (!a->vda_buffer)
		return -ENOMEM;

	if (off > VDA_MAX_BUFFER_SIZE)
		return 0;

	if (count + off > VDA_MAX_BUFFER_SIZE)
		count = VDA_MAX_BUFFER_SIZE - off;

	if (count < 1)
		return 0;

	memcpy(a->vda_buffer + off, buf, count);

	return count;
}
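
/*
 * Usage sketch (illustrative only): the VDA handlers implement a
 * write-then-read protocol over the shared coherent buffer.  Assuming a
 * hypothetical node wired to esas2r_write_vda()/esas2r_read_vda():
 *
 *	pwrite(fd, &cmd, sizeof(cmd), 0);	// stage struct atto_ioctl_vda
 *	pread(fd, resp, resp_len, 0);		// a read at offset 0 issues
 *						// the command and returns
 *						// the response
 */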

/* Callback for the completion of an FS_API request. */
static void fs_api_complete_req(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	a->fs_api_command_done = 1;

	wake_up_interruptible(&a->fs_api_waiter);
}

/* Scatter/gather callback for FS_API requests */
static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	struct esas2r_ioctl_fs *fs =
		(struct esas2r_ioctl_fs *)a->fs_api_buffer;
	u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs;

	(*addr) = a->ppfs_api_buffer + offset;

	return a->fs_api_buffer_size - offset;
}
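
/*
 * Contract sketch (for reference, not executed here): the S/G builder in
 * the core driver is expected to consume both get_physaddr callbacks
 * roughly like
 *
 *	while (bytes_left) {
 *		u64 addr;
 *		u32 len = sgc->get_phys_addr(sgc, &addr);
 *
 *		len = min(len, bytes_left);
 *		... emit S/G element (addr, len) ...
 *		sgc->cur_offset += len;
 *		bytes_left -= len;
 *	}
 *
 * which is why each callback reports the remaining contiguous span from
 * the current offset.
 */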

/* Handle a call to read firmware via FS_API. */
int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
{
	if (!a->fs_api_buffer)
		return -ENOMEM;

	if (off == 0) {
		struct esas2r_request *rq;
		struct esas2r_sg_context sgc;
		struct esas2r_ioctl_fs *fs =
			(struct esas2r_ioctl_fs *)a->fs_api_buffer;

		/* If another flash request is already in progress, return. */
		if (down_interruptible(&a->fs_api_semaphore)) {
busy:
			fs->status = ATTO_STS_OUT_OF_RSRC;
			return -EBUSY;
		}

		/*
		 * Presumably, someone has already written to the
		 * fs_api_buffer, and now they are reading back the
		 * response, so we will actually issue the request to the
		 * chip and reply.  Allocate a request.
		 */

		rq = esas2r_alloc_request(a);
		if (rq == NULL) {
			esas2r_debug("esas2r_read_fs: out of requests");
			up(&a->fs_api_semaphore);
			goto busy;
		}

		rq->comp_cb = fs_api_complete_req;

		/* Set up the sgc to build the S/G table */

		sgc.cur_offset = fs->data;
		sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api;

		a->fs_api_command_done = 0;

		if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) {
			if (fs->status == ATTO_STS_OUT_OF_RSRC)
				count = -EBUSY;

			goto dont_wait;
		}

		/* Now wait around for it to complete. */

		while (!a->fs_api_command_done)
			wait_event_interruptible(a->fs_api_waiter,
						 a->fs_api_command_done);

dont_wait:
		/* Free the request and keep going */
		up(&a->fs_api_semaphore);
		esas2r_free_request(a, rq);

		/* Pick up possible error code from above */
		if (count < 0)
			return count;
	}

	if (off > a->fs_api_buffer_size)
		return 0;

	if (count + off > a->fs_api_buffer_size)
		count = a->fs_api_buffer_size - off;

	if (count < 0)
		return 0;

	memcpy(buf, a->fs_api_buffer + off, count);

	return count;
}

/* Handle a call to write firmware via FS_API. */
int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
		    int count)
{
	if (off == 0) {
		struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf;
		u32 length = fs->command.length + offsetof(
			struct esas2r_ioctl_fs,
			data);

		/*
		 * Special case: for BEGIN commands, the length field
		 * is lying to us, so just get enough for the header.
		 */

		if (fs->command.command == ESAS2R_FS_CMD_BEGINW)
			length = offsetof(struct esas2r_ioctl_fs, data);

		/*
		 * Beginning a command.  We assume we'll get at least
		 * enough in the first write so we can look at the
		 * header and see how much we need to alloc.
		 */

		if (count < offsetof(struct esas2r_ioctl_fs, data))
			return -EINVAL;

		/* Allocate a buffer or use the existing buffer. */
		if (a->fs_api_buffer) {
			if (a->fs_api_buffer_size < length) {
				/* Free too-small buffer and get a new one */
				dma_free_coherent(&a->pcid->dev,
						  (size_t)a->fs_api_buffer_size,
						  a->fs_api_buffer,
						  (dma_addr_t)a->ppfs_api_buffer);

				goto re_allocate_buffer;
			}
		} else {
re_allocate_buffer:
			a->fs_api_buffer_size = length;

			a->fs_api_buffer = dma_alloc_coherent(
				&a->pcid->dev,
				(size_t)a->fs_api_buffer_size,
				(dma_addr_t *)&a->ppfs_api_buffer,
				GFP_KERNEL);
		}
	}

	if (!a->fs_api_buffer)
		return -ENOMEM;

	if (off > a->fs_api_buffer_size)
		return 0;

	if (count + off > a->fs_api_buffer_size)
		count = a->fs_api_buffer_size - off;

	if (count < 1)
		return 0;

	memcpy(a->fs_api_buffer + off, buf, count);

	return count;
}
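
/*
 * Usage sketch (illustrative only): like the VDA path, FS_API is a
 * write-then-read protocol.  esas2r_write_fs() sizes its coherent buffer
 * from the header that arrives at offset 0, and esas2r_read_fs() at
 * offset 0 issues the staged command.  The node path and build_fs_cmd()
 * helper below are hypothetical:
 *
 *	struct esas2r_ioctl_fs *fs = build_fs_cmd(&total);
 *	int fd = open("/sys/.../fs", O_RDWR);
 *
 *	pwrite(fd, fs, total, 0);	// stage header + command.length bytes
 *	pread(fd, fs, total, 0);	// issue it; fs->status holds the result
 */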