/*
 * IBM eServer i/pSeries Virtual SCSI Target Driver
 * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
 *			   Santiago Leon (santil@us.ibm.com) IBM Corp.
 *			   Linda Xie (lxie@us.ibm.com) IBM Corp.
 *
 * Copyright (C) 2005-2006 FUJITA Tomonori <tomof@acm.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tgt.h>
#include <scsi/libsrp.h>
#include <asm/hvcall.h>
#include <asm/iommu.h>
#include <asm/prom.h>
#include <asm/vio.h>

#include "ibmvscsi.h"

#define	INITIAL_SRP_LIMIT	16
#define	DEFAULT_MAX_SECTORS	512

#define	TGT_NAME	"ibmvstgt"

/*
 * Hypervisor calls.
 */
#define h_copy_rdma(l, sa, sb, da, db) \
			plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
#define h_send_crq(ua, l, h) \
			plpar_hcall_norets(H_SEND_CRQ, ua, l, h)
#define h_reg_crq(ua, tok, sz) \
			plpar_hcall_norets(H_REG_CRQ, ua, tok, sz)
#define h_free_crq(ua) \
			plpar_hcall_norets(H_FREE_CRQ, ua)

/* tmp - will replace with SCSI logging stuff */
#define eprintk(fmt, args...)					\
do {								\
	printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args);	\
} while (0)
/* #define dprintk eprintk */
#define dprintk(fmt, args...)

struct vio_port {
	struct vio_dev *dma_dev;

	struct crq_queue crq_queue;
	struct work_struct crq_work;

	unsigned long liobn;
	unsigned long riobn;
	struct srp_target *target;
};

static struct workqueue_struct *vtgtd;

/*
 * These are fixed for the system and come from the Open Firmware device tree.
 * We just store them here to save getting them every time.
 */
static char system_id[64] = "";
static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;

static struct vio_port *target_to_port(struct srp_target *target)
{
	return (struct vio_port *) target->ldata;
}

static inline union viosrp_iu *vio_iu(struct iu_entry *iue)
{
	return (union viosrp_iu *) (iue->sbuf->buf);
}

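/*
 * send_iu - return an IU to the client
 *
 * The response IU is first copied into the client's buffer with
 * H_COPY_RDMA, then a CRQ element pointing at it is posted with
 * H_SEND_CRQ so the client knows the IU is ready.
 */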
static int send_iu(struct iu_entry *iue, uint64_t length, uint8_t format)
{
	struct srp_target *target = iue->target;
	struct vio_port *vport = target_to_port(target);
	long rc, rc1;
	union {
		struct viosrp_crq cooked;
		uint64_t raw[2];
	} crq;

	/* First copy the SRP */
	rc = h_copy_rdma(length, vport->liobn, iue->sbuf->dma,
			 vport->riobn, iue->remote_token);

	if (rc)
		eprintk("Error %ld transferring data\n", rc);

	crq.cooked.valid = 0x80;
	crq.cooked.format = format;
	crq.cooked.reserved = 0x00;
	crq.cooked.timeout = 0x00;
	crq.cooked.IU_length = length;
	crq.cooked.IU_data_ptr = vio_iu(iue)->srp.rsp.tag;

	if (rc == 0)
		crq.cooked.status = 0x99;	/* Just needs to be non-zero */
	else
		crq.cooked.status = 0x00;

	rc1 = h_send_crq(vport->dma_dev->unit_address, crq.raw[0], crq.raw[1]);

	if (rc1) {
		eprintk("%ld sending response\n", rc1);
		return rc1;
	}

	return rc;
}

#define SRP_RSP_SENSE_DATA_LEN	18

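/*
 * send_rsp - build and send an SRP_RSP IU for a completed or failed request
 *
 * When status is non-zero, sense data is attached: either the sense
 * buffer of the SCSI command, or a minimal fixed-format sense block
 * built from the supplied sense key and additional sense code.
 */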
static int send_rsp(struct iu_entry *iue, struct scsi_cmnd *sc,
		    unsigned char status, unsigned char asc)
{
	union viosrp_iu *iu = vio_iu(iue);
	uint64_t tag = iu->srp.rsp.tag;

	/* If the linked bit is on and status is good */
	if (test_bit(V_LINKED, &iue->flags) && (status == NO_SENSE))
		status = 0x10;

	memset(iu, 0, sizeof(struct srp_rsp));
	iu->srp.rsp.opcode = SRP_RSP;
	iu->srp.rsp.req_lim_delta = 1;
	iu->srp.rsp.tag = tag;

	if (test_bit(V_DIOVER, &iue->flags))
		iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;

	iu->srp.rsp.data_in_res_cnt = 0;
	iu->srp.rsp.data_out_res_cnt = 0;

	iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID;

	iu->srp.rsp.resp_data_len = 0;
	iu->srp.rsp.status = status;
	if (status) {
		uint8_t *sense = iu->srp.rsp.data;

		if (sc) {
			iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
			iu->srp.rsp.sense_data_len = SCSI_SENSE_BUFFERSIZE;
			memcpy(sense, sc->sense_buffer, SCSI_SENSE_BUFFERSIZE);
		} else {
			iu->srp.rsp.status = SAM_STAT_CHECK_CONDITION;
			iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
			iu->srp.rsp.sense_data_len = SRP_RSP_SENSE_DATA_LEN;

			/* Valid bit and 'current errors' */
			sense[0] = (0x1 << 7 | 0x70);
			/* Sense key */
			sense[2] = status;
			/* Additional sense length */
			sense[7] = 0xa;	/* 10 bytes */
			/* Additional sense code */
			sense[12] = asc;
		}
	}

	send_iu(iue, sizeof(iu->srp.rsp) + SRP_RSP_SENSE_DATA_LEN,
		VIOSRP_SRP_FORMAT);

	return 0;
}

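/*
 * handle_cmd_queue - hand queued SRP commands to the tgt core
 *
 * Walks target->cmd_queue and submits every entry that is not already
 * in flight.  The target lock is dropped around srp_cmd_queue(), so the
 * scan restarts from the head after each submission.
 */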
static void handle_cmd_queue(struct srp_target *target)
{
	struct Scsi_Host *shost = target->shost;
	struct iu_entry *iue;
	struct srp_cmd *cmd;
	unsigned long flags;
	int err;

retry:
	spin_lock_irqsave(&target->lock, flags);

	list_for_each_entry(iue, &target->cmd_queue, ilist) {
		if (!test_and_set_bit(V_FLYING, &iue->flags)) {
			spin_unlock_irqrestore(&target->lock, flags);
			cmd = iue->sbuf->buf;
			err = srp_cmd_queue(shost, cmd, iue, 0);
			if (err) {
				eprintk("cannot queue cmd %p %d\n", cmd, err);
				srp_iu_put(iue);
			}
			goto retry;
		}
	}

	spin_unlock_irqrestore(&target->lock, flags);
}

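/*
 * ibmvstgt_rdma - move data between the local scatterlist and the
 * client's SRP memory descriptors using H_COPY_RDMA
 *
 * A memory descriptor may span several scatterlist elements and vice
 * versa, so the copy is done in slices bounded by whichever of the two
 * runs out first.
 */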
static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
			 struct srp_direct_buf *md, int nmd,
			 enum dma_data_direction dir, unsigned int rest)
{
	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
	struct srp_target *target = iue->target;
	struct vio_port *vport = target_to_port(target);
	dma_addr_t token;
	long err;
	unsigned int done = 0;
	int i, sidx, soff;

	sidx = soff = 0;
	token = sg_dma_address(sg + sidx);

	for (i = 0; i < nmd && rest; i++) {
		unsigned int mdone, mlen;

		mlen = min(rest, md[i].len);
		for (mdone = 0; mlen;) {
			int slen = min(sg_dma_len(sg + sidx) - soff, mlen);

			if (dir == DMA_TO_DEVICE)
				err = h_copy_rdma(slen,
						  vport->riobn,
						  md[i].va + mdone,
						  vport->liobn,
						  token + soff);
			else
				err = h_copy_rdma(slen,
						  vport->liobn,
						  token + soff,
						  vport->riobn,
						  md[i].va + mdone);

			if (err != H_SUCCESS) {
				eprintk("rdma error %d %d\n", dir, slen);
				goto out;
			}

			mlen -= slen;
			mdone += slen;
			soff += slen;
			done += slen;

			if (soff == sg_dma_len(sg + sidx)) {
				sidx++;
				soff = 0;

				if (sidx > nsg) {
					eprintk("out of sg %p %d %d\n",
						iue, sidx, nsg);
					goto out;
				}
				token = sg_dma_address(sg + sidx);
			}
		}

		rest -= mdone;
	}
out:

	return 0;
}

static int ibmvstgt_transfer_data(struct scsi_cmnd *sc,
				  void (*done)(struct scsi_cmnd *))
{
	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
	int err;

	err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);

	done(sc);

	return err;
}

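/*
 * ibmvstgt_cmd_done - completion callback from the tgt core
 *
 * Removes the IU from the command queue, sends an SRP response that
 * reflects the SCSI result, and releases the IU.
 */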
static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
			     void (*done)(struct scsi_cmnd *))
{
	unsigned long flags;
	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
	struct srp_target *target = iue->target;

	dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);

	spin_lock_irqsave(&target->lock, flags);
	list_del(&iue->ilist);
	spin_unlock_irqrestore(&target->lock, flags);

	if (sc->result != SAM_STAT_GOOD) {
		eprintk("operation failed %p %d %x\n",
			iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]);
		send_rsp(iue, sc, HARDWARE_ERROR, 0x00);
	} else
		send_rsp(iue, sc, NO_SENSE, 0x00);

	done(sc);
	srp_iu_put(iue);
	return 0;
}

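/*
 * send_adapter_info - answer the ADAPTER_INFO management datagram
 *
 * Pulls the client's adapter info into a local DMA buffer (used only
 * to log the client identity), then pushes back our partition name,
 * partition number and maximum transfer size.
 */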
static int send_adapter_info(struct iu_entry *iue,
			     dma_addr_t remote_buffer, uint16_t length)
{
	struct srp_target *target = iue->target;
	struct vio_port *vport = target_to_port(target);
	struct Scsi_Host *shost = target->shost;
	dma_addr_t data_token;
	struct mad_adapter_info_data *info;
	int err;

	info = dma_alloc_coherent(target->dev, sizeof(*info), &data_token,
				  GFP_KERNEL);
	if (!info) {
		eprintk("bad dma_alloc_coherent %p\n", target);
		return 1;
	}

	/* Get remote info */
	err = h_copy_rdma(sizeof(*info), vport->riobn, remote_buffer,
			  vport->liobn, data_token);
	if (err == H_SUCCESS) {
		dprintk("Client connect: %s (%d)\n",
			info->partition_name, info->partition_number);
	}

	memset(info, 0, sizeof(*info));

	strcpy(info->srp_version, "16.a");
	strncpy(info->partition_name, partition_name,
		sizeof(info->partition_name));
	info->partition_number = partition_number;
	info->mad_version = 1;
	info->os_type = 2;
	info->port_max_txu[0] = shost->hostt->max_sectors << 9;

	/* Send our info to remote */
	err = h_copy_rdma(sizeof(*info), vport->liobn, data_token,
			  vport->riobn, remote_buffer);

	dma_free_coherent(target->dev, sizeof(*info), info, data_token);

	if (err != H_SUCCESS) {
		eprintk("Error sending adapter info %d\n", err);
		return 1;
	}

	return 0;
}

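/*
 * process_login - answer an SRP_LOGIN_REQ with an SRP_LOGIN_RSP
 *
 * Advertises the initial request limit and the direct/indirect buffer
 * formats this target supports.
 */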
static void process_login(struct iu_entry *iue)
{
	union viosrp_iu *iu = vio_iu(iue);
	struct srp_login_rsp *rsp = &iu->srp.login_rsp;
	uint64_t tag = iu->srp.rsp.tag;

	/* TODO handle case that requested size is wrong and
	 * buffer format is wrong
	 */
	memset(iu, 0, sizeof(struct srp_login_rsp));
	rsp->opcode = SRP_LOGIN_RSP;
	rsp->req_lim_delta = INITIAL_SRP_LIMIT;
	rsp->tag = tag;
	rsp->max_it_iu_len = sizeof(union srp_iu);
	rsp->max_ti_iu_len = sizeof(union srp_iu);
	/* direct and indirect */
	rsp->buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;

	send_iu(iue, sizeof(*rsp), VIOSRP_SRP_FORMAT);
}

static inline void queue_cmd(struct iu_entry *iue)
{
	struct srp_target *target = iue->target;
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	list_add_tail(&iue->ilist, &target->cmd_queue);
	spin_unlock_irqrestore(&target->lock, flags);
}

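/*
 * process_tsk_mgmt - translate an SRP task management request into a
 * SCSI task management function and pass it to the tgt core
 *
 * Unknown functions are rejected immediately with ILLEGAL_REQUEST.
 * Returns non-zero when the caller may release the IU.
 */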
static int process_tsk_mgmt(struct iu_entry *iue)
{
	union viosrp_iu *iu = vio_iu(iue);
	int fn;

	dprintk("%p %u\n", iue, iu->srp.tsk_mgmt.tsk_mgmt_func);

	switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
	case SRP_TSK_ABORT_TASK:
		fn = ABORT_TASK;
		break;
	case SRP_TSK_ABORT_TASK_SET:
		fn = ABORT_TASK_SET;
		break;
	case SRP_TSK_CLEAR_TASK_SET:
		fn = CLEAR_TASK_SET;
		break;
	case SRP_TSK_LUN_RESET:
		fn = LOGICAL_UNIT_RESET;
		break;
	case SRP_TSK_CLEAR_ACA:
		fn = CLEAR_ACA;
		break;
	default:
		fn = 0;
	}
	if (fn)
		scsi_tgt_tsk_mgmt_request(iue->target->shost, fn,
					  iu->srp.tsk_mgmt.task_tag,
					  (struct scsi_lun *) &iu->srp.tsk_mgmt.lun,
					  iue);
	else
		send_rsp(iue, NULL, ILLEGAL_REQUEST, 0x20);

	return !fn;
}

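/*
 * process_mad_iu - handle the VIOSRP management datagrams
 *
 * Only ADAPTER_INFO is fully handled; the other MAD types are answered
 * with an error status or just logged.  The IU is always returned to
 * the pool by the caller.
 */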
static int process_mad_iu(struct iu_entry *iue)
{
	union viosrp_iu *iu = vio_iu(iue);
	struct viosrp_adapter_info *info;
	struct viosrp_host_config *conf;

	switch (iu->mad.empty_iu.common.type) {
	case VIOSRP_EMPTY_IU_TYPE:
		eprintk("%s\n", "Unsupported EMPTY MAD IU");
		break;
	case VIOSRP_ERROR_LOG_TYPE:
		eprintk("%s\n", "Unsupported ERROR LOG MAD IU");
		iu->mad.error_log.common.status = 1;
		send_iu(iue, sizeof(iu->mad.error_log), VIOSRP_MAD_FORMAT);
		break;
	case VIOSRP_ADAPTER_INFO_TYPE:
		info = &iu->mad.adapter_info;
		info->common.status = send_adapter_info(iue, info->buffer,
							info->common.length);
		send_iu(iue, sizeof(*info), VIOSRP_MAD_FORMAT);
		break;
	case VIOSRP_HOST_CONFIG_TYPE:
		conf = &iu->mad.host_config;
		conf->common.status = 1;
		send_iu(iue, sizeof(*conf), VIOSRP_MAD_FORMAT);
		break;
	default:
		eprintk("Unknown type %u\n", iu->mad.empty_iu.common.type);
	}

	return 1;
}

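/*
 * process_srp_iu - dispatch on the SRP opcode
 *
 * Returns non-zero when the caller should put the IU back into the
 * pool; SRP_CMD keeps the IU until the command completes.
 */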
static int process_srp_iu(struct iu_entry *iue)
{
	union viosrp_iu *iu = vio_iu(iue);
	int done = 1;
	u8 opcode = iu->srp.rsp.opcode;

	switch (opcode) {
	case SRP_LOGIN_REQ:
		process_login(iue);
		break;
	case SRP_TSK_MGMT:
		done = process_tsk_mgmt(iue);
		break;
	case SRP_CMD:
		queue_cmd(iue);
		done = 0;
		break;
	case SRP_LOGIN_RSP:
	case SRP_I_LOGOUT:
	case SRP_T_LOGOUT:
	case SRP_RSP:
	case SRP_CRED_REQ:
	case SRP_CRED_RSP:
	case SRP_AER_REQ:
	case SRP_AER_RSP:
		eprintk("Unsupported type %u\n", opcode);
		break;
	default:
		eprintk("Unknown type %u\n", opcode);
	}

	return done;
}

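/*
 * process_iu - fetch an IU from the client and process it
 *
 * Allocates an IU from the pool, copies the request across with
 * H_COPY_RDMA and hands it to the MAD or SRP handler depending on the
 * CRQ format.
 */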
static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	struct iu_entry *iue;
	long err, done;

	iue = srp_iu_get(target);
	if (!iue) {
		eprintk("Error getting IU from pool, %p\n", target);
		return;
	}

	iue->remote_token = crq->IU_data_ptr;

	err = h_copy_rdma(crq->IU_length, vport->riobn,
			  iue->remote_token, vport->liobn, iue->sbuf->dma);

	if (err != H_SUCCESS) {
		eprintk("%ld transferring data error %p\n", err, iue);
		done = 1;
		goto out;
	}

	if (crq->format == VIOSRP_MAD_FORMAT)
		done = process_mad_iu(iue);
	else
		done = process_srp_iu(iue);
out:
	if (done)
		srp_iu_put(iue);
}

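/*
 * ibmvstgt_interrupt - CRQ interrupt handler; disables the device
 * interrupt and defers all CRQ processing to the ibmvtgtd workqueue.
 */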
static irqreturn_t ibmvstgt_interrupt(int irq, void *data)
{
	struct srp_target *target = (struct srp_target *) data;
	struct vio_port *vport = target_to_port(target);

	vio_disable_interrupts(vport->dma_dev);
	queue_work(vtgtd, &vport->crq_work);

	return IRQ_HANDLED;
}

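/*
 * crq_queue_create - set up the command/response queue
 *
 * Allocates a page for the CRQ, maps it for DMA and registers it with
 * the hypervisor (freeing and re-registering if the adapter was left
 * active, e.g. after a kexec), requests the device interrupt and sends
 * the initialization message to the partner partition.
 */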
static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
{
	int err;
	struct vio_port *vport = target_to_port(target);

	queue->msgs = (struct viosrp_crq *) get_zeroed_page(GFP_KERNEL);
	if (!queue->msgs)
		goto malloc_failed;
	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(target->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(queue->msg_token))
		goto map_failed;

	err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
			PAGE_SIZE);

	/* If the adapter was left active for some reason (like kexec)
	 * try freeing and re-registering
	 */
	if (err == H_RESOURCE) {
		do {
			err = h_free_crq(vport->dma_dev->unit_address);
		} while (err == H_BUSY || H_IS_LONG_BUSY(err));

		err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
				PAGE_SIZE);
	}

	if (err != H_SUCCESS && err != H_CLOSED) {
		eprintk("Error 0x%x opening virtual adapter\n", err);
		goto reg_crq_failed;
	}

	err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
			  SA_INTERRUPT, "ibmvstgt", target);
	if (err)
		goto req_irq_failed;

	vio_enable_interrupts(vport->dma_dev);

	h_send_crq(vport->dma_dev->unit_address, 0xC001000000000000, 0);

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	return 0;

req_irq_failed:
	do {
		err = h_free_crq(vport->dma_dev->unit_address);
	} while (err == H_BUSY || H_IS_LONG_BUSY(err));

reg_crq_failed:
	dma_unmap_single(target->dev, queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long) queue->msgs);

malloc_failed:
	return -ENOMEM;
}

static void crq_queue_destroy(struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	struct crq_queue *queue = &vport->crq_queue;
	int err;

	free_irq(vport->dma_dev->irq, target);
	do {
		err = h_free_crq(vport->dma_dev->unit_address);
	} while (err == H_BUSY || H_IS_LONG_BUSY(err));

	dma_unmap_single(target->dev, queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);

	free_page((unsigned long) queue->msgs);
}

static void process_crq(struct viosrp_crq *crq, struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	dprintk("%x %x\n", crq->valid, crq->format);

	switch (crq->valid) {
	case 0xC0:
		/* initialization */
		switch (crq->format) {
		case 0x01:
			h_send_crq(vport->dma_dev->unit_address,
				   0xC002000000000000, 0);
			break;
		case 0x02:
			break;
		default:
			eprintk("Unknown format %u\n", crq->format);
		}
		break;
	case 0xFF:
		/* transport event */
		break;
	case 0x80:
		/* real payload */
		switch (crq->format) {
		case VIOSRP_SRP_FORMAT:
		case VIOSRP_MAD_FORMAT:
			process_iu(crq, target);
			break;
		case VIOSRP_OS400_FORMAT:
		case VIOSRP_AIX_FORMAT:
		case VIOSRP_LINUX_FORMAT:
		case VIOSRP_INLINE_FORMAT:
			eprintk("Unsupported format %u\n", crq->format);
			break;
		default:
			eprintk("Unknown format %u\n", crq->format);
		}
		break;
	default:
		eprintk("unknown message type 0x%02x!?\n", crq->valid);
	}
}

static inline struct viosrp_crq *next_crq(struct crq_queue *queue)
{
	struct viosrp_crq *crq;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	crq = &queue->msgs[queue->cur];
	if (crq->valid & 0x80) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else
		crq = NULL;
	spin_unlock_irqrestore(&queue->lock, flags);

	return crq;
}

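/*
 * handle_crq - workqueue handler that drains the CRQ
 *
 * Processes queued elements, re-enables interrupts and checks the
 * queue once more to close the race with a message that arrived in
 * between, then pushes any queued SRP commands to the tgt core.
 */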
static void handle_crq(struct work_struct *work)
{
	struct vio_port *vport = container_of(work, struct vio_port, crq_work);
	struct srp_target *target = vport->target;
	struct viosrp_crq *crq;
	int done = 0;

	while (!done) {
		while ((crq = next_crq(&vport->crq_queue)) != NULL) {
			process_crq(crq, target);
			crq->valid = 0x00;
		}

		vio_enable_interrupts(vport->dma_dev);

		crq = next_crq(&vport->crq_queue);
		if (crq) {
			vio_disable_interrupts(vport->dma_dev);
			process_crq(crq, target);
			crq->valid = 0x00;
		} else
			done = 1;
	}

	handle_cmd_queue(target);
}

static int ibmvstgt_eh_abort_handler(struct scsi_cmnd *sc)
{
	unsigned long flags;
	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
	struct srp_target *target = iue->target;

	dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);

	spin_lock_irqsave(&target->lock, flags);
	list_del(&iue->ilist);
	spin_unlock_irqrestore(&target->lock, flags);

	srp_iu_put(iue);

	return 0;
}

static int ibmvstgt_tsk_mgmt_response(u64 mid, int result)
{
	struct iu_entry *iue = (struct iu_entry *) ((void *) mid);
	union viosrp_iu *iu = vio_iu(iue);
	unsigned char status, asc;

	eprintk("%p %d\n", iue, result);
	status = NO_SENSE;
	asc = 0;

	switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
	case SRP_TSK_ABORT_TASK:
		asc = 0x14;
		if (result)
			status = ABORTED_COMMAND;
		break;
	default:
		break;
	}

	send_rsp(iue, NULL, status, asc);
	srp_iu_put(iue);

	return 0;
}

static ssize_t system_id_show(struct class_device *cdev, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
}

static ssize_t partition_number_show(struct class_device *cdev, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
}

static ssize_t unit_address_show(struct class_device *cdev, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct srp_target *target = host_to_srp_target(shost);
	struct vio_port *vport = target_to_port(target);
	return snprintf(buf, PAGE_SIZE, "%x\n", vport->dma_dev->unit_address);
}

static CLASS_DEVICE_ATTR(system_id, S_IRUGO, system_id_show, NULL);
static CLASS_DEVICE_ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
static CLASS_DEVICE_ATTR(unit_address, S_IRUGO, unit_address_show, NULL);

static struct class_device_attribute *ibmvstgt_attrs[] = {
	&class_device_attr_system_id,
	&class_device_attr_partition_number,
	&class_device_attr_unit_address,
	NULL,
};

static struct scsi_host_template ibmvstgt_sht = {
	.name			= TGT_NAME,
	.module			= THIS_MODULE,
	.can_queue		= INITIAL_SRP_LIMIT,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= DISABLE_CLUSTERING,
	.max_sectors		= DEFAULT_MAX_SECTORS,
	.transfer_response	= ibmvstgt_cmd_done,
	.transfer_data		= ibmvstgt_transfer_data,
	.eh_abort_handler	= ibmvstgt_eh_abort_handler,
	.tsk_mgmt_response	= ibmvstgt_tsk_mgmt_response,
	.shost_attrs		= ibmvstgt_attrs,
	.proc_name		= TGT_NAME,
};

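/*
 * ibmvstgt_probe - set up one virtual adapter
 *
 * Allocates the SCSI host and SRP target state, reads the local and
 * remote IOBNs from the "ibm,my-dma-window" property, creates the CRQ
 * and registers the host with the SCSI midlayer.
 */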
static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct Scsi_Host *shost;
	struct srp_target *target;
	struct vio_port *vport;
	unsigned int *dma, dma_size;
	int err = -ENOMEM;

	vport = kzalloc(sizeof(struct vio_port), GFP_KERNEL);
	if (!vport)
		return err;
	shost = scsi_host_alloc(&ibmvstgt_sht, sizeof(struct srp_target));
	if (!shost)
		goto free_vport;
	err = scsi_tgt_alloc_queue(shost);
	if (err)
		goto put_host;

	target = host_to_srp_target(shost);
	target->shost = shost;
	vport->dma_dev = dev;
	target->ldata = vport;
	vport->target = target;
	err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT,
			       SRP_MAX_IU_LEN);
	if (err)
		goto put_host;

	dma = (unsigned int *) vio_get_attribute(dev, "ibm,my-dma-window",
						 &dma_size);
	if (!dma || dma_size != 40) {
		eprintk("Couldn't get window property %d\n", dma_size);
		err = -EIO;
		goto free_srp_target;
	}
	vport->liobn = dma[0];
	vport->riobn = dma[5];

	INIT_WORK(&vport->crq_work, handle_crq);

	err = crq_queue_create(&vport->crq_queue, target);
	if (err)
		goto free_srp_target;

	err = scsi_add_host(shost, target->dev);
	if (err)
		goto destroy_queue;
	return 0;

destroy_queue:
	crq_queue_destroy(target);
free_srp_target:
	srp_target_free(target);
put_host:
	scsi_host_put(shost);
free_vport:
	kfree(vport);
	return err;
}

static int ibmvstgt_remove(struct vio_dev *dev)
{
	struct srp_target *target = (struct srp_target *) dev->dev.driver_data;
	struct Scsi_Host *shost = target->shost;
	struct vio_port *vport = target->ldata;

	crq_queue_destroy(target);
	scsi_remove_host(shost);
	scsi_tgt_free_queue(shost);
	srp_target_free(target);
	kfree(vport);
	scsi_host_put(shost);
	return 0;
}

static struct vio_device_id ibmvstgt_device_table[] __devinitdata = {
	{"v-scsi-host", "IBM,v-scsi-host"},
	{"",""}
};

MODULE_DEVICE_TABLE(vio, ibmvstgt_device_table);

static struct vio_driver ibmvstgt_driver = {
	.id_table = ibmvstgt_device_table,
	.probe = ibmvstgt_probe,
	.remove = ibmvstgt_remove,
	.driver = {
		.name = "ibmvscsis",
		.owner = THIS_MODULE,
	}
};

static int get_system_info(void)
{
	struct device_node *rootdn;
	const char *id, *model, *name;
	unsigned int *num;

	rootdn = find_path_device("/");
	if (!rootdn)
		return -ENOENT;

	model = get_property(rootdn, "model", NULL);
	id = get_property(rootdn, "system-id", NULL);
	if (model && id)
		snprintf(system_id, sizeof(system_id), "%s-%s", model, id);

	name = get_property(rootdn, "ibm,partition-name", NULL);
	if (name)
		strncpy(partition_name, name, sizeof(partition_name));

	num = (unsigned int *) get_property(rootdn, "ibm,partition-no", NULL);
	if (num)
		partition_number = *num;

	return 0;
}

static int ibmvstgt_init(void)
{
	int err = -ENOMEM;

	printk("IBM eServer i/pSeries Virtual SCSI Target Driver\n");

	vtgtd = create_workqueue("ibmvtgtd");
	if (!vtgtd)
		return err;

	err = get_system_info();
	if (err)
		goto destroy_wq;

	err = vio_register_driver(&ibmvstgt_driver);
	if (err)
		goto destroy_wq;

	return 0;

destroy_wq:
	destroy_workqueue(vtgtd);
	return err;
}

static void ibmvstgt_exit(void)
{
	printk("Unregister IBM virtual SCSI driver\n");

	vio_unregister_driver(&ibmvstgt_driver);
	destroy_workqueue(vtgtd);
}

MODULE_DESCRIPTION("IBM Virtual SCSI Target");
MODULE_AUTHOR("Santiago Leon");
MODULE_LICENSE("GPL");

module_init(ibmvstgt_init);
module_exit(ibmvstgt_exit);