blob: e9321a5e3f97ef000dfb2a92104da0d4f230564e [file] [log] [blame]
Bryant G. Ly88a678b2016-06-28 17:05:35 -05001/*******************************************************************************
2 * IBM Virtual SCSI Target Driver
3 * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
4 * Santiago Leon (santil@us.ibm.com) IBM Corp.
5 * Linda Xie (lxie@us.ibm.com) IBM Corp.
6 *
7 * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org>
8 * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org>
9 *
10 * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
11 * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 ****************************************************************************/
24
25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/types.h>
31#include <linux/list.h>
32#include <linux/string.h>
Bryant G. Lye9409b22016-11-16 09:06:23 -080033#include <linux/delay.h>
Bryant G. Ly88a678b2016-06-28 17:05:35 -050034
35#include <target/target_core_base.h>
36#include <target/target_core_fabric.h>
37
38#include <asm/hvcall.h>
39#include <asm/vio.h>
40
41#include <scsi/viosrp.h>
42
43#include "ibmvscsi_tgt.h"
44
45#define IBMVSCSIS_VERSION "v0.2"
46
47#define INITIAL_SRP_LIMIT 800
48#define DEFAULT_MAX_SECTORS 256
49
50static uint max_vdma_size = MAX_H_COPY_RDMA;
51
52static char system_id[SYS_ID_NAME_LEN] = "";
53static char partition_name[PARTITION_NAMELEN] = "UNKNOWN";
54static uint partition_number = -1;
55
56/* Adapter list and lock to control it */
57static DEFINE_SPINLOCK(ibmvscsis_dev_lock);
58static LIST_HEAD(ibmvscsis_dev_list);
59
60static long ibmvscsis_parse_command(struct scsi_info *vscsi,
61 struct viosrp_crq *crq);
62
63static void ibmvscsis_adapter_idle(struct scsi_info *vscsi);
64
65static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
66 struct srp_rsp *rsp)
67{
68 u32 residual_count = se_cmd->residual_count;
69
70 if (!residual_count)
71 return;
72
73 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
74 if (se_cmd->data_direction == DMA_TO_DEVICE) {
75 /* residual data from an underflow write */
76 rsp->flags = SRP_RSP_FLAG_DOUNDER;
77 rsp->data_out_res_cnt = cpu_to_be32(residual_count);
78 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
79 /* residual data from an underflow read */
80 rsp->flags = SRP_RSP_FLAG_DIUNDER;
81 rsp->data_in_res_cnt = cpu_to_be32(residual_count);
82 }
83 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
84 if (se_cmd->data_direction == DMA_TO_DEVICE) {
85 /* residual data from an overflow write */
86 rsp->flags = SRP_RSP_FLAG_DOOVER;
87 rsp->data_out_res_cnt = cpu_to_be32(residual_count);
88 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
89 /* residual data from an overflow read */
90 rsp->flags = SRP_RSP_FLAG_DIOVER;
91 rsp->data_in_res_cnt = cpu_to_be32(residual_count);
92 }
93 }
94}
95
96/**
97 * connection_broken() - Determine if the connection to the client is good
98 * @vscsi: Pointer to our adapter structure
99 *
100 * This function attempts to send a ping MAD to the client. If the call to
101 * queue the request returns H_CLOSED then the connection has been broken
102 * and the function returns TRUE.
103 *
104 * EXECUTION ENVIRONMENT:
105 * Interrupt or Process environment
106 */
107static bool connection_broken(struct scsi_info *vscsi)
108{
109 struct viosrp_crq *crq;
110 u64 buffer[2] = { 0, 0 };
111 long h_return_code;
112 bool rc = false;
113
114 /* create a PING crq */
115 crq = (struct viosrp_crq *)&buffer;
116 crq->valid = VALID_CMD_RESP_EL;
117 crq->format = MESSAGE_IN_CRQ;
118 crq->status = PING;
119
120 h_return_code = h_send_crq(vscsi->dds.unit_id,
121 cpu_to_be64(buffer[MSG_HI]),
122 cpu_to_be64(buffer[MSG_LOW]));
123
124 pr_debug("connection_broken: rc %ld\n", h_return_code);
125
126 if (h_return_code == H_CLOSED)
127 rc = true;
128
129 return rc;
130}
131
/**
 * ibmvscsis_unregister_command_q() - Helper Function-Unregister Command Queue
 * @vscsi: Pointer to our adapter structure
 *
 * This function calls h_free_crq then frees the interrupt bit etc.
 * It must release the lock before doing so because of the time it can take
 * for h_free_crq in PHYP
 * NOTE: the caller must make sure that state and or flags will prevent
 * interrupt handler from scheduling work.
 * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag
 * we can't do it here, because we don't have the lock
 *
 * Return: ADAPT_SUCCESS once h_free_crq succeeds; ERROR on a hard
 * hypervisor failure or after roughly 300 seconds of busy retries.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level
 */
static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
{
	long qrc;
	long rc = ADAPT_SUCCESS;
	int ticks = 0;	/* approximate milliseconds spent waiting so far */

	/*
	 * Retry h_free_crq for as long as the hypervisor reports one of
	 * the "busy" return codes, sleeping for roughly the interval the
	 * return code suggests before each retry.
	 */
	do {
		qrc = h_free_crq(vscsi->dds.unit_id);
		switch (qrc) {
		case H_SUCCESS:
			break;

		case H_HARDWARE:
		case H_PARAMETER:
			/* unrecoverable hcall failure; give up */
			dev_err(&vscsi->dev, "unregister_command_q: error from h_free_crq %ld\n",
				qrc);
			rc = ERROR;
			break;

		case H_BUSY:
		case H_LONG_BUSY_ORDER_1_MSEC:
			/* msleep not good for small values */
			usleep_range(1000, 2000);
			ticks += 1;
			break;
		case H_LONG_BUSY_ORDER_10_MSEC:
			usleep_range(10000, 20000);
			ticks += 10;
			break;
		case H_LONG_BUSY_ORDER_100_MSEC:
			msleep(100);
			ticks += 100;
			break;
		case H_LONG_BUSY_ORDER_1_SEC:
			ssleep(1);
			ticks += 1000;
			break;
		case H_LONG_BUSY_ORDER_10_SEC:
			ssleep(10);
			ticks += 10000;
			break;
		case H_LONG_BUSY_ORDER_100_SEC:
			ssleep(100);
			ticks += 100000;
			break;
		default:
			dev_err(&vscsi->dev, "unregister_command_q: unknown error %ld from h_free_crq\n",
				qrc);
			rc = ERROR;
			break;
		}

		/*
		 * dont wait more then 300 seconds
		 * ticks are in milliseconds more or less
		 */
		if (ticks > 300000 && qrc != H_SUCCESS) {
			rc = ERROR;
			dev_err(&vscsi->dev, "Excessive wait for h_free_crq\n");
		}
	} while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);

	pr_debug("Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);

	return rc;
}
213
/**
 * ibmvscsis_delete_client_info() - Helper function to Delete Client Info
 * @vscsi: Pointer to our adapter structure
 * @client_closed: True if client closed its queue
 *
 * Deletes information specific to the client when the client goes away.
 * Capabilities are always cleared; the client OS type is only cleared
 * when the client explicitly closed its queue.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt or Process
 */
static void ibmvscsis_delete_client_info(struct scsi_info *vscsi,
					 bool client_closed)
{
	/* capabilities must be renegotiated on any reconnect */
	vscsi->client_cap = 0;

	/*
	 * Some things we don't want to clear if we're closing the queue,
	 * because some clients don't resend the host handshake when they
	 * get a transport event.
	 */
	if (client_closed)
		vscsi->client_data.os_type = 0;
}
237
/**
 * ibmvscsis_free_command_q() - Free Command Queue
 * @vscsi: Pointer to our adapter structure
 *
 * This function calls unregister_command_q, then clears interrupts and
 * any pending interrupt acknowledgments associated with the command q.
 * It also clears memory if there is no error.
 *
 * PHYP did not meet the PAPR architecture so that we must give up the
 * lock. This causes a timing hole regarding state change. To close the
 * hole this routine does accounting on any change that occurred during
 * the time the lock is not held: state/flag deltas are captured into
 * phyp_acr_state/phyp_acr_flags for the caller (adapter_idle) to replay.
 * NOTE: must give up and then acquire the interrupt lock, the caller must
 * make sure that state and or flags will prevent interrupt handler from
 * scheduling work.
 *
 * Return: ADAPT_SUCCESS or the error from ibmvscsis_unregister_command_q().
 *
 * EXECUTION ENVIRONMENT:
 *	Process level, interrupt lock is held
 */
static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
{
	int bytes;
	u32 flags_under_lock;
	u16 state_under_lock;
	long rc = ADAPT_SUCCESS;

	/* nothing to do if the queue is already closed */
	if (!(vscsi->flags & CRQ_CLOSED)) {
		vio_disable_interrupts(vscsi->dma_dev);

		/* snapshot state/flags before dropping the lock */
		state_under_lock = vscsi->new_state;
		flags_under_lock = vscsi->flags;
		vscsi->phyp_acr_state = 0;
		vscsi->phyp_acr_flags = 0;

		/* h_free_crq can sleep for a long time; drop the lock */
		spin_unlock_bh(&vscsi->intr_lock);
		rc = ibmvscsis_unregister_command_q(vscsi);
		spin_lock_bh(&vscsi->intr_lock);

		/* record any state change that raced the unlocked window */
		if (state_under_lock != vscsi->new_state)
			vscsi->phyp_acr_state = vscsi->new_state;

		/* record any flag bits that were set while unlocked */
		vscsi->phyp_acr_flags = ((~flags_under_lock) & vscsi->flags);

		if (rc == ADAPT_SUCCESS) {
			/* wipe the queue memory and mark it closed */
			bytes = vscsi->cmd_q.size * PAGE_SIZE;
			memset(vscsi->cmd_q.base_addr, 0, bytes);
			vscsi->cmd_q.index = 0;
			vscsi->flags |= CRQ_CLOSED;

			ibmvscsis_delete_client_info(vscsi, false);
		}

		pr_debug("free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
			 vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
			 vscsi->phyp_acr_state);
	}
	return rc;
}
296
297/**
298 * ibmvscsis_cmd_q_dequeue() - Get valid Command element
299 * @mask: Mask to use in case index wraps
300 * @current_index: Current index into command queue
301 * @base_addr: Pointer to start of command queue
302 *
303 * Returns a pointer to a valid command element or NULL, if the command
304 * queue is empty
305 *
306 * EXECUTION ENVIRONMENT:
307 * Interrupt environment, interrupt lock held
308 */
309static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
310 uint *current_index,
311 struct viosrp_crq *base_addr)
312{
313 struct viosrp_crq *ptr;
314
315 ptr = base_addr + *current_index;
316
317 if (ptr->valid) {
318 *current_index = (*current_index + 1) & mask;
319 dma_rmb();
320 } else {
321 ptr = NULL;
322 }
323
324 return ptr;
325}
326
327/**
328 * ibmvscsis_send_init_message() - send initialize message to the client
329 * @vscsi: Pointer to our adapter structure
330 * @format: Which Init Message format to send
331 *
332 * EXECUTION ENVIRONMENT:
333 * Interrupt environment interrupt lock held
334 */
335static long ibmvscsis_send_init_message(struct scsi_info *vscsi, u8 format)
336{
337 struct viosrp_crq *crq;
338 u64 buffer[2] = { 0, 0 };
339 long rc;
340
341 crq = (struct viosrp_crq *)&buffer;
342 crq->valid = VALID_INIT_MSG;
343 crq->format = format;
344 rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
345 cpu_to_be64(buffer[MSG_LOW]));
346
347 return rc;
348}
349
/**
 * ibmvscsis_check_init_msg() - Check init message valid
 * @vscsi: Pointer to our adapter structure
 * @format: Pointer to return format of Init Message, if any.
 *	    Set to UNUSED_FORMAT if no Init Message in queue.
 *
 * Checks if an initialize message was queued by the initiatior
 * after the queue was created and before the interrupt was enabled.
 *
 * Return: ADAPT_SUCCESS if the queue was empty or held exactly one
 * INIT_MSG; ERROR if any other element was found (its format is
 * reported through @format).
 *
 * EXECUTION ENVIRONMENT:
 *	Process level only, interrupt lock held
 */
static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
{
	struct viosrp_crq *crq;
	long rc = ADAPT_SUCCESS;

	crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index,
				      vscsi->cmd_q.base_addr);
	if (!crq) {
		/* queue is empty - nothing arrived before interrupts came on */
		*format = (uint)UNUSED_FORMAT;
	} else if (crq->valid == VALID_INIT_MSG && crq->format == INIT_MSG) {
		*format = (uint)INIT_MSG;
		/* consume the element so it is not processed again */
		crq->valid = INVALIDATE_CMD_RESP_EL;
		dma_rmb();

		/*
		 * the caller has ensured no initialize message was
		 * sent after the queue was
		 * created so there should be no other message on the queue.
		 */
		crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask,
					      &vscsi->cmd_q.index,
					      vscsi->cmd_q.base_addr);
		if (crq) {
			/* unexpected second element - protocol violation */
			*format = (uint)(crq->format);
			rc = ERROR;
			crq->valid = INVALIDATE_CMD_RESP_EL;
			dma_rmb();
		}
	} else {
		/* first element was not an INIT_MSG - protocol violation */
		*format = (uint)(crq->format);
		rc = ERROR;
		crq->valid = INVALIDATE_CMD_RESP_EL;
		dma_rmb();
	}

	return rc;
}
399
/**
 * ibmvscsis_establish_new_q() - Establish new CRQ queue
 * @vscsi: Pointer to our adapter structure
 * @new_state: New state being established after resetting the queue
 *
 * Resets the per-connection bookkeeping, re-enables interrupts, checks
 * for an early init message from the client, and (when appropriate)
 * sends our own INIT_MSG.
 *
 * Return: ADAPT_SUCCESS, or an error/hcall code on failure.
 *
 * Must be called with interrupt lock held.
 */
static long ibmvscsis_establish_new_q(struct scsi_info *vscsi, uint new_state)
{
	long rc = ADAPT_SUCCESS;
	uint format;

	/* reset connection bookkeeping, keeping only the sticky flag bits */
	vscsi->flags &= PRESERVE_FLAG_FIELDS;
	vscsi->rsp_q_timer.timer_pops = 0;
	vscsi->debit = 0;
	vscsi->credit = 0;

	rc = vio_enable_interrupts(vscsi->dma_dev);
	if (rc) {
		pr_warn("reset_queue: failed to enable interrupts, rc %ld\n",
			rc);
		return rc;
	}

	/* the client may have queued an init message before interrupts */
	rc = ibmvscsis_check_init_msg(vscsi, &format);
	if (rc) {
		dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n",
			rc);
		return rc;
	}

	/* no init from the client yet; announce ourselves */
	if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) {
		rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
		switch (rc) {
		case H_SUCCESS:
		case H_DROPPED:
		case H_CLOSED:
			/* dropped/closed just means the client isn't there yet */
			rc = ADAPT_SUCCESS;
			break;

		case H_PARAMETER:
		case H_HARDWARE:
			break;

		default:
			/* unexpected hcall result - adapter state is unknown */
			vscsi->state = UNDEFINED;
			rc = H_HARDWARE;
			break;
		}
	}

	return rc;
}
453
/**
 * ibmvscsis_reset_queue() - Reset CRQ Queue
 * @vscsi: Pointer to our adapter structure
 * @new_state: New state to establish after resetting the queue
 *
 * This function calls h_free_q and then calls h_reg_q and does all
 * of the bookkeeping to get us back to where we can communicate.
 *
 * Actually, we don't always call h_free_crq. A problem was discovered
 * where one partition would close and reopen his queue, which would
 * cause his partner to get a transport event, which would cause him to
 * close and reopen his queue, which would cause the original partition
 * to get a transport event, etc., etc. To prevent this, we don't
 * actually close our queue if the client initiated the reset, (i.e.
 * either we got a transport event or we have detected that the client's
 * queue is gone)
 *
 * On failure the adapter is left in ERR_DISCONNECTED with
 * RESPONSE_Q_DOWN set.
 *
 * EXECUTION ENVIRONMENT:
 *	Process environment, called with interrupt lock held
 */
static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state)
{
	int bytes;
	long rc = ADAPT_SUCCESS;

	pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);

	/* don't reset, the client did it for us */
	if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
		/* keep the queue; just reset bookkeeping and re-enable */
		vscsi->flags &= PRESERVE_FLAG_FIELDS;
		vscsi->rsp_q_timer.timer_pops = 0;
		vscsi->debit = 0;
		vscsi->credit = 0;
		vscsi->state = new_state;
		vio_enable_interrupts(vscsi->dma_dev);
	} else {
		/* full teardown and re-registration of the CRQ */
		rc = ibmvscsis_free_command_q(vscsi);
		if (rc == ADAPT_SUCCESS) {
			vscsi->state = new_state;

			bytes = vscsi->cmd_q.size * PAGE_SIZE;
			rc = h_reg_crq(vscsi->dds.unit_id,
				       vscsi->cmd_q.crq_token, bytes);
			/* H_CLOSED: registered but partner not there yet */
			if (rc == H_CLOSED || rc == H_SUCCESS) {
				rc = ibmvscsis_establish_new_q(vscsi,
							       new_state);
			}

			if (rc != ADAPT_SUCCESS) {
				pr_debug("reset_queue: reg_crq rc %ld\n", rc);

				vscsi->state = ERR_DISCONNECTED;
				vscsi->flags |= RESPONSE_Q_DOWN;
				ibmvscsis_free_command_q(vscsi);
			}
		} else {
			vscsi->state = ERR_DISCONNECTED;
			vscsi->flags |= RESPONSE_Q_DOWN;
		}
	}
}
515
516/**
517 * ibmvscsis_free_cmd_resources() - Free command resources
518 * @vscsi: Pointer to our adapter structure
519 * @cmd: Command which is not longer in use
520 *
521 * Must be called with interrupt lock held.
522 */
523static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
524 struct ibmvscsis_cmd *cmd)
525{
526 struct iu_entry *iue = cmd->iue;
527
528 switch (cmd->type) {
529 case TASK_MANAGEMENT:
530 case SCSI_CDB:
531 /*
532 * When the queue goes down this value is cleared, so it
533 * cannot be cleared in this general purpose function.
534 */
535 if (vscsi->debit)
536 vscsi->debit -= 1;
537 break;
538 case ADAPTER_MAD:
539 vscsi->flags &= ~PROCESSING_MAD;
540 break;
541 case UNSET_TYPE:
542 break;
543 default:
544 dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
545 cmd->type);
546 break;
547 }
548
549 cmd->iue = NULL;
550 list_add_tail(&cmd->list, &vscsi->free_cmd);
551 srp_iu_put(iue);
552
553 if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
554 list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
555 vscsi->flags &= ~WAIT_FOR_IDLE;
556 complete(&vscsi->wait_idle);
557 }
558}
559
/**
 * ibmvscsis_disconnect() - Helper function to disconnect
 * @work: Pointer to work_struct, gives access to our adapter structure
 *
 * An error has occurred or the driver received a Transport event,
 * and the driver is requesting that the command queue be de-registered
 * in a safe manner. If there is no outstanding I/O then we can stop the
 * queue. If we are restarting the queue it will be reflected in the
 * the state of the adapter.
 *
 * EXECUTION ENVIRONMENT:
 *	Process environment
 */
static void ibmvscsis_disconnect(struct work_struct *work)
{
	struct scsi_info *vscsi = container_of(work, struct scsi_info,
					       proc_work);
	u16 new_state;
	bool wait_idle = false;
	long rc = ADAPT_SUCCESS;	/* set on invalid transitions; informational only */

	spin_lock_bh(&vscsi->intr_lock);
	/* consume the requested target state set by post_disconnect */
	new_state = vscsi->new_state;
	vscsi->new_state = 0;

	pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags,
		 vscsi->state);

	/*
	 * check which state we are in and see if we
	 * should transitition to the new state
	 */
	switch (vscsi->state) {
	/* Should never be called while in this state. */
	case NO_QUEUE:
	/*
	 * Can never transition from this state;
	 * igonore errors and logout.
	 */
	case UNCONFIGURING:
		break;

	/* can transition from this state to UNCONFIGURING */
	case ERR_DISCONNECT:
		if (new_state == UNCONFIGURING)
			vscsi->state = new_state;
		break;

	/*
	 * Can transition from this state to to unconfiguring
	 * or err disconnect.
	 */
	case ERR_DISCONNECT_RECONNECT:
		switch (new_state) {
		case UNCONFIGURING:
		case ERR_DISCONNECT:
			vscsi->state = new_state;
			break;

		case WAIT_IDLE:
			break;
		default:
			break;
		}
		break;

	/* can transition from this state to UNCONFIGURING */
	case ERR_DISCONNECTED:
		if (new_state == UNCONFIGURING)
			vscsi->state = new_state;
		break;

	/*
	 * If this is a transition into an error state.
	 * a client is attempting to establish a connection
	 * and has violated the RPA protocol.
	 * There can be nothing pending on the adapter although
	 * there can be requests in the command queue.
	 */
	case WAIT_ENABLED:
	case PART_UP_WAIT_ENAB:
		switch (new_state) {
		case ERR_DISCONNECT:
			/* hard stop: close the queue, no reconnect */
			vscsi->flags |= RESPONSE_Q_DOWN;
			vscsi->state = new_state;
			vscsi->flags &= ~(SCHEDULE_DISCONNECT |
					  DISCONNECT_SCHEDULED);
			ibmvscsis_free_command_q(vscsi);
			break;
		case ERR_DISCONNECT_RECONNECT:
			/* reset the queue but stay in WAIT_ENABLED */
			ibmvscsis_reset_queue(vscsi, WAIT_ENABLED);
			break;

		/* should never happen */
		case WAIT_IDLE:
			rc = ERROR;
			dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
				vscsi->state);
			break;
		}
		break;

	case WAIT_IDLE:
		switch (new_state) {
		case ERR_DISCONNECT:
		case ERR_DISCONNECT_RECONNECT:
			vscsi->state = new_state;
			break;
		}
		break;

	/*
	 * Initiator has not done a successful srp login
	 * or has done a successful srp logout ( adapter was not
	 * busy). In the first case there can be responses queued
	 * waiting for space on the initiators response queue (MAD)
	 * The second case the adapter is idle. Assume the worse case,
	 * i.e. the second case.
	 */
	case WAIT_CONNECTION:
	case CONNECTED:
	case SRP_PROCESSING:
		/* must drain outstanding work before completing the change */
		wait_idle = true;
		vscsi->state = new_state;
		break;

	/* can transition from this state to UNCONFIGURING */
	case UNDEFINED:
		if (new_state == UNCONFIGURING)
			vscsi->state = new_state;
		break;
	default:
		break;
	}

	if (wait_idle) {
		pr_debug("disconnect start wait, active %d, sched %d\n",
			 (int)list_empty(&vscsi->active_q),
			 (int)list_empty(&vscsi->schedule_q));
		if (!list_empty(&vscsi->active_q) ||
		    !list_empty(&vscsi->schedule_q)) {
			vscsi->flags |= WAIT_FOR_IDLE;
			pr_debug("disconnect flags 0x%x\n", vscsi->flags);
			/*
			 * This routine is can not be called with the interrupt
			 * lock held.
			 */
			spin_unlock_bh(&vscsi->intr_lock);
			wait_for_completion(&vscsi->wait_idle);
			spin_lock_bh(&vscsi->intr_lock);
		}
		pr_debug("disconnect stop wait\n");

		/* all work drained; finish the state change */
		ibmvscsis_adapter_idle(vscsi);
	}

	spin_unlock_bh(&vscsi->intr_lock);
}
718
/**
 * ibmvscsis_post_disconnect() - Schedule the disconnect
 * @vscsi: Pointer to our adapter structure
 * @new_state: State to move to after disconnecting
 * @flag_bits: Flags to turn on in adapter structure
 *
 * If it's already been scheduled, then see if we need to "upgrade"
 * the new state (if the one passed in is more "severe" than the
 * previous one).
 *
 * PRECONDITION:
 *	interrupt lock is held
 */
static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
				      uint flag_bits)
{
	uint state;

	/* check the validity of the new state */
	switch (new_state) {
	case UNCONFIGURING:
	case ERR_DISCONNECT:
	case ERR_DISCONNECT_RECONNECT:
	case WAIT_IDLE:
		break;

	default:
		dev_err(&vscsi->dev, "post_disconnect: Invalid new state %d\n",
			new_state);
		return;
	}

	vscsi->flags |= flag_bits;

	pr_debug("post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
		 new_state, flag_bits, vscsi->flags, vscsi->state);

	if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
		/* first request: queue the disconnect work item */
		vscsi->flags |= SCHEDULE_DISCONNECT;
		vscsi->new_state = new_state;

		INIT_WORK(&vscsi->proc_work, ibmvscsis_disconnect);
		(void)queue_work(vscsi->work_q, &vscsi->proc_work);
	} else {
		/*
		 * Already scheduled: decide whether the requested state
		 * supersedes the one we are currently headed toward.
		 */
		if (vscsi->new_state)
			state = vscsi->new_state;
		else
			state = vscsi->state;

		switch (state) {
		case NO_QUEUE:
		case UNCONFIGURING:
			break;

		case ERR_DISCONNECTED:
		case ERR_DISCONNECT:
		case UNDEFINED:
			/* only UNCONFIGURING is more severe than these */
			if (new_state == UNCONFIGURING)
				vscsi->new_state = new_state;
			break;

		case ERR_DISCONNECT_RECONNECT:
			switch (new_state) {
			case UNCONFIGURING:
			case ERR_DISCONNECT:
				vscsi->new_state = new_state;
				break;
			default:
				break;
			}
			break;

		case WAIT_ENABLED:
		case PART_UP_WAIT_ENAB:
		case WAIT_IDLE:
		case WAIT_CONNECTION:
		case CONNECTED:
		case SRP_PROCESSING:
			/* any valid disconnect state supersedes these */
			vscsi->new_state = new_state;
			break;

		default:
			break;
		}
	}

	pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
		 vscsi->flags, vscsi->new_state);
}
808
/**
 * ibmvscsis_trans_event() - Handle a Transport Event
 * @vscsi: Pointer to our adapter structure
 * @crq: Pointer to CRQ entry containing the Transport Event
 *
 * Do the logic to close the I_T nexus. This function may not
 * behave to specification.
 *
 * Return: non-zero (the SCHEDULE_DISCONNECT flag value) if a disconnect
 * has been scheduled, 0 otherwise.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_trans_event(struct scsi_info *vscsi,
				  struct viosrp_crq *crq)
{
	long rc = ADAPT_SUCCESS;

	pr_debug("trans_event: format %d, flags 0x%x, state 0x%hx\n",
		 (int)crq->format, vscsi->flags, vscsi->state);

	switch (crq->format) {
	case MIGRATED:
	case PARTNER_FAILED:
	case PARTNER_DEREGISTER:
		/* the partner is gone; drop its per-client state */
		ibmvscsis_delete_client_info(vscsi, true);
		break;

	default:
		rc = ERROR;
		dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
			(uint)crq->format);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
					  RESPONSE_Q_DOWN);
		break;
	}

	/* for a valid event, react according to the current state */
	if (rc == ADAPT_SUCCESS) {
		switch (vscsi->state) {
		case NO_QUEUE:
		case ERR_DISCONNECTED:
		case UNDEFINED:
			break;

		case UNCONFIGURING:
			vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
			break;

		case WAIT_ENABLED:
			break;

		case WAIT_CONNECTION:
			break;

		case CONNECTED:
			ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
						  (RESPONSE_Q_DOWN |
						   TRANS_EVENT));
			break;

		case PART_UP_WAIT_ENAB:
			vscsi->state = WAIT_ENABLED;
			break;

		case SRP_PROCESSING:
			/* log whether work was still outstanding when lost */
			if ((vscsi->debit > 0) ||
			    !list_empty(&vscsi->schedule_q) ||
			    !list_empty(&vscsi->waiting_rsp) ||
			    !list_empty(&vscsi->active_q)) {
				pr_debug("debit %d, sched %d, wait %d, active %d\n",
					 vscsi->debit,
					 (int)list_empty(&vscsi->schedule_q),
					 (int)list_empty(&vscsi->waiting_rsp),
					 (int)list_empty(&vscsi->active_q));
				pr_warn("connection lost with outstanding work\n");
			} else {
				pr_debug("trans_event: SRP Processing, but no outstanding work\n");
			}

			ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
						  (RESPONSE_Q_DOWN |
						   TRANS_EVENT));
			break;

		case ERR_DISCONNECT:
		case ERR_DISCONNECT_RECONNECT:
		case WAIT_IDLE:
			vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
			break;
		}
	}

	/* tell the caller whether a disconnect is now pending */
	rc = vscsi->flags & SCHEDULE_DISCONNECT;

	pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
		 vscsi->flags, vscsi->state, rc);

	return rc;
}
906
/**
 * ibmvscsis_poll_cmd_q() - Poll Command Queue
 * @vscsi: Pointer to our adapter structure
 *
 * Called to handle command elements that may have arrived while
 * interrupts were disabled.  Drains the queue, then re-enables
 * interrupts and re-checks once to close the race with a late arrival.
 *
 * EXECUTION ENVIRONMENT:
 *	intr_lock must be held
 */
static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
{
	struct viosrp_crq *crq;
	long rc;
	bool ack = true;	/* interrupts still need re-enabling */
	volatile u8 valid;

	pr_debug("poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
		 vscsi->flags, vscsi->state, vscsi->cmd_q.index);

	/* non-zero rc means a disconnect is pending: error-drain mode */
	rc = vscsi->flags & SCHEDULE_DISCONNECT;
	crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
	valid = crq->valid;
	dma_rmb();

	while (valid) {
/* re-entered from below after interrupts are re-enabled */
poll_work:
		vscsi->cmd_q.index =
			(vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;

		if (!rc) {
			rc = ibmvscsis_parse_command(vscsi, crq);
		} else {
			if ((uint)crq->valid == VALID_TRANS_EVENT) {
				/*
				 * must service the transport layer events even
				 * in an error state, dont break out until all
				 * the consecutive transport events have been
				 * processed
				 */
				rc = ibmvscsis_trans_event(vscsi, crq);
			} else if (vscsi->flags & TRANS_EVENT) {
				/*
				 * if a tranport event has occurred leave
				 * everything but transport events on the queue
				 */
				pr_debug("poll_cmd_q, ignoring\n");

				/*
				 * need to decrement the queue index so we can
				 * look at the elment again
				 */
				if (vscsi->cmd_q.index)
					vscsi->cmd_q.index -= 1;
				else
					/*
					 * index is at 0 it just wrapped.
					 * have it index last element in q
					 */
					vscsi->cmd_q.index = vscsi->cmd_q.mask;
				break;
			}
		}

		/* consume the element */
		crq->valid = INVALIDATE_CMD_RESP_EL;

		crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
		valid = crq->valid;
		dma_rmb();
	}

	if (!rc) {
		if (ack) {
			/* re-enable, then re-check for a racing arrival */
			vio_enable_interrupts(vscsi->dma_dev);
			ack = false;
			pr_debug("poll_cmd_q, reenabling interrupts\n");
		}
		valid = crq->valid;
		dma_rmb();
		if (valid)
			goto poll_work;
	}

	pr_debug("Leaving poll_cmd_q: rc %ld\n", rc);
}
992
/**
 * ibmvscsis_free_cmd_qs() - Free elements in queue
 * @vscsi: Pointer to our adapter structure
 *
 * Free all of the elements on all queues that are waiting for
 * whatever reason.  Currently this drains the waiting_rsp list,
 * returning each command's resources via ibmvscsis_free_cmd_resources().
 *
 * PRECONDITION:
 *	Called with interrupt lock held
 */
static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
{
	struct ibmvscsis_cmd *cmd, *nxt;

	pr_debug("free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
		 (int)list_empty(&vscsi->waiting_rsp),
		 vscsi->rsp_q_timer.started);

	/* safe iteration: free_cmd_resources unlinks each element */
	list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
		list_del(&cmd->list);
		ibmvscsis_free_cmd_resources(vscsi, cmd);
	}
}
1016
1017/**
1018 * ibmvscsis_get_free_cmd() - Get free command from list
1019 * @vscsi: Pointer to our adapter structure
1020 *
1021 * Must be called with interrupt lock held.
1022 */
1023static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
1024{
1025 struct ibmvscsis_cmd *cmd = NULL;
1026 struct iu_entry *iue;
1027
1028 iue = srp_iu_get(&vscsi->target);
1029 if (iue) {
1030 cmd = list_first_entry_or_null(&vscsi->free_cmd,
1031 struct ibmvscsis_cmd, list);
1032 if (cmd) {
1033 list_del(&cmd->list);
1034 cmd->iue = iue;
1035 cmd->type = UNSET_TYPE;
1036 memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd));
1037 } else {
1038 srp_iu_put(iue);
1039 }
1040 }
1041
1042 return cmd;
1043}
1044
/**
 * ibmvscsis_adapter_idle() - Helper function to handle idle adapter
 * @vscsi: Pointer to our adapter structure
 *
 * This function is called when the adapter is idle when the driver
 * is attempting to clear an error condition.
 * The adapter is considered busy if any of its cmd queues
 * are non-empty. This function can be invoked
 * from the off level disconnect function.
 *
 * EXECUTION ENVIRONMENT:
 *	Process environment called with interrupt lock held
 */
static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
{
	int free_qs = false;

	pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags,
		 vscsi->state);

	/* Only need to free qs if we're disconnecting from client */
	if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
		free_qs = true;

	switch (vscsi->state) {
	case ERR_DISCONNECT_RECONNECT:
		/* tear down and re-register the queue, then wait to connect */
		ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION);
		pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
		break;

	case ERR_DISCONNECT:
		/* hard stop: queue stays down */
		ibmvscsis_free_command_q(vscsi);
		vscsi->flags &= ~DISCONNECT_SCHEDULED;
		vscsi->flags |= RESPONSE_Q_DOWN;
		vscsi->state = ERR_DISCONNECTED;
		pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
			 vscsi->flags, vscsi->state);
		break;

	case WAIT_IDLE:
		/* reset connection bookkeeping for the next phase */
		vscsi->rsp_q_timer.timer_pops = 0;
		vscsi->debit = 0;
		vscsi->credit = 0;
		if (vscsi->flags & TRANS_EVENT) {
			/* client went away: wait for a new connection */
			vscsi->state = WAIT_CONNECTION;
			vscsi->flags &= PRESERVE_FLAG_FIELDS;
		} else {
			/* client still there: resume normal operation */
			vscsi->state = CONNECTED;
			vscsi->flags &= ~DISCONNECT_SCHEDULED;
		}

		pr_debug("adapter_idle, wait: flags 0x%x, state 0x%hx\n",
			 vscsi->flags, vscsi->state);
		/* pick up anything that arrived while draining */
		ibmvscsis_poll_cmd_q(vscsi);
		break;

	case ERR_DISCONNECTED:
		vscsi->flags &= ~DISCONNECT_SCHEDULED;
		pr_debug("adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
			 vscsi->flags, vscsi->state);
		break;

	default:
		dev_err(&vscsi->dev, "adapter_idle: in invalid state %d\n",
			vscsi->state);
		break;
	}

	if (free_qs)
		ibmvscsis_free_cmd_qs(vscsi);

	/*
	 * There is a timing window where we could lose a disconnect request.
	 * The known path to this window occurs during the DISCONNECT_RECONNECT
	 * case above: reset_queue calls free_command_q, which will release the
	 * interrupt lock. During that time, a new post_disconnect call can be
	 * made with a "more severe" state (DISCONNECT or UNCONFIGURING).
	 * Because the DISCONNECT_SCHEDULED flag is already set, post_disconnect
	 * will only set the new_state. Now free_command_q reacquires the intr
	 * lock and clears the DISCONNECT_SCHEDULED flag (using PRESERVE_FLAG_
	 * FIELDS), and the disconnect is lost. This is particularly bad when
	 * the new disconnect was for UNCONFIGURING, since the unconfigure hangs
	 * forever.
	 * Fix is that free command queue sets acr state and acr flags if there
	 * is a change under the lock
	 * note free command queue writes to this state it clears it
	 * before releasing the lock, different drivers call the free command
	 * queue different times so dont initialize above
	 */
	if (vscsi->phyp_acr_state != 0) {
		/*
		 * set any bits in flags that may have been cleared by
		 * a call to free command queue in switch statement
		 * or reset queue
		 */
		vscsi->flags |= vscsi->phyp_acr_flags;
		/* replay the disconnect that raced the unlocked window */
		ibmvscsis_post_disconnect(vscsi, vscsi->phyp_acr_state, 0);
		vscsi->phyp_acr_state = 0;
		vscsi->phyp_acr_flags = 0;

		pr_debug("adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
			 vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
			 vscsi->phyp_acr_state);
	}

	pr_debug("Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
		 vscsi->flags, vscsi->state, vscsi->new_state);
}
1153
/**
 * ibmvscsis_copy_crq_packet() - Copy CRQ Packet
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to command element to use to process the request
 * @crq:	Pointer to CRQ entry containing the request
 *
 * Copy the srp information unit from the hosted
 * partition using remote dma
 *
 * Return: H_SUCCESS when the IU has been copied into @cmd's buffer;
 * SRP_VIOLATION for an invalid length; otherwise the failing
 * h_copy_rdma() return code. Every failure path also schedules an
 * ERR_DISCONNECT_RECONNECT via ibmvscsis_post_disconnect().
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
				      struct ibmvscsis_cmd *cmd,
				      struct viosrp_crq *crq)
{
	struct iu_entry *iue = cmd->iue;
	long rc = 0;
	u16 len;

	/* Reject lengths the preallocated IU buffer cannot hold. */
	len = be16_to_cpu(crq->IU_length);
	if ((len > SRP_MAX_IU_LEN) || (len == 0)) {
		dev_err(&vscsi->dev, "copy_crq: Invalid len %d passed", len);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return SRP_VIOLATION;
	}

	/* Pull the IU out of the client partition's memory. */
	rc = h_copy_rdma(len, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(crq->IU_data_ptr),
			 vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma);

	switch (rc) {
	case H_SUCCESS:
		/* Remember where to copy the response back to. */
		cmd->init_time = mftb();
		iue->remote_token = crq->IU_data_ptr;
		iue->iu_len = len;
		pr_debug("copy_crq: ioba 0x%llx, init_time 0x%llx\n",
			 be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
		break;
	case H_PERMISSION:
		/*
		 * H_PERMISSION may indicate the client partition went
		 * away; mark the response queue down in that case.
		 */
		if (connection_broken(vscsi))
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT,
						  (RESPONSE_Q_DOWN |
						   CLIENT_FAILED));
		else
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);

		dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
			rc);
		break;
	case H_DEST_PARM:
	case H_SOURCE_PARM:
	default:
		dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	}

	return rc;
}
1217
/**
 * ibmvscsis_adapter_info - Service an Adapter Info MAnagement Data gram
 * @vscsi:	Pointer to our adapter structure
 * @iue:	Information Unit containing the Adapter Info MAD request
 *
 * Exchanges adapter information with the client: the client's info is
 * pulled in with one remote DMA copy, selected fields are cached in
 * vscsi->client_data, then our own info is pushed back with a second
 * remote DMA copy into the same client buffer.
 *
 * Return: 0 (with mad->common.status set to VIOSRP_MAD_FAILED) for a
 * bad length or allocation failure; otherwise the h_copy_rdma() result.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, adapter lock is held
 */
static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
				   struct iu_entry *iue)
{
	struct viosrp_adapter_info *mad = &vio_iu(iue)->mad.adapter_info;
	struct mad_adapter_info_data *info;
	uint flag_bits = 0;
	dma_addr_t token;
	long rc;

	mad->common.status = cpu_to_be16(VIOSRP_MAD_SUCCESS);

	/* Reject client buffers larger than the structure we exchange. */
	if (be16_to_cpu(mad->common.length) > sizeof(*info)) {
		mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
		return 0;
	}

	/* DMA-able bounce buffer for both directions of the exchange. */
	info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
				  GFP_KERNEL);
	if (!info) {
		dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
			iue->target);
		mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
		return 0;
	}

	/* Get remote info */
	rc = h_copy_rdma(be16_to_cpu(mad->common.length),
			 vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(mad->buffer),
			 vscsi->dds.window[LOCAL].liobn, token);

	if (rc != H_SUCCESS) {
		if (rc == H_PERMISSION) {
			if (connection_broken(vscsi))
				flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
		}
		pr_warn("adapter_info: h_copy_rdma from client failed, rc %ld\n",
			rc);
		pr_debug("adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
			 be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
					  flag_bits);
		goto free_dma;
	}

	/*
	 * Copy client info, but ignore partition number, which we
	 * already got from phyp - unless we failed to get it from
	 * phyp (e.g. if we're running on a p5 system).
	 */
	if (vscsi->client_data.partition_number == 0)
		vscsi->client_data.partition_number =
			be32_to_cpu(info->partition_number);
	/*
	 * NOTE(review): strncpy may leave these fields unterminated if
	 * the client fills them completely - confirm all consumers treat
	 * them as fixed-width fields rather than C strings.
	 */
	strncpy(vscsi->client_data.srp_version, info->srp_version,
		sizeof(vscsi->client_data.srp_version));
	strncpy(vscsi->client_data.partition_name, info->partition_name,
		sizeof(vscsi->client_data.partition_name));
	vscsi->client_data.mad_version = be32_to_cpu(info->mad_version);
	vscsi->client_data.os_type = be32_to_cpu(info->os_type);

	/* Copy our info */
	strncpy(info->srp_version, SRP_VERSION,
		sizeof(info->srp_version));
	strncpy(info->partition_name, vscsi->dds.partition_name,
		sizeof(info->partition_name));
	info->partition_number = cpu_to_be32(vscsi->dds.partition_num);
	info->mad_version = cpu_to_be32(MAD_VERSION_1);
	info->os_type = cpu_to_be32(LINUX);
	memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
	info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE);

	/* Make our info visible to the device before the copy back. */
	dma_wmb();
	rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
			 token, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(mad->buffer));
	switch (rc) {
	case H_SUCCESS:
		break;

	case H_SOURCE_PARM:
	case H_DEST_PARM:
	case H_PERMISSION:
		if (connection_broken(vscsi))
			flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
		/* Fall through - all copy failures disconnect. */
	default:
		dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi,
					  ERR_DISCONNECT_RECONNECT,
					  flag_bits);
		break;
	}

free_dma:
	dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
	pr_debug("Leaving adapter_info, rc %ld\n", rc);

	return rc;
}
1325
/**
 * ibmvscsis_cap_mad() - Service a Capabilities MAnagement Data gram
 * @vscsi:	Pointer to our adapter structure
 * @iue:	Information Unit containing the Capabilities MAD request
 *
 * Pulls the client's capability list in via remote DMA, walks each
 * entry (none are currently supported, so each is marked unsupported
 * and its flag cleared), then pushes the annotated list back to the
 * client.
 *
 * NOTE: if you return an error from this routine you must be
 * disconnecting or you will cause a hang
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt called with adapter lock held
 */
static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
{
	struct viosrp_capabilities *mad = &vio_iu(iue)->mad.capabilities;
	struct capabilities *cap;
	struct mad_capability_common *common;
	dma_addr_t token;
	u16 olen, len, status, min_len, cap_len;
	u32 flag;
	uint flag_bits = 0;
	long rc = 0;

	olen = be16_to_cpu(mad->common.length);
	/*
	 * struct capabilities hardcodes a couple capabilities after the
	 * header, but the capabilities can actually be in any order.
	 */
	min_len = offsetof(struct capabilities, migration);
	if ((olen < min_len) || (olen > PAGE_SIZE)) {
		pr_warn("cap_mad: invalid len %d\n", olen);
		mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
		return 0;
	}

	/* DMA-able bounce buffer for the capability list. */
	cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
				 GFP_KERNEL);
	if (!cap) {
		dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
			iue->target);
		mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
		return 0;
	}
	rc = h_copy_rdma(olen, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(mad->buffer),
			 vscsi->dds.window[LOCAL].liobn, token);
	if (rc == H_SUCCESS) {
		strncpy(cap->name, dev_name(&vscsi->dma_dev->dev),
			SRP_MAX_LOC_LEN);

		/* Walk the variable-length capability entries. */
		len = olen - min_len;
		status = VIOSRP_MAD_SUCCESS;
		common = (struct mad_capability_common *)&cap->migration;

		while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
			pr_debug("cap_mad: len left %hd, cap type %d, cap len %hd\n",
				 len, be32_to_cpu(common->cap_type),
				 be16_to_cpu(common->length));

			/* Each entry must fit inside the remaining buffer. */
			cap_len = be16_to_cpu(common->length);
			if (cap_len > len) {
				dev_err(&vscsi->dev, "cap_mad: cap len mismatch with total len\n");
				status = VIOSRP_MAD_FAILED;
				break;
			}

			/* A zero-length entry would loop forever. */
			if (cap_len == 0) {
				dev_err(&vscsi->dev, "cap_mad: cap len is 0\n");
				status = VIOSRP_MAD_FAILED;
				break;
			}

			/*
			 * NOTE(review): cap_type is compared without a
			 * byte swap; harmless today since only the
			 * default case exists, but confirm before adding
			 * supported capability types here.
			 */
			switch (common->cap_type) {
			default:
				pr_debug("cap_mad: unsupported capability\n");
				common->server_support = 0;
				flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
				cap->flags &= ~flag;
				break;
			}

			/* Advance to the next entry in the list. */
			len = len - cap_len;
			common = (struct mad_capability_common *)
				((char *)common + cap_len);
		}

		mad->common.status = cpu_to_be16(status);

		/* Push the annotated capability list back to the client. */
		dma_wmb();
		rc = h_copy_rdma(olen, vscsi->dds.window[LOCAL].liobn, token,
				 vscsi->dds.window[REMOTE].liobn,
				 be64_to_cpu(mad->buffer));

		if (rc != H_SUCCESS) {
			pr_debug("cap_mad: failed to copy to client, rc %ld\n",
				 rc);

			if (rc == H_PERMISSION) {
				if (connection_broken(vscsi))
					flag_bits = (RESPONSE_Q_DOWN |
						     CLIENT_FAILED);
			}

			pr_warn("cap_mad: error copying data to client, rc %ld\n",
				rc);
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT,
						  flag_bits);
		}
	}

	dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);

	pr_debug("Leaving cap_mad, rc %ld, client_cap 0x%x\n",
		 rc, vscsi->client_cap);

	return rc;
}
1443
1444/**
1445 * ibmvscsis_process_mad() - Service a MAnagement Data gram
1446 * @vscsi: Pointer to our adapter structure
1447 * @iue: Information Unit containing the MAD request
1448 *
1449 * Must be called with interrupt lock held.
1450 */
1451static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1452{
1453 struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
1454 struct viosrp_empty_iu *empty;
1455 long rc = ADAPT_SUCCESS;
1456
1457 switch (be32_to_cpu(mad->type)) {
1458 case VIOSRP_EMPTY_IU_TYPE:
1459 empty = &vio_iu(iue)->mad.empty_iu;
1460 vscsi->empty_iu_id = be64_to_cpu(empty->buffer);
1461 vscsi->empty_iu_tag = be64_to_cpu(empty->common.tag);
1462 mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1463 break;
1464 case VIOSRP_ADAPTER_INFO_TYPE:
1465 rc = ibmvscsis_adapter_info(vscsi, iue);
1466 break;
1467 case VIOSRP_CAPABILITIES_TYPE:
1468 rc = ibmvscsis_cap_mad(vscsi, iue);
1469 break;
1470 case VIOSRP_ENABLE_FAST_FAIL:
1471 if (vscsi->state == CONNECTED) {
1472 vscsi->fast_fail = true;
1473 mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
1474 } else {
1475 pr_warn("fast fail mad sent after login\n");
1476 mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
1477 }
1478 break;
1479 default:
1480 mad->status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED);
1481 break;
1482 }
1483
1484 return rc;
1485}
1486
/**
 * srp_snd_msg_failed() - Handle an error when sending a response
 * @vscsi:	Pointer to our adapter structure
 * @rc:	The return code from the h_send_crq command
 *
 * For a hard failure the response queue is marked down and a
 * disconnect is scheduled. H_DROPPED (client queue full) instead arms
 * the response-queue retry timer, unless the client has been stuck for
 * MAX_TIMER_POPS while not in SRP_PROCESSING, in which case the
 * connection is torn down.
 *
 * Must be called with interrupt lock held.
 */
static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
{
	ktime_t kt;

	if (rc != H_DROPPED) {
		/* Hard failure: drop all queued responses. */
		ibmvscsis_free_cmd_qs(vscsi);

		if (rc == H_CLOSED)
			vscsi->flags |= CLIENT_FAILED;

		/* don't flag the same problem multiple times */
		if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
			vscsi->flags |= RESPONSE_Q_DOWN;
			/* Only log if we aren't already disconnecting. */
			if (!(vscsi->state & (ERR_DISCONNECT |
					      ERR_DISCONNECT_RECONNECT |
					      ERR_DISCONNECTED | UNDEFINED))) {
				dev_err(&vscsi->dev, "snd_msg_failed: setting RESPONSE_Q_DOWN, state 0x%hx, flags 0x%x, rc %ld\n",
					vscsi->state, vscsi->flags, rc);
			}
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);
		}
		return;
	}

	/*
	 * The response queue is full.
	 * If the server is processing SRP requests, i.e.
	 * the client has successfully done an
	 * SRP_LOGIN, then it will wait forever for room in
	 * the queue.  However if the system admin
	 * is attempting to unconfigure the server then one
	 * or more children will be in a state where
	 * they are being removed. So if there is even one
	 * child being removed then the driver assumes
	 * the system admin is attempting to break the
	 * connection with the client and MAX_TIMER_POPS
	 * is honored.
	 */
	if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
	    (vscsi->state == SRP_PROCESSING)) {
		pr_debug("snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
			 vscsi->flags, (int)vscsi->rsp_q_timer.started,
			 vscsi->rsp_q_timer.timer_pops);

		/*
		 * Check if the timer is running; if it
		 * is not then start it up.
		 */
		if (!vscsi->rsp_q_timer.started) {
			if (vscsi->rsp_q_timer.timer_pops <
			    MAX_TIMER_POPS) {
				/* Short retry interval at first. */
				kt = ktime_set(0, WAIT_NANO_SECONDS);
			} else {
				/*
				 * slide the timeslice if the maximum
				 * timer pops have already happened
				 */
				kt = ktime_set(WAIT_SECONDS, 0);
			}

			vscsi->rsp_q_timer.started = true;
			hrtimer_start(&vscsi->rsp_q_timer.timer, kt,
				      HRTIMER_MODE_REL);
		}
	} else {
		/*
		 * TBD: Do we need to worry about this? Need to get
		 * remove working.
		 */
		/*
		 * waited a long time and it appears the system admin
		 * is bring this driver down
		 */
		vscsi->flags |= RESPONSE_Q_DOWN;
		ibmvscsis_free_cmd_qs(vscsi);
		/*
		 * if the driver is already attempting to disconnect
		 * from the client and has already logged an error
		 * trace this event but don't put it in the error log
		 */
		if (!(vscsi->state & (ERR_DISCONNECT |
				      ERR_DISCONNECT_RECONNECT |
				      ERR_DISCONNECTED | UNDEFINED))) {
			dev_err(&vscsi->dev, "client crq full too long\n");
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT,
						  0);
		}
	}
}
1585
1586/**
1587 * ibmvscsis_send_messages() - Send a Response
1588 * @vscsi: Pointer to our adapter structure
1589 *
1590 * Send a response, first checking the waiting queue. Responses are
1591 * sent in order they are received. If the response cannot be sent,
1592 * because the client queue is full, it stays on the waiting queue.
1593 *
1594 * PRECONDITION:
1595 * Called with interrupt lock held
1596 */
1597static void ibmvscsis_send_messages(struct scsi_info *vscsi)
1598{
1599 u64 msg_hi = 0;
1600 /* note do not attmempt to access the IU_data_ptr with this pointer
1601 * it is not valid
1602 */
1603 struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
1604 struct ibmvscsis_cmd *cmd, *nxt;
1605 struct iu_entry *iue;
1606 long rc = ADAPT_SUCCESS;
1607
1608 if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
1609 list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
Bryant G. Ly88a678b2016-06-28 17:05:35 -05001610 iue = cmd->iue;
1611
1612 crq->valid = VALID_CMD_RESP_EL;
1613 crq->format = cmd->rsp.format;
1614
1615 if (cmd->flags & CMD_FAST_FAIL)
1616 crq->status = VIOSRP_ADAPTER_FAIL;
1617
1618 crq->IU_length = cpu_to_be16(cmd->rsp.len);
1619
1620 rc = h_send_crq(vscsi->dma_dev->unit_address,
1621 be64_to_cpu(msg_hi),
1622 be64_to_cpu(cmd->rsp.tag));
1623
1624 pr_debug("send_messages: tag 0x%llx, rc %ld\n",
1625 be64_to_cpu(cmd->rsp.tag), rc);
1626
1627 /* if all ok free up the command element resources */
1628 if (rc == H_SUCCESS) {
1629 /* some movement has occurred */
1630 vscsi->rsp_q_timer.timer_pops = 0;
1631 list_del(&cmd->list);
1632
1633 ibmvscsis_free_cmd_resources(vscsi, cmd);
1634 } else {
1635 srp_snd_msg_failed(vscsi, rc);
1636 break;
1637 }
1638 }
1639
1640 if (!rc) {
1641 /*
1642 * The timer could pop with the queue empty. If
1643 * this happens, rc will always indicate a
1644 * success; clear the pop count.
1645 */
1646 vscsi->rsp_q_timer.timer_pops = 0;
1647 }
1648 } else {
1649 ibmvscsis_free_cmd_qs(vscsi);
1650 }
1651}
1652
/*
 * ibmvscsis_send_mad_resp() - Copy a MAD response back to the client and
 * queue the CRQ notification.
 *
 * On a successful RDMA copy the command is queued on waiting_rsp and
 * ibmvscsis_send_messages() is kicked; on failure the command's
 * resources are released and a disconnect is scheduled.
 *
 * Called with intr lock held.
 */
static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
				    struct ibmvscsis_cmd *cmd,
				    struct viosrp_crq *crq)
{
	struct iu_entry *iue = cmd->iue;
	struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
	uint flag_bits = 0;
	long rc;

	/* Ensure the MAD status is visible before the copy back. */
	dma_wmb();
	rc = h_copy_rdma(sizeof(struct mad_common),
			 vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
			 vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(crq->IU_data_ptr));
	if (!rc) {
		cmd->rsp.format = VIOSRP_MAD_FORMAT;
		cmd->rsp.len = sizeof(struct mad_common);
		cmd->rsp.tag = mad->tag;
		list_add_tail(&cmd->list, &vscsi->waiting_rsp);
		ibmvscsis_send_messages(vscsi);
	} else {
		pr_debug("Error sending mad response, rc %ld\n", rc);
		if (rc == H_PERMISSION) {
			if (connection_broken(vscsi))
				flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
		}
		dev_err(&vscsi->dev, "mad: failed to copy to client, rc %ld\n",
			rc);

		ibmvscsis_free_cmd_resources(vscsi, cmd);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
					  flag_bits);
	}
}
1688
1689/**
1690 * ibmvscsis_mad() - Service a MAnagement Data gram.
1691 * @vscsi: Pointer to our adapter structure
1692 * @crq: Pointer to the CRQ entry containing the MAD request
1693 *
1694 * EXECUTION ENVIRONMENT:
1695 * Interrupt called with adapter lock held
1696 */
1697static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
1698{
1699 struct iu_entry *iue;
1700 struct ibmvscsis_cmd *cmd;
1701 struct mad_common *mad;
1702 long rc = ADAPT_SUCCESS;
1703
1704 switch (vscsi->state) {
1705 /*
1706 * We have not exchanged Init Msgs yet, so this MAD was sent
1707 * before the last Transport Event; client will not be
1708 * expecting a response.
1709 */
1710 case WAIT_CONNECTION:
1711 pr_debug("mad: in Wait Connection state, ignoring MAD, flags %d\n",
1712 vscsi->flags);
1713 return ADAPT_SUCCESS;
1714
1715 case SRP_PROCESSING:
1716 case CONNECTED:
1717 break;
1718
1719 /*
1720 * We should never get here while we're in these states.
1721 * Just log an error and get out.
1722 */
1723 case UNCONFIGURING:
1724 case WAIT_IDLE:
1725 case ERR_DISCONNECT:
1726 case ERR_DISCONNECT_RECONNECT:
1727 default:
1728 dev_err(&vscsi->dev, "mad: invalid adapter state %d for mad\n",
1729 vscsi->state);
1730 return ADAPT_SUCCESS;
1731 }
1732
1733 cmd = ibmvscsis_get_free_cmd(vscsi);
1734 if (!cmd) {
1735 dev_err(&vscsi->dev, "mad: failed to get cmd, debit %d\n",
1736 vscsi->debit);
1737 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1738 return ERROR;
1739 }
1740 iue = cmd->iue;
1741 cmd->type = ADAPTER_MAD;
1742
1743 rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
1744 if (!rc) {
1745 mad = (struct mad_common *)&vio_iu(iue)->mad;
1746
1747 pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
1748
1749 if (be16_to_cpu(mad->length) < 0) {
1750 dev_err(&vscsi->dev, "mad: length is < 0\n");
1751 ibmvscsis_post_disconnect(vscsi,
1752 ERR_DISCONNECT_RECONNECT, 0);
1753 rc = SRP_VIOLATION;
1754 } else {
1755 rc = ibmvscsis_process_mad(vscsi, iue);
1756 }
1757
1758 pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
1759 rc);
1760
1761 if (!rc)
1762 ibmvscsis_send_mad_resp(vscsi, cmd, crq);
1763 } else {
1764 ibmvscsis_free_cmd_resources(vscsi, cmd);
1765 }
1766
1767 pr_debug("Leaving mad, rc %ld\n", rc);
1768 return rc;
1769}
1770
1771/**
1772 * ibmvscsis_login_rsp() - Create/copy a login response notice to the client
1773 * @vscsi: Pointer to our adapter structure
1774 * @cmd: Pointer to the command for the SRP Login request
1775 *
1776 * EXECUTION ENVIRONMENT:
1777 * Interrupt, interrupt lock held
1778 */
1779static long ibmvscsis_login_rsp(struct scsi_info *vscsi,
1780 struct ibmvscsis_cmd *cmd)
1781{
1782 struct iu_entry *iue = cmd->iue;
1783 struct srp_login_rsp *rsp = &vio_iu(iue)->srp.login_rsp;
1784 struct format_code *fmt;
1785 uint flag_bits = 0;
1786 long rc = ADAPT_SUCCESS;
1787
1788 memset(rsp, 0, sizeof(struct srp_login_rsp));
1789
1790 rsp->opcode = SRP_LOGIN_RSP;
1791 rsp->req_lim_delta = cpu_to_be32(vscsi->request_limit);
1792 rsp->tag = cmd->rsp.tag;
1793 rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
1794 rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
1795 fmt = (struct format_code *)&rsp->buf_fmt;
1796 fmt->buffers = SUPPORTED_FORMATS;
1797 vscsi->credit = 0;
1798
1799 cmd->rsp.len = sizeof(struct srp_login_rsp);
1800
1801 dma_wmb();
1802 rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
1803 iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
1804 be64_to_cpu(iue->remote_token));
1805
1806 switch (rc) {
1807 case H_SUCCESS:
1808 break;
1809
1810 case H_PERMISSION:
1811 if (connection_broken(vscsi))
1812 flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
1813 dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
1814 rc);
1815 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
1816 flag_bits);
1817 break;
1818 case H_SOURCE_PARM:
1819 case H_DEST_PARM:
1820 default:
1821 dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
1822 rc);
1823 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1824 break;
1825 }
1826
1827 return rc;
1828}
1829
1830/**
1831 * ibmvscsis_srp_login_rej() - Create/copy a login rejection notice to client
1832 * @vscsi: Pointer to our adapter structure
1833 * @cmd: Pointer to the command for the SRP Login request
1834 * @reason: The reason the SRP Login is being rejected, per SRP protocol
1835 *
1836 * EXECUTION ENVIRONMENT:
1837 * Interrupt, interrupt lock held
1838 */
1839static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
1840 struct ibmvscsis_cmd *cmd, u32 reason)
1841{
1842 struct iu_entry *iue = cmd->iue;
1843 struct srp_login_rej *rej = &vio_iu(iue)->srp.login_rej;
1844 struct format_code *fmt;
1845 uint flag_bits = 0;
1846 long rc = ADAPT_SUCCESS;
1847
1848 memset(rej, 0, sizeof(*rej));
1849
1850 rej->opcode = SRP_LOGIN_REJ;
1851 rej->reason = cpu_to_be32(reason);
1852 rej->tag = cmd->rsp.tag;
1853 fmt = (struct format_code *)&rej->buf_fmt;
1854 fmt->buffers = SUPPORTED_FORMATS;
1855
1856 cmd->rsp.len = sizeof(*rej);
1857
1858 dma_wmb();
1859 rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
1860 iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
1861 be64_to_cpu(iue->remote_token));
1862
1863 switch (rc) {
1864 case H_SUCCESS:
1865 break;
1866 case H_PERMISSION:
1867 if (connection_broken(vscsi))
1868 flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
1869 dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
1870 rc);
1871 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
1872 flag_bits);
1873 break;
1874 case H_SOURCE_PARM:
1875 case H_DEST_PARM:
1876 default:
1877 dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
1878 rc);
1879 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
1880 break;
1881 }
1882
1883 return rc;
1884}
1885
1886static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
1887{
1888 char *name = tport->tport_name;
1889 struct ibmvscsis_nexus *nexus;
1890 int rc;
1891
1892 if (tport->ibmv_nexus) {
1893 pr_debug("tport->ibmv_nexus already exists\n");
1894 return 0;
1895 }
1896
1897 nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
1898 if (!nexus) {
1899 pr_err("Unable to allocate struct ibmvscsis_nexus\n");
1900 return -ENOMEM;
1901 }
1902
1903 nexus->se_sess = target_alloc_session(&tport->se_tpg, 0, 0,
1904 TARGET_PROT_NORMAL, name, nexus,
1905 NULL);
1906 if (IS_ERR(nexus->se_sess)) {
1907 rc = PTR_ERR(nexus->se_sess);
1908 goto transport_init_fail;
1909 }
1910
1911 tport->ibmv_nexus = nexus;
1912
1913 return 0;
1914
1915transport_init_fail:
1916 kfree(nexus);
1917 return rc;
1918}
1919
1920static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
1921{
1922 struct se_session *se_sess;
1923 struct ibmvscsis_nexus *nexus;
1924
1925 nexus = tport->ibmv_nexus;
1926 if (!nexus)
1927 return -ENODEV;
1928
1929 se_sess = nexus->se_sess;
1930 if (!se_sess)
1931 return -ENODEV;
1932
1933 /*
1934 * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
1935 */
Bryant G. Ly712db3e2016-08-31 11:28:59 -05001936 target_wait_for_sess_cmds(se_sess);
1937 transport_deregister_session_configfs(se_sess);
Bryant G. Ly88a678b2016-06-28 17:05:35 -05001938 transport_deregister_session(se_sess);
1939 tport->ibmv_nexus = NULL;
1940 kfree(nexus);
1941
1942 return 0;
1943}
1944
/**
 * ibmvscsis_srp_login() - Process an SRP Login Request
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Command element to use to process the SRP Login request
 * @crq:	Pointer to CRQ entry containing the SRP Login request
 *
 * Validates the login request parameters, establishes the I_T nexus,
 * and sends either an SRP_LOGIN_RSP or an SRP_LOGIN_REJ (with the
 * first applicable reject reason) back to the client.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, called with interrupt lock held
 */
static long ibmvscsis_srp_login(struct scsi_info *vscsi,
				struct ibmvscsis_cmd *cmd,
				struct viosrp_crq *crq)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_login_req *req = &vio_iu(iue)->srp.login_req;
	struct port_id {
		__be64 id_extension;
		__be64 io_guid;
	} *iport, *tport;
	struct format_code *fmt;
	u32 reason = 0x0;
	long rc = ADAPT_SUCCESS;

	/* Validate the request; the first failing check wins. */
	iport = (struct port_id *)req->initiator_port_id;
	tport = (struct port_id *)req->target_port_id;
	fmt = (struct format_code *)&req->req_buf_fmt;
	if (be32_to_cpu(req->req_it_iu_len) > SRP_MAX_IU_LEN)
		reason = SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE;
	else if (be32_to_cpu(req->req_it_iu_len) < 64)
		reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
	else if ((be64_to_cpu(iport->id_extension) > (MAX_NUM_PORTS - 1)) ||
		 (be64_to_cpu(tport->id_extension) > (MAX_NUM_PORTS - 1)))
		reason = SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL;
	else if (req->req_flags & SRP_MULTICHAN_MULTI)
		reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED;
	else if (fmt->buffers & (~SUPPORTED_FORMATS))
		reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
	else if ((fmt->buffers & SUPPORTED_FORMATS) == 0)
		reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;

	/* Only one login (channel) is supported at a time. */
	if (vscsi->state == SRP_PROCESSING)
		reason = SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED;

	/*
	 * NOTE(review): the nexus is created even when a reject reason
	 * has already been set above - confirm this is intentional
	 * (ibmvscsis_make_nexus() is a no-op if the nexus exists).
	 */
	rc = ibmvscsis_make_nexus(&vscsi->tport);
	if (rc)
		reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;

	cmd->rsp.format = VIOSRP_SRP_FORMAT;
	cmd->rsp.tag = req->tag;

	pr_debug("srp_login: reason 0x%x\n", reason);

	/* Send either a rejection or an acceptance response. */
	if (reason)
		rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
	else
		rc = ibmvscsis_login_rsp(vscsi, cmd);

	if (!rc) {
		if (!reason)
			vscsi->state = SRP_PROCESSING;

		list_add_tail(&cmd->list, &vscsi->waiting_rsp);
		ibmvscsis_send_messages(vscsi);
	} else {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
	}

	pr_debug("Leaving srp_login, rc %ld\n", rc);
	return rc;
}
2015
2016/**
2017 * ibmvscsis_srp_i_logout() - Helper Function to close I_T Nexus
2018 * @vscsi: Pointer to our adapter structure
2019 * @cmd: Command element to use to process the Implicit Logout request
2020 * @crq: Pointer to CRQ entry containing the Implicit Logout request
2021 *
2022 * Do the logic to close the I_T nexus. This function may not
2023 * behave to specification.
2024 *
2025 * EXECUTION ENVIRONMENT:
2026 * Interrupt, interrupt lock held
2027 */
2028static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi,
2029 struct ibmvscsis_cmd *cmd,
2030 struct viosrp_crq *crq)
2031{
2032 struct iu_entry *iue = cmd->iue;
2033 struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout;
2034 long rc = ADAPT_SUCCESS;
2035
2036 if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) ||
2037 !list_empty(&vscsi->waiting_rsp)) {
2038 dev_err(&vscsi->dev, "i_logout: outstanding work\n");
2039 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2040 } else {
2041 cmd->rsp.format = SRP_FORMAT;
2042 cmd->rsp.tag = log_out->tag;
2043 cmd->rsp.len = sizeof(struct mad_common);
2044 list_add_tail(&cmd->list, &vscsi->waiting_rsp);
2045 ibmvscsis_send_messages(vscsi);
2046
2047 ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
2048 }
2049
2050 return rc;
2051}
2052
/*
 * ibmvscsis_srp_cmd() - Dispatch an SRP request from the CRQ.
 *
 * Enforces the request limit, allocates a command element, copies the
 * IU in from the client, and dispatches by SRP opcode: logins are
 * handled inline; SRP_CMD and SRP_TSK_MGMT are queued to the work
 * queue (incrementing the debit); anything else disconnects.
 *
 * Called with intr lock held.
 */
static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
	struct ibmvscsis_cmd *cmd;
	struct iu_entry *iue;
	struct srp_cmd *srp;
	struct srp_tsk_mgmt *tsk;
	long rc;

	/* The client may not exceed its negotiated request limit. */
	if (vscsi->request_limit - vscsi->debit <= 0) {
		/* Client has exceeded request limit */
		dev_err(&vscsi->dev, "Client exceeded the request limit (%d), debit %d\n",
			vscsi->request_limit, vscsi->debit);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return;
	}

	cmd = ibmvscsis_get_free_cmd(vscsi);
	if (!cmd) {
		dev_err(&vscsi->dev, "srp_cmd failed to get cmd, debit %d\n",
			vscsi->debit);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return;
	}
	iue = cmd->iue;
	srp = &vio_iu(iue)->srp.cmd;

	/* Pull the IU from the client; failure paths already disconnect. */
	rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
	if (rc) {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		return;
	}

	if (vscsi->state == SRP_PROCESSING) {
		switch (srp->opcode) {
		case SRP_LOGIN_REQ:
			rc = ibmvscsis_srp_login(vscsi, cmd, crq);
			break;

		case SRP_TSK_MGMT:
			tsk = &vio_iu(iue)->srp.tsk_mgmt;
			pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag,
				 tsk->tag);
			cmd->rsp.tag = tsk->tag;
			vscsi->debit += 1;
			cmd->type = TASK_MANAGEMENT;
			list_add_tail(&cmd->list, &vscsi->schedule_q);
			queue_work(vscsi->work_q, &cmd->work);
			break;

		case SRP_CMD:
			pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag,
				 srp->tag);
			cmd->rsp.tag = srp->tag;
			vscsi->debit += 1;
			cmd->type = SCSI_CDB;
			/*
			 * We want to keep track of work waiting for
			 * the workqueue.
			 */
			list_add_tail(&cmd->list, &vscsi->schedule_q);
			queue_work(vscsi->work_q, &cmd->work);
			break;

		case SRP_I_LOGOUT:
			rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq);
			break;

		case SRP_CRED_RSP:
		case SRP_AER_RSP:
		default:
			ibmvscsis_free_cmd_resources(vscsi, cmd);
			dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n",
				(uint)srp->opcode);
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);
			break;
		}
	} else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) {
		/* A login is the only SRP request valid before SRP_PROCESSING. */
		rc = ibmvscsis_srp_login(vscsi, cmd, crq);
	} else {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n",
			vscsi->state);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
	}
}
2140
/**
 * ibmvscsis_ping_response() - Respond to a ping request
 * @vscsi:	Pointer to our adapter structure
 *
 * Let the client know that the server is alive and waiting on
 * its native I/O stack.
 * If any type of error occurs from the call to queue a ping
 * response then the client is either not accepting or receiving
 * interrupts. Disconnect with an error.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_ping_response(struct scsi_info *vscsi)
{
	struct viosrp_crq *crq;
	u64 buffer[2] = { 0, 0 };
	long rc;

	/* Build the PING_RESPONSE CRQ message in the local buffer. */
	crq = (struct viosrp_crq *)&buffer;
	crq->valid = VALID_CMD_RESP_EL;
	crq->format = (u8)MESSAGE_IN_CRQ;
	crq->status = PING_RESPONSE;

	rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
			cpu_to_be64(buffer[MSG_LOW]));

	switch (rc) {
	case H_SUCCESS:
		break;
	case H_CLOSED:
		vscsi->flags |= CLIENT_FAILED;
		/* Fall through */
	case H_DROPPED:
		vscsi->flags |= RESPONSE_Q_DOWN;
		/* Fall through */
	case H_REMOTE_PARM:
		dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	default:
		dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
		break;
	}

	return rc;
}
2189
2190/**
2191 * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
2192 * @vscsi: Pointer to our adapter structure
2193 *
2194 * Must be called with interrupt lock held.
2195 */
2196static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
2197{
2198 long rc = ADAPT_SUCCESS;
2199
2200 switch (vscsi->state) {
2201 case NO_QUEUE:
2202 case ERR_DISCONNECT:
2203 case ERR_DISCONNECT_RECONNECT:
2204 case ERR_DISCONNECTED:
2205 case UNCONFIGURING:
2206 case UNDEFINED:
2207 rc = ERROR;
2208 break;
2209
2210 case WAIT_CONNECTION:
2211 vscsi->state = CONNECTED;
2212 break;
2213
2214 case WAIT_IDLE:
2215 case SRP_PROCESSING:
2216 case CONNECTED:
2217 case WAIT_ENABLED:
2218 case PART_UP_WAIT_ENAB:
2219 default:
2220 rc = ERROR;
2221 dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
2222 vscsi->state);
2223 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2224 break;
2225 }
2226
2227 return rc;
2228}
2229
2230/**
2231 * ibmvscsis_handle_init_msg() - Respond to an Init Message
2232 * @vscsi: Pointer to our adapter structure
2233 *
2234 * Must be called with interrupt lock held.
2235 */
2236static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
2237{
2238 long rc = ADAPT_SUCCESS;
2239
2240 switch (vscsi->state) {
2241 case WAIT_ENABLED:
2242 vscsi->state = PART_UP_WAIT_ENAB;
2243 break;
2244
2245 case WAIT_CONNECTION:
2246 rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
2247 switch (rc) {
2248 case H_SUCCESS:
2249 vscsi->state = CONNECTED;
2250 break;
2251
2252 case H_PARAMETER:
2253 dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
2254 rc);
2255 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
2256 break;
2257
2258 case H_DROPPED:
2259 dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
2260 rc);
2261 rc = ERROR;
2262 ibmvscsis_post_disconnect(vscsi,
2263 ERR_DISCONNECT_RECONNECT, 0);
2264 break;
2265
2266 case H_CLOSED:
2267 pr_warn("init_msg: failed to send, rc %ld\n", rc);
2268 rc = 0;
2269 break;
2270 }
2271 break;
2272
2273 case UNDEFINED:
2274 rc = ERROR;
2275 break;
2276
2277 case UNCONFIGURING:
2278 break;
2279
2280 case PART_UP_WAIT_ENAB:
2281 case CONNECTED:
2282 case SRP_PROCESSING:
2283 case WAIT_IDLE:
2284 case NO_QUEUE:
2285 case ERR_DISCONNECT:
2286 case ERR_DISCONNECT_RECONNECT:
2287 case ERR_DISCONNECTED:
2288 default:
2289 rc = ERROR;
2290 dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
2291 vscsi->state);
2292 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2293 break;
2294 }
2295
2296 return rc;
2297}
2298
2299/**
2300 * ibmvscsis_init_msg() - Respond to an init message
2301 * @vscsi: Pointer to our adapter structure
2302 * @crq: Pointer to CRQ element containing the Init Message
2303 *
2304 * EXECUTION ENVIRONMENT:
2305 * Interrupt, interrupt lock held
2306 */
2307static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
2308{
2309 long rc = ADAPT_SUCCESS;
2310
2311 pr_debug("init_msg: state 0x%hx\n", vscsi->state);
2312
2313 rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
2314 (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
2315 0);
2316 if (rc == H_SUCCESS) {
2317 vscsi->client_data.partition_number =
2318 be64_to_cpu(*(u64 *)vscsi->map_buf);
2319 pr_debug("init_msg, part num %d\n",
2320 vscsi->client_data.partition_number);
2321 } else {
2322 pr_debug("init_msg h_vioctl rc %ld\n", rc);
2323 rc = ADAPT_SUCCESS;
2324 }
2325
2326 if (crq->format == INIT_MSG) {
2327 rc = ibmvscsis_handle_init_msg(vscsi);
2328 } else if (crq->format == INIT_COMPLETE_MSG) {
2329 rc = ibmvscsis_handle_init_compl_msg(vscsi);
2330 } else {
2331 rc = ERROR;
2332 dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
2333 (uint)crq->format);
2334 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2335 }
2336
2337 return rc;
2338}
2339
2340/**
2341 * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
2342 * @vscsi: Pointer to our adapter structure
2343 * @crq: Pointer to CRQ element containing the SRP request
2344 *
2345 * This function will return success if the command queue element is valid
2346 * and the srp iu or MAD request it pointed to was also valid. That does
2347 * not mean that an error was not returned to the client.
2348 *
2349 * EXECUTION ENVIRONMENT:
2350 * Interrupt, intr lock held
2351 */
2352static long ibmvscsis_parse_command(struct scsi_info *vscsi,
2353 struct viosrp_crq *crq)
2354{
2355 long rc = ADAPT_SUCCESS;
2356
2357 switch (crq->valid) {
2358 case VALID_CMD_RESP_EL:
2359 switch (crq->format) {
2360 case OS400_FORMAT:
2361 case AIX_FORMAT:
2362 case LINUX_FORMAT:
2363 case MAD_FORMAT:
2364 if (vscsi->flags & PROCESSING_MAD) {
2365 rc = ERROR;
2366 dev_err(&vscsi->dev, "parse_command: already processing mad\n");
2367 ibmvscsis_post_disconnect(vscsi,
2368 ERR_DISCONNECT_RECONNECT,
2369 0);
2370 } else {
2371 vscsi->flags |= PROCESSING_MAD;
2372 rc = ibmvscsis_mad(vscsi, crq);
2373 }
2374 break;
2375
2376 case SRP_FORMAT:
2377 ibmvscsis_srp_cmd(vscsi, crq);
2378 break;
2379
2380 case MESSAGE_IN_CRQ:
2381 if (crq->status == PING)
2382 ibmvscsis_ping_response(vscsi);
2383 break;
2384
2385 default:
2386 dev_err(&vscsi->dev, "parse_command: invalid format %d\n",
2387 (uint)crq->format);
2388 ibmvscsis_post_disconnect(vscsi,
2389 ERR_DISCONNECT_RECONNECT, 0);
2390 break;
2391 }
2392 break;
2393
2394 case VALID_TRANS_EVENT:
2395 rc = ibmvscsis_trans_event(vscsi, crq);
2396 break;
2397
2398 case VALID_INIT_MSG:
2399 rc = ibmvscsis_init_msg(vscsi, crq);
2400 break;
2401
2402 default:
2403 dev_err(&vscsi->dev, "parse_command: invalid valid field %d\n",
2404 (uint)crq->valid);
2405 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2406 break;
2407 }
2408
2409 /*
2410 * Return only what the interrupt handler cares
2411 * about. Most errors we keep right on trucking.
2412 */
2413 rc = vscsi->flags & SCHEDULE_DISCONNECT;
2414
2415 return rc;
2416}
2417
2418static int read_dma_window(struct scsi_info *vscsi)
2419{
2420 struct vio_dev *vdev = vscsi->dma_dev;
2421 const __be32 *dma_window;
2422 const __be32 *prop;
2423
2424 /* TODO Using of_parse_dma_window would be better, but it doesn't give
2425 * a way to read multiple windows without already knowing the size of
2426 * a window or the number of windows.
2427 */
2428 dma_window = (const __be32 *)vio_get_attribute(vdev,
2429 "ibm,my-dma-window",
2430 NULL);
2431 if (!dma_window) {
2432 pr_err("Couldn't find ibm,my-dma-window property\n");
2433 return -1;
2434 }
2435
2436 vscsi->dds.window[LOCAL].liobn = be32_to_cpu(*dma_window);
2437 dma_window++;
2438
2439 prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
2440 NULL);
2441 if (!prop) {
2442 pr_warn("Couldn't find ibm,#dma-address-cells property\n");
2443 dma_window++;
2444 } else {
2445 dma_window += be32_to_cpu(*prop);
2446 }
2447
2448 prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
2449 NULL);
2450 if (!prop) {
2451 pr_warn("Couldn't find ibm,#dma-size-cells property\n");
2452 dma_window++;
2453 } else {
2454 dma_window += be32_to_cpu(*prop);
2455 }
2456
2457 /* dma_window should point to the second window now */
2458 vscsi->dds.window[REMOTE].liobn = be32_to_cpu(*dma_window);
2459
2460 return 0;
2461}
2462
2463static struct ibmvscsis_tport *ibmvscsis_lookup_port(const char *name)
2464{
2465 struct ibmvscsis_tport *tport = NULL;
2466 struct vio_dev *vdev;
2467 struct scsi_info *vscsi;
2468
2469 spin_lock_bh(&ibmvscsis_dev_lock);
2470 list_for_each_entry(vscsi, &ibmvscsis_dev_list, list) {
2471 vdev = vscsi->dma_dev;
2472 if (!strcmp(dev_name(&vdev->dev), name)) {
2473 tport = &vscsi->tport;
2474 break;
2475 }
2476 }
2477 spin_unlock_bh(&ibmvscsis_dev_lock);
2478
2479 return tport;
2480}
2481
2482/**
2483 * ibmvscsis_parse_cmd() - Parse SRP Command
2484 * @vscsi: Pointer to our adapter structure
2485 * @cmd: Pointer to command element with SRP command
2486 *
2487 * Parse the srp command; if it is valid then submit it to tcm.
2488 * Note: The return code does not reflect the status of the SCSI CDB.
2489 *
2490 * EXECUTION ENVIRONMENT:
2491 * Process level
2492 */
2493static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
2494 struct ibmvscsis_cmd *cmd)
2495{
2496 struct iu_entry *iue = cmd->iue;
2497 struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
2498 struct ibmvscsis_nexus *nexus;
2499 u64 data_len = 0;
2500 enum dma_data_direction dir;
2501 int attr = 0;
2502 int rc = 0;
2503
2504 nexus = vscsi->tport.ibmv_nexus;
2505 /*
2506 * additional length in bytes. Note that the SRP spec says that
2507 * additional length is in 4-byte words, but technically the
2508 * additional length field is only the upper 6 bits of the byte.
2509 * The lower 2 bits are reserved. If the lower 2 bits are 0 (as
2510 * all reserved fields should be), then interpreting the byte as
2511 * an int will yield the length in bytes.
2512 */
2513 if (srp->add_cdb_len & 0x03) {
2514 dev_err(&vscsi->dev, "parse_cmd: reserved bits set in IU\n");
2515 spin_lock_bh(&vscsi->intr_lock);
2516 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2517 ibmvscsis_free_cmd_resources(vscsi, cmd);
2518 spin_unlock_bh(&vscsi->intr_lock);
2519 return;
2520 }
2521
2522 if (srp_get_desc_table(srp, &dir, &data_len)) {
2523 dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
2524 srp->tag);
2525 goto fail;
2526 return;
2527 }
2528
2529 cmd->rsp.sol_not = srp->sol_not;
2530
2531 switch (srp->task_attr) {
2532 case SRP_SIMPLE_TASK:
2533 attr = TCM_SIMPLE_TAG;
2534 break;
2535 case SRP_ORDERED_TASK:
2536 attr = TCM_ORDERED_TAG;
2537 break;
2538 case SRP_HEAD_TASK:
2539 attr = TCM_HEAD_TAG;
2540 break;
2541 case SRP_ACA_TASK:
2542 attr = TCM_ACA_TAG;
2543 break;
2544 default:
2545 dev_err(&vscsi->dev, "Invalid task attribute %d\n",
2546 srp->task_attr);
2547 goto fail;
2548 }
2549
2550 cmd->se_cmd.tag = be64_to_cpu(srp->tag);
2551
2552 spin_lock_bh(&vscsi->intr_lock);
2553 list_add_tail(&cmd->list, &vscsi->active_q);
2554 spin_unlock_bh(&vscsi->intr_lock);
2555
2556 srp->lun.scsi_lun[0] &= 0x3f;
2557
Bryant G. Ly88a678b2016-06-28 17:05:35 -05002558 rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
2559 cmd->sense_buf, scsilun_to_int(&srp->lun),
2560 data_len, attr, dir, 0);
2561 if (rc) {
2562 dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
2563 goto fail;
2564 }
2565 return;
2566
2567fail:
2568 spin_lock_bh(&vscsi->intr_lock);
2569 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2570 spin_unlock_bh(&vscsi->intr_lock);
2571}
2572
2573/**
2574 * ibmvscsis_parse_task() - Parse SRP Task Management Request
2575 * @vscsi: Pointer to our adapter structure
2576 * @cmd: Pointer to command element with SRP task management request
2577 *
2578 * Parse the srp task management request; if it is valid then submit it to tcm.
2579 * Note: The return code does not reflect the status of the task management
2580 * request.
2581 *
2582 * EXECUTION ENVIRONMENT:
2583 * Processor level
2584 */
2585static void ibmvscsis_parse_task(struct scsi_info *vscsi,
2586 struct ibmvscsis_cmd *cmd)
2587{
2588 struct iu_entry *iue = cmd->iue;
2589 struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
2590 int tcm_type;
2591 u64 tag_to_abort = 0;
2592 int rc = 0;
2593 struct ibmvscsis_nexus *nexus;
2594
2595 nexus = vscsi->tport.ibmv_nexus;
2596
2597 cmd->rsp.sol_not = srp_tsk->sol_not;
2598
2599 switch (srp_tsk->tsk_mgmt_func) {
2600 case SRP_TSK_ABORT_TASK:
2601 tcm_type = TMR_ABORT_TASK;
2602 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
2603 break;
2604 case SRP_TSK_ABORT_TASK_SET:
2605 tcm_type = TMR_ABORT_TASK_SET;
2606 break;
2607 case SRP_TSK_CLEAR_TASK_SET:
2608 tcm_type = TMR_CLEAR_TASK_SET;
2609 break;
2610 case SRP_TSK_LUN_RESET:
2611 tcm_type = TMR_LUN_RESET;
2612 break;
2613 case SRP_TSK_CLEAR_ACA:
2614 tcm_type = TMR_CLEAR_ACA;
2615 break;
2616 default:
2617 dev_err(&vscsi->dev, "unknown task mgmt func %d\n",
2618 srp_tsk->tsk_mgmt_func);
2619 cmd->se_cmd.se_tmr_req->response =
2620 TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
2621 rc = -1;
2622 break;
2623 }
2624
2625 if (!rc) {
2626 cmd->se_cmd.tag = be64_to_cpu(srp_tsk->tag);
2627
2628 spin_lock_bh(&vscsi->intr_lock);
2629 list_add_tail(&cmd->list, &vscsi->active_q);
2630 spin_unlock_bh(&vscsi->intr_lock);
2631
2632 srp_tsk->lun.scsi_lun[0] &= 0x3f;
2633
2634 pr_debug("calling submit_tmr, func %d\n",
2635 srp_tsk->tsk_mgmt_func);
2636 rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
2637 scsilun_to_int(&srp_tsk->lun), srp_tsk,
2638 tcm_type, GFP_KERNEL, tag_to_abort, 0);
2639 if (rc) {
2640 dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
2641 rc);
2642 cmd->se_cmd.se_tmr_req->response =
2643 TMR_FUNCTION_REJECTED;
2644 }
2645 }
2646
2647 if (rc)
2648 transport_send_check_condition_and_sense(&cmd->se_cmd, 0, 0);
2649}
2650
2651static void ibmvscsis_scheduler(struct work_struct *work)
2652{
2653 struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd,
2654 work);
2655 struct scsi_info *vscsi = cmd->adapter;
2656
2657 spin_lock_bh(&vscsi->intr_lock);
2658
2659 /* Remove from schedule_q */
2660 list_del(&cmd->list);
2661
2662 /* Don't submit cmd if we're disconnecting */
2663 if (vscsi->flags & (SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED)) {
2664 ibmvscsis_free_cmd_resources(vscsi, cmd);
2665
2666 /* ibmvscsis_disconnect might be waiting for us */
2667 if (list_empty(&vscsi->active_q) &&
2668 list_empty(&vscsi->schedule_q) &&
2669 (vscsi->flags & WAIT_FOR_IDLE)) {
2670 vscsi->flags &= ~WAIT_FOR_IDLE;
2671 complete(&vscsi->wait_idle);
2672 }
2673
2674 spin_unlock_bh(&vscsi->intr_lock);
2675 return;
2676 }
2677
2678 spin_unlock_bh(&vscsi->intr_lock);
2679
2680 switch (cmd->type) {
2681 case SCSI_CDB:
2682 ibmvscsis_parse_cmd(vscsi, cmd);
2683 break;
2684 case TASK_MANAGEMENT:
2685 ibmvscsis_parse_task(vscsi, cmd);
2686 break;
2687 default:
2688 dev_err(&vscsi->dev, "scheduler, invalid cmd type %d\n",
2689 cmd->type);
2690 spin_lock_bh(&vscsi->intr_lock);
2691 ibmvscsis_free_cmd_resources(vscsi, cmd);
2692 spin_unlock_bh(&vscsi->intr_lock);
2693 break;
2694 }
2695}
2696
2697static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num)
2698{
2699 struct ibmvscsis_cmd *cmd;
2700 int i;
2701
2702 INIT_LIST_HEAD(&vscsi->free_cmd);
2703 vscsi->cmd_pool = kcalloc(num, sizeof(struct ibmvscsis_cmd),
2704 GFP_KERNEL);
2705 if (!vscsi->cmd_pool)
2706 return -ENOMEM;
2707
2708 for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
2709 i++, cmd++) {
2710 cmd->adapter = vscsi;
2711 INIT_WORK(&cmd->work, ibmvscsis_scheduler);
2712 list_add_tail(&cmd->list, &vscsi->free_cmd);
2713 }
2714
2715 return 0;
2716}
2717
2718static void ibmvscsis_free_cmds(struct scsi_info *vscsi)
2719{
2720 kfree(vscsi->cmd_pool);
2721 vscsi->cmd_pool = NULL;
2722 INIT_LIST_HEAD(&vscsi->free_cmd);
2723}
2724
2725/**
2726 * ibmvscsis_service_wait_q() - Service Waiting Queue
2727 * @timer: Pointer to timer which has expired
2728 *
2729 * This routine is called when the timer pops to service the waiting
2730 * queue. Elements on the queue have completed, their responses have been
2731 * copied to the client, but the client's response queue was full so
2732 * the queue message could not be sent. The routine grabs the proper locks
2733 * and calls send messages.
2734 *
2735 * EXECUTION ENVIRONMENT:
2736 * called at interrupt level
2737 */
2738static enum hrtimer_restart ibmvscsis_service_wait_q(struct hrtimer *timer)
2739{
2740 struct timer_cb *p_timer = container_of(timer, struct timer_cb, timer);
2741 struct scsi_info *vscsi = container_of(p_timer, struct scsi_info,
2742 rsp_q_timer);
2743
2744 spin_lock_bh(&vscsi->intr_lock);
2745 p_timer->timer_pops += 1;
2746 p_timer->started = false;
2747 ibmvscsis_send_messages(vscsi);
2748 spin_unlock_bh(&vscsi->intr_lock);
2749
2750 return HRTIMER_NORESTART;
2751}
2752
2753static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
2754{
2755 struct timer_cb *p_timer;
2756
2757 p_timer = &vscsi->rsp_q_timer;
2758 hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2759
2760 p_timer->timer.function = ibmvscsis_service_wait_q;
2761 p_timer->started = false;
2762 p_timer->timer_pops = 0;
2763
2764 return ADAPT_SUCCESS;
2765}
2766
2767static void ibmvscsis_freetimer(struct scsi_info *vscsi)
2768{
2769 struct timer_cb *p_timer;
2770
2771 p_timer = &vscsi->rsp_q_timer;
2772
2773 (void)hrtimer_cancel(&p_timer->timer);
2774
2775 p_timer->started = false;
2776 p_timer->timer_pops = 0;
2777}
2778
/*
 * Top-half interrupt handler: mask further vio interrupts and defer all
 * CRQ processing to the tasklet (ibmvscsis_handle_crq), which re-enables
 * interrupts once the queue has been drained.
 */
static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
{
	struct scsi_info *vscsi = data;

	vio_disable_interrupts(vscsi->dma_dev);
	tasklet_schedule(&vscsi->work_task);

	return IRQ_HANDLED;
}
2788
2789/**
2790 * ibmvscsis_check_q() - Helper function to Check Init Message Valid
2791 * @vscsi: Pointer to our adapter structure
2792 *
2793 * Checks if a initialize message was queued by the initiatior
2794 * while the timing window was open. This function is called from
2795 * probe after the CRQ is created and interrupts are enabled.
2796 * It would only be used by adapters who wait for some event before
2797 * completing the init handshake with the client. For ibmvscsi, this
2798 * event is waiting for the port to be enabled.
2799 *
2800 * EXECUTION ENVIRONMENT:
2801 * Process level only, interrupt lock held
2802 */
2803static long ibmvscsis_check_q(struct scsi_info *vscsi)
2804{
2805 uint format;
2806 long rc;
2807
2808 rc = ibmvscsis_check_init_msg(vscsi, &format);
2809 if (rc)
2810 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
2811 else if (format == UNUSED_FORMAT)
2812 vscsi->state = WAIT_ENABLED;
2813 else
2814 vscsi->state = PART_UP_WAIT_ENAB;
2815
2816 return rc;
2817}
2818
2819/**
2820 * ibmvscsis_enable_change_state() - Set new state based on enabled status
2821 * @vscsi: Pointer to our adapter structure
2822 *
2823 * This function determines our new state now that we are enabled. This
2824 * may involve sending an Init Complete message to the client.
2825 *
2826 * Must be called with interrupt lock held.
2827 */
2828static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
2829{
2830 long rc = ADAPT_SUCCESS;
2831
2832handle_state_change:
2833 switch (vscsi->state) {
2834 case WAIT_ENABLED:
2835 rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
2836 switch (rc) {
2837 case H_SUCCESS:
2838 case H_DROPPED:
2839 case H_CLOSED:
2840 vscsi->state = WAIT_CONNECTION;
2841 rc = ADAPT_SUCCESS;
2842 break;
2843
2844 case H_PARAMETER:
2845 break;
2846
2847 case H_HARDWARE:
2848 break;
2849
2850 default:
2851 vscsi->state = UNDEFINED;
2852 rc = H_HARDWARE;
2853 break;
2854 }
2855 break;
2856 case PART_UP_WAIT_ENAB:
2857 rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
2858 switch (rc) {
2859 case H_SUCCESS:
2860 vscsi->state = CONNECTED;
2861 rc = ADAPT_SUCCESS;
2862 break;
2863
2864 case H_DROPPED:
2865 case H_CLOSED:
2866 vscsi->state = WAIT_ENABLED;
2867 goto handle_state_change;
2868
2869 case H_PARAMETER:
2870 break;
2871
2872 case H_HARDWARE:
2873 break;
2874
2875 default:
2876 rc = H_HARDWARE;
2877 break;
2878 }
2879 break;
2880
2881 case WAIT_CONNECTION:
2882 case WAIT_IDLE:
2883 case SRP_PROCESSING:
2884 case CONNECTED:
2885 rc = ADAPT_SUCCESS;
2886 break;
2887 /* should not be able to get here */
2888 case UNCONFIGURING:
2889 rc = ERROR;
2890 vscsi->state = UNDEFINED;
2891 break;
2892
2893 /* driver should never allow this to happen */
2894 case ERR_DISCONNECT:
2895 case ERR_DISCONNECT_RECONNECT:
2896 default:
2897 dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n",
2898 vscsi->state);
2899 rc = ADAPT_SUCCESS;
2900 break;
2901 }
2902
2903 return rc;
2904}
2905
2906/**
2907 * ibmvscsis_create_command_q() - Create Command Queue
2908 * @vscsi: Pointer to our adapter structure
2909 * @num_cmds: Currently unused. In the future, may be used to determine
2910 * the size of the CRQ.
2911 *
2912 * Allocates memory for command queue maps remote memory into an ioba
2913 * initializes the command response queue
2914 *
2915 * EXECUTION ENVIRONMENT:
2916 * Process level only
2917 */
2918static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
2919{
2920 long rc = 0;
2921 int pages;
2922 struct vio_dev *vdev = vscsi->dma_dev;
2923
2924 /* We might support multiple pages in the future, but just 1 for now */
2925 pages = 1;
2926
2927 vscsi->cmd_q.size = pages;
2928
2929 vscsi->cmd_q.base_addr =
2930 (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
2931 if (!vscsi->cmd_q.base_addr)
2932 return -ENOMEM;
2933
2934 vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1;
2935
2936 vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev,
2937 vscsi->cmd_q.base_addr,
2938 PAGE_SIZE, DMA_BIDIRECTIONAL);
2939 if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) {
2940 free_page((unsigned long)vscsi->cmd_q.base_addr);
2941 return -ENOMEM;
2942 }
2943
2944 rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE);
2945 if (rc) {
2946 if (rc == H_CLOSED) {
2947 vscsi->state = WAIT_ENABLED;
2948 rc = 0;
2949 } else {
2950 dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token,
2951 PAGE_SIZE, DMA_BIDIRECTIONAL);
2952 free_page((unsigned long)vscsi->cmd_q.base_addr);
2953 rc = -ENODEV;
2954 }
2955 } else {
2956 vscsi->state = WAIT_ENABLED;
2957 }
2958
2959 return rc;
2960}
2961
2962/**
2963 * ibmvscsis_destroy_command_q - Destroy Command Queue
2964 * @vscsi: Pointer to our adapter structure
2965 *
2966 * Releases memory for command queue and unmaps mapped remote memory.
2967 *
2968 * EXECUTION ENVIRONMENT:
2969 * Process level only
2970 */
2971static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi)
2972{
2973 dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token,
2974 PAGE_SIZE, DMA_BIDIRECTIONAL);
2975 free_page((unsigned long)vscsi->cmd_q.base_addr);
2976 vscsi->cmd_q.base_addr = NULL;
2977 vscsi->state = NO_QUEUE;
2978}
2979
2980static u8 ibmvscsis_fast_fail(struct scsi_info *vscsi,
2981 struct ibmvscsis_cmd *cmd)
2982{
2983 struct iu_entry *iue = cmd->iue;
2984 struct se_cmd *se_cmd = &cmd->se_cmd;
2985 struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
2986 struct scsi_sense_hdr sshdr;
2987 u8 rc = se_cmd->scsi_status;
2988
2989 if (vscsi->fast_fail && (READ_CMD(srp->cdb) || WRITE_CMD(srp->cdb)))
2990 if (scsi_normalize_sense(se_cmd->sense_buffer,
2991 se_cmd->scsi_sense_length, &sshdr))
2992 if (sshdr.sense_key == HARDWARE_ERROR &&
2993 (se_cmd->residual_count == 0 ||
2994 se_cmd->residual_count == se_cmd->data_length)) {
2995 rc = NO_SENSE;
2996 cmd->flags |= CMD_FAST_FAIL;
2997 }
2998
2999 return rc;
3000}
3001
3002/**
3003 * srp_build_response() - Build an SRP response buffer
3004 * @vscsi: Pointer to our adapter structure
3005 * @cmd: Pointer to command for which to send the response
3006 * @len_p: Where to return the length of the IU response sent. This
3007 * is needed to construct the CRQ response.
3008 *
3009 * Build the SRP response buffer and copy it to the client's memory space.
3010 */
3011static long srp_build_response(struct scsi_info *vscsi,
3012 struct ibmvscsis_cmd *cmd, uint *len_p)
3013{
3014 struct iu_entry *iue = cmd->iue;
3015 struct se_cmd *se_cmd = &cmd->se_cmd;
3016 struct srp_rsp *rsp;
3017 uint len;
3018 u32 rsp_code;
3019 char *data;
3020 u32 *tsk_status;
3021 long rc = ADAPT_SUCCESS;
3022
3023 spin_lock_bh(&vscsi->intr_lock);
3024
3025 rsp = &vio_iu(iue)->srp.rsp;
3026 len = sizeof(*rsp);
3027 memset(rsp, 0, len);
3028 data = rsp->data;
3029
3030 rsp->opcode = SRP_RSP;
3031
3032 if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
3033 rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
3034 else
3035 rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
3036 rsp->tag = cmd->rsp.tag;
3037 rsp->flags = 0;
3038
3039 if (cmd->type == SCSI_CDB) {
3040 rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
3041 if (rsp->status) {
3042 pr_debug("build_resp: cmd %p, scsi status %d\n", cmd,
3043 (int)rsp->status);
3044 ibmvscsis_determine_resid(se_cmd, rsp);
3045 if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
3046 rsp->sense_data_len =
3047 cpu_to_be32(se_cmd->scsi_sense_length);
3048 rsp->flags |= SRP_RSP_FLAG_SNSVALID;
3049 len += se_cmd->scsi_sense_length;
3050 memcpy(data, se_cmd->sense_buffer,
3051 se_cmd->scsi_sense_length);
3052 }
3053 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3054 UCSOLNT_RESP_SHIFT;
3055 } else if (cmd->flags & CMD_FAST_FAIL) {
3056 pr_debug("build_resp: cmd %p, fast fail\n", cmd);
3057 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3058 UCSOLNT_RESP_SHIFT;
3059 } else {
3060 rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
3061 SCSOLNT_RESP_SHIFT;
3062 }
3063 } else {
3064 /* this is task management */
3065 rsp->status = 0;
3066 rsp->resp_data_len = cpu_to_be32(4);
3067 rsp->flags |= SRP_RSP_FLAG_RSPVALID;
3068
3069 switch (se_cmd->se_tmr_req->response) {
3070 case TMR_FUNCTION_COMPLETE:
3071 case TMR_TASK_DOES_NOT_EXIST:
3072 rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE;
3073 rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
3074 SCSOLNT_RESP_SHIFT;
3075 break;
3076 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
3077 case TMR_LUN_DOES_NOT_EXIST:
3078 rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED;
3079 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3080 UCSOLNT_RESP_SHIFT;
3081 break;
3082 case TMR_FUNCTION_FAILED:
3083 case TMR_FUNCTION_REJECTED:
3084 default:
3085 rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_FAILED;
3086 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
3087 UCSOLNT_RESP_SHIFT;
3088 break;
3089 }
3090
3091 tsk_status = (u32 *)data;
3092 *tsk_status = cpu_to_be32(rsp_code);
3093 data = (char *)(tsk_status + 1);
3094 len += 4;
3095 }
3096
3097 dma_wmb();
3098 rc = h_copy_rdma(len, vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
3099 vscsi->dds.window[REMOTE].liobn,
3100 be64_to_cpu(iue->remote_token));
3101
3102 switch (rc) {
3103 case H_SUCCESS:
3104 vscsi->credit = 0;
3105 *len_p = len;
3106 break;
3107 case H_PERMISSION:
3108 if (connection_broken(vscsi))
3109 vscsi->flags |= RESPONSE_Q_DOWN | CLIENT_FAILED;
3110
3111 dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld, flags 0x%x, state 0x%hx\n",
3112 rc, vscsi->flags, vscsi->state);
3113 break;
3114 case H_SOURCE_PARM:
3115 case H_DEST_PARM:
3116 default:
3117 dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld\n",
3118 rc);
3119 break;
3120 }
3121
3122 spin_unlock_bh(&vscsi->intr_lock);
3123
3124 return rc;
3125}
3126
/**
 * ibmvscsis_rdma() - Move data between client descriptors and sg list
 * @cmd:	Pointer to the command being transferred
 * @sg:		Server-side scatter/gather list (already DMA mapped)
 * @nsg:	Number of entries in @sg (unused; the walk relies on @bytes)
 * @md:		Client-side SRP direct memory descriptors
 * @nmd:	Number of entries in @md
 * @dir:	DMA_TO_DEVICE reads from the client; otherwise writes to it
 * @bytes:	Total number of bytes to move
 *
 * Walks the client descriptor list and the server scatter/gather list in
 * parallel, issuing one h_copy_rdma per chunk that fits the remainder of
 * both current entries (capped at max_vdma_size).
 *
 * Return: 0 on success; -EIO when either list is exhausted early, or the
 * failing h_copy_rdma return code.
 */
static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
			  int nsg, struct srp_direct_buf *md, int nmd,
			  enum dma_data_direction dir, unsigned int bytes)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_target *target = iue->target;
	struct scsi_info *vscsi = target->ldata;
	struct scatterlist *sgp;
	dma_addr_t client_ioba, server_ioba;
	ulong buf_len;
	ulong client_len, server_len;	/* bytes left in current md/sg entry */
	int md_idx;
	long tx_len;			/* total bytes still to transfer */
	long rc = 0;

	if (bytes == 0)
		return 0;

	sgp = sg;
	client_len = 0;
	server_len = 0;
	md_idx = 0;
	tx_len = bytes;

	do {
		/* Advance to the next client descriptor when exhausted. */
		if (client_len == 0) {
			if (md_idx >= nmd) {
				dev_err(&vscsi->dev, "rdma: ran out of client memory descriptors\n");
				rc = -EIO;
				break;
			}
			client_ioba = be64_to_cpu(md[md_idx].va);
			client_len = be32_to_cpu(md[md_idx].len);
		}
		/* Advance to the next server sg entry when exhausted. */
		if (server_len == 0) {
			if (!sgp) {
				dev_err(&vscsi->dev, "rdma: ran out of scatter/gather list\n");
				rc = -EIO;
				break;
			}
			server_ioba = sg_dma_address(sgp);
			server_len = sg_dma_len(sgp);
		}

		/* Chunk = min(remaining, client piece, server piece, cap). */
		buf_len = tx_len;

		if (buf_len > client_len)
			buf_len = client_len;

		if (buf_len > server_len)
			buf_len = server_len;

		if (buf_len > max_vdma_size)
			buf_len = max_vdma_size;

		if (dir == DMA_TO_DEVICE) {
			/* read from client */
			rc = h_copy_rdma(buf_len,
					 vscsi->dds.window[REMOTE].liobn,
					 client_ioba,
					 vscsi->dds.window[LOCAL].liobn,
					 server_ioba);
		} else {
			/* The h_copy_rdma will cause phyp, running in another
			 * partition, to read memory, so we need to make sure
			 * the data has been written out, hence these syncs.
			 */
			/* ensure that everything is in memory */
			isync();
			/* ensure that memory has been made visible */
			dma_wmb();
			rc = h_copy_rdma(buf_len,
					 vscsi->dds.window[LOCAL].liobn,
					 server_ioba,
					 vscsi->dds.window[REMOTE].liobn,
					 client_ioba);
		}
		switch (rc) {
		case H_SUCCESS:
			break;
		case H_PERMISSION:
		case H_SOURCE_PARM:
		case H_DEST_PARM:
			if (connection_broken(vscsi)) {
				spin_lock_bh(&vscsi->intr_lock);
				vscsi->flags |=
					(RESPONSE_Q_DOWN | CLIENT_FAILED);
				spin_unlock_bh(&vscsi->intr_lock);
			}
			dev_err(&vscsi->dev, "rdma: h_copy_rdma failed, rc %ld\n",
				rc);
			break;

		default:
			dev_err(&vscsi->dev, "rdma: unknown error %ld from h_copy_rdma\n",
				rc);
			break;
		}

		if (!rc) {
			/* Consume the chunk from both cursors. */
			tx_len -= buf_len;
			if (tx_len) {
				client_len -= buf_len;
				if (client_len == 0)
					md_idx++;
				else
					client_ioba += buf_len;

				server_len -= buf_len;
				if (server_len == 0)
					sgp = sg_next(sgp);
				else
					server_ioba += buf_len;
			} else {
				/* All bytes moved; done. */
				break;
			}
		}
	} while (!rc);

	return rc;
}
3248
3249/**
3250 * ibmvscsis_handle_crq() - Handle CRQ
3251 * @data: Pointer to our adapter structure
3252 *
3253 * Read the command elements from the command queue and copy the payloads
3254 * associated with the command elements to local memory and execute the
3255 * SRP requests.
3256 *
3257 * Note: this is an edge triggered interrupt. It can not be shared.
3258 */
3259static void ibmvscsis_handle_crq(unsigned long data)
3260{
3261 struct scsi_info *vscsi = (struct scsi_info *)data;
3262 struct viosrp_crq *crq;
3263 long rc;
3264 bool ack = true;
3265 volatile u8 valid;
3266
3267 spin_lock_bh(&vscsi->intr_lock);
3268
3269 pr_debug("got interrupt\n");
3270
3271 /*
3272 * if we are in a path where we are waiting for all pending commands
3273 * to complete because we received a transport event and anything in
3274 * the command queue is for a new connection, do nothing
3275 */
3276 if (TARGET_STOP(vscsi)) {
3277 vio_enable_interrupts(vscsi->dma_dev);
3278
3279 pr_debug("handle_crq, don't process: flags 0x%x, state 0x%hx\n",
3280 vscsi->flags, vscsi->state);
3281 spin_unlock_bh(&vscsi->intr_lock);
3282 return;
3283 }
3284
3285 rc = vscsi->flags & SCHEDULE_DISCONNECT;
3286 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
3287 valid = crq->valid;
3288 dma_rmb();
3289
3290 while (valid) {
3291 /*
3292 * These are edege triggered interrupts. After dropping out of
3293 * the while loop, the code must check for work since an
3294 * interrupt could be lost, and an elment be left on the queue,
3295 * hence the label.
3296 */
3297cmd_work:
3298 vscsi->cmd_q.index =
3299 (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
3300
3301 if (!rc) {
3302 rc = ibmvscsis_parse_command(vscsi, crq);
3303 } else {
3304 if ((uint)crq->valid == VALID_TRANS_EVENT) {
3305 /*
3306 * must service the transport layer events even
3307 * in an error state, dont break out until all
3308 * the consecutive transport events have been
3309 * processed
3310 */
3311 rc = ibmvscsis_trans_event(vscsi, crq);
3312 } else if (vscsi->flags & TRANS_EVENT) {
3313 /*
Bryant G. Ly81290212016-08-31 11:29:00 -05003314 * if a transport event has occurred leave
Bryant G. Ly88a678b2016-06-28 17:05:35 -05003315 * everything but transport events on the queue
Bryant G. Ly81290212016-08-31 11:29:00 -05003316 *
Bryant G. Ly88a678b2016-06-28 17:05:35 -05003317 * need to decrement the queue index so we can
3318 * look at the elment again
3319 */
3320 if (vscsi->cmd_q.index)
3321 vscsi->cmd_q.index -= 1;
3322 else
3323 /*
3324 * index is at 0 it just wrapped.
3325 * have it index last element in q
3326 */
3327 vscsi->cmd_q.index = vscsi->cmd_q.mask;
3328 break;
3329 }
3330 }
3331
3332 crq->valid = INVALIDATE_CMD_RESP_EL;
3333
3334 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
3335 valid = crq->valid;
3336 dma_rmb();
3337 }
3338
3339 if (!rc) {
3340 if (ack) {
3341 vio_enable_interrupts(vscsi->dma_dev);
3342 ack = false;
3343 pr_debug("handle_crq, reenabling interrupts\n");
3344 }
3345 valid = crq->valid;
3346 dma_rmb();
3347 if (valid)
3348 goto cmd_work;
3349 } else {
3350 pr_debug("handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
3351 vscsi->flags, vscsi->state, vscsi->cmd_q.index);
3352 }
3353
3354 pr_debug("Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
3355 (int)list_empty(&vscsi->schedule_q), vscsi->flags,
3356 vscsi->state);
3357
3358 spin_unlock_bh(&vscsi->intr_lock);
3359}
3360
3361static int ibmvscsis_probe(struct vio_dev *vdev,
3362 const struct vio_device_id *id)
3363{
3364 struct scsi_info *vscsi;
3365 int rc = 0;
3366 long hrc = 0;
3367 char wq_name[24];
3368
3369 vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
3370 if (!vscsi) {
3371 rc = -ENOMEM;
3372 pr_err("probe: allocation of adapter failed\n");
3373 return rc;
3374 }
3375
3376 vscsi->dma_dev = vdev;
3377 vscsi->dev = vdev->dev;
3378 INIT_LIST_HEAD(&vscsi->schedule_q);
3379 INIT_LIST_HEAD(&vscsi->waiting_rsp);
3380 INIT_LIST_HEAD(&vscsi->active_q);
3381
3382 snprintf(vscsi->tport.tport_name, 256, "%s", dev_name(&vdev->dev));
3383
3384 pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
3385
3386 rc = read_dma_window(vscsi);
3387 if (rc)
3388 goto free_adapter;
3389 pr_debug("Probe: liobn 0x%x, riobn 0x%x\n",
3390 vscsi->dds.window[LOCAL].liobn,
3391 vscsi->dds.window[REMOTE].liobn);
3392
3393 strcpy(vscsi->eye, "VSCSI ");
3394 strncat(vscsi->eye, vdev->name, MAX_EYE);
3395
3396 vscsi->dds.unit_id = vdev->unit_address;
3397
3398 spin_lock_bh(&ibmvscsis_dev_lock);
3399 list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
3400 spin_unlock_bh(&ibmvscsis_dev_lock);
3401
3402 /*
3403 * TBD: How do we determine # of cmds to request? Do we know how
3404 * many "children" we have?
3405 */
3406 vscsi->request_limit = INITIAL_SRP_LIMIT;
3407 rc = srp_target_alloc(&vscsi->target, &vdev->dev, vscsi->request_limit,
3408 SRP_MAX_IU_LEN);
3409 if (rc)
3410 goto rem_list;
3411
3412 vscsi->target.ldata = vscsi;
3413
3414 rc = ibmvscsis_alloc_cmds(vscsi, vscsi->request_limit);
3415 if (rc) {
3416 dev_err(&vscsi->dev, "alloc_cmds failed, rc %d, num %d\n",
3417 rc, vscsi->request_limit);
3418 goto free_target;
3419 }
3420
3421 /*
3422 * Note: the lock is used in freeing timers, so must initialize
3423 * first so that ordering in case of error is correct.
3424 */
3425 spin_lock_init(&vscsi->intr_lock);
3426
3427 rc = ibmvscsis_alloctimer(vscsi);
3428 if (rc) {
3429 dev_err(&vscsi->dev, "probe: alloctimer failed, rc %d\n", rc);
3430 goto free_cmds;
3431 }
3432
3433 rc = ibmvscsis_create_command_q(vscsi, 256);
3434 if (rc) {
3435 dev_err(&vscsi->dev, "probe: create_command_q failed, rc %d\n",
3436 rc);
3437 goto free_timer;
3438 }
3439
3440 vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
3441 if (!vscsi->map_buf) {
3442 rc = -ENOMEM;
3443 dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n");
3444 goto destroy_queue;
3445 }
3446
3447 vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
3448 DMA_BIDIRECTIONAL);
3449 if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
Wei Yongjun38247fe2016-09-15 03:25:23 +00003450 rc = -ENOMEM;
Bryant G. Ly88a678b2016-06-28 17:05:35 -05003451 dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
3452 goto free_buf;
3453 }
3454
3455 hrc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
3456 (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
3457 0);
3458 if (hrc == H_SUCCESS)
3459 vscsi->client_data.partition_number =
3460 be64_to_cpu(*(u64 *)vscsi->map_buf);
3461 /*
3462 * We expect the VIOCTL to fail if we're configured as "any
3463 * client can connect" and the client isn't activated yet.
3464 * We'll make the call again when he sends an init msg.
3465 */
3466 pr_debug("probe hrc %ld, client partition num %d\n",
3467 hrc, vscsi->client_data.partition_number);
3468
3469 tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
3470 (unsigned long)vscsi);
3471
3472 init_completion(&vscsi->wait_idle);
3473
3474 snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
3475 vscsi->work_q = create_workqueue(wq_name);
3476 if (!vscsi->work_q) {
3477 rc = -ENOMEM;
3478 dev_err(&vscsi->dev, "create_workqueue failed\n");
3479 goto unmap_buf;
3480 }
3481
3482 rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi);
3483 if (rc) {
3484 rc = -EPERM;
3485 dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc);
3486 goto destroy_WQ;
3487 }
3488
3489 spin_lock_bh(&vscsi->intr_lock);
3490 vio_enable_interrupts(vdev);
3491 if (rc) {
3492 dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc);
3493 rc = -ENODEV;
3494 spin_unlock_bh(&vscsi->intr_lock);
3495 goto free_irq;
3496 }
3497
3498 if (ibmvscsis_check_q(vscsi)) {
3499 rc = ERROR;
3500 dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc);
3501 spin_unlock_bh(&vscsi->intr_lock);
3502 goto disable_interrupt;
3503 }
3504 spin_unlock_bh(&vscsi->intr_lock);
3505
3506 dev_set_drvdata(&vdev->dev, vscsi);
3507
3508 return 0;
3509
3510disable_interrupt:
3511 vio_disable_interrupts(vdev);
3512free_irq:
3513 free_irq(vdev->irq, vscsi);
3514destroy_WQ:
3515 destroy_workqueue(vscsi->work_q);
3516unmap_buf:
3517 dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
3518 DMA_BIDIRECTIONAL);
3519free_buf:
3520 kfree(vscsi->map_buf);
3521destroy_queue:
3522 tasklet_kill(&vscsi->work_task);
3523 ibmvscsis_unregister_command_q(vscsi);
3524 ibmvscsis_destroy_command_q(vscsi);
3525free_timer:
3526 ibmvscsis_freetimer(vscsi);
3527free_cmds:
3528 ibmvscsis_free_cmds(vscsi);
3529free_target:
3530 srp_target_free(&vscsi->target);
3531rem_list:
3532 spin_lock_bh(&ibmvscsis_dev_lock);
3533 list_del(&vscsi->list);
3534 spin_unlock_bh(&ibmvscsis_dev_lock);
3535free_adapter:
3536 kfree(vscsi);
3537
3538 return rc;
3539}
3540
/*
 * ibmvscsis_remove() - Tear down one adapter instance.
 *
 * Unwinds everything ibmvscsis_probe() set up, in reverse order:
 * interrupts off first so no new work arrives, then the irq, work queue,
 * DMA buffer, tasklet, command queue, timer, command pool, SRP target,
 * and finally the adapter itself. The teardown order must mirror probe.
 */
static int ibmvscsis_remove(struct vio_dev *vdev)
{
	struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);

	pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));

	/*
	 * TBD: Need to handle if there are commands on the waiting_rsp q
	 * Actually, can there still be cmds outstanding to tcm?
	 */

	vio_disable_interrupts(vdev);
	free_irq(vdev->irq, vscsi);
	destroy_workqueue(vscsi->work_q);
	dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	kfree(vscsi->map_buf);
	tasklet_kill(&vscsi->work_task);
	ibmvscsis_unregister_command_q(vscsi);
	ibmvscsis_destroy_command_q(vscsi);
	ibmvscsis_freetimer(vscsi);
	ibmvscsis_free_cmds(vscsi);
	srp_target_free(&vscsi->target);
	/* drop the adapter from the global list before freeing it */
	spin_lock_bh(&ibmvscsis_dev_lock);
	list_del(&vscsi->list);
	spin_unlock_bh(&ibmvscsis_dev_lock);
	kfree(vscsi);

	return 0;
}
3571
3572static ssize_t system_id_show(struct device *dev,
3573 struct device_attribute *attr, char *buf)
3574{
3575 return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
3576}
3577
3578static ssize_t partition_number_show(struct device *dev,
3579 struct device_attribute *attr, char *buf)
3580{
3581 return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
3582}
3583
3584static ssize_t unit_address_show(struct device *dev,
3585 struct device_attribute *attr, char *buf)
3586{
3587 struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev);
3588
3589 return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address);
3590}
3591
3592static int ibmvscsis_get_system_info(void)
3593{
3594 struct device_node *rootdn, *vdevdn;
3595 const char *id, *model, *name;
3596 const uint *num;
3597
3598 rootdn = of_find_node_by_path("/");
3599 if (!rootdn)
3600 return -ENOENT;
3601
3602 model = of_get_property(rootdn, "model", NULL);
3603 id = of_get_property(rootdn, "system-id", NULL);
3604 if (model && id)
3605 snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
3606
3607 name = of_get_property(rootdn, "ibm,partition-name", NULL);
3608 if (name)
3609 strncpy(partition_name, name, sizeof(partition_name));
3610
3611 num = of_get_property(rootdn, "ibm,partition-no", NULL);
3612 if (num)
3613 partition_number = *num;
3614
3615 of_node_put(rootdn);
3616
3617 vdevdn = of_find_node_by_path("/vdevice");
3618 if (vdevdn) {
3619 const uint *mvds;
3620
3621 mvds = of_get_property(vdevdn, "ibm,max-virtual-dma-size",
3622 NULL);
3623 if (mvds)
3624 max_vdma_size = *mvds;
3625 of_node_put(vdevdn);
3626 }
3627
3628 return 0;
3629}
3630
/* Fabric name reported to the target core. */
static char *ibmvscsis_get_fabric_name(void)
{
	static char fabric_name[] = "ibmvscsis";

	return fabric_name;
}
3635
3636static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg)
3637{
3638 struct ibmvscsis_tport *tport =
3639 container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
3640
3641 return tport->tport_name;
3642}
3643
3644static u16 ibmvscsis_get_tag(struct se_portal_group *se_tpg)
3645{
3646 struct ibmvscsis_tport *tport =
3647 container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
3648
3649 return tport->tport_tpgt;
3650}
3651
/* Target-core callback: fixed default queue depth of 1 for this fabric. */
static u32 ibmvscsis_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}
3656
/* Target-core callback stub: always "true" (used for demo-mode checks). */
static int ibmvscsis_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}
3661
/* Target-core callback stub: always "false" (used for write-protect checks). */
static int ibmvscsis_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}
3666
/* Target-core callback: single-instance fabric, instance index is always 1. */
static u32 ibmvscsis_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}
3671
/*
 * Drop the session-command reference when the target core is done with the
 * command; returns the result of target_put_sess_cmd().
 */
static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd);
}
3676
/*
 * ibmvscsis_release_cmd() - Target-core release callback.
 *
 * Moves the command from active_q onto waiting_rsp under intr_lock, then
 * kicks ibmvscsis_send_messages() to push any pending responses to the
 * client. The command itself is recycled via the waiting_rsp path, not
 * freed here.
 */
static void ibmvscsis_release_cmd(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct scsi_info *vscsi = cmd->adapter;

	spin_lock_bh(&vscsi->intr_lock);
	/* Remove from active_q, queue for response delivery */
	list_move_tail(&cmd->list, &vscsi->waiting_rsp);
	ibmvscsis_send_messages(vscsi);
	spin_unlock_bh(&vscsi->intr_lock);
}
3689
/* Target-core callback: session index is unused by this fabric, always 0. */
static u32 ibmvscsis_sess_get_index(struct se_session *se_sess)
{
	return 0;
}
3694
/*
 * ibmvscsis_write_pending() - Fetch WRITE data from the client.
 *
 * Pulls the data-out payload across the VIO link via srp_transfer_data()
 * (using ibmvscsis_rdma as the transfer callback), then hands the command
 * to the target core for execution. Returns -EAGAIN if the transfer fails.
 */
static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct iu_entry *iue = cmd->iue;
	int rc;

	rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
			       1, 1);
	if (rc) {
		pr_err("srp_transfer_data() failed: %d\n", rc);
		return -EAGAIN;
	}
	/*
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}
3715
/* Target-core callback stub: write-pending status polling is not used. */
static int ibmvscsis_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}
3720
/* Target-core callback stub: no per-node attributes to set for this fabric. */
static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl)
{
}
3724
/* Target-core callback stub: no fabric-private command state is tracked. */
static int ibmvscsis_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}
3729
/*
 * ibmvscsis_queue_data_in() - Push READ data and response to the client.
 *
 * Transfers the data-in payload over the VIO link via srp_transfer_data().
 * On transfer failure, fabricates MEDIUM ERROR sense data (asc/ascq 0x0801,
 * Logical Unit Communication Time-out) so the client sees a meaningful
 * error. In either case an SRP response is built and queued on the command;
 * always returns 0.
 */
static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct iu_entry *iue = cmd->iue;
	struct scsi_info *vscsi = cmd->adapter;
	char *sd;
	uint len = 0;
	int rc;

	rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
			       1);
	if (rc) {
		pr_err("srp_transfer_data failed: %d\n", rc);
		sd = se_cmd->sense_buffer;
		se_cmd->scsi_sense_length = 18;
		memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
		/* Logical Unit Communication Time-out asc/ascq = 0x0801 */
		scsi_build_sense_buffer(0, se_cmd->sense_buffer, MEDIUM_ERROR,
					0x08, 0x01);
	}

	srp_build_response(vscsi, cmd, &len);
	cmd->rsp.format = SRP_FORMAT;
	cmd->rsp.len = len;

	return 0;
}
3758
/*
 * ibmvscsis_queue_status() - Queue the SRP response for a completed command.
 *
 * Builds the SRP response buffer and records its format/length on the
 * command; the response is sent later by the message delivery path.
 */
static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct scsi_info *vscsi = cmd->adapter;
	uint len;

	pr_debug("queue_status %p\n", se_cmd);

	srp_build_response(vscsi, cmd, &len);
	cmd->rsp.format = SRP_FORMAT;
	cmd->rsp.len = len;

	return 0;
}
3774
/*
 * ibmvscsis_queue_tm_rsp() - Queue the SRP response for a task management
 * request; mirrors ibmvscsis_queue_status() but logs the TM response code.
 */
static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct scsi_info *vscsi = cmd->adapter;
	uint len;

	pr_debug("queue_tm_rsp %p, status %d\n",
		 se_cmd, (int)se_cmd->se_tmr_req->response);

	srp_build_response(vscsi, cmd, &len);
	cmd->rsp.format = SRP_FORMAT;
	cmd->rsp.len = len;
}
3789
/* Target-core abort notification: currently log-only (see TBD). */
static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
{
	/* TBD: What (if anything) should we do here? */
	pr_debug("ibmvscsis_aborted_task %p\n", se_cmd);
}
3795
3796static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
3797 struct config_group *group,
3798 const char *name)
3799{
3800 struct ibmvscsis_tport *tport;
3801
3802 tport = ibmvscsis_lookup_port(name);
3803 if (tport) {
3804 tport->tport_proto_id = SCSI_PROTOCOL_SRP;
3805 pr_debug("make_tport(%s), pointer:%p, tport_id:%x\n",
3806 name, tport, tport->tport_proto_id);
3807 return &tport->tport_wwn;
3808 }
3809
3810 return ERR_PTR(-EINVAL);
3811}
3812
/*
 * configfs: WWN directory removal. The tport is owned by the adapter
 * (freed in ibmvscsis_remove()), so this only logs the event.
 */
static void ibmvscsis_drop_tport(struct se_wwn *wwn)
{
	struct ibmvscsis_tport *tport = container_of(wwn,
						     struct ibmvscsis_tport,
						     tport_wwn);

	pr_debug("drop_tport(%s)\n",
		 config_item_name(&tport->tport_wwn.wwn_group.cg_item));
}
3822
3823static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
3824 struct config_group *group,
3825 const char *name)
3826{
3827 struct ibmvscsis_tport *tport =
3828 container_of(wwn, struct ibmvscsis_tport, tport_wwn);
3829 int rc;
3830
3831 tport->releasing = false;
3832
3833 rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg,
3834 tport->tport_proto_id);
3835 if (rc)
3836 return ERR_PTR(rc);
3837
3838 return &tport->se_tpg;
3839}
3840
/*
 * configfs: tear down a target portal group - marks the tport as releasing
 * and disabled, drops the virtual I_T nexus, then deregisters from TCM.
 */
static void ibmvscsis_drop_tpg(struct se_portal_group *se_tpg)
{
	struct ibmvscsis_tport *tport = container_of(se_tpg,
						     struct ibmvscsis_tport,
						     se_tpg);

	tport->releasing = true;
	tport->enabled = false;

	/*
	 * Release the virtual I_T Nexus for this ibmvscsis TPG
	 */
	ibmvscsis_drop_nexus(tport);
	/*
	 * Deregister the se_tpg from TCM..
	 */
	core_tpg_deregister(se_tpg);
}
3859
/* configfs attribute: expose the driver version string (read-only). */
static ssize_t ibmvscsis_wwn_version_show(struct config_item *item,
					  char *page)
{
	return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION);
}
CONFIGFS_ATTR_RO(ibmvscsis_wwn_, version);
3866
/* NULL-terminated attribute list for the fabric's WWN configfs group. */
static struct configfs_attribute *ibmvscsis_wwn_attrs[] = {
	&ibmvscsis_wwn_attr_version,
	NULL,
};
3871
3872static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item,
3873 char *page)
3874{
3875 struct se_portal_group *se_tpg = to_tpg(item);
3876 struct ibmvscsis_tport *tport = container_of(se_tpg,
3877 struct ibmvscsis_tport,
3878 se_tpg);
3879
3880 return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0);
3881}
3882
3883static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
3884 const char *page, size_t count)
3885{
3886 struct se_portal_group *se_tpg = to_tpg(item);
3887 struct ibmvscsis_tport *tport = container_of(se_tpg,
3888 struct ibmvscsis_tport,
3889 se_tpg);
3890 struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
3891 unsigned long tmp;
3892 int rc;
3893 long lrc;
3894
3895 rc = kstrtoul(page, 0, &tmp);
3896 if (rc < 0) {
3897 pr_err("Unable to extract srpt_tpg_store_enable\n");
3898 return -EINVAL;
3899 }
3900
3901 if ((tmp != 0) && (tmp != 1)) {
3902 pr_err("Illegal value for srpt_tpg_store_enable\n");
3903 return -EINVAL;
3904 }
3905
3906 if (tmp) {
3907 tport->enabled = true;
3908 spin_lock_bh(&vscsi->intr_lock);
3909 lrc = ibmvscsis_enable_change_state(vscsi);
3910 if (lrc)
3911 pr_err("enable_change_state failed, rc %ld state %d\n",
3912 lrc, vscsi->state);
3913 spin_unlock_bh(&vscsi->intr_lock);
3914 } else {
3915 tport->enabled = false;
3916 }
3917
3918 pr_debug("tpg_enable_store, state %d\n", vscsi->state);
3919
3920 return count;
3921}
3922CONFIGFS_ATTR(ibmvscsis_tpg_, enable);
3923
/* NULL-terminated attribute list for the per-TPG configfs group. */
static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
	&ibmvscsis_tpg_attr_enable,
	NULL,
};
3928
/* Fabric operations registered with the target core (TCM). */
static const struct target_core_fabric_ops ibmvscsis_ops = {
	.module				= THIS_MODULE,
	.name				= "ibmvscsis",
	.get_fabric_name		= ibmvscsis_get_fabric_name,
	.tpg_get_wwn			= ibmvscsis_get_fabric_wwn,
	.tpg_get_tag			= ibmvscsis_get_tag,
	.tpg_get_default_depth		= ibmvscsis_get_default_depth,
	.tpg_check_demo_mode		= ibmvscsis_check_true,
	.tpg_check_demo_mode_cache	= ibmvscsis_check_true,
	.tpg_check_demo_mode_write_protect = ibmvscsis_check_false,
	.tpg_check_prod_mode_write_protect = ibmvscsis_check_false,
	.tpg_get_inst_index		= ibmvscsis_tpg_get_inst_index,
	.check_stop_free		= ibmvscsis_check_stop_free,
	.release_cmd			= ibmvscsis_release_cmd,
	.sess_get_index			= ibmvscsis_sess_get_index,
	.write_pending			= ibmvscsis_write_pending,
	.write_pending_status		= ibmvscsis_write_pending_status,
	.set_default_node_attributes	= ibmvscsis_set_default_node_attrs,
	.get_cmd_state			= ibmvscsis_get_cmd_state,
	.queue_data_in			= ibmvscsis_queue_data_in,
	.queue_status			= ibmvscsis_queue_status,
	.queue_tm_rsp			= ibmvscsis_queue_tm_rsp,
	.aborted_task			= ibmvscsis_aborted_task,
	/*
	 * Setup function pointers for logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= ibmvscsis_make_tport,
	.fabric_drop_wwn		= ibmvscsis_drop_tport,
	.fabric_make_tpg		= ibmvscsis_make_tpg,
	.fabric_drop_tpg		= ibmvscsis_drop_tpg,

	.tfc_wwn_attrs			= ibmvscsis_wwn_attrs,
	.tfc_tpg_base_attrs		= ibmvscsis_tpg_attrs,
};
3963
3964static void ibmvscsis_dev_release(struct device *dev) {};
3965
/* Class-level attributes: none, terminated list only. */
static struct class_attribute ibmvscsis_class_attrs[] = {
	__ATTR_NULL,
};
3969
/* Read-only sysfs device attributes backed by the *_show helpers above. */
static struct device_attribute dev_attr_system_id =
	__ATTR(system_id, S_IRUGO, system_id_show, NULL);

static struct device_attribute dev_attr_partition_number =
	__ATTR(partition_number, S_IRUGO, partition_number_show, NULL);

static struct device_attribute dev_attr_unit_address =
	__ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
3978
3979static struct attribute *ibmvscsis_dev_attrs[] = {
3980 &dev_attr_system_id.attr,
3981 &dev_attr_partition_number.attr,
3982 &dev_attr_unit_address.attr,
3983};
3984ATTRIBUTE_GROUPS(ibmvscsis_dev);
3985
/* Device class under /sys/class/ibmvscsis carrying the attributes above. */
static struct class ibmvscsis_class = {
	.name		= "ibmvscsis",
	.dev_release	= ibmvscsis_dev_release,
	.class_attrs	= ibmvscsis_class_attrs,
	.dev_groups	= ibmvscsis_dev_groups,
};
3992
/* VIO device IDs this driver binds to; empty entry terminates the table. */
static struct vio_device_id ibmvscsis_device_table[] = {
	{ "v-scsi-host", "IBM,v-scsi-host" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvscsis_device_table);
3998
/* VIO bus driver glue: probe/remove entry points for matched devices. */
static struct vio_driver ibmvscsis_driver = {
	.name = "ibmvscsis",
	.id_table = ibmvscsis_device_table,
	.probe = ibmvscsis_probe,
	.remove = ibmvscsis_remove,
};
4005
4006/*
4007 * ibmvscsis_init() - Kernel Module initialization
4008 *
4009 * Note: vio_register_driver() registers callback functions, and at least one
4010 * of those callback functions calls TCM - Linux IO Target Subsystem, thus
4011 * the SCSI Target template must be registered before vio_register_driver()
4012 * is called.
4013 */
4014static int __init ibmvscsis_init(void)
4015{
4016 int rc = 0;
4017
4018 rc = ibmvscsis_get_system_info();
4019 if (rc) {
4020 pr_err("rc %d from get_system_info\n", rc);
4021 goto out;
4022 }
4023
4024 rc = class_register(&ibmvscsis_class);
4025 if (rc) {
4026 pr_err("failed class register\n");
4027 goto out;
4028 }
4029
4030 rc = target_register_template(&ibmvscsis_ops);
4031 if (rc) {
4032 pr_err("rc %d from target_register_template\n", rc);
4033 goto unregister_class;
4034 }
4035
4036 rc = vio_register_driver(&ibmvscsis_driver);
4037 if (rc) {
4038 pr_err("rc %d from vio_register_driver\n", rc);
4039 goto unregister_target;
4040 }
4041
4042 return 0;
4043
4044unregister_target:
4045 target_unregister_template(&ibmvscsis_ops);
4046unregister_class:
4047 class_unregister(&ibmvscsis_class);
4048out:
4049 return rc;
4050}
4051
/*
 * Module unload: unwind ibmvscsis_init() in reverse - driver first so no
 * new probes occur, then the TCM template, then the class.
 */
static void __exit ibmvscsis_exit(void)
{
	pr_info("Unregister IBM virtual SCSI host driver\n");
	vio_unregister_driver(&ibmvscsis_driver);
	target_unregister_template(&ibmvscsis_ops);
	class_unregister(&ibmvscsis_class);
}
4059
4060MODULE_DESCRIPTION("IBMVSCSIS fabric driver");
4061MODULE_AUTHOR("Bryant G. Ly and Michael Cyr");
4062MODULE_LICENSE("GPL");
4063MODULE_VERSION(IBMVSCSIS_VERSION);
4064module_init(ibmvscsis_init);
4065module_exit(ibmvscsis_exit);