s390/scm_blk: suspend writes

Stop writing to SCM after certain error conditions, such as a
concurrent firmware upgrade. Resume normal operation once
scm_blk_set_available is called (due to an SCM availability
notification).
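
The write gating relies on a per-device state kept in scm_blk.h, which
is not part of the hunks below. A minimal sketch of the assumed header
side (the enum name and member placement are assumptions; only
SCM_OPER, SCM_WR_PROHIBIT, bdev->state and scm_blk_set_available() are
taken from the code below):

	/* sketch: assumed additions to drivers/s390/block/scm_blk.h */
	enum sd_state { SCM_OPER, SCM_WR_PROHIBIT };

	struct scm_blk_dev {
		/* existing members (scmdev, lock, rq_lock, ...) omitted */
		enum sd_state state;	/* protected by bdev->lock */
	};

	void scm_blk_set_available(struct scm_blk_dev *bdev);

scm_blk_set_available() is expected to be called from the bus driver's
availability notification handler to lift the write suspension again.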

Reviewed-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index d9c7e94..5ac9c93 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -135,6 +135,11 @@
 	.release = scm_release,
 };
 
+static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
+{
+	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
+}
+
 static void scm_request_prepare(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
@@ -222,6 +227,10 @@
 		if (req->cmd_type != REQ_TYPE_FS)
 			continue;
 
+		if (!scm_permit_request(bdev, req)) {
+			scm_ensure_queue_restart(bdev);
+			return;
+		}
 		scmrq = scm_request_fetch();
 		if (!scmrq) {
 			SCM_LOG(5, "no request");
@@ -285,6 +294,38 @@
 	tasklet_hi_schedule(&bdev->tasklet);
 }
 
+static void scm_blk_handle_error(struct scm_request *scmrq)
+{
+	struct scm_blk_dev *bdev = scmrq->bdev;
+	unsigned long flags;
+
+	if (scmrq->error != -EIO)
+		goto restart;
+
+	/* For -EIO the response block is valid. */
+	switch (scmrq->aob->response.eqc) {
+	case EQC_WR_PROHIBIT:
+		spin_lock_irqsave(&bdev->lock, flags);
+		if (bdev->state != SCM_WR_PROHIBIT)
+			pr_info("%lu: Write access to the SCM increment is suspended\n",
+				(unsigned long) bdev->scmdev->address);
+		bdev->state = SCM_WR_PROHIBIT;
+		spin_unlock_irqrestore(&bdev->lock, flags);
+		goto requeue;
+	default:
+		break;
+	}
+
+restart:
+	if (!scm_start_aob(scmrq->aob))
+		return;
+
+requeue:
+	spin_lock_irqsave(&bdev->rq_lock, flags);
+	scm_request_requeue(scmrq);
+	spin_unlock_irqrestore(&bdev->rq_lock, flags);
+}
+
 static void scm_blk_tasklet(struct scm_blk_dev *bdev)
 {
 	struct scm_request *scmrq;
@@ -298,11 +339,8 @@
 		spin_unlock_irqrestore(&bdev->lock, flags);
 
 		if (scmrq->error && scmrq->retries-- > 0) {
-			if (scm_start_aob(scmrq->aob)) {
-				spin_lock_irqsave(&bdev->rq_lock, flags);
-				scm_request_requeue(scmrq);
-				spin_unlock_irqrestore(&bdev->rq_lock, flags);
-			}
+			scm_blk_handle_error(scmrq);
+
 			/* Request restarted or requeued, handle next. */
 			spin_lock_irqsave(&bdev->lock, flags);
 			continue;
@@ -336,6 +374,7 @@
 	}
 
 	bdev->scmdev = scmdev;
+	bdev->state = SCM_OPER;
 	spin_lock_init(&bdev->rq_lock);
 	spin_lock_init(&bdev->lock);
 	INIT_LIST_HEAD(&bdev->finished_requests);
@@ -400,6 +439,18 @@
 	put_disk(bdev->gendisk);
 }
 
+void scm_blk_set_available(struct scm_blk_dev *bdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&bdev->lock, flags);
+	if (bdev->state == SCM_WR_PROHIBIT)
+		pr_info("%lu: Write access to the SCM increment is restored\n",
+			(unsigned long) bdev->scmdev->address);
+	bdev->state = SCM_OPER;
+	spin_unlock_irqrestore(&bdev->lock, flags);
+}
+
 static int __init scm_blk_init(void)
 {
 	int ret = -EINVAL;