s390/scm_blk: suspend writes

Stop writing to scm after certain error conditions, such as a concurrent
firmware upgrade. Resume normal operation once scm_blk_set_available is
called (due to an scm availability notification).
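
In short, this introduces a per-device state (SCM_OPER vs.
SCM_WR_PROHIBIT) and gates write requests on it. A minimal sketch of the
added logic, mirroring the code in the diff below:

	enum {SCM_OPER, SCM_WR_PROHIBIT} state;   /* new member of struct scm_blk_dev */

	/* Writes are rejected while the increment is write-protected;
	 * reads are always allowed.
	 */
	static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
	{
		return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
	}

The state switches to SCM_WR_PROHIBIT when a request fails with -EIO and
the response block reports EQC_WR_PROHIBIT, and back to SCM_OPER when
scm_blk_set_available() is called.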

Reviewed-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h
index a4a1ea4..78141be 100644
--- a/arch/s390/include/asm/eadm.h
+++ b/arch/s390/include/asm/eadm.h
@@ -34,6 +34,8 @@
 	u32 reserved[4];
 } __packed;
 
+#define EQC_WR_PROHIBIT 22
+
 struct msb {
 	u8 fmt:4;
 	u8 oc:4;
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index d9c7e94..5ac9c93 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -135,6 +135,11 @@
 	.release = scm_release,
 };
 
+static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
+{
+	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
+}
+
 static void scm_request_prepare(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
@@ -222,6 +227,10 @@
 		if (req->cmd_type != REQ_TYPE_FS)
 			continue;
 
+		if (!scm_permit_request(bdev, req)) {
+			scm_ensure_queue_restart(bdev);
+			return;
+		}
 		scmrq = scm_request_fetch();
 		if (!scmrq) {
 			SCM_LOG(5, "no request");
@@ -285,6 +294,38 @@
 	tasklet_hi_schedule(&bdev->tasklet);
 }
 
+static void scm_blk_handle_error(struct scm_request *scmrq)
+{
+	struct scm_blk_dev *bdev = scmrq->bdev;
+	unsigned long flags;
+
+	if (scmrq->error != -EIO)
+		goto restart;
+
+	/* For -EIO the response block is valid. */
+	switch (scmrq->aob->response.eqc) {
+	case EQC_WR_PROHIBIT:
+		spin_lock_irqsave(&bdev->lock, flags);
+		if (bdev->state != SCM_WR_PROHIBIT)
+			pr_info("%lu: Write access to the SCM increment is suspended\n",
+				(unsigned long) bdev->scmdev->address);
+		bdev->state = SCM_WR_PROHIBIT;
+		spin_unlock_irqrestore(&bdev->lock, flags);
+		goto requeue;
+	default:
+		break;
+	}
+
+restart:
+	if (!scm_start_aob(scmrq->aob))
+		return;
+
+requeue:
+	spin_lock_irqsave(&bdev->rq_lock, flags);
+	scm_request_requeue(scmrq);
+	spin_unlock_irqrestore(&bdev->rq_lock, flags);
+}
+
 static void scm_blk_tasklet(struct scm_blk_dev *bdev)
 {
 	struct scm_request *scmrq;
@@ -298,11 +339,8 @@
 		spin_unlock_irqrestore(&bdev->lock, flags);
 
 		if (scmrq->error && scmrq->retries-- > 0) {
-			if (scm_start_aob(scmrq->aob)) {
-				spin_lock_irqsave(&bdev->rq_lock, flags);
-				scm_request_requeue(scmrq);
-				spin_unlock_irqrestore(&bdev->rq_lock, flags);
-			}
+			scm_blk_handle_error(scmrq);
+
 			/* Request restarted or requeued, handle next. */
 			spin_lock_irqsave(&bdev->lock, flags);
 			continue;
@@ -336,6 +374,7 @@
 	}
 
 	bdev->scmdev = scmdev;
+	bdev->state = SCM_OPER;
 	spin_lock_init(&bdev->rq_lock);
 	spin_lock_init(&bdev->lock);
 	INIT_LIST_HEAD(&bdev->finished_requests);
@@ -400,6 +439,18 @@
 	put_disk(bdev->gendisk);
 }
 
+void scm_blk_set_available(struct scm_blk_dev *bdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&bdev->lock, flags);
+	if (bdev->state == SCM_WR_PROHIBIT)
+		pr_info("%lu: Write access to the SCM increment is restored\n",
+			(unsigned long) bdev->scmdev->address);
+	bdev->state = SCM_OPER;
+	spin_unlock_irqrestore(&bdev->lock, flags);
+}
+
 static int __init scm_blk_init(void)
 {
 	int ret = -EINVAL;
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 3c1ccf4..8b387b3 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -21,6 +21,7 @@
 	spinlock_t rq_lock;	/* guard the request queue */
 	spinlock_t lock;	/* guard the rest of the blockdev */
 	atomic_t queued_reqs;
+	enum {SCM_OPER, SCM_WR_PROHIBIT} state;
 	struct list_head finished_requests;
 #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
 	struct list_head cluster_list;
@@ -48,6 +49,7 @@
 
 int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
 void scm_blk_dev_cleanup(struct scm_blk_dev *);
+void scm_blk_set_available(struct scm_blk_dev *);
 void scm_blk_irq(struct scm_device *, void *, int);
 
 void scm_request_finish(struct scm_request *);