[S390] more workqueue fixes.

Convert the tape and cio workqueue users to the work_struct based API:
work handlers now take a struct work_struct pointer and recover their
context with container_of() instead of a void * data argument,
INIT_WORK() loses its data parameter, and the tape_dnr "start next
request later" work item becomes a struct delayed_work scheduled with
schedule_delayed_work(). To make container_of() possible, struct
tape_blk_data gains a back pointer to its tape_device and struct
ccw_device_private gains back pointers to its ccw_device and
subchannel.
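
In outline, each conversion follows this shape (sketched from the
tape_34xx and tape_core hunks below; the cio handlers are converted
the same way):

  /* old API: the handler got the data pointer registered with INIT_WORK() */
  static void tape_34xx_work_handler(void *data);
  INIT_WORK(&p->work, tape_34xx_work_handler, p);

  /* new API: the handler gets the work_struct itself and recovers its
   * context with container_of(); INIT_WORK() carries no data pointer */
  static void tape_34xx_work_handler(struct work_struct *work)
  {
          struct tape_34xx_work *p =
                  container_of(work, struct tape_34xx_work, work);
          /* ... dispatch on p->op for p->device as before ... */
  }
  INIT_WORK(&p->work, tape_34xx_work_handler);

  /* delayed work items get their own type and an explicit delay argument
   * (0 = schedule as soon as possible) */
  INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);
  schedule_delayed_work(&device->tape_dnr, 0);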

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 1f4c899..c9f1c4c 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -179,6 +179,7 @@
 /* Block Frontend Data */
 struct tape_blk_data
 {
+	struct tape_device *	device;
 	/* Block device request queue. */
 	request_queue_t *	request_queue;
 	spinlock_t		request_queue_lock;
@@ -240,7 +241,7 @@
 #endif
 
 	/* Function to start or stop the next request later. */
-	struct work_struct		tape_dnr;
+	struct delayed_work		tape_dnr;
 };
 
 /* Externals from tape_core.c */
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 7b95dab..e765875 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -95,6 +95,12 @@
 	return rc;
 }
 
+struct tape_34xx_work {
+	struct tape_device	*device;
+	enum tape_op		 op;
+	struct work_struct	 work;
+};
+
 /*
  * These functions are currently used only to schedule a medium_sense for
  * later execution. This is because we get an interrupt whenever a medium
@@ -103,13 +109,10 @@
  * interrupt handler.
  */
 static void
-tape_34xx_work_handler(void *data)
+tape_34xx_work_handler(struct work_struct *work)
 {
-	struct {
-		struct tape_device	*device;
-		enum tape_op		 op;
-		struct work_struct	 work;
-	} *p = data;
+	struct tape_34xx_work *p =
+		container_of(work, struct tape_34xx_work, work);
 
 	switch(p->op) {
 		case TO_MSEN:
@@ -126,17 +129,13 @@
 static int
 tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
 {
-	struct {
-		struct tape_device	*device;
-		enum tape_op		 op;
-		struct work_struct	 work;
-	} *p;
+	struct tape_34xx_work *p;
 
 	if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
 		return -ENOMEM;
 
 	memset(p, 0, sizeof(*p));
-	INIT_WORK(&p->work, tape_34xx_work_handler, p);
+	INIT_WORK(&p->work, tape_34xx_work_handler);
 
 	p->device = tape_get_device_reference(device);
 	p->op     = op;
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 928cbef..9df912f 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -236,9 +236,10 @@
 };
 
 static void
-tape_3590_work_handler(void *data)
+tape_3590_work_handler(struct work_struct *work)
 {
-	struct work_handler_data *p = data;
+	struct work_handler_data *p =
+		container_of(work, struct work_handler_data, work);
 
 	switch (p->op) {
 	case TO_MSEN:
@@ -263,7 +264,7 @@
 	if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
 		return -ENOMEM;
 
-	INIT_WORK(&p->work, tape_3590_work_handler, p);
+	INIT_WORK(&p->work, tape_3590_work_handler);
 
 	p->device = tape_get_device_reference(device);
 	p->op = op;
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 3225fcd..c8a89b3 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -15,6 +15,7 @@
 #include <linux/blkdev.h>
 #include <linux/interrupt.h>
 #include <linux/buffer_head.h>
+#include <linux/kernel.h>
 
 #include <asm/debug.h>
 
@@ -143,7 +144,8 @@
  * queue.
  */
 static void
-tapeblock_requeue(void *data) {
+tapeblock_requeue(struct work_struct *work) {
+	struct tape_blk_data *	blkdat;
 	struct tape_device *	device;
 	request_queue_t *	queue;
 	int			nr_queued;
@@ -151,7 +153,8 @@
 	struct list_head *	l;
 	int			rc;
 
-	device = (struct tape_device *) data;
+	blkdat = container_of(work, struct tape_blk_data, requeue_task);
+	device = blkdat->device;
 	if (!device)
 		return;
 
@@ -212,6 +215,7 @@
 	int			rc;
 
 	blkdat = &device->blk_data;
+	blkdat->device = device;
 	spin_lock_init(&blkdat->request_queue_lock);
 	atomic_set(&blkdat->requeue_scheduled, 0);
 
@@ -255,8 +259,8 @@
 
 	add_disk(disk);
 
-	INIT_WORK(&blkdat->requeue_task, tapeblock_requeue,
-		tape_get_device_reference(device));
+	tape_get_device_reference(device);
+	INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);
 
 	return 0;
 
@@ -271,7 +275,7 @@
 tapeblock_cleanup_device(struct tape_device *device)
 {
 	flush_scheduled_work();
-	device->blk_data.requeue_task.data = tape_put_device(device);
+	tape_put_device(device);
 
 	if (!device->blk_data.disk) {
 		PRINT_ERR("(%s): No gendisk to clean up!\n",
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 2826aed..c6c2e91 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -28,7 +28,7 @@
 #define PRINTK_HEADER "TAPE_CORE: "
 
 static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
-static void tape_delayed_next_request(void * data);
+static void tape_delayed_next_request(struct work_struct *);
 
 /*
  * One list to contain all tape devices of all disciplines, so
@@ -272,7 +272,7 @@
 				return 0;
 			case -EBUSY:
 				request->status	= TAPE_REQUEST_CANCEL;
-				schedule_work(&device->tape_dnr);
+				schedule_delayed_work(&device->tape_dnr, 0);
 				return 0;
 			case -ENODEV:
 				DBF_EXCEPTION(2, "device gone, retry\n");
@@ -470,7 +470,7 @@
 	*device->modeset_byte = 0;
 	device->first_minor = -1;
 	atomic_set(&device->ref_count, 1);
-	INIT_WORK(&device->tape_dnr, tape_delayed_next_request, device);
+	INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);
 
 	return device;
 }
@@ -724,7 +724,7 @@
 	} else if (rc == -EBUSY) {
 		/* The common I/O subsystem is currently busy. Retry later. */
 		request->status = TAPE_REQUEST_QUEUED;
-		schedule_work(&device->tape_dnr);
+		schedule_delayed_work(&device->tape_dnr, 0);
 		rc = 0;
 	} else {
 		/* Start failed. Remove request and indicate failure. */
@@ -790,11 +790,11 @@
 }
 
 static void
-tape_delayed_next_request(void *data)
+tape_delayed_next_request(struct work_struct *work)
 {
-	struct tape_device *	device;
+	struct tape_device *device =
+		container_of(work, struct tape_device, tape_dnr.work);
 
-	device = (struct tape_device *) data;
 	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	__tape_start_next_request(device);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 9ff064e..dfd5462 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -73,6 +73,8 @@
 }  __attribute__ ((packed,aligned(4)));
 
 struct ccw_device_private {
+	struct ccw_device *cdev;
+	struct subchannel *sch;
 	int state;		/* device state */
 	atomic_t onoff;
 	unsigned long registered;
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d3d3716..0f60462 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -585,12 +585,13 @@
 }
 
 static void
-ccw_device_add_changed(void *data)
+ccw_device_add_changed(struct work_struct *work)
 {
-
+	struct ccw_device_private *priv;
 	struct ccw_device *cdev;
 
-	cdev = data;
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	if (device_add(&cdev->dev)) {
 		put_device(&cdev->dev);
 		return;
@@ -605,13 +606,15 @@
 extern int css_get_ssd_info(struct subchannel *sch);
 
 void
-ccw_device_do_unreg_rereg(void *data)
+ccw_device_do_unreg_rereg(struct work_struct *work)
 {
+	struct ccw_device_private *priv;
 	struct ccw_device *cdev;
 	struct subchannel *sch;
 	int need_rename;
 
-	cdev = data;
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	sch = to_subchannel(cdev->dev.parent);
 	if (cdev->private->dev_id.devno != sch->schib.pmcw.dev) {
 		/*
@@ -659,7 +662,7 @@
 		snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
 			  sch->schid.ssid, sch->schib.pmcw.dev);
 	PREPARE_WORK(&cdev->private->kick_work,
-		     ccw_device_add_changed, cdev);
+		     ccw_device_add_changed);
 	queue_work(ccw_device_work, &cdev->private->kick_work);
 }
 
@@ -677,14 +680,16 @@
  * Register recognized device.
  */
 static void
-io_subchannel_register(void *data)
+io_subchannel_register(struct work_struct *work)
 {
+	struct ccw_device_private *priv;
 	struct ccw_device *cdev;
 	struct subchannel *sch;
 	int ret;
 	unsigned long flags;
 
-	cdev = data;
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	sch = to_subchannel(cdev->dev.parent);
 
 	/*
@@ -734,11 +739,14 @@
 }
 
 void
-ccw_device_call_sch_unregister(void *data)
+ccw_device_call_sch_unregister(struct work_struct *work)
 {
-	struct ccw_device *cdev = data;
+	struct ccw_device_private *priv;
+	struct ccw_device *cdev;
 	struct subchannel *sch;
 
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	sch = to_subchannel(cdev->dev.parent);
 	css_sch_device_unregister(sch);
 	/* Reset intparm to zeroes. */
@@ -768,7 +776,7 @@
 			break;
 		sch = to_subchannel(cdev->dev.parent);
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_call_sch_unregister, cdev);
+			     ccw_device_call_sch_unregister);
 		queue_work(slow_path_wq, &cdev->private->kick_work);
 		if (atomic_dec_and_test(&ccw_device_init_count))
 			wake_up(&ccw_device_init_wq);
@@ -783,7 +791,7 @@
 		if (!get_device(&cdev->dev))
 			break;
 		PREPARE_WORK(&cdev->private->kick_work,
-			     io_subchannel_register, cdev);
+			     io_subchannel_register);
 		queue_work(slow_path_wq, &cdev->private->kick_work);
 		break;
 	}
@@ -865,6 +873,7 @@
 		kfree(cdev);
 		return -ENOMEM;
 	}
+	cdev->private->cdev = cdev;
 	atomic_set(&cdev->private->onoff, 0);
 	cdev->dev.parent = &sch->dev;
 	cdev->dev.release = ccw_device_release;
@@ -890,12 +899,13 @@
 	return rc;
 }
 
-static void
-ccw_device_unregister(void *data)
+static void ccw_device_unregister(struct work_struct *work)
 {
+	struct ccw_device_private *priv;
 	struct ccw_device *cdev;
 
-	cdev = (struct ccw_device *)data;
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	if (test_and_clear_bit(1, &cdev->private->registered))
 		device_unregister(&cdev->dev);
 	put_device(&cdev->dev);
@@ -921,7 +931,7 @@
 	 */
 	if (get_device(&cdev->dev)) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_unregister, cdev);
+			     ccw_device_unregister);
 		queue_work(ccw_device_work, &cdev->private->kick_work);
 	}
 	return 0;
@@ -1048,6 +1058,7 @@
 	memset(&console_cdev, 0, sizeof(struct ccw_device));
 	memset(&console_private, 0, sizeof(struct ccw_device_private));
 	console_cdev.private = &console_private;
+	console_private.cdev = &console_cdev;
 	ret = ccw_device_console_enable(&console_cdev, sch);
 	if (ret) {
 		cio_release_console();
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 9233b5c..d5fe95e 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -78,8 +78,8 @@
 
 int ccw_device_cancel_halt_clear(struct ccw_device *);
 
-void ccw_device_do_unreg_rereg(void *);
-void ccw_device_call_sch_unregister(void *);
+void ccw_device_do_unreg_rereg(struct work_struct *);
+void ccw_device_call_sch_unregister(struct work_struct *);
 
 int ccw_device_recognition(struct ccw_device *);
 int ccw_device_online(struct ccw_device *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 09c7672..0f0301c 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -194,7 +194,7 @@
 	    cdev->id.dev_model != cdev->private->senseid.dev_model ||
 	    cdev->private->dev_id.devno != sch->schib.pmcw.dev) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_do_unreg_rereg, cdev);
+			     ccw_device_do_unreg_rereg);
 		queue_work(ccw_device_work, &cdev->private->kick_work);
 		return 0;
 	}
@@ -329,19 +329,21 @@
 }
 
 static void
-ccw_device_oper_notify(void *data)
+ccw_device_oper_notify(struct work_struct *work)
 {
+	struct ccw_device_private *priv;
 	struct ccw_device *cdev;
 	struct subchannel *sch;
 	int ret;
 
-	cdev = data;
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	sch = to_subchannel(cdev->dev.parent);
 	ret = (sch->driver && sch->driver->notify) ?
 		sch->driver->notify(&sch->dev, CIO_OPER) : 0;
 	if (!ret)
 		/* Driver doesn't want device back. */
-		ccw_device_do_unreg_rereg(cdev);
+		ccw_device_do_unreg_rereg(work);
 	else {
 		/* Reenable channel measurements, if needed. */
 		cmf_reenable(cdev);
@@ -377,8 +379,7 @@
 
 	if (cdev->private->flags.donotify) {
 		cdev->private->flags.donotify = 0;
-		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
-			     cdev);
+		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 	}
 	wake_up(&cdev->private->wait_q);
@@ -528,13 +529,15 @@
 
 
 static void
-ccw_device_nopath_notify(void *data)
+ccw_device_nopath_notify(struct work_struct *work)
 {
+	struct ccw_device_private *priv;
 	struct ccw_device *cdev;
 	struct subchannel *sch;
 	int ret;
 
-	cdev = data;
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	sch = to_subchannel(cdev->dev.parent);
 	/* Extra sanity. */
 	if (sch->lpm)
@@ -547,8 +550,7 @@
 			cio_disable_subchannel(sch);
 			if (get_device(&cdev->dev)) {
 				PREPARE_WORK(&cdev->private->kick_work,
-					     ccw_device_call_sch_unregister,
-					     cdev);
+					     ccw_device_call_sch_unregister);
 				queue_work(ccw_device_work,
 					   &cdev->private->kick_work);
 			} else
@@ -607,7 +609,7 @@
 		/* Reset oper notify indication after verify error. */
 		cdev->private->flags.donotify = 0;
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, cdev);
+			     ccw_device_nopath_notify);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
 		break;
@@ -738,7 +740,7 @@
 	sch = to_subchannel(cdev->dev.parent);
 	if (get_device(&cdev->dev)) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_call_sch_unregister, cdev);
+			     ccw_device_call_sch_unregister);
 		queue_work(ccw_device_work, &cdev->private->kick_work);
 	}
 	wake_up(&cdev->private->wait_q);
@@ -769,7 +771,7 @@
 	}
 	if (get_device(&cdev->dev)) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_call_sch_unregister, cdev);
+			     ccw_device_call_sch_unregister);
 		queue_work(ccw_device_work, &cdev->private->kick_work);
 	}
 	wake_up(&cdev->private->wait_q);
@@ -874,7 +876,7 @@
 		sch = to_subchannel(cdev->dev.parent);
 		if (!sch->lpm) {
 			PREPARE_WORK(&cdev->private->kick_work,
-				     ccw_device_nopath_notify, cdev);
+				     ccw_device_nopath_notify);
 			queue_work(ccw_device_notify_work,
 				   &cdev->private->kick_work);
 		} else
@@ -969,7 +971,7 @@
 			      ERR_PTR(-EIO));
 	if (!sch->lpm) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, cdev);
+			     ccw_device_nopath_notify);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 	} else if (cdev->private->flags.doverify)
 		/* Start delayed path verification. */
@@ -992,7 +994,7 @@
 		sch = to_subchannel(cdev->dev.parent);
 		if (!sch->lpm) {
 			PREPARE_WORK(&cdev->private->kick_work,
-				     ccw_device_nopath_notify, cdev);
+				     ccw_device_nopath_notify);
 			queue_work(ccw_device_notify_work,
 				   &cdev->private->kick_work);
 		} else
@@ -1021,7 +1023,7 @@
 	if (ret == -ENODEV) {
 		if (!sch->lpm) {
 			PREPARE_WORK(&cdev->private->kick_work,
-				     ccw_device_nopath_notify, cdev);
+				     ccw_device_nopath_notify);
 			queue_work(ccw_device_notify_work,
 				   &cdev->private->kick_work);
 		} else
@@ -1033,7 +1035,7 @@
 			      ERR_PTR(-EIO));
 	if (!sch->lpm) {
 		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_nopath_notify, cdev);
+			     ccw_device_nopath_notify);
 		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
 	} else
 		/* Start delayed path verification. */
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 8d5fa1b..d066dbf 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -2045,11 +2045,13 @@
 }
 
 static void
-qdio_call_shutdown(void *data)
+qdio_call_shutdown(struct work_struct *work)
 {
+	struct ccw_device_private *priv;
 	struct ccw_device *cdev;
 
-	cdev = (struct ccw_device *)data;
+	priv = container_of(work, struct ccw_device_private, kick_work);
+	cdev = priv->cdev;
 	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 	put_device(&cdev->dev);
 }
@@ -2091,7 +2093,7 @@
 		if (get_device(&cdev->dev)) {
 			/* Can't call shutdown from interrupt context. */
 			PREPARE_WORK(&cdev->private->kick_work,
-				     qdio_call_shutdown, (void *)cdev);
+				     qdio_call_shutdown);
 			queue_work(ccw_device_work, &cdev->private->kick_work);
 		}
 		break;