blob: 4951aa82e9f5c30fd676d3978900f09924588322 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * File...........: linux/drivers/s390/block/dasd.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
Stefan Haberlandd41dd122009-06-16 10:30:25 +02008 * Copyright IBM Corp. 1999, 2009
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 */
10
Stefan Haberlandfc19f382009-03-26 15:23:49 +010011#define KMSG_COMPONENT "dasd"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
Linus Torvalds1da177e2005-04-16 15:20:36 -070014#include <linux/kmod.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/ctype.h>
18#include <linux/major.h>
19#include <linux/slab.h>
20#include <linux/buffer_head.h>
Christoph Hellwiga885c8c2006-01-08 01:02:50 -080021#include <linux/hdreg.h>
Cornelia Huckf3445a12009-04-14 15:36:23 +020022#include <linux/async.h>
Stefan Haberland9eb25122010-02-26 22:37:46 +010023#include <linux/mutex.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024
25#include <asm/ccwdev.h>
26#include <asm/ebcdic.h>
27#include <asm/idals.h>
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +010028#include <asm/itcw.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029
30/* This is ugly... */
31#define PRINTK_HEADER "dasd:"
32
33#include "dasd_int.h"
34/*
35 * SECTION: Constant definitions to be used within this file
36 */
37#define DASD_CHANQ_MAX_SIZE 4
38
39/*
40 * SECTION: exported variables of dasd.c
41 */
/* Debug feature area used by all driver-global DBF_* event macros. */
debug_info_t *dasd_debug_area;
/* Set by the DIAG discipline module when loaded; NULL otherwise. */
struct dasd_discipline *dasd_diag_discipline_pointer;
/* Interrupt handler; presumably defined later in this file — referenced by disciplines. */
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
/* Module metadata. */
MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");
51
52/*
53 * SECTION: prototypes for static functions of dasd.c
54 */
/* Block-layer request queue setup/teardown. */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
/* Bottom halves for the per-device and per-block ccw queues. */
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
/* Deferred work run by the kernel event daemon. */
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
/* Timer callbacks for request timeouts. */
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
Linus Torvalds1da177e2005-04-16 15:20:36 -070068
69/*
70 * SECTION: Operations on the device structure.
71 */
/* Woken in dasd_change_state whenever a device reaches its target state. */
static wait_queue_head_t dasd_init_waitq;
/* Woken when flushed/terminated requests complete — TODO confirm users below. */
static wait_queue_head_t dasd_flush_wq;
/* Presumably used by the generic sleep-on request helpers — confirm. */
static wait_queue_head_t generic_waitq;
Linus Torvalds1da177e2005-04-16 15:20:36 -070075
76/*
77 * Allocate memory for a new device structure.
78 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010079struct dasd_device *dasd_alloc_device(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -070080{
81 struct dasd_device *device;
82
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010083 device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
84 if (!device)
Linus Torvalds1da177e2005-04-16 15:20:36 -070085 return ERR_PTR(-ENOMEM);
Linus Torvalds1da177e2005-04-16 15:20:36 -070086
87 /* Get two pages for normal block device operations. */
88 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010089 if (!device->ccw_mem) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070090 kfree(device);
91 return ERR_PTR(-ENOMEM);
92 }
93 /* Get one page for error recovery. */
94 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010095 if (!device->erp_mem) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070096 free_pages((unsigned long) device->ccw_mem, 1);
97 kfree(device);
98 return ERR_PTR(-ENOMEM);
99 }
100
101 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
102 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
103 spin_lock_init(&device->mem_lock);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100104 atomic_set(&device->tasklet_scheduled, 0);
Horst Hummel138c0142006-06-29 14:58:12 +0200105 tasklet_init(&device->tasklet,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100106 (void (*)(unsigned long)) dasd_device_tasklet,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107 (unsigned long) device);
108 INIT_LIST_HEAD(&device->ccw_queue);
109 init_timer(&device->timer);
Stefan Weinhuber48cae882009-02-11 10:37:31 +0100110 device->timer.function = dasd_device_timeout;
111 device->timer.data = (unsigned long) device;
Al Viro4927b3f2006-12-06 19:18:20 +0000112 INIT_WORK(&device->kick_work, do_kick_device);
Stefan Haberlandd41dd122009-06-16 10:30:25 +0200113 INIT_WORK(&device->restore_device, do_restore_device);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700114 device->state = DASD_STATE_NEW;
115 device->target = DASD_STATE_NEW;
Stefan Haberland9eb25122010-02-26 22:37:46 +0100116 mutex_init(&device->state_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117
118 return device;
119}
120
121/*
122 * Free memory of a device structure.
123 */
void dasd_free_device(struct dasd_device *device)
{
	/* Discipline-private data; kfree(NULL) is a no-op. */
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	/* ccw_mem is an order-1 (two page) allocation, see dasd_alloc_device. */
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}
131
132/*
 * Allocate memory for a new block device structure.
134 */
135struct dasd_block *dasd_alloc_block(void)
136{
137 struct dasd_block *block;
138
139 block = kzalloc(sizeof(*block), GFP_ATOMIC);
140 if (!block)
141 return ERR_PTR(-ENOMEM);
142 /* open_count = 0 means device online but not in use */
143 atomic_set(&block->open_count, -1);
144
145 spin_lock_init(&block->request_queue_lock);
146 atomic_set(&block->tasklet_scheduled, 0);
147 tasklet_init(&block->tasklet,
148 (void (*)(unsigned long)) dasd_block_tasklet,
149 (unsigned long) block);
150 INIT_LIST_HEAD(&block->ccw_queue);
151 spin_lock_init(&block->queue_lock);
152 init_timer(&block->timer);
Stefan Weinhuber48cae882009-02-11 10:37:31 +0100153 block->timer.function = dasd_block_timeout;
154 block->timer.data = (unsigned long) block;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100155
156 return block;
157}
158
159/*
 * Free memory of a block structure.
161 */
void dasd_free_block(struct dasd_block *block)
{
	/* The block owns no other allocations at this point. */
	kfree(block);
}
166
167/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168 * Make a new device known to the system.
169 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100170static int dasd_state_new_to_known(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171{
172 int rc;
173
174 /*
Horst Hummel138c0142006-06-29 14:58:12 +0200175 * As long as the device is not in state DASD_STATE_NEW we want to
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176 * keep the reference count > 0.
177 */
178 dasd_get_device(device);
179
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100180 if (device->block) {
181 rc = dasd_alloc_queue(device->block);
182 if (rc) {
183 dasd_put_device(device);
184 return rc;
185 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700187 device->state = DASD_STATE_KNOWN;
188 return 0;
189}
190
191/*
192 * Let the system forget about a device.
193 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100194static int dasd_state_known_to_new(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195{
Stefan Weinhuber20c64462006-03-24 03:15:25 -0800196 /* Disable extended error reporting for this device. */
197 dasd_eer_disable(device);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 /* Forget the discipline information. */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100199 if (device->discipline) {
200 if (device->discipline->uncheck_device)
201 device->discipline->uncheck_device(device);
Peter Oberparleiteraa888612006-02-20 18:28:13 -0800202 module_put(device->discipline->owner);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100203 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204 device->discipline = NULL;
Peter Oberparleiteraa888612006-02-20 18:28:13 -0800205 if (device->base_discipline)
206 module_put(device->base_discipline->owner);
207 device->base_discipline = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208 device->state = DASD_STATE_NEW;
209
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100210 if (device->block)
211 dasd_free_queue(device->block);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212
213 /* Give up reference we took in dasd_state_new_to_known. */
214 dasd_put_device(device);
Horst Hummel8f617012006-08-30 14:33:33 +0200215 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216}
217
218/*
219 * Request the irq line for the device.
220 */
/*
 * KNOWN -> BASIC: allocate the gendisk and the per-device debug area.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	int rc;

	/* Allocate and register gendisk structure. */
	if (device->block) {
		rc = dasd_gendisk_alloc(device->block);
		if (rc)
			return rc;
	}
	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	/* NOTE(review): debug_register() can return NULL; the calls below
	 * presumably tolerate a NULL debug area — confirm against the s390
	 * debug facility before relying on it. */
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}
241
242/*
243 * Release the irq line for the device. Terminate any running i/o.
244 */
/*
 * BASIC -> KNOWN: free the gendisk, flush the device ccw queue and
 * unregister the debug area.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;
	/* Tear down the block half first so no new requests arrive. */
	if (device->block) {
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	/* Terminate/flush everything still queued on the device. */
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);

	/* Emit the trace entry while the debug area still exists. */
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}
265
266/*
267 * Do the initial analysis. The do_analysis function may return
268 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
269 * until the discipline decides to continue the startup sequence
270 * by calling the function dasd_change_state. The eckd disciplines
271 * uses this to start a ccw that detects the format. The completion
272 * interrupt for this detection ccw uses the kernel event daemon to
273 * trigger the call to dasd_change_state. All this is done in the
274 * discipline code, see dasd_eckd.c.
Horst Hummel90f00942006-03-07 21:55:39 -0800275 * After the analysis ccw is done (do_analysis returned 0) the block
276 * device is setup.
277 * In case the analysis returns an error, the device setup is stopped
278 * (a fake disk was already added to allow formatting).
Linus Torvalds1da177e2005-04-16 15:20:36 -0700279 */
/*
 * BASIC -> READY: run the discipline's format analysis, then publish the
 * disk capacity and scan partitions. See the block comment above for the
 * -EAGAIN continuation protocol.
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		/* do_analysis may return -EAGAIN; the discipline then calls
		 * dasd_change_state again once detection has finished. */
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			/* A hard analysis error leaves the device in UNFMT:
			 * a fake disk stays around to allow formatting. */
			if (rc != -EAGAIN)
				device->state = DASD_STATE_UNFMT;
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		/* Partition scan failure rolls the state back to BASIC. */
		rc = dasd_scan_partitions(block);
		if (rc)
			device->state = DASD_STATE_BASIC;
	} else {
		device->state = DASD_STATE_READY;
	}
	return rc;
}
308
309/*
310 * Remove device from block device layer. Destroy dirty buffers.
311 * Forget format information. Check if the target level is basic
312 * and if it is create fake disk for formatting.
313 */
/*
 * READY -> BASIC: flush both ccw queues, destroy partitions and forget
 * the detected format information.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			/* Could not flush: stay READY. */
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_flush_request_queue(block);
		dasd_destroy_partitions(block);
		/* Forget format information; re-detected on the way up. */
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}
334
335/*
Horst Hummel90f00942006-03-07 21:55:39 -0800336 * Back to basic.
337 */
/*
 * UNFMT -> BASIC: nothing to undo, UNFMT only parked the startup sequence.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}
343
344/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345 * Make the device online and schedule the bottom half to start
346 * the requeueing of requests from the linux request queue to the
347 * ccw queue.
348 */
/*
 * READY -> ONLINE: give the discipline a veto, start the block bottom
 * half and announce the change to user space for disk and partitions.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	/* Discipline hook may refuse or prepare the transition. */
	if (device->discipline->ready_to_online) {
		rc = device->discipline->ready_to_online(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		/* Send KOBJ_CHANGE uevents for the disk and every partition
		 * so udev can re-evaluate the now-online device. */
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}
373
374/*
375 * Stop the requeueing of requests again.
376 */
/*
 * ONLINE -> READY: stop requeueing; discipline may veto, then user space
 * is notified for the disk and all partitions.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	/* Discipline hook may refuse or prepare the transition. */
	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_READY;
	if (device->block) {
		/* Tell udev the device left the online state. */
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}
399
400/*
401 * Device startup state changes.
402 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100403static int dasd_increase_state(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404{
405 int rc;
406
407 rc = 0;
408 if (device->state == DASD_STATE_NEW &&
409 device->target >= DASD_STATE_KNOWN)
410 rc = dasd_state_new_to_known(device);
411
412 if (!rc &&
413 device->state == DASD_STATE_KNOWN &&
414 device->target >= DASD_STATE_BASIC)
415 rc = dasd_state_known_to_basic(device);
416
417 if (!rc &&
418 device->state == DASD_STATE_BASIC &&
419 device->target >= DASD_STATE_READY)
420 rc = dasd_state_basic_to_ready(device);
421
422 if (!rc &&
Horst Hummel39ccf952006-04-27 18:40:10 -0700423 device->state == DASD_STATE_UNFMT &&
424 device->target > DASD_STATE_UNFMT)
425 rc = -EPERM;
426
427 if (!rc &&
Linus Torvalds1da177e2005-04-16 15:20:36 -0700428 device->state == DASD_STATE_READY &&
429 device->target >= DASD_STATE_ONLINE)
430 rc = dasd_state_ready_to_online(device);
431
432 return rc;
433}
434
435/*
436 * Device shutdown state changes.
437 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100438static int dasd_decrease_state(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700439{
Horst Hummel8f617012006-08-30 14:33:33 +0200440 int rc;
441
442 rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700443 if (device->state == DASD_STATE_ONLINE &&
444 device->target <= DASD_STATE_READY)
Horst Hummel8f617012006-08-30 14:33:33 +0200445 rc = dasd_state_online_to_ready(device);
Horst Hummel138c0142006-06-29 14:58:12 +0200446
Horst Hummel8f617012006-08-30 14:33:33 +0200447 if (!rc &&
448 device->state == DASD_STATE_READY &&
Linus Torvalds1da177e2005-04-16 15:20:36 -0700449 device->target <= DASD_STATE_BASIC)
Horst Hummel8f617012006-08-30 14:33:33 +0200450 rc = dasd_state_ready_to_basic(device);
Horst Hummel90f00942006-03-07 21:55:39 -0800451
Horst Hummel8f617012006-08-30 14:33:33 +0200452 if (!rc &&
453 device->state == DASD_STATE_UNFMT &&
Horst Hummel90f00942006-03-07 21:55:39 -0800454 device->target <= DASD_STATE_BASIC)
Horst Hummel8f617012006-08-30 14:33:33 +0200455 rc = dasd_state_unfmt_to_basic(device);
Horst Hummel90f00942006-03-07 21:55:39 -0800456
Horst Hummel8f617012006-08-30 14:33:33 +0200457 if (!rc &&
458 device->state == DASD_STATE_BASIC &&
Linus Torvalds1da177e2005-04-16 15:20:36 -0700459 device->target <= DASD_STATE_KNOWN)
Horst Hummel8f617012006-08-30 14:33:33 +0200460 rc = dasd_state_basic_to_known(device);
Horst Hummel138c0142006-06-29 14:58:12 +0200461
Horst Hummel8f617012006-08-30 14:33:33 +0200462 if (!rc &&
463 device->state == DASD_STATE_KNOWN &&
Linus Torvalds1da177e2005-04-16 15:20:36 -0700464 device->target <= DASD_STATE_NEW)
Horst Hummel8f617012006-08-30 14:33:33 +0200465 rc = dasd_state_known_to_new(device);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700466
Horst Hummel8f617012006-08-30 14:33:33 +0200467 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700468}
469
470/*
471 * This is the main startup/shutdown routine.
472 */
/*
 * Main startup/shutdown routine: move device->state towards
 * device->target and notify waiters and user space.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	/* -EAGAIN: the discipline continues asynchronously and triggers
	 * another dasd_change_state call later; keep the target. */
	if (rc == -EAGAIN)
		return;
	/* Any other error stops the transition at the current state. */
	if (rc)
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}
495
496/*
497 * Kick starter for devices that did not complete the startup/shutdown
498 * procedure or were sleeping because of a pending state.
499 * dasd_kick_device will schedule a call do do_kick_device to the kernel
500 * event daemon.
501 */
/*
 * Work item body for dasd_kick_device: run the state machine under the
 * state mutex, then kick the device bottom half.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	/* Serialize against dasd_set_target_state. */
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	/* Drop the reference taken in dasd_kick_device. */
	dasd_put_device(device);
}
511
void dasd_kick_device(struct dasd_device *device)
{
	/* Reference is dropped again in do_kick_device. */
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}
518
519/*
Stefan Haberlandd41dd122009-06-16 10:30:25 +0200520 * dasd_restore_device will schedule a call do do_restore_device to the kernel
521 * event daemon.
522 */
/* Work item body for dasd_restore_device: re-run the ccw driver's
 * power-management restore callback for this device. */
static void do_restore_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  restore_device);
	/* NOTE(review): assumes the ccw driver always provides a restore
	 * callback when this work is queued — confirm against the callers
	 * of dasd_restore_device. */
	device->cdev->drv->restore(device->cdev);
	/* Drop the reference taken in dasd_restore_device. */
	dasd_put_device(device);
}
530
void dasd_restore_device(struct dasd_device *device)
{
	/* Reference is dropped again in do_restore_device. */
	dasd_get_device(device);
	/* queue call to dasd_restore_device to the kernel event daemon. */
	schedule_work(&device->restore_device);
}
537
538/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700539 * Set the target state for a device and starts the state change.
540 */
/*
 * Set the target state for a device and start the state change, holding
 * the state mutex against concurrent do_kick_device runs.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	/* Hold a reference while the state machine runs. */
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		/* Waiters are already satisfied if we sit on the new target. */
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}
558
559/*
 * Enable a device and wait until it has reached its target state.
561 */
/* Condition for the wait_event in dasd_enable_device. */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}
566
/*
 * Drive the device towards ONLINE and block until the state machine has
 * settled (woken via dasd_init_waitq from dasd_change_state).
 */
void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));
}
576
577/*
578 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
579 */
580#ifdef CONFIG_DASD_PROFILE
581
/* Driver-wide I/O statistics, reset when dasd_io_reqs is zeroed. */
struct dasd_profile_info_t dasd_global_profile;
/* Profiling switch; only DASD_PROFILE_ON enables the hooks below. */
unsigned int dasd_profile_level = DASD_PROFILE_OFF;
584
585/*
586 * Increments counter in global and local profiling structures.
587 */
/*
 * Increments counter in global and local profiling structures.
 *
 * Picks the histogram bucket as floor(log2(value)) - 2, clamped to
 * [0, 31], and bumps that bucket in both the driver-wide and the
 * per-block statistics.
 *
 * Fixed macro hygiene: arguments are now parenthesized and 'value' is
 * evaluated exactly once (the old body re-evaluated an unparenthesized
 * 'value' on every loop iteration, miscounting for compound argument
 * expressions such as ternaries); the do/while(0) wrapper makes the
 * macro safe in unbraced if/else bodies. Behavior at the existing call
 * sites (plain variables) is unchanged.
 */
#define dasd_profile_counter(value, counter, block) \
do { \
	__typeof__(value) _val = (value); \
	int _idx; \
	for (_idx = 0; _idx < 31 && _val >> (2 + _idx); _idx++) \
		; \
	dasd_global_profile.counter[_idx]++; \
	(block)->profile.counter[_idx]++; \
} while (0)
595
596/*
597 * Add profiling information for cqr before execution.
598 */
/*
 * Add profiling information for cqr before execution: record the current
 * channel queue depth. cqr and req are unused here; the parameters exist
 * so the signature matches the no-op macros in the !CONFIG_DASD_PROFILE
 * case below.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	/* count the length of the chanq for statistics */
	counter = 0;
	/* Clamp at 31: the histogram arrays hold 32 buckets. */
	list_for_each(l, &block->ccw_queue)
		if (++counter >= 31)
			break;
	dasd_global_profile.dasd_io_nr_req[counter]++;
	block->profile.dasd_io_nr_req[counter]++;
}
617
618/*
619 * Add profiling information for cqr after execution.
620 */
/*
 * Add profiling information for cqr after execution: latency breakdown
 * (build->start, start->irq, irq->end, total) and throughput histograms.
 */
static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = blk_rq_sectors(req);
	/* Skip requests that did not pass through all stages or moved no
	 * data; this also guards the divisions by 'sectors' below. */
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	/* Clock deltas, shifted down by 12 to yield microseconds. */
	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	/* A zeroed request count marks a reset: clear stale data first. */
	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof(struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!block->profile.dasd_io_reqs)
		memset(&block->profile, 0,
		       sizeof(struct dasd_profile_info_t));
	block->profile.dasd_io_reqs++;
	block->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, block);
	dasd_profile_counter(tottime, dasd_io_times, block);
	dasd_profile_counter(tottimeps, dasd_io_timps, block);
	dasd_profile_counter(strtime, dasd_io_time1, block);
	dasd_profile_counter(irqtime, dasd_io_time2, block);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
	dasd_profile_counter(endtime, dasd_io_time3, block);
}
#else
/* Profiling compiled out: the hooks disappear entirely. */
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)
#endif				/* CONFIG_DASD_PROFILE */
667
668/*
669 * Allocate memory for a channel program with 'cplength' channel
670 * command words and 'datasize' additional space. There are two
671 * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed
672 * memory and 2) dasd_smalloc_request uses the static ccw memory
673 * that gets allocated for each device.
674 */
Stefan Haberland68b781f2009-09-11 10:28:29 +0200675struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100676 int datasize,
677 struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700678{
679 struct dasd_ccw_req *cqr;
680
681 /* Sanity checks */
Stefan Haberland68b781f2009-09-11 10:28:29 +0200682 BUG_ON(datasize > PAGE_SIZE ||
Eric Sesterhenn7ac1e872006-03-24 18:48:13 +0100683 (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700684
Eric Sesterhenn88abaab2006-03-24 03:15:31 -0800685 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700686 if (cqr == NULL)
687 return ERR_PTR(-ENOMEM);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688 cqr->cpaddr = NULL;
689 if (cplength > 0) {
Eric Sesterhenn88abaab2006-03-24 03:15:31 -0800690 cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700691 GFP_ATOMIC | GFP_DMA);
692 if (cqr->cpaddr == NULL) {
693 kfree(cqr);
694 return ERR_PTR(-ENOMEM);
695 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700696 }
697 cqr->data = NULL;
698 if (datasize > 0) {
Eric Sesterhenn88abaab2006-03-24 03:15:31 -0800699 cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700700 if (cqr->data == NULL) {
Jesper Juhl17fd6822005-11-07 01:01:30 -0800701 kfree(cqr->cpaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700702 kfree(cqr);
703 return ERR_PTR(-ENOMEM);
704 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700705 }
Stefan Haberland68b781f2009-09-11 10:28:29 +0200706 cqr->magic = magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700707 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
708 dasd_get_device(device);
709 return cqr;
710}
711
Stefan Haberland68b781f2009-09-11 10:28:29 +0200712struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100713 int datasize,
714 struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700715{
716 unsigned long flags;
717 struct dasd_ccw_req *cqr;
718 char *data;
719 int size;
720
721 /* Sanity checks */
Stefan Haberland68b781f2009-09-11 10:28:29 +0200722 BUG_ON(datasize > PAGE_SIZE ||
Eric Sesterhenn7ac1e872006-03-24 18:48:13 +0100723 (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700724
725 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
726 if (cplength > 0)
727 size += cplength * sizeof(struct ccw1);
728 if (datasize > 0)
729 size += datasize;
730 spin_lock_irqsave(&device->mem_lock, flags);
731 cqr = (struct dasd_ccw_req *)
732 dasd_alloc_chunk(&device->ccw_chunks, size);
733 spin_unlock_irqrestore(&device->mem_lock, flags);
734 if (cqr == NULL)
735 return ERR_PTR(-ENOMEM);
736 memset(cqr, 0, sizeof(struct dasd_ccw_req));
737 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
738 cqr->cpaddr = NULL;
739 if (cplength > 0) {
740 cqr->cpaddr = (struct ccw1 *) data;
741 data += cplength*sizeof(struct ccw1);
742 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
743 }
744 cqr->data = NULL;
745 if (datasize > 0) {
746 cqr->data = data;
747 memset(cqr->data, 0, datasize);
748 }
Stefan Haberland68b781f2009-09-11 10:28:29 +0200749 cqr->magic = magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700750 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
751 dasd_get_device(device);
752 return cqr;
753}
754
755/*
756 * Free memory of a channel program. This function needs to free all the
757 * idal lists that might have been created by dasd_set_cda and the
758 * struct dasd_ccw_req itself.
759 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100760void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700761{
Martin Schwidefsky347a8dc2006-01-06 00:19:28 -0800762#ifdef CONFIG_64BIT
Linus Torvalds1da177e2005-04-16 15:20:36 -0700763 struct ccw1 *ccw;
764
765 /* Clear any idals used for the request. */
766 ccw = cqr->cpaddr;
767 do {
768 clear_normalized_cda(ccw);
769 } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
770#endif
Jesper Juhl17fd6822005-11-07 01:01:30 -0800771 kfree(cqr->cpaddr);
772 kfree(cqr->data);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700773 kfree(cqr);
774 dasd_put_device(device);
775}
776
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100777void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700778{
779 unsigned long flags;
780
781 spin_lock_irqsave(&device->mem_lock, flags);
782 dasd_free_chunk(&device->ccw_chunks, cqr);
783 spin_unlock_irqrestore(&device->mem_lock, flags);
784 dasd_put_device(device);
785}
786
787/*
788 * Check discipline magic in cqr.
789 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100790static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700791{
792 struct dasd_device *device;
793
794 if (cqr == NULL)
795 return -EINVAL;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100796 device = cqr->startdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700797 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
Stefan Haberlandfc19f382009-03-26 15:23:49 +0100798 DBF_DEV_EVENT(DBF_WARNING, device,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700799 " dasd_ccw_req 0x%08x magic doesn't match"
800 " discipline 0x%08x",
801 cqr->magic,
802 *(unsigned int *) device->discipline->name);
803 return -EINVAL;
804 }
805 return 0;
806}
807
808/*
809 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps the device running.
811 * ccw_device_clear can fail if the i/o subsystem
812 * is in a bad mood.
813 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	/*
	 * Try at most 5 times to clear the I/O still in flight; a
	 * successful clear flips the status to CLEAR_PENDING, which also
	 * terminates the loop.  The final clear interrupt is delivered
	 * later via the interrupt handler.
	 */
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->retries--;
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_clock();
			/* starttime = 0 disarms the expiry check for this cqr */
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			/* internal error 10 - unknown rc*/
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	/* Let the tasklet pick up the changed request state. */
	dasd_schedule_device_bh(device);
	return rc;
}
864
865/*
866 * Start the i/o. This start_IO can fail if the channel is really busy.
867 * In that case set up a timer to start the request later.
868 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		/* record the reason in the cqr for waiters to inspect */
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO run out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	/* cpmode == 1: transport mode (tcw); otherwise classic command mode */
	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: selected pathes gone,"
			      " retry on all pathes");
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		/* most likely caused in power management context */
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	/* record the start rc so waiters can distinguish failure causes */
	cqr->intrc = rc;
	return rc;
}
949
950/*
951 * Timeout function for dasd devices. This is used for different purposes
952 * 1) missing interrupt handler for normal operation
953 * 2) delayed start of request where start_IO failed with -EBUSY
954 * 3) timeout for missing state change interrupts
955 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
956 * DASD_CQR_QUEUED for 2) and 3).
957 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100958static void dasd_device_timeout(unsigned long ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700959{
960 unsigned long flags;
961 struct dasd_device *device;
962
963 device = (struct dasd_device *) ptr;
964 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
965 /* re-activate request queue */
Stefan Weinhubereb6e1992009-12-07 12:51:51 +0100966 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700967 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100968 dasd_schedule_device_bh(device);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700969}
970
971/*
972 * Setup timeout for a device in jiffies.
973 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100974void dasd_device_set_timer(struct dasd_device *device, int expires)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700975{
Stefan Weinhuber48cae882009-02-11 10:37:31 +0100976 if (expires == 0)
977 del_timer(&device->timer);
978 else
979 mod_timer(&device->timer, jiffies + expires);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980}
981
982/*
983 * Clear timeout for a device.
984 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	/* del_timer is safe to call even if the timer is not pending */
	del_timer(&device->timer);
}
989
/*
 * Handle a request whose start attempt the common I/O layer reported as
 * failed (called from the interrupt handler for IS_ERR(irb)): validate
 * that the request still belongs to this device and re-queue it for a
 * retry.
 */
static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	/* no request attached to this interrupt -> nothing to do */
	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	/* takes a reference on the device; must be balanced by put below */
	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	/* cross-check request vs. device and discipline magic */
	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}
1030
void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	/* the state change lifts a pending stop condition ... */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	/* ... so kick both the device and, if present, the block queue */
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
}
1041
1042/*
1043 * Interrupt handler for "normal" ssch-io based dasd devices.
1044 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;

	/* an ERR_PTR irb encodes a failure of the start itself */
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		/* re-queue the killed request for a retry */
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_clock();

	/* check for unsolicited interrupts */
	cqr = (struct dasd_ccw_req *) intparm;
	if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
		     (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
		     (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) {
		/* deferred-cc case: the request will be restarted later */
		if (cqr && cqr->status == DASD_CQR_IN_IO)
			cqr->status = DASD_CQR_QUEUED;
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			dasd_device_clear_timer(device);
			device->discipline->handle_unsolicited_interrupt(device,
									 irb);
			dasd_put_device(device);
		}
		return;
	}

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		/* dasd_flush_device_queue may be waiting for this state */
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		/* keep a copy of the irb for later error recovery */
		memcpy(&cqr->irb, irb, sizeof(struct irb));
		/* log sense for every failed I/O to s390 debugfeature */
		dasd_log_sense_dbf(cqr, irb);
		if (device->features & DASD_FEATURE_ERPLOG) {
			dasd_log_sense(cqr, irb);
		}

		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == LPM_ANYPATH)
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			cqr->lpm = LPM_ANYPATH;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	/* fast path: start the next request directly from interrupt context */
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
1162
1163/*
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001164 * If we have an error on a dasd_block layer request then we cancel
1165 * and return all further requests from the same dasd_block as well.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001166 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001167static void __dasd_device_recovery(struct dasd_device *device,
1168 struct dasd_ccw_req *ref_cqr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169{
1170 struct list_head *l, *n;
1171 struct dasd_ccw_req *cqr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001172
1173 /*
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001174 * only requeue request that came from the dasd_block layer
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001176 if (!ref_cqr->block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177 return;
Horst Hummelf24acd42005-05-01 08:58:59 -07001178
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001179 list_for_each_safe(l, n, &device->ccw_queue) {
1180 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1181 if (cqr->status == DASD_CQR_QUEUED &&
1182 ref_cqr->block == cqr->block) {
1183 cqr->status = DASD_CQR_CLEARED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001185 }
1186};
1187
1188/*
1189 * Remove those ccw requests from the queue that need to be returned
1190 * to the upper layer.
1191 */
1192static void __dasd_device_process_ccw_queue(struct dasd_device *device,
1193 struct list_head *final_queue)
1194{
1195 struct list_head *l, *n;
1196 struct dasd_ccw_req *cqr;
1197
1198 /* Process request with final status. */
1199 list_for_each_safe(l, n, &device->ccw_queue) {
1200 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1201
1202 /* Stop list processing at the first non-final request. */
1203 if (cqr->status == DASD_CQR_QUEUED ||
1204 cqr->status == DASD_CQR_IN_IO ||
1205 cqr->status == DASD_CQR_CLEAR_PENDING)
1206 break;
1207 if (cqr->status == DASD_CQR_ERROR) {
1208 __dasd_device_recovery(device, cqr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001209 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001210 /* Rechain finished requests to final queue */
1211 list_move_tail(&cqr->devlist, final_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212 }
1213}
1214
1215/*
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001216 * the cqrs from the final queue are returned to the upper layer
1217 * by setting a dasd_block state and calling the callback function
1218 */
1219static void __dasd_device_process_final_queue(struct dasd_device *device,
1220 struct list_head *final_queue)
1221{
1222 struct list_head *l, *n;
1223 struct dasd_ccw_req *cqr;
Stefan Weinhuber03513bc2008-02-19 15:29:27 +01001224 struct dasd_block *block;
Stefan Haberlandc80ee722008-05-30 10:03:31 +02001225 void (*callback)(struct dasd_ccw_req *, void *data);
1226 void *callback_data;
Stefan Haberlandfc19f382009-03-26 15:23:49 +01001227 char errorstring[ERRORLENGTH];
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001228
1229 list_for_each_safe(l, n, final_queue) {
1230 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1231 list_del_init(&cqr->devlist);
Stefan Weinhuber03513bc2008-02-19 15:29:27 +01001232 block = cqr->block;
Stefan Haberlandc80ee722008-05-30 10:03:31 +02001233 callback = cqr->callback;
1234 callback_data = cqr->callback_data;
Stefan Weinhuber03513bc2008-02-19 15:29:27 +01001235 if (block)
1236 spin_lock_bh(&block->queue_lock);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001237 switch (cqr->status) {
1238 case DASD_CQR_SUCCESS:
1239 cqr->status = DASD_CQR_DONE;
1240 break;
1241 case DASD_CQR_ERROR:
1242 cqr->status = DASD_CQR_NEED_ERP;
1243 break;
1244 case DASD_CQR_CLEARED:
1245 cqr->status = DASD_CQR_TERMINATED;
1246 break;
1247 default:
Stefan Haberlandfc19f382009-03-26 15:23:49 +01001248 /* internal error 12 - wrong cqr status*/
1249 snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
1250 dev_err(&device->cdev->dev,
1251 "An error occurred in the DASD device driver, "
1252 "reason=%s\n", errorstring);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001253 BUG();
1254 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001255 if (cqr->callback != NULL)
Stefan Haberlandc80ee722008-05-30 10:03:31 +02001256 (callback)(cqr, callback_data);
Stefan Weinhuber03513bc2008-02-19 15:29:27 +01001257 if (block)
1258 spin_unlock_bh(&block->queue_lock);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001259 }
1260}
1261
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001262/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 * Take a look at the first request on the ccw queue and check
1264 * if it reached its expire time. If so, terminate the IO.
1265 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	/* only the request at the head can be actively in I/O */
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	/* expires == 0 or starttime == 0 (set by dasd_term_IO) disarms this */
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%is) but cannot be "
				"ended, retrying in 5 s\n",
				cqr, (cqr->expires/HZ));
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%is), %i retries "
				"remaining\n", cqr, (cqr->expires/HZ),
				cqr->retries);
		}
	}
}
1291
1292/*
1293 * Take a look at the first request on the ccw queue and check
1294 * if it needs to be started.
1295 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* when device is stopped, return request to previous layer */
	if (device->stopped) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		/* arm the missing-interrupt / expiry timer */
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		/* all selected paths gone - reschedule for an lpm retry */
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}
1322
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323/*
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001324 * Go through all request on the dasd_device request queue,
1325 * terminate them on the cdev if necessary, and return them to the
1326 * submitting layer via callback.
1327 * Note:
1328 * Make sure that all 'submitting layers' still exist when
1329 * this function is called!. In other words, when 'device' is a base
1330 * device then all block layer requests must have been removed before
1331 * via dasd_flush_block_queue.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Flushing the DASD request queue "
					"failed for request %p\n", cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			cqr->stopclk = get_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default: /* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.  The interrupt handler wakes dasd_flush_wq
	 * when it sees the clear interrupt.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}
1382
1383/*
1384 * Acquire the device lock and process queues for the device.
1385 */
static void dasd_device_tasklet(struct dasd_device *device)
{
	struct list_head final_queue;

	/* allow the next dasd_schedule_device_bh to schedule us again */
	atomic_set (&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	/* (callbacks must run without the ccwdev lock held) */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* drop the reference taken by dasd_schedule_device_bh */
	dasd_put_device(device);
}
1406
/*
 * Schedules a call to dasd_tasklet over the device tasklet.
 * Takes a device reference that is released by dasd_device_tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
	/* Protect against rescheduling: only 0 -> 1 transition schedules. */
	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}
1418
Stefan Weinhubereb6e1992009-12-07 12:51:51 +01001419void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
1420{
1421 device->stopped |= bits;
1422}
1423EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
1424
/*
 * Clear the given DASD_STOPPED_* bits; once no stop condition remains,
 * wake sleepers in _dasd_sleep_on that wait for !device->stopped.
 */
void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped &= ~bits;
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
1432
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433/*
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001434 * Queue a request to the head of the device ccw_queue.
1435 * Start the I/O if possible.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001437void dasd_add_request_head(struct dasd_ccw_req *cqr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438{
1439 struct dasd_device *device;
1440 unsigned long flags;
1441
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001442 device = cqr->startdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001444 cqr->status = DASD_CQR_QUEUED;
1445 list_add(&cqr->devlist, &device->ccw_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 /* let the bh start the request to keep them in order */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001447 dasd_schedule_device_bh(device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1449}
1450
/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
1468
/*
 * Wakeup helper for the 'sleep_on' functions.
 * 'data' is the wait_queue_head_t the sleeper blocks on (generic_waitq).
 */
static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up((wait_queue_head_t *) data);
}
1476
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001477static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478{
1479 struct dasd_device *device;
1480 int rc;
1481
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001482 device = cqr->startdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483 spin_lock_irq(get_ccwdev_lock(device->cdev));
Horst Hummelc2ba4442006-02-01 03:06:37 -08001484 rc = ((cqr->status == DASD_CQR_DONE ||
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001485 cqr->status == DASD_CQR_NEED_ERP ||
1486 cqr->status == DASD_CQR_TERMINATED) &&
1487 list_empty(&cqr->devlist));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1489 return rc;
1490}
1491
/*
 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
 * When ERP is in use, this drives one step of recovery per call:
 * terminated requests are handed back to the discipline, NEED_ERP
 * requests get their ERP action started, failed requests are logged,
 * and finished ERP requests are post-processed.
 */
static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	dasd_erp_fn_t erp_fn;

	/* FILLED means not yet started - nothing to recover. */
	if (cqr->status == DASD_CQR_FILLED)
		return 0;
	device = cqr->startdev;
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->status == DASD_CQR_TERMINATED) {
			device->discipline->handle_terminated_request(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = device->discipline->erp_action(cqr);
			erp_fn(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_FAILED)
			dasd_log_sense(cqr, &cqr->irb);
		/* cqr->refers links to the request this ERP request recovers */
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			return 1;
		}
	}
	return 0;
}
1522
1523static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
1524{
1525 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
1526 if (cqr->refers) /* erp is not done yet */
1527 return 1;
1528 return ((cqr->status != DASD_CQR_DONE) &&
1529 (cqr->status != DASD_CQR_FAILED));
1530 } else
1531 return (cqr->status == DASD_CQR_FILLED);
1532}
1533
/*
 * Common worker for dasd_sleep_on and dasd_sleep_on_interruptible:
 * start 'maincqr' (and any ERP requests chained to it) on its device
 * queue and wait for completion. Returns 0 on success, the recorded
 * interrupt code, or -EIO.
 */
static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
{
	struct dasd_device *device;
	int rc;
	struct list_head ccw_queue;
	struct dasd_ccw_req *cqr;

	INIT_LIST_HEAD(&ccw_queue);
	maincqr->status = DASD_CQR_FILLED;
	device = maincqr->startdev;
	list_add(&maincqr->blocklist, &ccw_queue);
	/*
	 * Each iteration processes the head of ccw_queue; ERP processing
	 * (__dasd_sleep_on_erp) may prepend new requests to the list.
	 */
	for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
	     cqr = list_first_entry(&ccw_queue,
				    struct dasd_ccw_req, blocklist)) {

		if (__dasd_sleep_on_erp(cqr))
			continue;
		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
			continue;

		/* Non-temporary stop condition will trigger fail fast */
		if (device->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(device))) {
			cqr->status = DASD_CQR_FAILED;
			continue;
		}

		/* Don't try to start requests if device is stopped */
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, !(device->stopped));
			if (rc == -ERESTARTSYS) {
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, !(device->stopped));

		cqr->callback = dasd_wakeup_cb;
		cqr->callback_data = (void *) &generic_waitq;
		dasd_add_request_tail(cqr);
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, _wait_for_wakeup(cqr));
			if (rc == -ERESTARTSYS) {
				/*
				 * Cancellation is asynchronous, so we must
				 * still wait for the request to come back
				 * before marking it failed.
				 */
				dasd_cancel_req(cqr);
				/* wait (non-interruptible) for final status */
				wait_event(generic_waitq,
					   _wait_for_wakeup(cqr));
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}

	maincqr->endclk = get_clock();
	/* Log sense data on failure, but not for a mere signal interrupt. */
	if ((maincqr->status != DASD_CQR_DONE) &&
	    (maincqr->intrc != -ERESTARTSYS))
		dasd_log_sense(maincqr, &maincqr->irb);
	if (maincqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (maincqr->intrc)
		rc = maincqr->intrc;
	else
		rc = -EIO;
	return rc;
}
1605
/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 0);
}
1614
/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptibly for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 1);
}
1623
1624/*
1625 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
1626 * for eckd devices) the currently running request has to be terminated
1627 * and be put back to status queued, before the special request is added
1628 * to the head of the queue. Then the special request is waited on normally.
1629 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001630static inline int _dasd_term_running_cqr(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631{
1632 struct dasd_ccw_req *cqr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633
1634 if (list_empty(&device->ccw_queue))
1635 return 0;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001636 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
Horst Hummel8f617012006-08-30 14:33:33 +02001637 return device->discipline->term_IO(cqr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638}
1639
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001640int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642 struct dasd_device *device;
1643 int rc;
Horst Hummel138c0142006-06-29 14:58:12 +02001644
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001645 device = cqr->startdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646 spin_lock_irq(get_ccwdev_lock(device->cdev));
1647 rc = _dasd_term_running_cqr(device);
1648 if (rc) {
1649 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1650 return rc;
1651 }
Horst Hummel138c0142006-06-29 14:58:12 +02001652
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 cqr->callback = dasd_wakeup_cb;
Stefan Haberlandc80ee722008-05-30 10:03:31 +02001654 cqr->callback_data = (void *) &generic_waitq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 cqr->status = DASD_CQR_QUEUED;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001656 list_add(&cqr->devlist, &device->ccw_queue);
Horst Hummel138c0142006-06-29 14:58:12 +02001657
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 /* let the bh start the request to keep them in order */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001659 dasd_schedule_device_bh(device);
Horst Hummel138c0142006-06-29 14:58:12 +02001660
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1662
Stefan Haberlandc80ee722008-05-30 10:03:31 +02001663 wait_event(generic_waitq, _wait_for_wakeup(cqr));
Horst Hummel138c0142006-06-29 14:58:12 +02001664
Stefan Weinhuber6cc7f162009-06-12 10:26:39 +02001665 if (cqr->status == DASD_CQR_DONE)
1666 rc = 0;
1667 else if (cqr->intrc)
1668 rc = cqr->intrc;
1669 else
1670 rc = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 return rc;
1672}
1673
/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 *	   0 if there was no need to terminate the request (not started yet)
 *	   negative error code if termination failed
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
 */
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to cleared */
		cqr->status = DASD_CQR_CLEARED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		rc = device->discipline->term_IO(cqr);
		if (rc) {
			dev_err(&device->cdev->dev,
				"Cancelling request %p failed with rc=%d\n",
				cqr, rc);
		} else {
			cqr->stopclk = get_clock();
		}
		break;
	default: /* already finished or clear pending - do nothing */
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	/* Let the tasklet return the cancelled request via its callback. */
	dasd_schedule_device_bh(device);
	return rc;
}
1715
1716
1717/*
1718 * SECTION: Operations of the dasd_block layer.
1719 */
1720
/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably, (e.g. a state
 * change interrupt). Runs in timer (softirq) context.
 */
static void dasd_block_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_block *block;

	block = (struct dasd_block *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
	dasd_schedule_block_bh(block);
}
1738
1739/*
1740 * Setup timeout for a dasd_block in jiffies.
1741 */
1742void dasd_block_set_timer(struct dasd_block *block, int expires)
1743{
Stefan Weinhuber48cae882009-02-11 10:37:31 +01001744 if (expires == 0)
1745 del_timer(&block->timer);
1746 else
1747 mod_timer(&block->timer, jiffies + expires);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001748}
1749
/*
 * Clear timeout for a dasd_block. del_timer is safe to call even if the
 * timer is not currently pending.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
	del_timer(&block->timer);
}
1757
/*
 * Process finished error recovery ccw: report the outcome and run the
 * discipline's ERP postaction to unwind the ERP request chain.
 */
static void __dasd_process_erp(struct dasd_device *device,
			       struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}
1773
/*
 * Fetch requests from the block device queue, build ccw requests for
 * them and chain them to the block's ccw queue. Caller holds
 * block->queue_lock (see dasd_block_tasklet / do_dasd_request).
 */
static void __dasd_process_request_queue(struct dasd_block *block)
{
	struct request_queue *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	struct dasd_device *basedev;
	unsigned long flags;
	queue = block->request_queue;
	basedev = block->base;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue request from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (basedev->state < DASD_STATE_READY) {
		/* Device not ready: fail everything the block layer has. */
		while ((req = blk_fetch_request(block->request_queue)))
			__blk_end_request_all(req, -EIO);
		return;
	}
	/* Now we try to fetch requests from the request queue */
	while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
		if (basedev->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "Rejecting write request %p",
				      req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		cqr = basedev->discipline->build_cp(basedev, block, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -EBUSY)
				break;	/* normal end condition */
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be built right
				 * now, we have to try later. If this request
				 * is the head-of-queue we stop the device
				 * for 1/2 second.
				 */
				if (!list_empty(&block->ccw_queue))
					break;
				spin_lock_irqsave(
					get_ccwdev_lock(basedev->cdev), flags);
				dasd_device_set_stop_bits(basedev,
							  DASD_STOPPED_PENDING);
				spin_unlock_irqrestore(
					get_ccwdev_lock(basedev->cdev), flags);
				/* Timer clears DASD_STOPPED_PENDING again. */
				dasd_block_set_timer(block, HZ/2);
				break;
			}
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		/*
		 * Note: callback is set to dasd_return_cqr_cb in
		 * __dasd_block_start_head to cover erp requests as well
		 */
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_FILLED;
		blk_start_request(req);
		list_add_tail(&cqr->blocklist, &block->ccw_queue);
		dasd_profile_start(block, cqr, req);
	}
}
1856
1857static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1858{
1859 struct request *req;
1860 int status;
Kiyoshi Ueda4c4e2142008-01-28 10:29:42 +01001861 int error = 0;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001862
1863 req = (struct request *) cqr->callback_data;
1864 dasd_profile_end(cqr->block, cqr, req);
Stefan Weinhuberfe6b8e72008-02-05 16:50:47 +01001865 status = cqr->block->base->discipline->free_cp(cqr, req);
Kiyoshi Ueda4c4e2142008-01-28 10:29:42 +01001866 if (status <= 0)
1867 error = status ? status : -EIO;
Tejun Heo40cbbb72009-04-23 11:05:19 +09001868 __blk_end_request_all(req, error);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001869}
1870
/*
 * Process ccw request queue: handle terminated and recoverable requests,
 * run EER for fatal errors, unwind finished ERP chains, and move requests
 * with truly final status to 'final_queue'. Caller holds
 * block->queue_lock. Any step that may alter the list restarts the scan.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process request with final status. */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;

		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			goto restart;
		}

		/* Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
			erp_fn(cqr);
			goto restart;
		}

		/* log sense for fatal error */
		if (cqr->status == DASD_CQR_FAILED) {
			dasd_log_sense(cqr, &cqr->irb);
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

			/*
			 * Restart the request: quiesce the device so the
			 * request stays queued until EER is handled.
			 */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(base, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}
1936
/*
 * Device-layer callback for block-layer requests: once the dasd_device
 * layer is done with a cqr, kick the block tasklet to post-process it.
 */
static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
	dasd_schedule_block_bh(cqr->block);
}
1941
/*
 * Submit FILLED requests from the block ccw queue to the dasd_device
 * layer. Caller holds block->queue_lock.
 */
static void __dasd_block_start_head(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&block->ccw_queue))
		return;
	/* We always begin with the first requests on the queue, as some
	 * of previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (block->base->stopped)
			return;

		/* just a fail safe check, should not happen */
		if (!cqr->startdev)
			cqr->startdev = block->base;

		/* make sure that the requests we submit find their way back */
		cqr->callback = dasd_return_cqr_cb;

		dasd_add_request_tail(cqr);
	}
}
1977
/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 * Runs in tasklet context; scheduled via dasd_schedule_block_bh.
 */
static void dasd_block_tasklet(struct dasd_block *block)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Allow the tasklet to be rescheduled while we work. */
	atomic_set(&block->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock(&block->queue_lock);
	/* Finish off requests on ccw queue */
	__dasd_process_block_ccw_queue(block, &final_queue);
	spin_unlock(&block->queue_lock);
	/* Now call the callback function of requests with final status */
	spin_lock_irq(&block->request_queue_lock);
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
	}
	/* queue_lock is nested inside request_queue_lock here. */
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irq(&block->request_queue_lock);
	/* Drop the reference taken by dasd_schedule_block_bh. */
	dasd_put_device(block->base);
}
2011
/*
 * Callback installed on flushed requests: wake dasd_flush_block_queue,
 * which waits on dasd_flush_wq for the requests to come back.
 */
static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up(&dasd_flush_wq);
}
2016
/*
 * Go through all request on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr, *n;
	int rc, i;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_bh(&block->queue_lock);
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if this request currently owned by a dasd_device cancel it */
		if (cqr->status >= DASD_CQR_QUEUED)
			rc = dasd_cancel_req(cqr);
		if (rc < 0)
			break;
		/* Rechain request (including erp chain) so it won't be
		 * touched by the dasd_block_tasklet anymore.
		 * Replace the callback so we notice when the request
		 * is returned from the dasd_device layer.
		 */
		cqr->callback = _dasd_wake_block_flush_cb;
		/* walk the cqr->refers chain to move the whole ERP chain */
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, &flush_queue);
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}
	spin_unlock_bh(&block->queue_lock);
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
		/* status < QUEUED means the device layer is done with it */
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_erp(block->base, cqr);
			spin_unlock_bh(&block->queue_lock);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		spin_lock_irq(&block->request_queue_lock);
		cqr->endclk = get_clock();
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
		spin_unlock_irq(&block->request_queue_lock);
	}
	return rc;
}
2073
/*
 * Schedules a call to dasd_tasklet over the block tasklet.
 * Takes a base-device reference released by dasd_block_tasklet.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
		return;
	/* life cycle of block is bound to it's base device */
	dasd_get_device(block->base);
	tasklet_hi_schedule(&block->tasklet);
}
2086
2087
2088/*
2089 * SECTION: external block device operations
2090 * (request queue handling, open, release, etc.)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091 */
2092
/*
 * Dasd request queue function. Called from ll_rw_blk.c
 * (with block->request_queue_lock held by the block layer).
 */
static void do_dasd_request(struct request_queue *queue)
{
	struct dasd_block *block;

	block = queue->queuedata;
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
}
2108
/*
 * Allocate and initialize request queue and default I/O scheduler.
 * Returns 0 on success or a negative errno.
 */
static int dasd_alloc_queue(struct dasd_block *block)
{
	int rc;

	block->request_queue = blk_init_queue(do_dasd_request,
					       &block->request_queue_lock);
	if (block->request_queue == NULL)
		return -ENOMEM;

	block->request_queue->queuedata = block;

	/* Replace the default elevator with the deadline scheduler. */
	elevator_exit(block->request_queue->elevator);
	block->request_queue->elevator = NULL;
	rc = elevator_init(block->request_queue, "deadline");
	if (rc) {
		blk_cleanup_queue(block->request_queue);
		return rc;
	}
	return 0;
}
2132
/*
 * Configure block-layer limits of the request queue to match the
 * discipline's capabilities.
 */
static void dasd_setup_queue(struct dasd_block *block)
{
	int max;

	blk_queue_logical_block_size(block->request_queue, block->bp_block);
	max = block->base->discipline->max_blocks << block->s2b_shift;
	blk_queue_max_hw_sectors(block->request_queue, max);
	/* -1L relies on conversion to the parameter type to mean
	 * "no segment count limit" - NOTE(review): consider an explicit
	 * maximum constant instead. */
	blk_queue_max_segments(block->request_queue, -1L);
	/* with page sized segments we can translate each segment into
	 * one idaw/tidaw
	 */
	blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
	blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
	blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
}
2151
/*
 * Deactivate and free request queue. Safe to call when the queue was
 * never allocated.
 */
static void dasd_free_queue(struct dasd_block *block)
{
	if (block->request_queue) {
		blk_cleanup_queue(block->request_queue);
		block->request_queue = NULL;
	}
}
2162
2163/*
2164 * Flush request on the request queue.
2165 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002166static void dasd_flush_request_queue(struct dasd_block *block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167{
2168 struct request *req;
2169
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002170 if (!block->request_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 return;
Horst Hummel138c0142006-06-29 14:58:12 +02002172
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002173 spin_lock_irq(&block->request_queue_lock);
Tejun Heo9934c8c2009-05-08 11:54:16 +09002174 while ((req = blk_fetch_request(block->request_queue)))
Tejun Heo40cbbb72009-04-23 11:05:19 +09002175 __blk_end_request_all(req, -EIO);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002176 spin_unlock_irq(&block->request_queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177}
2178
/*
 * Block device open: take an open reference on the dasd_block and a
 * module reference on the discipline driving the underlying device.
 * Fails while the device is offline, in probeonly mode, or not yet
 * fully analyzed (state <= DASD_STATE_BASIC).
 */
static int dasd_open(struct block_device *bdev, fmode_t mode)
{
	struct dasd_block *block = bdev->bd_disk->private_data;
	struct dasd_device *base;
	int rc;

	if (!block)
		return -ENODEV;

	base = block->base;
	/* The open count taken here is dropped on every error path below. */
	atomic_inc(&block->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	/* Pin the discipline module for as long as the device is open. */
	if (!try_module_get(base->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		dev_info(&base->cdev->dev,
			 "Accessing the DASD failed because it is in "
			 "probeonly mode\n");
		rc = -EPERM;
		goto out;
	}

	/* The device must be recognized/analyzed before it may be used. */
	if (base->state <= DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, base, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	return 0;

out:
	module_put(base->discipline->owner);
unlock:
	atomic_dec(&block->open_count);
	return rc;
}
2223
Al Viro57a7c0b2008-03-02 10:36:08 -05002224static int dasd_release(struct gendisk *disk, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225{
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002226 struct dasd_block *block = disk->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002228 atomic_dec(&block->open_count);
2229 module_put(block->base->discipline->owner);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230 return 0;
2231}
2232
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08002233/*
2234 * Return disk geometry.
2235 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002236static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08002237{
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002238 struct dasd_block *block;
2239 struct dasd_device *base;
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08002240
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002241 block = bdev->bd_disk->private_data;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002242 if (!block)
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08002243 return -ENODEV;
Julia Lawallcf05b822009-08-23 18:09:05 +02002244 base = block->base;
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08002245
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002246 if (!base->discipline ||
2247 !base->discipline->fill_geometry)
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08002248 return -EINVAL;
2249
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002250 base->discipline->fill_geometry(block, geo);
2251 geo->start = get_start_sect(bdev) >> block->s2b_shift;
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08002252 return 0;
2253}
2254
Alexey Dobriyan83d5cde2009-09-21 17:01:13 -07002255const struct block_device_operations
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256dasd_device_operations = {
2257 .owner = THIS_MODULE,
Al Viro57a7c0b2008-03-02 10:36:08 -05002258 .open = dasd_open,
2259 .release = dasd_release,
Heiko Carstens0000d032009-03-26 15:23:45 +01002260 .ioctl = dasd_ioctl,
2261 .compat_ioctl = dasd_ioctl,
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08002262 .getgeo = dasd_getgeo,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263};
2264
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002265/*******************************************************************************
2266 * end of block device operations
2267 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268
2269static void
2270dasd_exit(void)
2271{
2272#ifdef CONFIG_PROC_FS
2273 dasd_proc_exit();
2274#endif
Stefan Weinhuber20c64462006-03-24 03:15:25 -08002275 dasd_eer_exit();
Horst Hummel6bb0e012005-07-27 11:45:03 -07002276 if (dasd_page_cache != NULL) {
2277 kmem_cache_destroy(dasd_page_cache);
2278 dasd_page_cache = NULL;
2279 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280 dasd_gendisk_exit();
2281 dasd_devmap_exit();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 if (dasd_debug_area != NULL) {
2283 debug_unregister(dasd_debug_area);
2284 dasd_debug_area = NULL;
2285 }
2286}
2287
2288/*
2289 * SECTION: common functions for ccw_driver use
2290 */
2291
Cornelia Huckf3445a12009-04-14 15:36:23 +02002292static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
2293{
2294 struct ccw_device *cdev = data;
2295 int ret;
2296
2297 ret = ccw_device_set_online(cdev);
2298 if (ret)
2299 pr_warning("%s: Setting the DASD online failed with rc=%d\n",
2300 dev_name(&cdev->dev), ret);
Cornelia Huckf3445a12009-04-14 15:36:23 +02002301}
2302
Horst Hummel1c01b8a2006-01-06 00:19:15 -08002303/*
2304 * Initial attempt at a probe function. this can be simplified once
2305 * the other detection code is gone.
2306 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002307int dasd_generic_probe(struct ccw_device *cdev,
2308 struct dasd_discipline *discipline)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309{
2310 int ret;
2311
2312 ret = dasd_add_sysfs_files(cdev);
2313 if (ret) {
Stefan Haberlandb8ed5dd2009-12-07 12:51:52 +01002314 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
2315 "dasd_generic_probe: could not add "
2316 "sysfs entries");
Horst Hummel40545572006-06-29 15:08:18 +02002317 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318 }
Horst Hummel40545572006-06-29 15:08:18 +02002319 cdev->handler = &dasd_int_handler;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320
Horst Hummel40545572006-06-29 15:08:18 +02002321 /*
2322 * Automatically online either all dasd devices (dasd_autodetect)
2323 * or all devices specified with dasd= parameters during
2324 * initial probe.
2325 */
2326 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
Kay Sievers2a0217d2008-10-10 21:33:09 +02002327 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
Cornelia Huckf3445a12009-04-14 15:36:23 +02002328 async_schedule(dasd_generic_auto_online, cdev);
Stefan Haberlandde3e0da2008-01-26 14:11:08 +01002329 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330}
2331
Horst Hummel1c01b8a2006-01-06 00:19:15 -08002332/*
2333 * This will one day be called from a global not_oper handler.
2334 * It is also used by driver_unregister during module unload.
2335 */
void dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;

	/* No further interrupts are delivered to this device. */
	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);
}
2369
Horst Hummel1c01b8a2006-01-06 00:19:15 -08002370/*
2371 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372 * the device is detected for the first time and is supposed to be used
Horst Hummel1c01b8a2006-01-06 00:19:15 -08002373 * or the user has started activation through sysfs.
2374 */
int dasd_generic_set_online(struct ccw_device *cdev,
			    struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	/* Devices flagged for DIAG are driven by the DIAG discipline. */
	discipline = base_discipline;
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			pr_warning("%s Setting the DASD online failed because "
				   "of missing DIAG discipline\n",
				   dev_name(&cdev->dev));
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	/* Pin both disciplines; drop in reverse order on failure. */
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	/* check_device will allocate block device if necessary */
	rc = discipline->check_device(device);
	if (rc) {
		pr_warning("%s Setting the DASD online with discipline %s "
			   "failed with rc=%i\n",
			   dev_name(&cdev->dev), discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		/* Online transition did not get past recognition: undo. */
		pr_warning("%s Setting the DASD online failed because of a "
			   "missing discipline\n", dev_name(&cdev->dev));
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		if (device->block)
			dasd_free_block(device->block);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 dev_name(&cdev->dev));

	/* Block until the device reached its final state. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);
	return rc;
}
2441
/*
 * Take a device offline: refuse while it has external openers, then
 * tear down the device and its associated block structure.
 */
int dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;
	int max_count, open_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	if (device->block) {
		/* With no internal bdev reference even count 0 is "in use". */
		max_count = device->block->bdev ? 0 : -1;
		open_count = atomic_read(&device->block->open_count);
		if (open_count > max_count) {
			if (open_count > 0)
				pr_warning("%s: The DASD cannot be set offline "
					   "with open count %i\n",
					   dev_name(&cdev->dev), open_count);
			else
				pr_warning("%s: The DASD cannot be set offline "
					   "while it is in use\n",
					   dev_name(&cdev->dev));
			/* Undo the offline flag set above. */
			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
			dasd_put_device(device);
			return -EBUSY;
		}
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);
	return 0;
}
2492
/*
 * CIO notification handler. Returns 1 if the event was handled and the
 * device should be kept, 0 otherwise.
 */
int dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	int ret;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return 0;
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_BOXED:
	case CIO_NO_PATH:
		/* First of all call extended error reporting. */
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);

		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		/* Requeue in-flight requests so they are retried later. */
		list_for_each_entry(cqr, &device->ccw_queue, devlist)
			if (cqr->status == DASD_CQR_IN_IO) {
				cqr->status = DASD_CQR_QUEUED;
				cqr->retries++;
			}
		dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
		dasd_device_clear_timer(device);
		dasd_schedule_device_bh(device);
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
		if (device->stopped & DASD_UNRESUMED_PM) {
			/* Device came back after a failed resume. */
			dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
			dasd_restore_device(device);
			ret = 1;
			break;
		}
		dasd_schedule_device_bh(device);
		if (device->block)
			dasd_schedule_block_bh(device->block);
		ret = 1;
		break;
	}
	dasd_put_device(device);
	return ret;
}
2541
/*
 * Suspend (PM freeze) handling: stop acceptance of new I/O, terminate
 * all active requests and requeue them, then call the discipline
 * specific freeze function.
 */
int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head freeze_queue;
	struct dasd_device *device = dasd_device_from_cdev(cdev);

	if (IS_ERR(device))
		return PTR_ERR(device);
	/* disallow new I/O */
	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
	/* clear active requests */
	INIT_LIST_HEAD(&freeze_queue);
	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
				spin_unlock_irq(get_ccwdev_lock(cdev));
				dasd_put_device(device);
				return rc;
			}
		}
		list_move_tail(&cqr->devlist, &freeze_queue);
	}

	spin_unlock_irq(get_ccwdev_lock(cdev));

	/* Wait outside the lock until all terminations have completed. */
	list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
		if (cqr->status == DASD_CQR_CLEARED)
			cqr->status = DASD_CQR_QUEUED;
	}
	/* requeue the requests at the tail of the ccw_queue */
	spin_lock_irq(get_ccwdev_lock(cdev));
	list_splice_tail(&freeze_queue, &device->ccw_queue);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	if (device->discipline->freeze)
		rc = device->discipline->freeze(device);

	dasd_put_device(device);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
2594
/*
 * Resume (PM restore) handling: allow I/O again and call the discipline
 * restore function. NOTE: apart from the missing-device case this
 * function always returns 0; a failed discipline restore is recorded by
 * setting DASD_UNRESUMED_PM instead of reporting the error to the caller.
 */
int dasd_generic_restore_device(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);
	int rc = 0;

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* allow new IO again */
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));

	dasd_schedule_device_bh(device);

	/*
	 * call discipline restore function
	 * if device is stopped do nothing e.g. for disconnected devices
	 */
	if (device->discipline->restore && !(device->stopped))
		rc = device->discipline->restore(device);
	if (rc || device->stopped)
		/*
		 * if the resume failed for the DASD we put it in
		 * an UNRESUMED stop state
		 */
		device->stopped |= DASD_UNRESUMED_PM;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_put_device(device);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
2629
/*
 * Build a single-CCW Read Device Characteristics request for the given
 * device. Returns the filled request or an ERR_PTR on allocation failure.
 */
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   void *rdc_buffer,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	unsigned long *idaw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed*/
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	/*
	 * If the buffer cannot be addressed directly by the CCW
	 * (presumably above the 31-bit limit or crossing a boundary —
	 * see idal_is_needed), build an indirect data address list.
	 */
	if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
		idaw = (unsigned long *) (cqr->data);
		ccw->cda = (__u32)(addr_t) idaw;
		ccw->flags = CCW_FLAG_IDA;
		idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
	} else {
		ccw->cda = (__u32)(addr_t) rdc_buffer;
		ccw->flags = 0;
	}

	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
2670
2671
Stefan Haberland68b781f2009-09-11 10:28:29 +02002672int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
Sebastian Ott92636b12009-06-12 10:26:37 +02002673 void *rdc_buffer, int rdc_buffer_size)
Cornelia Huck17283b52007-05-04 18:47:51 +02002674{
2675 int ret;
2676 struct dasd_ccw_req *cqr;
2677
Sebastian Ott92636b12009-06-12 10:26:37 +02002678 cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
Cornelia Huck17283b52007-05-04 18:47:51 +02002679 magic);
2680 if (IS_ERR(cqr))
2681 return PTR_ERR(cqr);
2682
2683 ret = dasd_sleep_on(cqr);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002684 dasd_sfree_request(cqr, cqr->memdev);
Cornelia Huck17283b52007-05-04 18:47:51 +02002685 return ret;
2686}
Cornelia Huckaaff0f62007-05-10 15:45:45 +02002687EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
Stefan Weinhuber20c64462006-03-24 03:15:25 -08002688
/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
2695char *dasd_get_sense(struct irb *irb)
2696{
2697 struct tsb *tsb = NULL;
2698 char *sense = NULL;
2699
2700 if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
2701 if (irb->scsw.tm.tcw)
2702 tsb = tcw_get_tsb((struct tcw *)(unsigned long)
2703 irb->scsw.tm.tcw);
2704 if (tsb && tsb->length == 64 && tsb->flags)
2705 switch (tsb->flags & 0x07) {
2706 case 1: /* tsa_iostat */
2707 sense = tsb->tsa.iostat.sense;
2708 break;
2709 case 2: /* tsa_ddpc */
2710 sense = tsb->tsa.ddpc.sense;
2711 break;
2712 default:
2713 /* currently we don't use interrogate data */
2714 break;
2715 }
2716 } else if (irb->esw.esw0.erw.cons) {
2717 sense = irb->ecw;
2718 }
2719 return sense;
2720}
2721EXPORT_SYMBOL_GPL(dasd_get_sense);
2722
/*
 * Module initialization: set up the wait queues, the common debug area,
 * the devmap and gendisk infrastructure, parse the dasd= parameters and
 * register extended error reporting and the /proc interface. On any
 * failure the already initialized parts are torn down via dasd_exit().
 */
static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}
2768
module_init(dasd_init);
module_exit(dasd_exit);

/* Debug area and the hook used by the DIAG discipline module. */
EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);

/* Core request handling used by the discipline modules. */
EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_device_clear_timer);
EXPORT_SYMBOL(dasd_block_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_device_bh);
EXPORT_SYMBOL(dasd_schedule_block_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_device_set_timer);
EXPORT_SYMBOL(dasd_block_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);

/* GPL-only driver-core entry points. */
EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
EXPORT_SYMBOL_GPL(dasd_alloc_block);
EXPORT_SYMBOL_GPL(dasd_free_block);