blob: fa2339cb1681f43fb19e337ad037f75d908b8d89 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * File...........: linux/drivers/s390/block/dasd.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
Stefan Haberlandd41dd122009-06-16 10:30:25 +02008 * Copyright IBM Corp. 1999, 2009
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 */
10
Stefan Haberlandfc19f382009-03-26 15:23:49 +010011#define KMSG_COMPONENT "dasd"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
Linus Torvalds1da177e2005-04-16 15:20:36 -070014#include <linux/kmod.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/ctype.h>
18#include <linux/major.h>
19#include <linux/slab.h>
20#include <linux/buffer_head.h>
Christoph Hellwiga885c8c2006-01-08 01:02:50 -080021#include <linux/hdreg.h>
Cornelia Huckf3445a12009-04-14 15:36:23 +020022#include <linux/async.h>
Stefan Haberland9eb25122010-02-26 22:37:46 +010023#include <linux/mutex.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024
25#include <asm/ccwdev.h>
26#include <asm/ebcdic.h>
27#include <asm/idals.h>
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +010028#include <asm/itcw.h>
Stefan Weinhuber33b62a32010-03-08 12:26:24 +010029#include <asm/diag.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
31/* This is ugly... */
32#define PRINTK_HEADER "dasd:"
33
34#include "dasd_int.h"
35/*
36 * SECTION: Constant definitions to be used within this file
37 */
38#define DASD_CHANQ_MAX_SIZE 4
39
Stefan Weinhuber1c1e0932010-05-12 09:32:11 +020040#define DASD_SLEEPON_START_TAG (void *) 1
41#define DASD_SLEEPON_END_TAG (void *) 2
42
Linus Torvalds1da177e2005-04-16 15:20:36 -070043/*
44 * SECTION: exported variables of dasd.c
45 */
46debug_info_t *dasd_debug_area;
47struct dasd_discipline *dasd_diag_discipline_pointer;
Heiko Carstens2b67fc42007-02-05 21:16:47 +010048void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
Linus Torvalds1da177e2005-04-16 15:20:36 -070049
50MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
51MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
52 " Copyright 2000 IBM Corporation");
53MODULE_SUPPORTED_DEVICE("dasd");
Linus Torvalds1da177e2005-04-16 15:20:36 -070054MODULE_LICENSE("GPL");
55
56/*
57 * SECTION: prototypes for static functions of dasd.c
58 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010059static int dasd_alloc_queue(struct dasd_block *);
60static void dasd_setup_queue(struct dasd_block *);
61static void dasd_free_queue(struct dasd_block *);
62static void dasd_flush_request_queue(struct dasd_block *);
63static int dasd_flush_block_queue(struct dasd_block *);
64static void dasd_device_tasklet(struct dasd_device *);
65static void dasd_block_tasklet(struct dasd_block *);
Al Viro4927b3f2006-12-06 19:18:20 +000066static void do_kick_device(struct work_struct *);
Stefan Haberlandd41dd122009-06-16 10:30:25 +020067static void do_restore_device(struct work_struct *);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010068static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
Stefan Weinhuber48cae882009-02-11 10:37:31 +010069static void dasd_device_timeout(unsigned long);
70static void dasd_block_timeout(unsigned long);
Stefan Weinhubereb6e1992009-12-07 12:51:51 +010071static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
Linus Torvalds1da177e2005-04-16 15:20:36 -070072
73/*
74 * SECTION: Operations on the device structure.
75 */
76static wait_queue_head_t dasd_init_waitq;
Horst Hummel8f617012006-08-30 14:33:33 +020077static wait_queue_head_t dasd_flush_wq;
Stefan Haberlandc80ee722008-05-30 10:03:31 +020078static wait_queue_head_t generic_waitq;
Linus Torvalds1da177e2005-04-16 15:20:36 -070079
80/*
81 * Allocate memory for a new device structure.
82 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010083struct dasd_device *dasd_alloc_device(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -070084{
85 struct dasd_device *device;
86
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010087 device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
88 if (!device)
Linus Torvalds1da177e2005-04-16 15:20:36 -070089 return ERR_PTR(-ENOMEM);
Linus Torvalds1da177e2005-04-16 15:20:36 -070090
91 /* Get two pages for normal block device operations. */
92 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010093 if (!device->ccw_mem) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070094 kfree(device);
95 return ERR_PTR(-ENOMEM);
96 }
97 /* Get one page for error recovery. */
98 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010099 if (!device->erp_mem) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700100 free_pages((unsigned long) device->ccw_mem, 1);
101 kfree(device);
102 return ERR_PTR(-ENOMEM);
103 }
104
105 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
106 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
107 spin_lock_init(&device->mem_lock);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100108 atomic_set(&device->tasklet_scheduled, 0);
Horst Hummel138c0142006-06-29 14:58:12 +0200109 tasklet_init(&device->tasklet,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100110 (void (*)(unsigned long)) dasd_device_tasklet,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111 (unsigned long) device);
112 INIT_LIST_HEAD(&device->ccw_queue);
113 init_timer(&device->timer);
Stefan Weinhuber48cae882009-02-11 10:37:31 +0100114 device->timer.function = dasd_device_timeout;
115 device->timer.data = (unsigned long) device;
Al Viro4927b3f2006-12-06 19:18:20 +0000116 INIT_WORK(&device->kick_work, do_kick_device);
Stefan Haberlandd41dd122009-06-16 10:30:25 +0200117 INIT_WORK(&device->restore_device, do_restore_device);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118 device->state = DASD_STATE_NEW;
119 device->target = DASD_STATE_NEW;
Stefan Haberland9eb25122010-02-26 22:37:46 +0100120 mutex_init(&device->state_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121
122 return device;
123}
124
125/*
126 * Free memory of a device structure.
127 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100128void dasd_free_device(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129{
Jesper Juhl17fd6822005-11-07 01:01:30 -0800130 kfree(device->private);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700131 free_page((unsigned long) device->erp_mem);
132 free_pages((unsigned long) device->ccw_mem, 1);
133 kfree(device);
134}
135
136/*
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100137 * Allocate memory for a new device structure.
138 */
139struct dasd_block *dasd_alloc_block(void)
140{
141 struct dasd_block *block;
142
143 block = kzalloc(sizeof(*block), GFP_ATOMIC);
144 if (!block)
145 return ERR_PTR(-ENOMEM);
146 /* open_count = 0 means device online but not in use */
147 atomic_set(&block->open_count, -1);
148
149 spin_lock_init(&block->request_queue_lock);
150 atomic_set(&block->tasklet_scheduled, 0);
151 tasklet_init(&block->tasklet,
152 (void (*)(unsigned long)) dasd_block_tasklet,
153 (unsigned long) block);
154 INIT_LIST_HEAD(&block->ccw_queue);
155 spin_lock_init(&block->queue_lock);
156 init_timer(&block->timer);
Stefan Weinhuber48cae882009-02-11 10:37:31 +0100157 block->timer.function = dasd_block_timeout;
158 block->timer.data = (unsigned long) block;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100159
160 return block;
161}
162
163/*
164 * Free memory of a device structure.
165 */
/*
 * Free a dasd_block structure.  Counterpart of dasd_alloc_block().
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
170
171/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700172 * Make a new device known to the system.
173 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100174static int dasd_state_new_to_known(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700175{
176 int rc;
177
178 /*
Horst Hummel138c0142006-06-29 14:58:12 +0200179 * As long as the device is not in state DASD_STATE_NEW we want to
Linus Torvalds1da177e2005-04-16 15:20:36 -0700180 * keep the reference count > 0.
181 */
182 dasd_get_device(device);
183
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100184 if (device->block) {
185 rc = dasd_alloc_queue(device->block);
186 if (rc) {
187 dasd_put_device(device);
188 return rc;
189 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700190 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191 device->state = DASD_STATE_KNOWN;
192 return 0;
193}
194
195/*
196 * Let the system forget about a device.
197 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100198static int dasd_state_known_to_new(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199{
Stefan Weinhuber20c64462006-03-24 03:15:25 -0800200 /* Disable extended error reporting for this device. */
201 dasd_eer_disable(device);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202 /* Forget the discipline information. */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100203 if (device->discipline) {
204 if (device->discipline->uncheck_device)
205 device->discipline->uncheck_device(device);
Peter Oberparleiteraa888612006-02-20 18:28:13 -0800206 module_put(device->discipline->owner);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100207 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208 device->discipline = NULL;
Peter Oberparleiteraa888612006-02-20 18:28:13 -0800209 if (device->base_discipline)
210 module_put(device->base_discipline->owner);
211 device->base_discipline = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212 device->state = DASD_STATE_NEW;
213
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100214 if (device->block)
215 dasd_free_queue(device->block);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216
217 /* Give up reference we took in dasd_state_new_to_known. */
218 dasd_put_device(device);
Horst Hummel8f617012006-08-30 14:33:33 +0200219 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700220}
221
222/*
223 * Request the irq line for the device.
224 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100225static int dasd_state_known_to_basic(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226{
227 int rc;
228
229 /* Allocate and register gendisk structure. */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100230 if (device->block) {
231 rc = dasd_gendisk_alloc(device->block);
232 if (rc)
233 return rc;
234 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700235 /* register 'device' debug area, used for all DBF_DEV_XXX calls */
Stefan Haberlandfc19f382009-03-26 15:23:49 +0100236 device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100237 8 * sizeof(long));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700238 debug_register_view(device->debug_area, &debug_sprintf_view);
Horst Hummelb0035f12006-09-20 15:59:07 +0200239 debug_set_level(device->debug_area, DBF_WARNING);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700240 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
241
242 device->state = DASD_STATE_BASIC;
243 return 0;
244}
245
246/*
247 * Release the irq line for the device. Terminate any running i/o.
248 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100249static int dasd_state_basic_to_known(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700250{
Horst Hummel8f617012006-08-30 14:33:33 +0200251 int rc;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100252 if (device->block) {
253 dasd_gendisk_free(device->block);
254 dasd_block_clear_timer(device->block);
255 }
256 rc = dasd_flush_device_queue(device);
Horst Hummel8f617012006-08-30 14:33:33 +0200257 if (rc)
258 return rc;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100259 dasd_device_clear_timer(device);
Horst Hummel8f617012006-08-30 14:33:33 +0200260
Linus Torvalds1da177e2005-04-16 15:20:36 -0700261 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
262 if (device->debug_area != NULL) {
263 debug_unregister(device->debug_area);
264 device->debug_area = NULL;
265 }
266 device->state = DASD_STATE_KNOWN;
Horst Hummel8f617012006-08-30 14:33:33 +0200267 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268}
269
270/*
271 * Do the initial analysis. The do_analysis function may return
272 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
273 * until the discipline decides to continue the startup sequence
274 * by calling the function dasd_change_state. The eckd disciplines
275 * uses this to start a ccw that detects the format. The completion
276 * interrupt for this detection ccw uses the kernel event daemon to
277 * trigger the call to dasd_change_state. All this is done in the
278 * discipline code, see dasd_eckd.c.
Horst Hummel90f00942006-03-07 21:55:39 -0800279 * After the analysis ccw is done (do_analysis returned 0) the block
280 * device is setup.
281 * In case the analysis returns an error, the device setup is stopped
282 * (a fake disk was already added to allow formatting).
Linus Torvalds1da177e2005-04-16 15:20:36 -0700283 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100284static int dasd_state_basic_to_ready(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700285{
286 int rc;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100287 struct dasd_block *block;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700288
289 rc = 0;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100290 block = device->block;
Horst Hummel90f00942006-03-07 21:55:39 -0800291 /* make disk known with correct capacity */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100292 if (block) {
293 if (block->base->discipline->do_analysis != NULL)
294 rc = block->base->discipline->do_analysis(block);
295 if (rc) {
296 if (rc != -EAGAIN)
297 device->state = DASD_STATE_UNFMT;
298 return rc;
299 }
300 dasd_setup_queue(block);
301 set_capacity(block->gdp,
302 block->blocks << block->s2b_shift);
303 device->state = DASD_STATE_READY;
304 rc = dasd_scan_partitions(block);
305 if (rc)
306 device->state = DASD_STATE_BASIC;
307 } else {
308 device->state = DASD_STATE_READY;
309 }
Horst Hummel90f00942006-03-07 21:55:39 -0800310 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700311}
312
313/*
314 * Remove device from block device layer. Destroy dirty buffers.
315 * Forget format information. Check if the target level is basic
316 * and if it is create fake disk for formatting.
317 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100318static int dasd_state_ready_to_basic(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700319{
Horst Hummel8f617012006-08-30 14:33:33 +0200320 int rc;
321
Linus Torvalds1da177e2005-04-16 15:20:36 -0700322 device->state = DASD_STATE_BASIC;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100323 if (device->block) {
324 struct dasd_block *block = device->block;
325 rc = dasd_flush_block_queue(block);
326 if (rc) {
327 device->state = DASD_STATE_READY;
328 return rc;
329 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100330 dasd_flush_request_queue(block);
Stefan Haberlandb695adf2010-02-26 22:37:48 +0100331 dasd_destroy_partitions(block);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100332 block->blocks = 0;
333 block->bp_block = 0;
334 block->s2b_shift = 0;
335 }
Horst Hummel8f617012006-08-30 14:33:33 +0200336 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700337}
338
339/*
Horst Hummel90f00942006-03-07 21:55:39 -0800340 * Back to basic.
341 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100342static int dasd_state_unfmt_to_basic(struct dasd_device *device)
Horst Hummel90f00942006-03-07 21:55:39 -0800343{
344 device->state = DASD_STATE_BASIC;
Horst Hummel8f617012006-08-30 14:33:33 +0200345 return 0;
Horst Hummel90f00942006-03-07 21:55:39 -0800346}
347
348/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700349 * Make the device online and schedule the bottom half to start
350 * the requeueing of requests from the linux request queue to the
351 * ccw queue.
352 */
Horst Hummel8f617012006-08-30 14:33:33 +0200353static int
Linus Torvalds1da177e2005-04-16 15:20:36 -0700354dasd_state_ready_to_online(struct dasd_device * device)
355{
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100356 int rc;
Stefan Weinhuber13018092009-01-09 12:14:50 +0100357 struct gendisk *disk;
358 struct disk_part_iter piter;
359 struct hd_struct *part;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100360
361 if (device->discipline->ready_to_online) {
362 rc = device->discipline->ready_to_online(device);
363 if (rc)
364 return rc;
365 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700366 device->state = DASD_STATE_ONLINE;
Stefan Weinhuber13018092009-01-09 12:14:50 +0100367 if (device->block) {
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100368 dasd_schedule_block_bh(device->block);
Stefan Weinhuber13018092009-01-09 12:14:50 +0100369 disk = device->block->bdev->bd_disk;
370 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
371 while ((part = disk_part_iter_next(&piter)))
372 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
373 disk_part_iter_exit(&piter);
374 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700375 return 0;
376}
377
378/*
379 * Stop the requeueing of requests again.
380 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100381static int dasd_state_online_to_ready(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700382{
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100383 int rc;
Stefan Weinhuber13018092009-01-09 12:14:50 +0100384 struct gendisk *disk;
385 struct disk_part_iter piter;
386 struct hd_struct *part;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100387
388 if (device->discipline->online_to_ready) {
389 rc = device->discipline->online_to_ready(device);
390 if (rc)
391 return rc;
392 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700393 device->state = DASD_STATE_READY;
Stefan Weinhuber13018092009-01-09 12:14:50 +0100394 if (device->block) {
395 disk = device->block->bdev->bd_disk;
396 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
397 while ((part = disk_part_iter_next(&piter)))
398 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
399 disk_part_iter_exit(&piter);
400 }
Horst Hummel8f617012006-08-30 14:33:33 +0200401 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700402}
403
404/*
405 * Device startup state changes.
406 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100407static int dasd_increase_state(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408{
409 int rc;
410
411 rc = 0;
412 if (device->state == DASD_STATE_NEW &&
413 device->target >= DASD_STATE_KNOWN)
414 rc = dasd_state_new_to_known(device);
415
416 if (!rc &&
417 device->state == DASD_STATE_KNOWN &&
418 device->target >= DASD_STATE_BASIC)
419 rc = dasd_state_known_to_basic(device);
420
421 if (!rc &&
422 device->state == DASD_STATE_BASIC &&
423 device->target >= DASD_STATE_READY)
424 rc = dasd_state_basic_to_ready(device);
425
426 if (!rc &&
Horst Hummel39ccf952006-04-27 18:40:10 -0700427 device->state == DASD_STATE_UNFMT &&
428 device->target > DASD_STATE_UNFMT)
429 rc = -EPERM;
430
431 if (!rc &&
Linus Torvalds1da177e2005-04-16 15:20:36 -0700432 device->state == DASD_STATE_READY &&
433 device->target >= DASD_STATE_ONLINE)
434 rc = dasd_state_ready_to_online(device);
435
436 return rc;
437}
438
439/*
440 * Device shutdown state changes.
441 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100442static int dasd_decrease_state(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700443{
Horst Hummel8f617012006-08-30 14:33:33 +0200444 int rc;
445
446 rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700447 if (device->state == DASD_STATE_ONLINE &&
448 device->target <= DASD_STATE_READY)
Horst Hummel8f617012006-08-30 14:33:33 +0200449 rc = dasd_state_online_to_ready(device);
Horst Hummel138c0142006-06-29 14:58:12 +0200450
Horst Hummel8f617012006-08-30 14:33:33 +0200451 if (!rc &&
452 device->state == DASD_STATE_READY &&
Linus Torvalds1da177e2005-04-16 15:20:36 -0700453 device->target <= DASD_STATE_BASIC)
Horst Hummel8f617012006-08-30 14:33:33 +0200454 rc = dasd_state_ready_to_basic(device);
Horst Hummel90f00942006-03-07 21:55:39 -0800455
Horst Hummel8f617012006-08-30 14:33:33 +0200456 if (!rc &&
457 device->state == DASD_STATE_UNFMT &&
Horst Hummel90f00942006-03-07 21:55:39 -0800458 device->target <= DASD_STATE_BASIC)
Horst Hummel8f617012006-08-30 14:33:33 +0200459 rc = dasd_state_unfmt_to_basic(device);
Horst Hummel90f00942006-03-07 21:55:39 -0800460
Horst Hummel8f617012006-08-30 14:33:33 +0200461 if (!rc &&
462 device->state == DASD_STATE_BASIC &&
Linus Torvalds1da177e2005-04-16 15:20:36 -0700463 device->target <= DASD_STATE_KNOWN)
Horst Hummel8f617012006-08-30 14:33:33 +0200464 rc = dasd_state_basic_to_known(device);
Horst Hummel138c0142006-06-29 14:58:12 +0200465
Horst Hummel8f617012006-08-30 14:33:33 +0200466 if (!rc &&
467 device->state == DASD_STATE_KNOWN &&
Linus Torvalds1da177e2005-04-16 15:20:36 -0700468 device->target <= DASD_STATE_NEW)
Horst Hummel8f617012006-08-30 14:33:33 +0200469 rc = dasd_state_known_to_new(device);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700470
Horst Hummel8f617012006-08-30 14:33:33 +0200471 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700472}
473
474/*
475 * This is the main startup/shutdown routine.
476 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100477static void dasd_change_state(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700478{
Sebastian Ott181d9522009-06-22 12:08:21 +0200479 int rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700480
481 if (device->state == device->target)
482 /* Already where we want to go today... */
483 return;
484 if (device->state < device->target)
485 rc = dasd_increase_state(device);
486 else
487 rc = dasd_decrease_state(device);
Sebastian Ott181d9522009-06-22 12:08:21 +0200488 if (rc == -EAGAIN)
489 return;
490 if (rc)
491 device->target = device->state;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700492
Stefan Haberland9eb25122010-02-26 22:37:46 +0100493 if (device->state == device->target)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700494 wake_up(&dasd_init_waitq);
Horst Hummel4dfd5c42007-04-27 16:01:47 +0200495
496 /* let user-space know that the device status changed */
497 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700498}
499
500/*
501 * Kick starter for devices that did not complete the startup/shutdown
502 * procedure or were sleeping because of a pending state.
503 * dasd_kick_device will schedule a call do do_kick_device to the kernel
504 * event daemon.
505 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100506static void do_kick_device(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700507{
Al Viro4927b3f2006-12-06 19:18:20 +0000508 struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
Stefan Haberland9eb25122010-02-26 22:37:46 +0100509 mutex_lock(&device->state_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700510 dasd_change_state(device);
Stefan Haberland9eb25122010-02-26 22:37:46 +0100511 mutex_unlock(&device->state_mutex);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100512 dasd_schedule_device_bh(device);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700513 dasd_put_device(device);
514}
515
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100516void dasd_kick_device(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700517{
518 dasd_get_device(device);
519 /* queue call to dasd_kick_device to the kernel event daemon. */
520 schedule_work(&device->kick_work);
521}
522
523/*
Stefan Haberlandd41dd122009-06-16 10:30:25 +0200524 * dasd_restore_device will schedule a call do do_restore_device to the kernel
525 * event daemon.
526 */
527static void do_restore_device(struct work_struct *work)
528{
529 struct dasd_device *device = container_of(work, struct dasd_device,
530 restore_device);
531 device->cdev->drv->restore(device->cdev);
532 dasd_put_device(device);
533}
534
535void dasd_restore_device(struct dasd_device *device)
536{
537 dasd_get_device(device);
538 /* queue call to dasd_restore_device to the kernel event daemon. */
539 schedule_work(&device->restore_device);
540}
541
542/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700543 * Set the target state for a device and starts the state change.
544 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100545void dasd_set_target_state(struct dasd_device *device, int target)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546{
Cornelia Huckf3445a12009-04-14 15:36:23 +0200547 dasd_get_device(device);
Stefan Haberland9eb25122010-02-26 22:37:46 +0100548 mutex_lock(&device->state_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700549 /* If we are in probeonly mode stop at DASD_STATE_READY. */
550 if (dasd_probeonly && target > DASD_STATE_READY)
551 target = DASD_STATE_READY;
552 if (device->target != target) {
Stefan Haberland9eb25122010-02-26 22:37:46 +0100553 if (device->state == target)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700554 wake_up(&dasd_init_waitq);
555 device->target = target;
556 }
557 if (device->state != device->target)
558 dasd_change_state(device);
Stefan Haberland9eb25122010-02-26 22:37:46 +0100559 mutex_unlock(&device->state_mutex);
560 dasd_put_device(device);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700561}
562
563/*
564 * Enable devices with device numbers in [from..to].
565 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100566static inline int _wait_for_device(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700567{
568 return (device->state == device->target);
569}
570
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100571void dasd_enable_device(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700572{
573 dasd_set_target_state(device, DASD_STATE_ONLINE);
574 if (device->state <= DASD_STATE_KNOWN)
575 /* No discipline for device found. */
576 dasd_set_target_state(device, DASD_STATE_NEW);
577 /* Now wait for the devices to come up. */
578 wait_event(dasd_init_waitq, _wait_for_device(device));
579}
580
581/*
582 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
583 */
584#ifdef CONFIG_DASD_PROFILE
585
586struct dasd_profile_info_t dasd_global_profile;
587unsigned int dasd_profile_level = DASD_PROFILE_OFF;
588
589/*
590 * Increments counter in global and local profiling structures.
591 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100592#define dasd_profile_counter(value, counter, block) \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700593{ \
594 int index; \
595 for (index = 0; index < 31 && value >> (2+index); index++); \
596 dasd_global_profile.counter[index]++; \
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100597 block->profile.counter[index]++; \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700598}
599
600/*
601 * Add profiling information for cqr before execution.
602 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100603static void dasd_profile_start(struct dasd_block *block,
604 struct dasd_ccw_req *cqr,
605 struct request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700606{
607 struct list_head *l;
608 unsigned int counter;
609
610 if (dasd_profile_level != DASD_PROFILE_ON)
611 return;
612
613 /* count the length of the chanq for statistics */
614 counter = 0;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100615 list_for_each(l, &block->ccw_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700616 if (++counter >= 31)
617 break;
618 dasd_global_profile.dasd_io_nr_req[counter]++;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100619 block->profile.dasd_io_nr_req[counter]++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620}
621
622/*
623 * Add profiling information for cqr after execution.
624 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100625static void dasd_profile_end(struct dasd_block *block,
626 struct dasd_ccw_req *cqr,
627 struct request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700628{
629 long strtime, irqtime, endtime, tottime; /* in microseconds */
630 long tottimeps, sectors;
631
632 if (dasd_profile_level != DASD_PROFILE_ON)
633 return;
634
Tejun Heo83096eb2009-05-07 22:24:39 +0900635 sectors = blk_rq_sectors(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700636 if (!cqr->buildclk || !cqr->startclk ||
637 !cqr->stopclk || !cqr->endclk ||
638 !sectors)
639 return;
640
641 strtime = ((cqr->startclk - cqr->buildclk) >> 12);
642 irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
643 endtime = ((cqr->endclk - cqr->stopclk) >> 12);
644 tottime = ((cqr->endclk - cqr->buildclk) >> 12);
645 tottimeps = tottime / sectors;
646
647 if (!dasd_global_profile.dasd_io_reqs)
648 memset(&dasd_global_profile, 0,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100649 sizeof(struct dasd_profile_info_t));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700650 dasd_global_profile.dasd_io_reqs++;
651 dasd_global_profile.dasd_io_sects += sectors;
652
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100653 if (!block->profile.dasd_io_reqs)
654 memset(&block->profile, 0,
655 sizeof(struct dasd_profile_info_t));
656 block->profile.dasd_io_reqs++;
657 block->profile.dasd_io_sects += sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700658
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100659 dasd_profile_counter(sectors, dasd_io_secs, block);
660 dasd_profile_counter(tottime, dasd_io_times, block);
661 dasd_profile_counter(tottimeps, dasd_io_timps, block);
662 dasd_profile_counter(strtime, dasd_io_time1, block);
663 dasd_profile_counter(irqtime, dasd_io_time2, block);
664 dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
665 dasd_profile_counter(endtime, dasd_io_time3, block);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700666}
667#else
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100668#define dasd_profile_start(block, cqr, req) do {} while (0)
669#define dasd_profile_end(block, cqr, req) do {} while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700670#endif /* CONFIG_DASD_PROFILE */
671
672/*
673 * Allocate memory for a channel program with 'cplength' channel
674 * command words and 'datasize' additional space. There are two
675 * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed
676 * memory and 2) dasd_smalloc_request uses the static ccw memory
677 * that gets allocated for each device.
678 */
Stefan Haberland68b781f2009-09-11 10:28:29 +0200679struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100680 int datasize,
681 struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700682{
683 struct dasd_ccw_req *cqr;
684
685 /* Sanity checks */
Stefan Haberland68b781f2009-09-11 10:28:29 +0200686 BUG_ON(datasize > PAGE_SIZE ||
Eric Sesterhenn7ac1e872006-03-24 18:48:13 +0100687 (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688
Eric Sesterhenn88abaab2006-03-24 03:15:31 -0800689 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700690 if (cqr == NULL)
691 return ERR_PTR(-ENOMEM);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700692 cqr->cpaddr = NULL;
693 if (cplength > 0) {
Eric Sesterhenn88abaab2006-03-24 03:15:31 -0800694 cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700695 GFP_ATOMIC | GFP_DMA);
696 if (cqr->cpaddr == NULL) {
697 kfree(cqr);
698 return ERR_PTR(-ENOMEM);
699 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700700 }
701 cqr->data = NULL;
702 if (datasize > 0) {
Eric Sesterhenn88abaab2006-03-24 03:15:31 -0800703 cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700704 if (cqr->data == NULL) {
Jesper Juhl17fd6822005-11-07 01:01:30 -0800705 kfree(cqr->cpaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700706 kfree(cqr);
707 return ERR_PTR(-ENOMEM);
708 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709 }
Stefan Haberland68b781f2009-09-11 10:28:29 +0200710 cqr->magic = magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700711 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
712 dasd_get_device(device);
713 return cqr;
714}
715
Stefan Haberland68b781f2009-09-11 10:28:29 +0200716struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100717 int datasize,
718 struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700719{
720 unsigned long flags;
721 struct dasd_ccw_req *cqr;
722 char *data;
723 int size;
724
725 /* Sanity checks */
Stefan Haberland68b781f2009-09-11 10:28:29 +0200726 BUG_ON(datasize > PAGE_SIZE ||
Eric Sesterhenn7ac1e872006-03-24 18:48:13 +0100727 (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700728
729 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
730 if (cplength > 0)
731 size += cplength * sizeof(struct ccw1);
732 if (datasize > 0)
733 size += datasize;
734 spin_lock_irqsave(&device->mem_lock, flags);
735 cqr = (struct dasd_ccw_req *)
736 dasd_alloc_chunk(&device->ccw_chunks, size);
737 spin_unlock_irqrestore(&device->mem_lock, flags);
738 if (cqr == NULL)
739 return ERR_PTR(-ENOMEM);
740 memset(cqr, 0, sizeof(struct dasd_ccw_req));
741 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
742 cqr->cpaddr = NULL;
743 if (cplength > 0) {
744 cqr->cpaddr = (struct ccw1 *) data;
745 data += cplength*sizeof(struct ccw1);
746 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
747 }
748 cqr->data = NULL;
749 if (datasize > 0) {
750 cqr->data = data;
751 memset(cqr->data, 0, datasize);
752 }
Stefan Haberland68b781f2009-09-11 10:28:29 +0200753 cqr->magic = magic;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700754 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
755 dasd_get_device(device);
756 return cqr;
757}
758
759/*
760 * Free memory of a channel program. This function needs to free all the
761 * idal lists that might have been created by dasd_set_cda and the
762 * struct dasd_ccw_req itself.
763 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100764void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700765{
Martin Schwidefsky347a8dc2006-01-06 00:19:28 -0800766#ifdef CONFIG_64BIT
Linus Torvalds1da177e2005-04-16 15:20:36 -0700767 struct ccw1 *ccw;
768
769 /* Clear any idals used for the request. */
770 ccw = cqr->cpaddr;
771 do {
772 clear_normalized_cda(ccw);
773 } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
774#endif
Jesper Juhl17fd6822005-11-07 01:01:30 -0800775 kfree(cqr->cpaddr);
776 kfree(cqr->data);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777 kfree(cqr);
778 dasd_put_device(device);
779}
780
/*
 * Return a ccw request allocated by dasd_smalloc_request to the
 * per-device chunk pool and drop the device reference taken at
 * allocation time.
 */
void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	/* chunk list is shared; serialize via mem_lock */
	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
790
/*
 * Check discipline magic in cqr.
 * Returns 0 if the request's magic matches the ebcname of the
 * discipline of its start device, -EINVAL for a NULL cqr or a
 * mismatch.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	/* magic is compared as a 4-byte ebcdic name */
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}
811
/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	/* Try at most 5 times while the request is still in flight. */
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0: /* termination successful */
			cqr->retries--;
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_clock();
			/* reset start time; request is no longer running */
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			/* internal error 10 - unknown rc*/
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	/* let the tasklet pick up whatever state the request is in now */
	dasd_schedule_device_bh(device);
	return rc;
}
868
/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 * The return code is also stored in cqr->intrc for the caller.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO run out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	/* cpmode == 1: start via ccw_device_tm_start, else plain ccw start */
	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: selected pathes gone,"
			      " retry on all pathes");
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		/* most likely caused in power management context */
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}
953
954/*
955 * Timeout function for dasd devices. This is used for different purposes
956 * 1) missing interrupt handler for normal operation
957 * 2) delayed start of request where start_IO failed with -EBUSY
958 * 3) timeout for missing state change interrupts
959 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
960 * DASD_CQR_QUEUED for 2) and 3).
961 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100962static void dasd_device_timeout(unsigned long ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700963{
964 unsigned long flags;
965 struct dasd_device *device;
966
967 device = (struct dasd_device *) ptr;
968 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
969 /* re-activate request queue */
Stefan Weinhubereb6e1992009-12-07 12:51:51 +0100970 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100972 dasd_schedule_device_bh(device);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700973}
974
975/*
976 * Setup timeout for a device in jiffies.
977 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100978void dasd_device_set_timer(struct dasd_device *device, int expires)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700979{
Stefan Weinhuber48cae882009-02-11 10:37:31 +0100980 if (expires == 0)
981 del_timer(&device->timer);
982 else
983 mod_timer(&device->timer, jiffies + expires);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984}
985
/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}
993
/*
 * Handle a request whose interrupt was killed (called from
 * dasd_int_handler when the irb is an ERR_PTR): validate that the
 * intparm still refers to a request of this device and requeue it
 * so it gets restarted.
 */
static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	/* takes a reference on the device; dropped before each return */
	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	/* make sure the request really belongs to this device/discipline */
	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}
1034
/*
 * React to a state change interrupt: trigger an SNSS request, lift the
 * DASD_STOPPED_PENDING stop bit and restart both the device and (if
 * present) the block layer queue processing.
 */
void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
}
1045
/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 * Classifies the interrupt (killed, unsolicited, clear-pending,
 * success or error), updates the cqr status accordingly, optionally
 * fast-starts the next queued request, and schedules the device
 * tasklet for the rest of the processing.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;

	/* an ERR_PTR irb means the interrupt was killed or timed out */
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_clock();

	/* check for unsolicited interrupts */
	cqr = (struct dasd_ccw_req *) intparm;
	if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
		     (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
		     (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) {
		/* requeue an in-flight request hit by the unsolicited irq */
		if (cqr && cqr->status == DASD_CQR_IN_IO)
			cqr->status = DASD_CQR_QUEUED;
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			dasd_device_clear_timer(device);
			/* let the discipline deal with the details */
			device->discipline->handle_unsolicited_interrupt(device,
									 irb);
			dasd_put_device(device);
		}
		return;
	}

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		/* dasd_flush_device_queue waits for CLEAR_PENDING to end */
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		/* keep a copy of the irb for later error handling */
		memcpy(&cqr->irb, irb, sizeof(struct irb));
		/* log sense for every failed I/O to s390 debugfeature */
		dasd_log_sense_dbf(cqr, irb);
		if (device->features & DASD_FEATURE_ERPLOG) {
			dasd_log_sense(cqr, irb);
		}

		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == LPM_ANYPATH)
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			cqr->lpm = LPM_ANYPATH;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	/* fast-start the next request unless the device is stopped */
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
1166
1167/*
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001168 * If we have an error on a dasd_block layer request then we cancel
1169 * and return all further requests from the same dasd_block as well.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001170 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001171static void __dasd_device_recovery(struct dasd_device *device,
1172 struct dasd_ccw_req *ref_cqr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173{
1174 struct list_head *l, *n;
1175 struct dasd_ccw_req *cqr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176
1177 /*
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001178 * only requeue request that came from the dasd_block layer
Linus Torvalds1da177e2005-04-16 15:20:36 -07001179 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001180 if (!ref_cqr->block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181 return;
Horst Hummelf24acd42005-05-01 08:58:59 -07001182
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001183 list_for_each_safe(l, n, &device->ccw_queue) {
1184 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1185 if (cqr->status == DASD_CQR_QUEUED &&
1186 ref_cqr->block == cqr->block) {
1187 cqr->status = DASD_CQR_CLEARED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001189 }
1190};
1191
1192/*
1193 * Remove those ccw requests from the queue that need to be returned
1194 * to the upper layer.
1195 */
1196static void __dasd_device_process_ccw_queue(struct dasd_device *device,
1197 struct list_head *final_queue)
1198{
1199 struct list_head *l, *n;
1200 struct dasd_ccw_req *cqr;
1201
1202 /* Process request with final status. */
1203 list_for_each_safe(l, n, &device->ccw_queue) {
1204 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1205
1206 /* Stop list processing at the first non-final request. */
1207 if (cqr->status == DASD_CQR_QUEUED ||
1208 cqr->status == DASD_CQR_IN_IO ||
1209 cqr->status == DASD_CQR_CLEAR_PENDING)
1210 break;
1211 if (cqr->status == DASD_CQR_ERROR) {
1212 __dasd_device_recovery(device, cqr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001214 /* Rechain finished requests to final queue */
1215 list_move_tail(&cqr->devlist, final_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216 }
1217}
1218
1219/*
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001220 * the cqrs from the final queue are returned to the upper layer
1221 * by setting a dasd_block state and calling the callback function
1222 */
1223static void __dasd_device_process_final_queue(struct dasd_device *device,
1224 struct list_head *final_queue)
1225{
1226 struct list_head *l, *n;
1227 struct dasd_ccw_req *cqr;
Stefan Weinhuber03513bc2008-02-19 15:29:27 +01001228 struct dasd_block *block;
Stefan Haberlandc80ee722008-05-30 10:03:31 +02001229 void (*callback)(struct dasd_ccw_req *, void *data);
1230 void *callback_data;
Stefan Haberlandfc19f382009-03-26 15:23:49 +01001231 char errorstring[ERRORLENGTH];
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001232
1233 list_for_each_safe(l, n, final_queue) {
1234 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1235 list_del_init(&cqr->devlist);
Stefan Weinhuber03513bc2008-02-19 15:29:27 +01001236 block = cqr->block;
Stefan Haberlandc80ee722008-05-30 10:03:31 +02001237 callback = cqr->callback;
1238 callback_data = cqr->callback_data;
Stefan Weinhuber03513bc2008-02-19 15:29:27 +01001239 if (block)
1240 spin_lock_bh(&block->queue_lock);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001241 switch (cqr->status) {
1242 case DASD_CQR_SUCCESS:
1243 cqr->status = DASD_CQR_DONE;
1244 break;
1245 case DASD_CQR_ERROR:
1246 cqr->status = DASD_CQR_NEED_ERP;
1247 break;
1248 case DASD_CQR_CLEARED:
1249 cqr->status = DASD_CQR_TERMINATED;
1250 break;
1251 default:
Stefan Haberlandfc19f382009-03-26 15:23:49 +01001252 /* internal error 12 - wrong cqr status*/
1253 snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
1254 dev_err(&device->cdev->dev,
1255 "An error occurred in the DASD device driver, "
1256 "reason=%s\n", errorstring);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001257 BUG();
1258 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001259 if (cqr->callback != NULL)
Stefan Haberlandc80ee722008-05-30 10:03:31 +02001260 (callback)(cqr, callback_data);
Stefan Weinhuber03513bc2008-02-19 15:29:27 +01001261 if (block)
1262 spin_unlock_bh(&block->queue_lock);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001263 }
1264}
1265
/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	/* only an in-flight request with a non-zero expiry can time out */
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%is) but cannot be "
				"ended, retrying in 5 s\n",
				cqr, (cqr->expires/HZ));
			/* extend the deadline and re-arm the device timer */
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%is), %i retries "
				"remaining\n", cqr, (cqr->expires/HZ),
				cqr->retries);
		}
	}
}
1295
/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* when device is stopped, return request to previous layer */
	if (device->stopped) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		/* started: arm the timer with the request's own expiry */
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		/* paths gone; start_IO reset the lpm, just retry via bh */
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}
1326
/*
 * Go through all request on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called!. In other words, when 'device' is a base
 * device then all block layer requests must have been removed before
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Flushing the DASD request queue "
					"failed for request %p\n", cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			cqr->stopclk = get_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default: /* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}
1386
/*
 * Acquire the device lock and process queues for the device.
 * Final-status callbacks are invoked with the ccwdev lock dropped.
 */
static void dasd_device_tasklet(struct dasd_device *device)
{
	struct list_head final_queue;

	/* allow the tasklet to be scheduled again */
	atomic_set (&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* drop the reference taken by dasd_schedule_device_bh */
	dasd_put_device(device);
}
1410
1411/*
1412 * Schedules a call to dasd_tasklet over the device tasklet.
1413 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001414void dasd_schedule_device_bh(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415{
1416 /* Protect against rescheduling. */
Martin Schwidefsky973bd992006-01-06 00:19:07 -08001417 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 return;
1419 dasd_get_device(device);
1420 tasklet_hi_schedule(&device->tasklet);
1421}
1422
/*
 * Set one or more DASD_STOPPED_* bits in the device's stop mask.
 * NOTE(review): callers in this file hold the ccwdev lock around this
 * call; this helper itself takes no lock.
 */
void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped |= bits;
}
EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
1428
/*
 * Clear the given DASD_STOPPED_* bits.  Once no stop condition is left
 * at all, wake the sleep_on waiters blocked on generic_waitq so they
 * can (re)start their requests.
 */
void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped &= ~bits;
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
1436
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437/*
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001438 * Queue a request to the head of the device ccw_queue.
1439 * Start the I/O if possible.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001441void dasd_add_request_head(struct dasd_ccw_req *cqr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442{
1443 struct dasd_device *device;
1444 unsigned long flags;
1445
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001446 device = cqr->startdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001448 cqr->status = DASD_CQR_QUEUED;
1449 list_add(&cqr->devlist, &device->ccw_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 /* let the bh start the request to keep them in order */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001451 dasd_schedule_device_bh(device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1453}
1454
1455/*
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001456 * Queue a request to the tail of the device ccw_queue.
1457 * Start the I/O if possible.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001459void dasd_add_request_tail(struct dasd_ccw_req *cqr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460{
1461 struct dasd_device *device;
1462 unsigned long flags;
1463
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001464 device = cqr->startdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001466 cqr->status = DASD_CQR_QUEUED;
1467 list_add_tail(&cqr->devlist, &device->ccw_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468 /* let the bh start the request to keep them in order */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001469 dasd_schedule_device_bh(device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1471}
1472
1473/*
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001474 * Wakeup helper for the 'sleep_on' functions.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001476static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477{
Stefan Weinhuber1c1e0932010-05-12 09:32:11 +02001478 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
1479 cqr->callback_data = DASD_SLEEPON_END_TAG;
1480 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
1481 wake_up(&generic_waitq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482}
1483
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001484static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485{
1486 struct dasd_device *device;
1487 int rc;
1488
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001489 device = cqr->startdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 spin_lock_irq(get_ccwdev_lock(device->cdev));
Stefan Weinhuber1c1e0932010-05-12 09:32:11 +02001491 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1493 return rc;
1494}
1495
/*
 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
 *
 * Helper for the _dasd_sleep_on() loop: inspects the request state and
 * drives one step of error recovery where needed.  A return of 1 tells
 * the caller to re-examine the queue; 0 means the request needs no
 * (further) recovery handling.
 */
static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	dasd_erp_fn_t erp_fn;

	/* FILLED means "not yet started" - nothing to recover. */
	if (cqr->status == DASD_CQR_FILLED)
		return 0;
	device = cqr->startdev;
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		/* Terminated request: let the discipline decide on retry. */
		if (cqr->status == DASD_CQR_TERMINATED) {
			device->discipline->handle_terminated_request(cqr);
			return 1;
		}
		/* Start a discipline-specific error recovery action. */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = device->discipline->erp_action(cqr);
			erp_fn(cqr);
			return 1;
		}
		/* Final failure: record the sense data for diagnosis. */
		if (cqr->status == DASD_CQR_FAILED)
			dasd_log_sense(cqr, &cqr->irb);
		/* Completed ERP request: fold the result back. */
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			return 1;
		}
	}
	return 0;
}
1526
1527static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
1528{
1529 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
1530 if (cqr->refers) /* erp is not done yet */
1531 return 1;
1532 return ((cqr->status != DASD_CQR_DONE) &&
1533 (cqr->status != DASD_CQR_FAILED));
1534 } else
1535 return (cqr->status == DASD_CQR_FILLED);
1536}
1537
/*
 * Synchronously execute a request (and any ERP requests it spawns):
 * queue it on the device ccw_queue and sleep on generic_waitq until
 * the whole chain is finished.  With interruptible != 0 the waits may
 * be broken by a signal, in which case the request is cancelled and
 * -ERESTARTSYS is returned via maincqr->intrc.
 *
 * Returns 0 if maincqr completed successfully, the saved interrupt
 * code if set, or -EIO otherwise.
 */
static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
{
	struct dasd_device *device;
	int rc;
	struct list_head ccw_queue;
	struct dasd_ccw_req *cqr;

	INIT_LIST_HEAD(&ccw_queue);
	maincqr->status = DASD_CQR_FILLED;
	device = maincqr->startdev;
	list_add(&maincqr->blocklist, &ccw_queue);
	/* Each iteration processes the current head of the local queue;
	 * ERP may replace the head with recovery requests. */
	for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
	     cqr = list_first_entry(&ccw_queue,
				    struct dasd_ccw_req, blocklist)) {

		if (__dasd_sleep_on_erp(cqr))
			continue;
		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
			continue;

		/* Non-temporary stop condition will trigger fail fast */
		if (device->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(device))) {
			cqr->status = DASD_CQR_FAILED;
			continue;
		}

		/* Don't try to start requests if device is stopped */
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, !(device->stopped));
			if (rc == -ERESTARTSYS) {
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, !(device->stopped));

		/* Start the request and wait for dasd_wakeup_cb(). */
		cqr->callback = dasd_wakeup_cb;
		cqr->callback_data = DASD_SLEEPON_START_TAG;
		dasd_add_request_tail(cqr);
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, _wait_for_wakeup(cqr));
			if (rc == -ERESTARTSYS) {
				dasd_cancel_req(cqr);
				/* wait (non-interruptible) for final status */
				wait_event(generic_waitq,
					   _wait_for_wakeup(cqr));
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}

	maincqr->endclk = get_clock();
	/* Don't log sense data when the caller merely got a signal. */
	if ((maincqr->status != DASD_CQR_DONE) &&
	    (maincqr->intrc != -ERESTARTSYS))
		dasd_log_sense(maincqr, &maincqr->irb);
	if (maincqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (maincqr->intrc)
		rc = maincqr->intrc;
	else
		rc = -EIO;
	return rc;
}
1609
/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * it's completion.
 *
 * Returns 0 on success, otherwise a negative error code (see
 * _dasd_sleep_on()).
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	/* interruptible == 0: sleep non-interruptibly */
	return _dasd_sleep_on(cqr, 0);
}
1618
/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptible for it's completion.
 *
 * Returns 0 on success, -ERESTARTSYS if interrupted by a signal,
 * otherwise a negative error code (see _dasd_sleep_on()).
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	/* interruptible == 1: a signal cancels the request */
	return _dasd_sleep_on(cqr, 1);
}
1627
1628/*
1629 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
1630 * for eckd devices) the currently running request has to be terminated
1631 * and be put back to status queued, before the special request is added
1632 * to the head of the queue. Then the special request is waited on normally.
1633 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001634static inline int _dasd_term_running_cqr(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635{
1636 struct dasd_ccw_req *cqr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637
1638 if (list_empty(&device->ccw_queue))
1639 return 0;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001640 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
Horst Hummel8f617012006-08-30 14:33:33 +02001641 return device->discipline->term_IO(cqr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642}
1643
/*
 * Terminate the currently running request (see the comment above
 * _dasd_term_running_cqr), queue cqr at the head of the device
 * ccw_queue and wait for its completion.
 *
 * Returns 0 on success, cqr->intrc if set, otherwise -EIO; if the
 * running request cannot be terminated, its error code is returned
 * and cqr is not started.
 */
int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	/* Queue at the head, still under the ccwdev lock, so nothing
	 * can slip in between the terminate and our request. */
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = DASD_SLEEPON_START_TAG;
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	if (cqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (cqr->intrc)
		rc = cqr->intrc;
	else
		rc = -EIO;
	return rc;
}
1677
1678/*
1679 * Cancels a request that was started with dasd_sleep_on_req.
1680 * This is useful to timeout requests. The request will be
1681 * terminated if it is currently in i/o.
1682 * Returns 1 if the request has been terminated.
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001683 * 0 if there was no need to terminate the request (not started yet)
1684 * negative error code if termination failed
1685 * Cancellation of a request is an asynchronous operation! The calling
1686 * function has to wait until the request is properly returned via callback.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 */
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to cleared */
		cqr->status = DASD_CQR_CLEARED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		rc = device->discipline->term_IO(cqr);
		if (rc) {
			dev_err(&device->cdev->dev,
				"Cancelling request %p failed with rc=%d\n",
				cqr, rc);
		} else {
			/* terminate succeeded - timestamp the stop */
			cqr->stopclk = get_clock();
		}
		break;
	default: /* already finished or clear pending - do nothing */
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	/* Let the bh pick up the cleared/terminated request. */
	dasd_schedule_device_bh(device);
	return rc;
}
1719
1720
1721/*
1722 * SECTION: Operations of the dasd_block layer.
1723 */
1724
/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably, (e.g. a state
 * change interrupt)
 *
 * Runs as a timer callback; ptr carries the dasd_block pointer (set up
 * where the timer is initialized - outside this excerpt).
 */
static void dasd_block_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_block *block;

	block = (struct dasd_block *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
	/* and let the block bh pick up the waiting requests */
	dasd_schedule_block_bh(block);
}
1742
1743/*
1744 * Setup timeout for a dasd_block in jiffies.
1745 */
1746void dasd_block_set_timer(struct dasd_block *block, int expires)
1747{
Stefan Weinhuber48cae882009-02-11 10:37:31 +01001748 if (expires == 0)
1749 del_timer(&block->timer);
1750 else
1751 mod_timer(&block->timer, jiffies + expires);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001752}
1753
/*
 * Clear timeout for a dasd_block.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
	/* del_timer is safe to call even if the timer is not pending */
	del_timer(&block->timer);
}
1761
1762/*
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001763 * Process finished error recovery ccw.
1764 */
Stefan Weinhubereb6e1992009-12-07 12:51:51 +01001765static void __dasd_process_erp(struct dasd_device *device,
1766 struct dasd_ccw_req *cqr)
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001767{
1768 dasd_erp_fn_t erp_fn;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001769
1770 if (cqr->status == DASD_CQR_DONE)
1771 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1772 else
Stefan Haberlandfc19f382009-03-26 15:23:49 +01001773 dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001774 erp_fn = device->discipline->erp_postaction(cqr);
1775 erp_fn(cqr);
1776}
1777
/*
 * Fetch requests from the block device queue.
 *
 * Translates block layer requests into dasd_ccw_reqs via the
 * discipline's build_cp and chains them on block->ccw_queue.  Called
 * with block->queue_lock held (see dasd_block_tasklet and
 * do_dasd_request).
 */
static void __dasd_process_request_queue(struct dasd_block *block)
{
	struct request_queue *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	struct dasd_device *basedev;
	unsigned long flags;
	queue = block->request_queue;
	basedev = block->base;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue request from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (basedev->state < DASD_STATE_READY) {
		/* Device not usable yet - fail everything queued. */
		while ((req = blk_fetch_request(block->request_queue)))
			__blk_end_request_all(req, -EIO);
		return;
	}
	/* Now we try to fetch requests from the request queue */
	while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
		/* Writes to a read-only device are rejected immediately. */
		if (basedev->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "Rejecting write request %p",
				      req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		cqr = basedev->discipline->build_cp(basedev, block, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -EBUSY)
				break;	/* normal end condition */
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be build right
				 * now, we have to try later. If this request
				 * is the head-of-queue we stop the device
				 * for 1/2 second.
				 */
				if (!list_empty(&block->ccw_queue))
					break;
				spin_lock_irqsave(
					get_ccwdev_lock(basedev->cdev), flags);
				dasd_device_set_stop_bits(basedev,
							  DASD_STOPPED_PENDING);
				spin_unlock_irqrestore(
					get_ccwdev_lock(basedev->cdev), flags);
				/* dasd_block_timeout() clears the stop bit */
				dasd_block_set_timer(block, HZ/2);
				break;
			}
			/* Any other error: fail this request, try the next. */
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		/*
		 * Note: callback is set to dasd_return_cqr_cb in
		 * __dasd_block_start_head to cover erp requests as well
		 */
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_FILLED;
		blk_start_request(req);
		list_add_tail(&cqr->blocklist, &block->ccw_queue);
		dasd_profile_start(block, cqr, req);
	}
}
1860
1861static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1862{
1863 struct request *req;
1864 int status;
Kiyoshi Ueda4c4e2142008-01-28 10:29:42 +01001865 int error = 0;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001866
1867 req = (struct request *) cqr->callback_data;
1868 dasd_profile_end(cqr->block, cqr, req);
Stefan Weinhuberfe6b8e72008-02-05 16:50:47 +01001869 status = cqr->block->base->discipline->free_cp(cqr, req);
Kiyoshi Ueda4c4e2142008-01-28 10:29:42 +01001870 if (status <= 0)
1871 error = status ? status : -EIO;
Tejun Heo40cbbb72009-04-23 11:05:19 +09001872 __blk_end_request_all(req, error);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001873}
1874
/*
 * Process ccw request queue.
 *
 * Walks block->ccw_queue and handles every request that has reached a
 * final-ish state; fully finished requests are moved to final_queue
 * for the caller to complete.  Whenever a handler may have modified
 * the queue, the walk restarts from the top.  Called with
 * block->queue_lock held.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process request with final status. */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		/* Skip requests that are still in flight. */
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;

		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			goto restart;
		}

		/* Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
			/* erp_action may fail; leave the request for later */
			if (IS_ERR(erp_fn(cqr)))
				continue;
			goto restart;
		}

		/* log sense for fatal error */
		if (cqr->status == DASD_CQR_FAILED) {
			dasd_log_sense(cqr, &cqr->irb);
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

			/* restart request */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			/* quiesce the device until EER handling releases it */
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(base, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}
1941
/*
 * Device-layer callback for block-layer requests (installed in
 * __dasd_block_start_head): reschedule the block bh so the finished
 * request is picked up by __dasd_process_block_ccw_queue().
 */
static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
	dasd_schedule_block_bh(cqr->block);
}
1946
/*
 * Hand startable requests from the head of block->ccw_queue over to
 * the device layer.  Called with block->queue_lock held.
 */
static void __dasd_block_start_head(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&block->ccw_queue))
		return;
	/* We always begin with the first requests on the queue, as some
	 * of previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
			/* schedule the bh so the failure is completed */
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (block->base->stopped)
			return;

		/* just a fail safe check, should not happen */
		if (!cqr->startdev)
			cqr->startdev = block->base;

		/* make sure that the requests we submit find their way back */
		cqr->callback = dasd_return_cqr_cb;

		dasd_add_request_tail(cqr);
	}
}
1982
/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_block and processes ccw requests that have been returned.
 *
 * Lock nesting: request_queue_lock (irq) is taken outside
 * queue_lock (bh) in the second half of this function.
 */
static void dasd_block_tasklet(struct dasd_block *block)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Allow re-scheduling; see dasd_schedule_block_bh(). */
	atomic_set(&block->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock(&block->queue_lock);
	/* Finish off requests on ccw queue */
	__dasd_process_block_ccw_queue(block, &final_queue);
	spin_unlock(&block->queue_lock);
	/* Now call the callback function of requests with final status */
	spin_lock_irq(&block->request_queue_lock);
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
	}
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irq(&block->request_queue_lock);
	/* Drop the reference taken when the tasklet was scheduled. */
	dasd_put_device(block->base);
}
2016
/*
 * Request callback used during a block queue flush: wakes
 * dasd_flush_block_queue(), which sleeps on dasd_flush_wq until the
 * device layer has returned each flushed request.
 */
static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up(&dasd_flush_wq);
}
2021
/*
 * Go through all request on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 *
 * Returns 0 on success or the first negative error code from
 * dasd_cancel_req().
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr, *n;
	int rc, i;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_bh(&block->queue_lock);
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if this request currently owned by a dasd_device cancel it */
		if (cqr->status >= DASD_CQR_QUEUED)
			rc = dasd_cancel_req(cqr);
		if (rc < 0)
			break;
		/* Rechain request (including erp chain) so it won't be
		 * touched by the dasd_block_tasklet anymore.
		 * Replace the callback so we notice when the request
		 * is returned from the dasd_device layer.
		 */
		cqr->callback = _dasd_wake_block_flush_cb;
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, &flush_queue);
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}
	spin_unlock_bh(&block->queue_lock);
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
		/* wait until the device layer has let go of the request */
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_erp(block->base, cqr);
			spin_unlock_bh(&block->queue_lock);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		spin_lock_irq(&block->request_queue_lock);
		cqr->endclk = get_clock();
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
		spin_unlock_irq(&block->request_queue_lock);
	}
	return rc;
}
2078
2079/*
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002080 * Schedules a call to dasd_tasklet over the device tasklet.
2081 */
2082void dasd_schedule_block_bh(struct dasd_block *block)
2083{
2084 /* Protect against rescheduling. */
2085 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
2086 return;
2087 /* life cycle of block is bound to it's base device */
2088 dasd_get_device(block->base);
2089 tasklet_hi_schedule(&block->tasklet);
2090}
2091
2092
2093/*
2094 * SECTION: external block device operations
2095 * (request queue handling, open, release, etc.)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096 */
2097
2098/*
2099 * Dasd request queue function. Called from ll_rw_blk.c
2100 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002101static void do_dasd_request(struct request_queue *queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102{
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002103 struct dasd_block *block;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002105 block = queue->queuedata;
2106 spin_lock(&block->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 /* Get new request from the block device request queue */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002108 __dasd_process_request_queue(block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109 /* Now check if the head of the ccw queue needs to be started. */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002110 __dasd_block_start_head(block);
2111 spin_unlock(&block->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112}
2113
/*
 * Allocate and initialize request queue and default I/O scheduler.
 *
 * Replaces the default elevator with "deadline".  Returns 0 on
 * success, -ENOMEM if the queue cannot be allocated, or the elevator
 * init error.
 */
static int dasd_alloc_queue(struct dasd_block *block)
{
	int rc;

	block->request_queue = blk_init_queue(do_dasd_request,
					      &block->request_queue_lock);
	if (block->request_queue == NULL)
		return -ENOMEM;

	block->request_queue->queuedata = block;

	/* swap the default elevator for deadline */
	elevator_exit(block->request_queue->elevator);
	block->request_queue->elevator = NULL;
	rc = elevator_init(block->request_queue, "deadline");
	if (rc) {
		blk_cleanup_queue(block->request_queue);
		return rc;
	}
	return 0;
}
2137
/*
 * Configure the limits of an already allocated request queue
 * (block size, maximum I/O size, segment limits, ordering).
 */
static void dasd_setup_queue(struct dasd_block *block)
{
	int max;

	blk_queue_logical_block_size(block->request_queue, block->bp_block);
	max = block->base->discipline->max_blocks << block->s2b_shift;
	blk_queue_max_hw_sectors(block->request_queue, max);
	blk_queue_max_segments(block->request_queue, -1L);
	/* with page sized segments we can translate each segment into
	 * one idaw/tidaw
	 */
	blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
	blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
	blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
}
2156
2157/*
2158 * Deactivate and free request queue.
2159 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002160static void dasd_free_queue(struct dasd_block *block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161{
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002162 if (block->request_queue) {
2163 blk_cleanup_queue(block->request_queue);
2164 block->request_queue = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 }
2166}
2167
2168/*
2169 * Flush request on the request queue.
2170 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002171static void dasd_flush_request_queue(struct dasd_block *block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172{
2173 struct request *req;
2174
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002175 if (!block->request_queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 return;
Horst Hummel138c0142006-06-29 14:58:12 +02002177
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002178 spin_lock_irq(&block->request_queue_lock);
Tejun Heo9934c8c2009-05-08 11:54:16 +09002179 while ((req = blk_fetch_request(block->request_queue)))
Tejun Heo40cbbb72009-04-23 11:05:19 +09002180 __blk_end_request_all(req, -EIO);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002181 spin_unlock_irq(&block->request_queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182}
2183
/*
 * Block device open handler.
 * Takes an open-count reference and pins the discipline module; both are
 * released again on every error path (and otherwise in dasd_release).
 * The open is rejected while the device is offline, in probeonly mode,
 * not yet fully recognized, or opened for write on a read-only device.
 */
static int dasd_open(struct block_device *bdev, fmode_t mode)
{
	struct dasd_block *block = bdev->bd_disk->private_data;
	struct dasd_device *base;
	int rc;

	if (!block)
		return -ENODEV;

	base = block->base;
	/* count this opener; undone at "unlock" on failure */
	atomic_inc(&block->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	/* pin the discipline module; dropped at "out" on failure */
	if (!try_module_get(base->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		dev_info(&base->cdev->dev,
			 "Accessing the DASD failed because it is in "
			 "probeonly mode\n");
		rc = -EPERM;
		goto out;
	}

	/* device must be fully recognized before it can be used */
	if (base->state <= DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, base, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	/* read-only either via driver flag or via device feature */
	if ((mode & FMODE_WRITE) &&
	    (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
	     (base->features & DASD_FEATURE_READONLY))) {
		rc = -EROFS;
		goto out;
	}

	return 0;

out:
	module_put(base->discipline->owner);
unlock:
	atomic_dec(&block->open_count);
	return rc;
}
2235
Al Viro57a7c0b2008-03-02 10:36:08 -05002236static int dasd_release(struct gendisk *disk, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237{
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002238 struct dasd_block *block = disk->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002240 atomic_dec(&block->open_count);
2241 module_put(block->base->discipline->owner);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242 return 0;
2243}
2244
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08002245/*
2246 * Return disk geometry.
2247 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002248static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08002249{
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002250 struct dasd_block *block;
2251 struct dasd_device *base;
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08002252
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002253 block = bdev->bd_disk->private_data;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002254 if (!block)
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08002255 return -ENODEV;
Julia Lawallcf05b822009-08-23 18:09:05 +02002256 base = block->base;
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08002257
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002258 if (!base->discipline ||
2259 !base->discipline->fill_geometry)
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08002260 return -EINVAL;
2261
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002262 base->discipline->fill_geometry(block, geo);
2263 geo->start = get_start_sect(bdev) >> block->s2b_shift;
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08002264 return 0;
2265}
2266
/* Block layer operations for DASD block devices. */
const struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_ioctl,	/* same handler serves 32-bit callers */
	.getgeo		= dasd_getgeo,
};
2276
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002277/*******************************************************************************
2278 * end of block device operations
2279 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280
2281static void
2282dasd_exit(void)
2283{
2284#ifdef CONFIG_PROC_FS
2285 dasd_proc_exit();
2286#endif
Stefan Weinhuber20c64462006-03-24 03:15:25 -08002287 dasd_eer_exit();
Horst Hummel6bb0e012005-07-27 11:45:03 -07002288 if (dasd_page_cache != NULL) {
2289 kmem_cache_destroy(dasd_page_cache);
2290 dasd_page_cache = NULL;
2291 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292 dasd_gendisk_exit();
2293 dasd_devmap_exit();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294 if (dasd_debug_area != NULL) {
2295 debug_unregister(dasd_debug_area);
2296 dasd_debug_area = NULL;
2297 }
2298}
2299
2300/*
2301 * SECTION: common functions for ccw_driver use
2302 */
2303
Stefan Weinhuber33b62a32010-03-08 12:26:24 +01002304/*
2305 * Is the device read-only?
2306 * Note that this function does not report the setting of the
2307 * readonly device attribute, but how it is configured in z/VM.
2308 */
2309int dasd_device_is_ro(struct dasd_device *device)
2310{
2311 struct ccw_dev_id dev_id;
2312 struct diag210 diag_data;
2313 int rc;
2314
2315 if (!MACHINE_IS_VM)
2316 return 0;
2317 ccw_device_get_id(device->cdev, &dev_id);
2318 memset(&diag_data, 0, sizeof(diag_data));
2319 diag_data.vrdcdvno = dev_id.devno;
2320 diag_data.vrdclen = sizeof(diag_data);
2321 rc = diag210(&diag_data);
2322 if (rc == 0 || rc == 2) {
2323 return diag_data.vrdcvfla & 0x80;
2324 } else {
2325 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
2326 dev_id.devno, rc);
2327 return 0;
2328 }
2329}
2330EXPORT_SYMBOL_GPL(dasd_device_is_ro);
2331
Cornelia Huckf3445a12009-04-14 15:36:23 +02002332static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
2333{
2334 struct ccw_device *cdev = data;
2335 int ret;
2336
2337 ret = ccw_device_set_online(cdev);
2338 if (ret)
2339 pr_warning("%s: Setting the DASD online failed with rc=%d\n",
2340 dev_name(&cdev->dev), ret);
Cornelia Huckf3445a12009-04-14 15:36:23 +02002341}
2342
Horst Hummel1c01b8a2006-01-06 00:19:15 -08002343/*
2344 * Initial attempt at a probe function. this can be simplified once
2345 * the other detection code is gone.
2346 */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002347int dasd_generic_probe(struct ccw_device *cdev,
2348 struct dasd_discipline *discipline)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349{
2350 int ret;
2351
2352 ret = dasd_add_sysfs_files(cdev);
2353 if (ret) {
Stefan Haberlandb8ed5dd2009-12-07 12:51:52 +01002354 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
2355 "dasd_generic_probe: could not add "
2356 "sysfs entries");
Horst Hummel40545572006-06-29 15:08:18 +02002357 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358 }
Horst Hummel40545572006-06-29 15:08:18 +02002359 cdev->handler = &dasd_int_handler;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360
Horst Hummel40545572006-06-29 15:08:18 +02002361 /*
2362 * Automatically online either all dasd devices (dasd_autodetect)
2363 * or all devices specified with dasd= parameters during
2364 * initial probe.
2365 */
2366 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
Kay Sievers2a0217d2008-10-10 21:33:09 +02002367 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
Cornelia Huckf3445a12009-04-14 15:36:23 +02002368 async_schedule(dasd_generic_auto_online, cdev);
Stefan Haberlandde3e0da2008-01-26 14:11:08 +01002369 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370}
2371
/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;

	/* detach the interrupt handler before tearing anything down */
	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);
}
2409
/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 *
 * On success the device ends up online; on any failure the device
 * structure is deleted again and a negative error code is returned.
 */
int dasd_generic_set_online(struct ccw_device *cdev,
			    struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	/* the DIAG discipline overrides the base discipline if requested */
	discipline = base_discipline;
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			pr_warning("%s Setting the DASD online failed because "
				   "of missing DIAG discipline\n",
				   dev_name(&cdev->dev));
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	/* pin both modules; they may differ when DIAG is in use */
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	/* check_device will allocate block device if necessary */
	rc = discipline->check_device(device);
	if (rc) {
		pr_warning("%s Setting the DASD online with discipline %s "
			   "failed with rc=%i\n",
			   dev_name(&cdev->dev), discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		/* the state transition did not get past recognition */
		pr_warning("%s Setting the DASD online failed because of a "
			   "missing discipline\n", dev_name(&cdev->dev));
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		if (device->block)
			dasd_free_block(device->block);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 dev_name(&cdev->dev));

	/* block until device initialization has settled */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);
	return rc;
}
2481
/*
 * Deactivate a device that is going offline. Fails with -EBUSY while
 * the block device is opened by anyone other than the partition scan.
 */
int dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;
	int max_count, open_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	if (device->block) {
		/* a set bdev means the partition scan holds one reference */
		max_count = device->block->bdev ? 0 : -1;
		open_count = atomic_read(&device->block->open_count);
		if (open_count > max_count) {
			if (open_count > 0)
				pr_warning("%s: The DASD cannot be set offline "
					   "with open count %i\n",
					   dev_name(&cdev->dev), open_count);
			else
				pr_warning("%s: The DASD cannot be set offline "
					   "while it is in use\n",
					   dev_name(&cdev->dev));
			/* undo the offline flag set above */
			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
			dasd_put_device(device);
			return -EBUSY;
		}
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);
	return 0;
}
2532
/*
 * Notification handler for common I/O layer events.
 * Returns 1 when the event was handled and the device should be kept,
 * 0 otherwise.
 */
int dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	int ret;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return 0;
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_BOXED:
	case CIO_NO_PATH:
		/* all three path-loss events share the same handling */
		/* First of all call extended error reporting. */
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);

		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		list_for_each_entry(cqr, &device->ccw_queue, devlist)
			if (cqr->status == DASD_CQR_IN_IO) {
				/* requeue in-flight requests for a retry */
				cqr->status = DASD_CQR_QUEUED;
				cqr->retries++;
			}
		dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
		dasd_device_clear_timer(device);
		dasd_schedule_device_bh(device);
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
		if (device->stopped & DASD_UNRESUMED_PM) {
			/* device reappeared after an unsuccessful resume */
			dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
			dasd_restore_device(device);
			ret = 1;
			break;
		}
		dasd_schedule_device_bh(device);
		if (device->block)
			dasd_schedule_block_bh(device->block);
		ret = 1;
		break;
	}
	dasd_put_device(device);
	return ret;
}
2581
/*
 * Power management suspend callback: stop the device, terminate every
 * request that is currently in flight and requeue all requests so they
 * are restarted after resume.
 */
int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head freeze_queue;
	struct dasd_device *device = dasd_device_from_cdev(cdev);

	if (IS_ERR(device))
		return PTR_ERR(device);
	/* disallow new I/O */
	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
	/* clear active requests */
	INIT_LIST_HEAD(&freeze_queue);
	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
				spin_unlock_irq(get_ccwdev_lock(cdev));
				dasd_put_device(device);
				return rc;
			}
		}
		list_move_tail(&cqr->devlist, &freeze_queue);
	}

	spin_unlock_irq(get_ccwdev_lock(cdev));

	/* wait for the terminations to complete, outside the lock */
	list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
		if (cqr->status == DASD_CQR_CLEARED)
			cqr->status = DASD_CQR_QUEUED;
	}
	/* splice the frozen requests back onto the end of the ccw_queue */
	spin_lock_irq(get_ccwdev_lock(cdev));
	list_splice_tail(&freeze_queue, &device->ccw_queue);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	/* give the discipline a chance to do its own freeze processing */
	if (device->discipline->freeze)
		rc = device->discipline->freeze(device);

	dasd_put_device(device);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
2634
/*
 * Power management resume callback: re-enable I/O and run the
 * discipline's restore function. Returns 0 unless the device lookup
 * fails; a failed restore is recorded via the DASD_UNRESUMED_PM stop
 * bit instead of an error return.
 */
int dasd_generic_restore_device(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);
	int rc = 0;

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* allow new IO again */
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));

	dasd_schedule_device_bh(device);

	/*
	 * call discipline restore function
	 * if device is stopped do nothing e.g. for disconnected devices
	 */
	if (device->discipline->restore && !(device->stopped))
		rc = device->discipline->restore(device);
	if (rc || device->stopped)
		/*
		 * if the resume failed for the DASD we put it in
		 * an UNRESUMED stop state
		 */
		device->stopped |= DASD_UNRESUMED_PM;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_put_device(device);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
2669
/*
 * Build a Read Device Characteristics (RDC) channel program consisting
 * of a single CCW that reads rdc_buffer_size bytes into rdc_buffer.
 * Returns the prepared request or an ERR_PTR on allocation failure.
 */
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   void *rdc_buffer,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	unsigned long *idaw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
		/* buffer needs an indirect data address list; build it
		 * in the request's data area */
		idaw = (unsigned long *) (cqr->data);
		ccw->cda = (__u32)(addr_t) idaw;
		ccw->flags = CCW_FLAG_IDA;
		idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
	} else {
		/* buffer is directly addressable by the channel */
		ccw->cda = (__u32)(addr_t) rdc_buffer;
		ccw->flags = 0;
	}

	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
2710
2711
Stefan Haberland68b781f2009-09-11 10:28:29 +02002712int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
Sebastian Ott92636b12009-06-12 10:26:37 +02002713 void *rdc_buffer, int rdc_buffer_size)
Cornelia Huck17283b52007-05-04 18:47:51 +02002714{
2715 int ret;
2716 struct dasd_ccw_req *cqr;
2717
Sebastian Ott92636b12009-06-12 10:26:37 +02002718 cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
Cornelia Huck17283b52007-05-04 18:47:51 +02002719 magic);
2720 if (IS_ERR(cqr))
2721 return PTR_ERR(cqr);
2722
2723 ret = dasd_sleep_on(cqr);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002724 dasd_sfree_request(cqr, cqr->memdev);
Cornelia Huck17283b52007-05-04 18:47:51 +02002725 return ret;
2726}
Cornelia Huckaaff0f62007-05-10 15:45:45 +02002727EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
Stefan Weinhuber20c64462006-03-24 03:15:25 -08002728
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002729/*
2730 * In command mode and transport mode we need to look for sense
2731 * data in different places. The sense data itself is allways
2732 * an array of 32 bytes, so we can unify the sense data access
2733 * for both modes.
2734 */
2735char *dasd_get_sense(struct irb *irb)
2736{
2737 struct tsb *tsb = NULL;
2738 char *sense = NULL;
2739
2740 if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
2741 if (irb->scsw.tm.tcw)
2742 tsb = tcw_get_tsb((struct tcw *)(unsigned long)
2743 irb->scsw.tm.tcw);
2744 if (tsb && tsb->length == 64 && tsb->flags)
2745 switch (tsb->flags & 0x07) {
2746 case 1: /* tsa_iostat */
2747 sense = tsb->tsa.iostat.sense;
2748 break;
2749 case 2: /* tsa_ddpc */
2750 sense = tsb->tsa.ddpc.sense;
2751 break;
2752 default:
2753 /* currently we don't use interrogate data */
2754 break;
2755 }
2756 } else if (irb->esw.esw0.erw.cons) {
2757 sense = irb->ecw;
2758 }
2759 return sense;
2760}
2761EXPORT_SYMBOL_GPL(dasd_get_sense);
2762
/*
 * Driver initialization: set up the wait queues, the debug area and all
 * driver subsystems. On failure, dasd_exit() tears down whatever has
 * been set up so far.
 */
static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	/* subsystem setup; order matches the teardown in dasd_exit */
	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}
2808
module_init(dasd_init);
module_exit(dasd_exit);

/* Exported interface of the DASD core driver. */
EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);

EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_device_clear_timer);
EXPORT_SYMBOL(dasd_block_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_device_bh);
EXPORT_SYMBOL(dasd_schedule_block_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_device_set_timer);
EXPORT_SYMBOL(dasd_block_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);

/* GPL-only exports */
EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
EXPORT_SYMBOL_GPL(dasd_alloc_block);
EXPORT_SYMBOL_GPL(dasd_free_block);