/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
 *
 */
11
12#include <linux/config.h>
13#include <linux/kmod.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/ctype.h>
17#include <linux/major.h>
18#include <linux/slab.h>
19#include <linux/buffer_head.h>
Christoph Hellwiga885c8c2006-01-08 01:02:50 -080020#include <linux/hdreg.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021
22#include <asm/ccwdev.h>
23#include <asm/ebcdic.h>
24#include <asm/idals.h>
25#include <asm/todclk.h>
26
27/* This is ugly... */
28#define PRINTK_HEADER "dasd:"
29
30#include "dasd_int.h"
31/*
32 * SECTION: Constant definitions to be used within this file
33 */
34#define DASD_CHANQ_MAX_SIZE 4
35
36/*
37 * SECTION: exported variables of dasd.c
38 */
39debug_info_t *dasd_debug_area;
40struct dasd_discipline *dasd_diag_discipline_pointer;
41
42MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
43MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
44 " Copyright 2000 IBM Corporation");
45MODULE_SUPPORTED_DEVICE("dasd");
46MODULE_PARM(dasd, "1-" __MODULE_STRING(256) "s");
47MODULE_LICENSE("GPL");
48
49/*
50 * SECTION: prototypes for static functions of dasd.c
51 */
52static int dasd_alloc_queue(struct dasd_device * device);
53static void dasd_setup_queue(struct dasd_device * device);
54static void dasd_free_queue(struct dasd_device * device);
55static void dasd_flush_request_queue(struct dasd_device *);
56static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
57static void dasd_flush_ccw_queue(struct dasd_device *, int);
58static void dasd_tasklet(struct dasd_device *);
59static void do_kick_device(void *data);
60
61/*
62 * SECTION: Operations on the device structure.
63 */
64static wait_queue_head_t dasd_init_waitq;
65
66/*
67 * Allocate memory for a new device structure.
68 */
69struct dasd_device *
70dasd_alloc_device(void)
71{
72 struct dasd_device *device;
73
74 device = kmalloc(sizeof (struct dasd_device), GFP_ATOMIC);
75 if (device == NULL)
76 return ERR_PTR(-ENOMEM);
77 memset(device, 0, sizeof (struct dasd_device));
78 /* open_count = 0 means device online but not in use */
79 atomic_set(&device->open_count, -1);
80
81 /* Get two pages for normal block device operations. */
82 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
83 if (device->ccw_mem == NULL) {
84 kfree(device);
85 return ERR_PTR(-ENOMEM);
86 }
87 /* Get one page for error recovery. */
88 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
89 if (device->erp_mem == NULL) {
90 free_pages((unsigned long) device->ccw_mem, 1);
91 kfree(device);
92 return ERR_PTR(-ENOMEM);
93 }
94
95 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
96 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
97 spin_lock_init(&device->mem_lock);
98 spin_lock_init(&device->request_queue_lock);
99 atomic_set (&device->tasklet_scheduled, 0);
100 tasklet_init(&device->tasklet,
101 (void (*)(unsigned long)) dasd_tasklet,
102 (unsigned long) device);
103 INIT_LIST_HEAD(&device->ccw_queue);
104 init_timer(&device->timer);
105 INIT_WORK(&device->kick_work, do_kick_device, device);
106 device->state = DASD_STATE_NEW;
107 device->target = DASD_STATE_NEW;
108
109 return device;
110}
111
/*
 * Free memory of a device structure.
 * Counterpart of dasd_alloc_device: releases the discipline private
 * data, the erp page, the two ccw pages and the structure itself.
 */
void
dasd_free_device(struct dasd_device *device)
{
	/* May be NULL if no discipline ever attached its private data. */
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}
123
124/*
125 * Make a new device known to the system.
126 */
127static inline int
128dasd_state_new_to_known(struct dasd_device *device)
129{
130 int rc;
131
132 /*
133 * As long as the device is not in state DASD_STATE_NEW we want to
134 * keep the reference count > 0.
135 */
136 dasd_get_device(device);
137
138 rc = dasd_alloc_queue(device);
139 if (rc) {
140 dasd_put_device(device);
141 return rc;
142 }
143
144 device->state = DASD_STATE_KNOWN;
145 return 0;
146}
147
/*
 * State transition KNOWN -> NEW: let the system forget about a device.
 * Reverses dasd_state_new_to_known: detaches the disciplines, frees the
 * request queue and finally drops the long-term device reference.
 */
static inline void
dasd_state_known_to_new(struct dasd_device * device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline)
		module_put(device->discipline->owner);
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	dasd_free_queue(device);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
}
170
171/*
172 * Request the irq line for the device.
173 */
174static inline int
175dasd_state_known_to_basic(struct dasd_device * device)
176{
177 int rc;
178
179 /* Allocate and register gendisk structure. */
180 rc = dasd_gendisk_alloc(device);
181 if (rc)
182 return rc;
183
184 /* register 'device' debug area, used for all DBF_DEV_XXX calls */
Michael Holzheu66a464d2005-06-25 14:55:33 -0700185 device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186 8 * sizeof (long));
187 debug_register_view(device->debug_area, &debug_sprintf_view);
188 debug_set_level(device->debug_area, DBF_EMERG);
189 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
190
191 device->state = DASD_STATE_BASIC;
192 return 0;
193}
194
/*
 * State transition BASIC -> KNOWN: release the gendisk, terminate any
 * outstanding i/o on the ccw queue and unregister the debug area.
 */
static inline void
dasd_state_basic_to_known(struct dasd_device * device)
{
	dasd_gendisk_free(device);
	/* Flush with terminate_IO (second argument != 0). */
	dasd_flush_ccw_queue(device, 1);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
}
210
/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd disciplines
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is setup.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static inline int
dasd_state_basic_to_ready(struct dasd_device * device)
{
	int rc;

	rc = 0;
	/* do_analysis is optional; disciplines without it go straight on. */
	if (device->discipline->do_analysis != NULL)
		rc = device->discipline->do_analysis(device);
	if (rc) {
		/* -EAGAIN keeps BASIC; any other error means unformatted. */
		if (rc != -EAGAIN)
			device->state = DASD_STATE_UNFMT;
		return rc;
	}
	/* make disk known with correct capacity */
	dasd_setup_queue(device);
	/* blocks is in device blocksize units, capacity in 512-byte sectors. */
	set_capacity(device->gdp, device->blocks << device->s2b_shift);
	device->state = DASD_STATE_READY;
	rc = dasd_scan_partitions(device);
	/* Partition scan failure rolls the state back to BASIC. */
	if (rc)
		device->state = DASD_STATE_BASIC;
	return rc;
}
247
/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static inline void
dasd_state_ready_to_basic(struct dasd_device * device)
{
	/* Flush without terminate_IO (second argument == 0). */
	dasd_flush_ccw_queue(device, 0);
	dasd_destroy_partitions(device);
	dasd_flush_request_queue(device);
	/* Forget the detected format information. */
	device->blocks = 0;
	device->bp_block = 0;
	device->s2b_shift = 0;
	device->state = DASD_STATE_BASIC;
}
264
/*
 * Back to basic.
 * UNFMT was entered from dasd_state_basic_to_ready when do_analysis
 * failed; leaving it only requires resetting the state field.
 */
static inline void
dasd_state_unfmt_to_basic(struct dasd_device * device)
{
	device->state = DASD_STATE_BASIC;
}
273
/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static inline int
dasd_state_ready_to_online(struct dasd_device * device)
{
	device->state = DASD_STATE_ONLINE;
	/* Kick the tasklet so queued requests get picked up. */
	dasd_schedule_bh(device);
	return 0;
}
286
/*
 * Stop the requeueing of requests again.
 * Dropping below ONLINE is enough; the queue handling checks the state.
 */
static inline void
dasd_state_online_to_ready(struct dasd_device * device)
{
	device->state = DASD_STATE_READY;
}
295
296/*
297 * Device startup state changes.
298 */
299static inline int
300dasd_increase_state(struct dasd_device *device)
301{
302 int rc;
303
304 rc = 0;
305 if (device->state == DASD_STATE_NEW &&
306 device->target >= DASD_STATE_KNOWN)
307 rc = dasd_state_new_to_known(device);
308
309 if (!rc &&
310 device->state == DASD_STATE_KNOWN &&
311 device->target >= DASD_STATE_BASIC)
312 rc = dasd_state_known_to_basic(device);
313
314 if (!rc &&
315 device->state == DASD_STATE_BASIC &&
316 device->target >= DASD_STATE_READY)
317 rc = dasd_state_basic_to_ready(device);
318
319 if (!rc &&
320 device->state == DASD_STATE_READY &&
321 device->target >= DASD_STATE_ONLINE)
322 rc = dasd_state_ready_to_online(device);
323
324 return rc;
325}
326
/*
 * Device shutdown state changes.
 * Walks the device down one state at a time towards device->target.
 * Each transition updates device->state, so the following checks
 * chain naturally. None of the downward transitions can fail.
 */
static inline int
dasd_decrease_state(struct dasd_device *device)
{
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		dasd_state_online_to_ready(device);

	if (device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		dasd_state_ready_to_basic(device);

	/* UNFMT is a side state reached from BASIC on analysis failure. */
	if (device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		dasd_state_unfmt_to_basic(device);

	if (device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		dasd_state_basic_to_known(device);

	if (device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		dasd_state_known_to_new(device);

	return 0;
}
355
356/*
357 * This is the main startup/shutdown routine.
358 */
359static void
360dasd_change_state(struct dasd_device *device)
361{
362 int rc;
363
364 if (device->state == device->target)
365 /* Already where we want to go today... */
366 return;
367 if (device->state < device->target)
368 rc = dasd_increase_state(device);
369 else
370 rc = dasd_decrease_state(device);
371 if (rc && rc != -EAGAIN)
372 device->target = device->state;
373
374 if (device->state == device->target)
375 wake_up(&dasd_init_waitq);
376}
377
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call do do_kick_device to the kernel
 * event daemon.
 */
static void
do_kick_device(void *data)
{
	struct dasd_device *device = (struct dasd_device *) data;

	dasd_change_state(device);
	dasd_schedule_bh(device);
	/* Drop the reference taken in dasd_kick_device. */
	dasd_put_device(device);
}
394
/*
 * Schedule do_kick_device for this device on the kernel event daemon.
 * Takes a device reference that do_kick_device drops when it runs.
 */
void
dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}
402
/*
 * Set the target state for a device and starts the state change.
 */
void
dasd_set_target_state(struct dasd_device *device, int target)
{
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		/*
		 * If the device already sits in the new target state,
		 * waiters can be released right away.
		 */
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
}
420
421/*
422 * Enable devices with device numbers in [from..to].
423 */
424static inline int
425_wait_for_device(struct dasd_device *device)
426{
427 return (device->state == device->target);
428}
429
/*
 * Bring a device online and wait until the state change completes.
 * Falls back to target NEW if no discipline could be attached.
 */
void
dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));
}
440
441/*
442 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
443 */
444#ifdef CONFIG_DASD_PROFILE
445
446struct dasd_profile_info_t dasd_global_profile;
447unsigned int dasd_profile_level = DASD_PROFILE_OFF;
448
449/*
450 * Increments counter in global and local profiling structures.
451 */
452#define dasd_profile_counter(value, counter, device) \
453{ \
454 int index; \
455 for (index = 0; index < 31 && value >> (2+index); index++); \
456 dasd_global_profile.counter[index]++; \
457 device->profile.counter[index]++; \
458}
459
460/*
461 * Add profiling information for cqr before execution.
462 */
463static inline void
464dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
465 struct request *req)
466{
467 struct list_head *l;
468 unsigned int counter;
469
470 if (dasd_profile_level != DASD_PROFILE_ON)
471 return;
472
473 /* count the length of the chanq for statistics */
474 counter = 0;
475 list_for_each(l, &device->ccw_queue)
476 if (++counter >= 31)
477 break;
478 dasd_global_profile.dasd_io_nr_req[counter]++;
479 device->profile.dasd_io_nr_req[counter]++;
480}
481
/*
 * Add profiling information for cqr after execution.
 * Computes the build/start/stop/end timing deltas and folds them into
 * the global and per-device histograms via dasd_profile_counter.
 */
static inline void
dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
		 struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = req->nr_sectors;
	/* Skip requests without a complete set of timestamps or no data. */
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	/* >> 12 converts the clock deltas to microseconds. */
	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	/* Reset the statistics on the first request after a wrap to 0. */
	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof (struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!device->profile.dasd_io_reqs)
		memset(&device->profile, 0,
		       sizeof (struct dasd_profile_info_t));
	device->profile.dasd_io_reqs++;
	device->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, device);
	dasd_profile_counter(tottime, dasd_io_times, device);
	dasd_profile_counter(tottimeps, dasd_io_timps, device);
	dasd_profile_counter(strtime, dasd_io_time1, device);
	dasd_profile_counter(irqtime, dasd_io_time2, device);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device);
	dasd_profile_counter(endtime, dasd_io_time3, device);
}
527#else
528#define dasd_profile_start(device, cqr, req) do {} while (0)
529#define dasd_profile_end(device, cqr, req) do {} while (0)
530#endif /* CONFIG_DASD_PROFILE */
531
532/*
533 * Allocate memory for a channel program with 'cplength' channel
534 * command words and 'datasize' additional space. There are two
535 * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed
536 * memory and 2) dasd_smalloc_request uses the static ccw memory
537 * that gets allocated for each device.
538 */
539struct dasd_ccw_req *
540dasd_kmalloc_request(char *magic, int cplength, int datasize,
541 struct dasd_device * device)
542{
543 struct dasd_ccw_req *cqr;
544
545 /* Sanity checks */
546 if ( magic == NULL || datasize > PAGE_SIZE ||
547 (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
548 BUG();
549
550 cqr = kmalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
551 if (cqr == NULL)
552 return ERR_PTR(-ENOMEM);
553 memset(cqr, 0, sizeof(struct dasd_ccw_req));
554 cqr->cpaddr = NULL;
555 if (cplength > 0) {
556 cqr->cpaddr = kmalloc(cplength*sizeof(struct ccw1),
557 GFP_ATOMIC | GFP_DMA);
558 if (cqr->cpaddr == NULL) {
559 kfree(cqr);
560 return ERR_PTR(-ENOMEM);
561 }
562 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
563 }
564 cqr->data = NULL;
565 if (datasize > 0) {
566 cqr->data = kmalloc(datasize, GFP_ATOMIC | GFP_DMA);
567 if (cqr->data == NULL) {
Jesper Juhl17fd6822005-11-07 01:01:30 -0800568 kfree(cqr->cpaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700569 kfree(cqr);
570 return ERR_PTR(-ENOMEM);
571 }
572 memset(cqr->data, 0, datasize);
573 }
574 strncpy((char *) &cqr->magic, magic, 4);
575 ASCEBC((char *) &cqr->magic, 4);
576 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
577 dasd_get_device(device);
578 return cqr;
579}
580
581struct dasd_ccw_req *
582dasd_smalloc_request(char *magic, int cplength, int datasize,
583 struct dasd_device * device)
584{
585 unsigned long flags;
586 struct dasd_ccw_req *cqr;
587 char *data;
588 int size;
589
590 /* Sanity checks */
591 if ( magic == NULL || datasize > PAGE_SIZE ||
592 (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
593 BUG();
594
595 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
596 if (cplength > 0)
597 size += cplength * sizeof(struct ccw1);
598 if (datasize > 0)
599 size += datasize;
600 spin_lock_irqsave(&device->mem_lock, flags);
601 cqr = (struct dasd_ccw_req *)
602 dasd_alloc_chunk(&device->ccw_chunks, size);
603 spin_unlock_irqrestore(&device->mem_lock, flags);
604 if (cqr == NULL)
605 return ERR_PTR(-ENOMEM);
606 memset(cqr, 0, sizeof(struct dasd_ccw_req));
607 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
608 cqr->cpaddr = NULL;
609 if (cplength > 0) {
610 cqr->cpaddr = (struct ccw1 *) data;
611 data += cplength*sizeof(struct ccw1);
612 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
613 }
614 cqr->data = NULL;
615 if (datasize > 0) {
616 cqr->data = data;
617 memset(cqr->data, 0, datasize);
618 }
619 strncpy((char *) &cqr->magic, magic, 4);
620 ASCEBC((char *) &cqr->magic, 4);
621 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
622 dasd_get_device(device);
623 return cqr;
624}
625
/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void
dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
		/* Walk the chain until a ccw without chaining flags ends it. */
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	/* kfree(NULL) is fine for requests without cp or data area. */
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	/* Drop the reference taken in dasd_kmalloc_request. */
	dasd_put_device(device);
}
648
/*
 * Return a request allocated with dasd_smalloc_request to the device's
 * ccw chunk area and drop the device reference taken at allocation.
 */
void
dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
	unsigned long flags;

	/* The chunk area is shared; protect it with the mem_lock. */
	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
659
660/*
661 * Check discipline magic in cqr.
662 */
663static inline int
664dasd_check_cqr(struct dasd_ccw_req *cqr)
665{
666 struct dasd_device *device;
667
668 if (cqr == NULL)
669 return -EINVAL;
670 device = cqr->device;
671 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
672 DEV_MESSAGE(KERN_WARNING, device,
673 " dasd_ccw_req 0x%08x magic doesn't match"
674 " discipline 0x%08x",
675 cqr->magic,
676 *(unsigned int *) device->discipline->name);
677 return -EINVAL;
678 }
679 return 0;
680}
681
/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device runnig.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 * Retries the clear up to 5 times; returns the rc of the last attempt.
 */
int
dasd_term_IO(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;
	int retries, rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->device;
	/* Stop as soon as the request left DASD_CQR_IN_IO. */
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			/* The clear costs one retry of the request. */
			cqr->retries--;
			cqr->status = DASD_CQR_CLEAR;
			cqr->stopclk = get_clock();
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			/* Unexpected rc from ccw_device_clear is fatal. */
			DEV_MESSAGE(KERN_ERR, device,
				    "line %d unknown RC=%d, please "
				    "report to linux390@de.ibm.com",
				    __LINE__, rc);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_bh(device);
	return rc;
}
737
/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 * Returns 0 on success, -EIO when the retry budget is exhausted, or
 * the rc of ccw_device_start.
 */
int
dasd_start_IO(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;
	int rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	device = (struct dasd_device *) cqr->device;
	/* Out of retries: fail the request for good. */
	if (cqr->retries < 0) {
		DEV_MESSAGE(KERN_DEBUG, device,
			    "start_IO: request %p (%02x/%i) - no retry left.",
			    cqr, cqr->status, cqr->retries);
		cqr->status = DASD_CQR_FAILED;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
			      cqr->lpm, 0);
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		DBF_DEV_EVENT(DBF_DEBUG, device,
			      "start_IO: request %p started successful",
			      cqr);
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available pathes and all these
		 * pathes are gone.
		 * Do a retry with all available pathes.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: selected pathes gone,"
			      " retry on all pathes");
		break;
	case -ENODEV:
	case -EIO:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device gone, retry");
		break;
	default:
		/* Unexpected rc from ccw_device_start is fatal. */
		DEV_MESSAGE(KERN_ERR, device,
			    "line %d unknown RC=%d, please report"
			    " to linux390@de.ibm.com", __LINE__, rc);
		BUG();
		break;
	}
	return rc;
}
805
806/*
807 * Timeout function for dasd devices. This is used for different purposes
808 * 1) missing interrupt handler for normal operation
809 * 2) delayed start of request where start_IO failed with -EBUSY
810 * 3) timeout for missing state change interrupts
811 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
812 * DASD_CQR_QUEUED for 2) and 3).
813 */
814static void
815dasd_timeout_device(unsigned long ptr)
816{
817 unsigned long flags;
818 struct dasd_device *device;
819
820 device = (struct dasd_device *) ptr;
821 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
822 /* re-activate request queue */
823 device->stopped &= ~DASD_STOPPED_PENDING;
824 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
825 dasd_schedule_bh(device);
826}
827
/*
 * Setup timeout for a device in jiffies.
 * expires == 0 cancels a pending timeout; otherwise an existing timer
 * is pushed out via mod_timer, or a fresh one is armed.
 */
void
dasd_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0) {
		if (timer_pending(&device->timer))
			del_timer(&device->timer);
		return;
	}
	if (timer_pending(&device->timer)) {
		/* mod_timer returns non-zero if the timer was active. */
		if (mod_timer(&device->timer, jiffies + expires))
			return;
	}
	/* Timer was not (or no longer) pending - arm it from scratch. */
	device->timer.function = dasd_timeout_device;
	device->timer.data = (unsigned long) device;
	device->timer.expires = jiffies + expires;
	add_timer(&device->timer);
}
848
849/*
850 * Clear timeout for a device.
851 */
852void
853dasd_clear_timer(struct dasd_device *device)
854{
855 if (timer_pending(&device->timer))
856 del_timer(&device->timer);
857}
858
859static void
860dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
861{
862 struct dasd_ccw_req *cqr;
863 struct dasd_device *device;
864
865 cqr = (struct dasd_ccw_req *) intparm;
866 if (cqr->status != DASD_CQR_IN_IO) {
867 MESSAGE(KERN_DEBUG,
868 "invalid status in handle_killed_request: "
869 "bus_id %s, status %02x",
870 cdev->dev.bus_id, cqr->status);
871 return;
872 }
873
874 device = (struct dasd_device *) cqr->device;
875 if (device == NULL ||
876 device != dasd_device_from_cdev(cdev) ||
877 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
878 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
879 cdev->dev.bus_id);
880 return;
881 }
882
883 /* Schedule request to be retried. */
884 cqr->status = DASD_CQR_QUEUED;
885
886 dasd_clear_timer(device);
887 dasd_schedule_bh(device);
888 dasd_put_device(device);
889}
890
891static void
892dasd_handle_state_change_pending(struct dasd_device *device)
893{
894 struct dasd_ccw_req *cqr;
895 struct list_head *l, *n;
896
Stefan Weinhuber20c64462006-03-24 03:15:25 -0800897 /* First of all start sense subsystem status request. */
898 dasd_eer_snss(device);
899
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900 device->stopped &= ~DASD_STOPPED_PENDING;
901
902 /* restart all 'running' IO on queue */
903 list_for_each_safe(l, n, &device->ccw_queue) {
904 cqr = list_entry(l, struct dasd_ccw_req, list);
905 if (cqr->status == DASD_CQR_IN_IO) {
906 cqr->status = DASD_CQR_QUEUED;
907 }
908 }
909 dasd_clear_timer(device);
910 dasd_schedule_bh(device);
911}
912
/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 * Classifies the interrupt (error irb, state change pending,
 * unsolicited, clear pending, normal completion), derives an
 * era_action and updates the request status accordingly. On a clean
 * completion the next queued request is started directly (fast path).
 */
void
dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		 struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;
	dasd_era_t era;
	char mask;

	/* The common i/o layer may hand us an error code instead of an irb. */
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			dasd_handle_killed_request(cdev, intparm);
			break;
		case -ETIMEDOUT:
			printk(KERN_WARNING"%s(%s): request timed out\n",
			       __FUNCTION__, cdev->dev.bus_id);
			//FIXME - dasd uses own timeout interface...
			break;
		default:
			printk(KERN_WARNING"%s(%s): unknown error %ld\n",
			       __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb));
		}
		return;
	}

	now = get_clock();

	DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
		  cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
		  (unsigned int) intparm);

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((irb->scsw.dstat & mask) == mask) {
		device = dasd_device_from_cdev(cdev);
		if (!IS_ERR(device)) {
			dasd_handle_state_change_pending(device);
			dasd_put_device(device);
		}
		return;
	}

	cqr = (struct dasd_ccw_req *) intparm;

	/* check for unsolicited interrupts */
	if (cqr == NULL) {
		MESSAGE(KERN_DEBUG,
			"unsolicited interrupt received: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	device = (struct dasd_device *) cqr->device;
	/* The magic must match the discipline's ebcdic name. */
	if (device == NULL ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR &&
	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
		/* The clear completed - the request can be retried. */
		cqr->status = DASD_CQR_QUEUED;
		dasd_clear_timer(device);
		dasd_schedule_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status: bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}
	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
		      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);

	/* Find out the appropriate era_action. */
	if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
		era = dasd_era_fatal;
	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
		 irb->scsw.cstat == 0 &&
		 !irb->esw.esw0.erw.cons)
		era = dasd_era_none;
	else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
		era = dasd_era_fatal; /* don't recover this request */
	else if (irb->esw.esw0.erw.cons)
		/* Sense data available - let the discipline decide. */
		era = device->discipline->examine_error(cqr, irb);
	else
		era = dasd_era_recover;

	DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
	expires = 0;
	if (era == dasd_era_none) {
		cqr->status = DASD_CQR_DONE;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->list.next != &device->ccw_queue) {
			next = list_entry(cqr->list.next,
					  struct dasd_ccw_req, list);
			if ((next->status == DASD_CQR_QUEUED) &&
			    (!device->stopped)) {
				if (device->discipline->start_IO(next) == 0)
					expires = next->expires;
				else
					DEV_MESSAGE(KERN_DEBUG, device, "%s",
						    "Interrupt fastpath "
						    "failed!");
			}
		}
	} else {		/* error */
		/* Keep a copy of the irb for the recovery code. */
		memcpy(&cqr->irb, irb, sizeof (struct irb));
#ifdef ERP_DEBUG
		/* dump sense data */
		dasd_log_sense(cqr, irb);
#endif
		switch (era) {
		case dasd_era_fatal:
			cqr->status = DASD_CQR_FAILED;
			cqr->stopclk = now;
			break;
		case dasd_era_recover:
			cqr->status = DASD_CQR_ERROR;
			break;
		default:
			BUG();
		}
	}
	if (expires != 0)
		dasd_set_timer(device, expires);
	else
		dasd_clear_timer(device);
	dasd_schedule_bh(device);
}
1055
/*
 * posts the buffer_cache about a finalized request
 * Completes all sectors of 'req' at once; end_that_request_first
 * returning non-zero would mean the request was not fully completed,
 * which must not happen here.
 */
static inline void
dasd_end_request(struct request *req, int uptodate)
{
	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
		BUG();
	add_disk_randomness(req->rq_disk);
	end_that_request_last(req, uptodate);
}
1067
1068/*
1069 * Process finished error recovery ccw.
1070 */
1071static inline void
1072__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
1073{
1074 dasd_erp_fn_t erp_fn;
1075
1076 if (cqr->status == DASD_CQR_DONE)
1077 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1078 else
1079 DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
1080 erp_fn = device->discipline->erp_postaction(cqr);
1081 erp_fn(cqr);
1082}
1083
1084/*
1085 * Process ccw request queue.
1086 */
/*
 * Walk the device ccw queue and handle every request that has reached a
 * final state (DONE, FAILED or ERROR).  ERROR requests are handed to
 * error recovery, fatal failures may be quiesced for extended error
 * reporting, finished ERP requests are post-processed, and everything
 * that is truly final is moved to final_queue for callback invocation.
 * Caller must hold the ccw device lock.
 */
static inline void
__dasd_process_ccw_queue(struct dasd_device * device,
			 struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;

restart:
	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		/* Stop list processing at the first non-final request. */
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_ERROR)
			break;
		/* Process requests with DASD_CQR_ERROR */
		if (cqr->status == DASD_CQR_ERROR) {
			if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
				/* I/O was halted - no recovery possible. */
				cqr->status = DASD_CQR_FAILED;
				cqr->stopclk = get_clock();
			} else {
				/* Concurrent sense data available? Let the
				 * discipline pick the recovery action,
				 * otherwise fall back to the default ERP. */
				if (cqr->irb.esw.esw0.erw.cons) {
					erp_fn = device->discipline->
						erp_action(cqr);
					erp_fn(cqr);
				} else
					dasd_default_erp_action(cqr);
			}
			/* ERP may have modified the queue - rescan it. */
			goto restart;
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(device) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(device, cqr, DASD_EER_FATALERROR);

			/* restart request: keep it queued and quiesce the
			 * device until the EER trigger has been handled. */
			cqr->status = DASD_CQR_QUEUED;
			cqr->retries = 255;
			device->stopped |= DASD_STOPPED_QUIESCE;
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->list, final_queue);
	}
}
1143
1144static void
1145dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
1146{
1147 struct request *req;
1148 struct dasd_device *device;
1149 int status;
1150
1151 req = (struct request *) data;
1152 device = cqr->device;
1153 dasd_profile_end(device, cqr, req);
1154 status = cqr->device->discipline->free_cp(cqr,req);
1155 spin_lock_irq(&device->request_queue_lock);
1156 dasd_end_request(req, status);
1157 spin_unlock_irq(&device->request_queue_lock);
1158}
1159
1160
1161/*
1162 * Fetch requests from the block device queue.
1163 */
/*
 * Transfer requests from the block device request queue to the device
 * ccw queue, building a channel program for each.  At most
 * DASD_CHANQ_MAX_SIZE requests are kept queued at a time.  Writes to
 * read-only devices and requests on a disconnected (DC_EIO) device are
 * rejected immediately.  Caller must hold both the request queue lock
 * and the ccw device lock.
 */
static inline void
__dasd_process_blk_queue(struct dasd_device * device)
{
	request_queue_t *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	int nr_queued;

	queue = device->request_queue;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue request from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (device->state != DASD_STATE_READY &&
	    device->state != DASD_STATE_ONLINE)
		return;
	nr_queued = 0;
	/* Count the requests already queued on the ccw queue. */
	list_for_each_entry(cqr, &device->ccw_queue, list)
		if (cqr->status == DASD_CQR_QUEUED)
			nr_queued++;
	/* Now we try to fetch requests from the request queue */
	while (!blk_queue_plugged(queue) &&
	       elv_next_request(queue) &&
		nr_queued < DASD_CHANQ_MAX_SIZE) {
		req = elv_next_request(queue);

		/* Reject writes on read-only devices. */
		if (device->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, device,
				      "Rejecting write request %p",
				      req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		/* Device disconnected with EIO policy - fail requests. */
		if (device->stopped & DASD_STOPPED_DC_EIO) {
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		cqr = device->discipline->build_cp(device, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			DBF_DEV_EVENT(DBF_ERR, device,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		/* Hand the request over to the ccw queue. */
		cqr->callback = dasd_end_request_cb;
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_QUEUED;
		blkdev_dequeue_request(req);
		list_add_tail(&cqr->list, &device->ccw_queue);
		dasd_profile_start(device, cqr, req);
		nr_queued++;
	}
}
1232
1233/*
1234 * Take a look at the first request on the ccw queue and check
1235 * if it reached its expire time. If so, terminate the IO.
1236 */
1237static inline void
1238__dasd_check_expire(struct dasd_device * device)
1239{
1240 struct dasd_ccw_req *cqr;
1241
1242 if (list_empty(&device->ccw_queue))
1243 return;
1244 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1245 if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
1246 if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
1247 if (device->discipline->term_IO(cqr) != 0)
1248 /* Hmpf, try again in 1/10 sec */
1249 dasd_set_timer(device, 10);
1250 }
1251 }
1252}
1253
1254/*
1255 * Take a look at the first request on the ccw queue and check
1256 * if it needs to be started.
1257 */
/*
 * Start I/O for the first request on the ccw queue if it is startable.
 * Requests flagged FAILFAST are failed right away while the device is
 * stopped for anything but PENDING (unless extended error reporting is
 * active, which needs the request kept).  Caller must hold the ccw
 * device lock.
 */
static inline void
__dasd_start_head(struct dasd_device * device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	/* check FAILFAST */
	if (device->stopped & ~DASD_STOPPED_PENDING &&
	    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
	    (!dasd_eer_enabled(device))) {
		/* Fail immediately and let the bottom half clean up. */
		cqr->status = DASD_CQR_FAILED;
		dasd_schedule_bh(device);
	}
	if ((cqr->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		/* try to start the first I/O that can be started */
		rc = device->discipline->start_IO(cqr);
		if (rc == 0)
			dasd_set_timer(device, cqr->expires);
		else if (rc == -EACCES) {
			/* Request failed permanently - run the bh. */
			dasd_schedule_bh(device);
		} else
			/* Hmpf, try again in 1/2 sec */
			dasd_set_timer(device, 50);
	}
}
1287
1288/*
1289 * Remove requests from the ccw queue.
1290 */
1291static void
1292dasd_flush_ccw_queue(struct dasd_device * device, int all)
1293{
1294 struct list_head flush_queue;
1295 struct list_head *l, *n;
1296 struct dasd_ccw_req *cqr;
1297
1298 INIT_LIST_HEAD(&flush_queue);
1299 spin_lock_irq(get_ccwdev_lock(device->cdev));
1300 list_for_each_safe(l, n, &device->ccw_queue) {
1301 cqr = list_entry(l, struct dasd_ccw_req, list);
1302 /* Flush all request or only block device requests? */
1303 if (all == 0 && cqr->callback == dasd_end_request_cb)
1304 continue;
1305 if (cqr->status == DASD_CQR_IN_IO)
1306 device->discipline->term_IO(cqr);
1307 if (cqr->status != DASD_CQR_DONE ||
1308 cqr->status != DASD_CQR_FAILED) {
1309 cqr->status = DASD_CQR_FAILED;
1310 cqr->stopclk = get_clock();
1311 }
1312 /* Process finished ERP request. */
1313 if (cqr->refers) {
1314 __dasd_process_erp(device, cqr);
1315 continue;
1316 }
1317 /* Rechain request on device request queue */
1318 cqr->endclk = get_clock();
1319 list_move_tail(&cqr->list, &flush_queue);
1320 }
1321 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1322 /* Now call the callback function of flushed requests */
1323 list_for_each_safe(l, n, &flush_queue) {
1324 cqr = list_entry(l, struct dasd_ccw_req, list);
1325 if (cqr->callback != NULL)
1326 (cqr->callback)(cqr, cqr->callback_data);
1327 }
1328}
1329
1330/*
1331 * Acquire the device lock and process queues for the device.
1332 */
/*
 * Bottom half worker for a DASD device.  Checks the expiry of the head
 * request, finalizes requests on the ccw queue, runs their callbacks
 * outside the ccw device lock, refills the ccw queue from the block
 * layer and restarts the queue head.  The final dasd_put_device()
 * balances the dasd_get_device() taken in dasd_schedule_bh().
 */
static void
dasd_tasklet(struct dasd_device * device)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Allow the tasklet to be scheduled again from here on. */
	atomic_set (&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_check_expire(device);
	/* Finish off requests on ccw queue */
	__dasd_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		list_del_init(&cqr->list);
		if (cqr->callback != NULL)
			(cqr->callback)(cqr, cqr->callback_data);
	}
	/* Lock order: request queue lock first, then ccw device lock. */
	spin_lock_irq(&device->request_queue_lock);
	spin_lock(get_ccwdev_lock(device->cdev));
	/* Get new request from the block device request queue */
	__dasd_process_blk_queue(device);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_start_head(device);
	spin_unlock(get_ccwdev_lock(device->cdev));
	spin_unlock_irq(&device->request_queue_lock);
	dasd_put_device(device);
}
1365
1366/*
1367 * Schedules a call to dasd_tasklet over the device tasklet.
1368 */
1369void
1370dasd_schedule_bh(struct dasd_device * device)
1371{
1372 /* Protect against rescheduling. */
Martin Schwidefsky973bd992006-01-06 00:19:07 -08001373 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374 return;
1375 dasd_get_device(device);
1376 tasklet_hi_schedule(&device->tasklet);
1377}
1378
1379/*
1380 * Queue a request to the head of the ccw_queue. Start the I/O if
1381 * possible.
1382 */
1383void
1384dasd_add_request_head(struct dasd_ccw_req *req)
1385{
1386 struct dasd_device *device;
1387 unsigned long flags;
1388
1389 device = req->device;
1390 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1391 req->status = DASD_CQR_QUEUED;
1392 req->device = device;
1393 list_add(&req->list, &device->ccw_queue);
1394 /* let the bh start the request to keep them in order */
1395 dasd_schedule_bh(device);
1396 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1397}
1398
1399/*
1400 * Queue a request to the tail of the ccw_queue. Start the I/O if
1401 * possible.
1402 */
1403void
1404dasd_add_request_tail(struct dasd_ccw_req *req)
1405{
1406 struct dasd_device *device;
1407 unsigned long flags;
1408
1409 device = req->device;
1410 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1411 req->status = DASD_CQR_QUEUED;
1412 req->device = device;
1413 list_add_tail(&req->list, &device->ccw_queue);
1414 /* let the bh start the request to keep them in order */
1415 dasd_schedule_bh(device);
1416 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1417}
1418
1419/*
1420 * Wakeup callback.
1421 */
1422static void
1423dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1424{
1425 wake_up((wait_queue_head_t *) data);
1426}
1427
1428static inline int
1429_wait_for_wakeup(struct dasd_ccw_req *cqr)
1430{
1431 struct dasd_device *device;
1432 int rc;
1433
1434 device = cqr->device;
1435 spin_lock_irq(get_ccwdev_lock(device->cdev));
Horst Hummelc2ba4442006-02-01 03:06:37 -08001436 rc = ((cqr->status == DASD_CQR_DONE ||
1437 cqr->status == DASD_CQR_FAILED) &&
1438 list_empty(&cqr->list));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1440 return rc;
1441}
1442
1443/*
1444 * Attempts to start a special ccw queue and waits for its completion.
1445 */
1446int
1447dasd_sleep_on(struct dasd_ccw_req * cqr)
1448{
1449 wait_queue_head_t wait_q;
1450 struct dasd_device *device;
1451 int rc;
1452
1453 device = cqr->device;
1454 spin_lock_irq(get_ccwdev_lock(device->cdev));
1455
1456 init_waitqueue_head (&wait_q);
1457 cqr->callback = dasd_wakeup_cb;
1458 cqr->callback_data = (void *) &wait_q;
1459 cqr->status = DASD_CQR_QUEUED;
1460 list_add_tail(&cqr->list, &device->ccw_queue);
1461
1462 /* let the bh start the request to keep them in order */
1463 dasd_schedule_bh(device);
1464
1465 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1466
1467 wait_event(wait_q, _wait_for_wakeup(cqr));
1468
1469 /* Request status is either done or failed. */
1470 rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
1471 return rc;
1472}
1473
1474/*
1475 * Attempts to start a special ccw queue and wait interruptible
1476 * for its completion.
1477 */
/*
 * Queue a special ccw request at the tail of the device queue and wait
 * interruptibly for its completion.  On signal delivery a running
 * request is terminated (and then waited for non-interruptibly), a
 * still-queued request is simply removed; other states keep waiting.
 * Returns 0 on success, -EIO on failure or cancellation.
 */
int
dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc, finished;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	finished = 0;
	while (!finished) {
		rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
		if (rc != -ERESTARTSYS) {
			/* Request is final (done or failed) */
			rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
			break;
		}
		/* A signal arrived - decide based on current status. */
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			/* terminate running cqr */
			if (device->discipline->term_IO) {
				cqr->retries = -1;
				device->discipline->term_IO(cqr);
				/*
				 * wait (non-interruptible) for final status
				 * because the signal is still pending
				 */
				spin_unlock_irq(get_ccwdev_lock(device->cdev));
				wait_event(wait_q, _wait_for_wakeup(cqr));
				spin_lock_irq(get_ccwdev_lock(device->cdev));
				rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
				finished = 1;
			}
			break;
		case DASD_CQR_QUEUED:
			/* request was not started yet - remove it from the queue */
			list_del_init(&cqr->list);
			rc = -EIO;
			finished = 1;
			break;
		default:
			/* cqr with 'non-interruptable' status - just wait */
			break;
		}
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	return rc;
}
1538
1539/*
1540 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
1541 * for eckd devices) the currently running request has to be terminated
1542 * and be put back to status queued, before the special request is added
1543 * to the head of the queue. Then the special request is waited on normally.
1544 */
1545static inline int
1546_dasd_term_running_cqr(struct dasd_device *device)
1547{
1548 struct dasd_ccw_req *cqr;
1549 int rc;
1550
1551 if (list_empty(&device->ccw_queue))
1552 return 0;
1553 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1554 rc = device->discipline->term_IO(cqr);
1555 if (rc == 0) {
1556 /* termination successful */
1557 cqr->status = DASD_CQR_QUEUED;
1558 cqr->startclk = cqr->stopclk = 0;
1559 cqr->starttime = 0;
1560 }
1561 return rc;
1562}
1563
/*
 * Terminate the currently running request (e.g. needed for steal lock
 * on ECKD devices), queue this special request at the HEAD of the ccw
 * queue and wait uninterruptibly for its completion.  Returns 0 on
 * success, -EIO on failure, or the term_IO error if the running
 * request could not be terminated.
 * (Note: the misspelled name is part of the exported interface.)
 */
int
dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Make room at the queue head for this request. */
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}
1596
1597/*
1598 * Cancels a request that was started with dasd_sleep_on_req.
1599 * This is useful to timeout requests. The request will be
1600 * terminated if it is currently in i/o.
1601 * Returns 1 if the request has been terminated.
1602 */
/*
 * Cancel a request that was started with dasd_sleep_on.  Useful for
 * timing out requests: a queued request is failed immediately, one that
 * is in I/O gets its channel program terminated.  Returns 1 if the
 * request was actively terminated, 0 otherwise.  The caller's wakeup
 * callback fires via the bottom half once the request goes final.
 */
int
dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->device;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to failed */
		cqr->status = DASD_CQR_FAILED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		if (device->discipline->term_IO(cqr) != 0)
			/* NOTE(review): termination failed (e.g. request is
			 * no longer _IN_IO) - force the failed state. */
			cqr->status = DASD_CQR_FAILED;
		cqr->stopclk = get_clock();
		rc = 1;
		break;
	case DASD_CQR_DONE:
	case DASD_CQR_FAILED:
		/* already finished - do nothing */
		break;
	default:
		DEV_MESSAGE(KERN_ALERT, device,
			    "invalid status %02x in request",
			    cqr->status);
		BUG();

	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	/* Let the bottom half finalize the cancelled request. */
	dasd_schedule_bh(device);
	return rc;
}
1641
1642/*
1643 * SECTION: Block device operations (request queue, partitions, open, release).
1644 */
1645
1646/*
1647 * Dasd request queue function. Called from ll_rw_blk.c
1648 */
1649static void
1650do_dasd_request(request_queue_t * queue)
1651{
1652 struct dasd_device *device;
1653
1654 device = (struct dasd_device *) queue->queuedata;
1655 spin_lock(get_ccwdev_lock(device->cdev));
1656 /* Get new request from the block device request queue */
1657 __dasd_process_blk_queue(device);
1658 /* Now check if the head of the ccw queue needs to be started. */
1659 __dasd_start_head(device);
1660 spin_unlock(get_ccwdev_lock(device->cdev));
1661}
1662
1663/*
1664 * Allocate and initialize request queue and default I/O scheduler.
1665 */
1666static int
1667dasd_alloc_queue(struct dasd_device * device)
1668{
1669 int rc;
1670
1671 device->request_queue = blk_init_queue(do_dasd_request,
1672 &device->request_queue_lock);
1673 if (device->request_queue == NULL)
1674 return -ENOMEM;
1675
1676 device->request_queue->queuedata = device;
1677
1678 elevator_exit(device->request_queue->elevator);
1679 rc = elevator_init(device->request_queue, "deadline");
1680 if (rc) {
1681 blk_cleanup_queue(device->request_queue);
1682 return rc;
1683 }
1684 return 0;
1685}
1686
1687/*
1688 * Allocate and initialize request queue.
1689 */
1690static void
1691dasd_setup_queue(struct dasd_device * device)
1692{
1693 int max;
1694
1695 blk_queue_hardsect_size(device->request_queue, device->bp_block);
1696 max = device->discipline->max_blocks << device->s2b_shift;
1697 blk_queue_max_sectors(device->request_queue, max);
1698 blk_queue_max_phys_segments(device->request_queue, -1L);
1699 blk_queue_max_hw_segments(device->request_queue, -1L);
1700 blk_queue_max_segment_size(device->request_queue, -1L);
1701 blk_queue_segment_boundary(device->request_queue, -1L);
Heiko Carstensed68cb32006-01-14 13:21:05 -08001702 blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703}
1704
1705/*
1706 * Deactivate and free request queue.
1707 */
1708static void
1709dasd_free_queue(struct dasd_device * device)
1710{
1711 if (device->request_queue) {
1712 blk_cleanup_queue(device->request_queue);
1713 device->request_queue = NULL;
1714 }
1715}
1716
1717/*
1718 * Flush request on the request queue.
1719 */
1720static void
1721dasd_flush_request_queue(struct dasd_device * device)
1722{
1723 struct request *req;
1724
1725 if (!device->request_queue)
1726 return;
1727
1728 spin_lock_irq(&device->request_queue_lock);
1729 while (!list_empty(&device->request_queue->queue_head)) {
1730 req = elv_next_request(device->request_queue);
1731 if (req == NULL)
1732 break;
1733 dasd_end_request(req, 0);
1734 blkdev_dequeue_request(req);
1735 }
1736 spin_unlock_irq(&device->request_queue_lock);
1737}
1738
/*
 * Block device open handler.  Takes an open count reference and a
 * module reference on the discipline; both are dropped again on any
 * failure (offline device, probeonly mode, device not yet recognized).
 */
static int
dasd_open(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_device *device = disk->private_data;
	int rc;

	atomic_inc(&device->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	/* Pin the discipline module for the lifetime of the open. */
	if (!try_module_get(device->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		DEV_MESSAGE(KERN_INFO, device, "%s",
			    "No access to device due to probeonly mode");
		rc = -EPERM;
		goto out;
	}

	/* Device must have completed detection before it can be used. */
	if (device->state <= DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, device, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	return 0;

out:
	module_put(device->discipline->owner);
unlock:
	atomic_dec(&device->open_count);
	return rc;
}
1779
1780static int
1781dasd_release(struct inode *inp, struct file *filp)
1782{
1783 struct gendisk *disk = inp->i_bdev->bd_disk;
1784 struct dasd_device *device = disk->private_data;
1785
1786 atomic_dec(&device->open_count);
1787 module_put(device->discipline->owner);
1788 return 0;
1789}
1790
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08001791/*
1792 * Return disk geometry.
1793 */
1794static int
1795dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1796{
1797 struct dasd_device *device;
1798
1799 device = bdev->bd_disk->private_data;
1800 if (!device)
1801 return -ENODEV;
1802
1803 if (!device->discipline ||
1804 !device->discipline->fill_geometry)
1805 return -EINVAL;
1806
1807 device->discipline->fill_geometry(device, geo);
1808 geo->start = get_start_sect(bdev) >> device->s2b_shift;
1809 return 0;
1810}
1811
/*
 * Block device operations table registered for every DASD gendisk.
 */
struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_compat_ioctl,
	.getgeo		= dasd_getgeo,
};
1821
1822
1823static void
1824dasd_exit(void)
1825{
1826#ifdef CONFIG_PROC_FS
1827 dasd_proc_exit();
1828#endif
Stefan Weinhuber20c64462006-03-24 03:15:25 -08001829 dasd_eer_exit();
Horst Hummel6bb0e012005-07-27 11:45:03 -07001830 if (dasd_page_cache != NULL) {
1831 kmem_cache_destroy(dasd_page_cache);
1832 dasd_page_cache = NULL;
1833 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834 dasd_gendisk_exit();
1835 dasd_devmap_exit();
1836 devfs_remove("dasd");
1837 if (dasd_debug_area != NULL) {
1838 debug_unregister(dasd_debug_area);
1839 dasd_debug_area = NULL;
1840 }
1841}
1842
1843/*
1844 * SECTION: common functions for ccw_driver use
1845 */
1846
Horst Hummel1c01b8a2006-01-06 00:19:15 -08001847/*
1848 * Initial attempt at a probe function. this can be simplified once
1849 * the other detection code is gone.
1850 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851int
1852dasd_generic_probe (struct ccw_device *cdev,
1853 struct dasd_discipline *discipline)
1854{
1855 int ret;
1856
1857 ret = dasd_add_sysfs_files(cdev);
1858 if (ret) {
1859 printk(KERN_WARNING
1860 "dasd_generic_probe: could not add sysfs entries "
1861 "for %s\n", cdev->dev.bus_id);
Horst Hummel59afda72005-05-16 21:53:39 -07001862 } else {
1863 cdev->handler = &dasd_int_handler;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864 }
1865
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866 return ret;
1867}
1868
Horst Hummel1c01b8a2006-01-06 00:19:15 -08001869/*
1870 * This will one day be called from a global not_oper handler.
1871 * It is also used by driver_unregister during module unload.
1872 */
/*
 * Generic remove: detach the interrupt handler, remove sysfs
 * attributes and, unless offline processing is already running, take
 * the device down to DASD_STATE_NEW and destroy it.
 */
void
dasd_generic_remove (struct ccw_device *cdev)
{
	struct dasd_device *device;

	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	dasd_delete_device(device);
}
1898
Horst Hummel1c01b8a2006-01-06 00:19:15 -08001899/*
1900 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 * the device is detected for the first time and is supposed to be used
Horst Hummel1c01b8a2006-01-06 00:19:15 -08001902 * or the user has started activation through sysfs.
1903 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904int
1905dasd_generic_set_online (struct ccw_device *cdev,
Peter Oberparleiteraa888612006-02-20 18:28:13 -08001906 struct dasd_discipline *base_discipline)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907
1908{
Peter Oberparleiteraa888612006-02-20 18:28:13 -08001909 struct dasd_discipline *discipline;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910 struct dasd_device *device;
Horst Hummelc6eb7b72005-09-03 15:57:58 -07001911 int rc;
Horst Hummelf24acd42005-05-01 08:58:59 -07001912
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913 device = dasd_create_device(cdev);
1914 if (IS_ERR(device))
1915 return PTR_ERR(device);
1916
Peter Oberparleiteraa888612006-02-20 18:28:13 -08001917 discipline = base_discipline;
Horst Hummelc6eb7b72005-09-03 15:57:58 -07001918 if (device->features & DASD_FEATURE_USEDIAG) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 if (!dasd_diag_discipline_pointer) {
1920 printk (KERN_WARNING
1921 "dasd_generic couldn't online device %s "
1922 "- discipline DIAG not available\n",
1923 cdev->dev.bus_id);
1924 dasd_delete_device(device);
1925 return -ENODEV;
1926 }
1927 discipline = dasd_diag_discipline_pointer;
1928 }
Peter Oberparleiteraa888612006-02-20 18:28:13 -08001929 if (!try_module_get(base_discipline->owner)) {
1930 dasd_delete_device(device);
1931 return -EINVAL;
1932 }
1933 if (!try_module_get(discipline->owner)) {
1934 module_put(base_discipline->owner);
1935 dasd_delete_device(device);
1936 return -EINVAL;
1937 }
1938 device->base_discipline = base_discipline;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939 device->discipline = discipline;
1940
1941 rc = discipline->check_device(device);
1942 if (rc) {
1943 printk (KERN_WARNING
1944 "dasd_generic couldn't online device %s "
1945 "with discipline %s rc=%i\n",
1946 cdev->dev.bus_id, discipline->name, rc);
Peter Oberparleiteraa888612006-02-20 18:28:13 -08001947 module_put(discipline->owner);
1948 module_put(base_discipline->owner);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949 dasd_delete_device(device);
1950 return rc;
1951 }
1952
1953 dasd_set_target_state(device, DASD_STATE_ONLINE);
1954 if (device->state <= DASD_STATE_KNOWN) {
1955 printk (KERN_WARNING
1956 "dasd_generic discipline not found for %s\n",
1957 cdev->dev.bus_id);
1958 rc = -ENODEV;
1959 dasd_set_target_state(device, DASD_STATE_NEW);
1960 dasd_delete_device(device);
1961 } else
1962 pr_debug("dasd_generic device %s found\n",
1963 cdev->dev.bus_id);
1964
1965 /* FIXME: we have to wait for the root device but we don't want
1966 * to wait for each single device but for all at once. */
1967 wait_event(dasd_init_waitq, _wait_for_device(device));
1968
1969 dasd_put_device(device);
1970
1971 return rc;
1972}
1973
/*
 * Take a DASD offline.  Refused with -EBUSY while the device is opened
 * by anyone other than the internal blkdev_get from the partition
 * scan.  Returns 0 on success (or when offline processing is already
 * in progress).
 */
int
dasd_generic_set_offline (struct ccw_device *cdev)
{
	struct dasd_device *device;
	int max_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	max_count = device->bdev ? 0 : -1;
	if (atomic_read(&device->open_count) > max_count) {
		printk (KERN_WARNING "Can't offline dasd device with open"
			" count = %i.\n",
			atomic_read(&device->open_count));
		/* Undo the offline flag taken above. */
		clear_bit(DASD_FLAG_OFFLINE, &device->flags);
		dasd_put_device(device);
		return -EBUSY;
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	dasd_delete_device(device);

	return 0;
}
2009
/*
 * Channel event notification handler.  On path loss (CIO_GONE /
 * CIO_NO_PATH) either fails outstanding I/O immediately (DSC_ERROR
 * policy) or requeues it and stops the device until the path returns.
 * CIO_OPER clears the disconnect stop bits and restarts processing.
 * Returns 1 if the event was handled, 0 if the cdev has no device.
 */
int
dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int ret;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return 0;
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_NO_PATH:
		/* First of all call extended error reporting. */
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);

		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
			/* Fail running requests right away. */
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO)
					cqr->status = DASD_CQR_FAILED;
			device->stopped |= DASD_STOPPED_DC_EIO;
		} else {
			/* Requeue running requests and wait for the path. */
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO) {
					cqr->status = DASD_CQR_QUEUED;
					cqr->retries++;
				}
			device->stopped |= DASD_STOPPED_DC_WAIT;
			dasd_set_timer(device, 0);
		}
		dasd_schedule_bh(device);
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
		dasd_schedule_bh(device);
		ret = 1;
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	dasd_put_device(device);
	return ret;
}
2060
2061/*
2062 * Automatically online either all dasd devices (dasd_autodetect) or
2063 * all devices specified with dasd= parameters.
2064 */
Cornelia Huckc5512882005-06-25 14:55:28 -07002065static int
2066__dasd_auto_online(struct device *dev, void *data)
2067{
2068 struct ccw_device *cdev;
2069
2070 cdev = to_ccwdev(dev);
2071 if (dasd_autodetect || dasd_busid_known(cdev->dev.bus_id) == 0)
2072 ccw_device_set_online(cdev);
2073 return 0;
2074}
2075
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076void
2077dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver)
2078{
2079 struct device_driver *drv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080
2081 drv = get_driver(&dasd_discipline_driver->driver);
Cornelia Huckc5512882005-06-25 14:55:28 -07002082 driver_for_each_device(drv, NULL, NULL, __dasd_auto_online);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 put_driver(drv);
2084}
2085
Stefan Weinhuber20c64462006-03-24 03:15:25 -08002086
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087static int __init
2088dasd_init(void)
2089{
2090 int rc;
2091
2092 init_waitqueue_head(&dasd_init_waitq);
2093
2094 /* register 'common' DASD debug area, used for all DBF_XXX calls */
Michael Holzheu66a464d2005-06-25 14:55:33 -07002095 dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096 if (dasd_debug_area == NULL) {
2097 rc = -ENOMEM;
2098 goto failed;
2099 }
2100 debug_register_view(dasd_debug_area, &debug_sprintf_view);
2101 debug_set_level(dasd_debug_area, DBF_EMERG);
2102
2103 DBF_EVENT(DBF_EMERG, "%s", "debug area created");
2104
2105 dasd_diag_discipline_pointer = NULL;
2106
2107 rc = devfs_mk_dir("dasd");
2108 if (rc)
2109 goto failed;
2110 rc = dasd_devmap_init();
2111 if (rc)
2112 goto failed;
2113 rc = dasd_gendisk_init();
2114 if (rc)
2115 goto failed;
2116 rc = dasd_parse();
2117 if (rc)
2118 goto failed;
Stefan Weinhuber20c64462006-03-24 03:15:25 -08002119 rc = dasd_eer_init();
2120 if (rc)
2121 goto failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122#ifdef CONFIG_PROC_FS
2123 rc = dasd_proc_init();
2124 if (rc)
2125 goto failed;
2126#endif
2127
2128 return 0;
2129failed:
2130 MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
2131 dasd_exit();
2132 return rc;
2133}
2134
module_init(dasd_init);
module_exit(dasd_exit);

/* Exported state shared with the discipline modules (ECKD/FBA/DIAG). */
EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);

/* Core request/queue/device API used by the discipline modules. */
EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);

/* GPL-only generic ccw-driver entry points for discipline drivers. */
EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_auto_online);

2167
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168/*
2169 * Overrides for Emacs so that we follow Linus's tabbing style.
2170 * Emacs will notice this stuff at the end of the file and automatically
2171 * adjust the settings for this buffer only. This must remain at the end
2172 * of the file.
2173 * ---------------------------------------------------------------------------
2174 * Local variables:
2175 * c-indent-level: 4
2176 * c-brace-imaginary-offset: 0
2177 * c-brace-offset: -4
2178 * c-argdecl-indent: 4
2179 * c-label-offset: -4
2180 * c-continued-statement-offset: 4
2181 * c-continued-brace-offset: 0
2182 * indent-tabs-mode: 1
2183 * tab-width: 8
2184 * End:
2185 */