blob: 06bb992a4c6c9a9c6890f97bc7581fb2877aa463 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * File...........: linux/drivers/s390/block/dasd.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070010 */
11
12#include <linux/config.h>
13#include <linux/kmod.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/ctype.h>
17#include <linux/major.h>
18#include <linux/slab.h>
19#include <linux/buffer_head.h>
Christoph Hellwiga885c8c2006-01-08 01:02:50 -080020#include <linux/hdreg.h>
Stefan Weinhuber12c3a542006-02-03 03:03:49 -080021#include <linux/notifier.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022
23#include <asm/ccwdev.h>
24#include <asm/ebcdic.h>
25#include <asm/idals.h>
26#include <asm/todclk.h>
27
28/* This is ugly... */
29#define PRINTK_HEADER "dasd:"
30
31#include "dasd_int.h"
32/*
33 * SECTION: Constant definitions to be used within this file
34 */
35#define DASD_CHANQ_MAX_SIZE 4
36
37/*
38 * SECTION: exported variables of dasd.c
39 */
40debug_info_t *dasd_debug_area;
41struct dasd_discipline *dasd_diag_discipline_pointer;
42
43MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
44MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
45 " Copyright 2000 IBM Corporation");
46MODULE_SUPPORTED_DEVICE("dasd");
47MODULE_PARM(dasd, "1-" __MODULE_STRING(256) "s");
48MODULE_LICENSE("GPL");
49
50/*
51 * SECTION: prototypes for static functions of dasd.c
52 */
53static int dasd_alloc_queue(struct dasd_device * device);
54static void dasd_setup_queue(struct dasd_device * device);
55static void dasd_free_queue(struct dasd_device * device);
56static void dasd_flush_request_queue(struct dasd_device *);
57static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
58static void dasd_flush_ccw_queue(struct dasd_device *, int);
59static void dasd_tasklet(struct dasd_device *);
60static void do_kick_device(void *data);
Stefan Weinhuber12c3a542006-02-03 03:03:49 -080061static void dasd_disable_eer(struct dasd_device *device);
Linus Torvalds1da177e2005-04-16 15:20:36 -070062
63/*
64 * SECTION: Operations on the device structure.
65 */
66static wait_queue_head_t dasd_init_waitq;
67
68/*
69 * Allocate memory for a new device structure.
70 */
71struct dasd_device *
72dasd_alloc_device(void)
73{
74 struct dasd_device *device;
75
76 device = kmalloc(sizeof (struct dasd_device), GFP_ATOMIC);
77 if (device == NULL)
78 return ERR_PTR(-ENOMEM);
79 memset(device, 0, sizeof (struct dasd_device));
80 /* open_count = 0 means device online but not in use */
81 atomic_set(&device->open_count, -1);
82
83 /* Get two pages for normal block device operations. */
84 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
85 if (device->ccw_mem == NULL) {
86 kfree(device);
87 return ERR_PTR(-ENOMEM);
88 }
89 /* Get one page for error recovery. */
90 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
91 if (device->erp_mem == NULL) {
92 free_pages((unsigned long) device->ccw_mem, 1);
93 kfree(device);
94 return ERR_PTR(-ENOMEM);
95 }
96
97 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
98 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
99 spin_lock_init(&device->mem_lock);
100 spin_lock_init(&device->request_queue_lock);
101 atomic_set (&device->tasklet_scheduled, 0);
102 tasklet_init(&device->tasklet,
103 (void (*)(unsigned long)) dasd_tasklet,
104 (unsigned long) device);
105 INIT_LIST_HEAD(&device->ccw_queue);
106 init_timer(&device->timer);
107 INIT_WORK(&device->kick_work, do_kick_device, device);
108 device->state = DASD_STATE_NEW;
109 device->target = DASD_STATE_NEW;
110
111 return device;
112}
113
/*
 * Free memory of a device structure: the discipline-private area,
 * the erp page, the two ccw pages and the structure itself.
 */
void
dasd_free_device(struct dasd_device *device)
{
	/* kfree(NULL) is a no-op, so private needs no check. */
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}
125
/*
 * Make a new device known to the system. Transition NEW -> KNOWN.
 * Returns 0 on success or a negative error code from queue allocation.
 */
static inline int
dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	rc = dasd_alloc_queue(device);
	if (rc) {
		/* Drop the reference taken above on failure. */
		dasd_put_device(device);
		return rc;
	}

	device->state = DASD_STATE_KNOWN;
	return 0;
}
149
/*
 * Let the system forget about a device. Transition KNOWN -> NEW and
 * release everything acquired in dasd_state_new_to_known.
 */
static inline void
dasd_state_known_to_new(struct dasd_device * device)
{
	/* disable extended error reporting for this device */
	dasd_disable_eer(device);
	/* Forget the discipline information and drop the module refs. */
	if (device->discipline)
		module_put(device->discipline->owner);
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	dasd_free_queue(device);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
}
172
/*
 * Request the irq line for the device. Transition KNOWN -> BASIC:
 * allocate the gendisk and register the per-device debug area.
 * Returns 0 on success or the error from dasd_gendisk_alloc.
 */
static inline int
dasd_state_known_to_basic(struct dasd_device * device)
{
	int rc;

	/* Allocate and register gendisk structure. */
	rc = dasd_gendisk_alloc(device);
	if (rc)
		return rc;

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	/* NOTE(review): debug_register may fail; the result is not
	 * checked here -- confirm the DBF calls below tolerate a NULL
	 * debug area. */
	device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
					    8 * sizeof (long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_EMERG);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}
196
/*
 * Release the irq line for the device. Terminate any running i/o.
 * Transition BASIC -> KNOWN: free the gendisk, flush the ccw queue
 * and tear down the debug area.
 */
static inline void
dasd_state_basic_to_known(struct dasd_device * device)
{
	dasd_gendisk_free(device);
	dasd_flush_ccw_queue(device, 1);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
}
212
/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd disciplines
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0 or error)
 * the block device is setup. Either a fake disk is added to allow
 * formatting or a proper device request queue is created.
 */
static inline int
dasd_state_basic_to_ready(struct dasd_device * device)
{
	int rc;

	rc = 0;
	/* The discipline's analysis step is optional. */
	if (device->discipline->do_analysis != NULL)
		rc = device->discipline->do_analysis(device);
	if (rc)
		return rc;
	dasd_setup_queue(device);
	device->state = DASD_STATE_READY;
	/* Fall back to BASIC if the partition scan fails, but still
	 * report success to the caller. */
	if (dasd_scan_partitions(device) != 0)
		device->state = DASD_STATE_BASIC;
	return 0;
}
242
/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 * Transition READY -> BASIC.
 */
static inline void
dasd_state_ready_to_basic(struct dasd_device * device)
{
	dasd_flush_ccw_queue(device, 0);
	dasd_destroy_partitions(device);
	dasd_flush_request_queue(device);
	/* Forget the detected format information. */
	device->blocks = 0;
	device->bp_block = 0;
	device->s2b_shift = 0;
	device->state = DASD_STATE_BASIC;
}
259
/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue. Always returns 0.
 */
static inline int
dasd_state_ready_to_online(struct dasd_device * device)
{
	device->state = DASD_STATE_ONLINE;
	dasd_schedule_bh(device);
	return 0;
}
272
/*
 * Stop the requeueing of requests again. Transition ONLINE -> READY;
 * only the state field changes, no i/o is flushed here.
 */
static inline void
dasd_state_online_to_ready(struct dasd_device * device)
{
	device->state = DASD_STATE_READY;
}
281
282/*
283 * Device startup state changes.
284 */
285static inline int
286dasd_increase_state(struct dasd_device *device)
287{
288 int rc;
289
290 rc = 0;
291 if (device->state == DASD_STATE_NEW &&
292 device->target >= DASD_STATE_KNOWN)
293 rc = dasd_state_new_to_known(device);
294
295 if (!rc &&
296 device->state == DASD_STATE_KNOWN &&
297 device->target >= DASD_STATE_BASIC)
298 rc = dasd_state_known_to_basic(device);
299
300 if (!rc &&
301 device->state == DASD_STATE_BASIC &&
302 device->target >= DASD_STATE_READY)
303 rc = dasd_state_basic_to_ready(device);
304
305 if (!rc &&
306 device->state == DASD_STATE_READY &&
307 device->target >= DASD_STATE_ONLINE)
308 rc = dasd_state_ready_to_online(device);
309
310 return rc;
311}
312
/*
 * Device shutdown state changes: step the device back down the state
 * machine ONLINE -> READY -> BASIC -> KNOWN -> NEW until the target
 * state is reached. Shutdown transitions cannot fail; always returns 0.
 */
static inline int
dasd_decrease_state(struct dasd_device *device)
{
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		dasd_state_online_to_ready(device);

	if (device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		dasd_state_ready_to_basic(device);

	if (device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		dasd_state_basic_to_known(device);

	if (device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		dasd_state_known_to_new(device);

	return 0;
}
337
/*
 * This is the main startup/shutdown routine: move the device towards
 * device->target. On a hard failure (anything but -EAGAIN) the target
 * is reset to the current state so no further attempts are made.
 * Waiters on dasd_init_waitq are woken once the target is reached.
 */
static void
dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	/* -EAGAIN means the discipline will re-trigger us later. */
	if (rc && rc != -EAGAIN)
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}
359
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call do do_kick_device to the kernel
 * event daemon.
 */
static void
do_kick_device(void *data)
{
	struct dasd_device *device = (struct dasd_device *) data;

	dasd_change_state(device);
	dasd_schedule_bh(device);
	/* Balance the reference taken by dasd_kick_device. */
	dasd_put_device(device);
}
376
/*
 * Schedule a device state re-evaluation via the kernel event daemon.
 * Takes a device reference that do_kick_device releases again.
 */
void
dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}
384
/*
 * Set the target state for a device and starts the state change.
 */
void
dasd_set_target_state(struct dasd_device *device, int target)
{
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		/* Already at the new target: wake up any waiters. */
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
}
402
403/*
404 * Enable devices with device numbers in [from..to].
405 */
406static inline int
407_wait_for_device(struct dasd_device *device)
408{
409 return (device->state == device->target);
410}
411
/*
 * Bring a device online and block until its state machine settles at
 * the target state.
 */
void
dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));
}
422
423/*
424 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
425 */
426#ifdef CONFIG_DASD_PROFILE
427
428struct dasd_profile_info_t dasd_global_profile;
429unsigned int dasd_profile_level = DASD_PROFILE_OFF;
430
431/*
432 * Increments counter in global and local profiling structures.
433 */
434#define dasd_profile_counter(value, counter, device) \
435{ \
436 int index; \
437 for (index = 0; index < 31 && value >> (2+index); index++); \
438 dasd_global_profile.counter[index]++; \
439 device->profile.counter[index]++; \
440}
441
/*
 * Add profiling information for cqr before execution: record the
 * current ccw queue length in the global and per-device
 * dasd_io_nr_req histograms (index capped at 31).
 */
static inline void
dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
		   struct request *req)
{
	struct list_head *l;
	unsigned int counter;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	/* count the length of the chanq for statistics */
	counter = 0;
	list_for_each(l, &device->ccw_queue)
		if (++counter >= 31)
			break;
	dasd_global_profile.dasd_io_nr_req[counter]++;
	device->profile.dasd_io_nr_req[counter]++;
}
463
/*
 * Add profiling information for cqr after execution: derive the
 * build/start/stop/end latencies from the request's stored clock
 * values and fold them into the global and per-device histograms.
 */
static inline void
dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
		 struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = req->nr_sectors;
	/* Skip requests without a complete set of timestamps or with
	 * zero sectors (would divide by zero below). */
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	/* A zero request counter marks a freshly reset profile. */
	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof (struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!device->profile.dasd_io_reqs)
		memset(&device->profile, 0,
		       sizeof (struct dasd_profile_info_t));
	device->profile.dasd_io_reqs++;
	device->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, device);
	dasd_profile_counter(tottime, dasd_io_times, device);
	dasd_profile_counter(tottimeps, dasd_io_timps, device);
	dasd_profile_counter(strtime, dasd_io_time1, device);
	dasd_profile_counter(irqtime, dasd_io_time2, device);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device);
	dasd_profile_counter(endtime, dasd_io_time3, device);
}
509#else
510#define dasd_profile_start(device, cqr, req) do {} while (0)
511#define dasd_profile_end(device, cqr, req) do {} while (0)
512#endif /* CONFIG_DASD_PROFILE */
513
514/*
515 * Allocate memory for a channel program with 'cplength' channel
516 * command words and 'datasize' additional space. There are two
517 * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed
518 * memory and 2) dasd_smalloc_request uses the static ccw memory
519 * that gets allocated for each device.
520 */
521struct dasd_ccw_req *
522dasd_kmalloc_request(char *magic, int cplength, int datasize,
523 struct dasd_device * device)
524{
525 struct dasd_ccw_req *cqr;
526
527 /* Sanity checks */
528 if ( magic == NULL || datasize > PAGE_SIZE ||
529 (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
530 BUG();
531
532 cqr = kmalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
533 if (cqr == NULL)
534 return ERR_PTR(-ENOMEM);
535 memset(cqr, 0, sizeof(struct dasd_ccw_req));
536 cqr->cpaddr = NULL;
537 if (cplength > 0) {
538 cqr->cpaddr = kmalloc(cplength*sizeof(struct ccw1),
539 GFP_ATOMIC | GFP_DMA);
540 if (cqr->cpaddr == NULL) {
541 kfree(cqr);
542 return ERR_PTR(-ENOMEM);
543 }
544 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
545 }
546 cqr->data = NULL;
547 if (datasize > 0) {
548 cqr->data = kmalloc(datasize, GFP_ATOMIC | GFP_DMA);
549 if (cqr->data == NULL) {
Jesper Juhl17fd6822005-11-07 01:01:30 -0800550 kfree(cqr->cpaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700551 kfree(cqr);
552 return ERR_PTR(-ENOMEM);
553 }
554 memset(cqr->data, 0, datasize);
555 }
556 strncpy((char *) &cqr->magic, magic, 4);
557 ASCEBC((char *) &cqr->magic, 4);
558 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
559 dasd_get_device(device);
560 return cqr;
561}
562
563struct dasd_ccw_req *
564dasd_smalloc_request(char *magic, int cplength, int datasize,
565 struct dasd_device * device)
566{
567 unsigned long flags;
568 struct dasd_ccw_req *cqr;
569 char *data;
570 int size;
571
572 /* Sanity checks */
573 if ( magic == NULL || datasize > PAGE_SIZE ||
574 (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
575 BUG();
576
577 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
578 if (cplength > 0)
579 size += cplength * sizeof(struct ccw1);
580 if (datasize > 0)
581 size += datasize;
582 spin_lock_irqsave(&device->mem_lock, flags);
583 cqr = (struct dasd_ccw_req *)
584 dasd_alloc_chunk(&device->ccw_chunks, size);
585 spin_unlock_irqrestore(&device->mem_lock, flags);
586 if (cqr == NULL)
587 return ERR_PTR(-ENOMEM);
588 memset(cqr, 0, sizeof(struct dasd_ccw_req));
589 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
590 cqr->cpaddr = NULL;
591 if (cplength > 0) {
592 cqr->cpaddr = (struct ccw1 *) data;
593 data += cplength*sizeof(struct ccw1);
594 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
595 }
596 cqr->data = NULL;
597 if (datasize > 0) {
598 cqr->data = data;
599 memset(cqr->data, 0, datasize);
600 }
601 strncpy((char *) &cqr->magic, magic, 4);
602 ASCEBC((char *) &cqr->magic, 4);
603 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
604 dasd_get_device(device);
605 return cqr;
606}
607
/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself. Drops the device reference taken at
 * allocation time.
 */
void
dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}
630
/*
 * Return a request allocated with dasd_smalloc_request to the device's
 * chunk pool and drop the device reference taken at allocation time.
 */
void
dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
641
/*
 * Check discipline magic in cqr. Returns 0 if the request's magic
 * matches the device discipline's ebcname, -EINVAL otherwise (or for
 * a NULL cqr).
 */
static inline int
dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->device;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DEV_MESSAGE(KERN_WARNING, device,
			    " dasd_ccw_req 0x%08x magic doesn't match"
			    " discipline 0x%08x",
			    cqr->magic,
			    *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}
663
/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood; up to 5 clear attempts are made while the request
 * is still in i/o. Returns the last ccw_device_clear rc (or the
 * dasd_check_cqr error).
 */
int
dasd_term_IO(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;
	int retries, rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->device;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			/* Account the aborted attempt against the
			 * request's retry counter. */
			cqr->retries--;
			cqr->status = DASD_CQR_CLEAR;
			cqr->stopclk = get_clock();
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			/* Unexpected rc from the common i/o layer. */
			DEV_MESSAGE(KERN_ERR, device,
				    "line %d unknown RC=%d, please "
				    "report to linux390@de.ibm.com",
				    __LINE__, rc);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_bh(device);
	return rc;
}
719
/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 * Returns 0 on success, -EIO when the request is out of retries, or
 * the rc of ccw_device_start / dasd_check_cqr.
 */
int
dasd_start_IO(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;
	int rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	device = (struct dasd_device *) cqr->device;
	/* Each start attempt consumes a retry; fail when exhausted. */
	if (cqr->retries < 0) {
		DEV_MESSAGE(KERN_DEBUG, device,
			    "start_IO: request %p (%02x/%i) - no retry left.",
			    cqr, cqr->status, cqr->retries);
		cqr->status = DASD_CQR_FAILED;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
			      cqr->lpm, 0);
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		DBF_DEV_EVENT(DBF_DEBUG, device,
			      "start_IO: request %p started successful",
			      cqr);
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available pathes and all these
		 * pathes are gone.
		 * Do a retry with all available pathes.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: selected pathes gone,"
			      " retry on all pathes");
		break;
	case -ENODEV:
	case -EIO:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device gone, retry");
		break;
	default:
		/* Any other rc from the common i/o layer is a bug. */
		DEV_MESSAGE(KERN_ERR, device,
			    "line %d unknown RC=%d, please report"
			    " to linux390@de.ibm.com", __LINE__, rc);
		BUG();
		break;
	}
	return rc;
}
787
/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void
dasd_timeout_device(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	device->stopped &= ~DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_bh(device);
}
809
/*
 * Setup timeout for a device in jiffies.
 * expires == 0 cancels any pending timer; otherwise the device timer
 * is (re)armed to fire 'expires' jiffies from now.
 */
void
dasd_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0) {
		if (timer_pending(&device->timer))
			del_timer(&device->timer);
		return;
	}
	if (timer_pending(&device->timer)) {
		/* Timer already armed: just push the expiry out. */
		if (mod_timer(&device->timer, jiffies + expires))
			return;
	}
	device->timer.function = dasd_timeout_device;
	device->timer.data = (unsigned long) device;
	device->timer.expires = jiffies + expires;
	add_timer(&device->timer);
}
830
831/*
832 * Clear timeout for a device.
833 */
834void
835dasd_clear_timer(struct dasd_device *device)
836{
837 if (timer_pending(&device->timer))
838 del_timer(&device->timer);
839}
840
/*
 * Handle a request whose interrupt arrived as an -EIO error, i.e. the
 * i/o was killed by the common i/o layer. Validates the intparm as a
 * cqr belonging to this device and requeues it for a retry.
 */
static void
dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status in handle_killed_request: "
			"bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}

	device = (struct dasd_device *) cqr->device;
	/* NOTE(review): dasd_device_from_cdev presumably returns a
	 * referenced device; the reference taken here appears to be
	 * balanced only by the dasd_put_device below on the success
	 * path -- confirm against its definition. */
	if (device == NULL ||
	    device != dasd_device_from_cdev(cdev) ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_clear_timer(device);
	dasd_schedule_bh(device);
	dasd_put_device(device);
}
872
/*
 * A state change pending interrupt arrived: notify the extended error
 * reporting facility, lift DASD_STOPPED_PENDING and requeue every
 * request that was in i/o so the bottom half restarts it.
 */
static void
dasd_handle_state_change_pending(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	struct list_head *l, *n;

	/* first of all call extended error reporting */
	dasd_write_eer_trigger(DASD_EER_STATECHANGE, device, NULL);

	device->stopped &= ~DASD_STOPPED_PENDING;

	/* restart all 'running' IO on queue */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		if (cqr->status == DASD_CQR_IN_IO) {
			cqr->status = DASD_CQR_QUEUED;
		}
	}
	dasd_clear_timer(device);
	dasd_schedule_bh(device);
}
894
/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 * Classifies the interrupt (killed request, state change pending,
 * unsolicited, clear pending, or request completion), determines the
 * error recovery action for completions and schedules the bottom half.
 */
void
dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		 struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;
	dasd_era_t era;
	char mask;

	/* An ERR_PTR irb means the i/o could not be performed at all. */
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			dasd_handle_killed_request(cdev, intparm);
			break;
		case -ETIMEDOUT:
			printk(KERN_WARNING"%s(%s): request timed out\n",
			       __FUNCTION__, cdev->dev.bus_id);
			//FIXME - dasd uses own timeout interface...
			break;
		default:
			printk(KERN_WARNING"%s(%s): unknown error %ld\n",
			       __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb));
		}
		return;
	}

	now = get_clock();

	DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
		  cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
		  (unsigned int) intparm);

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((irb->scsw.dstat & mask) == mask) {
		device = dasd_device_from_cdev(cdev);
		if (!IS_ERR(device)) {
			dasd_handle_state_change_pending(device);
			dasd_put_device(device);
		}
		return;
	}

	cqr = (struct dasd_ccw_req *) intparm;

	/* check for unsolicited interrupts */
	if (cqr == NULL) {
		MESSAGE(KERN_DEBUG,
			"unsolicited interrupt received: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	device = (struct dasd_device *) cqr->device;
	/* Make sure the request really belongs to this device. */
	if (device == NULL ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR &&
	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_QUEUED;
		dasd_clear_timer(device);
		dasd_schedule_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status: bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}
	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
		      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);

	/* Find out the appropriate era_action. */
	if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
		era = dasd_era_fatal;
	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
		 irb->scsw.cstat == 0 &&
		 !irb->esw.esw0.erw.cons)
		era = dasd_era_none;
	else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
		era = dasd_era_fatal; /* don't recover this request */
	else if (irb->esw.esw0.erw.cons)
		/* Concurrent sense data: let the discipline decide. */
		era = device->discipline->examine_error(cqr, irb);
	else
		era = dasd_era_recover;

	DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
	expires = 0;
	if (era == dasd_era_none) {
		cqr->status = DASD_CQR_DONE;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->list.next != &device->ccw_queue) {
			next = list_entry(cqr->list.next,
					  struct dasd_ccw_req, list);
			if ((next->status == DASD_CQR_QUEUED) &&
			    (!device->stopped)) {
				if (device->discipline->start_IO(next) == 0)
					expires = next->expires;
				else
					DEV_MESSAGE(KERN_DEBUG, device, "%s",
						    "Interrupt fastpath "
						    "failed!");
			}
		}
	} else {		/* error */
		/* Keep a copy of the irb for the error recovery code. */
		memcpy(&cqr->irb, irb, sizeof (struct irb));
#ifdef ERP_DEBUG
		/* dump sense data */
		dasd_log_sense(cqr, irb);
#endif
		switch (era) {
		case dasd_era_fatal:
			cqr->status = DASD_CQR_FAILED;
			cqr->stopclk = now;
			break;
		case dasd_era_recover:
			cqr->status = DASD_CQR_ERROR;
			break;
		default:
			BUG();
		}
	}
	if (expires != 0)
		dasd_set_timer(device, expires);
	else
		dasd_clear_timer(device);
	dasd_schedule_bh(device);
}
1037
/*
 * posts the buffer_cache about a finalized request:
 * completes all sectors of 'req' with the given uptodate status and
 * feeds the disk entropy pool.
 */
static inline void
dasd_end_request(struct request *req, int uptodate)
{
	/* All hard sectors must complete in one go at this point. */
	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
		BUG();
	add_disk_randomness(req->rq_disk);
	end_that_request_last(req, uptodate);
}
1049
1050/*
1051 * Process finished error recovery ccw.
1052 */
1053static inline void
1054__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
1055{
1056 dasd_erp_fn_t erp_fn;
1057
1058 if (cqr->status == DASD_CQR_DONE)
1059 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1060 else
1061 DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
1062 erp_fn = device->discipline->erp_postaction(cqr);
1063 erp_fn(cqr);
1064}
1065
/*
 * Process ccw request queue: walk the head of the device's ccw queue
 * and handle every request that has reached a final status (DONE,
 * FAILED or ERROR). Error requests are handed to the erp machinery,
 * fatal failures are reported via extended error reporting, finished
 * ERP requests are post-processed, and everything else is rechained
 * to 'final_queue' with its end timestamp set.
 */
static inline void
__dasd_process_ccw_queue(struct dasd_device * device,
			 struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;

restart:
	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		/* Stop list processing at the first non-final request. */
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_ERROR)
			break;
		/* Process requests with DASD_CQR_ERROR */
		if (cqr->status == DASD_CQR_ERROR) {
			if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
				/* Halted i/o is not recoverable. */
				cqr->status = DASD_CQR_FAILED;
				cqr->stopclk = get_clock();
			} else {
				if (cqr->irb.esw.esw0.erw.cons) {
					erp_fn = device->discipline->
						erp_action(cqr);
					erp_fn(cqr);
				} else
					dasd_default_erp_action(cqr);
			}
			/* erp may have changed statuses; rescan the queue. */
			goto restart;
		}

		/* first of all call extended error reporting */
		if (device->eer && cqr->status == DASD_CQR_FAILED) {
			dasd_write_eer_trigger(DASD_EER_FATALERROR,
					       device, cqr);

			/* restart request */
			cqr->status = DASD_CQR_QUEUED;
			cqr->retries = 255;
			device->stopped |= DASD_STOPPED_QUIESCE;
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->list, final_queue);
	}
}
1125
1126static void
1127dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
1128{
1129 struct request *req;
1130 struct dasd_device *device;
1131 int status;
1132
1133 req = (struct request *) data;
1134 device = cqr->device;
1135 dasd_profile_end(device, cqr, req);
1136 status = cqr->device->discipline->free_cp(cqr,req);
1137 spin_lock_irq(&device->request_queue_lock);
1138 dasd_end_request(req, status);
1139 spin_unlock_irq(&device->request_queue_lock);
1140}
1141
1142
1143/*
1144 * Fetch requests from the block device queue.
1145 */
/*
 * Move requests from the block device request queue onto the device
 * ccw queue (up to DASD_CHANQ_MAX_SIZE queued ccw requests).
 * Called with both request_queue_lock and the ccwdev lock held
 * (see dasd_tasklet / do_dasd_request).
 */
static inline void
__dasd_process_blk_queue(struct dasd_device * device)
{
	request_queue_t *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	int nr_queued;

	queue = device->request_queue;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue request from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (device->state != DASD_STATE_READY &&
	    device->state != DASD_STATE_ONLINE)
		return;
	nr_queued = 0;
	/* Count ccw requests that are already waiting to be started. */
	list_for_each_entry(cqr, &device->ccw_queue, list)
		if (cqr->status == DASD_CQR_QUEUED)
			nr_queued++;
	while (!blk_queue_plugged(queue) &&
	       elv_next_request(queue) &&
		nr_queued < DASD_CHANQ_MAX_SIZE) {
		req = elv_next_request(queue);

		/* Writes to a read-only device fail immediately. */
		if (device->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, device,
				      "Rejecting write request %p",
				      req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		/* Disconnected device in EIO mode: fail everything. */
		if (device->stopped & DASD_STOPPED_DC_EIO) {
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		cqr = device->discipline->build_cp(device, req);
		if (IS_ERR(cqr)) {
			/* Out of memory is transient - retry later. */
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			DBF_DEV_EVENT(DBF_ERR, device,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		/* Request is accepted: dequeue and chain it to the ccw queue. */
		cqr->callback = dasd_end_request_cb;
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_QUEUED;
		blkdev_dequeue_request(req);
		list_add_tail(&cqr->list, &device->ccw_queue);
		dasd_profile_start(device, cqr, req);
		nr_queued++;
	}
}
1214
1215/*
1216 * Take a look at the first request on the ccw queue and check
1217 * if it reached its expire time. If so, terminate the IO.
1218 */
1219static inline void
1220__dasd_check_expire(struct dasd_device * device)
1221{
1222 struct dasd_ccw_req *cqr;
1223
1224 if (list_empty(&device->ccw_queue))
1225 return;
1226 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1227 if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
1228 if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
1229 if (device->discipline->term_IO(cqr) != 0)
1230 /* Hmpf, try again in 1/10 sec */
1231 dasd_set_timer(device, 10);
1232 }
1233 }
1234}
1235
1236/*
1237 * Take a look at the first request on the ccw queue and check
1238 * if it needs to be started.
1239 */
/*
 * Start I/O for the first queued ccw request on the device, unless the
 * device is stopped.  FAILFAST requests are failed immediately while
 * the device is stopped (except for plain pending state, and never
 * while extended error reporting wants to keep them).
 * Called with the ccwdev lock held.
 */
static inline void
__dasd_start_head(struct dasd_device * device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	/* check FAILFAST */
	if (device->stopped & ~DASD_STOPPED_PENDING &&
	    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
	    (!device->eer)) {
		cqr->status = DASD_CQR_FAILED;
		/* let the bh run the callbacks for the failed request */
		dasd_schedule_bh(device);
	}
	if ((cqr->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		/* try to start the first I/O that can be started */
		rc = device->discipline->start_IO(cqr);
		if (rc == 0)
			/* arm the expiry timer for the started request */
			dasd_set_timer(device, cqr->expires);
		else if (rc == -EACCES) {
			dasd_schedule_bh(device);
		} else
			/* Hmpf, try again in 1/2 sec */
			dasd_set_timer(device, 50);
	}
}
1269
1270/*
1271 * Remove requests from the ccw queue.
1272 */
1273static void
1274dasd_flush_ccw_queue(struct dasd_device * device, int all)
1275{
1276 struct list_head flush_queue;
1277 struct list_head *l, *n;
1278 struct dasd_ccw_req *cqr;
1279
1280 INIT_LIST_HEAD(&flush_queue);
1281 spin_lock_irq(get_ccwdev_lock(device->cdev));
1282 list_for_each_safe(l, n, &device->ccw_queue) {
1283 cqr = list_entry(l, struct dasd_ccw_req, list);
1284 /* Flush all request or only block device requests? */
1285 if (all == 0 && cqr->callback == dasd_end_request_cb)
1286 continue;
1287 if (cqr->status == DASD_CQR_IN_IO)
1288 device->discipline->term_IO(cqr);
1289 if (cqr->status != DASD_CQR_DONE ||
1290 cqr->status != DASD_CQR_FAILED) {
1291 cqr->status = DASD_CQR_FAILED;
1292 cqr->stopclk = get_clock();
1293 }
1294 /* Process finished ERP request. */
1295 if (cqr->refers) {
1296 __dasd_process_erp(device, cqr);
1297 continue;
1298 }
1299 /* Rechain request on device request queue */
1300 cqr->endclk = get_clock();
1301 list_move_tail(&cqr->list, &flush_queue);
1302 }
1303 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1304 /* Now call the callback function of flushed requests */
1305 list_for_each_safe(l, n, &flush_queue) {
1306 cqr = list_entry(l, struct dasd_ccw_req, list);
1307 if (cqr->callback != NULL)
1308 (cqr->callback)(cqr, cqr->callback_data);
1309 }
1310}
1311
1312/*
1313 * Acquire the device lock and process queues for the device.
1314 */
/*
 * Tasklet body: reap finished ccw requests, run their callbacks
 * without holding the ccwdev lock, then refill the ccw queue from the
 * block layer and kick off the next I/O.  Lock order here is
 * request_queue_lock outside, ccwdev lock inside.
 */
static void
dasd_tasklet(struct dasd_device * device)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Allow the tasklet to be scheduled again. */
	atomic_set (&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_check_expire(device);
	/* Finish off requests on ccw queue */
	__dasd_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		/* detach before the callback - sleepers test list_empty() */
		list_del_init(&cqr->list);
		if (cqr->callback != NULL)
			(cqr->callback)(cqr, cqr->callback_data);
	}
	spin_lock_irq(&device->request_queue_lock);
	spin_lock(get_ccwdev_lock(device->cdev));
	/* Get new request from the block device request queue */
	__dasd_process_blk_queue(device);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_start_head(device);
	spin_unlock(get_ccwdev_lock(device->cdev));
	spin_unlock_irq(&device->request_queue_lock);
	/* Drop the reference taken by dasd_schedule_bh. */
	dasd_put_device(device);
}
1347
1348/*
1349 * Schedules a call to dasd_tasklet over the device tasklet.
1350 */
void
dasd_schedule_bh(struct dasd_device * device)
{
	/* Protect against rescheduling: only the caller that flips the
	 * flag from 0 to 1 may schedule the tasklet. */
	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
		return;
	/* Hold a device reference until dasd_tasklet drops it. */
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}
1360
1361/*
1362 * Queue a request to the head of the ccw_queue. Start the I/O if
1363 * possible.
1364 */
void
dasd_add_request_head(struct dasd_ccw_req *req)
{
	struct dasd_device *device;
	unsigned long flags;

	device = req->device;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	req->status = DASD_CQR_QUEUED;
	req->device = device;	/* NOTE(review): self-assignment, no effect */
	/* Head insertion: this request runs before everything queued. */
	list_add(&req->list, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
1380
1381/*
1382 * Queue a request to the tail of the ccw_queue. Start the I/O if
1383 * possible.
1384 */
void
dasd_add_request_tail(struct dasd_ccw_req *req)
{
	struct dasd_device *device;
	unsigned long flags;

	device = req->device;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	req->status = DASD_CQR_QUEUED;
	req->device = device;	/* NOTE(review): self-assignment, no effect */
	/* Tail insertion: this request runs after everything queued. */
	list_add_tail(&req->list, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
1400
1401/*
1402 * Wakeup callback.
1403 */
1404static void
1405dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1406{
1407 wake_up((wait_queue_head_t *) data);
1408}
1409
1410static inline int
1411_wait_for_wakeup(struct dasd_ccw_req *cqr)
1412{
1413 struct dasd_device *device;
1414 int rc;
1415
1416 device = cqr->device;
1417 spin_lock_irq(get_ccwdev_lock(device->cdev));
Horst Hummelc2ba4442006-02-01 03:06:37 -08001418 rc = ((cqr->status == DASD_CQR_DONE ||
1419 cqr->status == DASD_CQR_FAILED) &&
1420 list_empty(&cqr->list));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1422 return rc;
1423}
1424
1425/*
1426 * Attempts to start a special ccw queue and waits for its completion.
1427 */
/*
 * Queue the ccw request at the tail of the device queue and sleep
 * (non-interruptible) until it reaches a final state.
 * Returns 0 on DONE, -EIO on FAILED.
 */
int
dasd_sleep_on(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	/* wait_q lives on this stack; dasd_wakeup_cb wakes it. */
	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}
1455
1456/*
1457 * Attempts to start a special ccw queue and wait interruptible
1458 * for its completion.
1459 */
/*
 * Queue the ccw request and sleep interruptibly until it is final.
 * On a signal the request is cancelled: if it is already in I/O it is
 * terminated and we wait (non-interruptibly) for the termination to
 * complete; if it is still queued it is simply removed and failed.
 * Returns 0 on DONE, -EIO otherwise.
 */
int
dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc, finished;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	finished = 0;
	while (!finished) {
		rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
		if (rc != -ERESTARTSYS) {
			/* Request is final (done or failed) */
			rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
			break;
		}
		/* A signal arrived - try to take the request back. */
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			/* terminate running cqr */
			if (device->discipline->term_IO) {
				cqr->retries = -1;
				device->discipline->term_IO(cqr);
				/*
				 * wait (non-interruptible) for final status
				 * because the signal is still pending
				 */
				spin_unlock_irq(get_ccwdev_lock(device->cdev));
				wait_event(wait_q, _wait_for_wakeup(cqr));
				spin_lock_irq(get_ccwdev_lock(device->cdev));
				rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
				finished = 1;
			}
			break;
		case DASD_CQR_QUEUED:
			/* request not started yet - remove it and give up */
			list_del_init(&cqr->list);
			rc = -EIO;
			finished = 1;
			break;
		default:
			/* cqr with 'non-interruptable' status - just wait */
			break;
		}
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	return rc;
}
1520
1521/*
1522 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
1523 * for eckd devices) the currently running request has to be terminated
1524 * and be put back to status queued, before the special request is added
1525 * to the head of the queue. Then the special request is waited on normally.
1526 */
1527static inline int
1528_dasd_term_running_cqr(struct dasd_device *device)
1529{
1530 struct dasd_ccw_req *cqr;
1531 int rc;
1532
1533 if (list_empty(&device->ccw_queue))
1534 return 0;
1535 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1536 rc = device->discipline->term_IO(cqr);
1537 if (rc == 0) {
1538 /* termination successful */
1539 cqr->status = DASD_CQR_QUEUED;
1540 cqr->startclk = cqr->stopclk = 0;
1541 cqr->starttime = 0;
1542 }
1543 return rc;
1544}
1545
/*
 * Terminate the currently running request, queue this ccw request at
 * the HEAD of the device queue and sleep until it is final (used e.g.
 * for steal-lock on ECKD devices - see comment above).
 * Returns 0 on DONE, -EIO on FAILED, or the term_IO error.
 */
int
dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* push the running request back to QUEUED first */
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	/* head insertion: run before the request we just terminated */
	list_add(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}
1578
1579/*
1580 * Cancels a request that was started with dasd_sleep_on_req.
1581 * This is useful to timeout requests. The request will be
1582 * terminated if it is currently in i/o.
1583 * Returns 1 if the request has been terminated.
1584 */
/*
 * Cancel a request started with dasd_sleep_on*.  Queued requests are
 * failed in place; requests already in I/O are terminated.
 * Returns 1 if the request had to be terminated, 0 otherwise.
 */
int
dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->device;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to failed */
		cqr->status = DASD_CQR_FAILED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		if (device->discipline->term_IO(cqr) != 0)
			/* what to do if unable to terminate ??????
			   e.g. not _IN_IO */
			cqr->status = DASD_CQR_FAILED;
		cqr->stopclk = get_clock();
		rc = 1;
		break;
	case DASD_CQR_DONE:
	case DASD_CQR_FAILED:
		/* already finished - do nothing */
		break;
	default:
		DEV_MESSAGE(KERN_ALERT, device,
			    "invalid status %02x in request",
			    cqr->status);
		BUG();

	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	/* let the bh reap the cancelled request and run its callback */
	dasd_schedule_bh(device);
	return rc;
}
1623
1624/*
1625 * SECTION: Block device operations (request queue, partitions, open, release).
1626 */
1627
1628/*
1629 * Dasd request queue function. Called from ll_rw_blk.c
1630 */
1631static void
1632do_dasd_request(request_queue_t * queue)
1633{
1634 struct dasd_device *device;
1635
1636 device = (struct dasd_device *) queue->queuedata;
1637 spin_lock(get_ccwdev_lock(device->cdev));
1638 /* Get new request from the block device request queue */
1639 __dasd_process_blk_queue(device);
1640 /* Now check if the head of the ccw queue needs to be started. */
1641 __dasd_start_head(device);
1642 spin_unlock(get_ccwdev_lock(device->cdev));
1643}
1644
1645/*
1646 * Allocate and initialize request queue and default I/O scheduler.
1647 */
/*
 * Allocate the block request queue and replace the default elevator
 * with the deadline I/O scheduler.
 * Returns 0 on success or a negative error code.
 */
static int
dasd_alloc_queue(struct dasd_device * device)
{
	int rc;

	device->request_queue = blk_init_queue(do_dasd_request,
					       &device->request_queue_lock);
	if (device->request_queue == NULL)
		return -ENOMEM;

	device->request_queue->queuedata = device;

	/* swap out the elevator blk_init_queue installed for deadline */
	elevator_exit(device->request_queue->elevator);
	rc = elevator_init(device->request_queue, "deadline");
	if (rc) {
		blk_cleanup_queue(device->request_queue);
		return rc;
	}
	return 0;
}
1668
1669/*
1670 * Allocate and initialize request queue.
1671 */
/*
 * Configure the request queue limits from the discipline/device
 * geometry.  The -1L arguments effectively mean "no limit" (they wrap
 * to the maximum unsigned value in the block layer setters).
 */
static void
dasd_setup_queue(struct dasd_device * device)
{
	int max;

	blk_queue_hardsect_size(device->request_queue, device->bp_block);
	/* max sectors per request: discipline block limit in sectors */
	max = device->discipline->max_blocks << device->s2b_shift;
	blk_queue_max_sectors(device->request_queue, max);
	blk_queue_max_phys_segments(device->request_queue, -1L);
	blk_queue_max_hw_segments(device->request_queue, -1L);
	blk_queue_max_segment_size(device->request_queue, -1L);
	blk_queue_segment_boundary(device->request_queue, -1L);
	blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL);
}
1686
1687/*
1688 * Deactivate and free request queue.
1689 */
1690static void
1691dasd_free_queue(struct dasd_device * device)
1692{
1693 if (device->request_queue) {
1694 blk_cleanup_queue(device->request_queue);
1695 device->request_queue = NULL;
1696 }
1697}
1698
1699/*
1700 * Flush request on the request queue.
1701 */
1702static void
1703dasd_flush_request_queue(struct dasd_device * device)
1704{
1705 struct request *req;
1706
1707 if (!device->request_queue)
1708 return;
1709
1710 spin_lock_irq(&device->request_queue_lock);
1711 while (!list_empty(&device->request_queue->queue_head)) {
1712 req = elv_next_request(device->request_queue);
1713 if (req == NULL)
1714 break;
1715 dasd_end_request(req, 0);
1716 blkdev_dequeue_request(req);
1717 }
1718 spin_unlock_irq(&device->request_queue_lock);
1719}
1720
/*
 * Block device open: take an open count and a discipline module
 * reference; refuse offline, probeonly and unrecognized devices.
 * On error the goto chain unwinds exactly what was taken.
 */
static int
dasd_open(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_device *device = disk->private_data;
	int rc;

	atomic_inc(&device->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	/* pin the discipline module for the lifetime of the open */
	if (!try_module_get(device->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		DEV_MESSAGE(KERN_INFO, device, "%s",
			    "No access to device due to probeonly mode");
		rc = -EPERM;
		goto out;
	}

	if (device->state < DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, device, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	return 0;

out:
	module_put(device->discipline->owner);
unlock:
	atomic_dec(&device->open_count);
	return rc;
}
1761
1762static int
1763dasd_release(struct inode *inp, struct file *filp)
1764{
1765 struct gendisk *disk = inp->i_bdev->bd_disk;
1766 struct dasd_device *device = disk->private_data;
1767
1768 atomic_dec(&device->open_count);
1769 module_put(device->discipline->owner);
1770 return 0;
1771}
1772
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08001773/*
1774 * Return disk geometry.
1775 */
1776static int
1777dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1778{
1779 struct dasd_device *device;
1780
1781 device = bdev->bd_disk->private_data;
1782 if (!device)
1783 return -ENODEV;
1784
1785 if (!device->discipline ||
1786 !device->discipline->fill_geometry)
1787 return -EINVAL;
1788
1789 device->discipline->fill_geometry(device, geo);
1790 geo->start = get_start_sect(bdev) >> device->s2b_shift;
1791 return 0;
1792}
1793
/* Block device operations installed on every DASD gendisk. */
struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_compat_ioctl,
	.getgeo		= dasd_getgeo,
};
1803
1804
/*
 * Module teardown: undo everything dasd_init set up, in reverse order.
 */
static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_ioctl_exit();
	if (dasd_page_cache != NULL) {
		kmem_cache_destroy(dasd_page_cache);
		dasd_page_cache = NULL;
	}
	dasd_gendisk_exit();
	dasd_devmap_exit();
	devfs_remove("dasd");
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
}
1824
1825/*
1826 * SECTION: common functions for ccw_driver use
1827 */
1828
Horst Hummel1c01b8a2006-01-06 00:19:15 -08001829/*
1830 * Initial attempt at a probe function. this can be simplified once
1831 * the other detection code is gone.
1832 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833int
1834dasd_generic_probe (struct ccw_device *cdev,
1835 struct dasd_discipline *discipline)
1836{
1837 int ret;
1838
1839 ret = dasd_add_sysfs_files(cdev);
1840 if (ret) {
1841 printk(KERN_WARNING
1842 "dasd_generic_probe: could not add sysfs entries "
1843 "for %s\n", cdev->dev.bus_id);
Horst Hummel59afda72005-05-16 21:53:39 -07001844 } else {
1845 cdev->handler = &dasd_int_handler;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846 }
1847
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848 return ret;
1849}
1850
Horst Hummel1c01b8a2006-01-06 00:19:15 -08001851/*
1852 * This will one day be called from a global not_oper handler.
1853 * It is also used by driver_unregister during module unload.
1854 */
void
dasd_generic_remove (struct ccw_device *cdev)
{
	struct dasd_device *device;

	/* no more interrupts for this device */
	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	dasd_delete_device(device);
}
1880
Horst Hummel1c01b8a2006-01-06 00:19:15 -08001881/*
1882 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883 * the device is detected for the first time and is supposed to be used
Horst Hummel1c01b8a2006-01-06 00:19:15 -08001884 * or the user has started activation through sysfs.
1885 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886int
1887dasd_generic_set_online (struct ccw_device *cdev,
Peter Oberparleiteraa888612006-02-20 18:28:13 -08001888 struct dasd_discipline *base_discipline)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889
1890{
Peter Oberparleiteraa888612006-02-20 18:28:13 -08001891 struct dasd_discipline *discipline;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 struct dasd_device *device;
Horst Hummelc6eb7b72005-09-03 15:57:58 -07001893 int rc;
Horst Hummelf24acd42005-05-01 08:58:59 -07001894
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 device = dasd_create_device(cdev);
1896 if (IS_ERR(device))
1897 return PTR_ERR(device);
1898
Peter Oberparleiteraa888612006-02-20 18:28:13 -08001899 discipline = base_discipline;
Horst Hummelc6eb7b72005-09-03 15:57:58 -07001900 if (device->features & DASD_FEATURE_USEDIAG) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 if (!dasd_diag_discipline_pointer) {
1902 printk (KERN_WARNING
1903 "dasd_generic couldn't online device %s "
1904 "- discipline DIAG not available\n",
1905 cdev->dev.bus_id);
1906 dasd_delete_device(device);
1907 return -ENODEV;
1908 }
1909 discipline = dasd_diag_discipline_pointer;
1910 }
Peter Oberparleiteraa888612006-02-20 18:28:13 -08001911 if (!try_module_get(base_discipline->owner)) {
1912 dasd_delete_device(device);
1913 return -EINVAL;
1914 }
1915 if (!try_module_get(discipline->owner)) {
1916 module_put(base_discipline->owner);
1917 dasd_delete_device(device);
1918 return -EINVAL;
1919 }
1920 device->base_discipline = base_discipline;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 device->discipline = discipline;
1922
1923 rc = discipline->check_device(device);
1924 if (rc) {
1925 printk (KERN_WARNING
1926 "dasd_generic couldn't online device %s "
1927 "with discipline %s rc=%i\n",
1928 cdev->dev.bus_id, discipline->name, rc);
Peter Oberparleiteraa888612006-02-20 18:28:13 -08001929 module_put(discipline->owner);
1930 module_put(base_discipline->owner);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931 dasd_delete_device(device);
1932 return rc;
1933 }
1934
1935 dasd_set_target_state(device, DASD_STATE_ONLINE);
1936 if (device->state <= DASD_STATE_KNOWN) {
1937 printk (KERN_WARNING
1938 "dasd_generic discipline not found for %s\n",
1939 cdev->dev.bus_id);
1940 rc = -ENODEV;
1941 dasd_set_target_state(device, DASD_STATE_NEW);
1942 dasd_delete_device(device);
1943 } else
1944 pr_debug("dasd_generic device %s found\n",
1945 cdev->dev.bus_id);
1946
1947 /* FIXME: we have to wait for the root device but we don't want
1948 * to wait for each single device but for all at once. */
1949 wait_event(dasd_init_waitq, _wait_for_device(device));
1950
1951 dasd_put_device(device);
1952
1953 return rc;
1954}
1955
/*
 * Take a device offline: refuse while any real opener remains, then
 * drive it back to DASD_STATE_NEW and delete it.
 */
int
dasd_generic_set_offline (struct ccw_device *cdev)
{
	struct dasd_device *device;
	int max_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	max_count = device->bdev ? 0 : -1;
	if (atomic_read(&device->open_count) > max_count) {
		printk (KERN_WARNING "Can't offline dasd device with open"
			" count = %i.\n",
			atomic_read(&device->open_count));
		clear_bit(DASD_FLAG_OFFLINE, &device->flags);
		dasd_put_device(device);
		return -EBUSY;
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	dasd_delete_device(device);

	return 0;
}
1991
/*
 * CIO event notification: handle path loss (fail or requeue the
 * in-flight requests and stop the device) and path return (clear the
 * disconnect stop bits and restart I/O).
 * Returns 1 if the event was handled, 0 otherwise.
 */
int
dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int ret;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return 0;
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_NO_PATH:
		/* first of all call extended error reporting */
		dasd_write_eer_trigger(DASD_EER_NOPATH, device, NULL);

		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
			/* fail-on-disconnect: abort in-flight requests */
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO)
					cqr->status = DASD_CQR_FAILED;
			device->stopped |= DASD_STOPPED_DC_EIO;
		} else {
			/* wait-on-disconnect: requeue and retry later */
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO) {
					cqr->status = DASD_CQR_QUEUED;
					cqr->retries++;
				}
			device->stopped |= DASD_STOPPED_DC_WAIT;
			dasd_set_timer(device, 0);
		}
		dasd_schedule_bh(device);
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
		dasd_schedule_bh(device);
		ret = 1;
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	dasd_put_device(device);
	return ret;
}
2042
2043/*
2044 * Automatically online either all dasd devices (dasd_autodetect) or
2045 * all devices specified with dasd= parameters.
2046 */
Cornelia Huckc5512882005-06-25 14:55:28 -07002047static int
2048__dasd_auto_online(struct device *dev, void *data)
2049{
2050 struct ccw_device *cdev;
2051
2052 cdev = to_ccwdev(dev);
2053 if (dasd_autodetect || dasd_busid_known(cdev->dev.bus_id) == 0)
2054 ccw_device_set_online(cdev);
2055 return 0;
2056}
2057
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058void
2059dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver)
2060{
2061 struct device_driver *drv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062
2063 drv = get_driver(&dasd_discipline_driver->driver);
Cornelia Huckc5512882005-06-25 14:55:28 -07002064 driver_for_each_device(drv, NULL, NULL, __dasd_auto_online);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065 put_driver(drv);
2066}
2067
Stefan Weinhuber12c3a542006-02-03 03:03:49 -08002068/*
2069 * notifications for extended error reports
2070 */
2071static struct notifier_block *dasd_eer_chain;
2072
/* Register an extended-error-reporting consumer on the EER chain. */
int
dasd_register_eer_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&dasd_eer_chain, nb);
}
2078
2079int
2080dasd_unregister_eer_notifier(struct notifier_block *nb)
2081{
2082 return notifier_chain_unregister(&dasd_eer_chain, nb);
2083}
2084
2085/*
2086 * Notify the registered error reporting module of a problem
2087 */
2088void
2089dasd_write_eer_trigger(unsigned int id, struct dasd_device *device,
2090 struct dasd_ccw_req *cqr)
2091{
2092 if (device->eer) {
2093 struct dasd_eer_trigger temp;
2094 temp.id = id;
2095 temp.device = device;
2096 temp.cqr = cqr;
2097 notifier_call_chain(&dasd_eer_chain, DASD_EER_TRIGGER,
2098 (void *)&temp);
2099 }
2100}
2101
2102/*
2103 * Tell the registered error reporting module to disable error reporting for
2104 * a given device and to cleanup any private data structures on that device.
2105 */
2106static void
2107dasd_disable_eer(struct dasd_device *device)
2108{
2109 notifier_call_chain(&dasd_eer_chain, DASD_EER_DISABLE, (void *)device);
2110}
2111
2112
/*
 * Module initialization: set up the debug facility, the devfs
 * directory, the devmap, gendisk, parameter parsing, ioctl and
 * (optionally) proc interfaces, in that order. On any failure the
 * shared error path calls dasd_exit(), which tears down whatever
 * was already initialized.
 */
static int __init
dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	/* start quiet; only emergency-level events are logged initially */
	debug_set_level(dasd_debug_area, DBF_EMERG);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	/* no DIAG discipline loaded yet; set by dasd_diag on load */
	dasd_diag_discipline_pointer = NULL;

	rc = devfs_mk_dir("dasd");
	if (rc)
		goto failed;
	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	/* parse the dasd= module/kernel parameters (needs devmap) */
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_ioctl_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
	/* dasd_exit() copes with partially completed initialization */
	dasd_exit();
	return rc;
}
2160
/* Module entry/exit points. */
module_init(dasd_init);
module_exit(dasd_exit);

/* Shared state used by the discipline drivers (eckd, fba, diag). */
EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);

/* Core request, queue and timer handling for discipline drivers. */
EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);

/* Generic ccw driver callbacks, GPL-only. */
EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_auto_online);

/* Extended error reporting hooks for the dasd_eer module. */
EXPORT_SYMBOL(dasd_register_eer_notifier);
EXPORT_SYMBOL(dasd_unregister_eer_notifier);
EXPORT_SYMBOL(dasd_write_eer_trigger);
2197
2198
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199/*
2200 * Overrides for Emacs so that we follow Linus's tabbing style.
2201 * Emacs will notice this stuff at the end of the file and automatically
2202 * adjust the settings for this buffer only. This must remain at the end
2203 * of the file.
2204 * ---------------------------------------------------------------------------
2205 * Local variables:
2206 * c-indent-level: 4
2207 * c-brace-imaginary-offset: 0
2208 * c-brace-offset: -4
2209 * c-argdecl-indent: 4
2210 * c-label-offset: -4
2211 * c-continued-statement-offset: 4
2212 * c-continued-brace-offset: 0
2213 * indent-tabs-mode: 1
2214 * tab-width: 8
2215 * End:
2216 */