/*
 * PAV alias management for the DASD ECKD discipline
 *
 * Copyright IBM Corporation, 2007
 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
 */

#include <linux/list.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"


/*
 * General concept of alias management:
 * - PAV and DASD alias management is specific to the eckd discipline.
 * - A device is connected to an lcu as long as the device exists.
 *   dasd_alias_make_device_known_to_lcu will be called when the
 *   device is checked by the eckd discipline and
 *   dasd_alias_disconnect_device_from_lcu will be called
 *   before the device is deleted.
 * - The dasd_alias_add_device / dasd_alias_remove_device
 *   functions mark the point when a device is 'ready for service'.
 * - A summary unit check is a rare occasion, but it is mandatory to
 *   support it. It requires some complex recovery actions before the
 *   devices can be used again (see dasd_alias_handle_summary_unit_check).
 * - dasd_alias_get_start_dev will find an alias device that can be used
 *   instead of the base device and does some (very simple) load balancing.
 *   This is the function that gets called for each I/O, so when improving
 *   something, this function should get faster or better, the rest just
 *   has to be correct.
 * A simplified sketch of the resulting call sequence follows below.
 */

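/*
 * Roughly, the eckd discipline drives this file in the following order
 * (simplified sketch; locking and error handling omitted):
 *
 *	dasd_alias_make_device_known_to_lcu(device);    (device is checked)
 *	dasd_alias_add_device(device);                  (ready for service)
 *		...
 *	startdev = dasd_alias_get_start_dev(base);      (for each I/O)
 *		...
 *	dasd_alias_remove_device(device);               (no longer ready for service)
 *	dasd_alias_disconnect_device_from_lcu(device);  (before device deletion)
 */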

static void summary_unit_check_handling_work(struct work_struct *);
static void lcu_update_work(struct work_struct *);
static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);

static struct alias_root aliastree = {
	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};

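/*
 * The lookup helpers below do no locking of their own: _find_server and
 * _find_lcu expect aliastree.lock to be held by the caller, _find_group
 * is called with the lcu->lock of the lcu it searches.
 */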
static struct alias_server *_find_server(struct dasd_uid *uid)
{
	struct alias_server *pos;
	list_for_each_entry(pos, &aliastree.serverlist, server) {
		if (!strncmp(pos->uid.vendor, uid->vendor,
			     sizeof(uid->vendor))
		    && !strncmp(pos->uid.serial, uid->serial,
				sizeof(uid->serial)))
			return pos;
	}
	return NULL;
}

static struct alias_lcu *_find_lcu(struct alias_server *server,
				   struct dasd_uid *uid)
{
	struct alias_lcu *pos;
	list_for_each_entry(pos, &server->lculist, lcu) {
		if (pos->uid.ssid == uid->ssid)
			return pos;
	}
	return NULL;
}

static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
					   struct dasd_uid *uid)
{
	struct alias_pav_group *pos;
	__u8 search_unit_addr;

	/* for hyper pav there is only one group */
	if (lcu->pav == HYPER_PAV) {
		if (list_empty(&lcu->grouplist))
			return NULL;
		else
			return list_first_entry(&lcu->grouplist,
						struct alias_pav_group, group);
	}

	/* for base pav we have to find the group that matches the base */
	if (uid->type == UA_BASE_DEVICE)
		search_unit_addr = uid->real_unit_addr;
	else
		search_unit_addr = uid->base_unit_addr;
	list_for_each_entry(pos, &lcu->grouplist, group) {
		if (pos->uid.base_unit_addr == search_unit_addr &&
		    !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
			return pos;
	}
	return NULL;
}

static struct alias_server *_allocate_server(struct dasd_uid *uid)
{
	struct alias_server *server;

	server = kzalloc(sizeof(*server), GFP_KERNEL);
	if (!server)
		return ERR_PTR(-ENOMEM);
	memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
	INIT_LIST_HEAD(&server->server);
	INIT_LIST_HEAD(&server->lculist);
	return server;
}

static void _free_server(struct alias_server *server)
{
	kfree(server);
}

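/*
 * An lcu is allocated with everything it may need later on: the unit
 * address configuration buffer and a preallocated CCW request (rsu_cqr)
 * for the reset summary unit check, so that summary unit check recovery
 * does not have to allocate memory.
 */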
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
	if (!lcu)
		return ERR_PTR(-ENOMEM);
	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err1;
	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err2;
	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
				       GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err3;
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err4;

	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
	lcu->uid.ssid = uid->ssid;
	lcu->pav = NO_PAV;
	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
	INIT_LIST_HEAD(&lcu->lcu);
	INIT_LIST_HEAD(&lcu->inactive_devices);
	INIT_LIST_HEAD(&lcu->active_devices);
	INIT_LIST_HEAD(&lcu->grouplist);
	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
	spin_lock_init(&lcu->lock);
	return lcu;

out_err4:
	kfree(lcu->rsu_cqr->cpaddr);
out_err3:
	kfree(lcu->rsu_cqr);
out_err2:
	kfree(lcu->uac);
out_err1:
	kfree(lcu);
	return ERR_PTR(-ENOMEM);
}

static void _free_lcu(struct alias_lcu *lcu)
{
	kfree(lcu->rsu_cqr->data);
	kfree(lcu->rsu_cqr->cpaddr);
	kfree(lcu->rsu_cqr);
	kfree(lcu->uac);
	kfree(lcu);
}

/*
 * This is the function that will allocate all the server and lcu data,
 * so this function must be called first for a new device.
 * If the return value is 1, the lcu was already known before; if it
 * is 0, this is a new lcu.
 * A negative return code indicates that something went wrong (e.g. -ENOMEM).
 */
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	unsigned long flags;
	struct alias_server *server, *newserver;
	struct alias_lcu *lcu, *newlcu;
	int is_lcu_known;
	struct dasd_uid *uid;

	private = (struct dasd_eckd_private *) device->private;
	uid = &private->uid;
	spin_lock_irqsave(&aliastree.lock, flags);
	is_lcu_known = 1;
	server = _find_server(uid);
	if (!server) {
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newserver = _allocate_server(uid);
		if (IS_ERR(newserver))
			return PTR_ERR(newserver);
		spin_lock_irqsave(&aliastree.lock, flags);
		server = _find_server(uid);
		if (!server) {
			list_add(&newserver->server, &aliastree.serverlist);
			server = newserver;
			is_lcu_known = 0;
		} else {
			/* someone was faster */
			_free_server(newserver);
		}
	}

	lcu = _find_lcu(server, uid);
	if (!lcu) {
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newlcu = _allocate_lcu(uid);
		if (IS_ERR(newlcu))
			return PTR_ERR(newlcu);
		spin_lock_irqsave(&aliastree.lock, flags);
		lcu = _find_lcu(server, uid);
		if (!lcu) {
			list_add(&newlcu->lcu, &server->lculist);
			lcu = newlcu;
		} else {
			/* someone was faster */
			_free_lcu(newlcu);
		}
		is_lcu_known = 0;
	}
	spin_lock(&lcu->lock);
	list_add(&device->alias_list, &lcu->inactive_devices);
	private->lcu = lcu;
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(&aliastree.lock, flags);

	return is_lcu_known;
}

/*
 * This function removes a device from the scope of alias management.
 * The complicated part is to make sure that it is not in use by
 * any of the workers. If necessary, cancel the work.
 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	unsigned long flags;
	struct alias_lcu *lcu;
	struct alias_server *server;
	int was_pending;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	spin_lock_irqsave(&lcu->lock, flags);
	list_del_init(&device->alias_list);
	/* make sure that the workers don't use this device */
	if (device == lcu->suc_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->suc_data.device)
			lcu->suc_data.device = NULL;
	}
	was_pending = 0;
	if (device == lcu->ruac_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		was_pending = 1;
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device)
			lcu->ruac_data.device = NULL;
	}
	private->lcu = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);

	spin_lock_irqsave(&aliastree.lock, flags);
	spin_lock(&lcu->lock);
	if (list_empty(&lcu->grouplist) &&
	    list_empty(&lcu->active_devices) &&
	    list_empty(&lcu->inactive_devices)) {
		list_del(&lcu->lcu);
		spin_unlock(&lcu->lock);
		_free_lcu(lcu);
		lcu = NULL;
	} else {
		if (was_pending)
			_schedule_lcu_update(lcu, NULL);
		spin_unlock(&lcu->lock);
	}
	server = _find_server(&private->uid);
	if (server && list_empty(&server->lculist)) {
		list_del(&server->server);
		_free_server(server);
	}
	spin_unlock_irqrestore(&aliastree.lock, flags);
}

/*
 * This function assumes that the unit address configuration stored
 * in the lcu is up to date and will update the device uid before
 * adding it to a pav group.
 */
static int _add_device_to_lcu(struct alias_lcu *lcu,
			      struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct alias_pav_group *group;
	struct dasd_uid *uid;

	private = (struct dasd_eckd_private *) device->private;
	uid = &private->uid;
	uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type;
	uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua;
	dasd_set_uid(device->cdev, &private->uid);

	/* if we have no PAV anyway, we don't need to bother with PAV groups */
	if (lcu->pav == NO_PAV) {
		list_move(&device->alias_list, &lcu->active_devices);
		return 0;
	}

	group = _find_group(lcu, uid);
	if (!group) {
		group = kzalloc(sizeof(*group), GFP_ATOMIC);
		if (!group)
			return -ENOMEM;
		memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor));
		memcpy(group->uid.serial, uid->serial, sizeof(uid->serial));
		group->uid.ssid = uid->ssid;
		if (uid->type == UA_BASE_DEVICE)
			group->uid.base_unit_addr = uid->real_unit_addr;
		else
			group->uid.base_unit_addr = uid->base_unit_addr;
		memcpy(group->uid.vduit, uid->vduit, sizeof(uid->vduit));
		INIT_LIST_HEAD(&group->group);
		INIT_LIST_HEAD(&group->baselist);
		INIT_LIST_HEAD(&group->aliaslist);
		list_add(&group->group, &lcu->grouplist);
	}
	if (uid->type == UA_BASE_DEVICE)
		list_move(&device->alias_list, &group->baselist);
	else
		list_move(&device->alias_list, &group->aliaslist);
	private->pavgroup = group;
	return 0;
}

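/*
 * Counterpart to _add_device_to_lcu: move the device back to the lcu's
 * inactive list, detach it from its pav group and free the group if it
 * has become empty. Called with lcu->lock held.
 */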
static void _remove_device_from_lcu(struct alias_lcu *lcu,
				    struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct alias_pav_group *group;

	private = (struct dasd_eckd_private *) device->private;
	list_move(&device->alias_list, &lcu->inactive_devices);
	group = private->pavgroup;
	if (!group)
		return;
	private->pavgroup = NULL;
	if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
		list_del(&group->group);
		kfree(group);
		return;
	}
	if (group->next == device)
		group->next = NULL;
}

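/*
 * Read the unit address configuration of the lcu from the storage server:
 * a Perform Subsystem Function (PSF) CCW carrying a Prepare for Read
 * Subsystem Data order (suborder 0x0e, unit address configuration),
 * chained to a Read Subsystem Data CCW that transfers the configuration
 * into lcu->uac.
 */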
static int read_unit_address_configuration(struct dasd_device *device,
					   struct alias_lcu *lcu)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	unsigned long flags;

	cqr = dasd_kmalloc_request("ECKD",
				   1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data)),
				   device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 10;
	cqr->expires = 20 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x0e;	/* Read unit address configuration */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - unit address configuration */
	memset(lcu->uac, 0, sizeof(*(lcu->uac)));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*(lcu->uac));
	ccw->cda = (__u32)(addr_t) lcu->uac;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	/* need to unset flag here to detect race with summary unit check */
	spin_lock_irqsave(&lcu->lock, flags);
	lcu->flags &= ~NEED_UAC_UPDATE;
	spin_unlock_irqrestore(&lcu->lock, flags);

	do {
		rc = dasd_sleep_on(cqr);
	} while (rc && (cqr->retries > 0));
	if (rc) {
		spin_lock_irqsave(&lcu->lock, flags);
		lcu->flags |= NEED_UAC_UPDATE;
		spin_unlock_irqrestore(&lcu->lock, flags);
	}
	dasd_kfree_request(cqr, cqr->memdev);
	return rc;
}

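/*
 * Rebuild the alias bookkeeping of an lcu: dissolve all existing pav
 * groups, re-read the unit address configuration through refdev, derive
 * the PAV mode (NO_PAV, BASE_PAV or HYPER_PAV) from it, and sort the
 * devices on the active list back into pav groups.
 */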
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	unsigned long flags;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	int i, rc;
	struct dasd_eckd_private *private;

	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);

	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	spin_lock_irqsave(&lcu->lock, flags);
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}

	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		_add_device_to_lcu(lcu, device);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}

static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check flags again, as there could have been another
	 * prepare_update or a new device added while we were still
	 * processing the data
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
		DEV_MESSAGE(KERN_WARNING, device, "could not update"
			    " alias data in lcu (rc = %d), retry later", rc);
		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
	} else {
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}

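/*
 * Mark the lcu as needing a unit address configuration update and, unless
 * an update is already scheduled or running, pick a device through which
 * to run it and queue lcu_update_work. Called with lcu->lock held.
 */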
static int _schedule_lcu_update(struct alias_lcu *lcu,
				struct dasd_device *device)
{
	struct dasd_device *usedev = NULL;
	struct alias_pav_group *group;

	lcu->flags |= NEED_UAC_UPDATE;
	if (lcu->ruac_data.device) {
		/* already scheduled or running */
		return 0;
	}
	if (device && !list_empty(&device->alias_list))
		usedev = device;

	if (!usedev && !list_empty(&lcu->grouplist)) {
		group = list_first_entry(&lcu->grouplist,
					 struct alias_pav_group, group);
		if (!list_empty(&group->baselist))
			usedev = list_first_entry(&group->baselist,
						  struct dasd_device,
						  alias_list);
		else if (!list_empty(&group->aliaslist))
			usedev = list_first_entry(&group->aliaslist,
						  struct dasd_device,
						  alias_list);
	}
	if (!usedev && !list_empty(&lcu->active_devices)) {
		usedev = list_first_entry(&lcu->active_devices,
					  struct dasd_device, alias_list);
	}
	/*
	 * if we haven't found a proper device yet, give up for now, the next
	 * device that will be set active will trigger an lcu update
	 */
	if (!usedev)
		return -EINVAL;
	lcu->ruac_data.device = usedev;
	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
	return 0;
}

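/*
 * Called once the device is ready for service: if no update is pending,
 * sort the device into its pav group right away; otherwise park it on
 * the active list and schedule an lcu update.
 */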
int dasd_alias_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct alias_lcu *lcu;
	unsigned long flags;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	rc = 0;
	spin_lock_irqsave(&lcu->lock, flags);
	if (!(lcu->flags & UPDATE_PENDING)) {
		rc = _add_device_to_lcu(lcu, device);
		if (rc)
			lcu->flags |= UPDATE_PENDING;
	}
	if (lcu->flags & UPDATE_PENDING) {
		list_move(&device->alias_list, &lcu->active_devices);
		_schedule_lcu_update(lcu, device);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return rc;
}

int dasd_alias_remove_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct alias_lcu *lcu;
	unsigned long flags;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	spin_lock_irqsave(&lcu->lock, flags);
	_remove_device_from_lcu(lcu, device);
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}

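/*
 * Pick an alias device on which to start an I/O request instead of the
 * given base device. The aliases of the pav group are used round-robin
 * via group->next; the chosen alias is only returned if its request
 * count is lower than that of the base device and it is not stopped.
 */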
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
	struct dasd_device *alias_device;
	struct alias_pav_group *group;
	struct alias_lcu *lcu;
	struct dasd_eckd_private *private, *alias_priv;
	unsigned long flags;

	private = (struct dasd_eckd_private *) base_device->private;
	group = private->pavgroup;
	lcu = private->lcu;
	if (!group || !lcu)
		return NULL;
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;

	spin_lock_irqsave(&lcu->lock, flags);
	alias_device = group->next;
	if (!alias_device) {
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);
	alias_priv = (struct dasd_eckd_private *) alias_device->private;
	if ((alias_priv->count < private->count) && !alias_device->stopped)
		return alias_device;
	else
		return NULL;
}

/*
 * Summary unit check handling depends on the way alias devices
 * are handled, so it is done here rather than in dasd_eckd.c
 */
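/*
 * Issue a Reset Summary Unit Check (RSCK) CCW with the given reason code,
 * using the rsu_cqr that was preallocated for the lcu so that no memory
 * has to be allocated at this point.
 */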
static int reset_summary_unit_check(struct alias_lcu *lcu,
				    struct dasd_device *device,
				    char reason)
{
	struct dasd_ccw_req *cqr;
	int rc = 0;
	struct ccw1 *ccw;

	cqr = lcu->rsu_cqr;
	strncpy((char *) &cqr->magic, "ECKD", 4);
	ASCEBC((char *) &cqr->magic, 4);
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32)(addr_t) cqr->data;
	((char *)cqr->data)[0] = reason;

	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 255;	/* set retry counter to enable basic ERP */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 5 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}

static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;
	struct dasd_eckd_private *private;

	/* active and inactive list can contain alias as well as base devices */
	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type != UA_BASE_DEVICE)
			continue;
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type != UA_BASE_DEVICE)
			continue;
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			dasd_schedule_block_bh(device->block);
			dasd_schedule_device_bh(device);
		}
	}
}

static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device, *temp;
	struct dasd_eckd_private *private;
	int rc;
	unsigned long flags;
	LIST_HEAD(active);

	/*
	 * Problem here is that dasd_flush_device_queue may wait
	 * for termination of a request to complete. We can't keep
	 * the lcu lock during that time, so we must assume that
	 * the lists may have changed.
	 * Idea: first gather all active alias devices in a separate list,
	 * then flush the first element of this list unlocked, and afterwards
	 * check if it is still on the list before moving it to the
	 * active_devices list.
	 */

	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(device, temp, &lcu->active_devices,
				 alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type == UA_BASE_DEVICE)
			continue;
		list_move(&device->alias_list, &active);
	}

	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_splice_init(&pavgroup->aliaslist, &active);
	}
	while (!list_empty(&active)) {
		device = list_first_entry(&active, struct dasd_device,
					  alias_list);
		spin_unlock_irqrestore(&lcu->lock, flags);
		rc = dasd_flush_device_queue(device);
		spin_lock_irqsave(&lcu->lock, flags);
		/*
		 * only move device around if it wasn't moved away while we
		 * were waiting for the flush
		 */
		if (device == list_first_entry(&active,
					       struct dasd_device, alias_list))
			list_move(&device->alias_list, &lcu->active_devices);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}

static void __stop_device_on_lcu(struct dasd_device *device,
				 struct dasd_device *pos)
{
	/* If pos == device then device is already locked! */
	if (pos == device) {
		pos->stopped |= DASD_STOPPED_SU;
		return;
	}
	spin_lock(get_ccwdev_lock(pos->cdev));
	pos->stopped |= DASD_STOPPED_SU;
	spin_unlock(get_ccwdev_lock(pos->cdev));
}

/*
 * This function is called in interrupt context, so the
 * cdev lock for device is already locked!
 */
static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
				     struct dasd_device *device)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *pos;

	list_for_each_entry(pos, &lcu->active_devices, alias_list)
		__stop_device_on_lcu(device, pos);
	list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
		__stop_device_on_lcu(device, pos);
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(pos, &pavgroup->baselist, alias_list)
			__stop_device_on_lcu(device, pos);
		list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
			__stop_device_on_lcu(device, pos);
	}
}

static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;
	unsigned long flags;

	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		device->stopped &= ~DASD_STOPPED_SU;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	}

	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		device->stopped &= ~DASD_STOPPED_SU;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	}

	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
			device->stopped &= ~DASD_STOPPED_SU;
			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
					       flags);
		}
		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
			device->stopped &= ~DASD_STOPPED_SU;
			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
					       flags);
		}
	}
}

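/*
 * Worker for summary unit check recovery: flush all alias devices on the
 * lcu, reset the summary unit check on the device that reported it,
 * unstop the devices again, restart the base devices and finally schedule
 * an lcu update to re-read the alias configuration.
 */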
static void summary_unit_check_handling_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct summary_unit_check_work_data *suc_data;
	unsigned long flags;
	struct dasd_device *device;

	suc_data = container_of(work, struct summary_unit_check_work_data,
				worker);
	lcu = container_of(suc_data, struct alias_lcu, suc_data);
	device = suc_data->device;

	/* 1. flush alias devices */
	flush_all_alias_devices_on_lcu(lcu);

	/* 2. reset summary unit check */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	device->stopped &= ~(DASD_STOPPED_SU | DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	reset_summary_unit_check(lcu, device, suc_data->reason);

	spin_lock_irqsave(&lcu->lock, flags);
	_unstop_all_devices_on_lcu(lcu);
	_restart_all_base_devices_on_lcu(lcu);
	/* 3. read new alias configuration */
	_schedule_lcu_update(lcu, device);
	lcu->suc_data.device = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);
}

/*
 * note: this will be called from int handler context (cdev locked)
 */
void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
					  struct irb *irb)
{
	struct alias_lcu *lcu;
	char reason;
	struct dasd_eckd_private *private;
	char *sense;

	private = (struct dasd_eckd_private *) device->private;

	sense = dasd_get_sense(irb);
	if (sense) {
		reason = sense[8];
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
			      "eckd handle summary unit check: reason", reason);
	} else {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "eckd handle summary unit check:"
			      " no reason code available");
		return;
	}

	lcu = private->lcu;
	if (!lcu) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "device not ready to handle summary"
			    " unit check (no lcu structure)");
		return;
	}
	spin_lock(&lcu->lock);
	_stop_all_devices_on_lcu(lcu, device);
	/* prepare for lcu_update */
	private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
	/*
	 * If this device is about to be removed, just return and wait for
	 * the next interrupt on a different device
	 */
	if (list_empty(&device->alias_list)) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "device is in offline processing,"
			    " don't do summary unit check handling");
		spin_unlock(&lcu->lock);
		return;
	}
	if (lcu->suc_data.device) {
		/* already scheduled or running */
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "previous instance of summary unit check worker"
			    " still pending");
		spin_unlock(&lcu->lock);
		return;
	}
	lcu->suc_data.reason = reason;
	lcu->suc_data.device = device;
	spin_unlock(&lcu->lock);
	schedule_work(&lcu->suc_data.worker);
}