blob: 1e560188dd13808d86cebc705901ba1b25e45714 [file] [log] [blame]
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001/*
2 * PAV alias management for the DASD ECKD discipline
3 *
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02004 * Copyright IBM Corp. 2007
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01005 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
6 */
7
Stefan Haberlandca99dab2009-09-11 10:28:30 +02008#define KMSG_COMPONENT "dasd-eckd"
Stefan Haberlandfc19f382009-03-26 15:23:49 +01009
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010010#include <linux/list.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090011#include <linux/slab.h>
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010012#include <asm/ebcdic.h>
13#include "dasd_int.h"
14#include "dasd_eckd.h"
15
16#ifdef PRINTK_HEADER
17#undef PRINTK_HEADER
18#endif /* PRINTK_HEADER */
19#define PRINTK_HEADER "dasd(eckd):"
20
21
/*
 * General concept of alias management:
 * - PAV and DASD alias management is specific to the eckd discipline.
 * - A device is connected to an lcu as long as the device exists.
 *   dasd_alias_make_device_known_to_lcu will be called when the
 *   device is checked by the eckd discipline and
 *   dasd_alias_disconnect_device_from_lcu will be called
 *   before the device is deleted.
 * - The dasd_alias_add_device / dasd_alias_remove_device
 *   functions mark the point when a device is 'ready for service'.
 * - A summary unit check is a rare occasion, but it is mandatory to
 *   support it. It requires some complex recovery actions before the
 *   devices can be used again (see dasd_alias_handle_summary_unit_check).
 * - dasd_alias_get_start_dev will find an alias device that can be used
 *   instead of the base device and does some (very simple) load balancing.
 *   This is the function that gets called for each I/O, so when improving
 *   something, this function should get faster or better, the rest has just
 *   to be correct.
 */
41
42
43static void summary_unit_check_handling_work(struct work_struct *);
44static void lcu_update_work(struct work_struct *);
45static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
46
/*
 * Root of the global alias tree: one list of known storage servers,
 * protected by a single spinlock.
 */
static struct alias_root aliastree = {
	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};
51
52static struct alias_server *_find_server(struct dasd_uid *uid)
53{
54 struct alias_server *pos;
55 list_for_each_entry(pos, &aliastree.serverlist, server) {
56 if (!strncmp(pos->uid.vendor, uid->vendor,
57 sizeof(uid->vendor))
58 && !strncmp(pos->uid.serial, uid->serial,
59 sizeof(uid->serial)))
60 return pos;
Peter Senna Tschudin3b974872015-08-04 17:11:15 +020061 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010062 return NULL;
63}
64
65static struct alias_lcu *_find_lcu(struct alias_server *server,
66 struct dasd_uid *uid)
67{
68 struct alias_lcu *pos;
69 list_for_each_entry(pos, &server->lculist, lcu) {
70 if (pos->uid.ssid == uid->ssid)
71 return pos;
Peter Senna Tschudin3b974872015-08-04 17:11:15 +020072 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010073 return NULL;
74}
75
76static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
77 struct dasd_uid *uid)
78{
79 struct alias_pav_group *pos;
80 __u8 search_unit_addr;
81
82 /* for hyper pav there is only one group */
83 if (lcu->pav == HYPER_PAV) {
84 if (list_empty(&lcu->grouplist))
85 return NULL;
86 else
87 return list_first_entry(&lcu->grouplist,
88 struct alias_pav_group, group);
89 }
90
91 /* for base pav we have to find the group that matches the base */
92 if (uid->type == UA_BASE_DEVICE)
93 search_unit_addr = uid->real_unit_addr;
94 else
95 search_unit_addr = uid->base_unit_addr;
96 list_for_each_entry(pos, &lcu->grouplist, group) {
Stefan Weinhuber4abb08c2008-08-01 16:39:09 +020097 if (pos->uid.base_unit_addr == search_unit_addr &&
98 !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
Stefan Weinhuber8e09f212008-01-26 14:11:23 +010099 return pos;
Peter Senna Tschudin3b974872015-08-04 17:11:15 +0200100 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100101 return NULL;
102}
103
104static struct alias_server *_allocate_server(struct dasd_uid *uid)
105{
106 struct alias_server *server;
107
108 server = kzalloc(sizeof(*server), GFP_KERNEL);
109 if (!server)
110 return ERR_PTR(-ENOMEM);
111 memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
112 memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
113 INIT_LIST_HEAD(&server->server);
114 INIT_LIST_HEAD(&server->lculist);
115 return server;
116}
117
/* Release an alias_server allocated by _allocate_server(). */
static void _free_server(struct alias_server *server)
{
	kfree(server);	/* kfree(NULL) is a no-op */
}
122
/*
 * Allocate and initialize a new alias_lcu, including the buffers for
 * the unit address configuration (uac) and the pre-built reset summary
 * unit check request (rsu_cqr). The DMA-capable buffers are used for
 * channel I/O, hence GFP_DMA. Returns ERR_PTR(-ENOMEM) on failure.
 */
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
	if (!lcu)
		return ERR_PTR(-ENOMEM);
	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err1;
	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err2;
	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
				       GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err3;
	/* 16 bytes of payload for the RSCK command data */
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err4;

	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
	lcu->uid.ssid = uid->ssid;
	lcu->pav = NO_PAV;
	/* a fresh lcu needs its unit address configuration read first */
	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
	INIT_LIST_HEAD(&lcu->lcu);
	INIT_LIST_HEAD(&lcu->inactive_devices);
	INIT_LIST_HEAD(&lcu->active_devices);
	INIT_LIST_HEAD(&lcu->grouplist);
	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
	spin_lock_init(&lcu->lock);
	init_completion(&lcu->lcu_setup);
	return lcu;

/* unwind the allocations in reverse order */
out_err4:
	kfree(lcu->rsu_cqr->cpaddr);
out_err3:
	kfree(lcu->rsu_cqr);
out_err2:
	kfree(lcu->uac);
out_err1:
	kfree(lcu);
	return ERR_PTR(-ENOMEM);
}
169
170static void _free_lcu(struct alias_lcu *lcu)
171{
172 kfree(lcu->rsu_cqr->data);
173 kfree(lcu->rsu_cqr->cpaddr);
174 kfree(lcu->rsu_cqr);
175 kfree(lcu->uac);
176 kfree(lcu);
177}
178
/*
 * This is the function that will allocate all the server and lcu data,
 * so this function must be called first for a new device.
 * If the return value is 1, the lcu was already known before, if it
 * is 0, this is a new lcu.
 * Negative return code indicates that something went wrong (e.g. -ENOMEM)
 */
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;
	struct alias_server *server, *newserver;
	struct alias_lcu *lcu, *newlcu;
	struct dasd_uid uid;

	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&aliastree.lock, flags);
	server = _find_server(&uid);
	if (!server) {
		/*
		 * Allocation may sleep, so drop the lock and re-check
		 * afterwards whether another caller created the server
		 * in the meantime (double-checked pattern).
		 */
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newserver = _allocate_server(&uid);
		if (IS_ERR(newserver))
			return PTR_ERR(newserver);
		spin_lock_irqsave(&aliastree.lock, flags);
		server = _find_server(&uid);
		if (!server) {
			list_add(&newserver->server, &aliastree.serverlist);
			server = newserver;
		} else {
			/* someone was faster */
			_free_server(newserver);
		}
	}

	lcu = _find_lcu(server, &uid);
	if (!lcu) {
		/* same unlock/allocate/re-check dance as for the server */
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newlcu = _allocate_lcu(&uid);
		if (IS_ERR(newlcu))
			return PTR_ERR(newlcu);
		spin_lock_irqsave(&aliastree.lock, flags);
		lcu = _find_lcu(server, &uid);
		if (!lcu) {
			list_add(&newlcu->lcu, &server->lculist);
			lcu = newlcu;
		} else {
			/* someone was faster */
			_free_lcu(newlcu);
		}
	}
	/* the new device starts out on the inactive list of its lcu */
	spin_lock(&lcu->lock);
	list_add(&device->alias_list, &lcu->inactive_devices);
	private->lcu = lcu;
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(&aliastree.lock, flags);

	return 0;
}
237
/*
 * This function removes a device from the scope of alias management.
 * The complicated part is to make sure that it is not in use by
 * any of the workers. If necessary cancel the work.
 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;
	struct alias_lcu *lcu;
	struct alias_server *server;
	int was_pending;
	struct dasd_uid uid;

	lcu = private->lcu;
	/* nothing to do if already disconnected */
	if (!lcu)
		return;
	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&lcu->lock, flags);
	list_del_init(&device->alias_list);
	/* make sure that the workers don't use this device */
	if (device == lcu->suc_data.device) {
		/*
		 * cancel_work_sync may sleep and the worker takes
		 * lcu->lock itself, so the lock must be dropped here;
		 * re-check the device afterwards because the worker may
		 * have completed and released it in the meantime.
		 */
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->suc_data.device) {
			dasd_put_device(device);
			lcu->suc_data.device = NULL;
		}
	}
	was_pending = 0;
	if (device == lcu->ruac_data.device) {
		/* same unlock/cancel/re-check pattern for the ruac worker */
		spin_unlock_irqrestore(&lcu->lock, flags);
		was_pending = 1;
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device) {
			dasd_put_device(device);
			lcu->ruac_data.device = NULL;
		}
	}
	private->lcu = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);

	spin_lock_irqsave(&aliastree.lock, flags);
	spin_lock(&lcu->lock);
	/* tear down the lcu if this was its last device */
	if (list_empty(&lcu->grouplist) &&
	    list_empty(&lcu->active_devices) &&
	    list_empty(&lcu->inactive_devices)) {
		list_del(&lcu->lcu);
		spin_unlock(&lcu->lock);
		_free_lcu(lcu);
		lcu = NULL;
	} else {
		/* a cancelled pending update must be rescheduled elsewhere */
		if (was_pending)
			_schedule_lcu_update(lcu, NULL);
		spin_unlock(&lcu->lock);
	}
	/* tear down the server if this was its last lcu */
	server = _find_server(&uid);
	if (server && list_empty(&server->lculist)) {
		list_del(&server->server);
		_free_server(server);
	}
	spin_unlock_irqrestore(&aliastree.lock, flags);
}
304
/*
 * This function assumes that the unit address configuration stored
 * in the lcu is up to date and will update the device uid before
 * adding it to a pav group.
 */

static int _add_device_to_lcu(struct alias_lcu *lcu,
			      struct dasd_device *device,
			      struct dasd_device *pos)
{

	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *group;
	struct dasd_uid uid;

	/*
	 * update type/base address from the uac under the ccw device lock
	 * and take a consistent snapshot of the uid for the group lookup
	 */
	spin_lock(get_ccwdev_lock(device->cdev));
	private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
	private->uid.base_unit_addr =
		lcu->uac->unit[private->uid.real_unit_addr].base_ua;
	uid = private->uid;
	spin_unlock(get_ccwdev_lock(device->cdev));
	/* if we have no PAV anyway, we don't need to bother with PAV groups */
	if (lcu->pav == NO_PAV) {
		list_move(&device->alias_list, &lcu->active_devices);
		return 0;
	}
	group = _find_group(lcu, &uid);
	if (!group) {
		/* GFP_ATOMIC: called with lcu->lock held */
		group = kzalloc(sizeof(*group), GFP_ATOMIC);
		if (!group)
			return -ENOMEM;
		memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
		memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
		group->uid.ssid = uid.ssid;
		if (uid.type == UA_BASE_DEVICE)
			group->uid.base_unit_addr = uid.real_unit_addr;
		else
			group->uid.base_unit_addr = uid.base_unit_addr;
		memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
		INIT_LIST_HEAD(&group->group);
		INIT_LIST_HEAD(&group->baselist);
		INIT_LIST_HEAD(&group->aliaslist);
		list_add(&group->group, &lcu->grouplist);
	}
	/* base devices and alias devices are kept on separate lists */
	if (uid.type == UA_BASE_DEVICE)
		list_move(&device->alias_list, &group->baselist);
	else
		list_move(&device->alias_list, &group->aliaslist);
	private->pavgroup = group;
	return 0;
};
356
357static void _remove_device_from_lcu(struct alias_lcu *lcu,
358 struct dasd_device *device)
359{
Sebastian Ott543691a42016-03-04 10:34:05 +0100360 struct dasd_eckd_private *private = device->private;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100361 struct alias_pav_group *group;
362
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100363 list_move(&device->alias_list, &lcu->inactive_devices);
364 group = private->pavgroup;
365 if (!group)
366 return;
367 private->pavgroup = NULL;
368 if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
369 list_del(&group->group);
370 kfree(group);
371 return;
372 }
373 if (group->next == device)
374 group->next = NULL;
375};
376
Stefan Haberland03429f32012-09-11 17:19:12 +0200377static int
378suborder_not_supported(struct dasd_ccw_req *cqr)
379{
380 char *sense;
381 char reason;
382 char msg_format;
383 char msg_no;
384
385 sense = dasd_get_sense(&cqr->irb);
386 if (!sense)
387 return 0;
388
389 reason = sense[0];
390 msg_format = (sense[7] & 0xF0);
391 msg_no = (sense[7] & 0x0F);
392
393 /* command reject, Format 0 MSG 4 - invalid parameter */
394 if ((reason == 0x80) && (msg_format == 0x00) && (msg_no == 0x04))
395 return 1;
396
397 return 0;
398}
399
/*
 * Build and synchronously execute a PSF/RSSD channel program that reads
 * the unit address configuration of the lcu into lcu->uac.
 * Returns 0 on success, -EOPNOTSUPP if the storage server rejects the
 * suborder, or another negative errno; on failure NEED_UAC_UPDATE is
 * set again so the update will be retried.
 */
static int read_unit_address_configuration(struct dasd_device *device,
					   struct alias_lcu *lcu)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	unsigned long flags;

	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data)),
				   device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 10;
	cqr->expires = 20 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x0e;	/* Read unit address configuration */
	/* all other bytes of prssdp must be zero */

	/* first CCW: PSF with the prssd parameter block */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;	/* chain to the RSSD CCW */
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	memset(lcu->uac, 0, sizeof(*(lcu->uac)));

	/* second CCW: RSSD reads the data into lcu->uac */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*(lcu->uac));
	ccw->cda = (__u32)(addr_t) lcu->uac;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* need to unset flag here to detect race with summary unit check */
	spin_lock_irqsave(&lcu->lock, flags);
	lcu->flags &= ~NEED_UAC_UPDATE;
	spin_unlock_irqrestore(&lcu->lock, flags);

	do {
		rc = dasd_sleep_on(cqr);
		if (rc && suborder_not_supported(cqr))
			return -EOPNOTSUPP;
	} while (rc && (cqr->retries > 0));
	if (rc) {
		/* re-arm the update so it is retried later */
		spin_lock_irqsave(&lcu->lock, flags);
		lcu->flags |= NEED_UAC_UPDATE;
		spin_unlock_irqrestore(&lcu->lock, flags);
	}
	dasd_kfree_request(cqr, cqr->memdev);
	return rc;
}
462
/*
 * Re-read the unit address configuration via refdev and rebuild the
 * lcu's PAV mode and group assignments from scratch: first dissolve
 * all existing groups (moving their devices to the active list), then
 * read the uac with the lock dropped (the I/O sleeps), and finally
 * re-sort all active devices into groups. Returns 0 on success or a
 * negative errno from the uac read.
 */
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	unsigned long flags;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	int i, rc;
	struct dasd_eckd_private *private;

	spin_lock_irqsave(&lcu->lock, flags);
	/* dissolve all pav groups; devices go back to the active list */
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);

	/* sleeps; must be called without lcu->lock held */
	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	spin_lock_irqsave(&lcu->lock, flags);
	/* determine the PAV mode from the first alias entry in the uac */
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}

	/* re-sort all devices into their (possibly new) pav groups */
	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		_add_device_to_lcu(lcu, device, refdev);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}
516
/*
 * Delayed-work handler that refreshes the lcu's unit address
 * configuration; retries in 30s if the update failed or another
 * update was requested meanwhile. Drops the device reference taken by
 * _schedule_lcu_update() once no retry is scheduled.
 */
static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check flags again, as there could have been another
	 * prepare_update or a new device while we were still
	 * processing the data
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
			    " alias data in lcu (rc = %d), retry later", rc);
		/* keep the device reference only if the retry got queued */
		if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
			dasd_put_device(device);
	} else {
		dasd_put_device(device);
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
547
/*
 * Schedule the delayed ruac work for this lcu, picking a device to run
 * the update I/O on: the given device if it is still connected,
 * otherwise any device from the first pav group or the active list.
 * A reference on the chosen device is held while the work is pending.
 * Returns 0 on success or if an update is already scheduled/running,
 * -EINVAL if no usable device was found (the next device set active
 * will trigger the update instead). Caller must hold lcu->lock.
 */
static int _schedule_lcu_update(struct alias_lcu *lcu,
				struct dasd_device *device)
{
	struct dasd_device *usedev = NULL;
	struct alias_pav_group *group;

	lcu->flags |= NEED_UAC_UPDATE;
	if (lcu->ruac_data.device) {
		/* already scheduled or running */
		return 0;
	}
	if (device && !list_empty(&device->alias_list))
		usedev = device;

	if (!usedev && !list_empty(&lcu->grouplist)) {
		group = list_first_entry(&lcu->grouplist,
					 struct alias_pav_group, group);
		if (!list_empty(&group->baselist))
			usedev = list_first_entry(&group->baselist,
						  struct dasd_device,
						  alias_list);
		else if (!list_empty(&group->aliaslist))
			usedev = list_first_entry(&group->aliaslist,
						  struct dasd_device,
						  alias_list);
	}
	if (!usedev && !list_empty(&lcu->active_devices)) {
		usedev = list_first_entry(&lcu->active_devices,
					  struct dasd_device, alias_list);
	}
	/*
	 * if we haven't found a proper device yet, give up for now, the next
	 * device that will be set active will trigger an lcu update
	 */
	if (!usedev)
		return -EINVAL;
	/* reference is dropped by lcu_update_work when done */
	dasd_get_device(usedev);
	lcu->ruac_data.device = usedev;
	if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
		dasd_put_device(usedev);
	return 0;
}
590
/*
 * Mark a device as ready for service: sort it into its pav group, or,
 * if the lcu's unit address configuration is stale (UPDATE_PENDING),
 * park it on the active list and schedule an lcu update instead.
 * Returns 0 on success or a negative errno from _add_device_to_lcu.
 */
int dasd_alias_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_lcu *lcu;
	unsigned long flags;
	int rc;

	lcu = private->lcu;
	rc = 0;
	spin_lock_irqsave(&lcu->lock, flags);
	if (!(lcu->flags & UPDATE_PENDING)) {
		rc = _add_device_to_lcu(lcu, device, device);
		if (rc)
			/* e.g. -ENOMEM for a group: fall back to an update */
			lcu->flags |= UPDATE_PENDING;
	}
	if (lcu->flags & UPDATE_PENDING) {
		list_move(&device->alias_list, &lcu->active_devices);
		_schedule_lcu_update(lcu, device);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return rc;
}
613
/*
 * Like dasd_alias_add_device, but force a refresh of the lcu's unit
 * address configuration first by setting UPDATE_PENDING.
 */
int dasd_alias_update_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	private->lcu->flags |= UPDATE_PENDING;
	return dasd_alias_add_device(device);
}
621
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100622int dasd_alias_remove_device(struct dasd_device *device)
623{
Sebastian Ott543691a42016-03-04 10:34:05 +0100624 struct dasd_eckd_private *private = device->private;
625 struct alias_lcu *lcu = private->lcu;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100626 unsigned long flags;
627
Stefan Haberlandf602f6d62011-01-31 11:30:03 +0100628 /* nothing to do if already removed */
629 if (!lcu)
630 return 0;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100631 spin_lock_irqsave(&lcu->lock, flags);
632 _remove_device_from_lcu(lcu, device);
633 spin_unlock_irqrestore(&lcu->lock, flags);
634 return 0;
635}
636
/*
 * Pick an alias device to start I/O on instead of the base device,
 * doing simple round-robin load balancing over the base's pav group.
 * Called for every I/O, so it must stay fast. Returns NULL when the
 * base device itself should be used (no PAV, update in progress,
 * prefix not supported, no suitable alias, or alias busier than base).
 */
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
	struct dasd_eckd_private *alias_priv, *private = base_device->private;
	struct alias_pav_group *group = private->pavgroup;
	struct alias_lcu *lcu = private->lcu;
	struct dasd_device *alias_device;
	unsigned long flags;

	if (!group || !lcu)
		return NULL;
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;
	if (unlikely(!(private->features.feature[8] & 0x01))) {
		/*
		 * PAV enabled but prefix not, very unlikely
		 * seems to be a lost pathgroup
		 * use base device to do IO
		 */
		DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
			      "Prefix not enabled with PAV enabled\n");
		return NULL;
	}

	spin_lock_irqsave(&lcu->lock, flags);
	/* group->next is the round-robin cursor over the alias list */
	alias_device = group->next;
	if (!alias_device) {
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	/* advance the cursor, wrapping to the head of the alias list */
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);
	alias_priv = alias_device->private;
	/* only use the alias if it is less loaded than the base and usable */
	if ((alias_priv->count < private->count) && !alias_device->stopped &&
	    !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
		return alias_device;
	else
		return NULL;
}
687
/*
 * Summary unit check handling depends on the way alias devices
 * are handled so it is done here rather then in dasd_eckd.c
 */
/*
 * Build and synchronously run a Reset Summary Unit Check (RSCK) request
 * on the given device, using the cqr that was pre-allocated with the
 * lcu. The reason code from the summary unit check is passed back to
 * the storage server in the first data byte. Returns the rc from
 * dasd_sleep_on_immediatly.
 */
static int reset_summary_unit_check(struct alias_lcu *lcu,
				    struct dasd_device *device,
				    char reason)
{
	struct dasd_ccw_req *cqr;
	int rc = 0;
	struct ccw1 *ccw;

	cqr = lcu->rsu_cqr;
	strncpy((char *) &cqr->magic, "ECKD", 4);
	ASCEBC((char *) &cqr->magic, 4);
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
	ccw->flags = CCW_FLAG_SLI;	/* data may be shorter than count */
	ccw->count = 16;
	ccw->cda = (__u32)(addr_t) cqr->data;
	((char *)cqr->data)[0] = reason;

	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 255;	/* set retry counter to enable basic ERP */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 5 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
722
723static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
724{
725 struct alias_pav_group *pavgroup;
726 struct dasd_device *device;
727 struct dasd_eckd_private *private;
728
729 /* active and inactive list can contain alias as well as base devices */
730 list_for_each_entry(device, &lcu->active_devices, alias_list) {
Sebastian Ott543691a42016-03-04 10:34:05 +0100731 private = device->private;
Stefan Haberland9bfefde2015-12-15 11:00:51 +0100732 if (private->uid.type != UA_BASE_DEVICE)
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100733 continue;
734 dasd_schedule_block_bh(device->block);
735 dasd_schedule_device_bh(device);
736 }
737 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
Sebastian Ott543691a42016-03-04 10:34:05 +0100738 private = device->private;
Stefan Haberland9bfefde2015-12-15 11:00:51 +0100739 if (private->uid.type != UA_BASE_DEVICE)
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100740 continue;
741 dasd_schedule_block_bh(device->block);
742 dasd_schedule_device_bh(device);
743 }
744 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
745 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
746 dasd_schedule_block_bh(device->block);
747 dasd_schedule_device_bh(device);
748 }
749 }
750}
751
/*
 * Flush the request queues of all alias devices on the lcu and move
 * the devices to the lcu's active list (out of their pav groups).
 */
static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device, *temp;
	struct dasd_eckd_private *private;
	int rc;
	unsigned long flags;
	LIST_HEAD(active);

	/*
	 * Problem here is that dasd_flush_device_queue may wait
	 * for termination of a request to complete. We can't keep
	 * the lcu lock during that time, so we must assume that
	 * the lists may have changed.
	 * Idea: first gather all active alias devices in a separate list,
	 * then flush the first element of this list unlocked, and afterwards
	 * check if it is still on the list before moving it to the
	 * active_devices list.
	 */

	spin_lock_irqsave(&lcu->lock, flags);
	/* collect alias devices from the active list ... */
	list_for_each_entry_safe(device, temp, &lcu->active_devices,
				 alias_list) {
		private = device->private;
		if (private->uid.type == UA_BASE_DEVICE)
			continue;
		list_move(&device->alias_list, &active);
	}

	/* ... and from every pav group */
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_splice_init(&pavgroup->aliaslist, &active);
	}
	while (!list_empty(&active)) {
		device = list_first_entry(&active, struct dasd_device,
					  alias_list);
		/* flushing may sleep, so drop the lock around it */
		spin_unlock_irqrestore(&lcu->lock, flags);
		rc = dasd_flush_device_queue(device);
		spin_lock_irqsave(&lcu->lock, flags);
		/*
		 * only move device around if it wasn't moved away while we
		 * were waiting for the flush
		 */
		if (device == list_first_entry(&active,
					       struct dasd_device, alias_list)) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
803
Stefan Haberland9bfefde2015-12-15 11:00:51 +0100804static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100805{
806 struct alias_pav_group *pavgroup;
Stefan Haberland9bfefde2015-12-15 11:00:51 +0100807 struct dasd_device *device;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100808
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100809 list_for_each_entry(device, &lcu->active_devices, alias_list) {
810 spin_lock(get_ccwdev_lock(device->cdev));
Stefan Haberland9bfefde2015-12-15 11:00:51 +0100811 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100812 spin_unlock(get_ccwdev_lock(device->cdev));
813 }
814 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
815 spin_lock(get_ccwdev_lock(device->cdev));
Stefan Haberland9bfefde2015-12-15 11:00:51 +0100816 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100817 spin_unlock(get_ccwdev_lock(device->cdev));
818 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100819 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100820 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
821 spin_lock(get_ccwdev_lock(device->cdev));
Stefan Haberland9bfefde2015-12-15 11:00:51 +0100822 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100823 spin_unlock(get_ccwdev_lock(device->cdev));
824 }
825 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
826 spin_lock(get_ccwdev_lock(device->cdev));
Stefan Haberland9bfefde2015-12-15 11:00:51 +0100827 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100828 spin_unlock(get_ccwdev_lock(device->cdev));
829 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100830 }
831}
832
833static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
834{
835 struct alias_pav_group *pavgroup;
836 struct dasd_device *device;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100837
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100838 list_for_each_entry(device, &lcu->active_devices, alias_list) {
839 spin_lock(get_ccwdev_lock(device->cdev));
Stefan Weinhubereb6e1992009-12-07 12:51:51 +0100840 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100841 spin_unlock(get_ccwdev_lock(device->cdev));
842 }
843 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
844 spin_lock(get_ccwdev_lock(device->cdev));
Stefan Weinhubereb6e1992009-12-07 12:51:51 +0100845 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100846 spin_unlock(get_ccwdev_lock(device->cdev));
847 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100848 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100849 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
850 spin_lock(get_ccwdev_lock(device->cdev));
Stefan Weinhubereb6e1992009-12-07 12:51:51 +0100851 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100852 spin_unlock(get_ccwdev_lock(device->cdev));
853 }
854 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
855 spin_lock(get_ccwdev_lock(device->cdev));
Stefan Weinhubereb6e1992009-12-07 12:51:51 +0100856 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100857 spin_unlock(get_ccwdev_lock(device->cdev));
858 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100859 }
860}
861
/*
 * Work function that performs the summary unit check recovery for an LCU:
 * flush all alias devices, reset the summary unit check on the reporting
 * device, unstop and restart the devices, and schedule a re-read of the
 * alias configuration.
 *
 * Scheduled via lcu->suc_data.worker from
 * dasd_alias_handle_summary_unit_check(), which also took a device
 * reference that is dropped here.
 */
static void summary_unit_check_handling_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct summary_unit_check_work_data *suc_data;
	unsigned long flags;
	struct dasd_device *device;

	suc_data = container_of(work, struct summary_unit_check_work_data,
				worker);
	lcu = container_of(suc_data, struct alias_lcu, suc_data);
	device = suc_data->device;

	/* 1. flush alias devices */
	flush_all_alias_devices_on_lcu(lcu);

	/* 2. reset summary unit check */
	/* clear the stop bits on the reporting device under its ccw lock
	 * so the reset request below can actually be started */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	reset_summary_unit_check(lcu, device, suc_data->reason);

	spin_lock_irqsave(&lcu->lock, flags);
	_unstop_all_devices_on_lcu(lcu);
	_restart_all_base_devices_on_lcu(lcu);
	/* 3. read new alias configuration */
	_schedule_lcu_update(lcu, device);
	/* clearing suc_data.device allows the next summary unit check
	 * to be scheduled; drop the reference taken when this work
	 * was queued */
	lcu->suc_data.device = NULL;
	dasd_put_device(device);
	spin_unlock_irqrestore(&lcu->lock, flags);
}
893
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100894void dasd_alias_handle_summary_unit_check(struct work_struct *work)
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100895{
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100896 struct dasd_device *device = container_of(work, struct dasd_device,
897 suc_work);
Sebastian Ott543691a42016-03-04 10:34:05 +0100898 struct dasd_eckd_private *private = device->private;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100899 struct alias_lcu *lcu;
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100900 unsigned long flags;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100901
902 lcu = private->lcu;
903 if (!lcu) {
Stefan Haberlandfc19f382009-03-26 15:23:49 +0100904 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100905 "device not ready to handle summary"
906 " unit check (no lcu structure)");
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100907 goto out;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100908 }
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100909 spin_lock_irqsave(&lcu->lock, flags);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100910 /* If this device is about to be removed just return and wait for
911 * the next interrupt on a different device
912 */
913 if (list_empty(&device->alias_list)) {
Stefan Haberlandfc19f382009-03-26 15:23:49 +0100914 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100915 "device is in offline processing,"
916 " don't do summary unit check handling");
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100917 goto out_unlock;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100918 }
919 if (lcu->suc_data.device) {
920 /* already scheduled or running */
Stefan Haberlandfc19f382009-03-26 15:23:49 +0100921 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100922 "previous instance of summary unit check worker"
923 " still pending");
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100924 goto out_unlock;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100925 }
Stefan Haberland9bfefde2015-12-15 11:00:51 +0100926 _stop_all_devices_on_lcu(lcu);
927 /* prepare for lcu_update */
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100928 lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
929 lcu->suc_data.reason = private->suc_reason;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100930 lcu->suc_data.device = device;
Stefan Haberland9d862ab2015-12-15 10:45:05 +0100931 dasd_get_device(device);
Stefan Haberland9d862ab2015-12-15 10:45:05 +0100932 if (!schedule_work(&lcu->suc_data.worker))
933 dasd_put_device(device);
Stefan Haberland59a9ed52016-02-23 10:15:27 +0100934out_unlock:
935 spin_unlock_irqrestore(&lcu->lock, flags);
936out:
937 clear_bit(DASD_FLAG_SUC, &device->flags);
938 dasd_put_device(device);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +0100939};