blob: 8cf9905d484be5fc120b2e05a65103f0acad95c9 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * drivers/s390/cio/chsc.c
3 * S/390 common I/O routines -- channel subsystem call
Linus Torvalds1da177e2005-04-16 15:20:36 -07004 *
5 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
6 * IBM Corporation
7 * Author(s): Ingo Adlung (adlung@de.ibm.com)
Cornelia Huck4ce3b302006-01-14 13:21:04 -08008 * Cornelia Huck (cornelia.huck@de.ibm.com)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 * Arnd Bergmann (arndb@de.ibm.com)
10 */
11
12#include <linux/module.h>
13#include <linux/config.h>
14#include <linux/slab.h>
15#include <linux/init.h>
16#include <linux/device.h>
17
18#include <asm/cio.h>
19
20#include "css.h"
21#include "cio.h"
22#include "cio_debug.h"
23#include "ioasm.h"
24#include "chsc.h"
25
/* Page-sized buffer for the store-event-information (SEI) chsc command;
 * allocated once by chsc_alloc_sei_area() at subsys init time. */
Linus Torvalds1da177e2005-04-16 15:20:36 -070026static void *sei_page;
 27
/* Forward declaration: allocate and register a channel path device. */
 28static int new_channel_path(int chpid);
29
30static inline void
31set_chp_logically_online(int chp, int onoff)
32{
Cornelia Hucka28c6942006-01-06 00:19:23 -080033 css[0]->chps[chp]->state = onoff;
Linus Torvalds1da177e2005-04-16 15:20:36 -070034}
35
36static int
37get_chp_status(int chp)
38{
Cornelia Hucka28c6942006-01-06 00:19:23 -080039 return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
Linus Torvalds1da177e2005-04-16 15:20:36 -070040}
41
42void
43chsc_validate_chpids(struct subchannel *sch)
44{
45 int mask, chp;
46
47 for (chp = 0; chp <= 7; chp++) {
48 mask = 0x80 >> chp;
49 if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
50 /* disable using this path */
51 sch->opm &= ~mask;
52 }
53}
54
55void
56chpid_is_actually_online(int chp)
57{
58 int state;
59
60 state = get_chp_status(chp);
61 if (state < 0) {
62 need_rescan = 1;
63 queue_work(slow_path_wq, &slow_path_work);
64 } else
65 WARN_ON(!state);
66}
67
68/* FIXME: this is _always_ called for every subchannel. shouldn't we
69 * process more than one at a time? */
70static int
71chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
72{
73 int ccode, j;
74
75 struct {
76 struct chsc_header request;
Cornelia Huckfb6958a2006-01-06 00:19:25 -080077 u16 reserved1a:10;
78 u16 ssid:2;
79 u16 reserved1b:4;
Linus Torvalds1da177e2005-04-16 15:20:36 -070080 u16 f_sch; /* first subchannel */
81 u16 reserved2;
82 u16 l_sch; /* last subchannel */
83 u32 reserved3;
84 struct chsc_header response;
85 u32 reserved4;
86 u8 sch_valid : 1;
87 u8 dev_valid : 1;
88 u8 st : 3; /* subchannel type */
89 u8 zeroes : 3;
90 u8 unit_addr; /* unit address */
91 u16 devno; /* device number */
92 u8 path_mask;
93 u8 fla_valid_mask;
94 u16 sch; /* subchannel */
95 u8 chpid[8]; /* chpids 0-7 */
96 u16 fla[8]; /* full link addresses 0-7 */
97 } *ssd_area;
98
99 ssd_area = page;
100
101 ssd_area->request = (struct chsc_header) {
102 .length = 0x0010,
103 .code = 0x0004,
104 };
105
Cornelia Huckfb6958a2006-01-06 00:19:25 -0800106 ssd_area->ssid = sch->schid.ssid;
Cornelia Hucka8237fc2006-01-06 00:19:21 -0800107 ssd_area->f_sch = sch->schid.sch_no;
108 ssd_area->l_sch = sch->schid.sch_no;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109
110 ccode = chsc(ssd_area);
111 if (ccode > 0) {
112 pr_debug("chsc returned with ccode = %d\n", ccode);
113 return (ccode == 3) ? -ENODEV : -EBUSY;
114 }
115
116 switch (ssd_area->response.code) {
117 case 0x0001: /* everything ok */
118 break;
119 case 0x0002:
120 CIO_CRW_EVENT(2, "Invalid command!\n");
121 return -EINVAL;
122 case 0x0003:
123 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
124 return -EINVAL;
125 case 0x0004:
126 CIO_CRW_EVENT(2, "Model does not provide ssd\n");
127 return -EOPNOTSUPP;
128 default:
129 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
130 ssd_area->response.code);
131 return -EIO;
132 }
133
134 /*
135 * ssd_area->st stores the type of the detected
136 * subchannel, with the following definitions:
137 *
138 * 0: I/O subchannel: All fields have meaning
139 * 1: CHSC subchannel: Only sch_val, st and sch
140 * have meaning
141 * 2: Message subchannel: All fields except unit_addr
142 * have meaning
143 * 3: ADM subchannel: Only sch_val, st and sch
144 * have meaning
145 *
146 * Other types are currently undefined.
147 */
148 if (ssd_area->st > 3) { /* uhm, that looks strange... */
149 CIO_CRW_EVENT(0, "Strange subchannel type %d"
Cornelia Huckfb6958a2006-01-06 00:19:25 -0800150 " for sch 0.%x.%04x\n", ssd_area->st,
151 sch->schid.ssid, sch->schid.sch_no);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700152 /*
153 * There may have been a new subchannel type defined in the
154 * time since this code was written; since we don't know which
155 * fields have meaning and what to do with it we just jump out
156 */
157 return 0;
158 } else {
159 const char *type[4] = {"I/O", "chsc", "message", "ADM"};
Cornelia Huckfb6958a2006-01-06 00:19:25 -0800160 CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
161 sch->schid.ssid, sch->schid.sch_no,
162 type[ssd_area->st]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163
164 sch->ssd_info.valid = 1;
165 sch->ssd_info.type = ssd_area->st;
166 }
167
168 if (ssd_area->st == 0 || ssd_area->st == 2) {
169 for (j = 0; j < 8; j++) {
170 if (!((0x80 >> j) & ssd_area->path_mask &
171 ssd_area->fla_valid_mask))
172 continue;
173 sch->ssd_info.chpid[j] = ssd_area->chpid[j];
174 sch->ssd_info.fla[j] = ssd_area->fla[j];
175 }
176 }
177 return 0;
178}
179
180int
181css_get_ssd_info(struct subchannel *sch)
182{
183 int ret;
184 void *page;
185
186 page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
187 if (!page)
188 return -ENOMEM;
189 spin_lock_irq(&sch->lock);
190 ret = chsc_get_sch_desc_irq(sch, page);
191 if (ret) {
192 static int cio_chsc_err_msg;
193
194 if (!cio_chsc_err_msg) {
195 printk(KERN_ERR
196 "chsc_get_sch_descriptions:"
197 " Error %d while doing chsc; "
198 "processing some machine checks may "
199 "not work\n", ret);
200 cio_chsc_err_msg = 1;
201 }
202 }
203 spin_unlock_irq(&sch->lock);
204 free_page((unsigned long)page);
205 if (!ret) {
206 int j, chpid;
207 /* Allocate channel path structures, if needed. */
208 for (j = 0; j < 8; j++) {
209 chpid = sch->ssd_info.chpid[j];
210 if (chpid && (get_chp_status(chpid) < 0))
211 new_channel_path(chpid);
212 }
213 }
214 return ret;
215}
216
217static int
218s390_subchannel_remove_chpid(struct device *dev, void *data)
219{
220 int j;
221 int mask;
222 struct subchannel *sch;
Cornelia Hucka28c6942006-01-06 00:19:23 -0800223 struct channel_path *chpid;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224 struct schib schib;
225
226 sch = to_subchannel(dev);
227 chpid = data;
228 for (j = 0; j < 8; j++)
Cornelia Hucka28c6942006-01-06 00:19:23 -0800229 if (sch->schib.pmcw.chpid[j] == chpid->id)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230 break;
231 if (j >= 8)
232 return 0;
233
234 mask = 0x80 >> j;
Cornelia Huckc48d8652006-02-11 17:55:57 -0800235 spin_lock_irq(&sch->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700236
Cornelia Hucka8237fc2006-01-06 00:19:21 -0800237 stsch(sch->schid, &schib);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700238 if (!schib.pmcw.dnv)
239 goto out_unreg;
240 memcpy(&sch->schib, &schib, sizeof(struct schib));
241 /* Check for single path devices. */
242 if (sch->schib.pmcw.pim == 0x80)
243 goto out_unreg;
244 if (sch->vpm == mask)
245 goto out_unreg;
246
247 if ((sch->schib.scsw.actl & (SCSW_ACTL_CLEAR_PEND |
248 SCSW_ACTL_HALT_PEND |
249 SCSW_ACTL_START_PEND |
250 SCSW_ACTL_RESUME_PEND)) &&
251 (sch->schib.pmcw.lpum == mask)) {
252 int cc = cio_cancel(sch);
253
254 if (cc == -ENODEV)
255 goto out_unreg;
256
257 if (cc == -EINVAL) {
258 cc = cio_clear(sch);
259 if (cc == -ENODEV)
260 goto out_unreg;
261 /* Call handler. */
262 if (sch->driver && sch->driver->termination)
263 sch->driver->termination(&sch->dev);
264 goto out_unlock;
265 }
266 } else if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
267 (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
268 (sch->schib.pmcw.lpum == mask)) {
269 int cc;
270
271 cc = cio_clear(sch);
272 if (cc == -ENODEV)
273 goto out_unreg;
274 /* Call handler. */
275 if (sch->driver && sch->driver->termination)
276 sch->driver->termination(&sch->dev);
277 goto out_unlock;
278 }
279
280 /* trigger path verification. */
281 if (sch->driver && sch->driver->verify)
282 sch->driver->verify(&sch->dev);
283out_unlock:
Cornelia Huckc48d8652006-02-11 17:55:57 -0800284 spin_unlock_irq(&sch->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700285 return 0;
286out_unreg:
Cornelia Huckc48d8652006-02-11 17:55:57 -0800287 spin_unlock_irq(&sch->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700288 sch->lpm = 0;
Cornelia Hucka8237fc2006-01-06 00:19:21 -0800289 if (css_enqueue_subchannel_slow(sch->schid)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290 css_clear_subchannel_slow_list();
291 need_rescan = 1;
292 }
293 return 0;
294}
295
296static inline void
297s390_set_chpid_offline( __u8 chpid)
298{
299 char dbf_txt[15];
Cornelia Hucka28c6942006-01-06 00:19:23 -0800300 struct device *dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700301
302 sprintf(dbf_txt, "chpr%x", chpid);
303 CIO_TRACE_EVENT(2, dbf_txt);
304
305 if (get_chp_status(chpid) <= 0)
306 return;
Cornelia Hucka28c6942006-01-06 00:19:23 -0800307 dev = get_device(&css[0]->chps[chpid]->dev);
308 bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309 s390_subchannel_remove_chpid);
310
311 if (need_rescan || css_slow_subchannels_exist())
312 queue_work(slow_path_wq, &slow_path_work);
Cornelia Hucka28c6942006-01-06 00:19:23 -0800313 put_device(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700314}
315
/*
 * Parameters for resource-accessibility processing: the channel path
 * that became accessible, plus an optional (partial) link address and
 * the mask selecting how much of the fla is significant (0xffff for a
 * full link address, 0xff00 for a link address, 0 for none).
 */
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800316struct res_acc_data {
 317 struct channel_path *chp;
 318 u32 fla_mask;
 319 u16 fla;
 320};
321
Linus Torvalds1da177e2005-04-16 15:20:36 -0700322static int
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800323s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324{
325 int found;
326 int chp;
327 int ccode;
328
329 found = 0;
330 for (chp = 0; chp <= 7; chp++)
331 /*
332 * check if chpid is in information updated by ssd
333 */
334 if (sch->ssd_info.valid &&
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800335 sch->ssd_info.chpid[chp] == res_data->chp->id &&
336 (sch->ssd_info.fla[chp] & res_data->fla_mask)
337 == res_data->fla) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338 found = 1;
339 break;
340 }
341
342 if (found == 0)
343 return 0;
344
345 /*
346 * Do a stsch to update our subchannel structure with the
347 * new path information and eventually check for logically
348 * offline chpids.
349 */
Cornelia Hucka8237fc2006-01-06 00:19:21 -0800350 ccode = stsch(sch->schid, &sch->schib);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700351 if (ccode > 0)
352 return 0;
353
354 return 0x80 >> chp;
355}
356
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800357static inline int
358s390_process_res_acc_new_sch(struct subchannel_id schid)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700359{
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800360 struct schib schib;
361 int ret;
362 /*
363 * We don't know the device yet, but since a path
364 * may be available now to the device we'll have
365 * to do recognition again.
366 * Since we don't have any idea about which chpid
367 * that beast may be on we'll have to do a stsch
368 * on all devices, grr...
369 */
Cornelia Huckfb6958a2006-01-06 00:19:25 -0800370 if (stsch_err(schid, &schib))
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800371 /* We're through */
372 return need_rescan ? -EAGAIN : -ENXIO;
373
374 /* Put it on the slow path. */
375 ret = css_enqueue_subchannel_slow(schid);
376 if (ret) {
377 css_clear_subchannel_slow_list();
378 need_rescan = 1;
379 return -EAGAIN;
380 }
381 return 0;
382}
383
384static int
385__s390_process_res_acc(struct subchannel_id schid, void *data)
386{
387 int chp_mask, old_lpm;
388 struct res_acc_data *res_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700389 struct subchannel *sch;
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800390
391 res_data = (struct res_acc_data *)data;
392 sch = get_subchannel_by_schid(schid);
393 if (!sch)
394 /* Check if a subchannel is newly available. */
395 return s390_process_res_acc_new_sch(schid);
396
397 spin_lock_irq(&sch->lock);
398
399 chp_mask = s390_process_res_acc_sch(res_data, sch);
400
401 if (chp_mask == 0) {
402 spin_unlock_irq(&sch->lock);
403 return 0;
404 }
405 old_lpm = sch->lpm;
406 sch->lpm = ((sch->schib.pmcw.pim &
407 sch->schib.pmcw.pam &
408 sch->schib.pmcw.pom)
409 | chp_mask) & sch->opm;
410 if (!old_lpm && sch->lpm)
411 device_trigger_reprobe(sch);
412 else if (sch->driver && sch->driver->verify)
413 sch->driver->verify(&sch->dev);
414
415 spin_unlock_irq(&sch->lock);
416 put_device(&sch->dev);
417 return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
418}
419
420
421static int
422s390_process_res_acc (struct res_acc_data *res_data)
423{
Cornelia Hucka8237fc2006-01-06 00:19:21 -0800424 int rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700425 char dbf_txt[15];
426
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800427 sprintf(dbf_txt, "accpr%x", res_data->chp->id);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700428 CIO_TRACE_EVENT( 2, dbf_txt);
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800429 if (res_data->fla != 0) {
430 sprintf(dbf_txt, "fla%x", res_data->fla);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700431 CIO_TRACE_EVENT( 2, dbf_txt);
432 }
433
434 /*
435 * I/O resources may have become accessible.
436 * Scan through all subchannels that may be concerned and
437 * do a validation on those.
438 * The more information we have (info), the less scanning
439 * will we have to do.
440 */
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800441 rc = for_each_subchannel(__s390_process_res_acc, res_data);
442 if (css_slow_subchannels_exist())
443 rc = -EAGAIN;
444 else if (rc != -EAGAIN)
445 rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700446 return rc;
447}
448
449static int
450__get_chpid_from_lir(void *data)
451{
452 struct lir {
453 u8 iq;
454 u8 ic;
455 u16 sci;
456 /* incident-node descriptor */
457 u32 indesc[28];
458 /* attached-node descriptor */
459 u32 andesc[28];
460 /* incident-specific information */
461 u32 isinfo[28];
462 } *lir;
463
464 lir = (struct lir*) data;
465 if (!(lir->iq&0x80))
466 /* NULL link incident record */
467 return -EINVAL;
468 if (!(lir->indesc[0]&0xc0000000))
469 /* node descriptor not valid */
470 return -EINVAL;
471 if (!(lir->indesc[0]&0x10000000))
472 /* don't handle device-type nodes - FIXME */
473 return -EINVAL;
474 /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
475
476 return (u16) (lir->indesc[0]&0x000000ff);
477}
478
479int
480chsc_process_crw(void)
481{
482 int chpid, ret;
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800483 struct res_acc_data res_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700484 struct {
485 struct chsc_header request;
486 u32 reserved1;
487 u32 reserved2;
488 u32 reserved3;
489 struct chsc_header response;
490 u32 reserved4;
491 u8 flags;
492 u8 vf; /* validity flags */
493 u8 rs; /* reporting source */
494 u8 cc; /* content code */
495 u16 fla; /* full link address */
496 u16 rsid; /* reporting source id */
497 u32 reserved5;
498 u32 reserved6;
499 u32 ccdf[96]; /* content-code dependent field */
500 /* ccdf has to be big enough for a link-incident record */
501 } *sei_area;
502
503 if (!sei_page)
504 return 0;
505 /*
506 * build the chsc request block for store event information
507 * and do the call
508 * This function is only called by the machine check handler thread,
509 * so we don't need locking for the sei_page.
510 */
511 sei_area = sei_page;
512
513 CIO_TRACE_EVENT( 2, "prcss");
514 ret = 0;
515 do {
516 int ccode, status;
Cornelia Hucka28c6942006-01-06 00:19:23 -0800517 struct device *dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700518 memset(sei_area, 0, sizeof(*sei_area));
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800519 memset(&res_data, 0, sizeof(struct res_acc_data));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700520 sei_area->request = (struct chsc_header) {
521 .length = 0x0010,
522 .code = 0x000e,
523 };
524
525 ccode = chsc(sei_area);
526 if (ccode > 0)
527 return 0;
528
529 switch (sei_area->response.code) {
530 /* for debug purposes, check for problems */
531 case 0x0001:
532 CIO_CRW_EVENT(4, "chsc_process_crw: event information "
533 "successfully stored\n");
534 break; /* everything ok */
535 case 0x0002:
536 CIO_CRW_EVENT(2,
537 "chsc_process_crw: invalid command!\n");
538 return 0;
539 case 0x0003:
540 CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
541 "request block!\n");
542 return 0;
543 case 0x0005:
544 CIO_CRW_EVENT(2, "chsc_process_crw: no event "
545 "information stored\n");
546 return 0;
547 default:
548 CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
549 sei_area->response.code);
550 return 0;
551 }
552
553 /* Check if we might have lost some information. */
554 if (sei_area->flags & 0x40)
555 CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
556 "has been lost due to overflow!\n");
557
558 if (sei_area->rs != 4) {
559 CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
560 "(%04X) isn't a chpid!\n",
561 sei_area->rsid);
562 continue;
563 }
564
565 /* which kind of information was stored? */
566 switch (sei_area->cc) {
567 case 1: /* link incident*/
568 CIO_CRW_EVENT(4, "chsc_process_crw: "
569 "channel subsystem reports link incident,"
570 " reporting source is chpid %x\n",
571 sei_area->rsid);
572 chpid = __get_chpid_from_lir(sei_area->ccdf);
573 if (chpid < 0)
574 CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
575 __FUNCTION__);
576 else
577 s390_set_chpid_offline(chpid);
578 break;
579
580 case 2: /* i/o resource accessibiliy */
581 CIO_CRW_EVENT(4, "chsc_process_crw: "
582 "channel subsystem reports some I/O "
583 "devices may have become accessible\n");
584 pr_debug("Data received after sei: \n");
585 pr_debug("Validity flags: %x\n", sei_area->vf);
586
587 /* allocate a new channel path structure, if needed */
588 status = get_chp_status(sei_area->rsid);
589 if (status < 0)
590 new_channel_path(sei_area->rsid);
591 else if (!status)
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800592 break;
Cornelia Hucka28c6942006-01-06 00:19:23 -0800593 dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
594 res_data.chp = to_channelpath(dev);
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800595 pr_debug("chpid: %x", sei_area->rsid);
596 if ((sei_area->vf & 0xc0) != 0) {
597 res_data.fla = sei_area->fla;
598 if ((sei_area->vf & 0xc0) == 0xc0) {
599 pr_debug(" full link addr: %x",
600 sei_area->fla);
601 res_data.fla_mask = 0xffff;
602 } else {
603 pr_debug(" link addr: %x",
604 sei_area->fla);
605 res_data.fla_mask = 0xff00;
606 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700607 }
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800608 ret = s390_process_res_acc(&res_data);
609 pr_debug("\n\n");
Cornelia Hucka28c6942006-01-06 00:19:23 -0800610 put_device(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700611 break;
612
613 default: /* other stuff */
614 CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
615 sei_area->cc);
616 break;
617 }
618 } while (sei_area->flags & 0x80);
619 return ret;
620}
621
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800622static inline int
623__chp_add_new_sch(struct subchannel_id schid)
624{
625 struct schib schib;
626 int ret;
627
628 if (stsch(schid, &schib))
629 /* We're through */
630 return need_rescan ? -EAGAIN : -ENXIO;
631
632 /* Put it on the slow path. */
633 ret = css_enqueue_subchannel_slow(schid);
634 if (ret) {
635 css_clear_subchannel_slow_list();
636 need_rescan = 1;
637 return -EAGAIN;
638 }
639 return 0;
640}
641
642
643static int
644__chp_add(struct subchannel_id schid, void *data)
645{
646 int i;
647 struct channel_path *chp;
648 struct subchannel *sch;
649
650 chp = (struct channel_path *)data;
651 sch = get_subchannel_by_schid(schid);
652 if (!sch)
653 /* Check if the subchannel is now available. */
654 return __chp_add_new_sch(schid);
Cornelia Huckc48d8652006-02-11 17:55:57 -0800655 spin_lock_irq(&sch->lock);
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800656 for (i=0; i<8; i++)
657 if (sch->schib.pmcw.chpid[i] == chp->id) {
658 if (stsch(sch->schid, &sch->schib) != 0) {
659 /* Endgame. */
660 spin_unlock(&sch->lock);
661 return -ENXIO;
662 }
663 break;
664 }
665 if (i==8) {
666 spin_unlock(&sch->lock);
667 return 0;
668 }
669 sch->lpm = ((sch->schib.pmcw.pim &
670 sch->schib.pmcw.pam &
671 sch->schib.pmcw.pom)
672 | 0x80 >> i) & sch->opm;
673
674 if (sch->driver && sch->driver->verify)
675 sch->driver->verify(&sch->dev);
676
Cornelia Huckc48d8652006-02-11 17:55:57 -0800677 spin_unlock_irq(&sch->lock);
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800678 put_device(&sch->dev);
679 return 0;
680}
681
Linus Torvalds1da177e2005-04-16 15:20:36 -0700682static int
683chp_add(int chpid)
684{
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800685 int rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700686 char dbf_txt[15];
Cornelia Hucka28c6942006-01-06 00:19:23 -0800687 struct device *dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688
689 if (!get_chp_status(chpid))
690 return 0; /* no need to do the rest */
691
692 sprintf(dbf_txt, "cadd%x", chpid);
693 CIO_TRACE_EVENT(2, dbf_txt);
694
Cornelia Hucka28c6942006-01-06 00:19:23 -0800695 dev = get_device(&css[0]->chps[chpid]->dev);
696 rc = for_each_subchannel(__chp_add, to_channelpath(dev));
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800697 if (css_slow_subchannels_exist())
698 rc = -EAGAIN;
699 if (rc != -EAGAIN)
700 rc = 0;
Cornelia Hucka28c6942006-01-06 00:19:23 -0800701 put_device(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700702 return rc;
703}
704
/*
 * Handling of crw machine checks with channel path source.
 */
int
chp_process_crw(int chpid, int on)
{
	if (!on) {
		/* Path has gone. We use the link incident routine.*/
		s390_set_chpid_offline(chpid);
		return 0; /* De-register is async anyway. */
	}
	/*
	 * Path has come. Allocate a new channel path structure,
	 * if needed.
	 */
	if (get_chp_status(chpid) < 0)
		new_channel_path(chpid);
	/* Avoid the extra overhead in process_rec_acc. */
	return chp_add(chpid);
}
725
726static inline int
727__check_for_io_and_kill(struct subchannel *sch, int index)
728{
729 int cc;
730
731 if (!device_is_online(sch))
732 /* cio could be doing I/O. */
733 return 0;
Cornelia Hucka8237fc2006-01-06 00:19:21 -0800734 cc = stsch(sch->schid, &sch->schib);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700735 if (cc)
736 return 0;
737 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
738 device_set_waiting(sch);
739 return 1;
740 }
741 return 0;
742}
743
744static inline void
745__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
746{
747 int chp, old_lpm;
748 unsigned long flags;
749
750 if (!sch->ssd_info.valid)
751 return;
752
753 spin_lock_irqsave(&sch->lock, flags);
754 old_lpm = sch->lpm;
755 for (chp = 0; chp < 8; chp++) {
756 if (sch->ssd_info.chpid[chp] != chpid)
757 continue;
758
759 if (on) {
760 sch->opm |= (0x80 >> chp);
761 sch->lpm |= (0x80 >> chp);
762 if (!old_lpm)
763 device_trigger_reprobe(sch);
764 else if (sch->driver && sch->driver->verify)
765 sch->driver->verify(&sch->dev);
766 } else {
767 sch->opm &= ~(0x80 >> chp);
768 sch->lpm &= ~(0x80 >> chp);
769 /*
770 * Give running I/O a grace period in which it
771 * can successfully terminate, even using the
772 * just varied off path. Then kill it.
773 */
774 if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
Cornelia Hucka8237fc2006-01-06 00:19:21 -0800775 if (css_enqueue_subchannel_slow(sch->schid)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700776 css_clear_subchannel_slow_list();
777 need_rescan = 1;
778 }
779 } else if (sch->driver && sch->driver->verify)
780 sch->driver->verify(&sch->dev);
781 }
782 break;
783 }
784 spin_unlock_irqrestore(&sch->lock, flags);
785}
786
787static int
788s390_subchannel_vary_chpid_off(struct device *dev, void *data)
789{
790 struct subchannel *sch;
791 __u8 *chpid;
792
793 sch = to_subchannel(dev);
794 chpid = data;
795
796 __s390_subchannel_vary_chpid(sch, *chpid, 0);
797 return 0;
798}
799
800static int
801s390_subchannel_vary_chpid_on(struct device *dev, void *data)
802{
803 struct subchannel *sch;
804 __u8 *chpid;
805
806 sch = to_subchannel(dev);
807 chpid = data;
808
809 __s390_subchannel_vary_chpid(sch, *chpid, 1);
810 return 0;
811}
812
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800813static int
814__s390_vary_chpid_on(struct subchannel_id schid, void *data)
815{
816 struct schib schib;
817 struct subchannel *sch;
818
819 sch = get_subchannel_by_schid(schid);
820 if (sch) {
821 put_device(&sch->dev);
822 return 0;
823 }
Cornelia Huckfb6958a2006-01-06 00:19:25 -0800824 if (stsch_err(schid, &schib))
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800825 /* We're through */
826 return -ENXIO;
827 /* Put it on the slow path. */
828 if (css_enqueue_subchannel_slow(schid)) {
829 css_clear_subchannel_slow_list();
830 need_rescan = 1;
831 return -EAGAIN;
832 }
833 return 0;
834}
835
Linus Torvalds1da177e2005-04-16 15:20:36 -0700836/*
837 * Function: s390_vary_chpid
838 * Varies the specified chpid online or offline
839 */
840static int
841s390_vary_chpid( __u8 chpid, int on)
842{
843 char dbf_text[15];
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800844 int status;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700845
846 sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
847 CIO_TRACE_EVENT( 2, dbf_text);
848
849 status = get_chp_status(chpid);
850 if (status < 0) {
851 printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
852 return -EINVAL;
853 }
854
855 if (!on && !status) {
856 printk(KERN_ERR "chpid %x is already offline\n", chpid);
857 return -EINVAL;
858 }
859
860 set_chp_logically_online(chpid, on);
861
862 /*
863 * Redo PathVerification on the devices the chpid connects to
864 */
865
866 bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
867 s390_subchannel_vary_chpid_on :
868 s390_subchannel_vary_chpid_off);
Cornelia Huckf97a56f2006-01-06 00:19:22 -0800869 if (on)
870 /* Scan for new devices on varied on path. */
871 for_each_subchannel(__s390_vary_chpid_on, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700872 if (need_rescan || css_slow_subchannels_exist())
873 queue_work(slow_path_wq, &slow_path_work);
874 return 0;
875}
876
877/*
878 * Files for the channel path entries.
879 */
880static ssize_t
Yani Ioannou3fd3c0a2005-05-17 06:43:27 -0400881chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700882{
883 struct channel_path *chp = container_of(dev, struct channel_path, dev);
884
885 if (!chp)
886 return 0;
887 return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
888 sprintf(buf, "offline\n"));
889}
890
891static ssize_t
Yani Ioannou3fd3c0a2005-05-17 06:43:27 -0400892chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700893{
894 struct channel_path *cp = container_of(dev, struct channel_path, dev);
895 char cmd[10];
896 int num_args;
897 int error;
898
899 num_args = sscanf(buf, "%5s", cmd);
900 if (!num_args)
901 return count;
902
903 if (!strnicmp(cmd, "on", 2))
904 error = s390_vary_chpid(cp->id, 1);
905 else if (!strnicmp(cmd, "off", 3))
906 error = s390_vary_chpid(cp->id, 0);
907 else
908 error = -EINVAL;
909
910 return error < 0 ? error : count;
911
912}
913
/* sysfs attribute "status" (rw): vary the channel path on or off. */
 914static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
915
916static ssize_t
Yani Ioannou3fd3c0a2005-05-17 06:43:27 -0400917chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700918{
919 struct channel_path *chp = container_of(dev, struct channel_path, dev);
920
921 if (!chp)
922 return 0;
923 return sprintf(buf, "%x\n", chp->desc.desc);
924}
925
/* sysfs attribute "type" (ro): channel path type. */
 926static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
 927
/* Attributes created together for every channel path device. */
 928static struct attribute * chp_attrs[] = {
 929 &dev_attr_status.attr,
 930 &dev_attr_type.attr,
 931 NULL,
 932};
 933
 934static struct attribute_group chp_attr_group = {
 935 .attrs = chp_attrs,
 936};
937
938static void
939chp_release(struct device *dev)
940{
941 struct channel_path *cp;
942
943 cp = container_of(dev, struct channel_path, dev);
944 kfree(cp);
945}
946
947static int
948chsc_determine_channel_path_description(int chpid,
949 struct channel_path_desc *desc)
950{
951 int ccode, ret;
952
953 struct {
954 struct chsc_header request;
955 u32 : 24;
956 u32 first_chpid : 8;
957 u32 : 24;
958 u32 last_chpid : 8;
959 u32 zeroes1;
960 struct chsc_header response;
961 u32 zeroes2;
962 struct channel_path_desc desc;
963 } *scpd_area;
964
965 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
966 if (!scpd_area)
967 return -ENOMEM;
968
969 scpd_area->request = (struct chsc_header) {
970 .length = 0x0010,
971 .code = 0x0002,
972 };
973
974 scpd_area->first_chpid = chpid;
975 scpd_area->last_chpid = chpid;
976
977 ccode = chsc(scpd_area);
978 if (ccode > 0) {
979 ret = (ccode == 3) ? -ENODEV : -EBUSY;
980 goto out;
981 }
982
983 switch (scpd_area->response.code) {
984 case 0x0001: /* Success. */
985 memcpy(desc, &scpd_area->desc,
986 sizeof(struct channel_path_desc));
987 ret = 0;
988 break;
989 case 0x0003: /* Invalid block. */
990 case 0x0007: /* Invalid format. */
991 case 0x0008: /* Other invalid block. */
992 CIO_CRW_EVENT(2, "Error in chsc request block!\n");
993 ret = -EINVAL;
994 break;
995 case 0x0004: /* Command not provided in model. */
996 CIO_CRW_EVENT(2, "Model does not provide scpd\n");
997 ret = -EOPNOTSUPP;
998 break;
999 default:
1000 CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
1001 scpd_area->response.code);
1002 ret = -EIO;
1003 }
1004out:
1005 free_page((unsigned long)scpd_area);
1006 return ret;
1007}
1008
1009/*
1010 * Entries for chpids on the system bus.
1011 * This replaces /proc/chpids.
1012 */
1013static int
1014new_channel_path(int chpid)
1015{
1016 struct channel_path *chp;
1017 int ret;
1018
1019 chp = kmalloc(sizeof(struct channel_path), GFP_KERNEL);
1020 if (!chp)
1021 return -ENOMEM;
1022 memset(chp, 0, sizeof(struct channel_path));
1023
1024 /* fill in status, etc. */
1025 chp->id = chpid;
1026 chp->state = 1;
1027 chp->dev = (struct device) {
Cornelia Hucka28c6942006-01-06 00:19:23 -08001028 .parent = &css[0]->device,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001029 .release = chp_release,
1030 };
1031 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
1032
1033 /* Obtain channel path description and fill it in. */
1034 ret = chsc_determine_channel_path_description(chpid, &chp->desc);
1035 if (ret)
1036 goto out_free;
1037
1038 /* make it known to the system */
1039 ret = device_register(&chp->dev);
1040 if (ret) {
1041 printk(KERN_WARNING "%s: could not register %02x\n",
1042 __func__, chpid);
1043 goto out_free;
1044 }
1045 ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
1046 if (ret) {
1047 device_unregister(&chp->dev);
1048 goto out_free;
1049 } else
Cornelia Hucka28c6942006-01-06 00:19:23 -08001050 css[0]->chps[chpid] = chp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001051 return ret;
1052out_free:
1053 kfree(chp);
1054 return ret;
1055}
1056
1057void *
1058chsc_get_chp_desc(struct subchannel *sch, int chp_no)
1059{
1060 struct channel_path *chp;
1061 struct channel_path_desc *desc;
1062
Cornelia Hucka28c6942006-01-06 00:19:23 -08001063 chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001064 if (!chp)
1065 return NULL;
1066 desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
1067 if (!desc)
1068 return NULL;
1069 memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
1070 return desc;
1071}
1072
1073
1074static int __init
1075chsc_alloc_sei_area(void)
1076{
1077 sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1078 if (!sei_page)
1079 printk(KERN_WARNING"Can't allocate page for processing of " \
1080 "chsc machine checks!\n");
1081 return (sei_page ? 0 : -ENOMEM);
1082}
1083
Cornelia Huckfb6958a2006-01-06 00:19:25 -08001084int __init
1085chsc_enable_facility(int operation_code)
1086{
1087 int ret;
1088 struct {
1089 struct chsc_header request;
1090 u8 reserved1:4;
1091 u8 format:4;
1092 u8 reserved2;
1093 u16 operation_code;
1094 u32 reserved3;
1095 u32 reserved4;
1096 u32 operation_data_area[252];
1097 struct chsc_header response;
1098 u32 reserved5:4;
1099 u32 format2:4;
1100 u32 reserved6:24;
1101 } *sda_area;
1102
1103 sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1104 if (!sda_area)
1105 return -ENOMEM;
1106 sda_area->request = (struct chsc_header) {
1107 .length = 0x0400,
1108 .code = 0x0031,
1109 };
1110 sda_area->operation_code = operation_code;
1111
1112 ret = chsc(sda_area);
1113 if (ret > 0) {
1114 ret = (ret == 3) ? -ENODEV : -EBUSY;
1115 goto out;
1116 }
1117 switch (sda_area->response.code) {
1118 case 0x0003: /* invalid request block */
1119 case 0x0007:
1120 ret = -EINVAL;
1121 break;
1122 case 0x0004: /* command not provided */
1123 case 0x0101: /* facility not provided */
1124 ret = -EOPNOTSUPP;
1125 break;
1126 }
1127 out:
1128 free_page((unsigned long)sda_area);
1129 return ret;
1130}
1131
/* Allocate the SEI page early, before machine checks can arrive. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132subsys_initcall(chsc_alloc_sei_area);
 1133
/* Filled in by chsc_determine_css_characteristics() at boot. */
 1134struct css_general_char css_general_characteristics;
 1135struct css_chsc_char css_chsc_characteristics;
1136
1137int __init
1138chsc_determine_css_characteristics(void)
1139{
1140 int result;
1141 struct {
1142 struct chsc_header request;
1143 u32 reserved1;
1144 u32 reserved2;
1145 u32 reserved3;
1146 struct chsc_header response;
1147 u32 reserved4;
1148 u32 general_char[510];
1149 u32 chsc_char[518];
1150 } *scsc_area;
1151
1152 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1153 if (!scsc_area) {
1154 printk(KERN_WARNING"cio: Was not able to determine available" \
1155 "CHSCs due to no memory.\n");
1156 return -ENOMEM;
1157 }
1158
1159 scsc_area->request = (struct chsc_header) {
1160 .length = 0x0010,
1161 .code = 0x0010,
1162 };
1163
1164 result = chsc(scsc_area);
1165 if (result) {
1166 printk(KERN_WARNING"cio: Was not able to determine " \
1167 "available CHSCs, cc=%i.\n", result);
1168 result = -EIO;
1169 goto exit;
1170 }
1171
1172 if (scsc_area->response.code != 1) {
1173 printk(KERN_WARNING"cio: Was not able to determine " \
1174 "available CHSCs.\n");
1175 result = -EIO;
1176 goto exit;
1177 }
1178 memcpy(&css_general_characteristics, scsc_area->general_char,
1179 sizeof(css_general_characteristics));
1180 memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
1181 sizeof(css_chsc_characteristics));
1182exit:
1183 free_page ((unsigned long) scsc_area);
1184 return result;
1185}
1186
/* Exported for other cio/device drivers that need the characteristics. */
 1187EXPORT_SYMBOL_GPL(css_general_characteristics);
 1188EXPORT_SYMBOL_GPL(css_chsc_characteristics);