/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *                            IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;

/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *        process more than one at a time? */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
        int ccode, j;

        struct {
                struct chsc_header request;
                u16 reserved1a:10;
                u16 ssid:2;
                u16 reserved1b:4;
                u16 f_sch;        /* first subchannel */
                u16 reserved2;
                u16 l_sch;        /* last subchannel */
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u8 sch_valid : 1;
                u8 dev_valid : 1;
                u8 st        : 3; /* subchannel type */
                u8 zeroes    : 3;
                u8 unit_addr;     /* unit address */
                u16 devno;        /* device number */
                u8 path_mask;
                u8 fla_valid_mask;
                u16 sch;          /* subchannel */
                u8 chpid[8];      /* chpids 0-7 */
                u16 fla[8];       /* full link addresses 0-7 */
        } __attribute__ ((packed)) *ssd_area;

        ssd_area = page;

        ssd_area->request.length = 0x0010;
        ssd_area->request.code = 0x0004;

        ssd_area->ssid = sch->schid.ssid;
        ssd_area->f_sch = sch->schid.sch_no;
        ssd_area->l_sch = sch->schid.sch_no;

        ccode = chsc(ssd_area);
        if (ccode > 0) {
                pr_debug("chsc returned with ccode = %d\n", ccode);
                return (ccode == 3) ? -ENODEV : -EBUSY;
        }

        switch (ssd_area->response.code) {
        case 0x0001: /* everything ok */
                break;
        case 0x0002:
                CIO_CRW_EVENT(2, "Invalid command!\n");
                return -EINVAL;
        case 0x0003:
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                return -EINVAL;
        case 0x0004:
                CIO_CRW_EVENT(2, "Model does not provide ssd\n");
                return -EOPNOTSUPP;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              ssd_area->response.code);
                return -EIO;
        }

        /*
         * ssd_area->st stores the type of the detected
         * subchannel, with the following definitions:
         *
         * 0: I/O subchannel:     All fields have meaning
         * 1: CHSC subchannel:    Only sch_val, st and sch
         *                        have meaning
         * 2: Message subchannel: All fields except unit_addr
         *                        have meaning
         * 3: ADM subchannel:     Only sch_val, st and sch
         *                        have meaning
         *
         * Other types are currently undefined.
         */
        if (ssd_area->st > 3) { /* uhm, that looks strange... */
                CIO_CRW_EVENT(0, "Strange subchannel type %d"
                              " for sch 0.%x.%04x\n", ssd_area->st,
                              sch->schid.ssid, sch->schid.sch_no);
                /*
                 * There may have been a new subchannel type defined in the
                 * time since this code was written; since we don't know which
                 * fields have meaning and what to do with it we just jump out
                 */
                return 0;
        } else {
                const char *type[4] = {"I/O", "chsc", "message", "ADM"};
                CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
                              sch->schid.ssid, sch->schid.sch_no,
                              type[ssd_area->st]);

                sch->ssd_info.valid = 1;
                sch->ssd_info.type = ssd_area->st;
        }

        if (ssd_area->st == 0 || ssd_area->st == 2) {
                for (j = 0; j < 8; j++) {
                        if (!((0x80 >> j) & ssd_area->path_mask &
                              ssd_area->fla_valid_mask))
                                continue;
                        sch->ssd_info.chpid[j] = ssd_area->chpid[j];
                        sch->ssd_info.fla[j] = ssd_area->fla[j];
                }
        }
        return 0;
}

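/*
 * Fetch the subchannel description (ssd) for @sch into sch->ssd_info via
 * chsc_get_sch_desc_irq() above and then make sure a channel-path structure
 * exists for every installed path that is not yet registered (chp_new()).
 */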
int
css_get_ssd_info(struct subchannel *sch)
{
        int ret;
        void *page;

        page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!page)
                return -ENOMEM;
        spin_lock_irq(sch->lock);
        ret = chsc_get_sch_desc_irq(sch, page);
        if (ret) {
                static int cio_chsc_err_msg;

                if (!cio_chsc_err_msg) {
                        printk(KERN_ERR
                               "chsc_get_sch_descriptions:"
                               " Error %d while doing chsc; "
                               "processing some machine checks may "
                               "not work\n", ret);
                        cio_chsc_err_msg = 1;
                }
        }
        spin_unlock_irq(sch->lock);
        free_page((unsigned long)page);
        if (!ret) {
                int j, mask;
                struct chp_id chpid;

                chp_id_init(&chpid);
                /* Allocate channel path structures, if needed. */
                for (j = 0; j < 8; j++) {
                        mask = 0x80 >> j;
                        chpid.id = sch->ssd_info.chpid[j];
                        if ((sch->schib.pmcw.pim & mask) &&
                            !chp_is_registered(chpid))
                                chp_new(chpid);
                }
        }
        return ret;
}

static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
        int j;
        int mask;
        struct subchannel *sch;
        struct chp_id *chpid;
        struct schib schib;

        sch = to_subchannel(dev);
        chpid = data;
        for (j = 0; j < 8; j++) {
                mask = 0x80 >> j;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[j] == chpid->id))
                        break;
        }
        if (j >= 8)
                return 0;

        spin_lock_irq(sch->lock);

        stsch(sch->schid, &schib);
        if (!schib.pmcw.dnv)
                goto out_unreg;
        memcpy(&sch->schib, &schib, sizeof(struct schib));
        /* Check for single path devices. */
        if (sch->schib.pmcw.pim == 0x80)
                goto out_unreg;

        if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
            (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
            (sch->schib.pmcw.lpum == mask)) {
                int cc;

                cc = cio_clear(sch);
                if (cc == -ENODEV)
                        goto out_unreg;
                /* Request retry of internal operation. */
                device_set_intretry(sch);
                /* Call handler. */
                if (sch->driver && sch->driver->termination)
                        sch->driver->termination(&sch->dev);
                goto out_unlock;
        }

        /* trigger path verification. */
        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);
        else if (sch->lpm == mask)
                goto out_unreg;
out_unlock:
        spin_unlock_irq(sch->lock);
        return 0;
out_unreg:
        spin_unlock_irq(sch->lock);
        sch->lpm = 0;
        if (css_enqueue_subchannel_slow(sch->schid)) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
        }
        return 0;
}

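/*
 * A channel path has gone offline: detach it from every subchannel that uses
 * it (s390_subchannel_remove_chpid() above) and, if subchannels were queued
 * for re-evaluation, kick the slow-path worker.
 */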
void chsc_chp_offline(struct chp_id chpid)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) <= 0)
                return;
        bus_for_each_dev(&css_bus_type, NULL, &chpid,
                         s390_subchannel_remove_chpid);

        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
}

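/*
 * Parameters of a resource-accessibility event: the reporting channel path
 * and an optional full-link-address filter (fla/fla_mask), as filled in by
 * chsc_process_sei_res_acc() below.
 */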
struct res_acc_data {
        struct chp_id chpid;
        u32 fla_mask;
        u16 fla;
};

static int s390_process_res_acc_sch(struct res_acc_data *res_data,
                                    struct subchannel *sch)
{
        int found;
        int chp;
        int ccode;

        found = 0;
        for (chp = 0; chp <= 7; chp++)
                /*
                 * check if chpid is in information updated by ssd
                 */
                if (sch->ssd_info.valid &&
                    sch->ssd_info.chpid[chp] == res_data->chpid.id &&
                    (sch->ssd_info.fla[chp] & res_data->fla_mask)
                    == res_data->fla) {
                        found = 1;
                        break;
                }

        if (found == 0)
                return 0;

        /*
         * Do a stsch to update our subchannel structure with the
         * new path information and possibly check for logically
         * offline chpids.
         */
        ccode = stsch(sch->schid, &sch->schib);
        if (ccode > 0)
                return 0;

        return 0x80 >> chp;
}

static int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
        struct schib schib;
        int ret;
        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
         * to do recognition again.
         * Since we don't have any idea about which chpid
         * that beast may be on we'll have to do a stsch
         * on all devices, grr...
         */
        if (stsch_err(schid, &schib))
                /* We're through */
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
        if (ret) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
        int chp_mask, old_lpm;
        struct res_acc_data *res_data;
        struct subchannel *sch;

        res_data = data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if a subchannel is newly available. */
                return s390_process_res_acc_new_sch(schid);

        spin_lock_irq(sch->lock);

        chp_mask = s390_process_res_acc_sch(res_data, sch);

        if (chp_mask == 0) {
                spin_unlock_irq(sch->lock);
                put_device(&sch->dev);
                return 0;
        }
        old_lpm = sch->lpm;
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | chp_mask) & sch->opm;
        if (!old_lpm && sch->lpm)
                device_trigger_reprobe(sch);
        else if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(sch->lock);
        put_device(&sch->dev);
        return 0;
}


static int
s390_process_res_acc (struct res_acc_data *res_data)
{
        int rc;
        char dbf_txt[15];

        sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
                res_data->chpid.id);
        CIO_TRACE_EVENT( 2, dbf_txt);
        if (res_data->fla != 0) {
                sprintf(dbf_txt, "fla%x", res_data->fla);
                CIO_TRACE_EVENT( 2, dbf_txt);
        }

        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have (info), the less scanning
         * we will have to do.
         */
        rc = for_each_subchannel(__s390_process_res_acc, res_data);
        if (css_slow_subchannels_exist())
                rc = -EAGAIN;
        else if (rc != -EAGAIN)
                rc = 0;
        return rc;
}

static int
__get_chpid_from_lir(void *data)
{
        struct lir {
                u8  iq;
                u8  ic;
                u16 sci;
                /* incident-node descriptor */
                u32 indesc[28];
                /* attached-node descriptor */
                u32 andesc[28];
                /* incident-specific information */
                u32 isinfo[28];
        } __attribute__ ((packed)) *lir;

        lir = data;
        if (!(lir->iq&0x80))
                /* NULL link incident record */
                return -EINVAL;
        if (!(lir->indesc[0]&0xc0000000))
                /* node descriptor not valid */
                return -EINVAL;
        if (!(lir->indesc[0]&0x10000000))
                /* don't handle device-type nodes - FIXME */
                return -EINVAL;
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

        return (u16) (lir->indesc[0]&0x000000ff);
}

struct chsc_sei_area {
        struct chsc_header request;
        u32 reserved1;
        u32 reserved2;
        u32 reserved3;
        struct chsc_header response;
        u32 reserved4;
        u8  flags;
        u8  vf;         /* validity flags */
        u8  rs;         /* reporting source */
        u8  cc;         /* content code */
        u16 fla;        /* full link address */
        u16 rsid;       /* reporting source id */
        u32 reserved5;
        u32 reserved6;
        u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
        /* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

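/*
 * Handle a link-incident event (content code 1): extract the affected
 * channel-path ID from the link-incident record in the ccdf and take that
 * channel path offline.
 */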
static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
        struct chp_id chpid;
        int id;

        CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
                      sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return 0;
        id = __get_chpid_from_lir(sei_area->ccdf);
        if (id < 0)
                CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
        else {
                chp_id_init(&chpid);
                chpid.id = id;
                chsc_chp_offline(chpid);
        }

        return 0;
}

static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
        struct res_acc_data res_data;
        struct chp_id chpid;
        int status;
        int rc;

        CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
                      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return 0;
        chp_id_init(&chpid);
        chpid.id = sei_area->rsid;
        /* allocate a new channel path structure, if needed */
        status = chp_get_status(chpid);
        if (status < 0)
                chp_new(chpid);
        else if (!status)
                return 0;
        memset(&res_data, 0, sizeof(struct res_acc_data));
        res_data.chpid = chpid;
        if ((sei_area->vf & 0xc0) != 0) {
                res_data.fla = sei_area->fla;
                if ((sei_area->vf & 0xc0) == 0xc0)
                        /* full link address */
                        res_data.fla_mask = 0xffff;
                else
                        /* link address */
                        res_data.fla_mask = 0xff00;
        }
        rc = s390_process_res_acc(&res_data);

        return rc;
}

struct chp_config_data {
        u8 map[32];
        u8 op;
        u8 pc;
};

static int chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
        struct chp_config_data *data;
        struct chp_id chpid;
        int num;

        CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
        if (sei_area->rs != 0)
                return 0;
        data = (struct chp_config_data *) &(sei_area->ccdf);
        chp_id_init(&chpid);
        for (num = 0; num <= __MAX_CHPID; num++) {
                if (!chp_test_bit(data->map, num))
                        continue;
                chpid.id = num;
                printk(KERN_WARNING "cio: processing configure event %d for "
                       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
                switch (data->op) {
                case 0:
                        chp_cfg_schedule(chpid, 1);
                        break;
                case 1:
                        chp_cfg_schedule(chpid, 0);
                        break;
                case 2:
                        chp_cfg_cancel_deconfigure(chpid);
                        break;
                }
        }

        return 0;
}

static int chsc_process_sei(struct chsc_sei_area *sei_area)
{
        int rc;

        /* Check if we might have lost some information. */
        if (sei_area->flags & 0x40)
                CIO_CRW_EVENT(2, "chsc: event overflow\n");
        /* which kind of information was stored? */
        rc = 0;
        switch (sei_area->cc) {
        case 1: /* link incident */
                rc = chsc_process_sei_link_incident(sei_area);
                break;
        case 2: /* i/o resource accessibility */
                rc = chsc_process_sei_res_acc(sei_area);
                break;
        case 8: /* channel-path-configuration notification */
                rc = chsc_process_sei_chp_config(sei_area);
                break;
        default: /* other stuff */
                CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
                              sei_area->cc);
                break;
        }

        return rc;
}

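/*
 * Process pending chsc events (called for channel report words): repeatedly
 * issue the store-event-information chsc command (code 0x000e) into sei_page
 * and dispatch each stored event by content code via chsc_process_sei(), as
 * long as the response flags indicate further pending events.
 */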
int chsc_process_crw(void)
{
        struct chsc_sei_area *sei_area;
        int ret;
        int rc;

        if (!sei_page)
                return 0;
        /* Access to sei_page is serialized through machine check handler
         * thread, so no need for locking. */
        sei_area = sei_page;

        CIO_TRACE_EVENT( 2, "prcss");
        ret = 0;
        do {
                memset(sei_area, 0, sizeof(*sei_area));
                sei_area->request.length = 0x0010;
                sei_area->request.code = 0x000e;
                if (chsc(sei_area))
                        break;

                if (sei_area->response.code == 0x0001) {
                        CIO_CRW_EVENT(4, "chsc: sei successful\n");
                        rc = chsc_process_sei(sei_area);
                        if (rc)
                                ret = rc;
                } else {
                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
                                      sei_area->response.code);
                        ret = 0;
                        break;
                }
        } while (sei_area->flags & 0x80);

        return ret;
}

static int
__chp_add_new_sch(struct subchannel_id schid)
{
        struct schib schib;
        int ret;

        if (stsch_err(schid, &schib))
                /* We're through */
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
        if (ret) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}


static int
__chp_add(struct subchannel_id schid, void *data)
{
        int i, mask;
        struct chp_id *chpid;
        struct subchannel *sch;

        chpid = data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if the subchannel is now available. */
                return __chp_add_new_sch(schid);
        spin_lock_irq(sch->lock);
        for (i=0; i<8; i++) {
                mask = 0x80 >> i;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[i] == chpid->id)) {
                        if (stsch(sch->schid, &sch->schib) != 0) {
                                /* Endgame. */
                                spin_unlock_irq(sch->lock);
                                return -ENXIO;
                        }
                        break;
                }
        }
        if (i==8) {
                spin_unlock_irq(sch->lock);
                return 0;
        }
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | mask) & sch->opm;

        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(sch->lock);
        put_device(&sch->dev);
        return 0;
}

int chsc_chp_online(struct chp_id chpid)
{
        int rc;
        char dbf_txt[15];

        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) == 0)
                return 0;
        rc = for_each_subchannel(__chp_add, &chpid);
        if (css_slow_subchannels_exist())
                rc = -EAGAIN;
        if (rc != -EAGAIN)
                rc = 0;
        return rc;
}

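/*
 * Return 1 if the subchannel currently has I/O active on the path with the
 * given index (its last-path-used mask matches that path), 0 otherwise.
 */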
static int check_for_io_on_path(struct subchannel *sch, int index)
{
        int cc;

        cc = stsch(sch->schid, &sch->schib);
        if (cc)
                return 0;
        if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
                return 1;
        return 0;
}

static void terminate_internal_io(struct subchannel *sch)
{
        if (cio_clear(sch)) {
                /* Recheck device in case clear failed. */
                sch->lpm = 0;
                if (device_trigger_verify(sch) != 0) {
                        if(css_enqueue_subchannel_slow(sch->schid)) {
                                css_clear_subchannel_slow_list();
                                need_rescan = 1;
                        }
                }
                return;
        }
        /* Request retry of internal operation. */
        device_set_intretry(sch);
        /* Call handler. */
        if (sch->driver && sch->driver->termination)
                sch->driver->termination(&sch->dev);
}

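/*
 * Adjust a subchannel's operational and logical path masks after channel
 * path @chpid has been varied on (@on != 0) or off (@on == 0), terminating
 * or re-verifying I/O on the affected path as needed.
 */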
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
                                         struct chp_id chpid, int on)
{
        int chp, old_lpm;
        unsigned long flags;

        if (!sch->ssd_info.valid)
                return;

        spin_lock_irqsave(sch->lock, flags);
        old_lpm = sch->lpm;
        for (chp = 0; chp < 8; chp++) {
                if (sch->ssd_info.chpid[chp] != chpid.id)
                        continue;

                if (on) {
                        sch->opm |= (0x80 >> chp);
                        sch->lpm |= (0x80 >> chp);
                        if (!old_lpm)
                                device_trigger_reprobe(sch);
                        else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(&sch->dev);
                        break;
                }
                sch->opm &= ~(0x80 >> chp);
                sch->lpm &= ~(0x80 >> chp);
                if (check_for_io_on_path(sch, chp)) {
                        if (device_is_online(sch))
                                /* Path verification is done after killing. */
                                device_kill_io(sch);
                        else
                                /* Kill and retry internal I/O. */
                                terminate_internal_io(sch);
                } else if (!sch->lpm) {
                        if (device_trigger_verify(sch) != 0) {
                                if (css_enqueue_subchannel_slow(sch->schid)) {
                                        css_clear_subchannel_slow_list();
                                        need_rescan = 1;
                                }
                        }
                } else if (sch->driver && sch->driver->verify)
                        sch->driver->verify(&sch->dev);
                break;
        }
        spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct chp_id *chpid;

        sch = to_subchannel(dev);
        chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 0);
        return 0;
}

static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct chp_id *chpid;

        sch = to_subchannel(dev);
        chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 1);
        return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
        struct schib schib;
        struct subchannel *sch;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                put_device(&sch->dev);
                return 0;
        }
        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;
        /* Put it on the slow path. */
        if (css_enqueue_subchannel_slow(schid)) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
        /*
         * Redo PathVerification on the devices the chpid connects to
         */

        bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
                         s390_subchannel_vary_chpid_on :
                         s390_subchannel_vary_chpid_off);
        if (on)
                /* Scan for new devices on varied on path. */
                for_each_subchannel(__s390_vary_chpid_on, NULL);
        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
        return 0;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
        int i;

        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
        int i, ret;

        ret = 0;
        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                ret = chp_add_cmg_attr(css->chps[i]);
                if (ret)
                        goto cleanup;
        }
        return ret;
cleanup:
        for (--i; i >= 0; i--) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
        return ret;
}

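/*
 * Issue the secm chsc command (request code 0x0016) to enable or disable
 * channel measurement, passing the two cub pages (cub_addr1/cub_addr2)
 * allocated by chsc_secm() below.
 */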
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
        struct {
                struct chsc_header request;
                u32 operation_code : 2;
                u32 : 30;
                u32 key : 4;
                u32 : 28;
                u32 zeroes1;
                u32 cub_addr1;
                u32 zeroes2;
                u32 cub_addr2;
                u32 reserved[13];
                struct chsc_header response;
                u32 status : 8;
                u32 : 4;
                u32 fmt : 4;
                u32 : 16;
        } __attribute__ ((packed)) *secm_area;
        int ret, ccode;

        secm_area = page;
        secm_area->request.length = 0x0050;
        secm_area->request.code = 0x0016;

        secm_area->key = PAGE_DEFAULT_KEY;
        secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
        secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

        secm_area->operation_code = enable ? 0 : 1;

        ccode = chsc(secm_area);
        if (ccode > 0)
                return (ccode == 3) ? -ENODEV : -EBUSY;

        switch (secm_area->response.code) {
        case 0x0001: /* Success. */
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide secm\n");
                ret = -EOPNOTSUPP;
                break;
        case 0x0102: /* cub addresses incorrect */
                CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
                ret = -EINVAL;
                break;
        case 0x0103: /* key error */
                CIO_CRW_EVENT(2, "Access key error in secm\n");
                ret = -EINVAL;
                break;
        case 0x0105: /* error while starting */
                CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
                ret = -EIO;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              secm_area->response.code);
                ret = -EIO;
        }
        return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
        void *secm_area;
        int ret;

        secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!secm_area)
                return -ENOMEM;

        mutex_lock(&css->mutex);
        if (enable && !css->cm_enabled) {
                css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!css->cub_addr1 || !css->cub_addr2) {
                        free_page((unsigned long)css->cub_addr1);
                        free_page((unsigned long)css->cub_addr2);
                        free_page((unsigned long)secm_area);
                        mutex_unlock(&css->mutex);
                        return -ENOMEM;
                }
        }
        ret = __chsc_do_secm(css, enable, secm_area);
        if (!ret) {
                css->cm_enabled = enable;
                if (css->cm_enabled) {
                        ret = chsc_add_cmg_attr(css);
                        if (ret) {
                                memset(secm_area, 0, PAGE_SIZE);
                                __chsc_do_secm(css, 0, secm_area);
                                css->cm_enabled = 0;
                        }
                } else
                        chsc_remove_cmg_attr(css);
        }
        if (enable && !css->cm_enabled) {
                free_page((unsigned long)css->cub_addr1);
                free_page((unsigned long)css->cub_addr2);
        }
        mutex_unlock(&css->mutex);
        free_page((unsigned long)secm_area);
        return ret;
}

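/*
 * Read the channel-path description for @chpid into @desc using the scpd
 * chsc command (request code 0x0002).
 */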
int chsc_determine_channel_path_description(struct chp_id chpid,
                                            struct channel_path_desc *desc)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                struct channel_path_desc desc;
        } __attribute__ ((packed)) *scpd_area;

        scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scpd_area)
                return -ENOMEM;

        scpd_area->request.length = 0x0010;
        scpd_area->request.code = 0x0002;

        scpd_area->first_chpid = chpid.id;
        scpd_area->last_chpid = chpid.id;

        ccode = chsc(scpd_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (scpd_area->response.code) {
        case 0x0001: /* Success. */
                memcpy(desc, &scpd_area->desc,
                       sizeof(struct channel_path_desc));
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide scpd\n");
                ret = -EOPNOTSUPP;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scpd_area->response.code);
                ret = -EIO;
        }
out:
        free_page((unsigned long)scpd_area);
        return ret;
}

static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                          struct cmg_chars *chars)
{
        switch (chp->cmg) {
        case 2:
        case 3:
                chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
                                         GFP_KERNEL);
                if (chp->cmg_chars) {
                        int i, mask;
                        struct cmg_chars *cmg_chars;

                        cmg_chars = chp->cmg_chars;
                        for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
                                mask = 0x80 >> (i + 3);
                                if (cmcv & mask)
                                        cmg_chars->values[i] = chars->values[i];
                                else
                                        cmg_chars->values[i] = 0;
                        }
                }
                break;
        default:
                /* No cmg-dependent data. */
                break;
        }
}

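/*
 * Query the measurement characteristics of a channel path via the scmc chsc
 * command (request code 0x0022) and store the cmg value, shared state and
 * cmg-dependent characteristics in @chp.
 */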
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                u32 not_valid : 1;
                u32 shared : 1;
                u32 : 22;
                u32 chpid : 8;
                u32 cmcv : 5;
                u32 : 11;
                u32 cmgq : 8;
                u32 cmg : 8;
                u32 zeroes3;
                u32 data[NR_MEASUREMENT_CHARS];
        } __attribute__ ((packed)) *scmc_area;

        scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scmc_area)
                return -ENOMEM;

        scmc_area->request.length = 0x0010;
        scmc_area->request.code = 0x0022;

        scmc_area->first_chpid = chp->chpid.id;
        scmc_area->last_chpid = chp->chpid.id;

        ccode = chsc(scmc_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (scmc_area->response.code) {
        case 0x0001: /* Success. */
                if (!scmc_area->not_valid) {
                        chp->cmg = scmc_area->cmg;
                        chp->shared = scmc_area->shared;
                        chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
                                                  (struct cmg_chars *)
                                                  &scmc_area->data);
                } else {
                        chp->cmg = -1;
                        chp->shared = -1;
                }
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Invalid bit combination. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided. */
                CIO_CRW_EVENT(2, "Model does not provide scmc\n");
                ret = -EOPNOTSUPP;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scmc_area->response.code);
                ret = -EIO;
        }
out:
        free_page((unsigned long)scmc_area);
        return ret;
}

static int __init
chsc_alloc_sei_area(void)
{
        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sei_page)
                printk(KERN_WARNING"Can't allocate page for processing of " \
                       "chsc machine checks!\n");
        return (sei_page ? 0 : -ENOMEM);
}

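/*
 * Turn on an additional chsc facility, identified by @operation_code, by
 * issuing the chsc command with request code 0x0031.
 */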
int __init
chsc_enable_facility(int operation_code)
{
        int ret;
        struct {
                struct chsc_header request;
                u8 reserved1:4;
                u8 format:4;
                u8 reserved2;
                u16 operation_code;
                u32 reserved3;
                u32 reserved4;
                u32 operation_data_area[252];
                struct chsc_header response;
                u32 reserved5:4;
                u32 format2:4;
                u32 reserved6:24;
        } __attribute__ ((packed)) *sda_area;

        sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!sda_area)
                return -ENOMEM;
        sda_area->request.length = 0x0400;
        sda_area->request.code = 0x0031;
        sda_area->operation_code = operation_code;

        ret = chsc(sda_area);
        if (ret > 0) {
                ret = (ret == 3) ? -ENODEV : -EBUSY;
                goto out;
        }
        switch (sda_area->response.code) {
        case 0x0001: /* everything ok */
                ret = 0;
                break;
        case 0x0003: /* invalid request block */
        case 0x0007:
                ret = -EINVAL;
                break;
        case 0x0004: /* command not provided */
        case 0x0101: /* facility not provided */
                ret = -EOPNOTSUPP;
                break;
        default: /* something went wrong */
                ret = -EIO;
        }
 out:
        free_page((unsigned long)sda_area);
        return ret;
}

subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

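/*
 * Cache the general and chsc characteristics of the channel subsystem (scsc
 * chsc command, request code 0x0010) in the two global structures declared
 * above.
 */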
int __init
chsc_determine_css_characteristics(void)
{
        int result;
        struct {
                struct chsc_header request;
                u32 reserved1;
                u32 reserved2;
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u32 general_char[510];
                u32 chsc_char[518];
        } __attribute__ ((packed)) *scsc_area;

        scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scsc_area) {
                printk(KERN_WARNING"cio: Was not able to determine available " \
                       "CHSCs due to no memory.\n");
                return -ENOMEM;
        }

        scsc_area->request.length = 0x0010;
        scsc_area->request.code = 0x0010;

        result = chsc(scsc_area);
        if (result) {
                printk(KERN_WARNING"cio: Was not able to determine " \
                       "available CHSCs, cc=%i.\n", result);
                result = -EIO;
                goto exit;
        }

        if (scsc_area->response.code != 1) {
                printk(KERN_WARNING"cio: Was not able to determine " \
                       "available CHSCs.\n");
                result = -EIO;
                goto exit;
        }
        memcpy(&css_general_characteristics, scsc_area->general_char,
               sizeof(css_general_characteristics));
        memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
               sizeof(css_chsc_characteristics));
exit:
        free_page ((unsigned long) scsc_area);
        return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);