/*
 * File...........: linux/drivers/s390/block/dasd_eckd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
 */

#define KMSG_COMPONENT "dasd-eckd"

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>

#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/compat.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>

#include "dasd_int.h"
#include "dasd_eckd.h"
#include "../cio/chsc.h"


#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif	/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"

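/*
 * Helper macros for the track-formatting factors in the ECKD device
 * characteristics: ECKD_F() yields the formula identifier, ECKD_F1() to
 * ECKD_F3() pick the factor from the f_0x01 or f_0x02 set accordingly,
 * and ECKD_F4()/ECKD_F5() are only defined for formula 0x02 (else 0).
 */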
#define ECKD_C0(i) (i->home_bytes)
#define ECKD_F(i) (i->formula)
#define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
		    (i->factors.f_0x02.f1))
#define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
		    (i->factors.f_0x02.f2))
#define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
		    (i->factors.f_0x02.f3))
#define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
#define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
#define ECKD_F6(i) (i->factor6)
#define ECKD_F7(i) (i->factor7)
#define ECKD_F8(i) (i->factor8)

MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_eckd_discipline;

/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);

static struct ccw_driver dasd_eckd_driver; /* see below */

#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2

/* emergency request for reserve/release */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	char data[32];
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);

/* definitions for the path verification worker */
struct path_verification_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
	int isglobal;
	__u8 tbvpm;
};
static struct path_verification_work_data *path_verification_worker;
static DEFINE_MUTEX(dasd_path_verification_mutex);

/* initial attempt at a probe function. this can be simplified once
 * the other detection code is gone */
static int
dasd_eckd_probe (struct ccw_device *cdev)
{
	int ret;

	/* set ECKD specific ccw-device options */
	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_eckd_probe: could not set "
				"ccw-device options");
		return ret;
	}
	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
	return ret;
}

static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}

static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

static inline unsigned int
round_up_multiple(unsigned int no, unsigned int mult)
{
	int rem = no % mult;
	return (rem ? no - rem + mult : no);
}

static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + (d2 - 1)) / d2;
}

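/*
 * Number of records with key length kl and data length dl that fit on
 * one track of the given device type (3380, 3390 or 9345); unknown
 * device types yield 0.
 */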
static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}

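/*
 * Pack a cylinder/head address: the low 16 bits of the cylinder go into
 * geo->cyl, the remaining high cylinder bits are shifted into the upper
 * 12 bits of geo->head, and the head number occupies the low 4 bits.
 */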
static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
	geo->cyl = (__u16) cyl;
	geo->head = cyl >> 16;
	geo->head <<= 4;
	geo->head |= head;
}

static int
check_XRC (struct ccw1 *de_ccw,
	   struct DE_eckd_data *data,
	   struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->rdc_data.facilities.XRC_supported)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	rc = get_sync_clock(&data->ep_sys_time);
	/* Ignore return code if sync clock is switched off. */
	if (rc == -ENOSYS || rc == -EACCES)
		rc = 0;

	de_ccw->count = sizeof(struct DE_eckd_data);
	de_ccw->flags |= CCW_FLAG_SLI;
	return rc;
}

static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	int rc = 0;

	private = (struct dasd_eckd_private *) device->private;

	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = check_XRC (ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC (ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC (ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}

static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
			       struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->rdc_data.facilities.XRC_supported)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid'   */
	pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
	pfxdata->validity.time_stamp = 1;	    /* 'Time Stamp Valid'   */

	rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
	/* Ignore return code if sync clock is switched off. */
	if (rc == -ENOSYS || rc == -EACCES)
		rc = 0;
	return rc;
}

static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
			  unsigned int rec_on_trk, int count, int cmd,
			  struct dasd_device *device, unsigned int reclen,
			  unsigned int tlf)
{
	struct dasd_eckd_private *private;
	int sector;
	int dn, d;

	private = (struct dasd_eckd_private *) device->private;

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/* note: meaning of count depends on the operation
	 *	 for record based I/O it's the number of records, but for
	 *	 track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}

static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned char format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	int rc = 0;

	basepriv = (struct dasd_eckd_private *) basedev->private;
	startpriv = (struct dasd_eckd_private *) startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	ccw->count = sizeof(*pfxdata);
	ccw->cda = (__u32) __pa(pfxdata);

	memset(pfxdata, 0, sizeof(*pfxdata));
	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type != UA_BASE_DEVICE) {
		pfxdata->validity.verify_base = 1;
		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
			pfxdata->validity.hyper_pav = 1;
	}

	/* define extent data (mostly) */
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
	case DASD_ECKD_CCW_READ_COUNT:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		dedata->mask.perm = 0x3;
		dedata->mask.auth = 0x1;
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown opcode 0x%x", cmd);
		BUG();
		return -EINVAL;
	}

	dedata->attributes.mode = 0x3;	/* ECKD */

	if ((basepriv->rdc_data.cu_type == 0x2105 ||
	     basepriv->rdc_data.cu_type == 0x2107 ||
	     basepriv->rdc_data.cu_type == 0x1750)
	    && !(basepriv->uses_cdl && trk < 2))
		dedata->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	if (format == 1) {
		fill_LRE_data(lredata, trk, rec_on_trk, count, cmd,
			      basedev, blksize, tlf);
	}

	return rc;
}

static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}

static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private;
	int sector;
	int dn, d;

	private = (struct dasd_eckd_private *) device->private;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}

/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant. So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *         ccw->cmd_code |= 0x8;
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	if (recid < 3)
		return 1;
	if (recid < blk_per_trk)
		return 0;
	if (recid < 2 * blk_per_trk)
		return 1;
	return 0;
}

/*
 * Returns the record size for the special blocks of the cdl format.
 * Only returns something useful if dasd_eckd_cdl_special is true
 * for the recid.
 */
static inline int
dasd_eckd_cdl_reclen(int recid)
{
	if (recid < 3)
		return sizes_trk0[recid];
	return LABEL_SIZE;
}

/*
 * Generate device unique id that specifies the physical device.
 */
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct dasd_uid *uid;
	int count;
	unsigned long flags;

	private = (struct dasd_eckd_private *) device->private;
	if (!private)
		return -ENODEV;
	if (!private->ned || !private->gneq)
		return -ENODEV;
	uid = &private->uid;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, private->ned->HDA_location,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return 0;
}

static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
{
	struct dasd_eckd_private *private;
	unsigned long flags;

	if (device->private) {
		private = (struct dasd_eckd_private *)device->private;
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		*uid = private->uid;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		return 0;
	}
	return -EINVAL;
}

static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   __u8 *rcd_buffer,
				   __u8 lpm)
{
	struct ccw1 *ccw;
	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buffer[0] = 0xE5;
	rcd_buffer[1] = 0xF1;
	rcd_buffer[2] = 0x4B;
	rcd_buffer[3] = 0xF0;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RCD;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
	cqr->magic = DASD_ECKD_MAGIC;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}

static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
					   struct dasd_ccw_req *cqr,
					   __u8 *rcd_buffer,
					   __u8 lpm)
{
	struct ciw *ciw;
	int rc;
	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
		return -EOPNOTSUPP;

	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 5;
	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}

static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
				   0,	/* use rcd_buf as data area */
				   device);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		ret = -ENOMEM;
		goto out_error;
	}
	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}

static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
{

	struct dasd_sneq *sneq;
	int i, count;

	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	count = private->conf_len / sizeof(struct dasd_sneq);
	sneq = (struct dasd_sneq *)private->conf_data;
	for (i = 0; i < count; ++i) {
		if (sneq->flags.identifier == 1 && sneq->format == 1)
			private->sneq = sneq;
		else if (sneq->flags.identifier == 1 && sneq->format == 4)
			private->vdsneq = (struct vd_sneq *)sneq;
		else if (sneq->flags.identifier == 2)
			private->gneq = (struct dasd_gneq *)sneq;
		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
			private->ned = (struct dasd_ned *)sneq;
		sneq++;
	}
	if (!private->ned || !private->gneq) {
		private->ned = NULL;
		private->sneq = NULL;
		private->vdsneq = NULL;
		private->gneq = NULL;
		return -EINVAL;
	}
	return 0;

};

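/*
 * Find the general NEQ in the configuration data and return its
 * path-access byte; callers map the values 0x02 and 0x03 onto the
 * npm and ppm path masks.
 */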
static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
	struct dasd_gneq *gneq;
	int i, count, found;

	count = conf_len / sizeof(*gneq);
	gneq = (struct dasd_gneq *)conf_data;
	found = 0;
	for (i = 0; i < count; ++i) {
		if (gneq->flags.identifier == 2) {
			found = 1;
			break;
		}
		gneq++;
	}
	if (found)
		return ((char *)gneq)[18] & 0x07;
	else
		return 0;
}

static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc;
	__u8 lpm, opm;
	struct dasd_eckd_private *private;
	struct dasd_path *path_data;

	private = (struct dasd_eckd_private *) device->private;
	path_data = &device->path_data;
	opm = ccw_device_get_path_mask(device->cdev);
	lpm = 0x80;
	conf_data_saved = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm>>= 1) {
		if (lpm & opm) {
			rc = dasd_eckd_read_conf_lpm(device, &conf_data,
						     &conf_len, lpm);
			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
				DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
						"Read configuration data returned "
						"error %d", rc);
				return rc;
			}
			if (conf_data == NULL) {
				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
						"No configuration data "
						"retrieved");
				/* no further analysis possible */
				path_data->opm |= lpm;
				continue;	/* no error */
			}
			/* save first valid configuration data */
			if (!conf_data_saved) {
				kfree(private->conf_data);
				private->conf_data = conf_data;
				private->conf_len = conf_len;
				if (dasd_eckd_identify_conf_parts(private)) {
					private->conf_data = NULL;
					private->conf_len = 0;
					kfree(conf_data);
					continue;
				}
				conf_data_saved++;
			}
			switch (dasd_eckd_path_access(conf_data, conf_len)) {
			case 0x02:
				path_data->npm |= lpm;
				break;
			case 0x03:
				path_data->ppm |= lpm;
				break;
			}
			path_data->opm |= lpm;
			if (conf_data != private->conf_data)
				kfree(conf_data);
		}
	}
	return 0;
}

static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
	struct dasd_eckd_private *private;
	int mdc;
	u32 fcx_max_data;

	private = (struct dasd_eckd_private *) device->private;
	if (private->fcx_max_data) {
		mdc = ccw_device_get_mdc(device->cdev, lpm);
		if ((mdc < 0)) {
			dev_warn(&device->cdev->dev,
				 "Detecting the maximum data size for zHPF "
				 "requests failed (rc=%d) for a new path %x\n",
				 mdc, lpm);
			return mdc;
		}
		fcx_max_data = mdc * FCX_MAX_DATA_FACTOR;
		if (fcx_max_data < private->fcx_max_data) {
			dev_warn(&device->cdev->dev,
				 "The maximum data size for zHPF requests %u "
				 "on a new path %x is below the active maximum "
				 "%u\n", fcx_max_data, lpm,
				 private->fcx_max_data);
			return -EACCES;
		}
	}
	return 0;
}

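/*
 * Path verification worker: read the configuration data on every path
 * in tbvpm, classify each path via dasd_eckd_path_access() and
 * verify_fcx_max_data(), and merge the result into the device path masks.
 */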
static void do_path_verification_work(struct work_struct *work)
{
	struct path_verification_work_data *data;
	struct dasd_device *device;
	__u8 lpm, opm, npm, ppm, epm;
	unsigned long flags;
	int rc;

	data = container_of(work, struct path_verification_work_data, worker);
	device = data->device;

	opm = 0;
	npm = 0;
	ppm = 0;
	epm = 0;
	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (lpm & data->tbvpm) {
			memset(data->rcd_buffer, 0, sizeof(data->rcd_buffer));
			memset(&data->cqr, 0, sizeof(data->cqr));
			data->cqr.cpaddr = &data->ccw;
			rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
							     data->rcd_buffer,
							     lpm);
			if (!rc) {
				switch (dasd_eckd_path_access(data->rcd_buffer,
						DASD_ECKD_RCD_DATA_SIZE)) {
				case 0x02:
					npm |= lpm;
					break;
				case 0x03:
					ppm |= lpm;
					break;
				}
				opm |= lpm;
			} else if (rc == -EOPNOTSUPP) {
				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: No configuration "
					"data retrieved");
				opm |= lpm;
			} else if (rc == -EAGAIN) {
				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: device is stopped,"
					" try again later");
				epm |= lpm;
			} else {
				dev_warn(&device->cdev->dev,
					 "Reading device feature codes failed "
					 "(rc=%d) for new path %x\n", rc, lpm);
				continue;
			}
			if (verify_fcx_max_data(device, lpm)) {
				opm &= ~lpm;
				npm &= ~lpm;
				ppm &= ~lpm;
			}
		}
	}
	/*
	 * There is a small chance that a path is lost again between
	 * above path verification and the following modification of
	 * the device opm mask. We could avoid that race here by using
	 * yet another path mask, but we rather deal with this unlikely
	 * situation in dasd_start_IO.
	 */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (!device->path_data.opm && opm) {
		device->path_data.opm = opm;
		dasd_generic_path_operational(device);
	} else
		device->path_data.opm |= opm;
	device->path_data.npm |= npm;
	device->path_data.ppm |= ppm;
	device->path_data.tbvpm |= epm;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	dasd_put_device(device);
	if (data->isglobal)
		mutex_unlock(&dasd_path_verification_mutex);
	else
		kfree(data);
}

static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
{
	struct path_verification_work_data *data;

	data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
	if (!data) {
		if (mutex_trylock(&dasd_path_verification_mutex)) {
			data = path_verification_worker;
			data->isglobal = 1;
		} else
			return -ENOMEM;
	} else {
		memset(data, 0, sizeof(*data));
		data->isglobal = 0;
	}
	INIT_WORK(&data->worker, do_path_verification_work);
	dasd_get_device(device);
	data->device = device;
	data->tbvpm = lpm;
	schedule_work(&data->worker);
	return 0;
}

static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}


/*
 * Build CP for Perform Subsystem Function - SSC.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0xc0;
	if (enable_pav) {
		psf_ssc_data->suborder |= 0x08;
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/*
 * Perform Subsystem Function.
 * It is necessary to trigger CIO for channel revalidation since this
 * call might change behaviour of DASD devices.
 */
static int
dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	rc = dasd_sleep_on(cqr);
	if (!rc)
		/* trigger CIO to reprobe devices */
		css_schedule_reprobe();
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Validate storage server of current device.
 */
static void dasd_eckd_validate_server(struct dasd_device *device)
{
	int rc;
	struct dasd_eckd_private *private;
	int enable_pav;

	if (dasd_nopav || MACHINE_IS_VM)
		enable_pav = 0;
	else
		enable_pav = 1;
	rc = dasd_eckd_psf_ssc(device, enable_pav);

	/* maybe the requested feature is not available on the server,
	 * therefore just report the error and go ahead */
	private = (struct dasd_eckd_private *) device->private;
	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
			"returned rc=%d", private->uid.ssid, rc);
}

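/*
 * Maximum data size for zHPF (transport mode) requests: requires FCX
 * support in the channel subsystem, the configuration data and the
 * feature codes; the per-device limit is mdc * FCX_MAX_DATA_FACTOR.
 */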
static u32 get_fcx_max_data(struct dasd_device *device)
{
#if defined(CONFIG_64BIT)
	int tpm, mdc;
	int fcx_in_css, fcx_in_gneq, fcx_in_features;
	struct dasd_eckd_private *private;

	if (dasd_nofcx)
		return 0;
	/* is transport mode supported? */
	private = (struct dasd_eckd_private *) device->private;
	fcx_in_css = css_general_characteristics.fcx;
	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
	fcx_in_features = private->features.feature[40] & 0x80;
	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;

	if (!tpm)
		return 0;

	mdc = ccw_device_get_mdc(device->cdev, 0);
	if (mdc < 0) {
		dev_warn(&device->cdev->dev, "Detecting the maximum supported"
			 " data size for zHPF requests failed\n");
		return 0;
	} else
		return mdc * FCX_MAX_DATA_FACTOR;
#else
	return 0;
#endif
}

/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.
 */
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct dasd_block *block;
	struct dasd_uid temp_uid;
	int is_known, rc, i;
	int readonly;
	unsigned long value;

	if (!ccw_device_is_pathgroup(device->cdev)) {
		dev_warn(&device->cdev->dev,
			 "A channel path group could not be established\n");
		return -EIO;
	}
	if (!ccw_device_is_multipath(device->cdev)) {
		dev_info(&device->cdev->dev,
			 "The DASD is not operating in multipath mode\n");
	}
	private = (struct dasd_eckd_private *) device->private;
	if (!private) {
		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
		if (!private) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD data "
				 "failed\n");
			return -ENOMEM;
		}
		device->private = (void *) private;
	} else {
		memset(private, 0, sizeof(*private));
	}
	/* Invalidate status of initial analysis. */
	private->init_cqr_status = -1;
	/* Set default cache operations. */
	private->attrib.operation = DASD_NORMAL_CACHE;
	private->attrib.nr_cyl = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err1;

	/* set default timeout */
	device->default_expires = DASD_EXPIRES;
	if (private->gneq) {
		value = 1;
		for (i = 0; i < private->gneq->timeout.value; i++)
			value = 10 * value;
		value = value * private->gneq->timeout.number;
		/* do not accept useless values */
		if (value != 0 && value <= DASD_EXPIRES_MAX)
			device->default_expires = value;
	}

	/* Generate device unique id */
	rc = dasd_eckd_generate_uid(device);
	if (rc)
		goto out_err1;

	dasd_eckd_get_uid(device, &temp_uid);
	if (temp_uid.type == UA_BASE_DEVICE) {
		block = dasd_alloc_block();
		if (IS_ERR(block)) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"could not allocate dasd "
					"block structure");
			rc = PTR_ERR(block);
			goto out_err1;
		}
		device->block = block;
		block->base = device;
	}

	/* register lcu with alias handling, enable PAV if this is a new lcu */
	is_known = dasd_alias_make_device_known_to_lcu(device);
	if (is_known < 0) {
		rc = is_known;
		goto out_err2;
	}
	/*
	 * dasd_eckd_validate_server is done on the first device that
	 * is found for an LCU. All later other devices have to wait
	 * for it, so they will read the correct feature codes.
	 */
	if (!is_known) {
		dasd_eckd_validate_server(device);
		dasd_alias_lcu_setup_complete(device);
	} else
		dasd_alias_wait_for_lcu_setup(device);

	/* device may report different configuration data after LCU setup */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err3;

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &private->rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err3;
	}
	/* find the valid cylinder size */
	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
	    private->rdc_data.long_no_cyl)
		private->real_cyl = private->rdc_data.long_no_cyl;
	else
		private->real_cyl = private->rdc_data.no_cyl;

	private->fcx_max_data = get_fcx_max_data(device);

	readonly = dasd_device_is_ro(device);
	if (readonly)
		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);

	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
		 "with %d cylinders, %d heads, %d sectors%s\n",
		 private->rdc_data.dev_type,
		 private->rdc_data.dev_model,
		 private->rdc_data.cu_type,
		 private->rdc_data.cu_model.model,
		 private->real_cyl,
		 private->rdc_data.trk_per_cyl,
		 private->rdc_data.sec_per_trk,
		 readonly ? ", read-only device" : "");
	return 0;

out_err3:
	dasd_alias_disconnect_device_from_lcu(device);
out_err2:
	dasd_free_block(device->block);
	device->block = NULL;
out_err1:
	kfree(private->conf_data);
	kfree(device->private);
	device->private = NULL;
	return rc;
}

static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	dasd_alias_disconnect_device_from_lcu(device);
	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	private->conf_len = 0;
	kfree(private->conf_data);
	private->conf_data = NULL;
}

static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	private = (struct dasd_eckd_private *) device->private;

	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 if (IS_ERR(cqr))
1488 return cqr;
1489 ccw = cqr->cpaddr;
1490 /* Define extent for the first 3 tracks. */
1491 define_extent(ccw++, cqr->data, 0, 2,
1492 DASD_ECKD_CCW_READ_COUNT, device);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001493 LO_data = cqr->data + sizeof(struct DE_eckd_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 /* Locate record for the first 4 records on track 0. */
1495 ccw[-1].flags |= CCW_FLAG_CC;
1496 locate_record(ccw++, LO_data++, 0, 0, 4,
1497 DASD_ECKD_CCW_READ_COUNT, device, 0);
1498
1499 count_data = private->count_area;
1500 for (i = 0; i < 4; i++) {
1501 ccw[-1].flags |= CCW_FLAG_CC;
1502 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
1503 ccw->flags = 0;
1504 ccw->count = 8;
1505 ccw->cda = (__u32)(addr_t) count_data;
1506 ccw++;
1507 count_data++;
1508 }
1509
1510 /* Locate record for the first record on track 2. */
1511 ccw[-1].flags |= CCW_FLAG_CC;
1512 locate_record(ccw++, LO_data++, 2, 0, 1,
1513 DASD_ECKD_CCW_READ_COUNT, device, 0);
1514 /* Read count ccw. */
1515 ccw[-1].flags |= CCW_FLAG_CC;
1516 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
1517 ccw->flags = 0;
1518 ccw->count = 8;
1519 ccw->cda = (__u32)(addr_t) count_data;
1520
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001521 cqr->block = NULL;
1522 cqr->startdev = device;
1523 cqr->memdev = device;
Stefan Weinhubereb6e1992009-12-07 12:51:51 +01001524 cqr->retries = 255;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525 cqr->buildclk = get_clock();
1526 cqr->status = DASD_CQR_FILLED;
1527 return cqr;
1528}
1529
Stefan Weinhubereb6e1992009-12-07 12:51:51 +01001530/* differentiate between 'no record found' and any other error */
1531static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
1532{
1533 char *sense;
1534 if (init_cqr->status == DASD_CQR_DONE)
1535 return INIT_CQR_OK;
1536 else if (init_cqr->status == DASD_CQR_NEED_ERP ||
1537 init_cqr->status == DASD_CQR_FAILED) {
1538 sense = dasd_get_sense(&init_cqr->irb);
1539 if (sense && (sense[1] & SNS1_NO_REC_FOUND))
1540 return INIT_CQR_UNFORMATTED;
1541 else
1542 return INIT_CQR_ERROR;
1543 } else
1544 return INIT_CQR_ERROR;
1545}
1546
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547/*
1548 * This is the callback function for the init_analysis cqr. It saves
1549 * the status of the initial analysis ccw before it frees it and kicks
1550 * the device to continue the startup sequence. This will call
1551 * dasd_eckd_do_analysis again (if the device has not been marked
1552 * for deletion in the meantime).
1553 */
Stefan Weinhubereb6e1992009-12-07 12:51:51 +01001554static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
1555 void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556{
1557 struct dasd_eckd_private *private;
1558 struct dasd_device *device;
1559
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001560 device = init_cqr->startdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 private = (struct dasd_eckd_private *) device->private;
Stefan Weinhubereb6e1992009-12-07 12:51:51 +01001562 private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563 dasd_sfree_request(init_cqr, device);
1564 dasd_kick_device(device);
1565}
1566
Stefan Weinhubereb6e1992009-12-07 12:51:51 +01001567static int dasd_eckd_start_analysis(struct dasd_block *block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568{
1569 struct dasd_eckd_private *private;
1570 struct dasd_ccw_req *init_cqr;
1571
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001572 private = (struct dasd_eckd_private *) block->base->private;
1573 init_cqr = dasd_eckd_analysis_ccw(block->base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 if (IS_ERR(init_cqr))
1575 return PTR_ERR(init_cqr);
1576 init_cqr->callback = dasd_eckd_analysis_callback;
1577 init_cqr->callback_data = NULL;
1578 init_cqr->expires = 5*HZ;
Stefan Weinhubereb6e1992009-12-07 12:51:51 +01001579 /* first try without ERP, so we can later handle unformatted
1580 * devices as a special case
1581 */
1582 clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
1583 init_cqr->retries = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584 dasd_add_request_head(init_cqr);
1585 return -EAGAIN;
1586}
1587
Stefan Weinhubereb6e1992009-12-07 12:51:51 +01001588static int dasd_eckd_end_analysis(struct dasd_block *block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589{
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001590 struct dasd_device *device;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 struct dasd_eckd_private *private;
1592 struct eckd_count *count_area;
1593 unsigned int sb, blk_per_trk;
1594 int status, i;
Stefan Weinhubereb6e1992009-12-07 12:51:51 +01001595 struct dasd_ccw_req *init_cqr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001597 device = block->base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 private = (struct dasd_eckd_private *) device->private;
1599 status = private->init_cqr_status;
1600 private->init_cqr_status = -1;
Stefan Weinhubereb6e1992009-12-07 12:51:51 +01001601 if (status == INIT_CQR_ERROR) {
1602 /* try again, this time with full ERP */
1603 init_cqr = dasd_eckd_analysis_ccw(device);
1604 dasd_sleep_on(init_cqr);
1605 status = dasd_eckd_analysis_evaluation(init_cqr);
1606 dasd_sfree_request(init_cqr, device);
1607 }
1608
1609 if (status == INIT_CQR_UNFORMATTED) {
1610 dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611 return -EMEDIUMTYPE;
Stefan Weinhubereb6e1992009-12-07 12:51:51 +01001612 } else if (status == INIT_CQR_ERROR) {
1613 dev_err(&device->cdev->dev,
1614 "Detecting the DASD disk layout failed because "
1615 "of an I/O error\n");
1616 return -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 }
1618
1619 private->uses_cdl = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 /* Check Track 0 for Compatible Disk Layout */
1621 count_area = NULL;
1622 for (i = 0; i < 3; i++) {
1623 if (private->count_area[i].kl != 4 ||
1624 private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
1625 private->uses_cdl = 0;
1626 break;
1627 }
1628 }
1629 if (i == 3)
1630 count_area = &private->count_area[4];
1631
1632 if (private->uses_cdl == 0) {
1633 for (i = 0; i < 5; i++) {
1634 if ((private->count_area[i].kl != 0) ||
1635 (private->count_area[i].dl !=
1636 private->count_area[0].dl))
1637 break;
1638 }
1639 if (i == 5)
1640 count_area = &private->count_area[0];
1641 } else {
1642 if (private->count_area[3].record == 1)
Stefan Haberlandfc19f382009-03-26 15:23:49 +01001643 dev_warn(&device->cdev->dev,
1644 "Track 0 has no records following the VTOC\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645 }
1646 if (count_area != NULL && count_area->kl == 0) {
1647 /* we found nothing violating our disk layout */
1648 if (dasd_check_blocksize(count_area->dl) == 0)
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001649 block->bp_block = count_area->dl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001651 if (block->bp_block == 0) {
Stefan Haberlandfc19f382009-03-26 15:23:49 +01001652 dev_warn(&device->cdev->dev,
1653 "The disk layout of the DASD is not supported\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654 return -EMEDIUMTYPE;
1655 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001656 block->s2b_shift = 0; /* bits to shift 512 to get a block */
1657 for (sb = 512; sb < block->bp_block; sb = sb << 1)
1658 block->s2b_shift++;
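	/*
	 * Example (illustrative): for bp_block == 4096 the loop runs for
	 * sb = 512, 1024 and 2048, so s2b_shift ends up as 3, i.e. one
	 * block corresponds to 2^3 = 8 sectors of 512 bytes.
	 */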
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001660 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
Stefan Weinhuberb44b0ab32009-03-26 15:23:47 +01001661 block->blocks = (private->real_cyl *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 private->rdc_data.trk_per_cyl *
1663 blk_per_trk);
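	/*
	 * Illustrative sizing example (assumed geometry): 10017 cylinders
	 * with 15 tracks per cylinder and 12 blocks per track give
	 * block->blocks = 10017 * 15 * 12 = 1803060 blocks.
	 */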
1664
Stefan Haberlandfc19f382009-03-26 15:23:49 +01001665 dev_info(&device->cdev->dev,
1666 "DASD with %d KB/block, %d KB total size, %d KB/track, "
1667 "%s\n", (block->bp_block >> 10),
1668 ((private->real_cyl *
1669 private->rdc_data.trk_per_cyl *
1670 blk_per_trk * (block->bp_block >> 9)) >> 1),
1671 ((blk_per_trk * block->bp_block) >> 10),
1672 private->uses_cdl ?
1673 "compatible disk layout" : "linux disk layout");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674
1675 return 0;
1676}
1677
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001678static int dasd_eckd_do_analysis(struct dasd_block *block)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679{
1680 struct dasd_eckd_private *private;
1681
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001682 private = (struct dasd_eckd_private *) block->base->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 if (private->init_cqr_status < 0)
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001684 return dasd_eckd_start_analysis(block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685 else
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001686 return dasd_eckd_end_analysis(block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687}
1688
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001689static int dasd_eckd_ready_to_online(struct dasd_device *device)
1690{
1691 return dasd_alias_add_device(device);
1692};
1693
1694static int dasd_eckd_online_to_ready(struct dasd_device *device)
1695{
Stefan Haberland501183f2010-05-17 10:00:10 +02001696 cancel_work_sync(&device->reload_device);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001697 return dasd_alias_remove_device(device);
1698};
1699
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700static int
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001701dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702{
1703 struct dasd_eckd_private *private;
1704
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001705 private = (struct dasd_eckd_private *) block->base->private;
1706 if (dasd_check_blocksize(block->bp_block) == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707 geo->sectors = recs_per_track(&private->rdc_data,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001708 0, block->bp_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709 }
1710 geo->cylinders = private->rdc_data.no_cyl;
1711 geo->heads = private->rdc_data.trk_per_cyl;
1712 return 0;
1713}
1714
1715static struct dasd_ccw_req *
1716dasd_eckd_format_device(struct dasd_device * device,
1717 struct format_data_t * fdata)
1718{
1719 struct dasd_eckd_private *private;
1720 struct dasd_ccw_req *fcp;
1721 struct eckd_count *ect;
1722 struct ccw1 *ccw;
1723 void *data;
Stefan Weinhuberb44b0ab32009-03-26 15:23:47 +01001724 int rpt;
1725 struct ch_t address;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 int cplength, datasize;
1727 int i;
Jean-Baptiste Joretf9a28f72009-03-26 15:23:46 +01001728 int intensity = 0;
1729 int r0_perm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730
1731 private = (struct dasd_eckd_private *) device->private;
1732 rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
Stefan Weinhuberb44b0ab32009-03-26 15:23:47 +01001733 set_ch_t(&address,
1734 fdata->start_unit / private->rdc_data.trk_per_cyl,
1735 fdata->start_unit % private->rdc_data.trk_per_cyl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736
1737 /* Sanity checks. */
1738 if (fdata->start_unit >=
Stefan Weinhuberb44b0ab32009-03-26 15:23:47 +01001739 (private->real_cyl * private->rdc_data.trk_per_cyl)) {
Stefan Haberlandfc19f382009-03-26 15:23:49 +01001740 dev_warn(&device->cdev->dev, "Start track number %d used in "
1741 "formatting is too big\n", fdata->start_unit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 return ERR_PTR(-EINVAL);
1743 }
1744 if (fdata->start_unit > fdata->stop_unit) {
Stefan Haberlandfc19f382009-03-26 15:23:49 +01001745 dev_warn(&device->cdev->dev, "Start track %d used in "
1746 "formatting exceeds end track\n", fdata->start_unit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 return ERR_PTR(-EINVAL);
1748 }
1749 if (dasd_check_blocksize(fdata->blksize) != 0) {
Stefan Haberlandfc19f382009-03-26 15:23:49 +01001750 dev_warn(&device->cdev->dev,
1751 "The DASD cannot be formatted with block size %d\n",
1752 fdata->blksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 return ERR_PTR(-EINVAL);
1754 }
1755
1756 /*
1757 * fdata->intensity is a bit string that tells us what to do:
1758 * Bit 0: write record zero
1759 * Bit 1: write home address, currently not supported
1760 * Bit 2: invalidate tracks
1761 * Bit 3: use OS/390 compatible disk layout (cdl)
Jean-Baptiste Joretf9a28f72009-03-26 15:23:46 +01001762 * Bit 4: do not allow storage subsystem to modify record zero
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 * Only some bit combinations do make sense.
1764 */
Jean-Baptiste Joretf9a28f72009-03-26 15:23:46 +01001765 if (fdata->intensity & 0x10) {
1766 r0_perm = 0;
1767 intensity = fdata->intensity & ~0x10;
1768 } else {
1769 r0_perm = 1;
1770 intensity = fdata->intensity;
1771 }
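	/*
	 * Example (illustrative): an intensity of 0x18 requests "use cdl"
	 * plus "do not allow the subsystem to modify record zero"; the
	 * code above strips bit 4, leaving intensity == 0x08 and
	 * r0_perm == 0 for the switch below.
	 */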
1772 switch (intensity) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 case 0x00: /* Normal format */
1774 case 0x08: /* Normal format, use cdl. */
1775 cplength = 2 + rpt;
1776 datasize = sizeof(struct DE_eckd_data) +
1777 sizeof(struct LO_eckd_data) +
1778 rpt * sizeof(struct eckd_count);
1779 break;
1780 case 0x01: /* Write record zero and format track. */
1781 case 0x09: /* Write record zero and format track, use cdl. */
1782 cplength = 3 + rpt;
1783 datasize = sizeof(struct DE_eckd_data) +
1784 sizeof(struct LO_eckd_data) +
1785 sizeof(struct eckd_count) +
1786 rpt * sizeof(struct eckd_count);
1787 break;
1788 case 0x04: /* Invalidate track. */
1789 case 0x0c: /* Invalidate track, use cdl. */
1790 cplength = 3;
1791 datasize = sizeof(struct DE_eckd_data) +
1792 sizeof(struct LO_eckd_data) +
1793 sizeof(struct eckd_count);
1794 break;
1795 default:
Stefan Haberlandfc19f382009-03-26 15:23:49 +01001796 dev_warn(&device->cdev->dev, "An I/O control call used "
1797 "incorrect flags 0x%x\n", fdata->intensity);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 return ERR_PTR(-EINVAL);
1799 }
1800 /* Allocate the format ccw request. */
Stefan Haberland68b781f2009-09-11 10:28:29 +02001801 fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802 if (IS_ERR(fcp))
1803 return fcp;
1804
1805 data = fcp->data;
1806 ccw = fcp->cpaddr;
1807
Jean-Baptiste Joretf9a28f72009-03-26 15:23:46 +01001808 switch (intensity & ~0x08) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 case 0x00: /* Normal format. */
1810 define_extent(ccw++, (struct DE_eckd_data *) data,
1811 fdata->start_unit, fdata->start_unit,
1812 DASD_ECKD_CCW_WRITE_CKD, device);
Jean-Baptiste Joretf9a28f72009-03-26 15:23:46 +01001813 /* grant subsystem permission to format R0 */
1814 if (r0_perm)
1815 ((struct DE_eckd_data *)data)->ga_extended |= 0x04;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816 data += sizeof(struct DE_eckd_data);
1817 ccw[-1].flags |= CCW_FLAG_CC;
1818 locate_record(ccw++, (struct LO_eckd_data *) data,
1819 fdata->start_unit, 0, rpt,
1820 DASD_ECKD_CCW_WRITE_CKD, device,
1821 fdata->blksize);
1822 data += sizeof(struct LO_eckd_data);
1823 break;
1824 case 0x01: /* Write record zero + format track. */
1825 define_extent(ccw++, (struct DE_eckd_data *) data,
1826 fdata->start_unit, fdata->start_unit,
1827 DASD_ECKD_CCW_WRITE_RECORD_ZERO,
1828 device);
1829 data += sizeof(struct DE_eckd_data);
1830 ccw[-1].flags |= CCW_FLAG_CC;
1831 locate_record(ccw++, (struct LO_eckd_data *) data,
1832 fdata->start_unit, 0, rpt + 1,
1833 DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001834 device->block->bp_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 data += sizeof(struct LO_eckd_data);
1836 break;
1837 case 0x04: /* Invalidate track. */
1838 define_extent(ccw++, (struct DE_eckd_data *) data,
1839 fdata->start_unit, fdata->start_unit,
1840 DASD_ECKD_CCW_WRITE_CKD, device);
1841 data += sizeof(struct DE_eckd_data);
1842 ccw[-1].flags |= CCW_FLAG_CC;
1843 locate_record(ccw++, (struct LO_eckd_data *) data,
1844 fdata->start_unit, 0, 1,
1845 DASD_ECKD_CCW_WRITE_CKD, device, 8);
1846 data += sizeof(struct LO_eckd_data);
1847 break;
1848 }
Jean-Baptiste Joretf9a28f72009-03-26 15:23:46 +01001849 if (intensity & 0x01) { /* write record zero */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 ect = (struct eckd_count *) data;
1851 data += sizeof(struct eckd_count);
Stefan Weinhuberb44b0ab32009-03-26 15:23:47 +01001852 ect->cyl = address.cyl;
1853 ect->head = address.head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854 ect->record = 0;
1855 ect->kl = 0;
1856 ect->dl = 8;
1857 ccw[-1].flags |= CCW_FLAG_CC;
1858 ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
1859 ccw->flags = CCW_FLAG_SLI;
1860 ccw->count = 8;
1861 ccw->cda = (__u32)(addr_t) ect;
1862 ccw++;
1863 }
Jean-Baptiste Joretf9a28f72009-03-26 15:23:46 +01001864 if ((intensity & ~0x08) & 0x04) { /* erase track */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865 ect = (struct eckd_count *) data;
1866 data += sizeof(struct eckd_count);
Stefan Weinhuberb44b0ab32009-03-26 15:23:47 +01001867 ect->cyl = address.cyl;
1868 ect->head = address.head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 ect->record = 1;
1870 ect->kl = 0;
1871 ect->dl = 0;
1872 ccw[-1].flags |= CCW_FLAG_CC;
1873 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
1874 ccw->flags = CCW_FLAG_SLI;
1875 ccw->count = 8;
1876 ccw->cda = (__u32)(addr_t) ect;
1877 } else { /* write remaining records */
1878 for (i = 0; i < rpt; i++) {
1879 ect = (struct eckd_count *) data;
1880 data += sizeof(struct eckd_count);
Stefan Weinhuberb44b0ab32009-03-26 15:23:47 +01001881 ect->cyl = address.cyl;
1882 ect->head = address.head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883 ect->record = i + 1;
1884 ect->kl = 0;
1885 ect->dl = fdata->blksize;
1886 /* Check for special tracks 0-1 when formatting CDL */
Jean-Baptiste Joretf9a28f72009-03-26 15:23:46 +01001887 if ((intensity & 0x08) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888 fdata->start_unit == 0) {
1889 if (i < 3) {
1890 ect->kl = 4;
1891 ect->dl = sizes_trk0[i] - 4;
Horst Hummel138c0142006-06-29 14:58:12 +02001892 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893 }
Jean-Baptiste Joretf9a28f72009-03-26 15:23:46 +01001894 if ((intensity & 0x08) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 fdata->start_unit == 1) {
1896 ect->kl = 44;
1897 ect->dl = LABEL_SIZE - 44;
1898 }
1899 ccw[-1].flags |= CCW_FLAG_CC;
1900 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
1901 ccw->flags = CCW_FLAG_SLI;
1902 ccw->count = 8;
1903 ccw->cda = (__u32)(addr_t) ect;
1904 ccw++;
1905 }
1906 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001907 fcp->startdev = device;
1908 fcp->memdev = device;
Stefan Weinhubereb6e1992009-12-07 12:51:51 +01001909 fcp->retries = 256;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910 fcp->buildclk = get_clock();
1911 fcp->status = DASD_CQR_FILLED;
1912 return fcp;
1913}
1914
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001915static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916{
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001917 cqr->status = DASD_CQR_FILLED;
1918 if (cqr->block && (cqr->startdev != cqr->block->base)) {
1919 dasd_eckd_reset_ccw_to_base_io(cqr);
1920 cqr->startdev = cqr->block->base;
Stefan Weinhubera4d26c62011-01-05 12:48:03 +01001921 cqr->lpm = cqr->block->base->path_data.opm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001923};
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924
1925static dasd_erp_fn_t
1926dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
1927{
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001928 struct dasd_device *device = (struct dasd_device *) cqr->startdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929 struct ccw_device *cdev = device->cdev;
1930
1931 switch (cdev->id.cu_type) {
1932 case 0x3990:
1933 case 0x2105:
1934 case 0x2107:
1935 case 0x1750:
1936 return dasd_3990_erp_action;
1937 case 0x9343:
1938 case 0x3880:
1939 default:
1940 return dasd_default_erp_action;
1941 }
1942}
1943
1944static dasd_erp_fn_t
1945dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
1946{
1947 return dasd_default_erp_postaction;
1948}
1949
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001950
1951static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1952 struct irb *irb)
1953{
1954 char mask;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01001955 char *sense = NULL;
Stefan Haberland501183f2010-05-17 10:00:10 +02001956 struct dasd_eckd_private *private;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001957
Stefan Haberland501183f2010-05-17 10:00:10 +02001958 private = (struct dasd_eckd_private *) device->private;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001959 /* first of all check for state change pending interrupt */
1960 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01001961 if ((scsw_dstat(&irb->scsw) & mask) == mask) {
Stefan Haberland501183f2010-05-17 10:00:10 +02001962 /* for alias only and not in offline processing */
1963 if (!device->block && private->lcu &&
1964 !test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
1965 /*
1966 * the state change could be caused by an alias
1967 * reassignment; remove the device from alias handling
1968 * to prevent new requests from being scheduled on
1969 * the wrong alias device
1970 */
1971 dasd_alias_remove_device(device);
1972
1973 /* schedule worker to reload device */
1974 dasd_reload_device(device);
1975 }
1976
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001977 dasd_generic_handle_state_change(device);
1978 return;
1979 }
1980
1981 /* summary unit check */
Stefan Weinhubera5a00612010-10-25 16:10:47 +02001982 sense = dasd_get_sense(irb);
1983 if (sense && (sense[7] == 0x0D) &&
1984 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01001985 dasd_alias_handle_summary_unit_check(device, irb);
1986 return;
1987 }
1988
Stefan Haberlandf60c7682008-04-17 07:46:08 +02001989 /* service information message SIM */
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01001990 if (sense && !(sense[27] & DASD_SENSE_BIT_0) &&
1991 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
1992 dasd_3990_erp_handle_sim(device, sense);
Stefan Haberland9d853ca2008-07-17 17:16:41 +02001993 dasd_schedule_device_bh(device);
Stefan Haberlandf60c7682008-04-17 07:46:08 +02001994 return;
1995 }
1996
Stefan Weinhubera5a00612010-10-25 16:10:47 +02001997 if ((scsw_cc(&irb->scsw) == 1) && !sense &&
1998 (scsw_fctl(&irb->scsw) == SCSW_FCTL_START_FUNC) &&
1999 (scsw_actl(&irb->scsw) == SCSW_ACTL_START_PEND) &&
2000 (scsw_stctl(&irb->scsw) == SCSW_STCTL_STATUS_PEND)) {
Stefan Haberlandada3df92008-10-10 21:33:23 +02002001 /* fake irb, do nothing; they are handled elsewhere */
2002 dasd_schedule_device_bh(device);
2003 return;
2004 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002005
Stefan Haberlandada3df92008-10-10 21:33:23 +02002006 dasd_schedule_device_bh(device);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002007 return;
2008};
2009
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002010
2011static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
2012 struct dasd_device *startdev,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002013 struct dasd_block *block,
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002014 struct request *req,
2015 sector_t first_rec,
2016 sector_t last_rec,
2017 sector_t first_trk,
2018 sector_t last_trk,
2019 unsigned int first_offs,
2020 unsigned int last_offs,
2021 unsigned int blk_per_trk,
2022 unsigned int blksize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023{
2024 struct dasd_eckd_private *private;
2025 unsigned long *idaws;
2026 struct LO_eckd_data *LO_data;
2027 struct dasd_ccw_req *cqr;
2028 struct ccw1 *ccw;
NeilBrown5705f702007-09-25 12:35:59 +02002029 struct req_iterator iter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030 struct bio_vec *bv;
2031 char *dst;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002032 unsigned int off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 int count, cidaw, cplength, datasize;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002034 sector_t recid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 unsigned char cmd, rcmd;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002036 int use_prefix;
2037 struct dasd_device *basedev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002039 basedev = block->base;
2040 private = (struct dasd_eckd_private *) basedev->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041 if (rq_data_dir(req) == READ)
2042 cmd = DASD_ECKD_CCW_READ_MT;
2043 else if (rq_data_dir(req) == WRITE)
2044 cmd = DASD_ECKD_CCW_WRITE_MT;
2045 else
2046 return ERR_PTR(-EINVAL);
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002047
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 /* Check struct bio and count the number of blocks for the request. */
2049 count = 0;
2050 cidaw = 0;
NeilBrown5705f702007-09-25 12:35:59 +02002051 rq_for_each_segment(bv, req, iter) {
Jens Axboe6c92e692007-08-16 13:43:12 +02002052 if (bv->bv_len & (blksize - 1))
2053 /* Eckd can only do full blocks. */
2054 return ERR_PTR(-EINVAL);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002055 count += bv->bv_len >> (block->s2b_shift + 9);
Martin Schwidefsky347a8dc2006-01-06 00:19:28 -08002056#if defined(CONFIG_64BIT)
Jens Axboe6c92e692007-08-16 13:43:12 +02002057 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002058 cidaw += bv->bv_len >> (block->s2b_shift + 9);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 }
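	/*
	 * Note (illustrative): since blksize is 512 << block->s2b_shift,
	 * the shift by (block->s2b_shift + 9) above simply converts
	 * bv_len from bytes into blocks, e.g. 8192 bytes with a 4096 byte
	 * block size yield 2 blocks.
	 */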
2061 /* Paranoia. */
2062 if (count != last_rec - first_rec + 1)
2063 return ERR_PTR(-EINVAL);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002064
2065 /* use the prefix command if available */
2066 use_prefix = private->features.feature[8] & 0x01;
2067 if (use_prefix) {
2068 /* 1x prefix + number of blocks */
2069 cplength = 2 + count;
2070 /* 1x prefix + cidaws*sizeof(long) */
2071 datasize = sizeof(struct PFX_eckd_data) +
2072 sizeof(struct LO_eckd_data) +
2073 cidaw * sizeof(unsigned long);
2074 } else {
2075 /* 1x define extent + 1x locate record + number of blocks */
2076 cplength = 2 + count;
2077 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
2078 datasize = sizeof(struct DE_eckd_data) +
2079 sizeof(struct LO_eckd_data) +
2080 cidaw * sizeof(unsigned long);
2081 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 /* Find out the number of additional locate record ccws for cdl. */
2083 if (private->uses_cdl && first_rec < 2*blk_per_trk) {
2084 if (last_rec >= 2*blk_per_trk)
2085 count = 2*blk_per_trk - first_rec;
2086 cplength += count;
2087 datasize += count*sizeof(struct LO_eckd_data);
2088 }
2089 /* Allocate the ccw request. */
Stefan Haberland68b781f2009-09-11 10:28:29 +02002090 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
2091 startdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 if (IS_ERR(cqr))
2093 return cqr;
2094 ccw = cqr->cpaddr;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002095 /* First ccw is define extent or prefix. */
2096 if (use_prefix) {
2097 if (prefix(ccw++, cqr->data, first_trk,
2098 last_trk, cmd, basedev, startdev) == -EAGAIN) {
2099 /* Clock not in sync and XRC is enabled.
2100 * Try again later.
2101 */
2102 dasd_sfree_request(cqr, startdev);
2103 return ERR_PTR(-EAGAIN);
2104 }
2105 idaws = (unsigned long *) (cqr->data +
2106 sizeof(struct PFX_eckd_data));
2107 } else {
2108 if (define_extent(ccw++, cqr->data, first_trk,
2109 last_trk, cmd, startdev) == -EAGAIN) {
2110 /* Clock not in sync and XRC is enabled.
2111 * Try again later.
2112 */
2113 dasd_sfree_request(cqr, startdev);
2114 return ERR_PTR(-EAGAIN);
2115 }
2116 idaws = (unsigned long *) (cqr->data +
2117 sizeof(struct DE_eckd_data));
Martin Schwidefskyd54853e2007-02-05 21:18:19 +01002118 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 /* Build locate_record + read/write ccws. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 LO_data = (struct LO_eckd_data *) (idaws + cidaw);
2121 recid = first_rec;
2122 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
2123 /* Only standard blocks so there is just one locate record. */
2124 ccw[-1].flags |= CCW_FLAG_CC;
2125 locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002126 last_rec - recid + 1, cmd, basedev, blksize);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 }
NeilBrown5705f702007-09-25 12:35:59 +02002128 rq_for_each_segment(bv, req, iter) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129 dst = page_address(bv->bv_page) + bv->bv_offset;
2130 if (dasd_page_cache) {
2131 char *copy = kmem_cache_alloc(dasd_page_cache,
Christoph Lameter441e1432006-12-06 20:33:19 -08002132 GFP_DMA | __GFP_NOWARN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133 if (copy && rq_data_dir(req) == WRITE)
2134 memcpy(copy + bv->bv_offset, dst, bv->bv_len);
2135 if (copy)
2136 dst = copy + bv->bv_offset;
2137 }
2138 for (off = 0; off < bv->bv_len; off += blksize) {
2139 sector_t trkid = recid;
2140 unsigned int recoffs = sector_div(trkid, blk_per_trk);
2141 rcmd = cmd;
2142 count = blksize;
2143 /* Locate record for cdl special block ? */
2144 if (private->uses_cdl && recid < 2*blk_per_trk) {
2145 if (dasd_eckd_cdl_special(blk_per_trk, recid)){
2146 rcmd |= 0x8;
2147 count = dasd_eckd_cdl_reclen(recid);
Horst Hummelec5883a2005-05-01 08:58:59 -07002148 if (count < blksize &&
2149 rq_data_dir(req) == READ)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 memset(dst + count, 0xe5,
2151 blksize - count);
2152 }
2153 ccw[-1].flags |= CCW_FLAG_CC;
2154 locate_record(ccw++, LO_data++,
2155 trkid, recoffs + 1,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002156 1, rcmd, basedev, count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 }
2158 /* Locate record for standard blocks ? */
2159 if (private->uses_cdl && recid == 2*blk_per_trk) {
2160 ccw[-1].flags |= CCW_FLAG_CC;
2161 locate_record(ccw++, LO_data++,
2162 trkid, recoffs + 1,
2163 last_rec - recid + 1,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002164 cmd, basedev, count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 }
2166 /* Read/write ccw. */
2167 ccw[-1].flags |= CCW_FLAG_CC;
2168 ccw->cmd_code = rcmd;
2169 ccw->count = count;
2170 if (idal_is_needed(dst, blksize)) {
2171 ccw->cda = (__u32)(addr_t) idaws;
2172 ccw->flags = CCW_FLAG_IDA;
2173 idaws = idal_create_words(idaws, dst, blksize);
2174 } else {
2175 ccw->cda = (__u32)(addr_t) dst;
2176 ccw->flags = 0;
2177 }
2178 ccw++;
2179 dst += blksize;
2180 recid++;
2181 }
2182 }
Holger Smolinski13de2272009-01-09 12:14:51 +01002183 if (blk_noretry_request(req) ||
2184 block->base->features & DASD_FEATURE_FAILFAST)
Horst Hummel1c01b8a2006-01-06 00:19:15 -08002185 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002186 cqr->startdev = startdev;
2187 cqr->memdev = startdev;
2188 cqr->block = block;
Stefan Haberland7c8faa82010-08-09 18:13:00 +02002189 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
Stefan Weinhubera4d26c62011-01-05 12:48:03 +01002190 cqr->lpm = startdev->path_data.ppm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 cqr->retries = 256;
2192 cqr->buildclk = get_clock();
2193 cqr->status = DASD_CQR_FILLED;
2194 return cqr;
2195}
2196
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002197static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2198 struct dasd_device *startdev,
2199 struct dasd_block *block,
2200 struct request *req,
2201 sector_t first_rec,
2202 sector_t last_rec,
2203 sector_t first_trk,
2204 sector_t last_trk,
2205 unsigned int first_offs,
2206 unsigned int last_offs,
2207 unsigned int blk_per_trk,
2208 unsigned int blksize)
2209{
2210 struct dasd_eckd_private *private;
2211 unsigned long *idaws;
2212 struct dasd_ccw_req *cqr;
2213 struct ccw1 *ccw;
2214 struct req_iterator iter;
2215 struct bio_vec *bv;
2216 char *dst, *idaw_dst;
2217 unsigned int cidaw, cplength, datasize;
2218 unsigned int tlf;
2219 sector_t recid;
2220 unsigned char cmd;
2221 struct dasd_device *basedev;
2222 unsigned int trkcount, count, count_to_trk_end;
2223 unsigned int idaw_len, seg_len, part_len, len_to_track_end;
2224 unsigned char new_track, end_idaw;
2225 sector_t trkid;
2226 unsigned int recoffs;
2227
2228 basedev = block->base;
2229 private = (struct dasd_eckd_private *) basedev->private;
2230 if (rq_data_dir(req) == READ)
2231 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
2232 else if (rq_data_dir(req) == WRITE)
2233 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
2234 else
2235 return ERR_PTR(-EINVAL);
2236
2237 /* Track based I/O needs IDAWs for each page, and not just for
2238 * 64 bit addresses. We need additional idals for pages
2239 * that get filled from two tracks, so we use the number
2240 * of records as upper limit.
2241 */
2242 cidaw = last_rec - first_rec + 1;
2243 trkcount = last_trk - first_trk + 1;
2244
2245 /* 1x prefix + one read/write ccw per track */
2246 cplength = 1 + trkcount;
2247
2248 /* on 31-bit we need space for two 32 bit addresses per page
2249 * on 64-bit one 64 bit address
2250 */
2251 datasize = sizeof(struct PFX_eckd_data) +
2252 cidaw * sizeof(unsigned long long);
2253
2254 /* Allocate the ccw request. */
Stefan Haberland68b781f2009-09-11 10:28:29 +02002255 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
2256 startdev);
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002257 if (IS_ERR(cqr))
2258 return cqr;
2259 ccw = cqr->cpaddr;
2260 /* transfer length factor: how many bytes to read from the last track */
2261 if (first_trk == last_trk)
2262 tlf = last_offs - first_offs + 1;
2263 else
2264 tlf = last_offs + 1;
2265 tlf *= blksize;
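	/*
	 * Illustrative example: a request confined to one track, covering
	 * block offsets 2 through 5 with blksize 4096, yields
	 * tlf = (5 - 2 + 1) * 4096 = 16384 bytes on the last track.
	 */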
2266
2267 if (prefix_LRE(ccw++, cqr->data, first_trk,
2268 last_trk, cmd, basedev, startdev,
2269 1 /* format */, first_offs + 1,
2270 trkcount, blksize,
2271 tlf) == -EAGAIN) {
2272 /* Clock not in sync and XRC is enabled.
2273 * Try again later.
2274 */
2275 dasd_sfree_request(cqr, startdev);
2276 return ERR_PTR(-EAGAIN);
2277 }
2278
2279 /*
2280 * The translation of request into ccw programs must meet the
2281 * following conditions:
2282 * - all idaws but the first and the last must address full pages
2283 * (or 2K blocks on 31-bit)
2284 * - the scope of a ccw and its idal ends with the track boundaries
2285 */
2286 idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
2287 recid = first_rec;
2288 new_track = 1;
2289 end_idaw = 0;
2290 len_to_track_end = 0;
2291 idaw_dst = 0;
2292 idaw_len = 0;
2293 rq_for_each_segment(bv, req, iter) {
2294 dst = page_address(bv->bv_page) + bv->bv_offset;
2295 seg_len = bv->bv_len;
2296 while (seg_len) {
2297 if (new_track) {
2298 trkid = recid;
2299 recoffs = sector_div(trkid, blk_per_trk);
2300 count_to_trk_end = blk_per_trk - recoffs;
2301 count = min((last_rec - recid + 1),
2302 (sector_t)count_to_trk_end);
2303 len_to_track_end = count * blksize;
2304 ccw[-1].flags |= CCW_FLAG_CC;
2305 ccw->cmd_code = cmd;
2306 ccw->count = len_to_track_end;
2307 ccw->cda = (__u32)(addr_t)idaws;
2308 ccw->flags = CCW_FLAG_IDA;
2309 ccw++;
2310 recid += count;
2311 new_track = 0;
Stefan Weinhuber52db45c2009-04-14 15:36:24 +02002312 /* first idaw for a ccw may start anywhere */
2313 if (!idaw_dst)
2314 idaw_dst = dst;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002315 }
Stefan Weinhuber52db45c2009-04-14 15:36:24 +02002316 /* If we start a new idaw, we must make sure that it
2317 * starts on an IDA_BLOCK_SIZE boundary.
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002318 * If we continue an idaw, we must make sure that the
2319 * current segment begins where the so far accumulated
2320 * idaw ends.
2321 */
Stefan Weinhuber52db45c2009-04-14 15:36:24 +02002322 if (!idaw_dst) {
2323 if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
2324 dasd_sfree_request(cqr, startdev);
2325 return ERR_PTR(-ERANGE);
2326 } else
2327 idaw_dst = dst;
2328 }
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002329 if ((idaw_dst + idaw_len) != dst) {
2330 dasd_sfree_request(cqr, startdev);
2331 return ERR_PTR(-ERANGE);
2332 }
2333 part_len = min(seg_len, len_to_track_end);
2334 seg_len -= part_len;
2335 dst += part_len;
2336 idaw_len += part_len;
2337 len_to_track_end -= part_len;
2338 /* collected memory area ends on an IDA_BLOCK border,
2339 * -> create an idaw
2340 * idal_create_words will handle cases where idaw_len
2341 * is larger than IDA_BLOCK_SIZE
2342 */
2343 if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
2344 end_idaw = 1;
2345 /* We also need to end the idaw at track end */
2346 if (!len_to_track_end) {
2347 new_track = 1;
2348 end_idaw = 1;
2349 }
2350 if (end_idaw) {
2351 idaws = idal_create_words(idaws, idaw_dst,
2352 idaw_len);
2353 idaw_dst = 0;
2354 idaw_len = 0;
2355 end_idaw = 0;
2356 }
2357 }
2358 }
2359
2360 if (blk_noretry_request(req) ||
2361 block->base->features & DASD_FEATURE_FAILFAST)
2362 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2363 cqr->startdev = startdev;
2364 cqr->memdev = startdev;
2365 cqr->block = block;
Stefan Haberland7c8faa82010-08-09 18:13:00 +02002366 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
Stefan Weinhubera4d26c62011-01-05 12:48:03 +01002367 cqr->lpm = startdev->path_data.ppm;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002368 cqr->retries = 256;
2369 cqr->buildclk = get_clock();
2370 cqr->status = DASD_CQR_FILLED;
2371 return cqr;
2372}
2373
2374static int prepare_itcw(struct itcw *itcw,
2375 unsigned int trk, unsigned int totrk, int cmd,
2376 struct dasd_device *basedev,
2377 struct dasd_device *startdev,
2378 unsigned int rec_on_trk, int count,
2379 unsigned int blksize,
2380 unsigned int total_data_size,
2381 unsigned int tlf,
2382 unsigned int blk_per_trk)
2383{
2384 struct PFX_eckd_data pfxdata;
2385 struct dasd_eckd_private *basepriv, *startpriv;
2386 struct DE_eckd_data *dedata;
2387 struct LRE_eckd_data *lredata;
2388 struct dcw *dcw;
2389
2390 u32 begcyl, endcyl;
2391 u16 heads, beghead, endhead;
2392 u8 pfx_cmd;
2393
2394 int rc = 0;
2395 int sector = 0;
2396 int dn, d;
2397
2398
2399 /* setup prefix data */
2400 basepriv = (struct dasd_eckd_private *) basedev->private;
2401 startpriv = (struct dasd_eckd_private *) startdev->private;
2402 dedata = &pfxdata.define_extent;
2403 lredata = &pfxdata.locate_record;
2404
2405 memset(&pfxdata, 0, sizeof(pfxdata));
2406 pfxdata.format = 1; /* PFX with LRE */
2407 pfxdata.base_address = basepriv->ned->unit_addr;
2408 pfxdata.base_lss = basepriv->ned->ID;
2409 pfxdata.validity.define_extent = 1;
2410
2411 /* private uid is kept up to date, conf_data may be outdated */
2412 if (startpriv->uid.type != UA_BASE_DEVICE) {
2413 pfxdata.validity.verify_base = 1;
2414 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
2415 pfxdata.validity.hyper_pav = 1;
2416 }
2417
2418 switch (cmd) {
2419 case DASD_ECKD_CCW_READ_TRACK_DATA:
2420 dedata->mask.perm = 0x1;
2421 dedata->attributes.operation = basepriv->attrib.operation;
2422 dedata->blk_size = blksize;
2423 dedata->ga_extended |= 0x42;
2424 lredata->operation.orientation = 0x0;
2425 lredata->operation.operation = 0x0C;
2426 lredata->auxiliary.check_bytes = 0x01;
2427 pfx_cmd = DASD_ECKD_CCW_PFX_READ;
2428 break;
2429 case DASD_ECKD_CCW_WRITE_TRACK_DATA:
2430 dedata->mask.perm = 0x02;
2431 dedata->attributes.operation = basepriv->attrib.operation;
2432 dedata->blk_size = blksize;
2433 rc = check_XRC_on_prefix(&pfxdata, basedev);
2434 dedata->ga_extended |= 0x42;
2435 lredata->operation.orientation = 0x0;
2436 lredata->operation.operation = 0x3F;
2437 lredata->extended_operation = 0x23;
2438 lredata->auxiliary.check_bytes = 0x2;
2439 pfx_cmd = DASD_ECKD_CCW_PFX;
2440 break;
2441 default:
2442 DBF_DEV_EVENT(DBF_ERR, basedev,
2443 "prepare itcw, unknown opcode 0x%x", cmd);
2444 BUG();
2445 break;
2446 }
2447 if (rc)
2448 return rc;
2449
2450 dedata->attributes.mode = 0x3; /* ECKD */
2451
2452 heads = basepriv->rdc_data.trk_per_cyl;
2453 begcyl = trk / heads;
2454 beghead = trk % heads;
2455 endcyl = totrk / heads;
2456 endhead = totrk % heads;
2457
2458 /* check for sequential prestage - enhance cylinder range */
2459 if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
2460 dedata->attributes.operation == DASD_SEQ_ACCESS) {
2461
2462 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
2463 endcyl += basepriv->attrib.nr_cyl;
2464 else
2465 endcyl = (basepriv->real_cyl - 1);
2466 }
2467
2468 set_ch_t(&dedata->beg_ext, begcyl, beghead);
2469 set_ch_t(&dedata->end_ext, endcyl, endhead);
2470
2471 dedata->ep_format = 0x20; /* records per track is valid */
2472 dedata->ep_rec_per_track = blk_per_trk;
2473
2474 if (rec_on_trk) {
2475 switch (basepriv->rdc_data.dev_type) {
2476 case 0x3390:
2477 dn = ceil_quot(blksize + 6, 232);
2478 d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
2479 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
2480 break;
2481 case 0x3380:
2482 d = 7 + ceil_quot(blksize + 12, 32);
2483 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
2484 break;
2485 }
2486 }
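	/*
	 * Worked example (illustrative, assuming ceil_quot() is a ceiling
	 * division): on a 3390 with blksize 4096 the formula above gives
	 * dn = ceil_quot(4102, 232) = 18 and d = 9 + ceil_quot(4210, 34)
	 * = 133, so the first record (rec_on_trk == 1) maps to
	 * sector = 49 / 8 = 6.
	 */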
2487
2488 lredata->auxiliary.length_valid = 1;
2489 lredata->auxiliary.length_scope = 1;
2490 lredata->auxiliary.imbedded_ccw_valid = 1;
2491 lredata->length = tlf;
2492 lredata->imbedded_ccw = cmd;
2493 lredata->count = count;
2494 lredata->sector = sector;
2495 set_ch_t(&lredata->seek_addr, begcyl, beghead);
2496 lredata->search_arg.cyl = lredata->seek_addr.cyl;
2497 lredata->search_arg.head = lredata->seek_addr.head;
2498 lredata->search_arg.record = rec_on_trk;
2499
2500 dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
2501 &pfxdata, sizeof(pfxdata), total_data_size);
2502
2503 return rc;
2504}
2505
2506static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2507 struct dasd_device *startdev,
2508 struct dasd_block *block,
2509 struct request *req,
2510 sector_t first_rec,
2511 sector_t last_rec,
2512 sector_t first_trk,
2513 sector_t last_trk,
2514 unsigned int first_offs,
2515 unsigned int last_offs,
2516 unsigned int blk_per_trk,
2517 unsigned int blksize)
2518{
2519 struct dasd_eckd_private *private;
2520 struct dasd_ccw_req *cqr;
2521 struct req_iterator iter;
2522 struct bio_vec *bv;
2523 char *dst;
2524 unsigned int trkcount, ctidaw;
2525 unsigned char cmd;
2526 struct dasd_device *basedev;
2527 unsigned int tlf;
2528 struct itcw *itcw;
2529 struct tidaw *last_tidaw = NULL;
2530 int itcw_op;
2531 size_t itcw_size;
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01002532 u8 tidaw_flags;
2533 unsigned int seg_len, part_len, len_to_track_end;
2534 unsigned char new_track;
2535 sector_t recid, trkid;
2536 unsigned int offs;
2537 unsigned int count, count_to_trk_end;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002538
2539 basedev = block->base;
2540 private = (struct dasd_eckd_private *) basedev->private;
2541 if (rq_data_dir(req) == READ) {
2542 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
2543 itcw_op = ITCW_OP_READ;
2544 } else if (rq_data_dir(req) == WRITE) {
2545 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
2546 itcw_op = ITCW_OP_WRITE;
2547 } else
2548 return ERR_PTR(-EINVAL);
2549
2550 /* track-based I/O needs to address all memory via TIDAWs,
2551 * not just for 64 bit addresses. This allows us to map
2552 * each segment directly to one tidaw.
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01002553 * In the case of write requests, additional tidaws may
2554 * be needed when a segment crosses a track boundary.
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002555 */
2556 trkcount = last_trk - first_trk + 1;
2557 ctidaw = 0;
2558 rq_for_each_segment(bv, req, iter) {
2559 ++ctidaw;
2560 }
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01002561 if (rq_data_dir(req) == WRITE)
2562 ctidaw += (last_trk - first_trk);
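	/*
	 * Illustrative sizing example (values assumed): a write spanning
	 * 3 tracks with 5 bio segments reserves 5 + 2 = 7 tidaws, because
	 * each track boundary may split a segment into an extra tidaw.
	 */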
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002563
2564 /* Allocate the ccw request. */
2565 itcw_size = itcw_calc_size(0, ctidaw, 0);
Stefan Haberland68b781f2009-09-11 10:28:29 +02002566 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002567 if (IS_ERR(cqr))
2568 return cqr;
2569
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002570 /* transfer length factor: how many bytes to read from the last track */
2571 if (first_trk == last_trk)
2572 tlf = last_offs - first_offs + 1;
2573 else
2574 tlf = last_offs + 1;
2575 tlf *= blksize;
2576
2577 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01002578 if (IS_ERR(itcw)) {
2579 dasd_sfree_request(cqr, startdev);
2580 return ERR_PTR(-EINVAL);
2581 }
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002582 cqr->cpaddr = itcw_get_tcw(itcw);
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002583 if (prepare_itcw(itcw, first_trk, last_trk,
2584 cmd, basedev, startdev,
2585 first_offs + 1,
2586 trkcount, blksize,
2587 (last_rec - first_rec + 1) * blksize,
2588 tlf, blk_per_trk) == -EAGAIN) {
2589 /* Clock not in sync and XRC is enabled.
2590 * Try again later.
2591 */
2592 dasd_sfree_request(cqr, startdev);
2593 return ERR_PTR(-EAGAIN);
2594 }
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002595 /*
2596 * A tidaw can address 4k of memory, but must not cross page boundaries.
2597 * We can let the block layer handle this by setting
2598 * blk_queue_segment_boundary to page boundaries and
2599 * blk_max_segment_size to page size when setting up the request queue.
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01002600 * For write requests, a TIDAW must not cross track boundaries, because
2601 * we have to set the CBC flag on the last tidaw for each track.
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002602 */
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01002603 if (rq_data_dir(req) == WRITE) {
2604 new_track = 1;
2605 recid = first_rec;
2606 rq_for_each_segment(bv, req, iter) {
2607 dst = page_address(bv->bv_page) + bv->bv_offset;
2608 seg_len = bv->bv_len;
2609 while (seg_len) {
2610 if (new_track) {
2611 trkid = recid;
2612 offs = sector_div(trkid, blk_per_trk);
2613 count_to_trk_end = blk_per_trk - offs;
2614 count = min((last_rec - recid + 1),
2615 (sector_t)count_to_trk_end);
2616 len_to_track_end = count * blksize;
2617 recid += count;
2618 new_track = 0;
2619 }
2620 part_len = min(seg_len, len_to_track_end);
2621 seg_len -= part_len;
2622 len_to_track_end -= part_len;
2623 /* We need to end the tidaw at track end */
2624 if (!len_to_track_end) {
2625 new_track = 1;
2626 tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
2627 } else
2628 tidaw_flags = 0;
2629 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
2630 dst, part_len);
2631 if (IS_ERR(last_tidaw))
2632 return ERR_PTR(-EINVAL);
2633 dst += part_len;
2634 }
2635 }
2636 } else {
2637 rq_for_each_segment(bv, req, iter) {
2638 dst = page_address(bv->bv_page) + bv->bv_offset;
2639 last_tidaw = itcw_add_tidaw(itcw, 0x00,
2640 dst, bv->bv_len);
2641 if (IS_ERR(last_tidaw))
2642 return ERR_PTR(-EINVAL);
2643 }
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002644 }
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01002645 last_tidaw->flags |= TIDAW_FLAGS_LAST;
2646 last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002647 itcw_finalize(itcw);
2648
2649 if (blk_noretry_request(req) ||
2650 block->base->features & DASD_FEATURE_FAILFAST)
2651 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01002652 cqr->cpmode = 1;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002653 cqr->startdev = startdev;
2654 cqr->memdev = startdev;
2655 cqr->block = block;
Stefan Haberland7c8faa82010-08-09 18:13:00 +02002656 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
Stefan Weinhubera4d26c62011-01-05 12:48:03 +01002657 cqr->lpm = startdev->path_data.ppm;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002658 cqr->retries = 256;
2659 cqr->buildclk = get_clock();
2660 cqr->status = DASD_CQR_FILLED;
2661 return cqr;
2662}
2663
2664static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2665 struct dasd_block *block,
2666 struct request *req)
2667{
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01002668 int cmdrtd, cmdwtd;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002669 int use_prefix;
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01002670 int fcx_multitrack;
Stefan Weinhuber45b44d72009-06-12 10:26:36 +02002671 struct dasd_eckd_private *private;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002672 struct dasd_device *basedev;
2673 sector_t first_rec, last_rec;
2674 sector_t first_trk, last_trk;
2675 unsigned int first_offs, last_offs;
2676 unsigned int blk_per_trk, blksize;
2677 int cdlspecial;
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01002678 unsigned int data_size;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002679 struct dasd_ccw_req *cqr;
2680
2681 basedev = block->base;
2682 private = (struct dasd_eckd_private *) basedev->private;
2683
2684 /* Calculate number of blocks/records per track. */
2685 blksize = block->bp_block;
2686 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
Stefan Haberland6fca97a2009-10-06 10:34:15 +02002687 if (blk_per_trk == 0)
2688 return ERR_PTR(-EINVAL);
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002689 /* Calculate record id of first and last block. */
Tejun Heo83096eb2009-05-07 22:24:39 +09002690 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002691 first_offs = sector_div(first_trk, blk_per_trk);
2692 last_rec = last_trk =
Tejun Heo83096eb2009-05-07 22:24:39 +09002693 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002694 last_offs = sector_div(last_trk, blk_per_trk);
2695 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
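	/*
	 * Illustrative example (assumed values): with a 4KB block size
	 * (s2b_shift == 3) and blk_per_trk == 12, a request starting at
	 * 512-byte sector 800 gives first_rec == 100, and sector_div()
	 * splits that into first_trk == 8 with first_offs == 4.
	 */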
2696
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01002697 fcx_multitrack = private->features.feature[40] & 0x20;
2698 data_size = blk_rq_bytes(req);
2699 /* tpm write requests add CBC data on each track boundary */
2700 if (rq_data_dir(req) == WRITE)
2701 data_size += (last_trk - first_trk) * 4;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002702
2703 /* is read track data and write track data in command mode supported? */
2704 cmdrtd = private->features.feature[9] & 0x20;
2705 cmdwtd = private->features.feature[12] & 0x40;
2706 use_prefix = private->features.feature[8] & 0x01;
2707
2708 cqr = NULL;
2709 if (cdlspecial || dasd_page_cache) {
2710 /* do nothing, just fall through to the cmd mode single case */
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01002711 } else if ((data_size <= private->fcx_max_data)
2712 && (fcx_multitrack || (first_trk == last_trk))) {
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002713 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
2714 first_rec, last_rec,
2715 first_trk, last_trk,
2716 first_offs, last_offs,
2717 blk_per_trk, blksize);
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01002718 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
2719 (PTR_ERR(cqr) != -ENOMEM))
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002720 cqr = NULL;
2721 } else if (use_prefix &&
2722 (((rq_data_dir(req) == READ) && cmdrtd) ||
2723 ((rq_data_dir(req) == WRITE) && cmdwtd))) {
2724 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
2725 first_rec, last_rec,
2726 first_trk, last_trk,
2727 first_offs, last_offs,
2728 blk_per_trk, blksize);
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01002729 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
2730 (PTR_ERR(cqr) != -ENOMEM))
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002731 cqr = NULL;
2732 }
2733 if (!cqr)
2734 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
2735 first_rec, last_rec,
2736 first_trk, last_trk,
2737 first_offs, last_offs,
2738 blk_per_trk, blksize);
2739 return cqr;
2740}
2741
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742static int
2743dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
2744{
2745 struct dasd_eckd_private *private;
2746 struct ccw1 *ccw;
NeilBrown5705f702007-09-25 12:35:59 +02002747 struct req_iterator iter;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748 struct bio_vec *bv;
2749 char *dst, *cda;
2750 unsigned int blksize, blk_per_trk, off;
2751 sector_t recid;
NeilBrown5705f702007-09-25 12:35:59 +02002752 int status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753
2754 if (!dasd_page_cache)
2755 goto out;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002756 private = (struct dasd_eckd_private *) cqr->block->base->private;
2757 blksize = cqr->block->bp_block;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
Tejun Heo83096eb2009-05-07 22:24:39 +09002759 recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760 ccw = cqr->cpaddr;
2761 /* Skip over define extent & locate record. */
2762 ccw++;
2763 if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
2764 ccw++;
NeilBrown5705f702007-09-25 12:35:59 +02002765 rq_for_each_segment(bv, req, iter) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766 dst = page_address(bv->bv_page) + bv->bv_offset;
2767 for (off = 0; off < bv->bv_len; off += blksize) {
2768 /* Skip locate record. */
2769 if (private->uses_cdl && recid <= 2*blk_per_trk)
2770 ccw++;
2771 if (dst) {
2772 if (ccw->flags & CCW_FLAG_IDA)
2773 cda = *((char **)((addr_t) ccw->cda));
2774 else
2775 cda = (char *)((addr_t) ccw->cda);
2776 if (dst != cda) {
2777 if (rq_data_dir(req) == READ)
2778 memcpy(dst, cda, bv->bv_len);
2779 kmem_cache_free(dasd_page_cache,
2780 (void *)((addr_t)cda & PAGE_MASK));
2781 }
2782 dst = NULL;
2783 }
2784 ccw++;
2785 recid++;
2786 }
2787 }
2788out:
2789 status = cqr->status == DASD_CQR_DONE;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002790 dasd_sfree_request(cqr, cqr->memdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791 return status;
2792}
2793
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002794/*
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002795 * Modify ccw/tcw in cqr so it can be started on a base device.
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002796 *
2797 * Note that this is not enough to restart the cqr!
2798 * Either reset cqr->startdev as well (summary unit check handling)
2799 * or restart via separate cqr (as in ERP handling).
2800 */
2801void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
2802{
2803 struct ccw1 *ccw;
2804 struct PFX_eckd_data *pfxdata;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002805 struct tcw *tcw;
2806 struct tccb *tccb;
2807 struct dcw *dcw;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002808
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002809 if (cqr->cpmode == 1) {
2810 tcw = cqr->cpaddr;
2811 tccb = tcw_get_tccb(tcw);
2812 dcw = (struct dcw *)&tccb->tca[0];
2813 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002814 pfxdata->validity.verify_base = 0;
2815 pfxdata->validity.hyper_pav = 0;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002816 } else {
2817 ccw = cqr->cpaddr;
2818 pfxdata = cqr->data;
2819 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
2820 pfxdata->validity.verify_base = 0;
2821 pfxdata->validity.hyper_pav = 0;
2822 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002823 }
2824}
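/*
 * Editor's sketch, not part of the driver: how a caller could act on the
 * note above. dasd_eckd_reset_ccw_to_base_io() only clears the
 * verify_base/hyper_pav validity bits in the prefix data; to actually
 * restart the request on the base device the start device must be reset
 * as well (summary unit check handling) or a separate ERP cqr used. The
 * helper name below is illustrative only.
 */
static void dasd_eckd_redirect_cqr_to_base(struct dasd_ccw_req *cqr,
					   struct dasd_device *base)
{
	dasd_eckd_reset_ccw_to_base_io(cqr);	/* patch PFX validity bits */
	cqr->startdev = base;			/* run on the base device */
	/* the caller is still responsible for requeueing the cqr */
}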
2825
2826#define DASD_ECKD_CHANQ_MAX_SIZE 4
2827
2828static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
2829 struct dasd_block *block,
2830 struct request *req)
2831{
2832 struct dasd_eckd_private *private;
2833 struct dasd_device *startdev;
2834 unsigned long flags;
2835 struct dasd_ccw_req *cqr;
2836
2837 startdev = dasd_alias_get_start_dev(base);
2838 if (!startdev)
2839 startdev = base;
2840 private = (struct dasd_eckd_private *) startdev->private;
2841 if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
2842 return ERR_PTR(-EBUSY);
2843
2844 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
2845 private->count++;
2846 cqr = dasd_eckd_build_cp(startdev, block, req);
2847 if (IS_ERR(cqr))
2848 private->count--;
2849 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
2850 return cqr;
2851}
2852
2853static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
2854 struct request *req)
2855{
2856 struct dasd_eckd_private *private;
2857 unsigned long flags;
2858
2859 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
2860 private = (struct dasd_eckd_private *) cqr->memdev->private;
2861 private->count--;
2862 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
2863 return dasd_eckd_free_cp(cqr, req);
2864}
2865
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866static int
2867dasd_eckd_fill_info(struct dasd_device * device,
2868 struct dasd_information2_t * info)
2869{
2870 struct dasd_eckd_private *private;
2871
2872 private = (struct dasd_eckd_private *) device->private;
2873 info->label_block = 2;
2874 info->FBA_layout = private->uses_cdl ? 0 : 1;
2875 info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
2876 info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
2877 memcpy(info->characteristics, &private->rdc_data,
2878 sizeof(struct dasd_eckd_characteristics));
Stefan Weinhuber4abb08c2008-08-01 16:39:09 +02002879 info->confdata_size = min((unsigned long)private->conf_len,
2880 sizeof(info->configuration_data));
2881 memcpy(info->configuration_data, private->conf_data,
2882 info->confdata_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883 return 0;
2884}
2885
2886/*
2887 * SECTION: ioctl functions for eckd devices.
2888 */
2889
2890/*
2891 * Release device ioctl.
Horst Hummel138c0142006-06-29 14:58:12 +02002892 * Builds a channel program to release a prior reserved
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893 * (see dasd_eckd_reserve) device.
2894 */
2895static int
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08002896dasd_eckd_release(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002897{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898 struct dasd_ccw_req *cqr;
2899 int rc;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002900 struct ccw1 *ccw;
Stefan Weinhuberf932bce2010-08-09 18:12:59 +02002901 int useglobal;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902
2903 if (!capable(CAP_SYS_ADMIN))
2904 return -EACCES;
2905
Stefan Weinhuberf932bce2010-08-09 18:12:59 +02002906 useglobal = 0;
Stefan Haberland68b781f2009-09-11 10:28:29 +02002907 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908 if (IS_ERR(cqr)) {
Stefan Weinhuberf932bce2010-08-09 18:12:59 +02002909 mutex_lock(&dasd_reserve_mutex);
2910 useglobal = 1;
2911 cqr = &dasd_reserve_req->cqr;
2912 memset(cqr, 0, sizeof(*cqr));
2913 memset(&dasd_reserve_req->ccw, 0,
2914 sizeof(dasd_reserve_req->ccw));
2915 cqr->cpaddr = &dasd_reserve_req->ccw;
2916 cqr->data = &dasd_reserve_req->data;
2917 cqr->magic = DASD_ECKD_MAGIC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918 }
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002919 ccw = cqr->cpaddr;
2920 ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
2921 ccw->flags |= CCW_FLAG_SLI;
2922 ccw->count = 32;
2923 ccw->cda = (__u32)(addr_t) cqr->data;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002924 cqr->startdev = device;
2925 cqr->memdev = device;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
Horst Hummel1c01b8a2006-01-06 00:19:15 -08002927 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
Horst Hummel336c3402007-02-05 21:17:24 +01002928 cqr->retries = 2; /* set retry counter to enable basic ERP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929 cqr->expires = 2 * HZ;
2930 cqr->buildclk = get_clock();
2931 cqr->status = DASD_CQR_FILLED;
2932
2933 rc = dasd_sleep_on_immediatly(cqr);
2934
Stefan Weinhuberf932bce2010-08-09 18:12:59 +02002935 if (useglobal)
2936 mutex_unlock(&dasd_reserve_mutex);
2937 else
2938 dasd_sfree_request(cqr, cqr->memdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002939 return rc;
2940}
2941
2942/*
2943 * Reserve device ioctl.
2944 * Options are set to 'synchronous wait for interrupt' and
Horst Hummel138c0142006-06-29 14:58:12 +02002945 * 'timeout the request'. This leads to a terminate IO if
2946 * the interrupt is outstanding for a certain time.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947 */
2948static int
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08002949dasd_eckd_reserve(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951 struct dasd_ccw_req *cqr;
2952 int rc;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002953 struct ccw1 *ccw;
Stefan Weinhuberf932bce2010-08-09 18:12:59 +02002954 int useglobal;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955
2956 if (!capable(CAP_SYS_ADMIN))
2957 return -EACCES;
2958
Stefan Weinhuberf932bce2010-08-09 18:12:59 +02002959 useglobal = 0;
Stefan Haberland68b781f2009-09-11 10:28:29 +02002960 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961 if (IS_ERR(cqr)) {
Stefan Weinhuberf932bce2010-08-09 18:12:59 +02002962 mutex_lock(&dasd_reserve_mutex);
2963 useglobal = 1;
2964 cqr = &dasd_reserve_req->cqr;
2965 memset(cqr, 0, sizeof(*cqr));
2966 memset(&dasd_reserve_req->ccw, 0,
2967 sizeof(dasd_reserve_req->ccw));
2968 cqr->cpaddr = &dasd_reserve_req->ccw;
2969 cqr->data = &dasd_reserve_req->data;
2970 cqr->magic = DASD_ECKD_MAGIC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971 }
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01002972 ccw = cqr->cpaddr;
2973 ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
2974 ccw->flags |= CCW_FLAG_SLI;
2975 ccw->count = 32;
2976 ccw->cda = (__u32)(addr_t) cqr->data;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01002977 cqr->startdev = device;
2978 cqr->memdev = device;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
Horst Hummel1c01b8a2006-01-06 00:19:15 -08002980 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
Horst Hummel336c3402007-02-05 21:17:24 +01002981 cqr->retries = 2; /* set retry counter to enable basic ERP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982 cqr->expires = 2 * HZ;
2983 cqr->buildclk = get_clock();
2984 cqr->status = DASD_CQR_FILLED;
2985
2986 rc = dasd_sleep_on_immediatly(cqr);
2987
Stefan Weinhuberf932bce2010-08-09 18:12:59 +02002988 if (useglobal)
2989 mutex_unlock(&dasd_reserve_mutex);
2990 else
2991 dasd_sfree_request(cqr, cqr->memdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992 return rc;
2993}
2994
2995/*
2996 * Steal lock ioctl - unconditional reserve device.
Horst Hummel138c0142006-06-29 14:58:12 +02002997 * Builds a channel program to break a device's reservation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002998 * (unconditional reserve)
2999 */
3000static int
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08003001dasd_eckd_steal_lock(struct dasd_device *device)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003002{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003003 struct dasd_ccw_req *cqr;
3004 int rc;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01003005 struct ccw1 *ccw;
Stefan Weinhuberf932bce2010-08-09 18:12:59 +02003006 int useglobal;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003007
3008 if (!capable(CAP_SYS_ADMIN))
3009 return -EACCES;
3010
Stefan Weinhuberf932bce2010-08-09 18:12:59 +02003011 useglobal = 0;
Stefan Haberland68b781f2009-09-11 10:28:29 +02003012 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003013 if (IS_ERR(cqr)) {
Stefan Weinhuberf932bce2010-08-09 18:12:59 +02003014 mutex_lock(&dasd_reserve_mutex);
3015 useglobal = 1;
3016 cqr = &dasd_reserve_req->cqr;
3017 memset(cqr, 0, sizeof(*cqr));
3018 memset(&dasd_reserve_req->ccw, 0,
3019 sizeof(dasd_reserve_req->ccw));
3020 cqr->cpaddr = &dasd_reserve_req->ccw;
3021 cqr->data = &dasd_reserve_req->data;
3022 cqr->magic = DASD_ECKD_MAGIC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003023 }
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01003024 ccw = cqr->cpaddr;
3025 ccw->cmd_code = DASD_ECKD_CCW_SLCK;
3026 ccw->flags |= CCW_FLAG_SLI;
3027 ccw->count = 32;
3028 ccw->cda = (__u32)(addr_t) cqr->data;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003029 cqr->startdev = device;
3030 cqr->memdev = device;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
Horst Hummel1c01b8a2006-01-06 00:19:15 -08003032 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
Horst Hummel336c3402007-02-05 21:17:24 +01003033 cqr->retries = 2; /* set retry counter to enable basic ERP */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003034 cqr->expires = 2 * HZ;
3035 cqr->buildclk = get_clock();
3036 cqr->status = DASD_CQR_FILLED;
3037
3038 rc = dasd_sleep_on_immediatly(cqr);
3039
Stefan Weinhuberf932bce2010-08-09 18:12:59 +02003040 if (useglobal)
3041 mutex_unlock(&dasd_reserve_mutex);
3042 else
3043 dasd_sfree_request(cqr, cqr->memdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044 return rc;
3045}
3046
3047/*
Stefan Weinhuber196339f2010-10-29 16:50:43 +02003048 * SNID - Sense Path Group ID
3049 * This ioctl may be used in situations where I/O is stalled due to
3050 * a reserve, so if the normal dasd_smalloc_request fails, we use the
3051 * preallocated dasd_reserve_req.
3052 */
3053static int dasd_eckd_snid(struct dasd_device *device,
3054 void __user *argp)
3055{
3056 struct dasd_ccw_req *cqr;
3057 int rc;
3058 struct ccw1 *ccw;
3059 int useglobal;
3060 struct dasd_snid_ioctl_data usrparm;
3061
3062 if (!capable(CAP_SYS_ADMIN))
3063 return -EACCES;
3064
3065 if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
3066 return -EFAULT;
3067
3068 useglobal = 0;
3069 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
3070 sizeof(struct dasd_snid_data), device);
3071 if (IS_ERR(cqr)) {
3072 mutex_lock(&dasd_reserve_mutex);
3073 useglobal = 1;
3074 cqr = &dasd_reserve_req->cqr;
3075 memset(cqr, 0, sizeof(*cqr));
3076 memset(&dasd_reserve_req->ccw, 0,
3077 sizeof(dasd_reserve_req->ccw));
3078 cqr->cpaddr = &dasd_reserve_req->ccw;
3079 cqr->data = &dasd_reserve_req->data;
3080 cqr->magic = DASD_ECKD_MAGIC;
3081 }
3082 ccw = cqr->cpaddr;
3083 ccw->cmd_code = DASD_ECKD_CCW_SNID;
3084 ccw->flags |= CCW_FLAG_SLI;
3085 ccw->count = 12;
3086 ccw->cda = (__u32)(addr_t) cqr->data;
3087 cqr->startdev = device;
3088 cqr->memdev = device;
3089 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
3090 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
3091 cqr->retries = 5;
3092 cqr->expires = 10 * HZ;
3093 cqr->buildclk = get_clock();
3094 cqr->status = DASD_CQR_FILLED;
3095 cqr->lpm = usrparm.path_mask;
3096
3097 rc = dasd_sleep_on_immediatly(cqr);
3098 /* verify that I/O processing didn't modify the path mask */
3099 if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
3100 rc = -EIO;
3101 if (!rc) {
3102 usrparm.data = *((struct dasd_snid_data *)cqr->data);
3103 if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
3104 rc = -EFAULT;
3105 }
3106
3107 if (useglobal)
3108 mutex_unlock(&dasd_reserve_mutex);
3109 else
3110 dasd_sfree_request(cqr, cqr->memdev);
3111 return rc;
3112}
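/*
 * Editor's sketch of a user-space caller for the SNID ioctl above; it is
 * not taken from the kernel tree. Assumptions: BIODASDSNID and struct
 * dasd_snid_ioctl_data are available via <asm/dasd.h>, path mask 0x80
 * addresses the first channel path, and the device node name passed in
 * is something like "/dev/dasda". CAP_SYS_ADMIN is required, as enforced
 * above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/dasd.h>

int snid_first_path(const char *node)
{
	struct dasd_snid_ioctl_data parm = { .path_mask = 0x80 };
	int fd = open(node, O_RDONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, BIODASDSNID, &parm) < 0) {	/* SNID on one path */
		perror("BIODASDSNID");
		close(fd);
		return -1;
	}
	/* parm.data now holds the sense path group ID response */
	close(fd);
	return 0;
}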
3113
3114/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003115 * Read performance statistics
3116 */
3117static int
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08003118dasd_eckd_performance(struct dasd_device *device, void __user *argp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003119{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003120 struct dasd_psf_prssd_data *prssdp;
3121 struct dasd_rssd_perf_stats_t *stats;
3122 struct dasd_ccw_req *cqr;
3123 struct ccw1 *ccw;
3124 int rc;
3125
Stefan Haberland68b781f2009-09-11 10:28:29 +02003126 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003127 (sizeof(struct dasd_psf_prssd_data) +
3128 sizeof(struct dasd_rssd_perf_stats_t)),
Linus Torvalds1da177e2005-04-16 15:20:36 -07003129 device);
3130 if (IS_ERR(cqr)) {
Stefan Haberlandfc19f382009-03-26 15:23:49 +01003131 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003132 "Could not allocate initialization request");
3133 return PTR_ERR(cqr);
3134 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003135 cqr->startdev = device;
3136 cqr->memdev = device;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003137 cqr->retries = 0;
Stefan Weinhubereb6e1992009-12-07 12:51:51 +01003138 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003139 cqr->expires = 10 * HZ;
3140
3141 /* Prepare for Read Subsystem Data */
3142 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003143 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003144 prssdp->order = PSF_ORDER_PRSSD;
Joe Perches5d67d162008-01-26 14:11:20 +01003145 prssdp->suborder = 0x01; /* Performance Statistics */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003146 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */
3147
3148 ccw = cqr->cpaddr;
3149 ccw->cmd_code = DASD_ECKD_CCW_PSF;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003150 ccw->count = sizeof(struct dasd_psf_prssd_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151 ccw->flags |= CCW_FLAG_CC;
3152 ccw->cda = (__u32)(addr_t) prssdp;
3153
3154 /* Read Subsystem Data - Performance Statistics */
3155 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003156 memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157
3158 ccw++;
3159 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003160 ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003161 ccw->cda = (__u32)(addr_t) stats;
3162
3163 cqr->buildclk = get_clock();
3164 cqr->status = DASD_CQR_FILLED;
3165 rc = dasd_sleep_on(cqr);
3166 if (rc == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
3168 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08003169 if (copy_to_user(argp, stats,
3170 sizeof(struct dasd_rssd_perf_stats_t)))
3171 rc = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003172 }
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003173 dasd_sfree_request(cqr, cqr->memdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003174 return rc;
3175}
3176
3177/*
3178 * Get attributes (cache operations)
3179 * Returns the cache attributes used in Define Extend (DE).
3180 */
3181static int
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08003182dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003183{
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08003184 struct dasd_eckd_private *private =
3185 (struct dasd_eckd_private *)device->private;
3186 struct attrib_data_t attrib = private->attrib;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003187 int rc;
3188
3189 if (!capable(CAP_SYS_ADMIN))
3190 return -EACCES;
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08003191 if (!argp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003192 return -EINVAL;
3193
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08003194 rc = 0;
3195 if (copy_to_user(argp, (long *) &attrib,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003196 sizeof(struct attrib_data_t)))
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08003197 rc = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003198
3199 return rc;
3200}
3201
3202/*
3203 * Set attributes (cache operations)
3204 * Stores the attributes for cache operation to be used in Define Extend (DE).
3205 */
3206static int
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08003207dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208{
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08003209 struct dasd_eckd_private *private =
3210 (struct dasd_eckd_private *)device->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003211 struct attrib_data_t attrib;
3212
3213 if (!capable(CAP_SYS_ADMIN))
3214 return -EACCES;
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08003215 if (!argp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003216 return -EINVAL;
3217
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08003218 if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003219 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003220 private->attrib = attrib;
3221
Stefan Haberlandfc19f382009-03-26 15:23:49 +01003222 dev_info(&device->cdev->dev,
3223 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
3224 private->attrib.operation, private->attrib.nr_cyl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003225 return 0;
3226}
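/*
 * Editor's sketch (not from the kernel tree) pairing the two cache
 * attribute ioctls above. Assumptions: BIODASDGATTR/BIODASDSATTR and
 * struct attrib_data_t, with the operation and nr_cyl members used in
 * the dev_info message above, come from <asm/dasd.h>; the device node
 * name is supplied by the caller. The sketch reads the current
 * attributes, requests one more cylinder of prestaging and writes them
 * back.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/dasd.h>

int grow_prestage(const char *node)
{
	struct attrib_data_t attrib;
	int fd = open(node, O_RDONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, BIODASDGATTR, &attrib) < 0) {	/* current DE attributes */
		close(fd);
		return -1;
	}
	attrib.nr_cyl++;				/* one more prestage cylinder */
	if (ioctl(fd, BIODASDSATTR, &attrib) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}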
3227
Nigel Hislopab1d8482008-10-10 21:33:25 +02003228/*
3229 * Issue syscall I/O to EMC Symmetrix array.
3230 * CCWs are PSF and RSSD
3231 */
3232static int dasd_symm_io(struct dasd_device *device, void __user *argp)
3233{
3234 struct dasd_symmio_parms usrparm;
3235 char *psf_data, *rssd_result;
3236 struct dasd_ccw_req *cqr;
3237 struct ccw1 *ccw;
Nigel Hislop52898022010-03-08 12:25:16 +01003238 char psf0, psf1;
Nigel Hislopab1d8482008-10-10 21:33:25 +02003239 int rc;
3240
Nigel Hislop52898022010-03-08 12:25:16 +01003241 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
3242 return -EACCES;
3243 psf0 = psf1 = 0;
3244
Nigel Hislopab1d8482008-10-10 21:33:25 +02003245 /* Copy parms from caller */
3246 rc = -EFAULT;
3247 if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
3248 goto out;
Heiko Carstensf8b068592010-01-13 20:44:40 +01003249 if (is_compat_task() || sizeof(long) == 4) {
3250 /* Make sure pointers are sane even on 31 bit. */
Nigel Hislopab1d8482008-10-10 21:33:25 +02003251 rc = -EINVAL;
Heiko Carstensf8b068592010-01-13 20:44:40 +01003252 if ((usrparm.psf_data >> 32) != 0)
3253 goto out;
3254 if ((usrparm.rssd_result >> 32) != 0)
3255 goto out;
3256 usrparm.psf_data &= 0x7fffffffULL;
3257 usrparm.rssd_result &= 0x7fffffffULL;
Nigel Hislopab1d8482008-10-10 21:33:25 +02003258 }
Nigel Hislopab1d8482008-10-10 21:33:25 +02003259 /* alloc I/O data area */
3260 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
3261 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
3262 if (!psf_data || !rssd_result) {
3263 rc = -ENOMEM;
3264 goto out_free;
3265 }
3266
3267 /* get syscall header from user space */
3268 rc = -EFAULT;
3269 if (copy_from_user(psf_data,
3270 (void __user *)(unsigned long) usrparm.psf_data,
3271 usrparm.psf_data_len))
3272 goto out_free;
Nigel Hislop52898022010-03-08 12:25:16 +01003273 psf0 = psf_data[0];
3274 psf1 = psf_data[1];
Nigel Hislopab1d8482008-10-10 21:33:25 +02003275
3276 /* setup CCWs for PSF + RSSD */
Stefan Haberland68b781f2009-09-11 10:28:29 +02003277 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device);
Nigel Hislopab1d8482008-10-10 21:33:25 +02003278 if (IS_ERR(cqr)) {
Stefan Haberlandfc19f382009-03-26 15:23:49 +01003279 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
Nigel Hislopab1d8482008-10-10 21:33:25 +02003280 "Could not allocate initialization request");
3281 rc = PTR_ERR(cqr);
3282 goto out_free;
3283 }
3284
3285 cqr->startdev = device;
3286 cqr->memdev = device;
3287 cqr->retries = 3;
3288 cqr->expires = 10 * HZ;
3289 cqr->buildclk = get_clock();
3290 cqr->status = DASD_CQR_FILLED;
3291
3292 /* Build the ccws */
3293 ccw = cqr->cpaddr;
3294
3295 /* PSF ccw */
3296 ccw->cmd_code = DASD_ECKD_CCW_PSF;
3297 ccw->count = usrparm.psf_data_len;
3298 ccw->flags |= CCW_FLAG_CC;
3299 ccw->cda = (__u32)(addr_t) psf_data;
3300
3301 ccw++;
3302
3303 /* RSSD ccw */
3304 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
3305 ccw->count = usrparm.rssd_result_len;
3306 ccw->flags = CCW_FLAG_SLI ;
3307 ccw->cda = (__u32)(addr_t) rssd_result;
3308
3309 rc = dasd_sleep_on(cqr);
3310 if (rc)
3311 goto out_sfree;
3312
3313 rc = -EFAULT;
3314 if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
3315 rssd_result, usrparm.rssd_result_len))
3316 goto out_sfree;
3317 rc = 0;
3318
3319out_sfree:
3320 dasd_sfree_request(cqr, cqr->memdev);
3321out_free:
3322 kfree(rssd_result);
3323 kfree(psf_data);
3324out:
Nigel Hislop52898022010-03-08 12:25:16 +01003325 DBF_DEV_EVENT(DBF_WARNING, device,
3326 "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
3327 (int) psf0, (int) psf1, rc);
Nigel Hislopab1d8482008-10-10 21:33:25 +02003328 return rc;
3329}
3330
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08003331static int
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003332dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08003333{
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003334 struct dasd_device *device = block->base;
3335
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08003336 switch (cmd) {
3337 case BIODASDGATTR:
3338 return dasd_eckd_get_attrib(device, argp);
3339 case BIODASDSATTR:
3340 return dasd_eckd_set_attrib(device, argp);
3341 case BIODASDPSRD:
3342 return dasd_eckd_performance(device, argp);
3343 case BIODASDRLSE:
3344 return dasd_eckd_release(device);
3345 case BIODASDRSRV:
3346 return dasd_eckd_reserve(device);
3347 case BIODASDSLCK:
3348 return dasd_eckd_steal_lock(device);
Stefan Weinhuber196339f2010-10-29 16:50:43 +02003349 case BIODASDSNID:
3350 return dasd_eckd_snid(device, argp);
Nigel Hislopab1d8482008-10-10 21:33:25 +02003351 case BIODASDSYMMIO:
3352 return dasd_symm_io(device, argp);
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08003353 default:
3354 return -ENOIOCTLCMD;
3355 }
3356}
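/*
 * Editor's sketch (not from the kernel tree) of the reserve/release pair
 * dispatched above. BIODASDRSRV, BIODASDRLSE and BIODASDSLCK take no
 * argument, so the third ioctl parameter is simply omitted. Assumptions:
 * the ioctl numbers come from <asm/dasd.h> and the device node name is
 * supplied by the caller; CAP_SYS_ADMIN is required.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/dasd.h>

int with_reserved_dasd(const char *node, int (*work)(int fd))
{
	int rc, fd = open(node, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, BIODASDRSRV) < 0) {	/* reserve the device */
		close(fd);
		return -1;
	}
	rc = work(fd);				/* exclusive access here */
	ioctl(fd, BIODASDRLSE);			/* release the reservation */
	close(fd);
	return rc;
}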
3357
Linus Torvalds1da177e2005-04-16 15:20:36 -07003358/*
Horst Hummel445b5b42006-06-29 14:57:52 +02003359 * Dump the range of CCWs into 'page' buffer
3360 * and return number of printed chars.
3361 */
Heiko Carstens4d284ca2007-02-05 21:18:53 +01003362static int
Horst Hummel445b5b42006-06-29 14:57:52 +02003363dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
3364{
3365 int len, count;
3366 char *datap;
3367
3368 len = 0;
3369 while (from <= to) {
3370 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3371 " CCW %p: %08X %08X DAT:",
3372 from, ((int *) from)[0], ((int *) from)[1]);
3373
3374 /* get pointer to data (consider IDALs) */
3375 if (from->flags & CCW_FLAG_IDA)
3376 datap = (char *) *((addr_t *) (addr_t) from->cda);
3377 else
3378 datap = (char *) ((addr_t) from->cda);
3379
3380 /* dump data (max 32 bytes) */
3381 for (count = 0; count < from->count && count < 32; count++) {
3382 if (count % 8 == 0) len += sprintf(page + len, " ");
3383 if (count % 4 == 0) len += sprintf(page + len, " ");
3384 len += sprintf(page + len, "%02x", datap[count]);
3385 }
3386 len += sprintf(page + len, "\n");
3387 from++;
3388 }
3389 return len;
3390}
3391
Stefan Haberlandfc19f382009-03-26 15:23:49 +01003392static void
Stefan Haberlandaeec92c2009-07-07 16:37:06 +02003393dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
3394 char *reason)
Stefan Haberlandfc19f382009-03-26 15:23:49 +01003395{
3396 u64 *sense;
Stefan Weinhubera5a00612010-10-25 16:10:47 +02003397 u64 *stat;
Stefan Haberlandfc19f382009-03-26 15:23:49 +01003398
Stefan Haberlandaeec92c2009-07-07 16:37:06 +02003399 sense = (u64 *) dasd_get_sense(irb);
Stefan Weinhubera5a00612010-10-25 16:10:47 +02003400 stat = (u64 *) &irb->scsw;
Stefan Haberlandfc19f382009-03-26 15:23:49 +01003401 if (sense) {
Stefan Weinhubera5a00612010-10-25 16:10:47 +02003402 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
3403 "%016llx %016llx %016llx %016llx",
3404 reason, *stat, *((u32 *) (stat + 1)),
Stefan Haberlanded3640b2010-10-25 16:10:24 +02003405 sense[0], sense[1], sense[2], sense[3]);
Stefan Haberlandfc19f382009-03-26 15:23:49 +01003406 } else {
Stefan Weinhubera5a00612010-10-25 16:10:47 +02003407 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
3408 reason, *stat, *((u32 *) (stat + 1)),
3409 "NO VALID SENSE");
Stefan Haberlandfc19f382009-03-26 15:23:49 +01003410 }
3411}
3412
Horst Hummel445b5b42006-06-29 14:57:52 +02003413/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07003414 * Print sense data and related channel program.
3415 * Parts are printed because the printk buffer is only 1024 bytes.
3416 */
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01003417static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003418 struct dasd_ccw_req *req, struct irb *irb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419{
3420 char *page;
Horst Hummel445b5b42006-06-29 14:57:52 +02003421 struct ccw1 *first, *last, *fail, *from, *to;
3422 int len, sl, sct;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003423
3424 page = (char *) get_zeroed_page(GFP_ATOMIC);
3425 if (page == NULL) {
Stefan Haberlandfc19f382009-03-26 15:23:49 +01003426 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3427 "No memory to dump sense data\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003428 return;
3429 }
Horst Hummel445b5b42006-06-29 14:57:52 +02003430 /* dump the sense data */
3431 len = sprintf(page, KERN_ERR PRINTK_HEADER
Linus Torvalds1da177e2005-04-16 15:20:36 -07003432 " I/O status report for device %s:\n",
Kay Sievers2a0217d2008-10-10 21:33:09 +02003433 dev_name(&device->cdev->dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
Stefan Weinhubera5a00612010-10-25 16:10:47 +02003435 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
3436 "CS:%02X RC:%d\n",
3437 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
3438 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
3439 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
3440 req ? req->intrc : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003441 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3442 " device %s: Failing CCW: %p\n",
Kay Sievers2a0217d2008-10-10 21:33:09 +02003443 dev_name(&device->cdev->dev),
Peter Oberparleiter23d805b2008-07-14 09:58:50 +02003444 (void *) (addr_t) irb->scsw.cmd.cpa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445 if (irb->esw.esw0.erw.cons) {
3446 for (sl = 0; sl < 4; sl++) {
3447 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3448 " Sense(hex) %2d-%2d:",
3449 (8 * sl), ((8 * sl) + 7));
3450
3451 for (sct = 0; sct < 8; sct++) {
3452 len += sprintf(page + len, " %02x",
3453 irb->ecw[8 * sl + sct]);
3454 }
3455 len += sprintf(page + len, "\n");
3456 }
3457
3458 if (irb->ecw[27] & DASD_SENSE_BIT_0) {
3459 /* 24 Byte Sense Data */
Horst Hummel445b5b42006-06-29 14:57:52 +02003460 sprintf(page + len, KERN_ERR PRINTK_HEADER
3461 " 24 Byte: %x MSG %x, "
3462 "%s MSGb to SYSOP\n",
3463 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
3464 irb->ecw[1] & 0x10 ? "" : "no");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465 } else {
3466 /* 32 Byte Sense Data */
Horst Hummel445b5b42006-06-29 14:57:52 +02003467 sprintf(page + len, KERN_ERR PRINTK_HEADER
3468 " 32 Byte: Format: %x "
3469 "Exception class %x\n",
3470 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471 }
3472 } else {
Horst Hummel445b5b42006-06-29 14:57:52 +02003473 sprintf(page + len, KERN_ERR PRINTK_HEADER
3474 " SORRY - NO VALID SENSE AVAILABLE\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003475 }
Horst Hummel445b5b42006-06-29 14:57:52 +02003476 printk("%s", page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003477
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003478 if (req) {
3479 /* req == NULL for unsolicited interrupts */
3480 /* dump the Channel Program (max 140 Bytes per line) */
3481 /* Count CCWs and print first CCWs (maximum 1024 / 140 = 7) */
3482 first = req->cpaddr;
3483 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
3484 to = min(first + 6, last);
3485 len = sprintf(page, KERN_ERR PRINTK_HEADER
3486 " Related CP in req: %p\n", req);
3487 dasd_eckd_dump_ccw_range(first, to, page + len);
Horst Hummel445b5b42006-06-29 14:57:52 +02003488 printk("%s", page);
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003489
3490 /* print failing CCW area (maximum 4) */
3491 /* scsw->cda is either valid or zero */
3492 len = 0;
3493 from = ++to;
Peter Oberparleiter23d805b2008-07-14 09:58:50 +02003494 fail = (struct ccw1 *)(addr_t)
3495 irb->scsw.cmd.cpa; /* failing CCW */
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003496 if (from < fail - 2) {
3497 from = fail - 2; /* there is a gap - print header */
3498 len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
3499 }
3500 to = min(fail + 1, last);
3501 len += dasd_eckd_dump_ccw_range(from, to, page + len);
3502
3503 /* print last CCWs (maximum 2) */
3504 from = max(from, ++to);
3505 if (from < last - 1) {
3506 from = last - 1; /* there is a gap - print header */
3507 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
3508 }
3509 len += dasd_eckd_dump_ccw_range(from, last, page + len);
3510 if (len > 0)
3511 printk("%s", page);
3512 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003513 free_page((unsigned long) page);
3514}
3515
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01003516
3517/*
3518 * Print sense data from a tcw.
3519 */
3520static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
3521 struct dasd_ccw_req *req, struct irb *irb)
3522{
3523 char *page;
3524 int len, sl, sct, residual;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01003525 struct tsb *tsb;
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01003526 u8 *sense, *rcq;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01003527
3528 page = (char *) get_zeroed_page(GFP_ATOMIC);
3529 if (page == NULL) {
Stefan Haberlandfc19f382009-03-26 15:23:49 +01003530 DBF_DEV_EVENT(DBF_WARNING, device, " %s",
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01003531 "No memory to dump sense data");
3532 return;
3533 }
3534 /* dump the sense data */
3535 len = sprintf(page, KERN_ERR PRINTK_HEADER
3536 " I/O status report for device %s:\n",
3537 dev_name(&device->cdev->dev));
3538 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
Stefan Weinhubera5a00612010-10-25 16:10:47 +02003539 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
3540 "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
3541 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
3542 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
3543 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
3544 irb->scsw.tm.fcxs, irb->scsw.tm.schxs,
3545 req ? req->intrc : 0);
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01003546 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3547 " device %s: Failing TCW: %p\n",
3548 dev_name(&device->cdev->dev),
3549 (void *) (addr_t) irb->scsw.tm.tcw);
3550
3551 tsb = NULL;
3552 sense = NULL;
Stefan Weinhubera5a00612010-10-25 16:10:47 +02003553 if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01003554 tsb = tcw_get_tsb(
3555 (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
3556
Stefan Haberlandb8fde722010-03-24 11:49:54 +01003557 if (tsb) {
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01003558 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3559 " tsb->length %d\n", tsb->length);
3560 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3561 " tsb->flags %x\n", tsb->flags);
3562 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3563 " tsb->dcw_offset %d\n", tsb->dcw_offset);
3564 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3565 " tsb->count %d\n", tsb->count);
3566 residual = tsb->count - 28;
3567 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3568 " residual %d\n", residual);
3569
3570 switch (tsb->flags & 0x07) {
3571 case 1: /* tsa_iostat */
3572 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3573 " tsb->tsa.iostat.dev_time %d\n",
3574 tsb->tsa.iostat.dev_time);
3575 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3576 " tsb->tsa.iostat.def_time %d\n",
3577 tsb->tsa.iostat.def_time);
3578 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3579 " tsb->tsa.iostat.queue_time %d\n",
3580 tsb->tsa.iostat.queue_time);
3581 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3582 " tsb->tsa.iostat.dev_busy_time %d\n",
3583 tsb->tsa.iostat.dev_busy_time);
3584 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3585 " tsb->tsa.iostat.dev_act_time %d\n",
3586 tsb->tsa.iostat.dev_act_time);
3587 sense = tsb->tsa.iostat.sense;
3588 break;
3589 case 2: /* ts_ddpc */
3590 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3591 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01003592 for (sl = 0; sl < 2; sl++) {
3593 len += sprintf(page + len,
3594 KERN_ERR PRINTK_HEADER
3595 " tsb->tsa.ddpc.rcq %2d-%2d: ",
3596 (8 * sl), ((8 * sl) + 7));
3597 rcq = tsb->tsa.ddpc.rcq;
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01003598 for (sct = 0; sct < 8; sct++) {
3599 len += sprintf(page + len, " %02x",
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01003600 rcq[8 * sl + sct]);
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01003601 }
3602 len += sprintf(page + len, "\n");
3603 }
3604 sense = tsb->tsa.ddpc.sense;
3605 break;
3606 case 3: /* tsa_intrg */
3607 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3608 " tsb->tsa.intrg.: not supported yet \n");
3609 break;
3610 }
3611
3612 if (sense) {
3613 for (sl = 0; sl < 4; sl++) {
3614 len += sprintf(page + len,
3615 KERN_ERR PRINTK_HEADER
3616 " Sense(hex) %2d-%2d:",
3617 (8 * sl), ((8 * sl) + 7));
3618 for (sct = 0; sct < 8; sct++) {
3619 len += sprintf(page + len, " %02x",
3620 sense[8 * sl + sct]);
3621 }
3622 len += sprintf(page + len, "\n");
3623 }
3624
3625 if (sense[27] & DASD_SENSE_BIT_0) {
3626 /* 24 Byte Sense Data */
3627 sprintf(page + len, KERN_ERR PRINTK_HEADER
3628 " 24 Byte: %x MSG %x, "
3629 "%s MSGb to SYSOP\n",
3630 sense[7] >> 4, sense[7] & 0x0f,
3631 sense[1] & 0x10 ? "" : "no");
3632 } else {
3633 /* 32 Byte Sense Data */
3634 sprintf(page + len, KERN_ERR PRINTK_HEADER
3635 " 32 Byte: Format: %x "
3636 "Exception class %x\n",
3637 sense[6] & 0x0f, sense[22] >> 4);
3638 }
3639 } else {
3640 sprintf(page + len, KERN_ERR PRINTK_HEADER
3641 " SORRY - NO VALID SENSE AVAILABLE\n");
3642 }
3643 } else {
3644 sprintf(page + len, KERN_ERR PRINTK_HEADER
3645 " SORRY - NO TSB DATA AVAILABLE\n");
3646 }
3647 printk("%s", page);
3648 free_page((unsigned long) page);
3649}
3650
3651static void dasd_eckd_dump_sense(struct dasd_device *device,
3652 struct dasd_ccw_req *req, struct irb *irb)
3653{
Stefan Weinhubera5a00612010-10-25 16:10:47 +02003654 if (scsw_is_tm(&irb->scsw))
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01003655 dasd_eckd_dump_sense_tcw(device, req, irb);
3656 else
3657 dasd_eckd_dump_sense_ccw(device, req, irb);
3658}
3659
Stefan Haberland501183f2010-05-17 10:00:10 +02003660static int dasd_eckd_pm_freeze(struct dasd_device *device)
Stefan Haberlandd41dd122009-06-16 10:30:25 +02003661{
3662 /*
3663 * the device should be disconnected from our LCU structure
3664 * on restore we will reconnect it and reread LCU specific
3665 * information like PAV support that might have changed
3666 */
3667 dasd_alias_remove_device(device);
3668 dasd_alias_disconnect_device_from_lcu(device);
3669
3670 return 0;
3671}
3672
Stefan Haberland501183f2010-05-17 10:00:10 +02003673static int dasd_eckd_restore_device(struct dasd_device *device)
Stefan Haberlandd41dd122009-06-16 10:30:25 +02003674{
3675 struct dasd_eckd_private *private;
Stefan Haberland6fca97a2009-10-06 10:34:15 +02003676 struct dasd_eckd_characteristics temp_rdc_data;
Stefan Haberlandd41dd122009-06-16 10:30:25 +02003677 int is_known, rc;
3678 struct dasd_uid temp_uid;
Stefan Haberlanda7602f62009-10-14 12:43:46 +02003679 unsigned long flags;
Stefan Haberlandd41dd122009-06-16 10:30:25 +02003680
Stefan Haberlandd41dd122009-06-16 10:30:25 +02003681 private = (struct dasd_eckd_private *) device->private;
3682
3683 /* Read Configuration Data */
3684 rc = dasd_eckd_read_conf(device);
3685 if (rc)
3686 goto out_err;
3687
Stefan Haberland2dedf0d2010-05-17 10:00:11 +02003688 dasd_eckd_get_uid(device, &temp_uid);
3689 /* Generate device unique id */
3690 rc = dasd_eckd_generate_uid(device);
3691 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
Stefan Haberlandd41dd122009-06-16 10:30:25 +02003692 if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
Stefan Haberlanda7602f62009-10-14 12:43:46 +02003693 dev_err(&device->cdev->dev, "The UID of the DASD has "
3694 "changed\n");
Stefan Haberland2dedf0d2010-05-17 10:00:11 +02003695 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
Stefan Haberlandd41dd122009-06-16 10:30:25 +02003696 if (rc)
3697 goto out_err;
Stefan Haberlandd41dd122009-06-16 10:30:25 +02003698
3699 /* register lcu with alias handling, enable PAV if this is a new lcu */
3700 is_known = dasd_alias_make_device_known_to_lcu(device);
3701 if (is_known < 0)
3702 return is_known;
3703 if (!is_known) {
Stefan Weinhuberf4ac1d02009-12-07 12:51:53 +01003704 dasd_eckd_validate_server(device);
3705 dasd_alias_lcu_setup_complete(device);
3706 } else
3707 dasd_alias_wait_for_lcu_setup(device);
3708
3709 /* RE-Read Configuration Data */
3710 rc = dasd_eckd_read_conf(device);
3711 if (rc)
3712 goto out_err;
Stefan Haberlandd41dd122009-06-16 10:30:25 +02003713
3714 /* Read Feature Codes */
Stefan Weinhuber68d1e5f2009-09-22 22:58:52 +02003715 dasd_eckd_read_features(device);
Stefan Haberlandd41dd122009-06-16 10:30:25 +02003716
3717 /* Read Device Characteristics */
Stefan Haberland68b781f2009-09-11 10:28:29 +02003718 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
Stefan Haberland6fca97a2009-10-06 10:34:15 +02003719 &temp_rdc_data, 64);
Stefan Haberlandd41dd122009-06-16 10:30:25 +02003720 if (rc) {
Stefan Haberlandb8ed5dd2009-12-07 12:51:52 +01003721 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
3722 "Read device characteristic failed, rc=%d", rc);
Stefan Haberlandd41dd122009-06-16 10:30:25 +02003723 goto out_err;
3724 }
Stefan Haberlanda7602f62009-10-14 12:43:46 +02003725 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
Stefan Haberland6fca97a2009-10-06 10:34:15 +02003726 memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
Stefan Haberlanda7602f62009-10-14 12:43:46 +02003727 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
Stefan Haberlandd41dd122009-06-16 10:30:25 +02003728
3729 /* add device to alias management */
3730 dasd_alias_add_device(device);
3731
3732 return 0;
3733
3734out_err:
Stefan Haberlande6125fb2009-06-22 12:08:17 +02003735 return -1;
Stefan Haberlandd41dd122009-06-16 10:30:25 +02003736}
3737
Stefan Haberland501183f2010-05-17 10:00:10 +02003738static int dasd_eckd_reload_device(struct dasd_device *device)
3739{
3740 struct dasd_eckd_private *private;
3741 int rc, old_base;
Stefan Haberland2dedf0d2010-05-17 10:00:11 +02003742 char print_uid[60];
3743 struct dasd_uid uid;
3744 unsigned long flags;
Stefan Haberland501183f2010-05-17 10:00:10 +02003745
3746 private = (struct dasd_eckd_private *) device->private;
Stefan Haberland2dedf0d2010-05-17 10:00:11 +02003747
3748 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
Stefan Haberland501183f2010-05-17 10:00:10 +02003749 old_base = private->uid.base_unit_addr;
Stefan Haberland2dedf0d2010-05-17 10:00:11 +02003750 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
3751
Stefan Haberland501183f2010-05-17 10:00:10 +02003752 /* Read Configuration Data */
3753 rc = dasd_eckd_read_conf(device);
3754 if (rc)
3755 goto out_err;
3756
Stefan Haberland2dedf0d2010-05-17 10:00:11 +02003757 rc = dasd_eckd_generate_uid(device);
Stefan Haberland501183f2010-05-17 10:00:10 +02003758 if (rc)
3759 goto out_err;
Stefan Haberland501183f2010-05-17 10:00:10 +02003760 /*
3761 * update unit address configuration and
3762 * add device to alias management
3763 */
3764 dasd_alias_update_add_device(device);
3765
Stefan Haberland2dedf0d2010-05-17 10:00:11 +02003766 dasd_eckd_get_uid(device, &uid);
3767
3768 if (old_base != uid.base_unit_addr) {
3769 if (strlen(uid.vduit) > 0)
3770 snprintf(print_uid, sizeof(print_uid),
3771 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
3772 uid.ssid, uid.base_unit_addr, uid.vduit);
Stefan Haberland501183f2010-05-17 10:00:10 +02003773 else
Stefan Haberland2dedf0d2010-05-17 10:00:11 +02003774 snprintf(print_uid, sizeof(print_uid),
3775 "%s.%s.%04x.%02x", uid.vendor, uid.serial,
3776 uid.ssid, uid.base_unit_addr);
Stefan Haberland501183f2010-05-17 10:00:10 +02003777
3778 dev_info(&device->cdev->dev,
3779 "An Alias device was reassigned to a new base device "
Stefan Haberland2dedf0d2010-05-17 10:00:11 +02003780 "with UID: %s\n", print_uid);
Stefan Haberland501183f2010-05-17 10:00:10 +02003781 }
3782 return 0;
3783
3784out_err:
3785 return -1;
3786}
3787
Stefan Haberlandd41dd122009-06-16 10:30:25 +02003788static struct ccw_driver dasd_eckd_driver = {
3789 .name = "dasd-eckd",
3790 .owner = THIS_MODULE,
3791 .ids = dasd_eckd_ids,
3792 .probe = dasd_eckd_probe,
3793 .remove = dasd_generic_remove,
3794 .set_offline = dasd_generic_set_offline,
3795 .set_online = dasd_eckd_set_online,
3796 .notify = dasd_generic_notify,
Stefan Weinhubera4d26c62011-01-05 12:48:03 +01003797 .path_event = dasd_generic_path_event,
Stefan Haberlandd41dd122009-06-16 10:30:25 +02003798 .freeze = dasd_generic_pm_freeze,
3799 .thaw = dasd_generic_restore_device,
3800 .restore = dasd_generic_restore_device,
Stefan Haberlanda23ed002010-05-26 23:27:09 +02003801 .uc_handler = dasd_generic_uc_handler,
Stefan Haberlandd41dd122009-06-16 10:30:25 +02003802};
Stefan Weinhuberf3eb5382009-03-26 15:23:48 +01003803
Linus Torvalds1da177e2005-04-16 15:20:36 -07003804/*
3805 * max_blocks is dependent on the amount of storage that is available
3806 * in the static io buffer for each device. Currently each device has
3807 * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
3808 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
3809 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
3810 * addition we have one define extent ccw + 16 bytes of data and one
3811 * locate record ccw + 16 bytes of data. That makes:
3812 * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
3813 * We want to fit two into the available memory so that we can immediately
3814 * start the next request if one finishes off. That makes 249.5 blocks
3815 * for one request. Give a little safety and the result is 240.
3816 */
3817static struct dasd_discipline dasd_eckd_discipline = {
3818 .owner = THIS_MODULE,
3819 .name = "ECKD",
3820 .ebcname = "ECKD",
Stefan Weinhuberef19298b2011-01-05 12:48:02 +01003821 .max_blocks = 190,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003822 .check_device = dasd_eckd_check_characteristics,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003823 .uncheck_device = dasd_eckd_uncheck_device,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003824 .do_analysis = dasd_eckd_do_analysis,
Stefan Weinhubera4d26c62011-01-05 12:48:03 +01003825 .verify_path = dasd_eckd_verify_path,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003826 .ready_to_online = dasd_eckd_ready_to_online,
3827 .online_to_ready = dasd_eckd_online_to_ready,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828 .fill_geometry = dasd_eckd_fill_geometry,
3829 .start_IO = dasd_start_IO,
3830 .term_IO = dasd_term_IO,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003831 .handle_terminated_request = dasd_eckd_handle_terminated_request,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003832 .format_device = dasd_eckd_format_device,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003833 .erp_action = dasd_eckd_erp_action,
3834 .erp_postaction = dasd_eckd_erp_postaction,
Stefan Weinhuber8e09f212008-01-26 14:11:23 +01003835 .handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt,
3836 .build_cp = dasd_eckd_build_alias_cp,
3837 .free_cp = dasd_eckd_free_alias_cp,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838 .dump_sense = dasd_eckd_dump_sense,
Stefan Haberlandfc19f382009-03-26 15:23:49 +01003839 .dump_sense_dbf = dasd_eckd_dump_sense_dbf,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003840 .fill_info = dasd_eckd_fill_info,
Christoph Hellwig1107ccf2006-03-24 03:15:20 -08003841 .ioctl = dasd_eckd_ioctl,
Stefan Haberlandd41dd122009-06-16 10:30:25 +02003842 .freeze = dasd_eckd_pm_freeze,
3843 .restore = dasd_eckd_restore_device,
Stefan Haberland501183f2010-05-17 10:00:10 +02003844 .reload = dasd_eckd_reload_device,
Stefan Haberland2dedf0d2010-05-17 10:00:11 +02003845 .get_uid = dasd_eckd_get_uid,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003846};
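/*
 * Editor's worked check of the arithmetic in the comment above the
 * discipline structure, not part of the driver and with illustrative
 * macro names. With the numbers given there,
 * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks fit into the
 * static buffer, and 499 / 2 = 249 (rounded down) per request when two
 * requests share it. The structure sets .max_blocks = 190 rather than
 * the 240 quoted in the comment; presumably the larger transport-mode
 * (tcw/itcw) channel programs need the extra headroom, but that
 * reasoning is an assumption, not stated in the source.
 */
#define DASD_ECKD_STATIC_BUF	 8192
#define DASD_ECKD_FIXED_OVERHEAD (24 + 136 + 8 + 16 + 8 + 16)
#define DASD_ECKD_BLOCKS_PER_BUF \
	((DASD_ECKD_STATIC_BUF - DASD_ECKD_FIXED_OVERHEAD) / 16)	/* 499 */
#define DASD_ECKD_BLOCKS_PER_REQ (DASD_ECKD_BLOCKS_PER_BUF / 2)		/* 249 */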
3847
3848static int __init
3849dasd_eckd_init(void)
3850{
Sebastian Ott736e6ea2009-06-12 10:26:38 +02003851 int ret;
3852
Linus Torvalds1da177e2005-04-16 15:20:36 -07003853 ASCEBC(dasd_eckd_discipline.ebcname, 4);
Stefan Weinhuberf932bce2010-08-09 18:12:59 +02003854 dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
3855 GFP_KERNEL | GFP_DMA);
3856 if (!dasd_reserve_req)
3857 return -ENOMEM;
Stefan Weinhubera4d26c62011-01-05 12:48:03 +01003858 path_verification_worker = kmalloc(sizeof(*path_verification_worker),
3859 GFP_KERNEL | GFP_DMA);
3860 if (!path_verification_worker) {
3861 kfree(dasd_reserve_req);
3862 return -ENOMEM;
3863 }
Sebastian Ott736e6ea2009-06-12 10:26:38 +02003864 ret = ccw_driver_register(&dasd_eckd_driver);
3865 if (!ret)
3866 wait_for_device_probe();
Stefan Weinhubera4d26c62011-01-05 12:48:03 +01003867 else {
3868 kfree(path_verification_worker);
Stefan Weinhuberf932bce2010-08-09 18:12:59 +02003869 kfree(dasd_reserve_req);
Stefan Weinhubera4d26c62011-01-05 12:48:03 +01003870 }
Sebastian Ott736e6ea2009-06-12 10:26:38 +02003871 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003872}
3873
3874static void __exit
3875dasd_eckd_cleanup(void)
3876{
3877 ccw_driver_unregister(&dasd_eckd_driver);
Stefan Weinhubera4d26c62011-01-05 12:48:03 +01003878 kfree(path_verification_worker);
Stefan Weinhuberf932bce2010-08-09 18:12:59 +02003879 kfree(dasd_reserve_req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003880}
3881
3882module_init(dasd_eckd_init);
3883module_exit(dasd_eckd_cleanup);