/*
 * CCW device PGID and path verification I/O handling.
 *
 * Copyright IBM Corp. 2002, 2009
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "cio.h"
#include "cio_debug.h"
#include "device.h"
#include "io_sch.h"

#define PGID_RETRIES	256
#define PGID_TIMEOUT	(10 * HZ)

static void verify_start(struct ccw_device *cdev);

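/*
 * Overview: verify_start() probes each channel path of the device. When
 * path grouping is enabled, a SENSE PGID (snid_*) is issued per path and
 * snid_done() analyzes the returned PGIDs; paths that still need grouping
 * are then handled with SET PGID (spid_*). Without path grouping, a plain
 * NOOP (nop_*) per path checks operability. The final result is reported
 * through verify_done() and ccw_device_verify_done().
 */
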
/*
 * Process path verification data and report result.
 */
static void verify_done(struct ccw_device *cdev, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	int mpath = cdev->private->flags.mpath;
	int pgroup = cdev->private->flags.pgroup;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	if (sch->config.mp != mpath) {
		sch->config.mp = mpath;
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
		      "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
		      sch->vpm);
	ccw_device_verify_done(cdev, rc);
}

/*
 * Create channel program to perform a NOOP.
 */
static void nop_build_cp(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;

	cp->cmd_code = CCW_CMD_NOOP;
	cp->cda = 0;
	cp->count = 0;
	cp->flags = CCW_FLAG_SLI;
	req->cp = cp;
}

/*
 * Perform NOOP on a single path.
 */
static void nop_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
			      ~cdev->private->path_noirq_mask);
	if (!req->lpm)
		goto out_nopath;
	nop_build_cp(cdev);
	ccw_request_start(cdev);
	return;

out_nopath:
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Adjust NOOP I/O status.
 */
static enum io_status nop_filter(struct ccw_device *cdev, void *data,
				 struct irb *irb, enum io_status status)
{
	/* Only subchannel status might indicate a path error. */
	if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
		return IO_DONE;
	return status;
}

/*
 * Process NOOP request result for a single path.
 */
static void nop_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		sch->vpm |= req->lpm;
		break;
	case -ETIME:
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	nop_do(cdev);
	return;

err:
	verify_done(cdev, rc);
}
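
/*
 * Paths are tried one at a time using single-bit path masks: req->lpm
 * starts at 0x80 (path position 0) and is shifted right after each path,
 * while lpm_adjust() skips ahead to the next bit that is still set in the
 * given mask. For example (illustrative), with pam & opm == 0xc0 only the
 * two left-most paths are tried before the sequence ends.
 */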

/*
 * Create channel program to perform SET PGID on a single path.
 */
static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;
	int i = pathmask_to_pos(req->lpm);
	struct pgid *pgid = &cdev->private->pgid[i];

	pgid->inf.fc = fn;
	cp->cmd_code = CCW_CMD_SET_PGID;
	cp->cda = (u32) (addr_t) pgid;
	cp->count = sizeof(*pgid);
	cp->flags = CCW_FLAG_SLI;
	req->cp = cp;
}

static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
{
	if (rc) {
		/* We don't know the path groups' state. Abort. */
		verify_done(cdev, rc);
		return;
	}
	/*
	 * Path groups have been reset. Restart path verification but
	 * leave paths in path_noirq_mask out.
	 */
	cdev->private->flags.pgid_unknown = 0;
	verify_start(cdev);
}

/*
 * Reset path groups and restart path verification, leaving unusable paths out.
 */
static void pgid_wipeout_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
		      id->ssid, id->devno, cdev->private->pgid_valid_mask,
		      cdev->private->path_noirq_mask);

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam;
	req->callback = pgid_wipeout_callback;
	fn = SPID_FUNC_DISBAND;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
}

/*
 * Perform establish/resign SET PGID on a single path.
 */
static void spid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	/* Use next available path that is not already in the correct state. */
	req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
	if (!req->lpm)
		goto out_nopath;
	/* Channel program setup. */
	if (req->lpm & sch->opm)
		fn = SPID_FUNC_ESTABLISH;
	else
		fn = SPID_FUNC_RESIGN;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
	return;

out_nopath:
	if (cdev->private->flags.pgid_unknown) {
		/* At least one SPID could be partially done. */
		pgid_wipeout_start(cdev);
		return;
	}
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Process SET PGID request result for a single path.
 */
static void spid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		sch->vpm |= req->lpm & sch->opm;
		break;
	case -ETIME:
		cdev->private->flags.pgid_unknown = 1;
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	case -EOPNOTSUPP:
		if (cdev->private->flags.mpath) {
			/* Try without multipathing. */
			cdev->private->flags.mpath = 0;
			goto out_restart;
		}
		/* Try without pathgrouping. */
		cdev->private->flags.pgroup = 0;
		goto out_restart;
	default:
		goto err;
	}
	req->lpm >>= 1;
	spid_do(cdev);
	return;

out_restart:
	verify_start(cdev);
	return;
err:
	verify_done(cdev, rc);
}

/*
 * Initialize the SET PGID request and start processing on the first path.
 */
static void spid_start(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = 0x80;
	req->singlepath = 1;
	req->callback = spid_callback;
	spid_do(cdev);
}

static int pgid_is_reset(struct pgid *p)
{
	char *c;

	for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
		if (*c != 0)
			return 0;
	}
	return 1;
}

static int pgid_cmp(struct pgid *p1, struct pgid *p2)
{
	return memcmp((char *) p1 + 1, (char *) p2 + 1,
		      sizeof(struct pgid) - 1);
}
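
/*
 * Note on the PGID layout used above: byte 0 of struct pgid is a union
 * holding either the SET PGID function code (inf.fc, written by
 * spid_build_cp()) or the path state returned by SENSE PGID (inf.ps).
 * Only the remaining bytes identify the path group itself, which is why
 * pgid_is_reset() and pgid_cmp() skip the first byte.
 */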

/*
 * Determine pathgroup state from PGID data.
 */
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
			 int *mismatch, u8 *reserved, u8 *reset)
{
	struct pgid *pgid = &cdev->private->pgid[0];
	struct pgid *first = NULL;
	int lpm;
	int i;

	*mismatch = 0;
	*reserved = 0;
	*reset = 0;
	for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
		if ((cdev->private->pgid_valid_mask & lpm) == 0)
			continue;
		if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
			*reserved |= lpm;
		if (pgid_is_reset(pgid)) {
			*reset |= lpm;
			continue;
		}
		if (!first) {
			first = pgid;
			continue;
		}
		if (pgid_cmp(pgid, first) != 0)
			*mismatch = 1;
	}
	if (!first)
		first = &channel_subsystems[0]->global_pgid;
	*p = first;
}

static u8 pgid_to_donepm(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct pgid *pgid;
	int i;
	int lpm;
	u8 donepm = 0;

	/* Set bits for paths which are already in the target state. */
	for (i = 0; i < 8; i++) {
		lpm = 0x80 >> i;
		if ((cdev->private->pgid_valid_mask & lpm) == 0)
			continue;
		pgid = &cdev->private->pgid[i];
		if (sch->opm & lpm) {
			if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
				continue;
		} else {
			if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
				continue;
		}
		if (cdev->private->flags.mpath) {
			if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
				continue;
		} else {
			if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
				continue;
		}
		donepm |= lpm;
	}

	return donepm;
}

static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
{
	int i;

	for (i = 0; i < 8; i++)
		memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
}

/*
 * Process SENSE PGID data and report result.
 */
static void snid_done(struct ccw_device *cdev, int rc)
{
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct pgid *pgid;
	int mismatch = 0;
	u8 reserved = 0;
	u8 reset = 0;
	u8 donepm;

	if (rc)
		goto out;
	pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
	if (reserved == cdev->private->pgid_valid_mask)
		rc = -EUSERS;
	else if (mismatch)
		rc = -EOPNOTSUPP;
	else {
		donepm = pgid_to_donepm(cdev);
		sch->vpm = donepm & sch->opm;
		cdev->private->pgid_reset_mask |= reset;
		cdev->private->pgid_todo_mask &=
			~(donepm | cdev->private->path_noirq_mask);
		pgid_fill(cdev, pgid);
	}
out:
	CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
		      "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
		      id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
		      cdev->private->pgid_todo_mask, mismatch, reserved, reset);
	switch (rc) {
	case 0:
		if (cdev->private->flags.pgid_unknown) {
			pgid_wipeout_start(cdev);
			return;
		}
		/* Anything left to do? */
		if (cdev->private->pgid_todo_mask == 0) {
			verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
			return;
		}
		/* Perform path-grouping. */
		spid_start(cdev);
		break;
	case -EOPNOTSUPP:
		/* Path-grouping not supported. */
		cdev->private->flags.pgroup = 0;
		cdev->private->flags.mpath = 0;
		verify_start(cdev);
		break;
	default:
		verify_done(cdev, rc);
	}
}

/*
 * Create channel program to perform a SENSE PGID on a single path.
 */
static void snid_build_cp(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;
	int i = pathmask_to_pos(req->lpm);

	/* Channel program setup. */
	cp->cmd_code = CCW_CMD_SENSE_PGID;
	cp->cda = (u32) (addr_t) &cdev->private->pgid[i];
	cp->count = sizeof(struct pgid);
	cp->flags = CCW_FLAG_SLI;
	req->cp = cp;
}

/*
 * Perform SENSE PGID on a single path.
 */
static void snid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int ret;

	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
			      ~cdev->private->path_noirq_mask);
	if (!req->lpm)
		goto out_nopath;
	snid_build_cp(cdev);
	ccw_request_start(cdev);
	return;

out_nopath:
	if (cdev->private->pgid_valid_mask)
		ret = 0;
	else if (cdev->private->path_noirq_mask)
		ret = -ETIME;
	else
		ret = -EACCES;
	snid_done(cdev, ret);
}

/*
 * Process SENSE PGID request result for a single path.
 */
static void snid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		cdev->private->pgid_valid_mask |= req->lpm;
		break;
	case -ETIME:
		cdev->private->flags.pgid_unknown = 1;
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	snid_do(cdev);
	return;

err:
	snid_done(cdev, rc);
}

/*
 * Perform path verification.
 */
static void verify_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	struct ccw_dev_id *devid = &cdev->private->dev_id;

	sch->vpm = 0;
	sch->lpm = sch->schib.pmcw.pam;

	/* Initialize PGID data. */
	memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
	cdev->private->pgid_valid_mask = 0;
	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
	cdev->private->path_notoper_mask = 0;

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = 0x80;
	req->singlepath = 1;
	if (cdev->private->flags.pgroup) {
		CIO_TRACE_EVENT(4, "snid");
		CIO_HEX_EVENT(4, devid, sizeof(*devid));
		req->callback = snid_callback;
		snid_do(cdev);
	} else {
		CIO_TRACE_EVENT(4, "nop");
		CIO_HEX_EVENT(4, devid, sizeof(*devid));
		req->filter = nop_filter;
		req->callback = nop_callback;
		nop_do(cdev);
	}
}

/**
 * ccw_device_verify_start - perform path verification
 * @cdev: ccw device
 *
 * Perform an I/O on each available channel path to @cdev to determine which
 * paths are operational. The resulting path mask is stored in sch->vpm.
 * If device options specify pathgrouping, establish a pathgroup for the
 * operational paths. When finished, call ccw_device_verify_done with a
 * return code specifying the result.
 */
void ccw_device_verify_start(struct ccw_device *cdev)
{
	CIO_TRACE_EVENT(4, "vrfy");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/*
	 * Initialize pathgroup and multipath state with target values.
	 * They may change in the course of path verification.
	 */
	cdev->private->flags.pgroup = cdev->private->options.pgroup;
	cdev->private->flags.mpath = cdev->private->options.mpath;
	cdev->private->flags.doverify = 0;
	cdev->private->path_noirq_mask = 0;
	verify_start(cdev);
}
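
/*
 * A driver typically opts in to path grouping before its device goes
 * online, for example (illustrative):
 *
 *	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
 *				     CCWDEV_DO_MULTIPATH);
 *
 * which sets the pgroup/mpath options consumed above; the device state
 * machine then invokes ccw_device_verify_start() when the device is
 * brought online or path events require re-verification.
 */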

/*
 * Process disband SET PGID request result.
 */
static void disband_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	cdev->private->flags.mpath = 0;
	if (sch->config.mp) {
		sch->config.mp = 0;
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
		      rc);
	ccw_device_disband_done(cdev, rc);
}

/**
 * ccw_device_disband_start - disband pathgroup
 * @cdev: ccw device
 *
 * Execute a SET PGID channel program on @cdev to disband a previously
 * established pathgroup. When finished, call ccw_device_disband_done with
 * a return code specifying the result.
 */
void ccw_device_disband_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	CIO_TRACE_EVENT(4, "disb");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam & sch->opm;
	req->singlepath = 1;
	req->callback = disband_callback;
	fn = SPID_FUNC_DISBAND;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
}

struct stlck_data {
	struct completion done;
	int rc;
};

/*
 * Create channel program to perform an unconditional reserve + release
 * (steal lock) using the two provided 32-byte buffers.
 */
static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;

	cp[0].cmd_code = CCW_CMD_STLCK;
	cp[0].cda = (u32) (addr_t) buf1;
	cp[0].count = 32;
	cp[0].flags = CCW_FLAG_CC;
	cp[1].cmd_code = CCW_CMD_RELEASE;
	cp[1].cda = (u32) (addr_t) buf2;
	cp[1].count = 32;
	cp[1].flags = 0;
	req->cp = cp;
}

/*
 * Store the steal lock result and complete the waiter.
 */
static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct stlck_data *sdata = data;

	sdata->rc = rc;
	complete(&sdata->done);
}

/**
 * ccw_device_stlck_start - perform unconditional release
 * @cdev: ccw device
 * @data: data pointer to be passed to stlck_callback
 * @buf1: data pointer used in channel program
 * @buf2: data pointer used in channel program
 *
 * Execute a channel program on @cdev to release an existing PGID reservation.
 */
static void ccw_device_stlck_start(struct ccw_device *cdev, void *data,
				   void *buf1, void *buf2)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	CIO_TRACE_EVENT(4, "stlck");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam & sch->opm;
	req->data = data;
	req->callback = stlck_callback;
	stlck_build_cp(cdev, buf1, buf2);
	ccw_request_start(cdev);
}

/*
 * Perform unconditional reserve + release.
 */
int ccw_device_stlck(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct stlck_data data;
	u8 *buffer;
	int rc;

	/* Check if steal lock operation is valid for this device. */
	if (cdev->drv) {
		if (!cdev->private->options.force)
			return -EINVAL;
	}
	buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
	init_completion(&data.done);
	data.rc = -EIO;
	spin_lock_irq(sch->lock);
	rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
	if (rc)
		goto out_unlock;
	/* Perform operation. */
	cdev->private->state = DEV_STATE_STEAL_LOCK;
	ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
	spin_unlock_irq(sch->lock);
	/* Wait for operation to finish. */
	if (wait_for_completion_interruptible(&data.done)) {
		/* Got a signal. */
		spin_lock_irq(sch->lock);
		ccw_request_cancel(cdev);
		spin_unlock_irq(sch->lock);
		wait_for_completion(&data.done);
	}
	rc = data.rc;
	/* Check results. */
	spin_lock_irq(sch->lock);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_BOXED;
out_unlock:
	spin_unlock_irq(sch->lock);
	kfree(buffer);

	return rc;
}
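
/*
 * ccw_device_stlck() is typically reached when a device is boxed (reserved
 * by another system) and its driver allowed forcing it online, e.g.
 * (illustrative) via ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE_ONLINE)
 * together with writing "force" to the device's online sysfs attribute;
 * the resulting unconditional reserve + release breaks the foreign
 * reservation before onlining continues.
 */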