/*
 *  drivers/s390/char/tape_core.c
 *    basic function of the tape device driver
 *
 *  S390 and zSeries version
 *    Copyright IBM Corp. 2001, 2009
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *		 Michael Holzheu <holzheu@de.ibm.com>
 *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		 Stefan Bader <shbader@de.ibm.com>
 */

#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>		// for kernel parameters
#include <linux/kmod.h>		// for requesting modules
#include <linux/spinlock.h>	// for locks
#include <linux/vmalloc.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/types.h>		// for variable types

#define TAPE_DBF_AREA	tape_core_dbf

#include "tape.h"
#include "tape_std.h"

#define LONG_BUSY_TIMEOUT 180 /* seconds */

static void __tape_do_irq(struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(struct work_struct *);
static void tape_long_busy_timeout(unsigned long data);

/*
 * One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major.
 * The list is protected by the rwlock below.
 */
static LIST_HEAD(tape_device_list);
static DEFINE_RWLOCK(tape_device_lock);

/*
 * Pointer to debug area.
 */
debug_info_t *TAPE_DBF_AREA = NULL;
EXPORT_SYMBOL(TAPE_DBF_AREA);

/*
 * Printable strings for tape enumerations.
 */
const char *tape_state_verbose[TS_SIZE] =
{
	[TS_UNUSED]   = "UNUSED",
	[TS_IN_USE]   = "IN_USE",
	[TS_BLKUSE]   = "BLKUSE",
	[TS_INIT]     = "INIT ",
	[TS_NOT_OPER] = "NOT_OP"
};

const char *tape_op_verbose[TO_SIZE] =
{
	[TO_BLOCK] = "BLK",	[TO_BSB] = "BSB",
	[TO_BSF] = "BSF",	[TO_DSE] = "DSE",
	[TO_FSB] = "FSB",	[TO_FSF] = "FSF",
	[TO_LBL] = "LBL",	[TO_NOP] = "NOP",
	[TO_RBA] = "RBA",	[TO_RBI] = "RBI",
	[TO_RFO] = "RFO",	[TO_REW] = "REW",
	[TO_RUN] = "RUN",	[TO_WRI] = "WRI",
	[TO_WTM] = "WTM",	[TO_MSEN] = "MSN",
	[TO_LOAD] = "LOA",	[TO_READ_CONFIG] = "RCF",
	[TO_READ_ATTMSG] = "RAT",
	[TO_DIS] = "DIS",	[TO_ASSIGN] = "ASS",
	[TO_UNASSIGN] = "UAS",	[TO_CRYPT_ON] = "CON",
	[TO_CRYPT_OFF] = "COF",	[TO_KEKL_SET] = "KLS",
	[TO_KEKL_QUERY] = "KLQ", [TO_RDC] = "RDC",
};

static int devid_to_int(struct ccw_dev_id *dev_id)
{
	return dev_id->devno + (dev_id->ssid << 16);
}
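
/*
 * Worked example (added for illustration): a device with bus ID
 * 0.1.4711 has ssid 1 and devno 0x4711, so devid_to_int() yields
 * 0x4711 + (1 << 16) = 0x14711.  The devno fills the low 16 bits,
 * the subchannel set ID the bits above.
 */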

/*
 * Some channel attached tape specific attributes.
 *
 * FIXME: In the future the first_minor and blocksize attribute should be
 *        replaced by a link to the cdev tree.
 */
static ssize_t
tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
}

static
DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);

static ssize_t
tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
}

static
DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);

static ssize_t
tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
		"OFFLINE" : tape_state_verbose[tdev->tape_state]);
}

static
DEVICE_ATTR(state, 0444, tape_state_show, NULL);

static ssize_t
tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;
	ssize_t rc;

	tdev = dev_get_drvdata(dev);
	if (tdev->first_minor < 0)
		return scnprintf(buf, PAGE_SIZE, "N/A\n");

	spin_lock_irq(get_ccwdev_lock(tdev->cdev));
	if (list_empty(&tdev->req_queue))
		rc = scnprintf(buf, PAGE_SIZE, "---\n");
	else {
		struct tape_request *req;

		req = list_entry(tdev->req_queue.next, struct tape_request,
			list);
		rc = scnprintf(buf, PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
	}
	spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
	return rc;
}

static
DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);

static ssize_t
tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
}

static
DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);

static struct attribute *tape_attrs[] = {
	&dev_attr_medium_state.attr,
	&dev_attr_first_minor.attr,
	&dev_attr_state.attr,
	&dev_attr_operation.attr,
	&dev_attr_blocksize.attr,
	NULL
};

static struct attribute_group tape_attr_group = {
	.attrs = tape_attrs,
};
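
/*
 * Note (added for clarity): once tape_generic_probe() below registers
 * this group, the attributes appear as read-only files under the ccw
 * device node, e.g. /sys/bus/ccw/devices/0.0.0181/state and
 * .../operation (bus ID 0.0.0181 is just an example).
 */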

/*
 * Tape state functions
 */
void
tape_state_set(struct tape_device *device, enum tape_state newstate)
{
	const char *str;

	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(3, "ts_set err: not oper\n");
		return;
	}
	DBF_EVENT(4, "ts. dev: %x\n", device->first_minor);
	DBF_EVENT(4, "old ts:\t\n");
	if (device->tape_state < TS_SIZE && device->tape_state >= 0)
		str = tape_state_verbose[device->tape_state];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "%s\n", str);
	DBF_EVENT(4, "new ts:\t\n");
	if (newstate < TS_SIZE && newstate >= 0)
		str = tape_state_verbose[newstate];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "%s\n", str);
	device->tape_state = newstate;
	wake_up(&device->state_change_wq);
}

struct tape_med_state_work_data {
	struct tape_device *device;
	enum tape_medium_state state;
	struct work_struct work;
};

static void
tape_med_state_work_handler(struct work_struct *work)
{
	static char env_state_loaded[] = "MEDIUM_STATE=LOADED";
	static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED";
	struct tape_med_state_work_data *p =
		container_of(work, struct tape_med_state_work_data, work);
	struct tape_device *device = p->device;
	char *envp[] = { NULL, NULL };

	switch (p->state) {
	case MS_UNLOADED:
		pr_info("%s: The tape cartridge has been successfully "
			"unloaded\n", dev_name(&device->cdev->dev));
		envp[0] = env_state_unloaded;
		kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
		break;
	case MS_LOADED:
		pr_info("%s: A tape cartridge has been mounted\n",
			dev_name(&device->cdev->dev));
		envp[0] = env_state_loaded;
		kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
		break;
	default:
		break;
	}
	tape_put_device(device);
	kfree(p);
}

static void
tape_med_state_work(struct tape_device *device, enum tape_medium_state state)
{
	struct tape_med_state_work_data *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (p) {
		INIT_WORK(&p->work, tape_med_state_work_handler);
		p->device = tape_get_device(device);
		p->state = state;
		schedule_work(&p->work);
	}
}
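
/*
 * Note (added for clarity): tape_med_state_set() below runs under the
 * ccw device lock, usually straight from the interrupt path, so the
 * pr_info()/uevent pair is pushed to process context through the work
 * item above.  That is also why the allocation uses GFP_ATOMIC and why
 * a failed allocation simply drops the notification.
 */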
261
Linus Torvalds1da177e2005-04-16 15:20:36 -0700262void
263tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
264{
Martin Schwidefsky3b210e72010-11-10 10:05:52 +0100265 enum tape_medium_state oldstate;
266
267 oldstate = device->medium_state;
268 if (oldstate == newstate)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700269 return;
Martin Schwidefsky3b210e72010-11-10 10:05:52 +0100270 device->medium_state = newstate;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700271 switch(newstate){
272 case MS_UNLOADED:
273 device->tape_generic_status |= GMT_DR_OPEN(~0);
Martin Schwidefsky3b210e72010-11-10 10:05:52 +0100274 if (oldstate == MS_LOADED)
275 tape_med_state_work(device, MS_UNLOADED);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700276 break;
277 case MS_LOADED:
278 device->tape_generic_status &= ~GMT_DR_OPEN(~0);
Martin Schwidefsky3b210e72010-11-10 10:05:52 +0100279 if (oldstate == MS_UNLOADED)
280 tape_med_state_work(device, MS_LOADED);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700281 break;
282 default:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700283 break;
284 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700285 wake_up(&device->state_change_wq);
286}
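
/*
 * Illustrative sketch (not code from this driver): a discipline's
 * sense handling would typically feed this function from its interrupt
 * path, e.g.
 *
 *	if (sense_shows_cartridge(irb))		// hypothetical helper
 *		tape_med_state_set(device, MS_LOADED);
 *	else
 *		tape_med_state_set(device, MS_UNLOADED);
 *
 * Repeated calls with the same state are cheap; the early return above
 * makes the notification work run only on a real LOADED<->UNLOADED
 * transition.
 */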

/*
 * Stop running ccw. Has to be called with the device lock held.
 */
static int
__tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int retries;
	int rc;

	/* Check if interrupt has already been processed */
	if (request->callback == NULL)
		return 0;

	rc = 0;
	for (retries = 0; retries < 5; retries++) {
		rc = ccw_device_clear(device->cdev, (long) request);

		switch (rc) {
		case 0:
			request->status = TAPE_REQUEST_DONE;
			return 0;
		case -EBUSY:
			request->status = TAPE_REQUEST_CANCEL;
			schedule_delayed_work(&device->tape_dnr, 0);
			return 0;
		case -ENODEV:
			DBF_EXCEPTION(2, "device gone, retry\n");
			break;
		case -EIO:
			DBF_EXCEPTION(2, "I/O error, retry\n");
			break;
		default:
			BUG();
		}
	}

	return rc;
}

/*
 * Add device into the sorted list, giving it the first
 * available minor number.
 */
static int
tape_assign_minor(struct tape_device *device)
{
	struct tape_device *tmp;
	int minor;

	minor = 0;
	write_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (minor < tmp->first_minor)
			break;
		minor += TAPE_MINORS_PER_DEV;
	}
	if (minor >= 256) {
		write_unlock(&tape_device_lock);
		return -ENODEV;
	}
	device->first_minor = minor;
	list_add_tail(&device->node, &tmp->node);
	write_unlock(&tape_device_lock);
	return 0;
}

/* remove device from the list */
static void
tape_remove_minor(struct tape_device *device)
{
	write_lock(&tape_device_lock);
	list_del_init(&device->node);
	device->first_minor = -1;
	write_unlock(&tape_device_lock);
}
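
/*
 * Worked example (assuming TAPE_MINORS_PER_DEV is 2): three devices
 * set online in a row get first_minor 0, 2 and 4.  If the device at
 * minor 2 goes away, the sorted list keeps a gap there, and the next
 * device to come online is assigned first_minor 2 again because the
 * scan above stops at the first hole.
 */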

/*
 * Set a device online.
 *
 * This function is called by the common I/O layer to move a device from the
 * detected-but-offline state into the online state.
 * If we return an error (RC < 0) the device remains in the offline state. This
 * can happen if the device is assigned somewhere else, for example.
 */
int
tape_generic_online(struct tape_device *device,
		   struct tape_discipline *discipline)
{
	int rc;

	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);

	if (device->tape_state != TS_INIT) {
		DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
		return -EINVAL;
	}

	init_timer(&device->lb_timeout);
	device->lb_timeout.function = tape_long_busy_timeout;

	/* Let the discipline have a go at the device. */
	device->discipline = discipline;
	if (!try_module_get(discipline->owner)) {
		return -EINVAL;
	}

	rc = discipline->setup_device(device);
	if (rc)
		goto out;
	rc = tape_assign_minor(device);
	if (rc)
		goto out_discipline;

	rc = tapechar_setup_device(device);
	if (rc)
		goto out_minor;

	tape_state_set(device, TS_UNUSED);

	DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);

	return 0;

out_minor:
	tape_remove_minor(device);
out_discipline:
	device->discipline->cleanup_device(device);
	device->discipline = NULL;
out:
	module_put(discipline->owner);
	return rc;
}
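
/*
 * Illustrative sketch (wrapper name is hypothetical): a discipline
 * module wires this up through its ccw driver's set_online callback,
 * roughly like the 3480/3490 discipline would:
 *
 *	static int tape_34xx_online(struct ccw_device *cdev)
 *	{
 *		return tape_generic_online(dev_get_drvdata(&cdev->dev),
 *					   &tape_discipline_34xx);
 *	}
 */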

static void
tape_cleanup_device(struct tape_device *device)
{
	tapechar_cleanup_device(device);
	device->discipline->cleanup_device(device);
	module_put(device->discipline->owner);
	tape_remove_minor(device);
	tape_med_state_set(device, MS_UNKNOWN);
}

/*
 * Suspend device.
 *
 * Called by the common I/O layer if the drive should be suspended on user
 * request. We refuse to suspend if the device is loaded or in use for the
 * following reason:
 * While the Linux guest is suspended, it might be logged off which causes
 * devices to be detached. Tape devices are automatically rewound and unloaded
 * during DETACH processing (unless the tape device was attached with the
 * NOASSIGN or MULTIUSER option). After rewind/unload, there is no way to
 * resume the original state of the tape device, since we would need to
 * manually re-load the cartridge which was active at suspend time.
 */
int tape_generic_pm_suspend(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_pm_suspend(%p)\n",
		device->cdev_id, device);

	if (device->medium_state != MS_UNLOADED) {
		pr_err("A cartridge is loaded in tape device %s, "
		       "refusing to suspend\n", dev_name(&cdev->dev));
		return -EBUSY;
	}

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
	case TS_UNUSED:
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	default:
		pr_err("Tape device %s is busy, refusing to "
		       "suspend\n", dev_name(&cdev->dev));
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive suspended.\n", device->cdev_id);
	return 0;
}

/*
 * Set device offline.
 *
 * Called by the common I/O layer if the drive should be set offline on user
 * request. We may prevent this by returning an error.
 * Manual offline is only allowed while the drive is not in use.
 */
int
tape_generic_offline(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
		device->cdev_id, device);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		tape_state_set(device, TS_INIT);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		DBF_EVENT(3, "(%08x): Set offline failed "
			"- drive in use.\n",
			device->cdev_id);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
	return 0;
}

/*
 * Allocate memory for a new device structure.
 */
static struct tape_device *
tape_alloc_device(void)
{
	struct tape_device *device;

	device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
	if (device == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		return ERR_PTR(-ENOMEM);
	}
	device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
	if (device->modeset_byte == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&device->mutex);
	INIT_LIST_HEAD(&device->req_queue);
	INIT_LIST_HEAD(&device->node);
	init_waitqueue_head(&device->state_change_wq);
	init_waitqueue_head(&device->wait_queue);
	device->tape_state = TS_INIT;
	device->medium_state = MS_UNKNOWN;
	*device->modeset_byte = 0;
	device->first_minor = -1;
	atomic_set(&device->ref_count, 1);
	INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);

	return device;
}

/*
 * Get a reference to an existing device structure. This will automatically
 * increment the reference count.
 */
struct tape_device *
tape_get_device(struct tape_device *device)
{
	int count;

	count = atomic_inc_return(&device->ref_count);
	DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);
	return device;
}

/*
 * Decrease the reference counter of a device structure. If the
 * reference counter reaches zero, free the device structure.
 */
void
tape_put_device(struct tape_device *device)
{
	int count;

	count = atomic_dec_return(&device->ref_count);
	DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);
	BUG_ON(count < 0);
	if (count == 0) {
		kfree(device->modeset_byte);
		kfree(device);
	}
}
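
/*
 * Usage note (added for clarity): every tape_get_device() must be
 * paired with exactly one tape_put_device().  A caller that stashes
 * the pointer, e.g. in a timer or work item, takes its own reference
 * first:
 *
 *	p->device = tape_get_device(device);	// ref for the work item
 *	...
 *	tape_put_device(p->device);		// dropped by the handler
 *
 * Compare tape_med_state_work() and its handler above.
 */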

/*
 * Find tape device by a device index.
 */
struct tape_device *
tape_find_device(int devindex)
{
	struct tape_device *device, *tmp;

	device = ERR_PTR(-ENODEV);
	read_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
			device = tape_get_device(tmp);
			break;
		}
	}
	read_unlock(&tape_device_lock);
	return device;
}
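
/*
 * Illustrative sketch (hypothetical caller): a character device open
 * routine maps its minor number back to a device index and must drop
 * the returned reference when it is done, e.g.
 *
 *	device = tape_find_device(iminor(inode) / TAPE_MINORS_PER_DEV);
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 *	...
 *	tape_put_device(device);
 */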

/*
 * Driverfs tape probe function.
 */
int
tape_generic_probe(struct ccw_device *cdev)
{
	struct tape_device *device;
	int ret;
	struct ccw_dev_id dev_id;

	device = tape_alloc_device();
	if (IS_ERR(device))
		return -ENODEV;
	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
				     CCWDEV_DO_MULTIPATH);
	ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
	if (ret) {
		tape_put_device(device);
		return ret;
	}
	dev_set_drvdata(&cdev->dev, device);
	cdev->handler = __tape_do_irq;
	device->cdev = cdev;
	ccw_device_get_id(cdev, &dev_id);
	device->cdev_id = devid_to_int(&dev_id);
	return ret;
}

static void
__tape_discard_requests(struct tape_device *device)
{
	struct tape_request *request;
	struct list_head *l, *n;

	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);
		if (request->status == TAPE_REQUEST_IN_IO)
			request->status = TAPE_REQUEST_DONE;
		list_del(&request->list);

		/* Decrease ref_count for removed request. */
		request->device = NULL;
		tape_put_device(device);
		request->rc = -EIO;
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}

/*
 * Driverfs tape remove function.
 *
 * This function is called whenever the common I/O layer detects the device
 * gone. This can happen at any time and we cannot refuse.
 */
void
tape_generic_remove(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return;
	}
	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
		tape_state_set(device, TS_NOT_OPER);
	case TS_NOT_OPER:
		/*
		 * Nothing to do.
		 */
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		/*
		 * Need only to release the device.
		 */
		tape_state_set(device, TS_NOT_OPER);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		/*
		 * There may be requests on the queue. We will not get
		 * an interrupt for a request that was running. So we
		 * just post them all as I/O errors.
		 */
		DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
			device->cdev_id);
		pr_warning("%s: A tape unit was detached while in "
			   "use\n", dev_name(&device->cdev->dev));
		tape_state_set(device, TS_NOT_OPER);
		__tape_discard_requests(device);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
	}

	device = dev_get_drvdata(&cdev->dev);
	if (device) {
		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
		dev_set_drvdata(&cdev->dev, NULL);
		tape_put_device(device);
	}
}

/*
 * Allocate a new tape ccw request
 */
struct tape_request *
tape_alloc_request(int cplength, int datasize)
{
	struct tape_request *request;

	BUG_ON(datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);

	request = kzalloc(sizeof(struct tape_request), GFP_KERNEL);
	if (request == NULL) {
		DBF_EXCEPTION(1, "cqra nomem\n");
		return ERR_PTR(-ENOMEM);
	}
	/* allocate channel program */
	if (cplength > 0) {
		request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
					  GFP_ATOMIC | GFP_DMA);
		if (request->cpaddr == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	/* alloc small kernel buffer */
	if (datasize > 0) {
		request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA);
		if (request->cpdata == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request->cpaddr);
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
		request->cpdata);

	return request;
}

/*
 * Free tape ccw request
 */
void
tape_free_request(struct tape_request *request)
{
	DBF_LH(6, "Free request %p\n", request);

	if (request->device)
		tape_put_device(request->device);
	kfree(request->cpdata);
	kfree(request->cpaddr);
	kfree(request);
}

static int
__tape_start_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	rc = ccw_device_start(
		device->cdev,
		request->cpaddr,
		(unsigned long) request,
		0x00,
		request->options
	);
	if (rc == 0) {
		request->status = TAPE_REQUEST_IN_IO;
	} else if (rc == -EBUSY) {
		/* The common I/O subsystem is currently busy. Retry later. */
		request->status = TAPE_REQUEST_QUEUED;
		schedule_delayed_work(&device->tape_dnr, 0);
		rc = 0;
	} else {
		/* Start failed. Remove request and indicate failure. */
		DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
	}
	return rc;
}

static void
__tape_start_next_request(struct tape_device *device)
{
	struct list_head *l, *n;
	struct tape_request *request;
	int rc;

	DBF_LH(6, "__tape_start_next_request(%p)\n", device);
	/*
	 * Try to start each request on the request queue until one is
	 * started successfully.
	 */
	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);

		/*
		 * Avoid race condition if bottom-half was triggered more than
		 * once.
		 */
		if (request->status == TAPE_REQUEST_IN_IO)
			return;
		/*
		 * Request has already been stopped. We have to wait until
		 * the request is removed from the queue in the interrupt
		 * handling.
		 */
		if (request->status == TAPE_REQUEST_DONE)
			return;

		/*
		 * We wanted to cancel the request but the common I/O layer
		 * was busy at that time. This can only happen if this
		 * function is called by delayed_next_request.
		 * Otherwise we start the next request on the queue.
		 */
		if (request->status == TAPE_REQUEST_CANCEL) {
			rc = __tape_cancel_io(device, request);
		} else {
			rc = __tape_start_io(device, request);
		}
		if (rc == 0)
			return;

		/* Set ending status. */
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}

static void
tape_delayed_next_request(struct work_struct *work)
{
	struct tape_device *device =
		container_of(work, struct tape_device, tape_dnr.work);

	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	__tape_start_next_request(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

static void tape_long_busy_timeout(unsigned long data)
{
	struct tape_request *request;
	struct tape_device *device;

	device = (struct tape_device *) data;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	request = list_entry(device->req_queue.next, struct tape_request, list);
	BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
	DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
	__tape_start_next_request(device);
	device->lb_timeout.data = 0UL;
	tape_put_device(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

static void
__tape_end_request(struct tape_device *device, struct tape_request *request,
		   int rc)
{
	DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
	if (request) {
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}

	/* Start next request. */
	if (!list_empty(&device->req_queue))
		__tape_start_next_request(device);
}

/*
 * Write sense data to dbf
 */
void
tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
		    struct irb *irb)
{
	unsigned int *sptr;
	const char *op;

	if (request != NULL)
		op = tape_op_verbose[request->op];
	else
		op = "---";
	DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n",
		  irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
	DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
	sptr = (unsigned int *) irb->ecw;
	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
	DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
	DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
	DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
}

/*
 * I/O helper function. Adds the request to the request queue
 * and starts it if the tape is idle. Has to be called with
 * the device lock held.
 */
static int
__tape_start_request(struct tape_device *device, struct tape_request *request)
{
	int rc;

	switch (request->op) {
	case TO_MSEN:
	case TO_ASSIGN:
	case TO_UNASSIGN:
	case TO_READ_ATTMSG:
	case TO_RDC:
		if (device->tape_state == TS_INIT)
			break;
		if (device->tape_state == TS_UNUSED)
			break;
	default:
		if (device->tape_state == TS_BLKUSE)
			break;
		if (device->tape_state != TS_IN_USE)
			return -ENODEV;
	}

	/* Increase use count of device for the added request. */
	request->device = tape_get_device(device);

	if (list_empty(&device->req_queue)) {
		/* No other requests are on the queue. Start this one. */
		rc = __tape_start_io(device, request);
		if (rc)
			return rc;

		DBF_LH(5, "Request %p added for execution.\n", request);
		list_add(&request->list, &device->req_queue);
	} else {
		DBF_LH(5, "Request %p add to queue.\n", request);
		request->status = TAPE_REQUEST_QUEUED;
		list_add_tail(&request->list, &device->req_queue);
	}
	return 0;
}

/*
 * Add the request to the request queue, try to start it if the
 * tape is idle. Return without waiting for end of i/o.
 */
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
	int rc;

	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}
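
/*
 * Illustrative sketch (hypothetical callback): an asynchronous caller
 * sets request->callback before submitting and cleans up there, e.g.
 *
 *	static void example_async_done(struct tape_request *request,
 *				       void *data)
 *	{
 *		tape_free_request(request);
 *	}
 *	...
 *	request->callback = example_async_done;
 *	rc = tape_do_io_async(device, request);
 *
 * If submission fails the callback is never invoked, so the caller
 * still owns the request and must free it itself in that case.
 */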

/*
 * tape_do_io/__tape_wake_up
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait uninterruptibly for its completion.
 */
static void
__tape_wake_up(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up((wait_queue_head_t *) data);
}

int
tape_do_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up;
	request->callback_data = &device->wait_queue;
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	wait_event(device->wait_queue, (request->callback == NULL));
	/* Get rc from request */
	return request->rc;
}

/*
 * tape_do_io_interruptible/__tape_wake_up_interruptible
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait interruptibly for its completion.
 */
static void
__tape_wake_up_interruptible(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up_interruptible((wait_queue_head_t *) data);
}

int
tape_do_io_interruptible(struct tape_device *device,
			 struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up_interruptible;
	request->callback_data = &device->wait_queue;
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	rc = wait_event_interruptible(device->wait_queue,
				      (request->callback == NULL));
	if (rc != -ERESTARTSYS)
		/* Request finished normally. */
		return request->rc;

	/* Interrupted by a signal. We have to stop the current request. */
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc == 0) {
		/* Wait for the interrupt that acknowledges the halt. */
		do {
			rc = wait_event_interruptible(
				device->wait_queue,
				(request->callback == NULL)
			);
		} while (rc == -ERESTARTSYS);

		DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
		rc = -ERESTARTSYS;
	}
	return rc;
}

/*
 * Stop running ccw.
 */
int
tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Tape interrupt routine, called from the ccw_device layer
 */
static void
__tape_do_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct tape_device *device;
	struct tape_request *request;
	int rc;

	device = dev_get_drvdata(&cdev->dev);
	if (device == NULL) {
		return;
	}
	request = (struct tape_request *) intparm;

	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);

	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb)) {
		/* FIXME: What to do with the request? */
		switch (PTR_ERR(irb)) {
		case -ETIMEDOUT:
			DBF_LH(1, "(%08x): Request timed out\n",
				device->cdev_id);
		case -EIO:
			__tape_end_request(device, request, -EIO);
			break;
		default:
			DBF_LH(1, "(%08x): Unexpected i/o error %li\n",
				device->cdev_id, PTR_ERR(irb));
		}
		return;
	}

	/*
	 * If the condition code is not zero and the start function bit is
	 * still set, this is a deferred error and the last start I/O did
	 * not succeed. At this point the condition that caused the deferred
	 * error might still apply. So we just schedule the request to be
	 * started later.
	 */
	if (irb->scsw.cmd.cc != 0 &&
	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
	    (request->status == TAPE_REQUEST_IN_IO)) {
		DBF_EVENT(3, "(%08x): deferred cc=%i, fctl=%i. restarting\n",
			device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
		request->status = TAPE_REQUEST_QUEUED;
		schedule_delayed_work(&device->tape_dnr, HZ);
		return;
	}

	/* May be an unsolicited irq */
	if (request != NULL)
		request->rescnt = irb->scsw.cmd.count;
	else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
		 !list_empty(&device->req_queue)) {
		/* Not Ready to Ready after long busy ? */
		struct tape_request *req;
		req = list_entry(device->req_queue.next,
				 struct tape_request, list);
		if (req->status == TAPE_REQUEST_LONG_BUSY) {
			DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
			if (del_timer(&device->lb_timeout)) {
				device->lb_timeout.data = 0UL;
				tape_put_device(device);
				__tape_start_next_request(device);
			}
			return;
		}
	}
	if (irb->scsw.cmd.dstat != 0x0c) {
		/* Set the 'ONLINE' flag depending on sense byte 1 */
		if (*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
			device->tape_generic_status |= GMT_ONLINE(~0);
		else
			device->tape_generic_status &= ~GMT_ONLINE(~0);

		/*
		 * Any request that does not come back with channel end
		 * and device end is unusual. Log the sense data.
		 */
		DBF_EVENT(3, "-- Tape Interrupthandler --\n");
		tape_dump_sense_dbf(device, request, irb);
	} else {
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
	}
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "tape:device is not operational\n");
		return;
	}

	/*
	 * Requests that were canceled still come back with an interrupt.
	 * To detect these requests the state will be set to TAPE_REQUEST_DONE.
	 */
	if (request != NULL && request->status == TAPE_REQUEST_DONE) {
		__tape_end_request(device, request, -EIO);
		return;
	}

	rc = device->discipline->irq(device, request, irb);
	/*
	 * rc < 0 : request finished unsuccessfully.
	 * rc == TAPE_IO_SUCCESS: request finished successfully.
	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
	 * rc == TAPE_IO_RETRY: request finished but needs another go.
	 * rc == TAPE_IO_STOP: request needs to get terminated.
	 */
	switch (rc) {
	case TAPE_IO_SUCCESS:
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
		__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_PENDING:
		break;
	case TAPE_IO_LONG_BUSY:
		device->lb_timeout.data =
			(unsigned long) tape_get_device(device);
		device->lb_timeout.expires = jiffies +
			LONG_BUSY_TIMEOUT * HZ;
		DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
		add_timer(&device->lb_timeout);
		request->status = TAPE_REQUEST_LONG_BUSY;
		break;
	case TAPE_IO_RETRY:
		rc = __tape_start_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_STOP:
		rc = __tape_cancel_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	default:
		if (rc > 0) {
			DBF_EVENT(6, "xunknownrc\n");
			__tape_end_request(device, request, -EIO);
		} else {
			__tape_end_request(device, request, rc);
		}
		break;
	}
}

/*
 * Tape device open function used by the tape_char frontend.
 */
int
tape_open(struct tape_device *device)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "TAPE:nodev\n");
		rc = -ENODEV;
	} else if (device->tape_state == TS_IN_USE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->tape_state == TS_BLKUSE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->discipline != NULL &&
		   !try_module_get(device->discipline->owner)) {
		DBF_EVENT(6, "TAPE:nodisc\n");
		rc = -ENODEV;
	} else {
		tape_state_set(device, TS_IN_USE);
		rc = 0;
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Tape device release function used by the tape_char frontend.
 */
int
tape_release(struct tape_device *device)
{
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_IN_USE)
		tape_state_set(device, TS_UNUSED);
	module_put(device->discipline->owner);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return 0;
}
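
/*
 * Usage note (added for clarity): the character device frontend pairs
 * these two calls, roughly
 *
 *	rc = tape_open(device);		// claims the drive (TS_IN_USE)
 *	...
 *	tape_release(device);		// back to TS_UNUSED
 *
 * so at most one opener owns the drive at a time; a second tape_open()
 * while the state is TS_IN_USE fails with -EBUSY.
 */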

/*
 * Execute a magnetic tape command a number of times.
 */
int
tape_mtop(struct tape_device *device, int mt_op, int mt_count)
{
	tape_mtop_fn fn;
	int rc;

	DBF_EVENT(6, "TAPE:mtio\n");
	DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
	DBF_EVENT(6, "TAPE:arg: %x\n", mt_count);

	if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
		return -EINVAL;
	fn = device->discipline->mtop_array[mt_op];
	if (fn == NULL)
		return -EINVAL;

	/* We assume that the backends can handle count up to 500. */
	if (mt_op == MTBSR || mt_op == MTFSR || mt_op == MTFSF ||
	    mt_op == MTBSF || mt_op == MTFSFM || mt_op == MTBSFM) {
		rc = 0;
		for (; mt_count > 500; mt_count -= 500)
			if ((rc = fn(device, 500)) != 0)
				break;
		if (rc == 0)
			rc = fn(device, mt_count);
	} else
		rc = fn(device, mt_count);
	return rc;
}
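
/*
 * Worked example: tape_mtop(device, MTFSF, 1200) issues fn(device, 500)
 * twice and then fn(device, 200), stopping early and returning the
 * error code of the first chunk that fails.
 */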

/*
 * Tape init function.
 */
static int
tape_init(void)
{
	TAPE_DBF_AREA = debug_register("tape", 2, 2, 4 * sizeof(long));
	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
#ifdef DBF_LIKE_HELL
	debug_set_level(TAPE_DBF_AREA, 6);
#endif
	DBF_EVENT(3, "tape init\n");
	tape_proc_init();
	tapechar_init();
	return 0;
}

/*
 * Tape exit function.
 */
static void
tape_exit(void)
{
	DBF_EVENT(6, "tape exit\n");

	/* Get rid of the frontends */
	tapechar_exit();
	tape_proc_cleanup();
	debug_unregister(TAPE_DBF_AREA);
}

MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
	      "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
MODULE_LICENSE("GPL");

module_init(tape_init);
module_exit(tape_exit);

EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_generic_pm_suspend);
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_cancel_io);
EXPORT_SYMBOL(tape_mtop);