/*
 * Linux driver for System z and s390 unit record devices
 * (z/VM virtual punch, reader, printer)
 *
 * Copyright IBM Corp. 2001, 2007
 * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
 *	    Michael Holzheu <holzheu@de.ibm.com>
 *	    Frank Munzert <munzert@de.ibm.com>
 */

#include <linux/cdev.h>

#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>

#include "vmur.h"

/*
 * Driver overview
 *
 * Unit record device support is implemented as a character device driver.
 * We can fit at least 16 bits into a device minor number and use the
 * simple method of mapping a character device number with minor abcd
 * to the unit record device with devno abcd.
 * I/O to virtual unit record devices is handled as follows:
 * Reads: Diagnose code 0x14 (input spool file manipulation)
 *        is used to read spool data page-wise.
 * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
 *         is available by reading the sysfs attribute reclen. Each write()
 *         to the device must specify an integral multiple of reclen; at
 *         most 511 records are transferred per write.
 */
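/*
 * Illustrative user space sketch (not part of the driver; assumptions
 * marked): punching one fixed-length record through the character
 * device. The node name /dev/vmpun-0.0.000d and the 80 byte record
 * length are assumed for the example; the real node name (vmrdr-*,
 * vmpun-*, vmprt-*, see ur_set_online) and record length (sysfs attr
 * reclen) depend on the device. Because write() must be a multiple of
 * reclen, the record is padded with blanks to the full length.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int punch_record(const char *text)
 *	{
 *		char rec[80];
 *		size_t n = strlen(text);
 *		ssize_t rc;
 *		int fd;
 *
 *		if (n > sizeof(rec))
 *			n = sizeof(rec);
 *		memset(rec, ' ', sizeof(rec));
 *		memcpy(rec, text, n);
 *		fd = open("/dev/vmpun-0.0.000d", O_WRONLY);
 *		if (fd < 0)
 *			return -1;
 *		rc = write(fd, rec, sizeof(rec));
 *		close(fd);
 *		return rc == (ssize_t) sizeof(rec) ? 0 : -1;
 *	}
 */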

static char ur_banner[] = "z/VM virtual unit record device driver";

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");

#define PRINTK_HEADER "vmur: "

static dev_t ur_first_dev_maj_min;
static struct class *vmur_class;
static struct debug_info *vmur_dbf;

/* We put the device's record length (for writes) in the driver_info field */
static struct ccw_device_id ur_ids[] = {
	{ CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
	{ CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
	{ /* end of list */ }
};

MODULE_DEVICE_TABLE(ccw, ur_ids);

static int ur_probe(struct ccw_device *cdev);
static void ur_remove(struct ccw_device *cdev);
static int ur_set_online(struct ccw_device *cdev);
static int ur_set_offline(struct ccw_device *cdev);

static struct ccw_driver ur_driver = {
	.name		= "vmur",
	.owner		= THIS_MODULE,
	.ids		= ur_ids,
	.probe		= ur_probe,
	.remove		= ur_remove,
	.set_online	= ur_set_online,
	.set_offline	= ur_set_offline,
};

/*
 * Allocation, freeing, getting and putting of urdev structures
 */
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
	struct urdev *urd;

	urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
	if (!urd)
		return NULL;
	urd->cdev = cdev;
	urd->reclen = cdev->id.driver_info;
	ccw_device_get_id(cdev, &urd->dev_id);
	mutex_init(&urd->io_mutex);
	mutex_init(&urd->open_mutex);
	return urd;
}

static void urdev_free(struct urdev *urd)
{
	kfree(urd);
}

/*
 * This is how the character device driver gets a reference to a
 * ur device. When this call returns successfully, a reference has
 * been taken (by get_device) on the underlying kobject. The recipient
 * of this urdev pointer must eventually drop it with urdev_put(urd)
 * which does the corresponding put_device().
 */
static struct urdev *urdev_get_from_devno(u16 devno)
{
	char bus_id[16];
	struct ccw_device *cdev;

	sprintf(bus_id, "0.0.%04x", devno);
	cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
	if (!cdev)
		return NULL;

	return cdev->dev.driver_data;
}

static void urdev_put(struct urdev *urd)
{
	put_device(&urd->cdev->dev);
}

/*
 * Low-level functions to do I/O to a ur device.
 *     alloc_chan_prog
 *     free_chan_prog
 *     do_ur_io
 *     ur_int_handler
 *
 * alloc_chan_prog allocates and builds the channel program
 * free_chan_prog frees memory of the channel program
 *
 * do_ur_io issues the channel program to the device and blocks waiting
 * on a completion event it publishes at urd->io_done. The function
 * serialises itself on the device's mutex so that only one I/O
 * is issued at a time (and that I/O is synchronous).
 *
 * ur_int_handler catches the "I/O done" interrupt, writes the
 * subchannel status word into the scsw member of the urdev structure
 * and complete()s the io_done to wake the waiting do_ur_io.
 *
 * The caller of do_ur_io is responsible for freeing the channel program
 * that alloc_chan_prog returned (via free_chan_prog).
 */

static void free_chan_prog(struct ccw1 *cpa)
{
	struct ccw1 *ptr = cpa;

	while (ptr->cda) {
		kfree((void *)(addr_t) ptr->cda);
		ptr++;
	}
	kfree(cpa);
}

/*
 * alloc_chan_prog
 * The channel program we use is write commands chained together
 * with a final NOP CCW command-chained on (which ensures that CE and DE
 * are presented together in a single interrupt instead of as separate
 * interrupts unless an incorrect length indication kicks in first). The
 * data length in each CCW is reclen.
 */
static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
				    int reclen)
{
	struct ccw1 *cpa;
	void *kbuf;
	int i;

	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);

	/*
	 * We chain a NOP onto the writes to force CE+DE together.
	 * That means we allocate room for CCWs to cover count/reclen
	 * records plus a NOP.
	 */
	cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1),
		      GFP_KERNEL | GFP_DMA);
	if (!cpa)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < rec_count; i++) {
		cpa[i].cmd_code = WRITE_CCW_CMD;
		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
		cpa[i].count = reclen;
		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
		if (!kbuf) {
			free_chan_prog(cpa);
			return ERR_PTR(-ENOMEM);
		}
		cpa[i].cda = (u32)(addr_t) kbuf;
		if (copy_from_user(kbuf, ubuf, reclen)) {
			free_chan_prog(cpa);
			return ERR_PTR(-EFAULT);
		}
		ubuf += reclen;
	}
	/* The following NOP CCW forces CE+DE to be presented together */
	cpa[i].cmd_code = CCW_CMD_NOOP;
	return cpa;
}

static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
	int rc;
	struct ccw_device *cdev = urd->cdev;
	DECLARE_COMPLETION_ONSTACK(event);

	TRACE("do_ur_io: cpa=%p\n", cpa);

	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;

	urd->io_done = &event;

	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = ccw_device_start(cdev, cpa, 1, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
	if (rc)
		goto out;

	wait_for_completion(&event);
	TRACE("do_ur_io: I/O complete\n");
	rc = 0;

out:
	mutex_unlock(&urd->io_mutex);
	return rc;
}

/*
 * ur interrupt handler, called from the ccw_device layer
 */
static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
			   struct irb *irb)
{
	struct urdev *urd;

	TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
	      intparm, irb->scsw.cstat, irb->scsw.dstat, irb->scsw.count);

	if (!intparm) {
		TRACE("ur_int_handler: unsolicited interrupt\n");
		return;
	}
	urd = cdev->dev.driver_data;
	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb))
		urd->io_request_rc = PTR_ERR(irb);
	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		urd->io_request_rc = 0;
	else
		urd->io_request_rc = -EIO;

	complete(urd->io_done);
}

/*
 * reclen sysfs attribute - The record length to be used for write CCWs
 */
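/*
 * The attribute is created directly on the ccw device (see ur_probe /
 * ur_create_attributes), so it appears in the device's sysfs directory,
 * e.g. /sys/bus/ccw/devices/0.0.000d/reclen (bus id here is only an
 * example).
 */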
static ssize_t ur_attr_reclen_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct urdev *urd = dev->driver_data;

	return sprintf(buf, "%zu\n", urd->reclen);
}

static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);

static int ur_create_attributes(struct device *dev)
{
	return device_create_file(dev, &dev_attr_reclen);
}

static void ur_remove_attributes(struct device *dev)
{
	device_remove_file(dev, &dev_attr_reclen);
}

/*
 * diagnose code 0x210 - retrieve device information
 * cc=0  normal completion, we have a real device
 * cc=1  CP paging error
 * cc=2  The virtual device exists, but is not associated with a real device
 * cc=3  Invalid device address, or the virtual device does not exist
 */
static int get_urd_class(struct urdev *urd)
{
	static struct diag210 ur_diag210;
	int cc;

	ur_diag210.vrdcdvno = urd->dev_id.devno;
	ur_diag210.vrdclen = sizeof(struct diag210);

	cc = diag210(&ur_diag210);
	switch (cc) {
	case 0:
		return -ENOTSUPP;
	case 2:
		return ur_diag210.vrdcvcla; /* virtual device class */
	case 3:
		return -ENODEV;
	default:
		return -EIO;
	}
}

/*
 * Allocation and freeing of urfile structures
 */
static struct urfile *urfile_alloc(struct urdev *urd)
{
	struct urfile *urf;

	urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
	if (!urf)
		return NULL;
	urf->urd = urd;

	TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
	      urf->dev_reclen);

	return urf;
}

static void urfile_free(struct urfile *urf)
{
	TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
	kfree(urf);
}

/*
 * The fops implementation of the character device driver
 */
static ssize_t do_write(struct urdev *urd, const char __user *udata,
			size_t count, size_t reclen, loff_t *ppos)
{
	struct ccw1 *cpa;
	int rc;

	cpa = alloc_chan_prog(udata, count / reclen, reclen);
	if (IS_ERR(cpa))
		return PTR_ERR(cpa);

	rc = do_ur_io(urd, cpa);
	if (rc)
		goto fail_kfree_cpa;

	if (urd->io_request_rc) {
		rc = urd->io_request_rc;
		goto fail_kfree_cpa;
	}
	*ppos += count;
	rc = count;

fail_kfree_cpa:
	free_chan_prog(cpa);
	return rc;
}

static ssize_t ur_write(struct file *file, const char __user *udata,
			size_t count, loff_t *ppos)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_write: count=%zu\n", count);

	if (count == 0)
		return 0;

	if (count % urf->dev_reclen)
		return -EINVAL;	/* count must be a multiple of reclen */

	if (count > urf->dev_reclen * MAX_RECS_PER_IO)
		count = urf->dev_reclen * MAX_RECS_PER_IO;

	return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
}

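/*
 * Issue diagnose code 0x14 (spool file manipulation): Rx is passed in a
 * compiler-chosen register (%2), Ry is general register 2 (ry1) and
 * Ry+1 is general register 3 (the subcode). On 64 bit kernels the
 * diagnose is issued in 31-bit addressing mode (sam31/sam64). The
 * condition code is extracted with ipm/srl and returned.
 */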
static int do_diag_14(unsigned long rx, unsigned long ry1,
		      unsigned long subcode)
{
	register unsigned long _ry1 asm("2") = ry1;
	register unsigned long _ry2 asm("3") = subcode;
	int rc = 0;

	asm volatile(
#ifdef CONFIG_64BIT
		"   sam31\n"
		"   diag %2,2,0x14\n"
		"   sam64\n"
#else
		"   diag %2,2,0x14\n"
#endif
		"   ipm %0\n"
		"   srl %0,28\n"
		: "=d" (rc), "+d" (_ry2)
		: "d" (rx), "d" (_ry1)
		: "cc");

	TRACE("diag 14: subcode=0x%lx, cc=%i\n", subcode, rc);
	return rc;
}

/*
 * diagnose code 0x14 subcode 0x0028 - position spool file to designated
 *				       record
 * cc=0  normal completion
 * cc=2  no file active on the virtual reader or device not ready
 * cc=3  record specified is beyond EOF
 */
static int diag_position_to_record(int devno, int record)
{
	int cc;

	cc = do_diag_14(record, devno, 0x28);
	switch (cc) {
	case 0:
		return 0;
	case 2:
		return -ENOMEDIUM;
	case 3:
		return -ENODATA; /* position beyond end of file */
	default:
		return -EIO;
	}
}

/*
 * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
 * cc=0  normal completion
 * cc=1  EOF reached
 * cc=2  no file active on the virtual reader, and no file eligible
 * cc=3  file already active on the virtual reader or specified virtual
 *	 reader does not exist or is not a reader
 */
static int diag_read_file(int devno, char *buf)
{
	int cc;

	cc = do_diag_14((unsigned long) buf, devno, 0x00);
	switch (cc) {
	case 0:
		return 0;
	case 1:
		return -ENODATA;
	case 2:
		return -ENOMEDIUM;
	default:
		return -EIO;
	}
}

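/*
 * Read up to count bytes starting at *offs from the spool file that is
 * open on the reader: position the file to the 4K page containing
 * *offs, read whole pages via diag 0x14 subcode 0 and copy only the
 * requested slice to user space. When the read starts inside the first
 * page of the file and a record length is known, it is patched into
 * the buffer at FILE_RECLEN_OFFSET so that user space can see it.
 */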
static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
			   loff_t *offs)
{
	size_t len, copied, res;
	char *buf;
	int rc;
	u16 reclen;
	struct urdev *urd;

	urd = ((struct urfile *) file->private_data)->urd;
	reclen = ((struct urfile *) file->private_data)->file_reclen;

	rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
	if (rc == -ENODATA)
		return 0;
	if (rc)
		return rc;

	len = min((size_t) PAGE_SIZE, count);
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	copied = 0;
	res = (size_t) (*offs % PAGE_SIZE);
	do {
		rc = diag_read_file(urd->dev_id.devno, buf);
		if (rc == -ENODATA)
			break;
		if (rc)
			goto fail;
		if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
			*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
		len = min(count - copied, PAGE_SIZE - res);
		if (copy_to_user(ubuf + copied, buf + res, len)) {
			rc = -EFAULT;
			goto fail;
		}
		res = 0;
		copied += len;
	} while (copied != count);

	*offs += copied;
	rc = copied;
fail:
	kfree(buf);
	return rc;
}

static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
		       loff_t *offs)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);

	if (count == 0)
		return 0;

	urd = ((struct urfile *) file->private_data)->urd;
	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;
	rc = diag14_read(file, ubuf, count, offs);
	mutex_unlock(&urd->io_mutex);
	return rc;
}

/*
 * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
 * cc=0  normal completion
 * cc=1  no files on reader queue or no subsequent file
 * cc=2  spid specified is invalid
 */
static int diag_read_next_file_info(struct file_control_block *buf, int spid)
{
	int cc;

	cc = do_diag_14((unsigned long) buf, spid, 0xfff);
	switch (cc) {
	case 0:
		return 0;
	default:
		return -ENODATA;
	}
}

static int verify_device(struct urdev *urd)
{
	struct file_control_block fcb;
	char *buf;
	int rc;

	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0; /* no check needed here */
	case DEV_CLASS_UR_I:
		/* check for empty reader device (beginning of chain) */
		rc = diag_read_next_file_info(&fcb, 0);
		if (rc)
			return rc;
		/* if file is in hold status, we do not read it */
		if (fcb.file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD))
			return -EPERM;
		/* open file on virtual reader */
		buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		rc = diag_read_file(urd->dev_id.devno, buf);
		kfree(buf);
		if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
			return rc;
		/* check if the file on top of the queue is open now */
		rc = diag_read_next_file_info(&fcb, 0);
		if (rc)
			return rc;
		if (!(fcb.file_stat & FLG_IN_USE))
			return -EMFILE;
		return 0;
	default:
		return -ENOTSUPP;
	}
}

static int get_file_reclen(struct urdev *urd)
{
	struct file_control_block fcb;
	int rc;

	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0;
	case DEV_CLASS_UR_I:
		rc = diag_read_next_file_info(&fcb, 0);
		if (rc)
			return rc;
		break;
	default:
		return -ENOTSUPP;
	}
	if (fcb.file_stat & FLG_CP_DUMP)
		return 0;

	return fcb.rec_len;
}

static int ur_open(struct inode *inode, struct file *file)
{
	u16 devno;
	struct urdev *urd;
	struct urfile *urf;
	unsigned short accmode;
	int rc;

	accmode = file->f_flags & O_ACCMODE;

	if (accmode == O_RDWR)
		return -EACCES;

	/*
	 * We treat the minor number as the devno of the ur device
	 * to find in the driver tree.
	 */
	devno = MINOR(file->f_dentry->d_inode->i_rdev);

	urd = urdev_get_from_devno(devno);
	if (!urd)
		return -ENXIO;

	if (file->f_flags & O_NONBLOCK) {
		if (!mutex_trylock(&urd->open_mutex)) {
			rc = -EBUSY;
			goto fail_put;
		}
	} else {
		if (mutex_lock_interruptible(&urd->open_mutex)) {
			rc = -ERESTARTSYS;
			goto fail_put;
		}
	}

	TRACE("ur_open\n");

	if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
	    ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
		TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
		rc = -EACCES;
		goto fail_unlock;
	}

	rc = verify_device(urd);
	if (rc)
		goto fail_unlock;

	urf = urfile_alloc(urd);
	if (!urf) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	urf->dev_reclen = urd->reclen;
	rc = get_file_reclen(urd);
	if (rc < 0)
		goto fail_urfile_free;
	urf->file_reclen = rc;
	file->private_data = urf;
	return 0;

fail_urfile_free:
	urfile_free(urf);
fail_unlock:
	mutex_unlock(&urd->open_mutex);
fail_put:
	urdev_put(urd);
	return rc;
}

static int ur_release(struct inode *inode, struct file *file)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_release\n");
	mutex_unlock(&urf->urd->open_mutex);
	urdev_put(urf->urd);
	urfile_free(urf);
	return 0;
}

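/*
 * Seeking is supported only on the reader and only to multiples of 4K
 * (PAGE_SIZE); diag14_read translates the resulting file position into
 * a spool record number with *offs / PAGE_SIZE + 1.
 */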
static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t newpos;

	if ((file->f_flags & O_ACCMODE) != O_RDONLY)
		return -ESPIPE; /* seek allowed only for reader */
	if (offset % PAGE_SIZE)
		return -ESPIPE; /* only multiples of 4K allowed */
	switch (whence) {
	case 0: /* SEEK_SET */
		newpos = offset;
		break;
	case 1: /* SEEK_CUR */
		newpos = file->f_pos + offset;
		break;
	default:
		return -EINVAL;
	}
	file->f_pos = newpos;
	return newpos;
}

static struct file_operations ur_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ur_open,
	.release = ur_release,
	.read	 = ur_read,
	.write	 = ur_write,
	.llseek  = ur_llseek,
};

/*
 * ccw_device infrastructure:
 *     ur_probe gets its own ref to the device (i.e. get_device),
 *     creates the struct urdev, the device attributes, sets up
 *     the interrupt handler and validates the virtual unit record device.
 *     ur_remove removes the device attributes, frees the struct urdev
 *     and drops (put_device) the ref to the device we got in ur_probe.
 */
static int ur_probe(struct ccw_device *cdev)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_probe: cdev=%p state=%d\n", cdev, *(int *) cdev->private);

	if (!get_device(&cdev->dev))
		return -ENODEV;

	urd = urdev_alloc(cdev);
	if (!urd) {
		rc = -ENOMEM;
		goto fail;
	}
	rc = ur_create_attributes(&cdev->dev);
	if (rc) {
		rc = -ENOMEM;
		goto fail;
	}
	cdev->dev.driver_data = urd;
	cdev->handler = ur_int_handler;

	/* validate virtual unit record device */
	urd->class = get_urd_class(urd);
	if (urd->class < 0) {
		rc = urd->class;
		goto fail;
	}
	if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
		rc = -ENOTSUPP;
		goto fail;
	}

	return 0;

fail:
	urdev_free(urd);
	put_device(&cdev->dev);
	return rc;
}

static void ur_remove(struct ccw_device *cdev)
{
	struct urdev *urd = cdev->dev.driver_data;

	TRACE("ur_remove\n");
	if (cdev->online)
		ur_set_offline(cdev);
	ur_remove_attributes(&cdev->dev);
	urdev_free(urd);
	put_device(&cdev->dev);
}

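/*
 * ur_set_online allocates and registers the character device for the
 * unit record device: the minor number is the devno, and the node
 * created in the vmur class is named vmrdr-<busid>, vmpun-<busid> or
 * vmprt-<busid> depending on the device type and class.
 * ur_set_offline undoes this again.
 */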
static int ur_set_online(struct ccw_device *cdev)
{
	struct urdev *urd;
	int minor, major, rc;
	char node_id[16];

	TRACE("ur_set_online: cdev=%p state=%d\n", cdev,
	      *(int *) cdev->private);

	if (!try_module_get(ur_driver.owner))
		return -EINVAL;

	urd = (struct urdev *) cdev->dev.driver_data;
	minor = urd->dev_id.devno;
	major = MAJOR(ur_first_dev_maj_min);

	urd->char_device = cdev_alloc();
	if (!urd->char_device) {
		rc = -ENOMEM;
		goto fail_module_put;
	}

	cdev_init(urd->char_device, &ur_fops);
	urd->char_device->dev = MKDEV(major, minor);
	urd->char_device->owner = ur_fops.owner;

	rc = cdev_add(urd->char_device, urd->char_device->dev, 1);
	if (rc)
		goto fail_free_cdev;
	if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
		if (urd->class == DEV_CLASS_UR_I)
			sprintf(node_id, "vmrdr-%s", cdev->dev.bus_id);
		if (urd->class == DEV_CLASS_UR_O)
			sprintf(node_id, "vmpun-%s", cdev->dev.bus_id);
	} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
		sprintf(node_id, "vmprt-%s", cdev->dev.bus_id);
	} else {
		rc = -ENOTSUPP;
		goto fail_free_cdev;
	}

	urd->device = device_create(vmur_class, NULL, urd->char_device->dev,
				    "%s", node_id);
	if (IS_ERR(urd->device)) {
		rc = PTR_ERR(urd->device);
		TRACE("ur_set_online: device_create rc=%d\n", rc);
		goto fail_free_cdev;
	}

	return 0;

fail_free_cdev:
	cdev_del(urd->char_device);
fail_module_put:
	module_put(ur_driver.owner);

	return rc;
}

static int ur_set_offline(struct ccw_device *cdev)
{
	struct urdev *urd;

	TRACE("ur_set_offline: cdev=%p cdev->private=%p state=%d\n",
	      cdev, cdev->private, *(int *) cdev->private);
	urd = (struct urdev *) cdev->dev.driver_data;
	device_destroy(vmur_class, urd->char_device->dev);
	cdev_del(urd->char_device);
	module_put(ur_driver.owner);

	return 0;
}

/*
 * Module initialisation and cleanup
 */
static int __init ur_init(void)
{
	int rc;
	dev_t dev;

	if (!MACHINE_IS_VM) {
		PRINT_ERR("%s is only available under z/VM.\n", ur_banner);
		return -ENODEV;
	}

	vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
	if (!vmur_dbf)
		return -ENOMEM;
	rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
	if (rc)
		goto fail_free_dbf;

	debug_set_level(vmur_dbf, 6);

	rc = ccw_driver_register(&ur_driver);
	if (rc)
		goto fail_free_dbf;

	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
	if (rc) {
		PRINT_ERR("alloc_chrdev_region failed: err = %d\n", rc);
		goto fail_unregister_driver;
	}
	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);

	vmur_class = class_create(THIS_MODULE, "vmur");
	if (IS_ERR(vmur_class)) {
		rc = PTR_ERR(vmur_class);
		goto fail_unregister_region;
	}
	PRINT_INFO("%s loaded.\n", ur_banner);
	return 0;

fail_unregister_region:
	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
fail_unregister_driver:
	ccw_driver_unregister(&ur_driver);
fail_free_dbf:
	debug_unregister(vmur_dbf);
	return rc;
}

static void __exit ur_exit(void)
{
	class_destroy(vmur_class);
	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
	ccw_driver_unregister(&ur_driver);
	debug_unregister(vmur_dbf);
	PRINT_INFO("%s unloaded.\n", ur_banner);
}

module_init(ur_init);
module_exit(ur_exit);